blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7429637b3d1eff61bac629cdaf2031d9d72d1832
|
a29c6e83ae4f9010941d15c8fd4cfc67680bb054
|
/ml/m15_randomSearch3.py
|
676fef1a96a144265ebefcfd7f4e4e1df752e92b
|
[] |
no_license
|
ym0179/bit_seoul
|
f1ff5faf4ae20fbc8c0e2ed10a005f8bd4b2c2b8
|
14d1fb2752312790c39898fc53a45c1cf427a4d1
|
refs/heads/master
| 2023-02-27T19:52:23.577540
| 2021-02-08T00:30:16
| 2021-02-08T00:30:16
| 311,265,926
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,570
|
py
|
#Day12
#2020-11-24
# 당뇨병 데이터
# 모델 : RandomForestRegressor
import pandas as pd
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, KFold, cross_val_score, GridSearchCV, RandomizedSearchCV
from sklearn.metrics import accuracy_score, r2_score
import warnings
from sklearn.svm import LinearSVC, SVC
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
warnings.filterwarnings('ignore')
# 1. 데이터
x,y = load_diabetes(return_X_y=True)
# train-test split
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=66, shuffle=True, train_size=0.8)
# 2. 모델
kfold = KFold(n_splits=5, shuffle=True)
params = [
{'n_estimators' : [300, 400, 500], #결정 트리의 개수, default=10, 많을 수록 좋은 성능이 나올 "수"도 있음 (시간이 오래걸림)
'max_depth' : [6, 8, 10], #트리의 깊이, default=None(완벽하게 클래스 값이 결정될 때 까지 분할), 깊이가 깊어지면 과적합될 수 있으므로 적절히 제어 필요
'min_samples_leaf' : [7, 10, 12, 14], #리프노드가 되기 위한 최소한의 샘플 데이터 수, default=2, min_samples_split과 함께 과적합 제어 용도
'min_samples_split' : [12, 14, 16], #노드를 분할하기 위한 최소한의 데이터 수, default=2, 과적합을 제어하는데 사용 (작게 설정할 수록 분할 노드가 많아져 과적합 가능성 증가)
'n_jobs' : [-1]} #모든 코어를 다 쓰겠다
]
model = RandomizedSearchCV(RandomForestRegressor(), params, cv=kfold, verbose=2)
# 3. 훈련
model.fit(x_train,y_train) #model: RandomizedSearchCV
# 4. 평가, 예측
print("최적의 매개변수 : ", model.best_estimator_)
print("최적 하이퍼 파라미터 : ", model.best_params_)
print("최고 정확도 : {0:.4f}".format(model.best_score_))
# RandomizedSearchCV refit으로 이미 학습이 된 estimator 반환
estimator = model.best_estimator_
y_predict = estimator.predict(x_test)
print("(테스트 데이터 세트 r2) 최종정답률 : ", r2_score(y_test,y_predict))
'''
최적의 매개변수 : RandomForestRegressor(max_depth=6, min_samples_leaf=12,
min_samples_split=12,
n_estimators=400, n_jobs=-1)
최적 하이퍼 파라미터 : {'n_jobs': -1, 'n_estimators': 400, 'min_samples_split': 12, 'min_samples_leaf': 12, 'max_depth': 6}
최고 정확도 : 0.4409
(테스트 데이터 세트 r2) 최종정답률 : 0.4142511040047415
'''
|
[
"ym4766@gmail.com"
] |
ym4766@gmail.com
|
13264a103e66b2e23f72deff4819ceff21da6eca
|
1eb2d7d2a6e945a9bc487afcbc51daefd9af02e6
|
/eds/controller/__init__.py
|
deebe85d22d41b69dab825eb5fd53a7005d7ea4e
|
[] |
no_license
|
fengges/eds
|
11dc0fdc7a17b611af1f61894f497ad443439bfe
|
635bcf015e3ec12e96949632c546d29fc99aee31
|
refs/heads/master
| 2021-06-20T04:43:02.019309
| 2019-06-20T12:55:26
| 2019-06-20T12:55:26
| 133,342,023
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,143
|
py
|
# author :feng
# time :2018/1/25
# function : 扫描所有文件,找出所有蓝图并注册
import os,sys
from flask import Blueprint
#------------扫描所有文件----------
# Accumulates [directory, filename] pairs for every candidate module file.
bp_file = []


def eachFile(filepath):
    """Recursively scan *filepath*, appending [directory, filename] pairs
    for every regular file to the module-level bp_file list.

    Entries whose name starts with '__' (e.g. __init__.py, __pycache__)
    are skipped. Unreadable directories are silently ignored.
    """
    try:
        entries = os.listdir(filepath)
    except:  # keep original best-effort behavior: any listing error ends this branch
        return
    for entry in entries:
        # Skip files and folders that start with '__'
        if entry.startswith('__'):
            continue
        full_path = filepath + '/' + entry
        if os.path.isfile(full_path):
            bp_file.append([filepath, entry])
        else:
            # Descend into sub-directories
            eachFile(full_path)
# Scan the controller package, then import every discovered module and
# collect each flask Blueprint object defined in it.
eachFile(sys.path[0]+'/eds/controller')

# ------------ import blueprints ----------
bp_list=[]
for bp in bp_file:
    # Convert the directory path into a dotted module path.
    dirs=bp[0].replace(sys.path[0]+'/','').replace('/','.')
    if bp[1].find('.txt')>=0:
        continue
    name=bp[1].replace('.py','')
    # HACK: dynamic import via exec/eval. Acceptable only because the
    # scanned paths come from the local source tree, never user input.
    code="from "+dirs+" import "+name+" as a"
    exec(code)
    list=eval("dir(a)")  # NOTE: shadows the builtin `list`
    for l in list:
        if l.startswith('__') :
            continue
        temp=eval('a.'+l)
        # Keep only module attributes that are flask Blueprint instances.
        if type(temp)==Blueprint:
            bp_list.append(temp)
|
[
"1059387928@qq.com"
] |
1059387928@qq.com
|
6d118674593e7b3bc5b82d7ca42cd1c3846863ac
|
1fac53ab13a9a682ecd926857ef565fa779afae4
|
/pro/13.py
|
0b8d62883eb73d45ca1280fea31b7038e7b8463f
|
[] |
no_license
|
Shamabanu/python-1
|
339123ff4e7667d6331c207cb1c7ca3fc775dc48
|
4c1642679bb0bdd53a1d21e5421e04eb7abda65b
|
refs/heads/master
| 2020-04-13T23:49:27.700807
| 2018-12-29T15:10:26
| 2018-12-29T15:10:26
| 163,516,492
| 1
| 0
| null | 2018-12-29T14:16:28
| 2018-12-29T14:16:28
| null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
def min(l, s, e):
    """Return the smallest element of l[s..e] (inclusive bounds).

    NOTE: intentionally keeps the original name, shadowing the builtin
    min() for interface compatibility.
    """
    # BUG FIX: the original started from the sentinel 999, which returned a
    # wrong answer whenever every element in the range exceeded 999.
    smallest = l[s]
    for i in range(s + 1, e + 1):
        if l[i] < smallest:
            smallest = l[i]
    return smallest


def main():
    """Read n values, then q (start, end) index pairs, and print the
    minimum of each queried range."""
    n = int(input())
    l = []
    for i in range(n):
        l.append(int(input()))
    q = int(input())
    out = []
    for i in range(q):
        s = int(input())
        e = int(input())
        out.append(min(l, s, e))
    for i in out:
        print(i)


# BUG FIX: the file ended with a stray `p`, a NameError at import time, and
# main() was never invoked; replaced with the standard entry-point guard.
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
Shamabanu.noreply@github.com
|
fc4eba5935059e6128b133cc2c060cdb972f9a15
|
dcf2ab4e1a66d53eaaa1260a8824882fbd0bbd9f
|
/bonus of salary.py
|
f4ca9a0bd3e168131d1e460d06d77175392a50b1
|
[] |
no_license
|
gauriindalkar/if-else
|
a54189ef8e6368be5cc6b8a9fdafa6e9497843be
|
fac48790ffb9f6d021eff921a80ff57e399a4f77
|
refs/heads/main
| 2023-05-11T05:59:13.275673
| 2021-05-29T17:24:19
| 2021-05-29T17:24:19
| 371,955,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
# Pay a 5% bonus when the employee has more than 5 years of service.
salary = int(input("enter salary"))
years = int(input("enter year of salary"))
# BUG FIX: the condition was written `years > 5 % 100`; `%` binds tighter
# than `>`, so it only worked by coincidence (5 % 100 == 5). Written plainly.
if years > 5:
    print("bonus is", 5 / 100 * salary)
else:
    print("no bonus")
|
[
"noreply@github.com"
] |
gauriindalkar.noreply@github.com
|
a42aa2ddf7f80efdf7902f6b7dd7298a8d9b657a
|
23b333449524887594530f73c0079ce60cb8eefb
|
/python_module/examples/279_Perfect_Squares.py
|
2b38c220680fe826fc33b7042145e52e6e043b5d
|
[] |
no_license
|
benbendaisy/CommunicationCodes
|
9deb371095f5d67e260030d3d8abf211c90e7642
|
444cc502ef26810b46115797f2e26ab305a4ebdf
|
refs/heads/master
| 2023-08-09T21:46:58.691987
| 2023-07-20T05:11:39
| 2023-07-20T05:11:39
| 27,856,438
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,495
|
py
|
import math
from cmath import sqrt
from functools import lru_cache
class Solution:
    """
    Given an integer n, return the least number of perfect square numbers that sum to n.
    A perfect square is an integer that is the square of an integer; in other words, it is the product of some integer with itself. For example, 1, 4, 9, and 16 are perfect squares while 3 and 11 are not.
    Example 1:
    Input: n = 12
    Output: 3
    Explanation: 12 = 4 + 4 + 4.
    Example 2:
    Input: n = 13
    Output: 2
    Explanation: 13 = 4 + 9.
    Constraints:
    1 <= n <= 104
    """

    def numSquares(self, n: int) -> int:
        """Bottom-up DP: dp[i] = least count of perfect squares summing to i."""
        # BUG FIX: the original used `int(sqrt(n))` with cmath.sqrt, which
        # returns a complex number — int() on a complex raises TypeError.
        # math.isqrt gives the exact integer square root.
        square_nums = [i ** 2 for i in range(1, math.isqrt(n) + 1)]
        dp = [math.inf] * (n + 1)
        dp[0] = 0
        for i in range(1, n + 1):
            for square in square_nums:
                if i < square:
                    break  # squares are ascending; none beyond can fit
                dp[i] = min(dp[i], dp[i - square] + 1)
        return dp[n]

    def numSquares1(self, n: int) -> int:
        """Top-down memoized recursion over the same recurrence."""
        # Same cmath.sqrt -> math.isqrt fix as numSquares above.
        square_nums = [i ** 2 for i in range(1, math.isqrt(n) + 1)]

        @lru_cache(None)
        def squares(k: int) -> int:
            if k in square_nums:
                return 1
            min_square = math.inf
            for square in square_nums:
                if k < square:
                    break
                min_square = min(min_square, squares(k - square) + 1)
            return min_square

        return squares(n)
|
[
"benbendaisy@users.noreply.github.com"
] |
benbendaisy@users.noreply.github.com
|
855c6aaf9eed566e4170ab64ac60019fbc1e0d0a
|
76fc4ffc931ce83cfdfc9846435d92f1f217af26
|
/jmbo/migrations/0004_photosize_name_length.py
|
00a8457f321175fcf2da3750bbc25a86dd0be0f3
|
[] |
no_license
|
praekelt/jmbo
|
70d2a4c8c19ffcc0b10ed1b915c05a6453ecb3de
|
b674c14c6611191643870a070ca8c9f229776776
|
refs/heads/develop
| 2020-04-06T06:10:09.524522
| 2018-06-08T10:52:42
| 2018-06-08T10:52:42
| 1,899,978
| 4
| 6
| null | 2018-06-08T10:52:43
| 2011-06-15T12:29:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,126
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-30 12:47
from __future__ import unicode_literals
from django.core.validators import RegexValidator
from django.db import migrations, models
from django.utils import timezone
# We're modifying a field from another app. This requires trickery.
def fix_app_label(apps, schema_editor):
    """Manually insert a 'jmbo' row into Django's migration recorder.

    Because this migration re-labels itself (see the class below in this
    file), Django would otherwise not record it as applied under 'jmbo'.
    """
    migrations.recorder.MigrationRecorder.Migration.objects.create(
        app='jmbo', name='0004_photosize_name_length',
        applied=timezone.now()
    )
class Migration(migrations.Migration):
    """Widens photologue's PhotoSize.name field to 255 chars while keeping
    the migration file inside the 'jmbo' app."""

    dependencies = [
        ('jmbo', '0003_auto_20160530_1247'),
    ]

    operations = [
        migrations.AlterField(
            model_name='photosize',
            name='name',
            field=models.CharField(unique=True, max_length=255, validators=[RegexValidator(regex='^[a-z0-9_]+$', message='Use only plain lowercase letters (ASCII), numbers and underscores.')]),
        ),
        migrations.RunPython(fix_app_label)
    ]

    def __init__(self, *args, **kwargs):
        # HACK: re-label this migration so Django applies AlterField to the
        # 'photologue' app's model even though the file lives in 'jmbo'.
        super(Migration, self).__init__(*args, **kwargs)
        self.app_label = 'photologue'
|
[
"hedleyroos@gmail.com"
] |
hedleyroos@gmail.com
|
d7870a40f1c0cc50e7e98e6853dcf6f4cf63878b
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/3995/codes/1672_1950.py
|
d9036258f64b368da9802335bbf86f788d624ec8
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
# Wind-chill calculator (prompts and messages are in Portuguese).
t=float(input("temperatura:"))  # air temperature in degrees Celsius
v=float(input("velocidade:"))   # wind speed — presumably km/h; confirm against the source formula
# Wind-chill index; same shape as the standard Environment Canada formula.
o=13.12+(0.6215*t)-(11.37*(v**0.16))+(0.3965*t*(v**0.16))
# The index is only defined for -50 <= t < 10 and v >= 4.8.
if((t>=-50 and t<10)and(v>=4.8)):
    print(round(o, 4))
elif(v<4.8):
    print("Velocidade invalida")
else:
    print("Temperatura invalida")
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
ebf0411cf8fd15124d51fefb1d9fce1ff401d78a
|
4ca07649f61d70b8803481e89f0f35a3ad7f7f4c
|
/Jamie/env/bin/pyrsa-priv2pub
|
9f127f79577a4e19d9b15f291939113e553dddc1
|
[] |
no_license
|
coralisland-git/web-spider-cloud
|
b5ab74ea4b2188dd18281618ecaf3337163bd4d1
|
1da2aca0c28e6e08db1978939007706fdf60779e
|
refs/heads/master
| 2021-04-09T15:47:18.220441
| 2018-06-01T16:09:55
| 2018-06-01T16:09:55
| 125,665,826
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
#!/media/apple/f50223eb-8502-4183-93ad-3e6be8fce2e0/work/scrapy/Jamie/env/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script shim for the 'rsa' package: normalizes
# argv[0] and hands control to rsa.util.private_to_public.
import re
import sys

from rsa.util import private_to_public

if __name__ == '__main__':
    # Strip a trailing '-script.py(w)' or '.exe' suffix so the tool sees
    # its canonical name in argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(private_to_public())
|
[
"coseasonruby@gmail.com"
] |
coseasonruby@gmail.com
|
|
241fa6b221f9014d153bddf461f56f124deed35d
|
bf0d7c8d987d5fda14208eb9ce70e31c83c25c25
|
/c-ex4/viz_hidden.py
|
77f2b6e5a6e6b235b4949916dff588d425775ba8
|
[] |
no_license
|
SummerBigData/SamRepo
|
7876e9393c7175e300e175a60c17633c3b23a1bb
|
fd84ad654370faa48c084349952c2921fde4032d
|
refs/heads/master
| 2020-03-18T05:09:08.787956
| 2018-06-18T17:11:49
| 2018-06-18T17:11:49
| 134,327,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 824
|
py
|
import matplotlib.pyplot as plt
import numpy as np
from nn_util import *
# Network layout: 400 inputs (20x20 pixel images), 25 hidden units, 10 classes.
n = 400
k = 10
s = [n, 25, k]
L = len(s)
# One (rows, cols) pair per weight matrix; the +1 column is the bias term.
sizes = [(s[i+1], s[i]+1) for i in range(L-1)]
theta_flat = np.genfromtxt('weights.txt')
# thetas_from_flat comes from nn_util — presumably reshapes the flat vector
# into per-layer matrices matching `sizes`; verify against nn_util.
thetas = thetas_from_flat(theta_flat, sizes)
theta_h = thetas[0][:,1:]  # hidden-layer weights with the bias column dropped
def inv_sigmoid(a):
    """Logit function: the inverse of the logistic sigmoid, log(a / (1 - a))."""
    odds = a / (1 - a)
    return np.log(odds)
# For each hidden unit, build a target activation vector that is ~1 for that
# unit and ~0 elsewhere, then invert the network's first layer (via the
# pseudo-inverse) to find an input image producing that activation.
small_number = 1e-8
active_val = 1 - small_number
X = np.zeros((25, n))
for i in range(s[1]):
    a = np.array(s[1] * [small_number]).reshape((1, s[1]))
    a[0,i] = active_val
    z = inv_sigmoid(a)
    # Least-squares preimage of the pre-activation z under theta_h.
    x = z.dot(np.linalg.pinv(theta_h).T)
    X[i] = x

# Render the 25 reconstructed inputs as a 5x5 grid of 20x20 grayscale tiles.
rows, cols = 5, 5
_, axs = plt.subplots(rows, cols, figsize=(rows, cols))
row, col = -1, 0
for x in X:
    if col % cols == 0:
        row += 1
        col = 0
    x = x.reshape((20, 20)).T  # transpose: data is stored column-major per image
    ax = axs[row, col]
    ax.imshow(x, cmap='gray')
    ax.axis('off')
    col += 1
plt.show()
|
[
"lerner98@gmail.com"
] |
lerner98@gmail.com
|
c348b8fbf62271f2569db6ac5e932e2083f492cd
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02687/s366330739.py
|
8212de878e4731ef97587ca957ac1639f31d7b1e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
# A - A?C
# Given 'ABC' print 'ARC', and given 'ARC' print 'ABC'.
S = str(input())
if S == 'ABC':
    print('ARC')
elif S == 'ARC':
    print('ABC')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
6be70691a6a372ac9c7ec40b4679883b2939e460
|
641fa8341d8c436ad24945bcbf8e7d7d1dd7dbb2
|
/build/android/pylib/constants/__init__.py
|
80ad2c1a8b45589070e60fb4c57c2f19f2485186
|
[
"BSD-3-Clause"
] |
permissive
|
massnetwork/mass-browser
|
7de0dfc541cbac00ffa7308541394bac1e945b76
|
67526da9358734698c067b7775be491423884339
|
refs/heads/master
| 2022-12-07T09:01:31.027715
| 2017-01-19T14:29:18
| 2017-01-19T14:29:18
| 73,799,690
| 4
| 4
|
BSD-3-Clause
| 2022-11-26T11:53:23
| 2016-11-15T09:49:29
| null |
UTF-8
|
Python
| false
| false
| 7,735
|
py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines a set of constants shared by test runners and other scripts."""
# TODO(jbudorick): Split these constants into coherent modules.
# pylint: disable=W0212
import collections
import glob
import logging
import os
import subprocess
import devil.android.sdk.keyevent
from devil.android.constants import chrome
from devil.android.sdk import version_codes
from devil.constants import exit_codes
# Re-export devil's keyevent module under this package's historical name.
keyevent = devil.android.sdk.keyevent


# Checkout root: overridable via CHECKOUT_SOURCE_ROOT, else derived from
# this file's location (four directories up).
DIR_SOURCE_ROOT = os.environ.get('CHECKOUT_SOURCE_ROOT',
    os.path.abspath(os.path.join(os.path.dirname(__file__),
                                 os.pardir, os.pardir, os.pardir, os.pardir)))

# Start from devil's package table and layer Chromium-specific entries on top.
PACKAGE_INFO = dict(chrome.PACKAGE_INFO)
PACKAGE_INFO.update({
    'legacy_browser': chrome.PackageInfo(
        'com.google.android.browser',
        'com.android.browser.BrowserActivity',
        None,
        None),
    'chromecast_shell': chrome.PackageInfo(
        'com.google.android.apps.mediashell',
        'com.google.android.apps.mediashell.MediaShellActivity',
        '/data/local/tmp/castshell-command-line',
        None),
    'android_webview_shell': chrome.PackageInfo(
        'org.chromium.android_webview.shell',
        'org.chromium.android_webview.shell.AwShellActivity',
        '/data/local/tmp/android-webview-command-line',
        None),
    'gtest': chrome.PackageInfo(
        'org.chromium.native_test',
        'org.chromium.native_test.NativeUnitTestActivity',
        '/data/local/tmp/chrome-native-tests-command-line',
        None),
    'components_browsertests': chrome.PackageInfo(
        'org.chromium.components_browsertests_apk',
        ('org.chromium.components_browsertests_apk' +
         '.ComponentsBrowserTestsActivity'),
        '/data/local/tmp/chrome-native-tests-command-line',
        None),
    'content_browsertests': chrome.PackageInfo(
        'org.chromium.content_browsertests_apk',
        'org.chromium.content_browsertests_apk.ContentBrowserTestsActivity',
        '/data/local/tmp/chrome-native-tests-command-line',
        None),
    'chromedriver_webview_shell': chrome.PackageInfo(
        'org.chromium.chromedriver_webview_shell',
        'org.chromium.chromedriver_webview_shell.Main',
        None,
        None),
})

# Ports arrangement for various test servers used in Chrome for Android.
# Lighttpd server will attempt to use 9000 as default port, if unavailable it
# will find a free port from 8001 - 8999.
LIGHTTPD_DEFAULT_PORT = 9000
LIGHTTPD_RANDOM_PORT_FIRST = 8001
LIGHTTPD_RANDOM_PORT_LAST = 8999
TEST_SYNC_SERVER_PORT = 9031
TEST_SEARCH_BY_IMAGE_SERVER_PORT = 9041
TEST_POLICY_SERVER_PORT = 9051

TEST_EXECUTABLE_DIR = '/data/local/tmp'
# Directories for common java libraries for SDK build.
# These constants are defined in build/android/ant/common.xml
SDK_BUILD_JAVALIB_DIR = 'lib.java'
SDK_BUILD_TEST_JAVALIB_DIR = 'test.lib.java'
SDK_BUILD_APKS_DIR = 'apks'

ADB_KEYS_FILE = '/data/misc/adb/adb_keys'

PERF_OUTPUT_DIR = os.path.join(DIR_SOURCE_ROOT, 'out', 'step_results')
# The directory on the device where perf test output gets saved to.
DEVICE_PERF_OUTPUT_DIR = (
    '/data/data/' + PACKAGE_INFO['chrome'].package + '/files')

SCREENSHOTS_DIR = os.path.join(DIR_SOURCE_ROOT, 'out_screenshots')

ANDROID_SDK_VERSION = version_codes.MARSHMALLOW
ANDROID_SDK_BUILD_TOOLS_VERSION = '24.0.2'
ANDROID_SDK_ROOT = os.path.join(DIR_SOURCE_ROOT,
                                'third_party', 'android_tools', 'sdk')
ANDROID_SDK_TOOLS = os.path.join(ANDROID_SDK_ROOT,
                                 'build-tools', ANDROID_SDK_BUILD_TOOLS_VERSION)
ANDROID_NDK_ROOT = os.path.join(DIR_SOURCE_ROOT,
                                'third_party', 'android_tools', 'ndk')

PROGUARD_SCRIPT_PATH = os.path.join(
    ANDROID_SDK_ROOT, 'tools', 'proguard', 'bin', 'proguard.sh')

PROGUARD_ROOT = os.path.join(DIR_SOURCE_ROOT, 'third_party', 'proguard')

BAD_DEVICES_JSON = os.path.join(DIR_SOURCE_ROOT,
                                os.environ.get('CHROMIUM_OUT_DIR', 'out'),
                                'bad_devices.json')

UPSTREAM_FLAKINESS_SERVER = 'test-results.appspot.com'

# TODO(jbudorick): Remove once unused.
DEVICE_LOCAL_PROPERTIES_PATH = '/data/local.prop'

# TODO(jbudorick): Rework this into testing/buildbot/
PYTHON_UNIT_TEST_SUITES = {
    'pylib_py_unittests': {
        'path': os.path.join(DIR_SOURCE_ROOT, 'build', 'android'),
        'test_modules': [
            'devil.android.device_utils_test',
            'devil.android.md5sum_test',
            'devil.utils.cmd_helper_test',
            'pylib.results.json_results_test',
            'pylib.utils.proguard_test',
        ]
    },
    'gyp_py_unittests': {
        'path': os.path.join(DIR_SOURCE_ROOT, 'build', 'android', 'gyp'),
        'test_modules': [
            'java_cpp_enum_tests',
            'java_google_api_keys_tests',
        ]
    },
}

LOCAL_MACHINE_TESTS = ['junit', 'python']
VALID_ENVIRONMENTS = ['local']
VALID_TEST_TYPES = ['gtest', 'instrumentation', 'junit', 'linker', 'monkey',
                    'perf', 'python']
VALID_DEVICE_TYPES = ['Android', 'iOS']
def GetBuildType():
  """Return the current build type (e.g. 'Debug' or 'Release').

  Raises:
    EnvironmentError: if the BUILDTYPE environment variable is not set.
  """
  build_type = os.environ.get('BUILDTYPE')
  if build_type is None:
    raise EnvironmentError(
        'The BUILDTYPE environment variable has not been set')
  return build_type
def SetBuildType(build_type):
  """Sets the BUILDTYPE environment variable (e.g. 'Debug' or 'Release')."""
  os.environ['BUILDTYPE'] = build_type

def SetBuildDirectory(build_directory):
  """Sets CHROMIUM_OUT_DIR, the build output root under the checkout."""
  os.environ['CHROMIUM_OUT_DIR'] = build_directory

def SetOutputDirectory(output_directory):
  """Sets CHROMIUM_OUTPUT_DIR, the fully resolved build output directory."""
  os.environ['CHROMIUM_OUTPUT_DIR'] = output_directory
def GetOutDirectory(build_type=None):
  """Returns the out directory where the output binaries are built.

  Args:
    build_type: Build type, generally 'Debug' or 'Release'. Defaults to the
      globally set build type environment variable BUILDTYPE.
  """
  # An explicitly configured output directory wins outright.
  if 'CHROMIUM_OUTPUT_DIR' in os.environ:
    return os.path.abspath(os.path.join(
        DIR_SOURCE_ROOT, os.environ.get('CHROMIUM_OUTPUT_DIR')))
  # Otherwise compose <src>/<out dir>/<build type>.
  return os.path.abspath(os.path.join(
      DIR_SOURCE_ROOT, os.environ.get('CHROMIUM_OUT_DIR', 'out'),
      GetBuildType() if build_type is None else build_type))
def CheckOutputDirectory():
  """Checks that CHROMIUM_OUT_DIR or CHROMIUM_OUTPUT_DIR is set.

  If neither are set, but the current working directory is a build directory,
  then CHROMIUM_OUTPUT_DIR is set to the current working directory.

  Raises:
    Exception: If no output directory is detected.
  """
  output_dir = os.environ.get('CHROMIUM_OUTPUT_DIR')
  out_dir = os.environ.get('CHROMIUM_OUT_DIR')
  if not output_dir and not out_dir:
    # If CWD is an output directory, then assume it's the desired one.
    if os.path.exists('build.ninja'):
      output_dir = os.getcwd()
      SetOutputDirectory(output_dir)
    elif os.environ.get('CHROME_HEADLESS'):
      # When running on bots, see if the output directory is obvious.
      dirs = glob.glob(os.path.join(DIR_SOURCE_ROOT, 'out', '*', 'build.ninja'))
      if len(dirs) == 1:
        # NOTE(review): dirs[0] is the path to build.ninja itself, not its
        # containing directory — confirm this is intended downstream.
        SetOutputDirectory(dirs[0])
      else:
        raise Exception('Neither CHROMIUM_OUTPUT_DIR nor CHROMIUM_OUT_DIR '
                        'has been set. CHROME_HEADLESS detected, but multiple '
                        'out dirs exist: %r' % dirs)
    else:
      raise Exception('Neither CHROMIUM_OUTPUT_DIR nor CHROMIUM_OUT_DIR '
                      'has been set')
# TODO(jbudorick): Convert existing callers to AdbWrapper.GetAdbPath() and
# remove this.
def GetAdbPath():
  """Returns the path to the adb binary (delegates to devil's AdbWrapper)."""
  # Imported lazily so merely importing this module does not require devil's
  # adb machinery to be fully initialized.
  from devil.android.sdk import adb_wrapper
  return adb_wrapper.AdbWrapper.GetAdbPath()

# Exit codes
# Mirrored from devil.constants.exit_codes for this module's callers.
ERROR_EXIT_CODE = exit_codes.ERROR
INFRA_EXIT_CODE = exit_codes.INFRA
WARNING_EXIT_CODE = exit_codes.WARNING
|
[
"xElvis89x@gmail.com"
] |
xElvis89x@gmail.com
|
3ce273ff7c46fa17acd4b7832d4e488d178af564
|
c5278643c1df2a0c1a83156f764ba7b2ebcc8cb8
|
/hc/api/tests/test_pause.py
|
e6c3d4142b8b6977190901b14f8e040aea1f06a6
|
[
"BSD-3-Clause"
] |
permissive
|
protostartup/healthchecks
|
a5757584c4506282d488aff207676f54241c975c
|
25e48f1b9fff10866e4d6a0f875912527a5160bc
|
refs/heads/master
| 2020-04-11T15:24:42.495688
| 2018-12-14T16:58:35
| 2018-12-14T16:58:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,978
|
py
|
from hc.api.models import Check
from hc.test import BaseTestCase
class PauseTestCase(BaseTestCase):
    """API tests for the POST /api/v1/checks/<code>/pause endpoint."""

    def test_it_works(self):
        """Pausing an owned check returns 200 and persists status='paused'."""
        check = Check(user=self.alice, status="up")
        check.save()
        url = "/api/v1/checks/%s/pause" % check.code
        r = self.client.post(url, "", content_type="application/json",
                             HTTP_X_API_KEY="X" * 32)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r["Access-Control-Allow-Origin"], "*")
        # The status change must be saved, not just reflected in the response.
        check.refresh_from_db()
        self.assertEqual(check.status, "paused")

    def test_it_handles_options(self):
        """CORS preflight: OPTIONS returns 204 and advertises POST."""
        check = Check(user=self.alice, status="up")
        check.save()
        r = self.client.options("/api/v1/checks/%s/pause" % check.code)
        self.assertEqual(r.status_code, 204)
        self.assertIn("POST", r["Access-Control-Allow-Methods"])

    def test_it_only_allows_post(self):
        """GET is rejected with 405 Method Not Allowed."""
        url = "/api/v1/checks/1659718b-21ad-4ed1-8740-43afc6c41524/pause"
        r = self.client.get(url, HTTP_X_API_KEY="X" * 32)
        self.assertEqual(r.status_code, 405)

    def test_it_validates_ownership(self):
        """Pausing another user's check is forbidden (403)."""
        check = Check(user=self.bob, status="up")
        check.save()
        url = "/api/v1/checks/%s/pause" % check.code
        r = self.client.post(url, "", content_type="application/json",
                             HTTP_X_API_KEY="X" * 32)
        self.assertEqual(r.status_code, 403)

    def test_it_validates_uuid(self):
        """A malformed check code yields 404, not a server error."""
        url = "/api/v1/checks/not-uuid/pause"
        r = self.client.post(url, "", content_type="application/json",
                             HTTP_X_API_KEY="X" * 32)
        self.assertEqual(r.status_code, 404)

    def test_it_handles_missing_check(self):
        """A well-formed but unknown check code yields 404."""
        url = "/api/v1/checks/07c2f548-9850-4b27-af5d-6c9dc157ec02/pause"
        r = self.client.post(url, "", content_type="application/json",
                             HTTP_X_API_KEY="X" * 32)
        self.assertEqual(r.status_code, 404)
|
[
"cuu508@gmail.com"
] |
cuu508@gmail.com
|
13ef81d0170c88304adc0fe294d740fb507ae0ef
|
7755efce8e5ec81943ceb491590fae29eaad798a
|
/Codecademy Lesson 3 Control Flow/L3.5_Boolean_Operators_And.py
|
8413b9f09629808f478ec877dbcf37d340a3cfe0
|
[] |
no_license
|
jashidsany/Learning-Python
|
01b3f2f207dfdf6d31f9ca9f5abd38aab710ba1d
|
2c9926fd1b159441cbe8e9e30f0804d99b936573
|
refs/heads/main
| 2023-02-20T03:59:15.123009
| 2021-01-25T01:12:19
| 2021-01-25T01:12:19
| 319,205,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
# Practice with the `and` operator: the result is True only when both
# operands are True.
statement_one = (2 + 2 + 2 >= 6) and (-1 * -1 < 0)  # True and False -> False
print(statement_one)

statement_two = (4 * 2 <= 8) and (7 - 1 == 6)  # True and True -> True
print(statement_two)
def graduation_reqs(gpa, credits):
    """Return a congratulations message when both graduation requirements
    are met (GPA >= 2.0 and credits >= 120); otherwise implicitly return None.
    """
    meets_gpa = gpa >= 2.0
    meets_credits = credits >= 120
    if meets_gpa and meets_credits:
        return "You meet the requirements to graduate!"
|
[
"noreply@github.com"
] |
jashidsany.noreply@github.com
|
c8a947e165c9e763ad4bdf98bb89237d7e4bf66e
|
021cf3a95271c46647c45427ca6f4e951a1eacb0
|
/2020/B/b.py
|
3a2882390d01d691f8a0dd5c7c016017fb86c480
|
[] |
no_license
|
calvinchankf/GoogleKickStart
|
9c5fe0a2537c4efba79aa9352284a0b1e6e65445
|
dbcd259b756785ee2864915c22c5ee6abe270581
|
refs/heads/master
| 2023-02-12T14:05:59.787258
| 2023-02-11T16:19:50
| 2023-02-11T16:19:50
| 182,479,858
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 695
|
py
|
"""
math
Small Pass
Big Pass
"""
def f(nums, D):
    """Starting from D, round down to a multiple of each divisor in nums,
    processing the divisors from last to first, and return the final value.
    """
    remaining = D
    for divisor in reversed(nums):
        # n * (x // n) == x - x % n for Python's floor division.
        remaining -= remaining % divisor
    return remaining
# Sample invocations kept from development:
# a = [3, 7, 2]
# print(f(a, 10))
# a = [11, 10, 5, 50]
# print(f(a, 100))
# a = [1,1]
# print(f(a, 1))

# input() reads a string with a line of input, stripping the ' ' (newline) at the end.
# This is all you need for most Code Jam problems.
# NOTE(review): raw_input exists only in Python 2; under Python 3 this
# raises NameError — confirm the intended interpreter.
t = int(raw_input())  # read a line with a single integer: the case count
for i in range(1, t + 1):
    # N = number of values, D = the starting value for f.
    N, D = [int(s) for s in raw_input().split(" ")]
    arr = [int(s) for s in raw_input().split(" ")]
    res = f(arr, D)
    print("Case #{}: {}".format(i, res))
|
[
"chan9118kin@gmail.com"
] |
chan9118kin@gmail.com
|
d3854df5c0cfad528f8a66d04ee219f3398b3031
|
f572e0a4b843ed3fd2cd8edec2ad3aab7a0019d3
|
/ows/wcs/v20/exceptions.py
|
8b2d542a5baad97f4eec81ecbf70515ef3aeeb76
|
[
"MIT"
] |
permissive
|
EOxServer/pyows
|
9039c8ed7358c98d736e2b8fd9f47be944f0b0a1
|
e09310f992d6e69088940e9b5dbd7302f697344b
|
refs/heads/master
| 2022-10-09T23:27:43.884159
| 2022-10-04T10:03:25
| 2022-10-04T10:03:25
| 218,005,699
| 1
| 1
| null | 2022-01-04T13:36:06
| 2019-10-28T09:01:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,719
|
py
|
# ------------------------------------------------------------------------------
#
# Project: pyows <http://eoxserver.org>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
#
# ------------------------------------------------------------------------------
# Copyright (C) 2019 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ------------------------------------------------------------------------------
class InvalidSubsettingException(Exception):
    """
    This exception indicates an invalid WCS 2.0 subsetting parameter was
    submitted.
    """
    # OWS exception code and locator reported to the client.
    code = "InvalidSubsetting"
    locator = "subset"


class InvalidSubsettingCrsException(Exception):
    """
    This exception indicates an invalid WCS 2.0 subsettingCrs parameter was
    submitted.
    """
    code = "SubsettingCrs-NotSupported"
    locator = "subsettingCrs"


class InvalidOutputCrsException(Exception):
    """
    This exception indicates an invalid WCS 2.0 outputCrs parameter was
    submitted.
    """
    code = "OutputCrs-NotSupported"
    locator = "outputCrs"
class InvalidScaleFactorException(Exception):
    """Raised for an invalid ScaleFactor/ScaleAxis value in WCS 2.0 scaling."""

    code = "InvalidScaleFactor"

    def __init__(self, scalefactor):
        message = "Scalefactor '%s' is not valid" % scalefactor
        super().__init__(message)
        # The offending value doubles as the OWS locator.
        self.locator = scalefactor
class InvalidScaleExtentException(Exception):
    """Raised for an invalid low:high range in WCS 2.0 ScaleExtent operations."""

    code = "InvalidExtent"

    def __init__(self, low, high):
        message = "ScaleExtent '%s:%s' is not valid" % (low, high)
        super().__init__(message)
        # Only the upper bound is reported as the OWS locator.
        self.locator = high
class NoSuchCoverageException(Exception):
    """ This exception indicates that the requested coverage(s) do not
        exist.
    """
    code = "NoSuchCoverage"
    # def __str__(self):
    #     return "No such Coverage%s with ID: %s" % (
    #         "" if len(self.items) == 1 else "s",
    #         ", ".join(map(lambda i: "'%s'" % i, self.items))
    #     )


class NoSuchDatasetSeriesOrCoverageException(Exception):
    """ This exception indicates that the requested coverage(s) or dataset
        series do not exist.
    """
    code = "NoSuchDatasetSeriesOrCoverage"
    # def __str__(self):
    #     return "No such Coverage%s or Dataset Series with ID: %s" % (
    #         " " if len(self.items) == 1 else "s",
    #         ", ".join(map(lambda i: "'%s'" % i, self.items))
    #     )


class InterpolationMethodNotSupportedException(Exception):
    """
    This exception indicates a not supported interpolation method.
    """
    code = "InterpolationMethodNotSupported"
    locator = "interpolation"
|
[
"fabian.schindler.strauss@gmail.com"
] |
fabian.schindler.strauss@gmail.com
|
fb90ef3f04640975faca0418775ddddf8837d8db
|
187c27b5d1255f3f08ec87a4bb51cc4056d8e2da
|
/agents/views.py
|
27ef869b7b3b066d67c3ea077e6f104ce339b5d1
|
[] |
no_license
|
LKingJ23/Django-Hotel-Site
|
1f589154d9475731d015c823e83815292b962e11
|
49b1bef7425ff63bf8ec3e178629ccc2bab45c1f
|
refs/heads/master
| 2020-06-14T20:22:17.310187
| 2019-07-03T19:41:30
| 2019-07-03T19:41:30
| 195,115,416
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
from django.shortcuts import render
from .models import Agent
# Create your views here.
def agents_list(request):
    """Render the agents page with every Agent in the database.

    Args:
        request: the incoming HttpRequest.
    Returns:
        An HttpResponse rendering 'agents/agents.html' with 'agent_list'
        in the template context.
    """
    agent_list = Agent.objects.all()
    template = 'agents/agents.html'
    context = {
        'agent_list': agent_list
    }
    return render(request, template, context)
|
[
"lkingj23@gmail.com"
] |
lkingj23@gmail.com
|
bc29b15eabbd22e4af5e1f1da7d7846fe6fa794d
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_387/ch136_2020_04_01_12_06_52_438302.py
|
5a78febe80669c25bdb2163ef7afb2a5b120d898
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,151
|
py
|
import random as rnd
def dicas(soma):
    """Hint round: read three guesses and say whether the dice total *soma*
    is among them (messages are in Portuguese)."""
    num1 = int(input('Primeiro número: '))
    num2 = int(input('Segundo número: '))
    num3 = int(input('Terceiro número: '))
    nums = [num1, num2, num3]
    if soma in nums:
        return("Está entre os 3")
    else:
        return("Não está entre os 3")

# Roll three dice; the player must guess their total.
dado1 = rnd.randint(1,6)
dado2 = rnd.randint(1,6)
dado3 = rnd.randint(1,6)
soma = dado1 + dado2 + dado3

dinheiro = 10  # the player's money; each hint or wrong guess costs 1

# Phase 1: optional paid hints until the player declines or goes broke.
fase_dicas = True
while fase_dicas:
    print(f'Você possui {dinheiro} dinheiros')
    if dinheiro <= 0:
        break
    quer_dica = input('Você quer uma dica? (cada dica custa 1 dinheiro) (sim/não)')
    if quer_dica == 'sim':
        print(dicas(soma))
        dinheiro-=1
    elif quer_dica == 'não':
        fase_dicas = False

# Phase 2: guessing; a correct guess multiplies the money by 6 and ends the game.
while not fase_dicas:
    print(f'Você possui {dinheiro} dinheiros')
    if dinheiro <= 0:
        break
    else:
        chute = int(input('Chute um número: '))
        if chute == soma:
            dinheiro*=6
            break
        else:
            dinheiro-=1

# Win if any money is left, lose otherwise.
if dinheiro > 0:
    print(f"Você ganhou o jogo com {dinheiro} dinheiros!")
else:
    print("Você perdeu!")
|
[
"you@example.com"
] |
you@example.com
|
75f729bc861f9e09e111d25926334f32e7145824
|
099deeb2c308bdc00a2c423743e4b2aacdac866c
|
/week1/searching_lyrics/youngyun.py
|
ba410784e8f79a376a70247b82107a7c61ea8364
|
[] |
no_license
|
Joonsun-Hwang/coding-test-study-lamda
|
76fed2f18a3220f6731775984425dff49b4379eb
|
0632ec9dd60024203ed10ebeab07aa7da4782806
|
refs/heads/main
| 2023-05-01T21:31:48.174944
| 2021-05-05T09:48:23
| 2021-05-05T09:48:23
| 329,205,708
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,163
|
py
|
def solution(words, queries):
    """Count, for each wildcard query, how many words match it.

    A query is lowercase letters plus '?' wildcards (the problem guarantees
    wildcards appear only as a prefix and/or a suffix). A word matches when
    it has the same length and agrees with the query at every non-'?'
    position.

    Args:
        words: list of lowercase strings.
        queries: list of query strings with '?' wildcards.
    Returns:
        A list with the match count for each query, in order.

    Bug fixes vs. the original:
      * a query made entirely of '?' (e.g. "????") ran the wildcard-skipping
        loop off the end of the string and raised IndexError; the positional
        comparison below handles that case naturally (it matches every
        same-length word).
      * removed a leftover debug print(words).
    """
    answer = []
    for query in queries:
        match = 0
        for word in words:
            if len(word) != len(query):
                continue  # different lengths can never match
            # '?' matches anything; every other position must agree exactly.
            if all(q == '?' or q == w for q, w in zip(query, word)):
                match += 1
        answer.append(match)
    return answer
|
[
"fightnyy@naver.com"
] |
fightnyy@naver.com
|
e51ebc17d32c53b6a368d0019d094b269df2bf23
|
e44d77c4d22a9cd0aa0a536d6d4e522359eedc81
|
/trialscompendium/trials/api/urls.py
|
eb740a50c49131d2158a35228430de7d7ed74501
|
[
"MIT"
] |
permissive
|
nkoech/trialscompendium
|
cfd3f65a88a32e3f71bc27cba6a4ae777e319af4
|
9cd2c28a22957b84d97d87eb6b7b9b1b8616bacb
|
refs/heads/master
| 2021-01-01T04:54:18.162744
| 2018-04-17T15:40:05
| 2018-04-17T15:40:05
| 97,270,861
| 2
| 2
|
MIT
| 2018-02-15T09:48:23
| 2017-07-14T20:36:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,414
|
py
|
# URL routing for the trials API: treatment, trial-yield and plot endpoints.
from django.conf.urls import url
from .views import (
    plot_views,
    trial_yield_views,
    treatment_views,
)

# Treatment URLs
# NOTE: the literal 'create/' route must precede the '(?P<pk>[\w-]+)'
# catch-all, which would otherwise swallow the word 'create'.
urlpatterns = [
    url(
        r'^treatment/$',
        treatment_views['TreatmentListAPIView'].as_view(),
        name='treatment_list'
    ),
    url(
        r'^treatment/create/$',
        treatment_views['TreatmentCreateAPIView'].as_view(),
        name='treatment_create'
    ),
    url(
        r'^treatment/(?P<pk>[\w-]+)/$',
        treatment_views['TreatmentDetailAPIView'].as_view(),
        name='treatment_detail'
    ),
]

# Trial Yield URLs (same list/create/detail layout as treatments)
urlpatterns += [
    url(
        r'^yield/$',
        trial_yield_views['TrialYieldListAPIView'].as_view(),
        name='trial_yield_list'
    ),
    url(
        r'^yield/create/$',
        trial_yield_views['TrialYieldCreateAPIView'].as_view(),
        name='trial_yield_create'
    ),
    url(
        r'^yield/(?P<pk>[\w-]+)/$',
        trial_yield_views['TrialYieldDetailAPIView'].as_view(),
        name='trial_yield_detail'
    ),
]

# Plot URLs — mounted at the module root; detail route keys on a slug,
# not a pk, unlike the two groups above.
urlpatterns += [
    url(
        r'^$',
        plot_views['PlotListAPIView'].as_view(),
        name='plot_list'
    ),
    url(
        r'^create/$',
        plot_views['PlotCreateAPIView'].as_view(),
        name='plot_create'
    ),
    url(
        r'^(?P<slug>[\w-]+)/$',
        plot_views['PlotDetailAPIView'].as_view(),
        name='plot_detail'
    ),
]
|
[
"koechnicholas@gmail.com"
] |
koechnicholas@gmail.com
|
aec98a6c7f1944baa6fd27a6ecf9f0ca61fb45e8
|
cfa632132cd29a0b58e7f45b441ea4f62b0f5eba
|
/flytekit/models/admin/workflow.py
|
91e82252b40cf9f189066fa756e314dbf9e671ad
|
[
"Apache-2.0"
] |
permissive
|
chixcode/flytekit
|
5b4f2e687e82a0d6527411afcdaf0929a94adb13
|
f901aee721847c6264d44079d4fa31a75b8876e1
|
refs/heads/master
| 2020-08-24T00:06:02.808187
| 2019-10-14T18:34:19
| 2019-10-14T18:34:19
| 216,729,272
| 1
| 0
|
Apache-2.0
| 2019-10-22T05:22:01
| 2019-10-22T05:22:00
| null |
UTF-8
|
Python
| false
| false
| 3,275
|
py
|
from __future__ import absolute_import

from flyteidl.admin import workflow_pb2 as _admin_workflow

from flytekit.models import common as _common
from flytekit.models.core import compiler as _compiler_models, identifier as _identifier
from flytekit.models.core import workflow as _core_workflow
class WorkflowSpec(_common.FlyteIdlEntity):
    """Wrapper around the admin WorkflowSpec IDL message."""

    def __init__(self, template):
        """
        This object fully encapsulates the specification of a workflow
        :param flytekit.models.core.workflow.WorkflowTemplate template:
        """
        self._template = template

    @property
    def template(self):
        """
        :rtype: flytekit.models.core.workflow.WorkflowTemplate
        """
        return self._template

    def to_flyte_idl(self):
        """
        :rtype: flyteidl.admin.workflow_pb2.WorkflowSpec
        """
        return _admin_workflow.WorkflowSpec(
            template=self._template.to_flyte_idl()
        )

    @classmethod
    def from_flyte_idl(cls, pb2_object):
        """
        :param pb2_object: flyteidl.admin.workflow_pb2.WorkflowSpec
        :rtype: WorkflowSpec
        """
        # Bug fix: this previously called WorkflowSpec.from_flyte_idl on the
        # template sub-message, recursing into this very method forever.  The
        # template field is a core WorkflowTemplate and must be decoded as one.
        return cls(
            template=_core_workflow.WorkflowTemplate.from_flyte_idl(
                pb2_object.template)
        )
class Workflow(_common.FlyteIdlEntity):
    """Admin-level workflow record: a registered identifier paired with its closure."""

    def __init__(
        self,
        id,
        closure
    ):
        """
        :param flytekit.models.core.identifier.Identifier id:
        :param WorkflowClosure closure:
        """
        self._id = id
        self._closure = closure

    @property
    def id(self):
        """
        :rtype: flytekit.models.core.identifier.Identifier
        """
        return self._id

    @property
    def closure(self):
        """
        :rtype: WorkflowClosure
        """
        return self._closure

    def to_flyte_idl(self):
        """
        :rtype: flyteidl.admin.workflow_pb2.Workflow
        """
        return _admin_workflow.Workflow(
            id=self.id.to_flyte_idl(),
            closure=self.closure.to_flyte_idl()
        )

    @classmethod
    def from_flyte_idl(cls, pb2_object):
        """
        :param flyteidl.admin.workflow_pb2.Workflow pb2_object:
        :return: Workflow
        """
        return cls(
            id=_identifier.Identifier.from_flyte_idl(pb2_object.id),
            closure=WorkflowClosure.from_flyte_idl(pb2_object.closure)
        )
class WorkflowClosure(_common.FlyteIdlEntity):
    """Holds the compiled form of a workflow, as produced by the compiler."""

    def __init__(self, compiled_workflow):
        """
        :param flytekit.models.core.compiler.CompiledWorkflowClosure compiled_workflow:
        """
        self._compiled_workflow = compiled_workflow

    @property
    def compiled_workflow(self):
        """
        :rtype: flytekit.models.core.compiler.CompiledWorkflowClosure
        """
        return self._compiled_workflow

    def to_flyte_idl(self):
        """
        :rtype: flyteidl.admin.workflow_pb2.WorkflowClosure
        """
        return _admin_workflow.WorkflowClosure(
            compiled_workflow=self.compiled_workflow.to_flyte_idl()
        )

    @classmethod
    def from_flyte_idl(cls, p):
        """
        :param flyteidl.admin.workflow_pb2.WorkflowClosure p:
        :rtype: WorkflowClosure
        """
        return cls(
            compiled_workflow=_compiler_models.CompiledWorkflowClosure.from_flyte_idl(p.compiled_workflow)
        )
|
[
"matthewsmith@lyft.com"
] |
matthewsmith@lyft.com
|
b3afe55fffeda671f0363abb51fa4c1e66106c94
|
79ed3f72555aad8548634f523f775f34cfe166e7
|
/catch/datasets/hag1.py
|
fdae6eb661d5e4cfaa9398d0e382898e7a03c753
|
[
"MIT"
] |
permissive
|
John-Bioinfo/catch
|
a2ab188ed598767e7759f74227f24af2b284b379
|
fe63b86bc41396c1da0b449ac440c6ae9e52b2c5
|
refs/heads/master
| 2020-03-18T09:29:10.315733
| 2018-04-17T18:36:47
| 2018-04-17T18:36:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
"""Dataset with 'Human associated gemyvongvirus 1' sequences.
A dataset with 2 'Human associated gemyvongvirus 1' genomes.
THIS PYTHON FILE WAS GENERATED BY A COMPUTER PROGRAM! DO NOT EDIT!
"""
import sys
from catch.datasets import GenomesDatasetSingleChrom
__author__ = 'Hayden Metsky <hayden@mit.edu>'
ds = GenomesDatasetSingleChrom(__name__, __file__, __spec__)
ds.add_fasta_path("data/hag1.fasta", relative=True)
sys.modules[__name__] = ds
|
[
"hmetsky@gmail.com"
] |
hmetsky@gmail.com
|
1a9adb679879e2543ec9b2ab25ece1e16b986fb3
|
d569476dd95496339c34b231621ff1f5dfd7fe49
|
/PyTest/SampleWebsite/tests/Pages/FeedbackForm.py
|
9cd2c00d4dbc8431b3b190dfb5b56ffb0c4b290f
|
[] |
no_license
|
monteua/Tests
|
10f21f9bae027ce1763c73e2ea7edaf436140eae
|
553e5f644466683046ea180422727ccb37967b98
|
refs/heads/master
| 2021-01-23T10:28:49.654273
| 2018-05-09T09:11:30
| 2018-05-09T09:11:30
| 93,061,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,914
|
py
|
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException
# Page under test; the real URL has been redacted from this copy of the file.
base_url = "CLASSIFIED"

# Page locators: an element class name, XPath expressions, and element ids
# used by the FeedbackForm page object below.
page_logo = "logo"
company_moto = "//*[@class='col-xs-8 align-center']//p"
company_name = "//*[@class='col-xs-8 align-center']//h1"
mailing_list = "//*[@class='pointer']//span"
mailing_list_popup = "modal-content"
mailing_list_headline = "myModalLabel"
class FeedbackForm(object):
    """Page object for the feedback-form page.

    Wraps navigation plus presentational checks (CSS properties of the
    logo, company moto, company name and mailing-list widgets).  Each
    ``check_*`` method returns raw CSS values so the assertions live in
    the tests, not in the page object.
    """

    def __init__(self, driver):
        self.driver = driver

    def open_page(self):
        """Navigate the driver to the page under test."""
        self.driver.get(base_url)

    def check_logo(self):
        """Return [text-align, padding-left] CSS values of the company logo."""
        logo = self.driver.find_element(By.CLASS_NAME, page_logo)
        logo_position = [logo.value_of_css_property("text-align"), logo.value_of_css_property("padding-left")]
        return logo_position

    def check_moto(self):
        """Return [font-size, font-family, font-style, text-align] of the moto."""
        moto = self.driver.find_element(By.XPATH, company_moto)
        font_size = moto.value_of_css_property("font-size")
        # Only the first (primary) font of the font-family stack is relevant.
        font_name = moto.value_of_css_property("font-family").split(",")[0]
        font_style = moto.value_of_css_property("font-style")
        text_centered = moto.value_of_css_property("text-align")
        return [font_size, font_name, font_style, text_centered]

    def check_company_name(self):
        """Return [font-size, font-family, text-align] of the company name."""
        name = self.driver.find_element(By.XPATH, company_name)
        font_size = name.value_of_css_property("font-size")
        font_name = name.value_of_css_property("font-family").split(",")[0]
        text_centered = name.value_of_css_property("text-align")
        return [font_size, font_name, text_centered]

    def check_attr_of_mailing_list(self):
        """Return [text, font-size, font-family, color, hover color] of the button.

        The colour is sampled before and after moving the mouse onto the
        button so callers can assert the hover colour change.
        """
        mailing_list_btn = self.driver.find_element(By.XPATH, mailing_list)
        btn_text = mailing_list_btn.text
        font_size = mailing_list_btn.value_of_css_property("font-size")
        font_name = mailing_list_btn.value_of_css_property("font-family").split(",")[0]
        start_color = mailing_list_btn.value_of_css_property("color")
        ActionChains(self.driver).move_to_element(mailing_list_btn).perform()
        color_changed = mailing_list_btn.value_of_css_property("color")
        return [btn_text, font_size, font_name, start_color, color_changed]

    def check_mailing_list_popup(self):
        """Click the mailing-list button and report whether the pop-up is shown."""
        mailing_list_btn = self.driver.find_element(By.XPATH, mailing_list)
        mailing_list_btn.click()
        try:
            pop_up = self.driver.find_element(By.CLASS_NAME, mailing_list_popup)
            # Bug fix: is_displayed is a method; referencing the bound method
            # without calling it was always truthy, so this branch could never
            # return False for a present-but-hidden pop-up.
            if pop_up.is_displayed():
                return True
            else:
                return False
        except NoSuchElementException:
            return False

    def check_mailing_list_headline(self):
        """Open the pop-up and return the headline WebElement (not its text)."""
        mailing_list_btn = self.driver.find_element(By.XPATH, mailing_list)
        mailing_list_btn.click()
        return self.driver.find_element(By.ID, mailing_list_headline)

    def check_attr_of_mailing_list_headline(self):
        """Return [font-size, font-family, centered_flag] of the pop-up headline."""
        mailing_list_btn = self.driver.find_element(By.XPATH, mailing_list)
        mailing_list_btn.click()
        headline = self.driver.find_element(By.ID, mailing_list_headline)
        size = headline.value_of_css_property("font-size")
        font_family = headline.value_of_css_property("font-family").split(",")[0]
        is_centered = headline.value_of_css_property("text-align")
        if is_centered == "center":
            return [size, font_family, True]
        return [size, font_family, False]
|
[
"arximed.monte@gmail.com"
] |
arximed.monte@gmail.com
|
0f4aaf1913e904c2aa767b8653cd38ce34ff30bf
|
18eef6419da5721139df45b92c7557dbfa654347
|
/apps/users/migrations/0004_auto_20180529_2258.py
|
40546c416a804fa94b82a01a3ea68690b306a82b
|
[] |
no_license
|
taoing/pymx
|
a36989f805c760b535954a0056dcbd8180b32ea3
|
3f24b8d417f1c1896cab73c623ab9774807f520d
|
refs/heads/master
| 2020-03-18T20:58:37.435126
| 2018-06-09T15:16:49
| 2018-06-09T15:16:49
| 135,250,436
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 583
|
py
|
# Generated by Django 2.0.3 on 2018-05-29 22:58
# Auto-generated migration: alters two fields on the ``user`` model so that
# blank and NULL values are accepted (presumably relaxing previously
# required fields — confirm against migration 0003).

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('users', '0003_auto_20180529_1658'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='address',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='nickname',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
    ]
|
[
"="
] |
=
|
762bbd897e6f2e90c32606aa1aa9c01022761f67
|
27a4eaa9b7052d3d65dd6c63952ac3055c73fbf0
|
/ztst/tweet_100th.py
|
1d0d4b8246f3b8f2602b011ce8729e61c5b6ef7b
|
[] |
no_license
|
tobby2002/vec4ir
|
84f4695dc0a58f6cca6aff3c970167893a5d8c68
|
8acd1e91cd9eb0a8625bdf18cc3c37d2b7cc2a44
|
refs/heads/master
| 2021-06-19T20:42:15.913189
| 2021-03-21T10:31:35
| 2021-03-21T10:31:35
| 190,150,202
| 0
| 0
| null | 2019-06-04T07:19:09
| 2019-06-04T07:19:09
| null |
UTF-8
|
Python
| false
| false
| 1,135
|
py
|
from sklearn.feature_extraction.text import TfidfVectorizer

# Three toy documents used to demonstrate TF-IDF weighting.
words = ['the cat sat on the mat cat', 'the fat rat sat on a mat', 'the bat and a rat sat on a mat']

tfidf_vectorizer = TfidfVectorizer(min_df=1, use_idf=True)
tfidf_matrix = tfidf_vectorizer.fit_transform(words)
terms_name = tfidf_vectorizer.get_feature_names()
# (removed an unused `toarry = tfidf_matrix.todense()` intermediate)

for i in tfidf_matrix.toarray():
    # Bug fix: on Python 3 ``print(zip(...))`` prints only the opaque zip
    # object's repr; materialise it so the (term, weight) pairs are shown,
    # matching the expected output recorded in the comments below.
    print(list(zip(terms_name, i)))
    print('i:', i)

# [(u'and', 0.0), (u'bat', 0.0), (u'cat', 0.78800079617844954), (u'fat', 0.0), (u'mat', 0.23270298212286766), (u'on', 0.23270298212286766), (u'rat', 0.0), (u'sat', 0.23270298212286766), (u'the', 0.46540596424573533)]
# [(u'and', 0.0), (u'bat', 0.0), (u'cat', 0.0), (u'fat', 0.57989687146162439), (u'mat', 0.34249643393071422), (u'on', 0.34249643393071422), (u'rat', 0.44102651785124652), (u'sat', 0.34249643393071422), (u'the', 0.34249643393071422)]
# [(u'and', 0.50165133177159349), (u'bat', 0.50165133177159349), (u'cat', 0.0), (u'fat', 0.0), (u'mat', 0.29628335772067432), (u'on', 0.29628335772067432), (u'rat', 0.38151876810273028), (u'sat', 0.29628335772067432), (u'the', 0.29628335772067432)]
|
[
"tobby2002@gmailcom"
] |
tobby2002@gmailcom
|
f0ac9ae13d2122d7c283b67e7d26a9210686fa38
|
b08d42933ac06045905d7c005ca9c114ed3aecc0
|
/src/coefSubset/evaluate/ranks/fiftyPercent/rank_4q5e_G.py
|
afe9e17d1c84d179371c4fcf47787fee1df0ea11
|
[] |
no_license
|
TanemuraKiyoto/PPI-native-detection-via-LR
|
d148d53f5eb60a4dda5318b371a3048e3f662725
|
897e7188b0da94e87126a4acc0c9a6ff44a64574
|
refs/heads/master
| 2022-12-05T11:59:01.014309
| 2020-08-10T00:41:17
| 2020-08-10T00:41:17
| 225,272,083
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,390
|
py
|
# 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.

import os
import pandas as pd
import numpy as np
import pickle

os.chdir('/mnt/scratch/tanemur1/')

# Read the model and trainFile
# Job-specific constants: one complex (CSV), one model identifier, and the
# fraction of highest-magnitude coefficients to keep.
testFile = '4q5e.csv'
identifier = 'G'
coefFrac = 0.5
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/fiftyPercent/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/fiftyPercent/ranks/'
pdbID = testFile[:4]

# NOTE(review): pickle.load is only safe for trusted, locally produced model files.
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
    clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
# Load the descriptor matrix and transpose it so rows are decoys and
# columns are descriptors.
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
#df1 = df1.reindex(sorted(df1.columns), axis = 1)

# Keep coefficients within the given fraction when ordered by decreasing order of coefficient magnitude
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs['absVal'] = np.abs(coefs['coefficients'])
coefs.sort_values(by = 'absVal', ascending = False, inplace = True)
# 14028 is the total descriptor count; keep the top coefFrac of them.
coefs = coefs[:int(14028 * coefFrac + 0.5)]
keepList = list(coefs.index)
del coefs
df1 = df1[keepList]
df1 = df1.reindex(sorted(df1.columns), axis = 1)
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
    scaler = pickle.load(g)

for i in range(len(df1)):
    # subtract from one row each row of the dataframe, then remove the trivial row[[i]] - row[[i]]. Also some input files have 'class' column. This is erroneous and is removed.
    df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
    df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
    # Standardize input DF using the standard scaler used for training data.
    df2 = scaler.transform(df2)
    # Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
    predictions = clf.predict(df2)
    score = sum(predictions)
    scoreList.append(score)

# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)

with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
    result.to_csv(h)
|
[
"tanemur1@msu.edu"
] |
tanemur1@msu.edu
|
396e5b33aa1c7bb304b0785bb82b1573c352d6c1
|
5598c24e484177d6a5b0772013247e3118c6dcf1
|
/spatialmath/base/__init__.py
|
8546e1251ff85aa70bc62e59f7205d645f82cc31
|
[
"MIT"
] |
permissive
|
krenshaw2018/spatialmath-python
|
587136040267984cd6a431eaae3e89c8d740a7f9
|
95629b378422d58a4c77c62ebf3b189ef7a42824
|
refs/heads/master
| 2023-03-28T21:14:03.992328
| 2021-04-06T10:36:53
| 2021-04-06T10:36:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,058
|
py
|
# Part of Spatial Math Toolbox for Python
# Copyright (c) 2000 Peter Corke
# MIT Licence, see details in top-level file: LICENCE
from spatialmath.base.argcheck import * # lgtm [py/polluting-import]
from spatialmath.base.quaternions import * # lgtm [py/polluting-import]
from spatialmath.base.transforms2d import * # lgtm [py/polluting-import]
from spatialmath.base.transforms3d import * # lgtm [py/polluting-import]
from spatialmath.base.transformsNd import * # lgtm [py/polluting-import]
from spatialmath.base.vectors import * # lgtm [py/polluting-import]
from spatialmath.base.symbolic import * # lgtm [py/polluting-import]
from spatialmath.base.animate import * # lgtm [py/polluting-import]
from spatialmath.base.graphics import * # lgtm [py/polluting-import]
from spatialmath.base.numeric import * # lgtm [py/polluting-import]
# Explicit public API re-exported by ``from spatialmath.base import *``;
# grouped by the submodule each name comes from.
__all__ = [
    # spatialmath.base.argcheck
    'assertmatrix',
    'ismatrix',
    'getvector',
    'assertvector',
    'isvector',
    'isscalar',
    'getunit',
    'isnumberlist',
    'isvectorlist',
    # spatialmath.base.quaternions
    'pure',
    'qnorm',
    'unit',
    'isunit',
    'isequal',
    'q2v',
    'v2q',
    'qqmul',
    'inner',
    'qvmul',
    'vvmul',
    'qpow',
    'conj',
    'q2r',
    'r2q',
    'slerp',
    'rand',
    'matrix',
    'dot',
    'dotb',
    'angle',
    'qprint',
    # spatialmath.base.transforms2d
    'rot2',
    'trot2',
    'transl2',
    'ishom2',
    'isrot2',
    'trlog2',
    'trexp2',
    'tr2jac2',
    'trinterp2',
    'trprint2',
    'trplot2',
    'tranimate2',
    'xyt2tr',
    'tr2xyt',
    'trinv2',
    # spatialmath.base.transforms3d
    'rotx',
    'roty',
    'rotz',
    'trotx',
    'troty',
    'trotz',
    'transl',
    'ishom',
    'isrot',
    'rpy2r',
    'rpy2tr',
    'eul2r',
    'eul2tr',
    'angvec2r',
    'angvec2tr',
    'oa2r',
    'oa2tr',
    'tr2angvec',
    'tr2eul',
    'tr2rpy',
    'trlog',
    'trexp',
    'trnorm',
    'trinterp',
    'delta2tr',
    'trinv',
    'tr2delta',
    'tr2jac',
    'rpy2jac',
    'eul2jac',
    'exp2jac',
    'rot2jac',
    'trprint',
    'trplot',
    'tranimate',
    # spatialmath.base.transformsNd
    't2r',
    'r2t',
    'tr2rt',
    'rt2tr',
    'Ab2M',
    'isR',
    'isskew',
    'isskewa',
    'iseye',
    'skew',
    'vex',
    'skewa',
    'vexa',
    'h2e',
    'e2h',
    'homtrans',
    'rodrigues',
    # spatialmath.base.vectors
    'colvec',
    'unitvec',
    'unitvec_norm',
    'norm',
    'normsq',
    'isunitvec',
    'iszerovec',
    'isunittwist',
    'isunittwist2',
    'unittwist',
    'unittwist_norm',
    'unittwist2',
    'angdiff',
    'removesmall',
    'cross',
    'iszero',
    # spatialmath.base.animate
    'Animate',
    'Animate2',
    # spatialmath.base.graphics
    'plotvol2',
    'plotvol3',
    'plot_point',
    'plot_text',
    'plot_box',
    'circle',
    'ellipse',
    'sphere',
    'ellipsoid',
    'plot_circle',
    'plot_ellipse',
    'plot_sphere',
    'plot_ellipsoid',
    'isnotebook',
    # spatialmath.base.numeric
    'numjac',
]
|
[
"peter.i.corke@gmail.com"
] |
peter.i.corke@gmail.com
|
482a8b9237a2c8bf0e2469c9062327966a46c836
|
ccc407c900ac36c57f2716adcdd28f38108d62ef
|
/models.py
|
a2cb7960d1b4b75f3ca645404af912b5cd09e2ce
|
[] |
no_license
|
uditduhan23/YT_FastAPI_Celery_Redis_Flower_Introduction
|
d1b3f2f361ae0dc11be39eded78ccde45724b45e
|
124b35e3af08330cd89f3658906ec54b2f623b46
|
refs/heads/main
| 2023-06-16T23:30:36.456816
| 2021-07-16T14:42:16
| 2021-07-16T14:42:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
from sqlalchemy import Column, DateTime, Integer, String
from sqlalchemy.sql import func
from db_conf import Base
class Post(Base):
    """ORM model for an authored post; timestamp is assigned server-side."""
    __tablename__ = "post"
    id = Column(Integer, primary_key=True, index=True)
    title = Column(String)
    author = Column(String)
    content = Column(String)
    # server_default=func.now(): the database fills this in at INSERT time.
    time_created = Column(DateTime(timezone=True), server_default=func.now())
class User(Base):
    """ORM model for an account; usernames are unique at the DB level."""
    __tablename__ = "user"
    id = Column(Integer, primary_key=True, index=True)
    username = Column(String, unique=True)
    # Length-capped column; presumably stores a hashed password — confirm
    # against the code that writes it.
    password = Column(String(255))
|
[
"veryacademy@gmail.com"
] |
veryacademy@gmail.com
|
9eb3b5e4e4cdd96a839e5c24b404869c18d5e9ee
|
480e33f95eec2e471c563d4c0661784c92396368
|
/GeneratorInterface/GenFilters/test/test_isotrack_cfg.py
|
af5d058a84dc0c93779710cc9bfda12f465b9313
|
[
"Apache-2.0"
] |
permissive
|
cms-nanoAOD/cmssw
|
4d836e5b76ae5075c232de5e062d286e2026e8bd
|
4eccb8a758b605875003124dd55ea58552b86af1
|
refs/heads/master-cmsswmaster
| 2021-01-23T21:19:52.295420
| 2020-08-27T08:01:20
| 2020-08-27T08:01:20
| 102,867,729
| 7
| 14
|
Apache-2.0
| 2022-05-23T07:58:09
| 2017-09-08T14:03:57
|
C++
|
UTF-8
|
Python
| false
| false
| 1,710
|
py
|
# CMSSW configuration: generate QCD events (14 TeV, CUETP8M1 tune) and keep
# only those passing the isolated-track filter; output is GEN-tier ROOT.
import FWCore.ParameterSet.Config as cms

process = cms.Process("TEST")
process.load("FWCore.Framework.test.cmsExceptionsFatal_cff")
process.load("Configuration.StandardSequences.Services_cff")
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.load("Configuration.EventContent.EventContent_cff")
process.load("Configuration.Generator.QCDForPF_14TeV_TuneCUETP8M1_cfi")

# Fixed seed makes the generated sample reproducible.
process.RandomNumberGeneratorService = cms.Service("RandomNumberGeneratorService",
    generator = cms.PSet(
        initialSeed = cms.untracked.uint32(123456789),
        engineName = cms.untracked.string('HepJamesRandom')
    )
)

# The following three lines reduce the clutter of repeated printouts
# of the same exception message.
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.categories.append('PythiaFilter')

process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(1000))

process.source = cms.Source("EmptySource")

process.load("GeneratorInterface.GenFilters.PythiaFilterIsolatedTrack_cfi")

# Only events selected by path 'p' (generator + filter) are written out.
process.GEN = cms.OutputModule("PoolOutputModule",
    fileName = cms.untracked.string('QCD14TeVIsoTrack.root'),
    SelectEvents = cms.untracked.PSet(
        SelectEvents = cms.vstring('p')
    ),
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('GEN')
    )
)

# Restrict the filter's seed-track pseudorapidity window to 2.0 < eta < 3.0.
process.isotrack_filter.minSeedEta = 2.0
process.isotrack_filter.maxSeedEta = 3.0

process.p = cms.Path(process.generator * process.isotrack_filter)
process.outpath = cms.EndPath(process.GEN)
process.schedule = cms.Schedule(process.p, process.outpath)
|
[
"sunanda.banerjee@cern.ch"
] |
sunanda.banerjee@cern.ch
|
d46c7e58abc3648273a301b1ff3f9c4f7e2b1214
|
2c0e8fa208f59fb3d23c7257cb9cff426fa16d85
|
/ga4gh/backend.py
|
2fdefbd883323811fd1ec9d7abe5a4cb4bec5766
|
[
"Apache-2.0"
] |
permissive
|
melaniedc/server
|
db24c37bf0e778630d0910a1101eeb908647b02e
|
9fe974e421d2e4d3510e7928053edbbce47fd4bb
|
refs/heads/master
| 2020-02-26T16:09:18.162175
| 2015-01-30T14:52:46
| 2015-01-30T14:52:46
| 26,870,317
| 0
| 0
| null | 2014-12-05T20:26:25
| 2014-11-19T16:28:39
|
Python
|
UTF-8
|
Python
| false
| false
| 6,006
|
py
|
"""
Module responsible for handling protocol requests and returning
responses.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import ga4gh.protocol as protocol
class Backend(object):
    """
    The GA4GH backend. This class provides methods for all of the GA4GH
    protocol end points.
    """
    def __init__(self, dataDir, variantSetClass):
        self._dataDir = dataDir
        self._variantSetIdMap = {}
        # All directories in datadir are assumed to correspond to VariantSets.
        for variantSetId in os.listdir(self._dataDir):
            relativePath = os.path.join(self._dataDir, variantSetId)
            if os.path.isdir(relativePath):
                self._variantSetIdMap[variantSetId] = variantSetClass(
                    variantSetId, relativePath)
        # Sorted so that paging over variant sets has a stable order.
        self._variantSetIds = sorted(self._variantSetIdMap.keys())

    def parsePageToken(self, pageToken, numValues):
        """
        Parses the specified pageToken and returns a list of the specified
        number of values. Page tokens are assumed to consist of a fixed
        number of integers seperated by colons. If the page token does
        not conform to this specification, raise a InvalidPageToken
        exception.
        """
        tokens = pageToken.split(":")
        # TODO define exceptions.InvalidPageToken and raise here.
        if len(tokens) != numValues:
            raise Exception("Invalid number of values in page token")
        # TODO catch a ValueError here when bad integers are passed and
        # convert this into the appropriate InvalidPageToken exception.
        # NOTE(review): under Python 3 ``map`` returns an iterator, not a
        # list; callers here unpack it immediately so both behave the same.
        values = map(int, tokens)
        return values

    def runSearchRequest(
            self, requestStr, requestClass, responseClass, pageListName,
            objectGenerator):
        """
        Runs the specified request. The request is a string containing
        a JSON representation of an instance of the specified requestClass
        in which the page list variable has the specified pageListName.
        We return a string representation of an instance of the specified
        responseClass in JSON format. Objects are filled into the page list
        using the specified object generator, which must return
        (object, nextPageToken) pairs, and be able to resume iteration from
        any point using the nextPageToken attribute of the request object.
        """
        # TODO change this to fromJSONDict and validate
        request = requestClass.fromJSONString(requestStr)
        pageList = []
        nextPageToken = None
        for obj, nextPageToken in objectGenerator(request):
            pageList.append(obj)
            # Stop once the page is full; the last nextPageToken seen lets
            # the client resume from here.
            if len(pageList) >= request.pageSize:
                break
        response = responseClass()
        response.nextPageToken = nextPageToken
        setattr(response, pageListName, pageList)
        return response.toJSONString()

    def searchVariantSets(self, request):
        """
        Returns a GASearchVariantSetsResponse for the specified
        GASearchVariantSetsRequest object.
        """
        return self.runSearchRequest(
            request, protocol.GASearchVariantSetsRequest,
            protocol.GASearchVariantSetsResponse, "variantSets",
            self.variantSetsGenerator)

    def searchVariants(self, request):
        """
        Returns a GASearchVariantsResponse for the specified
        GASearchVariantsRequest object.
        """
        return self.runSearchRequest(
            request, protocol.GASearchVariantsRequest,
            protocol.GASearchVariantsResponse, "variants",
            self.variantsGenerator)

    def variantSetsGenerator(self, request):
        """
        Returns a generator over the (variantSet, nextPageToken) pairs defined
        by the specified request.
        """
        # Page token for variant sets is a single index into the sorted id list.
        currentIndex = 0
        if request.pageToken is not None:
            currentIndex, = self.parsePageToken(request.pageToken, 1)
        while currentIndex < len(self._variantSetIds):
            variantSet = protocol.GAVariantSet()
            variantSet.id = self._variantSetIds[currentIndex]
            variantSet.datasetId = "NotImplemented"
            variantSet.metadata = self._variantSetIdMap[
                variantSet.id].getMetadata()
            currentIndex += 1
            nextPageToken = None
            if currentIndex < len(self._variantSetIds):
                nextPageToken = str(currentIndex)
            yield variantSet, nextPageToken

    def variantsGenerator(self, request):
        """
        Returns a generator over the (variant, nextPageToken) pairs defined by
        the specified request.
        """
        variantSetIds = request.variantSetIds
        startVariantSetIndex = 0
        startPosition = request.start
        # Page token for variants is "variantSetIndex:position".
        if request.pageToken is not None:
            startVariantSetIndex, startPosition = self.parsePageToken(
                request.pageToken, 2)
        for variantSetIndex in range(startVariantSetIndex, len(variantSetIds)):
            variantSetId = variantSetIds[variantSetIndex]
            # Unknown variant set ids are silently skipped.
            if variantSetId in self._variantSetIdMap:
                variantSet = self._variantSetIdMap[variantSetId]
                iterator = variantSet.getVariants(
                    request.referenceName, startPosition, request.end,
                    request.variantName, request.callSetIds)
                for variant in iterator:
                    # Resume one base past the current variant's start.
                    nextPageToken = "{0}:{1}".format(
                        variantSetIndex, variant.start + 1)
                    yield variant, nextPageToken
class MockBackend(Backend):
    """
    A mock Backend class for testing.
    """
    def __init__(self, dataDir=None):
        # TODO make a superclass of backend that does this
        # automatically without needing to know about the internal
        # details of the backend.
        # The dataDir argument is accepted for signature compatibility but
        # ignored: no filesystem scan is performed and the maps stay empty.
        self._dataDir = None
        self._variantSetIdMap = {}
        self._variantSetIds = []
|
[
"jk@well.ox.ac.uk"
] |
jk@well.ox.ac.uk
|
d06f9c02053dbaa64d0db28cc52b7690a08a2160
|
adc6d8ee596e4710c3241332758bb6990bdd8914
|
/subData/ejemplos ROI/ROI_V3.py
|
48e1600ee1a3a5b4fbd7593a1e7397eb9abe764a
|
[] |
no_license
|
NatalyTinoco/Trabajo-de-grado_Artefactos
|
cf9491c47a8a23ce5bab7c52498093a61319f834
|
5cc4e009f94c871c7ed0d820eb113398ac66ec2f
|
refs/heads/master
| 2022-03-20T00:51:48.420253
| 2019-11-24T19:10:40
| 2019-11-24T19:10:40
| 197,964,659
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,225
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 15 22:32:38 2019

@author: Nataly
"""
# Batch ROI segmentation: for every *.jpg in the working directory, take one
# colour channel, threshold it, clean the mask with morphological open/close,
# and save the result under ./segROI/#3/B/.
import cv2
import pylab as plt
from matplotlib import pyplot as plt
import numpy as np
from pylab import *
from readimg import read_img  # read images ### img=read_img(imgfile)##
from skimage.morphology import disk
from skimage.filters import threshold_otsu
from skimage.filters import threshold_li
from skimage.filters import threshold_minimum
from skimage.filters import threshold_triangle
from skimage.filters import try_all_threshold
from skimage.filters import threshold_otsu, threshold_local
from PIL import ImageEnhance
from PIL import Image
import glob
from skimage.segmentation import flood, flood_fill

for imgfile in glob.glob("*.jpg"):
    ima=read_img(imgfile)
    # Approach 1 ("Primera forma"): third channel of the plain colour split.
    """ # Primera forma #"""
    imR, imG, II=cv2.split(ima)
    # Approach 2 ("Segunda forma", disabled): V channel after HSV conversion.
    """ #Segunda forma #"""
    #imA=cv2.cvtColor(ima,cv2.COLOR_RGB2HSV)
    #I,I,II=cv2.split(imA)
    # Approach 3 ("Tercera forma", disabled): Z channel after XYZ conversion.
    """ # Tercera forma #"""
    #imA=cv2.cvtColor(ima,cv2.COLOR_RGB2XYZ)
    #I,I,II=cv2.split(imA)
    #fig, ax = try_all_threshold(II, figsize=(10, 8), verbose=False)
    #plt.show()
    #block_size = 51
    #thresh = threshold_local(II, block_size, offset=10)
    # Li's minimum-cross-entropy threshold; the alternatives above/below were
    # tried and kept commented out for reference.
    thresh = threshold_li(II)
    #thresh=threshold_minimum(II)
    #thresh=threshold_otsu(II)
    #thresh=threshold_triangle(II)
    #ret3,thresh= cv2.threshold(II,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    binary = II > thresh
    #"""
    # Convert the boolean mask to uint8 {0, 255} for OpenCV.
    binary = (binary*255).astype(np.uint8)
    #plt.imshow(binary, cmap=plt.cm.gray)
    #plt.show()
    #"""
    ### Morphological transformations: open to drop small specks, then close
    ### with a larger ellipse to fill holes in the retained region.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
    opening = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel)
    #dilation = cv2.dilate(opening,kernel,iterations = 1)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (37, 37))
    close=cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
    #plt.imshow(close, cmap=plt.cm.gray)
    #plt.show()
    dire='./segROI/#3/B/'+imgfile
    #img=cv2.cvtColor(im,cv2.COLOR_BGR2RGB)
    cv2.imwrite(dire,close)
    print(imgfile)
    k = cv2.waitKey(1000)
    #destroy the window
    cv2.destroyAllWindows()
#"""
|
[
"51056570+NatalyTinoco@users.noreply.github.com"
] |
51056570+NatalyTinoco@users.noreply.github.com
|
5b01a0dbeb188b5186ce2c9757ac5da7a4312aa3
|
8e00a42f935ee15ed0dd27241f30fd2a909891c2
|
/config.py
|
481a95097620da6d15f1258a08e9dbf541d129a4
|
[] |
no_license
|
MrBrunotte/Microblog
|
c17965cc34b42332b95748adae75bd0abdd17996
|
b1f33a843d717685481c22ab203c740a589177f2
|
refs/heads/master
| 2020-06-30T10:44:22.527410
| 2019-08-11T15:00:40
| 2019-08-11T15:00:40
| 200,802,140
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 691
|
py
|
import os

# Absolute directory of this module; anchors the default SQLite file path.
basedir = os.path.abspath(os.path.dirname(__file__))


class Config(object):
    """Flask application configuration, read from environment variables with
    development-friendly fallbacks."""

    # Session/CSRF signing secret; the hard-coded fallback is for dev only.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
    # Database DSN; defaults to a local SQLite file next to this module.
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'app.db')
    SQLALCHEMY_TRACK_MODIFICATIONS = False  # disable noisy event tracking
    # Outgoing mail: effectively disabled when MAIL_SERVER is unset.
    MAIL_SERVER = os.environ.get('MAIL_SERVER')
    MAIL_PORT = int(os.environ.get('MAIL_PORT') or 25)
    # Any non-empty value for MAIL_USE_TLS enables TLS.
    MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS') is not None
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    # NOTE(review): presumably the recipients of admin/error mail -- confirm.
    ADMINS = ['your-email@example.com']
    LANGUAGES = ['en', 'es', 'se']  # supported locales
    POSTS_PER_PAGE = 25  # pagination size
|
[
"mrbrunotte@gmail.com"
] |
mrbrunotte@gmail.com
|
e2cc047b7cd5216552d5667bf63496afc02e548b
|
e74463d223acfe6b849177177cb409060e7a44d1
|
/Data Structures and Algorithms/01 Algorithmic Toolbox/Week 4 - Divide-and-Conquer/poly_mult.py
|
3d6d01b5f543a0a0108a339b3a2bfa564fdb5742
|
[] |
no_license
|
AlexEngelhardt-old/courses
|
24f4acf6de22f6707568024c5ee4a2fde412e461
|
739be99265b0aca1c58abe6f107b4c49de055b9d
|
refs/heads/master
| 2023-05-05T22:25:50.327739
| 2020-12-09T14:57:46
| 2020-12-09T14:57:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,645
|
py
|
def lfill(arr, length):
    """Left-pad *arr* with zeros so that its total length is at least *length*."""
    pad = length - len(arr)  # negative pad yields an empty prefix, i.e. no-op
    return [0] * pad + arr
def poly_mult_naive(A, B):
    """Naive O(n^2) implementation of polynomial multiplication.

    A and B are coefficient arrays, highest-degree coefficient first.

    Example
    -------
    A = 3x^2 + 2x + 5
    B = 5x^2 + x + 2
    A * B = 15x^4 + 13x^3 + 33x^2 + 9x + 10

    >>> poly_mult_naive([3, 2, 5], [5, 1, 2])
    [15, 13, 33, 9, 10]
    >>> poly_mult_naive([3, 2, 5], [4])
    [12, 8, 20]
    """
    n = max(len(A), len(B))
    # Left-pad both operands with zeros to a common length (same as lfill()).
    A = [0] * (n - len(A)) + A
    B = [0] * (n - len(B)) + B

    res = [0] * (2*n - 1)
    for i in range(n):
        for j in range(n):
            res[i+j] += A[i] * B[j]

    # Strip leading zeros, but always keep at least one coefficient: the
    # original `while res[0] == 0` emptied the list and crashed with an
    # IndexError when the product was the zero polynomial.
    while len(res) > 1 and res[0] == 0:
        del res[0]
    return res
def poly_mult_better(A, B):
    """Divide-and-conquer polynomial multiplication (unimplemented sketch).

    Idea: split A into high/low halves D1 and D0 of degree n/2 -- e.g. for
    A = [4, 3, 2, 1], D1 = [4, 3] and D0 = [2, 1]; no computation needed for
    the split.  Split B into E1 and E0 the same way.  Then

        A*B = D1*E1 * x^n + (D1*E0 + D0*E1) * x^(n/2) + D0*E0

    The recurrence is T(n) = 4*T(n/2) + kn, which still solves to O(n^2),
    so this variant was deliberately skipped in favour of Karatsuba.
    """
    # Intentionally left unimplemented; callers receive None.
    pass
def poly_mult_fast(A, B):
    """Multiply two polynomials with Karatsuba's O(n^log2(3)) algorithm.

    A and B are coefficient arrays, highest-degree coefficient first (the
    same convention and output format as poly_mult_naive).  The original
    function was a stub that returned the placeholder [1, 2, 3].

    >>> poly_mult_fast([3, 2, 5], [5, 1, 2])
    [15, 13, 33, 9, 10]
    >>> poly_mult_fast([3, 2, 5], [4])
    [12, 8, 20]
    """
    def karatsuba(a, b):
        # a, b: equal-length, lowest-degree-first coefficient lists.
        # Returns their product, possibly with extra high-order zeros.
        n = len(a)
        if n <= 16:  # small inputs: quadratic schoolbook multiplication
            out = [0] * (2 * n - 1)
            for i in range(n):
                for j in range(n):
                    out[i + j] += a[i] * b[j]
            return out
        if n % 2:  # pad to an even length so the halves match
            a = a + [0]
            b = b + [0]
            n += 1
        m = n // 2
        a0, a1 = a[:m], a[m:]
        b0, b1 = b[:m], b[m:]
        # Three recursive products instead of four (Karatsuba's trick).
        # Recursive calls may return extra high-order zero padding; trim
        # each to the true product length 2m-1 before recombining.
        z0 = karatsuba(a0, b0)[:2 * m - 1]
        z2 = karatsuba(a1, b1)[:2 * m - 1]
        zm = karatsuba([x + y for x, y in zip(a0, a1)],
                       [x + y for x, y in zip(b0, b1)])[:2 * m - 1]
        z1 = [zm[i] - z0[i] - z2[i] for i in range(2 * m - 1)]
        out = [0] * (2 * n - 1)
        for i, c in enumerate(z0):
            out[i] += c
        for i, c in enumerate(z1):
            out[i + m] += c
        for i, c in enumerate(z2):
            out[i + 2 * m] += c
        return out

    n = max(len(A), len(B))
    # Normalise to a common length and flip to lowest-degree-first order.
    a = ([0] * (n - len(A)) + A)[::-1]
    b = ([0] * (n - len(B)) + B)[::-1]
    res = karatsuba(a, b)[::-1]
    # Strip leading zeros but keep at least one coefficient.
    while len(res) > 1 and res[0] == 0:
        del res[0]
    return res
if __name__ == "__main__":
    # Smoke test: print the naive and the "fast" results for two sample
    # products so the implementations can be compared by eye.
    print(poly_mult_naive([3, 2, 5], [5, 1, 2]))
    print(poly_mult_fast([3, 2, 5], [5, 1, 2]))
    print('---')
    print(poly_mult_naive([3, 2, 5], [4]))
    print(poly_mult_fast([3, 2, 5], [4]))
|
[
"alexander.w.engelhardt@gmail.com"
] |
alexander.w.engelhardt@gmail.com
|
0e91187cd1c363c623c71648471d0a741b416aba
|
0c66e605e6e4129b09ea14dbb6aa353d18aaa027
|
/diventi/accounts/migrations/0118_auto_20190430_1544.py
|
87abb4310a3d8a0e7ce02fa18928b782a13d894b
|
[
"Apache-2.0"
] |
permissive
|
flavoi/diventi
|
58fbc8c947f387cbcc1ce607878a59a6f2b72313
|
c0b1efe2baa3ff816d6ee9a8e86623f297973ded
|
refs/heads/master
| 2023-07-20T09:32:35.897661
| 2023-07-11T19:44:26
| 2023-07-11T19:44:26
| 102,959,477
| 2
| 1
|
Apache-2.0
| 2023-02-08T01:03:17
| 2017-09-09T14:10:51
|
Python
|
UTF-8
|
Python
| false
| false
| 452
|
py
|
# Generated by Django 2.1.7 on 2019-04-30 13:44
import diventi.accounts.models
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: installs the custom DiventiUserManager as the
    # default 'objects' manager for DiventiUser (manager state change only).

    dependencies = [
        ('accounts', '0117_auto_20190430_1543'),
    ]

    operations = [
        migrations.AlterModelManagers(
            name='diventiuser',
            managers=[
                ('objects', diventi.accounts.models.DiventiUserManager()),
            ],
        ),
    ]
|
[
"flavius476@gmail.com"
] |
flavius476@gmail.com
|
5577adbf39c30e7cfa89c76e8eb07f81dca2ff8c
|
37c6507cb937312017fb05e1010007419e68e5a8
|
/post_to_gminer/client_send.py
|
94e58eb54cd07a652d151cf836a26292163b5a0e
|
[] |
no_license
|
bbcf/gdv
|
5fc04e186fd0487db6bce850538cba77e82b6284
|
f8738e7dc49ca826e29f4454484d2da716389bd2
|
refs/heads/master
| 2021-01-21T19:28:54.558176
| 2011-07-01T15:01:21
| 2011-07-01T15:01:21
| 1,186,918
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,181
|
py
|
# Modules #
import httplib2, urllib

# Python 2 client script: POSTs a gMiner job description (form-encoded JSON
# payload plus bookkeeping fields) to a local server and prints the HTTP
# status, reason and response body.
# NOTE(review): Python 2 only -- bare print statements and urllib.urlencode.

# All parameters #
args = {
    'data' : '''{"operation_type":"desc_stat","characteristic":"number_of_features","compare_parents":[],"per_chromosome":["per_chromosome"],"filter":[{"name":"Ribi genes","path":"/scratch/genomic/tracks/ribosome_genesis.sql"}],"ntracks":[{"name":"S. cer refseq genes","path":"/scratch/genomic/tracks/all_yeast_genes.sql"},{"name":"RP genes","path":"/scratch/genomic/tracks/ribosome_proteins.sql"}]}''',
    'output_location' : '/tmp/gMiner',
    'callback_url' : 'http://localhost:9999/',
    'job_id' : '1'
}

# Make the request #
connection = httplib2.Http()
body = urllib.urlencode(args)
headers = {'content-type':'application/x-www-form-urlencoded'}
address = "http://localhost:7522/"

# Send it #
response, content = connection.request(address, "POST", body=body, headers=headers)

print "Server status: ", response.status
print "Server reason: ", response.reason
print "Server content:", content

#-----------------------------------------#
# This code was written by Lucas Sinclair #
# lucas.sinclair@epfl.ch                  #
#-----------------------------------------#
|
[
"lucas.sinclair@me.com"
] |
lucas.sinclair@me.com
|
2b72601aa35f22fe7285d28cc4fd151b2cd8bd11
|
fa798e1779af170ee31bfd710a6faca9904a99ef
|
/6day/2. parameter2.py
|
ee3813869b83825983ac1f98dfcf349e50f27af4
|
[] |
no_license
|
itwebMJ/pythonStudy
|
1c573f98b78ce8c9273ae17a44d59a5a26c61b2c
|
8ea3112c9c587b6aeb8a5fa6ef715053286fbaae
|
refs/heads/master
| 2023-06-28T05:37:29.239010
| 2021-08-06T08:01:54
| 2021-08-06T08:01:54
| 375,879,186
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
def f1(*x):
    """Print a start marker, each positional argument, then an end marker."""
    # *x collects any number of positional arguments into a tuple.
    print('함수시작')
    for item in x:
        print(item)
    print('함수 끝')
def add(*num):
    """Return the sum of all positional arguments (0 when none are given)."""
    # Idiomatic replacement for the original manual accumulation loop.
    return sum(num)
def main():
    """Demonstrate f1 with zero/two/four arguments, then add() twice."""
    f1()
    f1('aaa', 'bbb')
    f1('ccc','ddd','eee','fff')
    s=add(1,2,3)
    print('add(1,2,3):',s)
    s=add(1,2,3,4,5)
    print('add(1,2,3,4,5):', s)

# Run the demo on import/execution (no __main__ guard in the original).
main()
|
[
"rlaalwn61@naver.com"
] |
rlaalwn61@naver.com
|
bfba7db224c5d590de0eaa96f0737a4849accb57
|
d094ba0c8a9b1217fbf014aa79a283a49aabe88c
|
/env/lib/python3.6/site-packages/nipype/interfaces/minc/tests/test_auto_Dump.py
|
bcca2a480195741d1cc2f4ccc5cdf6e3271a276a
|
[
"Apache-2.0"
] |
permissive
|
Raniac/NEURO-LEARN
|
d9274e0baadd97bb02da54bdfcf6ca091fc1c703
|
3c3acc55de8ba741e673063378e6cbaf10b64c7a
|
refs/heads/master
| 2022-12-25T23:46:54.922237
| 2020-09-06T03:15:14
| 2020-09-06T03:15:14
| 182,013,100
| 9
| 2
|
Apache-2.0
| 2022-12-09T21:01:00
| 2019-04-18T03:57:00
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,907
|
py
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..minc import Dump
def test_Dump_inputs():
    """Auto-generated spec test: every trait of Dump.input_spec() must still
    carry exactly the metadata recorded in input_map below."""
    # Expected trait metadata, keyed by input name.
    input_map = dict(
        annotations_brief=dict(
            argstr='-b %s',
            xor=('annotations_brief', 'annotations_full'),
        ),
        annotations_full=dict(
            argstr='-f %s',
            xor=('annotations_brief', 'annotations_full'),
        ),
        args=dict(argstr='%s', ),
        coordinate_data=dict(
            argstr='-c',
            xor=('coordinate_data', 'header_data'),
        ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        header_data=dict(
            argstr='-h',
            xor=('coordinate_data', 'header_data'),
        ),
        input_file=dict(
            argstr='%s',
            mandatory=True,
            position=-2,
        ),
        line_length=dict(argstr='-l %d', ),
        netcdf_name=dict(argstr='-n %s', ),
        out_file=dict(
            argstr='> %s',
            genfile=True,
            position=-1,
        ),
        output_file=dict(
            hash_files=False,
            keep_extension=False,
            name_source=['input_file'],
            name_template='%s_dump.txt',
            position=-1,
        ),
        precision=dict(argstr='%s', ),
        variables=dict(
            argstr='-v %s',
            sep=',',
        ),
    )
    inputs = Dump.input_spec()

    # Compare every recorded metadata key of every trait against the spec.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value
def test_Dump_outputs():
    """Auto-generated spec test for Dump.output_spec(): only output_file,
    with no extra metadata."""
    output_map = dict(output_file=dict(), )
    outputs = Dump.output_spec()

    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value
|
[
"leibingye@outlook.com"
] |
leibingye@outlook.com
|
fa43a838c4978b258a9605d31ab39eb249c8b487
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/thing_and_time/year/different_group_or_way/own_hand/bad_woman/old_week.py
|
9639b61edb0de71dc9e7ec473e80b69899594523
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
#! /usr/bin/env python
def great_week(str_arg):
    """Echo *str_arg* via year_and_day, then print a fixed tag line."""
    year_and_day(str_arg)
    print('way_and_little_place')
def year_and_day(str_arg):
    """Print the given argument unchanged."""
    print(str_arg)
if __name__ == '__main__':
    # Demo invocation when run as a script.
    great_week('want_first_time')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
917dadb212ec658472de836a5b21d4d5c5744946
|
1cc17b2eb1c885389126299602dbaa3bbd1e6dd7
|
/liaoxuefeng_python/innerbuilt/do_wearth_parse.py
|
cc2b69122069756e6f36f542b652328b8a61a467
|
[] |
no_license
|
shulu/python_note
|
e611093ff2af321fbc889167424574b214052b44
|
93b101a1723d2d47b435a25e81e447f0d5d95022
|
refs/heads/master
| 2021-01-02T22:47:53.717930
| 2019-03-26T08:55:48
| 2019-03-26T08:55:48
| 99,391,822
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,138
|
py
|
# -*- coding: utf-8 -*-

from xml.parsers.expat import ParserCreate
import json

# Module-level parser state shared between WeatherSaxHandler and
# parse_weather: `data` maps tag name -> list of {'attr', 'text'} entries,
# `textlist` buffers character-data chunks of the current element.
data = {}
textlist = []


class WeatherSaxHandler(object):
    """Expat callbacks that record every parsed element into the
    module-level `data` dict (one entry list per tag name)."""

    def start_element(self, name, attr):
        # First occurrence of a tag creates its list; each occurrence
        # appends its attribute dict.
        if not name in data:
            data[name] = []
        data[name].append({'attr' : attr})

    def char_data(self, text):
        # Character data can arrive in multiple chunks; buffer until the
        # matching end tag.
        textlist.append(text)

    def end_element(self, name):
        global textlist
        # Attach the accumulated text to the latest entry for this tag.
        # (Local name `str` shadows the built-in within this method.)
        str = ''.join(textlist)
        data[name][-1]['text'] = str
        textlist = []
def parse_weather(xml):
    """Parse a Yahoo! Weather XML document and return a dict holding the
    location plus today's and tomorrow's forecasts.

    Uses the module-level `data`/`textlist` globals that WeatherSaxHandler
    writes into during parsing.
    """
    # Bug fix: reset the shared parser state up front.  Without this, a
    # second call kept appending to the entries of the first parse, so
    # location[0] and forecast[0]/forecast[1] always reflected the *first*
    # document ever parsed.
    data.clear()
    del textlist[:]

    handler = WeatherSaxHandler()
    parser = ParserCreate()
    parser.StartElementHandler = handler.start_element
    parser.EndElementHandler = handler.end_element
    parser.CharacterDataHandler = handler.char_data
    parser.Parse(xml)

    location = data['yweather:location']
    forecast = data['yweather:forecast']
    return {
        'city': location[0]['attr']['city'],
        'country': location[0]['attr']['country'],
        'today': {
            'text': forecast[0]['attr']['text'],
            'low': forecast[0]['attr']['low'],
            'high': forecast[0]['attr']['high'],
        },
        'tomorrow': {
            'text': forecast[1]['attr']['text'],
            'low': forecast[1]['attr']['low'],
            'high': forecast[1]['attr']['high'],
        },
    }
# The parsed data can also be dumped to a JSON file:
# with open('weather_data.json', 'w') as f:
#     json.dump(data, f)

if __name__ == '__main__':
    from do_weather_xml import getWeatherXML
    xml = getWeatherXML()
    d = parse_weather(xml)
    print(str(d))

    # Bug fix: this sample document was previously assigned to a variable
    # named `data`, which rebound the module-level dict that the SAX handler
    # writes into and made the second parse_weather() call crash with a
    # TypeError ('str' object does not support item assignment).
    sample_xml = r'''<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<rss version="2.0" xmlns:yweather="http://xml.weather.yahoo.com/ns/rss/1.0" xmlns:geo="http://www.w3.org/2003/01/geo/wgs84_pos#">
    <channel>
        <title>Yahoo! Weather - Beijing, CN</title>
        <lastBuildDate>Wed, 27 May 2015 11:00 am CST</lastBuildDate>
        <yweather:location city="Beijing" region="" country="China"/>
        <yweather:units temperature="C" distance="km" pressure="mb" speed="km/h"/>
        <yweather:wind chill="28" direction="180" speed="14.48" />
        <yweather:atmosphere humidity="53" visibility="2.61" pressure="1006.1" rising="0" />
        <yweather:astronomy sunrise="4:51 am" sunset="7:32 pm"/>
        <item>
            <geo:lat>39.91</geo:lat>
            <geo:long>116.39</geo:long>
            <pubDate>Wed, 27 May 2015 11:00 am CST</pubDate>
            <yweather:condition text="Haze" code="21" temp="28" date="Wed, 27 May 2015 11:00 am CST" />
            <yweather:forecast day="Wed" date="27 May 2015" low="20" high="33" text="Partly Cloudy" code="30" />
            <yweather:forecast day="Thu" date="28 May 2015" low="21" high="34" text="Sunny" code="32" />
            <yweather:forecast day="Fri" date="29 May 2015" low="18" high="25" text="AM Showers" code="39" />
            <yweather:forecast day="Sat" date="30 May 2015" low="18" high="32" text="Sunny" code="32" />
            <yweather:forecast day="Sun" date="31 May 2015" low="20" high="37" text="Sunny" code="32" />
        </item>
    </channel>
</rss>
'''
    print(parse_weather(sample_xml))
|
[
"qq961085397@163.com"
] |
qq961085397@163.com
|
d84e266712ab016b57c02f337ed36cb13b123d9a
|
75e16fc6883e3e314b21ccf337beb0320bbcae50
|
/train.py
|
2f18cc8ed8e16739d9b6c577004d390b0b4c6768
|
[] |
no_license
|
mihirp1998/convHypernetComp
|
a0a9404ab6abf29a56733ea88d45a10f397d551d
|
6ed0632af965b81ac38cf3ed7bd9215adadb1902
|
refs/heads/master
| 2021-08-18T11:11:32.464130
| 2020-04-24T23:44:15
| 2020-04-24T23:44:15
| 169,819,367
| 0
| 0
| null | 2019-02-09T18:08:45
| 2019-02-09T01:07:13
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,212
|
py
|
import time
import os
import argparse
import numpy as np
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as LS
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.utils.data as data
from torchvision import transforms
from torch.nn.parameter import Parameter
#from unet import UNet,Feedforward
# Command-line interface for the hypernetwork compression training run.
parser = argparse.ArgumentParser()
parser.add_argument(
    '--batch-size', '-N', type=int, default=128, help='batch size')
parser.add_argument(
    '--train', '-f', required=True, type=str, help='folder of training images')
parser.add_argument(
    '--max-epochs', '-e', type=int, default=20000, help='max epochs')
parser.add_argument('--lr', type=float, default=0.0005, help='learning rate')
# parser.add_argument('--cuda', '-g', action='store_true', help='enables cuda')
parser.add_argument(
    '--iterations', type=int, default=16, help='unroll iterations')
parser.add_argument('--checkpoint', type=int, help='unroll iterations')
parser.add_argument('--update', type=int, help='unroll update')
args = parser.parse_args()

import new_dataset as dataset

# NOTE(review): train=False together with a "Valid" pickle file name, yet this
# loader is used as the training set below -- confirm the intent.
train_set = dataset.ImageFolder(root=args.train,train=False,file_name ="outValid15_100Vids.p")

train_loader = data.DataLoader(
    dataset=train_set, batch_size=args.batch_size, shuffle=True, num_workers=1)

print('total images: {}; total batches: {}'.format(
    len(train_set), len(train_loader)))

import network

# Hypernetwork produces per-video weights (wenc/wdec/wbin below) that the
# encoder, binarizer and decoder consume; all modules live on the GPU.
hypernet = network.HyperNetwork(train_set.vid_count).cuda()
encoder = network.EncoderCell().cuda()
binarizer = network.Binarizer().cuda()
decoder = network.DecoderCell().cuda()
def count_parameters(model):
    """Number of trainable (requires_grad) scalar parameters in *model*."""
    return sum(param.numel() for param in model.parameters() if param.requires_grad)
# Report model sizes for a quick sanity check.
print("hypernet ",count_parameters(hypernet))
print("encoder ",count_parameters(encoder))
print("decoder ",count_parameters(decoder))
print("binarizer ",count_parameters(binarizer))

# Only the hypernetwork is optimised directly; the other modules receive
# their weights from it at each forward pass.
solver = optim.Adam(
    [
        {
            'params': hypernet.parameters()
        }
    ],
    lr=args.lr)
def resume(epoch=None):
    """Load hypernet weights from checkpoint100_100vids_wn/.

    With no argument, loads the 'iter' checkpoint numbered 0; with an epoch
    number, loads that epoch's checkpoint.
    """
    if epoch is None:
        s = 'iter'
        epoch = 0
    else:
        s = 'epoch'

    # NOTE(review): printed before load_state_dict has actually succeeded.
    print("Loaded")
    hypernet.load_state_dict(
        torch.load('checkpoint100_100vids_wn/hypernet_{}_{:08d}.pth'.format(s, epoch)))
def save(index, epoch=True):
    """Write the hypernet state dict into checkpoint100_100vids_wn/,
    tagged as an epoch checkpoint (default) or an iteration checkpoint."""
    if not os.path.exists('checkpoint100_100vids_wn'):
        os.mkdir('checkpoint100_100vids_wn')

    if epoch:
        s = 'epoch'
    else:
        s = 'iter'

    torch.save(hypernet.state_dict(), 'checkpoint100_100vids_wn/hypernet_{}_{:08d}.pth'.format(s, index))
#
# Always start from the default ('iter' 0) checkpoint.
resume()

# Learning rate halves at the listed epochs.
scheduler = LS.MultiStepLR(solver, milestones=[50, 100, 200, 300, 400], gamma=0.5)

# Hard-coded resume point; restores the scheduler's position as well.
last_epoch = 100
if args.checkpoint:
    resume(args.checkpoint)
    #last_epoch = 0
scheduler.last_epoch = last_epoch - 1

# Bookkeeping for gradient accumulation over args.update mini-batches.
vepoch=0
index =0
solver.zero_grad()
loss_mini_batch = 0
all_losses = []
# Main training loop: progressive residual compression with per-video
# weights emitted by the hypernetwork, and gradient accumulation over
# args.update batches.  (Indentation reconstructed from flattened source.)
for epoch in range(last_epoch + 1, args.max_epochs + 1):
    #scheduler.step()
    for batch, (data,id_num,name) in enumerate(train_loader):
        batch_t0 = time.time()
        data = data[0]
        batch_size, input_channels, height, width = data.size()

        # Fresh ConvLSTM hidden/cell state pairs for the 3 encoder levels
        # and 4 decoder levels, sized to this batch's spatial pyramid.
        encoder_h_1 = (Variable(
            torch.zeros(batch_size, 256, height // 4, width // 4)),
            Variable(
                torch.zeros(batch_size, 256, height // 4, width // 4)))
        encoder_h_2 = (Variable(
            torch.zeros(batch_size, 512, height // 8, width // 8)),
            Variable(
                torch.zeros(batch_size, 512, height // 8, width // 8)))
        encoder_h_3 = (Variable(
            torch.zeros(batch_size, 512, height // 16, width // 16)),
            Variable(
                torch.zeros(batch_size, 512, height // 16, width // 16)))

        decoder_h_1 = (Variable(
            torch.zeros(batch_size, 512, height // 16, width // 16)),
            Variable(
                torch.zeros(batch_size, 512, height // 16, width // 16)))
        decoder_h_2 = (Variable(
            torch.zeros(batch_size, 512, height // 8, width // 8)),
            Variable(
                torch.zeros(batch_size, 512, height // 8, width // 8)))
        decoder_h_3 = (Variable(
            torch.zeros(batch_size, 256, height // 4, width // 4)),
            Variable(
                torch.zeros(batch_size, 256, height // 4, width // 4)))
        decoder_h_4 = (Variable(
            torch.zeros(batch_size, 128, height // 2, width // 2)),
            Variable(
                torch.zeros(batch_size, 128, height // 2, width // 2)))

        # Move every state tensor to the GPU.
        encoder_h_1 = (encoder_h_1[0].cuda(), encoder_h_1[1].cuda())
        encoder_h_2 = (encoder_h_2[0].cuda(), encoder_h_2[1].cuda())
        encoder_h_3 = (encoder_h_3[0].cuda(), encoder_h_3[1].cuda())
        decoder_h_1 = (decoder_h_1[0].cuda(), decoder_h_1[1].cuda())
        decoder_h_2 = (decoder_h_2[0].cuda(), decoder_h_2[1].cuda())
        decoder_h_3 = (decoder_h_3[0].cuda(), decoder_h_3[1].cuda())
        decoder_h_4 = (decoder_h_4[0].cuda(), decoder_h_4[1].cuda())

        patches = Variable(data.cuda())
        #solver.zero_grad()
        losses = []
        # Center the input; each unroll iteration encodes the remaining
        # residual and subtracts its reconstruction.
        res = patches - 0.5
        id_num = Variable(id_num.cuda())
        # Per-video weights for encoder / decoder / binarizer.
        wenc,wdec,wbin = hypernet(id_num,batch_size)
        bp_t0 = time.time()
        for i in range(args.iterations):
            encoded, encoder_h_1, encoder_h_2, encoder_h_3 = encoder(
                res,wenc,encoder_h_1, encoder_h_2, encoder_h_3,batch_size)

            codes = binarizer(encoded,wbin,batch_size)

            output, decoder_h_1, decoder_h_2, decoder_h_3, decoder_h_4 = decoder(
                codes,wdec, decoder_h_1, decoder_h_2, decoder_h_3, decoder_h_4,batch_size)

            res = res - output
            losses.append(res.abs().mean())
        all_losses.append(losses)
        bp_t1 = time.time()

        # Average L1 residual over unroll steps; divide by args.update so
        # the accumulated gradient matches a single larger batch.
        loss = sum(losses) / args.iterations
        loss = loss/args.update
        loss.backward()
        # NOTE(review): loss.data[0] is pre-0.4 PyTorch; newer versions
        # would need loss.item().
        loss_mini_batch += loss.data[0]
        if (index +1) % args.update == 0:
            # Do a SGD step once every iter_size iterations
            solver.step()
            solver.zero_grad()
            # print("Iter: %02d, Loss: %4.4f" % (i, loss_mini_batch/10))
            batch_t1 = time.time()
            print('[TRAIN] Epoch[{}]({}/{}); Loss: {:.6f}; Backpropagation: {:.4f} sec; Batch: {:.4f} sec'.format(epoch, batch + 1,len(train_loader), loss_mini_batch, bp_t1 - bp_t0, batch_t1 -batch_t0))
            print(('{:.4f} ' * args.iterations +'\n').format(* [l.data[0] for l in np.array(all_losses).mean(axis=0)]))
            loss_mini_batch = 0
            all_losses = []

        # Global step counter used for checkpoint cadence below.
        index = (epoch - 1) * len(train_loader) + batch
        if index % 700 == 0 and index != 0:
            vepoch+=1
            #save(vepoch)
            #print("scheduled")
            #scheduler.step()
        # if index % 2000 == 0 and index != 0:
        #     vepoch+=1
        #     scheduler.step()
        if index % 2000 == 0 and index != 0:
            save(0, False)

    if epoch % 100 == 0:
        save(epoch)
|
[
"mihirp1998.mp@gmail.com"
] |
mihirp1998.mp@gmail.com
|
04ca532abc1f721a651008e03b2fc480c37452cf
|
9cd25c62e501741bbf4f982058ac60b8cdf815dc
|
/_unittests/ut_testing/test_template_dl_torch.py
|
2ee7a06eb55e118202d4f461b0f1bb82357dabb5
|
[
"MIT"
] |
permissive
|
sdpython/lightmlrestapi
|
c60c2960b271e59750ebfe8fafc9c70304f92cbc
|
def172965eb197d8ab7f812c3f5f5ce129593cef
|
refs/heads/master
| 2022-07-09T06:56:31.458790
| 2022-05-19T23:46:52
| 2022-05-19T23:46:52
| 110,975,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,329
|
py
|
"""
@brief test tree node (time=12s)
"""
import os
import unittest
import numpy
from PIL import Image
from pyquickhelper.pycode import get_temp_folder, ExtTestCase
from lightmlrestapi.testing.template_dl_torch import restapi_version, restapi_load, restapi_predict
def get_torch():
    """Return the torch module when it is importable, otherwise None."""
    try:
        import torch  # pylint: disable=C0415
    except ImportError:
        return None
    return torch
class TestTemplateDlTorch(ExtTestCase):
    """End-to-end check of the torch REST-API template: save a pretrained
    squeezenet, reload it via restapi_load, and score one sample image."""

    @unittest.skipIf(get_torch() is None, reason="no torch")
    def test_template_dl_keras(self):
        # NOTE(review): method name says "keras" but this exercises the
        # torch template.
        self.assertEqual(restapi_version(), "0.1.1238")
        temp = get_temp_folder(__file__, "temp_template_dl_torch")
        import torchvision.models as models  # pylint: disable=E0401,C0415,R0402
        import torch  # pylint: disable=E0401,C0415
        # Downloads pretrained weights on the first run.
        model = models.squeezenet1_0(pretrained=True)
        model_name = os.path.join(temp, "model.torch")
        torch.save(model, model_name)
        # Sample image shipped with the test data.
        img_input = os.path.join(temp, "..", "data", "wiki_modified2.png")
        img_input = numpy.array(Image.open(img_input))
        mo = restapi_load({'model': model_name})
        pred = restapi_predict(mo, img_input)
        self.assertIsInstance(pred, numpy.ndarray)
        # Squeezenet outputs one row of 1000 class scores.
        self.assertEqual(pred.shape, (1, 1000))
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
[
"xavier.dupre@gmail.com"
] |
xavier.dupre@gmail.com
|
508d6ae083c9e4dd5e88df2f98ff5ffe1ed2f997
|
f1790e298bcbf7b26cacd3c27850f243c446b9eb
|
/courses/pythonBrasil/EstruturaDeDecisao/ex006.py
|
e023d983077c2e8cfaf09eb5788f0c2eeb6359b5
|
[] |
no_license
|
misa9999/python
|
36001a1bf0eb842d00b010b02e05b01aa4dfac57
|
251c5226db1bfef4a8445b025f232a27a6924930
|
refs/heads/master
| 2023-03-04T16:25:48.610233
| 2021-02-22T21:37:51
| 2021-02-22T21:37:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
# Reads three integers and reports the largest of them.
# (User-facing strings kept in Portuguese, as in the original.)

n1 = int(input('n1: '))
n2 = int(input('n2: '))
n3 = int(input('n3: '))

# Bug fix: the original fell into an else-branch that printed
# "Todos números são iguais" whenever n1 itself was the (strict) maximum,
# i.e. whenever neither n2 nor n3 beat both other numbers.
maior = max(n1, n2, n3)
if n1 == n2 == n3:
    print('Todos números são iguais')

print(f'{maior} é o maior número.')
|
[
"yuukixasuna00@gmailcom"
] |
yuukixasuna00@gmailcom
|
ad7faa54a46d5965e09d67a6f9c2c498b2bbbec0
|
fcd965c9333ee328ec51bc41f5bc0300cc06dc33
|
/DailyCoding/invert_tree.py
|
9c1f7f53cc075075fb4ac08a3358a5e3da55f484
|
[] |
no_license
|
henrylin2008/Coding_Problems
|
699bb345481c14dc3faa8bab439776c7070a1cb0
|
281067e872f73a27f76ae10ab0f1564916bddd28
|
refs/heads/master
| 2023-01-11T11:55:47.936163
| 2022-12-24T07:50:17
| 2022-12-24T07:50:17
| 170,151,972
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 813
|
py
|
# Daily Coding Problem #83
# Problem
# This problem was asked by Google.
#
# Invert a binary tree.
#
# For example, given the following tree:
#
# a
# / \
# b c
# / \ /
# d e f
# should become:
#
# a
# / \
# c b
# \ / \
# f e d
# Solution
# Assuming we could invert the current node's left and right subtrees, all we'd need to do is then switch the left to now become right, and right to become left. The base case is when the node is None and we can just return None for that case. Then we know this works for the leaf node case since switching left and right subtrees doesn't do anything (since they're both None).
def invert(node):
    """Recursively mirror a binary tree in place; returns the (same) root."""
    if node is None:
        return None
    # Invert both subtrees, then hang each on the opposite side.
    node.left, node.right = invert(node.right), invert(node.left)
    return node
|
[
"henrylin2008@yahoo.com"
] |
henrylin2008@yahoo.com
|
59125fada11245cb2fd16d583346fea7808880f8
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03803/s808013287.py
|
d06c71951fe118e1fa63c59d59e0f64d918f0c20
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
# Read Alice's and Bob's card values and announce the winner.
a, b = map(int, input().split())
# A value of 1 is remapped to 14 -- presumably ace-high ranking; confirm
# against the problem statement.
a = a if a > 1 else 14
b = b if b > 1 else 14
print("Alice" if a > b else "Bob" if a < b else "Draw")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
076e9da84a5ca277c5b3c7a7d91b7c7594ad151a
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_118/2720.py
|
ad1a9ec15bca1a694901004c379c73f110bc065c
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
def nopalin(li):
    """Count integers i in [li[0], li[1]) whose decimal representation and
    whose square's decimal representation are both palindromes."""
    total = 0
    for i in range(li[0], li[1]):
        if ispalin(str(i)) and ispalin(str(i * i)):
            total += 1
    return total
def ispalin(a):
    """True when string *a* reads the same forwards and backwards."""
    return a == ''.join(reversed(a))
# Code Jam "Fair and Square" driver: for each case, count integers whose
# decimal form and whose square's decimal form are both palindromes, by
# scanning square roots in [ceil(sqrt(A)), floor(sqrt(B))].
infile = open('C-small-attempt0.in', 'r')
outfile = open('ansfairsq_small.in', 'w')
no = int(infile.readline())  # number of test cases
for i in range(no):
    li = infile.readline().split()
    li[0] = int(li[0])
    # Round the lower bound up to the next integer square root.
    a = int(li[0]**.5)
    if a**2 == li[0]:
        li[0] = a
    else:
        li[0] = a+1
    # Exclusive upper bound for range() over square roots.
    li[1] = int(int(li[1])**.5)+1
    ans = nopalin(li)
    outfile.write(('Case #{0}: '+str(ans)+'\n').format(i+1))
infile.close()
outfile.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
7593e3d69947e1c79ab1ac96622bbf72ed0e4f02
|
b15d2787a1eeb56dfa700480364337216d2b1eb9
|
/accelbyte_py_sdk/api/seasonpass/models/tier.py
|
2f85703a05dbe98fe8aaa8b3cba6d42d71842b3a
|
[
"MIT"
] |
permissive
|
AccelByte/accelbyte-python-sdk
|
dedf3b8a592beef5fcf86b4245678ee3277f953d
|
539c617c7e6938892fa49f95585b2a45c97a59e0
|
refs/heads/main
| 2023-08-24T14:38:04.370340
| 2023-08-22T01:08:03
| 2023-08-22T01:08:03
| 410,735,805
| 2
| 1
|
MIT
| 2022-08-02T03:54:11
| 2021-09-27T04:00:10
|
Python
|
UTF-8
|
Python
| false
| false
| 5,204
|
py
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: ags_py_codegen
# AccelByte Gaming Services Seasonpass Service (1.19.0)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
class Tier(Model):
    """Tier (Tier)

    Properties:
        id_: (id) OPTIONAL str

        required_exp: (requiredExp) OPTIONAL int

        rewards: (rewards) OPTIONAL Dict[str, List[str]]
    """

    # region fields

    id_: str  # OPTIONAL
    required_exp: int  # OPTIONAL
    rewards: Dict[str, List[str]]  # OPTIONAL

    # endregion fields

    # region with_x methods
    # Fluent setters; each assigns one field and returns self.

    def with_id(self, value: str) -> Tier:
        self.id_ = value
        return self

    def with_required_exp(self, value: int) -> Tier:
        self.required_exp = value
        return self

    def with_rewards(self, value: Dict[str, List[str]]) -> Tier:
        self.rewards = value
        return self

    # endregion with_x methods

    # region to methods

    def to_dict(self, include_empty: bool = False) -> dict:
        """Serialize set fields to a wire-format dict; include_empty adds
        zero-value placeholders for unset fields."""
        result: dict = {}
        if hasattr(self, "id_"):
            result["id"] = str(self.id_)
        elif include_empty:
            result["id"] = ""
        if hasattr(self, "required_exp"):
            result["requiredExp"] = int(self.required_exp)
        elif include_empty:
            result["requiredExp"] = 0
        if hasattr(self, "rewards"):
            result["rewards"] = {
                str(k0): [str(i1) for i1 in v0] for k0, v0 in self.rewards.items()
            }
        elif include_empty:
            result["rewards"] = {}
        return result

    # endregion to methods

    # region static methods

    @classmethod
    def create(
        cls,
        id_: Optional[str] = None,
        required_exp: Optional[int] = None,
        rewards: Optional[Dict[str, List[str]]] = None,
        **kwargs,
    ) -> Tier:
        """Build a Tier, setting only the fields that were passed."""
        instance = cls()
        if id_ is not None:
            instance.id_ = id_
        if required_exp is not None:
            instance.required_exp = required_exp
        if rewards is not None:
            instance.rewards = rewards
        return instance

    @classmethod
    def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> Tier:
        """Inverse of to_dict(); missing keys stay unset unless include_empty."""
        instance = cls()
        if not dict_:
            return instance
        if "id" in dict_ and dict_["id"] is not None:
            instance.id_ = str(dict_["id"])
        elif include_empty:
            instance.id_ = ""
        if "requiredExp" in dict_ and dict_["requiredExp"] is not None:
            instance.required_exp = int(dict_["requiredExp"])
        elif include_empty:
            instance.required_exp = 0
        if "rewards" in dict_ and dict_["rewards"] is not None:
            instance.rewards = {
                str(k0): [str(i1) for i1 in v0] for k0, v0 in dict_["rewards"].items()
            }
        elif include_empty:
            instance.rewards = {}
        return instance

    @classmethod
    def create_many_from_dict(
        cls, dict_: dict, include_empty: bool = False
    ) -> Dict[str, Tier]:
        # Bug fix: the generated code iterated `dict_` directly, which yields
        # only keys and makes the `k, v` unpacking raise ValueError for any
        # real payload; iterate key/value pairs instead.
        # TODO(review): this file is code-generated -- upstream the fix to the
        # generator template as well.
        return (
            {
                k: cls.create_from_dict(v, include_empty=include_empty)
                for k, v in dict_.items()
            }
            if dict_
            else {}
        )

    @classmethod
    def create_many_from_list(
        cls, list_: list, include_empty: bool = False
    ) -> List[Tier]:
        return (
            [cls.create_from_dict(i, include_empty=include_empty) for i in list_]
            if list_
            else []
        )

    @classmethod
    def create_from_any(
        cls, any_: any, include_empty: bool = False, many: bool = False
    ) -> Union[Tier, List[Tier], Dict[Any, Tier]]:
        """Dispatch to the single/many constructors based on payload shape."""
        if many:
            if isinstance(any_, dict):
                return cls.create_many_from_dict(any_, include_empty=include_empty)
            elif isinstance(any_, list):
                return cls.create_many_from_list(any_, include_empty=include_empty)
            else:
                raise ValueError()
        else:
            return cls.create_from_dict(any_, include_empty=include_empty)

    @staticmethod
    def get_field_info() -> Dict[str, str]:
        # Wire name -> attribute name mapping.
        return {
            "id": "id_",
            "requiredExp": "required_exp",
            "rewards": "rewards",
        }

    @staticmethod
    def get_required_map() -> Dict[str, bool]:
        # All fields are optional.
        return {
            "id": False,
            "requiredExp": False,
            "rewards": False,
        }

    # endregion static methods
|
[
"elmernocon@gmail.com"
] |
elmernocon@gmail.com
|
f36ba303596cbb747219a2c2e7f7ef047a3de25d
|
555c6ae723f2466673b7e6aeea11e7071461bfb3
|
/sakura/common/tools.py
|
66706081a63c70c046e108faf53597dc919d3359
|
[] |
no_license
|
riteshms/sakura
|
788ade92cd0f65fb891a737d0113b807bf955a33
|
e8d2b4454c26ea80f6275a1bde293db38db73d30
|
refs/heads/master
| 2021-07-05T10:58:41.774892
| 2017-09-20T09:03:15
| 2017-09-22T14:53:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 920
|
py
|
import sys, gevent
class StdoutProxy(object):
    """Wrap a stream so that every write is flushed immediately, while
    delegating all other attribute access to the wrapped stream."""

    def __init__(self, stdout):
        self.stdout = stdout

    def write(self, s):
        # Forward the payload, then flush so output appears right away.
        target = self.stdout
        target.write(s)
        target.flush()

    def __getattr__(self, attr):
        # Anything not defined here comes from the wrapped stream.
        return getattr(self.stdout, attr)
def set_unbuffered_stdout():
    # Replace sys.stdout with a proxy that flushes after every write.
    sys.stdout = StdoutProxy(sys.stdout)
def wait_greenlets(*greenlets):
    # NOTE(review): count=1 makes joinall return as soon as *one* greenlet
    # finishes, despite the plural name -- confirm this is intended.
    gevent.joinall(greenlets, count=1)
class SimpleAttrContainer:
    """Expose keyword arguments as attributes, recursively converting nested
    dicts to SimpleAttrContainer instances inside tuples and lists too."""

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, self.load_val(value))

    def load_val(self, v):
        """Convert dicts to containers; recurse through tuples and lists."""
        if isinstance(v, dict):
            return SimpleAttrContainer(**v)
        if isinstance(v, tuple):
            return tuple(self.load_val(item) for item in v)
        if isinstance(v, list):
            return [self.load_val(item) for item in v]
        return v

    def _asdict(self):
        # Shallow copy of the attribute dict (nested values stay converted).
        return self.__dict__.copy()
|
[
"etienne.duble@imag.fr"
] |
etienne.duble@imag.fr
|
209b79c613856a3a9efc9586a621842d4e69098e
|
f28591fab50d9b7a539c66b5a81fc91d1bc2ce64
|
/py3/def/uint16_bigendian_tobytes.py
|
718ccd6205a399b3b6fa9f1c5e6402ea7d33c9fa
|
[] |
no_license
|
tnzw/tnzw.github.io
|
b8a5fe1f8479736bbf2b3594d511a1282939a3b3
|
6d95968db793cebcfa77cb49eecd987f821350db
|
refs/heads/master
| 2023-04-21T14:22:49.849859
| 2023-03-31T15:55:01
| 2023-03-31T15:55:01
| 176,712,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 504
|
py
|
# uint16_bigendian_tobytes.py Version 1.0.0
# Copyright (c) 2020 Tristan Cavelier <t.cavelier@free.fr>
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://www.wtfpl.net/ for more details.
def uint16_bigendian_tobytes(uint16):
    """Serialize the low 16 bits of *uint16* as two big-endian bytes."""
    # Masking first makes int.to_bytes equivalent to the original
    # per-byte shift-and-mask construction for any integer input.
    return (uint16 & 0xFFFF).to_bytes(2, "big")
|
[
"tzw56702@outlook.com"
] |
tzw56702@outlook.com
|
7b74cc681fdd50f712c42f1604cb790300fe0a4d
|
ef821468b081ef2a0b81bf08596a2c81e1c1ef1a
|
/PythonWebBasics_Django/petstagram/pets/urls.py
|
212f52bff8daa5f925d309e180d924a318f1cc1b
|
[] |
no_license
|
Ivaylo-Atanasov93/The-Learning-Process
|
71db22cd79f6d961b9852f140f4285ef7820dd80
|
354844e2c686335345f6a54b3af86b78541ed3f3
|
refs/heads/master
| 2023-03-30T20:59:34.304207
| 2021-03-29T15:23:05
| 2021-03-29T15:23:05
| 294,181,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
from django.urls import path
from pets.views import pets_list, show_pet_details, like_pet

# URL routes for the pets app; the route names are used for reverse lookup.
urlpatterns = [
    path('', pets_list, name='list pets'),
    path('details/<int:pk>/', show_pet_details, name='pet details'),
    path('like/<int:pk>/', like_pet, name='like pet'),
]
|
[
"ivailo.atanasov93@gmail.com"
] |
ivailo.atanasov93@gmail.com
|
9bea5e1eada06fa70192f113563e4516d9e6a21e
|
dd097c7ae744227b0312d762ee0482a3380ff8c6
|
/makenei_from_ele_xyz.py
|
5af3305f3e285e46149ab1da568e6c0ac15fdcba
|
[] |
no_license
|
moflaher/workspace_python
|
0d6e98274d923a721db2b345f65c20b02ca59d08
|
6551e3602ead3373eafce10d11ce7b96bdcb106f
|
refs/heads/master
| 2023-03-06T02:15:01.945481
| 2023-03-01T19:15:51
| 2023-03-01T19:15:51
| 20,814,932
| 3
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,260
|
py
|
from __future__ import division,print_function
import numpy as np
import matplotlib as mpl
import scipy as sp
from datatools import *
import matplotlib.tri as mplt
import matplotlib.pyplot as plt
#from mpl_toolkits.basemap import Basemap
import os as os
name='kit4'
xyz=np.genfromtxt(name + '.xyz')
ele=np.genfromtxt(name + '.ele')
if 1==0:
maxnei=np.histogram(ele,bins=ele.max()-1)[0].max()
nnodes=xyz.shape[0]
noderange=np.arange(1,nnodes+1)
xmin=xyz[:,0].min()
xmax=xyz[:,0].max()
ymin=xyz[:,1].min()
ymax=xyz[:,1].max()
neighbourlist=np.zeros([xyz.shape[0],maxnei])
for i in range(1,xyz.shape[0]+1):
print i
idx=np.where(ele==i)[0]
tneilist=np.unique(ele[idx,:])
tneilist=tneilist[tneilist!=i]
neighbourlist[i-1,0:len(tneilist)]=tneilist
fp=open(name + '.nei','w')
fp.write('%d\n' % nnodes)
fp.write('%d\n' % maxnei)
fp.write('%f %f %f %f\n' % (xmax, ymin, xmin, ymax))
for i in range(0,nnodes):
fp.write('%d %f %f %d %f %u %u %u %u %u %u %u %u\n' % (i+1, xyz[i,0], xyz[i,1], 0 ,xyz[i,2],neighbourlist[i,0],neighbourlist[i,1],neighbourlist[i,2],neighbourlist[i,3],neighbourlist[i,4],neighbourlist[i,5],neighbourlist[i,6],neighbourlist[i,7]) )
fp.close()
|
[
"073208o@acadiau.ca"
] |
073208o@acadiau.ca
|
b0b14da84042b9971c28982f9d77f519829a1047
|
aa1352a2f32c0c36194d3a6f8e683adba487a3eb
|
/FiRoom_backend/users/urls.py
|
461aad0c0a56efdbc04145b7094f3c83cd7a2a12
|
[] |
no_license
|
Ace-bb/FiRoom_backend
|
6c98d01c40e8de31ccbe86beaeada6c62516705e
|
efd4d9c1d7265e42f56638d5374a569a146acc03
|
refs/heads/main
| 2023-03-30T15:48:21.376390
| 2021-03-23T15:53:48
| 2021-03-23T15:53:48
| 338,780,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 143
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('masterIdentify/uploadCertification', views.uploadCertification),
]
|
[
"13489323285@163.com"
] |
13489323285@163.com
|
c0118ad0e0f29fc84614d3d9782b2b74eeeff8b8
|
7ee1fd7584f8770cd2381d85f797bf85cb9b4b67
|
/usuarios/applications/users/migrations/0002_auto_20200318_0722.py
|
a428a6912352c1390b859456cca63c55c637915a
|
[] |
no_license
|
neunapp/usuariosdj
|
3171160fdf6898d07d6b353d034c70801e4bc21b
|
3fe69b7357757baa5d799b614f232d75ed659502
|
refs/heads/master
| 2022-12-01T16:51:00.432272
| 2020-09-17T14:28:21
| 2020-09-17T14:28:21
| 237,993,639
| 4
| 2
| null | 2022-11-22T05:17:26
| 2020-02-03T15:10:33
|
Python
|
UTF-8
|
Python
| false
| false
| 550
|
py
|
# Generated by Django 3.0.3 on 2020-03-18 07:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='apellidos',
field=models.CharField(blank=True, max_length=30),
),
migrations.AlterField(
model_name='user',
name='nombres',
field=models.CharField(blank=True, max_length=30),
),
]
|
[
"csantacruz1127@gmail.com"
] |
csantacruz1127@gmail.com
|
c2108381f0aa3dc54ea2812312d80e98117659a0
|
5574620c834f96d4baf50d6aa349242dae7c17af
|
/126.word-ladder-ii.py
|
1ab52f8b88f5662f72456dcd442d5aab3fe52358
|
[] |
no_license
|
Ming-H/leetcode
|
52dceba5f9a605afbdaa65e286a37205873e21bb
|
057cee4b830603ac12976ed7d5cea8d06a9b46a0
|
refs/heads/main
| 2023-09-02T21:30:48.796395
| 2023-09-01T01:59:48
| 2023-09-01T01:59:48
| 489,290,172
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 959
|
py
|
#
# @lc app=leetcode id=126 lang=python3
#
# [126] Word Ladder II
#
import collections
import string
class Solution:
#def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
def findLadders(self, start, end, dic):
dic.add(end)
level = {start}
parents = collections.defaultdict(set)
while level and end not in parents:
next_level = collections.defaultdict(set)
for node in level:
for char in string.ascii_lowercase:
for i in range(len(start)):
n = node[:i]+char+node[i+1:]
if n in dic and n not in parents:
next_level[n].add(node)
level = next_level
parents.update(next_level)
res = [[end]]
while res and res[0][0] != start:
res = [[p]+r for r in res for p in parents[r[0]]]
return res
|
[
"1518246548@qq.com"
] |
1518246548@qq.com
|
7e749d9cfdf6b6af07e6c5e6d4938e412ae0aeb3
|
d26aebefdc6358f63e050f7712589e9fd9f4a258
|
/cat_api_proj/settings.py
|
02f54b418b70047175a1723d0eec397f4420c924
|
[] |
no_license
|
matthewgstillman/Cats_API
|
e93c7319a26c6bb06ed1be211e8f588edc4dc871
|
c0918930d663527535e35d02c5c7ac098dbf6aa4
|
refs/heads/master
| 2020-03-29T20:47:06.514147
| 2018-09-25T21:17:48
| 2018-09-25T21:17:48
| 150,330,437
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,132
|
py
|
"""
Django settings for cat_api_proj project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^xbh+%sdv@pz=-$3#roy0^a!k(rfx$nfxr--v@(uui#jw^h8@&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.cat_api',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cat_api_proj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cat_api_proj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
|
[
"matthewgstillman@gmail.com"
] |
matthewgstillman@gmail.com
|
7e9302d32787862d768a2e2a3a7eeea66f8a542c
|
fc2fb2118ea02867d559bf8027e54e3c6b652cfd
|
/devItems/spring-2020/SEEAccuracyImprove/jirasoftware/step1_vectorize_text.py
|
80e2c2b832ef7448c32e352447bb35267dbcc71c
|
[] |
no_license
|
pdhung3012/SoftwareStoryPointsPrediction
|
2431ad599e0fba37617cfd467de1f4f1afed56cc
|
520990663cb42adcac315b75cd4eb1150c3fc86c
|
refs/heads/master
| 2023-08-29T15:16:30.413766
| 2021-09-18T17:12:20
| 2021-09-18T17:12:20
| 254,596,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,611
|
py
|
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
from nltk.tokenize import word_tokenize
import os
import numpy as np
import gensim
from sklearn.decomposition import PCA
from sklearn.random_projection import GaussianRandomProjection
nameSystem='jirasoftware'
fopVectorAllSystems='vector_tfidf_original_'+nameSystem+'/'
# fopTextPreprocess='te'+nameSystem+'/'
fopDataset='../../dataset/'
import stanza
def addDependenciesToSentence(docObj):
lstSentences=docObj.sentences
lstOutput=[]
for sen in lstSentences:
depends=sen._dependencies
lstDepInfo=[]
# depends=dict(depends)
for deKey in depends:
strElement=' '.join([deKey[2].text,deKey[0].text,deKey[1]])
lstDepInfo.append(strElement)
strDep=' '.join(lstDepInfo)
lstOutput.append((strDep))
strResult=' '.join(lstOutput)
return strResult
def addDependenciesToSentenceCompact(docObj):
lstSentences=docObj.sentences
lstOutput=[]
for sen in lstSentences:
depends=sen._dependencies
lstDepInfo=[]
# depends=dict(depends)
for deKey in depends:
strElement=' '.join([deKey[1]])
lstDepInfo.append(strElement)
strDep=' '.join(lstDepInfo)
lstOutput.append((strDep))
strResult=' '.join(lstOutput)
return strResult
def addDependenciesToSentencePOS(docObj):
lstSentences=docObj.sentences
lstOutput=[]
for sen in lstSentences:
words=sen._words
lstDepInfo=[]
# depends=dict(depends)
for w in words:
strElement=' '.join([w.upos])
lstDepInfo.append(strElement)
strDep=' '.join(lstDepInfo)
lstOutput.append((strDep))
strResult=' '.join(lstOutput)
return strResult
def preprocess(textInLine):
text = textInLine.lower()
doc = word_tokenize(text)
# doc = [word for word in doc if word in words]
# doc = [word for word in doc if word.isalpha()]
return ' '.join(doc)
def createDirIfNotExist(fopOutput):
try:
# Create target Directory
os.mkdir(fopOutput)
print("Directory ", fopOutput, " Created ")
except FileExistsError:
print("Directory ", fopOutput, " already exists")
from os import listdir
from os.path import isfile, join
arrFiles = [f for f in listdir(fopDataset) if isfile(join(fopDataset, f))]
createDirIfNotExist(fopVectorAllSystems)
# createDirIfNotExist(fopTextPreprocess)
nlp = stanza.Pipeline() # This sets up a default neural pipeline in English
for file in arrFiles:
# if not file.endswith('csv'):
# continue
if not file.endswith(nameSystem+'.csv'):
continue
fileCsv = fopDataset + file
fpVectorItemCate=fopVectorAllSystems+file.replace('.csv','')+'_category.csv'
fpVectorItemReg = fopVectorAllSystems + file.replace('.csv','') + '_regression.csv'
fpTextInfo = fopVectorAllSystems + file.replace('.csv', '') + '_textInfo.csv'
raw_data = pd.read_csv(fileCsv)
raw_data_2 = pd.read_csv(fileCsv)
columnId=raw_data['issuekey']
columnRegStory=raw_data_2['storypoint']
raw_data.loc[raw_data.storypoint <= 2, 'storypoint'] = 0 # small
raw_data.loc[(raw_data.storypoint > 2) & (raw_data.storypoint <= 8), 'storypoint'] = 1 # medium
raw_data.loc[(raw_data.storypoint > 8) & (raw_data.storypoint <= 15), 'storypoint'] = 2 # large
raw_data.loc[raw_data.storypoint > 15, 'storypoint'] = 3 # very large
columnCateStory = raw_data['storypoint']
titles_and_descriptions = []
for i in range(0, len(raw_data['description'])):
strContent = ' '.join([str(raw_data['title'][i]),' . ', str(raw_data['description'][i])])
titles_and_descriptions.append(str(strContent))
text_after_tokenize = []
listDependences=[]
index=0
for lineStr in titles_and_descriptions:
lineAppend = preprocess(lineStr)
strToAdd = lineAppend
# try:
# doc = nlp(lineStr)
# strDepend = addDependenciesToSentencePOS(doc)
# strToAdd = ' '.join([lineAppend, strDepend])
# # strToAdd = ' '.join([strDepend])
# except:
# print('{} error on issue {}'.format(index,columnId[index]))
text_after_tokenize.append(strToAdd)
index=index+1
columnTitleRow='no,text\n'
csv = open(fpTextInfo, 'w')
csv.write(columnTitleRow)
for i in range(0, len(text_after_tokenize)):
strItem=text_after_tokenize[i].replace(',',' ')
csv.write(','.join([str(i+1),strItem]))
if(i<(len(text_after_tokenize)-1)):
csv.write('\n')
csv.close()
# get vector using TF-IDF
vectorizer = TfidfVectorizer(ngram_range=(1, 4))
X = vectorizer.fit_transform(text_after_tokenize)
X = X.toarray()
# X = PCA().fit(X)
pca = PCA(n_components=100)
X = pca.fit_transform(X)
# srp=GaussianRandomProjection(n_components=3)
# X=srp.fit_transform(X)
print('end vectorize')
lenVectorOfWord = len(X[0])
columnTitleRow = "no,story,"
for i in range(0,lenVectorOfWord):
item='feature-'+str(i+1)
columnTitleRow = ''.join([columnTitleRow, item])
if i!=lenVectorOfWord-1:
columnTitleRow = ''.join([columnTitleRow, ","])
columnTitleRow = ''.join([columnTitleRow, "\n"])
csv = open(fpVectorItemCate, 'w')
csv.write(columnTitleRow)
csv2 = open(fpVectorItemReg, 'w')
csv2.write(columnTitleRow)
corpusVector = []
for i in range(0,len(text_after_tokenize)):
# arrTokens = word_tokenize(str(text_after_tokenize[i]))
# if not has_vector_representation(dictWordVectors, str(text_after_tokenize[i])):
# continue
# # arrTokens = word_tokenize(str(text_after_tokenize[i]))
vector= X[i]
corpusVector.append(vector)
# strVector=','.join(vector)
strCate=str(columnCateStory[i])
strReg=str(columnRegStory[i])
# strRow=''.join([str(i+1),',','S-'+str(columnStoryPoints[i]),])
# strRow = ''.join([str(i + 1), ',', 'S-' + strCate, ])
strRow = ''.join([str(i + 1), ',', '' + strCate, ])
strRow2 = ''.join([str(i + 1), ',', '' + strReg, ])
for j in range(0,lenVectorOfWord):
strRow=''.join([strRow,',',str(vector[j])])
strRow2 = ''.join([strRow2, ',', str(vector[j])])
strRow = ''.join([strRow, '\n'])
strRow2 = ''.join([strRow2, '\n'])
csv.write(strRow)
csv2.write(strRow2)
csv.close()
csv2.close()
print('Finish {}'.format(file))
|
[
"pdhung3012@gmail.com"
] |
pdhung3012@gmail.com
|
f4d523bf8b922bf42041001ffdc30993d9ab3efc
|
d6780d2a5126bff23d0a46c7376f5085063a3a4e
|
/backend/chat/api/v1/viewsets.py
|
211c8f530ed018dec88234b4bfad728e6cf050ca
|
[] |
no_license
|
crowdbotics-apps/test-25190
|
2721890ce00a97b8168e1188fe5d96ff86e1b6e4
|
48ce90fb307e5d98963a73e8c07a9a631b370f59
|
refs/heads/master
| 2023-03-24T23:43:59.659900
| 2021-03-22T15:53:21
| 2021-03-22T15:53:21
| 350,400,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,983
|
py
|
from rest_framework import authentication
from chat.models import (
Message,
ThreadMember,
MessageAction,
ThreadAction,
ForwardedMessage,
Thread,
)
from .serializers import (
MessageSerializer,
ThreadMemberSerializer,
MessageActionSerializer,
ThreadActionSerializer,
ForwardedMessageSerializer,
ThreadSerializer,
)
from rest_framework import viewsets
class MessageActionViewSet(viewsets.ModelViewSet):
serializer_class = MessageActionSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = MessageAction.objects.all()
class ForwardedMessageViewSet(viewsets.ModelViewSet):
serializer_class = ForwardedMessageSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = ForwardedMessage.objects.all()
class ThreadActionViewSet(viewsets.ModelViewSet):
serializer_class = ThreadActionSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = ThreadAction.objects.all()
class ThreadViewSet(viewsets.ModelViewSet):
serializer_class = ThreadSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Thread.objects.all()
class ThreadMemberViewSet(viewsets.ModelViewSet):
serializer_class = ThreadMemberSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = ThreadMember.objects.all()
class MessageViewSet(viewsets.ModelViewSet):
serializer_class = MessageSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Message.objects.all()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
748188ed28fde4abc2b26a91c727a0d58176ac3f
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2484/60691/284691.py
|
e7ae022ea8ced39460e70e67ef7cc820c09dc498
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
def union(s1, s2):
l = s1.split(' ')
l1 = s2.split(' ')
if l1 == ['']:
return len(l)
for i in range(len(l1)):
if (not l1[i] in l) and (l1[i] != ' '):
l.append(l1[i])
print(l)
return len(l)
n = int(input())
useless = []
arra = []
arrb = []
for i in range(n):
useless.append(input())
arra.append(input())
arrb.append(input())
for i in range(n):
print(union(arra[i], arrb[i]))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
5152b34d823d6a11128d9e66f1e9e53e70047cb9
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_PolyTrend_NoCycle_AR.py
|
b8ab652e2b3ff4f4365950f4e030704063914475
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970
| 2018-12-17T22:08:11
| 2018-12-17T22:08:11
| 137,104,552
| 0
| 0
|
BSD-3-Clause
| 2018-12-17T22:08:12
| 2018-06-12T17:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 151
|
py
|
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['PolyTrend'] , ['NoCycle'] , ['AR'] );
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
6f353343ebfd3b1bb1b40a1c028e27b4db514c59
|
7a550d2268bc4bc7e2fec608ffb1db4b2e5e94a0
|
/1401-1500/1475-Final Prices With a Special Discount in a Shop/1475-Final Prices With a Special Discount in a Shop.py
|
4b181402e0898044f9b11bafe9ab82e4848e82b7
|
[
"MIT"
] |
permissive
|
jiadaizhao/LeetCode
|
be31bd0db50cc6835d9c9eff8e0175747098afc6
|
4ddea0a532fe7c5d053ffbd6870174ec99fc2d60
|
refs/heads/master
| 2021-11-05T04:38:47.252590
| 2021-10-31T09:54:53
| 2021-10-31T09:54:53
| 99,655,604
| 52
| 28
|
MIT
| 2020-10-02T12:47:47
| 2017-08-08T05:57:26
|
C++
|
UTF-8
|
Python
| false
| false
| 362
|
py
|
class Solution:
def finalPrices(self, prices: List[int]) -> List[int]:
n = len(prices)
result = [0] * n
St = []
for i in range(n - 1, -1, -1):
while St and prices[i] < St[-1]:
St.pop()
result[i] = prices[i] - (St[-1] if St else 0)
St.append(prices[i])
return result
|
[
"jiadaizhao@gmail.com"
] |
jiadaizhao@gmail.com
|
ec8f221cde5fdf597b6c8d7493464006d316c717
|
d2dda11e125068512c5c0db0f24b80bc53c94ce3
|
/LeetCode/Ex0/Ex88.py
|
730a58387b0e8f11def225a9734664b6064cfec2
|
[] |
no_license
|
JasonVann/CrackingCodingInterview
|
f90163bcd37e08f6a41525f9f95663d5f42dd8e6
|
8f9327a1879949f61b462cc6c82e00e7c27b8b07
|
refs/heads/master
| 2021-09-02T09:28:34.553704
| 2018-01-01T12:05:12
| 2018-01-01T12:05:12
| 110,519,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 813
|
py
|
class Ex88(object):
def merge(self, nums1, m, nums2, n):
"""
:type nums1: List[int]
:type m: int
:type nums2: List[int]
:type n: int
:rtype: void Do not return anything, modify nums1 in-place instead.
"""
k = m + n - 1
i = m - 1
j = n - 1
while j >= 0 and i >= 0:
#print i, j, k
if nums1[i] > nums2[j]:
nums1[k] = nums1[i]
i -= 1
k -= 1
else :
nums1[k] = nums2[j]
k -= 1
j -= 1
#print 'b', nums1, i, j, k
if k >= 0 and j >= 0:
nums1[:k+1] = nums2[:j+1]
return nums1
ex88 = Ex88()
nums1=[2,0]
nums2=[1]
print 88, ex88.merge(nums1, 1, nums2, 1)
|
[
"jasonvanet@gmail.com"
] |
jasonvanet@gmail.com
|
e4e5b5b777d0c2876ccd93f0e0687824c3d9acc0
|
53706aea0f1358c3589b9afa8a94f1c902c2c494
|
/algorithms/tmax/apps/record_trajectory_manually.py
|
29adb24df33967fa011df4579ea3ccccb99c3b98
|
[
"MIT"
] |
permissive
|
alex-petrenko/landmark-exploration
|
6df0eae63ba501c3509e1264f8f99101ff0df345
|
faaeff84176de34e2ab4c18b24ee022bd069299e
|
refs/heads/master
| 2021-06-28T16:43:41.299431
| 2019-06-02T00:09:09
| 2019-06-02T00:09:09
| 164,031,073
| 4
| 1
|
MIT
| 2021-06-18T20:53:45
| 2019-01-03T22:53:03
|
Python
|
UTF-8
|
Python
| false
| false
| 3,668
|
py
|
import datetime
import pickle
import sys
import time
from os.path import join
from threading import Thread
from pynput.keyboard import Key, Listener
from algorithms.utils.algo_utils import main_observation
from algorithms.utils.env_wrappers import reset_with_info
from algorithms.tmax.agent_tmax import AgentTMAX
from algorithms.tmax.tmax_utils import parse_args_tmax
from algorithms.topological_maps.topological_map import TopologicalMap
from algorithms.utils.trajectory import Trajectory
from utils.envs.atari import atari_utils
from utils.envs.doom import doom_utils
from utils.envs.envs import create_env
from utils.envs.generate_env_map import generate_env_map
from utils.timing import Timing
from utils.utils import log, ensure_dir_exists
terminate = False
current_actions = []
key_to_action = None
# noinspection PyCallingNonCallable
def on_press(key):
if key == Key.esc:
global terminate
terminate = True
return False
global current_actions
action = key_to_action(key)
if action is not None:
if action not in current_actions:
current_actions.append(action)
# noinspection PyCallingNonCallable
def on_release(key):
global current_actions
action = key_to_action(key)
if action is not None:
if action in current_actions:
current_actions.remove(action)
def record_trajectory(params, env_id):
def make_env_func():
e = create_env(env_id, skip_frames=True)
e.seed(0)
return e
env = make_env_func()
map_img, coord_limits = generate_env_map(make_env_func)
env_obs, info = reset_with_info(env)
obs = main_observation(env_obs)
done = False
m = TopologicalMap(obs, directed_graph=False, initial_info=info, verbose=True)
trajectory = Trajectory(env_idx=-1)
frame = 0
t = Timing()
while not done and not terminate:
with t.timeit('one_frame'):
env.render()
if len(current_actions) > 0:
action = current_actions[-1]
else:
action = 0
trajectory.add(obs, action, info)
m.add_landmark(obs, info, update_curr_landmark=True)
env_obs, rew, done, info = env.step(action)
obs = main_observation(env_obs)
took_seconds = t.one_frame
desired_fps = 15
wait_seconds = (1.0 / desired_fps) - took_seconds
wait_seconds = max(0.0, wait_seconds)
time.sleep(wait_seconds)
frame += 1
env.render()
time.sleep(0.2)
trajectory_dir = trajectory.save(params.experiment_dir())
m.save_checkpoint(trajectory_dir, map_img=map_img, coord_limits=coord_limits, verbose=True)
env.close()
return 0
def main():
args, params = parse_args_tmax(AgentTMAX.Params)
env_id = args.env
global key_to_action
if 'dmlab' in env_id:
from utils.envs.dmlab import play_dmlab
key_to_action = play_dmlab.key_to_action
elif 'atari' in env_id:
key_to_action = atari_utils.key_to_action
elif 'doom' in env_id:
key_to_action = doom_utils.key_to_action
else:
raise Exception('Unknown env')
# start keypress listener (to pause/resume execution or exit)
def start_listener():
with Listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
listener_thread = Thread(target=start_listener)
listener_thread.start()
status = record_trajectory(params, args.env)
if not terminate:
log.debug('Press ESC to exit...')
listener_thread.join()
return status
if __name__ == '__main__':
sys.exit(main())
|
[
"petrenko@usc.edu"
] |
petrenko@usc.edu
|
522b229ed5441f337e5ffaf6d29ee042868dfd53
|
f48512f1f42d55fabc9ab46f448138e771b78c68
|
/sphinx/conf.py
|
d3bc27682716395cd7e2c284cb4cb038eaed7629
|
[] |
no_license
|
HussainAther/chipseq
|
07871167f894ba612d6ca476b5a6fb37440c682a
|
6d12752c15a0368a5d4b40af6f5916d2c04c767f
|
refs/heads/master
| 2020-03-08T07:49:44.997059
| 2018-04-04T03:58:42
| 2018-04-04T03:58:42
| 128,004,481
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,167
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# snakemake documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 27 17:54:40 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'snakemake'
copyright = '2017, Hussain Ather'
author = 'Hussain Ather'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'snakemakedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'snakemake.tex', 'snakemake Documentation',
'Hussain Ather', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'snakemake', 'snakemake Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'snakemake', 'snakemake Documentation',
author, 'snakemake', 'One line description of project.',
'Miscellaneous'),
]
|
[
"shussainather@gmail.com"
] |
shussainather@gmail.com
|
85f2ca2dc3e54d1d6bd9647dad7d3cbe14a37fcb
|
3f7d5999bb7e5a75454c8df2c5a8adcd1a8341ff
|
/tests/unit/mock/procenv.py
|
a97081a52d2332cb01f5d0017c20c9c3ff26917c
|
[] |
no_license
|
ansible-collection-migration/ansible.fortios
|
f7b1a7a0d4b69c832403bee9eb00d99f3be65e74
|
edad6448f7ff4da05a6c856b0e7e3becd0460f31
|
refs/heads/master
| 2020-12-18T13:08:46.739473
| 2020-02-03T22:10:49
| 2020-02-03T22:10:49
| 235,393,556
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,735
|
py
|
# (c) 2016, Matt Davis <mdavis@ansible.com>
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import json
from contextlib import contextmanager
from io import BytesIO, StringIO
from ansible_collections.ansible.fortios.tests.unit.compat import unittest
from ansible.module_utils.six import PY3
from ansible.module_utils._text import to_bytes
@contextmanager
def swap_stdin_and_argv(stdin_data='', argv_data=tuple()):
"""
context manager that temporarily masks the test runner's values for stdin and argv
"""
real_stdin = sys.stdin
real_argv = sys.argv
if PY3:
fake_stream = StringIO(stdin_data)
fake_stream.buffer = BytesIO(to_bytes(stdin_data))
else:
fake_stream = BytesIO(to_bytes(stdin_data))
try:
sys.stdin = fake_stream
sys.argv = argv_data
yield
finally:
sys.stdin = real_stdin
sys.argv = real_argv
@contextmanager
def swap_stdout():
    """
    Context manager that temporarily replaces stdout so tests can inspect
    what was printed; yields the capture stream, restores stdout on exit.
    """
    saved_stdout = sys.stdout
    # Text stream on py3, byte stream on py2 -- matches what print() expects.
    capture = StringIO() if PY3 else BytesIO()
    try:
        sys.stdout = capture
        yield capture
    finally:
        sys.stdout = saved_stdout
class ModuleTestCase(unittest.TestCase):
    """Base test case that feeds AnsibleModule-style args via a faked stdin."""

    def setUp(self, module_args=None):
        if module_args is None:
            module_args = {'_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False}

        stdin_payload = json.dumps(dict(ANSIBLE_MODULE_ARGS=module_args))

        # unittest offers no clean hook for a context manager, so we have to
        # enter here and exit in tearDown manually.
        self.stdin_swap = swap_stdin_and_argv(stdin_data=stdin_payload)
        self.stdin_swap.__enter__()

    def tearDown(self):
        # Counterpart of the manual __enter__ performed in setUp.
        self.stdin_swap.__exit__(None, None, None)
|
[
"ansible_migration@example.com"
] |
ansible_migration@example.com
|
77ceb0223c09fd8f4e1e20c731296bf4051c99b9
|
f4fce41f2b3cba606d2a36075de356434602d1c0
|
/xwing_rulebook/rules/migrations/0005_auto_20170114_1838.py
|
dacc057a40679a2cab8dc474b609d6614fdb0e31
|
[] |
no_license
|
lvisintini/xwing-rulebook
|
24a392c12a2b13027e7cf65b9cc41e8a21585e3c
|
6dc7ac58f962a4928843364dcfc077638384dc16
|
refs/heads/master
| 2021-01-22T19:41:32.960048
| 2018-07-25T10:53:35
| 2018-07-25T10:53:35
| 85,224,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 542
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-14 18:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: only alters the ``file`` field on ``Content``.

    dependencies = [
        ('rules', '0004_auto_20170114_0010'),
    ]

    operations = [
        migrations.AlterField(
            model_name='content',
            name='file',
            # NOTE(review): ``path`` is an absolute developer-machine path baked
            # in by the generator -- it will not exist on other hosts; verify.
            field=models.FilePathField(blank=True, null=True, path='/home/lvisintini/src/xwing-rulebook/xwing_rulebook/static', recursive=True),
        ),
    ]
|
[
"lvisintini@gmail.com"
] |
lvisintini@gmail.com
|
7d538662625104748e0d8ce3db20225b2e8cb6a1
|
30ec40dd6a81dbee73e7f14c144e20495960e565
|
/kubernetes/test/test_v1_persistent_volume.py
|
3f2fe9cff5949e5488c0fb3e07e8efbe740227a8
|
[
"Apache-2.0"
] |
permissive
|
jonathan-kosgei/client-python
|
ae5a46968bcee19a3c62e1cefe227131ac9e7200
|
4729e6865d810824cafa312b4d06dfdb2d4cdb54
|
refs/heads/master
| 2021-01-20T14:59:10.435626
| 2017-05-08T16:55:51
| 2017-05-08T16:55:51
| 90,700,132
| 1
| 0
| null | 2017-05-09T03:50:42
| 2017-05-09T03:50:42
| null |
UTF-8
|
Python
| false
| false
| 883
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_persistent_volume import V1PersistentVolume
class TestV1PersistentVolume(unittest.TestCase):
    """ V1PersistentVolume unit test stubs """

    def setUp(self):
        # No fixtures needed for the generated stub.
        pass

    def tearDown(self):
        pass

    def testV1PersistentVolume(self):
        """
        Test V1PersistentVolume
        """
        # Smoke test: constructing the generated model class must not raise.
        model = kubernetes.client.models.v1_persistent_volume.V1PersistentVolume()
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
60478f86d50f53b1d361c8a21deef2a43bb1ef65
|
6c33e4baeda15398a910382ed64f646825591dd0
|
/run_project/02_Pima_Indian.py
|
893c61f5e55315aec58778663dd56e783fb4b587
|
[] |
no_license
|
ss820938ss/pythonProject_deeplearning
|
0ad26797299df1eb5b549bd2a5309502d58a495c
|
baa55f492f07da955b45573ac52d7f61f2d0ee0d
|
refs/heads/master
| 2023-07-09T15:50:37.319896
| 2021-08-10T05:15:46
| 2021-08-10T05:15:46
| 391,771,606
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,308
|
py
|
# Import the pandas library.
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Load the Pima Indians diabetes dataset, assigning a name to every column.
df = pd.read_csv('../dataset/pima-indians-diabetes.csv',
                 names=["pregnant", "plasma", "pressure", "thickness", "insulin", "BMI", "pedigree", "age", "class"])

# Look at the first 5 rows.
print(df.head(5))

# Check the overall structure of the data.
print(df.info())

# Print more detailed per-column statistics.
print(df.describe())

# Print only the plasma and class columns.
print(df[['plasma', 'class']])

# Visualise the pairwise correlations between features as a heatmap.
colormap = plt.cm.gist_heat  # colour scheme of the plot
plt.figure(figsize=(12, 12))  # figure size

# Configure the heatmap; vmax=0.5 so values close to 0.5 are drawn brightest.
sns.heatmap(df.corr(), linewidths=0.1, vmax=0.5, cmap=colormap, linecolor='white', annot=True)
plt.show()

grid = sns.FacetGrid(df, col='class')
grid.map(plt.hist, 'plasma', bins=10)
plt.show()

# Import the Keras pieces needed to build and run the deep-learning model.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Import the required libraries.
import numpy
import tensorflow as tf

# Seed both RNGs so every run produces the same result.
numpy.random.seed(3)
tf.random.set_seed(3)

# Load the data.
dataset = numpy.loadtxt("../dataset/pima-indians-diabetes.csv", delimiter=",")
X = dataset[:, 0:8]
Y = dataset[:, 8]

# Define the model.
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

# Compile the model.
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Train the model.
model.fit(X, Y, epochs=200, batch_size=10)

# Print the resulting accuracy.
print("\n Accuracy: %.4f" % (model.evaluate(X, Y)[1]))
|
[
"ss820938ss@gmail.com"
] |
ss820938ss@gmail.com
|
98d4948b986f60c3d4fb60f286e763e24192f924
|
71b11008ab0455dd9fd2c47107f8a27e08febb27
|
/09、UI自动化测试及黑马头条项目实战/day08/03代码/等待操作.py
|
2c49f02ff15dd038da5d5fa6df7401ffbae20dc0
|
[] |
no_license
|
zmh19941223/heimatest2021
|
49ce328f8ce763df0dd67ed1d26eb553fd9e7da4
|
3d2e9e3551a199bda9945df2b957a9bc70d78f64
|
refs/heads/main
| 2023-08-25T17:03:31.519976
| 2021-10-18T05:07:03
| 2021-10-18T05:07:03
| 418,348,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,090
|
py
|
import time

from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.webdriver.common.by import By

from utils import get_element, input_text

# Desired-capabilities dictionary describing the device and app under test.
des_cap = {
    "platformName" : "android" ,  # "android" or "ios"
    "platformVersion" : "5.1.1",  # OS version of the device
    "deviceName" : "****",  # device ID ("****" works when only one device is attached)
    "appPackage" : "com.android.settings",  # package name of the app under test
    "appActivity" : ".Settings",  # activity (screen) to launch
    "resetKeyboard": True,  # restore the device's original keyboard afterwards
    "unicodeKeyboard": True  # use the Unicode IME so non-ASCII text can be typed
    ####"".module_main.activity.MainActivity""
}

driver = webdriver.Remote("http://localhost:4723/wd/hub", des_cap)

# Locate the WLAN entry and open it.
wlan_btn = By.XPATH, "//*[@text='WLAN']"
get_element(driver, wlan_btn).click()
time.sleep(2)

# Long-press at the given coordinates: press, wait 3 s, then release.
TouchAction(driver).press(x=467, y=569).wait(3000).release().perform()
time.sleep(3)
driver.quit()
|
[
"1780858508@qq.com"
] |
1780858508@qq.com
|
6effd0d2562d816e58d889808b24eb4eba7e3903
|
77900cdd9a815caf1cd04705321ca93f5072179f
|
/Project2/Project2/.history/blog/models_20211114171729.py
|
abf6466b64bc0655018e82ba7b33ac84f8f2a026
|
[] |
no_license
|
Bom19990111/helloword_python
|
717799d994223d65de5adaeabecf396ff2bc1fb7
|
2ee2e67a60043f03c1ce4b070470c7d2dcdc72a7
|
refs/heads/master
| 2023-09-06T04:17:02.057628
| 2021-11-21T20:00:46
| 2021-11-21T20:00:46
| 407,063,273
| 0
| 1
| null | 2021-11-21T20:00:47
| 2021-09-16T07:18:35
|
Python
|
UTF-8
|
Python
| false
| false
| 378
|
py
|
from django.db import models
# Create your models here.
class Blog(models.Model):
    """A single blog post authored by a site user."""

    title = models.CharField(max_length=250, blank=True)
    slug = models.SlugField(max_length=250, blank=True)
    # Fix: ``User`` was referenced without being imported, so importing this
    # module raised NameError.  Use Django's lazy string reference to the
    # auth user model instead of a direct class reference.
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    description = models.TextField()
    date = models.DateField()

    def __str__(self):
        # Human-readable representation shown in the admin and shell.
        return self.title
|
[
"phanthituyngoc1995@gmail.com"
] |
phanthituyngoc1995@gmail.com
|
e46bcceeb1e79b9edce031b21c399f957deb42c3
|
1843fd5ccb4377240e664acd21ba5a9369eca2ab
|
/bluebottle/cms/migrations/0052_auto_20171027_1419.py
|
f9984292ed4b5065b5abffe1a367865208d217c1
|
[
"BSD-2-Clause"
] |
permissive
|
raux/bluebottle
|
ba2e576cebcb6835065004c410b22bd8a6b9ee29
|
49d92b5deb289c1539f99122abc20f845577b879
|
refs/heads/master
| 2020-03-27T03:20:11.465491
| 2018-08-23T13:09:25
| 2018-08-23T13:09:25
| 145,854,614
| 0
| 0
| null | 2018-08-23T13:05:00
| 2018-08-23T13:04:59
| null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-10-27 12:19
from __future__ import unicode_literals
from django.db import migrations, connection
from django.core.management import call_command
def migrate_homepage(apps, schema_editor):
    # Delegate to the management command, scoped to the current tenant schema.
    call_command('migrate_homepage', tenant=connection.tenant.schema_name)
class Migration(migrations.Migration):
    # Data migration: rebuilds homepage content via the management command.

    dependencies = [
        ('cms', '0051_auto_20171024_1631'),
    ]

    operations = [
        # NOTE(review): no reverse function is provided, so this migration
        # cannot be unapplied.
        migrations.RunPython(migrate_homepage)
    ]
|
[
"ernst@onepercentclub.com"
] |
ernst@onepercentclub.com
|
715184433e0880b06cffd93aafcb0ed70d537c8d
|
930822ded3de346524648244a6f8edc3e7a2a038
|
/leetcode/maxProfit.py
|
876984ef35022d6239ba77600de1160b2c0cb8db
|
[] |
no_license
|
xy2333/Leetcode
|
4ad317fa21d3b3c37859e76b25a87993c22ca1b2
|
5915e039868527d624ee4f0ad431d23c6ed2d8bd
|
refs/heads/master
| 2020-06-19T13:30:14.755534
| 2019-08-12T02:12:03
| 2019-08-12T02:12:03
| 196,726,468
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
class Solution:
    def maxProfit(self, prices):
        """Return the best profit from a single buy/sell over a price series.

        Returns 0 when no profitable transaction exists or the series has
        fewer than two prices.
        """
        if len(prices) <= 1:
            return 0
        best = 0
        lowest = prices[0]
        # Track the cheapest price seen so far; any later price is a
        # candidate sell against that minimum.
        for price in prices[1:]:
            if price < lowest:
                lowest = price
            else:
                best = max(best, price - lowest)
        return best
|
[
"2531188679@qq.com"
] |
2531188679@qq.com
|
3fbbc5c0f26e5caddce1e220e7e5e9422ca33c8c
|
6b8366581101e183592ff5d65ba6c228223ef30d
|
/mp/tokenizer.py
|
086a0397d2c952e98301d843c4d9d4c387465260
|
[
"MIT"
] |
permissive
|
BPI-STEAM/mpfshell-lite
|
a3af795502d20f990d2a084a106f3964beb94392
|
e603c2abb942bf45c18519883e1b72760c4db04f
|
refs/heads/master
| 2020-07-22T13:11:57.651653
| 2019-08-27T03:58:41
| 2019-08-27T03:58:41
| 207,213,197
| 3
| 1
|
MIT
| 2019-09-09T03:06:46
| 2019-09-09T03:06:46
| null |
UTF-8
|
Python
| false
| false
| 2,163
|
py
|
##
# The MIT License (MIT)
#
# Copyright (c) 2016 Stefan Wendler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##
import re
class Token(object):
    """A scanned token: a ``kind`` tag plus an optional ``value``."""

    STR = "STR"
    QSTR = "QSTR"

    def __init__(self, kind, value=None):
        self._kind = kind
        self._value = value

    @property
    def kind(self):
        """The token's kind tag (STR or QSTR)."""
        return self._kind

    @property
    def value(self):
        """The raw text carried by the token, or None."""
        return self._value

    def __repr__(self):
        # Quote string values so the repr distinguishes '5' from 5.
        shown = "'%s'" % self.value if isinstance(self.value, str) else str(self.value)
        return "Token('%s', %s)" % (self.kind, shown)
class Tokenizer(object):
    """Split a command line into STR / QSTR tokens using ``re.Scanner``."""

    def __init__(self):
        # Characters allowed in an unquoted filename token.
        valid_fnchars = "A-Za-z0-9_%#~@/\$!\*\.\+\-\:\\\\"

        tokens = [
            # bare word
            (r'[%s]+' % valid_fnchars, lambda scanner, token: Token(Token.STR, token)),
            # double-quoted word (quotes stripped); may contain spaces
            (r'"[%s ]+"' % valid_fnchars, lambda scanner, token: Token(Token.QSTR, token[1:-1])),
            # a space separates tokens and produces nothing
            (r'[ ]', lambda scanner, token: None)
        ]

        self.scanner = re.Scanner(tokens)

    def tokenize(self, string):
        # Returns (tokens, remainder); ``remainder`` is any unscannable tail.
        # print(string, self.scanner.scan(string))
        return self.scanner.scan(string)
|
[
"junhuanchen@qq.com"
] |
junhuanchen@qq.com
|
1fced1f8cce087c0bbfdd61d98a7820b2eeef5ec
|
419637376e445ec9faf04c877d5fb6c09d15903f
|
/steam/user/order/userCancelOrderActivityTest.py
|
76c1e3a715c556e511a7af0fd3bc03f9bc9ddcd7
|
[] |
no_license
|
litaojun/steamOmTest
|
e4203df30acafaa5e282631d77429c0e4483fb88
|
86f84dbd802d947198823e02c2f1ba2695418a76
|
refs/heads/master
| 2020-04-02T21:48:55.115389
| 2019-07-11T06:08:27
| 2019-07-11T06:08:27
| 154,812,217
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,340
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: Lieb
@license: Apache Licence
@contact: 2750416737@qq.com
@site: http://blog.csdn.net/hqzxsc2006
@software: PyCharm
@file: userCancelOrderActivityTest.py
@time: 2018/7/11 10:09
"""
from steam.util.steamLog import SteamTestCase
from opg.bak.testcaseRunMgr import runTestOneCls
from steam.user.member.memberAddressService import MemberAddressService
from steam.user.order.userCancelOrderActivityService import UserCancelOrderActivityService
from steam.user.order.userOrederActivityService import UserOrderActivityService
from steam.user.weixin.userViewActivityService import UserViewActivityService
from steam.user.search.weixinSearchService import WeixinSearchService
from steam.util.testJsonFormat import initInputService
class UserCancelOrderActivityTest(SteamTestCase):
    '''
    Cancel-order activity test.

    NOTE(review): the original docstring said "cancel like" (取消点赞), which
    does not match the order-cancel endpoint below -- verify intent.
    '''
    # Endpoint under test.
    __interfaceName__ = "/order-service/order/cancel"

    # Chain the prerequisite services (search, view activity, member address,
    # place order) before driving the cancel-order service itself.
    @initInputService( services = [ WeixinSearchService ,
                                    UserViewActivityService ,
                                    MemberAddressService ,
                                    UserOrderActivityService ] ,
                       curser = UserCancelOrderActivityService )
    def __init__(self, methodName='runTest', param=None):
        super(UserCancelOrderActivityTest,self).__init__(methodName,param)

    # def userCancelOrderActivity(self):
    #     userCancelOrderRsp = self.myservice.userCancelOrderActivity()
    #     retcode = self.myservice.getRetcodeByOrderRsp(response=userCancelOrderRsp)
    #     self.assertTrue(retcode == self.expectdata["code"])
if __name__ == "__main__":
    # Sample input describing the resource/SKU whose order is cancelled.
    # NOTE(review): ``kwarg`` is built but never passed anywhere -- confirm
    # whether it is still needed.
    kwarg = {
        "memberId": "09c1316f-b304-46b1-96ff-c9ebbd93a617" ,
        "resourceTypeId":12 ,
        "title":"早鸟价!呼伦贝尔|私家牧场任你驰骋策马,原始森林徒步猎奇" ,
        "skuName":"价格(成人)" ,
        "skuId":1 ,
        "resourceId":1
    }
    # Run every case in the YAML file against this test class.
    runTestOneCls(
        casefilepath = "\\steamcase\\user\\order-serviceordercancels.yml",
        testclse = UserCancelOrderActivityTest
    )
|
[
"li.taojun@opg.cn"
] |
li.taojun@opg.cn
|
aefe209940bc4b34c6bc1de86f72876bf4394890
|
4838552992476399d0452a92d0a38aa9b8b29c63
|
/books/serializers.py
|
9bcac01e22850c80eaea8102c4e89f7fa859d736
|
[] |
no_license
|
sannycand/books
|
16cee4d0f8c1a1a4a52108fd0403c258620e146a
|
593d77ccd1f4b68be0a5ed44adb495c034bea2a1
|
refs/heads/develop
| 2020-04-05T13:31:09.296128
| 2017-06-20T01:59:53
| 2017-06-20T01:59:53
| 94,852,739
| 0
| 0
| null | 2017-06-20T05:23:40
| 2017-06-20T05:23:40
| null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
from rest_framework import serializers
from .models import Book, Review
class BookSerializer(serializers.ModelSerializer):
    """ book serializer
    """
    # IDs of the reviews attached to each serialized book.
    reviews = serializers.SerializerMethodField()

    class Meta:
        model = Book
        fields = ('__all__')

    def get_reviews(self, instance):
        # Return only the review primary keys, not nested review objects.
        return Review.objects.filter(book=instance) \
            .values_list('id', flat=True)
|
[
"earvin.gemenez@gmail.com"
] |
earvin.gemenez@gmail.com
|
d92958e24ff62b6981e69327f0d73818051e91d6
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_135/3035.py
|
cf3cfe0ebd3b61e847863f968039e9fe32db8586
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 656
|
py
|
#! /usr/bin/env python
# Code Jam "Magic Trick" (Python 2): for each case, read two 4x4 grids with
# a chosen-row number each; the answer is the card common to both chosen rows.
T = int(raw_input())
t = 1
while t <= T:
    # First arrangement: skip to chosen row ``a`` and keep it as a set.
    a = int(raw_input())
    i = 0
    while i < a-1:
        raw_input()
        i += 1
    r1 = set(raw_input().split(" "))
    # Skip the remaining rows of the first grid.
    while i < 3:
        raw_input()
        i += 1
    # Second arrangement: same parsing for its chosen row.
    a = int(raw_input())
    i = 0
    while i < a-1:
        raw_input()
        i += 1
    r2 = set(raw_input().split(" "))
    # One common card -> answer; none -> volunteer cheated; several -> the
    # magician cannot know which card was chosen.
    c = r1.intersection(r2)
    lc = len(c)
    if lc == 1:
        print "Case #%s: %s"%(t, c.pop())
    elif lc == 0:
        print "Case #%s: Volunteer cheated!" % t
    else:
        print "Case #%s: Bad magician!" % t
    # Skip the remaining rows of the second grid.
    while i < 3:
        raw_input()
        i += 1
    t += 1
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
2ae90b6dffc7987765ffacf37be52a03f335f474
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_MovingMedian_Seasonal_WeekOfYear_SVR.py
|
87afffb284e8c95e2d354f9e473c99b687fc4781
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 162
|
py
|
import tests.model_control.test_ozone_custom_models_enabled as testmod

# Build one model combination on the ozone dataset:
# Fisher transform + MovingMedian trend + week-of-year seasonality + SVR residuals.
testmod.build_model( ['Fisher'] , ['MovingMedian'] , ['Seasonal_WeekOfYear'] , ['SVR'] );
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
7a4946bf049ebf1ed6ee4360fa21fb2fa3271c02
|
fe19d2fac4580d463132e61509bd6e3cc2cf958d
|
/toontown/coghq/MoleFieldBase.py
|
508ffbac0ff0495f22fc1b06a212904c3fadf747
|
[] |
no_license
|
t00nt0wn1dk/c0d3
|
3e6db6dd42c3aa36ad77709cf9016176a3f3a44f
|
7de105d7f3de0f8704b020e32fd063ee2fad8d0d
|
refs/heads/master
| 2021-01-01T16:00:15.367822
| 2015-03-21T21:25:52
| 2015-03-21T21:25:55
| 32,647,654
| 3
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,950
|
py
|
# 2013.08.22 22:19:18 Pacific Daylight Time
# Embedded file name: toontown.coghq.MoleFieldBase
import random
# Mole-hill states used in the schedule entries and hill bookkeeping.
HILL_MOLE = 0
HILL_BOMB = 1
HILL_WHACKED = 2
HILL_COGWHACKED = 3
class MoleFieldBase():
    """Shared scheduling/difficulty logic for a whack-a-mole field minigame."""
    __module__ = __name__
    WHACKED = 1
    # Pop-up speed: decays by the multiplier per pop, clamped at the minimum.
    MoveUpTimeMax = 1
    MoveUpTimeMultiplier = 0.95
    MoveUpTimeMin = 0.5
    # How long a mole stays up before retracting.
    StayUpTimeMax = 7
    StayUpTimeMultiplier = 0.95
    StayUpTimeMin = 3
    # Retract speed.
    MoveDownTimeMax = 1
    MoveDownTimeMultiplier = 0.95
    MoveDownTimeMin = 0.5
    # Gap between consecutive pop-ups.
    TimeBetweenPopupMax = 1.5
    TimeBetweenPopupMultiplier = 0.95
    TimeBetweenPopupMin = 0.25
    DamageOnFailure = 20

    def getRng(self):
        # Deterministic RNG seeded from entity/level ids so every party
        # derives the same schedule.
        return random.Random(self.entId * self.level.doId)

    def scheduleMoles(self):
        # Build self.schedule as a list of
        # (startTime, moleIndex, moveUpTime, stayUpTime, moveDownTime, moleType)
        # tuples covering the whole game duration, speeding up over time.
        self.schedule = []
        totalTime = 0
        curMoveUpTime = self.MoveUpTimeMax
        curMoveDownTime = self.MoveDownTimeMax
        curTimeBetweenPopup = self.TimeBetweenPopupMax
        curStayUpTime = self.StayUpTimeMax
        curTime = 3
        eligibleMoles = range(self.numMoles)
        self.getRng().shuffle(eligibleMoles)
        usedMoles = []
        self.notify.debug('eligibleMoles=%s' % eligibleMoles)
        self.endingTime = 0
        randOb = random.Random(self.entId * self.level.doId)
        while self.endingTime < self.GameDuration:
            # Once every mole has popped, reshuffle the used set so no mole
            # repeats before the others have all appeared.
            if len(eligibleMoles) == 0:
                eligibleMoles = usedMoles
                self.getRng().shuffle(usedMoles)
                usedMoles = []
                self.notify.debug('eligibleMoles=%s' % eligibleMoles)
            moleIndex = eligibleMoles[0]
            eligibleMoles.remove(moleIndex)
            usedMoles.append(moleIndex)
            # 3-in-4 chance of a mole, 1-in-4 chance of a bomb.
            moleType = randOb.choice([HILL_MOLE,
             HILL_MOLE,
             HILL_MOLE,
             HILL_BOMB])
            self.schedule.append((curTime,
             moleIndex,
             curMoveUpTime,
             curStayUpTime,
             curMoveDownTime,
             moleType))
            curTime += curTimeBetweenPopup
            curMoveUpTime = self.calcNextMoveUpTime(curTime, curMoveUpTime)
            curStayUpTime = self.calcNextStayUpTime(curTime, curStayUpTime)
            curMoveDownTime = self.calcNextMoveDownTime(curTime, curMoveDownTime)
            curTimeBetweenPopup = self.calcNextTimeBetweenPopup(curTime, curTimeBetweenPopup)
            self.endingTime = curTime + curMoveUpTime + curStayUpTime + curMoveDownTime
        # Drop the final entry that overshot GameDuration, then recompute the
        # true ending time from the (new) last schedule entry.
        self.schedule.pop()
        self.endingTime = self.schedule[-1][0] + self.schedule[-1][2] + self.schedule[-1][3] + self.schedule[-1][4]
        self.notify.debug('schedule length = %d, endingTime=%f' % (len(self.schedule), self.endingTime))

    def calcNextMoveUpTime(self, curTime, curMoveUpTime):
        # NOTE(review): clamps against MoveDownTimeMin rather than
        # MoveUpTimeMin; both are 0.5 so behaviour is identical, but the
        # constant looks like a copy/paste slip -- verify before changing.
        newMoveUpTime = curMoveUpTime * self.MoveUpTimeMultiplier
        if newMoveUpTime < self.MoveDownTimeMin:
            newMoveUpTime = self.MoveDownTimeMin
        return newMoveUpTime

    def calcNextStayUpTime(self, curTime, curStayUpTime):
        # Decay the stay-up time, clamped at its minimum.
        newStayUpTime = curStayUpTime * self.StayUpTimeMultiplier
        if newStayUpTime < self.StayUpTimeMin:
            newStayUpTime = self.StayUpTimeMin
        return newStayUpTime

    def calcNextMoveDownTime(self, curTime, curMoveDownTime):
        # Decay the retract time, clamped at its minimum.
        newMoveDownTime = curMoveDownTime * self.MoveDownTimeMultiplier
        if newMoveDownTime < self.MoveDownTimeMin:
            newMoveDownTime = self.MoveDownTimeMin
        return newMoveDownTime

    def calcNextTimeBetweenPopup(self, curTime, curTimeBetweenPopup):
        # Decay the inter-popup gap, clamped at its minimum.
        newTimeBetweenPopup = curTimeBetweenPopup * self.TimeBetweenPopupMultiplier
        if newTimeBetweenPopup < self.TimeBetweenPopupMin:
            newTimeBetweenPopup = self.TimeBetweenPopupMin
        return newTimeBetweenPopup
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\coghq\MoleFieldBase.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:19:18 Pacific Daylight Time
|
[
"anonymoustoontown@gmail.com"
] |
anonymoustoontown@gmail.com
|
d4d403b01e00e44d6866bca55752f5509a8b15f8
|
748b8c66d8d9f77e047033e07142328ea6939138
|
/utils/create-captions.py
|
3a8f8271fec5c0ed4ec02b5ca917f56240ba6ce5
|
[
"Apache-2.0"
] |
permissive
|
nakamura196/neural-neighbors
|
68f073511f10fb2df4c9efb2e504d2f9fb3b8455
|
277cb6e6a2102ad9d850c4397ca454ecb347dd1b
|
refs/heads/master
| 2022-01-24T15:16:49.170431
| 2019-08-11T14:37:23
| 2019-08-11T14:37:23
| 201,567,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
import glob, random, os, json

# Build a {basename: caption} mapping for every thumbnail image; the caption
# defaults to the file's own basename without its extension.
files = glob.glob(
    "assets/images/thumbs/*.jpg")

captions = {}
for file in files:
    # os.path is used instead of manual "/" splitting so the script also
    # works with Windows path separators.
    filename = os.path.splitext(os.path.basename(file))[0]
    captions[filename] = filename

# ``with`` guarantees the JSON file is flushed and closed even on error
# (the original left the handle open).
with open("data/full-captions.json", 'w') as fw:
    json.dump(captions, fw, ensure_ascii=False, indent=4,
              sort_keys=True, separators=(',', ': '))
|
[
"na.kamura.1263@gmail.com"
] |
na.kamura.1263@gmail.com
|
a01fac3aa0ec2d33e854eac1c3973308a9e2b23e
|
8c0b804f1cc8cbf2f8788727df22a2cc149f7b5c
|
/gala/integrate/core.py
|
d7150e9b565f316c3bb7011aa15040378aa0d4e6
|
[
"MIT"
] |
permissive
|
adrn/gala
|
579cc5a4ecb22df118e1c8a2322a46e935825054
|
f62e1a6ae7a8466a4db5c8407471b524cf085637
|
refs/heads/main
| 2023-09-04T11:42:07.278388
| 2023-08-18T18:04:35
| 2023-08-18T18:04:35
| 17,577,779
| 115
| 89
|
MIT
| 2023-09-05T11:40:10
| 2014-03-10T00:56:18
|
Python
|
UTF-8
|
Python
| false
| false
| 4,338
|
py
|
""" Base class for integrators. """
# Third-party
import numpy as np
# This project
from gala.units import UnitSystem, DimensionlessUnitSystem
__all__ = ["Integrator"]
class Integrator(object):
    """Base class for ODE integrators that operate on phase-space positions.

    Subclasses implement :meth:`run`; this base handles argument validation,
    unit bookkeeping, output-array preparation, and wrapping results in an
    Orbit.
    """

    def __init__(
        self,
        func,
        func_args=(),
        func_units=None,
        progress=False,
        store_all=True,
    ):
        # ``func`` evaluates the derivatives and must be callable.
        if not hasattr(func, "__call__"):
            raise ValueError(
                "func must be a callable object, e.g., a function."
            )

        self.F = func
        self._func_args = func_args

        # Normalize the unit system: wrap anything non-dimensionless in a
        # UnitSystem; None falls back to dimensionless units.
        if func_units is not None and not isinstance(
            func_units, DimensionlessUnitSystem
        ):
            func_units = UnitSystem(func_units)
        else:
            func_units = DimensionlessUnitSystem()
        self._func_units = func_units

        self.progress = bool(progress)
        self.store_all = store_all

    def _get_range_func(self):
        # Return tqdm's trange (progress bar) when requested, else builtin range.
        if self.progress:
            try:
                from tqdm import trange

                return trange
            except ImportError:
                raise ImportError(
                    "tqdm must be installed to use progress=True when running "
                    f"{self.__class__.__name__}"
                )

        return range

    def _prepare_ws(self, w0, mmap, n_steps):
        """
        Decide how to make the return array. If ``mmap`` is False, this returns a full
        array of zeros, but with the correct shape as the output. If ``mmap`` is True,
        return a pointer to a memory-mapped array. The latter is particularly useful for
        integrating a large number of orbits or integrating a large number of time
        steps.
        """
        from ..dynamics import PhaseSpacePosition

        if not isinstance(w0, PhaseSpacePosition):
            w0 = PhaseSpacePosition.from_w(w0)

        # arr_w0 is (2*ndim, norbits): positions stacked on velocities.
        arr_w0 = w0.w(self._func_units)
        self.ndim, self.norbits = arr_w0.shape
        self.ndim = self.ndim // 2

        if self.store_all:
            return_shape = (2 * self.ndim, n_steps + 1, self.norbits)
        else:
            return_shape = (2 * self.ndim, self.norbits)

        if mmap is None:
            # create the return arrays
            ws = np.zeros(return_shape, dtype=float)

        else:
            # Validate the caller-supplied memory map before writing into it.
            if mmap.shape != return_shape:
                raise ValueError(
                    "Shape of memory-mapped array doesn't match expected shape of "
                    f"return array ({mmap.shape} vs {return_shape})"
                )

            if not mmap.flags.writeable:
                raise TypeError(
                    f"Memory-mapped array must be a writable mode, not '{mmap.mode}'"
                )

            ws = mmap

        return w0, arr_w0, ws

    def _handle_output(self, w0, t, w):
        """Wrap the raw integration array ``w`` in an Orbit with proper units."""
        # Squeeze away a trailing single-orbit axis.
        if w.shape[-1] == 1:
            w = w[..., 0]

        pos_unit = self._func_units["length"]
        t_unit = self._func_units["time"]
        vel_unit = pos_unit / t_unit

        from ..dynamics import Orbit

        # First ndim rows are positions, the rest are velocities.
        orbit = Orbit(
            pos=w[:self.ndim] * pos_unit,
            vel=w[self.ndim:] * vel_unit,
            t=t * t_unit,
        )

        return orbit

    def run(self):
        """
        Run the integrator starting from the specified phase-space position.
        The initial conditions ``w0`` should be a
        `~gala.dynamics.PhaseSpacePosition` instance.

        There are a few combinations of keyword arguments accepted for
        specifying the timestepping. For example, you can specify a fixed
        timestep (``dt``) and a number of steps (``n_steps``), or an array of
        times::

            dt, n_steps[, t1] : (numeric, int[, numeric])
                A fixed timestep dt and a number of steps to run for.
            dt, t1, t2 : (numeric, numeric, numeric)
                A fixed timestep dt, an initial time, and a final time.
            t : array-like
                An array of times to solve on.

        Parameters
        ----------
        w0 : `~gala.dynamics.PhaseSpacePosition`
            Initial conditions.
        **time_spec
            Timestep information passed to
            `~gala.integrate.time_spec.parse_time_specification`.

        Returns
        -------
        orbit : `~gala.dynamics.Orbit`
        """
        # Abstract: subclasses implement the actual stepping loop.
        pass
|
[
"adrian.prw@gmail.com"
] |
adrian.prw@gmail.com
|
6a4b0552b744dec84088346b499869e5c6e2f442
|
284b88b3ff07430e17c04503f646db50677f627b
|
/Algorithm_w_Python/breakingRecords/breakin.py
|
bcea1259a3ac5551c57727e3d6f685001859b3ce
|
[] |
no_license
|
ybgirgin3/hackerrank-solutions
|
ae61c27173c24c920f6e002a12a1acd20928cf59
|
58c4f62585d115eff3e1a43595d6a8375f185696
|
refs/heads/master
| 2023-05-27T11:32:06.099639
| 2021-06-13T18:12:22
| 2021-06-13T18:12:22
| 280,741,585
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,038
|
py
|
#!/usr/bin/python3
### çalışmıyor aq
import math
import os
import random
import re
import sys
import numpy as np
np.set_printoptions(suppress=True)
# Complete the breakingRecords function below.
def breakingRecords(scores):
    """Count how many times the season best and worst records were broken.

    HackerRank "Breaking the Records": the first game sets both records;
    afterwards the best record is broken only by a strictly higher score and
    the worst record only by a strictly lower one.  The original code (whose
    own comment admitted it did not work) printed the *indices* of the max
    and min instead of these counts.

    Prints "best_breaks worst_breaks" (the format the judge expects) and also
    returns the pair as a tuple for testability.  An empty list yields (0, 0).
    """
    if not scores:
        print(0, 0)
        return (0, 0)
    best = worst = scores[0]
    best_breaks = worst_breaks = 0
    for score in scores[1:]:
        if score > best:
            best = score
            best_breaks += 1
        elif score < worst:
            worst = score
            worst_breaks += 1
    print(best_breaks, worst_breaks)
    return (best_breaks, worst_breaks)
if __name__ == '__main__':
    # HackerRank stub: first line is the game count, second line the scores.
    n = int(input())

    scores = list(map(int, input().rstrip().split()))

    result = breakingRecords(scores)
|
[
"ybgirgin3@gmail.com"
] |
ybgirgin3@gmail.com
|
8d86da40269921bffa1c814385522d20c36cf4da
|
28b0928057e96da28b268cbe1fe64ede3a2a20c5
|
/addons/edi_product/models/edi_product_record.py
|
eeff16a7b54cf541e2c862d2bf6087f2e3be2e98
|
[] |
no_license
|
sasakuma/odoo-edi
|
370061221e09f7ade1a7753ff237ebec24b55694
|
31a0ff761be3984adc5d6ceaabe781801715ad14
|
refs/heads/master
| 2020-03-29T18:24:55.576689
| 2018-10-29T04:50:51
| 2018-10-29T04:50:51
| 150,211,179
| 0
| 0
| null | 2018-10-29T04:50:52
| 2018-09-25T05:14:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,829
|
py
|
"""EDI product records"""
from odoo import api, fields, models
class EdiDocument(models.Model):
    """Extend ``edi.document`` to include EDI product records"""

    _inherit = 'edi.document'

    # Product records created/updated when the document executes.
    product_ids = fields.One2many('edi.product.record', 'doc_id',
                                  string="Products")
    # Products to deactivate when the document executes.
    inactive_product_ids = fields.One2many('edi.inactive.product.record',
                                           'doc_id', string="Inactive Products")
class EdiProductRecord(models.Model):
    """EDI product record

    This is the base model for EDI product records. Each row
    represents a product that will be created or updated when the
    document is executed.

    The fields within each record represent the fields within the
    source document, which may not exactly correspond to fields of the
    ``product.product`` model. For example: the source document may
    define a weight as an integer number of grams, whereas the
    ``product.product.weight`` field is defined as a floating point
    number of kilograms.

    Derived models should implement :meth:`~.target_values`.
    """

    _name = 'edi.product.record'
    _inherit = 'edi.record.sync.active'
    _description = "Product"

    # Synchronise against product.product, matched via default_code.
    _edi_sync_target = 'product_id'
    _edi_sync_via = 'default_code'

    product_id = fields.Many2one('product.product', string="Product",
                                 required=False, readonly=True, index=True,
                                 auto_join=True)
    description = fields.Char(string="Description", required=True,
                              readonly=True, default="Unknown")

    @api.model
    def targets_by_key(self, vlist):
        """Construct lookup cache of target records indexed by key field"""
        products_by_key = super().targets_by_key(vlist)

        # Cache product templates to minimise subsequent database lookups
        Product = self.browse()[self._edi_sync_target].with_context(
            active_test=False
        )
        Template = Product.product_tmpl_id
        products = Product.browse([x.id for x in products_by_key.values()])
        templates = Template.browse(products.mapped('product_tmpl_id.id'))
        # Reading 'name' prefetches the template records into the ORM cache.
        templates.mapped('name')
        return products_by_key

    @api.model
    def target_values(self, record_vals):
        """Construct ``product.product`` field value dictionary"""
        product_vals = super().target_values(record_vals)
        # The record's description becomes the product's display name.
        product_vals.update({
            'name': record_vals['description'],
        })
        return product_vals
class EdiInactiveProductRecord(models.Model):
    """EDI inactive product record

    Each row names a ``product.product`` to be deactivated when the
    document is executed.
    """

    _name = 'edi.inactive.product.record'
    _inherit = 'edi.record.deactivator'
    _description = "Inactive Product"

    # Product to deactivate.
    target_id = fields.Many2one('product.product', string="Product")
|
[
"mbrown@fensystems.co.uk"
] |
mbrown@fensystems.co.uk
|
942fb16d6341d7adf7dec90c86062803d46ebb56
|
db053c220094368ecb784fbe62375378c97457c2
|
/92.reverse-linked-list-ii.py
|
01812dfd90c75c46be6106a88de76827a53e89a1
|
[] |
no_license
|
thegamingcoder/leetcode
|
8c16e7ac9bda3e34ba15955671a91ad072e87d94
|
131facec0a0c70d319982e78e772ed1cb94bc461
|
refs/heads/master
| 2020-03-22T14:51:45.246495
| 2018-07-09T00:00:06
| 2018-07-09T00:00:06
| 140,211,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 784
|
py
|
#
# [92] Reverse Linked List II
#
# https://leetcode.com/problems/reverse-linked-list-ii/description/
#
# algorithms
# Medium (31.88%)
# Total Accepted: 145.3K
# Total Submissions: 455.9K
# Testcase Example: '[1,2,3,4,5]\n2\n4'
#
# Reverse a linked list from position m to n. Do it in one-pass.
#
# Note: 1 ≤ m ≤ n ≤ length of list.
#
# Example:
#
#
# Input: 1->2->3->4->5->NULL, m = 2, n = 4
# Output: 1->4->3->2->5->NULL
#
#
#
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def reverseBetween(self, head, m, n):
        """
        Reverse the sublist between 1-based positions m and n in one pass.

        The original stub had no body. Assumes 1 <= m <= n <= length.

        :type head: ListNode
        :type m: int
        :type n: int
        :rtype: ListNode
        """
        if head is None or m == n:
            return head
        # Walk to the node just before position m (None when m == 1).
        prev = None
        node = head
        for _ in range(m - 1):
            prev = node
            node = node.next
        # ``tail`` becomes the last node of the reversed section.
        tail = node
        reversed_head = None
        # Standard in-place pointer reversal over exactly n - m + 1 nodes.
        for _ in range(n - m + 1):
            nxt = node.next
            node.next = reversed_head
            reversed_head = node
            node = nxt
        # Reconnect the reversed section to the remainder of the list.
        tail.next = node
        if prev is None:
            return reversed_head
        prev.next = reversed_head
        return head
|
[
"sharanbale@yahoo-inc.com"
] |
sharanbale@yahoo-inc.com
|
ed93f6359fc8c624ea8f4a3efb3595b551de4a00
|
b7b2f80ab5e1ee0ea028576e3014b62b8d3a8d7e
|
/pyinst/pyinst-000/postinst.py
|
fe27556ee5b15caee330210d5f7103f3ac5dc6f8
|
[] |
no_license
|
pglen/pgpygtk
|
4d1405478a714f003984cf3e3db04ff1f767470b
|
33f58010e304f1a312f2356de453ecedb7aa21ef
|
refs/heads/master
| 2021-01-22T01:18:52.238415
| 2019-01-01T01:37:24
| 2019-01-01T01:37:24
| 102,215,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 987
|
py
|
#!/usr/bin/env python
import os, sys, getopt, signal, select, time
import subprocess
verbose = False
# ------------------------------------------------------------------------
def exec_program2(fname, arg1, arg2, arg3):
global verbose
try:
if verbose:
print "Started", fname
pp = subprocess.Popen([fname, arg1, arg2, arg3])
ret = os.waitpid(pp.pid, 0)
if ret[1] != 0:
print "Warninig: ", fname, "returned with", ret[1]
if verbose:
print "Ended ", fname
except:
print "Cannot execute script", fname, sys.exc_info()
raise
return True
# ------------------------------------------------------------------------
if __name__ == '__main__':
    #print "In install.py"
    #time.sleep(1)
    pass
# Create menus for your app.
# Edit entry.desktop to taste
# NOTE(review): this call is at module level, so it also runs if the file
# is imported rather than executed — confirm that is intended.
exec_program2("xdg-desktop-menu", "install", "--novendor", "entry.desktop")
|
[
"peterglen99@gmail.com"
] |
peterglen99@gmail.com
|
a4e291834a035910169cf5c8fa887a2feef65ec6
|
7b13e6acb2a1f26936462ed795ee4508b4088042
|
/算法题目/算法题目/二分查找/LeetCode69求开方.py
|
cdee55e6ed01d6470cb969ad904f4d20d55e2c37
|
[] |
no_license
|
guojia60180/algorithm
|
ed2b0fd63108f30cd596390e64ae659666d1c2c6
|
ea81ff2722c7c350be5e1f0cd6d4290d366f2988
|
refs/heads/master
| 2020-04-19T08:25:55.110548
| 2019-05-13T13:29:39
| 2019-05-13T13:29:39
| 168,076,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
#Author guo
class Solution:
    def mySqrt(self, x):
        """Return the integer square root (floor of sqrt(x)) via binary search."""
        lo, hi = 0, x
        while lo <= hi:
            guess = (lo + hi) // 2
            if guess * guess > x:
                # guess overshoots the root
                hi = guess - 1
            elif (guess + 1) * (guess + 1) <= x:
                # guess undershoots the root
                lo = guess + 1
            else:
                # guess * guess <= x < (guess + 1) ** 2 — this is the floor.
                return guess
|
[
"44565715+guojia60180@users.noreply.github.com"
] |
44565715+guojia60180@users.noreply.github.com
|
7281d23d02e3de9896dd3690adde741d708c1fe7
|
1b60b5c4d2a873b643dbd04fb77504d596237ba2
|
/runtests.py
|
d8b2a28529b2ecfaaa4faf76e40c0f1029086034
|
[
"BSD-3-Clause"
] |
permissive
|
amolm-cuelogic/django-charsleft-widget
|
32d832c90c9d269efd5877e83983c78a2ff5a0db
|
bbe0196c597c7b25f51d204acb10d05ab348d703
|
refs/heads/master
| 2021-01-22T19:09:15.856856
| 2016-10-14T08:15:51
| 2016-10-14T08:15:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,855
|
py
|
#!/usr/bin/env python
import sys
from os import path
import django
from django.conf import settings, global_settings
from django.core.management import execute_from_command_line
if not settings.configured:
    # Configure a minimal standalone Django environment so the widget's
    # test suite can run without a real project settings module.
    BASE_DIR = path.dirname(path.realpath(__file__))
    settings.configure(
        DEBUG = False,
        TEMPLATE_DEBUG = True,
        TEMPLATES = [
            {
                'BACKEND': 'django.template.backends.django.DjangoTemplates',
                'DIRS': [
                ],
                'APP_DIRS': True,
                'OPTIONS': {
                    'context_processors': [
                        'django.contrib.auth.context_processors.auth',
                        'django.template.context_processors.debug',
                        'django.template.context_processors.i18n',
                        'django.template.context_processors.media',
                        'django.template.context_processors.static',
                        'django.template.context_processors.tz',
                        'django.contrib.messages.context_processors.messages',
                    ],
                },
            },
        ],
        # In-memory SQLite keeps the run self-contained and fast.
        DATABASES = {
            'default': {
                'ENGINE': 'django.db.backends.sqlite3',
                'NAME': ':memory:'
            }
        },
        INSTALLED_APPS = (
            'django.contrib.auth',
            'django.contrib.staticfiles',
            'django.contrib.contenttypes',
            'charsleft_widget',
        ),
        # Django 1.6 replaced DjangoTestSuiteRunner with DiscoverRunner.
        TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner' if django.VERSION < (1,6) else 'django.test.runner.DiscoverRunner',
        STATIC_URL = '/static/',
    )
def runtests():
    """Invoke Django's test runner on the charsleft_widget app, forwarding
    any extra command-line arguments."""
    command = sys.argv[:1] + ['test', 'charsleft_widget'] + sys.argv[1:]
    execute_from_command_line(command)


if __name__ == '__main__':
    runtests()
|
[
"basil.shubin@gmail.com"
] |
basil.shubin@gmail.com
|
131fa69d7c88902d3272e686e559688f3e1406f6
|
5c7f2ff956b1fd1477d56486e239b6e661a08efd
|
/reinforcement_learning/0x00-q_learning/3-q_learning.py
|
38289878256043dd1d61c99f45447b4db0addd4e
|
[] |
no_license
|
diego0096/holbertonschool-machine_learning
|
60c5f40e185df04d02d9887d966542e85a981896
|
64b8984846c2b2b88bbf11125b55b482c7b74eea
|
refs/heads/master
| 2023-04-02T01:27:59.263397
| 2021-04-02T21:33:51
| 2021-04-02T21:33:51
| 279,229,931
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,253
|
py
|
#!/usr/bin/env python3
"""3-q_learning.py"""
import numpy as np
epsilon_greedy = __import__('2-epsilon_greedy').epsilon_greedy
def train(env, Q, episodes=5000, max_steps=100, alpha=0.1,
          gamma=0.99, epsilon=1, min_epsilon=0.1, epsilon_decay=0.05):
    """function that performs Q-learning

    env: FrozenLake-style environment (provides reset/step and a `desc`
         board array) — assumed, TODO confirm against caller.
    Q:   action-value table of shape (states, actions), updated in place.
    Returns (Q, total_rewards) where total_rewards has one entry per episode.
    """
    total_rewards = []
    for episode in range(episodes):
        state = env.reset()
        done = False
        for step in range(max_steps):
            # Epsilon-greedy exploration/exploitation choice.
            action = epsilon_greedy(Q, state, epsilon)
            new_state, reward, done, info = env.step(action)
            # Map the flat state index back onto the square board to see
            # which tile was entered.
            map_size = env.desc.shape[0]
            new_state_on_map = env.desc[int(np.floor(new_state / map_size)),
                                        new_state % map_size]
            if new_state_on_map == b'H':
                # Override the environment's reward: falling in a hole is
                # penalized instead of scoring 0.
                reward = -1.0
            # Standard Q-learning temporal-difference update.
            Q[state, action] = ((1 - alpha) * Q[state, action] + alpha *
                                (reward + gamma * np.max(Q[new_state, :])))
            state = new_state
            if done is True:
                break
        max_epsilon = 1
        # Exponential decay of the exploration rate across episodes.
        epsilon = (min_epsilon + (max_epsilon - min_epsilon) *
                   np.exp(-epsilon_decay * episode))
        # NOTE(review): only the final step's reward is recorded for the
        # episode (not the sum) — confirm that is the intended metric.
        total_rewards.append(reward)
    return Q, total_rewards
|
[
"dfqz93@hotmail.com"
] |
dfqz93@hotmail.com
|
674b0a7991768af96255c795dd3126b23dd92600
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_172/ch87_2020_04_29_11_23_49_782896.py
|
b473d466d2bfc573a99a1bf4c984260fa1ce298e
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
# Sum quantity * unit-price over the comma-separated lines of churras.txt.
with open('churras.txt', 'r') as arquivo:
    conteudo = arquivo.readlines()

y = 0
for item in conteudo:
    x = item.split(',')
    # BUG FIX: split() yields strings, so x[1]*x[2] raised
    # TypeError (str * str); convert the fields before multiplying.
    y = y + float(x[1]) * float(x[2])
print(y)
|
[
"you@example.com"
] |
you@example.com
|
acbc151d7b384b7d09b14aef7407e39844b2cb9e
|
a55d515cf59f4ee898892fbf358c327ff53bce96
|
/djangodialogs/auth/urls.py
|
b76c9847559dc83f03b63bbafd5035c1a6a268a5
|
[] |
no_license
|
furious-luke/django-dialogs
|
859a3c30970f0a40813f828a0e0909cf5de29d24
|
142b5075910c940091a1f58a69d5192f74c10f9c
|
refs/heads/master
| 2016-09-05T17:23:22.244282
| 2012-03-14T04:24:59
| 2012-03-14T04:24:59
| 1,964,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
from django.conf.urls.defaults import *
from views import *
# AJAX authentication endpoints (legacy URL-conf style;
# django.conf.urls.defaults was removed in Django 1.6).
urlpatterns = patterns('',
    url(r'^accounts/login/ajax/$', login),
    url(r'^accounts/register/ajax/$', register),
)
|
[
"furious.luke@gmail.com"
] |
furious.luke@gmail.com
|
a3853c891ca016a85276919f21c11e6ea1299f0e
|
490f5e517942f529ddc8c1e0d421a208ff1ca29b
|
/02_code/listinstance2.py
|
c198804a387d5ef0f574c63f87e3a85e035230d8
|
[] |
no_license
|
emnglang/py-lab
|
facdc464a8c84b90f06b5cb639315981c0b4ba8d
|
bc3566da81e0b2cfa9ce563ffc198d35294971a1
|
refs/heads/master
| 2020-03-25T15:10:42.856062
| 2018-08-24T14:54:33
| 2018-08-24T14:54:33
| 143,869,343
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
#!python
# File listinstance.py (2.X + 3.X)
class ListInstance:
    """
    Mix-in class that provides a formatted print() or str() of instances via
    inheritance of __str__ coded here; displays instance attrs only; self is
    instance of lowest class; __X names avoid clashing with client's attrs
    """
    def __attrnames(self):
        # One "\tname=value\n" line per instance attribute, sorted by name.
        lines = []
        for name in sorted(self.__dict__):
            lines.append('\t%s=%s\n' % (name, self.__dict__[name]))
        return ''.join(lines)

    def __str__(self):
        # Lowest subclass name + object identity + attribute listing.
        return '<Instance of %s, address %s:\n%s>' % (
            self.__class__.__name__,
            id(self),
            self.__attrnames())
if __name__ == '__main__':
    # Self-test: exercise the mix-in through the shared test harness.
    import testmixin
    testmixin.tester(ListInstance)
|
[
"linja1688@gmail.com"
] |
linja1688@gmail.com
|
4c6f6c224597b05b7e7ca61fee80f16589525350
|
248cf77b8a24b3b35e658d81b4c9cb8c450c2ca4
|
/cryptex/streamers/bitbay/__init__.py
|
cdb4ef4c0f13d264b7f1b3ca9447113e54d3cb0b
|
[] |
no_license
|
Reynaldo61/cryptex
|
fee1e124f544a049a74775fab4540cfd89d405df
|
deb34a449bdeb4e26009ef21035383ecf463cb3e
|
refs/heads/master
| 2020-09-09T21:04:44.855833
| 2018-03-04T17:41:32
| 2018-03-04T17:41:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Re-export the bitbay exchange implementation under the short aliases
# (base / wrapper / streamer) that the surrounding framework expects.
from .bitbay import BitbayBase as base
from .wrapper import BitbayWrapper as wrapper
from .streamer import BitbayStreamer as streamer

# Exchange metadata consumed by the streamer machinery.
__version__ = 0.1
__exchange__ = "bitbay"
__method__ = "rest"
|
[
"ran@aroussi.com"
] |
ran@aroussi.com
|
5628e326c354f7cd3c1d230e5e6e83140c249278
|
a40950330ea44c2721f35aeeab8f3a0a11846b68
|
/INTERACTION1/DRIVER/Interaction/PumpsStation/Liquid/_OneSensor.py
|
b7ac8d7e289207ed76be73e90b0fa19808b3bab0
|
[] |
no_license
|
huang443765159/kai
|
7726bcad4e204629edb453aeabcc97242af7132b
|
0d66ae4da5a6973e24e1e512fd0df32335e710c5
|
refs/heads/master
| 2023-03-06T23:13:59.600011
| 2023-03-04T06:14:12
| 2023-03-04T06:14:12
| 233,500,005
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,567
|
py
|
import time
import random
import pigpio
import threading
BAUD = 9600
class OneSensor(object):
    """One sensor read over pigpio bit-banged serial on a Raspberry Pi.

    A background thread accumulates bytes into 4-byte frames; a frame is
    valid when (b0 + b1 + b2) & 0xff == b3, and the decoded value
    b1 * 256 + b2 is reported through data_cb(sid=..., data=...).
    """
    def __init__(self, pi, sid, rx_pin, tx_pin, data_cb):
        # pi: an open pigpio.pi() connection
        # sid: identifier echoed back through data_cb
        # tx_pin is configured as an output but never written here.
        self._pi = pi
        self._sid = sid
        self._rx_pin = rx_pin
        self._tx_pin = tx_pin
        self._data_cb = data_cb
        # PI
        self._pi.set_mode(self._rx_pin, pigpio.INPUT)
        self._pi.set_mode(self._tx_pin, pigpio.OUTPUT)
        # Close any stale bb_serial reader on this pin; pigpio raises when
        # none is open, so exceptions are suppressed for just that call.
        pigpio.exceptions = False
        self._pi.bb_serial_read_close(self._rx_pin)
        pigpio.exceptions = True
        self._pi.bb_serial_read_open(self._rx_pin, BAUD, 8)
        self._msg = bytes()  # frame accumulator, reset every 4 bytes
        # THREAD
        self._thread = threading.Thread(target=self._working)
        self._thread.daemon = True
        self._thread.start()

    def _working(self):
        # Daemon poll loop: read available serial bytes once per second
        # and decode complete 4-byte frames.
        while 1:
            count, data = self._pi.bb_serial_read(self._rx_pin)
            if count:
                self._msg += data
                # NOTE(review): if a read delivers enough bytes to jump
                # past length 4, this equality check never fires and the
                # buffer grows forever — confirm frames always arrive
                # whole at this poll rate.
                if len(self._msg) == 4:
                    if (self._msg[0] + self._msg[1] + self._msg[2]) & 0x00ff == self._msg[3]:
                        self._data_cb(sid=self._sid, data=self._msg[1] * 256 + self._msg[2])
                    # Frame consumed (valid or not); start a fresh one.
                    self._msg = bytes()
            time.sleep(1)
if __name__ == '__main__':
    import os
    # Manual hardware test: print every decoded frame.
    def _data_cb(sid, data):
        print(sid, data)
    _pi = pigpio.pi()
    if not _pi.connected:
        # Start the pigpio daemon if it is not already running, then retry.
        os.system('sudo pigpiod')
        _pi = pigpio.pi()
    sensor = OneSensor(pi=_pi, sid=1, rx_pin=15, tx_pin=14, data_cb=_data_cb)
    # rx_pin=15, 24, 8, 12, 20
    # tx_pin=14, 23, 25, 7, 16
|
[
"443765159@qq.com"
] |
443765159@qq.com
|
60413c7f96d539ba2b325c06b0678d2f48db7667
|
1fe8d4133981e53e88abf633046060b56fae883e
|
/venv/lib/python3.8/site-packages/scipy/fft/setup.py
|
f0d5dade992f334540d4759fc4100bb1c8bdaf3c
|
[] |
no_license
|
Akira331/flask-cifar10
|
6c49db8485038731ce67d23f0972b9574746c7a7
|
283e7a2867c77d4b6aba7aea9013bf241d35d76c
|
refs/heads/master
| 2023-06-14T16:35:06.384755
| 2021-07-05T14:09:15
| 2021-07-05T14:09:15
| 382,864,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:f32208914b4d7de0ea26596a8be64ef729f4cc5799cd04da84cd8f639c4796de
size 448
|
[
"business030301@gmail.com"
] |
business030301@gmail.com
|
67cf67841c5e1908a4086e5b9737233c7076141c
|
2698b0148191078f36efe266c3572d9f30724255
|
/sharedslides.py
|
d49c4cdff40d8d397bccf9a8082771f798ea1678
|
[] |
no_license
|
sugar-activities/4196-activity
|
8c81dc6c0aa0c7e7fd8bd08da79c5a1279d7d400
|
8eaefd6bc6429694e2d765b70f5bdd42b1a5286a
|
refs/heads/master
| 2021-01-19T23:15:28.535962
| 2017-04-21T05:45:34
| 2017-04-21T05:45:34
| 88,937,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,085
|
py
|
# -*- mode:python; tab-width:4; indent-tabs-mode:nil; -*-
# sharedslides.py
#
# Class that performs all work relating to the sharing of slide decks and ink.
# Kris Plunkett <kp86@cs.washington.edu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
import sys
import os
import time
import random
import gobject
import telepathy
import telepathy.client
import dbus
from dbus.service import method, signal
from dbus.gobject_service import ExportedGObject
from sugar.presence import presenceservice
from sugar import network
from sugar.presence.tubeconn import TubeConnection
SERVICE = "edu.washington.cs.ClassroomPresenterXO"
IFACE = SERVICE
PATH = "/edu/washington/cs/ClassroomPresenterXO"
# Define a simple HTTP server for sharing data.
class ReadHTTPRequestHandler(network.ChunkedGlibHTTPRequestHandler):
    # Serve the server's single shared file regardless of the requested path.
    def translate_path(self, path):
        return self.server._filepath
class ReadHTTPServer(network.GlibTCPServer):
    """HTTP server that serves exactly one local file (the slide deck)."""
    def __init__(self, server_address, filepath):
        self._filepath = filepath
        network.GlibTCPServer.__init__(self, server_address, ReadHTTPRequestHandler)
class SharedSlides(gobject.GObject):
    """ Handles all sharing of slides and ink.

    The initiating (sharer) side serves the deck file over a local HTTP
    server exposed through a Telepathy stream tube; the joining side
    finds such a tube, downloads the deck, then loads it via read_file_cb.
    """

    __gsignals__ = {
        # Emitted once the deck file has been fully downloaded.
        'deck-download-complete' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
    }

    def __init__(self, init, cpxo_path, shared_activity, read_file_cb):
        """init: True when this instance already owns the deck and shares it.
        cpxo_path: local path the deck is served from / downloaded to.
        read_file_cb: called with cpxo_path after a successful download."""
        gobject.GObject.__init__(self)

        self.__is_initiating = init
        self.__cpxo_path = cpxo_path
        self.__shared_activity = shared_activity
        self.read_file_cb = read_file_cb
        self.__logger = logging.getLogger('SharedSlides')

        self.__tubes_chan = self.__shared_activity.telepathy_tubes_chan
        self.__iface = self.__tubes_chan[telepathy.CHANNEL_TYPE_TUBES]

        if (self.__is_initiating):
            self.__logger.debug('Hello from SharedSlides (sharer).')
            self.__have_deck = True
            self.share_deck()
        else:
            # find a stream tube to download the slide deck from
            self.__logger.debug('Hello from SharedSlides (joiner).')
            self.__iface.connect_to_signal('NewTube', self.new_tube_cb)
            self.__have_deck = False
            self.get_stream_tube()

    def get_stream_tube(self):
        """ Attempts to download the slide deck from an available stream tube """
        self.__iface.ListTubes(
            reply_handler=self.list_tubes_reply_cb,
            error_handler=self.list_tubes_error_cb)

    def handle_download_fail(self):
        """ If an attempt to download the deck fails, this method takes care of it """
        self.__logger.error('Download failed! Sleeping five seconds and trying again.')
        time.sleep(5)
        self.get_stream_tube()

    def list_tubes_reply_cb(self, tubes):
        # Treat every already-existing tube like a freshly announced one.
        for tube_info in tubes:
            self.new_tube_cb(*tube_info)

    def list_tubes_error_cb(self, e):
        self.__logger.error('ListTubes() failed: %s', e)
        # BUG FIX: previously this referenced the method without calling it
        # ("self.handle_download_fail"), so a ListTubes failure silently
        # never triggered a retry.
        self.handle_download_fail()

    def new_tube_cb(self, id, initiator, type, service, params, state):
        """Inspect a tube offer and accept the first matching stream tube."""
        self.__logger.debug('New tube: ID=%d initiator=%d type=%d service=%s params=%r state=%d',
                            id, initiator, type, service, params, state)

        if (not self.__have_deck and
            type == telepathy.TUBE_TYPE_STREAM and
            service == SERVICE and
            state == telepathy.TUBE_STATE_LOCAL_PENDING):
                addr = self.__iface.AcceptStreamTube(id,
                        telepathy.SOCKET_ADDRESS_TYPE_IPV4,
                        telepathy.SOCKET_ACCESS_CONTROL_LOCALHOST, 0,
                        utf8_strings=True)
                self.__logger.debug("Got a stream tube!")
                # sanity checks
                assert isinstance(addr, dbus.Struct)
                assert len(addr) == 2
                assert isinstance(addr[0], str)
                assert isinstance(addr[1], (int, long))
                assert addr[1] > 0 and addr[1] < 65536
                ip_addr = addr[0]
                port = int(addr[1])
                self.__logger.debug("The stream tube is good!")
                self.download_file(ip_addr, port, id)

    def download_file(self, ip_addr, port, tube_id):
        """ Performs the actual download of the slide deck """
        self.__logger.debug("Downloading from ip %s and port %d.", ip_addr, port)
        getter = network.GlibURLDownloader("http://%s:%d/document" % (ip_addr, port))
        getter.connect("finished", self.download_result_cb, tube_id)
        getter.connect("progress", self.download_progress_cb, tube_id)
        getter.connect("error", self.download_error_cb, tube_id)
        self.__logger.debug("Starting download to %s...", self.__cpxo_path)
        getter.start(self.__cpxo_path)

    def download_result_cb(self, getter, tempfile, suggested_name, tube_id):
        """ Called when the file download was successful """
        self.__logger.debug("Got file %s (%s) from tube %u",
                            tempfile, suggested_name, tube_id)
        self.emit('deck-download-complete')
        self.read_file_cb(self.__cpxo_path)

    def download_progress_cb(self, getter, bytes_downloaded, tube_id):
        # Progress is deliberately not logged (too chatty); the dead
        # "tmp = True" placeholder is replaced by an explicit no-op.
        #self.__logger.debug("Bytes downloaded from tube %u: %u", tube_id, bytes_downloaded)
        pass

    def download_error_cb(self, getter, err, tube_id):
        self.__logger.error('Download failed on tube %u: %s', tube_id, err)
        self.handle_download_fail()

    def share_deck(self):
        """ As the instructor XO, or as a student that has completed the deck download
        share the deck with others in the activity """
        # get a somewhat random port number
        self.__port = random.randint(1024, 65535)
        self.__ip_addr = "127.0.0.1"
        self._fileserver = ReadHTTPServer(("", self.__port), self.__cpxo_path)
        self.__logger.debug('Started an HTTP server on port %d', self.__port)
        self.__iface.OfferStreamTube(SERVICE, {},
                telepathy.SOCKET_ADDRESS_TYPE_IPV4,
                (self.__ip_addr, dbus.UInt16(self.__port)),
                telepathy.SOCKET_ACCESS_CONTROL_LOCALHOST, 0)
        self.__logger.debug('Made a stream tube.')


gobject.type_register(SharedSlides)
|
[
"ignacio@sugarlabs.org"
] |
ignacio@sugarlabs.org
|
a3e4ee41e561ef8abd0f4c96f80857d31f2980d3
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/sdssj_031858.28+002325.7/sdB_sdssj_031858.28+002325.7_lc.py
|
015e2ab508983a3ff321694093bcee12e4301b9c
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
from gPhoton.gAperture import gAperture
def main():
    # One-shot light-curve extraction for this sdB target: NUV band,
    # 30 s bins, fixed aperture and sky annulus, CSV written to csvfile.
    gAperture(band="NUV", skypos=[49.742833,0.390472], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_sdssj_031858.28+002325.7/sdB_sdssj_031858.28+002325.7_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
    main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
37d6f988a200dc46a67310f1d35ed4b3cdc5d949
|
853d4cec42071b76a80be38c58ffe0fbf9b9dc34
|
/venv/Lib/site-packages/nltk/chat/iesha.py
|
225e8202b10289cc992b3c089c98d1784db5c7ee
|
[] |
no_license
|
msainTesting/TwitterAnalysis
|
5e1646dbf40badf887a86e125ef30a9edaa622a4
|
b1204346508ba3e3922a52380ead5a8f7079726b
|
refs/heads/main
| 2023-08-28T08:29:28.924620
| 2021-11-04T12:36:30
| 2021-11-04T12:36:30
| 424,242,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,402
|
py
|
# Natural Language Toolkit: Teen Chatbot
#
# Copyright (C) 2001-2021 NLTK Project
# Author: Selina Dennis <sjmd@csse.unimelb.edu.au>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
This chatbot is a tongue-in-cheek take on the average teen
anime junky that frequents YahooMessenger or MSNM.
All spelling mistakes and flawed grammar are intentional.
"""
from nltk.chat.util import Chat
# First/second-person pronoun swaps that Chat applies when echoing the
# user's words back (in the bot's deliberate netspeak).
reflections = {
    "am": "r",
    "was": "were",
    "i": "u",
    "i'd": "u'd",
    "i've": "u'v",
    "ive": "u'v",
    "i'll": "u'll",
    "my": "ur",
    "are": "am",
    "you're": "im",
    "you've": "ive",
    "you'll": "i'll",
    "your": "my",
    "yours": "mine",
    "you": "me",
    "u": "me",
    "ur": "my",
    "urs": "mine",
    "me": "u",
}
# Note: %1/2/etc are used without spaces prior as the chat bot seems
# to add a superfluous space when matching.
# (pattern, responses) pairs, tried in order; the final catch-all r"(.*)"
# must stay last. %1/%2 in a response substitute the reflected match groups.
pairs = (
    (
        r"I\'m (.*)",
        (
            "ur%1?? that's so cool! kekekekeke ^_^ tell me more!",
            "ur%1? neat!! kekeke >_<",
        ),
    ),
    (
        r"(.*) don\'t you (.*)",
        (
            r"u think I can%2??! really?? kekeke \<_\<",
            "what do u mean%2??!",
            "i could if i wanted, don't you think!! kekeke",
        ),
    ),
    (r"ye[as] [iI] (.*)", ("u%1? cool!! how?", "how come u%1??", "u%1? so do i!!")),
    (
        r"do (you|u) (.*)\??",
        ("do i%2? only on tuesdays! kekeke *_*", "i dunno! do u%2??"),
    ),
    (
        r"(.*)\?",
        (
            "man u ask lots of questions!",
            "booooring! how old r u??",
            "boooooring!! ur not very fun",
        ),
    ),
    (
        r"(cos|because) (.*)",
        ("hee! i don't believe u! >_<", "nuh-uh! >_<", "ooooh i agree!"),
    ),
    (
        r"why can\'t [iI] (.*)",
        (
            "i dunno! y u askin me for!",
            "try harder, silly! hee! ^_^",
            "i dunno! but when i can't%1 i jump up and down!",
        ),
    ),
    (
        r"I can\'t (.*)",
        (
            "u can't what??! >_<",
            "that's ok! i can't%1 either! kekekekeke ^_^",
            "try harder, silly! hee! ^&^",
        ),
    ),
    (
        r"(.*) (like|love|watch) anime",
        (
            "omg i love anime!! do u like sailor moon??! ^&^",
            "anime yay! anime rocks sooooo much!",
            "oooh anime! i love anime more than anything!",
            "anime is the bestest evar! evangelion is the best!",
            "hee anime is the best! do you have ur fav??",
        ),
    ),
    (
        r"I (like|love|watch|play) (.*)",
        ("yay! %2 rocks!", "yay! %2 is neat!", "cool! do u like other stuff?? ^_^"),
    ),
    (
        r"anime sucks|(.*) (hate|detest) anime",
        (
            "ur a liar! i'm not gonna talk to u nemore if u h8 anime *;*",
            "no way! anime is the best ever!",
            "nuh-uh, anime is the best!",
        ),
    ),
    (
        r"(are|r) (you|u) (.*)",
        ("am i%1??! how come u ask that!", "maybe! y shud i tell u?? kekeke >_>"),
    ),
    (
        r"what (.*)",
        ("hee u think im gonna tell u? .v.", "booooooooring! ask me somethin else!"),
    ),
    (r"how (.*)", ("not tellin!! kekekekekeke ^_^",)),
    (r"(hi|hello|hey) (.*)", ("hi!!! how r u!!",)),
    (
        r"quit",
        (
            "mom says i have to go eat dinner now :,( bye!!",
            "awww u have to go?? see u next time!!",
            "how to see u again soon! ^_^",
        ),
    ),
    (
        r"(.*)",
        (
            "ur funny! kekeke",
            "boooooring! talk about something else! tell me wat u like!",
            "do u like anime??",
            "do u watch anime? i like sailor moon! ^_^",
            "i wish i was a kitty!! kekekeke ^_^",
        ),
    ),
)
# Module-level bot instance shared by the demo helpers below.
iesha_chatbot = Chat(pairs, reflections)
def iesha_chat():
    # Print the banner, then hand control to Chat's interactive REPL
    # (blocks reading stdin until the user types "quit").
    print("Iesha the TeenBoT\n---------")
    print("Talk to the program by typing in plain English, using normal upper-")
    print('and lower-case letters and punctuation. Enter "quit" when done.')
    print("=" * 72)
    print("hi!! i'm iesha! who r u??!")
    iesha_chatbot.converse()
def demo():
    iesha_chat()
if __name__ == "__main__":
    demo()
|
[
"msaineti@icloud.com"
] |
msaineti@icloud.com
|
6284ac6837cd1258e98d0c17dd4b125e0698cde9
|
bc32a53cfebc7c03987ed44492ed8afb79aed5a3
|
/customers/management/commands/generate_purchases.py
|
c2b3220e7ef021aca32af2aadbcce1d5bd7d9792
|
[
"MIT"
] |
permissive
|
jeremy886/bookstore
|
abe537df821d3d2b873c166965aac2a01e0d5914
|
d220efcc21b95942d14fae672da746b1abcaf750
|
refs/heads/master
| 2020-03-20T19:42:04.639190
| 2017-05-12T19:07:39
| 2017-05-12T19:07:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 852
|
py
|
from django.core.management.base import BaseCommand, CommandError
import factory
from ...factories import PurchaseFactory
from ...models import Purchase
class Command(BaseCommand):
    help = 'Generate N-number Purchases'
    def add_arguments(self, parser):
        # Optional positional count (defaults to 10) plus a --clear flag.
        parser.add_argument('num', nargs='?', default=10, type=int)
        parser.add_argument(
            '--clear',
            action='store_true',
            dest='clear',
            default=False,
            help='Clear out all Purchases before generating new'
        )
    def handle(self, *args, **options):
        if options['clear']:
            Purchase.objects.all().delete()
        # NOTE(review): factory.build_batch builds *unsaved* instances; if
        # the generated purchases are supposed to be persisted,
        # create_batch is likely the intended call — confirm.
        factory.build_batch(PurchaseFactory, size=options['num'])
        self.stdout.write(
            self.style.SUCCESS('Successfully generated %s purchase(s)' % options['num'])
        )
|
[
"kenneth@gigantuan.net"
] |
kenneth@gigantuan.net
|
85a17047bc108250cf945819caff9c91c8ad3cf9
|
4142b8c513d87361da196631f7edd82f11465abb
|
/python/1263A.py
|
b8918a15dc5cdc4a97064bbcc7dad5e27403ed25
|
[] |
no_license
|
npkhanhh/codeforces
|
b52b66780426682ea1a3d72c66aedbe6dc71d7fe
|
107acd623b0e99ef0a635dfce3e87041347e36df
|
refs/heads/master
| 2022-02-08T17:01:01.731524
| 2022-02-07T10:29:52
| 2022-02-07T10:29:52
| 228,027,631
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
# For each of n test cases, read three integers and print
# min((a + b + c) // 2, a + b) with a <= b <= c.
n = int(input())
for _ in range(n):
    a, b, c = sorted(map(int, input().split()))
    if a + b >= c:
        # BUG FIX: int((a+b+c)/2) routed the sum through a float and can
        # lose precision for very large values; floor division is exact.
        print((a + b + c) // 2)
    else:
        # The two smaller piles are the binding constraint.
        print(a + b)
|
[
"npkhanh93@gmail.com"
] |
npkhanh93@gmail.com
|
b583f8756e2031e3b01beb734e64fdfc0770f0c3
|
ab19c3757766f00c7414aa10641f8c7e6321375d
|
/40.py
|
7ef3408fe4ce923ea05c074a96da77db063a15e7
|
[] |
no_license
|
HHariHHaran/python-programming
|
2c70ff0c4b24ae48b8096075a29ffc0edfe1ef00
|
c2db869e352d7ee22d499dd772f5cb2285b2822f
|
refs/heads/master
| 2020-04-19T09:19:56.918989
| 2019-01-22T09:50:28
| 2019-01-22T09:50:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
# Print the first nterms Fibonacci numbers.
n11 = 0  # current term
n12 = 1  # next term
count = 0
nterms=int(input("n"))
if nterms <= 0:
    # Reject non-positive counts.
    print("Positive integer")
elif nterms == 1:
    print("Fibonacci sequence upto",nterms,":")
    print(n11)
else:
    print("Fibonacci sequence upto",nterms,":")
    while count < nterms:
        print(n11,end=' ')
        nth = n11 + n12
        # Slide the two-term window forward.
        n11 = n12
        n12 = nth
        count += 1
|
[
"noreply@github.com"
] |
HHariHHaran.noreply@github.com
|
6d4c342943a36cbc73c3e96e062a082e56c15181
|
aa853a9094fff4b6e9b0ddc7469be29ad5f0f811
|
/poi_stock_account_consolidate/models/__init__.py
|
646eda432c164d20562a02dbbb872636bba80b21
|
[] |
no_license
|
blue-connect/illuminati
|
40a13e1ebeaceee39f17caa360f79e8deeaebf58
|
6682e60630064641474ddb2d8cbc520e30f64832
|
refs/heads/master
| 2022-01-06T00:55:58.465611
| 2018-11-24T04:30:03
| 2018-11-24T04:30:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,049
|
py
|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import stock_account
|
[
"yori.quisbert@poiesisconsulting.com"
] |
yori.quisbert@poiesisconsulting.com
|
45e8a9ce23db31e8bffc2db20e4031eff14e986a
|
ebb2a06a025b3c25443b2287c7c9b130cad5b5f1
|
/unsourced/scrape.py
|
e9a26d03bd11897e45f1248ad095f5c358f6b1c8
|
[] |
no_license
|
bcampbell/unsourced
|
b587d423e2422ad2e5060263404b062df2941dbe
|
4e0b418a957bbac9c7d3358425cdf11bcd436388
|
refs/heads/master
| 2021-01-23T12:20:54.033881
| 2014-02-18T08:43:25
| 2014-02-18T08:43:25
| 2,531,708
| 0
| 1
| null | 2012-10-04T01:14:19
| 2011-10-07T10:18:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,037
|
py
|
import urllib
import collections
import json
import datetime
from unsourced import util,analyser,highlight
from unsourced.forms import EnterArticleForm
from unsourced.models import Article,ArticleURL,Action
class Status:
    """ status codes returned by scrapomat """
    SUCCESS = 0      # article scraped successfully
    NET_ERROR = 1    # network failure while fetching the URL
    BAD_REQ = 2      # URL did not look like an article
    PAYWALLED = 3    # article is behind a paywall
    PARSE_ERROR = 4  # fetched page could not be parsed
PARSE_ERROR = 4
def process_scraped(url,response):
    """ process http response from scrapomat, return an article (or raise exception)

    url: the article URL that was scraped.
    response: HTTP response object with .error and .body (JSON payload).
    Raises Exception with a user-facing message on any failure.
    """
    scraped_art = None
    enter_form = EnterArticleForm(url=url)
    err_msg = None
    if response.error:
        # scrapomat down :-(
        raise Exception("Sorry, there was a problem reading the article.")
    results = json.loads(response.body)
    if results['status'] != Status.SUCCESS:
        # Translate scraper status codes into user-facing messages.
        error_messages = {
            Status.PAYWALLED: u"Sorry, that article seems to be behind a paywall.",
            Status.PARSE_ERROR: u"Sorry, we couldn't read the article",
            Status.BAD_REQ: u"Sorry, that URL doesn't look like an article",
            Status.NET_ERROR: u"Sorry, we couldn't read that article - is the URL correct?",
        }
        err_msg = error_messages.get(results['status'],"Unknown error")
        raise Exception(err_msg)
    scraped_art = results['article']
    # The scraper reports pubdate as a unix timestamp.
    scraped_art['pubdate'] = datetime.datetime.fromtimestamp(scraped_art['pubdate'])
    # use entry form to validate everything's there (ugh!)
    enter_form.url.data = url
    enter_form.title.data = scraped_art['headline']
    enter_form.pubdate.data = scraped_art['pubdate']
    if not enter_form.validate():
        scraped_art = None
        err_msg = u"Sorry, we weren't able to automatically read all the details"
        raise Exception(err_msg)
    # if we've got this far, we now have all the details needed to load the article into the DB. Yay!
    url_objs = [ArticleURL(url=u) for u in scraped_art['urls']]
    art = Article(scraped_art['headline'],scraped_art['permalink'], scraped_art['pubdate'], url_objs)
    return art
|
[
"ben@scumways.com"
] |
ben@scumways.com
|
ca1be42514d3cf0cd7dd055208df6e2fb2b5309b
|
bbeecb7cff56a96c580709b425823cde53f21621
|
/msw/spots/north_america/nova_scotia.py
|
7d3025323b4dad586bac40ff88083a92701a9107
|
[] |
no_license
|
hhubbell/python-msw
|
f8a2ef8628d545b3d57a5e54468222177dc47b37
|
5df38db1dc7b3239a6d00e0516f2942077f97099
|
refs/heads/master
| 2020-04-05T23:46:21.209888
| 2015-06-16T01:36:43
| 2015-06-16T01:36:43
| 37,476,303
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
# Numeric surf-spot identifiers for Nova Scotia used by the MSW API
# wrappers — presumably Magicseaweed spot IDs; verify against the API.
LAWRENCETOWN = 342
WESTERN_HEAD = 343
POINT_MICHAUD = 758
COW_BAY = 786
SUMMERVILLE = 788
MARTINIQUE_BEACH = 814
NOVA_SCOTIA_HURRICANE = 1096
|
[
"hhubbell@uvm.edu"
] |
hhubbell@uvm.edu
|
217faec137550783954ec982386548f78c4d0443
|
b483c598fa375e9af02348960f210b9f482bd655
|
/cursoemvideo/desafios/Desafio037.py
|
7c82f9d600b1128328fb5824c6e2d85828beca8b
|
[
"MIT"
] |
permissive
|
brunofonsousa/python
|
6f766d08bf193180ea9a4903cb93ffd167db588d
|
8f2f26c77015c0baaa76174e004406b4115272c7
|
refs/heads/master
| 2022-09-30T14:58:01.080749
| 2020-06-08T09:55:35
| 2020-06-08T09:55:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
'''
Reads any integer and asks the user to choose a conversion base:
1 for binary, 2 for octal and 3 for hexadecimal.
(User-facing prompts below are intentionally kept in Portuguese.)
'''
num = int(input('Digite um número inteiro: '))
print('Escolha uma das bases para conversão: ')
print('[ 1 ] converter para BINÁRIO')
print('[ 2 ] converter para OCTAL')
print('[ 3 ] converter para HEXADECIMAL')
opcao = int(input('Sua opção: '))
# bin()/oct()/hex() return strings with a two-character prefix
# ("0b"/"0o"/"0x"); slicing with [2:] strips it.
if opcao == 1:
    print('{} convertido para BINÁRIO é igual a {}'.format(num, bin(num)[2:]))
elif opcao == 2:
    print('{} convertido para OCTAL é igual a {}'.format(num, oct(num)[2:]))
elif opcao == 3:
    print('{} convertido para HEXADECIMAL é igual a {}'.format(num, hex(num)[2:]))
else:
    print('Opção inválida, tente novamente!')
|
[
"brunofonsousa@gmail.com"
] |
brunofonsousa@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.