| Column | Type | Range / Values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, nullable (⌀) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable (⌀) |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 |
| author_id | string | length 1–132 |
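Loaded into a dataframe, this schema is straightforward to filter. The sketch below is illustrative only: it assumes the rows have been exported to a Parquet file, and the `code_samples.parquet` filename is hypothetical.

```python
# Minimal sketch of filtering this metadata table, assuming a Parquet export;
# the "code_samples.parquet" filename is hypothetical.
import pandas as pd

df = pd.read_parquet("code_samples.parquet")

# Keep permissively licensed Python files that are neither vendored nor generated.
clean = df[
    (df["license_type"] == "permissive")
    & ~df["is_vendor"]
    & ~df["is_generated"]
]
print(clean[["repo_name", "path", "length_bytes"]].head())
```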
7e7f0b264959e12721da8946df1b7451fb442b0a
|
d41d18d3ea6edd2ec478b500386375a8693f1392
|
/plotly/validators/histogram/marker/_showscale.py
|
9ae878dd8c435f88b4a47da9bbee1a5d4207006c
|
[
"MIT"
] |
permissive
|
miladrux/plotly.py
|
38921dd6618650d03be9891d6078e771ffccc99a
|
dbb79e43e2cc6c5762251537d24bad1dab930fff
|
refs/heads/master
| 2020-03-27T01:46:57.497871
| 2018-08-20T22:37:38
| 2018-08-20T22:37:38
| 145,742,203
| 1
| 0
|
MIT
| 2018-08-22T17:37:07
| 2018-08-22T17:37:07
| null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
import _plotly_utils.basevalidators
class ShowscaleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self,
plotly_name='showscale',
parent_name='histogram.marker',
**kwargs
):
super(ShowscaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
role='info',
**kwargs
)
|
[
"adam.kulidjian@gmail.com"
] |
adam.kulidjian@gmail.com
|
38d7a85500b2bafd860e807a46b3baa9a9b67ed5
|
6250be4dcf519d0f0b152b10a641b2d12a35c53f
|
/모두의 파이썬/09B-walk2.py
|
d4d4e3deb14c67dd29c705c8b12e322122bfcb15
|
[] |
no_license
|
heechul90/study-python-basic-2
|
babd01f6a7c2b1fbed4aa31030bb40128960a989
|
94780478e59dfc998baa6e3069ab2473c5df74db
|
refs/heads/master
| 2022-10-27T14:44:00.688509
| 2022-10-24T10:55:33
| 2022-10-24T10:55:33
| 196,142,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 616
|
py
|
# A turtle that walks around at random, version 2
import turtle as t
import random
t.shape("turtle") # Use the 'turtle' shape for the turtle graphic.
t.speed(0)
for x in range(500): # Move the turtle 500 times.
    a = random.randint(1, 360) # Pick any number from 1 to 360 and store it in a.
    t.setheading(a) # Turn the turtle to heading a.
    b = random.randint(1,20) # Added: pick any number from 1 to 20 and store it in b.
    t.forward(b) # Changed: replace the 10 with b.
|
[
"heechul4296@gmail.com"
] |
heechul4296@gmail.com
|
897865636e0146212d66ac2daf1a4de0fd3a642f
|
c80dff81cfec241e6baac8d7211ac16a969d28a4
|
/Fitting/gaussian.py
|
138544e57dcef5f40cb42915f41608b1afe6e8d2
|
[] |
no_license
|
Silentsoul04/PythonCode
|
0043354002b3117f7c2b69a52ca14e664f978576
|
c7301fb7983edbe3492e7d78d531ba2f427d2f3e
|
refs/heads/master
| 2023-01-27T14:26:07.609433
| 2020-12-11T20:23:48
| 2020-12-11T20:23:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,311
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 7 17:30:06 2014
@author: rhf
- Gaussian distribution
- histogram
- fit of the histogram
- smooth of the plot
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.interpolate import make_interp_spline
# Define some test data which is close to Gaussian
data = np.random.normal(loc=0.0, scale=1.0, size=10000)
hist, bin_edges = np.histogram(data,bins=25, density=True)
bin_centres = (bin_edges[:-1] + bin_edges[1:])/2
# Define model function to be used to fit to the data above:
def gauss(x, *p):
A, mu, sigma = p
return A*np.exp(-(x-mu)**2/(2.*sigma**2))
# p0 is the initial guess for the fitting coefficients (A, mu and sigma above)
p0 = [1., 0., 1.]
coeff, var_matrix = curve_fit(gauss, bin_centres, hist, p0=p0)
# Get the fitted curve
hist_fit = gauss(bin_centres, *coeff)
plt.plot(bin_centres, hist,'bo', label='Test data')
plt.plot(bin_centres, hist_fit,'rx', label='Fitted data')
# smooth
xnew = np.linspace(bin_centres[0],bin_centres[-1],200)
hist_smooth = make_interp_spline(bin_centres, hist_fit)(xnew)
plt.plot(xnew,hist_smooth,'r-')
# Finally, lets get the fitting parameters, i.e. the mean and standard deviation:
print('Fitted mean = ', coeff[1])
print('Fitted standard deviation = ', coeff[2])
plt.show()
|
[
"ricleal@gmail.com"
] |
ricleal@gmail.com
|
bef4645a495593d7e79910acfbbbf98082fabfe9
|
ec53949dafa4b6ad675d679b05ed7c83fef2c69a
|
/DataStructuresAndAlgo/InterviewQuestions/ValidateBST.py
|
3108de4956d06bb38f0473cbf994c5b1572e2a3d
|
[] |
no_license
|
tpotjj/Python
|
9a5a20a53cd7a6ec14386c1db8ce155e0fc9ab8a
|
ca73c116ada4d05c0c565508163557744c86fc76
|
refs/heads/master
| 2023-07-11T16:37:10.039522
| 2021-08-14T11:17:55
| 2021-08-14T11:17:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 724
|
py
|
class TreeNode:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
def helper(node, minValue = float('-inf'), maxValue = float('inf')):
    # Every value in the subtree rooted at node must lie strictly between minValue and maxValue.
    if not node:
        return True
val = node.value
if val <= minValue or val >= maxValue:
return False
if not helper(node.right, val, maxValue):
return False
if not helper(node.left, minValue, val):
return False
return True
def isValidBST(root):
return helper(root)
root1 = TreeNode(2)
root1.left = TreeNode(1)
root1.right = TreeNode(4)
print(isValidBST(root1))
root2 = TreeNode(4)
root2.left = TreeNode(1)
root2.right = TreeNode(3)
print(isValidBST(root2))
|
[
"joris97jansen@gmail.com"
] |
joris97jansen@gmail.com
|
68d13a2a50ef4b39ae111b75a765b6bc473dd10c
|
2c4efe2ce49a900c68348f50e71802994c84900a
|
/braindecode/braindecode/venv1/Lib/site-packages/numba/cuda/tests/cudapy/test_forall.py
|
f6d600d8bb11534ddaaad7b10be0bc1287032cde
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
sisi2/Masterthesis
|
b508632526e82b23c2efb34729141bfdae078fa0
|
7ce17644af47db4ad62764ed062840a10afe714d
|
refs/heads/master
| 2022-11-19T15:21:28.272824
| 2018-08-13T15:02:20
| 2018-08-13T15:02:20
| 131,345,102
| 2
| 1
| null | 2022-11-15T14:08:07
| 2018-04-27T21:09:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,123
|
py
|
from __future__ import print_function, absolute_import
import numpy as np
from numba import cuda
import numba.unittest_support as unittest
from numba.cuda.testing import skip_on_cudasim, SerialMixin
@skip_on_cudasim('forall API unsupported in the simulator')
class TestForAll(SerialMixin, unittest.TestCase):
def test_forall_1(self):
@cuda.jit
def foo(x):
i = cuda.grid(1)
if i < x.size:
x[i] += 1
arr = np.arange(11)
orig = arr.copy()
foo.forall(arr.size)(arr)
self.assertTrue(np.all(arr == orig + 1))
def test_forall_2(self):
@cuda.jit("void(float32, float32[:], float32[:])")
def bar(a, x, y):
i = cuda.grid(1)
if i < x.size:
y[i] = a * x[i] + y[i]
x = np.arange(13, dtype=np.float32)
y = np.arange(13, dtype=np.float32)
oldy = y.copy()
a = 1.234
bar.forall(y.size)(a, x, y)
self.assertTrue(np.all(y == (a * x + oldy)))
if __name__ == '__main__':
unittest.main()
|
[
"dansyefila@gmail.com"
] |
dansyefila@gmail.com
|
52ae38711097f2236ddfae0e9e374633a596c0bd
|
a64b8fc6c9e81d433878009249fe9c9a109a602c
|
/core/confdb/syntax/meta/interfaces.py
|
a55f214f6c750f612e2563ff8dd20d85ccb8d42d
|
[
"BSD-3-Clause"
] |
permissive
|
ewwwcha/noc
|
d1de6fe1d556e0f14a0dd31c600844cf43c96728
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
refs/heads/master
| 2020-07-29T10:10:30.862660
| 2019-09-20T07:54:52
| 2019-09-20T07:54:52
| 209,755,887
| 1
| 0
|
NOASSERTION
| 2019-09-20T09:36:22
| 2019-09-20T09:36:22
| null |
UTF-8
|
Python
| false
| false
| 5,016
|
py
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# ConfDB interfaces X meta syntax
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from __future__ import absolute_import
# NOC modules
from ..defs import DEF
from ..patterns import ANY, INTEGER
INTERFACES_META_SYNTAX = DEF(
"meta",
[
DEF(
"profile",
[
DEF(
"id",
[DEF(ANY, name="id", required=True, gen="make_interfaces_meta_profile_id")],
),
DEF(
"name",
[DEF(ANY, name="name", required=True, gen="make_interfaces_meta_profile_name")],
),
],
),
DEF(
"link",
[
DEF(
ANY,
[
DEF(
"object",
[
DEF(
"id",
[
DEF(
ANY,
name="object_id",
required=True,
gen="make_interfaces_meta_link_object_id",
)
],
),
DEF(
"name",
[
DEF(
ANY,
name="object_name",
required=True,
gen="make_interfaces_meta_link_object_name",
)
],
),
DEF(
"profile",
[
DEF(
"id",
[
DEF(
ANY,
name="id",
required=True,
gen="make_interfaces_meta_link_object_profile_id",
)
],
),
DEF(
"name",
[
DEF(
ANY,
name="name",
required=True,
gen="make_interfaces_meta_link_object_profile_name",
)
],
),
DEF(
"level",
[
DEF(
INTEGER,
name="level",
required=True,
gen="make_interfaces_meta_link_object_profile_level",
)
],
),
],
),
],
),
DEF(
"interface",
[
DEF(
ANY,
name="remote_interface",
required=True,
multi=True,
gen="make_interfaces_meta_link_interface",
)
],
),
],
name="link",
multi=True,
)
],
),
],
)
|
[
"dv@nocproject.org"
] |
dv@nocproject.org
|
1dc9756617bc7606a3e5f69b72ea4f6279f1e493
|
43b36890c037da0f8a1ee4c6f03349c5e70b6333
|
/modules/ieee/doc/next.py
|
c6c4b76a779cc42c762ff8aae6a23a7b615c2e7c
|
[
"BSL-1.0"
] |
permissive
|
msuchard/nt2
|
3a07decd8d184e3067452bc7f075e392c7bacc03
|
082d79abd069f4c356bfe10fd113de024f90a5f8
|
refs/heads/master
| 2021-01-18T12:29:32.075028
| 2011-11-15T23:18:23
| 2011-11-15T23:19:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,330
|
py
|
[ ## this file was manually modified by jt
{
'functor' : {
'arity' : '1',
'call_types' : [],
'ret_arity' : '0',
'rturn' : {
'default' : 'T',
},
'simd_types' : ['real_'],
'type_defs' : [],
'types' : ['real_', 'unsigned_int_', 'signed_int_'],
},
'info' : 'manually modified',
'unit' : {
'global_header' : {
'first_stamp' : 'modified by jt the 04/12/2010',
'included' : ['#include <nt2/include/functions/successor.hpp>', '#include <nt2/include/constants/eps_related.hpp>'],
'no_ulp' : 'True',
'notes' : [],
'stamp' : 'modified by jt the 12/12/2010',
},
'ranges' : {
'real_' : [['T(-10)', 'T(10)']],
'signed_int_' : [['-100', '100']],
'unsigned_int_' : [['0', '100']],
},
'specific_values' : {
'default' : {
},
'real_' : {
'nt2::Inf<T>()' : 'nt2::Inf<r_t>()',
'nt2::Minf<T>()' : 'nt2::Valmin<r_t>()',
'nt2::Mone<T>()' : 'nt2::Mone<r_t>()+nt2::Eps<r_t>()/2',
'nt2::Nan<T>()' : 'nt2::Nan<r_t>()',
'nt2::One<T>()' : 'nt2::One<r_t>()+nt2::Eps<r_t>()',
'nt2::Zero<T>()' : 'nt2::Zero<r_t>()+nt2::Mindenormal<T>()',
'nt2::Valmax<T>()' : 'nt2::Inf<r_t>()',
},
'signed_int_' : {
'nt2::Valmax<T>()' : 'nt2::Valmax<r_t>()',
'nt2::Mone<T>()' : 'nt2::Zero<r_t>()',
'nt2::One<T>()' : 'nt2::Two<r_t>()',
'nt2::Zero<T>()' : 'nt2::One<r_t>()',
},
'unsigned_int_' : {
'nt2::Valmax<T>()' : 'nt2::Valmax<r_t>()',
'nt2::One<T>()' : 'nt2::Two<r_t>()',
'nt2::Zero<T>()' : 'nt2::One<r_t>()',
},
},
'verif_test' : {
'property_call' : {
'default' : ['nt2::next(a0)'],
},
'property_value' : {
'default' : ['nt2::successor(a0)'],
},
'ulp_thresh' : '0',
},
},
'version' : '0.1',
},
]
|
[
"jtlapreste@gmail.com"
] |
jtlapreste@gmail.com
|
d8b7625ba76b77d1482fe1ee3866c6c8f7ec0a41
|
3d6a348d703bcef1ff0b11f340a9b63f4ffec534
|
/app/__init__.py
|
200cd3e2f574bb9f57b349ec8c5a9a9a836cde1a
|
[] |
no_license
|
Boomatang/Clue
|
717fed577c15203e8a86de2da96650d46170cc03
|
1b397b815d20c87bf1a0935f6c3f0502ceaddf6e
|
refs/heads/master
| 2021-05-09T10:48:04.036476
| 2019-09-02T17:41:37
| 2019-09-02T17:41:37
| 118,974,799
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,870
|
py
|
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from app.utils import logger
from config import config
from flask_login import LoginManager
from flask_mail import Mail
bootstrap = Bootstrap()
db = SQLAlchemy()
mail = Mail()
login_manager = LoginManager()
login_manager.session_protection = "strong"
login_manager.login_view = "auth.login"
@logger.catch()
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
db.init_app(app)
mail.init_app(app)
login_manager.init_app(app)
# if not app.debug and not app.testing and not app.config['SSL_DISABLE']:
# from flask_sslify import SSLify
# sslify = SSLify(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .user import user as user_blueprint
app.register_blueprint(user_blueprint)
from .cutlist import cutlist as cutlist_blueprint
app.register_blueprint(cutlist_blueprint)
from .tools import tools as tools_blueprint
app.register_blueprint(tools_blueprint)
from .librarys import library as library_blueprint
app.register_blueprint(library_blueprint, url_prefix="/materials")
from .BOM import BOM as BOM_blueprint
app.register_blueprint(BOM_blueprint)
from .api import api as api_blueprint
app.register_blueprint(api_blueprint, url_prefix="/api/v1")
from .cert import cert as cert_blueprint
app.register_blueprint(cert_blueprint, url_prefix="/cert")
from .project import project as project_blueprint
app.register_blueprint(project_blueprint, url_prefix="/project")
    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix="/auth")
return app
|
[
"jimfity@gmail.com"
] |
jimfity@gmail.com
|
629a0a7c039064ace700531452345db4a512f867
|
f654f5f07dd8109c0ee31ba89dd4804e6b288343
|
/src/test/config/sections/brain/test_oob.py
|
bde9b4d6a7293565166e49830fe171d49a3fb640
|
[
"MIT"
] |
permissive
|
sprinteroz/program-y
|
3d1f5f28e4f3be770705d4bef15410b8b78f19da
|
454c6bde225dce7c3fb01c549d46249248caf7b5
|
refs/heads/master
| 2021-01-19T16:05:25.636700
| 2017-08-22T03:56:33
| 2017-08-22T03:56:33
| 100,986,551
| 1
| 0
| null | 2017-08-21T19:43:43
| 2017-08-21T19:43:43
| null |
UTF-8
|
Python
| false
| false
| 1,628
|
py
|
import unittest
from programy.config.file.yaml_file import YamlConfigurationFile
from programy.config.sections.brain.oob import BrainOOBConfiguration
from programy.config.sections.client.console import ConsoleConfiguration
class BrainOOBConfigurationTests(unittest.TestCase):
def test_oob_with_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
oobs:
default:
classname: programy.utils.oob.default.DefaultOutOfBandProcessor
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
self.assertIsNotNone(brain_config)
oobs_config = yaml.get_section("oobs", brain_config)
self.assertIsNotNone(oobs_config)
oob_config = BrainOOBConfiguration("default")
oob_config.load_config_section(yaml, oobs_config, ".")
self.assertEqual("programy.utils.oob.default.DefaultOutOfBandProcessor", oob_config.classname)
def test_default_without_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
oobs:
default:
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
self.assertIsNotNone(brain_config)
oobs_config = yaml.get_section("oobs", brain_config)
self.assertIsNotNone(oobs_config)
oob_config = BrainOOBConfiguration("default")
oob_config.load_config_section(yaml, oobs_config, ".")
self.assertIsNone(oob_config.classname)
|
[
"keith@keithsterling.com"
] |
keith@keithsterling.com
|
15364f7c33c0d232d41563d89fefbb13af61eba3
|
bd9f8187b7821b2e6ffd949386b791f264a7ccab
|
/adam.py
|
8ac53ebe61738f4f6d795a3b2d41cf4a933fbc85
|
[] |
no_license
|
LeonKennedy/qkids
|
353786666536a745d6ecdc75b70be0ff988580a3
|
d7c8bdea4d6ccd257750e05fb113d2f39622ff0e
|
refs/heads/master
| 2020-03-17T13:41:32.466438
| 2018-11-25T12:25:21
| 2018-11-25T12:25:21
| 133,641,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,496
|
py
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
# @Filename: adam.py
# @Author: olenji - lionhe0119@hotmail.com
# @Description: data required by the COO
# @Create: 2018-11-25 19:00:56
# @Last Modified: 2018-11-25 19:00:56
#
from LocalDatabase import get_schedule_connection, get_product_connection
import pandas as pd
import pdb
class M1:
def get_users(self):
conn = get_product_connection()
sql = "select user_id, first_large_buy_at from users where vip = 1 and first_large_buy_at > '2016-01-01' and deleted_at is null"
df = pd.read_sql(sql, conn, index_col = 'user_id')
df['count'] = 0
self._df = df
def count_consume(self):
conn = get_schedule_connection()
sql = "select student_id, created_at from student_appointments \
where status = 3 and deleted_at is null and created_at > \
'2016-01-01'"
with conn.cursor() as cur:
cur.execute(sql)
while cur.rownumber < cur.rowcount:
student, date = cur.fetchone()
if student in self._df.index:
a = date - self._df.loc[student, 'first_large_buy_at']
if a.days < 365:
self._df.loc[student,'count'] += 1
if cur.rownumber % 10000 == 0:
print(date)
        self._df.to_csv('user_consume.csv')
if __name__ == "__main__":
m = M1()
m.get_users()
m.count_consume()
|
[
"lionhe0119@hotmail.com"
] |
lionhe0119@hotmail.com
|
fd801218ea84ab029a1a97a8ee592cf3140712b6
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/CodeJamData/16/31/0.py
|
b3e9c53ea22fc72878d34f6b44630978001fd602
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 698
|
py
|
from sys import stdin
def getint():
return int(stdin.readline())
def getints():
return tuple(int(z) for z in stdin.readline().split())
for cn in range(1, 1 + getint()):
    n = getint()
    ps = getints()
    m1 = max((ps[i], i) for i in range(n))[1]
    m2 = max((ps[i], i) for i in range(n) if i != m1)[1]
    plan = []
    for k in range(ps[m1] - ps[m2]):
        plan.append([m1])
    for j in range(n):
        if j != m1 and j != m2:
            for z in range(ps[j]):
                plan.append([j])
    for j in range(ps[m2]):
        plan.append([m1, m2])
    print("Case #{}: {}".format(cn, " ".join("".join(chr(65 + i) for i in step) for step in plan)))
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
98201188154bbccad11c7feb8172dedeb36eaaed
|
e6cb78ce3575334a8dbc871371c04572ba47f20f
|
/0x01-python-if_else_loops_functions/101-remove_char_at.py
|
faf681782e69b2a646f204769c4488356fad4753
|
[] |
no_license
|
SoniaChevli/holbertonschool-higher_level_programming
|
c781903157136a550b151cac7975ac54015f4e45
|
0c210f6d18781bfe10fb8c87da7cc7177e58f46e
|
refs/heads/master
| 2020-03-09T13:07:48.416175
| 2018-09-07T18:09:16
| 2018-09-07T18:09:16
| 128,802,668
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
#!/usr/bin/python3
def remove_char_at(str, n):
copy = ''
for x in range(len(str)):
if (x != n):
copy += str[x]
return copy
|
[
"you@example.com"
] |
you@example.com
|
60096a85f40d711369a08c7b1b86636e2e7a41d9
|
13a66c5a9ff2490ee747a22dc6d264d7d5899278
|
/scripts/app_client.py
|
8a9b78ee34fb69a119e5d89e237e33ea6132f9da
|
[] |
no_license
|
sandhyazdodiya/pure_django_api_vs_drf
|
4b39e7974eb23baad3609c148b7983ed9b91e39d
|
4d59f5072bad988a20b6185e8e0672dbf7179109
|
refs/heads/main
| 2023-02-05T10:49:22.088156
| 2020-12-31T12:42:01
| 2020-12-31T12:42:01
| 322,474,346
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 646
|
py
|
import requests
import json
import MySQLdb
BASE_URL = "http://127.0.0.1:8000/"
ENDPOINT = "api/updates/"
def get_list():
r = requests.get(BASE_URL+ENDPOINT) # Get list
print(r.status_code)
data = r.json()
for obj in data:
if obj['id'] ==1:
r2 = requests.get(BASE_URL+ENDPOINT+str(obj["id"])) # get one data
print(r2.json())
return r.json()
def create_update():
new_data ={
"user" :1,
"content" : "Some new Update"
}
    r = requests.post(BASE_URL+ENDPOINT, data=new_data)
print(r.status_code)
return r.text
print(create_update())
# print(get_list())
|
[
"you@example.com"
] |
you@example.com
|
8d653e447ccd1bbc5219799f747b89e50269d731
|
b988ae0baa8c6a8098fadbe96a8a4ba7a0016f36
|
/social/backends/live.py
|
2884438fbc0955e10b4cfc8bc8c723ec050ee504
|
[
"Python-2.0",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
stephenmcd/python-social-auth
|
6cedefa282975ce6043632f8272c527cc738abdd
|
f70500ad966887a793ea17ed7b794e5526f828f1
|
refs/heads/master
| 2021-01-21T08:51:52.779136
| 2013-12-03T10:28:38
| 2013-12-03T10:28:38
| 14,885,186
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,257
|
py
|
"""
Live OAuth2 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/live.html
"""
from social.backends.oauth import BaseOAuth2
class LiveOAuth2(BaseOAuth2):
name = 'live'
AUTHORIZATION_URL = 'https://login.live.com/oauth20_authorize.srf'
ACCESS_TOKEN_URL = 'https://login.live.com/oauth20_token.srf'
ACCESS_TOKEN_METHOD = 'POST'
SCOPE_SEPARATOR = ','
DEFAULT_SCOPE = ['wl.basic', 'wl.emails']
EXTRA_DATA = [
('id', 'id'),
('access_token', 'access_token'),
('reset_token', 'reset_token'),
('expires', 'expires'),
('email', 'email'),
('first_name', 'first_name'),
('last_name', 'last_name'),
]
def get_user_details(self, response):
"""Return user details from Live Connect account"""
return {'username': response.get('name'),
'email': response.get('emails', {}).get('account', ''),
'first_name': response.get('first_name'),
'last_name': response.get('last_name')}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
return self.get_json('https://apis.live.net/v5.0/me', params={
'access_token': access_token
})
|
[
"matiasaguirre@gmail.com"
] |
matiasaguirre@gmail.com
|
2631a8e662e432ab59e792b069be027fbf8dd507
|
d41d18d3ea6edd2ec478b500386375a8693f1392
|
/plotly/validators/bar/_constraintext.py
|
930b959ee4bb09faf0e0d0634c33bccb98c98747
|
[
"MIT"
] |
permissive
|
miladrux/plotly.py
|
38921dd6618650d03be9891d6078e771ffccc99a
|
dbb79e43e2cc6c5762251537d24bad1dab930fff
|
refs/heads/master
| 2020-03-27T01:46:57.497871
| 2018-08-20T22:37:38
| 2018-08-20T22:37:38
| 145,742,203
| 1
| 0
|
MIT
| 2018-08-22T17:37:07
| 2018-08-22T17:37:07
| null |
UTF-8
|
Python
| false
| false
| 487
|
py
|
import _plotly_utils.basevalidators
class ConstraintextValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name='constraintext', parent_name='bar', **kwargs
):
super(ConstraintextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
role='info',
values=['inside', 'outside', 'both', 'none'],
**kwargs
)
|
[
"adam.kulidjian@gmail.com"
] |
adam.kulidjian@gmail.com
|
b9ceed3c8c034db5703e61e8a45a35719069f193
|
60d5ea4f007d49768d250ef394003f554003e4d0
|
/python/Backtracking/051.N-Queens.py
|
ee7c4ab3c9a52e343f670af51f54f6fa41acebf8
|
[] |
no_license
|
EvanJamesMG/Leetcode
|
dd7771beb119ea1250dbb3b147a09053298cd63b
|
fa638c7fda3802e9f4e0751a2c4c084edf09a441
|
refs/heads/master
| 2021-01-10T17:11:10.896393
| 2017-12-01T16:04:44
| 2017-12-01T16:04:44
| 46,968,756
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,899
|
py
|
'''
The n-queens puzzle is the problem of placing n queens on an n×n chessboard such that no two queens attack each other.
Given an integer n, return all distinct solutions to the n-queens puzzle.
Each solution contains a distinct board configuration of the n-queens' placement, where 'Q' and '.' both indicate a queen and an empty space respectively.
For example,
There exist two distinct solutions to the 4-queens puzzle:
[
[".Q..", // Solution 1
"...Q",
"Q...",
"..Q."],
["..Q.", // Solution 2
"Q...",
"...Q",
".Q.."]
]
'''
# Python version
'''
Problems of this kind are collectively called recursive backtracking problems, or depth-first
search (DFS) over a decision tree.
A key trick in the N-Queens problem is the board representation: a single array suffices.
For example, board = [1, 3, 0, 2] is one solution to the 4-queens puzzle, meaning: in row 0 the
queen is in column 1; in row 1, column 3; in row 2, column 0; and in row 3, column 2.
This problem uses a recursive solution; the next one is non-recursive. The check function tests
whether the queen in row k can be placed in column j.
'''
class Solution:
# @return a list of lists of string
def solveNQueens(self, n):
def check(k, j): # check if the kth queen can be put in column j!
for i in range(k):
if board[i]==j or abs(k-i)==abs(board[i]-j):
return False
return True
def dfs(depth, valuelist):
if depth==n: res.append(valuelist); return
for i in range(n):
if check(depth,i):
board[depth]=i
s='.'*n
dfs(depth+1, valuelist+[s[:i]+'Q'+s[i+1:]])
board=[-1 for i in range(n)]
res=[]
dfs(0,[])
return res
# Java version
'''
We split this problem into several smaller sub-problems:
1. the classic DFS recursion
2. validating whether a queen placement is legal
3. printing the resulting board
The benefit is that we never build a huge board up front. We use an array with one entry per row
of the board, where each value is the column number of the queen placed in that row.
For example, int[] {3,1,4,2} places queens at [1,3], [2,1], [3,4], [4,2]. A simple array thus
represents the whole board, the check inside isValid stays very concise, and board printing is
isolated on its own.
The dfs loop tries every placement in the current row, from the first column to the last. If a
placement passes isValid, we recurse into the next row via cur+1; if not, we try the next
position; if no position is valid, we backtrack to the previous level.
Using an int[] also means that moving a piece only requires changing a single number.
In isValid, the array is indexed by row, which already rules out two queens in one row (an int
cannot hold two values); that simplifies validation, leaving only columns and diagonals to check.
For columns, compare against the rows placed before the current one (only the earlier rows).
For diagonals, a conflict means width equals height, which gives
Math.abs(loc[i] - loc[cur]) == (cur - i).
Finally, loc[] records the solution (if one exists); we convert it to Strings and print the board.
'''
'''
public class Solution
{
public ArrayList<String[]> solveNQueens(int n)
{
ArrayList<String[]> res = new ArrayList<String[]>();
        int[] loc = new int[n]; // records which column the queen in each row occupies
dfs(res,loc,0,n);
return res;
}
public void dfs(ArrayList<String[]> res, int[] loc, int cur, int n)
{
if(cur==n)
printboard(res,loc,n);
else
{
for(int i=0;i<n;i++)
{
loc[cur] = i;
if(isValid(loc,cur))
dfs(res,loc,cur+1,n);
}
}
}
public boolean isValid(int[] loc, int cur)
{
        for(int i=0;i<cur;i++) // only need to check against the queens already placed
{
            if(loc[i]==loc[cur]||Math.abs(loc[i]-loc[cur])==(cur-i)) // column or diagonal conflict; on a diagonal,
                return false; // width equals height: Math.abs(loc[i] - loc[cur]) == (cur - i)
}
return true;
}
public void printboard(ArrayList<String[]> res, int[] loc, int n)
{
String[] ans = new String[n];
for(int i=0;i<n;i++)
{
String row = new String();
for(int j=0;j<n;j++)
{
if(j==loc[i]) row += "Q";
else row += ".";
}
ans[i] = row;
}
res.add(ans);
}
}
'''
|
[
"Evan123mg@gmail.com"
] |
Evan123mg@gmail.com
|
9c24ec6281f68a5650163c101e90e9fc247a1dcf
|
33edba95f6313b2f5003d5973a37ac1e5988442f
|
/src/users/SConstruct
|
9b1f195bae06f126f6515df699eb7be726cced63
|
[] |
no_license
|
faustus123/bdxReco
|
25a8883eb23f8bb6ee7f76050141fcc5ca8add79
|
c173906d248146df3279347d7ff8f60211b8b601
|
refs/heads/master
| 2020-12-24T12:40:05.523304
| 2016-02-03T16:07:52
| 2016-02-03T16:07:52
| 51,010,837
| 0
| 0
| null | 2016-02-03T16:07:53
| 2016-02-03T15:56:13
|
C++
|
UTF-8
|
Python
| false
| false
| 565
|
from utils import *
Import('env')
users_names = getSubdirs('./')
for user in users_names :
plugins = getSubdirs(str(user))
for plugin in plugins:
        print(bcolors.OKGREEN, "Found plugin ", plugin, " for user ", user, bcolors.ENDC)
        #We do not want to increment "ad libitum" CPPPATH
tmp_cpppath=env['CPPPATH'];
dir = str(user)+'/'+str(plugin)
user_plugin_src = Glob(dir+'/*.cc');
env.Append(CPPPATH=dir)
env.SharedLibrary(source=user_plugin_src,target='#/lib/users/'+user+'_'+plugin,SHLIBPREFIX='',SHLIBSUFFIX='.so')
env.Replace(CPPPATH = tmp_cpppath)
|
[
"andrea.celentano@ge.infn.it"
] |
andrea.celentano@ge.infn.it
|
|
80c552dd0deb638466da67214246f640e2852f73
|
1d7eec692553afc411ec1e7325634f71a2aed291
|
/backend/core/apps.py
|
348e92f897b24984f69c0822f7848e13ccd76b1e
|
[] |
no_license
|
Andy-Nkumane/Tilde
|
a41a2a65b3901b92263ae94d527de403f59a5caf
|
80de97edaf99f4831ca8cb989b93e3be5e09fdd6
|
refs/heads/develop
| 2023-05-09T10:02:41.240517
| 2021-05-28T09:20:51
| 2021-05-28T09:20:51
| 299,501,586
| 0
| 0
| null | 2020-10-25T22:37:30
| 2020-09-29T04:10:48
|
Python
|
UTF-8
|
Python
| false
| false
| 164
|
py
|
from django.apps import AppConfig
class CoreConfig(AppConfig):
name = "core"
def ready(self):
# register the signals
import core.signals
|
[
"sheena.oconnell@gmail.com"
] |
sheena.oconnell@gmail.com
|
5c0cb0db96cc9683da39a17d57d2fd157c236521
|
050be63ad4e88890954756fd1a06f93a7993f732
|
/backend/lizz_jun18_mob4_dev_6256/wsgi.py
|
f43e047ac58a377ae0ced3d71b1e6d9d63b0bcea
|
[] |
no_license
|
crowdbotics-apps/lizz-jun18-mob4-dev-6256
|
46cb7ed037b5e223b151e799db3aa2891257f407
|
4a889dc560f2d463e437978d665d0b00a7b4c87f
|
refs/heads/master
| 2022-11-04T23:40:05.810141
| 2020-06-19T00:15:09
| 2020-06-19T00:15:09
| 273,366,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
"""
WSGI config for lizz_jun18_mob4_dev_6256 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lizz_jun18_mob4_dev_6256.settings')
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
faf352b930625907a784008bebc86e45147b6b82
|
0ca21d1d60bb9dbe70fb55b7ebace0510695d489
|
/examples/youtube_banner.py
|
5c280f0137305a832cf9912b4b0e9c0d10e8f3f5
|
[
"Apache-2.0"
] |
permissive
|
colinmford/coldtype
|
fa5cc65ac318ee22138857ea60d964ca4ecb8267
|
8462dbd5f65f3ef8f3cbc8662a866b7e20ec5985
|
refs/heads/main
| 2023-07-02T12:34:18.700566
| 2021-08-06T03:40:21
| 2021-08-06T03:40:21
| 393,237,997
| 0
| 0
|
Apache-2.0
| 2021-08-06T03:35:41
| 2021-08-06T03:35:40
| null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
from coldtype import *
from coldtype.fx.skia import phototype
obv = Font.Cacheable("assets/ColdtypeObviously-VF.ttf")
logos = raw_ufo("assets/logos.ufo")
@renderable((2048, 1152))
def banner(r):
return (DATPens([
DATPen().rect(r).f(hsl(0.65)),
(StyledString("COLDTYPE",
Style(obv, 300, wdth=1, tu=-90, r=1, rotate=-10))
.pens()
.f(1)
.understroke(sw=35)
.align(r)
.translate(0, 5)
.ch(phototype(r, blur=5, cut=200, cutw=10)))]))
|
[
"rob.stenson@gmail.com"
] |
rob.stenson@gmail.com
|
b0b33db89eb9adc6024895b5acb3afee09f7ccec
|
3bbdcdfa6ee6631bea52dd651914cb898062b870
|
/base_standard_profiler/cprofile_basemark4.py
|
d2538ef458d9e697088f2c64b1f273964a85926a
|
[] |
no_license
|
xiaoyeren/python_high_performance
|
55ea5ee9f628e1c1155a6946274c862bda57ae2c
|
496a5e55e7f40033c80e9ee3b9190c184d4701d9
|
refs/heads/master
| 2020-05-24T15:20:00.576786
| 2019-05-13T06:01:35
| 2019-05-13T06:01:35
| 187,329,222
| 1
| 0
| null | 2019-05-18T07:43:11
| 2019-05-18T07:43:11
| null |
UTF-8
|
Python
| false
| false
| 1,653
|
py
|
# -*- coding: utf-8 -*-
'''
Created on 2019-04-08 21:21:00
Zhukun Luo
Jiangxi University of Finance and Economics
'''
#memory profiler
from app import ParticleSimulator,particle
from random import uniform
from memory_profiler import profile
import sys
def test_evolve():
particles=[particle(0.3,0.5,1),particle(0.0,-0.5,-1),particle(-0.1,-0.4,3)]
simulator=ParticleSimulator(particles)
simulator.evolve(0.1)
p0,p1,p2=particles
def fequal(a,b,eps=1e-5):
return abs(a-b)<eps
assert fequal(p0.x, 0.210269,eps=1e-5)
assert fequal(p0.y,0.543863,eps=1e-5)
assert fequal(p1.x, -0.099334)
assert fequal(p1.y,-0.490034)
assert fequal(p2.x, 0.191358)
assert fequal(p2.y, -0.365227)
@profile
def benchmark():
    particles=[particle(uniform(-1.0,1.0),uniform(-1.0,1.0),uniform(-1.0,1.0)) for i in range(1000)] # generate 1000 random test cases
simulator=ParticleSimulator(particles)
simulator.evolve(0.1)
if __name__ == "__main__":
# test_evolve()
benchmark()
'''
Filename: /home/agnostic/git/python_high_performance/base_standard_profiler/cprofile_basemark4.py
Line # Mem usage Increment Line Contents
================================================
26 70.3 MiB 70.3 MiB @profile
27 def benchmark():
28 70.4 MiB 0.2 MiB particles=[particle(uniform(-1.0,1.0),uniform(-1.0,1.0),uniform(-1.0,1.0)) for i in range(1000)] # generate 1000 random test cases
29 70.4 MiB 0.0 MiB simulator=ParticleSimulator(particles)
30 70.4 MiB 0.0 MiB simulator.evolve(0.1)
'''
|
[
"luozhukun@163.com"
] |
luozhukun@163.com
|
afb1396453b05ec538e23fc9b5de341ed7ce174a
|
6bd4eff3c0ca5b2dd575dbee2b0a02a898f642d6
|
/爬虫代码/selenium_study/5.py
|
c2a473f4c6da7b44b74892a34249d1757934f0aa
|
[] |
no_license
|
marvinlizhaorui/youshu
|
620a9820c6bb4b4729b9a1cbccf4d426afde110e
|
bf62387ffe564668681b8a514b98d539b7290b4c
|
refs/heads/master
| 2020-04-09T07:39:27.525808
| 2018-09-27T13:05:40
| 2018-09-27T13:05:40
| 160,164,943
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,717
|
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2018/7/6 17:32
# Project:
# @Author: ZQJ
# @Email : zihe@yscredit.com
# Delayed waits
# In Selenium, get() returns once the page frame has finished loading; if you read page_source at that point, it may not be the fully loaded page.
# Some pages issue extra Ajax requests whose results may not yet be in the page source, so we wait a while to make sure the target nodes have loaded.
from selenium import webdriver
# Implicit waits
# If Selenium does not find a node in the DOM, it keeps retrying until the configured timeout, then raises a no-such-element exception; the default timeout is 0.
browser = webdriver.Chrome()
browser.implicitly_wait(10) # implicit wait of 10 seconds
browser.get('https://www.zhihu.com/explore')
inputs = browser.find_element_by_class_name('zu-top-add-question')
print(inputs)
browser.close()
# Explicit waits
# Specify the node to look for plus a maximum wait time; if the node loads within that time it is returned, otherwise an exception is raised.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
browser = webdriver.Chrome()
browser.get('https://www.taobao.com/')
wait = WebDriverWait(browser,10)
inputs = wait.until(EC.presence_of_all_elements_located((By.ID,'q')))
button = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,'.btn-search')))
print(inputs,button)
# There are many more wait conditions, such as checking the title text or whether a node contains certain text; see the table on p. 259.
|
[
"30498473+zhaoqiji@users.noreply.github.com"
] |
30498473+zhaoqiji@users.noreply.github.com
|
2404ebd5a62590faa092657e82cf1301d404f999
|
8db1b9528ace3ce142ea52da1017eb6359f46967
|
/py1811/company/employees.py
|
558c8b144ea76cb40a9f65e4fb51a1eb1e7a5345
|
[] |
no_license
|
ul8ksgdmy/myPythonlearning
|
01fbd49d227e2b3095250e7ee40768fa3179f26e
|
2ad0d3525cd505edba3fd37e21f601a32872259b
|
refs/heads/master
| 2020-03-31T14:33:13.448891
| 2019-01-09T18:13:26
| 2019-01-09T18:13:26
| 152,299,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,594
|
py
|
class Staff:
def __init__(self, empid, name, deptname, gender, tech, age):
self.__empid = empid
self.__name = name
self.__deptname = deptname
self.__gender = gender
self.__tech = tech
self.__age = age
@property
def empid(self):
return self.__empid
@empid.setter
def empid(self, value):
self.__empid = value
@property
def name(self):
return self.__name
@name.setter
def name(self, value):
self.__name = value
@property
def deptname(self):
return self.__deptname
@deptname.setter
def deptname(self, value):
self.__deptname = value
@property
def gender(self):
return self.__gender
@gender.setter
def gender(self, value):
self.__gender = value
@property
def tech(self):
return self.__tech
@tech.setter
def tech(self, value):
self.__tech = value
@property
def age(self):
return self.__age
@age.setter
def age(self, value):
self.__age = value
    def __str__(self):
        msg = '%s %s %s %s %s %s' % (self.__empid, self.__name, self.__deptname, self.__gender, self.__tech, self.__age)
        return msg
    def printbio(self):
        if self.__gender == 'M':
            print('Employee %s is %s years old and male.' % (self.__name, self.age))
        else:
            print('Employee %s is %s years old and female.' % (self.__name, self.age))
e = Staff('3', 'Ernie', 'Sales', 'M', 'UNIX, Perl', '23')
e.printbio()
|
[
"clickmy7@gmail.com"
] |
clickmy7@gmail.com
|
27952a77379562f88c4995946dc5c424979e9996
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/customerinsights/v20170101/hub.py
|
e8e2e5c389f8388281f251dbf8d15ef4de29ea61
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773
| 2020-11-06T22:39:06
| 2020-11-06T22:39:06
| 312,993,761
| 0
| 0
|
Apache-2.0
| 2023-06-02T06:47:28
| 2020-11-15T09:04:00
| null |
UTF-8
|
Python
| false
| false
| 6,944
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Hub']
class Hub(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
hub_billing_info: Optional[pulumi.Input[pulumi.InputType['HubBillingInfoFormatArgs']]] = None,
hub_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tenant_features: Optional[pulumi.Input[int]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Hub resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['HubBillingInfoFormatArgs']] hub_billing_info: Billing settings of the hub.
:param pulumi.Input[str] hub_name: The name of the Hub.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[int] tenant_features: The bit flags for enabled hub features. Bit 0 is set to 1 indicates graph is enabled, or disabled if set to 0. Bit 1 is set to 1 indicates the hub is disabled, or enabled if set to 0.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['hub_billing_info'] = hub_billing_info
if hub_name is None:
raise TypeError("Missing required property 'hub_name'")
__props__['hub_name'] = hub_name
__props__['location'] = location
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['tenant_features'] = tenant_features
__props__['api_endpoint'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
__props__['web_endpoint'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:customerinsights/latest:Hub"), pulumi.Alias(type_="azure-nextgen:customerinsights/v20170426:Hub")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Hub, __self__).__init__(
'azure-nextgen:customerinsights/v20170101:Hub',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Hub':
"""
Get an existing Hub resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Hub(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="apiEndpoint")
def api_endpoint(self) -> pulumi.Output[str]:
"""
API endpoint URL of the hub.
"""
return pulumi.get(self, "api_endpoint")
@property
@pulumi.getter(name="hubBillingInfo")
def hub_billing_info(self) -> pulumi.Output[Optional['outputs.HubBillingInfoFormatResponse']]:
"""
Billing settings of the hub.
"""
return pulumi.get(self, "hub_billing_info")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Provisioning state of the hub.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tenantFeatures")
def tenant_features(self) -> pulumi.Output[Optional[int]]:
"""
The bit flags for enabled hub features. Bit 0 is set to 1 indicates graph is enabled, or disabled if set to 0. Bit 1 is set to 1 indicates the hub is disabled, or enabled if set to 0.
"""
return pulumi.get(self, "tenant_features")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="webEndpoint")
def web_endpoint(self) -> pulumi.Output[str]:
"""
Web endpoint URL of the hub.
"""
return pulumi.get(self, "web_endpoint")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
3b67bff3e9866cdc68fd19f374d328849aa0c7eb
|
accb0a012d257731a98376cbc5a10a279c91cfbd
|
/euler/solutions/euler43.py
|
998756e0b04353988b0b697948538ef291856916
|
[] |
no_license
|
Danang691/Euler.Py
|
d52ac436d399a46553be0d9bd24736477a3d2cb0
|
f8a198f5b06fd55388e8833108fbeed11e9b8d8b
|
refs/heads/master
| 2022-01-08T00:32:06.724008
| 2019-01-27T22:33:40
| 2019-01-27T22:33:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,096
|
py
|
# -*- coding: utf-8 -*-
from euler.baseeuler import BaseEuler
from itertools import permutations as perms
"""
1. there are only 3,628,800 (10!) 0 to 9 pandigital numbers, so there's no need
to examine all 10 billion 10-digit numbers.
2. consider only numbers whose last 3 digits divide by 17
3. now only check the permutations of the 7 remaining digits
4. this can be reduced further by considering the division by 13 and so on
"""
class Euler(BaseEuler):
def __init__(self):
self._F1 = lambda s: int(s[1:4]) % 2 == 0
self._F2 = lambda s: int(s[2:5]) % 3 == 0
self._F3 = lambda s: int(s[3:6]) % 5 == 0
self._F4 = lambda s: int(s[4:7]) % 7 == 0
self._F5 = lambda s: int(s[5:8]) % 11 == 0
self._F6 = lambda s: int(s[6:9]) % 13 == 0
self._F7 = lambda s: int(s[7:]) % 17 == 0
def solve(self):
t = list(perms('0123456789', 10))
p = [''.join(s) for s in t]
l = [i for i in p if self._F7(i)]
l = [i for i in l if self._F6(i)]
l = [i for i in l if self._F5(i)]
l = [i for i in l if self._F4(i)]
l = [i for i in l if self._F3(i)]
l = [i for i in l if self._F2(i)]
l = [i for i in l if self._F1(i)]
return sum([int(i) for i in l])
@property
def answer(self):
return ('The sum of all 0 to 9 pandigital numbers with this ' +
'property is %d.' % self.solve())
@property
def problem(self):
return '''
Project Euler Problem 43
The number, 1406357289, is a 0 to 9 pandigital number because it is made up of
each of the digits 0 to 9 in some order, but it also has a rather interesting
sub-string divisibility property.
Let d1 be the 1st digit, d2 be the 2nd digit, and so on. In this way, we note
the following:
- d2d3d4=406 is divisible by 2
- d3d4d5=063 is divisible by 3
- d4d5d6=635 is divisible by 5
- d5d6d7=357 is divisible by 7
- d6d7d8=572 is divisible by 11
- d7d8d9=728 is divisible by 13
- d8d9d10=289 is divisible by 17
Find the sum of all 0 to 9 pandigital numbers with this property.
'''
|
[
"jgorauskas@gmail.com"
] |
jgorauskas@gmail.com
|
a88d08e13d37cfdd2c1dc346f7a68574139545b7
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/sdssj_202550.42+133040.7/sdB_SDSSJ_202550.42+133040.7_lc.py
|
3380ae095c4ebc5b68cf82334e179ac3fe41df68
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[306.460083,13.511306], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_SDSSJ_202550.42+133040.7 /sdB_SDSSJ_202550.42+133040.7_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
2b6d28c56de1c130b329b37f364f6eea788b0a12
|
c1f09426670b5efe35956acd19c67a2de72af284
|
/python/8.web/2.Django/mysql_demo/mysql_demo/settings.py
|
2d96898508b90792184d03bf68d39719647d02c2
|
[
"Apache-2.0"
] |
permissive
|
keasyops/BaseCode
|
388218d89d60b958c1fcc50eb15f29eafabaea1f
|
0255f498e1fe67ed2b3f66c84c96e44ef1f7d320
|
refs/heads/master
| 2023-05-08T05:08:39.754170
| 2021-05-26T10:48:01
| 2021-05-26T10:48:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,191
|
py
|
"""
Django settings for mysql_demo project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/zh-hans/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/zh-hans/2.2/ref/settings
"""
import os
# BASE_DIR: absolute path of the current project
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/zh-hans/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o96t+^%_!-@qlgo*0qqr%)2gca9s(l-!ihz_k252hol-ct_r#+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
"userapp",
]
# Middleware: https://docs.djangoproject.com/zh-hans/2.2/topics/http/middleware/
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysql_demo.urls'
# https://docs.djangoproject.com/zh-hans/2.2/ref/settings/#templates
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],  # absolute path of the template files
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysql_demo.wsgi.application'
# Database
# https://docs.djangoproject.com/zh-hans/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
        'NAME': 'django',  # which database to use
        'USER': 'root',  # MySQL username
        'PASSWORD': 'dntdnt',  # password for that user
        'HOST': '127.0.0.1',  # IP address of the database server
        'PORT': 3306,  # corresponding port
# https://docs.djangoproject.com/en/2.2/ref/settings/#std:setting-OPTIONS
'OPTIONS': {
# https://docs.djangoproject.com/en/2.2/ref/settings/#autocommit
# 'AUTOCOMMIT': False,
# https://docs.djangoproject.com/zh-hans/2.2/ref/databases/#setting-sql-mode
            # For SQL mode, see my earlier article: https://www.cnblogs.com/dotnetcrazy/p/10374091.html
            'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",  # set the SQL mode
},
}
}
# Password validation
# https://docs.djangoproject.com/zh-hans/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/zh-hans/2.2/topics/i18n/
# Use Chinese (mnemonic: zh-hans ==> zh-汉'字')
LANGUAGE_CODE = 'zh-hans'
# Use China Standard Time
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/zh-hans/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"39723758+lotapp@users.noreply.github.com"
] |
39723758+lotapp@users.noreply.github.com
|
a1f62bcfd6d251a8de4cba3fe7fae4c1151067fd
|
09e5cfe06e437989a2ccf2aeecb9c73eb998a36c
|
/base/lib/python2.7/site-packages/mrcfile/gzipmrcfile.py
|
8fd97b41138e1bcef75c9b61cb7c4068055225b7
|
[
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-free-unknown",
"Python-2.0",
"BSD-3-Clause"
] |
permissive
|
jorgediazjr/dials-dev20191018
|
b81b19653624cee39207b7cefb8dfcb2e99b79eb
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
refs/heads/master
| 2020-08-21T02:48:54.719532
| 2020-01-25T01:41:37
| 2020-01-25T01:41:37
| 216,089,955
| 0
| 1
|
BSD-3-Clause
| 2020-01-25T01:41:39
| 2019-10-18T19:03:17
|
Python
|
UTF-8
|
Python
| false
| false
| 2,723
|
py
|
# Copyright (c) 2016, Science and Technology Facilities Council
# This software is distributed under a BSD licence. See LICENSE.txt.
"""
gzipmrcfile
-----------
Module which exports the :class:`GzipMrcFile` class.
Classes:
:class:`GzipMrcFile`: An object which represents a gzipped MRC file.
"""
# Import Python 3 features for future-proofing
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import gzip
from .mrcfile import MrcFile
class GzipMrcFile(MrcFile):
""":class:`~mrcfile.mrcfile.MrcFile` subclass for handling gzipped files.
Usage is the same as for :class:`~mrcfile.mrcfile.MrcFile`.
"""
def __repr__(self):
return "GzipMrcFile('{0}', mode='{1}')".format(self._fileobj.name,
self._mode)
def _open_file(self, name):
"""Override _open_file() to open both normal and gzip files."""
self._fileobj = open(name, self._mode + 'b')
self._iostream = gzip.GzipFile(fileobj=self._fileobj, mode='rb')
def _close_file(self):
"""Override _close_file() to close both normal and gzip files."""
self._iostream.close()
self._fileobj.close()
def _read(self, header_only=False):
"""Override _read() to ensure gzip file is in read mode."""
self._ensure_readable_gzip_stream()
super(GzipMrcFile, self)._read(header_only)
def _ensure_readable_gzip_stream(self):
"""Make sure _iostream is a gzip stream that can be read."""
if self._iostream.mode != gzip.READ:
self._iostream.close()
self._fileobj.seek(0)
self._iostream = gzip.GzipFile(fileobj=self._fileobj, mode='rb')
def _get_file_size(self):
"""Override _get_file_size() to avoid seeking from end."""
self._ensure_readable_gzip_stream()
pos = self._iostream.tell()
extra = len(self._iostream.read())
return pos + extra
def flush(self):
"""Override :meth:`~mrcfile.mrcinterpreter.MrcInterpreter.flush` since
GzipFile objects need special handling.
"""
if not self._read_only:
self._iostream.close()
self._fileobj.seek(0)
self._iostream = gzip.GzipFile(fileobj=self._fileobj, mode='wb')
# Arrays converted to bytes so gzip can calculate sizes correctly
self._iostream.write(self.header.tobytes())
self._iostream.write(self.extended_header.tobytes())
self._iostream.write(self.data.tobytes())
self._iostream.flush()
self._fileobj.truncate()
|
[
"jorge7soccer@gmail.com"
] |
jorge7soccer@gmail.com
|
8b96e0486d3ae2eff2814c19873fad1b007d1395
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_1_2/sproctor/rank_and_file.py
|
b7de93089153574c7d0d13e2b4e4ebb4fcfe08eb
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 863
|
py
|
#!/usr/bin/python
import sys
maxHeight = 2500
def solve(heights):
missingList = []
for i in range(maxHeight + 1):
if heights[i] % 2 == 1:
missingList.append(str(i))
return missingList
def main():
filename = sys.argv[1]
filehandle = open(filename, 'r')
lines = filehandle.readlines()
numberOfTests = int(lines.pop(0))
for i in range(numberOfTests):
n = int(lines.pop(0))
heights = dict([(x, 0) for x in range(maxHeight + 1)])
for j in range(n * 2 - 1):
line = lines.pop(0)
heightStrings = line.split(' ')
for k in range(n):
heights[int(heightStrings[k])] += 1
missingList = solve(heights)
print "Case #" + str(i + 1) + ": " + " ".join(missingList)
if __name__ == "__main__":
main()
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
14bf94956faba485bce2be72addcd37cd6573471
|
739e41d4f24f79c772d266cded0de9b759c6e953
|
/venv/lib/python3.6/site-packages/nlp/datasets/docred/24f714c390b57d5e0642c513b1e98b62e281a8f9d2f875ac1d3e103b07de3cee/docred.py
|
3fbcbd2045e9177caa4d92aed978cc11bf9ed9ae
|
[
"MIT"
] |
permissive
|
MachineLearningBCAM/Minimax-risk-classifiers-NeurIPS-2020
|
24b7bbdecf459292f8b58be286feab3b9aa341ba
|
82586c632268c103de269bcbffa5f7849b174a29
|
refs/heads/main
| 2023-05-18T15:41:13.495286
| 2021-06-11T18:21:35
| 2021-06-11T18:21:35
| 304,268,819
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,257
|
py
|
"""DocRED: A Large-Scale Document-Level Relation Extraction Dataset"""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
_CITATION = """\
@inproceedings{yao2019DocRED,
title={{DocRED}: A Large-Scale Document-Level Relation Extraction Dataset},
author={Yao, Yuan and Ye, Deming and Li, Peng and Han, Xu and Lin, Yankai and Liu, Zhenghao and Liu, \
Zhiyuan and Huang, Lixin and Zhou, Jie and Sun, Maosong},
booktitle={Proceedings of ACL 2019},
year={2019}
}
"""
_DESCRIPTION = """\
Multiple entities in a document generally exhibit complex inter-sentence relations, and cannot be well handled by \
existing relation extraction (RE) methods that typically focus on extracting intra-sentence relations for single \
entity pairs. In order to accelerate the research on document-level RE, we introduce DocRED, a new dataset constructed \
from Wikipedia and Wikidata with three features:
- DocRED annotates both named entities and relations, and is the largest human-annotated dataset for document-level RE from plain text.
- DocRED requires reading multiple sentences in a document to extract entities and infer their relations by synthesizing all information of the document.
- Along with the human-annotated data, we also offer large-scale distantly supervised data, which enables DocRED to be adopted for both supervised and weakly supervised scenarios.
"""
_URLS = {
"dev": "https://drive.google.com/uc?export=download&id=1fDmfUUo5G7gfaoqWWvK81u08m71TK2g7",
"train_distant": "https://drive.google.com/uc?export=download&id=1fDmfUUo5G7gfaoqWWvK81u08m71TK2g7",
"train_annotated": "https://drive.google.com/uc?export=download&id=1NN33RzyETbanw4Dg2sRrhckhWpzuBQS9",
"test": "https://drive.google.com/uc?export=download&id=1lAVDcD94Sigx7gR3jTfStI66o86cflum",
"rel_info": "https://drive.google.com/uc?id=1y9A0zKrvETc1ddUFuFhBg3Xfr7FEL4dW&export=download",
}
class DocRed(nlp.GeneratorBasedBuilder):
"""DocRED: A Large-Scale Document-Level Relation Extraction Dataset"""
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"title": nlp.Value("string"),
"sents": nlp.features.Sequence(nlp.features.Sequence(nlp.Value("string"))),
"vertexSet": nlp.features.Sequence(nlp.features.Sequence({
"name": nlp.Value("string"),
"sent_id": nlp.Value("int32"),
"pos": nlp.features.Sequence(nlp.Value("int32")),
"type": nlp.Value("string"),
})),
"labels": nlp.features.Sequence(
{
"head": nlp.Value("int32"),
"tail": nlp.Value("int32"),
"relation_id": nlp.Value("string"),
"relation_text": nlp.Value("string"),
"evidence": nlp.features.Sequence(nlp.Value("int32")),
}
),
}
),
supervised_keys=None,
homepage="https://github.com/thunlp/DocRED",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
downloads = {}
for key in _URLS.keys():
downloads[key] = dl_manager.download_and_extract(_URLS[key])
# Fix for dummy data
if os.path.isdir(downloads[key]):
downloads[key] = os.path.join(downloads[key], key + ".json")
return [
nlp.SplitGenerator(
name=nlp.Split.VALIDATION, gen_kwargs={"filepath": downloads["dev"], "rel_info": downloads["rel_info"]}
),
nlp.SplitGenerator(
name=nlp.Split.TEST, gen_kwargs={"filepath": downloads["test"], "rel_info": downloads["rel_info"]}
),
nlp.SplitGenerator(
name="train_annotated",
gen_kwargs={"filepath": downloads["train_annotated"], "rel_info": downloads["rel_info"]},
),
nlp.SplitGenerator(
name="train_distant",
gen_kwargs={"filepath": downloads["train_distant"], "rel_info": downloads["rel_info"]},
),
]
def _generate_examples(self, filepath, rel_info):
"""Generate DocRED examples."""
relation_name_map = json.load(open(rel_info))
data = json.load(open(filepath))
for idx, example in enumerate(data):
# Test set has no labels - Results need to be uploaded to Codalab
if "labels" not in example.keys():
example["labels"] = []
for label in example["labels"]:
# Rename and include full relation names
label["relation_text"] = relation_name_map[label["r"]]
label["relation_id"] = label["r"]
label["head"] = label["h"]
label["tail"] = label["t"]
del label["r"]
del label["h"]
del label["t"]
yield idx, example
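# Usage sketch (not part of the dataset script itself): once this builder is
# registered with the `nlp` library, the splits declared in _split_generators
# above — including the custom "train_annotated"/"train_distant" splits — can
# be loaded with nlp.load_dataset:
#
# import nlp
# docred = nlp.load_dataset("docred")
# example = docred["train_annotated"][0]
# print(example["title"], len(example["sents"]))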
|
[
"adiaz@bcamath.org"
] |
adiaz@bcamath.org
|
10805b6505117c9dbc083c4c4ea2311f0f7b4543
|
5235a27898cdaeab012c447c50076e7e4641dbf7
|
/code/diagnosis/grtest.py
|
5b0fabaa9066f506ce45528174962a0401846b6a
|
[
"MIT"
] |
permissive
|
mjvakili/gambly
|
0a9ab638a5be987efb683acdc8282a8c09880532
|
611765bc42d8c42d76558b486c4025532155036a
|
refs/heads/master
| 2020-04-10T05:32:55.171621
| 2020-03-22T18:51:10
| 2020-03-22T18:51:10
| 50,690,345
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,006
|
py
|
'''
implementation of Gelman-Rubin convergence test
'''
import numpy as np
def single_parameter_gr_test(chains):
"""
inputs:
chains : MCMC samples for one parameter.
shape = (nwalkers , niters)
returns:
potential scale reduction factor
and the variance of the distribution
"""
nwalkers = chains.shape[0]
niters = chains.shape[1]
#Discarding the first half of draws:
chains = chains[: , niters/2:]
nwalkers , niters = chains.shape[0] , chains.shape[1]
#Calculating the within-chain variance:
W = np.mean(np.var(chains, axis=1))
#Calculating the between-chain variance:
chains_means = np.mean(chains, axis=1)
mean_of_chains_means = np.mean(chains_means)
B = (niters/(nwalkers-1.0)) * np.sum((chains_means - mean_of_chains_means)**2.)
# Estimating the variance of distribution:
V = (1. - 1./niters) * W + (1./niters) * B
# Calculating the potential scale reduction factor:
R = np.sqrt(V/W)
return R , V
def gr_test(sample , nwalkers , nburnins , npars):
"""
inputs:
sample = an emcee sample
nwalkers = number of walkers
returns:
Rs = npar-dimensional vector of the
potential scale reduction factors
Vs = npar-dimensional vector of the
variances
"""
#npar = len(sample[0])
niters = len(sample)
chain_ensemble = sample.reshape(niters , nwalkers , npars)
chain_ensemble = chain_ensemble[nburnins: , :]
Rs = np.zeros((npars))
Vs = np.zeros((npars))
for i in range(npars):
chains = chain_ensemble[ : , : , i].T
Rs[i] = single_parameter_gr_test(chains)[0]
Vs[i] = single_parameter_gr_test(chains)[1]
return Rs , Vs
if __name__ == "__main__":
###########NOTE: This part is provided by the user#########
import h5py
sample = h5py.File("emcee2.hdf5")["k"]
nwalkers = 6
nburnins = 1700
npars = 3
print gr_test(sample , nwalkers , nburnins , npars)
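# Sanity-check sketch (hypothetical data, separate from the hdf5 usage above):
# chains drawn from the same stationary distribution should give R close to 1.
#
# chains = np.random.randn(4, 1000) # 4 well-mixed walkers
# R, V = single_parameter_gr_test(chains)
# # R ~ 1.0 for converged chains; values well above 1 signal non-convergence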
|
[
"mjvakili@nyu.edu"
] |
mjvakili@nyu.edu
|
1a35b54134a858ba2ad3e718170f76012f9d65b9
|
0958db6bf636722685f33501408b3e28a45b2b14
|
/basic/key_event/key_event2.py
|
39ce162819423ee6e70a5b0378eace2fcacbefd3
|
[
"MIT"
] |
permissive
|
KeitaIto123/pygame
|
eee68fe59f7bf3eb408ff98737150a7d5fe258fc
|
cf5ba2331dc6b4a930f9c3dacbaa7954f51498db
|
refs/heads/master
| 2020-03-26T10:57:52.613169
| 2017-03-12T14:06:35
| 2017-03-12T14:06:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,056
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygame
from pygame.locals import *
import sys
SCREEN_SIZE = (640, 480)
pygame.init()
screen = pygame.display.set_mode(SCREEN_SIZE)
pygame.display.set_caption(u"Key Event 2")
img = pygame.image.load("python.png").convert_alpha()
img_rect = img.get_rect()
img_rect.center = (320, 240)
vx = vy = 10  # distance to move per key press
while True:
    # check which keys are currently pressed
pressed_keys = pygame.key.get_pressed()
    # move the image according to the pressed keys
if pressed_keys[K_LEFT]:
img_rect.move_ip(-vx, 0)
if pressed_keys[K_RIGHT]:
img_rect.move_ip(vx, 0)
if pressed_keys[K_UP]:
img_rect.move_ip(0, -vy)
if pressed_keys[K_DOWN]:
img_rect.move_ip(0, vy)
screen.fill((0,0,255))
screen.blit(img, img_rect)
pygame.display.update()
for event in pygame.event.get():
if event.type == QUIT: sys.exit()
if event.type == KEYDOWN and event.key == K_ESCAPE: sys.exit()
|
[
"f2forest@gmail.com"
] |
f2forest@gmail.com
|
1fbe9ed32949b7b2644e5a348f35cba67707f693
|
cd40fd66338bab16c3cac360ec68d0410daf85dc
|
/asyncio_study/factorial.py
|
1646369d3db811c34c538e76b12a6d84dc7cc229
|
[] |
no_license
|
suhjohn/Asyncio-Study
|
c74a95c37d6ce1d0983b5626a4f68d2b80d7ec79
|
d9c5a092924a32f18849787fd30cb322a0ff8b15
|
refs/heads/master
| 2021-05-12T12:28:15.749447
| 2018-01-14T17:25:22
| 2018-01-14T17:25:22
| 117,414,697
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
import asyncio
async def factorial(number):
result = 1
for i in range(2, number+1):
print(f"Task for {number}: Compute factorial: {i}")
await asyncio.sleep(1)
result *= i
print(f'Task for {number} finished. Factorial({number}) = {result}')
loop = asyncio.get_event_loop()
'''
Method using async.gather in https://docs.python.org/3/library/asyncio-task.html#future
loop.run_until_complete(asyncio.gather(
factorial(2),
factorial(3),
factorial(4),
))
loop.close()
'''
tasks = [loop.create_task(factorial(i)) for i in range(2, 5)]
wait_tasks = asyncio.wait(tasks)
loop.run_until_complete(wait_tasks)
loop.close()
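# Note (sketch, Python 3.7+): the event-loop boilerplate above can be replaced
# with asyncio.run and asyncio.gather, e.g.
#
# async def main():
#     await asyncio.gather(*(factorial(i) for i in range(2, 5)))
#
# asyncio.run(main())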
|
[
"johnsuh94@gmail.com"
] |
johnsuh94@gmail.com
|
be668b7d094dabdba76537bfc6f3d63deed2fd52
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/YGhrwfg6k6zHnmeDh_6.py
|
325649a3d1cc40c3f3f3aee4f24b5ae109e7c2e6
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
def get_discounts(nums, d):
discount = int(d.strip("%")) / 100
new_price = []
for x in nums:
new_price.append(x * discount)
return new_price
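# Example sketch (behavior as implemented: the returned values are the
# discount amounts, not the reduced prices):
#
# get_discounts([2, 4, 6], "50%") # -> [1.0, 2.0, 3.0]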
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
916f0b24c134a8b15a9019a7392d2f69d9481bed
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p00005/s108100200.py
|
51c7c84132f7cb691304ec7ee31bbe534e088dfc
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
def gcd( a, b ):
while b > 0:
a, b = b, a%b
return a
def lcm( a, b ):
return a*b/gcd( a, b )
for s in sys.stdin:
d = map(int, s.split())
a,b = d[0],d[1]
print gcd(a,b),lcm(a,b)
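# Example sketch: feeding the line "4 6" on stdin prints "2 12",
# since gcd(4, 6) == 2 and lcm(4, 6) == 12.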
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
f626a5fe63505768adc81155c86801ddb59cb619
|
2b8fe23680fb8c6596c8b4fd53a2547e32e84617
|
/8-DS-Design/EventCounter.py
|
e1d0c32e969f979b8ced3409173abec7d7174853
|
[] |
no_license
|
jigarshah2811/Python-Programming
|
b441c4815f80bef4d17611cdea851254c59739a9
|
a60a11ad29e9dde9e9960006f887d9b66d29e427
|
refs/heads/master
| 2022-11-20T18:09:11.955564
| 2022-11-04T05:58:19
| 2022-11-04T05:58:19
| 67,324,037
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,007
|
py
|
import collections
import time

N = 3  # sliding window length in seconds


class Solution(object):
    def __init__(self):
        # Deque of (second, frequency) pairs, oldest bucket first
        self.q = collections.deque()

    def eventOccured(self):
        timeMS = int(time.time() * 1000)  # epoch time in milliseconds
        second = timeMS // 1000
        if self.q and self.q[-1][0] == second:
            # Same one-second bucket: bump its count
            self.q[-1] = (second, self.q[-1][1] + 1)
        else:
            self.q.append((second, 1))
        # Purge buckets outside the N-second window
        while len(self.q) > N:
            self.q.popleft()

    def getNumEvents(self):
        # Purge old buckets before counting
        while len(self.q) > N:
            self.q.popleft()
        return sum(count for (_, count) in self.q)


s = Solution()
assert s.getNumEvents() == 0
for i in range(5):
    s.eventOccured()
    time.sleep(1)  # one event per second for 5 seconds
# Only the last N = 3 one-second buckets are retained
assert s.getNumEvents() == 3
for i in range(5):
    s.eventOccured()
    time.sleep(0.01)  # 5 more events land in the 6th second
# The buckets for the last 3 seconds now hold 1 + 1 + 5 events
assert s.getNumEvents() == 7
|
[
"jshah@pinterest.com"
] |
jshah@pinterest.com
|
b49d40170a4c2d9ecd8aa6b7b668cfdd08d2c580
|
ee8c4c954b7c1711899b6d2527bdb12b5c79c9be
|
/assessment2/amazon/run/core/controllers/spoon.py
|
e4d6d5e20ebc0f06012645de11ba00ebf94a7cac
|
[] |
no_license
|
sqlconsult/byte
|
02ac9899aebea4475614969b594bfe2992ffe29a
|
548f6cb5038e927b54adca29caf02c981fdcecfc
|
refs/heads/master
| 2021-01-25T14:45:42.120220
| 2018-08-11T23:45:31
| 2018-08-11T23:45:31
| 117,135,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
#!/usr/bin/env python3
from flask import Blueprint, Flask, render_template, request, url_for
controller = Blueprint('spoon', __name__, url_prefix='/spoon')
# @controller.route('/<string:title>', methods=['GET'])
# def lookup(title):
# if title == 'Republic': # TODO 2
# return render_template('republic.html') # TODO 2
# else:
# pass
|
[
"sqlconsult@hotmail.com"
] |
sqlconsult@hotmail.com
|
cee71c5efb06a3baf0f28594bbcb11c9bdac0c86
|
79c6594e76cbbb5de8fe9dc73a23946c90fac2b1
|
/torchbiggraph/util.py
|
c5954e623f8d6dc9b7058a967b9ea2cfd10fa7c5
|
[
"BSD-3-Clause"
] |
permissive
|
middle-plat-ai/PyTorch-BigGraph
|
e47e03104b6d640b47ac103170b09e411f0c8391
|
701ec1736ef17280981311f7214f35c96c190434
|
refs/heads/master
| 2021-11-03T19:47:13.676050
| 2019-04-26T16:49:13
| 2019-04-26T16:52:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,232
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import os.path
from collections import defaultdict
from datetime import datetime
from typing import Any, Dict, Iterable, Optional, Set, Tuple
import torch
import torch.multiprocessing as mp
from torch.optim import Optimizer
from .config import ConfigSchema
from .types import Side, EntityName, FloatTensorType
def log(msg: str) -> None:
"""Log msg to stdout with a timestamp. Flush stdout.
"""
print("%s %s" % (datetime.now().strftime("%Y-%m-%d %H:%M:%S"), msg), flush=True)
_verbosity_level = 0
def vlog(msg: str, level: int = 1) -> None:
if _verbosity_level >= level:
log(msg)
def split_almost_equally(size: int, *, num_parts: int) -> Iterable[slice]:
"""Split an interval of the given size into the given number of subintervals
The sizes of the subintervals will be between the floor and the ceil of the
"exact" fractional size, with larger intervals preceding smaller ones.
"""
size_per_part = size // num_parts
num_larger_parts = size % num_parts
prev = 0
for i in range(num_parts):
next_ = prev + size_per_part + (1 if i < num_larger_parts else 0)
yield slice(prev, next_)
prev = next_
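# Example sketch: split_almost_equally(10, num_parts=3) yields
# slice(0, 4), slice(4, 7), slice(7, 10) -- the single larger part comes first.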
def round_up_to_nearest_multiple(value: int, factor: int) -> int:
return ((value - 1) // factor + 1) * factor
def fast_approx_rand(numel: int) -> FloatTensorType:
if numel < 1_000_003:
tensor = torch.randn(numel)
# share_memory_ does return the tensor but its type annotation says it
# doesn't, thus we do this in two separate steps.
tensor.share_memory_()
return tensor
# construct the tensor storage in shared mem so we don't have to copy it
storage = torch.FloatStorage._new_shared(numel)
tensor = torch.FloatTensor(storage)
rand = torch.randn(1_000_003)
excess = numel % 1_000_003
# Using just `-excess` would give bad results when excess == 0.
tensor[:numel - excess].view(-1, 1_000_003)[...] = rand
tensor[numel - excess:] = rand[:excess]
return tensor
class DummyOptimizer(Optimizer):
def __init__(self) -> None:
# This weird dance makes Optimizer accept an empty parameter list.
super().__init__([{'params': []}], {})
def step(self, closure: None = None) -> None:
pass
def state_dict(self) -> Dict[str, Any]:
return {}
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
pass
def share_memory(self) -> None:
pass
# HOGWILD
def _pool_init():
torch.set_num_threads(1)
torch.manual_seed(os.getpid())
def create_pool(num_workers: int) -> mp.Pool:
# PyTorch relies on OpenMP, which by default parallelizes operations by
# implicitly spawning as many threads as there are cores, and synchronizing
# them with each other. This interacts poorly with Hogwild!-style subprocess
# pools as if each child process spawns its own OpenMP threads there can
# easily be thousands of threads that mostly wait in barriers. Calling
# set_num_threads(1) in both the parent and children prevents this.
# OpenMP can also lead to deadlocks if it gets initialized in the parent
# process before the fork (this bit us in unit tests, due to the generation
# of the test input data). Using the "spawn" context (i.e., fork + exec)
# solved the issue in most cases but still left some deadlocks. See
# https://github.com/pytorch/pytorch/issues/17199 for some more information
# and discussion.
torch.set_num_threads(1)
return mp.Pool(num_workers, initializer=_pool_init)
# config routines
def get_partitioned_types(
config: ConfigSchema,
side: Side,
) -> Tuple[int, Set[EntityName]]:
"""Return the number of partitions on a given side and the partitioned entity types
Each of the entity types that appear on the given side (LHS or RHS) of a relation
type is split into some number of partitions. The ones that are split into one
partition are called "unpartitioned" and behave as if all of their entities
belonged to all buckets. The other ones are the "properly" partitioned ones.
Currently, they must all be partitioned into the same number of partitions. This
function returns that number and the names of the properly partitioned entity
types.
"""
entity_names_by_num_parts: Dict[int, Set[EntityName]] = defaultdict(set)
for relation_config in config.relations:
entity_name = side.pick(relation_config.lhs, relation_config.rhs)
entity_config = config.entities[entity_name]
entity_names_by_num_parts[entity_config.num_partitions].add(entity_name)
if 1 in entity_names_by_num_parts:
del entity_names_by_num_parts[1]
if len(entity_names_by_num_parts) == 0:
return 1, set()
if len(entity_names_by_num_parts) > 1:
raise RuntimeError("Currently num_partitions must be a single "
"value across all partitioned entities.")
(num_partitions, partitioned_entity_names), = entity_names_by_num_parts.items()
return num_partitions, partitioned_entity_names
# compute a randomized AUC using a fixed number of sample points
# NOTE: AUC is the probability that a randomly chosen positive example
# has a higher score than a randomly chosen negative example
def compute_randomized_auc(
pos_: FloatTensorType,
neg_: FloatTensorType,
num_samples: int,
) -> float:
pos_, neg_ = pos_.view(-1), neg_.view(-1)
diff = (pos_[torch.randint(len(pos_), (num_samples,))]
> neg_[torch.randint(len(neg_), (num_samples,))])
return float(diff.float().mean())
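# Example sketch (hypothetical tensors): with perfectly separated scores the
# sampled AUC is 1.
#
# pos = torch.tensor([2.0, 3.0, 4.0])
# neg = torch.tensor([-1.0, 0.0])
# compute_randomized_auc(pos, neg, num_samples=10_000) # -> 1.0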
def get_num_workers(override: Optional[int]) -> int:
if override is not None:
return override
cpu_count = os.cpu_count()
if cpu_count is not None:
return cpu_count
result = 40
print("WARNING: number of workers unspecified and couldn't autodetect "
"CPU count; defaulting to %d workers." % result)
return result
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
e58223496a4579245283ccdb1478dd5d10774f3b
|
81d635211686b1bc87af5892bd9e0fb95cc2ddb8
|
/adwords api/googleads-python-lib-master/examples/dfp/v201511/report_service/run_sales_report.py
|
b3ab57226822fd2a475aea1e43470dcd44e9a4d3
|
[
"Apache-2.0"
] |
permissive
|
analyticsbot/Python-Code---Part-2
|
de2f0581258b6c8b8808b4ef2884fe7e323876f0
|
12bdcfdef4472bcedc77ae61707c25a4a09cba8a
|
refs/heads/master
| 2021-06-04T05:10:33.185766
| 2016-08-31T13:45:45
| 2016-08-31T13:45:45
| 66,679,512
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,017
|
py
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example runs a report equal to the "Sales by salespersons report."
"""
import tempfile
# Import appropriate modules from the client library.
from googleads import dfp
from googleads import errors
def main(client):
# Initialize a DataDownloader.
report_downloader = client.GetDataDownloader(version='v201511')
# Create report job.
report_job = {
'reportQuery': {
'dimensions': ['SALESPERSON_ID', 'SALESPERSON_NAME'],
'columns': ['AD_SERVER_IMPRESSIONS', 'AD_SERVER_CPM_AND_CPC_REVENUE',
'AD_SERVER_WITHOUT_CPD_AVERAGE_ECPM'],
'dateRangeType': 'LAST_MONTH'
}
}
try:
# Run the report and wait for it to finish.
report_job_id = report_downloader.WaitForReport(report_job)
except errors.DfpReportError, e:
print 'Failed to generate report. Error was: %s' % e
# Change to your preferred export format.
export_format = 'CSV_DUMP'
report_file = tempfile.NamedTemporaryFile(suffix='.csv.gz', delete=False)
# Download report data.
report_downloader.DownloadReportToFile(
report_job_id, export_format, report_file)
report_file.close()
# Display results.
print 'Report job with id \'%s\' downloaded to:\n%s' % (
report_job_id, report_file.name)
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
[
"ravi.shankar1788@gmail.com"
] |
ravi.shankar1788@gmail.com
|
260bf1b93bcd27bc0cb7694536a4ba9c217b4072
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/104/usersdata/228/50746/submittedfiles/av1_2.py
|
bae0b9ed8cdec01e64f282f927ac05793347dc28
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
# -*- coding: utf-8 -*-
import math
n = int(input('enter the dimension of the chocolate bar: '))
x1 = int(input('enter the x coordinate of figure 1: '))
y1 = int(input('enter the y coordinate of figure 1: '))
x2 = int(input('enter the x coordinate of figure 2: '))
y2 = int(input('enter the y coordinate of figure 2: '))
if x1<=(n/2) and y1<=(n/2) and x2>=(n/2) and y2>=(n/2) or x2<=(n/2) and y2<=(n/2) and x1>=(n/2) and y1>=(n/2):
print('S')
else:
print('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
26a9cb5f30a3cf4c4bf5cd521eeab51868367484
|
eddb9243d65ff6b0c51502c877e91c034ca5b349
|
/desktop_new.py
|
422713fc6c35f00901b3136bbb7e29a4dbabe62e
|
[] |
no_license
|
cristianmusic7/Donkey-car-image-recognition
|
1a90b1261f78d986171fab5d13b6f75ceba11c63
|
e08afc1c250cc563a8a776785c56cb15ef9c3e58
|
refs/heads/master
| 2020-09-03T00:38:46.930267
| 2019-11-04T03:47:28
| 2019-11-04T03:47:28
| 219,342,257
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,299
|
py
|
# desktop.py
import asyncio
import aiohttp
import cv2
import json
import numpy as np
import argparse
from imutils.video import FPS
import imutils
from rtcbot import RTCConnection, Gamepad, CVDisplay
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.2,
help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
print("[INFO] loading model done")
# initialize the list of class labels MobileNet SSD was trained to
# detect, then generate a set of bounding box colors for each class
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
CONSIDER = set(["dog", "person", "car"])
objCount = {obj: 0 for obj in CONSIDER}
disp = CVDisplay()
#g = Gamepad()
conn = RTCConnection()
fps = FPS().start()
async def fpsCheck():
while True:
await asyncio.sleep(5)
if fps:
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
@conn.video.subscribe
def onFrame(frame):
# Show a 4x larger image so that it is easy to see
#resized = cv2.resize(frame, (frame.shape[1], frame.shape[0]))
#resized = imutils.resize(frame, width=400)
fps.update()
frame = imutils.resize(frame, width=400)
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),0.007843, (300, 300), 127.5)
# pass the blob through the network and obtain the detections and
# predictions
net.setInput(blob)
detections = net.forward()
# reset the object count for each object in the CONSIDER set
objCount = {obj: 0 for obj in CONSIDER}
# loop over the detections
for i in np.arange(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the prediction
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the confidence is
# greater than the minimum confidence
if confidence > args["confidence"]:
# extract the index of the class label from the
# detections
idx = int(detections[0, 0, i, 1])
# check to see if the predicted class is in the set of
# classes that need to be considered
if CLASSES[idx] in CONSIDER:
# increment the count of the particular object
# detected in the frame
objCount[CLASSES[idx]] += 1
# compute the (x, y)-coordinates of the bounding box
# for the object
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# draw the bounding box around the detected object on
# the frame
cv2.rectangle(frame, (startX, startY), (endX, endY),
(255, 0, 0), 2)
# draw the object count on the frame
label = ", ".join("{}: {}".format(obj, count) for (obj, count) in objCount.items())
cv2.putText(frame, label, (10, h - 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255,0), 2)
#cv2.imshow("Home pet location monitor ({})".format(i), frame)
disp.put_nowait(frame)
async def connect():
localDescription = await conn.getLocalDescription()
async with aiohttp.ClientSession() as session:
async with session.post(
"http://192.168.0.3:8080/connect", data=json.dumps(localDescription)
) as resp:
response = await resp.json()
await conn.setRemoteDescription(response)
# Start sending gamepad controls
#g.subscribe(conn)
asyncio.ensure_future(fpsCheck())
asyncio.ensure_future(connect())
try:
asyncio.get_event_loop().run_forever()
finally:
fps.stop()
conn.close()
disp.close()
#g.close()
|
[
"you@example.com"
] |
you@example.com
|
bac6cf0ef69a3305e112f61db28295b9b5055f98
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/CodeJamData/15/02/18.py
|
7fff1f128ae1ac791095e13e767772b235e64379
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
import sys
import math
t = int(sys.stdin.readline().strip())
for i in range(t):
d = int(sys.stdin.readline().strip())
p = [int(k) for k in sys.stdin.readline().strip().split()]
minm = max(p)
j = 1
while j < minm:
m = 0
for k in range(d):
m += (p[k] + j - 1) / j - 1
minm = min(m + j, minm)
j += 1
print "Case #%d: %d" % (i + 1, minm)
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
b01ad9c8a93447aaab97d72aebb1b10d25c1222b
|
1b862f34c125ce200244dd79e4fda4b5b605ce2e
|
/.history/images_20210218100216.py
|
7b30a0f4a984693ddd55e1ad78ab2b6c5d15a53b
|
[] |
no_license
|
edwino26/CoreImages
|
26085a49cf1cb79442ae563a88354b2fdceace87
|
6bf6e68cac8ab36c87b1e6ea702bfe6882b0f40e
|
refs/heads/master
| 2023-06-22T12:53:37.344895
| 2021-07-21T04:31:44
| 2021-07-21T04:31:44
| 309,553,247
| 0
| 4
| null | 2021-04-29T23:23:15
| 2020-11-03T02:45:07
|
Lasso
|
UTF-8
|
Python
| false
| false
| 2,677
|
py
|
#
# %%
import glob
import cv2
import os.path
import numpy as np
import matplotlib.pyplot as plt
# %%
cores_per_image = 6
uvFiles = glob.glob('./Photos/*.jpg')
fname = uvFiles[0][9:25]
print(fname)
# Picture path
img = cv2.imread(uvFiles[0])
a = []
b = []
# %%
def oneventlbuttondown(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
xy = "%d,%d" % (x, y)
a.append(x)
b.append(y)
cv2.circle(img, (x, y), 10, (0, 0, 255), thickness=-1)
# cv2.putText(img, xy, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness=1)
cv2.imshow("image", img)
core_length = 3
vc = []
do = int(fname[0:3])
dn = int(fname[5:8])
for k in range(len(uvFiles)): #Loop through various files containing images
for i in range(cores_per_image):
if k == 0 and i == 0:
cv2.namedWindow("image", cv2.WINDOW_NORMAL)
# cv2.resizeWindow("output", 400, 300)
cv2.setMouseCallback("image", oneventlbuttondown)
cv2.imshow("image", img)
print(
'Click 1) left upper corner 2) right lower corner in leftmost core and 3) leftupper corner in second core')
cv2.waitKey(0)
y = b[0];
x = a[0];
dy = b[1] - b[0];
dx = a[1] - a[0]
gap = a[2] - a[1]
if i == 3:
midgap = gap * 4
else:
midgap = 0
if i > 0: x = x + (dx + gap) + midgap
crop_img = img[y:y + dy, x:x + dx]
if i == 0:
vc = crop_img
else:
vc = cv2.vconcat([vc, crop_img])
crop_name = str(int(fname[0:3]) + (core_length * i)) + ".jpg"
path = os.path.join(os.path.relpath('Cropped', start=os.curdir), crop_name)
cv2.imwrite(path, crop_img)
concat_name = fname[0:3] + "-" + fname[5:8] + ".jpg"
path = os.path.join(os.path.relpath('Cropped', start=os.curdir), concat_name)
cv2.imwrite(path, vc)
p = vc.shape
vc_gray = cv2.cvtColor(vc, cv2.COLOR_BGR2GRAY)
print(vc.shape) # Dimensions of Image
print(vc_gray.shape) # It is already a numpy array
print(type(vc_gray))
# print(p[:10, :10, 1 ])
img_log = np.average(vc_gray[:, 80:120], axis=1)
depths = np.arange(do, dn, (dn - do) / len(img_log))
plt.figure()
# plt.subplot(1, 2, 1)
plt.subplot2grid((1, 10), (0, 0), colspan=3)
plt.plot(img_log, depths, 'green');
plt.axis([0, 120, do, dn]);
plt.gca().invert_yaxis();
plt.gca().invert_xaxis()
# plt.subplot(1, 2 ,2)
plt.subplot2grid((1, 10), (0, 3), colspan=7)
plt.imshow(vc_gray[:, 40:120], aspect='auto', origin='upper');
plt.colorbar()
p_50 = np.percentile(img_log, 50)
plt.show()
# %%
|
[
"ortega.edwin.y@gmail.com"
] |
ortega.edwin.y@gmail.com
|
cc2dc649da57f9b2130bd313729257af71306cba
|
e777b4cd72b6f8eb0eb451943edc2b2cac3a367a
|
/setup.py
|
67443f8896345b4dd232e8701ae5f396baad255e
|
[] |
no_license
|
bannsec/xss_catcher
|
3620f523d0a44d54116148d2f70fe7faaef366dd
|
a2b363c0ff98dfd164903a6b42150ce679488b48
|
refs/heads/master
| 2021-06-18T19:35:55.816391
| 2017-06-19T00:11:40
| 2017-06-19T00:11:40
| 63,832,134
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,124
|
py
|
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
long_description = "See website for more info."
setup(
name='xss_catcher',
version='0.0.2',
description='Simple pythonic script to catch Cross Site Scripting (XSS) connections',
long_description=long_description,
url='https://github.com/owlz/xss_catcher',
author='Michael Bann',
author_email='self@bannsecurity.com',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Operating System :: POSIX :: Linux',
'Environment :: Console'
],
keywords='xss',
packages=find_packages(exclude=['contrib', 'docs', 'tests','dist']),
install_requires=[],
entry_points={
'console_scripts': [
'xss_catcher = xss_catcher.xss_catcher:main',
],
},
)
|
[
"whootandahalf@gmail.com"
] |
whootandahalf@gmail.com
|
c0ee3d3acb28c074c182de16fb58bda400405331
|
0f96f24c8682ece3b501904baaa0eef411969bb1
|
/0x0F-python-object_relational_mapping/6-model_state.py
|
3b6b8d1e31f9d3168bf63050b77ae4dcab6af4a6
|
[] |
no_license
|
dkokonkwo/holbertonschool-higher_level_programming
|
95c5103001e807bd46767f66d97568e23d893e68
|
5fa97f754afaf7326550113416e80fd942226254
|
refs/heads/master
| 2023-03-18T03:38:33.386497
| 2020-09-28T02:56:13
| 2020-09-28T02:56:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
#!/usr/bin/python3
"""Start link class to table in database
"""
import sys
from model_state import Base, State
from sqlalchemy import (create_engine)
if __name__ == "__main__":
engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.format(
sys.argv[1], sys.argv[2], sys.argv[3]), pool_pre_ping=True)
Base.metadata.create_all(engine)
|
[
"eislek02@gmail.com"
] |
eislek02@gmail.com
|
bd3f1ffa0841425a78c119759282d4003145b05f
|
2ee1fa9a5983c057f050a93c46bc894974856179
|
/botorch/utils/multi_objective/scalarization.py
|
67a16f8103e1d54fb9a1704949c6f9b1097d21bd
|
[
"MIT"
] |
permissive
|
jelena-markovic/botorch
|
8bd9641f165264ed1fea68af0a95f0e37b5e187c
|
dc868cc569e266abb4e7e112d019bd1bd6af27c4
|
refs/heads/master
| 2023-08-17T05:04:03.033178
| 2020-11-04T23:57:09
| 2020-11-04T23:58:39
| 310,691,257
| 0
| 0
|
MIT
| 2020-11-06T19:45:42
| 2020-11-06T19:45:41
| null |
UTF-8
|
Python
| false
| false
| 2,455
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Helper utilities for constructing scalarizations.
References
.. [Knowles2005]
J. Knowles, "ParEGO: a hybrid algorithm with on-line landscape approximation
for expensive multiobjective optimization problems," in IEEE Transactions
on Evolutionary Computation, vol. 10, no. 1, pp. 50-66, Feb. 2006.
"""
from __future__ import annotations
from typing import Callable
import torch
from botorch.exceptions.errors import BotorchTensorDimensionError
from botorch.utils.transforms import normalize
from torch import Tensor
def get_chebyshev_scalarization(
weights: Tensor, Y: Tensor, alpha: float = 0.05
) -> Callable[[Tensor], Tensor]:
r"""Construct an augmented Chebyshev scalarization.
Outcomes are first normalized to [0,1] and then an augmented
Chebyshev scalarization is applied.
Augmented Chebyshev scalarization:
objective(y) = min(w * y) + alpha * sum(w * y)
Note: this assumes maximization.
See [Knowles2005]_ for details.
This scalarization can be used with qExpectedImprovement to implement q-ParEGO
as proposed in [Daulton2020qehvi]_.
Args:
weights: A `m`-dim tensor of weights.
Y: A `n x m`-dim tensor of observed outcomes, which are used for
scaling the outcomes to [0,1].
alpha: Parameter governing the influence of the weighted sum term. The
default value comes from [Knowles2005]_.
Returns:
Transform function using the objective weights.
Example:
>>> weights = torch.tensor([0.75, 0.25])
        >>> transform = get_chebyshev_scalarization(weights, Y)
"""
if weights.shape != Y.shape[-1:]:
raise BotorchTensorDimensionError(
"weights must be an `m`-dim tensor where Y is `... x m`."
f"Got shapes {weights.shape} and {Y.shape}."
)
elif Y.ndim > 2:
raise NotImplementedError("Batched Y is not currently supported.")
Y_bounds = torch.stack([Y.min(dim=-2).values, Y.max(dim=-2).values])
def obj(Y: Tensor) -> Tensor:
# scale to [0,1]
Y_normalized = normalize(Y, bounds=Y_bounds)
product = weights * Y_normalized
return product.min(dim=-1).values + alpha * product.sum(dim=-1)
return obj
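# Usage sketch for q-ParEGO style random weights (assumes botorch's
# botorch.utils.sampling.sample_simplex helper):
#
# from botorch.utils.sampling import sample_simplex
# weights = sample_simplex(d=Y.shape[-1]).squeeze(0)
# scalarized_objective = get_chebyshev_scalarization(weights, Y)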
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
71357d96f44f5b2a00c333e37802a5dd988ffbdd
|
04c06575a49a3f4e30e4f3f2bf2365585664d2e8
|
/python_leetcode_2020/Python_Leetcode_2020/1465_max_area_of_pieceofcake_after_cuts.py
|
84149a9672842b58f898ef87d46a45ad4451afe6
|
[] |
no_license
|
xiangcao/Leetcode
|
18da3d5b271ff586fdf44c53f1a677423ca3dfed
|
d953abe2c9680f636563e76287d2f907e90ced63
|
refs/heads/master
| 2022-06-22T04:45:15.446329
| 2022-06-17T13:03:01
| 2022-06-17T13:03:01
| 26,052,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,291
|
py
|
"""
Given a rectangular cake with height h and width w, and two arrays of integers horizontalCuts and verticalCuts where horizontalCuts[i] is the distance from the top of the rectangular cake to the ith horizontal cut and similarly, verticalCuts[j] is the distance from the left of the rectangular cake to the jth vertical cut.
Return the maximum area of a piece of cake after you cut at each horizontal and vertical position provided in the arrays horizontalCuts and verticalCuts. Since the answer can be a huge number, return this modulo 10^9 + 7.
Constraints:
2 <= h, w <= 10^9
1 <= horizontalCuts.length < min(h, 10^5)
1 <= verticalCuts.length < min(w, 10^5)
1 <= horizontalCuts[i] < h
1 <= verticalCuts[i] < w
It is guaranteed that all elements in horizontalCuts are distinct.
It is guaranteed that all elements in verticalCuts are distinct.
"""
class Solution:
    def maxArea(self, h, w, horizontalCuts, verticalCuts):
        # The largest piece is the widest horizontal gap times the widest
        # vertical gap between adjacent cuts (or a cut and the cake edge).
        MOD = 10 ** 9 + 7
        return (self.getMaxDist(h, horizontalCuts) *
                self.getMaxDist(w, verticalCuts)) % MOD

    def getMaxDist(self, end, cuts):
        cuts.sort()
        res, prev = 0, 0
        for c in cuts:
            res = max(res, c - prev)
            prev = c
        return max(res, end - prev)
|
[
"xiangcao_liu@apple.com"
] |
xiangcao_liu@apple.com
|
063947a01bf8d4c0106d3e960c33eb7208b143f8
|
d80ef8c716bcc5ea54e87540dbf0463f15bf44ce
|
/Proxy/test/mitmproxy/test_version.py
|
f87b08517cd20bf6f7ab33dd2aa65b4da988f2a7
|
[
"MIT"
] |
permissive
|
YagiGo/YPTN
|
5043d22eb131c7164d3fa575f0c4e3d8a963dbf4
|
d7692a68ee1bf578536b4c09c566272210fc8b69
|
refs/heads/master
| 2018-10-16T03:44:18.024169
| 2018-07-24T08:53:57
| 2018-07-24T08:53:57
| 107,633,669
| 4
| 1
|
MIT
| 2018-06-08T09:04:29
| 2017-10-20T04:55:22
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 248
|
py
|
import runpy
from mitmproxy import version
def test_version(capsys):
runpy.run_module('mitmproxy.version', run_name='__main__')
stdout, stderr = capsys.readouterr()
assert len(stdout) > 0
assert stdout.strip() == version.VERSION
|
[
"jeremywu1995@gmail.com"
] |
jeremywu1995@gmail.com
|
fc6d2a4cbbd5a898cc67e115186e1724231f9eaf
|
7e7e2c5d327a518a03b2307f7f3ece37517fa361
|
/ThirdWeek/Task9.py
|
196edbf3279906c37f30e69bd31f1ff91ba3f5d3
|
[] |
no_license
|
Midnight1Knight/HSE-course
|
0fdd995f2e8bf98ecd5fc4ecbcd503e6ef2150ab
|
9b79c359fc65d260e3d454de5464abd5c89be770
|
refs/heads/master
| 2022-09-24T14:36:29.472005
| 2020-06-04T17:11:37
| 2020-06-04T17:11:37
| 269,414,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 286
|
py
|
p = float(0)
n = int(input())
x = float(input())
sumX = 0.0
counter = 0
while n > 0:
k = float(input())
sumX = x
counter = n
while counter > 1:
sumX *= x
counter -= 1
p += float(k * sumX)
n -= 1
k = float(input())
p += float(k)
print(float(p))
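# The loop above evaluates k_n*x^n + ... + k_1*x + k_0, reading coefficients
# from the highest power down to the constant term. A sketch of the same
# computation via Horner's rule (hypothetical rewrite, fewer multiplications):
#
# p = 0.0
# for each coefficient k, read in the same descending order:
#     p = p * x + k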
|
[
"andrkar008@gmail.com"
] |
andrkar008@gmail.com
|
13109ecbe3376732d745c55c29a10df925581684
|
c87b2388fe94dbc081ad01e59f8b66d90f2ded46
|
/tests/test_project/app_rename_table/migrations/0001_initial.py
|
ea12d2850c4271965b9e03aa7b2c261527668328
|
[
"Apache-2.0"
] |
permissive
|
rajuniit/django-migration-linter
|
f6ac60064cd1feb6a1b6a552ff1656619213a419
|
8db24f919dd2411d0c7d000ab51a92398771cb24
|
refs/heads/main
| 2023-07-26T17:57:03.535454
| 2021-09-07T05:31:08
| 2021-09-07T05:31:08
| 403,851,905
| 0
| 0
|
Apache-2.0
| 2021-09-07T05:29:01
| 2021-09-07T05:29:01
| null |
UTF-8
|
Python
| false
| false
| 709
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-04-14 15:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="A",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("field", models.IntegerField()),
],
)
]
|
[
"david.wobrock@gmail.com"
] |
david.wobrock@gmail.com
|
8867e7d73b6bbba89607d65330e7999f1bbe7e27
|
2a3a1f103a324bd36e1f557643bf785a782ac5bf
|
/1097.py
|
972c97114436ab2cb4c479a435b64015bbca0b26
|
[] |
no_license
|
dbgkswn7581/python_basic_questions
|
4c381847c6ab90647cc4dddaca3831210dbbe721
|
5042ea2a2e84a71e81ab8eccf940abde1e82a68f
|
refs/heads/master
| 2023-02-08T04:59:51.654197
| 2021-01-04T06:54:18
| 2021-01-04T06:54:18
| 326,170,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 597
|
py
|
arr = []
b = []
for i in range(19):
a = input()
for j in a:
if j == '0':
b.append(0)
elif j == '1':
b.append(1)
arr.append(b)
b = []
def rever(x,y):
for i in range(19):
if arr[x-1][i] == 1:
arr[x-1][i] = 0
else:
arr[x-1][i] = 1
if arr[i][y-1] == 1:
arr[i][y-1] = 0
else:
arr[i][y-1] = 1
m = int(input())
for p in range(m):
x,y = map(int, input().split())
rever(x,y)
for i in arr:
for j in range(19):
print(i[j], end=' ')
print()
|
[
"72482222+dbgkswn7581@users.noreply.github.com"
] |
72482222+dbgkswn7581@users.noreply.github.com
|
0ca434186efe591b665ab5be8163b5b376502a87
|
b373c3b1e9b60f604c5213b8678cc374e4d60621
|
/tasks.py
|
429eda6543b1c0d1c4e9ccd8fa56b119ee23974b
|
[
"MIT"
] |
permissive
|
dgilland/sqlservice
|
db512bc3328477472298ca5186de3e9d4b0541d4
|
0be2bb62b2655916f5958e7eccaed63e83f1fe7b
|
refs/heads/master
| 2023-05-31T02:35:33.651188
| 2022-10-12T00:37:13
| 2022-10-12T00:37:13
| 59,388,509
| 173
| 13
| null | 2018-08-03T21:23:52
| 2016-05-22T00:56:21
|
Python
|
UTF-8
|
Python
| false
| false
| 3,950
|
py
|
"""
This module provides the CLI interface for invoke tasks.
All tasks can be executed from this file's directory using:
$ inv <task>
Where <task> is a function defined below with the @task decorator.
"""
from functools import partial
import os
from invoke import Exit, UnexpectedExit, run as _run, task
PACKAGE_NAME = "sqlservice"
PACKAGE_SOURCE = f"src/{PACKAGE_NAME}"
TEST_TARGETS = f"{PACKAGE_SOURCE} tests"
LINT_TARGETS = f"{TEST_TARGETS} tasks.py"
EXIT_EXCEPTIONS = (Exit, UnexpectedExit, SystemExit)
# Set pty=True to enable colored output when available.
run = partial(_run, pty=True)
@task
def black(ctx, quiet=False):
"""Autoformat code using black."""
run(f"black {LINT_TARGETS}", hide=quiet)
@task
def isort(ctx, quiet=False):
"""Autoformat Python imports."""
run(f"isort {LINT_TARGETS}", hide=quiet)
@task
def docformatter(ctx):
"""Autoformat docstrings using docformatter."""
run(
f"docformatter -r {LINT_TARGETS} "
f"--in-place --pre-summary-newline --wrap-descriptions 100 --wrap-summaries 100"
)
@task
def fmt(ctx):
"""Autoformat code and docstrings."""
print("Preparing to run formatters: docformatter, isort, black\n")
print("Running docformatter")
docformatter(ctx)
print("Running isort")
isort(ctx, quiet=True)
print("Running black")
black(ctx, quiet=True)
@task
def flake8(ctx):
"""Check code for PEP8 violations using flake8."""
run(f"flake8 --format=pylint {LINT_TARGETS}")
@task
def pylint(ctx):
"""Check code for static errors using pylint."""
run(f"pylint {LINT_TARGETS}")
@task
def mypy(ctx):
"""Check code using mypy type checker."""
run(f"mypy {LINT_TARGETS}")
@task
def lint(ctx):
"""Run linters."""
linters = {"flake8": flake8, "pylint": pylint, "mypy": mypy}
failures = []
print(f"Preparing to run linters: {', '.join(linters)}\n")
for name, linter in linters.items():
print(f"Running {name}")
try:
linter(ctx)
except EXIT_EXCEPTIONS:
failures.append(name)
result = "FAILED"
else:
result = "PASSED"
print(f"{result}\n")
if failures:
failed = ", ".join(failures)
raise Exit(f"ERROR: Linters that failed: {failed}")
@task(help={"args": "Override default pytest arguments"})
def test(ctx, args=f"{TEST_TARGETS} --cov={PACKAGE_NAME}"):
"""Run unit tests using pytest."""
tox_env_site_packages_dir = os.getenv("TOX_ENV_SITE_PACKAGES_DIR")
if tox_env_site_packages_dir:
# Re-path package source to match tox env so that we generate proper coverage report.
tox_env_pkg_src = os.path.join(tox_env_site_packages_dir, os.path.basename(PACKAGE_SOURCE))
args = args.replace(PACKAGE_SOURCE, tox_env_pkg_src)
run(f"pytest {args}")
@task
def ci(ctx):
"""Run linters and tests."""
print("Building package")
build(ctx)
print("Building docs")
docs(ctx)
print("Checking linters")
lint(ctx)
print("Running unit tests")
test(ctx)
@task
def docs(ctx, serve=False, bind="127.0.0.1", port=8000):
"""Build docs."""
run("rm -rf docs/_build")
run("sphinx-build -q -W -b html docs docs/_build/html")
if serve:
print(f"Serving docs on {bind} port {port} (http://{bind}:{port}/) ...")
run(f"python -m http.server -b {bind} --directory docs/_build/html {port}", hide=True)
@task
def build(ctx):
"""Build Python package."""
run("rm -rf dist build docs/_build")
run("python -m build")
@task
def clean(ctx):
"""Remove temporary files related to development."""
run("find . -type f -name '*.py[cod]' -delete -o -type d -name __pycache__ -delete")
run("rm -rf .tox .coverage .cache .pytest_cache .mypy_cache **/.egg* **/*.egg* dist build")
@task(pre=[build])
def release(ctx):
"""Release Python package."""
run("twine upload dist/*")
|
[
"dgilland@gmail.com"
] |
dgilland@gmail.com
|
a9b1d887ebcfe4a2b8adbc771fbe2a309ca4ab71
|
37572d62583271b872ec0e8f051701111d8d9230
|
/migrations/versions/1b2b0d93a8ba_16.py
|
cd05612485ac7c331d9aa19c6caa6a5372232951
|
[] |
no_license
|
linwenjunid/flasky
|
6b4b920b56194b04d2a33b2c843b858c519dffb5
|
ea1e74bbc1185ade002cdfbee96b99675ee3c22d
|
refs/heads/master
| 2020-03-18T23:40:47.958836
| 2019-03-29T01:27:40
| 2019-03-29T01:27:40
| 135,418,770
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
"""16
Revision ID: 1b2b0d93a8ba
Revises: 5ae2353a173c
Create Date: 2018-08-21 16:10:28.530756
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1b2b0d93a8ba'
down_revision = '5ae2353a173c'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('celery_tasks', sa.Column('task_percent', sa.Float(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('celery_tasks', 'task_percent')
# ### end Alembic commands ###
|
[
"linwenjunid@163.com"
] |
linwenjunid@163.com
|
7b31e92e4ea4f7d437d2d38e4498703c7211dc33
|
0fd9cb5e8bfb26fa62b90e65f7fbaa2fd233d3d3
|
/napalm/python/netmiko/fortinet/fortinet_ssh.py
|
f20231334f1f3a4dffb086717c2d3a33a95eebde
|
[] |
no_license
|
levensailor/pip4lambda
|
0cff15b2dba3ba586652c6cc914252daf01a874b
|
22a83a43141f9bf72fdd0cd5faee3b88cc7e49fa
|
refs/heads/master
| 2022-03-02T05:31:48.894906
| 2022-02-11T16:38:00
| 2022-02-11T16:38:00
| 174,207,440
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,833
|
py
|
from __future__ import unicode_literals
import paramiko
import time
import re
from netmiko.cisco_base_connection import CiscoSSHConnection
class FortinetSSH(CiscoSSHConnection):
def _modify_connection_params(self):
"""Modify connection parameters prior to SSH connection."""
paramiko.Transport._preferred_kex = ('diffie-hellman-group14-sha1',
'diffie-hellman-group-exchange-sha1',
'diffie-hellman-group-exchange-sha256',
'diffie-hellman-group1-sha1',)
def session_preparation(self):
"""Prepare the session after the connection has been established."""
self._test_channel_read()
self.set_base_prompt(alt_prompt_terminator='$')
self.disable_paging()
# Clear the read buffer
time.sleep(.3 * self.global_delay_factor)
self.clear_buffer()
def disable_paging(self, delay_factor=1):
"""Disable paging is only available with specific roles so it may fail."""
check_command = "get system status | grep Virtual"
output = self.send_command_timing(check_command)
self.allow_disable_global = True
self.vdoms = False
self._output_mode = 'more'
if "Virtual domain configuration: enable" in output:
self.vdoms = True
vdom_additional_command = "config global"
output = self.send_command_timing(vdom_additional_command, delay_factor=2)
if "Command fail" in output:
self.allow_disable_global = False
self.remote_conn.close()
self.establish_connection(width=100, height=1000)
new_output = ''
if self.allow_disable_global:
self._retrieve_output_mode()
disable_paging_commands = ["config system console", "set output standard", "end"]
# There is an extra 'end' required if in multi-vdoms are enabled
if self.vdoms:
disable_paging_commands.append("end")
outputlist = [self.send_command_timing(command, delay_factor=2)
for command in disable_paging_commands]
# Should test output is valid
new_output = self.RETURN.join(outputlist)
return output + new_output
def _retrieve_output_mode(self):
"""Save the state of the output mode so it can be reset at the end of the session."""
reg_mode = re.compile(r'output\s+:\s+(?P<mode>.*)\s+\n')
output = self.send_command("get system console")
result_mode_re = reg_mode.search(output)
if result_mode_re:
result_mode = result_mode_re.group('mode').strip()
if result_mode in ['more', 'standard']:
self._output_mode = result_mode
def cleanup(self):
"""Re-enable paging globally."""
if self.allow_disable_global:
# Return paging state
output_mode_cmd = "set output {}".format(self._output_mode)
enable_paging_commands = ["config system console",
output_mode_cmd,
"end"]
if self.vdoms:
enable_paging_commands.insert(0, "config global")
# Should test output is valid
for command in enable_paging_commands:
self.send_command_timing(command)
def config_mode(self, config_command=''):
"""No config mode for Fortinet devices."""
return ''
def exit_config_mode(self, exit_config=''):
"""No config mode for Fortinet devices."""
return ''
def save_config(self, cmd='', confirm=True, confirm_response=''):
"""Not Implemented"""
raise NotImplementedError
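# Usage sketch (standard netmiko entry point; the connection parameters below
# are placeholders):
#
# from netmiko import ConnectHandler
# conn = ConnectHandler(device_type="fortinet", host="192.0.2.1",
#                       username="admin", password="***")
# print(conn.send_command("get system status"))
# conn.disconnect()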
|
[
"jlevensailor@presidio.com"
] |
jlevensailor@presidio.com
|
75f4afcd3f28a07b31a0169bdacd3a042829807a
|
fb754d8c5abf560bb0f0c123dde2f6790d71452c
|
/dataset/dota_coco/dota_generate_test_result.py
|
44c8f1804bcff723e1dbb5df2e172340c4b1e41e
|
[
"Apache-2.0"
] |
permissive
|
ellinyang/PaddleDetection
|
024c256c620949828fd3d2a7592aaebc6e3fef92
|
993119bbc3de330310083e9487fef5147d82c087
|
refs/heads/master
| 2021-11-04T17:56:50.729293
| 2021-10-28T02:30:12
| 2021-10-28T02:30:12
| 222,353,395
| 3
| 0
|
Apache-2.0
| 2019-11-18T03:11:33
| 2019-11-18T03:11:32
| null |
UTF-8
|
Python
| false
| false
| 7,901
|
py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import glob
import numpy as np
from multiprocessing import Pool
from functools import partial
from shapely.geometry import Polygon
import argparse
nms_thresh = 0.1
class_name_15 = [
'plane', 'baseball-diamond', 'bridge', 'ground-track-field',
'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',
'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout',
'harbor', 'swimming-pool', 'helicopter'
]
class_name_16 = [
'plane', 'baseball-diamond', 'bridge', 'ground-track-field',
'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',
'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout',
'harbor', 'swimming-pool', 'helicopter', 'container-crane'
]
def rbox_iou(g, p):
"""
iou of rbox
"""
g = np.array(g)
p = np.array(p)
g = Polygon(g[:8].reshape((4, 2)))
p = Polygon(p[:8].reshape((4, 2)))
g = g.buffer(0)
p = p.buffer(0)
if not g.is_valid or not p.is_valid:
return 0
inter = Polygon(g).intersection(Polygon(p)).area
union = g.area + p.area - inter
if union == 0:
return 0
else:
return inter / union
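# Quick sanity-check sketch: two identical axis-aligned unit squares overlap
# completely, so their IoU is 1.
#
# sq = [0, 0, 1, 0, 1, 1, 0, 1]
# rbox_iou(sq, sq) # -> 1.0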
def py_cpu_nms_poly_fast(dets, thresh):
"""
Args:
dets: pred results
thresh: nms threshold
Returns: index of keep
"""
obbs = dets[:, 0:-1]
x1 = np.min(obbs[:, 0::2], axis=1)
y1 = np.min(obbs[:, 1::2], axis=1)
x2 = np.max(obbs[:, 0::2], axis=1)
y2 = np.max(obbs[:, 1::2], axis=1)
scores = dets[:, 8]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
polys = []
for i in range(len(dets)):
tm_polygon = [
dets[i][0], dets[i][1], dets[i][2], dets[i][3], dets[i][4],
dets[i][5], dets[i][6], dets[i][7]
]
polys.append(tm_polygon)
polys = np.array(polys)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
hbb_inter = w * h
hbb_ovr = hbb_inter / (areas[i] + areas[order[1:]] - hbb_inter)
# h_keep_inds = np.where(hbb_ovr == 0)[0]
h_inds = np.where(hbb_ovr > 0)[0]
tmp_order = order[h_inds + 1]
for j in range(tmp_order.size):
            iou = rbox_iou(polys[i], polys[tmp_order[j]])
            hbb_ovr[h_inds[j]] = iou
inds = np.where(hbb_ovr <= thresh)[0]
order = order[inds + 1]
return keep
def poly2origpoly(poly, x, y, rate):
origpoly = []
for i in range(int(len(poly) / 2)):
tmp_x = float(poly[i * 2] + x) / float(rate)
tmp_y = float(poly[i * 2 + 1] + y) / float(rate)
origpoly.append(tmp_x)
origpoly.append(tmp_y)
return origpoly
def nmsbynamedict(nameboxdict, nms, thresh):
"""
Args:
nameboxdict: nameboxdict
nms: nms
thresh: nms threshold
Returns: nms result as dict
"""
nameboxnmsdict = {x: [] for x in nameboxdict}
for imgname in nameboxdict:
keep = nms(np.array(nameboxdict[imgname]), thresh)
outdets = []
for index in keep:
outdets.append(nameboxdict[imgname][index])
nameboxnmsdict[imgname] = outdets
return nameboxnmsdict
def merge_single(output_dir, nms, pred_class_lst):
"""
Args:
output_dir: output_dir
nms: nms
pred_class_lst: pred_class_lst
class_name: class_name
Returns:
"""
class_name, pred_bbox_list = pred_class_lst
nameboxdict = {}
for line in pred_bbox_list:
splitline = line.split(' ')
subname = splitline[0]
splitname = subname.split('__')
oriname = splitname[0]
pattern1 = re.compile(r'__\d+___\d+')
x_y = re.findall(pattern1, subname)
x_y_2 = re.findall(r'\d+', x_y[0])
x, y = int(x_y_2[0]), int(x_y_2[1])
pattern2 = re.compile(r'__([\d+\.]+)__\d+___')
rate = re.findall(pattern2, subname)[0]
confidence = splitline[1]
poly = list(map(float, splitline[2:]))
origpoly = poly2origpoly(poly, x, y, rate)
det = origpoly
det.append(confidence)
det = list(map(float, det))
if (oriname not in nameboxdict):
nameboxdict[oriname] = []
nameboxdict[oriname].append(det)
nameboxnmsdict = nmsbynamedict(nameboxdict, nms, nms_thresh)
# write result
dstname = os.path.join(output_dir, class_name + '.txt')
with open(dstname, 'w') as f_out:
for imgname in nameboxnmsdict:
for det in nameboxnmsdict[imgname]:
confidence = det[-1]
bbox = det[0:-1]
outline = imgname + ' ' + str(confidence) + ' ' + ' '.join(
map(str, bbox))
f_out.write(outline + '\n')
def dota_generate_test_result(pred_txt_dir,
output_dir='output',
dota_version='v1.0'):
"""
pred_txt_dir: dir of pred txt
output_dir: dir of output
dota_version: dota_version v1.0 or v1.5 or v2.0
"""
pred_txt_list = glob.glob("{}/*.txt".format(pred_txt_dir))
# step1: summary pred bbox
pred_classes = {}
class_lst = class_name_15 if dota_version == 'v1.0' else class_name_16
for class_name in class_lst:
pred_classes[class_name] = []
for current_txt in pred_txt_list:
img_id = os.path.split(current_txt)[1]
img_id = img_id.split('.txt')[0]
with open(current_txt) as f:
res = f.readlines()
for item in res:
item = item.split(' ')
pred_class = item[0]
item[0] = img_id
pred_bbox = ' '.join(item)
pred_classes[pred_class].append(pred_bbox)
pred_classes_lst = []
for class_name in pred_classes.keys():
print('class_name: {}, count: {}'.format(class_name,
len(pred_classes[class_name])))
pred_classes_lst.append((class_name, pred_classes[class_name]))
# step2: merge
pool = Pool(len(class_lst))
nms = py_cpu_nms_poly_fast
mergesingle_fn = partial(merge_single, output_dir, nms)
pool.map(mergesingle_fn, pred_classes_lst)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='dota anno to coco')
parser.add_argument('--pred_txt_dir', help='path of pred txt dir')
parser.add_argument(
'--output_dir', help='path of output dir', default='output')
parser.add_argument(
'--dota_version',
help='dota_version, v1.0 or v1.5 or v2.0',
type=str,
default='v1.0')
args = parser.parse_args()
# process
dota_generate_test_result(args.pred_txt_dir, args.output_dir,
args.dota_version)
print('done!')
|
[
"noreply@github.com"
] |
ellinyang.noreply@github.com
|
0e4d12400d385371006b7ad61680b902e236e83c
|
dc42a65f63ca8327ee74675c7606a9b6e3b39d40
|
/tpdatasrc/co8fixes/scr/py00081spugnoir.py
|
9d67ab60fadca2c1158c5ab7df6dfd15a16fc507
|
[
"MIT"
] |
permissive
|
anatoliy-savchak/TemplePlus
|
03cda558de2fd30d34305b7e5b548d202ba97f76
|
50922bb14cc2d7dcf8fceeccf45c3b905c1b512f
|
refs/heads/master_old
| 2023-04-28T12:01:06.205497
| 2021-09-25T13:03:51
| 2021-09-25T13:03:51
| 172,583,383
| 2
| 0
|
MIT
| 2019-02-25T20:56:01
| 2019-02-25T20:56:00
| null |
UTF-8
|
Python
| false
| false
| 3,263
|
py
|
from toee import *
from utilities import *
from combat_standard_routines import *
def san_dialog( attachee, triggerer ):
if (game.global_flags[819] == 1):
attachee.attack(triggerer)
return SKIP_DEFAULT
if (attachee.leader_get() != OBJ_HANDLE_NULL):
triggerer.begin_dialog( attachee, 100 ) ## spugnoir in party
elif (game.global_vars[913] == 32):
triggerer.begin_dialog( attachee, 140 ) ## have attacked 3 or more farm animals with spugnoir in party
elif (game.leader.reputation_has(32) == 1 or game.leader.reputation_has(30) == 1 or game.leader.reputation_has(29) == 1):
attachee.float_line(11004,triggerer) ## have lawbreaker or convict or banished from hommlet rep
else:
triggerer.begin_dialog( attachee, 1 ) ## none of the above
return SKIP_DEFAULT
def san_first_heartbeat( attachee, triggerer ):
if (attachee.leader_get() == OBJ_HANDLE_NULL):
if (game.global_vars[501] == 4 or game.global_vars[501] == 5 or game.global_vars[501] == 6 or game.global_vars[510] == 2):
attachee.object_flag_set(OF_OFF)
else:
attachee.object_flag_unset(OF_OFF)
if (attachee.leader_get() == OBJ_HANDLE_NULL and not game.combat_is_active()):
game.global_vars[712] = 0
return RUN_DEFAULT
def san_dying( attachee, triggerer ):
if should_modify_CR( attachee ):
modify_CR( attachee, get_av_level() )
attachee.float_line(12014,triggerer)
if (attachee.leader_get() != OBJ_HANDLE_NULL):
game.global_vars[29] = game.global_vars[29] + 1
return RUN_DEFAULT
def san_enter_combat( attachee, triggerer ):
print "Spugnoir Enter Combat"
ProtectTheInnocent( attachee, triggerer)
return RUN_DEFAULT
def san_heartbeat( attachee, triggerer ):
if (game.global_vars[712] == 0 and attachee.leader_get() == OBJ_HANDLE_NULL and not game.combat_is_active()) and game.party_alignment & ALIGNMENT_GOOD == 0:
attachee.cast_spell(spell_mage_armor, attachee)
attachee.spells_pending_to_memorized()
game.global_vars[712] = 1
if (not game.combat_is_active()):
if (game.global_vars[913] >= 3):
if (attachee != OBJ_HANDLE_NULL):
leader = attachee.leader_get()
if (leader != OBJ_HANDLE_NULL):
leader.follower_remove(attachee)
attachee.float_line(22000,triggerer)
return RUN_DEFAULT
def san_join( attachee, triggerer ):
create_item_in_inventory( 4645, attachee)
create_item_in_inventory( 4647, attachee)
create_item_in_inventory( 4224, attachee)
create_item_in_inventory( 12848, attachee)
game.new_sid = 0
return RUN_DEFAULT
def san_new_map( attachee, triggerer ):
if ((attachee.map == 5006) and (game.global_vars[695] == 1 or game.global_vars[695] == 2)):
attachee.float_line(12070,triggerer)
elif ((attachee.map == 5024) and (is_daytime() != 1)):
attachee.float_line(10019,triggerer)
return RUN_DEFAULT
def equip_transfer( attachee, triggerer ):
itemA = attachee.item_find(6081)
if (itemA != OBJ_HANDLE_NULL):
itemA.destroy()
create_item_in_inventory( 6081, triggerer )
itemB = attachee.item_find(6023)
if (itemB != OBJ_HANDLE_NULL):
itemB.destroy()
create_item_in_inventory( 6023, triggerer )
itemC = attachee.item_find(4060)
if (itemC != OBJ_HANDLE_NULL):
itemC.destroy()
create_item_in_inventory( 4060, triggerer )
create_item_in_inventory( 7001, attachee )
return RUN_DEFAULT
|
[
"doug1234@unknown"
] |
doug1234@unknown
|
3264f45ff99ac37d6e6a01b5f9e165a75bcb3531
|
39d3af3e67a00b93d4325ecd6942b021bf4acbba
|
/deployTest1/wsgi.py
|
0e2c9f328e4ad3751809c8a8c259b2991f57ade7
|
[] |
no_license
|
dmhburke/DeployTest1GIT
|
51fcc1405eaf8420aed60ab42c2e21a4a1a36f59
|
002d06e9dca5bc9d032fd7953fcb1ea16fb8994e
|
refs/heads/master
| 2020-05-04T03:03:59.969438
| 2019-04-02T17:07:58
| 2019-04-02T17:07:58
| 178,936,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for deployTest1 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'deployTest1.settings')
application = get_wsgi_application()
|
[
"dmhburke@gmail.com"
] |
dmhburke@gmail.com
|
260b0f3bafcd890c3109cd70051c8239142693c3
|
27d670c976ce0771087008c74e8960e25c4b2b01
|
/src/main/resources/script_templates/Hadim_Scripts/Preprocessing/Preprocess_TIRF_Images.py
|
bd2a01cd42ed7c27cae7045060318000cfffdb88
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
hadim/Hadim_Scripts
|
2f068ead5df4a299e2ff871ebff4ddc6fdb4a39f
|
dc44a12e294402ac3c76062197f391dad1ebcaaf
|
refs/heads/master
| 2022-12-31T08:47:49.386465
| 2020-10-07T15:51:29
| 2020-10-07T15:51:29
| 29,701,480
| 2
| 3
|
BSD-3-Clause
| 2019-05-17T03:06:59
| 2015-01-22T21:35:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,592
|
py
|
# @Float(label="Sigma 1", value=6) sigma1
# @Float(label="Sigma 2", value=2) sigma2
# @Float(label="Gaussian Filter Size", value=50) gaussian_filter_size
# @Boolean(label="Iterate XY plane (reduce memory usage)", value=False) iterate_plane
# @Boolean(label="Save Preprocessed Image", value=False) save_image
# @Dataset data
# @CommandService cs
# @ModuleService ms
# @PluginService ps
from sc.fiji.plugin.hadimscripts import DOGFilterCommand
from sc.fiji.plugin.hadimscripts import PseudoFlatFieldCorrectionCommand
def runCommand(inputs, command, showOutputs=False):
    from org.scijava.module.process import PreprocessorPlugin
    from org.scijava.module.process import PostprocessorPlugin
    command = cs.getCommand(command)
    pre = ps.createInstancesOfType(PreprocessorPlugin)
    post = ps.createInstancesOfType(PostprocessorPlugin)
    if showOutputs:
        module = ms.waitFor(ms.run(command, pre, post, inputs))
    else:
        module = ms.waitFor(ms.run(command, pre, None, inputs))
    return module
inputs = {"input": data,
"sigma1": sigma1,
"sigma2": sigma2,
"normalizeIntensity": True,
"saveImage": False,
"suffix": ""}
module = runCommand(inputs, DOGFilterCommand, showOutputs=False)
filtered_dataset = module.getOutput("output")
inputs = {"input": filtered_dataset,
"gaussianFilterSize": gaussian_filter_size,
"normalizeIntensity": True,
"iteratePlane": iterate_plane,
"saveImage": save_image,
"suffix": "-Preprocessed"}
module = runCommand(inputs, PseudoFlatFieldCorrectionCommand, showOutputs=True)
|
[
"hadrien.mary@gmail.com"
] |
hadrien.mary@gmail.com
|
53603194cbdad317cab36c3658300ab37fce1e8f
|
658e2e3cb8a4d5343a125f7deed19c9ebf06fa68
|
/course_DE/data-engineering-nanodegree-master/4-data-pipelines-with-airflow/L2_exercises/exercise3.py
|
f0b82c490da3c700ec1b44526eff4e1861fa0d42
|
[] |
no_license
|
yennanliu/analysis
|
3f0018809cdc2403f4fbfe4b245df1ad73fa08a5
|
643ad3fed41961cddd006fadceb0e927f1db1f23
|
refs/heads/master
| 2021-01-23T21:48:58.572269
| 2020-10-13T22:47:12
| 2020-10-13T22:47:12
| 57,648,676
| 11
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,315
|
py
|
#Instructions
#1 - Modify the bikeshare DAG to load data month by month, instead of loading it all at once, every time.
#2 - Use time partitioning to parallelize the execution of the DAG.
import datetime
import logging
from airflow import DAG
from airflow.contrib.hooks.aws_hook import AwsHook
from airflow.hooks.postgres_hook import PostgresHook
from airflow.operators.postgres_operator import PostgresOperator
from airflow.operators.python_operator import PythonOperator
import sql_statements
def load_trip_data_to_redshift(*args, **kwargs):
    aws_hook = AwsHook("aws_credentials")
    credentials = aws_hook.get_credentials()
    redshift_hook = PostgresHook("redshift")
    # # #
    execution_date = datetime.datetime.strptime(kwargs["ds"], '%Y-%m-%d')
    # # #
    sql_stmt = sql_statements.COPY_MONTHLY_TRIPS_SQL.format(
        credentials.access_key,
        credentials.secret_key,
        year=execution_date.year,
        month=execution_date.month
    )
    redshift_hook.run(sql_stmt)


def load_station_data_to_redshift(*args, **kwargs):
    aws_hook = AwsHook("aws_credentials")
    credentials = aws_hook.get_credentials()
    redshift_hook = PostgresHook("redshift")
    sql_stmt = sql_statements.COPY_STATIONS_SQL.format(
        credentials.access_key,
        credentials.secret_key,
    )
    redshift_hook.run(sql_stmt)
dag = DAG(
'lesson2.exercise3',
start_date=datetime.datetime(2018, 1, 1, 0, 0, 0, 0),
end_date=datetime.datetime(2019, 1, 1, 0, 0, 0, 0),
schedule_interval='@monthly',
max_active_runs=1
)
create_trips_table = PostgresOperator(
task_id="create_trips_table",
dag=dag,
postgres_conn_id="redshift",
sql=sql_statements.CREATE_TRIPS_TABLE_SQL
)
copy_trips_task = PythonOperator(
task_id='load_trips_from_s3_to_redshift',
dag=dag,
python_callable=load_trip_data_to_redshift,
provide_context=True
)
create_stations_table = PostgresOperator(
task_id="create_stations_table",
dag=dag,
postgres_conn_id="redshift",
sql=sql_statements.CREATE_STATIONS_TABLE_SQL,
)
copy_stations_task = PythonOperator(
task_id='load_stations_from_s3_to_redshift',
dag=dag,
python_callable=load_station_data_to_redshift,
)
create_trips_table >> copy_trips_task
create_stations_table >> copy_stations_task
|
[
"f339339@gmail.com"
] |
f339339@gmail.com
|
76a9353851b054c98e8378600f4307a284e242c9
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02662/s484498630.py
|
e29ce3a687f8624090192e32bfac0f5f683a21d9
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
import numpy as np
n,s = map(int,input().split())
a = list(map(int,input().split()))
dp = np.zeros(s+1, dtype=np.int64)
"""
dp[i][j] := the number of ways, after looking at the first i elements, for the chosen subset A to sum to exactly j
transition -> when i grows by 1 there are 3 choices: put the element in the whole set only, leave it out of A, or put it in A.
"""
mod = 998244353
dp[0] = 1
for i in range(n):
    p = (dp * 2) % mod
    p[a[i]:] += dp[:-a[i]]
    dp = p % mod
print(dp[s])
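# A small brute-force cross-check of the same count (the sample values below
# are hypothetical, not judge input): every subset A with sum s contributes
# 2**(n - |A|), since each element outside A is either in the whole set or not.
from itertools import combinations

n2, s2, a2 = 3, 4, [2, 2, 4]
total = 0
for k in range(1, n2 + 1):
    for comb in combinations(range(n2), k):
        if sum(a2[i] for i in comb) == s2:
            total += 2 ** (n2 - k)  # free choice for every element outside A
print(total % 998244353)  # 6, matching dp[s2] computed on the same input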
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
37533e7e38a05044b0bdae8d1f37b6698f44c722
|
b53816386ea9ec52e7fce862a06fd065928c0083
|
/EpExtractor.py
|
aa1b30688f6534562cb53a1d826dfc6f5830a2be
|
[] |
no_license
|
grburgess/mnEpFit
|
3e3bcbfca1e244029f04144e67e3d86a65853219
|
c7008f3a71ebd9dade7e4746507024868ede7d3a
|
refs/heads/master
| 2021-05-30T01:21:23.896379
| 2015-05-20T07:50:46
| 2015-05-20T07:50:46
| 35,933,877
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
import json
import matplotlib.pyplot as plt
class EpExtractor(object):
    def __init__(self,file,ext_name=""):
        self.ext_name = ext_name
        self._ReadFile(file)
        self._PrepareData()
        print self._tbins[0]
        plt.loglog(self._tbins,self._Ep)
        self._WriteJSON()

    def _ReadFile(self,file):
        '''
        Virtual function
        '''
        pass

    def _WriteJSON(self):
        outdata = {"Ep":self._Ep,\
                   "EpErr":self._EpErr,\
                   "tbins":self._tbins}
        f = open("%sep_save_file.json" % self.ext_name,'w')
        json.dump(outdata,f) # Write to a JSON file
        f.close()
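# A hedged sketch (not part of the original module) of how a subclass might
# fill in the virtual hooks; the three-column ASCII format is an assumption:
class AsciiEpExtractor(EpExtractor):
    def _ReadFile(self,file):
        # hypothetical format: one "tbin Ep EpErr" triple per line
        self._rows = [line.split() for line in open(file)]

    def _PrepareData(self):
        self._tbins = [float(r[0]) for r in self._rows]
        self._Ep = [float(r[1]) for r in self._rows]
        self._EpErr = [float(r[2]) for r in self._rows]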
|
[
"jmichaelburgess@gmail.com"
] |
jmichaelburgess@gmail.com
|
58ddad151756db23c9a9cf8a848d1e2796b0cd70
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_elderly.py
|
e148c0716dce29ed715268660229fed3e87d894e
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
# class header
class _ELDERLY():
    def __init__(self,):
        self.name = "ELDERLY"
        self.definitions = [u'old people considered as a group: ']
        self.parents = []
        self.children = []
        self.properties = []
        self.jsondata = {}
        self.specie = 'nouns'

    def run(self, obj1 = [], obj2 = []):
        return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
f9a2749282d1c35368f740f61adfa1e06a07cfa0
|
131caeecc070839555b95382fe9c6ea77a618dce
|
/.history/Classiles/light_switch_20210614212521.py
|
c23dce879e163a83c076bdb9319ed48fee6b9b64
|
[
"Unlicense"
] |
permissive
|
minefarmer/Coding101-OOP
|
f128e34c95f5362b3d9a53bbac3d862c3f256263
|
d5655977559e3bd1acf6a4f185a6121cc3b05ce4
|
refs/heads/main
| 2023-05-22T18:42:37.769345
| 2021-06-18T00:28:06
| 2021-06-18T00:28:06
| 376,620,545
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
"""[Practice: Light Switch]
variable name        class name
                     ("instance")
ice        =         Ice()
"""


class Light:
    def toggle(self):
        self.on = not self.on

    def __init__(self):
        self.on = False

    def is_on(self):
        return self.on
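# quick usage check of the fixed class:
light = Light()
print(light.is_on())   # False
light.toggle()
print(light.is_on())   # True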
|
[
"pgoldfarm@yahoo.com"
] |
pgoldfarm@yahoo.com
|
c6486839fa928e444c6ac41aad0403ed310a77f4
|
67d9310435d8f99fc1c76f1374da97e4af1137a6
|
/myProject/settings.py
|
3587bb0cc665c5955134e27fe93c8dfe4c86bb87
|
[] |
no_license
|
coralisland-git/Account-app
|
a99c400b9ceb469626843e5b6441700f72e2c5ef
|
dbac5f677860fdbc03ebf4878c030542746b214f
|
refs/heads/master
| 2021-06-18T13:09:07.360079
| 2017-06-23T15:18:35
| 2017-06-23T15:18:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,946
|
py
|
"""
Django settings for myProject project.
Generated by 'django-admin startproject' using Django 1.8.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a6-gzpe_89pwr@0)s2bv_%92*p&*sm11s6(nui7+ngpqn!1o5n'
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'mydemo',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'myProject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myProject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static")
]
ADMIN_NAME = "rubyadmin"
ADMIN_PASSWORD = "rubyadmin"
adminName = "ruby"
adminpassword = "rubyhome1127password"
|
[
"coseasonruby@gmail.com"
] |
coseasonruby@gmail.com
|
ba4c496e73c24377a0a74528dc5b31c715cbf133
|
10717fe6f68c4ee9bcf27ee62e89581f4a030b8e
|
/extractor/douyutv.py
|
1560c79cb11ad6615149dcf68205d73daa098fda
|
[] |
no_license
|
HagerHosny199/Testing_Project
|
ff7f9a54b7a213c9d9ade0c5192845c2a29adc8b
|
9bc170263e239cc24ccfb2aa33b9913ff799ffe9
|
refs/heads/master
| 2020-05-17T20:57:01.750640
| 2019-05-08T22:13:06
| 2019-05-08T22:13:06
| 183,954,736
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,876
|
py
|
# coding: utf-8
from __future__ import unicode_literals
import time
import hashlib
import re
from .common import InfoExtractor
from utils import (
ExtractorError,
unescapeHTML,
unified_strdate,
urljoin,
)
class DouyuTVIE(InfoExtractor):
IE_DESC = '斗鱼'
_VALID_URL = r'https?://(?:www\.)?douyu(?:tv)?\.com/(?:[^/]+/)*(?P<id>[A-Za-z0-9]+)'
_TESTS = [{
'url': 'http://www.douyutv.com/iseven',
'info_dict': {
'id': '17732',
'display_id': 'iseven',
'ext': 'flv',
'title': 're:^清晨醒脑!根本停不下来! [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': r're:.*m7show@163\.com.*',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': '7师傅',
'is_live': True,
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.douyutv.com/85982',
'info_dict': {
'id': '85982',
'display_id': '85982',
'ext': 'flv',
'title': 're:^小漠从零单排记!——CSOL2躲猫猫 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'md5:746a2f7a253966a06755a912f0acc0d2',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': 'douyu小漠',
'is_live': True,
},
'params': {
'skip_download': True,
},
'skip': 'Room not found',
}, {
'url': 'http://www.douyutv.com/17732',
'info_dict': {
'id': '17732',
'display_id': '17732',
'ext': 'flv',
'title': 're:^清晨醒脑!根本停不下来! [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': r're:.*m7show@163\.com.*',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': '7师傅',
'is_live': True,
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.douyu.com/xiaocang',
'only_matching': True,
}, {
# \"room_id\"
'url': 'http://www.douyu.com/t/lpl',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
if video_id.isdigit():
room_id = video_id
else:
page = self._download_webpage(url, video_id)
room_id = self._html_search_regex(
r'"room_id\\?"\s*:\s*(\d+),', page, 'room id')
# Grab metadata from mobile API
room = self._download_json(
'http://m.douyu.com/html5/live?roomId=%s' % room_id, video_id,
note='Downloading room info')['data']
# 1 = live, 2 = offline
if room.get('show_status') == '2':
raise ExtractorError('Live stream is offline', expected=True)
# Grab the URL from PC client API
# The m3u8 url from mobile API requires re-authentication every 5 minutes
tt = int(time.time())
signContent = 'lapi/live/thirdPart/getPlay/%s?aid=pcclient&rate=0&time=%d9TUk5fjjUjg9qIMH3sdnh' % (room_id, tt)
sign = hashlib.md5(signContent.encode('ascii')).hexdigest()
video_url = self._download_json(
'http://coapi.douyucdn.cn/lapi/live/thirdPart/getPlay/' + room_id,
video_id, note='Downloading video URL info',
query={'rate': 0}, headers={
'auth': sign,
'time': str(tt),
'aid': 'pcclient'
})['data']['live_url']
title = self._live_title(unescapeHTML(room['room_name']))
description = room.get('show_details')
thumbnail = room.get('room_src')
uploader = room.get('nickname')
return {
'id': room_id,
'display_id': video_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'is_live': True,
}
class DouyuShowIE(InfoExtractor):
_VALID_URL = r'https?://v(?:mobile)?\.douyu\.com/show/(?P<id>[0-9a-zA-Z]+)'
_TESTS = [{
'url': 'https://v.douyu.com/show/rjNBdvnVXNzvE2yw',
'md5': '0c2cfd068ee2afe657801269b2d86214',
'info_dict': {
'id': 'rjNBdvnVXNzvE2yw',
'ext': 'mp4',
'title': '陈一发儿:砒霜 我有个室友系列!04-01 22点场',
'duration': 7150.08,
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': '陈一发儿',
'uploader_id': 'XrZwYelr5wbK',
'uploader_url': 'https://v.douyu.com/author/XrZwYelr5wbK',
'upload_date': '20170402',
},
}, {
'url': 'https://vmobile.douyu.com/show/rjNBdvnVXNzvE2yw',
'only_matching': True,
}]
def _real_extract(self, url):
url = url.replace('vmobile.', 'v.')
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
room_info = self._parse_json(self._search_regex(
r'var\s+\$ROOM\s*=\s*({.+});', webpage, 'room info'), video_id)
video_info = None
for trial in range(5):
# Sometimes Douyu rejects our request. Let's try it more times
try:
video_info = self._download_json(
'https://vmobile.douyu.com/video/getInfo', video_id,
query={'vid': video_id},
headers={
'Referer': url,
'x-requested-with': 'XMLHttpRequest',
})
break
except ExtractorError:
self._sleep(1, video_id)
if not video_info:
raise ExtractorError('Can\'t fetch video info')
formats = self._extract_m3u8_formats(
video_info['data']['video_url'], video_id,
entry_protocol='m3u8_native', ext='mp4')
upload_date = unified_strdate(self._html_search_regex(
r'<em>上传时间:</em><span>([^<]+)</span>', webpage,
'upload date', fatal=False))
uploader = uploader_id = uploader_url = None
mobj = re.search(
r'(?m)<a[^>]+href="/author/([0-9a-zA-Z]+)".+?<strong[^>]+title="([^"]+)"',
webpage)
if mobj:
uploader_id, uploader = mobj.groups()
uploader_url = urljoin(url, '/author/' + uploader_id)
return {
'id': video_id,
'title': room_info['name'],
'formats': formats,
'duration': room_info.get('duration'),
'thumbnail': room_info.get('pic'),
'upload_date': upload_date,
'uploader': uploader,
'uploader_id': uploader_id,
'uploader_url': uploader_url,
}
|
[
"hagarhosny19@gmail.com"
] |
hagarhosny19@gmail.com
|
a52be94d2d29ec70efb3833b2e39a16938664fb2
|
1beb0d3a73a97c5367cc54d37b34a7536b975d68
|
/object/LocateDealOrderObject.py
|
3ce43eb2ac775bea904fa2525cfb92d284d3714e
|
[] |
no_license
|
Hardworking-tester/HuaYing
|
a24aa271afe81c95241818586b1d1d5abd6b4282
|
4dd065806f20bfdec885fa2b40f2c22e5a8d4f15
|
refs/heads/master
| 2021-06-03T10:06:33.604494
| 2017-06-22T09:32:13
| 2017-06-22T09:32:13
| 42,507,030
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,034
|
py
|
#encoding:utf-8
from Data import get_number_by_data,ReadExcel
from selenium import webdriver
from object import OperateDealOrderObject
from selenium.webdriver.common.by import By
from PublicMethod.LocateElementCommonClass import CommonClass
class LocateDealOrderObject():
def getElementList(self,br,testcaseId):
testcase_id=testcaseId
excel=ReadExcel.ReadExcel()
excel_path="F:\\pytest\\xebest-autotest\\Data\\deal_order.xls"
excel_sheet=excel.getTableBySheetName(excel_path,"objname_locatemethod_locatedata")
rows=excel_sheet.nrows
element_list=[]
for i in range(1,rows):
element_list.append(excel_sheet.cell_value(i,0))
for element in element_list:
#call the method that checks whether the element needs to be operated on
if self.judgeElementIsOperate(element,testcase_id):
#call the method that locates the element
self.getLocateMethodAndData(br,element)
else:
print ("element %s does not need to be operated on" %element)
def judgeElementIsOperate(self,element,testcase_id):
excel=ReadExcel.ReadExcel()
excel_path="F:\\pytest\\xebest-autotest\\Data\\deal_order.xls"
excel_sheet=excel.getTableBySheetName(excel_path,"isoperateElement")
#get the row number where the element sits
row_index=get_number_by_data.GetRowAndColNumber().getRowAndColNumber(excel_path,"isoperateElement",element)[0]
#get the column number where the test case sits
col_index=get_number_by_data.GetRowAndColNumber().getRowAndColNumber(excel_path,"isoperateElement",testcase_id)[1]
operateFlag=excel_sheet.cell_value(row_index,col_index)
if operateFlag=="Y":
return True
else:
return False
def getLocateMethodAndData(self,br,element):
element=element
excel=ReadExcel.ReadExcel()
object_excel_path="F:\\pytest\\xebest-autotest\\Data\\deal_order.xls"
object_sheet=excel.getTableBySheetName(object_excel_path,"objname_locatemethod_locatedata")
#get the row where the element sits
row_index=get_number_by_data.GetRowAndColNumber().getRowAndColNumber(object_excel_path,"objname_locatemethod_locatedata",element)[0]
#get the column holding the locate method
locateMethod_Key=u"定位方式简述"
locateMethod_colNumber=get_number_by_data.GetRowAndColNumber().getRowAndColNumber(object_excel_path,"objname_locatemethod_locatedata",locateMethod_Key)[1]
#get the column holding the data needed for locating
locateData_key=u"定位所需数据"
locateData_colNumber=get_number_by_data.GetRowAndColNumber().getRowAndColNumber(object_excel_path,"objname_locatemethod_locatedata",locateData_key)[1]
#get the column holding the flag that says whether the element needs secondary locating
isSecondLocate_key=u"是否需要二次定位"
SecondLocate_key_colNumber=get_number_by_data.GetRowAndColNumber().getRowAndColNumber(object_excel_path,"objname_locatemethod_locatedata",isSecondLocate_key)[1]
#store the locate method, locate data and secondary-locating flag read from the excel sheet
old_how=object_sheet.cell_value(row_index,locateMethod_colNumber)
old_what=object_sheet.cell_value(row_index,locateData_colNumber)
secondLocate=object_sheet.cell_value(row_index,SecondLocate_key_colNumber)
locate_method_dict={'id':By.ID,'css':By.CSS_SELECTOR,'xpath':By.XPATH,'linktext':By.LINK_TEXT}
if not(CommonClass().judgeSecondLocateElement(secondLocate)):
if old_how=="switchHandle":
index=int(old_what)
CommonClass().switchHandle(br,index)
else:
new_how=locate_method_dict[old_how]
located_element=CommonClass().findElement(br,new_how,old_what)
OperateDealOrderObject.OperateDealOrderObject().operateLocatedElement(element,located_element)
else:
new_first_how=locate_method_dict[old_how]
#get the column holding the secondary locate method
secondLocatemethod_key=u"二次定位的方式"
secondLocate_colNumber=get_number_by_data.GetRowAndColNumber().getRowAndColNumber(object_excel_path,"objname_locatemethod_locatedata",secondLocatemethod_key)[1]
old_second_how=object_sheet.cell_value(row_index,secondLocate_colNumber)
#get the column holding the data needed for secondary locating
secondLocateData_key=u"二次定位所需数据"
secondLocateData_colNumber=get_number_by_data.GetRowAndColNumber().getRowAndColNumber(object_excel_path,"objname_locatemethod_locatedata",secondLocateData_key)[1]
secondLocate_what=object_sheet.cell_value(row_index,secondLocateData_colNumber)
new_second_how=locate_method_dict[old_second_how]
second_located_element=CommonClass().locateElementIndirectly(br,new_first_how,old_what,new_second_how,secondLocate_what)
OperateDealOrderObject.OperateDealOrderObject().operateLocatedElement(element,second_located_element)
|
[
"373391120@qq.com"
] |
373391120@qq.com
|
1e8d43e99c4a6c18af0fa23b95b9988a735d38cb
|
437e905d8c214dc25c559b1dc03eaf9f0c85326f
|
/is28/beresnev28/beresnev_lr/zadanie_2-16.py
|
c09833e26b9950e9eca8f7d7c177ee605c551572
|
[] |
no_license
|
AnatolyDomrachev/karantin
|
542ca22c275e39ef3491b1c0d9838e922423b5a9
|
0d9f60207e80305eb713fd43774e911fdbb9fbad
|
refs/heads/master
| 2021-03-29T03:42:43.954727
| 2020-05-27T13:24:36
| 2020-05-27T13:24:36
| 247,916,390
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
a=[[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]
b=[[],[],[],]
print(a[0])
print(a[1])
print(a[2])
print(a[3])
i=(int(input('enter the row number = '))-1)
k=(int(input('enter the column number = '))-1)
for f in range(4):
    a[f][k]=0
for t in range(4):
    a[i][t]=0
print(a[0])
print(a[1])
print(a[2])
print(a[3])
|
[
"you@example.com"
] |
you@example.com
|
99520ba9383618cd1f2b8302fea12bb4177eff2a
|
8f8ac99fd3ed9ceb36778b404f6fdd0b6899d3f4
|
/pyobjc-framework-FileProviderUI/PyObjCTest/test_fpuiactionextensioncontext.py
|
5cce4d7ed43ae4829e41889ea9bd508810e4738a
|
[
"MIT"
] |
permissive
|
strogo/pyobjc
|
ac4201c7742eb75348328eeecb7eedf4e3458de3
|
2579c5eaf44b0c5af77ee195c417d2c65e72dfda
|
refs/heads/master
| 2023-07-13T00:41:56.448005
| 2021-08-24T06:42:53
| 2021-08-24T06:42:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 698
|
py
|
import FileProviderUI
from PyObjCTools.TestSupport import TestCase, min_os_level
class TestFPUIActionExtensionContext(TestCase):
    def test_constants(self):
        self.assertEqual(FileProviderUI.FPUIExtensionErrorCodeUserCancelled, 0)
        self.assertEqual(FileProviderUI.FPUIExtensionErrorCodeFailed, 1)

    @min_os_level("10.15")
    def test_constants10_15(self):
        self.assertIsInstance(FileProviderUI.FPUIErrorDomain, str)

    @min_os_level("10.15")
    def test_methods10_15(self):
        self.assertArgIsBlock(
            FileProviderUI.FPUIActionExtensionContext.completeRequestReturningItems_completionHandler_,  # noqa: B950
            1,
            b"vZ",
        )
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
4feae1113ac543f26e086b2e4dd8be4307a102f4
|
08bb74d61a8bd8da07784f1146b6a2461f56b02a
|
/demo_python/p_1/tm.py
|
406a15bead40a566d612b61596980efdcf5e7757
|
[] |
no_license
|
hphp/demo
|
6ee25df2e6b4f33e5c2c4b276ec394ea1a0f817c
|
f83cd0ce569d92410ca1320e229cde0e88561ec8
|
refs/heads/master
| 2021-05-16T02:04:47.979776
| 2016-09-19T13:16:10
| 2016-09-19T13:16:10
| 9,994,208
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,779
|
py
|
#!/usr/local/bin/python
import os
import sys
import time
import _mysql

def hp_query(query):
    con = None
    version = -1
    try :
        con = _mysql.connect('localhost','root','','ip_distribution')
        con.query(query)
        result = con.use_result()
        v = []
        while True:
            rows = result.fetch_row()
            if not rows :
                break
            v.append(rows[0])
        for i in enumerate(v):
            version = i[1][0]
        print version
    except _mysql.Error, e :
        print "Error %d: %s" % (e.args[0] , e.args[1])
        sys.exit(1)
    finally:
        if con:
            con.close()
    return version
while True :
    version = -1
    version = hp_query("select max(version) from configure where state = 1 ")
    print 'hi' + format(version)
    if version < 0 :
        time.sleep(6)
        print "no state = 1 -- pause"
        continue
    '''
    query = "select count(*) from step_display where version = %d and if_stored = 0 " % (version)
    cnt = hp_query(query)
    if(cnt > 0)
    '''
    file = time.gmtime()
    file_name = format(file.tm_year) + format(file.tm_mon) + format(file.tm_mday) + format(file.tm_hour) + format(file.tm_min) + format(file.tm_sec)
    print file_name
    out_file = "/root/happyhan/ip_distribution/tm.out" + file_name
    print out_file
    f_tm_in = open("/root/happyhan/ip_distribution/tm.in","r+")
    content = format(version) + "\n100 100 0.85\n4\n1000\n0\n1\n1\n"
    f_tm_in.write(content)
    f_tm_in = open("/root/happyhan/ip_distribution/tm.in","r")
    print f_tm_in.readlines()
    cmd_2 = "/root/happyhan/ip_distribution/tm > " + out_file + " < /root/happyhan/ip_distribution/tm.in"
    print cmd_2
    os.system(cmd_2)
    time.sleep(6)
|
[
"hphpcarrot@gmail.com"
] |
hphpcarrot@gmail.com
|
e7c4de9a6b3d90e5b4be81f8597dbcefe813443e
|
b980c0bae0cff8533253c135449beb6e09759dca
|
/Grader_Exercise/08_Dict/08_Dict_23.py
|
82d1da939f42f32157d3dff8a5303588ffde5419
|
[] |
no_license
|
manhanton/COM-PROG
|
1f76985b3f3fea54057a0da1d3911dc91998c5be
|
7a4f2c62ecd6677ec1f818a5d115aa0fb182b3a2
|
refs/heads/main
| 2023-06-18T10:25:26.448133
| 2021-07-16T07:46:45
| 2021-07-16T07:46:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
def reverse(d):
    nd = {}
    for key,value in d.items() : nd[value] = key
    return nd

d = {}
for i in range(int(input())) :
    x = input().split()
    d[x[0]+' '+x[1]] = x[2]
rd = reverse(d)
for i in range(int(input())) :
    y = input()
    if y in d.keys() : print('%s --> %s' % (y,d[y]))
    elif y in rd.keys() : print('%s --> %s' % (y,rd[y]))
    else : print('%s --> %s' % (y,'Not found'))
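# note: reverse() assumes the mapping is one-to-one; with duplicate values the
# last key silently wins, e.g. reverse({'a': '1', 'b': '1'}) == {'1': 'b'}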
|
[
"meen2545@gmail.com"
] |
meen2545@gmail.com
|
0429e2284d15b50a7b4df5b5e76249314c694204
|
2d01e5d2da7bf0836bd0ebb9185469e041e0f577
|
/script/script_scrape_symbol_ard_marketwatch.py
|
2fe5444add389a69002f5f70bd7ee5400c2687ef
|
[] |
no_license
|
kozzion/rivernode_finance
|
434c0f668a3131ad0bf94b9d57c1886c32ac6d5e
|
17e88f990c9f5e65dc3fe892a28c163a4e6769f2
|
refs/heads/master
| 2022-11-15T03:44:14.936657
| 2020-06-22T15:44:22
| 2020-06-22T15:44:22
| 254,896,067
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,538
|
py
|
import sys
import os
import json
import hashlib
sys.path.append(os.path.abspath('../../rivernode_core'))
from rivernode_core.system_config import SystemConfig
from rivernode_core.persistency.table_object_loader_disk import TableObjectLoaderDisk
sys.path.append(os.path.abspath('../../rivernode_finance'))
from rivernode_finance.client_marketwatch import ClientMarketwatch
from rivernode_finance.persistency_quote import PersistencyQuote
system_config = SystemConfig()
loader_table = TableObjectLoaderDisk(system_config.load_path_dir_database())
pq = PersistencyQuote(loader_table.load_table_for_list_key(['trading']))
client = ClientMarketwatch()
## scrape
# client.scape_symbol(pq)
## clean up
# pq.delete_tag('sector_')
# pq.replace_tag('country_trade_de&iso=xber', 'country_trade_de')
# pq.replace_tag('country_trade_de&iso=xfra', 'country_trade_de')
# pq.replace_tag('country_trade_de&iso=xstu', 'country_trade_de')
# pq.replace_tag('country_trade_de&iso=xdus', 'country_trade_de')
# pq.replace_tag('country_trade_de&iso=xham', 'country_trade_de')
# pq.replace_tag('country_trade_de&iso=xhan', 'country_trade_de')
# pq.replace_tag('country_trade_de&iso=xmun', 'country_trade_de')
pq.add_tag_default()
# # country_ar = 'argentina'
list_symbol = pq.load_list_symbol_for_tag_all(['country_work_pe', 'exchange_xnys'])
for symbol in list_symbol:
print(symbol)
list_symbol = pq.load_list_symbol_for_tag_all(['country_work_pe', 'exchange_xnas'])
for symbol in list_symbol:
print(symbol)
|
[
"jaap.oosterbroek@gmail.com"
] |
jaap.oosterbroek@gmail.com
|
a74dca56a62b18a2bb84892722b4cfe1650e9df0
|
2709e527c217a8264b48e2f549b3284e5ccb9551
|
/0x0F-python-object_relational_mapping/relationship_state.py
|
d28e8a488307b18586655ba4e87563fc4629fed4
|
[] |
no_license
|
kwhit2/holbertonschool-higher_level_programming
|
489d6b88ed14b9f2efd4637d8a71ae569b5027f6
|
2660516b12fee0f03c4025ba1d8d2762a8880a06
|
refs/heads/main
| 2023-05-22T17:57:02.035803
| 2021-06-12T18:43:54
| 2021-06-12T18:43:54
| 319,346,696
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 533
|
py
|
#!/usr/bin/python3
""" Improved the file model_state.py with relationship class attribute """
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
Base = declarative_base()
class State(Base):
    """ class State that inherits from Base """
    __tablename__ = 'states'
    id = Column(Integer, primary_key=True, nullable=False)
    name = Column(String(128), nullable=False)
    cities = relationship('City', backref='state')
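# For the backref above to resolve at runtime, a matching City model must be
# defined elsewhere; a minimal sketch of what it presumably looks like
# (table and column names are assumptions, reusing the imports above):
class City(Base):
    """ class City, reachable from State via the 'cities' relationship """
    __tablename__ = 'cities'
    id = Column(Integer, primary_key=True, nullable=False)
    name = Column(String(128), nullable=False)
    state_id = Column(Integer, ForeignKey('states.id'), nullable=False)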
|
[
"kfw2@outlook.com"
] |
kfw2@outlook.com
|
64c3eb71437a4c5058884aa72b33396beb0067b1
|
18a1a72b5f2f1c4a668a96a6365daabe35729750
|
/user/social_views.py
|
e457377a82d90aadc07ff6d46f278b3873719d25
|
[] |
no_license
|
AktanKasymaliev/django-ashar-app-backend
|
200d0af0fd60d90834895f2c8924d0f5fe3a03c5
|
0b8d59203399487a1ee4dcae24ed6c716960380c
|
refs/heads/main
| 2023-07-02T06:55:26.903742
| 2021-08-04T14:54:03
| 2021-08-04T14:54:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter
from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter
from user.views import SocialLoginView
class GoogleLogin(SocialLoginView):
    adapter_class = GoogleOAuth2Adapter


class FacebookLogin(SocialLoginView):
    adapter_class = FacebookOAuth2Adapter
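# These views still need URL wiring; a minimal sketch, assuming
# SocialLoginView behaves like a standard class-based view
# (paths and route names below are hypothetical):
#
#   # urls.py
#   from django.urls import path
#   from user.social_views import GoogleLogin, FacebookLogin
#
#   urlpatterns = [
#       path('auth/google/', GoogleLogin.as_view(), name='google_login'),
#       path('auth/facebook/', FacebookLogin.as_view(), name='facebook_login'),
#   ]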
|
[
"aktan.kasymaliev@icloud.com"
] |
aktan.kasymaliev@icloud.com
|
2f970ad0404436ed1c8ea829b0d349c8cf94ea3e
|
09bb17f07b127ac7d7d44969d55c92fa979111e2
|
/.PyCharmCE2019.2/system/python_stubs/-1448197883/PIL/_imagingcms.py
|
627fb7996c066f82fdbeb37c708d6041179e88c7
|
[] |
no_license
|
Tusharec82/ROCKY
|
ea19201275f853603870a68275bdf7d1e64a6084
|
d9812b0fefde8c3d9ffe7947cf331af12dd25ab5
|
refs/heads/master
| 2020-08-09T00:34:50.707405
| 2019-10-09T18:13:26
| 2019-10-09T18:13:26
| 213,957,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,810
|
py
|
# encoding: utf-8
# module PIL._imagingcms
# from /usr/lib/python3/dist-packages/PIL/_imagingcms.cpython-36m-x86_64-linux-gnu.so
# by generator 1.147
# no doc
# no imports
# Variables with simple values
littlecms_version = '20.90'
# functions
def buildProofTransform(*args, **kwargs): # real signature unknown
pass
def buildTransform(*args, **kwargs): # real signature unknown
pass
def createProfile(*args, **kwargs): # real signature unknown
pass
def profile_frombytes(*args, **kwargs): # real signature unknown
pass
def profile_fromstring(*args, **kwargs): # real signature unknown
pass
def profile_open(*args, **kwargs): # real signature unknown
pass
def profile_tobytes(*args, **kwargs): # real signature unknown
pass
# classes
class CmsProfile(object):
# no doc
def is_intent_supported(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
attributes = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
blue_colorant = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
blue_primary = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
chromaticity = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
chromatic_adaptation = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
clut = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
colorant_table = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
colorant_table_out = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
colorimetric_intent = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
color_space = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
connection_space = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
copyright = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
creation_date = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
device_class = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
green_colorant = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
green_primary = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
header_flags = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
header_manufacturer = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
header_model = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
icc_measurement_condition = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
icc_version = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
icc_viewing_condition = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
intent_supported = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
is_matrix_shaper = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
luminance = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
manufacturer = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
media_black_point = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
media_white_point = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
media_white_point_temperature = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
model = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
pcs = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
perceptual_rendering_intent_gamut = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
product_copyright = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
product_desc = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
product_description = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
product_manufacturer = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
product_model = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
profile_description = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
profile_id = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
red_colorant = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
red_primary = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
rendering_intent = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
saturation_rendering_intent_gamut = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
screening_description = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
target = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
technology = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
version = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
viewing_condition = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
xcolor_space = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
# variables with complex values
__loader__ = None # (!) real value is '<_frozen_importlib_external.ExtensionFileLoader object at 0x7f610258f860>'
__spec__ = None # (!) real value is "ModuleSpec(name='PIL._imagingcms', loader=<_frozen_importlib_external.ExtensionFileLoader object at 0x7f610258f860>, origin='/usr/lib/python3/dist-packages/PIL/_imagingcms.cpython-36m-x86_64-linux-gnu.so')"
|
[
"TPatel@.dal.ca"
] |
TPatel@.dal.ca
|
73d8ba35ec791beffc2e3f13c980754b8a857f23
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part003405.py
|
5a684c947584fd60604a2244ef648e9a15ca31ab
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,655
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher37493(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1}), [
(VariableWithCount('i2.2.1.2.2.2.1.0', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher37493._instance is None:
CommutativeMatcher37493._instance = CommutativeMatcher37493()
return CommutativeMatcher37493._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 37492
if len(subjects) >= 1 and isinstance(subjects[0], Pow):
tmp1 = subjects.popleft()
subjects2 = deque(tmp1._args)
# State 37494
if len(subjects2) >= 1:
tmp3 = subjects2.popleft()
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.1', tmp3)
except ValueError:
pass
else:
pass
# State 37495
if len(subjects2) >= 1:
tmp5 = subjects2.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2', tmp5)
except ValueError:
pass
else:
pass
# State 37496
if len(subjects2) == 0:
pass
# State 37497
if len(subjects) == 0:
pass
# 0: x**j
yield 0, subst2
subjects2.appendleft(tmp5)
subjects2.appendleft(tmp3)
subjects.appendleft(tmp1)
return
yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
82c1c52804b41ca95993b01dc43ebd395f50a466
|
7ae44f6975561ff5542cd369dcd04d53093db887
|
/Data Structures and Algorithms in Python/11_Linked_List_2/test.py
|
23bb821782d5d54fb6567da6bc799dcb4f68d659
|
[] |
no_license
|
ashisharora24/learning_tutorials_practice
|
89208a77ad162265c6573ca4559ebf6f4a6f8f18
|
57f461908d0c4d58d831ec375c428179fa69cb3f
|
refs/heads/master
| 2020-05-21T05:32:26.397725
| 2019-07-23T10:36:06
| 2019-07-23T10:36:06
| 185,923,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
from time import *
t2 = perf_counter()
t1 = perf_counter()
counter = 0
for i in range(10000):
    counter += 1
    print(i, end=" ")
    # r = req...
    if counter == 61:
        t2 = perf_counter()
        print("\n sleeping \n",10 - t2 + t1)
        sleep(10 - t2 + t1)
        counter = 1
        t1 = perf_counter()
|
[
"ashisharora24@gmail.com"
] |
ashisharora24@gmail.com
|
996eb7912cb82e037205e02ff650c6061318278d
|
266947fd84eed629ed0c21f6d91134239512afd9
|
/BeginnerContest_B/147.py
|
1181ff0ecd9ce31a917fcf62e9df63d19fa0c9c8
|
[] |
no_license
|
SkiMsyk/AtCoder
|
c86adeec4fa470ec14c1be7400c9fc8b3fb301cd
|
8102b99cf0fb6d7fa304edb942d21cf7016cba7d
|
refs/heads/master
| 2022-09-03T01:23:10.748038
| 2022-08-15T01:19:55
| 2022-08-15T01:19:55
| 239,656,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
# Palindrome-philia
# input
S = input()
# solve
if len(S) % 2 != 0:
    S = S[:len(S)//2] + S[len(S)//2 + 1:]
res = sum([i != j for i, j in zip(S[:len(S)//2], S[len(S)//2:][-1::-1])])
# output
print(res)
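# worked example (not from judge input): S = "abcba" -> odd length, drop the
# middle char -> "abba"; halves "ab" vs reversed "ba" == "ab" -> res = 0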
|
[
"sakaimasayuki@sakaimasayukinoMacBook-puro.local"
] |
sakaimasayuki@sakaimasayukinoMacBook-puro.local
|
8a2ff5c31ecbccf07f06b151a8aae5d8e32aa02a
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/21/usersdata/69/7600/submittedfiles/exercicio24.py
|
7231844c7a7561c81bde56eb4f4805952f045a09
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
a=int(input('Enter the value of a: '))
b=int(input('Enter the value of b: '))
i=1
c=0
while i<=b:
    if a%i==0 and b%i==0:
        c=i
    i=i+1
print c
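# the same result via Euclid's algorithm (a sketch; avoids the linear scan):
x, y = a, b
while y:
    x, y = y, x % y
print x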
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
aee318c6b0606346facce9b35398fcde1d8aefe0
|
08ee36e0bb1c250f7f2dfda12c1a73d1984cd2bc
|
/src/mnistk/networks/conv3dtanh_20.py
|
5ceab17b3a130f554d3b8d621403d7c98fcfe4f2
|
[] |
no_license
|
ahgamut/mnistk
|
58dadffad204602d425b18549e9b3d245dbf5486
|
19a661185e6d82996624fc6fcc03de7ad9213eb0
|
refs/heads/master
| 2021-11-04T07:36:07.394100
| 2021-10-27T18:37:12
| 2021-10-27T18:37:12
| 227,103,881
| 2
| 1
| null | 2020-02-19T22:07:24
| 2019-12-10T11:33:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,600
|
py
|
# -*- coding: utf-8 -*-
"""
conv3dtanh_20.py
:copyright: (c) 2019 by Gautham Venkatasubramanian.
:license: MIT
"""
import torch
from torch import nn
class Conv3dTanh_20(nn.Module):
def __init__(self):
nn.Module.__init__(self)
self.f0 = nn.Conv3d(in_channels=1, out_channels=13, kernel_size=(7, 7, 7), stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), groups=1, bias=True, padding_mode='zeros')
self.f1 = nn.Tanh()
self.f2 = nn.Conv3d(in_channels=13, out_channels=25, kernel_size=(1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), groups=1, bias=True, padding_mode='zeros')
self.f3 = nn.Conv3d(in_channels=25, out_channels=64, kernel_size=(1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), groups=1, bias=False, padding_mode='zeros')
self.f4 = nn.Conv3d(in_channels=64, out_channels=28, kernel_size=(1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), groups=1, bias=False, padding_mode='zeros')
self.f5 = nn.Tanh()
self.f6 = nn.Conv3d(in_channels=28, out_channels=10, kernel_size=(10, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), groups=1, bias=False, padding_mode='zeros')
self.f7 = nn.LogSoftmax(dim=1)
def forward(self, *inputs):
x = inputs[0]
x = x.view(x.shape[0],1,16,7,7)
x = self.f0(x)
x = self.f1(x)
x = self.f2(x)
x = self.f3(x)
x = self.f4(x)
x = self.f5(x)
x = self.f6(x)
x = x.view(x.shape[0],10)
x = self.f7(x)
return x
|
[
"41098605+ahgamut@users.noreply.github.com"
] |
41098605+ahgamut@users.noreply.github.com
|
86298f50d044ed13be501c49ad6bcf4ba3d5ffaf
|
b99f518e5289da9d068154cefd7b9f13c8d6564f
|
/VSCode_work/chapter16/chapter16_16_5.py
|
98addd58a6f830d481ce26f5bae44635ea5d202a
|
[
"MIT"
] |
permissive
|
yangyahu-1994/Python-Crash-Course
|
e6a81feba4bdbef297387577ea36b7027bf61c8f
|
6f8ef7fe8466d88931a0d3cc423ba5d966663b9d
|
refs/heads/master
| 2023-07-29T22:29:06.151253
| 2021-10-05T14:04:32
| 2021-10-05T14:04:32
| 313,314,086
| 18
| 12
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,423
|
py
|
# import the required modules
import json
import pygal_maps_world.maps
from chapter16_16_5_country_codes import get_country_code
from pygal.style import RotateStyle
# load the data into a list
filename = '/home/yyh/Documents/VSCode_work/chapter16/population_data.json'
with open(filename) as f:
    pop_data = json.load(f)

# build a dictionary of population counts
cc_populations = {}
for pop_dict in pop_data: # each record is stored in pop_dict in turn
    if pop_dict['Year'] == '2010':
        country = pop_dict['Country Name']
        population = int(float(pop_dict['Value']))
        code = get_country_code(country)
        if code:
            cc_populations[code] = population

# split the countries into three groups by population size
cc_pops_1, cc_pops_2, cc_pops_3 = {}, {}, {}
for cc, pop in cc_populations.items():
    if pop < 10000000:
        cc_pops_1[cc] = pop
    elif pop < 1000000000:
        cc_pops_2[cc] = pop
    else:
        cc_pops_3[cc] = pop

# check how many countries each group contains
print(len(cc_pops_1), len(cc_pops_2), len(cc_pops_3))

wm = pygal_maps_world.maps.World()
wm_style = RotateStyle('#336699') # create a style instance
wm = pygal_maps_world.maps.World(style=wm_style)
wm.title = 'World Population in 2010, by Country'
wm.add('0-10m', cc_pops_1)
wm.add('10m-1bn', cc_pops_2)
wm.add('>1bn', cc_pops_3)
wm.render_to_file('/home/yyh/Documents/VSCode_work/chapter16/world_population_16_5.svg')
|
[
"yyh_19940317@163.com"
] |
yyh_19940317@163.com
|
c7e7f24927496acf6bd5567a6c1947d2a8713bd1
|
100fdb7bc25acda0953f238c6d1104fda786ee7a
|
/probreg/callbacks.py
|
17c2290b8cc732327fb23d0422177f6903656178
|
[
"MIT"
] |
permissive
|
Yellowshuohahaha/probreg
|
1f219821fe4706b58aaf70f040aea3bc660bc103
|
31cade406af7f20a20a85e2e3764c55e5b9e986f
|
refs/heads/master
| 2022-11-16T08:54:45.610504
| 2020-07-18T02:30:28
| 2020-07-18T02:30:58
| 283,975,360
| 1
| 0
|
MIT
| 2020-07-31T07:46:06
| 2020-07-31T07:46:06
| null |
UTF-8
|
Python
| false
| false
| 3,385
|
py
|
import copy
import open3d as o3
import matplotlib.pyplot as plt


class Plot2DCallback(object):
    """Display the 2D registration result of each iteration.

    Args:
        source (numpy.ndarray): Source point cloud data.
        target (numpy.ndarray): Target point cloud data.
        save (bool, optional): If this flag is True,
            each iteration image is saved in a sequential number.
    """
    def __init__(self, source, target, save=False,
                 keep_window=True):
        self._source = source
        self._target = target
        self._result = copy.deepcopy(self._source)
        self._save = save
        self._cnt = 0
        plt.axis('equal')
        plt.plot(self._source[:, 0], self._source[:, 1], 'ro', label='source')
        plt.plot(self._target[:, 0], self._target[:, 1], 'g^', label='target')
        plt.plot(self._result[:, 0], self._result[:, 1], 'bo', label='result')
        plt.legend()
        plt.draw()

    def __call__(self, transformation):
        self._result = transformation.transform(self._source)
        plt.cla()
        plt.axis('equal')
        plt.plot(self._source[:, 0], self._source[:, 1], 'ro', label='source')
        plt.plot(self._target[:, 0], self._target[:, 1], 'g^', label='target')
        plt.plot(self._result[:, 0], self._result[:, 1], 'bo', label='result')
        plt.legend()
        if self._save:
            plt.savefig('image_%04d.png' % self._cnt)
        plt.draw()
        plt.pause(0.001)
        self._cnt += 1


class Open3dVisualizerCallback(object):
    """Display the 3D registration result of each iteration.

    Args:
        source (numpy.ndarray): Source point cloud data.
        target (numpy.ndarray): Target point cloud data.
        save (bool, optional): If this flag is True,
            each iteration image is saved in a sequential number.
        keep_window (bool, optional): If this flag is True,
            the drawing window blocks after registration is finished.
        fov: Field of view (degree).
    """
    def __init__(self, source, target, save=False,
                 keep_window=True, fov=None):
        self._vis = o3.visualization.Visualizer()
        self._vis.create_window()
        self._source = source
        self._target = target
        self._result = copy.deepcopy(self._source)
        self._save = save
        self._keep_window = keep_window
        self._source.paint_uniform_color([1, 0, 0])
        self._target.paint_uniform_color([0, 1, 0])
        self._result.paint_uniform_color([0, 0, 1])
        self._vis.add_geometry(self._source)
        self._vis.add_geometry(self._target)
        self._vis.add_geometry(self._result)
        if fov is not None:
            ctr = self._vis.get_view_control()
            ctr.change_field_of_view(step=fov)
        self._cnt = 0

    def __del__(self):
        if self._keep_window:
            self._vis.run()
        self._vis.destroy_window()

    def __call__(self, transformation):
        self._result.points = transformation.transform(self._source.points)
        self._vis.update_geometry(self._source)
        self._vis.update_geometry(self._target)
        self._vis.update_geometry(self._result)
        self._vis.poll_events()
        self._vis.update_renderer()
        if self._save:
            self._vis.capture_screen_image("image_%04d.jpg" % self._cnt)
        self._cnt += 1
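# A minimal sketch of driving Plot2DCallback by hand; ToyShift is a
# hypothetical stand-in for a probreg transformation (only .transform is needed):
if __name__ == '__main__':
    import numpy as np

    class ToyShift(object):
        def __init__(self, offset):
            self._offset = offset

        def transform(self, points):
            # mimic the transformation interface used by the callbacks
            return points + self._offset

    src = np.random.rand(30, 2)
    tgt = src + np.array([0.5, 0.2])
    cb = Plot2DCallback(src, tgt)
    cb(ToyShift(np.array([0.25, 0.1])))  # redraws source/target/result once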
|
[
"nekanat.stock@gmail.com"
] |
nekanat.stock@gmail.com
|
02c7a58b6b502e705b6ff5ff30e1577c75b11799
|
b4cc597f7efa74d3bdea48aaeca7c675235351d6
|
/param_files/0.0eV/reps_files/file_format_Hz.py
|
39e4383d427caaa7151dd4a352dab909becdb16d
|
[] |
no_license
|
franciscovillaescusa/Euclid_neutrino_comparison
|
52de9fc72b1240319bd63341b94abb82471c721a
|
9483de6512693d7303c51b371e8746425ea9c3aa
|
refs/heads/master
| 2023-04-19T21:45:42.932627
| 2021-04-29T17:54:32
| 2021-04-29T17:54:32
| 303,197,191
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,072
|
py
|
import numpy as np
import sys
########################## create Hz file ###################################
f_in = '0.0eV_hubble.txt' #file provided by Matteo
f_out = 'Hz.txt' #file to be used by Gadget
bins = 1000 #number of bins in the new file
z_max = 127.0 #maximum redshift of the new file
z_min = 0.0 #minimum redshift of the new file
#read original file; H(z) in km/s/Mpc
z,Hz = np.loadtxt(f_in,unpack=True)
#create a new (1+z) array
z_new = np.logspace(np.log10(1.0+z_min),np.log10(1.0+z_max),bins)
#interpolate from the original table. The H(z) has to be in Gadget units:
#km/s/(kpc/h), thus we need to multiply the Hz of Matteo by 1.0/(1000.0*h)
Hz_new = np.interp(z_new-1,z,Hz)/(Hz[0]/100.0)/1000.0
#create the w=-1 array
w = np.ones(bins)*(-1.0)
#the format of the H(z) file is 1+z,w,H(z), where 1+z should be decreasing
np.savetxt(f_out,np.transpose([z_new[::-1],w,Hz_new[::-1]]))
#############################################################################
|
[
"villaescusa.francisco@gmail.com"
] |
villaescusa.francisco@gmail.com
|
a245e763e652cab4a89be27f2d41294d4526d33a
|
a9dc42e9f54b549fcdd695817e347abfd8f2869f
|
/old/old_bin/cru_ts323_prism_update_launcher.py
|
9f4c80bba7d44a96d914f80d42d3712c30f99745
|
[
"MIT"
] |
permissive
|
yusheng-wang/downscale
|
2e77d070115ead3034c154d29f1c533976228f13
|
3fe8ea1774cf82149d19561ce5f19b25e6cba6fb
|
refs/heads/master
| 2023-04-10T03:25:08.806859
| 2019-09-21T17:34:35
| 2019-09-21T17:34:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,713
|
py
|
# SCRIPT TO RUN THE CRU TS3.1 BUILT SCRIPT OVER THE CRU TS3.2.3 UPDATE (SEPT.2015)
# WHICH EXTENDS THE SERIES TO 12/2014.
# THIS IS CURRENTLY WORKING FOR CLD, TMP, VAP, AND MORE TO COME!
# # # # #
# Author: Michael Lindgren (malindgren@alaska.edu)
# # # # #
# CURRENTLY SETUP TO RUN ON EOS.
# # CLD
# import os
# os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
# ncores = '14'
# base_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts323_prism'
# cru_ts31 = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts323/cru_ts3.23.1901.2014.cld.dat.nc'
# cl20_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/cld/akcan'
# template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
# anomalies_calc_type = 'relative'
# downscaling_operation = 'mult'
# climatology_begin = '1961'
# climatology_end = '1990'
# year_begin = '1901'
# year_end = '2014'
# variable = 'cld'
# metric = 'pct'
# args_tuples = [ ('hi', cru_ts31), ('ci', cl20_path), ('tr', template_raster_fn),
# ('base', base_path), ('bt', year_begin), ('et', year_end),
# ('cbt', climatology_begin), ('cet', climatology_end),
# ('nc', ncores), ('at', anomalies_calc_type), ('m', metric),
# ('dso', downscaling_operation), ('v', variable) ]
# args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
# os.system( 'ipython2.7 -- tas_cld_cru_ts31_to_cl20_downscaling.py ' + args )
# TAS
import os
os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
ncores = '14'
base_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts323_prism'
cru_ts31 = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts323/cru_ts3.23.1901.2014.tmp.dat.nc'
cl20_path = '/Data/Base_Data/Climate/AK_CAN_2km/historical/singleBand/prism/AK_CAN_2km_PRISM/AK_CAN_geotiffs/tas/ak83albers'
template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
anomalies_calc_type = 'absolute'
downscaling_operation = 'add'
climatology_begin = '1971'
climatology_end = '2000'
year_begin = '1901'
year_end = '2014'
variable = 'tas'
metric = 'C'
args_tuples = [ ('hi', cru_ts31), ('ci', cl20_path), ('tr', template_raster_fn),
('base', base_path), ('bt', year_begin), ('et', year_end),
('cbt', climatology_begin), ('cet', climatology_end),
('nc', ncores), ('at', anomalies_calc_type), ('m', metric),
('dso', downscaling_operation), ('v', variable) ]
args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
os.system( 'ipython2.7 -- tas_cld_cru_ts31_to_cl20_downscaling.py ' + args )
# VAP (HUR)
import os
os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
ncores = '14'
base_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts323'
cru_ts31_vap = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts323/cru_ts3.23.1901.2014.vap.dat.nc'
cru_ts31_tas = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts323/cru_ts3.23.1901.2014.tmp.dat.nc'
cl20_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/hur/akcan' # hur
template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
anomalies_calc_type = 'relative'
downscaling_operation = 'mult'
climatology_begin = '1961'
climatology_end = '1990'
year_begin = '1901'
year_end = '2014'
variable = 'hur'
metric = 'pct'
args_tuples = [ ('hhi', cru_ts31_vap), ('thi', cru_ts31_tas), ('ci', cl20_path), ('tr', template_raster_fn),
('base', base_path), ('bt', year_begin), ('et', year_end),
('cbt', climatology_begin), ('cet', climatology_end),
('nc', ncores), ('at', anomalies_calc_type), ('m', metric),
('dso', downscaling_operation), ('v', variable) ]
args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
os.system( 'ipython2.7 -i -- hur_cru_ts31_to_cl20_downscaling.py ' + args )
# PRECIP
import os
os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
ncores = '14'
base_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts323'
cru_ts31 = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts323/cru_ts3.23.1901.2014.pre.dat.nc'
cl20_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/pre/akcan'
template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
anomalies_calc_type = 'relative'
downscaling_operation = 'mult'
climatology_begin = '1961'
climatology_end = '1990'
year_begin = '1901'
year_end = '2014'
variable = 'pre'
metric = 'mm'
args_tuples = [ ('hi', cru_ts31), ('ci', cl20_path), ('tr', template_raster_fn),
('base', base_path), ('bt', year_begin), ('et', year_end),
('cbt', climatology_begin), ('cet', climatology_end),
('nc', ncores), ('at', anomalies_calc_type), ('m', metric),
('dso', downscaling_operation), ('v', variable) ]
args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
os.system( 'ipython2.7 -- tas_cld_cru_ts31_to_cl20_downscaling.py ' + args )
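# For reference, the join idiom above flattens the flag/value pairs into a single
# CLI argument string, e.g.
#   ''.join([ ' -'+flag+' '+value for flag, value in [('v', 'pre'), ('m', 'mm')] ])
# evaluates to ' -v pre -m mm'.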
|
[
"lindgren.mike@gmail.com"
] |
lindgren.mike@gmail.com
|
3bb4882df0c8b66b1d985109247e95926e438367
|
28dbe47aba287ed94ef7bba734203736bcc06249
|
/.history/dmac_20200713134926.py
|
e0c5592f917f60de87f93354003a2b45895781c6
|
[] |
no_license
|
ntung88/Trading_Algorithms
|
242fd816b19df95e02e9fcd8c5c91c862d2ede40
|
d96488b1754e3751f739d9c3f094a8f8dc54a0a9
|
refs/heads/master
| 2022-11-19T16:04:07.800344
| 2020-07-17T21:14:10
| 2020-07-17T21:14:10
| 276,239,640
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,330
|
py
|
import yfinance as yf
import numpy as np
import pandas as pd
from pandasgui import show
from scipy.optimize import minimize
import matplotlib.pyplot as plt
'''
A library for running a Dual Moving Average Crossover (DMAC) trading strategy, with
backtesting, period optimization, and visualization tools.
'''
# Period of time (in years) to look back when optimizing periods for the return calculation
HINDSIGHT = 2
def clean_data(data):
'''
Removes row (days) with no data from dataframe or series
'''
incomplete_idxs = False
if isinstance(data, pd.DataFrame):
for col in data.columns:
incomplete_idxs |= np.isnan(data[col])
else:
incomplete_idxs |= np.isnan(data)
return data[~incomplete_idxs]
def calc_crossovers(sma, lma):
'''
Returns a dataframe containing only the rows where a crossover of the sma and lma
is detected. 1 indicates a buy point (sma moving above lma), -1 a sell point
'''
num_points = len(clean_data(lma))
high = (sma > lma)[-num_points:]
crossovers = high.astype(int).diff()[1:]
trimmed = crossovers[crossovers != 0]
return trimmed
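# Example of the detection step above: if high = [False, False, True, True, False],
# then high.astype(int).diff()[1:] is [0, 1, 0, -1]; dropping the zeros leaves a
# buy (+1) where the sma first rises above the lma and a sell (-1) where it drops back.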
def profit(data, crossovers):
'''
Calculates profit assuming data covers a continuous time period with the given crossovers
'''
if len(crossovers) == 0:
return 0
total = 0
# If first crossover is a sell point assume implicit buy point at very start of data
if crossovers.iloc[0] == -1:
total += data.loc[crossovers.index[0]] - data.iloc[0]
# Add the difference between value at sell points and value at buy points to our profit
for i in range(1,len(crossovers)):
left_bound = crossovers.index[i-1]
if crossovers.loc[left_bound] == 1:
right_bound = crossovers.index[i]
total += data.loc[right_bound] - data.loc[left_bound]
# If last crossover is a buy point assume implicit sell point at end of data (include
# profit we have made on current holding)
if crossovers.iloc[-1] == 1:
total += data.iloc[-1] - data.loc[crossovers.index[-1]]
return total
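# Worked example (illustrative values): data = [10, 12, 11, 15, 14] indexed 0..4 with
# crossovers {1: +1, 3: -1} gives profit = data[3] - data[1] = 15 - 12 = 3; no implicit
# trades are added since the series starts with a buy and ends with a sell.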
def optimize(data):
'''
Uses scipy's convex minimization library to find optimal short period and long period
for moving averages. Because the profit certainly isn't a truly convex function I use a
wide range of seeds as initial guesses in hopes of detecting all the local minimums
and comparing them to get a good guess of the global min
'''
cons = ({'type': 'ineq', 'fun': lambda x: x[1] - x[0]},
{'type': 'ineq', 'fun': lambda x: x[0] - 5})
# Ranges of initial guesses for short and long periods
# Step sizes of 30 and 40 give the best accuracy; larger steps trade accuracy for runtime
short_seeds = range(5, 300, 50)
long_seeds = range(20, 800, 70)
# short_seeds = [100]
# long_seeds = [750]
minimum = float('inf')
best_short = 0
best_long = 0
for short_seed in short_seeds:
for long_seed in long_seeds:
# Use all combinations of ranges where long_seed > short_seed as initial guesses
if long_seed > short_seed:
res = minimize(lambda x, data: -1 * run_analysis(x, data), [short_seed, long_seed], args=(data,), method='COBYLA', constraints=cons, options={'rhobeg': 10.0, 'catol': 0.0})
if res.fun < minimum:
best_short = res.x[0]
best_long = res.x[1]
minimum = res.fun
return (int(round(best_short)), int(round(best_long)), minimum)
def run_analysis(periods, data):
'''
Objective function wrapper: runs the profit calculation for the given periods and data.
The negation needed for minimization happens in the caller (the lambda in optimize).
'''
short_period = int(round(periods[0]))
long_period = int(round(periods[1]))
sma = data.rolling(short_period).mean()
lma = data.rolling(long_period).mean()
crossovers = calc_crossovers(sma, lma)
return profit(data, crossovers)
def visualize(data, short_period, long_period):
'''
Useful for visualizing the algorithm's decisions. Plots the stock price with colored
vertical bars at buy and sell points
'''
sma = data.rolling(short_period).mean()
lma = data.rolling(long_period).mean()
crossovers = calc_crossovers(sma, lma)
buys = pd.DataFrame(crossovers[crossovers == 1.0])
sells = pd.DataFrame(crossovers[crossovers == -1.0])
data.plot(color='black')
for buy in buys.index:
plt.axvline(buy, color="green")
for sell in sells.index:
plt.axvline(sell, color="red")
plt.show()
def split_year(data):
'''
Split dataframe into a list of dataframes, each corresponding to the data for each year
'''
years = np.unique(data.index.year)
split = []
for year in years:
split.append(data[data.index.year == year])
return split
def calc_returns(split_data):
'''
Calculate annual returns for periods optimized over slices (of size HINDSIGHT) of past
data. Gives an idea of what kind of results to realistically expect.
'''
annual_returns = []
max_return = float('-inf')
min_return = float('inf')
for i in range(2, len(split_data)):
test_year = split_data[i]
optimize_period = pd.DataFrame(np.concatenate(split_data[i-HINDSIGHT:i]))
periods = optimize(optimize_period)
profit = run_analysis(periods, test_year)
annual_returns.append(profit)
if profit > max_return: max_return = profit
if profit < min_return: min_return = profit
return annual_returns, max_return, min_return
def main():
'''
Main's current functionality: compute and print annual returns for PSV, re-optimizing
the windows over the trailing HINDSIGHT years before each test year.
'''
ticker = yf.Ticker('PSV')
# data = yf.download(tickers, period='max', group_by='ticker')
data = ticker.history(period="max")
dirty = pd.DataFrame(data)
#Currently using only closing prices
frame = clean_data(dirty)['Close']
# split_year(frame)
# periods = optimize(frame)
print('optimizing')
results = calc_returns(split_year(frame))
print(results)
# visualize(frame, periods[0], periods[1])
if __name__ == "__main__":
main()
'''
how to quantify number of shares you want to buy (steepness of trend, volatility, top 20 stocks?)
'''
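# Hypothetical interactive usage of this module (ticker and period are arbitrary):
#   frame = clean_data(pd.DataFrame(yf.Ticker('SPY').history(period='max')))['Close']
#   short, long_, neg_profit = optimize(frame) # optimize returns the negated profit
#   visualize(frame, short, long_)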
|
[
"nathantung@Nathans-MacBook-Pro.local"
] |
nathantung@Nathans-MacBook-Pro.local
|
c6a8b0b84a6a412e24430e7c5dcbbac95110038a
|
e4e1d12269cea35451bbe552af3262a9a428faa1
|
/trainer/trainer_lm.py
|
2da3166825d2ea23258bb8cdeaf7dff9f50eedd5
|
[
"Apache-2.0"
] |
permissive
|
12341123/pylon
|
286e033623b13846ca9dd5d9b84c7788f6c70a1c
|
e26202b2c1cfbb8b5c444f840763f0ce839f048a
|
refs/heads/master
| 2023-05-07T20:25:22.134413
| 2021-06-08T03:51:29
| 2021-06-08T03:51:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 994
|
py
|
import torch
import torch.nn.functional as F
from .trainer_base import *
from .callbacks.start import *
class LMTrainer(BaseTrainer):
"""language modeling trainer
"""
def on_ep_begin(self, **kwargs):
self.hidden = None
def forward_pass(self, data):
x, y = data
pred, hidden = self.net(x, self.hidden)
self.hidden = detach(hidden)
# flatten pred and target before loss
t, b, n_token = pred.shape
loss = F.cross_entropy(pred.view(-1, n_token), y.view(-1))
with torch.no_grad():
ppl = torch.exp(loss)
return {
'x': x,
'y': y,
'pred': pred,
'loss': loss,
'ppl': ppl,
'hidden': self.hidden,
'n': y.shape[0] * y.shape[1]
}
def make_default_callbacks(self):
return super().make_default_callbacks() + [MovingAvgCb(['loss', 'ppl'])]
def __repr__(self):
return f'<LMTrainer {self.looper.state}>'
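# Sketch of one step through this trainer; BaseTrainer's constructor signature and
# looper wiring are assumptions taken from trainer_base, not shown here:
#   trainer = LMTrainer(net=my_language_model, ...)
#   out = trainer.forward_pass((x, y)) # x, y: (time, batch) token tensors
#   out['loss'].backward() # out['ppl'] == exp(cross-entropy)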
|
[
"the.akita.ta@gmail.com"
] |
the.akita.ta@gmail.com
|
8f5086512825812a077bc0c8c1f29b3a00095003
|
a14af281ab0c6189d5b6c48a25732fb286f34c8d
|
/rbac/__init__.py
|
c51b6a735b7d3bac790f423527d6132e53670915
|
[
"MIT"
] |
permissive
|
jackeyGao/simple-rbac
|
62112301fbad8d8bdc6639b4a3038bc0de3a7638
|
967dee94be4ad6c93999ce52655eada2477c9046
|
refs/heads/master
| 2020-04-14T15:31:43.655283
| 2019-01-03T06:20:22
| 2019-01-03T06:20:22
| 163,929,777
| 2
| 0
|
MIT
| 2019-01-03T06:01:11
| 2019-01-03T06:01:10
| null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
"""Simple RBAC
This is a simple role based access control utility in Python.
"""
from __future__ import absolute_import
__all__ = ["acl", "context", "proxy"]
|
[
"tonyseek@gmail.com"
] |
tonyseek@gmail.com
|
4c633e20b0ec5f68658ba51e9e74fa3d3f7c6cd4
|
e6a8793b1b12d47e57f00485350d122946618245
|
/services/models.py
|
8dad81c83ffca9da8fcc511e7630e1ee93c018e7
|
[] |
no_license
|
Fabricourt/school
|
70b2eba2c0b8ff9b9290eb0f68d730698a6d3a63
|
dad80c36be34b432dfadef195eb9e867f82cafff
|
refs/heads/main
| 2023-01-01T15:48:43.760288
| 2020-10-26T11:15:32
| 2020-10-26T11:15:32
| 305,829,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,678
|
py
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
from ckeditor.fields import RichTextField
from django.utils.html import mark_safe
from PIL import Image
class Service(models.Model):
title1 = models.CharField(max_length=200, blank=False, null=True)
title2 = models.CharField(max_length=200, blank=False, null=True)
title3 = models.CharField(max_length=200, blank=False, null=True)
title4 = models.CharField(max_length=200, blank=False, null=True)
subtitle1 = models.CharField(max_length=200, blank=False, null=True)
subtitle2 = models.CharField(max_length=200, blank=False, null=True)
subtitle3 = models.CharField(max_length=200, blank=False, null=True)
subtitle4 = models.CharField(max_length=200, blank=False, null=True)
statement1 = models.CharField(max_length=50, blank=False, null=True)
statement2 = models.CharField(max_length=50, blank=False, null=True)
statement3 = models.CharField(max_length=50, blank=False, null=True)
statement4 = models.CharField(max_length=50, blank=False, null=True)
icon1 = models.CharField(max_length=200, blank=False, null=True)
icon2 = models.CharField(max_length=200, blank=False, null=True)
icon3 = models.CharField(max_length=200, blank=False, null=True)
icon4 = models.CharField(max_length=200, blank=False, null=True)
date_posted = models.DateTimeField(default=timezone.now)
is_published = models.BooleanField(default=True)
#image = models.ImageField(upload_to='Service/%Y/%m/%d/', null=True, blank=False)
def __str__(self):
return str(self.title1)
|
[
"mfalme2030@gmail.com"
] |
mfalme2030@gmail.com
|
c799710f27d097dea81aebba892ed99561c9b17d
|
9aa7d3c6d563a434595141f5b4dd8c54252a4d40
|
/tweets/migrations/0013_post_image.py
|
93841f7f324a2610760c5b2d94d0924b9898e049
|
[] |
no_license
|
akhad97/Test-Web-App
|
9e200fc6d394cf6d52e72cb5f360d013e777fa9c
|
eb9b3480732c86f836748967bcfd6201dac6a6ee
|
refs/heads/master
| 2023-07-19T12:24:26.400998
| 2021-09-06T11:55:20
| 2021-09-06T11:55:20
| 402,657,324
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
# Generated by Django 2.2.13 on 2021-07-31 05:30
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('tweets', '0012_auto_20210730_2226'),
]
operations = [
migrations.AddField(
model_name='post',
name='image',
field=models.ImageField(default=django.utils.timezone.now, upload_to='post'),
preserve_default=False,
),
]
|
[
"ahadjon.abdullaev1997@gmail.com"
] |
ahadjon.abdullaev1997@gmail.com
|
811e71674e45e86b8f44ddac7576883ae69df87c
|
b33ad1c8560fc22a7e4ae9dec7f3c778b70941fa
|
/abc180/c.py
|
95d7109f8ff66c38bc7f9687872ddbacdf95636a
|
[] |
no_license
|
Tommy-somen/atcoder_record
|
36f226ffe6465dd5f8ae4986195510d00da46ffb
|
0e549a72cec3b87accefc52d5cd56420251361b9
|
refs/heads/master
| 2023-08-16T10:23:39.609512
| 2021-09-27T11:38:57
| 2021-09-27T11:38:57
| 410,585,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
n = int(input())
front,back = [],[]
i = 1
while i*i <= n:
if n%i == 0:
front.append(i)
if i != n//i:
back.append(n//i)
i += 1
front.extend(back[::-1])
for k in front:
print(k)
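# Worked example: n = 12 -> front = [1, 2, 3], back = [12, 6, 4]; extending front with
# back[::-1] prints the divisors in ascending order: 1 2 3 4 6 12.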
|
[
"tomohiro1015w@gmail.com"
] |
tomohiro1015w@gmail.com
|
f8c09a385fefe6b5064760cdcaa4f6f4794a0e07
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/new_20200709211421.py
|
be8abece3db7727532968727ef71c47945c6b598
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 737
|
py
|
# [3,4] [1,2,7,7,0]
# --> new set {1,2,7,7}
# 4-3 = 1
# [5,9] [1,2,6,7]
# smaller = 5
#
# ---> 3
# left = {1:4,2:5,7:10,0:3}
# ---> 4
# right = {1:5,2:6,7:11,0:4}
#
# newArr = sorted(min[[1,0],[2,1]])
# newArr =[2,6]
# if newArr[0] == 0:
#
# 2,6
# "not possible"
# 13,4 - [1,2,3,6,14]
'''
smaller = 4
answer = 4+1+2 = 7
4+1+3 = 8
4+1+6 = 11
4+1+14 = 19
answer = 13+1=14
13+2 = 15
13+3 = 16
13+6 = 19
continue
4+2+3 = 9
4+2+6 = 12
4+2+14 = 20
4+3+6 = 13
[6,2] [1,10,6,5]
2-6 = 4
# sort -->[1,5,6,10]
2 + 1 = 3
2+5 = 7
'''
# bigger = scale[0]
# smaller = scale[1]
# diff = 9
# find two values that add up to 9
# --> weight[i] + weights[i+1] == diff:
# newArr
# return "not possible"
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
4c4af7c1fc3dd62c467f8350dc9f109165a94352
|
82fce9aae9e855a73f4e92d750e6a8df2ef877a5
|
/Lab/venv/lib/python3.8/site-packages/OpenGL/raw/GL/SGIX/shadow_ambient.py
|
883eef870fb602d8947fba66023b3d22c72e48fc
|
[] |
no_license
|
BartoszRudnik/GK
|
1294f7708902e867dacd7da591b9f2e741bfe9e5
|
6dc09184a3af07143b9729e42a6f62f13da50128
|
refs/heads/main
| 2023-02-20T19:02:12.408974
| 2021-01-22T10:51:14
| 2021-01-22T10:51:14
| 307,847,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p
from OpenGL.constant import Constant as _C
# Code generation uses this
# End users want this...
from OpenGL.raw.GL import _errors
_EXTENSION_NAME = 'GL_SGIX_shadow_ambient'
def _f(function):
return _p.createFunction(function, _p.PLATFORM.GL, 'GL_SGIX_shadow_ambient', error_checker=_errors._error_checker)
GL_SHADOW_AMBIENT_SGIX = _C('GL_SHADOW_AMBIENT_SGIX', 0x80BF)
|
[
"rudnik49@gmail.com"
] |
rudnik49@gmail.com
|
3f6e4ee13b6391fbdcae1d4ce6f0e96b1d7d1ae6
|
70fa6468c768d4ec9b4b14fc94fa785da557f1b5
|
/lib/googlecloudsdk/command_lib/meta/regen.py
|
19ec509f43fa92f3587e9fbdab809d6f1a30549a
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
kylewuolle/google-cloud-sdk
|
d43286ef646aec053ecd7eb58566ab2075e04e76
|
75f09ebe779e99fdc3fd13b48621fe12bfaa11aa
|
refs/heads/master
| 2020-04-20T22:10:41.774132
| 2019-01-26T09:29:26
| 2019-01-26T09:29:26
| 169,131,028
| 0
| 0
|
NOASSERTION
| 2019-02-04T19:04:40
| 2019-02-04T18:58:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,013
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for the gcloud meta apis surface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.core import exceptions
class Error(exceptions.Error):
pass
class UnknownApi(Error):
"""Raised when api is not found."""
class ConfigFileError(Error):
pass
class DiscoveryDocError(Error):
pass
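# Hypothetical call sites (messages are illustrative, not from the real surface):
#   raise UnknownApi('api [foo] not found')
#   raise ConfigFileError('could not parse the regen config file')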
|
[
"cloudsdk.mirror@gmail.com"
] |
cloudsdk.mirror@gmail.com
|
644f6c6ccb4805245ea968549b49decd927c466f
|
fc1b227f04158c0929b1e66106e0acd842844a88
|
/pipeline/processor/Mosher.py
|
7222950af17cc29b9752bfe9e9e1be70b3c550af
|
[] |
no_license
|
csboling/pi-rt-video
|
72603a8c5bac6e2edf7e3e0b55d0b87bbd95bd30
|
274b0fb3837b04958f229e6a96934561f5da9f05
|
refs/heads/master
| 2021-05-09T00:09:39.798926
| 2018-04-18T04:37:16
| 2018-04-18T04:37:16
| 119,735,150
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 482
|
py
|
from abc import abstractmethod
from itertools import islice
import numpy as np
from pipeline.processor.pure import PureFunction
class Mosher(PureFunction):
def __call__(self, frame):
raw = frame.tobytes()
count = len(raw)
moshed = b''.join(self.mosh(raw))[:count]
return np.frombuffer(
moshed, dtype=frame.dtype
).reshape(
frame.shape
)
@abstractmethod
def mosh(self, in_bytes):
pass
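# Illustrative subclass, not part of the original module: the chunk-reversal rule is
# invented here purely to demonstrate the mosh() generator contract that __call__
# expects (yield byte chunks; __call__ joins them and truncates to the frame size).
class ReverseChunkMosher(Mosher):
    def mosh(self, in_bytes):
        # Reverse each 1 KiB chunk of the raw frame bytes.
        for i in range(0, len(in_bytes), 1024):
            yield in_bytes[i:i + 1024][::-1]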
|
[
"charles.samuel.boling@gmail.com"
] |
charles.samuel.boling@gmail.com
|
97dd05a028b4a33a6d447d46642a0deaecd17029
|
60acb606318869410d7437bf6c1a16fd6762b6b4
|
/app/api/img/image_info.py
|
65775a862d3bc5708bec535f36d2bce928fd5b49
|
[
"Apache-2.0"
] |
permissive
|
heraclitusj/mgek_imgbed
|
8fb0c69599fab3fce06684f659dfd5c0b4c5f866
|
d8a77ba1401f42237adda1b3ea8611f6464a704e
|
refs/heads/master
| 2022-07-28T01:48:51.314094
| 2020-05-20T05:35:52
| 2020-05-20T05:35:52
| 265,461,338
| 0
| 0
| null | 2020-05-20T05:31:37
| 2020-05-20T05:31:37
| null |
UTF-8
|
Python
| false
| false
| 755
|
py
|
# -*- coding: utf-8 -*-
# @Author: Landers
# @Github: Landers1037
# @File: image_info.py
# @Date: 2020-05-14
from app.api.img import img
from app.utils import format_response
from flask import request
from app.database import database
from app import global_config
@img.route('/api/image_info')
def image_info():
# Look up the image's info by the name field in the request body
try:
name = request.json["name"]
# Use a distinct local name so the imported blueprint `img` is not shadowed
image = database().get(global_config.engine, 'image', name)
if image:
return format_response('ok', image)
else:
return format_response('error','图片信息获取失败')
except Exception as e:
return format_response('error', '图片信息获取失败')
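# Hypothetical client call against the route above (host and image name are made up):
#   requests.get('http://localhost:5000/api/image_info', json={'name': 'demo.png'})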
|
[
"32225052+Landers1037@users.noreply.github.com"
] |
32225052+Landers1037@users.noreply.github.com
|
a6f5e5981efd1440e11604556dda3322676ef081
|
4364fb1fec2ebda2cd240ddc19ef89243812c122
|
/tensorflow_datasets/core/utils/version_test.py
|
29f7c9e85799c708ef7350ffb36ccf426741908b
|
[
"Apache-2.0"
] |
permissive
|
undeadinu/datasets
|
67ebbe6c20462ed6f58713ccd8dc1d67db89f4d9
|
a6f1bce86404d534b7343fb90f0ebfd6d098c346
|
refs/heads/master
| 2020-04-16T03:31:37.564934
| 2019-01-11T10:12:42
| 2019-01-11T10:13:12
| 165,234,637
| 0
| 0
|
Apache-2.0
| 2019-01-11T11:44:44
| 2019-01-11T11:41:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,090
|
py
|
# coding=utf-8
# Copyright 2018 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_datasets.core.utils.version."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_datasets.core.utils import version
class VersionTest(tf.test.TestCase):
def test_version(self):
"""Test the zip nested function."""
self.assertEqual(version.Version(), version.Version(0, 0, 0))
self.assertEqual(version.Version('1.3.534'), version.Version(1, 3, 534))
self.assertEqual(
version.Version(major=1, minor=3, patch=5), version.Version(1, 3, 5))
self.assertEqual(version.Version('latest'), version.Version.LATEST)
self.assertEqual(
version.Version(version.Version('1.3.5')), version.Version(1, 3, 5))
self.assertEqual(str(version.Version(10, 2, 3)), '10.2.3')
self.assertEqual(str(version.Version()), '0.0.0')
with self.assertRaisesWithPredicateMatch(ValueError, 'Format should be '):
version.Version('1.3.-534')
with self.assertRaisesWithPredicateMatch(ValueError, 'Format should be '):
version.Version('1.3')
with self.assertRaisesWithPredicateMatch(ValueError, 'Format should be '):
version.Version('1.3.')
with self.assertRaisesWithPredicateMatch(ValueError, 'Format should be '):
version.Version('1..5')
with self.assertRaisesWithPredicateMatch(ValueError, 'Format should be '):
version.Version('a.b.c')
if __name__ == '__main__':
tf.test.main()
|
[
"copybara-piper@google.com"
] |
copybara-piper@google.com
|
11446499dcc2fcc6f3086f797eac1521364b3182
|
0a530e71f248f0f731c6a3f28f090d1bb26b55e3
|
/apps/testdatas/migrations/0045_iframebodyinputtext.py
|
64f33a379d95c61ef45c53a1b937676f5cd9d202
|
[] |
no_license
|
wawj901124/fuwuqi
|
cca9935f7b110dfdf38c93d59602a596ecac0d58
|
e388fd10cf4fdb889d6566b7a30702b7c7351750
|
refs/heads/master
| 2022-12-13T13:12:10.854319
| 2020-01-18T00:42:52
| 2020-01-18T00:42:52
| 209,901,486
| 0
| 0
| null | 2022-11-22T02:40:33
| 2019-09-21T00:23:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,286
|
py
|
# Generated by Django 2.0.5 on 2019-11-12 15:40
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('testdatas', '0044_newaddandcheck_depend_new_add_and_check_case'),
]
operations = [
migrations.CreateModel(
name='IframeBodyInputText',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('iframe_ele_find', models.CharField(blank=True, default='xpath', help_text='元素查找风格:id、name、class_name、tag_name、link_text、partial_link_text、css_selector、xpath', max_length=100, null=True, verbose_name='iframe查找风格')),
('iframe_ele_find_value', models.CharField(blank=True, default='', max_length=1000, null=True, verbose_name='iframe查找风格的确切值')),
('input_ele_find', models.CharField(blank=True, default='xpath', help_text='元素查找风格:id、name、class_name、tag_name、link_text、partial_link_text、css_selector、xpath', max_length=100, null=True, verbose_name='输入框查找风格')),
('input_ele_find_value', models.CharField(blank=True, default='', max_length=1000, null=True, verbose_name='输入框查找风格的确切值')),
('is_auto_input', models.BooleanField(default=False, verbose_name='是否自动输入')),
('auto_input_type', models.CharField(blank=True, choices=[('1', '数字'), ('2', '字母(小写)'), ('3', '字母(大写)'), ('4', '特殊符号'), ('5', '数字和字母(小写)'), ('6', '数字和字母(大写)'), ('7', '字母(大小写)'), ('8', '数字和字母(大小写)'), ('9', '数字和字母和特殊符号'), ('10', '数字和字母和特殊符号和空白字符'), ('11', '汉字'), ('12', '手机号'), ('13', '身份证号')], default='11', max_length=10, null=True, verbose_name='自动输入字符的类型')),
('auto_input_long', models.CharField(blank=True, default='300', help_text='字符的个数,请填写数字,例如:1、2、3', max_length=100, null=True, verbose_name='自动输入的字符的个数')),
('input_text', models.CharField(blank=True, default='', max_length=300, null=True, verbose_name='输入框中要输入的内容')),
('is_with_time', models.BooleanField(default=True, verbose_name='是否带时间串')),
('is_check', models.BooleanField(default=True, verbose_name='是否进行验证')),
('add_time', models.DateTimeField(auto_now_add=True, null=True, verbose_name='添加时间')),
('update_time', models.DateTimeField(blank=True, default=datetime.datetime.now, null=True, verbose_name='更新时间')),
('newaddandcheck', models.ForeignKey(blank=True, default='', null=True, on_delete=django.db.models.deletion.PROTECT, to='testdatas.NewAddAndCheck', verbose_name='依赖的添加场景')),
],
options={
'verbose_name': '富文本输入框相关内容',
'verbose_name_plural': '富文本输入框相关内容',
},
),
]
|
[
"wawj900805"
] |
wawj900805
|
9a14bed785bfa237ff5c6773094106b0f1003dfd
|
0c0168a4676bce7453836a7509e7133044aa8975
|
/byceps/services/board/dbmodels/last_topic_view.py
|
bf001ad5b4d7e1f3890dc09d3263e58d53b1050d
|
[
"BSD-3-Clause"
] |
permissive
|
byceps/byceps
|
0aad3c4d974f76c6f8c3674d5539a80c9107b97a
|
eaee2b7fdc08c76c16ddf7f436110e0b5f1812e5
|
refs/heads/main
| 2023-09-01T04:03:13.365687
| 2023-09-01T03:28:18
| 2023-09-01T03:28:18
| 40,150,239
| 44
| 23
|
BSD-3-Clause
| 2023-05-16T18:41:32
| 2015-08-03T22:05:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,280
|
py
|
"""
byceps.services.board.dbmodels.last_topic_view
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2014-2023 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from datetime import datetime
from byceps.database import db
from byceps.services.board.models import TopicID
from byceps.typing import UserID
from byceps.util.instances import ReprBuilder
from .topic import DbTopic
class DbLastTopicView(db.Model):
"""The last time a user looked into specific topic."""
__tablename__ = 'board_topics_lastviews'
user_id = db.Column(db.Uuid, db.ForeignKey('users.id'), primary_key=True)
topic_id = db.Column(
db.Uuid, db.ForeignKey('board_topics.id'), primary_key=True
)
topic = db.relationship(DbTopic)
occurred_at = db.Column(db.DateTime, nullable=False)
def __init__(
self, user_id: UserID, topic_id: TopicID, occurred_at: datetime
) -> None:
self.user_id = user_id
self.topic_id = topic_id
self.occurred_at = occurred_at
def __repr__(self) -> str:
return (
ReprBuilder(self)
.add_with_lookup('user_id')
.add('topic', self.topic.title)
.add_with_lookup('occurred_at')
.build()
)
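# Hypothetical usage with Flask-SQLAlchemy's session (ids and time are placeholders):
#   view = DbLastTopicView(user_id, topic_id, datetime.utcnow())
#   db.session.add(view)
#   db.session.commit()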
|
[
"homework@nwsnet.de"
] |
homework@nwsnet.de
|
d6a6a6f115d922ccb5f1b7753846a7efdb2e2a9f
|
86bfb43636b24eef7ea580544797a5f84bf555b5
|
/DjangoBlog/tests.py
|
47fda838a1c4eca60f51314e7e57e8f125b6da98
|
[
"MIT"
] |
permissive
|
kongnyc/DjangoBlog
|
3e83af3b5e96706055964c3554cdff1b0b80f06e
|
38173bf3b47bafa82fadc9e7a95fad0c959c3dd5
|
refs/heads/master
| 2023-08-22T10:01:36.074884
| 2021-10-18T08:29:29
| 2021-10-18T08:29:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 845
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@version: ??
@author: liangliangyy
@license: MIT Licence
@contact: liangliangyy@gmail.com
@site: https://www.lylinux.net/
@software: PyCharm
@file: tests.py
@time: 2017/10/25 10:16 PM
"""
from django.test import TestCase
from DjangoBlog.utils import *
class DjangoBlogTest(TestCase):
def setUp(self):
pass
def test_utils(self):
sha256 = get_sha256('test')  # get_sha256 returns a SHA-256 hex digest
self.assertIsNotNone(sha256)
c = CommonMarkdown.get_markdown('''
# Title1
```python
import os
```
[url](https://www.lylinux.net/)
[ddd](http://www.baidu.com)
''')
self.assertIsNotNone(c)
d = {
'd': 'key1',
'd2': 'key2'
}
data = parse_dict_to_url(d)
self.assertIsNotNone(data)
|
[
"liangliangyy@gmail.com"
] |
liangliangyy@gmail.com
|
cae778a22518efc67a593ae11d6e3eae36f3c314
|
9693f521acf87faa6406f071db432a248fbaa731
|
/API_CRUD/asgi.py
|
e8ba240642e4df756cdd613ee48fb1a816350b0c
|
[] |
no_license
|
musfiqraihan/Django_API_CRUD
|
a8678c4ed04ada73139cbc20740e5e1471f11ef0
|
1405fc867a797580ae0d3753fc6f09914d008d3d
|
refs/heads/master
| 2023-02-11T16:56:24.290716
| 2020-12-30T11:13:40
| 2020-12-30T11:13:40
| 325,232,539
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
ASGI config for API_CRUD project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'API_CRUD.settings')
application = get_asgi_application()
|
[
"musfiqak@gmail.com"
] |
musfiqak@gmail.com
|
b7f5069628cdc9ce0511d09d0286eda67f188636
|
b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4
|
/toontown/src/coghq/DistributedMintBattle.py
|
01664f0a9ce6d947852b03f28be3e5495ec65230
|
[] |
no_license
|
satire6/Anesidora
|
da3a44e2a49b85252b87b612b435fb4970469583
|
0e7bfc1fe29fd595df0b982e40f94c30befb1ec7
|
refs/heads/master
| 2022-12-16T20:05:13.167119
| 2020-09-11T16:58:04
| 2020-09-11T17:02:06
| 294,751,966
| 89
| 32
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,711
|
py
|
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from toontown.battle.BattleBase import *
from toontown.coghq import DistributedLevelBattle
from direct.directnotify import DirectNotifyGlobal
from toontown.toon import TTEmote
from otp.avatar import Emote
from toontown.battle import SuitBattleGlobals
import random
from toontown.suit import SuitDNA
from direct.fsm import State
from direct.fsm import ClassicFSM, State
from toontown.toonbase import ToontownGlobals
class DistributedMintBattle(DistributedLevelBattle.DistributedLevelBattle):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedMintBattle')
def __init__(self, cr):
"""
cr is a ClientRepository.
"""
DistributedLevelBattle.DistributedLevelBattle.__init__(self,cr)
# Add a new reward state to the battle ClassicFSM
self.fsm.addState(State.State('MintReward',
self.enterMintReward,
self.exitMintReward,
['Resume']))
offState = self.fsm.getStateNamed('Off')
offState.addTransition('MintReward')
playMovieState = self.fsm.getStateNamed('PlayMovie')
playMovieState.addTransition('MintReward')
##### MintReward state #####
def enterMintReward(self, ts):
self.notify.debug('enterMintReward()')
self.disableCollision()
self.delayDeleteMembers()
if (self.hasLocalToon()):
NametagGlobals.setMasterArrowsOn(0)
if self.bossBattle:
messenger.send('localToonConfrontedMintBoss')
self.movie.playReward(ts, self.uniqueName('building-reward'),
self.__handleMintRewardDone)
def __handleMintRewardDone(self):
self.notify.debug('mint reward done')
if (self.hasLocalToon()):
self.d_rewardDone(base.localAvatar.doId)
self.movie.resetReward()
# Now request our local battle object enter the Resume state,
# which frees us from the battle. The distributed object may
# not enter the Resume state yet (it has to wait until all the
# toons involved have reported back up), but there's no reason
# we have to wait around for that.
self.fsm.request('Resume')
def exitMintReward(self):
self.notify.debug('exitMintReward()')
# In case we're observing and the server cuts us off
# this guarantees all final animations get started and things
# get cleaned up
self.movie.resetReward(finish=1)
self._removeMembersKeep()
NametagGlobals.setMasterArrowsOn(1)
|
[
"66761962+satire6@users.noreply.github.com"
] |
66761962+satire6@users.noreply.github.com
|