blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
beb7d60cedbda9144e4ee017272c8d5542552808
|
ec8414291c40bbdef2b43d4360ad2e046109056a
|
/datashape/promote.py
|
20ba0884261dec8c8e714d97c0957577faadf5e5
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
llllllllll/datashape
|
bd1b2ad09b01bae014af438c754d21ee87bf72c5
|
891e397b3facceede5f277ef578cccdd2319fd6f
|
refs/heads/master
| 2020-12-26T03:56:35.854808
| 2015-08-14T15:29:49
| 2015-08-14T15:29:49
| 37,879,722
| 0
| 0
| null | 2015-06-22T20:46:14
| 2015-06-22T20:46:14
| null |
UTF-8
|
Python
| false
| false
| 1,561
|
py
|
from __future__ import absolute_import
import numpy as np
import datashape
__all__ = ['promote', 'optionify']
def promote(lhs, rhs):
    """Promote two scalar dshapes to a possibly larger, but compatible type.

    Promotion is delegated to ``numpy.result_type``; if either operand is an
    Option type, the result is wrapped in an Option as well (see
    ``optionify``).

    Examples
    --------
    >>> from datashape import int32, int64, Option
    >>> promote(Option(int32), int64)
    ?int64
    >>> promote(int64, int64)
    ctype("int64")

    Notes
    -----
    Uses ``numpy.result_type`` for the type promotion logic; see
    http://docs.scipy.org/doc/numpy/reference/generated/numpy.result_type.html
    """
    if lhs == rhs:
        # Identical dshapes require no promotion at all.
        return lhs
    left = getattr(lhs, 'ty', lhs)
    right = getattr(rhs, 'ty', rhs)
    np_result = np.result_type(datashape.to_numpy_dtype(left),
                               datashape.to_numpy_dtype(right))
    promoted = datashape.CType.from_numpy_dtype(np_result)
    return optionify(lhs, rhs, promoted)
def optionify(lhs, rhs, dshape):
    """Wrap *dshape* in an Option when either operand was Option typed.

    Checks whether a binary operation's dshape came from
    :class:`~datashape.coretypes.Option` typed operands and constructs an
    :class:`~datashape.coretypes.Option` type accordingly.

    Examples
    --------
    >>> from datashape import int32, int64, Option
    >>> optionify(Option(int32), int64, int64)
    ?int64
    """
    if hasattr(dshape.measure, 'ty'):
        # The measure is already an Option; nothing to wrap.
        return dshape
    either_option = hasattr(lhs, 'ty') or hasattr(rhs, 'ty')
    return datashape.Option(dshape) if either_option else dshape
|
[
"cpcloud@gmail.com"
] |
cpcloud@gmail.com
|
3b314af16cc710d4bbcdf1d55df86397678882d4
|
99e65ad1427b8997a6d433f233bcc60ef2b9bc92
|
/tests/contrib/test_dropbox.py
|
5693d59696509d92837fedf07c9f98dcca8036e8
|
[
"MIT"
] |
permissive
|
gregorynicholas/flask-dance
|
29ea359ab98a661cf0920328132700399d32c7fb
|
eb3f947340a372cd596cb743353b7e3ed5682e76
|
refs/heads/master
| 2020-12-29T00:41:40.345560
| 2015-05-13T03:25:37
| 2015-05-13T03:25:37
| 36,555,631
| 1
| 1
| null | 2015-05-30T11:37:05
| 2015-05-30T11:37:05
| null |
UTF-8
|
Python
| false
| false
| 3,767
|
py
|
from __future__ import unicode_literals
import pytest
import responses
from urlobject import URLObject
from flask import Flask
from flask_dance.contrib.dropbox import make_dropbox_blueprint, dropbox
from flask_dance.consumer import OAuth2ConsumerBlueprint
from flask_dance.consumer.backend import MemoryBackend
def test_blueprint_factory():
    """The factory should build a Dropbox OAuth2 blueprint from its args."""
    bp = make_dropbox_blueprint(app_key="foo", app_secret="bar")
    assert isinstance(bp, OAuth2ConsumerBlueprint)
    assert bp.session.base_url == "https://api.dropbox.com/1/"
    assert bp.session.client_id == "foo"
    assert bp.client_secret == "bar"
    assert bp.authorization_url == "https://www.dropbox.com/1/oauth2/authorize"
    assert bp.token_url == "https://api.dropbox.com/1/oauth2/token"
def test_load_from_config():
    """DROPBOX_OAUTH_* app-config keys should feed the blueprint credentials."""
    app = Flask(__name__)
    app.secret_key = "anything"
    app.config["DROPBOX_OAUTH_APP_KEY"] = "foo"
    app.config["DROPBOX_OAUTH_APP_SECRET"] = "bar"
    app.register_blueprint(make_dropbox_blueprint())
    response = app.test_client().get("/dropbox")
    redirect_url = URLObject(response.headers["Location"])
    assert redirect_url.query.dict.get("client_id") == "foo"
@responses.activate
def test_context_local():
    """The `dropbox` proxy must resolve to the blueprint session of whichever
    app's request context is active, and raise outside any request context."""
    responses.add(responses.GET, "https://dropbox.com")
    # set up two apps with two different sets of auth tokens
    app1 = Flask(__name__)
    dropbox_bp1 = make_dropbox_blueprint(
        "foo1", "bar1", redirect_to="url1",
        backend=MemoryBackend({"access_token": "app1"}),
    )
    app1.register_blueprint(dropbox_bp1)
    app2 = Flask(__name__)
    dropbox_bp2 = make_dropbox_blueprint(
        "foo2", "bar2", redirect_to="url2",
        backend=MemoryBackend({"access_token": "app2"}),
    )
    app2.register_blueprint(dropbox_bp2)
    # outside of a request context, referencing functions on the `dropbox`
    # object will raise an exception
    with pytest.raises(RuntimeError):
        dropbox.get("https://dropbox.com")
    # inside of a request context, `dropbox` should be a proxy to the correct
    # blueprint session
    with app1.test_request_context("/"):
        # preprocess_request() runs the before_request hooks — presumably what
        # attaches the stored token to the session; TODO confirm.
        app1.preprocess_request()
        dropbox.get("https://dropbox.com")
    # `responses` records calls in order, so call 0 belongs to app1's request.
    request = responses.calls[0].request
    assert request.headers["Authorization"] == "Bearer app1"
    with app2.test_request_context("/"):
        app2.preprocess_request()
        dropbox.get("https://dropbox.com")
    request = responses.calls[1].request
    assert request.headers["Authorization"] == "Bearer app2"
def test_force_reapprove():
    """force_reapprove=True must surface as a query param on the redirect."""
    app = Flask(__name__)
    app.secret_key = "forced"
    app.register_blueprint(make_dropbox_blueprint("foo", "bar", force_reapprove=True))
    with app.test_client() as client:
        response = client.get(
            "/dropbox",
            base_url="https://a.b.c",
            follow_redirects=False,
        )
        # the OAuth redirect must carry a `force_reapprove=true` query param
        assert response.status_code == 302
        redirect_url = URLObject(response.headers["Location"])
        assert redirect_url.query_dict["force_reapprove"] == "true"
def test_disable_signup():
    """disable_signup=True must surface as a query param on the redirect."""
    app = Flask(__name__)
    app.secret_key = "apple-app-store"
    app.register_blueprint(make_dropbox_blueprint("foo", "bar", disable_signup=True))
    with app.test_client() as client:
        response = client.get(
            "/dropbox",
            base_url="https://a.b.c",
            follow_redirects=False,
        )
        assert response.status_code == 302
        redirect_url = URLObject(response.headers["Location"])
        assert redirect_url.query_dict["disable_signup"] == "true"
|
[
"david@davidbaumgold.com"
] |
david@davidbaumgold.com
|
47ab0810b795e184979408bc0e50bdf7fa92bd5c
|
3a891a79be468621aae43defd9a5516f9763f36e
|
/desktop/core/ext-py/urllib3-1.22/setup.py
|
35e02aabe194d0fa87fe0847dc389aeb4b9afb9a
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
oyorooms/hue
|
b53eb87f805063a90f957fd2e1733f21406269aa
|
4082346ef8d5e6a8365b05752be41186840dc868
|
refs/heads/master
| 2020-04-15T20:31:56.931218
| 2019-01-09T19:02:21
| 2019-01-09T19:05:36
| 164,998,117
| 4
| 2
|
Apache-2.0
| 2019-01-10T05:47:36
| 2019-01-10T05:47:36
| null |
UTF-8
|
Python
| false
| false
| 2,693
|
py
|
#!/usr/bin/env python
# Packaging script for a vendored urllib3 1.22; metadata is declarative and
# should stay in sync with urllib3/__init__.py and the changelog.
from setuptools import setup
import os
import re
import codecs
base_path = os.path.dirname(__file__)
# Get the version (borrowed from SQLAlchemy): parse __version__ from the
# package source instead of importing it, which could fail before install.
with open(os.path.join(base_path, 'urllib3', '__init__.py')) as fp:
    VERSION = re.compile(r".*__version__ = '(.*?)'",
                        re.S).match(fp.read()).group(1)
# The long description shown on PyPI is the README plus the changelog.
with codecs.open('README.rst', encoding='utf-8') as fp:
    readme = fp.read()
with codecs.open('CHANGES.rst', encoding='utf-8') as fp:
    changes = fp.read()
version = VERSION
setup(name='urllib3',
      version=version,
      description="HTTP library with thread-safe connection pooling, file post, and more.",
      long_description=u'\n\n'.join([readme, changes]),
      classifiers=[
          'Environment :: Web Environment',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: Implementation :: CPython',
          'Programming Language :: Python :: Implementation :: PyPy',
          'Topic :: Internet :: WWW/HTTP',
          'Topic :: Software Development :: Libraries',
      ],
      keywords='urllib httplib threadsafe filepost http https ssl pooling',
      author='Andrey Petrov',
      author_email='andrey.petrov@shazow.net',
      url='https://urllib3.readthedocs.io/',
      license='MIT',
      packages=['urllib3',
                'urllib3.packages', 'urllib3.packages.ssl_match_hostname',
                'urllib3.packages.backports', 'urllib3.contrib',
                'urllib3.contrib._securetransport', 'urllib3.util',
                ],
      requires=[],
      tests_require=[
          # These are a less-specific subset of dev-requirements.txt, for the
          # convenience of distro package maintainers.
          'pytest',
          'nose',
          'mock',
          'tornado',
      ],
      test_suite='test',
      extras_require={
          'secure': [
              'pyOpenSSL>=0.14',
              'cryptography>=1.3.4',
              'idna>=2.0.0',
              'certifi',
              "ipaddress",
          ],
          'socks': [
              'PySocks>=1.5.6,<2.0,!=1.5.7',
          ]
      },
      )
|
[
"ranade@cloudera.com"
] |
ranade@cloudera.com
|
7f97e535826d343972a73c7e1377a0d2fcd7d1e0
|
ce7da62e2d6e7820fd66031299702b08bf1260fd
|
/15-Threading/print_time.py
|
c8143269ef6cb4acee35a93cd26224c9677a4b4f
|
[] |
no_license
|
philipz/fasttrack-python
|
f0644aacf62d0130fa4631f4973fa6d64c46989c
|
cb4cafebbd04e9b66353fec4919f9cd0e161e84e
|
refs/heads/master
| 2021-01-17T08:50:38.702044
| 2014-07-16T14:26:14
| 2014-07-16T14:26:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
import time
import threading


def print_time():
    """Print the current time once per second, forever.

    Runs in a daemon thread, so it dies when the main thread exits.
    """
    while True:
        # print() as a function: valid in Python 2.6+ and required in
        # Python 3 (the original used the Python 2 print statement and
        # would not even parse under Python 3).
        print(time.ctime())
        time.sleep(1)


t = threading.Thread(target=print_time)
# Prefer the `daemon` attribute over the deprecated setDaemon() setter.
t.daemon = True
t.start()
# Let the daemon thread tick for ~10 seconds before the program exits.
time.sleep(10)
|
[
"rick@arborian.com"
] |
rick@arborian.com
|
df2a22532f04f775815daf02c36c2768d748d1d9
|
3c59b7bde01cfbc1fbd170883393e8ebf7a0a92f
|
/백준/1074번 Z.py
|
0b07500773bb2212f4d32f574c05561e7542de28
|
[] |
no_license
|
gf234/python_problem_solving
|
93ae00d940091131d8f8b06e478e385e4c2a4503
|
4c95751f5a687215c14bf61c37e6dc2e7e752342
|
refs/heads/main
| 2023-05-10T07:28:12.351006
| 2021-06-14T04:59:33
| 2021-06-14T04:59:33
| 314,479,583
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
def z_order_index(n, r, c):
    """Return the visit order of cell (r, c) in a Z-order traversal of a
    2**n x 2**n grid (BOJ 1074).

    At each level the grid splits into four quadrants visited in Z order:
    upper-left, upper-right, lower-left, lower-right. We add the size of
    every quadrant that is fully visited before the one containing (r, c).
    """
    answer = 0
    while n:
        half = 2 ** (n - 1)            # side length of each quadrant
        # cells per quadrant; the original bound this to `sum`, shadowing
        # the built-in — renamed here
        quadrant_cells = 4 ** (n - 1)
        if r < half:
            if c >= half:
                # upper-right quadrant: one quadrant visited before it
                c -= half
                answer += quadrant_cells
            # upper-left quadrant: nothing to add
        else:
            r -= half
            if c < half:
                # lower-left quadrant: two quadrants visited before it
                answer += quadrant_cells * 2
            else:
                # lower-right quadrant: three quadrants visited before it
                c -= half
                answer += quadrant_cells * 3
        n -= 1
    return answer


if __name__ == "__main__":
    # Guarded so the module can be imported (and tested) without blocking
    # on stdin; run as a script it behaves exactly as before.
    n, r, c = map(int, input().split())
    print(z_order_index(n, r, c))
|
[
"gf265@naver.com"
] |
gf265@naver.com
|
dc41a632cd458f4e263f559d9c3c0c90d16474d1
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/mcl1_input/L54/54-23_MD_NVT_rerun/set_1ns_equi_1.py
|
d5950a09373a0929e4576ed73deb39bcd3249847
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 925
|
py
|
import os
# Base working directory of this lambda-window TI equilibration batch.
dir = '/mnt/scratch/songlin3/run/mcl1/L54/MD_NVT_rerun/ti_one-step/54_23/'
filesdir = dir + 'files/'
# Templates: equilibration input and PBS job script, with an XXX placeholder
# that is substituted with the lambda value below.
temp_equiin = filesdir + 'temp_equi_1.in'
temp_pbs = filesdir + 'temp_1ns_equi_1.pbs'
# TI lambda values; one sub-directory (named with %6.5f formatting) per window.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    # Recreate a clean per-lambda directory and work inside it.
    os.system("rm -r %6.5f" %(j))
    os.system("mkdir %6.5f" %(j))
    os.chdir("%6.5f" %(j))
    os.system("rm *")
    workdir = dir + "%6.5f" %(j) + '/'
    # equi input: copy the template and substitute XXX with the lambda value.
    eqin = workdir + "%6.5f_equi_1.in" %(j)
    os.system("cp %s %s" %(temp_equiin, eqin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
    # PBS job script: same copy-and-substitute treatment.
    pbs = workdir + "%6.5f_1ns_equi_1.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    # Topology and restart files shared by all windows (copied from parent).
    os.system("cp ../54-23_merged.prmtop .")
    os.system("cp ../0.5_equi_0.rst .")
    # Submit the job, then return to the base dir for the next window.
    os.system("qsub %s" %(pbs))
    os.chdir(dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
338ce5bfb6eb581344b48a4f6403e87e9de8d671
|
a74cabbe1b11fc8ef575ea86f2543cd95db78ec9
|
/python_program/q1296_Divide_Array_in_Sets_of_K_Consecutive_Numbers.py
|
e29aed5088a3550289b744c6e3c78f9e8e068699
|
[] |
no_license
|
tszandy/leetcode
|
87e3ccf291b2879637d2d8238935a455b401a78a
|
f1f4361541dcffbb291285663c8820d7ffb37d2f
|
refs/heads/master
| 2023-04-06T15:34:04.847875
| 2023-03-26T12:22:42
| 2023-03-26T12:22:42
| 204,069,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,492
|
py
|
from typing import List
from collections import Counter,defaultdict,deque
from math import *
from functools import reduce,lru_cache,total_ordering
import numpy as np
from heapq import *
from bisect import bisect_left,bisect_right
from itertools import count,zip_longest
import queue
class Solution:
    def isPossibleDivide(self, nums: List[int], k: int) -> bool:
        """Return True if nums can be split into groups of k consecutive ints.

        Greedy over a Counter: the smallest remaining value must start a
        group, so all `count[value]` groups starting there must consume that
        many copies of each of the next k consecutive values.

        Fixes an IndexError in the original bisect-based version, which
        indexed past the end of the list when the required value exceeded
        every remaining element (e.g. nums=[1, 1], k=2). Also O(n log n)
        instead of the original's repeated bisect+pop (O(n^2 log n)).
        """
        if len(nums) % k != 0:
            return False
        counts = Counter(nums)
        for value in sorted(counts):
            need = counts[value]
            if need == 0:
                continue  # already consumed by groups started earlier
            # Every group starting at `value` needs the k consecutive values.
            for offset in range(k):
                if counts[value + offset] < need:
                    return False
                counts[value + offset] -= need
        return True
sol = Solution()

# (nums, k, expected) regression cases; print the actual output, the expected
# answer, and whether they agree — one line per case, as before.
for nums, k, answer in [
    ([1, 2, 3, 3, 4, 4, 5, 6], 4, True),
    ([3, 2, 1, 2, 3, 4, 3, 4, 5, 9, 10, 11], 3, True),
    ([3, 3, 2, 2, 1, 1], 3, True),
    ([1, 2, 3, 4], 3, False),
    ([1, 1, 2, 2, 3, 3], 2, False),
]:
    output = sol.isPossibleDivide(nums, k)
    print(output, answer, answer == output)
|
[
"444980834@qq.com"
] |
444980834@qq.com
|
2370bf29a494f7f47ab8a1880ffe74984620fb45
|
2ca9e61829dd28113abb971d7db1c46cec64f10c
|
/app.py
|
9244898c8240111091aa08a35e372220a5d2c367
|
[] |
no_license
|
Fordalex/task_manager
|
90b8591591ea49be16dd32805de21cd8a939ccea
|
4f9ba9057ddb2b1fdd52ce5d664796dd07529ced
|
refs/heads/master
| 2023-05-10T05:49:20.194423
| 2020-01-14T11:05:38
| 2020-01-14T11:05:38
| 232,828,008
| 0
| 0
| null | 2023-05-01T21:19:27
| 2020-01-09T14:29:17
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,117
|
py
|
import os
from flask import Flask, render_template, redirect, request, url_for
from flask_pymongo import PyMongo
from bson.objectid import ObjectId

# Flask application and MongoDB (Atlas) connection setup.
app = Flask(__name__)
app.config["MONGO_DBNAME"] = 'task_manager'
# SECURITY NOTE(review): credentials are hard-coded in the connection URI and
# committed to source; they should come from an environment variable. Left
# unchanged here because deployments may depend on this exact value.
app.config["MONGO_URI"] = 'mongodb+srv://root:r00tUser@myfirstcluster-fbkah.mongodb.net/task_manager?retryWrites=true&w=majority'
mongo = PyMongo(app)
@app.route('/')
@app.route('/get_tasks')
def get_tasks():
    """Render the list of every task."""
    all_tasks = mongo.db.tasks.find()
    return render_template("tasks.html", tasks=all_tasks)


@app.route('/add_task')
def add_task():
    """Show the new-task form with all categories to pick from."""
    all_categories = mongo.db.categories.find()
    return render_template('addtask.html', categories=all_categories)


@app.route('/insert_task', methods=['POST'])
def insert_task():
    """Create a task from the submitted form, then return to the list."""
    mongo.db.tasks.insert_one(request.form.to_dict())
    return redirect(url_for('get_tasks'))


@app.route('/edit_task/<task_id>')
def edit_task(task_id):
    """Show the edit form for one task."""
    return render_template(
        'edittask.html',
        task=mongo.db.tasks.find_one({"_id": ObjectId(task_id)}),
        categories=mongo.db.categories.find())


@app.route('/update_task/<task_id>', methods=["POST"])
def update_task(task_id):
    """Store the submitted field values for the task, then return to the list."""
    updated_fields = {
        'task_name': request.form.get('task_name'),
        'category_name': request.form.get('category_name'),
        'task_description': request.form.get('task_description'),
        'due_date': request.form.get('due_date'),
        'is_urgent': request.form.get('is_urgent'),
    }
    mongo.db.tasks.update({'_id': ObjectId(task_id)}, updated_fields)
    return redirect(url_for('get_tasks'))


@app.route('/delete_task/<task_id>')
def delete_task(task_id):
    """Remove a task, then return to the list."""
    mongo.db.tasks.remove({'_id': ObjectId(task_id)})
    return redirect(url_for('get_tasks'))
@app.route('/get_categories')
def get_categories():
    """Render the list of every category."""
    return render_template('categories.html',
                           categories=mongo.db.categories.find())


@app.route('/delete_category/<category_id>')
def delete_category(category_id):
    """Remove a category, then return to the category list."""
    mongo.db.categories.remove({'_id': ObjectId(category_id)})
    return redirect(url_for('get_categories'))


@app.route('/edit_category/<category_id>')
def edit_category(category_id):
    """Show the edit form for one category."""
    category = mongo.db.categories.find_one({'_id': ObjectId(category_id)})
    return render_template('editcategory.html', category=category)


@app.route('/update_category/<category_id>', methods=['POST'])
def update_category(category_id):
    """Rename a category with the submitted value."""
    new_name = {'category_name': request.form.get('category_name')}
    mongo.db.categories.update({'_id': ObjectId(category_id)}, new_name)
    return redirect(url_for('get_categories'))


@app.route('/insert_category', methods=['POST'])
def insert_category():
    """Create a category from the submitted form."""
    mongo.db.categories.insert_one(
        {'category_name': request.form.get('category_name')})
    return redirect(url_for('get_categories'))


@app.route('/add_category')
def add_category():
    """Show the new-category form."""
    return render_template('addcategory.html')
if __name__ == '__main__':
    # Bind host/port from the environment (Cloud9/Heroku style).
    # NOTE(review): debug=True must not be used in production.
    app.run(host=os.environ.get('IP'),
            port=os.environ.get('PORT'),
            debug=True)
|
[
"alex96ford19@gmail.com"
] |
alex96ford19@gmail.com
|
48a6fe8f5a0e1c8b1711c0c50824fa37bf0d24f2
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_207/336.py
|
8db68177c284762ccb22195d0da91c770b0ce592
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,801
|
py
|
#Name: Robin Park
#Username: robinp
#Google Code Jam Round 1B 2017
import random
import math
def isValid(arr):
    """Return True when no two circularly-adjacent entries of *arr* are equal.

    (Not called anywhere in this file — appears to be a debugging helper.)
    """
    length = len(arr)
    return all(arr[k] != arr[(k + 1) % length] for k in range(length))
def solve(N, R, O, Y, G, B, V):
    """Arrange N mane hairs of the given colors into a circle with no two
    adjacent hairs the same color (Code Jam 2017 Round 1B "Unicorn").

    Only R(ed), Y(ellow), B(lue) counts are used; O, G, V are read from the
    input format but ignored — presumably this targets the small dataset
    where they are zero (TODO confirm against the problem statement).

    Returns the arrangement string or "IMPOSSIBLE".
    NOTE(review): some branch combinations appear to fall through without an
    explicit return (yielding None); presumably unreachable for valid inputs.
    """
    # Pigeonhole: any color filling more than half the circle must repeat.
    if R > N/2 or Y > N/2 or B > N/2:
        return "IMPOSSIBLE"
    # Two-color circles only work with exactly equal counts.
    if R == 0:
        if Y == B:
            return "YB"*int(N/2)
    if Y == 0:
        if R == B:
            return "RB"*int(N/2)
    if B == 0:
        if R == Y:
            return "YR"*int(N/2)
    # Three equal counts: simple repetition works.
    if R == Y and Y == B:
        return "RYB"*int(N/3)
    min_color = min(R, Y, B) # recur over R, Y, B a la euclidean algorithm style
    # Subtract the minimum count from each color; min_color "RYB"-style
    # triples absorb the common part, the remainder decides the pattern.
    R = R - min_color
    Y = Y - min_color
    B = B - min_color
    new_N = R + Y + B
    #if R >= new_N/2 or Y >= new_N/2 or B >= new_N/2:
    #    return "IMPOSSIBLE"
    # Exactly one color left over: interleave its surplus into the triples.
    if R == Y and R == 0:
        if B <= min_color:
            return "BRBY"*B + "BRY"*(min_color-B)
    if B == Y and Y == 0:
        if R <= min_color:
            return "RYRB"*R + "RYB"*(min_color-R)
    if R == B and R == 0:
        if Y <= min_color:
            return "YRYB"*Y + "YRB"*(min_color-Y)
    # Two colors left over: pair them up and pad with the third via triples.
    if R == 0:
        if Y > B:
            if Y - B <= min_color:
                return "RYBY"*(Y-B) + "RBY"*(min_color-Y+B) + "BY"*B
            else:
                return "IMPOSSIBLE"
        else:
            if B - Y <= min_color:
                return "RBYB"*(B-Y) + "RYB"*(min_color-B+Y) + "YB"*Y
            else:
                return "IMPOSSIBLE"
    if Y == 0:
        if B > R:
            if B - R <= min_color:
                return "YBRB"*(B-R) + "YRB"*(min_color-B+R) + "RB"*R
            else:
                return "IMPOSSIBLE"
        else:
            if R - B <= min_color:
                return "YRBR"*(R-B) + "YBR"*(min_color-R+B) + "BR"*B
            else:
                return "IMPOSSIBLE"
    if B == 0:
        if R > Y:
            if R - Y <= min_color:
                return "BRYR"*(R-Y) + "BYR"*(min_color-R+Y) + "YR"*Y
            else:
                return "IMPOSSIBLE"
        else:
            if Y - R <= min_color:
                return "BYRY"*(Y-R) + "BRY"*(min_color-Y+R) + "RY"*R
            else:
                return "IMPOSSIBLE"
if __name__ == '__main__':
    # Read the case count, solve each case, and write answers in the
    # standard Code Jam "Case #i: <answer>" format.
    with open('unicorn.in', 'r') as fin, open('unicorn.out', 'w') as fout:
        case_count = int(fin.readline().strip())
        for case_index in range(case_count):
            N, R, O, Y, G, B, V = map(int, fin.readline().strip().split())
            fout.write('Case #' + str(case_index + 1) + ': ')
            fout.write(solve(N, R, O, Y, G, B, V))
            fout.write('\n')
        print("done")
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
53f6236b4cdfc792e33b41c54cf098a380b42ad8
|
cdc95cd7cbce8d9e904bb769b981f8b87d86ca7e
|
/Geeks for geeks/Dynamic Programming/Subset Sum Problem.py
|
91572f0834aad557afef3571b28d5cc6dd656c0a
|
[] |
no_license
|
amit-kr-debug/CP
|
c99ba608edf943b807d9cb707a9f10820ef1d6d6
|
1423a558904c4497c505c34ec38345ee979a036b
|
refs/heads/master
| 2023-05-10T15:51:35.905745
| 2021-06-13T15:59:15
| 2021-06-13T15:59:15
| 291,243,005
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 862
|
py
|
"""
Given an array of non-negative integers and a value sum, determine if there is a subset of the given set with sum equal to given sum.
Examples:
Input : arr[] = {4, 1, 10, 12, 5, 2},
sum = 9
Output : TRUE
{4, 5} is a subset with sum 9.
Input : arr[] = {1, 8, 2, 5},
sum = 4
Output : FALSE
There exists no subset with sum 4.
"""
# User function Template for Python3
def subsetSum(arr, N, S):
    """Return 1 if some subset of arr[:N] sums to S, else 0.

    Classic subset-sum DP, space-optimized to a single O(S) boolean row
    (the original kept the full (N+1) x (S+1) table). Scanning j downward
    ensures each element is used at most once per subset.

    Parameters
    ----------
    arr : list of non-negative ints
    N   : number of elements of arr to consider
    S   : target sum (>= 0)
    """
    reachable = [False] * (S + 1)
    reachable[0] = True  # the empty subset always sums to 0
    for i in range(N):
        value = arr[i]
        # Downward scan: reachable[j - value] still reflects the state
        # *before* this element was considered.
        for j in range(S, value - 1, -1):
            if reachable[j - value]:
                reachable[j] = True
    return 1 if reachable[S] else 0
|
[
"amitkr0819@gmail.com"
] |
amitkr0819@gmail.com
|
c6c8f87d0f8a443a9de6ef96207e645fd2b836e0
|
527fd39d3a1555800c2c32025fdd15fd86ba6672
|
/Decorators/decorator.py
|
a11513c818b71b811384630675baface6261b694
|
[] |
no_license
|
rohanwarange/Python-Tutorials
|
cfd39551f7ff62bd032946976ba3820474e42405
|
53d8fb226f94d027ae7999f9678697206d37d83a
|
refs/heads/master
| 2023-06-18T10:45:36.884324
| 2021-07-07T17:44:22
| 2021-07-07T17:44:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
def decorator_func(any_function):
    """Decorator that prints a short banner before delegating to the wrapped
    function.

    Uses functools.wraps so the wrapper preserves the wrapped function's
    __name__/__doc__ (the original wrapper lost them).
    """
    from functools import wraps

    @wraps(any_function)
    def wrapper_function(*args, **kwargs):
        # Banner text (including its original spelling) kept byte-identical
        # to preserve observable output.
        print("this is awasom function")
        return any_function(*args, **kwargs)
    return wrapper_function
# Example usages of decorator_func.
@decorator_func
def func(a):
    """Print a message showing the argument (banner printed first)."""
    print(f"This is function with argument{a}")


@decorator_func
def add(a, b):
    """Return a + b (banner printed first)."""
    return a + b


print(add(2, 3))
|
[
"rohanwarange24@gmail.com"
] |
rohanwarange24@gmail.com
|
44c089e6367d19a106156ab03e0795e412f567ef
|
c1f205b4cc86456ad9d8c4b05c2c7a2a09818ec5
|
/10_preprocessing.py
|
272dfa9b82bd04c39cab4cf3a7a7f4a972d66de0
|
[] |
no_license
|
GINK03/deep-recommnder
|
d6c7c41188224c721f31b72333167cba16c11a4e
|
3039c03755b73a04adde6ef84ff2c7da6987dddb
|
refs/heads/master
| 2020-04-22T14:38:32.307010
| 2019-02-05T02:13:19
| 2019-02-05T02:13:19
| 170,450,796
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 697
|
py
|
import glob
from io import StringIO
import pandas as pd
def get_movie(fn):
    """Parse one Netflix-prize-style ratings file into a DataFrame.

    The first line holds the movie id followed by ':'; the remaining lines
    are 'userId,score,date' CSV rows. Returns a DataFrame with columns
    userId, score, movieId (the date column is dropped).
    """
    # Context manager closes the handle promptly — the original
    # `open(fn).readlines()` leaked it until garbage collection.
    with open(fn) as f:
        lines = f.readlines()
    movie = lines.pop(0).strip()
    csv = StringIO(''.join(lines))
    df = pd.read_csv(csv, header=None, sep=',')
    df.columns = ['userId', 'score', 'date']
    df['movieId'] = movie.replace(':', '')
    df = df.drop(['date'], axis=1)
    return df
# Parse every ratings file, concatenate, and dump one combined CSV.
dfs = []
files = glob.glob('./download/training_set/*.txt')
for index, fn in enumerate(files):
    print(index, len(files), fn)
    dfs.append(get_movie(fn))
from pathlib import Path
df = pd.concat(dfs, axis=0)
Path('works/dataset').mkdir(exist_ok=True, parents=True)
df.to_csv('works/dataset/preprocess.csv', index=None)
|
[
"gim.kobayashi@gmail.com"
] |
gim.kobayashi@gmail.com
|
5d61a7e605d2e57dbf982682dbb8931f9342f0fd
|
9b6632b532c1ece623c8c0bd81fc1fac88ee423c
|
/gluster/peer_op.py
|
0939576d9edbe310fb6b3f1e8847fc094ab297c6
|
[] |
no_license
|
sun7shines/GlusterFS
|
8542bc213d97e001952606881e0e3c42941901f9
|
1e1b3da72fe030307bb45b4c42260477fc826902
|
refs/heads/master
| 2021-01-20T13:48:42.785399
| 2015-09-08T07:11:30
| 2015-09-08T07:11:30
| 42,085,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,832
|
py
|
# -*- coding: utf-8 -*-
import operation.gluster.peer_db
import operation.gluster.peer_cmd
import operation.gluster.volume_clr
import operation.gluster.volume_ifo
import system.network.dns_service_op
import vmd_utils
import support.uuid_op
import os
def create_peer(param):
    """Join this host to a gluster cluster.

    (Translated from the original Chinese comments: the local bookkeeping
    steps have no error return — add new events if error returns are needed;
    the host is first checked for existing gluster cluster information.)

    Returns (True, '') on success, or (False, message) when the RPC
    connection or the peer probe fails.
    """
    gluster_ip = param.get('gluster_ip')
    # Drop any stale peer configuration before (re)registering this host.
    operation.gluster.volume_clr.clear_peer_cfgs()
    ok, sysid = operation.gluster.volume_ifo.getsysid()
    if ok:
        operation.gluster.peer_db.insert_peer(sysid, gluster_ip)
    target_ip = param.get('target_ip')
    if target_ip and target_ip != 'None':
        ok, psh = vmd_utils.get_rpcConnection(target_ip)
        if not ok:
            return False, psh
        ok, msg = psh.do_probe_peer(gluster_ip)
        if not ok:
            return False, msg
    # Record this host's gluster IP for later lookups.
    os.system("echo '%s' > /var/lib/glusterd/glfs_ip" % (gluster_ip,))
    return True, ''
def delete_peer(param):
    """Detach this host from the gluster cluster and clear local peer state.

    (Translated from the original Chinese comment: check whether any brick
    on this host is still in use before detaching.)

    Always returns (True, '').
    """
    dcuuid = operation.gluster.peer_db.get_host_dcuuid()
    gluster_ip = operation.gluster.peer_db.get_host_gluster_ip()
    if not gluster_ip:
        gluster_ip = system.network.dns_service_op.get_localhost_ip()
    is_vcuuid, vcuuid, vc_ip = support.uuid_op.get_vc_uuid()
    # NOTE(review): comparing the *uuid* against "127.0.0.1" looks odd —
    # possibly vc_ip was intended; left as-is to preserve behavior.
    if is_vcuuid and vcuuid != "127.0.0.1":
        _, target_ip = operation.gluster.peer_db.get_available_peer_target_ip(
            dcuuid, gluster_ip, vcuuid, vc_ip)
        if target_ip and target_ip != 'None':
            operation.gluster.peer_cmd.detach_peer(target_ip, gluster_ip)
    operation.gluster.peer_db.clear_peer()
    operation.gluster.volume_clr.clear_peer_cfgs()
    return True, ''
|
[
"sheshisheng@163.com"
] |
sheshisheng@163.com
|
84e58d3da90fb71cd6644555c0120c70d49027a7
|
e0a51ac08f13f4d3d89ccd770225a9ca0cecb80a
|
/seucorretor/seucorretor/settings/localtests.py
|
f9f76f542e0e0c472e4eea530a8fba85bc505816
|
[] |
no_license
|
MarcosDihl/corretaza-buscador
|
8bbc94a81f7414a3cbc4a1b7ce7b841431209b1c
|
a3579059839f32c585dda05775fa525fdd34121e
|
refs/heads/master
| 2022-04-04T03:36:47.360708
| 2018-01-31T03:05:13
| 2018-01-31T03:05:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
"""
Make the tests run faster on localmachines
IMPORTANT: Avoid using this settins on staging and CI environments
"""
from .base import *
ADMINS = (
('', ''),
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': join(BASE_DIR, 'dbtest.sqlite3'),
}
}
ALLOWED_HOSTS = ['localhost', '127.0.0.1', ]
|
[
"huogerac@gmail.com"
] |
huogerac@gmail.com
|
e5f484cd73d29d1f7bd4fc93cb9787b6732685cd
|
f4a1c3157b2544cf0240579607acb116de0818bd
|
/lookups/v1/phone_number/fetch-payfone-tcpa-compliance/fetch-payfone-tcpa-compliance.6.x.py
|
2c83bf73afdc86fe9c84bbbc668dc323c1508bfb
|
[] |
no_license
|
raybanain/sample-code
|
31568cc3bcfd87ca33937740d7c264ab40f23b04
|
8778483e064ba571523fa5cc32e677fe5844e7a5
|
refs/heads/master
| 2020-03-22T05:02:54.452336
| 2018-06-28T18:23:04
| 2018-06-28T18:23:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
# Download the helper library from https://www.twilio.com/docs/python/install
from twilio.rest import Client

# Your Account Sid and Auth Token from twilio.com/console
account_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
auth_token = 'your_auth_token'
client = Client(account_sid, auth_token)

# Look up the number with the Payfone TCPA-compliance add-on attached.
phone_number = client.lookups.phone_numbers('+16502530000').fetch(
    add_ons='payfone_tcpa_compliance',
    add_ons_data={
        'payfone_tcpa_compliance.right_party_contacted_date': '20160101',
    })

print(phone_number.caller_name)
|
[
"jose.oliveros.1983@gmail.com"
] |
jose.oliveros.1983@gmail.com
|
edcdc247341adbfd4332e2863e2faae3274b9082
|
8353888e4970dda70b2f4dbd7944e948ff024b94
|
/games/migrations/0012_leaderboard.py
|
b6a42f52663f11dd7ff44ee2b5b66ad4ef2ec530
|
[
"MIT"
] |
permissive
|
munisisazade/diplom_isi
|
fa420f8f7d960c65dc193d50b5989a69e2d43491
|
767531ef3a4b090d1bc0963e687b5215d6f92f53
|
refs/heads/master
| 2022-12-08T11:02:16.618962
| 2018-05-01T15:46:04
| 2018-05-01T15:46:04
| 131,720,529
| 1
| 1
|
MIT
| 2022-12-08T00:44:33
| 2018-05-01T14:09:30
|
CSS
|
UTF-8
|
Python
| false
| false
| 734
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-16 13:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the LeaderBoard model (auto-generated by Django 1.11.3).

    LeaderBoard rows carry an auto timestamp and a cascading FK to
    games.GameTime.
    """

    dependencies = [
        ('games', '0011_auto_20170810_1759'),
    ]

    operations = [
        migrations.CreateModel(
            name='LeaderBoard',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('games', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='games.GameTime')),
            ],
        ),
    ]
|
[
"munisisazade@gmail.com"
] |
munisisazade@gmail.com
|
f05da529322bfe042d838fae947143e6c3dae144
|
4f04ce5667f895889cfe54ed5f0dec6f5e7d4e4e
|
/bert_brain/data_sets/choice_of_plausible_alternatives.py
|
3dcaca9a9b92be43edf07b18daef78b2337f1dc4
|
[] |
no_license
|
danrsc/bert_brain
|
e172859b7ab93b0a05ed7c5b936778fae134eabb
|
eca204f163018270ac6b6687c2f3b6b5b158a89c
|
refs/heads/master
| 2022-11-28T14:32:45.420452
| 2020-08-03T00:14:42
| 2020-08-03T00:14:42
| 167,277,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,219
|
py
|
import os
import json
from dataclasses import dataclass
import numpy as np
from .input_features import RawData, KindData, ResponseKind, FieldSpec
from .corpus_base import CorpusBase, CorpusExampleUnifier, path_attribute_field
__all__ = ['ChoiceOfPlausibleAlternatives']
@dataclass(frozen=True)
class ChoiceOfPlausibleAlternatives(CorpusBase):
    """Corpus reader for COPA (Choice of Plausible Alternatives) jsonl data.

    Each input line holds a premise, a 'cause'/'effect' question selector and
    numbered candidate choices; every (premise, question, choice) triple
    becomes one example sharing a multipart_id, with a binary label marking
    whether that choice is the correct one.
    """

    # Filesystem location of the COPA jsonl files (train/val/test).
    path: str = path_attribute_field('choice_of_plausible_alternatives_path')

    @staticmethod
    def _read_examples(path, example_manager: CorpusExampleUnifier, labels):
        """Parse one jsonl file, appending binary labels to *labels* and
        returning the examples added via *example_manager*."""
        examples = list()
        with open(path, 'rt') as f:
            for line in f:
                fields = json.loads(line.strip('\n'))
                premise = fields['premise'].split()
                multipart_id = len(example_manager)
                # Gather choice1, choice2, ... until one is missing.
                choices = list()
                while True:
                    choice_name = 'choice{}'.format(len(choices) + 1)
                    if choice_name not in fields:
                        break
                    choices.append(fields[choice_name].split())
                # Expand the question selector into a natural-language question.
                question_expansions = {
                    'cause': 'What was the cause of this?',
                    'effect': 'What happened as a result?'}
                if fields['question'] not in question_expansions:
                    # Typo fixed in the error message ('Uknown' -> 'Unknown').
                    raise ValueError('Unknown question type: {}'.format(fields['question']))
                question = question_expansions[fields['question']].split()
                # Unlabeled (test) rows default to 1 — presumably the 0-based
                # index of the assumed-correct choice; TODO confirm upstream.
                label = fields['label'] if 'label' in fields else 1
                for index_choice, choice in enumerate(choices):
                    data_ids = -1 * np.ones(len(premise) + len(question) + len(choice), dtype=np.int64)
                    # doesn't matter which word we attach the label to since we specify below that is_sequence=False
                    data_ids[0] = len(labels)
                    choice_label = 1 if label == index_choice else 0
                    examples.append(example_manager.add_example(
                        example_key=None,
                        words=premise + question + choice,
                        sentence_ids=[0] * len(premise) + [1] * len(question) + [2] * len(choice),
                        data_key='copa',
                        data_ids=data_ids,
                        start=0,
                        stop=len(premise),
                        start_sequence_2=len(premise),
                        stop_sequence_2=len(premise) + len(question),
                        start_sequence_3=len(premise) + len(question),
                        stop_sequence_3=len(premise) + len(question) + len(choice),
                        multipart_id=multipart_id))
                    labels.append(choice_label)
        return examples

    @classmethod
    def response_key(cls) -> str:
        # Key under which COPA response data is registered.
        return 'copa'

    @classmethod
    def num_classes(cls) -> int:
        # Binary decision per (premise, question, choice) example.
        return 2

    def _load(self, example_manager: CorpusExampleUnifier, use_meta_train: bool):
        """Load the train/val/test splits; optionally carve 20% of train into
        a meta-train split when *use_meta_train* is True."""
        labels = list()
        train = ChoiceOfPlausibleAlternatives._read_examples(
            os.path.join(self.path, 'train.jsonl'), example_manager, labels)
        meta_train = None
        if use_meta_train:
            from sklearn.model_selection import train_test_split
            idx_train, idx_meta_train = train_test_split(np.arange(len(train)), test_size=0.2)
            meta_train = [train[i] for i in idx_meta_train]
            train = [train[i] for i in idx_train]
        validation = ChoiceOfPlausibleAlternatives._read_examples(
            os.path.join(self.path, 'val.jsonl'), example_manager, labels)
        test = ChoiceOfPlausibleAlternatives._read_examples(
            os.path.join(self.path, 'test.jsonl'), example_manager, labels)
        # Freeze the label array so shared response data cannot be mutated.
        labels = np.array(labels, dtype=np.float64)
        labels.setflags(write=False)
        return RawData(
            input_examples=train,
            validation_input_examples=validation,
            test_input_examples=test,
            meta_train_input_examples=meta_train,
            response_data={type(self).response_key(): KindData(ResponseKind.generic, labels)},
            is_pre_split=True,
            field_specs={type(self).response_key(): FieldSpec(is_sequence=False)})
|
[
"daniel.robert.schwartz@gmail.com"
] |
daniel.robert.schwartz@gmail.com
|
96a0b1057d72d29888d23095ba8c569604278730
|
27be1bab83751703d94a5d2387cc316fcad5192e
|
/bin/thresholder.py
|
9199f1cc0653afc4e891dba5b7188dbf489ce885
|
[] |
no_license
|
golamrabbii/rapido-env
|
b57ebad1f04dcaef60943f097bb976391428eb94
|
8c0f533a49f76e293af96822459f2cdc42c87def
|
refs/heads/main
| 2023-05-22T01:06:26.180692
| 2021-06-07T12:09:25
| 2021-06-07T12:09:25
| 374,652,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,822
|
py
|
#!/home/rapido-live/rapido-env35/bin/python3.5
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates how a 1-bit BitmapImage can be used
# as a dynamically updated overlay
#
try:
from tkinter import *
except ImportError:
from Tkinter import *
from PIL import Image, ImageTk
import sys
#
# an image viewer
class UI(Frame):
    """Image viewer: shows a grayscale image with a 1-bit threshold overlay
    that is updated from a slider."""

    def __init__(self, master, im, value=128):
        Frame.__init__(self, master)

        self.image = im
        self.value = value

        self.canvas = Canvas(self, width=im.size[0], height=im.size[1])
        self.backdrop = ImageTk.PhotoImage(im)
        self.canvas.create_image(0, 0, image=self.backdrop, anchor=NW)
        self.canvas.pack()

        scale = Scale(self, orient=HORIZONTAL, from_=0, to=255,
                      resolution=1, command=self.update_scale, length=256)
        scale.set(value)
        scale.bind("<ButtonRelease-1>", self.redraw)
        scale.pack()

        # uncomment the following line for instant feedback (might
        # be too slow on some platforms)
        # self.redraw()

    def update_scale(self, value):
        # Tk passes the scale position as a string; parse it numerically
        # instead of eval()-ing it (eval on widget input is unsafe and
        # unnecessary). float() handles both '128' and '128.0' forms.
        self.value = int(float(value))

        self.redraw()

    def redraw(self, event=None):
        # create overlay (note the explicit conversion to mode "1")
        im = self.image.point(lambda v, t=self.value: v >= t, "1")
        self.overlay = ImageTk.BitmapImage(im, foreground="green")

        # update canvas
        self.canvas.delete("overlay")
        self.canvas.create_image(0, 0, image=self.overlay, anchor=NW,
                                 tags="overlay")
# --------------------------------------------------------------------
# main
root = Tk()
# load the image named on the command line; force 8-bit grayscale ("L"),
# since the threshold overlay compares single-channel pixel values
im = Image.open(sys.argv[1])
if im.mode != "L":
    im = im.convert("L")
# im.thumbnail((320,200))
UI(root, im).pack()
root.mainloop()
|
[
"root@ip-172-31-28-98.ap-southeast-1.compute.internal"
] |
root@ip-172-31-28-98.ap-southeast-1.compute.internal
|
e462ebb803471d1bf3b942ca4c3a191aa1d00f36
|
910590eef6ef4dbccd73f5a3c665e4e06ebd58a3
|
/sklearn_porter/classifier/BernoulliNB/__init__.py
|
7c61f2d7115ac5da83585a7cfac9a4815b6c2230
|
[
"MIT"
] |
permissive
|
prashanthgedde/sklearn-porter
|
9a6226dd443fd76171d275a84712bae7fe58339e
|
70f2fc7e9e924b803c896035840c4c28c5c4007f
|
refs/heads/master
| 2021-01-23T04:59:45.676275
| 2017-03-23T23:56:08
| 2017-03-23T23:56:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,295
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
from ...Template import Template
class BernoulliNB(Template):
    """
    Port a fitted sklearn BernoulliNB classifier to Java source code.

    See also
    --------
    sklearn.naive_bayes.BernoulliNB
    """
    SUPPORTED_METHODS = ['predict']
    # @formatter:off
    # Java code fragments filled in with str.format(); doubled braces are
    # literal braces in the generated source.
    TEMPLATES = {
        'java': {
            'type': '{0}',
            'arr': '{{{0}}}',
            'arr[]': '{type}[] {name} = {{{values}}};',
            'arr[][]': '{type}[][] {name} = {{{values}}};',
            'indent': '    ',
        },
    }
    # @formatter:on
    def __init__(self, model, target_language='java', target_method='predict', **kwargs):
        """Precompute the Java literals (class priors and probability
        tables) needed by the predict template.

        :param model: a fitted sklearn.naive_bayes.BernoulliNB instance.
        :param target_language: output language key into TEMPLATES.
        :param target_method: method to port; only 'predict' is supported.
        """
        super(BernoulliNB, self).__init__(model, target_language=target_language, target_method=target_method, **kwargs)
        self.model = model
        # self.n_features = len(model.sigma_[0])
        self.n_classes = len(model.classes_)
        self.n_features = len(model.feature_log_prob_[0])
        # jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
        # jll += self.class_log_prior_ + neg_prob.sum(axis=1)
        # Create class prior probabilities:
        priors = [self.temp('type').format(repr(p)) for p in
                  model.class_log_prior_]
        priors = ', '.join(priors)
        self.priors = self.temp('arr[]').format(type='double', name='priors',
                                                values=priors)
        # Create probabilities:
        # probs = []
        # for prob in model.feature_log_prob_:
        #     tmp = [self.temp('type').format(repr(p)) for p in prob]
        #     tmp = self.temp('arr').format(', '.join(tmp))
        #     probs.append(tmp)
        # probs = ', '.join(probs)
        # self.pos_probs = self.temp('arr[][]').format(type='double',
        #                                              name='posProbs',
        #                                              values=probs)
        # Create negative probabilities:
        # log(1 - P(x_i|c)), used by the Bernoulli NB decision function
        neg_prob = np.log(1 - np.exp(model.feature_log_prob_))
        probs = []
        for prob in neg_prob:
            tmp = [self.temp('type').format(repr(p)) for p in prob]
            tmp = self.temp('arr').format(', '.join(tmp))
            probs.append(tmp)
        probs = ', '.join(probs)
        self.neg_probs = self.temp('arr[][]').format(type='double',
                                                     name='negProbs',
                                                     values=probs)
        # delta = (log P(x_i|c) - log(1 - P(x_i|c)))^T, the per-feature
        # contribution when a feature is present
        delta_probs = (model.feature_log_prob_ - neg_prob).T
        probs = []
        for prob in delta_probs:
            tmp = [self.temp('type').format(repr(p)) for p in prob]
            tmp = self.temp('arr').format(', '.join(tmp))
            probs.append(tmp)
        probs = ', '.join(probs)
        self.del_probs = self.temp('arr[][]').format(type='double',
                                                     name='delProbs',
                                                     values=probs)
    def export(self, class_name, method_name):
        """
        Port the trained model to the syntax of the chosen programming
        language.

        :param class_name: name of the generated class.
        :param method_name: name of the generated prediction method.
        :return: the generated source code as a string.
        """
        self.class_name = class_name
        self.method_name = method_name
        if self.target_method == 'predict':
            return self.predict()
    def predict(self):
        """
        Port the predict method.

        Returns
        -------
        :return: out : string
            The ported predict method.
        """
        return self.create_class(self.create_method())
    def create_method(self):
        """
        Build the model method or function.

        Returns
        -------
        :return out : string
            The built method as string.
        """
        # Java method bodies sit one indent level inside the class
        n_indents = 1 if self.target_language in ['java'] else 0
        return self.temp('method.predict', n_indents=n_indents,
                         skipping=True).format(**self.__dict__)
    def create_class(self, method):
        """
        Build the model class.

        Returns
        -------
        :return out : string
            The built class as string.
        """
        self.__dict__.update(dict(method=method))
        return self.temp('class').format(**self.__dict__)
|
[
"darius.morawiec@nok.onl"
] |
darius.morawiec@nok.onl
|
198860c44630bb080bcbf2da9c6818be18e5abfc
|
e755453c853ae400d94f562ad215b59166b63782
|
/tests/trees_tests/strategies.py
|
bb194a8823fc239f882e0ca9573791bea619c9fe
|
[
"MIT"
] |
permissive
|
lycantropos/dendroid
|
0cb3e276dd9c476b82b0b7a17c25c2e05616a993
|
fd11c74a395eb791caf803c848805569869080f6
|
refs/heads/master
| 2023-04-07T11:07:55.550796
| 2023-03-27T00:46:03
| 2023-03-27T00:46:03
| 215,369,321
| 0
| 1
|
MIT
| 2020-09-24T05:02:02
| 2019-10-15T18:29:36
|
Python
|
UTF-8
|
Python
| false
| false
| 4,990
|
py
|
from functools import partial
from operator import attrgetter
from typing import (Callable,
List,
Tuple)
from hypothesis import strategies
from dendroid import (avl,
binary,
red_black,
splay)
from dendroid.hints import (Item,
Key)
from tests.strategies import (non_empty_values_lists_with_orders,
single_values_with_orders,
to_values_lists_with_orders,
two_or_more_values_with_orders,
values_lists_with_orders,
values_with_orders_strategies)
from tests.utils import (Node,
Strategy,
Tree,
ValuesListWithOrder,
ValuesListsWithOrder,
compose,
has_size_two_or_more)
# strategy over map constructors for every tree backend, each projected to
# a factory returning the map's underlying tree
factories = (strategies.sampled_from([binary.map_, avl.map_, red_black.map_,
                                      splay.map_])
             .map(partial(compose, attrgetter('tree'))))
def values_list_with_order_to_items_list(values_list_with_order
                                         : ValuesListWithOrder) -> List[Item]:
    """Turn a ``(values, order)`` pair into a list of ``(key, value)`` items.

    Without an order function each value keys itself; otherwise the key is
    ``order(value)``.
    """
    values_list, order = values_list_with_order
    key_of = (lambda value: value) if order is None else order
    return [(key_of(value), value) for value in values_list]
# item-list strategies derived from the corresponding values strategies
items_lists = (values_lists_with_orders
               .map(values_list_with_order_to_items_list))
non_empty_items_lists = (non_empty_values_lists_with_orders
                         .map(values_list_with_order_to_items_list))
single_items = (single_values_with_orders
                .map(values_list_with_order_to_items_list))
two_or_more_items = (two_or_more_values_with_orders
                     .map(values_list_with_order_to_items_list))
def to_tree(factory: Callable[..., Tree], items: List[Item]) -> Tree:
    """Build a tree by splatting *items* into *factory* as positional args."""
    return factory(*items)
# tree strategies of various minimum sizes, built over all backends
empty_trees = strategies.builds(to_tree, factories,
                                strategies.builds(list))
trees = strategies.builds(to_tree, factories, items_lists)
non_empty_trees = strategies.builds(to_tree, factories,
                                    non_empty_items_lists)
trees_with_two_or_more_nodes = (strategies.builds(to_tree, factories,
                                                  two_or_more_items)
                                .filter(has_size_two_or_more))
def to_tree_with_key(factory: Callable[..., Tree],
                     items: List[Item]) -> Tuple[Tree, Key]:
    """Build a tree from all items but the last; return it with the last
    item's key (which is therefore absent from the tree)."""
    last_key, _last_value = items[-1]
    return factory(*items[:-1]), last_key
# (tree, key) pairs where the key is guaranteed not to be in the tree
empty_trees_with_keys = strategies.builds(to_tree_with_key,
                                          factories, single_items)
trees_with_keys = strategies.builds(to_tree_with_key, factories,
                                    non_empty_items_lists)
def to_non_empty_trees_with_their_keys(tree: Tree
                                       ) -> Strategy[Tuple[Tree, Key]]:
    """Pair *tree* with a strategy drawing one of its own keys."""
    tree_strategy = strategies.just(tree)
    key_strategy = strategies.sampled_from(tree.keys)
    return strategies.tuples(tree_strategy, key_strategy)
# (tree, key-contained-in-tree) pairs
non_empty_trees_with_their_keys = (
    non_empty_trees.flatmap(to_non_empty_trees_with_their_keys))
def to_non_empty_trees_with_their_nodes(tree: Tree
                                        ) -> Strategy[Tuple[Tree, Node]]:
    """Pair *tree* with a strategy drawing one of its own nodes."""
    tree_strategy = strategies.just(tree)
    node_strategy = strategies.sampled_from(list(tree))
    return strategies.tuples(tree_strategy, node_strategy)
# (tree, node-contained-in-tree) pairs
non_empty_trees_with_their_nodes = (
    non_empty_trees.flatmap(to_non_empty_trees_with_their_nodes))
def values_lists_with_order_to_items_lists(values_lists_with_order
                                           : ValuesListsWithOrder
                                           ) -> Tuple[List[Item], ...]:
    """Apply the trailing order function to every values list, yielding a
    tuple of ``(key, value)`` item lists (value keys itself when the order
    is None)."""
    *values_lists, order = values_lists_with_order
    if order is None:
        make_item = lambda value: (value, value)
    else:
        make_item = lambda value: (order(value), value)
    return tuple([make_item(value) for value in values_list]
                 for values_list in values_lists)
def to_trees_tuple(factory: Callable[..., Tree],
                   items_lists: List[List[Item]]
                   ) -> Tuple[Tree, ...]:
    """Build one tree per item list, all with the same factory."""
    trees = []
    for items_list in items_lists:
        trees.append(factory(*items_list))
    return tuple(trees)
# pairs/triplets of trees that share a single value type and order
trees_pairs = strategies.builds(
    to_trees_tuple,
    factories,
    (values_with_orders_strategies
     .flatmap(partial(to_values_lists_with_orders,
                      sizes=[(0, None)] * 2))
     .map(values_lists_with_order_to_items_lists)))
trees_triplets = strategies.builds(
    to_trees_tuple,
    factories,
    (values_with_orders_strategies
     .flatmap(partial(to_values_lists_with_orders,
                      sizes=[(0, None)] * 3))
     .map(values_lists_with_order_to_items_lists)))
|
[
"azatibrakov@gmail.com"
] |
azatibrakov@gmail.com
|
9711398bdfc3dc34609e4442b4e41256f5e89cec
|
2f114ea4068b47949532955ef52d54478cb322fa
|
/venv/Lib/site-packages/sqlalchemy/dialects/mysql/__init__.py
|
067d00386e1363160144b7d7f2d3abe719673497
|
[] |
no_license
|
tgkaiching/tgcb
|
b8f7fcd6761d3a772df13964527c89412ffa8045
|
d0dec634191eb26fb5fa875e5ab608981b79f7f7
|
refs/heads/master
| 2022-10-09T17:25:50.604485
| 2018-08-16T00:55:56
| 2018-08-16T00:55:56
| 139,693,981
| 1
| 2
| null | 2022-10-02T04:37:09
| 2018-07-04T08:40:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,185
|
py
|
# mysql/__init__.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base, mysqldb, oursql, \
pyodbc, zxjdbc, mysqlconnector, pymysql,\
gaerdbms, cymysql
# default dialect: MySQLdb (mysql-python) is used unless the database URL
# selects another driver explicitly
base.dialect = mysqldb.dialect
from .base import \
BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, DATETIME, \
DECIMAL, DOUBLE, ENUM, DECIMAL,\
FLOAT, INTEGER, INTEGER, JSON, LONGBLOB, LONGTEXT, MEDIUMBLOB, \
MEDIUMINT, MEDIUMTEXT, NCHAR, \
NVARCHAR, NUMERIC, SET, SMALLINT, REAL, TEXT, TIME, TIMESTAMP, \
TINYBLOB, TINYINT, TINYTEXT,\
VARBINARY, VARCHAR, YEAR, dialect
# Public names re-exported by ``from sqlalchemy.dialects.mysql import *``.
# Each name now appears exactly once; 'DECIMAL' and 'INTEGER' were
# previously listed twice (harmless for star-import, but misleading).
__all__ = (
    'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME',
    'DECIMAL', 'DOUBLE', 'ENUM', 'FLOAT', 'INTEGER',
    'JSON', 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT',
    'NCHAR', 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME',
    'TIMESTAMP', 'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR',
    'YEAR', 'dialect'
)
|
[
"kc.suen@towngas.com"
] |
kc.suen@towngas.com
|
9eed196e985af87474e283f2699e0539d16aef11
|
c6414efe635bc5ea4680252f66dd24e2ce3bd087
|
/test/test_eigen.py
|
a4653627b63d47f5e8e345cfcc9b7ce168516102
|
[] |
no_license
|
nicolasfauchereau/spectrum
|
7e180dc625c8a9c486df5399246593acb7b69ca2
|
de3fea857f2d8e883258b6999ec1a43a230602db
|
refs/heads/master
| 2021-01-19T13:45:47.786187
| 2017-03-08T13:07:00
| 2017-03-08T13:07:00
| 88,107,023
| 2
| 1
| null | 2017-04-13T00:01:52
| 2017-04-13T00:01:52
| null |
UTF-8
|
Python
| false
| false
| 560
|
py
|
from spectrum import *
import numpy
#from spectrum import MINEIGVAL
from nose.tools import assert_almost_equal
def test_mineigval():
    """MINEIGVAL must return the known minimum eigenvalue and eigenvector
    for this small Hermitian Toeplitz problem."""
    tol = 1e-10
    T0 = 3
    T = numpy.array([-2 + .5j, .7 - 1j], dtype=complex)
    eigval, eigvec = MINEIGVAL(T0, T, tol)
    print('Eigenvalue=', eigval)
    print('Eigenvector=', eigvec)
    assert_almost_equal(eigval, .488694078106)
    expected_eigvec = numpy.array([0.13790622 - 1.74155903e-02j,
                                   0.21272177 - 4.65701963e-18j,
                                   0.13790622 + 1.74155903e-02j])
    # Compare the eigenvector element-wise. The previous code asserted
    # ``eigvec.all() == expected_eigvec.all()``, which reduces each array
    # to a single truthiness flag and therefore never compared the values.
    from numpy.testing import assert_allclose
    assert_allclose(eigvec, expected_eigvec, rtol=1e-6)
|
[
"cokelaer@gmail.com"
] |
cokelaer@gmail.com
|
dfd5fe1cc7aa6d241684d6759ef0894b6ec15e4f
|
87ced16167203723557f75dc005c3aaae7e3f404
|
/online-judges/leetcode/making-a-large-island.py
|
3207a4b14ad2c990a0669813a191359769dcac11
|
[] |
no_license
|
joao-conde/competitive-programming
|
87e0c46f06bc017eea2701b9be860ee614c0e159
|
0d2d7375f0603142febab69707496d3b5b985054
|
refs/heads/master
| 2023-08-07T01:47:19.864827
| 2023-07-25T11:43:39
| 2023-07-25T11:43:39
| 132,962,490
| 6
| 3
| null | 2020-04-20T23:15:25
| 2018-05-10T22:55:01
|
C++
|
UTF-8
|
Python
| false
| false
| 1,802
|
py
|
# https://leetcode.com/problems/making-a-large-island/
from collections import defaultdict
class Solution:
    """LeetCode 827: size of the largest island obtainable by flipping at
    most one 0-cell to 1 in a square binary grid."""

    def find_island(self, grid, si, sj, seen):
        """Flood-fill (iteratively) the island containing (si, sj).

        Returns the set of its cell coordinates (empty when the start cell
        is water, out of bounds, or already visited); visited cells are
        added to *seen*.
        """
        island = set()
        pending = [(si, sj)]
        size = len(grid)
        while pending:
            ci, cj = pending.pop()
            if (ci, cj) in seen:
                continue
            if ci < 0 or cj < 0 or ci >= size or cj >= size:
                continue
            if grid[ci][cj] == 0:
                continue
            seen.add((ci, cj))
            island.add((ci, cj))
            pending.extend(
                [(ci + 1, cj), (ci - 1, cj), (ci, cj + 1), (ci, cj - 1)])
        return island

    def largestIsland(self, grid: list[list[int]]) -> int:
        """Return the best achievable island size after one optional flip."""
        best = 0
        seen = set()
        # map each land cell to the (shared) set of cells of its island
        cell_to_island = defaultdict(set)
        size = len(grid)

        for row in range(size):
            for col in range(size):
                island = self.find_island(grid, row, col, seen)
                best = max(best, len(island))
                for cell in island:
                    cell_to_island[cell] = island

        # try flipping each water cell: it joins the (deduplicated) union
        # of the islands touching its four neighbors
        for row in range(size):
            for col in range(size):
                if grid[row][col] == 1:
                    continue
                merged = set()
                for neighbor in ((row + 1, col), (row - 1, col),
                                 (row, col + 1), (row, col - 1)):
                    merged |= cell_to_island[neighbor]
                best = max(best, len(merged) + 1)
        return best
# Tests
# smoke tests, executed at import time; they double as usage examples
solver = Solution()
assert solver.largestIsland([[1, 0], [0, 1]]) == 3
assert solver.largestIsland([[1, 1], [1, 0]]) == 4
assert solver.largestIsland([[1, 1], [1, 1]]) == 4
|
[
"joaodiasconde@gmail.com"
] |
joaodiasconde@gmail.com
|
b1bf4084761434d71fa9e9b667adbfd207cd9ffd
|
51108a50ffb48ad154f587c230045bb783f22240
|
/bfgame/attacks/melee.py
|
d8a34595ca3b6e66cc6ad08e789a13ab8b479c5a
|
[
"MIT"
] |
permissive
|
ChrisLR/BasicDungeonRL
|
c90bd0866c457557cccbad24e14689d5d6db7b00
|
b293d40bd9a0d3b7aec41b5e1d58441165997ff1
|
refs/heads/master
| 2021-06-15T13:56:53.888646
| 2019-08-05T16:33:57
| 2019-08-05T16:33:57
| 104,269,987
| 3
| 0
|
MIT
| 2019-08-05T16:28:23
| 2017-09-20T21:35:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,899
|
py
|
import inspect
from bflib import dice
from core.attacks.base import Attack
class MeleeAttack(Attack):
    """Melee attack resolution: a d20 roll to hit versus armor class,
    then a damage roll modified by the attacker's strength."""
    # concrete attack definition; presumably set by subclasses — TODO confirm
    base_attack = None
    # a melee attack can be made bare-handed
    needs_weapon = False
    @classmethod
    def make_melee_hit_roll(cls, attacker, defender, sneak_attack=False):
        """Return True when the attack hits *defender*.

        A natural 1 always misses and a natural 20 always hits; otherwise
        the modified roll must meet or exceed the defender's armor class.
        """
        target_ac = defender.combat.armor_class
        if target_ac is None:
            # treat a missing armor class as 0 (always hittable)
            target_ac = 0
        modifier = 0
        modifier += attacker.combat.attack_bonus
        modifier += attacker.stats.strength_modifier if attacker.stats else 0
        # TODO If attacker is behind defender, +2 to hit roll
        # TODO If attacker invisible, +4
        # TODO If defender invisible, -4
        # TODO If defender is pinned, +
        if sneak_attack:
            modifier += 4
        if not defender.health.conscious:
            # striking an unconscious target is much easier
            modifier += 8
        roll = dice.D20.manual_roll_total(1)
        if roll == 1:
            # natural 1: automatic miss, regardless of modifiers
            return False
        if roll == 20:
            # natural 20: automatic hit
            # TODO Some defenders CANNOT be hit, it should still fail.
            return True
        roll += modifier
        if roll >= target_ac:
            # TODO Some defenders CANNOT be hit, it should still fail.
            return True
        else:
            return False
    @classmethod
    def make_melee_damage_roll(cls, attacker, damage_dice, other_modifier=0, sneak_attack=False):
        """Return the damage dealt by a successful melee hit.

        *damage_dice* may be a dice class (rolled via ``manual_roll_total``)
        or an instance (rolled via ``roll_total``) — TODO confirm against
        bflib.dice. Damage is floored at 1 (2 on a sneak attack) and
        doubled on a sneak attack.
        """
        total_damage = 0
        if inspect.isclass(damage_dice):
            total_damage += damage_dice.manual_roll_total(1)
        else:
            total_damage += damage_dice.roll_total()
        total_damage += attacker.stats.strength_modifier if attacker.stats else 0
        total_damage += other_modifier
        if total_damage <= 0:
            # never deal zero/negative damage on a hit
            if sneak_attack:
                return 2
            else:
                return 1
        else:
            if sneak_attack:
                return total_damage * 2
            else:
                return total_damage
|
[
"arzhul@gmail.com"
] |
arzhul@gmail.com
|
c81c476ebf8843aad428297899a3c6fe89568a1a
|
1ebf64e6526b050bb770ac401ba21d4d44bca495
|
/Testing/demo files/veQ/marl.py
|
b62f5c8b8cf79ccf3d1a660a0ae806ec6b4e4875
|
[] |
no_license
|
sihaanssr/BE-major-MARL
|
1b928a44cc38a9319d512b0a89d767ece9747fd0
|
8427c7ffbabd0bae085bf1cf4259210b619d6f20
|
refs/heads/main
| 2023-05-05T02:49:25.179793
| 2021-05-28T13:45:12
| 2021-05-28T13:45:12
| 368,431,309
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,003
|
py
|
'''
Created on 7/10/2016
@author: CarolinaHiguera

Driver script for the MARL pipeline: (re-)executes the helper modules in
this namespace and runs the testing stage.
'''
import var
# NOTE(review): each module is both imported and re-executed via
# exec(compile(...)); the exec runs the module's top level in THIS
# namespace, so its globals become available here directly.
exec(compile(open("./var.py", "rb").read(), "./var.py", 'exec'))
# import arrivalRateGen
# exec(compile(open("./arrivalRateGen.py", "rb").read(), "./arrivalRateGen.py", 'exec'))
# import fun
# exec(compile(open("./fun.py", "rb").read(), "./fun.py", 'exec'))
# import train2_RL
# exec(compile(open("./train2_RL.py", "rb").read(), "./train2_RL.py", 'exec'))
import test2_RL
exec(compile(open("./test2_RL.py", "rb").read(), "./test2_RL.py", 'exec'))
# declared for modules executed above that assign these names
global baselineMean, baselineMedian, baselineMin
#=========== DISCRETIZE SPACE STATE FOR EACH AGENT
#arrivalRateGen.createPolyFlow()
#fun.learnDiscretization(var.totalDaysObs)
#fun.writeDataClusters()
#fun.plotClusterHistograms()
#=========== TRAINING PROCESS
#print('---------- Training --------------')
#train2_RL.train()
#=========== TESTING PROCESS
print('---------- Testing ---------------')
test2_RL.test()
print('----------- END -----------')
|
[
"anup20joseph@gmail.com"
] |
anup20joseph@gmail.com
|
850456b7d377656d9b88254f9b4014dc903358f5
|
6e4e6b64c035881f1cff39db616b0a80e1568c51
|
/fes2016QualA/q3.py
|
3f46237051c316ebde9c9bd3a46679ec69f95363
|
[] |
no_license
|
Lischero/Atcoder
|
f7471a85ee553e3ae791e3e5670468aea1fa53cc
|
f674d6a20a56eebdafa6d50d5d2d0f4030e5eace
|
refs/heads/master
| 2020-05-21T16:23:36.095929
| 2018-10-18T04:27:55
| 2018-10-18T04:27:55
| 60,671,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
# -*- coding:utf-8 -*-
# Greedily spend operations turning letters into 'a' from left to right;
# any leftover operations are applied to the final letter (mod 26).
chars = list(input())
k = int(input())
for idx, ch in enumerate(chars):
    if k == 0:
        break
    # increments needed to wrap this letter around to 'a'
    cost = 26 - (ord(ch) - ord('a'))
    if ch != 'a' and k >= cost:
        k -= cost
        chars[idx] = 'a'
if k > 0:
    chars[-1] = chr(ord(chars[-1]) + k % 26)
print(''.join(chars))
|
[
"vermouth.lischero@gmail.com"
] |
vermouth.lischero@gmail.com
|
257bc81119c88a14a85b2ca30ba61dca1c79409b
|
8c1b8ef19c55de8da1341995053769b03af433a4
|
/code/processing/growth_curves_plate_reader/20200708_r1_O1_T_beta-thujaplicin/growth_plate_reader_analysis.py
|
fa5c3b78a45ecd67ec2238eb1e7b5b006c20f759
|
[
"MIT",
"CC-BY-4.0",
"CC-BY-ND-4.0",
"CC-BY-ND-3.0"
] |
permissive
|
mrazomej/evo_mwc
|
badb78238a129cc0c863af3ca424691a188cb87b
|
b69c800c5518d906cd2c65334c6feffdbab5acf1
|
refs/heads/master
| 2023-07-20T04:13:53.025102
| 2021-05-19T01:57:59
| 2021-05-19T01:57:59
| 185,700,015
| 0
| 1
|
MIT
| 2023-07-06T21:42:28
| 2019-05-09T00:49:59
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,762
|
py
|
# -*- coding: utf-8 -*-
# %%
# Pipeline: background-subtract plate-reader OD600 curves, fit a Gaussian
# process per well with Stan to infer growth rates, then render per-well
# and whole-plate summary figures.
import numpy as np
import pandas as pd
import string
import os
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import evo_mwc.viz
import evo_mwc.fitderiv
import seaborn as sns
import statsmodels.api as sm
import git
# Import libraries necessary for Bayesian analysis
import cmdstanpy
import arviz as az
# Find home directory for repo
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
# Define directory where stan file exists
standir = f"{homedir}/evo_mwc/stan_code/"
matplotlib.use('Agg')
evo_mwc.viz.pboc_style_mpl()
# Find date and run number from the working directory name
# (expected layout: <DATE>_r<RUN_NO>_...)
workdir = os.getcwd().split('/')[-1]
DATE = int(workdir.split('_')[0])
RUN_NO = int(workdir.split('_')[1][-1])
# Define parameters to group strains by
GROUP = ['strain', 'neg_selection']
# Define if you only want to plot existing results
REPLOT = False
# %%
# Load the data.
data = pd.read_csv(f'output/{DATE}_r{RUN_NO}_growth_plate.csv')
# Generate a dictionary of the mean blank at each time point.
blank_vals = {t: val['OD600'].mean() for t, val in
              data[data['strain'] == 'blank'].groupby(['time_min'])}
# Add mean blank values for each time point to the dataframe,
# as well as background subtracted OD values.
for k, v in blank_vals.items():
    data.loc[data['time_min'] == k, 'blank_val'] = v
data['OD_sub'] = data['OD600'] - data['blank_val']
# %%
# Compute growth rate for individual well data
# Group data by well and strain
# NOTE: The strain grouping is to remove blanks from analysis
data_group = data.groupby(['well', 'strain'])
# List groups
groups = [group for group, data in data_group]
# Initialize data frame to save derivatives
df_gp = pd.DataFrame([])
# Check if the analysis should be done
if (not REPLOT):
    print("Compiling Stan program")
    # NOTE(review): this rebinds ``sm`` (the statsmodels alias imported
    # above). statsmodels is not used after this point, so it works, but
    # one of the two names should be changed.
    sm = cmdstanpy.CmdStanModel(
        stan_file=f"{standir}/gp_growth_rate_prior_deriv.stan"
    )
    # Loop through groups
    for group, df in data_group:
        # Check if the group is not a blank
        if group[1] == 'blank':
            continue
        print(group)
        # Build input as required by the Gaussian process function.
        # Define time points where data was measured
        t = df["time_min"].values
        # Define number of time points
        N = len(t)
        # Define OD measurements
        y = df["OD600"].values
        # Define where PPC samples will be taken
        t_predict = t
        # Define number of points in PPC
        N_predict = len(t_predict)
        # Pack parameters in dictionary
        # NOTE(review): rebinds the module-level ``data`` DataFrame; safe
        # only because ``data_group`` was built above — consider renaming
        # to ``stan_data``.
        data = {
            "N" : N, # number of time points
            "t": t, # time points where data was evaluated
            "y": y, # data's optical density
            "N_predict": N_predict, # number of datum in PPC
            "t_predict": t_predict, # time points where PPC is evaluated
            "alpha_param": [0, 1], # parameters for alpha prior
            "sigma_param": [0, 1], # parameters for sigma prior
            "rho_param": [1000, 1000], # parameters for rho prior
        }
        print(f"Sampling GP for well {group[0]}")
        samples = sm.sample(
            data=data,
            chains=6,
            iter_sampling=400,
            show_progress=False,
        )
        print("Done!")
        samples = az.from_cmdstanpy(posterior=samples)
        # Extract GP OD data, stacking together chains and draws as a single
        # dimension
        data_ppc = samples.posterior["y_predict"].stack(
            {"sample": ("chain", "draw")}
        ).transpose("sample", "y_predict_dim_0")
        # Append inferred OD columns (posterior median and std per time point)
        df = df.assign(
            gp_OD600 = np.median(data_ppc.squeeze().values, axis=0),
            gp_OD600_std = np.std(data_ppc.squeeze().values, axis=0),
        )
        # Extract GP derivative data, stacking together chains and draws as a
        # single dimension
        data_ppc = samples.posterior["dy_predict"].stack(
            {"sample": ("chain", "draw")}
        ).transpose("sample", "dy_predict_dim_0")
        # Append inferred derivative (growth-rate) columns
        df = df.assign(
            gp_growth_rate = np.median(data_ppc.squeeze().values, axis=0),
            gp_growth_rate_std = np.std(data_ppc.squeeze().values, axis=0),
        )
        # Extract GP doubling time data, stacking together chains and draws as a
        # single dimension
        data_ppc = samples.posterior["doubling_time"].stack(
            {"sample": ("chain", "draw")}
        ).transpose("sample", "doubling_time_dim_0")
        # Append inferred doubling-time columns
        df = df.assign(
            gp_doubling_time = np.median(data_ppc.squeeze().values, axis=0),
            gp_doubling_time_std = np.std(data_ppc.squeeze().values, axis=0),
        )
        # Append dataframe
        df_gp = pd.concat([df_gp, df], ignore_index=True)
    # Export result
    df_gp.to_csv(f'output/{DATE}_r{RUN_NO}_gp_per_well.csv',
                 index=False)
# Read derivatives (also the entry point when REPLOT is True)
df_gp = pd.read_csv(f'output/{DATE}_r{RUN_NO}_gp_per_well.csv')
# group derivatives
df_gp_group = df_gp.groupby(['well', 'strain'])
# Print growth curve and its derivative for each group
# Initialize multi-page PDF (one page per well)
with PdfPages('output/growth_rate_per_well.pdf') as pdf:
    # Loop through groups
    for group in groups:
        # check that there are no blanks
        if group[1] == 'blank':
            continue
        # Initialize figure
        fig, ax = plt.subplots(2, 1, figsize=(4, 4), sharex=True)
        # Extract curve data
        growth_data = data_group.get_group(group)
        rate_data = df_gp_group.get_group(group)
        # Plot plate reader data
        ax[0].plot(growth_data.time_min, growth_data.OD600, lw=0,
                   marker='.')
        # Plot growth rate with +/- 1 std credible region
        ax[1].plot(rate_data.time_min, rate_data.gp_growth_rate)
        ax[1].fill_between(rate_data.time_min,
                           rate_data.gp_growth_rate +
                           rate_data.gp_growth_rate_std,
                           rate_data.gp_growth_rate -
                           rate_data.gp_growth_rate_std,
                           alpha=0.5)
        # Label plot
        ax[0].set_title(str(group))
        ax[0].set_ylabel(r'OD$_{600}$')
        ax[1].set_ylabel(r'growth rate (min$^{-1}$)')
        ax[1].set_xlabel('time (min)')
        plt.tight_layout()
        pdf.savefig()
        plt.close()
# Make summary figure of growth rates.
# find number of rows and columns from layout
layout = pd.read_excel(f'./{DATE}_plate_layout.xlsx', sheet_name='well',
                       header=None).values
layout_shape = layout.shape
# Initialize plot (one small panel per plate well)
fig, ax = plt.subplots(
    layout_shape[0],
    layout_shape[1],
    figsize=(8, 4),
    sharex=True,
    sharey=True
)
# Loop through each well
for group, df in df_gp_group:
    # Find corresponding row and column of plot
    r, c = [int(x) for x in np.where(layout == group[0])]
    # Set plot axis
    # Plot growth rate
    ax[r][c].plot(df.sort_values('time_min').time_min,
                  df.sort_values('time_min').gp_growth_rate)
    # Set ylim for plot (shared axes: last well processed wins)
    ax[0][0].set_ylim([
        df.gp_growth_rate.min() - 0.001,
        df.gp_growth_rate.max() + 0.001
    ])
# Remove axis from all plots
ax = ax.ravel() # ravel list of axis
# Loop through axis
for a in ax:
    a.get_xaxis().set_visible(False)
    a.get_yaxis().set_visible(False)
fig.suptitle(f'{DATE}_r{RUN_NO} whole plate growth rates', y=0.95)
plt.savefig(f'output/growth_rate_summary.png',
            bbox_inches='tight')
|
[
"manuel.razo.m@gmail.com"
] |
manuel.razo.m@gmail.com
|
85bd3c963b228d902b75d1155a4d7c4abe708fdd
|
586383ed657389cc67ca6c822b3ebd7e91e4d5a9
|
/app_page_cap_img/models.py
|
38ed270aeb636e415f69df0ba512aa59a72cbf83
|
[] |
no_license
|
idelfrides/app_capturepage_django
|
d510e824ca57e598ec7c8bcc2e9e7c7fa04099f6
|
6ad6d87e76deb6075195ee2117c0974a6b480b5f
|
refs/heads/master
| 2022-06-14T17:44:15.945803
| 2022-06-07T20:30:18
| 2022-06-07T20:30:18
| 225,614,573
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,651
|
py
|
from distutils.command.config import config
from django.db import models
from django.conf import settings
from .managers import Manager
# (stored value, human-readable label) pairs for CharField choices
POSITION_CHOICES = (
    ('E', 'Esquerda'),
    ('D', 'Direita'),
    ('C', 'Centralizado'),
)
# NOTE(review): "MIDEA" looks like a typo for "MIDIA"; renaming would
# require updating every reference, so it is only flagged here.
TYPE_MIDEA_CHOICES = (
    ('I', 'Imagem'),
    ('V', 'Vídeo')
)
class PageCapImage(models.Model):
    """Content of a capture page: owner, material name, headline, copy
    text and an optional image."""
    # page owner; default=1 assumes the first user created — TODO confirm
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        default=1,
        on_delete=models.CASCADE
    )
    # name of the material being offered (e.g. an e-book)
    material = models.CharField(
        max_length=100,
        default='E-book vendas online'
    )
    headline = models.TextField(
        default='Coloque sua Headline aqui.'
    )
    copy_descricao = models.TextField(
        default='Sua Copy descrição aqui.'
    )
    image = models.ImageField(
        upload_to='images/',
        null=True,
        blank=True
    )
    # auto_now=True: refreshed on every save
    update = models.DateTimeField(
        auto_now=True,
        auto_now_add=False
    )
    # auto_now_add=True: set once at creation
    timestamp = models.DateTimeField(
        auto_now=False,
        auto_now_add=True
    )
    def __str__(self):
        return self.material
    class Meta:
        verbose_name_plural = 'Material'
class Configuracao(models.Model):
    """Display configuration for the capture page: which media type to
    show and where to place it."""
    tipo_media = models.CharField(
        choices=TYPE_MIDEA_CHOICES,
        # default must be one of the STORED choice values ('I'/'V');
        # the previous default 'Imagem' was the display label and fails
        # Model.full_clean() validation against the choices
        default='I',
        max_length=20
    )
    media_position = models.CharField(
        choices=POSITION_CHOICES,
        # stored values are 'E'/'D'/'C'; 'Esquerda' was the display label
        default='E',
        max_length=20
    )
    # auto_now=True: refreshed on every save
    update = models.DateTimeField(
        auto_now=True,
        auto_now_add=False
    )
    # auto_now_add=True: set once at creation
    timestamp = models.DateTimeField(
        auto_now=False,
        auto_now_add=True
    )
    def __str__(self):
        config_ = "Configurações"
        return config_
    class Meta:
        verbose_name_plural = 'Configuracoes'
class Media(models.Model):
    """Media assets for the page: an image, an optional video and the
    downloadable PDF delivered to leads."""
    imagem = models.ImageField(upload_to='images/')
    video = models.FileField(
        upload_to='videos/',
        null=True,
        blank=True
    )
    arquivo_pdf = models.FileField(upload_to='files/')
    # auto_now=True: refreshed on every save; auto_now_add=True: set once
    update = models.DateTimeField(auto_now=True, auto_now_add=False)
    timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)
    def __str__(self):
        # man = Manager()
        # c = man.set_count(1)
        nome = "Media" # + str(self.count)
        return nome
    class Meta:
        verbose_name_plural = 'Medias'
# def get_absolute_url(self):
#     return "app_name/%s/" %(self.id)
class LeadsEmail(models.Model):
    """Email address captured from a page visitor (a lead)."""
    email = models.EmailField(default='idelfridesjorgepapai@gmail.com')
    # refreshed on every save
    timestamp = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.email
|
[
"idelfridesjorgepapai@gmail.com"
] |
idelfridesjorgepapai@gmail.com
|
07bb340293a0865e794ea8be4102ebb9ec0411d2
|
b1e785280635716d50d68d628d0d76b20dc4c386
|
/game_tracker/wsgi.py
|
3cf1c1ee788b8014eb824d61ad71b6c4b652404d
|
[] |
no_license
|
CoreyWilson319/game_tracker
|
17f684c59a466bcbc47a3940a434bd1cbba78c3b
|
e1f8962159f87d603bb0d928633876509ce76bdd
|
refs/heads/main
| 2023-02-21T13:27:44.377667
| 2021-01-27T14:17:04
| 2021-01-27T14:17:04
| 331,335,068
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
WSGI config for game_tracker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'game_tracker.settings')
application = get_wsgi_application()
|
[
"you@example.com"
] |
you@example.com
|
71fe7e8212514b76527330fe88222c93e5297c78
|
157d2a2f4031c58e5504bcbac5348ff53883facc
|
/rDj48/enroll/forms.py
|
9b2ec9d69b4dcf64f0fc4ebc028105648ccddbd1
|
[] |
no_license
|
optirg-39/Django_gekSh
|
d78b635fd3ee88addd084b68ec35c6284adfb55c
|
1129a6df35c110dfeeeaaf1a76b2ebc192a5f1ce
|
refs/heads/master
| 2023-04-15T13:09:03.067099
| 2021-04-26T12:15:35
| 2021-04-26T12:15:35
| 352,018,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
from django import forms
from .models import User
from django.forms import ModelForm
class UserForm(forms.ModelForm):
    """Registration form backed by the User model."""

    class Meta:
        model = User
        fields = ['name', 'email', 'password']
|
[
"opti39rg@gmail.com"
] |
opti39rg@gmail.com
|
750ef2857f71cdbfb166b0d44ab0fb803c25890c
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_2/232.py
|
75a478a83dc3509f3ffb15597d23d5c54bbb573b
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,768
|
py
|
#!/usr/bin/env python
"train timetable"
import sys
class Event(dict):
    """A timetable event (departure or arrival) stored as a dict.

    Arrival times include the turnaround, i.e. the moment the arrived
    train is ready to depart again. The type constants are prefixed with
    digits so that, at equal times, arrivals ('0...') sort before
    departures ('1...') and a just-arrived train can be reused.
    (Python 2 code: relies on the builtin cmp().)
    """
    LOC_A, LOC_B = "A", "B"
    TYP_DEP, TYP_ARR = '1departure', '0arrival'
    def __init__(self, time, orig, type, turnaround):
        super(Event, self).__init__()
        self['time'] = self.parse_time(time)
        if type == Event.TYP_ARR:
            # the train is only usable again after the turnaround time
            self['time'] += turnaround
        self['orig'] = orig
        self['dest'] = self.other_location(orig)
        self['type'] = type
    def other_location(self, loc):
        # the line has exactly two endpoints, A and B
        if loc == Event.LOC_A:
            return Event.LOC_B
        return Event.LOC_A
    def parse_time(self, time):
        # "HH:MM" -> minutes since midnight
        hours, mins = time.strip().split(':')
        hours, mins = int(hours), int(mins)
        return hours * 60 + mins
    @staticmethod
    def cmp(ev_a, ev_b):
        # order by time, then by type (arrivals before departures)
        if ev_a['time'] == ev_b['time']:
            return cmp(ev_a['type'], ev_b['type'])
        return cmp(ev_a['time'], ev_b['time'])
return cmp(ev_a['time'], ev_b['time'])
def read_input(finp):
N = int(finp.readline())
for n in xrange(N):
T = int(finp.readline())
NA, NB = finp.readline().strip().split()
NA, NB = int(NA), int(NB)
events = []
for na in xrange(NA):
departure, arrival = finp.readline().strip().split()
events.append(Event(departure, Event.LOC_A, Event.TYP_DEP, T))
events.append(Event(arrival, Event.LOC_A, Event.TYP_ARR, T))
for nb in xrange(NB):
departure, arrival = finp.readline().strip().split()
events.append(Event(departure, Event.LOC_B, Event.TYP_DEP, T))
events.append(Event(arrival, Event.LOC_B, Event.TYP_ARR, T))
if False: print n, na, nb
events.sort(cmp=Event.cmp)
#from com.moveki import progbase
#progbase.yaml_dump('-', events)
needed_in = {
Event.LOC_A : 0,
Event.LOC_B : 0,
}
max_needed_in = {
Event.LOC_A : 0,
Event.LOC_B : 0,
}
for e in events:
if e['type'] == Event.TYP_ARR:
needed_in[e['dest']] -= 1
elif e['type'] == Event.TYP_DEP:
needed_in[e['orig']] += 1
if needed_in[e['orig']] > max_needed_in[e['orig']]:
max_needed_in[e['orig']] = needed_in[e['orig']]
#print "-------------"
#progbase.yaml_dump('-', e)
#progbase.yaml_dump('-', needed_in)
else:
raise RuntimeError("oops")
max_needed_in['ncase'] = n + 1
print "Case #%(ncase)d: %(A)d %(B)d" % (max_needed_in)
#progbase.yaml_dump('-', max_needed_in)
def main():
read_input(sys.stdin)
if __name__ == "__main__":
main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
14d1fcc9d5916920ca2b2c816f8c4fd4d335dcf8
|
ed44c039862c6bde4c790c29f49d4e1012ae04ff
|
/sep11/venv/bin/rst2xml.py
|
e63dbdf9839a7860856a699d0d912f53ddf6e6f3
|
[] |
no_license
|
ravijaya/sep13
|
983bc2fc62a03c607478400dbdf9f91acc028b5d
|
fca95700ec9e3b56fc99621396d72ae411b3be92
|
refs/heads/master
| 2022-09-19T05:04:29.422670
| 2019-09-13T13:17:21
| 2019-09-13T13:17:21
| 208,267,991
| 0
| 0
| null | 2022-09-13T23:02:52
| 2019-09-13T13:15:15
|
Python
|
UTF-8
|
Python
| false
| false
| 646
|
py
|
#!/home/ravijaya/Trainings/Python-Devops/sep11/venv/bin/python
# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates Docutils-native XML from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='xml', description=description)
|
[
"ravijaya@localhost.localdomain"
] |
ravijaya@localhost.localdomain
|
65d8dad340c685fb2a9eb0d09bd3e8560bf36bc5
|
fd02e8924ba325f2a62bbf97e460740a65559c74
|
/PythonStart/Blackhorse/HM_Class/384封装案例-需求分析01.py
|
b7dc1e8c2a808237cebcf1689430d8d72663d433
|
[] |
no_license
|
ShiJingChao/Python-
|
51ee62f7f39e0d570bdd853794c028020ca2dbc2
|
26bc75c1981a1ffe1b554068c3d78455392cc7b2
|
refs/heads/master
| 2020-07-08T00:05:16.532383
| 2019-10-14T15:19:49
| 2019-10-14T15:19:49
| 203,512,684
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,553
|
py
|
# CLASS 384——385-386面向对象封装案例
# 1.封装是面向对象编程的一大特点
# 2.面向对象编程的第一个步——将属性和方法封装到一个抽象的类中
# 3.外界使用类创建对象,然后让对象调用方法
# 4.对象方法的细节都被封装在类的内部
# 一个对象的属性可以是另一个类创建的对象
# 01 士兵突击
class Gun:
def __init__(self, model):
# 1.枪的型号
self.model = model
# 2.子弹的数量
self.bullet_count = 0
def add_bullet(self, count):
self.bullet_count += count
def shoot(self):
if self.bullet_count <= 0:
print("%s没有子弹,请加子弹" % self.model)
self.bullet_count -= 1
print("%s哒哒哒,剩余子弹%d" % (self.model, self.bullet_count))
class Soldier():
def __init__(self, name):
self.name = name
self.gun = None
# 1.创建枪对象
ak47 = Gun("AK47")
ak47.add_bullet(50)
ak47.shoot()
tuoni = Soldier("托尼")
tuoni.gun = ak47
print(tuoni.gun)
# 386——创建初始化方法
# 开发士兵类
# 假设每一个新兵都没有枪
# 定义没有初始值的属性
# 在定义属性时,如果不知道设置什么初始值,可以设置为None
# None关键字表示什么都没有
# 可以表示一个空对象,没有方法和属性,是一个特殊的常量
# 可以将None赋值给任意一个变量
# fire 方法需求
# 1.判断是否有枪,没有枪没办法冲锋
# 2.喊一声口号
# 3.填装子弹
# 4.射击
|
[
"1015174363@qq.com"
] |
1015174363@qq.com
|
ae52c62a6fcd0663134cd4a812fc27643c009470
|
2a120a15321101c09fbc2016f28f49662a64a692
|
/Codes/AdS/First try/pyeq2/ExtendedVersionHandlers/ExtendedVersionHandler_Offset.py
|
1a22f5514e1488978c538a26039acc3d71bd853e
|
[
"BSD-2-Clause"
] |
permissive
|
afarahi/QFT
|
ba7abd1f3520faa31a521d1db4ce313e684e478e
|
d8e676b8e80033b69d7df99f5ed2897273a8055f
|
refs/heads/master
| 2021-01-16T18:21:11.210549
| 2012-09-16T11:54:05
| 2012-09-16T11:54:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,885
|
py
|
# pyeq2 is a collection of equations expressed as Python classes
#
# Copyright (C) 2012 James R. Phillips
# 2548 Vera Cruz Drive
# Birmingham, AL 35235 USA
#
# email: zunzun@zunzun.com
# web: http://zunzun.com
#
# License: BSD-style (see LICENSE.txt in main source directory)
# Version info: $Id: ExtendedVersionHandler_Offset.py 21 2012-03-10 19:48:51Z zunzun.com@gmail.com $
import pyeq2
import IExtendedVersionHandler
class ExtendedVersionHandler_Offset(IExtendedVersionHandler.IExtendedVersionHandler):
def AssembleDisplayHTML(self, inModel):
return inModel._HTML + " + Offset"
def AssembleDisplayName(self, inModel):
return inModel._baseName + " With Offset"
def AssembleSourceCodeName(self, inModel):
return inModel.__class__.__name__ + "_Offset"
def AssembleCoefficientDesignators(self, inModel):
return inModel._coefficientDesignators + ['Offset']
# overridden from abstract parent class
def AppendAdditionalCoefficientBounds(self, inModel):
if inModel.upperCoefficientBounds != []:
inModel.upperCoefficientBounds.append(None)
if inModel.lowerCoefficientBounds != []:
inModel.lowerCoefficientBounds.append(None)
def AssembleOutputSourceCodeCPP(self, inModel):
return inModel.SpecificCodeCPP() + "\ttemp += Offset;\n"
# overridden from abstract parent class
def GetAdditionalDataCacheFunctions(self, inModel, inDataCacheFunctions):
return inDataCacheFunctions
def GetAdditionalModelPredictions(self, inBaseModelCalculation, inCoeffs, inDataCacheDictionary, inModel):
return self.ConvertInfAndNanToLargeNumber(inBaseModelCalculation + inCoeffs[len(inCoeffs)-1])
# overridden from abstract parent class
def CanLinearSolverBeUsedForSSQABS(self, inModelFlag):
return False
|
[
"aryaf66@gmail.com"
] |
aryaf66@gmail.com
|
724be9c7d7e5d959d5d756efdf52aa7275c3b593
|
c7f0bef042fe7ec5636c4c68db0828dcac4d7a68
|
/nbresuse/handlers.py
|
2df514302af8cf90146d678ee6817cf777563e4b
|
[
"BSD-2-Clause"
] |
permissive
|
zzhangjii/nbresuse
|
93ca3cdac06f57b13954d2a8d149e73d287bdca7
|
0daa6ec97667959d24dd89f3beeecd462afd51db
|
refs/heads/master
| 2021-01-21T14:16:56.642914
| 2017-06-24T01:01:20
| 2017-06-24T01:01:20
| 95,262,522
| 0
| 0
| null | 2017-06-23T23:06:46
| 2017-06-23T23:06:46
| null |
UTF-8
|
Python
| false
| false
| 724
|
py
|
import os
import json
import psutil
from notebook.utils import url_path_join
from notebook.base.handlers import IPythonHandler
def get_metrics():
cur_process = psutil.Process()
all_processes = [cur_process] + cur_process.children(recursive=True)
rss = sum([p.memory_info().rss for p in all_processes])
return {
'rss': rss,
'limits': {
'memory': int(os.environ.get('MEM_LIMIT', None))
}
}
class MetricsHandler(IPythonHandler):
def get(self):
self.finish(json.dumps(get_metrics()))
def setup_handlers(web_app):
route_pattern = url_path_join(web_app.settings['base_url'], '/metrics')
web_app.add_handlers('.*', [(route_pattern, MetricsHandler)])
|
[
"yuvipanda@gmail.com"
] |
yuvipanda@gmail.com
|
83accf7861f2d63d8f5fd0191a3bfd0fcf4c1c7e
|
19c8d82a713ab69f4f4163a00b5a5519148116db
|
/python/psutil/examples/meminfo.py
|
f1bbbd8ee2df0f34ca4253f907407b8a604d98fe
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
humphd/mozilla-central
|
6b758084c5a7afe2b2f81934ce26aadbe536a504
|
1acf9935d4409757e6970a0ecba6c0736dff4782
|
refs/heads/master
| 2021-01-17T10:44:32.739863
| 2013-01-13T14:39:32
| 2013-01-13T14:39:32
| 1,617,367
| 6
| 27
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
#!/usr/bin/env python
#
# $Id: meminfo.py 1509 2012-08-13 12:31:18Z g.rodola $
#
# Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Print system memory information.
"""
import psutil
from psutil._compat import print_
def to_meg(n):
return str(int(n / 1024 / 1024)) + "M"
def pprint_ntuple(nt):
for name in nt._fields:
value = getattr(nt, name)
if name != 'percent':
value = to_meg(value)
print_('%-10s : %7s' % (name.capitalize(), value))
def main():
print_('MEMORY\n------')
pprint_ntuple(psutil.virtual_memory())
print_('\nSWAP\n----')
pprint_ntuple(psutil.swap_memory())
if __name__ == '__main__':
main()
|
[
"gps@mozilla.com"
] |
gps@mozilla.com
|
03f459a285c75fba42e137c8229e79543bb0b0eb
|
ad59fb12042bfd3f5c43eca057d0f747f9e148cf
|
/StreamLink/usr/lib/python2.7/site-packages/streamlink/plugins/abweb.py
|
87094ccda556b88c13bf0a2c31b13caa6650d815
|
[] |
no_license
|
lexlong2007/eePlugins
|
d62b787100a7069ad5713a47c5688008063b45ec
|
167b262fe36901a2d3a2fae6d0f85e2307b3eff7
|
refs/heads/master
| 2022-03-09T05:37:37.567937
| 2022-02-27T01:44:25
| 2022-02-27T01:44:25
| 253,012,126
| 0
| 0
| null | 2020-04-04T14:03:29
| 2020-04-04T14:03:29
| null |
UTF-8
|
Python
| false
| false
| 4,983
|
py
|
import logging
import re
from streamlink.plugin import Plugin, PluginArgument, PluginArguments, PluginError, pluginmatcher
from streamlink.plugin.api.utils import itertags
from streamlink.stream import HLSStream
from streamlink.utils import update_scheme
from streamlink.utils.url import url_concat
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r"https?://(?:www\.)?abweb\.com/BIS-TV-Online/bistvo-tele-universal\.aspx",
re.IGNORECASE
))
class ABweb(Plugin):
url_l = 'https://www.abweb.com/BIS-TV-Online/identification.aspx?ReturnUrl=%2fBIS-TV-Online%2fbistvo-tele-universal.aspx'
_hls_re = re.compile(r'''["']file["']:\s?["'](?P<url>[^"']+\.m3u8[^"']+)["']''')
arguments = PluginArguments(
PluginArgument(
"username",
requires=["password"],
sensitive=True,
metavar="USERNAME",
help="""
The username associated with your ABweb account, required to access any
ABweb stream.
""",
prompt="Enter ABweb username"
),
PluginArgument(
"password",
sensitive=True,
metavar="PASSWORD",
help="A ABweb account password to use with --abweb-username.",
prompt="Enter ABweb password"
),
PluginArgument(
"purge-credentials",
action="store_true",
help="""
Purge cached ABweb credentials to initiate a new session and
reauthenticate.
""")
)
def __init__(self, url):
super(ABweb, self).__init__(url)
self._authed = (self.session.http.cookies.get('ASP.NET_SessionId', domain='.abweb.com')
and self.session.http.cookies.get('.abportail1', domain='.abweb.com'))
def _login(self, username, password):
log.debug('Attempting to login.')
data = {}
for i in itertags(self.session.http.get(self.url_l).text, 'input'):
data[i.attributes.get('name')] = i.attributes.get('value', '')
if not data:
raise PluginError('Missing input data on login website.')
data.update({
'ctl00$ContentPlaceHolder1$Login1$UserName': username,
'ctl00$ContentPlaceHolder1$Login1$Password': password,
'ctl00$ContentPlaceHolder1$Login1$LoginButton.x': '0',
'ctl00$ContentPlaceHolder1$Login1$LoginButton.y': '0',
'ctl00$ContentPlaceHolder1$Login1$RememberMe': 'on',
})
self.session.http.post(self.url_l, data=data)
if (self.session.http.cookies.get('ASP.NET_SessionId') and self.session.http.cookies.get('.abportail1')):
for cookie in self.session.http.cookies:
# remove www from cookie domain
cookie.domain = '.abweb.com'
self.save_cookies(default_expires=3600 * 24)
return True
else:
log.error('Failed to login, check your username/password')
return False
def _get_streams(self):
self.session.http.headers.update({
'Referer': 'http://www.abweb.com/BIS-TV-Online/bistvo-tele-universal.aspx'
})
login_username = self.get_option('username')
login_password = self.get_option('password')
if self.options.get('purge_credentials'):
self.clear_cookies()
self._authed = False
log.info('All credentials were successfully removed.')
if self._authed:
log.info('Attempting to authenticate using cached cookies')
elif not self._authed and not (login_username and login_password):
log.error('A login for ABweb is required, use --abweb-username USERNAME --abweb-password PASSWORD')
return
elif not self._authed and not self._login(login_username, login_password):
return
log.debug('get iframe_url')
res = self.session.http.get(self.url)
for iframe in itertags(res.text, 'iframe'):
iframe_url = iframe.attributes.get('src')
if iframe_url.startswith('/'):
iframe_url = url_concat('https://www.abweb.com', iframe_url)
else:
iframe_url = update_scheme('https://', iframe_url)
log.debug('iframe_url={0}'.format(iframe_url))
break
else:
raise PluginError('No iframe_url found.')
self.session.http.headers.update({'Referer': iframe_url})
res = self.session.http.get(iframe_url)
m = self._hls_re.search(res.text)
if not m:
raise PluginError('No hls_url found.')
hls_url = update_scheme('https://', m.group('url'))
streams = HLSStream.parse_variant_playlist(self.session, hls_url)
if streams:
for stream in streams.items():
yield stream
else:
yield 'live', HLSStream(self.session, hls_url)
__plugin__ = ABweb
|
[
"zdzislaw22@windowslive.com"
] |
zdzislaw22@windowslive.com
|
16ab018bbe05629c3a99ac29b6b85d081004be41
|
cb1c3c624fb2e8e65f8b80109bada6811d399205
|
/creme/optim/losses/base.py
|
f01955e2beafbab39ff9ea37379017111d10b6ab
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
2torus/creme
|
5cc830bb59ea95d7ed69ceaeed1d285f1544beae
|
bcc5e2a0155663a1f0ba779c68f23456695bcb54
|
refs/heads/master
| 2020-06-14T00:19:03.981678
| 2019-06-24T13:45:06
| 2019-06-24T13:45:06
| 194,833,808
| 4
| 0
|
NOASSERTION
| 2019-07-02T09:35:20
| 2019-07-02T09:35:19
| null |
UTF-8
|
Python
| false
| false
| 754
|
py
|
import abc
from ... import utils
class Loss(abc.ABC):
@abc.abstractmethod
def __call__(self, y_true, y_pred) -> float:
"""Returns the loss."""
@abc.abstractmethod
def gradient(self, y_true, y_pred) -> float:
"""Returns the gradient with respect to ``y_pred``."""
class ClassificationLoss(Loss):
@staticmethod
def clamp_proba(p):
return utils.clamp(p, minimum=1e-15, maximum=1 - 1e-15)
class BinaryClassificationLoss(ClassificationLoss):
"""A loss appropriate binary classification tasks."""
class MultiClassificationLoss(ClassificationLoss):
"""A loss appropriate for multi-class classification tasks."""
class RegressionLoss(Loss):
"""A loss appropriate for regression tasks."""
|
[
"maxhalford25@gmail.com"
] |
maxhalford25@gmail.com
|
74afd6f1f7551b0108ee82bdb3d16190f8713a30
|
afbaa5685bf737ec7d16fee2bab54ae13caf96f9
|
/geekbang/core/07.py
|
cc9651a2933ba0aaaf2541bf66dba49a05132e46
|
[] |
no_license
|
ykdsg/myPython
|
9dcc9afe6f595e51b72257875d66ada1ba04bba6
|
77d2eaa2acb172664b632cc2720cef62dff8f235
|
refs/heads/master
| 2023-06-10T20:11:08.061075
| 2023-06-03T11:39:53
| 2023-06-03T11:39:53
| 10,655,956
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 870
|
py
|
d = {'name': 'jason', 'dob': '2000-01-01', 'gender': 'male'}
# 迭代的是键
for k in d:
print("key:" + k)
for v in d.values():
print("value:", v)
for it in d.items():
print("item:", it)
l = [1, 2, 3, 4, 5, 6, 7]
for index in range(0, len(l)):
if index < 5:
print(l[index])
text = ' Today, is, Sunday'
text_list = [s.strip() for s in text.split(",") if len(s.strip()) > 3]
print(text_list)
attributes = ['name', 'dob', 'gender']
values = [['jason', '2000-01-01', 'male'],
['mike', '1999-01-01', 'male'],
['nancy', '2001-02-01', 'female']
]
queen_list = []
for va in values:
dic = {}
for i in range(0, len(attributes)):
dic[attributes[i]] = va[i]
queen_list.append(dic)
print(queen_list)
result2 = [{attributes[i]: va[i] for i in range(len(attributes))} for va in values]
print(result2)
|
[
"17173as@163.com"
] |
17173as@163.com
|
a82a9f43ecaaedd29851c1774f84ce30246134ad
|
6c92af1a600a707232f36dbb17d3944b95b1206c
|
/src/collective/caching/hostname/Extensions/Install.py
|
bc37f6e28fdb9cc8d3c2ffe43e866e25c849fd10
|
[] |
no_license
|
simplesconsultoria/collective.caching.hostname
|
f1a8c33fa7b720086c98f4c6311ad7974902ddcf
|
71cb740abfcfcecb8155fb336a0e0a30907b5502
|
refs/heads/master
| 2021-01-22T01:54:35.904018
| 2012-02-23T18:39:51
| 2012-02-23T18:39:51
| 3,528,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
# -*- coding:utf-8 -*-
from Products.CMFCore.utils import getToolByName
def uninstall(portal, reinstall=False):
if not reinstall:
# normal uninstall
setup_tool = getToolByName(portal, 'portal_setup')
profile = 'profile-collective.caching.hostname:uninstall'
setup_tool.runAllImportStepsFromProfile(profile)
return "Ran all uninstall steps."
|
[
"erico@simplesconsultoria.com.br"
] |
erico@simplesconsultoria.com.br
|
521a53b5d74cabc70ce1ca76fa0a85326ae842c0
|
8db5ef7e8355c1beff7b55313b3aa57737b56df6
|
/guidez/local_settings_safe.py
|
1fa3c50654e99d4310cd0d3b4780f3d525511c79
|
[] |
no_license
|
Seredyak1/guidez
|
ec8748df9e45131119b18ecaccf9bc8afb4489d3
|
88ff2eb0b0d2b9a2e736712f0ff049d6c2108107
|
refs/heads/master
| 2022-12-10T04:16:56.158178
| 2020-04-14T06:58:16
| 2020-04-14T06:58:16
| 208,821,905
| 0
| 0
| null | 2022-12-08T03:13:30
| 2019-09-16T14:34:11
|
Python
|
UTF-8
|
Python
| false
| false
| 822
|
py
|
DEBUG = True
SITE = ""
SWAGGER_SETTINGS = {
"SUPPORTED_SUBMIT_METHOD": ['get', 'post', 'put', 'delete', ],
'USE_SESSION_AUTH': False,
'JSON_EDITOR': True,
'SECURITY_DEFINITIONS': {
'api_key': {
'type': 'apiKey',
'description': 'Personal API Key authorization',
'name': 'Authorization',
'in': 'header',
}
},
'APIS_SORTER': 'alpha',
"SHOW_REQUEST_HEADERS": True,
"VALIDATOR_URL": None
}
DATABASES = {
'default': {
'ENGINE': '',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
ALLOWED_HOSTS = ['*']
# WORKED EMAIL CONFIGURATION
EMAIL_BACKEND = ''
EMAIL_USE_TLS = True
EMAIL_HOST = ''
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_PORT = 587
|
[
"sanya.seredyak@gmail.com"
] |
sanya.seredyak@gmail.com
|
383eb73d0bbe59aa068c39befe436da753716ea1
|
2638a861e7ac0b37361348babc18212176cb75cb
|
/solvers/results.py
|
66e6fa60532a809fa4f5b9d80f6f8b2b58431eff
|
[
"Apache-2.0"
] |
permissive
|
jcarpent/osqp_benchmarks
|
64de68f111d464810983d2f4ea31962b8646b041
|
787f46e73ce22bcdc9055a4fea56fc812a7d6e5f
|
refs/heads/master
| 2020-04-18T11:50:55.026557
| 2019-01-31T13:09:21
| 2019-01-31T13:09:21
| 167,514,778
| 0
| 0
|
Apache-2.0
| 2019-01-25T08:43:05
| 2019-01-25T08:43:04
| null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
class Results(object):
'''
Results class from QP solution
'''
def __init__(self, status, obj_val, x, y, run_time, niter):
self.status = status
self.obj_val = obj_val
self.x = x
self.y = y
self.run_time = run_time
self.niter = niter
|
[
"bartolomeo.stellato@gmail.com"
] |
bartolomeo.stellato@gmail.com
|
ccd57a1a2366440d8df6ef67e648d00666d96a92
|
ac549e553263801bdc6962a10ebbe784dc2631df
|
/Python/tree/tree.py
|
cf633f53da24c08f988ca5e6d17f85ca37ac644a
|
[] |
no_license
|
Bishal44/DataStructure
|
e595890d18bde39e65f02a7ca3a6904c6070c3c8
|
939c47de6dcfe3b2578aaa0610d3cdc5726572c7
|
refs/heads/master
| 2020-09-10T22:40:46.368607
| 2020-03-28T12:15:08
| 2020-03-28T12:15:08
| 221,854,694
| 0
| 0
| null | 2019-12-10T15:47:45
| 2019-11-15T05:59:40
|
Python
|
UTF-8
|
Python
| false
| false
| 189
|
py
|
"""
Datastructure_and_algorithm
Created by Bishal on 25 Mar 2020
"""
class TreeNode:
def __init__(self, val=0):
self.val = val
self.left = None
self.right = None
|
[
"bhattaraibishal704@gmail.com"
] |
bhattaraibishal704@gmail.com
|
a2573aed665d4ec84c3a3a988bbfc2e97bbc1c92
|
13d8ede6d23ed0a375bbc9310d93be035fd164e9
|
/InterviewBits/arrays/first-missing-positive.py
|
b52f188313126497bbe4e63e6fbff1cee6428e8c
|
[] |
no_license
|
iamrishap/PythonBits
|
192d3fb7bce101485eb81da2153e5b0c82b6872a
|
dcbc5f087ad78110a98e78dd6e5943ed971309c2
|
refs/heads/master
| 2022-03-10T07:16:08.601170
| 2019-11-17T04:01:00
| 2019-11-17T04:01:00
| 206,778,724
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 827
|
py
|
"""
Given an unsorted integer array, find the first missing positive integer.
Example:
Given [1,2,0] return 3,
[3,4,-1,1] return 2,
[-8, -7, -6] returns 1
Your algorithm should run in O(n) time and use constant space.
"""
class Solution:
# @param A : list of integers
# @return an integer
def firstMissingPositive(self, A):
A = list(filter(lambda x: x > 0, A))
# print(A)
A = [len(A) + 2] + A # Add the next number. This is for proper indexing (zero based).
# print(A)
for i, num in enumerate(A):
num = abs(num)
if num < len(A):
A[num] = - abs(A[num])
# print(A)
for i in range(1, len(A)):
if A[i] > 0:
return i
return len(A)
s = Solution()
s.firstMissingPositive([3, 5, 2, 1])
|
[
"rishap.sharma@iress.com"
] |
rishap.sharma@iress.com
|
382fc999a896f8ab47db64595325ce80048bb9bd
|
cbb3ff933ecd7c113eb5740d1206caf8e099c599
|
/communityapp/images/models.py
|
346e5a05ae6b3bf5fad7f6ccfbc3105a99be3026
|
[] |
no_license
|
userzeroc/communityapp
|
1cbb2a0021d124f9cfb2f9eec9e30e5c1d40f04b
|
9bc2300c18f74b03706896cf43d0d6d9bbdd0725
|
refs/heads/master
| 2021-01-15T04:55:08.625686
| 2020-02-25T01:38:03
| 2020-02-25T01:38:03
| 242,884,202
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,346
|
py
|
from django.db import models
from django.conf import settings
from django.urls import reverse
from django.core.files.base import ContentFile
from urllib import request
# Create your models here.
from django.utils.text import slugify
class Image(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name='images_created',
on_delete=models.CASCADE)
title = models.CharField(max_length=200)
slug = models.SlugField(max_length=200,
blank=True)
url = models.URLField()
image = models.ImageField(upload_to='images/%Y/%m/%d/')
description = models.TextField(blank=True)
created = models.DateField(auto_now_add=True,
db_index=True)
users_like = models.ManyToManyField(settings.AUTH_USER_MODEL,
related_name='images_liked',
blank=True)
class Meta:
verbose_name_plural = '相册'
def __str__(self):
return self.title
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.title)
super(Image, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('images:detail', args=[self.id, self.slug])
|
[
"972763474@qq.com"
] |
972763474@qq.com
|
68622fb85d1a8503ea0a50083047d2cee133d957
|
9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56
|
/google/ads/googleads/v9/enums/types/asset_set_link_status.py
|
9d699ae74e4f85fcf250f0a500f0e697bf8d3f45
|
[
"Apache-2.0"
] |
permissive
|
GerhardusM/google-ads-python
|
73b275a06e5401e6b951a6cd99af98c247e34aa3
|
676ac5fcb5bec0d9b5897f4c950049dac5647555
|
refs/heads/master
| 2022-07-06T19:05:50.932553
| 2022-06-17T20:41:17
| 2022-06-17T20:41:17
| 207,535,443
| 0
| 0
|
Apache-2.0
| 2019-09-10T10:58:55
| 2019-09-10T10:58:55
| null |
UTF-8
|
Python
| false
| false
| 1,237
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.enums",
marshal="google.ads.googleads.v9",
manifest={"AssetSetLinkStatusEnum",},
)
class AssetSetLinkStatusEnum(proto.Message):
r"""Container for enum describing possible statuses of the
linkage between asset set and its container.
"""
class AssetSetLinkStatus(proto.Enum):
r"""The possible statuses of he linkage between asset set and its
container.
"""
UNSPECIFIED = 0
UNKNOWN = 1
ENABLED = 2
REMOVED = 3
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"noreply@github.com"
] |
GerhardusM.noreply@github.com
|
12c03e0136926e99299b7f34ac1906d3942df778
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/pzXrBSiQdMqvRWazp_19.py
|
7f991dbda4e79416075e6ab120701b4d37e78023
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 991
|
py
|
"""
Imagine you run a website that presents users with different coding challenges
in levels Easy, Medium, and Hard, where users get points for completing
challenges. An _Easy_ challenge is worth `5` points, a _Medium_ challenge is
worth `10` points, and a _Hard_ challenge is worth `20` points.
Create a function that takes the amount of challenges a user has completed for
each challenge level, and calculates the user's total number of points. Keep
in mind that a user cannot complete negative challenges, so the function
should return the string `"invalid"` if any of the passed parameters are
negative.
### Examples
score_calculator(1, 2, 3) ➞ 85
score_calculator(1, 0, 10) ➞ 205
score_calculator(5, 2, -6) ➞ "invalid"
### Notes
N/A
"""
def score_calculator(easy, med, hard):
if easy < 0 or med < 0 or hard < 0:
return "invalid"
sumEasy = easy * 5
sumMed = med * 10
sumHard = hard * 20
sum = sumEasy + sumMed + sumHard
return sum
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
e969edd68f04ac0c73ce426cfef0f6feafce4b3b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02407/s301960424.py
|
a10803ef0f9b093d880f02df430cca95ab0192af
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
import sys
n = int( sys.stdin.readline() )
nums = sys.stdin.readline().rstrip().split( " " )
nums.reverse()
output = []
for i in range( n ):
output.append( nums[i] )
if i < (n-1):
output.append( " " )
print( "".join( output ) )
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
34151eaa6872df5b6510a87dfdd997d91fd16980
|
7a35edf3f38622c94df0b0f7bdcbd1a9ae63fd3d
|
/app/slack/tests/test_slack_service.py
|
82f0afea1ee91fc9359032b39a5b3f8f71261f16
|
[] |
no_license
|
hamimhamim21/clerk
|
2d0cc846c99f6c7f80f7a0fe6ac9e0cc447dbd8f
|
e39122d37bbc938af154e5f74fa45f34e1195fa1
|
refs/heads/master
| 2022-10-01T20:25:42.795653
| 2020-06-04T02:31:08
| 2020-06-04T02:31:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,439
|
py
|
import json
import pytest
import responses
from slack.services import send_slack_message
from slack.models import SlackUser, SlackChannel, SlackMessage
@responses.activate
@pytest.mark.django_db
@pytest.mark.parametrize("slug", ["client-intake", "landing-form"])
def test_send_submission_slack(slug):
"""
Ensure send_submission_slack call Slack without anything exploding
https://github.com/getsentry/responses
"""
# Set up API response.
responses.add(
method=responses.POST, url="https://example.com", status=200, json={}
) # Not used
# Prepare database
channel = SlackChannel.objects.last()
assert channel.webhook_url == "https://example.com"
msg = SlackMessage.objects.select_related("channel").get(slug=slug)
assert msg.channel == channel
user_1 = SlackUser.objects.create(name="Alice", slack_id="1234")
user_2 = SlackUser.objects.create(name="Bob", slack_id="5678")
msg.users.add(user_1)
msg.users.add(user_2)
msg.save()
# Send the message
text = "This is a cool Slack message!"
send_slack_message(msg.slug, text)
# Check it worked!
assert len(responses.calls) == 1
body_text = responses.calls[0].request.body.decode("utf-8")
body_json = json.loads(body_text)
assert body_json["text"] == (
"Hi <@1234> and <@5678>.\n\n"
"This is a cool Slack message!\n\n"
":heart: Client Bot :robot_face:"
)
|
[
"mattdsegal@gmail.com"
] |
mattdsegal@gmail.com
|
5d76d28eb84b8d878d86d65b0351df821c2ffe28
|
67117705720a3e3d81253ba48c1826d36737b126
|
/Wk9_STRANDS/optimum_clusters_DBSCAN.py
|
f85c49fff153b79e54df7ac3e897fa742f6afa7d
|
[] |
no_license
|
pyliut/Rokos2021
|
41f0f96bc396b6e8a5e268e31a38a4a4b288c370
|
70753ab29afc45766eb502f91b65cc455e6055e1
|
refs/heads/main
| 2023-08-13T17:29:30.013829
| 2021-09-26T19:01:35
| 2021-09-26T19:01:35
| 382,092,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,593
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 27 13:24:13 2021
@author: pyliu
"""
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import math
import scipy as sp
from scipy import stats
import sklearn
from sklearn.cluster import DBSCAN
from sklearn.metrics import silhouette_score
from sklearn.metrics import calinski_harabasz_score
from sklearn.metrics import davies_bouldin_score
def optimum_clusters_DBSCAN(distance_matrix, method = "ss", min_samples = 1):
"""
Returns score for each choice of n_cluster
Parameters
----------
distance_matrix : TYPE
DESCRIPTION.
method : STR
Choose from ["ss", "ch", "db"], where "ss" uses silhouette score, "ch" uses Calinski-Harabasz index, "db" uses Davies-Bouldin index.
The default is "ss".
min_samples : INT
another tuning param for DBSCAN. The Default is 1.
Raises
------
ValueError
When method is invalid.
Can only be one of ["ss", "ch", "db"]
Returns
-------
tune_list : INT, vector
tuning param
s_list : FLOAT, vector
score for each value of tuning parameter
"""
samples = np.arange(1,min_samples+1,1)
tune = np.arange(0.03,0.99,0.01)
s_dict = {}
tune_dict = {}
for j in samples:
s_list = [] #score list
tune_list = [] #corresponding tuning params
for i in tune:
clustering = DBSCAN(eps = i, min_samples = j, metric='precomputed')
labels = clustering.fit_predict( distance_matrix )
if len(np.unique(labels)) < 2:
continue
if method == "ss":
s = silhouette_score(distance_matrix , labels, metric='euclidean')
elif method == "ch":
s = calinski_harabasz_score(distance_matrix , labels)
elif method == "db":
s = davies_bouldin_score(distance_matrix , labels)
else:
raise ValueError("Method can be one of ['ss','ch','db']")
s_list.append(s)
tune_list.append(i)
s_dict[str(j)] = s_list
tune_dict[str(j)] = tune_list
plt.plot(tune_list,s_list)
if method == "ss":
print("min_samples:",j)
print("Optimum tuning param:",np.round(tune_list[np.argmax(s_list)],4))
print("Max SS:", np.round(np.max(s_list),4))
elif method == "ch":
print("min_samples:",j)
print("Optimum tuning param:",np.round(tune_list[np.argmax(s_list)],4))
print("Max CH:", np.round(np.max(s_list),4))
elif method == "db":
print("min_samples:",j)
print("Optimum tuning param:",np.round(tune_list[np.argmin(s_list)],4))
print("Min DB:", np.round(np.min(s_list),4))
else:
raise ValueError("Method can be one of ['ss','ch','db']")
plt.xlabel("tuning param")
plt.xlim([tune_list[0], tune_list[-1]]);
plt.legend(samples)
if method == "ss":
plt.title("Silhouette Coefficient")
plt.ylabel("Silhouette coeff")
elif method == "ch":
plt.title("Calinski-Harabasz Index")
plt.ylabel("CH Index")
elif method == "db":
plt.title("Davies-Bouldin Index")
plt.ylabel("DB Index")
else:
raise ValueError("Method can be one of ['ss','ch','db']")
return tune_dict, s_dict
|
[
"noreply@github.com"
] |
pyliut.noreply@github.com
|
00c5161c9f2a576c3823118a495a77ffe2d93f6f
|
7b8105666c77c80737e2cf4e8f89e85fedda74af
|
/SBaaS_ale/stage01_ale_trajectories_postgresql_models.py
|
bc5e630a390c384ca7cf098ec10e2f921cfe104c
|
[
"MIT"
] |
permissive
|
dmccloskey/SBaaS_ale
|
0869a52889bf66a9c26b9343bdfa4cec00fe99c3
|
e225162b2f90117cfecbc0065a43382571dce95a
|
refs/heads/master
| 2021-01-13T00:16:48.458778
| 2016-02-14T01:03:31
| 2016-02-14T01:03:31
| 51,099,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,061
|
py
|
from SBaaS_base.postgresql_orm_base import *
class data_stage01_ale_trajectories(Base):
    """Growth-rate trajectory points recorded over the course of an ALE experiment.

    One row per measured time point of a given ALE line within an experiment.
    """
    __tablename__ = 'data_stage01_ale_trajectories'
    id = Column(Integer, Sequence('data_stage01_ale_trajectories_id_seq'), primary_key=True)
    experiment_id = Column(String(50))
    ale_id = Column(String(100))
    ale_time=Column(Float,nullable=False);
    ale_time_units=Column(String(50))
    generations=Column(Float)
    ccd=Column(Float) #cumulative cell divisions
    rate = Column(Float)
    rate_units = Column(String(50))
    used_ = Column(Boolean)
    comment_ = Column(Text);
    # Uniqueness: one rate record per (experiment, ALE line, time point, units).
    __table_args__ = (UniqueConstraint('experiment_id','ale_id','ale_time','ale_time_units','rate_units'),
                      )
    def __init__(self,data_dict_I):
        """Populate the row from a dict keyed by column name."""
        self.ale_id=data_dict_I['ale_id'];
        self.ale_time=data_dict_I['ale_time'];
        self.ale_time_units=data_dict_I['ale_time_units'];
        self.generations=data_dict_I['generations'];
        self.ccd=data_dict_I['ccd'];
        self.rate=data_dict_I['rate'];
        self.rate_units=data_dict_I['rate_units'];
        self.comment_=data_dict_I['comment_'];
        self.used_=data_dict_I['used_'];
        self.experiment_id=data_dict_I['experiment_id'];
    def __set__row__(self,
                     experiment_id_I,
                     ale_id_I,
                     ale_time_I,
                     ale_time_units_I,
                     generations_I,
                     ccd_I,
                     rate_I,
                     rate_units_I,
                     used__I,
                     comment__I):
        """Populate the row from positional values (one per column)."""
        self.experiment_id=experiment_id_I
        self.ale_id=ale_id_I
        self.ale_time=ale_time_I
        self.ale_time_units=ale_time_units_I
        self.generations=generations_I
        self.ccd=ccd_I
        self.rate=rate_I
        self.rate_units=rate_units_I
        self.used_=used__I
        self.comment_=comment__I
    def __repr__dict__(self):
        """Return the row as a plain dict of column name -> value."""
        return {'experiment_id':self.experiment_id,
                'ale_id':self.ale_id,
                'ale_time':self.ale_time,
                'ale_time_units':self.ale_time_units,
                'generations':self.generations,
                'ccd':self.ccd,
                'rate':self.rate,
                'rate_units':self.rate_units,
                'used_':self.used_,
                'comment_':self.comment_}
    def __repr__json__(self):
        """Return the row serialized as a JSON string."""
        return json.dumps(self.__repr__dict__())
class data_stage01_ale_jumps(Base):
    """Fitness 'jump' regions identified within an ALE trajectory.

    NOTE(review): this model looks incomplete -- ``__init__`` is a no-op, and
    ``__set__row__``/``__repr__dict__`` reference attributes (ale_time,
    rate_fitted, jump_region, ...) that are not declared as columns above.
    Confirm against the actual schema before relying on it.
    """
    __tablename__ = 'data_stage01_ale_jumps'
    id = Column(Integer, Sequence('data_stage01_ale_jumps_id_seq'), primary_key=True)
    # NOTE(review): unlike the sibling models, experiment_id is part of the
    # primary key here -- verify this is intentional.
    experiment_id = Column(String(50), primary_key=True)
    ale_id = Column(String(100))
    jump_region_start = Column(Float)
    jump_region_stop = Column(Float)
    used_ = Column(Boolean)
    comment_ = Column(Text);
    def __init__(self,data_dict_I):
        # NOTE(review): intentionally(?) does not populate any fields.
        pass;
    def __set__row__(self,experiment_id_I,
                     ale_id_I,
                     ale_time_I,
                     ale_time_units_I,
                     rate_fitted_I,
                     rate_fitted_units_I,
                     jump_region_I,
                     used__I,
                     comment__I):
        """Populate the row from positional values."""
        self.experiment_id=experiment_id_I
        self.ale_id=ale_id_I
        self.ale_time=ale_time_I
        self.ale_time_units=ale_time_units_I
        self.rate_fitted=rate_fitted_I
        self.rate_fitted_units=rate_fitted_units_I
        self.jump_region=jump_region_I
        self.used_=used__I
        self.comment_=comment__I
    def __repr__dict__(self):
        """Return the row as a plain dict of attribute name -> value."""
        return {'experiment_id':self.experiment_id,
                'ale_id':self.ale_id,
                'ale_time':self.ale_time,
                'ale_time_units':self.ale_time_units,
                'rate_fitted':self.rate_fitted,
                'rate_fitted_units':self.rate_fitted_units,
                'jump_region':self.jump_region,
                'used_':self.used_,
                'comment_':self.comment_}
    def __repr__json__(self):
        """Return the row serialized as a JSON string."""
        return json.dumps(self.__repr__dict__())
class data_stage01_ale_stocks(Base):
    """Frozen stock samples taken from an ALE line at specific time points."""
    __tablename__ = 'data_stage01_ale_stocks'
    id = Column(Integer, Sequence('data_stage01_ale_stocks_id_seq'), primary_key=True)
    experiment_id = Column(String(50))
    ale_id = Column(String(100))
    sample_name_abbreviation = Column(String(100))
    time_point=Column(String(10));
    ale_time=Column(Float,nullable=False);
    ale_time_units=Column(String(50))
    used_ = Column(Boolean)
    comment_ = Column(Text);
    # Uniqueness: one stock per (experiment, ALE line, sample, time point, units).
    __table_args__ = (UniqueConstraint('experiment_id','ale_id','sample_name_abbreviation','time_point','ale_time','ale_time_units'),
                      )
    def __init__(self,data_dict_I):
        """Populate the row from a dict keyed by column name."""
        self.ale_id=data_dict_I['ale_id'];
        self.sample_name_abbreviation=data_dict_I['sample_name_abbreviation'];
        self.used_=data_dict_I['used_'];
        self.ale_time=data_dict_I['ale_time'];
        self.comment_=data_dict_I['comment_'];
        self.time_point=data_dict_I['time_point'];
        self.ale_time_units=data_dict_I['ale_time_units'];
        self.experiment_id=data_dict_I['experiment_id'];
    def __set__row__(self,
                     experiment_id_I,
                     ale_id_I,
                     sample_name_abbreviation_I,
                     time_point_I,
                     ale_time_I,
                     ale_time_units_I,
                     used__I,
                     comment__I):
        """Populate the row from positional values (one per column)."""
        self.experiment_id=experiment_id_I
        self.ale_id=ale_id_I
        self.ale_time=ale_time_I
        self.ale_time_units=ale_time_units_I
        self.time_point=time_point_I
        self.sample_name_abbreviation=sample_name_abbreviation_I
        self.used_=used__I
        self.comment_=comment__I
    def __repr__dict__(self):
        """Return the row as a plain dict of column name -> value."""
        return {'experiment_id':self.experiment_id,
                'ale_id':self.ale_id,
                'ale_time':self.ale_time,
                'ale_time_units':self.ale_time_units,
                'time_point':self.time_point,
                'sample_name_abbreviation':self.sample_name_abbreviation,
                'used_':self.used_,
                'comment_':self.comment_}
    def __repr__json__(self):
        """Return the row serialized as a JSON string."""
        return json.dumps(self.__repr__dict__())
|
[
"dmccloskey87@gmail.com"
] |
dmccloskey87@gmail.com
|
f4e8b00eaaf6d0a6068ad4122cf0673de696d7f6
|
45de5d3a46623222adab00f1f2905d89708aa492
|
/tests/test_writer.py
|
c358686e03ffc64faa8009081a4266ed1ec88461
|
[
"BSD-3-Clause"
] |
permissive
|
jayvdb/pyexcel-xls
|
9ccc0439df9499b8711b69740a9ebd391bce67f2
|
e191abd3c329d1459c843204a5d5acde14dc2da7
|
refs/heads/master
| 2020-12-11T03:54:30.494345
| 2016-05-10T03:21:25
| 2016-05-10T06:43:32
| 57,425,804
| 0
| 0
| null | 2016-04-30T03:42:12
| 2016-04-30T03:42:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,447
|
py
|
import os
from pyexcel_xls.xls import XLSWriter, XLSBook
from base import PyexcelWriterBase, PyexcelHatWriterBase
class TestNativeXLWriter:
    """Round-trip test for the native XLS writer/reader pair."""
    def test_write_book(self):
        """Write a three-sheet book to disk, read it back, and compare."""
        self.content = {
            "Sheet1": [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]],
            "Sheet2": [[4, 4, 4, 4], [5, 5, 5, 5], [6, 6, 6, 6]],
            "Sheet3": [[u'X', u'Y', u'Z'], [1, 4, 7], [2, 5, 8], [3, 6, 9]]
        }
        self.testfile = "xlwriter.xls"
        writer = XLSWriter()
        writer.open(self.testfile)
        writer.write(self.content)
        writer.close()
        reader = XLSBook()
        reader.open(self.testfile)
        content = reader.read_all()
        reader.close()
        # Materialize each sheet's rows as a list before comparing with the
        # original dict-of-lists.
        for key in content.keys():
            content[key] = list(content[key])
        assert content == self.content
    def tearDown(self):
        """Remove the scratch file created by the test, if present."""
        if os.path.exists(self.testfile):
            os.unlink(self.testfile)
class TestXLSnCSVWriter(PyexcelWriterBase):
    """Writer tests driven by the shared base class, using xls + csv scratch files."""

    def setUp(self):
        self.testfile = "test.xls"
        self.testfile2 = "test.csv"

    def tearDown(self):
        # Clean up whichever scratch files the base-class tests produced.
        for scratch in (self.testfile, self.testfile2):
            if os.path.exists(scratch):
                os.unlink(scratch)
class TestXLSHatWriter(PyexcelHatWriterBase):
    """Hat-writer tests driven by the shared base class, using one xls scratch file."""

    def setUp(self):
        self.testfile = "test.xls"

    def tearDown(self):
        # Remove the scratch file only if the base-class test created it.
        if not os.path.exists(self.testfile):
            return
        os.unlink(self.testfile)
|
[
"wangc_2011@hotmail.com"
] |
wangc_2011@hotmail.com
|
ff7c3b59b4e78c80186597eeeca89b557c13fbc7
|
94f1c6d11953bab63a5ce468cf279e409a45eb9b
|
/Round B/Palindromes.py
|
42fdc22fcff2938725d7869ab1e5bfeb55c89edf
|
[] |
no_license
|
rocket3989/KickStart2019
|
582434385842e6d82c638805f79a91bee861000f
|
37a96a100b9b3c16116ac29d74826f8d6e4ee3f6
|
refs/heads/master
| 2020-05-01T08:39:01.632233
| 2019-04-29T00:02:31
| 2019-04-29T00:02:31
| 177,383,386
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 792
|
py
|
from collections import defaultdict

# Kick Start "Palindromes": for each query [L, R] (1-indexed, inclusive),
# count it if some permutation of the substring is a palindrome.
T = int(input())
for test in range(1, T + 1):
    N, Q = list(map(int, input().split()))
    stringIn = input()
    correct = 0
    for question in range(Q):
        L, R = list(map(int, input().split()))
        # Character frequencies within the queried substring.
        charCount = defaultdict(int)
        for c in stringIn[L - 1:R]:
            charCount[c] += 1
        # A permutation can form a palindrome iff at most one character has
        # an odd count.  (The original code's `continue` only skipped an
        # iteration of the counting loop, so substrings with 2+ odd-count
        # characters of even length -- e.g. "ab" -- were wrongly counted.)
        odds = sum(1 for v in charCount.values() if v % 2 == 1)
        if odds <= 1:
            correct += 1
    print("Case #{}: {}".format(test, correct))
|
[
"rocket3989@gmail.com"
] |
rocket3989@gmail.com
|
57a63773e18313b81b384addf36304382c2a0ac4
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/contrib/cv/classification/Centroids-reid/pytorch_lightning/trainer/connectors/debugging_connector.py
|
ecba35d5dbf55f3b03d6bd297f7778a67ddfd510
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 3,825
|
py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
from pytorch_lightning.loggers.base import DummyLogger
from pytorch_lightning.utilities import rank_zero_info, rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
class DebuggingConnector:
    """Trainer sub-component that validates and applies debugging-related
    settings (fast_dev_run, batch limits, overfit batches) onto the trainer."""

    def __init__(self, trainer):
        # Back-reference to the owning Trainer; all settings are written onto it.
        self.trainer = trainer

    def on_init_start(
        self,
        limit_train_batches,
        limit_val_batches,
        limit_test_batches,
        val_check_interval,
        overfit_batches,
        fast_dev_run
    ):
        """Validate ``fast_dev_run`` and normalize all limit settings onto the trainer.

        Raises:
            MisconfigurationException: if ``fast_dev_run`` is not a bool or a
                non-negative int, or if any limit value is out of range.
        """
        if not isinstance(fast_dev_run, (bool, int)):
            raise MisconfigurationException(
                f'fast_dev_run={fast_dev_run} is not a valid configuration.'
                ' It should be either a bool or an int >= 0'
            )
        if isinstance(fast_dev_run, int) and (fast_dev_run < 0):
            raise MisconfigurationException(
                f'fast_dev_run={fast_dev_run} is not a'
                ' valid configuration. It should be >= 0.'
            )
        self.trainer.fast_dev_run = fast_dev_run
        # bool True coerces to 1 here; fast_dev_run is an int from now on.
        fast_dev_run = int(fast_dev_run)
        # set fast_dev_run=True when it is 1, used while logging
        if fast_dev_run == 1:
            self.trainer.fast_dev_run = True
        if fast_dev_run:
            # Fast-dev mode: run a single truncated epoch with no sanity
            # checks and a no-op logger, so one full loop finishes quickly.
            limit_train_batches = fast_dev_run
            limit_val_batches = fast_dev_run
            limit_test_batches = fast_dev_run
            self.trainer.max_steps = fast_dev_run
            self.trainer.num_sanity_val_steps = 0
            self.trainer.max_epochs = 1
            self.trainer.val_check_interval = 1.0
            self.trainer.check_val_every_n_epoch = 1
            self.trainer.logger = DummyLogger()
            rank_zero_info(
                'Running in fast_dev_run mode: will run a full train,'
                f' val and test loop using {fast_dev_run} batch(es).'
            )
        # Each limit is either a fraction in [0, 1] or a whole batch count.
        self.trainer.limit_train_batches = _determine_batch_limits(limit_train_batches, 'limit_train_batches')
        self.trainer.limit_val_batches = _determine_batch_limits(limit_val_batches, 'limit_val_batches')
        self.trainer.limit_test_batches = _determine_batch_limits(limit_test_batches, 'limit_test_batches')
        self.trainer.val_check_interval = _determine_batch_limits(val_check_interval, 'val_check_interval')
        self.trainer.overfit_batches = _determine_batch_limits(overfit_batches, 'overfit_batches')
        self.determine_data_use_amount(self.trainer.overfit_batches)

    def determine_data_use_amount(self, overfit_batches: float) -> None:
        """Use less data for debugging purposes"""
        # Overfit mode caps train/val/test to the same (small) amount of data.
        if overfit_batches > 0:
            self.trainer.limit_train_batches = overfit_batches
            self.trainer.limit_val_batches = overfit_batches
            self.trainer.limit_test_batches = overfit_batches
def _determine_batch_limits(batches: Union[int, float], name: str) -> Union[int, float]:
if 0 <= batches <= 1:
return batches
elif batches > 1 and batches % 1.0 == 0:
return int(batches)
else:
raise MisconfigurationException(
f'You have passed invalid value {batches} for {name}, it has to be in [0.0, 1.0] or an int.'
)
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
1adc72136d6926164b025510774a87f8be7e2a4b
|
fe7ca92531708b2d1cc30917bac61a2dd5565432
|
/switch-snmp/netsnmp/netsnmp/python-openstackclient-master/openstackclient/compute/v2/service.py
|
89f5cad94fef7325919c9b48291279ce6de5de3a
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
lizhenfen/python
|
817131e170a434c9a065589420c08fe8908e3536
|
5ffdb34a9d252eedbd4551fd694ce6f9e6cdd8b0
|
refs/heads/master
| 2021-01-21T13:48:28.731629
| 2016-08-22T08:57:47
| 2016-08-22T08:57:47
| 54,445,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,280
|
py
|
# Copyright 2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Service action implementations"""
from openstackclient.common import command
from openstackclient.common import utils
class DeleteService(command.Command):
    """Delete a compute service by ID."""

    def get_parser(self, prog_name):
        parser = super(DeleteService, self).get_parser(prog_name)
        parser.add_argument(
            "service",
            metavar="<service>",
            help="Compute service to delete (ID only)")
        return parser

    def take_action(self, parsed_args):
        # Single call: delete the named service via the compute client.
        self.app.client_manager.compute.services.delete(parsed_args.service)
class ListService(command.Lister):
    """List compute services, optionally filtered by host or service name."""

    def get_parser(self, prog_name):
        parser = super(ListService, self).get_parser(prog_name)
        parser.add_argument(
            "--host",
            metavar="<host>",
            help="Name of host")
        parser.add_argument(
            "--service",
            metavar="<service>",
            help="Name of service")
        parser.add_argument(
            "--long",
            action="store_true",
            default=False,
            help="List additional fields in output"
        )
        return parser

    def take_action(self, parsed_args):
        compute_client = self.app.client_manager.compute
        # Base column set; --long appends the disable reason.
        columns = (
            "Id",
            "Binary",
            "Host",
            "Zone",
            "Status",
            "State",
            "Updated At",
        )
        if parsed_args.long:
            columns += ("Disabled Reason",)
        data = compute_client.services.list(parsed_args.host,
                                            parsed_args.service)
        return (columns,
                (utils.get_item_properties(s, columns) for s in data))
class SetService(command.Command):
    """Enable or disable a compute service on a given host."""
    def get_parser(self, prog_name):
        parser = super(SetService, self).get_parser(prog_name)
        parser.add_argument(
            "host",
            metavar="<host>",
            help="Name of host")
        parser.add_argument(
            "service",
            metavar="<service>",
            help="Name of service")
        # --enable / --disable are mutually exclusive; enabled defaults to True.
        enabled_group = parser.add_mutually_exclusive_group()
        enabled_group.add_argument(
            "--enable",
            dest="enabled",
            default=True,
            help="Enable a service (default)",
            action="store_true")
        enabled_group.add_argument(
            "--disable",
            dest="enabled",
            help="Disable a service",
            action="store_false")
        # NOTE(review): "(in quotas)" in the help text below looks like a typo
        # for "(in quotes)" -- runtime string left untouched here.
        parser.add_argument(
            "--disable-reason",
            default=None,
            metavar="<reason>",
            help="Reason for disabling the service (in quotas)"
        )
        return parser
    def take_action(self, parsed_args):
        compute_client = self.app.client_manager.compute
        cs = compute_client.services
        if not parsed_args.enabled:
            # When disabling, record the reason if one was supplied.
            if parsed_args.disable_reason:
                cs.disable_log_reason(parsed_args.host,
                                      parsed_args.service,
                                      parsed_args.disable_reason)
            else:
                cs.disable(parsed_args.host, parsed_args.service)
        else:
            cs.enable(parsed_args.host, parsed_args.service)
|
[
"743564797@qq.com"
] |
743564797@qq.com
|
11f5c3bcb9eba7676885da12844d29f27363fd23
|
630c0dfb160605f12c2cd344ceb48e4bac12219f
|
/lib/threads.py
|
6352af7b239322c116c602323519496bc0a1fab1
|
[
"MIT"
] |
permissive
|
autowitch/llama
|
44611bcc4d9c8b51a7e2001a9632315bbb7b7288
|
cc18e1f646e6deae5a461b8a4f3a914463999b35
|
refs/heads/master
| 2016-09-02T01:01:49.928364
| 2013-08-30T21:02:53
| 2013-08-30T21:02:53
| 12,006,453
| 1
| 1
| null | 2013-08-10T16:49:03
| 2013-08-09T17:17:08
| null |
UTF-8
|
Python
| false
| false
| 7,426
|
py
|
import copy
from lib.program_state import ProgramState
class Threads(object):
    """Container managing the interpreter's per-thread ``ProgramState`` objects.

    ``self.threads`` holds one ``ProgramState`` per logical thread; a slot is
    set to ``None`` when its thread is deleted.  Most methods are thin
    accessors that guard against a deleted slot by returning ``None``.  The
    ``subthread`` parameters are accepted but currently unused placeholders.

    NOTE(review): ``copy`` and ``delete`` call ``self.debug``, which is not
    defined on this class -- presumably supplied by a subclass or monkey-patch;
    confirm before relying on those methods.
    """

    def __init__(self):
        self.threads = [ProgramState()]  # thread 0 always exists at start
        self.alive = True  # cleared once the last thread is deleted

    def thread(self, thread=0):
        """Return the ProgramState for *thread* (None if deleted)."""
        return self.threads[thread]

    # ----- importance ---------------------------------------------------
    def importance(self, thread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].importance

    def set_importance(self, new_value, thread=0):
        if not self.threads[thread]:
            return None
        self.threads[thread].importance = new_value

    def inc_importance(self, amount=1, thread=0):
        if not self.threads[thread]:
            return None
        self.threads[thread].importance += amount

    def dec_importance(self, amount=1, thread=0):
        if not self.threads[thread]:
            return None
        # Importance never drops to (or below) 1 via decrement.
        if self.threads[thread].importance > 1:
            self.threads[thread].importance -= amount

    # ----- code / instruction pointer -----------------------------------
    def code(self, thread):
        if not self.threads[thread]:
            return None
        return self.threads[thread].code

    def ip(self, thread=0, subthread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].ip

    def set_ip(self, new_value, thread=0, subthread=0):
        if not self.threads[thread]:
            return None
        self.threads[thread].ip = new_value

    def ip_dir(self, thread=0, subthread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].ip_dir

    def set_ip_dir(self, new_value, thread=0, subthread=0):
        if not self.threads[thread]:
            return None
        self.threads[thread].ip_dir = new_value

    # ----- execution state accessors ------------------------------------
    def execution_probability(self, thread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].execution_probability

    def command_stack(self, thread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].command_stack

    def symbol_table(self, thread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].symbol_table

    def last_value(self, thread=0, subthread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].last_value

    def set_last_value(self, new_value, thread=0, subthread=0):
        if not self.threads[thread]:
            return None
        self.threads[thread].last_value = new_value

    def code_line_stack(self, thread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].code_line_stack

    def full_command_cache(self, thread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].full_command_cache

    def maybe_stack(self, thread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].maybe_stack

    def forget_stack(self, thread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].forget_stack

    # ----- interpreter flags ---------------------------------------------
    def invert_next_importance_check(self, thread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].invert_next_importance_check

    def set_invert_next_importance_check(self, new_value, thread=0):
        if not self.threads[thread]:
            return None
        self.threads[thread].invert_next_importance_check = new_value

    def swing_ip(self, thread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].swing_ip

    def enable_rhi(self, thread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].enable_rhi

    def mangle(self, thread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].mangle

    def assign_mangle_source(self, thread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].assign_mangle_source

    def reverse_next_assignment_arrow(self, thread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].reverse_next_assignment_arrow

    def instruction_skip(self, thread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].instruction_skip

    def swap_rhi_and_value(self, thread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].swap_rhi_and_value

    def invert_this(self, thread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].invert_this

    def set_invert_this(self, new_value, thread=0):
        if not self.threads[thread]:
            return None
        self.threads[thread].invert_this = new_value

    def last_smiley(self, thread=0, subthread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].last_smiley

    def set_last_smiley(self, new_value, thread=0):
        if not self.threads[thread]:
            return None
        self.threads[thread].last_smiley = new_value

    # ----- command builder stuff -----------------------------------------
    def command(self, thread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].command

    def set_command(self, new_value, thread=0):
        if not self.threads[thread]:
            return None
        self.threads[thread].command = new_value

    def command_state(self, thread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].command_state

    def set_command_state(self, new_value, thread=0):
        if not self.threads[thread]:
            return None
        self.threads[thread].command_state = new_value

    def complete(self, thread=0):
        if not self.threads[thread]:
            return None
        return self.threads[thread].complete

    def set_complete(self, complete=False, thread=0):
        if not self.threads[thread]:
            return None
        self.threads[thread].complete = complete

    # ----- thread lifecycle ----------------------------------------------
    def copy(self, source_thread):
        """Fork *source_thread* into a new thread; returns the new thread id.

        The copy starts with a fresh command-builder state (no pending
        command, empty stack) but otherwise inherits the source's state.
        """
        if not self.threads[source_thread]:
            return None
        new_thread = copy.deepcopy(self.threads[source_thread])
        new_thread.last_smiley = None
        new_thread.command_stack = []
        new_thread.command = []
        new_thread.command_state = 0
        new_thread.complete = False
        self.threads.append(new_thread)
        new_thread_id = len(self.threads) - 1
        self.debug(4, "New thread %s created from %s" % (new_thread_id, source_thread))
        return new_thread_id

    def delete(self, thread_id):
        """Delete a thread; clears ``self.alive`` when none remain.

        Slots are nulled rather than removed so existing thread ids stay valid.
        """
        if not self.threads[thread_id]:
            return None
        self.debug(4, "Deleting thread %d" % thread_id)
        self.threads[thread_id] = None
        thread_count = 0
        for x in self.threads:
            if x:
                thread_count += 1
        if not thread_count:
            self.debug(4, "No threads remain, we are no longer alive")
            self.alive = False
        else:
            self.debug(5, "%d active threads" % thread_count)

    def collapse(self, thread_1, thread_2):
        # Not implemented yet.
        pass

    def mingle(self, thread_1, thread_2):
        # Not implemented yet.
        pass

    def thread_alive(self, thread=0):
        """True if the slot for *thread* has not been deleted."""
        # Fixed: identity test instead of `!= None` equality comparison.
        return self.threads[thread] is not None
|
[
"autowitch@autowit.ch"
] |
autowitch@autowit.ch
|
36538ead1548aad6d6b1bc9ecaa050eda7d7fdae
|
a42fdd9571bdb548d275d54aaaeccce10a6d5eca
|
/users/utils.py
|
d89fd35b4402fcd49aa375668ddddd91879af96e
|
[] |
no_license
|
brodanielx/foidatasuite
|
86dfbb30c0ab8339860bfe80f3cb507e79c0f58c
|
6ee5ab985e62af6eddf4f1219aa8a3661e745b62
|
refs/heads/master
| 2022-12-11T01:57:16.407929
| 2020-09-08T12:34:18
| 2020-09-08T12:34:18
| 254,344,525
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,776
|
py
|
import pandas as pd
import random
import string
from django.contrib.auth.models import User
from .models import Profile
from foidata.data.data import FOIData
class UserUtils:
    """Synchronize Django ``User``/``Profile`` records with the FOI roster."""

    def __init__(self):
        # Roster data comes from the FOI data source.
        self.data = FOIData()

    def create_or_update(self):
        """Create or update a Django user (and profile) for every roster row.

        Returns:
            tuple: ``(created_count, updated_count)``.
        """
        self.roster = self.data.roster
        created_count = 0
        updated_count = 0
        for _, row in self.roster.iterrows():
            username = self.get_username(row)
            try:
                user = User.objects.get(profile__nation_id=int(row['NationId']))
            except User.DoesNotExist:
                user = None
            if user:
                # Existing account: refresh its fields from the roster row.
                user.username = username
                user.first_name = row['FirstName']
                user.last_name = row['LastName']
                user.email = row['Email']
                user.is_active = bool(row['Active'])
                user.save()
                profile = self.update_profile(user, row)
                updated_count += 1
                print(f'Updated: {user} - {profile}')
            else:
                # New account: generate a password and create the user.
                password = self.generate_password()
                user = User.objects.create_user(
                    username,
                    first_name = row['FirstName'],
                    last_name = row['LastName'],
                    email = row['Email'],
                    password = password,
                    is_active = bool(row['Active'])
                )
                user.save()
                profile = self.update_profile(user, row)
                created_count += 1
                print(f'Created: username: {user} - password: {password} - {profile}')
        return created_count, updated_count

    def update_profile(self, user, roster_row):
        """Copy roster fields onto the user's ``Profile`` and save it."""
        row = roster_row
        profile = Profile.objects.get(user=user)
        profile.nation_id = int(row['NationId'])
        profile.city = row['City']
        profile.rank = row['Rank']
        profile.receive_emails = row['ReceiveEmails']
        profile.save()
        return profile

    def get_username(self, df_row):
        """Build a username as ``<first><last-initial><nation_id>``, lowercased."""
        first_name = df_row['FirstName'].lower().strip()
        last_initial = df_row['LastName'][0].lower().strip()
        nation_id_str = str(df_row['NationId']).strip()
        return f'{first_name}{last_initial}{nation_id_str}'

    def generate_password(self, string_length=10):
        """Return a random alphanumeric password of *string_length* characters.

        Fixed: uses the OS CSPRNG via ``random.SystemRandom`` instead of the
        default Mersenne Twister, which is not suitable for secrets.
        """
        characters = string.ascii_letters + string.digits
        rng = random.SystemRandom()
        return ''.join(rng.choice(characters) for _ in range(string_length))

    def get_profiles_df(self):
        """Return active profiles as a DataFrame (nation_id, first/last name)."""
        profiles = Profile.objects.active()
        values = profiles.values('nation_id', 'user__first_name', 'user__last_name')
        return pd.DataFrame.from_records(values)
|
[
"bro.danielx@gmail.com"
] |
bro.danielx@gmail.com
|
41eb20af7b951785f6b081f2505c64455c80e8e6
|
2a3dd37d150ca6dd0bbf1b1915bf9141527f6643
|
/pyircbot/modules/DCC.py
|
bb01dcdab2bef1ea0d8e42d1835806f21b1eb67d
|
[] |
no_license
|
dpedu/pyircbot
|
17a13bdb43dd86e150f42fd1fe19a9612765a3ba
|
13ea0fe52fd5c41e2f947be17ef19e3d0b62f72f
|
refs/heads/master
| 2021-06-13T09:38:21.845096
| 2021-03-04T21:12:12
| 2021-03-04T21:12:12
| 141,069,697
| 0
| 2
| null | 2021-03-04T21:12:12
| 2018-07-16T00:56:28
|
Python
|
UTF-8
|
Python
| false
| false
| 5,456
|
py
|
"""
.. module:: DCC
:synopsis: Module providing support for IRC's dcc protocol
.. moduleauthor:: Dave Pedu <dave@davepedu.com>
"""
import os
from pyircbot.modulebase import ModuleBase
import socket
from threading import Thread
from random import randint
from time import sleep
# Chunk size (bytes) used for both sending and receiving file data.
BUFFSIZE = 8192


class TransferFailedException(Exception):
    """Raised when a DCC receive ends before the expected byte count arrives."""
    pass
def ip2int(ipstr):
    """Convert a dotted-quad IP address string to its 32-bit integer form."""
    value = 0
    for piece in ipstr.split("."):
        value = (value << 8) | int(piece)
    return value
def int2ip(num):
    """Convert a 32-bit integer to its dotted-quad IP address string."""
    # Highest octet first: shift then mask, which is equivalent to the
    # mask-then-shift form for each byte position.
    return ".".join(str((num >> shift) & 255) for shift in (24, 16, 8, 0))
class DCC(ModuleBase):
    """Bot service module providing DCC file-transfer offers and receives."""

    def __init__(self, bot, name):
        super().__init__(bot, name)
        self.services = ["dcc"]
        self.is_kill = False
        self.transfers = []

    def offer(self, file_path, port=None):
        """
        Offer a file to another user.
        - check file size
        - start listener socket thread on some port
        - info about the file: tuple of (ip, port, size)

        :param file_path: path of the file to offer
        :param port: listening port to use; when None (the default) a random
            port is chosen from the configured range.
        """
        if port is None:
            # Fixed: previously a random port unconditionally overwrote any
            # caller-supplied port. TODO it would be better to let the system assign
            port_range = self.config.get("port_range", [40000, 60000])
            port = randint(*port_range)
        bind_addr = self.config.get("bind_host", "0.0.0.0")
        advertise_addr = self.config.get("public_addr", bind_addr)
        flen = os.path.getsize(file_path)
        offer = OfferThread(self, file_path, bind_addr, port)  # offers are considered ephemeral. even if this module is
        # unloaded, initiated transfers may continue. They will not block python from exiting (at which time they *will*
        # be terminated).
        offer.start()
        return (ip2int(advertise_addr), port, flen, offer)

    def recieve(self, host, port, length):
        """
        Receive a file another user has offered. Returns a generator that yields data chunks.
        """
        return RecieveGenerator(host, port, length)
class RecieveGenerator(object):
    """Iterable that connects to a DCC sender and yields the file in chunks.

    Iterating opens a TCP connection to ``host:port`` and yields ``bytes``
    chunks until ``length`` bytes have been received.

    Raises:
        TransferFailedException: if the connection ends before ``length``
            bytes have arrived.
    """

    def __init__(self, host, port, length):
        self.host = host
        self.port = port
        self.length = length

    def __iter__(self):
        self.sock = socket.create_connection((self.host, self.port), timeout=10)
        total = 0
        try:
            while True:
                if total == self.length:
                    break
                chunk = self.sock.recv(BUFFSIZE)
                total += len(chunk)
                if not chunk:
                    # Peer closed the connection (possibly prematurely).
                    break
                yield chunk
                if total >= self.length:
                    break
            # Fixed: removed a leftover debug print() of the byte totals that
            # wrote to stdout on every completed transfer.
            if total != self.length:
                raise TransferFailedException("Transfer failed: expected {} bytes but got {}".format(self.length, total))
        finally:
            self.sock.close()
class OfferThread(Thread):
    def __init__(self, master, path, bind_addr, port, timeout=30):
        """
        DCC file transfer offer listener
        :param master: reference to the parent module
        :param path: file path to be opened and transferred
        :param bind_addr: address str to bind the listener socket to
        :param port: port number int to listen on
        :param timeout: number of seconds to give up after
        """
        super().__init__()
        self.master = master
        self.path = path
        self.bind = bind_addr
        self.port = port
        self.timeout = timeout
        self.listener = None
        self.daemon = True
        # set True once the listener socket has bound successfully
        self.bound = False
        # Watchdog thread: expires this offer after `timeout` seconds.
        Thread(target=self.abort, daemon=True).start()
    def run(self):
        """
        Open a server socket that accepts a single connections. When the first client connects, send the contents of the
        offered file.
        """
        self.listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            self.listener.bind((self.bind, self.port))
            self.listener.listen(1)
            self.bound = True
            # Blocks until a peer connects (or abort() closes the listener,
            # which makes accept() raise and unwinds through the finally).
            (clientsocket, address) = self.listener.accept()
            try:
                self.send_file(clientsocket)
            finally:
                clientsocket.shutdown(socket.SHUT_RDWR)
                clientsocket.close()
        finally:
            # try:
            # self.listener.shutdown(socket.SHUT_RDWR)
            self.listener.close()
            # except Exception:
            #     pass
    def abort(self):
        """
        Expire the offer after a timeout.
        """
        sleep(self.timeout)
        self.stopoffer()
    def send_file(self, socket):
        """
        Send the contents of the offered file to the passed socket
        :param socket: socket object ready for sending
        :type socket: socket.socket
        """
        # NOTE(review): the `socket` parameter shadows the imported socket
        # module within this method; renaming would change the signature,
        # so it is documented rather than changed here.
        with open(self.path, 'rb') as f:
            # Stop early if the parent module has been killed.
            while not self.master.is_kill:
                chunk = f.read(BUFFSIZE)
                if not chunk:
                    break
                socket.send(chunk)
    def stopoffer(self):
        """
        Prematurely shut down & cleanup the offer socket
        """
        try:
            self.listener.shutdown(socket.SHUT_RDWR)
            self.listener.close()
        except Exception: # should only error if already cleaned up
            pass
|
[
"dave@davepedu.com"
] |
dave@davepedu.com
|
0a738c3349324ee6b41daae6d171fc4fd442ef42
|
0a6309d2aa2dd577fe5a1aed8e6ebf2b01c670a2
|
/0x0F-python-object_relational_mapping/5-filter_cities.py
|
f2c23fbb44de055e0fd86790118ac104232de653
|
[] |
no_license
|
arleybri18/holbertonschool-higher_level_programming
|
74675ce64801c72fc96318fc568b040b9f4b761e
|
2f0b9a7f462595f67de12d4c3880375e87552357
|
refs/heads/master
| 2020-05-18T02:22:10.969677
| 2019-09-26T20:32:27
| 2019-09-26T20:32:27
| 184,113,244
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 713
|
py
|
#!/usr/bin/python3
""" This script return cities and states """
if __name__ == '__main__':
    import MySQLdb
    from sys import argv

    # Usage: ./5-filter_cities.py <mysql user> <password> <database> <state name>
    db = MySQLdb.connect(host="localhost",
                         port=3306,
                         user=argv[1],
                         passwd=argv[2],
                         db=argv[3])
    cur = db.cursor()
    # Parameterized query: the state name is bound by the driver, which
    # avoids SQL injection through argv[4].
    cur.execute("""SELECT c.name FROM cities c
                LEFT JOIN states s ON c.state_id = s.id
                WHERE s.name LIKE BINARY %s
                ORDER BY c.id ASC""", (argv[4],))
    # Print the matching city names comma-separated on one line.
    # (Removed an unused counter variable and the manual append loop.)
    print(", ".join(row[0] for row in cur.fetchall()))
    cur.close()
    db.close()
|
[
"arleybri18@gmail.com"
] |
arleybri18@gmail.com
|
b55aa38b6aeafae6ab84550e4f4d05cf5ecb48e9
|
57eb44ce1d84aca3580e28688cf645db483d0d03
|
/plots/model_explorer/plotters/bar_plot/date_distribution/configuration.py
|
ad5343c47a19a4f6849a1e464300eac6c366a6f2
|
[
"Apache-2.0"
] |
permissive
|
TheLabbingProject/pylabber
|
cbfd7a6663d56f779dde96bd6e0281f5c8f06393
|
4b51065f457ab86ed311f222080187caf1979fea
|
refs/heads/master
| 2023-04-08T05:29:08.356479
| 2023-03-29T09:06:11
| 2023-03-29T09:06:11
| 205,411,164
| 5
| 3
|
Apache-2.0
| 2023-03-29T09:06:13
| 2019-08-30T15:40:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,659
|
py
|
from bokeh.layouts import column
from bokeh.models import ColorPicker, Div, RadioButtonGroup
from functools import partial
from plots.model_explorer.plotters.bar_plot.configuration import BarPlotConfigration
from plots.model_explorer.plotters.bar_plot.date_distribution.time_bins import TimeBins
class DateDistributionConfiguration(BarPlotConfigration):
    """Configuration widgets for a date-distribution bar plot: a time-bin
    selector plus one color picker per rendered glyph."""

    # Button labels, in display order, for the day/month/year bin choice.
    TIME_BIN_LABELS = [TimeBins.DAY.value, TimeBins.MONTH.value, TimeBins.YEAR.value]

    def __init__(self, plot: list, field_name: str, time_bin: TimeBins):
        # `plot` is a list of renderers; the data source is taken from the first.
        self.plot = plot
        self.field_name = field_name
        self.source = self.plot[0].data_source
        self.time_bin = time_bin
        self.time_bin_title = Div(text="Bins")
        self.time_bin_select = self.create_time_bin_select()
        self.color_pickers = self.create_color_pickers()

    def create_time_bin_select(self) -> RadioButtonGroup:
        """Build the day/month/year radio group with the current bin preselected."""
        active = self.TIME_BIN_LABELS.index(self.time_bin.value)
        rbg = RadioButtonGroup(labels=self.TIME_BIN_LABELS, active=active, width=210)
        return rbg

    def create_color_pickers(self) -> column:
        """Build one ColorPicker per renderer, wired to recolor its glyph."""
        color_pickers = []
        for i, plot in enumerate(self.plot):
            picker = ColorPicker(color=plot.glyph.fill_color, width=100)
            # partial binds the renderer index so the callback knows which
            # glyph to recolor.
            picker.on_change("color", partial(self.handle_color_change, i))
            color_pickers.append(picker)
        return column(*color_pickers)

    def handle_color_change(self, index: int, attr: str, old: str, new: str) -> None:
        """Apply a picker's new color to the matching renderer's glyph."""
        self.plot[index].glyph.fill_color = new

    def create_layout(self) -> column:
        """Return the widgets stacked vertically: title, selector, pickers."""
        return column(self.time_bin_title, self.time_bin_select, self.color_pickers)
|
[
"z.baratz@gmail.com"
] |
z.baratz@gmail.com
|
7a361d1adf99fae459b1da2f52cd47a77019879f
|
c5b9f0fabffb6b2d13c6e350c8187a922709ac60
|
/build/pal_behaviour_msgs/catkin_generated/pkg.develspace.context.pc.py
|
6f9cd8175d514a31c32fd406360b8c85f26e133b
|
[] |
no_license
|
MohamedEhabHafez/Sorting_Aruco_Markers
|
cae079fdce4a14561f5e092051771d299b06e789
|
0f820921c9f42b39867565441ed6ea108663ef6c
|
refs/heads/master
| 2020-12-09T02:43:00.731223
| 2020-01-15T17:31:29
| 2020-01-15T17:31:29
| 233,154,293
| 0
| 0
| null | 2020-10-13T18:46:44
| 2020-01-11T00:41:38
|
Makefile
|
UTF-8
|
Python
| false
| false
| 598
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated pkg-config context for the catkin devel space — do not edit.
CATKIN_PACKAGE_PREFIX = ""
# Exported include directories; the `!= ""` guard yields [] when unset.
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/mohamed/tiago_public_ws/devel/.private/pal_behaviour_msgs/include".split(';') if "/home/mohamed/tiago_public_ws/devel/.private/pal_behaviour_msgs/include" != "" else []
# Run-time catkin dependencies, converted to a space-separated string.
PROJECT_CATKIN_DEPENDS = "message_runtime;std_msgs;actionlib_msgs".replace(';', ' ')
# Exported libraries (with prefix); same empty-string guard as above.
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "pal_behaviour_msgs"
PROJECT_SPACE_DIR = "/home/mohamed/tiago_public_ws/devel/.private/pal_behaviour_msgs"
PROJECT_VERSION = "0.12.14"
|
[
"mohamed@radiirobotics.com"
] |
mohamed@radiirobotics.com
|
877377666b0e187e0c2651818c61dbb37457cf93
|
27e890f900bd4bfb2e66f4eab85bc381cf4d5d3f
|
/plugins/lookup/consul_kv.py
|
c38e7bf7125ed4ca8fbb3fd8c1caa1a023f840b3
|
[] |
no_license
|
coll-test/notstdlib.moveitallout
|
eb33a560070bbded5032385d0aea2f3cf60e690b
|
0987f099b783c6cf977db9233e1c3d9efcbcb3c7
|
refs/heads/master
| 2020-12-19T22:28:33.369557
| 2020-01-23T18:51:26
| 2020-01-23T18:51:26
| 235,865,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,712
|
py
|
# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
lookup: consul_kv
short_description: Fetch metadata from a Consul key value store.
description:
- Lookup metadata for a playbook from the key value store in a Consul cluster.
Values can be easily set in the kv store with simple rest commands
- C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata)
requirements:
- 'python-consul python library U(https://python-consul.readthedocs.io/en/latest/#installation)'
options:
_raw:
description: List of key(s) to retrieve.
type: list
required: True
recurse:
type: boolean
description: If true, will retrieve all the values that have the given key as prefix.
default: False
index:
description:
- If the key has a value with the specified index then this is returned allowing access to historical values.
datacenter:
description:
- Retrieve the key from a consul datatacenter other than the default for the consul host.
token:
description: The acl token to allow access to restricted values.
host:
default: localhost
description:
- The target to connect to, must be a resolvable address.
Will be determined from C(ANSIBLE_CONSUL_URL) if that is set.
- "C(ANSIBLE_CONSUL_URL) should look like this: C(https://my.consul.server:8500)"
env:
- name: ANSIBLE_CONSUL_URL
ini:
- section: lookup_consul
key: host
port:
description:
- The port of the target host to connect to.
- If you use C(ANSIBLE_CONSUL_URL) this value will be used from there.
default: 8500
scheme:
default: http
description:
- Whether to use http or https.
- If you use C(ANSIBLE_CONSUL_URL) this value will be used from there.
validate_certs:
default: True
description: Whether to verify the ssl connection or not.
env:
- name: ANSIBLE_CONSUL_VALIDATE_CERTS
ini:
- section: lookup_consul
key: validate_certs
client_cert:
description: The client cert to verify the ssl connection.
env:
- name: ANSIBLE_CONSUL_CLIENT_CERT
ini:
- section: lookup_consul
key: client_cert
'''
EXAMPLES = """
- debug:
msg: 'key contains {{item}}'
with_consul_kv:
- 'key/to/retrieve'
- name: Parameters can be provided after the key be more specific about what to retrieve
debug:
msg: 'key contains {{item}}'
with_consul_kv:
- 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
- name: retrieving a KV from a remote cluster on non default port
debug:
msg: "{{ lookup('consul_kv', 'my/key', host='10.10.10.10', port='2000') }}"
"""
RETURN = """
_raw:
description:
- Value(s) stored in consul.
"""
import os
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.plugins.lookup import LookupBase
from ansible_collections.notstdlib.moveitallout.plugins.module_utils._text import to_text
try:
import consul
HAS_CONSUL = True
except ImportError as e:
HAS_CONSUL = False
class LookupModule(LookupBase):
    """Lookup plugin fetching values from a Consul key/value store."""

    def run(self, terms, variables=None, **kwargs):
        """Resolve each term (a key plus optional inline name=value params).

        Connection settings come from the ANSIBLE_CONSUL_* environment
        variables when all are present, otherwise from lookup kwargs.
        Returns a flat list of text values; raises AnsibleError on failure.
        """
        if not HAS_CONSUL:
            raise AnsibleError(
                'python-consul is required for consul_kv lookup. see http://python-consul.readthedocs.org/en/latest/#installation')

        values = []
        try:
            for term in terms:
                params = self.parse_params(term)
                try:
                    # Environment-driven configuration takes precedence.
                    url = os.environ['ANSIBLE_CONSUL_URL']
                    # NOTE(review): truthiness checks on raw env strings — a
                    # value like "False" stays the truthy string "False";
                    # confirm this matches the documented behavior.
                    validate_certs = os.environ['ANSIBLE_CONSUL_VALIDATE_CERTS'] or True
                    client_cert = os.environ['ANSIBLE_CONSUL_CLIENT_CERT'] or None
                    u = urlparse(url)
                    consul_api = consul.Consul(host=u.hostname, port=u.port, scheme=u.scheme, verify=validate_certs,
                                               cert=client_cert)
                except KeyError:
                    # Any missing env var lands here: fall back to kwargs.
                    port = kwargs.get('port', '8500')
                    host = kwargs.get('host', 'localhost')
                    scheme = kwargs.get('scheme', 'http')
                    validate_certs = kwargs.get('validate_certs', True)
                    client_cert = kwargs.get('client_cert', None)
                    consul_api = consul.Consul(host=host, port=port, scheme=scheme, verify=validate_certs,
                                               cert=client_cert)

                results = consul_api.kv.get(params['key'],
                                            token=params['token'],
                                            index=params['index'],
                                            recurse=params['recurse'],
                                            dc=params['datacenter'])
                if results[1]:
                    # responds with a single or list of result maps
                    if isinstance(results[1], list):
                        for r in results[1]:
                            values.append(to_text(r['Value']))
                    else:
                        values.append(to_text(results[1]['Value']))
        except Exception as e:
            # Wrap any lookup failure, keeping the offending term in the message.
            raise AnsibleError(
                "Error locating '%s' in kv store. Error was %s" % (term, e))

        return values

    def parse_params(self, term):
        """Split a lookup term into its key and optional name=value options.

        Unknown option names raise, re-wrapped as AnsibleError.
        """
        params = term.split(' ')
        paramvals = {
            'key': params[0],
            'token': None,
            'recurse': False,
            'index': None,
            'datacenter': None
        }
        # parameters specified?
        try:
            for param in params[1:]:
                if param and len(param) > 0:
                    name, value = param.split('=')
                    if name not in paramvals:
                        raise AnsibleAssertionError("%s not a valid consul lookup parameter" % name)
                    paramvals[name] = value
        except (ValueError, AssertionError) as e:
            raise AnsibleError(e)
        return paramvals
|
[
"wk@sydorenko.org.ua"
] |
wk@sydorenko.org.ua
|
9a902b4125b04a3bd948e287ab07dfd3cb806616
|
5e381364c2ab31ff3618369085afffba6caa8edb
|
/recipes/libgpiod/all/conanfile.py
|
db1d1a01710835418891263138eb1d28aea2198b
|
[
"MIT"
] |
permissive
|
CAMOBAP/conan-center-index
|
16aea68a6d22da22831ba985773125e8eda08f00
|
67d57532bdad549fef3fa6cb8fcdfa86bc55e4f1
|
refs/heads/master
| 2023-07-30T08:58:57.285571
| 2021-10-02T14:57:54
| 2021-10-02T14:57:54
| 323,262,699
| 1
| 0
|
MIT
| 2021-05-29T13:37:04
| 2020-12-21T07:30:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,349
|
py
|
from conans import ConanFile, tools, AutoToolsBuildEnvironment
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.33.0"
class LibgpiodConan(ConanFile):
    """Conan recipe for libgpiod, the Linux GPIO character-device library."""
    name = "libgpiod"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://git.kernel.org/pub/scm/libs/libgpiod/libgpiod.git/"
    license = "LGPL-2.1-or-later"
    description = "C library and tools for interacting with the linux GPIO character device"
    topics = ("gpio", "libgpiod", "libgpiodcxx", "linux")
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        "enable_bindings_cxx": [True, False],
        "enable_tools": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        "enable_bindings_cxx": False,
        "enable_tools": False,
    }
    # Cached build env so configure runs at most once per build.
    _autotools = None

    @property
    def _source_subfolder(self):
        # Conventional unpack directory for the upstream sources.
        return "source_subfolder"

    def validate(self):
        """Reject non-Linux configurations — the GPIO chardev API is Linux-only."""
        if self.settings.os != "Linux":
            raise ConanInvalidConfiguration("libgpiod supports only Linux")

    def configure(self):
        # fPIC is meaningless for shared builds; C++ settings only matter
        # when the C++ bindings are built.
        if self.options.shared:
            del self.options.fPIC
        if not self.options.enable_bindings_cxx:
            del self.settings.compiler.libcxx
            del self.settings.compiler.cppstd

    def build_requirements(self):
        # Autotools toolchain needed to run autoreconf + configure below.
        self.build_requires("libtool/2.4.6")
        self.build_requires("pkgconf/1.7.4")
        self.build_requires("autoconf-archive/2021.02.19")
        self.build_requires("linux-headers-generic/5.13.9")

    def source(self):
        tools.get(**self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True)

    def _configure_autotools(self):
        """Create (once) and return the configured autotools environment."""
        if self._autotools:
            return self._autotools
        self._autotools = AutoToolsBuildEnvironment(self)
        yes_no = lambda v: "yes" if v else "no"
        args = [
            "--enable-shared={}".format(yes_no(self.options.shared)),
            "--enable-static={}".format(yes_no(not self.options.shared)),
            "--enable-bindings-cxx={}".format(yes_no(self.options.enable_bindings_cxx)),
            "--enable-tools={}".format(yes_no(self.options.enable_tools)),
        ]
        self._autotools.configure(args=args, configure_dir=self._source_subfolder)
        return self._autotools

    def build(self):
        # Regenerate the configure script from source before building.
        with tools.chdir(os.path.join(self._source_subfolder)):
            self.run("{} -fiv".format(tools.get_env("AUTORECONF")), run_environment=True)
        autotools = self._configure_autotools()
        autotools.make()

    def package(self):
        self.copy("COPYING", dst="licenses", src=self._source_subfolder)
        autotools = self._configure_autotools()
        autotools.install()
        # Strip libtool archives and pkg-config files from the package.
        tools.remove_files_by_mask(self.package_folder, "*.la")
        tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))

    def package_info(self):
        self.cpp_info.components["gpiod"].libs = ["gpiod"]
        self.cpp_info.components["gpiod"].names["pkg_config"] = "gpiod"
        if self.options.enable_bindings_cxx:
            self.cpp_info.components["gpiodcxx"].libs = ["gpiodcxx"]
            self.cpp_info.components["gpiodcxx"].names["pkg_config"] = "gpiodcxx"
|
[
"noreply@github.com"
] |
CAMOBAP.noreply@github.com
|
e20b16e57e9aa44e69a14efd8227487613b32c35
|
bb3ab1d635a1696bb8cb81c672d06747d9a521a6
|
/test/functional/rpc_help.py
|
6c7126e7e75d42fc5c4a85c81eb2b201fd9eb441
|
[
"MIT"
] |
permissive
|
dogxteam/dogxwallet-master
|
55ab22aa37c7ce131a06151958743acb1f3e12af
|
346189354bdec9a80c20bdc429ddec15c3b17b73
|
refs/heads/master
| 2020-04-28T23:42:38.257585
| 2019-03-14T17:19:07
| 2019-03-14T17:19:07
| 175,666,685
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,921
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The dogxcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC help output."""
from test_framework.test_framework import dogxcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
import os
class HelpRpcTest(dogxcoinTestFramework):
    """Functional test for the `help` RPC: argument validation, category
    titles, and a dump of every command's help text."""

    def set_test_params(self):
        self.num_nodes = 1

    def run_test(self):
        self.test_categories()
        self.dump_help()

    def test_categories(self):
        """Check help error handling and the set of '== Category ==' titles."""
        node = self.nodes[0]

        # wrong argument count
        assert_raises_rpc_error(-1, 'help', node.help, 'foo', 'bar')

        # invalid argument
        assert_raises_rpc_error(-1, 'JSON value is not a string as expected', node.help, 0)

        # help of unknown command
        assert_equal(node.help('foo'), 'help: unknown command: foo')

        # command titles: strip the '== ' / ' ==' decoration from header lines
        titles = [line[3:-3] for line in node.help().splitlines() if line.startswith('==')]

        # Expected categories depend on which features were compiled in.
        components = ['Blockchain', 'Control', 'Generating', 'Mining', 'Network', 'Rawtransactions', 'Util']
        if self.is_wallet_compiled():
            components.append('Wallet')
        if self.is_zmq_compiled():
            components.append('Zmq')
        assert_equal(titles, components)

    def dump_help(self):
        """Write each command's help text to tmpdir/rpc_help_dump/<command>."""
        dump_dir = os.path.join(self.options.tmpdir, 'rpc_help_dump')
        os.mkdir(dump_dir)
        # First whitespace-delimited token of each non-header line is the command name.
        calls = [line.split(' ', 1)[0] for line in self.nodes[0].help().splitlines() if line and not line.startswith('==')]
        for call in calls:
            with open(os.path.join(dump_dir, call), 'w', encoding='utf-8') as f:
                # Make sure the node can generate the help at runtime without crashing
                f.write(self.nodes[0].help(call))
|
[
"alizha@tom.com"
] |
alizha@tom.com
|
8be304163c504a477830cf9789a1f3a87f80b7fc
|
dfc3232256294e740d2d2eabf24d4390d99bf7c3
|
/basic_app/views.py
|
84e62fbdbc72b87744fce7df7e69f7034dbe2636
|
[] |
no_license
|
pritamSarkar123/django2020-PracOne
|
0158867ba0fa16acdf3acf5799b87cfad47662a1
|
6075939371aa353ae9db2c07e9d1061a8bd629a7
|
refs/heads/master
| 2022-07-12T15:34:26.080300
| 2020-05-14T18:32:54
| 2020-05-14T18:32:54
| 263,191,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,303
|
py
|
from django.shortcuts import render
from . forms import UserProfileInfoForm, UserForm
# Create your views here.
def index(request):
    """Render the site landing page."""
    template = 'basic_app/index.html'
    return render(request, template)
def login(request):
    # Placeholder view: login handling is not implemented yet.
    pass
def register(request):
    """Handle user sign-up.

    On POST, bind and validate both forms; save the user (hashing the raw
    password) and the linked profile, picking up an optional profile_pic
    upload. On GET, present empty forms. Always re-renders the
    registration template with a ``registered`` flag.
    """
    registered = False  # flips to True once both forms save cleanly
    if request.method == 'POST':
        user_form = UserForm(data=request.POST)
        profile_form = UserProfileInfoForm(data=request.POST)
        if user_form.is_valid() and profile_form.is_valid():
            user = user_form.save()
            user.set_password(user.password)  # hash the plain-text password
            user.save()
            profile = profile_form.save(commit=False)  # defer: needs user FK
            profile.user = user
            if 'profile_pic' in request.FILES:
                profile.profile_pic = request.FILES['profile_pic']
            profile.save()
            registered = True
        else:
            print(user_form.errors, profile_form.errors)
    else:
        user_form = UserForm()
        profile_form = UserProfileInfoForm()
    context = {'user_form': user_form, 'profile_form': profile_form, 'registered': registered}
    return render(request, 'basic_app/registration.html', context)
|
[
"pritamsarkar84208220@gmail.com"
] |
pritamsarkar84208220@gmail.com
|
3ce812d39b7566be6bfeeb56bcb099674c4f6e92
|
7385c450eca8be719ba45686db698b747e01cd91
|
/examples/ad_manager/v201905/user_service/deactivate_users.py
|
099e00a0b9aed6cbca48c385661ef364fe509473
|
[
"Apache-2.0"
] |
permissive
|
tanmaykhattar/googleads-python-lib
|
44f15b9f6a0c2a3da7f19c17133b5fba842daf07
|
81742dc3571c9413196cfceb57f761c79db6857a
|
refs/heads/master
| 2020-06-01T16:06:05.797538
| 2019-05-22T14:57:29
| 2019-05-22T14:57:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,245
|
py
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example deactivates a user.
Deactivated users can no longer make requests to the API. The user making the
request cannot deactivate itself. To determine which users exist, run
get_all_users.py.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
USER_ID = 'INSERT_USER_ID_TO_DEACTIVATE_HERE'
def main(client, user_id):
  """Deactivate the Ad Manager user with the given id.

  NOTE(review): Python 2 only — uses print statements and the long()
  builtin below.
  """
  # Initialize appropriate service.
  user_service = client.GetService('UserService', version='v201905')

  # Create query.
  statement = (ad_manager.StatementBuilder(version='v201905')
               .Where('id = :userId')
               .WithBindVariable('userId', long(user_id)))

  # Get users by statement.
  response = user_service.getUsersByStatement(statement.ToStatement())
  users = response['results'] if 'results' in response else []
  for user in users:
    # NOTE(review): keys are the strings 'true'/'false' — assumes isActive
    # is serialized as a string here; verify against the SOAP response.
    print ('User with id "%s", email "%s", and status "%s" will be '
           'deactivated.'
           % (user['id'], user['email'],
              {'true': 'ACTIVE', 'false': 'INACTIVE'}[user['isActive']]))
  print 'Number of users to be deactivated: %s' % len(users)

  # Perform action.
  result = user_service.performUserAction({'xsi_type': 'DeactivateUsers'},
                                          statement.ToStatement())

  # Display results.
  if result and int(result['numChanges']) > 0:
    print 'Number of users deactivated: %s' % result['numChanges']
  else:
    print 'No users were deactivated.'
if __name__ == '__main__':
  # Initialize client object.
  # Loads OAuth2 credentials and network settings from ~/googleads.yaml.
  ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
  main(ad_manager_client, USER_ID)
|
[
"davidwihl@users.noreply.github.com"
] |
davidwihl@users.noreply.github.com
|
6ff849f533bab517e0957bcbed0ee59fad455e3a
|
7326300e7888435dd374250a7573e6a3e1be1794
|
/setup.py
|
9d4cfc28fa60266faae98fc23116b0fb04e1aff6
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
lietu/wsps-python
|
8ac67a4622e4b1be147e84512364ca89f988200d
|
782146880bd47c3d96b2550e48cceeffcb9c2c2e
|
refs/heads/master
| 2021-07-12T16:03:57.392071
| 2021-06-20T08:41:55
| 2021-06-20T08:41:55
| 35,605,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 592
|
py
|
from distutils.core import setup


def _read_requirements(path="requirements.txt"):
    """Return the requirement strings listed in *path*.

    Reads the file directly instead of using pip's private
    ``pip.req.parse_requirements`` API, which required a ``session``
    argument in pip 6-9 and was removed entirely in pip 10, so the old
    import broke ``setup.py`` on any modern pip. Blank lines and comment
    lines are skipped.
    """
    with open(path) as handle:
        return [
            line.strip()
            for line in handle
            if line.strip() and not line.lstrip().startswith("#")
        ]


# Same module-level name the original exposed.
requirements = _read_requirements()

setup(
    name='wsps',
    packages=['wsps'],
    version='1.0.0',
    description='Python client library for WSPS server',
    author='Janne Enberg',
    author_email='janne.enberg@lietu.net',
    url='https://github.com/lietu/wsps-python',
    download_url='https://github.com/lietu/wsps-python/tarball/0.1',
    keywords=['wsps', 'client', 'pubsub', 'websocket'],
    classifiers=[],
    install_requires=requirements
)
|
[
"janne.enberg@lietu.net"
] |
janne.enberg@lietu.net
|
17835928e3b930371bb7d6ad88507962c20101f0
|
821687f9f0f523a1deea879f1bc4b866b489857f
|
/interaction3/cli.py
|
ff57e09ccd1f2aa6fa7988f2197101e67f277915
|
[
"MIT"
] |
permissive
|
bdshieh/interaction3
|
1426254e1153ad75fde828e7fee0905ced0a2566
|
b44c390045cf3b594125e90d2f2f4f617bc2433b
|
refs/heads/master
| 2021-05-09T13:00:49.681525
| 2020-07-08T14:41:09
| 2020-07-08T14:41:09
| 119,022,280
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,408
|
py
|
## interaction3 / cli.py
"""Command-line dispatcher routing `bem`/`mfield` subcommands to the
matching simulation script's main()."""
import argparse
import interaction3.bem.scripts
import interaction3.mfield.scripts

# CLI script name -> BEM script module (each exposes main(argv)).
bem_scripts = {}
bem_scripts['t-crosstalk'] = interaction3.bem.scripts.simulate_transmit_crosstalk
bem_scripts['r-crosstalk'] = interaction3.bem.scripts.simulate_receive_crosstalk
bem_scripts['build-orders-db'] = interaction3.bem.scripts.build_orders_database
bem_scripts['build-translations-db'] = interaction3.bem.scripts.build_translations_database

# CLI script name -> MFIELD script module.
mfield_scripts = {}
mfield_scripts['t-beamplot'] = interaction3.mfield.scripts.simulate_transmit_beamplot
mfield_scripts['tr-beamplot'] = interaction3.mfield.scripts.simulate_transmit_receive_beamplot

# define master parser
parser = argparse.ArgumentParser()

# define subparsers
subparsers = parser.add_subparsers()
bem_parser = subparsers.add_parser('bem')
mfield_parser = subparsers.add_parser('mfield')
# NOTE(review): the 'array' subcommand has no arguments or lookup wired up yet.
array_parser = subparsers.add_parser('array')

# define bem subparser arguments
bem_parser.add_argument('script_name')
bem_parser.set_defaults(lookup=bem_scripts)

# define mfield subparser arguments
mfield_parser.add_argument('script_name')
mfield_parser.set_defaults(lookup=mfield_scripts)


def main():
    """Parse the subcommand, then forward unrecognized args to the script."""
    args, unknown_args = parser.parse_known_args()
    args.lookup[args.script_name].main(unknown_args)
    # print(args)
    # print(unknown_args)
    # script_name = args.pop('script_name')
    # lookup = args.pop('lookup')
|
[
"bsauce@gmail.com"
] |
bsauce@gmail.com
|
8c2a9ca981026afbb3ed01666198c967722c8fd6
|
7b76e80f2057d78a721373e8818e153eecebe8f0
|
/Examples/ev1.py
|
7d661238bcdd22c777dc6495cae6820446b961cb
|
[] |
no_license
|
dev-area/Python
|
c744cf6eb416a74a70ad55d2bcfa8a6166adc45d
|
1421a1f154fe314453d2da8b0fafae79aa5086a6
|
refs/heads/master
| 2023-02-01T10:37:47.796198
| 2020-10-15T19:33:49
| 2020-10-15T19:33:49
| 86,337,870
| 35
| 44
| null | 2023-01-12T09:02:59
| 2017-03-27T13:20:49
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,168
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 18 01:08:50 2015

@author: liran
"""
# Grouped bar chart demo: men's vs women's scores with std-dev error bars
# and per-bar value labels.
import numpy as np
import matplotlib.pyplot as plt

N = 5
menMeans = (20, 35, 30, 35, 27)
menStd = (2, 3, 4, 1, 2)

ind = np.arange(N)  # the x locations for the groups
width = 0.35        # the width of the bars

fig, ax = plt.subplots()
rects1 = ax.bar(ind, menMeans, width, color='r', yerr=menStd)

womenMeans = (25, 32, 34, 20, 25)
womenStd = (3, 5, 2, 3, 3)
rects2 = ax.bar(ind+width, womenMeans, width, color='y', yerr=womenStd)

# add some text for labels, title and axes ticks
ax.set_ylabel('Scores')
ax.set_title('Scores by group and gender')
ax.set_xticks(ind+width)
ax.set_xticklabels( ('G1', 'G2', 'G3', 'G4', 'G5') )

ax.legend( (rects1[0], rects2[0]), ('Men', 'Women') )


def autolabel(rects):
    # attach some text labels: print each bar's integer height just above it
    for rect in rects:
        height = rect.get_height()
        ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height),
                ha='center', va='bottom')

autolabel(rects1)
autolabel(rects2)
def onpick(event):
    """pick_event callback: print the artist that was clicked.

    Bug fix: the original used the Python-2-only `print legline` statement
    while the rest of the script is 2/3-compatible; `print(legline)` parses
    (and behaves identically for a single argument) on both.
    """
    legline = event.artist
    print(legline)
# Echo any picked artist; only fires for artists created with picker enabled.
fig.canvas.mpl_connect('pick_event', onpick)
plt.show()
|
[
"liranbh@gmail.com"
] |
liranbh@gmail.com
|
f99c455fe294a2c29567b235098b104ac61644f8
|
1f51c4e89a71ea3fcc2cc921613aacc19e078b69
|
/12_Intermediate Data Visualization with Seaborn/02_Customizing Seaborn Plots/02_Comparing styles.py
|
b0a035357655de5933a5af2ad05b2de639a0f090
|
[
"MIT"
] |
permissive
|
CodeHemP/CAREER-TRACK-Data-Scientist-with-Python
|
871bafbd21c4e754beba31505965572dd8457adc
|
13ebb10cf9083343056d5b782957241de1d595f9
|
refs/heads/main
| 2023-03-26T08:43:37.054410
| 2021-03-22T15:08:12
| 2021-03-22T15:08:12
| 471,015,287
| 1
| 0
|
MIT
| 2022-03-17T13:52:32
| 2022-03-17T13:52:31
| null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
'''
02 - Comparing styles
Seaborn supports setting different styles that can control the aesthetics of
the final plot. In this exercise, you will plot the same data in two different
styles in order to see how the styles change the output.
Instructions 1/2
- Create a distplot() of the fmr_2 column in df using a 'dark' style. Use plt.clf()
to clear the figure.
'''
sns.set_style('dark')
sns.distplot(df['fmr_2'])
plt.show()
plt.clf()
'''
Instructions 2/2
- Create the same distplot() of fmr_2 using a 'whitegrid' style. Clear the plot after
showing it.
'''
sns.set_style('whitegrid')
sns.distplot(df['fmr_2'])
plt.show()
plt.clf()
|
[
"ifaizymohd@gmail.com"
] |
ifaizymohd@gmail.com
|
813c427562de14035661bccacb522817f608ea31
|
d247a30f42a26476f8005b0f963880df6ca568b9
|
/4.py
|
7458a177cb3988d4c433fa5e94c8dbe9b8e228d1
|
[] |
no_license
|
hurtadojara/AirBnB_clone_v2
|
824689cf440a1717178c6562e06562dc55d1fa69
|
9ca6fd3e61bcdb38bb4e3d2bedbc5026e62c2534
|
refs/heads/master
| 2023-02-18T20:58:49.166959
| 2021-01-21T04:46:02
| 2021-01-21T04:46:02
| 321,381,842
| 0
| 0
| null | 2020-12-16T00:38:40
| 2020-12-14T14:57:40
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,626
|
py
|
#!/usr/bin/python3
"""
Fabric script that distributes an archive to web servers
"""
from fabric.api import *
from fabric.operations import put, run, sudo
import os
# NOTE(review): run/local/sudo/env are imported twice (also via the * above);
# harmless but redundant.
from fabric.api import run, local, sudo, env
from datetime import datetime

# Timestamp is captured once at import time, so every do_pack() call in the
# same process produces the same archive name.
dt = datetime.now()

# Target web servers and SSH user for all fabric operations.
env.hosts = ['35.237.118.171', '35.237.236.118']
env.user = 'ubuntu'
def do_pack():
    """Pack the web_static tree into a timestamped versions/*.tgz archive.

    Returns:
        The archive path on success, None when the tar command failed.
    """
    file_name = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(
        dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
    local('mkdir -p versions')
    command = local("tar -cvzf " + file_name + " ./web_static/")
    # Bug fix: fabric.api.local() returns a result object, never the int 0,
    # so the old `command == 0` test was always False and do_pack always
    # returned None. Check the result's `.succeeded` flag instead.
    if command.succeeded:
        return file_name
    return None
def do_deploy(archive_path):
    """Upload *archive_path* to the web servers and make it the live release.

    Extracts the archive under /data/web_static/releases/<name>/ and repoints
    the /data/web_static/current symlink at it.

    Returns:
        True when every remote step succeeds, False otherwise.
    """
    if not os.path.exists(archive_path):
        return False
    file_name = os.path.splitext(os.path.split(archive_path)[1])[0]
    target = '/data/web_static/releases/' + file_name
    # Basename of the uploaded file as it will appear under /tmp/.
    path = archive_path.split('/')[1]
    try:
        put(archive_path, "/tmp/")
        run('sudo mkdir -p ' + target)
        run('sudo tar -xzf /tmp/' + path + ' -C ' + target + '/')
        run('sudo rm /tmp/' + path)
        # Flatten the extracted web_static/ directory into the release root.
        run('sudo mv ' + target + '/web_static/* ' + target + '/')
        run('sudo rm -rf ' + target + '/web_static')
        run('sudo rm -rf /data/web_static/current')
        run('sudo ln -s ' + target + '/ /data/web_static/current')
        return True
    except Exception:
        # Bug fix: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch only real errors. (A commented-out
        # duplicate of the remote command block was also removed.)
        return False
def deploy():
    """Build a fresh web_static archive and push it to the web servers.

    Returns False when packing fails, otherwise do_deploy's result.
    """
    archive = do_pack()
    return False if archive is None else do_deploy(archive)
def do_clean(number=0):
    """Delete out-of-date archives locally and on the web servers.

    Keeps the `number` most recent archives; 0 and 1 both mean "keep only
    the newest one".
    """
    keep = int(number)
    # `tail -n +K` skips the first K-1 newest entries; keeping 1 archive
    # means starting the deletion at line 2.
    start = 2 if keep in (0, 1) else keep + 1
    cleanup = 'ls -t | tail -n +{} | xargs rm -rfv'.format(start)
    with lcd('versions'):
        local(cleanup)
    with cd('/data/web_static/releases/'):
        run(cleanup)
|
[
"andreshurtadojaramillo@gmail.com"
] |
andreshurtadojaramillo@gmail.com
|
c75befccf0a71431a43b7b0548261388ce48988d
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit1922.py
|
41b4d478181709e482e2313f7435b41cda296cb4
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,626
|
py
|
# qubit number=5
# total number=63
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase oracle Z_f over n qubits.

    For every n-bit assignment where f(bits) == "1": X gates map that basis
    state onto |11...1>, a multi-controlled phase of pi marks it (equivalent
    to a multi-controlled Z), and the X gates are undone.
    """
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    oracle = QuantumCircuit(controls, name="Zf")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)  # bitstring of basis state i, zero-padded
        if f(rep) == "1":
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            # oracle.h(controls[n])
            if n >= 2:
                oracle.mcu1(pi, controls[1:], controls[0])
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the auto-generated Grover-style benchmark circuit.

    The trailing `# number=NN` tags are gate identifiers emitted by the
    benchmark generator; the exact gate sequence IS the benchmark, so the
    order below must not be changed.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    # Initial Hadamards plus generator-inserted mutation gates.
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[3]) # number=59
    prog.h(input_qubit[4]) # number=21
    prog.h(input_qubit[0]) # number=43
    prog.cz(input_qubit[4],input_qubit[0]) # number=44
    prog.h(input_qubit[0]) # number=45
    prog.h(input_qubit[0]) # number=56
    prog.cz(input_qubit[4],input_qubit[0]) # number=57
    prog.h(input_qubit[0]) # number=58
    prog.cx(input_qubit[4],input_qubit[0]) # number=60
    prog.z(input_qubit[4]) # number=61
    prog.cx(input_qubit[4],input_qubit[0]) # number=62
    prog.cx(input_qubit[4],input_qubit[0]) # number=48
    prog.h(input_qubit[0]) # number=37
    prog.cz(input_qubit[4],input_qubit[0]) # number=38
    prog.h(input_qubit[0]) # number=39

    Zf = build_oracle(n, f)

    # Standard Grover iteration count: floor(sqrt(2^n) * pi / 4).
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        # Oracle followed by the (mutated) diffusion block.
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])

        prog.h(input_qubit[0]) # number=1
        prog.rx(-1.0430087609918113,input_qubit[4]) # number=36
        prog.h(input_qubit[1]) # number=2
        prog.h(input_qubit[2]) # number=7
        prog.h(input_qubit[3]) # number=8
        prog.cx(input_qubit[1],input_qubit[0]) # number=40
        prog.cx(input_qubit[1],input_qubit[0]) # number=52
        prog.x(input_qubit[0]) # number=53
        prog.cx(input_qubit[1],input_qubit[0]) # number=54
        prog.h(input_qubit[0]) # number=49
        prog.cz(input_qubit[1],input_qubit[0]) # number=50
        prog.h(input_qubit[0]) # number=51
        prog.x(input_qubit[1]) # number=10
        prog.rx(-0.06597344572538572,input_qubit[3]) # number=27
        prog.cx(input_qubit[0],input_qubit[2]) # number=22
        prog.x(input_qubit[2]) # number=23
        prog.h(input_qubit[2]) # number=28
        prog.cz(input_qubit[0],input_qubit[2]) # number=29
        prog.h(input_qubit[2]) # number=30
        prog.x(input_qubit[3]) # number=12

        if n>=2:
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])

        prog.x(input_qubit[0]) # number=13
        prog.x(input_qubit[1]) # number=14
        prog.x(input_qubit[2]) # number=15
        prog.x(input_qubit[3]) # number=16
        prog.h(input_qubit[4]) # number=35
        prog.h(input_qubit[0]) # number=17
        prog.rx(2.4912829742967055,input_qubit[2]) # number=26
        prog.h(input_qubit[1]) # number=18
        prog.h(input_qubit[2]) # number=19
        prog.h(input_qubit[2]) # number=55
        prog.h(input_qubit[2]) # number=25
        prog.h(input_qubit[3]) # number=20
    # circuit end

    # Measure every work qubit into its classical bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])

    return prog
if __name__ == '__main__':
    # Oracle marks the single 5-bit key "00000".
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    backend = BasicAer.get_backend('qasm_simulator')

    sample_shot =7924
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Transpile against a mock device to record a realistic depth.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)

    # Dump counts, depth, and the transpiled circuit for offline comparison.
    writefile = open("../data/startQiskit1922.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
0e8548ada59f62ec926c7d0af635680d2de0bb83
|
3227285b8463b7d7ff7588f38829184d1d0d96cd
|
/icare/core/endpoints.py
|
589e98bd16972b1638de0a127302d783b1bbe412
|
[] |
no_license
|
IpoLa/icare
|
40592f657ea44340ffe2e5a71d48860d8c9692ad
|
d3d894f88290c6afcabfde95ae49647254c461d0
|
refs/heads/master
| 2023-04-02T16:41:39.803276
| 2021-04-10T12:25:01
| 2021-04-10T12:25:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,631
|
py
|
"""API Endpoints For Tasks, Lists, and Folders"""
import logging
import sys
import time
import datetime
from rest_framework.generics import ListAPIView, RetrieveAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework.response import Response
from . import utils as u, payloads as p
from .models import List, Folder, Task, Attachment
from .serializers import (
ListSerializer,
FolderSerializer,
FolderDetailSerializer,
NewRequestSerializer,
AttachmentSerializer,
UpdateRequestSerializer,
)
logger = logging.getLogger(__name__)
# TODO Remove this after making sure it's not needed
class Lists(ListAPIView):
    """Authenticated read-only endpoint returning every List object."""
    permission_classes = (IsAuthenticated,)
    serializer_class = ListSerializer
    queryset = List.objects.all()
class Folders(ListAPIView):
    """Authenticated read-only endpoint listing active Folder objects."""
    permission_classes = (IsAuthenticated,)
    serializer_class = FolderSerializer
    queryset = Folder.objects.filter(is_active=True)
class FolderDetail(RetrieveAPIView):
    """Authenticated detail endpoint for a single active Folder."""
    permission_classes = (IsAuthenticated,)
    serializer_class = FolderDetailSerializer
    queryset = Folder.objects.filter(is_active=True)
class ICareRequest(APIView):
permission_classes = (IsAuthenticated,)
def post(self, request, *args, **kwargs):
"""New Request"""
# data validation
serializer = NewRequestSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
# data extraction
vd = serializer.validated_data
name = vd.get("name")
description = vd.get("description")
due_date = vd.get("due_date")
_list = vd.get("list")
# logic
clickup_description = f"{description}\n\n user's email: {request.user.email}\n"
if due_date:
date_to_time = time.mktime(
datetime.datetime.strptime(str(due_date), "%Y-%m-%d").timetuple()
)
due_date = int(date_to_time * 1000)
remote_task = u.create_task(
_list.clickup_id,
p.create_task_payload(name, clickup_description, due_date=due_date,),
)
if remote_task and remote_task.get("err"):
logger.error(str(remote_task), exc_info=sys.exc_info())
return Response(
{"detail": "Request creation failed. Try again later"}, status=400
)
Task.objects.create(
clickup_id=remote_task.get("id"),
created_json=remote_task,
name=remote_task.get("name"),
description=description,
_list=_list,
is_active=True,
user=request.user,
status=remote_task.get("status").get("status"),
)
return Response({"detail": "Request created successfully!"})
def put(self, request, *args, **kwargs):
"""Update Request"""
# data validation
serializer = UpdateRequestSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
# data extraction
vd = serializer.validated_data
name = vd.get("name")
description = vd.get("description")
due_date = vd.get("due_date")
task = vd.get("task")
# logic
clickup_description = f"{description}\n\n user's email: {request.user.email}\n"
if due_date:
date_to_time = time.mktime(
datetime.datetime.strptime(str(due_date), "%Y-%m-%d").timetuple()
)
due_date = int(date_to_time * 1000)
remote_task = u.update_task(
task.clickup_id,
p.create_task_payload(name, clickup_description, due_date=due_date,),
)
if remote_task and remote_task.get("err"):
logger.error(str(remote_task), exc_info=sys.exc_info())
return Response(
{"detail": "Updating Request failed. Try again later"}, status=400
)
# update task locally
task.name = name
task.description = description
task.updated_json = remote_task
task.save()
return Response({"detail": "Request updated successfully!"})
class RequestAttachment(APIView):
def post(self, request, *args, **kwargs):
serializer = AttachmentSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
vd = serializer.validated_data
file = vd.get("attachment")
task = vd.get("task")
created_json = u.attachment(task.clickup_id, file)
Attachment.objects.create(task=task, created_json=created_json)
return Response(created_json)
|
[
"ahmed@shahwan.me"
] |
ahmed@shahwan.me
|
b7438ab13396c7815765ea2582297ccd1491dc3e
|
b1080fd7844e104522e73ca1a6790f1a578df967
|
/spider/html_parser.py
|
8bae02a0878472a5973c36a2a6129f83af0ec2c8
|
[] |
no_license
|
sundawei2018/simple-web-crawler
|
5b8286fa39a4451608d16d151a2220c10d838aa4
|
e064adf97d8ecf43af3018bfc75c485e48c5278b
|
refs/heads/master
| 2021-01-22T11:33:05.356977
| 2017-09-27T19:10:45
| 2017-09-27T19:10:45
| 92,710,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,600
|
py
|
'''
Created on May 29, 2017
@author: Dave
'''
from bs4 import BeautifulSoup
import re
import urlparse
class HtmlParser(object):
def _get_new_urls(self, page_url, soup):
new_urls = set()
# // /view/123.htm <a href="/wiki/Measuring_programming_language_popularity" title="Measuring programming language popularity">widely used</a>
# r"/view/\d+\.htm"
links = soup.find_all('a', href = re.compile(r"/wiki/.*"))
for link in links:
new_url = link['href']
new_full_url = urlparse.urljoin(page_url, new_url)
new_urls.add(new_full_url)
return new_urls
def _get_new_data(self, page_url, soup):
res_data = {}
# <h1 id="firstHeading" class="firstHeading" lang="en">Python (programming language)</h1>
title_node = soup.find('h1', class_ = "firstHeading")
res_data['title'] = title_node.get_text()
# <div id="mw-content-text" lang="en" dir="ltr" class="mw-content-ltr"><table class="infobox vevent" style="width:22em">
summary_node = soup.find('div', class_ = "mw-content-ltr")
res_data['summary'] = summary_node.get_text()
res_data['url'] = page_url
return res_data
def parse(self, page_url, html_cont):
if page_url is None or html_cont is None:
return
soup = BeautifulSoup(html_cont, 'html.parser', from_encoding = 'utf-8')
new_urls = self._get_new_urls(page_url,soup)
new_data = self._get_new_data(page_url,soup)
return new_urls, new_data
|
[
"you@example.com"
] |
you@example.com
|
95bb0f4a76ab7b372f792b8f38730e4f9b750a89
|
96e77a734bf865f998e719fafcaabd120b93759c
|
/Python/Django/upload_proj/upload_proj/urls.py
|
590b4c8dd4fd30d3bad99fd71d00afaef17605cb
|
[] |
no_license
|
imronha/codingdojoprojects
|
3346feca1c03f625270eeded2cfb6a9b0249ab56
|
1b40688372844eca3fd02401f397c4ba4b334ce7
|
refs/heads/master
| 2020-04-05T12:59:14.237411
| 2017-11-06T08:27:06
| 2017-11-06T08:27:06
| 94,944,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,067
|
py
|
"""upload_proj URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
#
# urlpatterns = [
# url(r'^admin/', admin.site.urls),
# ]
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('apps.upload_app.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"imronha@gmail.com"
] |
imronha@gmail.com
|
00217f082f757a0fe2f66ce46a6667c4c27cdde2
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/contrib/cv/classification/VOLO/timm/models/layers/norm_act.py
|
e4cfadceba41b78d282416d9b4e949fded34f124
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 5,627
|
py
|
"""
BSD 3-Clause License
Copyright (c) Soumith Chintala 2016,
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Copyright 2020 Huawei Technologies Co., Ltd
Licensed under the BSD 3-Clause License (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://spdx.org/licenses/BSD-3-Clause.html
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
""" Normalization + Activation Layers
"""
import torch
from torch import nn as nn
from torch.nn import functional as F
from .create_act import get_act_layer
class BatchNormAct2d(nn.BatchNorm2d):
"""BatchNorm + Activation
This module performs BatchNorm + Activation in a manner that will remain backwards
compatible with weights trained with separate bn, act. This is why we inherit from BN
instead of composing it as a .bn member.
"""
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True,
apply_act=True, act_layer=nn.ReLU, inplace=True, drop_block=None):
super(BatchNormAct2d, self).__init__(
num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)
if isinstance(act_layer, str):
act_layer = get_act_layer(act_layer)
if act_layer is not None and apply_act:
act_args = dict(inplace=True) if inplace else {}
self.act = act_layer(**act_args)
else:
self.act = nn.Identity()
def _forward_jit(self, x):
""" A cut & paste of the contents of the PyTorch BatchNorm2d forward function
"""
# exponential_average_factor is self.momentum set to
# (when it is available) only so that if gets updated
# in ONNX graph when this node is exported to ONNX.
if self.momentum is None:
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum
if self.training and self.track_running_stats:
# TODO: if statement only here to tell the jit to skip emitting this when it is None
if self.num_batches_tracked is not None:
self.num_batches_tracked += 1
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
else: # use exponential moving average
exponential_average_factor = self.momentum
x = F.batch_norm(
x, self.running_mean, self.running_var, self.weight, self.bias,
self.training or not self.track_running_stats,
exponential_average_factor, self.eps)
return x
@torch.jit.ignore
def _forward_python(self, x):
return super(BatchNormAct2d, self).forward(x)
def forward(self, x):
# FIXME cannot call parent forward() and maintain jit.script compatibility?
if torch.jit.is_scripting():
x = self._forward_jit(x)
else:
x = self._forward_python(x)
x = self.act(x)
return x
class GroupNormAct(nn.GroupNorm):
# NOTE num_channel and num_groups order flipped for easier layer swaps / binding of fixed args
def __init__(self, num_channels, num_groups, eps=1e-5, affine=True,
apply_act=True, act_layer=nn.ReLU, inplace=True, drop_block=None):
super(GroupNormAct, self).__init__(num_groups, num_channels, eps=eps, affine=affine)
if isinstance(act_layer, str):
act_layer = get_act_layer(act_layer)
if act_layer is not None and apply_act:
act_args = dict(inplace=True) if inplace else {}
self.act = act_layer(**act_args)
else:
self.act = nn.Identity()
def forward(self, x):
x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
x = self.act(x)
return x
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
a9b9dc8b26570b265d6c7cfafb8279ee6667ae18
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03786/s330433957.py
|
3eaecf5a185424a9d32de2dcb05108a486f348d9
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
N = int(input())
A = list(map(int, input().split()))
A.sort()
ans = 1
for i in range(1, N):
if A[i] > A[i - 1] * 2:
ans = 1
else:
ans += 1
A[i] += A[i - 1]
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
af4faf665997165566395bc98e79bd4d2b4a92d5
|
caaf04a58abe96563df1dbc88abe8594047fded9
|
/easy/problem_897_increasing_order_search_tree.py
|
962c3c353990ab9ebdee1a80a0fb89acac8892aa
|
[] |
no_license
|
EricMontague/Leetcode-Solutions
|
f1b09781b0afd60c79d55f65fe0552c80a928ac7
|
fd1e40ace51fe2a3cc6dadb3fe5872c7fa149188
|
refs/heads/master
| 2021-01-09T20:00:15.580735
| 2020-12-14T22:24:24
| 2020-12-14T22:24:24
| 242,441,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,397
|
py
|
"""This file contains my solutions for Leetcode problem 897:
Increasing Order Search Tree.
"""
# Recursive Solution
# time complexity: O(n), where 'n' is the number of nodes
# space complexity: O(h), where 'h' is the height of the tree
class Solution:
def increasingBST(self, root: TreeNode) -> TreeNode:
temp = TreeNode(None)
self.last_node = temp
self.reassign_pointers(root)
return temp.right
def reassign_pointers(self, current_node):
if current_node is not None:
self.reassign_pointers(current_node.left)
current_node.left = None
self.last_node.right = current_node
self.last_node = current_node
self.reassign_pointers(current_node.right)
# Iterative solution
# time complexity: O(n), where 'n' is the number of nodes
# space complexity: O(h), where 'h' is the height of the tree
class Solution:
def increasingBST(self, root: TreeNode) -> TreeNode:
temp = TreeNode(None)
last_node = temp
stack = []
while stack or root:
if root is not None:
stack.append(root)
root = root.left
else:
node = stack.pop()
node.left = None
last_node.right = node
last_node = node
root = node.right
return temp.right
|
[
"eric.g.montague@gmail.com"
] |
eric.g.montague@gmail.com
|
171b0832805242d9b1cf780276678c867032d059
|
7bededcada9271d92f34da6dae7088f3faf61c02
|
/pypureclient/flasharray/FA_2_20/models/volume_group.py
|
57b238464e1886e5047093e4b1a5baea380fdb03
|
[
"BSD-2-Clause"
] |
permissive
|
PureStorage-OpenConnect/py-pure-client
|
a5348c6a153f8c809d6e3cf734d95d6946c5f659
|
7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e
|
refs/heads/master
| 2023-09-04T10:59:03.009972
| 2023-08-25T07:40:41
| 2023-08-25T07:40:41
| 160,391,444
| 18
| 29
|
BSD-2-Clause
| 2023-09-08T09:08:30
| 2018-12-04T17:02:51
|
Python
|
UTF-8
|
Python
| false
| false
| 6,060
|
py
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.20
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_20 import models
class VolumeGroup(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'destroyed': 'bool',
'qos': 'Qos',
'priority_adjustment': 'PriorityAdjustment',
'space': 'Space',
'time_remaining': 'int',
'volume_count': 'int',
'pod': 'Reference'
}
attribute_map = {
'id': 'id',
'name': 'name',
'destroyed': 'destroyed',
'qos': 'qos',
'priority_adjustment': 'priority_adjustment',
'space': 'space',
'time_remaining': 'time_remaining',
'volume_count': 'volume_count',
'pod': 'pod'
}
required_args = {
}
def __init__(
self,
id=None, # type: str
name=None, # type: str
destroyed=None, # type: bool
qos=None, # type: models.Qos
priority_adjustment=None, # type: models.PriorityAdjustment
space=None, # type: models.Space
time_remaining=None, # type: int
volume_count=None, # type: int
pod=None, # type: models.Reference
):
"""
Keyword args:
id (str): A globally unique, system-generated ID. The ID cannot be modified and cannot refer to another resource.
name (str): A user-specified name. The name must be locally unique and can be changed.
destroyed (bool): Returns a value of `true` if the volume group has been destroyed and is pending eradication. Before the `time_remaining` period has elapsed, the destroyed volume group can be recovered by setting `destroyed=false`. After the `time_remaining` period has elapsed, the volume group is permanently eradicated and cannot be recovered.
qos (Qos)
priority_adjustment (PriorityAdjustment)
space (Space)
time_remaining (int): The amount of time left until the destroyed volume group is permanently eradicated, measured in milliseconds.
volume_count (int): The number of volumes in the volume group.
pod (Reference): A reference to the pod.
"""
if id is not None:
self.id = id
if name is not None:
self.name = name
if destroyed is not None:
self.destroyed = destroyed
if qos is not None:
self.qos = qos
if priority_adjustment is not None:
self.priority_adjustment = priority_adjustment
if space is not None:
self.space = space
if time_remaining is not None:
self.time_remaining = time_remaining
if volume_count is not None:
self.volume_count = volume_count
if pod is not None:
self.pod = pod
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `VolumeGroup`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `VolumeGroup`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `VolumeGroup`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `VolumeGroup`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(VolumeGroup, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VolumeGroup):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"noreply@github.com"
] |
PureStorage-OpenConnect.noreply@github.com
|
182c0906a91a5e9b134f3f40fa970298ee44e88b
|
4dc927b64f02d305bc1c49ca8528258fd1f0ee0f
|
/mergify_engine/tests/functional/actions/test_close.py
|
e7c2d2ba2ea409770ded0ed0380b90fe331932be
|
[
"Apache-2.0"
] |
permissive
|
okurz/mergify-engine
|
cccb1198fe5a2d46602f96db92fcc6a737ce5991
|
377de815a58408f97ddb8c917507a0a997276e5f
|
refs/heads/master
| 2022-11-09T06:35:50.800478
| 2020-06-25T13:18:40
| 2020-06-25T16:25:21
| 275,147,323
| 0
| 0
|
Apache-2.0
| 2020-06-26T12:09:30
| 2020-06-26T12:09:29
| null |
UTF-8
|
Python
| false
| false
| 3,330
|
py
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2020 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
from mergify_engine import context
from mergify_engine.tests.functional import base
class TestCloseAction(base.FunctionalTestBase):
def test_close(self):
rules = {
"pull_request_rules": [
{
"name": "close",
"conditions": [f"base={self.master_branch_name}"],
"actions": {"close": {"message": "WTF?"}},
}
]
}
self.setup_repo(yaml.dump(rules))
p, _ = self.create_pr()
p.update()
self.assertEqual("closed", p.state)
self.assertEqual("WTF?", list(p.get_issue_comments())[-1].body)
def test_close_template(self):
rules = {
"pull_request_rules": [
{
"name": "close",
"conditions": [f"base={self.master_branch_name}"],
"actions": {"close": {"message": "Thank you {{author}}"}},
}
]
}
self.setup_repo(yaml.dump(rules))
p, _ = self.create_pr()
p.update()
self.assertEqual("closed", p.state)
comments = list(p.get_issue_comments())
self.assertEqual(f"Thank you {self.u_fork.login}", comments[-1].body)
def _test_close_template_error(self, msg):
rules = {
"pull_request_rules": [
{
"name": "close",
"conditions": [f"base={self.master_branch_name}"],
"actions": {"close": {"message": msg}},
}
]
}
self.setup_repo(yaml.dump(rules))
p, _ = self.create_pr()
p.update()
ctxt = context.Context(self.cli_integration, p.raw_data, {})
assert len(ctxt.pull_engine_check_runs) == 1
check = ctxt.pull_engine_check_runs[0]
assert "failure" == check["conclusion"]
assert "The Mergify configuration is invalid" == check["output"]["title"]
return check
def test_close_template_syntax_error(self):
check = self._test_close_template_error(msg="Thank you {{",)
assert (
"""Template syntax error @ data['pull_request_rules'][0]['actions']['close']['message'][line 1]
```
unexpected 'end of template'
```"""
== check["output"]["summary"]
)
def test_close_template_attribute_error(self):
check = self._test_close_template_error(msg="Thank you {{hello}}",)
assert (
"""Template syntax error for dictionary value @ data['pull_request_rules'][0]['actions']['close']['message']
```
Unknown pull request attribute: hello
```"""
== check["output"]["summary"]
)
|
[
"37929162+mergify[bot]@users.noreply.github.com"
] |
37929162+mergify[bot]@users.noreply.github.com
|
c0ff330448d02c9c7b04771303a6c644b2223736
|
528def9844f2ce13e6a358938b0b560945ab2248
|
/vc/migrations/0013_vcbindfilter_afi.py
|
5dcaa794a39a603e26ecd734e9250503bad8d48a
|
[
"BSD-3-Clause"
] |
permissive
|
skripkar/noc
|
055afbd42ab4c447d05d2cde0a822916f9e0844e
|
df193b99e478fe39157c8d27ff4098262d9cb734
|
refs/heads/master
| 2020-04-10T12:53:09.602779
| 2018-12-08T07:50:30
| 2018-12-08T07:50:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
# encoding: utf-8
import datetime
from south.db import db
from django.db import models
class Migration:
def forwards(self):
db.add_column("vc_vcbindfilter","afi",models.CharField("Address Family",max_length=1,choices=[("4","IPv4"),("6","IPv6")],default="4"))
def backwards(self):
db.delete_column("vc_vcbindfilter","afi")
|
[
"dvolodin7@gmail.com"
] |
dvolodin7@gmail.com
|
ca4dabcf067a7aa55aa9decd5fb8b930e9b877b2
|
03708e4eb8cfd79b45956f930bb50c1f54383bef
|
/lbdrabbit/example/cf.py
|
8729d0ac73093d564912f707c0fffd6d0a8c84ba
|
[
"MIT"
] |
permissive
|
MacHu-GWU/lbdrabbit-project
|
3d55e194328391f67c2c12c40b7762c18a2647ac
|
e808f116bc85d95f3d545e085bede56df41b6d92
|
refs/heads/master
| 2020-08-02T02:21:46.634030
| 2019-10-07T11:39:57
| 2019-10-07T11:39:57
| 211,206,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,094
|
py
|
# -*- coding: utf-8 -*-
import troposphere_mate as tm
from troposphere_mate import apigateway, awslambda, iam
from troposphere_mate.canned.iam import AWSManagedPolicyArn, AWSServiceName, create_assume_role_policy_document
from .app_config_init import app_config
template = tm.Template()
param_env_name = tm.Parameter(
"EnvironmentName",
Type="String",
)
template.add_parameter(param_env_name)
rest_api = apigateway.RestApi(
"RestApi",
template=template,
Name=tm.helper_fn_sub("{}", param_env_name),
EndpointConfiguration=apigateway.EndpointConfiguration(
Types=["REGIONAL", ]
)
)
lambda_code = awslambda.Code(
S3Bucket=app_config.LAMBDA_CODE_S3_BUCKET.get_value(),
S3Key=app_config.LAMBDA_CODE_S3_KEY.get_value(),
)
iam_role = iam.Role(
"IamRoleForLbdFunc",
template=template,
RoleName=tm.helper_fn_sub("{}-lbd-func", param_env_name),
AssumeRolePolicyDocument=create_assume_role_policy_document([
AWSServiceName.aws_Lambda
]),
ManagedPolicyArns=[
AWSManagedPolicyArn.awsLambdaBasicExecutionRole
]
)
|
[
"husanhe@gmail.com"
] |
husanhe@gmail.com
|
d6aaeb64cdcd1c25cf305328848856088316c5c6
|
0b279c246179bc6a76ad17f055ad1dce3402b045
|
/private_production/polarization/2016/crab_TT_NANOAODSIM.py
|
35cd0bc40abfe2728710280b82a9dd7f83514f77
|
[] |
no_license
|
UniMiBAnalyses/CMSSWGeneration
|
a55e6ad840e4f7f9fae6b46a4bb939a288492f10
|
a7acf1a780eeb30e14616fef90ccf389e4367668
|
refs/heads/master
| 2023-09-01T02:01:44.746469
| 2022-01-31T11:01:29
| 2022-01-31T11:01:29
| 212,852,677
| 0
| 2
| null | 2022-06-16T15:23:25
| 2019-10-04T15:57:27
|
Python
|
UTF-8
|
Python
| false
| false
| 840
|
py
|
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = config()
config.General.requestName = 'VBS_SSWW_TT_NANOAODSIM'
config.General.workArea = 'crab_projects'
config.General.transferOutputs = True
config.General.transferLogs = False
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'SMP-RunIISummer16NanoAODv5-00095_1_cfg.py'
config.JobType.numCores = 2
#config.JobType.maxMemoryMB = 6000
config.Data.inputDataset = '/Bulk/jixiao-VBS_SSWW_LT_MINIAODSIM-5f646ecd4e1c7a39ab0ed099ff55ceb9/USER'
config.Data.inputDBS = 'phys03'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 1
config.Data.outLFNDirBase = '/store/user/%s/pol2016' % (getUsernameFromSiteDB())
config.Data.publication = True
config.Data.outputDatasetTag = 'VBS_SSWW_TT_NANOAODSIM'
config.Site.storageSite = 'T2_CN_Beijing'
|
[
"jiexiao@pku.edu.cn"
] |
jiexiao@pku.edu.cn
|
34e1d19f762f7b38df5d8ddbe4a3c6f5668b4c97
|
762cbba14c80f4dd09fa6e5915e094825eef1cae
|
/Next Closest Time.py
|
b4d43d39560330f3dcdda1ade0c4185c1599eed3
|
[] |
no_license
|
arnabs542/Leetcode-18
|
1faff2564b4a5bb970308187a0b71553fd85a250
|
02d31ab3363c92e8fdde15100bf4a3cbcd43ecd0
|
refs/heads/master
| 2022-07-26T12:18:38.834287
| 2020-05-19T05:40:48
| 2020-05-19T05:40:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,477
|
py
|
class Solution(object):
# simulation solution
def nextClosestTime(self, time):
numset = set([int(n) for n in time if n != ":"])
time1 = 60 * int(time[:2]) + int(time[3:])
diff1 = float('Inf')
res1 = []
diff2 = float('-Inf')
res2 = []
for i in xrange(24):
hour = "0" + str(i) if i<10 else str(i)
if not all(int(m) in numset for m in hour):
continue
for j in xrange(60):
minute = "0" + str(j) if j<10 else str(j)
if not all(int(m) in numset for m in minute):
continue
time2 = i*60 + j - time1
if 0 < time2 < diff1:
diff1 = time2
res1 = [hour, minute]
elif time2 < 0 and -time2 > diff2:
diff2 = -time2
res2 = [hour, minute]
if res1 or res2:
return ":".join(res1) if res1 else ":".join(res2)
else:
return time
"""
:type time: str
:rtype: str
"""
# Given a time represented in the format "HH:MM", form the next closest time by reusing the current digits.
# There is no limit on how many times a digit can be reused.
# You may assume the given input string is always valid. For example, "01:34", "12:09" are all valid. "1:34", "12:9" are all invalid.
|
[
"noreply@github.com"
] |
arnabs542.noreply@github.com
|
942256d842444dcc83953a2e252eb27dfde6bb98
|
8749ef627476524710332113796203986f88698c
|
/migrations/versions/3409ef865171_.py
|
4481369a50a1eee247bd2230280223fde1fb86eb
|
[] |
no_license
|
JessicaFeng0926/easy_chinese
|
aff0276591c53e30568819425729117e6afd0fa4
|
7376612c904f9eaaea022e6b36711f2d4ba1e0f9
|
refs/heads/master
| 2020-07-07T13:06:05.180880
| 2020-04-04T09:29:01
| 2020-04-04T09:29:01
| 203,356,569
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,053
|
py
|
"""empty message
Revision ID: 3409ef865171
Revises: 662819aaef6f
Create Date: 2019-08-28 12:43:02.439715
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3409ef865171'
down_revision = '662819aaef6f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('makeuptime',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('teacher_id', sa.Integer(), nullable=True),
sa.Column('make_up_time', sa.DateTime(), nullable=True),
sa.Column('expire', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['teacher_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.add_column('specialrest', sa.Column('expire', sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('specialrest', 'expire')
op.drop_table('makeuptime')
# ### end Alembic commands ###
|
[
"hasayake.hi@163.com"
] |
hasayake.hi@163.com
|
5293b36f206110d464f4ca838fa7203a29b71e61
|
29345337bf86edc938f3b5652702d551bfc3f11a
|
/python/src/main/python/pyalink/alink/tests/examples/operator/stream/test_bisecting_kmeans.py
|
e15839936daa9cca860d9de6d031c9707d1afc45
|
[
"Apache-2.0"
] |
permissive
|
vacaly/Alink
|
32b71ac4572ae3509d343e3d1ff31a4da2321b6d
|
edb543ee05260a1dd314b11384d918fa1622d9c1
|
refs/heads/master
| 2023-07-21T03:29:07.612507
| 2023-07-12T12:41:31
| 2023-07-12T12:41:31
| 283,079,072
| 0
| 0
|
Apache-2.0
| 2020-07-28T02:46:14
| 2020-07-28T02:46:13
| null |
UTF-8
|
Python
| false
| false
| 1,174
|
py
|
import unittest
from pyalink.alink import *
class TestPinjiu(unittest.TestCase):
def test_bisecting_kmeans_op(self):
import numpy as np
import pandas as pd
data = np.array([
[0, "0 0 0"],
[1, "0.1,0.1,0.1"],
[2, "0.2,0.2,0.2"],
[3, "9 9 9"],
[4, "9.1 9.1 9.1"],
[5, "9.2 9.2 9.2"]
])
df = pd.DataFrame({"id": data[:, 0], "vec": data[:, 1]})
inOp1 = BatchOperator.fromDataframe(df, schemaStr='id int, vec string')
inOp2 = StreamOperator.fromDataframe(df, schemaStr='id int, vec string')
kmeans = BisectingKMeansTrainBatchOp().setVectorCol("vec").setK(2)
predictBatch = BisectingKMeansPredictBatchOp().setPredictionCol("pred")
kmeans.linkFrom(inOp1)
predictBatch.linkFrom(kmeans, inOp1)
[model, predict] = collectToDataframes(kmeans, predictBatch)
print(model)
print(predict)
predictStream = BisectingKMeansPredictStreamOp(kmeans).setPredictionCol("pred")
predictStream.linkFrom(inOp2)
predictStream.print(refreshInterval=-1)
StreamOperator.execute()
|
[
"shaomeng.wang.w@gmail.com"
] |
shaomeng.wang.w@gmail.com
|
c62eb787de5a33b3e6dcf5db50bf08a5da118224
|
d9fd9c6329461235f140393f1e934362d0f645df
|
/Unidad 2/Ejercicios Clases/Ejercicio3.py
|
eee26e68253b66b262b7d35e0640a0a53952a22d
|
[
"MIT"
] |
permissive
|
angelxehg/utzac-python
|
e6b5ee988d1d76c549ab0fa49717eb042fa7d91f
|
fb88bcc661518bb35c08a102a67c20d0659f71db
|
refs/heads/main
| 2022-12-02T11:16:27.134741
| 2020-08-14T19:38:33
| 2020-08-14T19:38:33
| 265,944,612
| 0
| 0
|
MIT
| 2020-08-07T21:23:53
| 2020-05-21T20:25:24
|
Python
|
UTF-8
|
Python
| false
| false
| 637
|
py
|
class Numbers():
def __init__(self, num1, num2):
self.__num1 = num1
self.__num2 = num2
def added(self):
return self.__num1 + self.__num2
def subtracted(self):
return self.__num1 - self.__num2
def multiplied(self):
return self.__num1 * self.__num2
def divided(self):
return self.__num1 / self.__num2
numbs = Numbers(
int(input("Ingrese el primer número: ")),
int(input("Ingrese el segundo número: "))
)
print("Sumado:", numbs.added())
print("Restado:", numbs.subtracted())
print("Multiplicado:", numbs.multiplied())
print("Dividido:", numbs.divided())
|
[
"50889225+angelxehg@users.noreply.github.com"
] |
50889225+angelxehg@users.noreply.github.com
|
9a6cc23150389ebdceaa879d64298db70559424b
|
d93fe0484fc3b32c8fd9b33cc66cfd636a148ec4
|
/Argorithm/python/DP/LIS.py
|
69eb80e13fbcae5347af1b90aaabdee9354da92d
|
[] |
no_license
|
wattaihei/ProgrammingContest
|
0d34f42f60fa6693e04c933c978527ffaddceda7
|
c26de8d42790651aaee56df0956e0b206d1cceb4
|
refs/heads/master
| 2023-04-22T19:43:43.394907
| 2021-05-02T13:05:21
| 2021-05-02T13:05:21
| 264,400,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 656
|
py
|
# 蟻本p64より
# O(NlogN)で最長増加部分列のながさを出す
from bisect import bisect_left # 二部探索
N = int(input()) # 主列のながさ
A = list(map(int, input().split()))
INF = max(A) + 1 # どれよりも大きい数を用意(dpを単調増加にするため)
dp = [INF for _ in range(N)] # dp[i]=長さi+1であるような増加部分列における最終要素の最小値
for a in A:
k = bisect_left(dp, a) # すでに入っているものに対してできる限り長くなるような位置を探す
dp[k] = a # 更新
ans = N
for i, d in enumerate(dp):
if d == INF:
ans = i
break
print(ans)
|
[
"wattaihei.rapyuta@gmail.com"
] |
wattaihei.rapyuta@gmail.com
|
b476a93366227bd6b2a335a269a6708ff64d5eed
|
e1d53e786ad70a9ecfa22f7d38f640456c407f7c
|
/RecHitAnalysis/test/validationNtuple_cfg.py
|
99c768be73a431d732f1182e3833a0e050e88b38
|
[] |
no_license
|
hcwhwang/macro
|
feb01ed335e2d2fd3e1de85ca9c441d23c442320
|
261451e2606765cf2c52c7e848bf55b0f55bf21e
|
refs/heads/master
| 2021-01-10T11:37:03.967663
| 2016-01-27T22:39:48
| 2016-01-27T22:39:48
| 50,543,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,488
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("RPCRecHitValidation")
### standard includes
process.load("FWCore.MessageService.MessageLogger_cfi")
process.load("Configuration.EventContent.EventContent_cff")
process.load("Configuration.StandardSequences.Services_cff")
process.load("Configuration.StandardSequences.GeometryDB_cff")
process.load("Configuration.StandardSequences.MagneticField_38T_cff")
process.load("Configuration.StandardSequences.RawToDigi_cff")
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
### conditions
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
#from Configuration.PyReleaseValidation.autoCond import autoCond
#process.GlobalTag.globaltag = autoCond['startup']
process.GlobalTag.globaltag = 'GR_R_74_V8::All'
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(),
secondaryFileNames = cms.untracked.vstring()
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string("rpcVal.root")
)
process.source.fileNames.extend([
'/store/data/Run2012D/SingleMu/RAW-RECO/ZMu-22Jan2013-v1/10000/02040732-DFA7-E211-8C20-E0CB4E29C4B7.root',
'/store/data/Run2012D/SingleMu/RAW-RECO/ZMu-22Jan2013-v1/10000/04009F1C-C8A7-E211-AA4D-E0CB4E1A11A7.root',
'/store/data/Run2012D/SingleMu/RAW-RECO/ZMu-22Jan2013-v1/10000/0857F9FF-BBA7-E211-8433-E0CB4E55363D.root',
'/store/data/Run2012D/SingleMu/RAW-RECO/ZMu-22Jan2013-v1/10000/0A02695F-90A7-E211-896E-001E4F3F3556.root',
'/store/data/Run2012D/SingleMu/RAW-RECO/ZMu-22Jan2013-v1/10000/0C330064-A8A7-E211-BC39-00259073E45E.root',
'/store/data/Run2012D/SingleMu/RAW-RECO/ZMu-22Jan2013-v1/10000/0C7163E9-9FA7-E211-A0D6-00259073E382.root',
'/store/data/Run2012D/SingleMu/RAW-RECO/ZMu-22Jan2013-v1/10000/1010C4E3-A1A7-E211-B288-00259074AE9A.root',
'/store/data/Run2012D/SingleMu/RAW-RECO/ZMu-22Jan2013-v1/10000/128E9086-9EA7-E211-BEF9-001EC9D4A1FD.root',
'/store/data/Run2012D/SingleMu/RAW-RECO/ZMu-22Jan2013-v1/10000/12927D85-F4A7-E211-8ECE-E0CB4E29C4B7.root',
'/store/data/Run2012D/SingleMu/RAW-RECO/ZMu-22Jan2013-v1/10000/12B45D3F-A7A7-E211-A827-00259073E382.root',
])
process.rpcValTree = cms.EDAnalyzer("RPCNtupleMaker",)
process.p = cms.Path(
process.rpcValTree
)
#process.outPath = cms.EndPath(process.out)
|
[
"jhgoh@cern.ch"
] |
jhgoh@cern.ch
|
1a2d329395a42874f6ea1e29ea0f80bcea02959f
|
1252a8d08fb5d21cac64e02369ada8c43c18eee1
|
/integration_tests/samples/basic_usage/rate_limits.py
|
e0d947cbf779ad9fbb8eaabffc11a6bfb8a9d7d7
|
[
"MIT"
] |
permissive
|
qause/python-slack-sdk
|
6d9d41bca7f8cf86cd30089105c98740528ca9a6
|
61f098311adbd6d2904f51541cf5d8bf42c83168
|
refs/heads/main
| 2023-07-27T08:13:15.061105
| 2021-09-12T23:35:08
| 2021-09-12T23:35:08
| 405,300,786
| 2
| 1
|
MIT
| 2021-09-12T23:35:09
| 2021-09-11T06:18:20
| null |
UTF-8
|
Python
| false
| false
| 1,122
|
py
|
import logging
logging.basicConfig(level=logging.DEBUG)
# export SLACK_API_TOKEN=xoxb-***
# python3 integration_tests/samples/basic_usage/rate_limits.py
import os
import time
from slack_sdk.web import WebClient
from slack_sdk.errors import SlackApiError
client = WebClient(token=os.environ["SLACK_API_TOKEN"])
# Simple wrapper for sending a Slack message
def send_slack_message(channel, message):
return client.chat_postMessage(channel=channel, text=message)
# Make the API call and save results to `response`
channel = "#random"
message = "Hello, from Python!"
# Do until being rate limited
while True:
try:
response = send_slack_message(channel, message)
except SlackApiError as e:
if e.response["error"] == "ratelimited":
# The `Retry-After` header will tell you how long to wait before retrying
delay = int(e.response.headers["Retry-After"])
print(f"Rate limited. Retrying in {delay} seconds")
time.sleep(delay)
response = send_slack_message(channel, message)
else:
# other errors
raise e
|
[
"seratch@gmail.com"
] |
seratch@gmail.com
|
7329bd6109cf6e48733830884ffbb3417c689ab5
|
de95e9ace929f6279f5364260630e4bf7a658c1c
|
/eachFunction.py
|
ee6c29b3108100f2c9fd3f792773edca3e000cd5
|
[] |
no_license
|
ludwigwittgenstein2/Algorithms-Python
|
ceaf0739b8582f7bd749a9b3f52f283765044744
|
c5bed8b2e398c218d1f36e72b05a3f5545cf783a
|
refs/heads/master
| 2021-06-19T11:40:31.012268
| 2017-07-02T04:59:20
| 2017-07-02T04:59:20
| 75,953,711
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 847
|
py
|
#!/bin/Python
# timing 7 different Python sorting algorithms with a list of integers
# each function is given the same list (fresh copy each time)
# tested with Python24 vegaseat 21jan2006
import random # for generating random numbers
import time # for timing each sort function with time.clock()
DEBUG = False # set True to check results of each sort
N = 1000 # number of elements in list
list1 = [] # list of integer elements
for i in range(0, N):
list1.append(random.randint(0, N-1))
#print list1 # test
def print_timing(func):
def wrapper(*arg):
t1 = time.clock()
res = func(*arg)
t2 = time.clock()
print '%s took %0.3fms' % (func.func_name, (t2-t1)*1000.0)
return res
return wrapper
def main():
print_timing(func)
if __name__ == '__main__':
main()
|
[
"penpals.oranges14@gmail.com"
] |
penpals.oranges14@gmail.com
|
be8e4caefa4443f42a959e72f6042da3658a3c2e
|
de33d07019f4be4439de4f07585a0906215ef562
|
/剑指Offer 65. 不用加减乘除做加法.py
|
74360ca3d2729bf88f6eafeb69b654efc708d908
|
[] |
no_license
|
Qinpeng96/leetcode
|
defbf7873fc07ed42fd34bb0c3e3282ff41ed266
|
e1dfd95c5bf09ffcd42934c1aca21a10337c3e7e
|
refs/heads/master
| 2021-07-13T10:53:32.109431
| 2020-08-19T09:33:36
| 2020-08-19T09:33:36
| 196,947,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,563
|
py
|
"""
[剑指 Offer 65. 不用加减乘除做加法](https://leetcode-cn.com/problems/bu-yong-jia-jian-cheng-chu-zuo-jia-fa-lcof/)
写一个函数,求两个整数之和,要求在函数体内不得使用 “+”、“-”、“*”、“/” 四则运算符号。
示例:
输入: a = 1, b = 1
输出: 2
提示:
a, b 均可能是负数或 0
结果不会溢出 32 位整数
***
在使用位运算计算数值的时候,注意有可能有负值参与计算,看[大佬的解释](https://leetcode-cn.com/problems/bu-yong-jia-jian-cheng-chu-zuo-jia-fa-lcof/solution/mian-shi-ti-65-bu-yong-jia-jian-cheng-chu-zuo-ji-7/)

#
print(hex(1)) # = 0x1 补码
print(hex(-1)) # = -0x1 负号 + 原码 ( Python 特色,Java 会直接输出补码)
print(hex(1 & 0xffffffff)) # = 0x1 正数补码
print(hex(-1 & 0xffffffff)) # = 0xffffffff 负数补码
print(-1 & 0xffffffff) # = 4294967295 ( Python 将其认为正数)
"""
```python
class Solution:
def add(self, a: int, b: int) -> int:
x = 0xffffffff
a, b = a & x, b & x
while b != 0:
a, b = (a ^ b), (a & b) << 1 & x
return a if a <= 0x7fffffff else ~(a ^ x)
作者:jyd
链接:https://leetcode-cn.com/problems/bu-yong-jia-jian-cheng-chu-zuo-jia-fa-lcof/solution/mian-shi-ti-65-bu-yong-jia-jian-cheng-chu-zuo-ji-7/
来源:力扣(LeetCode)
著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。
```
|
[
"Qinpeng96@users.noreply.github.com"
] |
Qinpeng96@users.noreply.github.com
|
ef7a2c0953da342a2aa5615bf0728b055ce4ca83
|
2fd4de2f0820f186c735f0619bce2a0318bbfc38
|
/appzoo/utils/log/__init__.py
|
a70361c3e3b8431100d15650b5da10d40acb287d
|
[
"MIT"
] |
permissive
|
SunYanCN/AppZoo
|
e90b778fefdaf1a440c3fd40d078b5396e4e3f06
|
91b04cc75fcc5f70ae5819e98233ea9146c1f001
|
refs/heads/master
| 2023-08-22T05:41:22.175291
| 2021-10-12T13:37:21
| 2021-10-12T13:37:21
| 359,024,301
| 0
| 0
|
MIT
| 2021-09-05T12:24:47
| 2021-04-18T02:12:40
|
Python
|
UTF-8
|
Python
| false
| false
| 504
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : tql-App.
# @File : __init__.py
# @Time : 2019-12-10 17:24
# @Author : yuanjie
# @Email : yuanjie@xiaomi.com
# @Software : PyCharm
# @Description :
from loguru import logger
trace = logger.add('runtime_{time}.log', rotation="100 MB", retention='10 days')
logger.debug('this is a debug message')
if __name__ == '__main__':
@logger.catch()
def f():
1/0
return 1111
print(f())
|
[
"313303303@qq.com"
] |
313303303@qq.com
|
90c6ef60f816c6a1349f4d73159c4ae948447b06
|
14afcc5e2b8bdb3d91b500f6e7985d8a3378e929
|
/src/344.反转字符串.py
|
f3629f9c7014601a392eb49ac34f20c25e72ca6c
|
[] |
no_license
|
hysapphire/leetcode-python
|
8569a0e76f8917165e6b9fb25bfef1afc1186e3c
|
8e338ee7a5c9f124e897491d6a1f4bcd1d1a6270
|
refs/heads/master
| 2022-12-03T15:17:52.557115
| 2020-08-17T14:19:59
| 2020-08-17T14:19:59
| 278,781,919
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
#
# @lc app=leetcode.cn id=344 lang=python3
#
# [344] 反转字符串
#
# @lc code=start
class Solution:
def reverseString(self, s: List[str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
i = 0
j = len(s) - 1
while i < j:
s[i], s[j] = s[j], s[i]
i += 1
j -= 1
# @lc code=end
|
[
"huoyang93@qq.com"
] |
huoyang93@qq.com
|
995563648da4897a4425eded36f6d0dc3477e464
|
069c2295076c482afadfe6351da5ae02be8e18e6
|
/django/core/files/storage/mixins.py
|
663a163beae1f8780a88003792849fbd7b201fd8
|
[
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Python-2.0.1",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] |
permissive
|
django/django
|
5eb557f57053631cd4f566f451e43197309dbeeb
|
c74a6fad5475495756a5bdb18b2cab2b68d429bc
|
refs/heads/main
| 2023-09-01T03:43:44.033530
| 2023-08-31T08:27:32
| 2023-08-31T08:27:32
| 4,164,482
| 73,530
| 38,187
|
BSD-3-Clause
| 2023-09-14T20:03:48
| 2012-04-28T02:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 700
|
py
|
class StorageSettingsMixin:
def _clear_cached_properties(self, setting, **kwargs):
"""Reset setting based property values."""
if setting == "MEDIA_ROOT":
self.__dict__.pop("base_location", None)
self.__dict__.pop("location", None)
elif setting == "MEDIA_URL":
self.__dict__.pop("base_url", None)
elif setting == "FILE_UPLOAD_PERMISSIONS":
self.__dict__.pop("file_permissions_mode", None)
elif setting == "FILE_UPLOAD_DIRECTORY_PERMISSIONS":
self.__dict__.pop("directory_permissions_mode", None)
def _value_or_setting(self, value, setting):
return setting if value is None else value
|
[
"felisiak.mariusz@gmail.com"
] |
felisiak.mariusz@gmail.com
|
67e188cf4dff45d62ef69d5b2bca2ce723dfa609
|
cc175b44e655a2450611c107884763b512e14aa8
|
/model/make_data.py
|
abf967d3d00a18eed02bee0417d5877da1637833
|
[] |
no_license
|
boxiangliu/abbrev
|
daa05e733c92d3c7072323b0ecad5369d87eca86
|
610f5e0789fd87ca7390bf458b6699ed83331a4f
|
refs/heads/master
| 2023-07-20T00:16:17.760843
| 2021-08-23T17:12:06
| 2021-08-23T17:12:06
| 300,144,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,605
|
py
|
import pandas as pd
import os
import click
# nrows = 1e6
# ab3p_fn = "../processed_data/preprocess/ab3p/summarize_ab3p/ab3p_res.csv"
# out_dir = "../processed_data/preprocess/model/train_val/"
# train_pct = 0.6
# val_pct = 0.2
# test_pct = 0.2
@click.command()
@click.option("--nrows", type=int, help="Number of rows.")
@click.option("--ab3p_fn", type=str, help="Path to ab3p file.")
@click.option("--out_dir", type=str, help="Path to out directory.")
@click.option("--train_pct", type=float, help="Proportion used for training.", default=0.6)
@click.option("--val_pct", type=float, help="Proportion used for evaluation.", default=0.2)
@click.option("--test_pct", type=float, help="Proportion used for testing.", default=0.2)
def main(nrows, ab3p_fn, out_dir, train_pct, val_pct, test_pct):
os.makedirs(out_dir, exist_ok=True)
ab3p = pd.read_csv(ab3p_fn, sep="\t", nrows=nrows)
denom = sum([train_pct, val_pct, test_pct])
train_pct, val_pct, test_pct = \
train_pct/denom, val_pct/denom, test_pct/denom
train_div = round(ab3p.shape[0] * train_pct)
val_div = train_div + round(ab3p.shape[0] * val_pct)
train_idx = range(train_div)
val_idx = range(train_div, val_div)
test_idx = range(val_div, ab3p.shape[0])
assert len(train_idx) + len(val_idx) + len(test_idx) == ab3p.shape[0]
ab3p.iloc[train_idx,:].to_csv(f"{out_dir}/train.tsv", sep="\t", index=False)
ab3p.iloc[val_idx,:].to_csv(f"{out_dir}/val.tsv", sep="\t", index=False)
ab3p.iloc[test_idx,:].to_csv(f"{out_dir}/test.tsv", sep="\t", index=False)
if __name__ == "__main__":
main()
|
[
"jollier.liu@gmail.com"
] |
jollier.liu@gmail.com
|
d5041bab3576ce33ff487521f40316e8a4f3a33c
|
ecb6b752523a126ef17895854b18e02df41c4cfe
|
/api_restful/urls.py
|
791b79d537ebf6c13c91a18c29865ae8b286bc8e
|
[
"MIT"
] |
permissive
|
zhanghe06/bearing_project
|
cd6a1b2ba509392da37e5797a3619454ca464276
|
25729aa7a8a5b38906e60b370609b15e8911ecdd
|
refs/heads/master
| 2023-05-27T17:23:22.561045
| 2023-05-23T09:26:07
| 2023-05-23T09:39:14
| 126,219,603
| 2
| 5
|
MIT
| 2022-12-08T03:11:27
| 2018-03-21T17:54:44
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 4,217
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: urls.py
@time: 2020-02-28 21:21
"""
from __future__ import unicode_literals
from uuid import uuid4
import logging
import time
from collections import defaultdict
from flask import jsonify, request, g, make_response
from werkzeug.exceptions import NotFound, InternalServerError
from api_restful.signals.operation_log import signal_operation_log
from api_restful import app
# api_logger = logging.getLogger('api')
debug_logger = logging.getLogger('debug')
SUCCESS_MSG = app.config['API_SUCCESS_MSG']
FAILURE_MSG = app.config['API_FAILURE_MSG']
@app.before_request
def api_before_request():
request_id = request.headers.get('X-Request-Id', str(uuid4())) # 不带短横: uuid4().get_hex()
g.request_id = request_id
debug_logger.debug('before_request')
g.req_time = time.time()
@app.after_request
def after_request(response):
request_id = g.get('request_id', str(uuid4()))
g.request_id = request_id
debug_logger.debug('after_request')
# 头部注入
response.headers.add('X-Request-Id', request_id)
g.status_code = response.status_code
g.project = app.name
g.res_time = time.time()
latency = time.time() - g.req_time
g.latency = latency
# api_log = defaultdict(lambda: '-')
# api_logger.info('-')
# 操作日志
operation_log = {
'project': app.name,
'latency': latency,
'client_host': request.host,
'client_addr': request.remote_addr,
'req_id': request_id,
'req_method': request.method,
'req_path': request.path,
'req_json': request.json,
'req_args': request.args.to_dict(),
'res_status_code': response.status_code,
'res_json': {},
}
# Get请求错误时记录返回,正确返回忽略,避免日志过大
if request.method in ['GET', 'HEAD', 'OPTIONS'] and response.status_code / 2 != 100:
operation_log['res_json'] = response.json
if request.method in ['POST', 'PUT', 'DELETE']:
operation_log['res_json'] = response.json
signal_operation_log.send(app, **operation_log)
return response # 必须返回response
# @app.after_request
# def after_request(response):
# request_id = g.get('request_id', str(uuid4()))
# g.request_id = request_id
# debug_logger.debug('after_request')
#
# g.status_code = response.status_code
#
# # 头部注入
# response.headers.add('X-Request-Id', request_id)
#
# return response # 必须返回response
# @app.teardown_request
# def teardown_request(exception=None):
# request_id = g.get('request_id', str(uuid4()))
# g.request_id = request_id
# debug_logger.debug('teardown_request')
#
# g.project = app.name
# g.res_time = time.time()
# g.latency = g.res_time - g.req_time
#
# # 接口日志
# g.api_log = defaultdict(lambda: '-')
# g.api_log['project_name'] = app.name
#
# if exception:
# exception_info = {
# 'module': exception.__class__.__module__,
# 'name': exception.__class__.__name__,
# 'message': exception.message,
# }
# g.api_log['exception'] = '%(module)s.%(name)s: %(message)s' % exception_info
# api_logger.error(dict(g.api_log))
# else:
# api_logger.info(dict(g.api_log))
# return exception
@app.route('/', methods=['GET', 'POST', 'OPTIONS'])
def heartbeat():
return jsonify(SUCCESS_MSG.copy())
# 全局路由错误
@app.errorhandler(NotFound.code)
def url_not_found(error):
return make_response(
jsonify(
{
'msg': '路径错误' or error.description,
'result': False,
# 'status': exceptions.NotFound.code,
}
),
NotFound.code
)
# 全局异常错误(DEBUG模式生效)
@app.errorhandler(Exception)
def exception(error):
return make_response(
jsonify(
{
'msg': error.message or InternalServerError.description,
'result': False,
# 'status': InternalServerError.code,
}
),
InternalServerError.code
)
|
[
"zhang_he06@163.com"
] |
zhang_he06@163.com
|
a63cdcdc0d81ec4c855b3d2d9b59a8d04cc7e353
|
043d91547df1c9824cdff5386c74083b234803c2
|
/Code Examples/Chapter 22/Statistics.py
|
17fddd9b393ced65aa9cff821be8caa5467a56b8
|
[] |
no_license
|
ddc899/cmpt145
|
9824b7caad98f78075dd42c5ecb1c1617f4628cf
|
2a8c2f36d42082dffdc6e79a9822aa2d4ad925a9
|
refs/heads/master
| 2022-01-26T22:44:02.647310
| 2018-07-27T22:51:07
| 2018-07-27T22:51:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,838
|
py
|
# CMPT 145: Objects
# Defines the Statistics ADT
# Calculate mean and variance.
# Implementation
# Do the calculations without storing all the data!
# Use a Python dictionary as a record to store three quantities:
# _count': the number of data values added
# _avg': the running average of the values added
# _sumsqdiff': the sum of the square differences between the
# values added and the mean so far
# These values can be modified every time a new data value is
# added, so that the mean and variance can be calculated quickly
# as needed. This approach means that we do not need to store
# the data values themselves, which could save a lot of space.
class Statistics(object):
def __init__(self):
"""
Purpose:
Initialize a Statistics object instance.
"""
self._count = 0 # how many data values have been seen
self._avg = 0 # the running average so far
self._sumsqdiff = 0 # the sum of the square differences
def add(self, value):
"""
Purpose:
Use the given value in the calculation of mean and variance.
Pre-Conditions:
:param value: the value to be added
Post-Conditions:
none
Return:
:return none
"""
self._count += 1
k = self._count # convenience
diff = value - self._avg # convenience
self._avg += diff / k
self._sumsqdiff += ((k - 1) / k) * (diff ** 2)
def mean(self):
"""
Purpose:
Return the mean of all the values seen so far.
Post-conditions:
(none)
Return:
The mean of the data seen so far.
Note: if no data has been seen, 0 is returned.
This is clearly false.
"""
return self._avg
def var(self):
"""
Purpose:
Return the variance of all the values seen so far.
(variance is the average of the squared difference
between each value and the average of all values)
Pre-conditions:
stat: the Statistics record containing the variance
Post-conditions:
(none)
Return:
The variance of the data seen so far.
Note: if 0 or 1 data values have been seen, 0 is returned.
This is clearly false.
"""
return self._sumsqdiff / self._count
def sampvar(self):
"""
Purpose:
Return the sample variance of all the values seen so far.
Pre-conditions:
stat: the Statistics record containing the sample variance
Post-conditions:
(none)
Return:
The sample variance of the data seen so far.
Note: if 0 or 1 data values have been seen, 0 is returned.
This is clearly false.
"""
return self._sumsqdiff / (self._count - 1)
|
[
"knownastron@gmail.com"
] |
knownastron@gmail.com
|
09aea084d55765c2c27515405b4a8b5d4af484bc
|
e3bb2717535c5f8c7db54a43d801971320b02ae1
|
/app/auth/views.py
|
b5ff9badc5abdeddd2de2705a84c588465b33d0d
|
[] |
no_license
|
EugeneZnm/Watchlist
|
e28e207f8fd2f2a3a1c23b4f6ccc52df9ea310ff
|
8f846ff6acb0d217c6ec7316abb836ff7d870a75
|
refs/heads/master
| 2020-03-27T08:41:40.696130
| 2018-09-05T11:08:59
| 2018-09-05T11:08:59
| 146,278,525
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,848
|
py
|
from flask import render_template, redirect, request, url_for, flash
from . import auth
from .. import db
from ..models import User
from .forms import LoginForm, RegistrationForm
# importation of mail_message function
from .. email import mail_message
from flask_login import login_user, logout_user, login_required
@auth.route('/login', methods=['GET', 'POST'])
def login():
login_form = LoginForm()
"""
create instance of Loginform and pass it into login.html template
"""
if login_form.validate_on_submit():
"""
check if form is validated
user is searched for in the database with the email received from form
"""
user = User.query.filter_by(email=login_form.email.data).first()
if user is not None and user.verify_password(login_form.password.data):
"""
use verify password method to confirm password entered matches with password hash stored in database
"""
login_user(user, login_form.remember.data)
"""
login function records the user as logged for current session if password ans hash match
user object taken and form data remembered
long time coolie is set if true
"""
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or Password')
title = "watchlist login"
return render_template('auth/login.html', login_form=login_form, title=title)
@auth.route('/register', methods=["GET", "POST"])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data, username=form.username.data, password=form.password.data)
"""
new user is created from user model when form is submitted
email, username and password are passed in
"""
db.session.add(user)
"""
new user is added to session
"""
db.session.commit()
"""
new user committed to session
"""
title = 'New Account'
mail_message("Welcome to Watchlist", "email/welcome_user", user.email, user=user)
"""
call mail message
pass in subject and template file where message body will be stored
pass in new user's email address obtained from the registration form
pass in user as a keyword argument
"""
return redirect(url_for('auth.login'))
return render_template('auth/register.html', registration_form=form)
# authenticated logout route calling logout_user function
@auth.route('/logout')
@login_required
def logout():
"""
logout function that logs out user from application
:return:
"""
logout_user()
# redirection to index page
return redirect(url_for("main.index"))
|
[
"eugenenzioki@gmail.com"
] |
eugenenzioki@gmail.com
|
4c64f905d39e8cb07f0685bdfb4bb78ea2b99621
|
a54b6e7a1906534b11584054e6096f855ced549d
|
/encapsulation-python-oop/account.py
|
98dce39f1fbb4a95b18e9d11731b87c109daf8a6
|
[] |
no_license
|
DavidStoilkovski/python-oop
|
1cece55dffa8471ed980773e259ca9569eb00283
|
ae497d84d41badb8fc9c64c518d1dd72d95c839f
|
refs/heads/main
| 2023-04-20T10:11:02.914382
| 2021-05-08T16:31:46
| 2021-05-08T16:31:46
| 359,358,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
class Account:
def __init__(self, id, balance, pin):
self.__id = id
self.__pin = pin
self.balance = balance
def get_id(self, pin):
if pin == self.__pin:
return self.__id
return "Wrong pin"
def change_pin(self, old_pin, new_pin):
if old_pin == self.__pin:
self.__pin = new_pin
return "Pin changed"
return "Wrong pin"
# Test code
# account = Account(8827312, 100, 3421)
# print(account.get_id(1111))
# print(account.get_id(3421))
# print(account.balance)
# print(account.change_pin(2212, 4321))
# print(account.change_pin(3421, 1234))
|
[
"stoilkovskidavid@gmail.com"
] |
stoilkovskidavid@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.