blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
de60fcbc238e290b46b71f00839306c79306ef20 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03221/s313909172.py | 7053691761a5635cbd5e58b85e5ee589303cc652 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | from sys import stdin, setrecursionlimit
from bisect import bisect_left
def main():
    """Solve the ID-numbering problem: for each (prefecture, year) record,
    print the prefecture and its birth-order rank, each zero-padded to six
    digits.  Reads from stdin, writes to stdout.
    """
    read = stdin.buffer.readline
    n, m = map(int, read().split())
    records = [tuple(map(int, read().split())) for _ in range(m)]
    # Group the birth years per prefecture (1-based in the input).
    births = [[] for _ in range(n)]
    for pref, year in records:
        births[pref - 1].append(year)
    for years in births:
        years.sort()
    # The rank of a record is its position within its prefecture's sorted years.
    for pref, year in records:
        rank = bisect_left(births[pref - 1], year) + 1
        print("%06d%06d" % (pref, rank))
if __name__ == "__main__":
    # NOTE(review): main() is purely iterative, so raising the recursion
    # limit looks like a contest-template leftover — confirm before removing.
    setrecursionlimit(10000)
    main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
09709c91a5aaf7cf1de4c830b0e3f15e21759830 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_82/50.py | 0c6605af2e896c47bfa05692c350399f1ff042a0 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,259 | py | #! /usr/bin/python
import sys
def check_dist(nb_vendor, vendor, min_dist):
    """Return 1 when every pair of vendor positions is at least ``min_dist``
    apart, else 0.

    Pairwise O(n^2) scan; the positions are not assumed to be sorted.
    Fix: use ``range`` instead of Python-2-only ``xrange`` (identical
    behavior on Python 2, works on Python 3).
    """
    for i in range(nb_vendor):
        for j in range(i + 1, nb_vendor):
            if (vendor[j] - vendor[i]) < min_dist:
                return 0
    return 1
def solve(nb_vendor, vendor, min_dist):
    """Simulate the vendors spreading out until all pairwise distances are
    at least ``min_dist``; return the elapsed time.

    Each step takes 0.5 time units and moves every vendor half a meter:
    the leftmost always moves left, the rightmost always moves right, and
    each middle vendor moves away from a too-close left neighbour (or
    copies its neighbour's move when exactly at the limit).

    Mutates ``vendor`` in place.
    Fix: use ``range`` instead of Python-2-only ``xrange``.
    """
    time = 0
    while check_dist(nb_vendor, vendor, min_dist) == 0:
        # one step is a half-meter / half a time unit
        time += 0.5
        move = [-0.5]
        for i in range(1, nb_vendor - 1):
            diff = vendor[i] - vendor[i - 1]
            if diff > min_dist:
                move.append(-0.5)
            elif diff == min_dist:
                move.append(move[i - 1])
            else:
                move.append(0.5)
        move.append(0.5)
        for i in range(nb_vendor):
            vendor[i] += move[i]
    return time
# --- driver: parse the Code Jam input file given on the command line ---
# NOTE(review): Python 2 script (xrange, print statements).
fd = open(sys.argv[1])
num_cases = int(fd.readline())
for i in range(0, num_cases):
    # Case header: number of distinct positions and minimum required distance.
    line = fd.readline().split(" ")
    nb_pts = int(line[0])
    min_dist = int(line[1])
    nb_vendor = 0
    vendor = []
    for n in xrange(nb_pts):
        # One line per position: the coordinate and how many vendors start there.
        line = fd.readline().split(" ")
        pos = int(line[0])
        num_vendor = int(line[1])
        nb_vendor += num_vendor
        for v in xrange(num_vendor):
            vendor.append(pos)
    # print vendor
    output = solve(nb_vendor, vendor, min_dist)
    print "Case #%d:" % (i+1), output
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
class Solution:
    """LeetCode 529 (Minesweeper): reveal the board after one click."""

    def updateBoard(self, board: List[List[str]], click: List[int]) -> List[List[str]]:
        """Apply one click to ``board`` (mutated in place) and return it.

        Clicking a mine marks it 'X'.  Clicking an empty cell reveals it:
        cells adjacent to mines show the mine count, mine-free cells become
        'B' and their neighbours are revealed recursively (BFS).
        """
        row, col = click
        if board[row][col] == 'M':
            # Game over: mark the clicked mine and stop.
            board[row][col] = 'X'
            return board
        height, width = len(board), len(board[0])
        neighbors = ((1, 0), (-1, 0), (0, 1), (0, -1),
                     (1, 1), (-1, 1), (1, -1), (-1, -1))
        seen = {(row, col)}
        pending = collections.deque([(row, col)])
        while pending:
            r, c = pending.popleft()
            mines = sum(
                1
                for dr, dc in neighbors
                if 0 <= r + dr < height and 0 <= c + dc < width
                and board[r + dr][c + dc] == 'M'
            )
            if mines > 0:
                # Bordering a mine: show the count, do not expand further.
                board[r][c] = str(mines)
            else:
                board[r][c] = 'B'
                for dr, dc in neighbors:
                    nr, nc = r + dr, c + dc
                    if 0 <= nr < height and 0 <= nc < width and (nr, nc) not in seen:
                        seen.add((nr, nc))
                        pending.append((nr, nc))
        return board
| [
"axd8911@hotmail.com"
] | axd8911@hotmail.com |
ed92641ba38ecc72946b4bca0f14b403d0ef8d9f | 8e6b59cf324c87de3d726a585f0f053cf129c5ed | /experiments/netowrks/testAllSimilarNets.py | ab77fb9fbc177a26f19637c3405a62135f147144 | [] | no_license | thodorisGeorgiou/comparison_of_hand_crafted_and_deep_learning_on_CFD_output | 4556d38a7a12384c0c2c7e780924bec584814272 | 80721c8af0eb48b0b9b8b1a5d5cccd97cc19304f | refs/heads/main | 2023-03-13T08:50:38.069662 | 2021-03-03T13:58:24 | 2021-03-03T13:58:24 | 344,140,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | #Run test network scripts on repetative runs, in parallel
import os
import sys
from multiprocessing import Pool
# modelTypes = ["op", "cc"]
# modelTypes = ["vc", "ds", "op", "cc"]
# Only the "vc" model type is exercised in this version of the script.
modelTypes = ["vc"]
numRuns = 4
# Command line: <base path, must end with '/'> <model type>
basePath = sys.argv[1]
mType = sys.argv[2]
if basePath[-1] != "/":
    exit("Path must end with a slash")
# gpu = sys.argv[1]
# releaseDirs = ["vc/1/","vc/2/","vc/3/","vc/4/"]
def runTest(gpu):
    """Run testNetworks.py for repetition ``gpu + 1`` pinned to GPU ``gpu``.

    Side effects only: spawns a shell command; prints the release directory
    and bails out when it does not exist.  Uses the module-level ``basePath``
    and ``mType`` parsed from the command line.
    """
    repetition = str(gpu + 1)
    releaseDir = basePath + repetition + "Release/"
    if not os.path.isdir(releaseDir):
        print(releaseDir)
        return
    os.system('CUDA_VISIBLE_DEVICES=' + str(gpu) + ' python3 testNetworks.py ' + releaseDir + " " + mType)
# Launch the four repetitions in parallel, one worker process (and GPU) each.
runs = [i for i in range(4)]
p = Pool(4)
res = p.map(runTest, runs)
p.close()
p.join()
# for mType in modelTypes:
# for run in range(numRuns):
# # relDir = basePath+mType+"/"+str(run+1)+"/"
# relDir = basePath+str(run+1)+"Release/"
# if not os.path.isdir(relDir):
# print(relDir)
# continue
# os.system('CUDA_VISIBLE_DEVICES='+gpu+' python3 testNetworks.py '+relDir+" "+mType)
# # os.system('python3 testNetworks.py '+relDir+" "+mType) | [
"thodorisgeorgiou65@gmail.com"
] | thodorisgeorgiou65@gmail.com |
08d21fe53aa88f46cbec0f4f7d961b705b9a3937 | f2658c4bd7f833ace25ac2b63e88317b05f4602d | /2017 November/code/VendorDataImporter/rdf/common_tool/command_utils.py | 804d006cd090f84d8dcd954aac0dc2b63d6de4d2 | [] | no_license | xiaochao00/telanav_diary | e4c34ac0a14b65e4930e32012cc2202ff4ed91e2 | 3c583695e2880322483f526c98217c04286af9b2 | refs/heads/master | 2022-01-06T19:42:55.504845 | 2019-05-17T03:11:46 | 2019-05-17T03:11:46 | 108,958,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,987 | py | from common_utils import print_error, print_standout
import os, sys
def execute_cmd(cmd):
    """Run ``cmd`` through the shell, logging it first; abort the whole
    process with exit code -1 when the command fails (non-zero status)."""
    print_standout("execute cmd is :%s" % cmd)
    status = os.system(cmd)
    if status != 0:
        print_error("execute cmd[%s] failed" % cmd)
        sys.exit(-1)
def parse_size_info_response_lines(response_lines):
    """
    Parse the output of the command ``df -m <directory>``.

    :param response_lines:
        the response lines of the command, containing a header line
        ``Filesystem 1M-blocks Used Available Use% Mounted on``
        followed by one data line
    :return: response dict
        {'Filesystem': , 'TotalSize': , 'Used': , 'Available': ,
         'UsedRate': , 'MountedOn': }
        or ``None`` on any parse failure.
        ``df -m`` reports sizes in MB; the three size fields are converted
        to KB (floats) below, matching the original contract.
    """
    if not response_lines:
        print_error('parse the response line of command failed. response lines can not none ')
        return None
    # Locate the header line ("Filesystem ... Available ...").
    header_index = None
    for index, line in enumerate(response_lines):
        if line.find("Filesystem") != -1 and line.find("Available") != -1:
            header_index = index
            break
    # Bug fix: the original never actually detected the "header not found"
    # case (its sentinel check could not trigger) and then indexed past the
    # end of the list.  Also guard against a header with no data line after it.
    if header_index is None or header_index + 1 >= len(response_lines):
        print_error("in parse the response line of df command. lines is %s ." % response_lines)
        return None
    names = ['Filesystem', 'TotalSize', 'Used', 'Available', 'UsedRate', 'MountedOn']
    values = response_lines[header_index + 1].strip().split()
    if len(names) != len(values):
        # Bug fix: the original printed this error but kept going, which
        # crashed with IndexError when fewer values were present.
        print_error("parse command response line failed. lines : %s" % response_lines)
        return None
    response_dict = dict(zip(names, values))
    # df -m reports MB; convert the size fields to KB for the callers.
    response_dict["TotalSize"] = float(response_dict["TotalSize"]) * 1024
    response_dict["Used"] = float(response_dict["Used"]) * 1024
    response_dict["Available"] = float(response_dict["Available"]) * 1024
    return response_dict
| [
"1363180272@qq.com"
] | 1363180272@qq.com |
5214f839a0865e8185396f000fb2bfbf469fc0ee | e0dcc1e7c862823278a93c40bfe263bbd196944b | /setup.py | 5a6ad4f2c70e2f58b778427d4f65c0d973e82cd3 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | baiqj/ToughRADIUS | 3e9300e3f65264d2e43bdf9f3475077de7d4491e | 382b80d2c38ad2fba2a848776d959da9701e002b | refs/heads/master | 2021-01-24T03:48:05.517408 | 2015-03-02T07:00:22 | 2015-03-02T07:00:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,857 | py | #!/usr/bin/python
from setuptools import setup, find_packages
import toughradius

# Single-source the distribution version from the package itself.
version = toughradius.__version__

# Runtime dependencies (minimum known-good versions).
install_requires = [
    'argparse',
    'MySQL-python>=1.2.5',
    'Mako>=0.9.0',
    'Beaker>=1.6.4',
    'MarkupSafe>=0.18',
    'PyYAML>=3.10',
    'SQLAlchemy>=0.9.8',
    'Twisted>=13.0.0',
    'autobahn>=0.9.3-3',
    'bottle>=0.12.7',
    'six>=1.8.0',
    'tablib>=0.10.0',
    'zope.interface>=4.1.1',
    'pycrypto==2.6.1',
    'sh==1.11',
    'pyOpenSSL==0.14',
    'service_identity',
]

# NOTE(review): defined but never used below — presumably a leftover;
# confirm before removing.
install_requires_empty = []

# Non-Python assets (web templates, static files, RADIUS dictionaries)
# shipped inside the package.
package_data={
    'toughradius': [
        'console/admin/views/*',
        'console/customer/views/*',
        'console/static/css/*',
        'console/static/fonts/*',
        'console/static/img/*',
        'console/static/js/*',
        'console/static/favicon.ico',
        'radiusd/dicts/*'
    ]
}
# Distribution metadata; README.rst doubles as the PyPI long description.
setup(name='toughradius',
      version=version,
      author='jamiesun',
      author_email='jamiesun.net@gmail.com',
      url='https://github.com/talkincode/ToughRADIUS',
      license='BSD',
      description='RADIUS Server',
      long_description=open('README.rst').read(),
      classifiers=[
          'Development Status :: 6 - Mature',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: BSD License',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Topic :: Software Development :: Libraries :: Python Modules',
          'Topic :: System :: Systems Administration :: Authentication/Directory',
      ],
      packages=find_packages(),
      package_data=package_data,
      keywords=['radius', 'authentication'],
      zip_safe=True,
      include_package_data=True,
      install_requires=install_requires,
      scripts=['bin/toughctl'],
      tests_require='nose>=0.10.0b1',
      test_suite='nose.collector',
) | [
"jamiesun.net@gmail.com"
] | jamiesun.net@gmail.com |
9b319c036fd8d33a105fc098ecc2c5fbeec64da4 | 596e92d0d484b6e7eee6d322e72e52748fdeaa5d | /test/test_nfl_scores_team_season.py | e7165e08a90d72cf9112826ce8a69d29d3d5003c | [] | no_license | scottypate/sportsdata | f5f61ddc7eb482883f93737c6ce73dd814ed4336 | a07955ab50bf4fff1ce114ed9895095ff770c473 | refs/heads/main | 2023-08-18T16:51:56.452678 | 2021-10-22T12:44:08 | 2021-10-22T12:44:08 | 420,062,350 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | # coding: utf-8
"""
NFL v3 Scores
NFL schedules, scores, odds, weather, and news API. # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import sportsdata.nfl_scores
from sportsdata.nfl_scores.models.nfl_scores_team_season import NflScoresTeamSeason # noqa: E501
from sportsdata.nfl_scores.rest import ApiException
class TestNflScoresTeamSeason(unittest.TestCase):
    """Unit-test stubs for the NflScoresTeamSeason model."""

    def setUp(self):
        """No fixtures required yet."""

    def tearDown(self):
        """Nothing to clean up."""

    def testNflScoresTeamSeason(self):
        """Smoke test for NflScoresTeamSeason (not yet implemented)."""
        # FIXME: construct the object with mandatory attributes and example
        # values, e.g.:
        # model = sportsdata.nfl_scores.models.nfl_scores_team_season.NflScoresTeamSeason()  # noqa: E501
if __name__ == '__main__':
    # Allow running this test module directly via the unittest runner.
    unittest.main()
| [
"scotty.pate@auth0.com"
] | scotty.pate@auth0.com |
4b5304e679901413066a5018e1a246a624a23db2 | 1d2e26ea7ed6d49c05a45e8b55243e52011b74e4 | /tests/test_main.py | 91c29b2679ea43df1ebebd177965776cc571f6ec | [] | no_license | mikeizbicki/html_validator | 723837acd090f29675bb4660d92710a7c9648d73 | d786058e2a8a55341e733f55be3770870f7d4124 | refs/heads/master | 2023-02-18T05:40:00.774317 | 2023-02-03T20:32:23 | 2023-02-03T20:32:23 | 238,632,615 | 1 | 143 | null | 2023-02-08T08:01:43 | 2020-02-06T07:36:35 | Python | UTF-8 | Python | false | false | 3,574 | py | import HTML_Validator
import pytest
# --- _extract_tags: tag tokenizer ---
def test__extract_tags_1():
    # Empty input yields no tags.
    assert HTML_Validator._extract_tags('') == []

def test__extract_tags_2():
    # Plain text without markup yields no tags.
    assert HTML_Validator._extract_tags('python in fun') == []

def test__extract_tags_3():
    assert HTML_Validator._extract_tags('<strong></strong>') == ['<strong>','</strong>']

def test__extract_tags_4():
    # Surrounding text is ignored; only the tags are returned.
    assert HTML_Validator._extract_tags('python in <strong>fun</strong>') == ['<strong>','</strong>']

def test__extract_tags_5():
    # Tags come back in document order; no nesting/matching is applied here.
    assert HTML_Validator._extract_tags('<a><b><c></a></b><f>') == ['<a>','<b>','<c>','</a>','</b>','<f>']
"""
def test__extract_tags_6():
with pytest.raises(ValueError, match='found < without matching >'):
HTML_Validator._extract_tags('<')
def test__extract_tags_7():
with pytest.raises(ValueError, match='found < without matching >'):
HTML_Validator._extract_tags('this is a <strong test')
def test__extract_tags_8():
with pytest.raises(ValueError, match='found < without matching >'):
HTML_Validator._extract_tags('this is a <strong< test')
"""
def test__extract_tags_9():
    # Stress test: 2n synthetic tags survive a round trip through the tokenizer.
    n = 10000
    open_tags = [ '<' + str(i) + '>' for i in range(n) ]
    close_tags = [ '</' + str(i) + '>' for i in range(n) ]
    tags = open_tags + close_tags
    assert HTML_Validator._extract_tags(' '.join(tags)) == tags
# --- validate_html: well-formedness checks ---
def test_validate_html_1():
    # Empty document is valid.
    assert HTML_Validator.validate_html('')

def test_validate_html_2():
    assert HTML_Validator.validate_html('<a></a>')

def test_validate_html_3():
    # Unclosed tag is invalid.
    assert not HTML_Validator.validate_html('<a>')

def test_validate_html_4():
    # Closing tag without an opener is invalid.
    assert not HTML_Validator.validate_html('</a>')

def test_validate_html_5():
    # Sibling pairs are valid.
    assert HTML_Validator.validate_html('<strong></strong><b></b>')
def test_validate_html_6():
    # Properly nested distinct tags are valid.
    assert HTML_Validator.validate_html('<strong><b></b></strong>')

# Bug fix: this function was also named test_validate_html_6, which
# redefined (shadowed) the one above, so pytest silently never ran it.
def test_validate_html_6_nested_same_tag():
    # Properly nested identical tags are valid.
    assert HTML_Validator.validate_html('<strong><strong></strong></strong>')
def test_validate_html_7():
    # Interleaved (improperly nested) tags are invalid.
    assert not HTML_Validator.validate_html('<strong><b></strong></b>')

def test_validate_html_8():
    # Text mixed with a balanced pair is valid.
    assert HTML_Validator.validate_html('this is a test <em>hello!</em>')
def test_validate_html_9():
    # A complete, properly nested document validates.
    assert HTML_Validator.validate_html('''
    <html>
        <head>
            <title>This is an awesome webpage!</title>
        </head>
        <body>
            <p>Programming is the <strong>best</strong>!</p>
        </body>
    </html>
    ''')

def test_validate_html_10():
    # The same document with an unclosed <p> fails.
    assert not HTML_Validator.validate_html('''
    <html>
        <head>
            <title>This is an awesome webpage!</title>
        </head>
        <body>
            <p>Programming is the <strong>best</strong>!
        </body>
    </html>
    ''')
def test_validate_html_11():
    # A bare '<' is invalid.
    assert not HTML_Validator.validate_html('<')

def test_validate_html_12():
    assert not HTML_Validator.validate_html('this is a <strong test')

def test_validate_html_13():
    assert not HTML_Validator.validate_html('this is a <strong< test')

def test_validate_html_14():
    # Large balanced document is valid; unbalanced variants and any strict
    # prefix of it are not.
    n = 10000
    open_tags = [ '<' + str(i) + '>' for i in range(n) ]
    close_tags = [ '</' + str(i) + '>' for i in range(n) ]
    close_tags.reverse()
    tags = open_tags + close_tags
    assert HTML_Validator.validate_html(' '.join(tags))
    assert not HTML_Validator.validate_html(' '.join(open_tags))
    assert not HTML_Validator.validate_html(' '.join(close_tags))
    assert not HTML_Validator.validate_html(' '.join(tags[0:-1]))

def test_validate_html_15():
    # Each tag immediately closed: valid regardless of count.
    n = 10000
    tags = [ '<' + str(i) + '></' + str(i) + '>' for i in range(n) ]
    assert HTML_Validator.validate_html(' '.join(tags))
| [
"mike@izbicki.me"
] | mike@izbicki.me |
d0017561c6f6ad25a1c34c2406c04f4c6a962b52 | 19380415ccdcb0dac20f7bd67fcc8a0f631a3b90 | /codeforces/727A.py | 8ed364f5f8143382bfbf97c6d20628a693a1495f | [
"MIT"
] | permissive | italo-batista/problems-solving | c06c811364db7439d842db76e743dd7a1a7c8365 | f83ad34f0abebd52925c4020635556f20743ba06 | refs/heads/master | 2021-10-28T07:01:21.643218 | 2019-04-22T15:27:19 | 2019-04-22T15:27:19 | 76,066,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | # LINK FOR PROBLEM: http://codeforces.com/problemset/problem/727/A
def ends_with_one(number):
    """True when the decimal representation of ``number`` ends in '1'."""
    return str(number)[-1] == "1"
def del_last_one(number):
    """Drop the last decimal digit of ``number`` and return the rest as int."""
    digits = str(number)
    return int(digits[:-1])
def isEven(number):
    """True when ``number`` is divisible by two."""
    return not number % 2
# Work backwards from b toward a, undoing the two allowed operations
# (append digit 1 / multiply by 2).  The inverse step is unambiguous:
# a trailing 1 can only come from appending, an even number only from
# doubling; any other case is a dead end.
# NOTE(review): Python 2 script (raw_input, print statements, int '/').
a, b = map(int, raw_input().split())
fila = [str(b)]
current_number = b
while current_number > a:
    if ends_with_one(current_number):
        current_number = del_last_one(current_number)
        fila.append(str(current_number))
    elif isEven(current_number):
        current_number = current_number / 2
        fila.append(str(current_number))
    else:
        # Odd and not ending in 1: no inverse operation applies.
        current_number = a-1
        break
if current_number < a:
    print "NO"
elif current_number == a:
    # Print the chain in forward (a -> b) order.
    print "YES"
    print len(fila)
    print " ".join(str(fila[i]) for i in range(len(fila)-1, -1, -1 ))
| [
"italo.batista@ccc.ufcg.edu.br"
] | italo.batista@ccc.ufcg.edu.br |
7b1fbc45105a4352caa4fbb01235b2f5dcdfa5b7 | a3e52fbdfc81da3d17fee3d11b4451b330bfd592 | /CompPython/tutoriais/bloghackerearth/numpyTutorial.py | 5b484ed95488bf81857c0275ab3b14db7745d0cb | [] | no_license | chrislucas/python | 79633915dd0aa8724ae3dfc5a3a32053f7a4f1e0 | d3cca374f87e134a7ddfc327a6daea983875ecac | refs/heads/master | 2021-01-17T04:08:25.056580 | 2016-12-26T11:41:31 | 2016-12-26T11:41:31 | 42,319,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | '''
Created on 25 de nov de 2016
@author: C.Lucas
'''
if __name__ == '__main__':
    # Placeholder guard from the IDE template; the demo calls further down
    # run at import time regardless.
    pass
'''
http://blog.hackerearth.com/prerequisites-linear-algebra-machine-learning
'''
import numpy as np
from numpy import abs, array, eye
def test_numpy_abs():
    """Demo: numpy's abs works elementwise on a plain Python list."""
    values = [-1.2, 1.2]
    print(abs(values))
def test_numpy_array():
    """Demo: build a 2x3 ndarray, print its rows and its shape."""
    grid = array([[1, 2, 3], [3, 2, 1]])
    print(*grid)
    print(grid.shape)
def identity_matrix(n):
    # Print (not return) the n-by-n identity matrix.
    print(eye(n))
def add_matrix(a=None, b=None):
    """Return the elementwise sum of two matrices.

    Generalized: callers may now pass the operands; with no arguments the
    original demo matrices are used, so existing calls behave identically.
    """
    if a is None:
        a = array([ [1,2,3],[3,2,1] ])
    if b is None:
        b = array([ [1,2,3],[3,2,1] ])
    return np.add(a, b)
#print(add_matrix())
'''
http://cs231n.github.io/python-numpy-tutorial/
'''
# Module-level demo output (runs on import).
print(np.transpose(add_matrix()))
print(np.ones((1, 2, 3)))
#print(np.arange(4).reshape((2,2)))
"christoffer.luccas@gmail.com"
] | christoffer.luccas@gmail.com |
class TreeNode(object):
    """Binary-tree node: a value plus optional left/right children."""
    def __init__(self, x):
        self.val = x
        # Children are attached later by the tree builder.
        self.left = None
        self.right = None
class Solution(object):
    def sortedArrayToBST(self, nums):
        """Build a height-balanced BST from a sorted list (LeetCode 108).

        The middle element becomes the root; the halves on either side
        become the left and right subtrees, recursively.

        :type nums: List[int]
        :rtype: TreeNode
        """
        if not nums:
            return None
        # Bug fix: use floor division so the midpoint stays an int under
        # Python 3, where "/" yields a float and breaks the indexing.
        # ("//" behaves identically on Python 2.)
        mid = len(nums) // 2
        node = TreeNode(nums[mid])
        node.left = self.sortedArrayToBST(nums[:mid])
        node.right = self.sortedArrayToBST(nums[mid + 1:])
        return node
| [
"sfeng77@gmail.com"
] | sfeng77@gmail.com |
663d9e55f574ee82bb6d9ecf79a6f7ee71df9a65 | f06d9cd5fb86885a73ee997c687f3294840dd199 | /setuser.py | 169f54a1c0f7ac711cdde780e1f9ecd13d856b49 | [] | no_license | bu2/oauth-proxy | aaff16a07d5c2c07c8243293c9ed41205b251a74 | dbed492f8a806c36177a56ca626f005acec904b1 | refs/heads/master | 2020-12-26T15:53:40.618570 | 2013-07-09T05:06:16 | 2013-07-09T05:06:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,376 | py | import getpass
import os
import sys
from werkzeug.datastructures import MultiDict
import models
import forms
# Make sure the database gets installed properly
models.db.create_all()

values = MultiDict()
form = forms.SetUser(values)

# Email: taken from argv if supplied, otherwise prompted interactively;
# the form is rebuilt and re-validated after each field is filled in.
# NOTE(review): Python 2 script (raw_input, print statements).
values['email'] = sys.argv[1] if len(sys.argv) > 1 else raw_input('%s: ' % form.email.label.text)
form = forms.SetUser(values)
form.validate()
if form.email.errors:
    sys.exit('\n'.join(' ! %s' % e for e in form.email.errors))

if models.User.query.filter_by(email=form.email.data).count():
    print '%s already exists, setting the password' % form.email.data

# Password is prompted twice (password + retype) without echo.
values['password'] = getpass.getpass('%s: ' % form.password.label.text)
form = forms.SetUser(values)
form.validate()
if form.password.errors:
    sys.exit('\n'.join(' ! %s' % e for e in form.password.errors))

values['retype'] = getpass.getpass('%s: ' % form.retype.label.text)
form = forms.SetUser(values)
form.validate()
if form.retype.errors:
    sys.exit('\n'.join(' ! %s' % e for e in form.retype.errors))

# Update the existing account or create a new one, then persist.
user = models.User.query.filter_by(email=form.email.data).first()
if user:
    user.set_password(form.password.data)
    msg = 'Updated password for %s' % user.email
else:
    user = models.User(email=form.email.data, password=form.password.data)
    msg = 'Created account for %s' % user.email
models.db.session.add(user)
models.db.session.commit()

print msg
| [
"marty@martyalchin.com"
] | marty@martyalchin.com |
99568bf21ddb40ee0e1fa9bf07ed777ed5918f52 | 864619c0245254e01fc61ffb3e9942436b0f9a13 | /cerebtests/capabilities/__init__.py | 47a740e51f2dc5132c32ad8988051153a025b2a3 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | cerebunit/cerebtests | 094e65dcbc61027b183f28de73ce48e6f6d57ec7 | cf4b6d898f391db4f6200a8ee32753d0ff3ab200 | refs/heads/master | 2022-07-31T03:09:54.759945 | 2022-07-17T11:10:58 | 2022-07-17T11:10:58 | 139,698,304 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | # ~/cerebtests/cerebtests/capabilities/__init__.py
#from cerebtests.capabilities import cells
#from . import microcircuit
#from . import network
| [
"neuralgraphs@gmail.com"
] | neuralgraphs@gmail.com |
b21b7c4509567cee5d172d6e123085031357a543 | 038ce0cf1d4e6f6a8ed6736663b6bb1e02d01b2a | /the_tale/post_service/tests/test_reset_password.py | cd1a25721437c7634379233bf22090e602731efb | [
"BSD-2-Clause-Views"
] | permissive | GrandUser/the-tale | d363fc34bc3cd04ced2bd718f375fa83f887c7df | 3f7ec22c457a0c400ddb51dede7e8a3e962acf83 | refs/heads/master | 2021-01-19T06:56:52.868165 | 2016-05-22T15:07:32 | 2016-05-22T15:07:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,765 | py | # coding: utf-8
from django.core import mail
from the_tale.common.utils import testcase
from the_tale.accounts.logic import register_user
from the_tale.accounts.prototypes import AccountPrototype, ResetPasswordTaskPrototype
from the_tale.game.logic import create_test_map
from the_tale.post_service.models import Message
from the_tale.post_service.prototypes import MessagePrototype
class ResetPasswordTests(testcase.TestCase):
    """Tests for the post-service message produced by a password reset."""

    def setUp(self):
        # Build a minimal game world and one account, then enqueue a
        # password-reset task; creating the task is expected to register a
        # post-service message, which the tests below inspect.
        super(ResetPasswordTests, self).setUp()
        create_test_map()
        register_user('user_1', 'user_1@test.com', '111111')
        self.account_1 = AccountPrototype.get_by_nick('user_1')
        self.reset_task = ResetPasswordTaskPrototype.create(self.account_1)
        self.message = MessagePrototype.get_priority_message()

    def test_register_message(self):
        # Creating the reset task must have queued exactly one message.
        self.assertEqual(Message.objects.all().count(), 1)

    def test_mail_send(self):
        # Processing the message sends one email to the account owner;
        # both the text body and the HTML alternative carry the task uuid.
        self.assertEqual(len(mail.outbox), 0)
        self.message.process()
        self.assertTrue(self.message.state.is_PROCESSED)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to, [self.account_1.email])
        self.assertTrue(self.reset_task.uuid in mail.outbox[0].body)
        self.assertTrue(self.reset_task.uuid in mail.outbox[0].alternatives[0][0])

    def test_mail_send__to_system_user(self):
        from the_tale.accounts.logic import get_system_user
        # A reset task for the system user must be processed without
        # sending any real email.
        Message.objects.all().delete()
        ResetPasswordTaskPrototype.create(get_system_user())
        message = MessagePrototype.get_priority_message()
        self.assertEqual(len(mail.outbox), 0)
        message.process()
        self.assertTrue(message.state.is_PROCESSED)
        self.assertEqual(len(mail.outbox), 0)
| [
"a.eletsky@gmail.com"
] | a.eletsky@gmail.com |
42972f492df490b20e312d8053977787b6f6f9b5 | 66d9f74aabb3ecf2a79f24d36f94e082166fa9df | /trunk/webui/cloud/page/TopMenu.py | 6dbfd0a623a8321499036e2ab404f130b01f4cf8 | [] | no_license | willcai1984/AerohiveAuto | 6b7313de7c09a7d9b749f4531751eac0999b41f7 | a4aeea7f98dc279b17515f5d1719efce20dd5133 | refs/heads/master | 2020-05-24T14:42:35.924787 | 2014-11-18T09:19:48 | 2014-11-18T09:19:48 | 26,797,519 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,317 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'hshao'
from selenium.webdriver.common.by import By
from webui import WebElement
class Home(WebElement):
    """Locators for the HiveManager NG top-level navigation menu."""

    # Browser title of the page that hosts the top menu.
    menu_page_title = 'HiveManager NG'

    Dashboard_btn = (By.XPATH, '//li[@data-dojo-attach-point="dashboardtab"]/a')
    Monitor_btn = (By.XPATH, '//li[@data-dojo-attach-point="monitoringtab"]/a')
    Devices_btn = (By.XPATH, '//li[@data-dojo-attach-point="devicesTab"]/a')
    Configuration_btn = (By.XPATH, '//li[@data-dojo-attach-point="configurationtab"]/a')
    # Bug fix: the original XPath used the wrong attribute and mismatched
    # quotes ('//li[@class="data-dojo-attach-point="admintab"]/a'), which is
    # not a valid XPath expression; align it with the sibling locators.
    Administration_btn = (By.XPATH, '//li[@data-dojo-attach-point="admintab"]/a')
class MenuSuccessfulPage(WebElement):
    # Expected browser title after any top-menu navigation.
    menu_successful_page_title = 'HiveManager NG'
    # Expected in-page heading per menu entry.
    # NOTE(review): several entries share 'Network Policies' — looks like a
    # copy-paste placeholder; confirm against the real pages.
    Dashboard_successful_page_menu_title = 'Network Policies'
    Monitor_successful_page_menu_title = 'Network Policies'
    Devices_successful_page_menu_title = 'Devices'
    Devices_successful_page_menu_title_xpath = (By.XPATH, '//div[@data-dojo-attach-point="DeviceListArea"]/descendant::div[span="Devices"]')
    Configuration_successful_page_menu_title = 'Network Policies'
    Configuration_successful_page_menu_title_xpath = (By.XPATH, '//div[@data-dojo-attach-point="NetworkPolicyListArea"]/div[@class="ui-tle"]/span')
    Administration_successful_page_menu_title = 'Network Policies'
| [
"willcai1984@gmail.com"
] | willcai1984@gmail.com |
5755e6b8e66c11d2edb617fcfba69571cd90936f | 303bac96502e5b1666c05afd6c2e85cf33f19d8c | /solutions/python3/42.py | 7410ae5963ede23de80722a57ac467c5f85b6442 | [
"MIT"
class Solution:
    def trap(self, height):
        """LeetCode 42: total rain water trapped between the bars.

        :param height: list of non-negative bar heights
        :return: trapped water volume as an int
        """
        # left_max[i] = tallest bar strictly to the left of index i.
        # (The original kept this in a dict keyed by index; a list is the
        # natural structure and avoids per-key hashing.)
        left_max = []
        tallest_left = 0
        for h in height:
            left_max.append(tallest_left)
            if h > tallest_left:
                tallest_left = h
        # Sweep right-to-left, tracking the tallest bar seen to the right;
        # water over bar i is bounded by min(left max, right max).
        water = 0
        tallest_right = 0
        for i in range(len(height) - 1, -1, -1):
            roof = min(left_max[i], tallest_right)
            if roof > height[i]:
                water += roof - height[i]
            if height[i] > tallest_right:
                tallest_right = height[i]
        return water
"cenkay.arapsagolu@gmail.com"
] | cenkay.arapsagolu@gmail.com |
d6f9726289c482ee28560352f61c27e948ee4c0f | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /6hnrKRh7fZfMC5CKY_12.py | 826d0c825fe9b56642774cdbea1608c929276213 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | """
Given an integer, return a new **integer** according to the rules below:
* Split the number into groups of two digit numbers. If the number has an _odd_ number of digits, return `"invalid"`.
* For each group of two digit numbers, concatenate the _last digit_ to a new string the same number of times as the value of the _first digit_.
* Return the result as an _integer_.
look_and_say(3132) ➞ 111222
# By reading the number digit by digit, you get three "1" and three "2".
# Therefore, you put three ones and three two's together.
# Remember to return an integer.
### Examples
look_and_say(95) ➞ 555555555
look_and_say(1213141516171819) ➞ 23456789
look_and_say(120520) ➞ 200
look_and_say(231) ➞ "invalid"
### Notes
* Note that the number **0** can be included (see example #3).
* Check the **Resources** tab for a TED-Ed video for extra clarity.
"""
def look_and_say(n):
    """Expand ``n`` read as consecutive (count, digit) pairs.

    Each two-digit group "cd" contributes the digit d repeated c times;
    the concatenation is returned as an int.  Returns the string
    "invalid" when ``n`` has an odd number of digits.

    >>> look_and_say(3132)
    111222
    """
    digits = str(n)
    if len(digits) % 2 != 0:
        return "invalid"
    # digits[::2] are the counts, digits[1::2] the digits to repeat.
    expanded = "".join(
        digit * int(count)
        for count, digit in zip(digits[::2], digits[1::2])
    )
    # int() is safe here: the leading count digit of a positive int is
    # non-zero, so `expanded` is never empty.
    return int(expanded)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
ba0a07d4363c1a6a51d8418e9bcbcfe4b2118f86 | 5c24dfc11c855fa0e4f196bc2e38661842761ab8 | /backend/ImproveContent/DecreaseAnt.py | 5b1d3d9dbe70ab765c1ed4bd5d2d7864c3579ab1 | [] | no_license | CharlesRajendran/go-viral | 2ad7bbdaf1b7d9dbfa0330012dba2740dbe10952 | 05ced94ac1b97df965232f3c44e341a980e58b3c | refs/heads/master | 2020-03-20T01:58:58.616333 | 2018-06-12T17:32:05 | 2018-06-12T17:32:05 | 137,095,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,669 | py | import nltk
import fileinput
from nltk.corpus import wordnet as wn
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
def increaseAnger(file_name):
    """Suggest lower-scoring synonyms for the emotion-bearing words in a text.

    :param file_name: the raw text to analyse (despite the name, this is
        the text content, not a path)
    :return: list of ``[word, score, synonym, synonym_score]`` entries where
        the WordNet synonym has a strictly lower score than ``word`` in
        ``anticipationSO.txt``
    """
    toker = RegexpTokenizer(r'\w+')
    words = toker.tokenize(file_name)
    # POS tags we keep: adjectives, nouns, adverbs and verbs.
    allowed_types = ["JJ", "JJR", "JJS", "NN", "NNS", "RB", "RBR", "RBS",
                     "VB", "VBD", "VBG", "VBN", "VBP", "VBZ"]
    # Stop-word removal before tagging.
    filtered_words = [w for w in words if w not in stop_words]
    pos = nltk.pos_tag(filtered_words)
    allowed_words = [word.lower() for word, tag in pos if tag in allowed_types]
    # Load the sentiment lexicon once.  The original re-read
    # anticipationSO.txt inside three nested loops (once per lemma), which
    # was extremely slow on any non-trivial text.
    lexicon = {}
    for line in fileinput.input("anticipationSO.txt"):
        chunks = line.split()
        lexicon[chunks[0]] = chunks[1]
    fileinput.close()
    # Bug fix: both accumulators previously started as [[]], which left a
    # spurious empty list at the front of the returned result.
    # Deduplicate the scored words found in the text.
    scored_words = {(w, lexicon[w]) for w in allowed_words if w in lexicon}
    recommended_words = []
    for word, score in scored_words:
        print(word)
        for synset in wn.synsets(word):
            for lemma in synset.lemmas():
                name = lemma.name()
                if name in lexicon and int(lexicon[name]) < int(score):
                    recommended_words.append([word, score, name, lexicon[name]])
    return recommended_words
#print(increaseAnger(open("23.txt","r").read()))
| [
"charlesrajendran44@gmail.com"
] | charlesrajendran44@gmail.com |
08f3c63df0a0b2bb7b4b07f7f0a824e1c895fbf1 | 10199a6ffc89c3fe3dd8747385989f6dfa354b3e | /nornir/plugins/tasks/apis/http_method.py | e4c50292e393e3dbe5b66041c6479eab3a914a00 | [
"Apache-2.0"
] | permissive | billgrant/nornir | 134f151818b444cee6a46ef80b4cc3ec73da3262 | 837bf85902d1776022d19f460ebe559884a9ffbe | refs/heads/develop | 2020-04-08T09:45:50.163417 | 2018-11-26T08:31:18 | 2018-11-26T08:31:18 | 159,238,505 | 0 | 0 | Apache-2.0 | 2018-11-26T21:59:29 | 2018-11-26T21:59:26 | Python | UTF-8 | Python | false | false | 1,553 | py | from nornir.core.task import Result
import requests
def http_method(task=None, method="get", url="", raise_for_status=True, **kwargs):
    """
    This is a convenience task that uses `requests <http://docs.python-requests.org/en/master/>`_ to
    interact with an HTTP server.

    Arguments:
        method (string): HTTP method to call
        url (string): URL to connect to
        raise_for_status (bool): Whether to call `raise_for_status
            <http://docs.python-requests.org/en/master/api/#requests.Response.raise_for_status>`_
            method automatically or not. For quick reference, raise_for_status will consider an
            error if the return code is any of 4xx or 5xx

        **kwargs: Keyword arguments will be passed to the `request
            <http://docs.python-requests.org/en/master/api/#requests.request>`_
            method

    Returns:
        :obj:`nornir.core.task.Result`:
          * result (``string/dict``): Body of the response. Either text or a dict if the
            response was a json object
          * response (object): Original `Response
            <http://docs.python-requests.org/en/master/api/#requests.Response>`_
    """
    r = requests.request(method, url, **kwargs)

    if raise_for_status:
        r.raise_for_status()

    # requests' headers mapping is case-insensitive; fall back to plain text
    # when the server sends no Content-Type header at all.
    try:
        content_type = r.headers["Content-type"]
    except KeyError:
        content_type = "text"

    # Fix: match on the media-type prefix -- servers commonly send
    # "application/json; charset=utf-8", which the previous exact
    # string comparison failed to recognise as JSON.
    result = r.json() if content_type.startswith("application/json") else r.text
    return Result(host=task.host if task else None, response=r, result=result)
| [
"dbarrosop@dravetech.com"
] | dbarrosop@dravetech.com |
f5be676611bbe6c6f91ce40e74a2f5a2523b9938 | 3fe272eea1c91cc5719704265eab49534176ff0d | /scripts/portal/enterHRpt.py | 416069eee82ed5292391535cd8932742f8aea17a | [
"MIT"
] | permissive | Bratah123/v203.4 | e72be4843828def05592298df44b081515b7ca68 | 9cd3f31fb2ef251de2c5968c75aeebae9c66d37a | refs/heads/master | 2023-02-15T06:15:51.770849 | 2021-01-06T05:45:59 | 2021-01-06T05:45:59 | 316,366,462 | 1 | 0 | MIT | 2020-12-18T17:01:25 | 2020-11-27T00:50:26 | Java | UTF-8 | Python | false | false | 252 | py | # 222020000 - Ludi tower: Helios Tower <Library> (CoK 3rd job portal)
if not sm.hasQuest(20881): # 3rd job quest
    # Gate: only players who hold the 3rd-job advancement quest may enter.
    sm.chat("Only knights looking to job advance to the third job may enter here.")
else:
    # Warp into instance map 922030400 (second arg presumably the spawn
    # portal index -- confirm against the script manager API).
    sm.warpInstanceIn(922030400, 0)
# Release script control. NOTE(review): original indentation was lost in this
# copy; dispose() is placed at top level on the assumption it must run in both
# branches -- confirm against the upstream file.
sm.dispose()
| [
"pokesmurfuwu@gmail.com"
] | pokesmurfuwu@gmail.com |
692ce7baaccecf845f9c396f1e159091422e7e9f | 83977dad449cfb0d0401194bad9fdf3d5b794118 | /channelfilter/channelfilter.py | e0d8daf8e74b54a33893928c5413c56903be7d99 | [
"MIT"
] | permissive | Rick7C2/TWO-Cogs | 935fcb30ba2b6a929a77cbc50f794b807168afee | 5c5d470ac96780fbc05bb59ba3fa532f01587c81 | refs/heads/master | 2020-03-20T11:10:14.131098 | 2018-06-15T00:29:59 | 2018-06-15T00:29:59 | 137,394,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,196 | py | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2017 SML
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import os
from collections import defaultdict
import discord
from cogs.utils import checks
from cogs.utils.dataIO import dataIO
from discord.ext import commands
PATH = os.path.join("data", "channelfilter")
JSON = os.path.join(PATH, "settings.json")
def nested_dict():
    """Return a defaultdict whose missing keys lazily spawn further nested dicts."""
    return defaultdict(nested_dict)
class ChannelFilter:
    """Channel filter cog: per-channel banned-word lists with auto-delete."""

    def __init__(self, bot):
        """Keep a reference to the bot and load persisted settings."""
        self.bot = bot
        self.settings = dataIO.load_json(JSON)

    def init_server_settings(self, server):
        """Reset the given server's settings to an empty dict and persist."""
        self.settings[server.id] = {}
        dataIO.save_json(JSON, self.settings)

    def get_server_settings(self, server):
        """Return server settings, creating (and persisting) them if missing."""
        if server.id not in self.settings:
            self.settings[server.id] = {}
            dataIO.save_json(JSON, self.settings)
        return self.settings[server.id]

    def get_channel_settings(self, server, channel):
        """Return channel settings, creating (and persisting) them if missing."""
        server_settings = self.get_server_settings(server)
        if channel.id not in server_settings:
            self.settings[server.id][channel.id] = {}
            dataIO.save_json(JSON, self.settings)
        return self.settings[server.id][channel.id]

    def add_word(self, server, channel, word, reason=None):
        """Add a (lower-cased) word to the channel filter with an optional reason."""
        channel_settings = self.get_channel_settings(server, channel)
        channel_settings[word.lower()] = {
            'reason': reason
        }
        dataIO.save_json(JSON, self.settings)

    def remove_word(self, server, channel, word):
        """Remove a word from the channel filter; return True if it was present."""
        channel_settings = self.get_channel_settings(server, channel)
        success = channel_settings.pop(word, None)
        dataIO.save_json(JSON, self.settings)
        return success is not None

    @checks.mod_or_permissions()
    @commands.group(pass_context=True, aliases=['cf', 'cfilter'])
    async def channelfilter(self, ctx):
        """Filter words by channel."""
        if ctx.invoked_subcommand is None:
            await self.bot.send_cmd_help(ctx)

    @checks.is_owner()
    @channelfilter.command(name="init", pass_context=True)
    async def channelfilter_init(self, ctx):
        """Init server settings."""
        server = ctx.message.server
        self.init_server_settings(server)
        await self.bot.say("Settings initialized.")

    @checks.mod_or_permissions()
    @channelfilter.command(name="add", pass_context=True, no_pm=True)
    async def channelfilter_add(self, ctx, word, reason=None):
        """Add words."""
        server = ctx.message.server
        channel = ctx.message.channel
        self.add_word(server, channel, word, reason=reason)
        await self.bot.say("Added word to filter.")

    @checks.mod_or_permissions()
    @channelfilter.command(name="remove", pass_context=True, no_pm=True)
    async def channelfilter_remove(self, ctx, word):
        """Remove words."""
        server = ctx.message.server
        channel = ctx.message.channel
        success = self.remove_word(server, channel, word)
        if success:
            await self.bot.say("Removed word from filter.")
        else:
            await self.bot.say("Cannot find that word in filter.")

    @checks.mod_or_permissions()
    @channelfilter.command(name="list", pass_context=True, no_pm=True)
    async def channelfilter_list(self, ctx):
        """Words filtered in channel."""
        server = ctx.message.server
        channel = ctx.message.channel
        channel_settings = self.get_channel_settings(server, channel)
        if len(channel_settings.keys()) == 0:
            await self.bot.say("No words are filtered here.")
            return
        await self.bot.say(", ".join(channel_settings.keys()))

    @checks.mod_or_permissions()
    @channelfilter.command(name="listserver", pass_context=True, no_pm=True)
    async def channelfilter_listserver(self, ctx):
        """Words filtered on server."""
        server = ctx.message.server
        server_settings = self.get_server_settings(server)
        out = []
        for channel_id in server_settings:
            channel = self.bot.get_channel(channel_id)
            # Fix: get_channel returns None for deleted/unknown channels,
            # which previously crashed below on channel.id / channel.mention.
            if channel is None:
                continue
            channel_settings = self.get_channel_settings(server, channel)
            if len(channel_settings):
                out.append("{}: {}".format(channel.mention, ", ".join(channel_settings)))
        if not len(out):
            await self.bot.say("Nothing is filtered on this server.")
            return
        await self.bot.say(", ".join(out))

    async def on_message(self, message):
        """Warn regular users who post filtered words and delete the message."""
        server = message.server
        channel = message.channel
        author = message.author
        if server is None or self.bot.user == author:
            return
        valid_user = isinstance(author, discord.Member) and not author.bot
        # Ignore bots
        if not valid_user:
            return
        # Ignore people with manage messages perms
        if author.server_permissions.manage_messages:
            return
        channel_settings = self.get_channel_settings(server, channel)
        if not isinstance(channel_settings, dict):
            return
        for word in channel_settings.keys():
            if word.lower() in message.content.lower():
                # Fix: add_word stores reason=None when no reason is given, so
                # .get('reason', 'that') found the key and returned None,
                # producing "@user None." warnings. Fall back explicitly.
                reason = channel_settings[word].get('reason')
                if reason is None:
                    reason = 'that'
                await self.bot.send_message(
                    channel,
                    "{} {}. "
                    "Repeat offenders will be kicked/banned.".format(
                        author.mention,
                        reason
                    ))
                await self.bot.delete_message(message)
def check_folder():
    """Create the cog's data directory (and any parents) if it is missing."""
    os.makedirs(PATH, exist_ok=True)
def check_file():
    """Create the settings file with an empty JSON object if absent or invalid."""
    if not dataIO.is_valid_json(JSON):
        dataIO.save_json(JSON, {})
def setup(bot):
    """Cog entry point: ensure the data files exist, then register the cog."""
    check_folder()
    check_file()
    bot.add_cog(ChannelFilter(bot))
| [
"smlbiobot@gmail.com"
] | smlbiobot@gmail.com |
d30fbfbfc9bc1ad2db4327b992818ba0277297e0 | 178eeebc29c3b5501505d1508f52f52c7d62ffdc | /Code/problem17.py | 7eba0e3006b02503fb71a59a6fa7fa104258c31a | [] | no_license | krishnakalyan3/BSG | 926bcb312974943478107c9a15b47dce737726ca | 68f9a853ae803e8d2943bcb8ec31842acbdc2813 | refs/heads/master | 2021-01-11T19:58:35.152345 | 2017-01-19T09:05:28 | 2017-01-19T09:05:28 | 79,435,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | #!/usr/bin/env python2
# Counting Point Mutations
def hamming(x, y):
    """Return the Hamming distance over the common prefix of x and y.

    Positions beyond the shorter sequence are ignored (zip semantics).
    """
    return sum(1 for a, b in zip(x, y) if a != b)
if __name__ == '__main__':
    # Read the two sequences (one per line) from the Rosalind dataset.
    # NOTE(review): hard-coded absolute path only works on the author's machine.
    # Fix: close the file deterministically instead of leaking the handle.
    with open("/Users/krishna/Downloads/rosalind_hamm.txt") as fh:
        dataset = fh.read()
    #dataset = 'GAGCCTACTAACGGGAT\nCATCGTAATGACGGCCT'
    split_data = dataset.split('\n')
    # print(...) is valid in both Python 2 and 3 for a single argument.
    print(hamming(split_data[0], split_data[1]))
"krishnakalyan3@gmail.com"
] | krishnakalyan3@gmail.com |
0ac29342f999a38d4b563cd8fac442fcbc10d64d | 8ebf6311c3c1db40c7bb56051cf4e37e1b85a4f9 | /rm-server/gateway/gateway/router/projectmanager/project/user/list.py | 32c198c00b991b4d2b88403cc7cb22d51600f615 | [] | no_license | sq591442679/requirements-manager | e8b074afb7fd2a83632f2546d392dab4c35aeeeb | 6d664ce338b455150dcc9a86145967e8dd67a9dd | refs/heads/master | 2023-07-08T04:38:20.064019 | 2021-08-11T03:41:13 | 2021-08-11T03:41:13 | 392,877,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | from flask import request
from gateway.app import app
from gateway.http_client import projectmanager_http_client
from gateway.utils.handle_api import (
get_client_username, handle_request_response
)
@app.route('/project/user/list', methods=['GET'])
@handle_request_response
@get_client_username
def project_user_list(client_username: str):
    """Proxy GET /project/user/list to the projectmanager service.

    Forwards the caller's query string unchanged and returns the downstream
    (status_code, body) pair.
    """
    query_params = request.args.to_dict()
    return projectmanager_http_client.get(
        'project/user/list', client_username, params=query_params
    )
| [
"591442679@qq.com"
] | 591442679@qq.com |
270cd817464fdfc04fa940e1f3291e2453365dc8 | 42b920d39f6fa79b1b6f8805c788599122e353db | /transcode movie/TV Show/encryption/new.py | 7eb5cec57b311ef1d36c18f5cf2713e0a0de5f02 | [] | no_license | analyticsbot/Python-Code---Part-10 | 62a167beb4326824429edf50f2004256103ef9d6 | 979853c419ed1073b046c65a095131e1e3a2769e | refs/heads/master | 2021-06-05T18:22:30.696083 | 2016-08-27T02:42:10 | 2016-08-27T02:42:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | from Crypto.Cipher import AES
from Crypto.Util import Counter
# Key (16 bytes) and counter prefix (8 bytes) given as hex (Python 2 str.decode).
key = '7842f0a1ebc38f44e3e0c81943f68582'.decode('hex')
iv = '7842f0a1ebc38f44'.decode('hex')
# 64-bit counter appended to the 8-byte prefix -> one full 16-byte AES block.
ctr_e = Counter.new(64, prefix=iv, initial_value=0)
# Fix: a Counter object is only valid with CTR mode; PyCrypto raises
# TypeError("'counter' parameter only useful with CTR mode") when combined
# with MODE_CBC, so this script could never run as written.
encryptor = AES.new(key, AES.MODE_CTR, counter=ctr_e)
with open('Grass.out.jpg', 'wb') as fout:
    with open('Grass.jpg', 'rb') as fin:
        fout.write(encryptor.encrypt(fin.read()))
| [
"ravi.shankar1788@gmail.com"
] | ravi.shankar1788@gmail.com |
fcf79f0af457759532d20061e773bff084cc534c | 4c2c1775b6b319ae07155f46e70a6726ab0980c2 | /algo/algo_code/naga-algo/naga_interactive/script/tools/email_sender.py | 84d8d05ac3c42ef9c45cc00249239fb07efb8a8d | [] | no_license | kiminh/util | 8e4b204849a57941120e37c9330772f03c8892d0 | 763a71031d9c0ef207b87dc03ebc55208a2dd5ad | refs/heads/master | 2022-06-09T06:09:13.221754 | 2020-04-27T04:23:00 | 2020-04-27T04:23:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,747 | py | import email
import email.encoders
import email.mime.base
import email.mime.text
import logging
import smtplib
import time
import sys
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE
# Default SMTP connection settings used by MailSender.
# NOTE(review): credentials are hard-coded in source; they should be moved to
# environment variables or a secrets store and rotated.
MAIL_CONFIG = {
    "SENDER": 'noreply.ads.monitor@cootek.cn',
    'SMTP_SERVER': "smtp.partner.outlook.cn",
    "USER_NAME": "noreply.ads.monitor@cootek.cn",
    "PASSWORD": "ApdAll666",
    "EMAIL_PORT": "587",
    "EMAIL_RECEIVER": ["ling.fang@cootek.cn"]
}
class MailSender(object):
    """Thin SMTP client that sends an HTML message with optional attachments.

    Connection parameters default to the module-level MAIL_CONFIG. Call
    init() to open the STARTTLS connection before send_email().
    """

    def __init__(self, smtp_server=MAIL_CONFIG["SMTP_SERVER"], port=MAIL_CONFIG["EMAIL_PORT"],
                 user=MAIL_CONFIG["USER_NAME"], pwd=MAIL_CONFIG["PASSWORD"],
                 sender=MAIL_CONFIG["SENDER"], receiver_list=MAIL_CONFIG["EMAIL_RECEIVER"]):
        self.smtp_server = smtp_server
        self.port = port
        self.user = user
        self.pwd = pwd
        self.sender = sender
        self.receiver_list = receiver_list
        self.smtp = None  # set by init()

    def init(self):
        """Open the SMTP connection and upgrade it to TLS."""
        self.smtp = smtplib.SMTP(timeout=70)
        self.smtp.connect(self.smtp_server, self.port)
        self.smtp.starttls()
        self.smtp.set_debuglevel(0)

    def send_email(self, subject, msg, file_names=None, prefix=''):
        """Send an HTML message, retrying on SMTP errors.

        :param subject: message subject
        :param msg: HTML body
        :param file_names: attachment names, each opened as prefix + name
        :param prefix: path prefix prepended to every attachment name
        """
        # Fix: the original used a mutable default ([]) shared across calls.
        if file_names is None:
            file_names = []
        msg_root = MIMEMultipart('related')
        msg_root['Subject'] = subject
        msg_root['To'] = COMMASPACE.join(self.receiver_list)
        msg_text = MIMEText('%s' % msg, 'html', 'utf-8')
        msg_root.attach(msg_text)
        for file_name in file_names:
            suffix = file_name
            file_name = prefix + file_name
            # Attachments are labelled as Excel files and base64-encoded.
            attachment = email.mime.base.MIMEBase('application', 'vnd.ms-excel')
            with open(file_name, 'rb') as fp:
                attachment.set_payload(fp.read())
            email.encoders.encode_base64(attachment)
            attachment.add_header('Content-Disposition',
                                  'attachment;filename=' + suffix)
            msg_root.attach(attachment)
        # NOTE(review): retries forever on failure (20s backoff); consider a cap.
        while True:
            try:
                self.smtp.login(self.user, self.pwd)
                self.smtp.sendmail(self.sender, self.receiver_list, msg_root.as_string())
                break
            except Exception as e:
                print(e)
                try:
                    time.sleep(20)
                    # Fix: a bare connect() dials localhost:25; reconnect to the
                    # configured server and re-enable TLS so login can succeed.
                    self.smtp.connect(self.smtp_server, self.port)
                    self.smtp.starttls()
                except Exception as e:
                    logging.error("failed to login to smtp server, e: %s" % str(e))
if __name__ == "__main__":
    # CLI usage: email_sender.py <subject> <html_body>
    mail_subject = sys.argv[1]
    mail_body = sys.argv[2]
    sender = MailSender()
    sender.init()
    sender.send_email(subject=mail_subject, msg=mail_body)
| [
"ling@lingdeMacBook-Air.local"
] | ling@lingdeMacBook-Air.local |
bf517a269f1e1e2e3035381c5d0e2b41f3f1e470 | 82195c2a1fce4ec92bc843c815bf06f7bd9c782a | /test/functional/bitcoin_cli.py | 3ae91a61f6f43ec06248773e17ab4b3a8d1b25dd | [
"MIT"
] | permissive | cashgoldcoin/cashgoldcoin | 9ca2947ff000451182478afeb0726ebd07cdc274 | ec774b51a09379d2b9d16aaca6edc7e3661a64ab | refs/heads/master | 2020-04-06T07:21:11.480655 | 2018-11-12T20:55:19 | 2018-11-12T20:55:19 | 155,448,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoin-cli"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class TestBitcoinCli(BitcoinTestFramework):
    """Check that the CLI wrapper and the RPC interface agree on getinfo."""

    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True

    def run_test(self):
        """Main test logic"""
        self.log.info("Compare responses from getinfo RPC and `cashgoldcoin-cli getinfo`")
        node = self.nodes[0]
        assert_equal(node.cli.getinfo(), node.getinfo())
if __name__ == '__main__':
    # Standard functional-test entry point: framework handles setup/teardown.
    TestBitcoinCli().main()
| [
"you@example.com"
] | you@example.com |
2d8ae82ee205c9f5daffdedda2ad91f75f17f1a9 | 9d454ae0d5dd1d7e96e904ced80ca502019bb659 | /1588_sumOddLengthSubarrays.py | 8cb3fb3c870e0f20cfe1e03d33631e56dd2254c2 | [] | no_license | zzz686970/leetcode-2018 | dad2c3db3b6360662a90ea709e58d7facec5c797 | 16e4343922041929bc3021e152093425066620bb | refs/heads/master | 2021-08-18T08:11:10.153394 | 2021-07-22T15:58:52 | 2021-07-22T15:58:52 | 135,581,395 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | class Solution:
def sumOddLengthSubarrays(self, arr: List[int]) -> int:
length = len(arr)
res = 0
for i, val in enumerate(arr, 1):
res += (i * (length - i + 1) + 1) // 2 * val
return res
| [
"1564256031@qq.com"
] | 1564256031@qq.com |
77527a553d29938346edbcc692edd89fda02de2e | 77311ad9622a7d8b88707d7cee3f44de7c8860cb | /res/scripts/common/goodies/goodie_helpers.py | 2ef1ee6c219569bdef95cd2ff114b915d5135da2 | [] | no_license | webiumsk/WOT-0.9.14-CT | 9b193191505a4560df4e872e022eebf59308057e | cfe0b03e511d02c36ce185f308eb48f13ecc05ca | refs/heads/master | 2021-01-10T02:14:10.830715 | 2016-02-14T11:59:59 | 2016-02-14T11:59:59 | 51,606,676 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 4,542 | py | # 2016.02.14 12:44:31 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/goodies/goodie_helpers.py
from collections import namedtuple
from debug_utils import LOG_ERROR
from Goodies import GoodieException
from GoodieConditions import MaxVehicleLevel
from GoodieDefinition import GoodieDefinition
from GoodieResources import Gold, Credits, Experience, CrewExperience, FreeExperience
from GoodieTargets import BuyPremiumAccount, BuySlot, PostBattle, BuyGoldTankmen, FreeExperienceConversion
from GoodieValue import GoodieValue
from goodie_constants import GOODIE_TARGET_TYPE, GOODIE_CONDITION_TYPE, GOODIE_RESOURCE_TYPE
# Immutable record describing one goodie (booster/discount) definition row.
GoodieData = namedtuple('GoodieData', 'variety target enabled lifetime useby limit autostart condition resource')

# Numeric GOODIE_*_TYPE codes -> implementation classes (used by loadDefinitions).
_CONDITIONS = {GOODIE_CONDITION_TYPE.MAX_VEHICLE_LEVEL: MaxVehicleLevel}

_TARGETS = {GOODIE_TARGET_TYPE.ON_BUY_PREMIUM: BuyPremiumAccount,
            GOODIE_TARGET_TYPE.ON_BUY_SLOT: BuySlot,
            GOODIE_TARGET_TYPE.ON_POST_BATTLE: PostBattle,
            GOODIE_TARGET_TYPE.ON_BUY_GOLD_TANKMEN: BuyGoldTankmen,
            GOODIE_TARGET_TYPE.ON_FREE_XP_CONVERSION: FreeExperienceConversion}

_RESOURCES = {GOODIE_RESOURCE_TYPE.GOLD: Gold,
              GOODIE_RESOURCE_TYPE.CREDITS: Credits,
              GOODIE_RESOURCE_TYPE.XP: Experience,
              GOODIE_RESOURCE_TYPE.CREW_XP: CrewExperience,
              GOODIE_RESOURCE_TYPE.FREE_XP: FreeExperience}

# Forward maps: classes -> human-readable names.
GOODIE_CONDITION_TO_TEXT = {MaxVehicleLevel: 'max_vehicle_level'}

GOODIE_RESOURCE_TO_TEXT = {Gold: 'gold',
                           Credits: 'credits',
                           Experience: 'experience',
                           CrewExperience: 'crew_experience',
                           FreeExperience: 'free_experience'}

GOODIE_TARGET_TO_TEXT = {BuyPremiumAccount: 'premium',
                         BuySlot: 'slot',
                         PostBattle: 'post_battle',
                         BuyGoldTankmen: 'gold_tankmen',
                         FreeExperienceConversion: 'free_xp_conversion'}

# Reverse maps: human-readable names -> numeric GOODIE_*_TYPE codes.
GOODIE_TEXT_TO_CONDITION = {'max_vehicle_level': GOODIE_CONDITION_TYPE.MAX_VEHICLE_LEVEL}

GOODIE_TEXT_TO_RESOURCE = {'credits': GOODIE_RESOURCE_TYPE.CREDITS,
                           'experience': GOODIE_RESOURCE_TYPE.XP,
                           'crew_experience': GOODIE_RESOURCE_TYPE.CREW_XP,
                           'free_experience': GOODIE_RESOURCE_TYPE.FREE_XP,
                           'gold': GOODIE_RESOURCE_TYPE.GOLD}

GOODIE_TEXT_TO_TARGET = {'premium': GOODIE_TARGET_TYPE.ON_BUY_PREMIUM,
                         'slot': GOODIE_TARGET_TYPE.ON_BUY_SLOT,
                         'post_battle': GOODIE_TARGET_TYPE.ON_POST_BATTLE,
                         'gold_tankmen': GOODIE_TARGET_TYPE.ON_BUY_GOLD_TANKMEN,
                         'free_xp_conversion': GOODIE_TARGET_TYPE.ON_FREE_XP_CONVERSION}
class NamedGoodieData(GoodieData):
    """GoodieData with helpers for decoding the (type, value, ...) target tuple."""

    def getTargetValue(self):
        """Return the target value; premium targets store an int after '_'."""
        target_type = self.target[0]
        raw_value = self.target[1]
        if target_type != GOODIE_TARGET_TYPE.ON_BUY_PREMIUM:
            return raw_value
        return int(raw_value.split('_')[1])

    @property
    def targetID(self):
        """Target type code (one of GOODIE_TARGET_TYPE.*)."""
        return self.target[0]
def loadDefinitions(d):
    """Build GoodieDefinition objects from raw definition tuples keyed by uid.

    Each value of ``d`` is a 9-tuple matching the GoodieData fields; the
    numeric type codes inside are resolved through the module lookup tables.
    """
    definitions = {}
    for uid, record in d.iteritems():
        (variety, target_spec, enabled, lifetime, useby, limit,
         autostart, condition_spec, resource_spec) = record
        condition = None
        if condition_spec is not None:
            condition = _CONDITIONS.get(condition_spec[0])(condition_spec[1])
        target = _TARGETS[target_spec[0]](target_spec[1], target_spec[2])
        resource = _RESOURCES[resource_spec[0]]
        # resource_spec = (type_code, amount, is_percent)
        if resource_spec[2]:
            value = GoodieValue.percent(resource_spec[1])
        else:
            value = GoodieValue.absolute(resource_spec[1])
        definitions[uid] = GoodieDefinition(
            uid=uid, variety=variety, target=target, enabled=enabled,
            lifetime=lifetime, useby=useby, counter=limit,
            autostart=autostart, resource=resource, value=value,
            condition=condition)
    return definitions
def getPriceWithDiscount(price, value):
    """Apply a discount tuple (type_code, amount, is_percent) to a price.

    Percentage discounts are truncated to int and floored at 0.
    NOTE(review): the absolute branch is NOT floored and can return a
    negative price -- confirm whether that asymmetry is intended.
    """
    is_percent = value[2]
    if not is_percent:
        return price - value[1]
    discounted = int(price - price * (value[1] / 100.0))
    return max(discounted, 0)
def getPremiumCost(premiumCosts, goodie):
    """Discounted premium price for a premium-account goodie.

    Returns None when the goodie does not target premium purchase or when
    the duration key is missing from ``premiumCosts``.
    """
    if goodie.target[0] != GOODIE_TARGET_TYPE.ON_BUY_PREMIUM:
        return None
    base_price = premiumCosts.get(goodie.getTargetValue())
    if base_price is None:
        return None
    return getPriceWithDiscount(base_price, goodie.resource)
def loadPdata(pdataGoodies, goodies, logID):
    """Load persisted goodie records into the container, logging any failures."""
    for uid, record in pdataGoodies.iteritems():
        try:
            goodies.load(uid, record[0], record[1], record[2])
        except GoodieException as detail:
            LOG_ERROR('Cannot load a goodie', detail, logID)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\common\goodies\goodie_helpers.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:44:31 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
0cbce35c5daef0797c0b72a8d1e35e41e8716bc0 | da478f410908814c9be8044696dd2077889e3f82 | /0x1F-pascal_triangle/0-pascal_triangle.py | cc48432904bdc874fddaec8afd20dfd1008d73f2 | [] | no_license | sonnentag/holbertonschool-interview | 5c1c454cfe2e82b7bf4ba02d5aa19b5b5738d4a7 | d2d2f3159453a9c879cb1f8f205be504f53c4cae | refs/heads/main | 2023-07-18T09:06:19.653685 | 2021-08-26T16:19:11 | 2021-08-26T16:19:11 | 320,453,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | #!/usr/bin/python3
""" 0x1F-pascal_triangle
"""
def pascal_triangle(n):
    """Return the first n rows of Pascal's triangle as lists of ints.

    An empty list is returned for n <= 0. Each row is derived from the
    previous one by summing adjacent pairs and padding with 1 on both ends.
    """
    rows = []
    for _ in range(max(n, 0)):
        if rows:
            prev = rows[-1]
            row = [1] + [prev[i] + prev[i + 1] for i in range(len(prev) - 1)] + [1]
        else:
            row = [1]
        rows.append(row)
    return rows
"zocle@zocle.net"
] | zocle@zocle.net |
2f7f638e66f5e1b63eee06075a10ade093af2e5d | b7f3edb5b7c62174bed808079c3b21fb9ea51d52 | /third_party/blink/web_tests/external/wpt/bluetooth/generate.py | f7fffddbc301f3e555f75ad4cb2d067430628925 | [
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | otcshare/chromium-src | 26a7372773b53b236784c51677c566dc0ad839e4 | 64bee65c921db7e78e25d08f1e98da2668b57be5 | refs/heads/webml | 2023-03-21T03:20:15.377034 | 2020-11-16T01:40:14 | 2020-11-16T01:40:14 | 209,262,645 | 18 | 21 | BSD-3-Clause | 2023-03-23T06:20:07 | 2019-09-18T08:52:07 | null | UTF-8 | Python | false | false | 7,189 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# TODO(509038): Delete the file in LayoutTests/bluetooth after all the script
# tests have been migrated to this directory.
"""Generator script for Web Bluetooth LayoutTests.
For each script-tests/X.js creates the following test files depending on the
contents of X.js
- getPrimaryService/X.https.window.js
- getPrimaryServices/X.https.window.js
- getPrimaryServices/X-with-uuid.https.window.js
script-tests/X.js files should contain "CALLS([variation1 | variation2 | ...])"
tokens that indicate what files to generate. Each variation in CALLS([...])
should corresponds to a js function call and its arguments. Additionally a
variation can end in [UUID] to indicate that the generated file's name should
have the -with-uuid suffix.
The PREVIOUS_CALL token will be replaced with the function that replaced CALLS.
The FUNCTION_NAME token will be replaced with the name of the function that
replaced CALLS.
For example, for the following template file:
// script-tests/example.js
promise_test(() => {
return navigator.bluetooth.requestDevice(...)
.then(device => device.gatt.CALLS([
getPrimaryService('heart_rate')|
getPrimaryServices('heart_rate')[UUID]]))
.then(device => device.gatt.PREVIOUS_CALL);
}, 'example test for FUNCTION_NAME');
this script will generate:
// getPrimaryService/example.https.window.js
promise_test(() => {
return navigator.bluetooth.requestDevice(...)
.then(device => device.gatt.getPrimaryService('heart_rate'))
.then(device => device.gatt.getPrimaryService('heart_rate'));
}, 'example test for getPrimaryService');
// getPrimaryServices/example-with-uuid.https.window.js
promise_test(() => {
return navigator.bluetooth.requestDevice(...)
.then(device => device.gatt.getPrimaryServices('heart_rate'))
.then(device => device.gatt.getPrimaryServices('heart_rate'));
}, 'example test for getPrimaryServices');
Run
$ python //third_party/WebKit/LayoutTests/bluetooth/generate.py
and commit the generated files.
"""
import fnmatch
import os
import re
import sys
import logging
TEMPLATES_DIR = 'script-tests'
class GeneratedTest:
    """One generated test file: contents, destination path and source template."""

    def __init__(self, data, path, template):
        self.data = data
        self.path = path
        self.template = template
def GetGeneratedTests():
    """Yields a GeneratedTest for each call in templates in script-tests.

    NOTE: Python 2 script (uses str.decode('utf-8') on file reads).
    """
    bluetooth_tests_dir = os.path.dirname(os.path.realpath(__file__))
    # Read Base Test Template.
    base_template_file_handle = open(
        os.path.join(
            bluetooth_tests_dir,
            TEMPLATES_DIR,
            'base_test_js.template'
        ), 'r')
    base_template_file_data = base_template_file_handle.read().decode('utf-8')
    base_template_file_handle.close()
    # Get Templates: every .js file anywhere under script-tests/.
    template_path = os.path.join(bluetooth_tests_dir, TEMPLATES_DIR)
    available_templates = []
    for root, _, files in os.walk(template_path):
        for template in files:
            if template.endswith('.js'):
                available_templates.append(os.path.join(root, template))
    # Generate Test Files
    for template in available_templates:
        # Read template
        template_file_handle = open(template, 'r')
        template_file_data = template_file_handle.read().decode('utf-8')
        template_file_handle.close()
        template_name = os.path.splitext(os.path.basename(template))[0]
        # Find function names in multiline pattern: CALLS( [ function_name,function_name2[UUID] ])
        result = re.search(
            r'CALLS\(' + # CALLS(
            r'[^\[]*' + # Any characters not [, allowing for new lines.
            r'\[' + # [
            r'(.*?)' + # group matching: function_name(), function_name2[UUID]
            r'\]\)', # adjacent closing characters: ])
            template_file_data, re.MULTILINE | re.DOTALL)
        if result is None:
            raise Exception('Template must contain \'CALLS\' tokens')
        # Splice the template into the base file at its TEST token.
        new_test_file_data = base_template_file_data.replace('TEST',
                                                             template_file_data)
        # Replace CALLS([...]) with CALLS so that we don't have to replace the
        # CALLS([...]) for every new test file.
        new_test_file_data = new_test_file_data.replace(result.group(), 'CALLS')
        # Replace 'PREVIOUS_CALL' with 'CALLS' so that we can replace it while
        # replacing CALLS.
        new_test_file_data = new_test_file_data.replace('PREVIOUS_CALL', 'CALLS')
        # One output file per '|'-separated variation inside CALLS([...]).
        for call in result.group(1).split('|'):
            # Parse call into name, argument list and optional [UUID] suffix.
            call = call.strip()
            function_name, args, uuid_suffix = re.search(r'(.*?)\((.*)\)(\[UUID\])?', call).groups()
            # Replace template tokens
            call_test_file_data = new_test_file_data
            call_test_file_data = call_test_file_data.replace('CALLS', '{}({})'.format(function_name, args))
            call_test_file_data = call_test_file_data.replace('FUNCTION_NAME', function_name)
            # Get test file name; group_dir mirrors the template's parent dir.
            group_dir = os.path.basename(os.path.abspath(os.path.join(template, os.pardir)))
            call_test_file_name = 'gen-{}{}.https.window.js'.format(template_name, '-with-uuid' if uuid_suffix else '')
            call_test_file_path = os.path.join(bluetooth_tests_dir, group_dir, function_name, call_test_file_name)
            yield GeneratedTest(call_test_file_data, call_test_file_path, template)
def main():
    """Regenerate all gen-*.https.window.js files and report new/obsolete ones."""
    logging.basicConfig(level=logging.INFO)
    # Collect every previously generated file so we can diff afterwards.
    previous_generated_files = set()
    current_path = os.path.dirname(os.path.realpath(__file__))
    for root, _, filenames in os.walk(current_path):
        for filename in fnmatch.filter(filenames, 'gen-*.https.window.js'):
            previous_generated_files.add(os.path.join(root, filename))
    generated_files = set()
    for generated_test in GetGeneratedTests():
        # A duplicate path means two template variations collapsed to one file.
        prev_len = len(generated_files)
        generated_files.add(generated_test.path)
        if prev_len == len(generated_files):
            logging.info('Generated the same test twice for template:\n%s',
                         generated_test.template)
        # Create or open test file
        directory = os.path.dirname(generated_test.path)
        if not os.path.exists(directory):
            os.makedirs(directory)
        test_file_handle = open(generated_test.path, 'wb')
        # Write contents
        test_file_handle.write(generated_test.data.encode('utf-8'))
        test_file_handle.close()
    # Files generated now but not present before: newly added tests.
    new_generated_files = generated_files - previous_generated_files
    if len(new_generated_files) != 0:
        logging.info('Newly generated tests:')
        for generated_file in new_generated_files:
            logging.info(generated_file)
    # Files present before but not regenerated: candidates for deletion.
    obsolete_files = previous_generated_files - generated_files
    if len(obsolete_files) != 0:
        logging.warning('The following files might be obsolete:')
        for generated_file in obsolete_files:
            logging.warning(generated_file)
if __name__ == '__main__':
    # main() returns None, so sys.exit reports status 0 on success.
    sys.exit(main())
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
54617dd96ce0cb421893e586c7a3aa26247754de | 3fbbdc377c84b974bb488ceea1f9a146fd90fdbb | /clean_test.py | 100205c6de74b79851f71a4030893b330d6d9a42 | [] | no_license | sxtech/SX-UrlImgPackage | ee7bf21bf02a1062218de5398926771e7ba7b722 | 8c9d95d4c57825a1db2df5f5394cba8dfdb38286 | refs/heads/master | 2021-01-15T10:05:39.772962 | 2016-08-18T07:22:00 | 2016-08-18T07:22:00 | 33,332,624 | 1 | 0 | null | 2016-08-18T07:22:00 | 2015-04-02T21:25:38 | Python | UTF-8 | Python | false | false | 256 | py | import time
from img_package import app
from img_package.clean_worker import CleanWorker
def test_clean_worker():
    """Smoke-test CleanWorker: run it, wait, then touch the app's quit flag."""
    cw = CleanWorker()
    cw.main()
    time.sleep(10)
    # NOTE(review): this expression reads the config key and discards the
    # result -- it has no effect. It was probably meant to be an assignment
    # (e.g. app.config['IS_QUIT'] = True, to signal the worker to stop) or an
    # assertion on its value; confirm the intent.
    app.config['IS_QUIT']
if __name__ == "__main__":
    test_clean_worker()
| [
"smellycat2014@foxmail.com"
] | smellycat2014@foxmail.com |
e3d180db6d65983092e1957c5bab49d204219983 | 68e0a8fbfc5bcbcd2ceaca07e420b1d4ca8d02c1 | /src/brewlog/utils/views.py | fea936bcd0ee35c3956264e976df32d0dd86d966 | [
"BSD-3-Clause"
] | permissive | zgoda/brewlog | 391e710a63b8bd1c753caee7cf56dc1e0780fcfd | cbf9d7b14f0cdfd9241ae869cb5f28c9a1e817b4 | refs/heads/master | 2022-02-21T15:39:46.778749 | 2022-02-07T11:51:07 | 2022-02-07T11:51:07 | 95,431,146 | 3 | 0 | NOASSERTION | 2020-01-14T20:42:03 | 2017-06-26T09:33:18 | Python | UTF-8 | Python | false | false | 3,413 | py | import collections
from typing import Optional
from urllib.parse import urljoin, urlparse
from flask import abort, request, session, url_for
from flask_babel import lazy_gettext as _
from itsdangerous.exc import BadSignature, SignatureExpired
from itsdangerous.url_safe import URLSafeTimedSerializer
from permission import Permission, Rule
def next_redirect(fallback_endpoint: str, *args, **kwargs) -> str:
    """Pick the redirect target: request args, then session, then fallback.

    Candidates from the ``next`` request argument and the ``next`` session key
    (which is consumed) are used only if they pass is_redirect_safe();
    otherwise the url for ``fallback_endpoint`` is built with *args/**kwargs.

    :param fallback_endpoint: full endpoint specification
    :return: HTTP path to redirect to
    :rtype: str
    """
    candidates = (request.args.get('next'), session.pop('next', None))
    for candidate in candidates:
        if is_redirect_safe(candidate):
            return candidate
    return url_for(fallback_endpoint, *args, **kwargs)
def is_redirect_safe(target: Optional[str]) -> bool:
    """Return True if target is an http(s) URL pointing at the current site.

    Open-redirect guard: the target, resolved relative to the request host,
    must keep the same network location and use http or https.

    :param target: redirect target url
    :return: flag signalling whether redirect is safe
    :rtype: bool
    """
    if not target:
        return False
    here = urlparse(request.host_url)
    candidate = urlparse(urljoin(request.host_url, target))
    same_host = here.netloc == candidate.netloc
    return candidate.scheme in ('http', 'https') and same_host
class RuleBase(Rule):
    """Permission rule bound to the object it is evaluated against."""

    def __init__(self, obj):
        self.obj = obj
        super().__init__()
class PublicAccessRuleBase(RuleBase):
    """Rule whose denial hides the resource entirely (HTTP 404)."""

    def deny(self):
        abort(404)
class OwnerAccessRuleBase(RuleBase):
    """Rule whose denial signals forbidden access (HTTP 403)."""

    def deny(self):
        abort(403)
class PermissionBase(Permission):
    """Permission that instantiates ``rule_class`` around the guarded object."""

    # Subclasses must set this to a RuleBase subclass.
    rule_class = None

    def __init__(self, obj):
        self.obj = obj
        super().__init__()

    def rule(self):
        return self.rule_class(self.obj)
class AccessManagerBase:
    """Runs a primary and (conditionally) a secondary permission check."""
    # Subclasses assign PermissionBase subclasses to these.
    primary = None
    secondary = None
    def __init__(self, obj, secondary_condition):
        self.obj = obj
        self.perms = []
        if self.primary:
            self.perms.append(self.primary(obj))
        # The secondary permission is applied only when the caller's
        # condition holds (e.g. the resource is non-public).
        if self.secondary and secondary_condition:
            self.perms.append(self.secondary(obj))
    def check(self):
        # The first failing permission aborts the request via its deny().
        for perm in self.perms:
            if not perm.check():
                perm.deny()
# Outcome of a token validation: error flag, human-readable message
# (None on success) and the deserialized payload (None on failure).
TokenCheckResult = collections.namedtuple(
    'TokenCheckResult', ['is_error', 'message', 'payload']
)
def check_token(token: str, secret: str, max_age: int) -> TokenCheckResult:
    """Validate a signed token and return the outcome.

    :param token: token to check
    :param secret: secret that was used to generate the token
    :param max_age: maximum accepted token age, in seconds
    :return: validation result; ``payload`` is set only on success
    """
    serializer = URLSafeTimedSerializer(secret)
    try:
        data = serializer.loads(token, max_age=max_age)
    except SignatureExpired as e:
        message = _(
            "token expired, it's valid for 48 hrs and it was generated on %(date)s",
            date=e.date_signed,
        )
        return TokenCheckResult(True, message=message, payload=None)
    except BadSignature:
        return TokenCheckResult(True, message=_('invalid token'), payload=None)
    return TokenCheckResult(False, message=None, payload=data)
| [
"jarek.zgoda@gmail.com"
] | jarek.zgoda@gmail.com |
02eacc35cfb41f8410152f22332522879c2551a3 | b1b7206d4c8fb878e47bd2cd3a4e6b2a1f94cdc4 | /hw3/code/tip3/test_script.py | ad3345cf10406447eaabe3762601d306f5da5eb0 | [] | no_license | vachelch/MLDS | a6752e2e568841841389e50e7750d658e2157c29 | e4fcaeae3b8a63aaefbf07b9ca0c107e9601ad31 | refs/heads/master | 2020-05-04T23:08:00.507062 | 2019-04-04T16:56:23 | 2019-04-04T16:56:23 | 179,533,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | from dataset import Dataset
import numpy as np
import matplotlib
# Select a non-interactive backend before pyplot is imported so the script
# can run without a display (e.g. on a remote training server).
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Visual sanity check for the Dataset pipeline: draw a 5x5 grid of images
# from one batch and save the figure to log/test.png.
dataset = Dataset()
np.random.seed(0)
r, c = 5, 5
dataset.data_generator(64)
true_imgs = dataset.next_batch()
# Reorder axes for imshow — presumably (N, C, H, W) -> (N, H, W, C); confirm
# against Dataset.next_batch.
true_imgs = true_imgs.transpose(0, 2, 3, 1)
fig, axs = plt.subplots(r, c)
cnt = 0
for i in range(r):
    for j in range(c):
        axs[i,j].imshow(true_imgs[cnt, :,:,:])
        axs[i,j].axis('off')
        cnt += 1
fig.savefig('log/test.png')
plt.close()
"r06944043@ntu.edu.tw"
] | r06944043@ntu.edu.tw |
fef291a802c179905e0f84f64fa6d6589ddd24b7 | ed6c1d30ced7e984ae507f5a25ebe4d92b33b5d8 | /segno_mimos/qrcode/main.py | 272d54bec4920f76f2925aac87269f9e1d825a92 | [
"BSD-3-Clause"
] | permissive | heuer/segno-mimos | 222f7720b183a08f20b15cf5971e567d5808c740 | 0b1b220c63fcda9fcaa0e42725ea719651a1d53e | refs/heads/master | 2021-07-12T02:20:55.216745 | 2020-08-01T00:54:42 | 2020-08-01T00:54:42 | 64,956,618 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,542 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2011, Lincoln Loop
# Copyright (c) 2016 - 2017 -- Lars Heuer - Semagia <http://www.semagia.com/>.
# All rights reserved.
#
# License: BSD License
#
from __future__ import absolute_import, unicode_literals, print_function
import warnings
import segno
from . import constants, exceptions, util
from segno.utils import check_valid_scale, check_valid_border
from segno_mimos.qrcode.image.base import BaseImage
try:
from qrcode.image.base import BaseImage as qrcodeBaseImage
except ImportError:
qrcodeBaseImage = BaseImage
try: # pragma: no cover
range = xrange # Python 2
except NameError:
pass
# <https://wiki.python.org/moin/PortingToPy3k/BilingualQuickRef#New_Style_Classes>
__metaclass__ = type
def make(data=None, **kw):
    """Build a QR Code from *data* and return its rendered image.

    Keyword arguments are forwarded verbatim to the :class:`QRCode`
    constructor.
    """
    code = QRCode(**kw)
    code.add_data(data)
    return code.make_image()
def _check_valid_factory(img_factory):
    # Accept any known image-factory class or, failing that, anything that
    # quacks like one (exposes ``drawrect``).  NOTE(review): ``assert`` is
    # stripped under ``python -O``, so this guard is best-effort only.
    if img_factory is not None:
        assert issubclass(img_factory, (BaseImage, qrcodeBaseImage)) or hasattr(img_factory, 'drawrect')
class QRCode:
    """qrcode-library-compatible facade that delegates QR generation to segno."""
    def __init__(self, version=None, error_correction=constants.ERROR_CORRECT_M,
                 box_size=10, border=4, image_factory=None):
        check_valid_scale(box_size)
        # ``version and int(version)`` keeps None meaning "auto-select version".
        self.version = version and int(version)
        self.error_correction = int(error_correction)
        self.box_size = int(box_size)
        self.border = int(border)
        self.image_factory = image_factory
        _check_valid_factory(image_factory)
        self.clear()
    def clear(self):
        # Reset all derived state; forces a re-make before the next render.
        self.modules = None
        self.modules_count = 0
        self.data_cache = None
        self.data_list = []
        self.segno_qrcode = None
    def add_data(self, data, optimize=20):
        # Queue payload for encoding.  NOTE(review): here ``optimize`` only
        # toggles chunking on/off; the numeric threshold itself is ignored.
        if isinstance(data, util.QRData):
            self.data_list.append(data)
        else:
            if optimize:
                chunks = tuple(util.optimal_data_chunks(data))
                self.data_list.extend(chunks)
            else:
                self.data_list.append(util.QRData(data))
        # New data invalidates any previously built code.
        self.data_cache = None
    def make(self, fit=True):
        # ``fit`` lets segno pick the smallest version that holds the data.
        if fit:
            self.version = None
        self.makeImpl(False, None)
    def makeImpl(self, test, mask_pattern):
        if test:
            warnings.warn('"test" is not supported')
        # Delegate the actual encoding to segno, then mirror its result into
        # the attributes the qrcode API exposes (modules, version, ...).
        segno_qrcode = segno.make_qr(self.data_list or '', mode=None,
                                     version=self.version,
                                     error=self.error_correction,
                                     eci=False, boost_error=False, mask=mask_pattern)
        self.data_cache = True
        self.segno_qrcode = segno_qrcode
        self.modules_count = len(segno_qrcode.matrix)
        self.modules = [[bool(b) for b in row] for row in segno_qrcode.matrix]
        self.version = segno_qrcode.version
    def print_tty(self, out=None):
        # Lazily build the code before rendering to the terminal.
        if self.data_cache is None:
            self.make()
        print(self.segno_qrcode.terminal(out=out, border=self.border))
    def print_ascii(self, out=None, tty=False, invert=False):
        # NOTE(review): ``tty`` and ``invert`` are accepted for API
        # compatibility but are not passed on to segno's renderer.
        if self.data_cache is None:
            self.make()
        print(self.segno_qrcode.terminal(out=out, border=self.border))
    def make_image(self, image_factory=None, **kw):
        """Render the QR Code, preferring segno's native writers when possible."""
        check_valid_scale(self.box_size)
        check_valid_border(self.border)
        if self.data_cache is None:
            self.make()
        image_factory = image_factory or self.image_factory
        _check_valid_factory(image_factory)
        # Formats segno can write directly are handled by the _Image shim;
        # the factory's scale/border/background settings are folded in.
        if image_factory is None or image_factory.kind in ('PNG', 'EPS', 'PDF', 'SVG'):
            config = dict(scale=self.box_size, border=self.border)
            kind = None
            if image_factory is not None:
                kind = image_factory.kind
                try:
                    config.update(image_factory.config)
                except AttributeError:
                    pass
                try:
                    config['background'] = image_factory.background
                except AttributeError:
                    pass
            return _Image(self.segno_qrcode, config, kind)
        # Anything else falls back to the factory's drawrect protocol: one
        # call per dark module.
        im = image_factory(self.border, self.modules_count, self.box_size, **kw)
        for r in range(self.modules_count):
            for c in range(self.modules_count):
                if self.modules[r][c]:
                    im.drawrect(r, c)
        return im
    def get_matrix(self):
        """Return the module matrix as booleans, padded with the quiet zone."""
        if self.data_cache is None:
            self.make()
        if not self.border:
            return self.modules
        width = len(self.modules) + self.border*2
        # Top quiet-zone rows (shared list objects are fine: rows are not
        # mutated afterwards).
        code = [[False]*width] * self.border
        x_border = [False]*self.border
        for module in self.modules:
            code.append(x_border + module + x_border)
        code += [[False]*width] * self.border
        return code
class _Image:
    """\
    This class is almost similar to qrcode.image.pil.PilImage and is able to
    save a QR Code in all output formats which are common by qrcode and Segno.
    """
    kind = None
    allowed_kinds = ('PNG', 'EPS', 'PDF', 'SVG')
    def __init__(self, segno_qrcode, config, kind):
        self._qrcode = segno_qrcode
        self.default_config = config
        self.width = len(segno_qrcode.matrix)
        self.kind = kind
    def save(self, stream, format=None, kind=None, **kw):
        """Write the QR Code to ``stream``; the format defaults to ``self.kind``."""
        fmt = format
        if fmt is None:
            fmt = kind or self.kind
        if fmt is not None:
            fmt = fmt.lower()
        config = dict(self.default_config)
        # Remember whether a background was chosen at all; vector formats
        # drop the implicit default background further below.
        background_was_set = 'back_color' in kw or 'background' in kw or 'background' in config
        # Accept both qrcode-style (fill_color/back_color) and segno-style
        # (color/background) option names.
        config['color'] = kw.pop('fill_color', config.get('color', '#000'))
        config['background'] = kw.pop('back_color', kw.pop('background', config.get('background', '#fff')))
        if config['background'] == 'transparent':
            config['background'] = None
        if fmt == 'svg':
            # SVG default config
            svg_config = dict(scale=config.get('scale', 10) / 10, unit='mm', svgversion=1.1)
            config.update(svg_config)
        config.update(kw)
        if fmt in (None, 'png'):
            self._qrcode.save(stream, kind='png', **config)
            return
        if not background_was_set and fmt in ('eps', 'pdf', 'svg'):
            # Remove background color if not set explictly
            config['background'] = None
        if fmt in ('eps', 'pdf', 'svg'):
            self._qrcode.save(stream, kind=fmt, **config)
            return
        raise ValueError('Unsupported format "{}"'.format(fmt))
| [
"heuer@semagia.com"
] | heuer@semagia.com |
397d9bb3a19a2767981145d677e1eb98ddc7bab3 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /utils/ETF/Redemption_HA/YW_ETFSS_SHSH_051.py | 59e59b0cdd08a7a3a4f4f1930d68b0081e0fc709 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,694 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
import time
sys.path.append("/home/yhl2/workspace/xtp_test/ETF")
from import_common import *
sys.path.append("/home/yhl2/workspace/xtp_test/ETF/etf_service")
from ETF_GetComponentShare import etf_get_all_component_stk
from ETF_Add import etf_add
class YW_ETFSS_SHSH_051(xtp_test_case):
    """Case ATC-204-051: same-day ETF buy, redeem, re-subscribe and component sell.

    All business strings (dict keys/values such as '期望状态') are protocol
    data consumed by the test framework and are kept verbatim.
    """
    def test_YW_ETFSS_SHSH_051(self):
        # -----------ETF redemption-------------
        title = 'T日购买ETF-T日赎回当天申购的ETF-' \
                'T日用当天赎回的成分股申购ETF-T日卖出T日赎回的成分股'
        # Expected values for this test case.
        # Expected status is one of: initial / pending / partially filled /
        # fully filled / partial-cancel reported / partially cancelled /
        # cancel pending / cancelled / rejected / cancel-rejected /
        # internally cancelled.
        # xtp_ID and cancel_xtpID default to 0 and need no change.
        case_goal = {
            'case_ID': 'ATC-204-051',
            '期望状态': '全成',
            'errorID': 0,
            'errorMSG': '',
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title + ', case_ID=' + case_goal['case_ID'])
        unit_info = {
            'ticker': '580480',  # ETF ticker
            'etf_unit': 1,  # number of ETF redemption units
            'component_unit_buy': 1,  # component-stock buy units
            'etf_unit_buy': 1,  # ETF buy units
            'component_unit_sell': 1  # component-stock sell units
        }
        # -----------buy the ETF on the secondary market-----------
        etf_add(Api,
                Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                unit_info['ticker'],
                unit_info['etf_unit_buy'])
        time.sleep(3)
        # -----------query component-stock positions before subscription-------------
        component_stk_info = etf_get_all_component_stk(unit_info['ticker'])
        # -----------ETF redemption-------------
        # Arguments: ticker, market, security type, security status, trading
        # status, side ('B' buy / 'S' sell), expected status, Api.
        stkparm = QueryEtfQty(unit_info['ticker'], '1', '14', '2', '0',
                              'B', case_goal['期望状态'], Api)
        # Build the order parameters ------------------------------------------
        # If fetching the order parameters failed, the case fails.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '用例错误原因': '获取下单参数失败, ' + stkparm['错误原因'],
            }
            etf_query_log(case_goal, rs)
            self.assertEqual(rs['用例测试结果'], True)
        else:
            wt_reqs = {
                'business_type':
                    Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_ETF'],
                'order_client_id':
                    2,
                'market':
                    Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker':
                    stkparm['证券代码'],
                'side':
                    Api.const.XTP_SIDE_TYPE['XTP_SIDE_REDEMPTION'],
                'price_type':
                    Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
                'quantity':
                    int(unit_info['etf_unit'] * stkparm['最小申赎单位']),
            }
            EtfParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = etfServiceTest(Api, case_goal, wt_reqs, component_stk_info)
            etf_creation_log(case_goal, rs)
            # -----------ETF subscription-------------
            # This order is expected to be rejected with the error below.
            case_goal['期望状态'] = '废单'
            case_goal['errorID'] = 11010121
            case_goal['errorMSG'] = 'Failed to check security quantity.'
            # Build the order parameters ------------------------------------------
            # If fetching the order parameters failed, the case fails.
            wt_reqs = {
                'business_type':
                    Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_ETF'],
                'market':
                    Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker':
                    stkparm['证券代码'],
                'side':
                    Api.const.XTP_SIDE_TYPE['XTP_SIDE_PURCHASE'],
                'price_type':
                    Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
                'quantity':
                    int(unit_info['etf_unit'] * stkparm['最小申赎单位']),
            }
            EtfParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = etfServiceTest(Api, case_goal, wt_reqs)
            etf_creation_log(case_goal, rs)
            # ------------sell the component stocks on the secondary market-----------
            case_goal['期望状态'] = '全成'
            case_goal['errorID'] = 0
            case_goal['errorMSG'] = ''
            etf_component_info = QueryEtfComponentsInfoDB(stkparm['证券代码'],wt_reqs['market'])
            rs = {}
            for stk_info in etf_component_info:
                if stk_info[1] != 2:
                    stk_code = stk_info[0]
                    components_share = QueryEtfComponentsDB(stkparm['证券代码'],
                                                            stk_code)
                    components_total = int(components_share *
                                           unit_info['component_unit_sell'])
                    quantity = get_valid_amount(components_total)
                    limitup_px = getUpPrice(stk_code)
                    wt_reqs = {
                        'business_type':
                            Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                        'order_client_id':
                            2,
                        'market':
                            Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                        'ticker':
                            stk_code,
                        'side':
                            Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                        'price_type':
                            Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
                        'price':
                            limitup_px,
                        'quantity':
                            quantity,
                    }
                    ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
                    rs = serviceTest(Api, case_goal, wt_reqs)
                    if rs['用例测试结果'] is False:
                        etf_components_sell_log(case_goal, rs)
                        self.assertEqual(rs['用例测试结果'], True)
            etf_components_sell_log(case_goal, rs)
            self.assertEqual(rs['用例测试结果'], True)
# Allow running this case directly as a standalone unittest script.
if __name__ == '__main__':
    unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
3ab6e6f99098e0b8731959aad64c12f1b6a211bd | cde60eed1c85120ae4ec2bc3a2bb4cb418359aee | /lib/disco/mr_path.py | 8449564f47a4928dde7a93f7c791c745438cac35 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | aronwc/quac | c2750c01902b95aead40d75f7c7e8fe5aa9c6e15 | f383b4ffba529d19f2c1d24496e5125118d1e13b | refs/heads/master | 2021-01-15T16:46:40.382175 | 2013-05-22T04:28:27 | 2013-05-22T04:28:27 | 9,962,349 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,571 | py | '''Disco has a scheme to detect which modules your stuff uses, put them in a
zip file, and copy it to workers. In theory, this enables you to install
absolutely nothing app-specific on worker nodes. Unfortunately, it works
poorly (e.g., it misses modules) and has some very weird quirks (e.g.,
packages are not supported). However, if you have a filesystem shared by
all the Disco nodes (e.g., via NFS), you can put your stuff in $PYTHONPATH
and let workers find it that way. Unfortunately, Disco mangles $PYTHONPATH.
This module works around that. To use, copy $PYTHONPATH to $PYTHONPATH_COPY
in your .bashrc, restart the Disco master, then place the following two
lines at the top of your Python scripts before any Disco stuff:
import mr_path
mr_path.fix_pythonpath()
Notes:
1. You will need to be able import *this module* before you can fix the
path; to do so, you'll want to set required_modules (mr.base.Job does
this automatically).
2. This module still doesn't fix the problem that Disco programs (e.g.,
modules with subclasses of disco.*) cannot be packaged. There is a
failed attempt at that in r3be9. Perhaps another wrapper is possible.
There is a bug for (perhaps part of) this problem:
<https://github.com/discoproject/disco/issues/328>'''
import os
import sys
# Module-level guard so repeated calls do not re-add the same entries.
path_fixed = False

def fix_pythonpath():
    """Prepend the entries of $PYTHONPATH_COPY to sys.path (idempotent).

    Disco mangles $PYTHONPATH on workers, so users stash the original value
    in $PYTHONPATH_COPY; this restores it.  Entries are prepended so that the
    final sys.path preserves the variable's left-to-right precedence.

    Raises:
      KeyError: if $PYTHONPATH_COPY is not set in the environment.
    """
    global path_fixed
    if not path_fixed:
        # Iterate in reverse while inserting at the front; inserting each
        # entry at index 0 in forward order (the previous behaviour) would
        # reverse the precedence of the original $PYTHONPATH entries.
        for entry in reversed(os.environ['PYTHONPATH_COPY'].split(':')):
            sys.path.insert(0, entry)
        path_fixed = True
| [
"reidpr@lanl.gov"
] | reidpr@lanl.gov |
f061ccdde841c5c5ac087dabb6928fff1c970d56 | 68c29e7a17d87e34b1d6613c3e2e70a36fd2adcc | /easy/349_two_array_intersection.py | 1afa7cccb3ed847d3c43ac2e980d55f050a64209 | [
"MIT"
] | permissive | Sukhrobjon/leetcode | 284242fbfded3e47a57ce9230f9bc1175685cd7a | 547c200b627c774535bc22880b16d5390183aeba | refs/heads/master | 2022-02-26T20:56:57.347119 | 2022-02-05T01:58:49 | 2022-02-05T01:58:49 | 192,158,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | """
Given two arrays, write a function to compute their intersection.
Note:
- Each element in the result must be unique.
- The result can be in any order.
link: https://leetcode.com/problems/intersection-of-two-arrays/
"""
class Solution(object):
    def intersection(self, nums1, nums2):
        """Return the distinct values present in both input arrays.

        The result contains each common element exactly once, in no
        particular order.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: List[int]
        """
        common = set(nums1) & set(nums2)
        return list(common)
# Quick manual check; expected output is [2].
nums1 = [1, 2, 2, 1]
nums2 = [2, 2]
obj = Solution()
result = obj.intersection(nums1, nums2)
print(result)
| [
"sgolibbo@mail.ccsf.edu"
] | sgolibbo@mail.ccsf.edu |
196b3a4ec3a18c69c259e617cb54c0e9ce008877 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/4040/278004040.py | 7a7cf770c5ac97ef0155d9ded6220558dac39a57 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,581 | py | from bots.botsconfig import *
from records004040 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'HI',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BHT', MIN: 1, MAX: 1},
{ID: 'HL', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'TRN', MIN: 0, MAX: 9},
{ID: 'AAA', MIN: 0, MAX: 9},
{ID: 'UM', MIN: 0, MAX: 1},
{ID: 'HCR', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 9},
{ID: 'DTP', MIN: 0, MAX: 9},
{ID: 'HI', MIN: 0, MAX: 1},
{ID: 'HSD', MIN: 0, MAX: 1},
{ID: 'CRC', MIN: 0, MAX: 9},
{ID: 'CL1', MIN: 0, MAX: 1},
{ID: 'CR1', MIN: 0, MAX: 1},
{ID: 'CR2', MIN: 0, MAX: 1},
{ID: 'CR4', MIN: 0, MAX: 1},
{ID: 'CR5', MIN: 0, MAX: 1},
{ID: 'CR6', MIN: 0, MAX: 1},
{ID: 'CR7', MIN: 0, MAX: 1},
{ID: 'CR8', MIN: 0, MAX: 1},
{ID: 'PWK', MIN: 0, MAX: 99999},
{ID: 'MSG', MIN: 0, MAX: 1},
{ID: 'NM1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'REF', MIN: 0, MAX: 9},
{ID: 'N2', MIN: 0, MAX: 1},
{ID: 'N3', MIN: 0, MAX: 1},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 3},
{ID: 'AAA', MIN: 0, MAX: 9},
{ID: 'PRV', MIN: 0, MAX: 1},
{ID: 'DMG', MIN: 0, MAX: 1},
{ID: 'INS', MIN: 0, MAX: 1},
{ID: 'DTP', MIN: 0, MAX: 9},
]},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
408f877ad0c183781906ef0d631c6a5bce6ec140 | 600df3590cce1fe49b9a96e9ca5b5242884a2a70 | /third_party/catapult/catapult_build/run_with_typ.py | b9c69f2e88db8e8169d48a419a76c5d2f48295b3 | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | metux/chromium-suckless | efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a | 72a05af97787001756bae2511b7985e61498c965 | refs/heads/orig | 2022-12-04T23:53:58.681218 | 2017-04-30T10:59:06 | 2017-04-30T23:35:58 | 89,884,931 | 5 | 3 | BSD-3-Clause | 2022-11-23T20:52:53 | 2017-05-01T00:09:08 | null | UTF-8 | Python | false | false | 920 | py | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A wrapper around typ (test your projects)."""
import os
import sys
def Run(top_level_dir, path=None):
  """Runs a set of Python tests using typ.

  Args:
    top_level_dir: Directory to look for Python unit tests in.
    path: A list of extra paths to add to sys.path when running the tests.

  Returns:
    An exit code (0 for success, otherwise non-zero).
  """
  # Locate the vendored copy of typ relative to this file.
  typ_path = os.path.abspath(os.path.join(
      os.path.dirname(__file__), os.path.pardir, 'third_party', 'typ'))
  _AddToPathIfNeeded(typ_path)
  # Import lazily: typ only becomes importable after the line above has put
  # its vendored directory on sys.path.
  import typ
  return typ.main(
      top_level_dir=top_level_dir,
      path=(path or []),
      coverage_source=[top_level_dir])
def _AddToPathIfNeeded(path):
  """Prepends |path| to sys.path unless it is already present."""
  if path in sys.path:
    return
  sys.path.insert(0, path)
| [
"enrico.weigelt@gr13.net"
] | enrico.weigelt@gr13.net |
a57cf4df9f0068460b9410a8f12b9098b79114f7 | 6cf70b611cc4d45a7c3e63d818f100f8be895314 | /067_二进制求和/Solution.py | db242d61bda7d2ca30ea0466d7b824d3ed6133e2 | [] | no_license | hhy5277/LeetCode-9 | 19bed5482841e7dcdc346093b6fb17ed769fe72e | cf240ff3c9124a1af87b6d5f49ec426ef248298c | refs/heads/master | 2020-06-19T00:19:30.194373 | 2019-06-05T13:53:44 | 2019-06-05T13:53:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,043 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/4/28 21:01
# @Author : zenRRan
# @Version : python3.7
# @File : Solution.py
# @Software: PyCharm
class Solution:
    def addBinary(self, a: str, b: str) -> str:
        """Add two binary-digit strings and return their binary sum."""
        # An empty operand contributes nothing: the other string is the sum.
        if not a:
            return b
        if not b:
            return a
        # Left-pad both operands to a common width so they can be scanned
        # in lockstep from the least significant digit.
        width = max(len(a), len(b))
        x, y = a.zfill(width), b.zfill(width)
        digits = []
        carry = 0
        for bit_a, bit_b in zip(reversed(x), reversed(y)):
            carry, bit = divmod(int(bit_a) + int(bit_b) + carry, 2)
            digits.append(str(bit))
        if carry:
            digits.append('1')
        # Digits were collected least-significant first.
        return ''.join(reversed(digits))
# Smoke test; expected output: '100' then '10101'.
data = [['11', '1'], ['1010', '1011']]
for elem in data:
    print(Solution().addBinary(elem[0], elem[1]))
| [
"824203828@qq.com"
] | 824203828@qq.com |
4ce566297c9b085acc9406eff21a80e0d11f3e64 | 58afefdde86346760bea40690b1675c6639c8b84 | /leetcode/find-in-mountain-array/397500377.py | 8e6b2ccf21e4d4d83c72f3ba712108de64aea34f | [] | no_license | ausaki/data_structures_and_algorithms | aaa563f713cbab3c34a9465039d52b853f95548e | 4f5f5124534bd4423356a5f5572b8a39b7828d80 | refs/heads/master | 2021-06-21T10:44:44.549601 | 2021-04-06T11:30:21 | 2021-04-06T11:30:21 | 201,942,771 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,516 | py | # title: find-in-mountain-array
# detail: https://leetcode.com/submissions/detail/397500377/
# datetime: Fri Sep 18 23:28:26 2020
# runtime: 28 ms
# memory: 14.6 MB
# """
# This is MountainArray's API interface.
# You should not implement it, or speculate about its implementation
# """
#class MountainArray:
# def get(self, index: int) -> int:
# def length(self) -> int:
class Solution:
    def findInMountainArray(self, target: int, mountain_arr: 'MountainArray') -> int:
        # Lazy sequence view over a slice of the MountainArray so that the
        # stdlib bisect can binary-search it without reading every element.
        # ``rev=True`` presents the slice [l, r] back to front, turning the
        # descending side of the mountain into an ascending sequence.
        class Wrap:
            def __init__(self, l, r, rev=False):
                self.l = l
                self.r = r
                self.rev = rev
            def __getitem__(self, i):
                return A.get(self.l + i) if not self.rev else A.get(self.r - i)
            def __len__(self):
                return self.r - self.l + 1
        A = mountain_arr
        L = A.length()
        # Binary search for the peak: find the first index whose predecessor
        # is not smaller; afterwards j is the peak index.
        i, j = 0, L - 1
        while i <= j:
            m = (i + j) // 2
            v = A.get(m)
            # Sentinel -1 for m == 0: assumes array values are >= 0 — TODO
            # confirm against the problem's constraints.
            w = A.get(m - 1) if m else -1
            if w < v:
                i = m + 1
            else:
                j = m - 1
        # print(i, j)
        i = j
        # Search the ascending slope [0, peak].
        l = Wrap(0, i)
        k = bisect.bisect_left(l, target)
        if k >= len(l):
            return -1
        if l[k] == target:
            return k
        # Search the descending slope (peak, L-1], viewed reversed; map the
        # hit back to the original index.
        r = Wrap(i + 1, L - 1, True)
        k = bisect.bisect_left(r, target)
        if k >= len(r):
            return -1
        if r[k] == target:
            return L - 1 - k
        return -1
"ljm51689@gmail.com"
] | ljm51689@gmail.com |
7fa4b7b39352d11a19ca84ce9da4278700dbb0ed | 632dcb4e37cadd87cb7ff8715b0048df5cd0d11b | /cc3d/tests/plugin_test_suite/connectivity_global_fast_test_run/Simulation/connectivity_global_fastSteppables.py | d544b685fd2eb386fca3d8171176a6269cfa8604 | [] | no_license | CompuCell3D/CompuCell3D | df638e3bdc96f84b273978fb479842d071de4a83 | 65a65eaa693a6d2b3aab303f9b41e71819f4eed4 | refs/heads/master | 2023-08-26T05:22:52.183485 | 2023-08-19T17:13:19 | 2023-08-19T17:13:19 | 12,253,945 | 51 | 41 | null | 2023-08-27T16:36:14 | 2013-08-20T20:53:07 | C++ | UTF-8 | Python | false | false | 544 | py |
from cc3d.core.PySteppables import *
class connectivity_global_fastSteppable(SteppableBasePy):
    """Steppable scaffold for the connectivity_global_fast simulation.

    All lifecycle hooks are currently no-ops.
    """
    def __init__(self,frequency=1):
        # ``frequency``: run step() every this many Monte Carlo steps.
        SteppableBasePy.__init__(self,frequency)
    def start(self):
        """Called once before MCS=0; any setup code goes here."""
    def step(self,mcs):
        """Called every ``frequency`` MCS.

        :param mcs: current Monte Carlo step
        """
    def finish(self):
        """Called once after the last MCS."""
| [
"maciekswat@gmail.com"
] | maciekswat@gmail.com |
fb50a368bcc5d8f9b46c504181ecff05ab605d77 | e7f67295e62fc5301ab23bce06c61f2311c2eeee | /mjml/scripts/mjml-html-compare | f49399ce8f7dc8cc9ba742460090ff97b3623b1d | [
"MIT"
] | permissive | bayesimpact/mjml-stub | 94d10588359990cd58d2085429b19a3777c51f15 | 30bab3f2e197d2f940f58439f2e8cd9fadb58d48 | refs/heads/main | 2023-05-08T11:54:19.313877 | 2021-01-25T21:30:48 | 2021-01-25T21:30:48 | 344,026,118 | 0 | 0 | MIT | 2021-03-03T06:31:49 | 2021-03-03T06:31:48 | null | UTF-8 | Python | false | false | 471 | #!/usr/bin/env python3
from pathlib import Path
import sys

from htmlcompare import assert_same_html

from mjml import mjml_to_html

# CLI: mjml-html-compare <input.mjml> <expected.html>
# Renders the MJML file and asserts the result matches the expected HTML.
mjml_filename = Path(sys.argv[1])
html_filename = Path(sys.argv[2])

with mjml_filename.open('rb') as mjml_fp:
    result = mjml_to_html(mjml_fp)
with html_filename.open('rb') as html_fp:
    expected_html = html_fp.read()

# Rendering must succeed before the markup comparison is meaningful.
assert not result.errors
actual_html = result.html
assert_same_html(expected_html, actual_html, verbose=True)
| [
"felix.schwarz@oss.schwarz.eu"
] | felix.schwarz@oss.schwarz.eu | |
9360433c0a871cbdf2706b2337e8d4a813b08125 | 920b9cb23d3883dcc93b1682adfee83099fee826 | /pipeline/variable_framework/migrations/0001_initial.py | 56856837e4fec961b57f7bb1f80e956d630c85ae | [
"MIT",
"LGPL-2.1-or-later",
"LGPL-3.0-only"
] | permissive | TencentBlueKing/bk-itsm | f817fb166248d3059857b57d03e8b5ec1b78ff5b | 2d708bd0d869d391456e0fb8d644af3b9f031acf | refs/heads/master | 2023-08-31T23:42:32.275836 | 2023-08-22T08:17:54 | 2023-08-22T08:17:54 | 391,839,825 | 100 | 86 | MIT | 2023-09-14T08:24:54 | 2021-08-02T06:35:16 | Python | UTF-8 | Python | false | false | 1,470 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration for the variable_framework app: creates VariableModel
    # (verbose names are Chinese, given as unicode escapes).

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name="VariableModel",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                # "code": unique variable code; "status": whether the variable is usable.
                ("code", models.CharField(max_length=255, unique=True, verbose_name="\u53d8\u91cf\u7f16\u7801")),
                ("status", models.BooleanField(default=True, verbose_name="\u53d8\u91cf\u662f\u5426\u53ef\u7528")),
            ],
            options={"verbose_name": "Variable\u53d8\u91cf", "verbose_name_plural": "Variable\u53d8\u91cf"},
        ),
    ]
| [
"1758504262@qq.com"
] | 1758504262@qq.com |
38f4aaa11589e5d2daef4908a156e77873951653 | 77a7e01cf07531c8d4764c6e3edbbf956855a936 | /data-processing/tests/test_match_symbols.py | e7c3221cabccb396500fd7270b135827a86149c8 | [
"Apache-2.0",
"AGPL-3.0-only",
"GPL-1.0-or-later",
"AGPL-3.0-or-later"
] | permissive | huhuaping/scholarphi | ccc6afa8b2cfea1888748a7457fb882a98e15286 | ca892b41ab96a48b88183d8c06a26b9374c9167d | refs/heads/main | 2023-08-31T00:00:23.375640 | 2021-09-19T07:55:12 | 2021-09-19T07:55:12 | 374,073,210 | 0 | 0 | Apache-2.0 | 2021-06-05T09:29:32 | 2021-06-05T09:29:32 | null | UTF-8 | Python | false | false | 1,912 | py | from common.match_symbols import Match, get_mathml_matches
# Shared fixture defaults.  NOTE(review): not referenced by any test in this
# file chunk — possibly dead.
DEFAULT_TEX_PATH = "tex-path"
DEFAULT_EQUATION_INDEX = 0
def test_matches_self():
    """A symbol matches itself with rank 1 when self-matches are allowed."""
    mathml = "<mi>x</mi>"
    matches = get_mathml_matches([mathml])
    assert len(matches) == 1
    assert matches[mathml] == [Match(mathml, mathml, 1)]
def test_matches_symbol_with_shared_base():
    """Symbols sharing a base identifier (x_i, x^2) match each other."""
    x_sub_i = "<msub><mi>x</mi><mi>i</mi></msub>"
    x_squared = "<msup><mi>x</mi><mn>2</mn></msup>"
    matches = get_mathml_matches([x_sub_i, x_squared], allow_self_matches=False)
    assert matches[x_sub_i] == [Match(x_sub_i, x_squared, 1)]
    assert matches[x_squared] == [Match(x_squared, x_sub_i, 1)]
def test_exact_match_ranks_higher_than_partial_match():
    """An exact (self) match gets rank 1, a shared-base match rank 2."""
    x_sub_i = "<msub><mi>x</mi><mi>i</mi></msub>"
    x_squared = "<msup><mi>x</mi><mn>2</mn></msup>"
    matches = get_mathml_matches([x_sub_i, x_squared])
    assert matches[x_sub_i] == [
        Match(x_sub_i, x_sub_i, 1),
        Match(x_sub_i, x_squared, 2),
    ]
def test_does_not_match_base_to_subscript():
    """A bare identifier (i) does not match a symbol using it as subscript (x_i)."""
    i = "<mi>i</mi>"
    x_sub_i = "<msub><mi>x</mi><mi>i</mi></msub>"
    matches = get_mathml_matches([i, x_sub_i], allow_self_matches=False)
    assert i not in matches
    assert x_sub_i not in matches
def test_does_not_match_using_shared_subscript():
    """A shared subscript alone (x_i vs t_i) does not make two symbols match."""
    x_sub_i = "<msub><mi>x</mi><mi>i</mi></msub>"
    t_sub_i = "<msub><mi>t</mi><mi>i</mi></msub>"
    matches = get_mathml_matches([x_sub_i, t_sub_i], allow_self_matches=False)
    assert x_sub_i not in matches
    assert t_sub_i not in matches
def test_omit_duplicate_matches():
    """Duplicate input symbols produce a single match entry, not one per copy."""
    x1 = "<msub><mi>x</mi><mn>1</mn></msub>"
    x2 = "<msub><mi>x</mi><mn>2</mn></msub>"
    # While x2 is included in the list of MathML equations twice, only one match between
    # x1 and x2 should be included in the matches data.
    matches = get_mathml_matches([x1, x2, x2], allow_self_matches=False)
    assert len(matches[x1]) == 1
| [
"head.andrewm@gmail.com"
] | head.andrewm@gmail.com |
8769ebbd283b5ccdb8e35c30ab2f020c91949723 | eddb5cc6ece559a21fb2d99dc03fb4b9e3e1ddb0 | /fagaiwei/others_test/xueqiu_test.py | d27ccff8868027c398543e88e64c99b26309ce54 | [] | no_license | KKtoNN/python_spider | a9bdd005d607b1265a556cb4908e84804c0bfc62 | c72bd061c3ca4145fef85b0fd9c15576441cdb09 | refs/heads/master | 2020-03-18T22:50:00.131802 | 2018-05-30T00:47:56 | 2018-05-30T00:47:56 | 135,367,902 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | import requests
# Scraping experiment: fetch Xueqiu's public timeline JSON for one category.
# url = "http://news.people.com.cn/210801/211150/index.js?_=1525332714933"
url = "https://xueqiu.com/v4/statuses/public_timeline_by_category.json?since_id=-1&max_id=-1&count=10&category=111"
headers = {
    # "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    # "Accept-Encoding": "gzip, deflate, br",
    # "Accept-Language": "zh-CN,zh;q=0.9",
    # "Cache-Control": "max-age=0",
    # "Connection": "keep-alive",
    # "Host": "xueqiu.com",
    # "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36",
}
st_url = "https://xueqiu.com/"
session = requests.session()
# Visit the home page first — presumably to pick up the session cookies the
# API endpoint requires; verify if the API call starts failing without it.
res = session.get(st_url, headers=headers)
response = session.get(url, headers=headers)
print(response)
# print(response.json())
result = response.json()
print(result)
| [
"18835702864@163.com"
] | 18835702864@163.com |
40a16ce415508f488033c0f2f56fa0b17b24df6e | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_156/406.py | 4ee40c64dba71311688697c965764f7c08bc02f6 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,338 | py | from math import ceil
def solve_problem(file_name):
    """Run every test case from <file_name>.in and write answers to <file_name>.out.

    Input format: first line is the number of cases; each case is one line
    with the stack count followed by one line of stack sizes.

    Returns "Done" on completion.
    """
    input_file = file_name + ".in"
    output_file = file_name + ".out"
    # Context managers guarantee both handles are closed (the original
    # leaked both file objects).
    with open(input_file, "r") as f, open(output_file, "w") as g:
        # Get test cases:
        test_cases = int(f.readline())
        for test_case in range(1, test_cases + 1):
            f.readline()  # stack count line; implied by the next line, unused
            # list() so the values can be iterated more than once (a bare
            # map() is a one-shot iterator under Python 3).
            pancakes = list(map(int, f.readline().split()))
            answer = solve_test_case(pancakes)
            result = "Case #" + str(test_case) + ": " + str(answer) + "\n"
            g.write(result)
            # Parenthesized print works under both Python 2 and 3.
            print(result)
    return "Done"
def solve_test_case(pancakes):
    """Return the minimum total time for one test case.

    Strategy: fix ``cap``, the most pancakes any single stack may hold after
    splitting.  The total time is then ``cap`` minutes of eating plus one
    minute per split move needed to bring every stack down to ``cap``; take
    the best over all candidate caps.
    """
    best_time = float("inf")
    # No cap above the tallest given stack can help, so try 1..max.
    for cap in range(1, max(pancakes) + 1):
        split_moves = sum(moves_needed(stack, cap) for stack in pancakes)
        best_time = min(best_time, cap + split_moves)
    return best_time
def moves_needed(pancakes, max_pancakes):
    """Splits required to break a stack of ``pancakes`` into piles of at most
    ``max_pancakes`` each.

    Splitting into ceil(pancakes / max_pancakes) piles takes one fewer move
    than the pile count.  Integer ceiling division replaces the original
    float-based ``math.ceil``, which could round incorrectly for large
    inputs due to float precision.
    """
    piles = (pancakes + max_pancakes - 1) // max_pancakes
    return piles - 1
# Parenthesized print is valid under both Python 2 and 3; the bare
# ``print x`` statement made this file unimportable under Python 3.
print(solve_problem("B-large"))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
35dc1fb247c5ddb4c0bce809ecfd038db2097050 | ef821468b081ef2a0b81bf08596a2c81e1c1ef1a | /Python OOP/Testing-Exercise/Code_For_Testing/Account.py | 3e74b71551a7fbf33d20b751a7e5e99e0818cc42 | [] | no_license | Ivaylo-Atanasov93/The-Learning-Process | 71db22cd79f6d961b9852f140f4285ef7820dd80 | 354844e2c686335345f6a54b3af86b78541ed3f3 | refs/heads/master | 2023-03-30T20:59:34.304207 | 2021-03-29T15:23:05 | 2021-03-29T15:23:05 | 294,181,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,475 | py | class Account:
def __init__(self, owner, amount=0):
self.owner = owner
self.amount = amount
self._transactions = []
def add_transaction(self, amount):
if isinstance(amount, int):
self._transactions.append(amount)
#self.amount += amount
else:
raise ValueError("please use int for amount")
@property
def balance(self):
return sum(self._transactions) + self.amount
@staticmethod
def validate_transaction(account, amount_to_add):
if account.amount + sum(account._transactions) + amount_to_add < 0:
raise ValueError("sorry cannot go in debt!")
else:
account._transactions.append(amount_to_add)
return f"New balance: {sum(account._transactions) + account.amount}"
def __len__(self):
return len(self._transactions)
def __str__(self):
return f'Account of {self.owner} with starting amount: {self.amount}'
def __repr__(self):
return f'Account({self.owner}, {self.amount})'
def __getitem__(self, item):
return self._transactions[item]
def __reversed__(self):
return reversed(self._transactions)
def __gt__(self, other):
return self.balance > other.balance
def __ge__(self, other):
return self.balance >= other.balance
def __lt__(self, other):
return self.balance < other.balance
def __le__(self, other):
return self.balance <= other.balance
def __eq__(self, other):
return self.balance == other.balance
def __ne__(self, other):
return self.balance != other.balance
def __add__(self, other):
new_account = Account(f'{self.owner}&{other.owner}' , self.amount + other.amount)
new_account._transactions = self._transactions + other._transactions
return new_account
# acc = Account('bob', 10)
# acc2 = Account('john')
# print(acc)
# print(repr(acc))
# acc.add_transaction(20)
# acc.add_transaction(-20)
# acc.add_transaction(30)
# print(acc.balance)
# print(len(acc))
# for transaction in acc:
# print(transaction)
# print(acc[1])
# print(list(reversed(acc)))
# acc2.add_transaction(10)
# acc2.add_transaction(60)
# print(acc > acc2)
# print(acc >= acc2)
# print(acc < acc2)
# print(acc <= acc2)
# print(acc == acc2)
# print(acc != acc2)
# acc3 = acc + acc2
# print(acc3)
# print(acc3._transactions)
# print(Account.validate_transaction(acc, 100))
| [
"ivailo.atanasov93@gmail.com"
] | ivailo.atanasov93@gmail.com |
652a8caf01f28a33c48720bd5911ab324e989653 | 9b6f36f544af5a2c1c042b18dda920c78fd11331 | /omsBackend/omsBackend/urls.py | 6d22eadd6ee826298b7574e14a6d7ef4ea919b65 | [] | no_license | Nikita-stels/MyOms | a946f08b4ba7abfa8392e98c579320b501a7ca2a | fdaf9d5a2a29b5386c1a86fcf89a2c0d5527687a | refs/heads/master | 2022-09-17T20:40:45.228067 | 2020-01-08T14:41:04 | 2020-01-08T14:41:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | # -*- coding: utf-8 -*-
# author: kiven
# URL configuration for the omsBackend project: REST API routes, salt
# management endpoints, and JWT-based authentication endpoints.
from django.conf.urls import url, include
from django.conf.urls.static import static
from rest_framework_jwt.views import obtain_jwt_token
from rest_auth.views import PasswordChangeView
from django.views.generic.base import TemplateView
from omsBackend import settings
from omsBackend.routerApi import router
from apps.perms.views import routers
from apps.jobs.views import update_jobs_status
from apps.salts.views import update_states_status, get_state_bygroup
# The version module automatically registers Models that need version control.
# Media files are served from MEDIA_ROOT in addition to the API routes below.
urlpatterns = static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) + [
    url(r'^api/', include(router.urls)),
    url(r'^api/routers/', routers, name="myrouter"),
    url(r'^api/update_jobs_status/', update_jobs_status, name="update_jobs_status"),
    url(r'^api/update_states_status/', update_states_status, name="update_states_status"),
    url(r'^api/get_state_bygroup/', get_state_bygroup, name="get_state_bygroup"),
    # salt
    url(r'^api/salts/', include('apps.salts.urls')),
    # user authentication (password change + JWT token endpoints)
    url(r'^api/changepasswd/', PasswordChangeView.as_view(), name='changepasswd'),
    url(r'^api/api-token-auth/', obtain_jwt_token, name='rest_framework_token'),
    url(r'^api/api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    # url(r'', TemplateView.as_view(template_name="index.html")),
]
| [
"1069195546@qq.com"
] | 1069195546@qq.com |
2b933a9ae42334721a3cae44e3413bd2ebcc44b3 | 4ec2b9c52dfa1d80fff89cead0f4cc8ec2874c1f | /1_tf_intro/exercises_sol/sol1_nNetReg.py | 12776d0d34843aa661b7b87ef8cda4c9a5c743c7 | [] | no_license | rsanchezgarc/deepLearningCourse | 31439ba9640662f0840ee7f5c58657d4dd5b6c5f | 175e3514e2b767ca2f5b4c88e891a777d95f513b | refs/heads/master | 2020-03-21T02:26:50.702008 | 2018-06-20T21:44:33 | 2018-06-20T21:44:33 | 137,998,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,406 | py | import tensorflow as tf
import numpy as np
from keras.datasets import boston_housing
from sklearn import cross_validation
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
N_EPOCHS= 2
N_HIDDEN= 64
BATCH_SIZE= 32
LEARNING_RATE= 1e-10 #PLAY with learning rate. try 1e-1, 1e-2 ...
#load data
def generateData1(size=1024):
    """Synthetic regression set: y = x1 + x2 + x3 plus uniform noise in [0, 0.1).

    Features are ``size`` rows of 3 uniform values in [0, 10). Returns
    ((x_train, y_train), (x_test, y_test)) with a 90/10 split; the fixed
    random_state keeps the split reproducible across runs.
    """
    x= np.random.rand(size, 3)*10
    y= np.expand_dims( np.sum(x, axis=1) + np.random.rand(size)*.1, axis=-1)
    x_train, x_test, y_train, y_test = cross_validation.train_test_split(x, y, test_size=0.1, random_state=121)
    return (x_train, y_train), (x_test, y_test)
def generateData2():
    """Boston-housing regression data with labels reshaped to column vectors.

    Labels are expanded to shape (n, 1) so they match the (None, 1)
    labels placeholder used by the model below.
    """
    (x_train, y_train), (x_test, y_test) = boston_housing.load_data()
    y_train= np.expand_dims(y_train, axis= -1)
    y_test= np.expand_dims(y_test, axis= -1)
    return (x_train, y_train), (x_test, y_test)
(x_train, y_train), (x_test, y_test) = generateData1()
#Normalize data. ( (x-mean)/std )
normalizer= StandardScaler()
x_train=normalizer.fit_transform(x_train)
x_test=normalizer.transform(x_test)
#split train and validation
x_train, x_validation, y_train, y_validation = cross_validation.train_test_split(x_train, y_train, test_size=0.1, random_state=121)
print(x_train.shape, y_train.shape)
#Model definition
inputPh= tf.placeholder(dtype=tf.float32, shape=[None, x_train.shape[1]], name="inputData") #shape= N_Examples x nFeats
labelsPh= tf.placeholder(dtype=tf.float32, shape=[None, 1], name="labelsData")
with tf.variable_scope("hidden_layer"):
w= tf.get_variable(name="weights", shape=[x_train.shape[1],N_HIDDEN], dtype=tf.float32, #shape= nFeats x N_HIDDEN
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1, dtype=tf.float32, seed=None),
regularizer=None, trainable=True)
b= tf.get_variable(name="bias", shape=[N_HIDDEN], dtype=tf.float32, #shape= N_HIDDEN
initializer=tf.constant_initializer(value=0.01, dtype=tf.float32),
regularizer=None, trainable=True)
h1_out= tf.nn.relu( tf.matmul(inputPh,w) + b)
with tf.variable_scope("output_layer"):
w= tf.get_variable(name="weights", shape=[N_HIDDEN,1], dtype=tf.float32, #shape= N_HIDDEN x 1
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1, dtype=tf.float32, seed=None),
regularizer=None, trainable=True)
b= tf.get_variable(name="bias", shape=[1], dtype=tf.float32,
initializer=tf.constant_initializer(value=0.01, dtype=tf.float32),
regularizer=None, trainable=True)
y_pred= tf.matmul(h1_out,w) + b
error = tf.reduce_mean(( tf.square(labelsPh -y_pred) ) ) #shape= N_Examples x 1
#error = tf.losses.mean_squared_error(labelsPh, y_pred) #Equivalent but prefered
optimizer= tf.train.GradientDescentOptimizer(learning_rate= LEARNING_RATE)
#optimizer= tf.train.AdamOptimizer(learning_rate= LEARNING_RATE) #Smarter optimizer
global_step = tf.Variable(0, name='global_step', trainable=False)
train_step = optimizer.minimize(error, global_step=global_step)
session = tf.Session()
session.run(tf.global_variables_initializer())
#FUNCTION TO EVALUATE
def coefficient_of_determination(y_true,y_pred):
    """R^2 score: 1 - SS_res / SS_tot, where the baseline predicts the mean.

    Equals 1.0 for perfect predictions and 0.0 for predictions no better
    than always guessing the mean of ``y_true``.
    """
    residual_ss = np.sum((y_pred - y_true) ** 2)
    baseline = [np.mean(y_true) for _ in y_true]
    baseline_ss = np.sum((baseline - y_true) ** 2)
    return 1 - (residual_ss / baseline_ss)
nStep=0
for nEpoch in range( N_EPOCHS ):
x_train, y_train = shuffle(x_train, y_train, random_state=121)
labels_train= []
preds_train= []
for i in range(0, x_train.shape[0], BATCH_SIZE):
feed_dict= {inputPh: x_train[i:i + BATCH_SIZE, ...], labelsPh: y_train[i:i + BATCH_SIZE]}
__, y_pred_train, errorExample= session.run([train_step, y_pred, error], feed_dict=feed_dict)
nStep+=1
labels_train.append( y_train[i:i + BATCH_SIZE])
preds_train.append( y_pred_train)
#EVALUATE VALIDATION DATA
labels_val= []
preds_val= []
for i in range(0, x_validation.shape[0], BATCH_SIZE):
feed_dict= {inputPh: x_validation[i:i + BATCH_SIZE, ...], labelsPh: y_validation[i:i + BATCH_SIZE]}
y_pred_val, errorVal= session.run([y_pred, error], feed_dict=feed_dict)
labels_val.append( y_validation[i:i + BATCH_SIZE])
preds_val.append(y_pred_val)
preds_train= np.concatenate(preds_train)
labels_train= np.concatenate(labels_train)
train_r2= coefficient_of_determination(labels_train, preds_train)
preds_val= np.concatenate(preds_val)
labels_val= np.concatenate(labels_val)
val_r2= coefficient_of_determination(labels_val, preds_val)
print("Epoch %d. train_r2 %f val_r2 %f"%(nEpoch, train_r2, val_r2))
#REPORT PERFORMANCE ON TEST SET
labels_test= []
preds_test= []
for i in range(0, x_test.shape[0], BATCH_SIZE):
feed_dict= {inputPh: x_test[i:i + BATCH_SIZE, ...], labelsPh: y_test[i:i + BATCH_SIZE]}
y_pred_test, errorTest= session.run([y_pred, error], feed_dict=feed_dict)
labels_test.append( y_test[i:i + BATCH_SIZE])
preds_test.append(y_pred_test)
preds_test= np.concatenate(preds_test)
labels_test= np.concatenate(labels_test)
test_r2= coefficient_of_determination(labels_test, preds_test)
print("END. test_r2 %f"%(test_r2))
session.close()
| [
"rubensanchezgarc@gmail.com"
] | rubensanchezgarc@gmail.com |
f988587e634f0ff1e7eafdfc1516557550cfc51e | 16c4d625ad9e945471a2a267e9992c7e9260214f | /project/settings/base.py | c4e2c96e358bdf8bb8c54a749f025709c08eb5ae | [
"BSD-2-Clause"
] | permissive | andywar65/rp_repo | 8cea1c81533250b49a4036fb9b0ff6e93a0dde66 | 726c1426d738b962cabeabd8995aa35767df0c41 | refs/heads/master | 2023-05-26T13:47:48.329624 | 2021-06-05T08:35:05 | 2021-06-05T08:35:05 | 255,056,987 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,238 | py | """
Django settings for project_repo project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
"""WARNING:
Commits to this file may not be cherry-picked by branches"""
import os
import json
from django.core.exceptions import ImproperlyConfigured
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
APPLICATION_DIR = os.path.dirname(BASE_DIR)
with open(os.path.join(APPLICATION_DIR, 'secrets.json')) as f:
secrets = json.loads(f.read())
def get_secret(setting, secrets=secrets):
    '''Return the value for ``setting`` from the ``secrets`` mapping.

    Defaults to the module-level ``secrets`` dict loaded from
    secrets.json. Raises ImproperlyConfigured with an explicit message
    when the key is missing (pattern from Two Scoops of Django).
    '''
    try:
        return secrets[setting]
    except KeyError:
        error_msg = 'Set the {0} environment variable'.format(setting)
        raise ImproperlyConfigured(error_msg)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = [
'filebrowser',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.postgres',
'users.apps.UsersConfig',
'pages.apps.PagesConfig',
'blog.apps.BlogConfig',
'cronache.apps.CronacheConfig',
'criterium.apps.CriteriumConfig',
'direzione.apps.DirezioneConfig',
'wordpress.apps.WordpressConfig',
'streamblocks',
'streamfield',
'captcha',
'taggit',
'crispy_forms',
'treebeard',
'private_storage',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(PROJECT_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'project.processors.get_global_settings',
],
},
},
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, "static"),
]
TAGGIT_CASE_INSENSITIVE = True
FILEBROWSER_VERSIONS = {
'admin_thumbnail': {'verbose_name': 'Admin Thumbnail', 'width': 60, 'height': 60, 'opts': 'crop'},
'thumbnail': {'verbose_name': 'Thumbnail (1 col)', 'width': 60, 'height': 60, 'opts': 'crop'},
'small': {'verbose_name': 'Small (2 col)', 'width': 140, 'height': '', 'opts': ''},
'medium': {'verbose_name': 'Medium (4col )', 'width': 300, 'height': '', 'opts': ''},
'big': {'verbose_name': 'Big (6 col)', 'width': 460, 'height': '', 'opts': ''},
'large': {'verbose_name': 'Large (8 col)', 'width': 680, 'height': '', 'opts': ''},
'wide_landscape': {'verbose_name': 'Orizzontale', 'width': 2048, 'height': 1024, 'opts': 'crop'},
'landscape': {'verbose_name': 'Orizzontale', 'width': 1280, 'height': 720, 'opts': 'crop'},
'portrait': {'verbose_name': 'Verticale', 'width': 768, 'height': 1024, 'opts': 'crop'},
'square': {'verbose_name': 'Quadrato', 'width': 768, 'height': 768, 'opts': 'crop'},
}
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = get_secret('LANGUAGE_CODE')#'en-us'
TIME_ZONE = get_secret('TIME_ZONE')
USE_I18N = True
USE_L10N = True
USE_TZ = True
AUTH_USER_MODEL = 'users.User'
#This stuff has nothing to do with django.site
WEBSITE_NAME = get_secret('WEBSITE_NAME')
WEBSITE_ACRO = get_secret('WEBSITE_ACRO')
#footer external links
#make your own, add them in project.processors.get_global_settings
FB_LINK = get_secret('FB_LINK')
INSTA_LINK = get_secret('INSTA_LINK')
TWIT_LINK = get_secret('TWIT_LINK')
IN_LINK = get_secret('IN_LINK')
GITHUB_LINK = get_secret('GITHUB_LINK')
EXT_LINK = get_secret('EXT_LINK')
| [
"andy.war1965@gmail.com"
] | andy.war1965@gmail.com |
ff86504950716ef63ae817247b33c8cfba3d5316 | 92bfcfaedb69b0d5c032f6a9b3ad70c0e06f7c53 | /ex12.py | 9622ce88c3c10d6bed96f9a1bb3fcd9bd5f1c55e | [] | no_license | hellstrikes13/sudipython | cf529ed60cca09afa185f3b56fe7ce17059212b0 | a789fc33cdab12d64ab674cb1c2ad7849617251f | refs/heads/master | 2022-01-03T09:12:14.192724 | 2021-12-23T05:38:31 | 2021-12-23T05:38:31 | 184,371,582 | 2 | 1 | null | 2021-12-23T05:38:31 | 2019-05-01T05:07:18 | Python | UTF-8 | Python | false | false | 139 | py | age = raw_input("age: ")
# Prompt for height and weight (Python 2 raw_input returns str), then
# echo all three answers using %r formatting.
tall = raw_input("height: ")
wt = raw_input("weight: ")
print 'so u\'r %r old , %r tall and %r kg' %(age,tall,wt)
| [
"hellstrikes13@gmail.com"
] | hellstrikes13@gmail.com |
303ba6a7930d88999d35935f4a43741846fc8d9c | 57f5cad2409ee6c8af646a0f95b37431bddeb946 | /learning-python-application-development/chapter-05/wargame/attackoftheorcs.py | f76697ec7c0a3cd66fa2bd999a395c27d58b9021 | [] | no_license | johannesgiorgis/python-learning | ebb7991c83afd4f066cd487715c1f82d69d81ef9 | fb1d13e839d3f1a2ed806ae943853984cda17354 | refs/heads/master | 2022-09-30T12:21:11.517259 | 2020-06-07T06:54:13 | 2020-06-07T06:54:13 | 239,886,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,732 | py | """
Attack of the Orcs v2.0.0
"""
import random
from hut import Hut
from knight import Knight
from orcrider import OrcRider
from gameutils import print_bold
class AttackOfTheOrcs:
    """Main class to play the Attack of The Orcs game.

    :ivar huts: list of `Hut` instances making up the village.
    :ivar player: the human player; a `Knight` in the current implementation.

    .. seealso:: :py:meth:`play` where the main action happens.
    """
    def __init__(self):
        self.huts = []
        self.player = None
        self.num_huts = 5 # use this to avoid hard coding
    def get_occupants(self) -> list:
        """Return the occupant type of every hut, for status display.

        Types come from :py:meth:`Hut.get_occupant_type`; for occupied
        huts this is 'enemy' or 'friend', otherwise a string describing
        the unoccupied/acquired state.
        """
        return [x.get_occupant_type() for x in self.huts]
    def show_game_mission(self):
        """Print the game mission in the console."""
        print_bold("Mission:")
        print(" 1. Fight with the enemy.")
        print(" 2. Bring all the huts in the village under your control")
        print("---------------------------------------------------------\n")
    def _process_user_choice(self) -> int:
        """Prompt until the user picks a hut that is not already acquired.

        :return: 1-based hut number chosen by the user.
        NOTE(review): non-numeric or out-of-range input is not validated
        here and would raise ValueError/IndexError.
        """
        verifying_choice = True
        idx = 0
        print(f"Current occupants: {self.get_occupants()}")
        while verifying_choice:
            user_choice = input("Choose a hut number to enter (1-5): ")
            idx = int(user_choice)
            if self.huts[idx - 1].is_acquired:
                print(
                    "You have already acquired this hut. Try again."
                    "<INFO: You can NOT get healed in already acquired hut.>"
                )
            else:
                verifying_choice = False
        return idx
    def _occupy_huts(self):
        """Randomly occupy each hut with an enemy, a friend, or leave it empty."""
        for i in range(self.num_huts):
            choice_list = ["enemy", "friend", None]
            computer_choice = random.choice(choice_list)
            if computer_choice == "enemy":
                name = "enemy-" + str(i + 1)
                self.huts.append(Hut(i + 1, OrcRider(name)))
            elif computer_choice == "friend":
                name = "knight-" + str(i + 1)
                self.huts.append(Hut(i + 1, Knight(name)))
            else:
                # Hut stays unoccupied (occupant is None).
                self.huts.append(Hut(i + 1, computer_choice))
    def setup_game_scenario(self):
        """Create the player and huts, pre-occupy the huts, and show the mission.

        .. seealso:: :py:meth:`play`, :py:meth:`_occupy_huts`
        """
        self.player = Knight()
        self._occupy_huts()
        self.show_game_mission()
        self.player.show_health(bold=True)
    def play(self):
        """Workhorse method driving the game loop.

        Sets up the scenario, then repeatedly asks the user for a hut,
        lets the player attempt to acquire it, and ends when the player
        either dies (lose) or has acquired every hut (win).
        """
        # Create a Knight instance, create huts and preoccupy them with
        # a game character instance (or leave empty)
        self.setup_game_scenario()
        # Initial setup is done, now the main play logic
        acquired_hut_counter = 0
        while acquired_hut_counter < self.num_huts:
            idx = self._process_user_choice()
            self.player.acquire_hut(self.huts[idx - 1])
            if self.player.health_meter <= 0:
                print_bold("YOU LOSE :( Better luck next time")
                break
            if self.huts[idx - 1].is_acquired:
                acquired_hut_counter += 1
        if acquired_hut_counter == self.num_huts:
            print_bold("Congratulations! YOU WIN!!!")
if __name__ == "__main__":
print("Starting game...")
game = AttackOfTheOrcs()
game.play()
| [
"johannesgiorgis@users.noreply.github.com"
] | johannesgiorgis@users.noreply.github.com |
3905f5f1c9b86468bc04cf0f37ce790e482ffe99 | 7c0026e4e6e47114c93bd8a49c4d2759cf7a8613 | /starter_code_section_7/tests/system/models/test_system_user.py | 0cc5b14a8a835e16cc7eb267ebbef77c11866c73 | [] | no_license | ikostan/automation_with_python | 5edf9bca971629fc7621af2957d26c3f0f73fe9e | 131a0699f927ac40ea625a3526c9193863b1cc1c | refs/heads/master | 2020-05-05T09:42:14.360826 | 2019-07-03T01:49:38 | 2019-07-03T01:49:38 | 179,913,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,443 | py | from starter_code_section_7.tests.base_test import BaseTest
from starter_code_section_7.models.user import UserModel
import json
class UserSystemTest(BaseTest):
    """End-to-end tests for the /register and /auth endpoints."""
    def test_register_user(self):
        """POST /register returns 201 and persists the new user."""
        with self.app() as client, self.app_context():
            username = 'uname'
            response = client.post('/register',
                                   data={'username': username,
                                         'password': 'password'})
            self.assertEqual(response.status_code, 201)
            self.assertDictEqual(json.loads(response.data),
                                 {'message': 'User created successfully.'})
            # The user must now be retrievable both by name and by id.
            self.assertIsNotNone(UserModel.find_by_username(username))
            self.assertIsNotNone(UserModel.find_by_id(1))
    def test_register_and_login(self):
        """A freshly registered user can authenticate and receives a JWT."""
        with self.app() as client, self.app_context():
            username = 'uname'
            password = 'password'
            client.post('/register',
                        data={'username': username,
                              'password': password})
            auth_response = client.post('/auth',
                                        data=json.dumps({'username': username,
                                                         'password': password}),
                                        headers={'Content-Type': 'application/json'})
            self.assertIn('access_token', json.loads(auth_response.data).keys())
    def test_register_duplicate_user(self):
        """Registering the same username twice is rejected with 400."""
        with self.app() as client, self.app_context():
            username = 'uname'
            password = 'password'
            client.post('/register',
                        data={'username': username,
                              'password': password})
            response = client.post('/register',
                                   data={'username': username,
                                         'password': password})
            self.assertEqual(response.status_code, 400)
            self.assertDictEqual(json.loads(response.data),
                                 {'message': 'A user with that username already exists.'})
| [
"igorkostan@gmail.com"
] | igorkostan@gmail.com |
83d01be9053a9d3d262a71ece694582b6fa24bb6 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/C/colby_neeb/swr3_twitter_search_1.py | 03b363ad975f75f06e90c4374e6f58115dd8bf9c | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,430 | py | ###############################################################################
# Twitter scraper - designed to be forked and used for more interesting things
###############################################################################
import scraperwiki
import simplejson
import urllib2
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'swr3'
RESULTS_PER_PAGE = '100'
LANGUAGE = ''
NUM_PAGES = 50
UNTIL = '2012-12-27'
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s&until=%s' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page, UNTIL)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
data = {}
data['to_user'] = result['to_user']
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
data['from_user_id'] = result['from_user_id']
data['to_user_id'] = result['to_user_id']
data['source'] = result['source']
data['iso_language_code'] = result['iso_language_code']
data['profile_image_url'] = result['profile_image_url']
data['created_at'] = result['created_at']
data['geo'] = result['geo']
print data ['created_at'], ['iso_language_code'], ['from_user'], ['from_user_id'], ['to_user'], ['to_user_id'], ['source'], ['profile_image_url'], ['geo'], data['text']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, failed to scrape %s' % base_url
###############################################################################
# Twitter scraper - designed to be forked and used for more interesting things
###############################################################################
import scraperwiki
import simplejson
import urllib2
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'swr3'
RESULTS_PER_PAGE = '100'
LANGUAGE = ''
NUM_PAGES = 50
UNTIL = '2012-12-27'
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s&until=%s' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page, UNTIL)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
data = {}
data['to_user'] = result['to_user']
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
data['from_user_id'] = result['from_user_id']
data['to_user_id'] = result['to_user_id']
data['source'] = result['source']
data['iso_language_code'] = result['iso_language_code']
data['profile_image_url'] = result['profile_image_url']
data['created_at'] = result['created_at']
data['geo'] = result['geo']
print data ['created_at'], ['iso_language_code'], ['from_user'], ['from_user_id'], ['to_user'], ['to_user_id'], ['source'], ['profile_image_url'], ['geo'], data['text']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, failed to scrape %s' % base_url
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
f0b7e9b2949c6da520a21ecbd4335a424a92f82d | 20b1642035f3d52607ccde30cfdb3ee89552c9f1 | /backend/detector/service.py | 694ae0e3ad9a4bd835afb863ebc19b4642961e4b | [] | no_license | ScrollPage/Course-work | 26341fc194a8f5acb0b2fa33e3725d72ce09d5e5 | e5de9c6afa393da7065a6468b92a7e48620cb8de | refs/heads/master | 2023-02-13T18:04:32.158034 | 2021-01-05T14:40:31 | 2021-01-05T14:40:31 | 317,627,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | from django.conf import settings
import json
from random import uniform, randint
def get_data(id):
    """Return a JSON payload of simulated sensor readings for detector ``id``.

    Each channel gets one uniform random reading, rounded to 2 decimals,
    drawn between the configured lower and upper detector limits.
    """
    low = settings.LOWER_DETECTOR_DATA_LIMIT
    high = settings.HIGHER_DETECTOR_DATA_LIMIT
    reading = {'id': id}
    # One simulated reading per sensor channel, in payload order.
    for channel in ('temp', 'Co2', 'humidity', 'lightning', 'pH'):
        reading[channel] = round(uniform(low, high), 2)
    return json.dumps(reading)
"54814200+reqww@users.noreply.github.com"
] | 54814200+reqww@users.noreply.github.com |
420485c92ef6a53b2f95460695bd7fac891eeb19 | c026581b6c3855c75e7c9f9c6397acadc7833fb7 | /idm_core/organization/migrations/0005_auto_20170730_0837.py | 4b1297ed480e1e8fa7b7f304ecc27dae6d827fe2 | [] | no_license | mans0954/idm-core | 5734fd08a3c8c5deaec62167c9470336f0c6c6ef | 2a3cf326e0bb3db469e2b318b122033a7dd92b83 | refs/heads/master | 2021-07-24T04:13:47.021951 | 2017-11-02T22:09:25 | 2017-11-02T22:09:25 | 109,317,967 | 1 | 0 | null | 2017-11-02T20:56:01 | 2017-11-02T20:55:58 | null | UTF-8 | Python | false | false | 878 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-30 07:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter AffiliationType/RoleType ``id`` fields to 64-char string
    primary keys and make ``edu_person_affiliation_value`` optional."""
    dependencies = [
        ('organization', '0004_auto_20170730_0835'),
    ]
    operations = [
        # eduPersonAffiliation value becomes optional free text.
        migrations.AlterField(
            model_name='affiliationtype',
            name='edu_person_affiliation_value',
            field=models.CharField(blank=True, max_length=64),
        ),
        # String primary key (serialize=False: not included in fixtures).
        migrations.AlterField(
            model_name='affiliationtype',
            name='id',
            field=models.CharField(max_length=64, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='roletype',
            name='id',
            field=models.CharField(max_length=64, primary_key=True, serialize=False),
        ),
    ]
| [
"alexander.dutton@it.ox.ac.uk"
] | alexander.dutton@it.ox.ac.uk |
b3db4e93af61bb05a18da0a3896c467c3c863720 | 4589a9ea76e458793ad78059839b81d365f433de | /athena_automation/athenataf/tests/user_management/user_account_settings/UserAccountSettings.py | a655eab509ba12f775b6cb6e5fdba1f6d8c0f8eb | [] | no_license | cash2one/reautomation_handoff | 5e2c4c432d8f658d1b57211782744bd0b56c52f6 | 7ef83572d659db35036189eb394f99de1369db5a | refs/heads/master | 2020-05-22T17:56:33.214080 | 2015-07-13T07:51:18 | 2015-07-13T07:51:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | import logging
logger = logging.getLogger('athenataf')
from athenataf.lib.functionality.test.AthenaGUITestCase import AthenaGUITestCase
class UserAccountSettings(AthenaGUITestCase):
    '''
    GUI tests validating the user account settings panel: interface
    fields, time zone, idle timeout, and login/logout flows.
    '''
    def test_ath_8214_validate_user_interface(self):
        # Validate the settings UI, then restore defaults for later tests.
        self.TopPanel.validate_user_interface()
        self.TopPanel.setting_default_value()
    def test_ath_8219_validate_time_zone(self):
        self.TopPanel.validate_time_zone()
    def test_ath_8220_validate_idle_timeout(self):
        self.TopPanel.validate_idle_timeout()
        self.TopPanel.setting_default_value()
    def test_ath_8217_login_Logout(self):
        # Re-login with the default account.
        self.logout()
        self.login('default')
    def test_ath_8218_login_Logout(self):
        # Create a read-write user, log in as it, and verify the
        # virtual-controller entry is visible in the inner left panel.
        conf = self.config.config_vars
        user_management_page=self.LeftPanel.go_to_user_management()
        user_management_page.delete_if_any_user_present()
        user_management_page.create_new_user(conf.email_read_write,conf.user_setting_group_value,conf.user_access_level_read_write)
        self.logout()
        self.login('read_write')
        inner_left_panel = self.TopPanel.click_slider_icon()
        inner_left_panel.assert_virtual_controller()
        inner_left_panel.click_on_close_icon()
| [
"raju_set@testmile.com"
] | raju_set@testmile.com |
f9a01659bf39a63cdd339d284da8f4b70134e253 | a564b8277e33eb27009089ec2e216a4d266a8861 | /官方配套代码/11/11.5/ttk_test.py | 5be23b3a31f839e1487702c786002ba43db633c6 | [
"Unlicense"
] | permissive | yifengyou/crazy-python | 3cb50f462e4ddb921c365e2f0cb3e846e6539383 | 28099bd5011de6981a7c5412783952cc7601ae0c | refs/heads/main | 2023-06-18T18:10:52.691245 | 2021-07-18T14:21:03 | 2021-07-18T14:21:03 | 387,088,939 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,131 | py | # coding: utf-8
#########################################################################
# 网站: <a href="http://www.crazyit.org">疯狂Java联盟</a> #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
from tkinter import *
# 导入ttk
from tkinter import ttk
class App:
    """Builds a small themed (ttk) demo window on the given Tk master."""
    def __init__(self, master):
        self.master = master
        self.initWidgets()
    def initWidgets(self):
        """Create and lay out the ttk widgets."""
        # Combobox on the left, stretching horizontally.
        combo = ttk.Combobox(self.master, font=24)
        combo['values'] = ('Python', 'Swift', 'Kotlin')
        combo.pack(side=LEFT, fill=X, expand=YES)
        # Frame fills the remaining space on the right.
        frame = ttk.Frame(self.master)
        frame.pack(side=RIGHT, fill=BOTH, expand=YES)
        # Label on top, button below it.
        label = ttk.Label(self.master, text='我的标签', font=24)
        label.pack(side=TOP, fill=BOTH, expand=YES)
        button = ttk.Button(self.master, text='我的按钮')
        button.pack()
# Build the main window (title is Chinese for "simple event handling")
# and start the Tk event loop.
root = Tk()
root.title("简单事件处理")
App(root)
root.mainloop()
"842056007@qq.com"
] | 842056007@qq.com |
479d1aeeff3bb58b0fe996c995e56889a34455b3 | 6bdb32ddbd72c4337dab12002ff05d6966538448 | /gridpack_folder/mc_request/LHEProducer/Spin-1/Wprime_Wh_Whadhbb/Wprime_Wh_Whadhbb_narrow_M3500_13TeV-madgraph_cff.py | 4a14e9796316d1a471cfe37d24b515494d0bcdc6 | [] | no_license | cyrilbecot/DibosonBSMSignal_13TeV | 71db480de274c893ba41453025d01bfafa19e340 | d8e685c40b16cde68d25fef9af257c90bee635ba | refs/heads/master | 2021-01-11T10:17:05.447035 | 2016-08-17T13:32:12 | 2016-08-17T13:32:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | import FWCore.ParameterSet.Config as cms
# link to cards:
# https://github.com/cms-sw/genproductions/tree/master/bin/MadGraph5_aMCatNLO/cards/production/13TeV/exo_diboson/Spin-1/Wprime_Wh_Whadhbb/Wprime_Wh_Whadhbb_narrow_M3500
# External LHE producer: unpacks a pre-built MadGraph gridpack tarball
# from CVMFS and generates 5000 events per job.
externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
    args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/13TeV/madgraph/V5_2.2.2/exo_diboson/Spin-1/Wprime_Wh_Whadhbb/narrow/v2/Wprime_Wh_Whadhbb_narrow_M3500_tarball.tar.xz'),
    nEvents = cms.untracked.uint32(5000),
    numberOfParameters = cms.uint32(1),
    outputFile = cms.string('cmsgrid_final.lhe'),
    scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
| [
"syu@cern.ch"
] | syu@cern.ch |
08dfd039fb10960d1b3682f7d5d8df927decb3e6 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_207/ch147_2020_04_12_19_01_16_048371.py | 8523a46d0bf284b44d23c7a6f18a43e0bf7b00f6 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py |
def conta_ocorrencias(lista_palavras):
    """Return a dict mapping each word in *lista_palavras* to its count."""
    contagem = {}
    for palavra in lista_palavras:
        contagem[palavra] = contagem.get(palavra, 0) + 1
    return contagem
# NOTE(review): `lista_palavras` is never defined at module level, so this
# line raises NameError when the file is imported — confirm intended usage.
new_dicio = conta_ocorrencias(lista_palavras)
def mais_frequente(conta_ocorrencias):
    """Return the key with the highest count in the given occurrence dict.

    Returns ``None`` for an empty dict; on ties the last matching key in
    iteration order wins (matching the original two-pass scan).

    Bug fix: the original ignored its parameter and read the module-level
    ``new_dicio`` global (and shadowed the builtin ``max``), so it only
    worked for one hard-coded input.
    """
    maior = 0
    chave = None
    # First pass: find the highest count.
    for valor in conta_ocorrencias.values():
        if maior < valor:
            maior = valor
    # Second pass: remember the last key carrying that count.
    for k, v in conta_ocorrencias.items():
        if v == maior:
            chave = k
    return chave
| [
"you@example.com"
] | you@example.com |
5b3db41f2da38a3972e9b3c5e8d65841ed63c755 | 8afc9a84162f82b8afb586c56befca0703e5c4cc | /mercurius/core/http/HTTPParser.py | 5fc4e05df93d1861c5e9a110e8a1152f829ef2c0 | [
"BSD-2-Clause"
] | permissive | bossiernesto/mercurius | 70028d4fdc360f7dcd51df5084efea8e60202463 | 9c4bc26f45a317d4e22137b412f63f18976461fe | refs/heads/master | 2021-01-19T05:46:58.848795 | 2017-02-26T20:19:43 | 2017-02-26T20:19:43 | 27,944,892 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | class HTTPHeader(object):
pass
class HTTPParser(object):
    """Parser for RFC 2616 style HTTP header blocks."""

    def parse_header(self, rfile):
        """Read header lines from *rfile* until a blank line (or EOF).

        Returns a dict mapping lower-cased header names to stripped values.
        Continuation lines (leading space/tab) are folded into the previous
        header; repeated headers are comma-merged per RFC 2616 section 4.2.

        Bug fixes vs. the original:
        - EOF before the terminating blank line no longer raises IndexError
          on ``line[0]`` (readline() returning '' now ends the loop).
        - Repeated headers merge only the *value*; the original appended the
          whole "Name: value" line into the merged result.
        """
        headers = {}
        name = ''
        while 1:
            line = rfile.readline()
            if not line:
                # EOF: stop instead of looping on empty strings.
                break
            if line == '\r\n' or line == '\n':
                break
            if line[0] in ' \t':
                # continued header
                headers[name] = headers[name] + '\r\n ' + line.strip()
            else:
                i = line.find(':')
                assert(i != -1)
                name = line[:i].lower()
                if name in headers:
                    # merge value only, not the repeated header name
                    headers[name] = headers[name] + ', ' + line[i+1:].strip()
                else:
                    headers[name] = line[i+1:].strip()
        return headers
"bossi.ernestog@gmail.com"
] | bossi.ernestog@gmail.com |
9a99a3346de89b4d3b489a277a7773de3b3a22ae | b7d6b8918a5d32ee2e22fc2e97c25b96ffe24110 | /project/users/views.py | 966ae886f3753c49b76bf66d8d8b40eaa5915bdb | [] | no_license | Briankr33/FlaskTaskr | d935d2a97b2a36288c2aec297f6d1fbf21a9ca1e | 18e0290b08cef973c5f67bd3fb50970b104a534a | refs/heads/master | 2020-04-14T23:13:32.879566 | 2019-02-05T20:43:03 | 2019-02-05T20:43:03 | 164,194,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,754 | py | # project/users/views.py
#################
#### imports ####
#################
from functools import wraps
from flask import flash, redirect, render_template, \
request, session, url_for, Blueprint
from sqlalchemy.exc import IntegrityError
from .forms import RegisterForm, LoginForm
from project import db, bcrypt
from project.models import User
################
#### config ####
################
users_blueprint = Blueprint('users', __name__)
##########################
#### helper functions ####
##########################
def login_required(test):
    """Decorator: redirect to the login page unless a session is active."""
    @wraps(test)
    def wrap(*args, **kwargs):
        # Guard clause: bounce unauthenticated users to the login view.
        if 'logged_in' not in session:
            flash('You need to login first.')
            return redirect(url_for('users.login'))
        return test(*args, **kwargs)
    return wrap
################
#### routes ####
################
@users_blueprint.route('/logout/')
@login_required
def logout():
    """Clear all session keys and send the user back to the login page."""
    for session_key in ('logged_in', 'user_id', 'role', 'name'):
        session.pop(session_key, None)
    flash('Goodbye!')
    return redirect(url_for('users.login'))
@users_blueprint.route('/', methods=['GET', 'POST'])
def login():
error = None
form = LoginForm(request.form)
if request.method == 'POST':
if form.validate_on_submit():
user = User.query.filter_by(name=request.form['name']).first()
if user is not None and bcrypt.check_password_hash(user.password, request.form['password']):
session['logged_in'] = True
session['user_id'] = user.id
session['role'] = user.role
session['name'] = user.name
flash('Welcome!')
return redirect(url_for('tasks.tasks'))
else:
error = 'Invalid username or password.'
return render_template('login.html', form=form, error=error)
@users_blueprint.route('/register/', methods=['GET', 'POST'])
def register():
error = None
form = RegisterForm(request.form)
if request.method == 'POST':
if form.validate_on_submit():
new_user = User(
form.name.data,
form.email.data,
bcrypt.generate_password_hash(form.password.data),
)
try:
db.session.add(new_user)
db.session.commit()
flash('Thanks for registering. Please login.')
return redirect(url_for('users.login'))
except IntegrityError:
error = 'That username and/or email already exists.'
return render_template('register.html', form=form, error=error)
return render_template('register.html', form=form, error=error)
| [
"you@example.com"
] | you@example.com |
fc5dd4af74160a03bd97cc7c32cd9f983cfa7f43 | a4f3e7f4f0d28f2c072a6378487760a067e838e6 | /Array Values From User.py | cdd6dcee4acd5a44c0a567747aa9ae2c9296aed5 | [] | no_license | algebra-det/Python | 50cacdb517a863ef914dd8ce8b835091f28aa9f6 | c4506f4df0c2ec103b1bcebcfedca78cbe705541 | refs/heads/master | 2020-12-14T15:35:26.727430 | 2020-05-01T10:21:09 | 2020-05-01T10:21:09 | 234,789,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | """
Here we will learn how to take array values from the user
"""
from array import *
# Build an integer array from user-supplied length and values.
arr = array('i', [])
y = int(input("Enter the length of the array "))
for z in range(y):
    a = int(input("Enter the next value "))
    arr.append(a)
print(arr)
b = int(input("which number do you want to search "))
# Searching manually i.e. without inbuilt function
d = 0
for c in arr:
    if c == b:
        print("its at index ", d)
        break
    d += 1
else:
    # for/else: runs only when the loop finished without break.
    print("It does not match any value you entered")
print()
print()
print()
# searching with function
print("It's at : ", arr.index(b))
"noreply@github.com"
] | algebra-det.noreply@github.com |
6c7260b28424b01279f297879ffc39b9b925ae24 | 0fd7a471a63e2bed2857976d8bf2c28bb7f6d1bb | /小练习/557.反转字符串中的单词.py | 826b71429220ffeb669e00d1cd3967030832822e | [] | no_license | zjf201811/LeetCode__exercises | 0432f97d314303a5b2305d745cae9d998b92a851 | 6e5975172dfd17d71b0c6bacc34d51e9e96b6a36 | refs/heads/master | 2020-04-18T01:47:52.285975 | 2019-01-27T00:28:58 | 2019-01-27T00:28:58 | 167,134,587 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | # Author:ZJF
class Solution:
    def reverseWords(self, s):
        """
        :type s: str
        :rtype: str

        Reverse the characters of each space-separated word, keeping the
        word order (LeetCode 557).
        """
        return " ".join(word[::-1] for word in s.split(" "))
# Quick manual check when running the file directly.
print(Solution().reverseWords("12 12 12"))
"thor201105@163.com"
] | thor201105@163.com |
aeb0cf39d35aac50a81a46cef5ddeaaf6d247c96 | be3d301bf8c502bb94149c76cc09f053c532d87a | /python/GafferTest/PathFilterTest.py | e2c2749e516f4438ce09f52a932096caaf67d2a0 | [
"BSD-3-Clause"
] | permissive | ljkart/gaffer | 28be401d04e05a3c973ef42d29a571aba6407665 | d2ce0eb7134a33ceee375d0a3676129a9bdcfbc6 | refs/heads/master | 2021-01-18T08:30:19.763744 | 2014-08-10T13:48:10 | 2014-08-10T13:48:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,264 | py | ##########################################################################
#
# Copyright (c) 2011, John Haddon. All rights reserved.
# Copyright (c) 2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import glob
import IECore
import Gaffer
import GafferTest
class PathFilterTest( GafferTest.TestCase ) :

	def test( self ) :
		# Unfiltered path lists everything in the directory.
		path = Gaffer.FileSystemPath( "test/data/scripts" )
		children = path.children()
		self.assertEqual( len( children ), len( glob.glob( "test/data/scripts/*" ) ) )
		# attach a filter
		gfrFilter = Gaffer.FileNamePathFilter( [ "*.gfr" ] )
		path.addFilter( gfrFilter )
		children = path.children()
		self.assertEqual( len( children ), len( glob.glob( "test/data/scripts/*.gfr" ) ) )
		# copy the path and check the filter is working on the copy
		pathCopy = path.copy()
		self.assertEqual( len( pathCopy.children() ), len( children ) )
		# detach the filter and check that behaviour has reverted
		path.removeFilter( gfrFilter )
		children = path.children()
		self.assertEqual( len( children ), len( glob.glob( "test/data/scripts/*" ) ) )

	def testEnabledState( self ) :
		# Toggling a filter's enabled state switches between filtered and
		# unfiltered listings without detaching the filter.
		path = Gaffer.FileSystemPath( "test/data/scripts" )
		f = Gaffer.FileNamePathFilter( [ "*.gfr" ] )
		self.assertEqual( f.getEnabled(), True )
		path.setFilter( f )
		self.assertEqual( len( path.children() ), len( glob.glob( "test/data/scripts/*.gfr" ) ) )
		f.setEnabled( False )
		self.assertEqual( f.getEnabled(), False )
		self.assertEqual( len( path.children() ), len( glob.glob( "test/data/scripts/*" ) ) )
		f.setEnabled( True )
		self.assertEqual( f.getEnabled(), True )
		self.assertEqual( len( path.children() ), len( glob.glob( "test/data/scripts/*.gfr" ) ) )

	def testChangedSignal( self ) :
		pathFilter = Gaffer.FileNamePathFilter( [ "*.gfr" ] )
		enabledStates = []
		def f( pf ) :
			self.failUnless( pf is pathFilter )
			enabledStates.append( pf.getEnabled() )
		c = pathFilter.changedSignal().connect( f )
		pathFilter.setEnabled( False )
		pathFilter.setEnabled( False )
		pathFilter.setEnabled( True )
		pathFilter.setEnabled( True )
		pathFilter.setEnabled( False )
		# Only actual state changes emit the signal (redundant sets ignored).
		self.assertEqual( enabledStates, [ False, True, False ] )

	def testUserData( self ) :
		pathFilter = Gaffer.FileNamePathFilter( [ "*.gfr" ] )
		self.assertEqual( pathFilter.userData(), {} )
		ud = { "a" : "a" }
		pathFilter = Gaffer.FileNamePathFilter( [ "*.gfr" ], userData = ud )
		self.assertEqual( pathFilter.userData(), ud )
		# userData should be copied, not aliased.
		self.failIf( pathFilter.userData() is ud )
# Allow running this test file directly.
if __name__ == "__main__":
	unittest.main()
| [
"thehaddonyoof@gmail.com"
] | thehaddonyoof@gmail.com |
bd4a60acea7ad6f199bcb3f98e075600430ce483 | e7ba4626bd239c20f48a49e8b198dace1391b403 | /Plotter/test/testPlot.py | 8d7cb251cad176a82b7f5cde5fb9d603af112911 | [] | no_license | ArturAkh/TauFW | b6952edb7ce6f1e29ee8c9f4501a035a7bd1729e | df209f865d3aacb72ffecb2e02126d57e4646181 | refs/heads/master | 2023-02-19T18:14:40.269908 | 2021-01-20T23:02:50 | 2021-01-20T23:04:15 | 293,569,311 | 0 | 12 | null | 2020-09-07T15:47:06 | 2020-09-07T15:47:06 | null | UTF-8 | Python | false | false | 2,640 | py | #! /usr/bin/env python
# Author: Izaak Neutelings (June 2020)
# Description: Test script for Plot class
# test/testPlot.py -v2 && eog plots/testPlot*.png
from TauFW.Plotter.plot.utils import LOG, ensuredir
from TauFW.Plotter.plot.Plot import Plot, CMSStyle
from ROOT import TH1D, gRandom
def plothist(xtitle, hists, ratio=False, logy=False, norm=False):
    """Draw the given histograms overlaid with the Plot class and save
    the canvas as PNG and PDF under plots/ (Python 2 style script)."""
    # SETTING
    outdir = ensuredir("plots/")
    fname = outdir + "testPlot"
    # Encode the drawing options into the output file name.
    if ratio:
        fname += "_ratio"
    if logy:
        fname += "_logy"
    if norm:
        fname += "_norm" # normalize each histogram
    rrange = 0.5
    width = 0.2 # legend width
    position = 'topright' # legend position
    header = "Gaussians" # legend header
    text = "#mu#tau_{h}" # corner text
    grid = True #and False
    staterr = True and False # add uncertainty band to first histogram
    lstyle = 1 # solid lines

    # PLOT
    LOG.header(fname)
    plot = Plot(xtitle, hists, norm=norm)
    plot.draw(ratio=ratio, logy=logy, ratiorange=rrange, lstyle=lstyle, grid=grid, staterr=staterr)
    plot.drawlegend(position, header=header, width=width)
    plot.drawtext(text)
    plot.saveas(fname + ".png")
    plot.saveas(fname + ".pdf")
    #plot.saveas(fname+".C")
    #plot.saveas(fname+".png",fname+".C")
    #plot.saveas(fname,ext=['png','pdf'])
    plot.close()
    print
def createhists(nhist=3):
    """Create *nhist* TH1D histograms filled with Gaussian random values,
    each with a slightly shifted mean (49, 50, ...)."""
    nbins = 50
    xmin = 0
    xmax = 100
    nevts = 10000
    rrange = 0.5
    hists = []
    # Fixed seed so every run produces identical histograms.
    gRandom.SetSeed(1777)
    for i in xrange(1, nhist+1):
        mu = 48 + i
        sigma = 10
        hname = "hist%d" % (i)
        # htitle is currently unused; hname is passed as the title below.
        htitle = "#mu = %s, #sigma = %s" % (mu, sigma)
        hist = TH1D(hname, hname, nbins, xmin, xmax)
        for j in xrange(nevts):
            hist.Fill(gRandom.Gaus(mu, sigma))
        hists.append(hist)
    return hists
def main():
    """Generate histograms and draw every ratio/logy/norm combination."""
    CMSStyle.setCMSEra(2018)
    xtitle = "p_{T}^{MET} [GeV]"
    #xtitle = "Leading jet p_{T} [GeV]"
    #plothist(variable,hists,ratio=False,logy=False)
    # Fresh histograms per combination: Plot.close() deletes the ones drawn.
    for ratio in [True, False]:
        for logy in [True, False]:
            for norm in [True, False]:
                hists = createhists()
                plothist(xtitle, hists, ratio=ratio, logy=logy, norm=norm)
if __name__ == "__main__":
    # Parse the optional verbosity flag, then run the plotting test.
    import sys
    from argparse import ArgumentParser
    argv = sys.argv
    description = '''Script to test the Plot class for comparing histograms.'''
    parser = ArgumentParser(prog="testPlot", description=description, epilog="Good luck!")
    parser.add_argument('-v', '--verbose', dest='verbosity', type=int, nargs='?', const=1, default=0, action='store',
                        help="set verbosity")
    args = parser.parse_args()
    LOG.verbosity = args.verbosity
    main()
| [
"iwn_@hotmail.com"
] | iwn_@hotmail.com |
ce40f7f0feca28259d678a1a8d0daf5153c8a968 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02796/s921715617.py | f4b6921f3b9803e4c79bddb581b27d145fb52df9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | N = int(input())
robots = [tuple(map(int,input().split())) for _ in range(N)]
robots = [(x+l,x-l) for x,l in robots]
robots.sort()
cnt = 0
last = -float('inf')
for r,l in robots:
if last <= l:
cnt += 1
last = r
print(cnt) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
eac6d6b3556237519c304e69f5902010cad91848 | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /homeassistant/components/plugwise/switch.py | 8639826e37a7e890708b33604b2e0102551bff17 | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 3,593 | py | """Plugwise Switch component for HomeAssistant."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Any
from plugwise.constants import SwitchType
from homeassistant.components.switch import (
SwitchDeviceClass,
SwitchEntity,
SwitchEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EntityCategory
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN
from .coordinator import PlugwiseDataUpdateCoordinator
from .entity import PlugwiseEntity
from .util import plugwise_command
@dataclass
class PlugwiseSwitchEntityDescription(SwitchEntityDescription):
    """Describes Plugwise switch entity."""

    # Narrow the key to the SwitchType literal union from python-plugwise.
    key: SwitchType
# All switch entities a Plugwise device may expose; async_setup_entry only
# instantiates the ones present in a device's "switches" mapping.
SWITCHES: tuple[PlugwiseSwitchEntityDescription, ...] = (
    PlugwiseSwitchEntityDescription(
        key="dhw_cm_switch",
        translation_key="dhw_cm_switch",
        icon="mdi:water-plus",
        entity_category=EntityCategory.CONFIG,
    ),
    PlugwiseSwitchEntityDescription(
        key="lock",
        translation_key="lock",
        icon="mdi:lock",
        entity_category=EntityCategory.CONFIG,
    ),
    PlugwiseSwitchEntityDescription(
        key="relay",
        translation_key="relay",
        device_class=SwitchDeviceClass.SWITCH,
    ),
    PlugwiseSwitchEntityDescription(
        key="cooling_ena_switch",
        translation_key="cooling_ena_switch",
        name="Cooling",
        icon="mdi:snowflake-thermometer",
        entity_category=EntityCategory.CONFIG,
    ),
)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the Smile switches from a config entry."""
    coordinator = hass.data[DOMAIN][config_entry.entry_id]
    # One entity per (device, supported switch description) pair.
    entities = [
        PlugwiseSwitchEntity(coordinator, device_id, description)
        for device_id, device in coordinator.data.devices.items()
        if (switches := device.get("switches"))
        for description in SWITCHES
        if description.key in switches
    ]
    async_add_entities(entities)
class PlugwiseSwitchEntity(PlugwiseEntity, SwitchEntity):
    """Representation of a Plugwise plug."""

    entity_description: PlugwiseSwitchEntityDescription

    def __init__(
        self,
        coordinator: PlugwiseDataUpdateCoordinator,
        device_id: str,
        description: PlugwiseSwitchEntityDescription,
    ) -> None:
        """Set up the Plugwise API."""
        super().__init__(coordinator, device_id)
        self.entity_description = description
        self._attr_unique_id = f"{device_id}-{description.key}"

    @property
    def is_on(self) -> bool:
        """Return True if entity is on."""
        # State is read from the coordinator's device data, keyed by the
        # switch type this entity represents.
        return self.device["switches"][self.entity_description.key]

    @plugwise_command
    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn the device on."""
        await self.coordinator.api.set_switch_state(
            self._dev_id,
            self.device.get("members"),
            self.entity_description.key,
            "on",
        )

    @plugwise_command
    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn the device off."""
        await self.coordinator.api.set_switch_state(
            self._dev_id,
            self.device.get("members"),
            self.entity_description.key,
            "off",
        )
| [
"noreply@github.com"
] | home-assistant.noreply@github.com |
72d53a8968b10d535de42aabd8740462fb1ca955 | 77e5ed0d08a5187ca323a30f0d41591c38e82963 | /src/lib/reprlib.py | 827e2561295f8c01c797981d7ccbe4f1df130be6 | [
"MIT",
"Python-2.0"
] | permissive | maliaoMJ/skulpt | 9f40012dc234c58017531bc278b0753d485ea9ad | f812dc1f0d0c58855478bd8b4afbde70886a9180 | refs/heads/master | 2022-03-29T02:19:24.718295 | 2020-01-27T17:34:43 | 2020-01-27T17:34:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,272 | py | """Redo the builtin repr() (representation) but with limits on most sizes."""
__all__ = ["Repr", "repr", "recursive_repr"]
from itertools import islice
from _thread import get_ident
def recursive_repr(fillvalue='...'):
    """Decorator to make a repr function return fillvalue for a recursive call."""

    def decorating_function(user_function):
        # Keys currently being repr'd, per (object id, thread id).
        active_keys = set()

        def wrapper(self):
            call_key = id(self), get_ident()
            if call_key in active_keys:
                return fillvalue
            active_keys.add(call_key)
            try:
                return user_function(self)
            finally:
                active_keys.discard(call_key)

        # Can't use functools.wraps() here because of bootstrap issues
        for attr in ('__module__', '__doc__', '__name__', '__qualname__'):
            setattr(wrapper, attr, getattr(user_function, attr))
        wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
        return wrapper

    return decorating_function
class Repr:
    """Produce representations of objects, limiting the size shown per type."""

    def __init__(self):
        # Per-type limits: how many elements/characters to show before
        # truncating with '...'.
        self.maxlevel = 6
        self.maxtuple = 6
        self.maxlist = 6
        self.maxarray = 5
        self.maxdict = 4
        self.maxset = 6
        self.maxfrozenset = 6
        self.maxdeque = 6
        self.maxstring = 30
        self.maxlong = 40
        self.maxother = 30

    def repr(self, x):
        return self.repr1(x, self.maxlevel)

    def repr1(self, x, level):
        # Dispatch to a repr_<typename> method when one exists, falling back
        # to the generic repr_instance.
        typename = type(x).__name__
        if ' ' in typename:
            parts = typename.split()
            typename = '_'.join(parts)
        if hasattr(self, 'repr_' + typename):
            return getattr(self, 'repr_' + typename)(x, level)
        else:
            return self.repr_instance(x, level)

    def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
        n = len(x)
        if level <= 0 and n:
            # Nesting limit reached: elide the whole contents.
            s = '...'
        else:
            newlevel = level - 1
            repr1 = self.repr1
            pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
            if n > maxiter: pieces.append('...')
            s = ', '.join(pieces)
            # trail gives the one-element tuple its trailing comma: (x,)
            if n == 1 and trail: right = trail + right
        return '%s%s%s' % (left, s, right)

    def repr_tuple(self, x, level):
        return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')

    def repr_list(self, x, level):
        return self._repr_iterable(x, level, '[', ']', self.maxlist)

    def repr_array(self, x, level):
        if not x:
            return "array('%s')" % x.typecode
        header = "array('%s', [" % x.typecode
        return self._repr_iterable(x, level, header, '])', self.maxarray)

    def repr_set(self, x, level):
        if not x:
            return 'set()'
        x = _possibly_sorted(x)
        return self._repr_iterable(x, level, '{', '}', self.maxset)

    def repr_frozenset(self, x, level):
        if not x:
            return 'frozenset()'
        x = _possibly_sorted(x)
        return self._repr_iterable(x, level, 'frozenset({', '})',
                                   self.maxfrozenset)

    def repr_deque(self, x, level):
        return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)

    def repr_dict(self, x, level):
        n = len(x)
        if n == 0: return '{}'
        if level <= 0: return '{...}'
        newlevel = level - 1
        repr1 = self.repr1
        pieces = []
        for key in islice(_possibly_sorted(x), self.maxdict):
            keyrepr = repr1(key, newlevel)
            valrepr = repr1(x[key], newlevel)
            pieces.append('%s: %s' % (keyrepr, valrepr))
        if n > self.maxdict: pieces.append('...')
        s = ', '.join(pieces)
        return '{%s}' % (s,)

    def repr_str(self, x, level):
        # original_repr is the saved builtin repr (the module shadows repr).
        s = original_repr(x[:self.maxstring])
        if len(s) > self.maxstring:
            # Keep the head and tail of the string around an ellipsis.
            i = max(0, (self.maxstring-3)//2)
            j = max(0, self.maxstring-3-i)
            s = original_repr(x[:i] + x[len(x)-j:])
            s = s[:i] + '...' + s[len(s)-j:]
        return s

    def repr_int(self, x, level):
        s = original_repr(x) # XXX Hope this isn't too slow...
        if len(s) > self.maxlong:
            i = max(0, (self.maxlong-3)//2)
            j = max(0, self.maxlong-3-i)
            s = s[:i] + '...' + s[len(s)-j:]
        return s

    def repr_instance(self, x, level):
        try:
            s = original_repr(x)
        # Bugs in x.__repr__() can cause arbitrary
        # exceptions -- then make up something
        except Exception:
            return '<%s instance at %#x>' % (x.__class__.__name__, id(x))
        if len(s) > self.maxother:
            i = max(0, (self.maxother-3)//2)
            j = max(0, self.maxother-3-i)
            s = s[:i] + '...' + s[len(s)-j:]
        return s
def _possibly_sorted(x):
# Since not all sequences of items can be sorted and comparison
# functions may raise arbitrary exceptions, return an unsorted
# sequence in that case.
try:
return sorted(x)
except Exception:
return list(x)
# Keep a handle on the builtin repr before shadowing the name below with
# the size-limited module-level repr.
original_repr = repr
aRepr = Repr()
repr = aRepr.repr
| [
"acbart@vt.edu"
] | acbart@vt.edu |
ff84efeb02608cd2c2eb9a7672ddf1a671751511 | 4879f75dc2cfe3e983bdec07782cb0986b61103e | /util/time_multi.py | 7020446cb4c0f7b15d391a7de194d53657f9b1aa | [
"MIT",
"CC0-1.0"
] | permissive | haosu1987/duktape | befe386c330e20c32e5bb8221a33af9c6062924d | 4c1d4bfc12f16bb389a12bbc930568afe12a71e8 | refs/heads/master | 2016-09-06T12:14:09.269999 | 2015-08-04T09:06:59 | 2015-08-04T09:06:59 | 39,752,467 | 1 | 0 | null | 2015-08-04T09:07:00 | 2015-07-27T03:07:59 | JavaScript | UTF-8 | Python | false | false | 759 | py | #!/usr/bin/python
#
# Small helper for perftest runs.
#
import os
import sys
import subprocess
def main():
    # Python 2 script: run `sys.argv[2] sys.argv[3]` under /usr/bin/time
    # `count` (= sys.argv[1]) times and report the minimum user time seen.
    count = int(sys.argv[1])
    time_min = None
    for i in xrange(count):
        cmd = [
            'time',
            '-f', '%U',
            '--quiet',
            sys.argv[2], # cmd
            sys.argv[3]  # testcase
        ]
        #print(repr(cmd))
        # /usr/bin/time writes the %U user time to stderr.
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        retval = p.wait()
        #print(i, retval, stdout, stderr)
        if retval != 0:
            print 'n/a'
            return
        time = float(stderr)
        #print(i, time)
        if time_min is None:
            time_min = time
        else:
            time_min = min(time_min, time)

    # /usr/bin/time has only two digits of resolution
    print('%.02f' % time_min)
# Script entry point.
if __name__ == '__main__':
    main()
| [
"sami.vaarala@iki.fi"
] | sami.vaarala@iki.fi |
00aac13e36808410e6f5c29082c0af2fadedbab6 | 1bdc746ecd775dcd3cf0965deb23a9c86f826706 | /bcbiovm/shared/retriever.py | 06e7c8e895a073df4ee0e518acc7e505731e80e4 | [
"MIT"
] | permissive | bcbio/bcbio-nextgen-vm | 84b681a258ddacd8656f8cdbca7a2ec8d3ed7a0b | cd703e9d0dd5c6e25ddefc39efd9fd87ec815615 | refs/heads/master | 2021-06-08T13:32:03.854484 | 2020-06-03T14:37:01 | 2020-06-03T14:37:01 | 15,653,572 | 10 | 6 | MIT | 2020-06-03T14:37:03 | 2014-01-05T15:37:31 | Python | UTF-8 | Python | false | false | 8,045 | py | """Shared code for retrieving resources from external integrations.
"""
import os
import yaml
import six
import toolz as tz
from bcbio import utils
def get_resources(genome_build, fasta_ref, config, data, open_fn, list_fn, find_fn=None,
                  normalize_fn=None):
    """Add genome resources defined in configuration file to data object.

    Reads <ref>-resources.yaml next to the fasta, rewrites relative
    ("../") entries to paths known to the storage backend, then layers in
    annotations, snpeff indices, versions, viral and genome-context files.
    The open/list/find/normalize callables abstract the storage backend.
    """
    resources_file = "%s-resources.yaml" % (os.path.splitext(fasta_ref)[0])
    if find_fn:
        resources_file = find_fn(resources_file)
    base_dir = os.path.dirname(resources_file)
    with open_fn(resources_file) as in_handle:
        resources = yaml.safe_load(in_handle)
    cfiles = list_fn(os.path.dirname(base_dir))
    # list() copies let us delete entries while iterating.
    for k1, v1 in list(resources.items()):
        if isinstance(v1, dict):
            for k2, v2 in list(v1.items()):
                if isinstance(v2, six.string_types) and v2.startswith("../"):
                    test_v2 = _normpath_remote(os.path.join(base_dir, v2), normalize_fn=normalize_fn)
                    if find_fn and find_fn(test_v2) is not None:
                        resources[k1][k2] = find_fn(test_v2)
                    elif test_v2 in cfiles:
                        resources[k1][k2] = test_v2
                    else:
                        # Drop resources that do not exist in the backend.
                        del resources[k1][k2]
    data["genome_resources"] = _ensure_annotations(resources, cfiles, data, normalize_fn)
    data = _add_configured_indices(base_dir, cfiles, data, normalize_fn)
    data = _add_data_versions(base_dir, cfiles, data, normalize_fn)
    data = _add_viral(base_dir, cfiles, data, normalize_fn)
    return _add_genome_context(base_dir, cfiles, data, normalize_fn)
def _add_data_versions(base_dir, cfiles, data, norm_fn=None):
    """Add versions file with data names mapped to current version.
    """
    search_name = _normpath_remote(os.path.join(os.path.dirname(base_dir), "versions.csv"),
                                   normalize_fn=norm_fn)
    # First candidate whose (normalized) name matches versions.csv, or None.
    matches = (fname for fname in cfiles
               if search_name == (norm_fn(fname) if norm_fn else fname))
    data["reference"]["versions"] = next(matches, None)
    return data
def _add_viral(base_dir, cfiles, data, norm_fn=None):
    """Add fasta and indices for viral QC.
    """
    viral_dir = _normpath_remote(os.path.join(os.path.dirname(base_dir), "viral"),
                                 normalize_fn=norm_fn)
    viral_files = [fname for fname in cfiles if fname.startswith(viral_dir)]
    if not viral_files:
        data["reference"]["viral"] = None
    else:
        # Split the fasta itself from its accompanying index files.
        fastas = [fname for fname in viral_files if fname.endswith(".fa")]
        indexes = [fname for fname in viral_files if not fname.endswith(".fa")]
        data["reference"]["viral"] = {"base": fastas[0], "indexes": indexes}
    return data
def _ensure_annotations(resources, cfiles, data, normalize_fn):
    """Retrieve additional annotations for downstream processing.

    Mirrors functionality in bcbio.pipeline.run_info.ensure_annotations
    """
    transcript_gff = tz.get_in(["rnaseq", "transcripts"], resources)
    if transcript_gff:
        # Look for a BED file sitting next to the transcript GFF.
        gene_bed = utils.splitext_plus(transcript_gff)[0] + ".bed"
        test_gene_bed = normalize_fn(gene_bed) if normalize_fn else gene_bed
        for fname in cfiles:
            test_fname = normalize_fn(fname) if normalize_fn else fname
            if test_fname == test_gene_bed:
                # Store the backend's original name, not the normalized one.
                resources["rnaseq"]["gene_bed"] = fname
                break
    return resources
def _add_configured_indices(base_dir, cfiles, data, norm_fn=None):
    """Add additional resource indices defined in genome_resources: snpeff
    """
    snpeff_db = tz.get_in(["genome_resources", "aliases", "snpeff"], data)
    if snpeff_db:
        # Prefer a single pre-packaged workflow tarball when available.
        tarball = _normpath_remote(os.path.join(os.path.dirname(base_dir), "snpeff--%s-wf.tar.gz" % snpeff_db),
                                   normalize_fn=norm_fn)
        snpeff_files = [x for x in cfiles if tarball == (norm_fn(x) if norm_fn else x)]
        if len(snpeff_files) == 1:
            data["reference"]["snpeff"] = {snpeff_db: snpeff_files[0]}
        else:
            # Otherwise collect the unpacked snpeff/<db>/ directory contents,
            # separating the predictor binary from the other index files.
            index_dir = _normpath_remote(os.path.join(os.path.dirname(base_dir), "snpeff", snpeff_db),
                                         normalize_fn=norm_fn)
            if not index_dir.endswith("/"):
                index_dir += "/"
            snpeff_files = [x for x in cfiles if x.startswith(index_dir)]
            if len(snpeff_files) > 0:
                base_files = [x for x in snpeff_files if x.endswith("/snpEffectPredictor.bin")]
                assert len(base_files) == 1, base_files
                del snpeff_files[snpeff_files.index(base_files[0])]
                data["reference"]["snpeff"] = {"base": base_files[0], "indexes": snpeff_files}
    return data
def _add_genome_context(base_dir, cfiles, data, norm_fn=None):
    """Add associated genome context files, if present.
    """
    index_dir = _normpath_remote(os.path.join(os.path.dirname(base_dir), "coverage", "problem_regions"),
                                 normalize_fn=norm_fn)
    context_files = [fname for fname in cfiles
                     if fname.startswith(index_dir) and fname.endswith(".gz")]
    if context_files:
        # Stable ordering by file name for reproducible configurations.
        data["reference"]["genome_context"] = sorted(context_files, key=os.path.basename)
    return data
def _normpath_remote(orig, normalize_fn=None):
"""Normalize a path, avoiding removing initial s3:// style keys
"""
if normalize_fn:
return os.path.normpath(normalize_fn(orig))
elif orig.find("://") > 0:
key, curpath = orig.split(":/")
return key + ":/" + os.path.normpath(curpath)
else:
return os.path.normpath(orig)
def standard_genome_refs(genome_build, aligner, ref_prefix, list_fn):
    """Retrieve standard genome references: sequence, rtg and aligner.
    """
    refs = {}
    base_suffixes = ("/%s.fa" % genome_build, "/mainIndex")
    for dirname in [d for d in ["seq", "rtg", aligner] if d]:
        key = {"seq": "fasta", "ucsc": "twobit"}.get(dirname, dirname)
        # Prefer a single pre-packaged "<dirname>*-wf.tar.gz" tarball at the
        # top level of the reference prefix.
        tarballs = [f for f in list_fn(ref_prefix)
                    if os.path.basename(f).startswith(dirname) and f.endswith("-wf.tar.gz")]
        if tarballs:
            assert len(tarballs) == 1, tarballs
            if dirname == aligner:
                refs[key] = {"base": tarballs[0], "indexes": tarballs}
            else:
                refs[key] = tarballs[0]
            continue
        # Fall back to an exploded per-reference directory.
        dir_files = list_fn(os.path.join(ref_prefix, dirname))
        base_files = [f for f in dir_files if f.endswith(base_suffixes)]
        if base_files:
            assert len(base_files) == 1, base_files
            base_file = base_files[0]
            dir_files.remove(base_file)
            refs[key] = {"base": base_file, "indexes": dir_files}
        elif len(dir_files) == 1:
            refs[key] = dir_files[0]
        else:
            refs[key] = {"indexes": dir_files}
    return refs
def find_ref_prefix(genome_build, find_fn):
    """Identify reference prefix in folders for genome build.
    """
    # Try the bare build name first, then the "genomes/<build>" layout.
    for template in ("%s", "genomes/%s"):
        found = find_fn(template % genome_build)
        if found:
            return found
    raise ValueError("Did not find genome files for %s" % (genome_build))
def fill_remote(cur, find_fn, is_remote_fn):
    """Add references in data dictionary to remote files if present and not local.
    """
    # Recurse through containers, rewriting string leaves that look like
    # file references (have an extension) but are neither local nor remote.
    if isinstance(cur, (list, tuple)):
        return [fill_remote(item, find_fn, is_remote_fn) for item in cur]
    if isinstance(cur, dict):
        return {key: fill_remote(val, find_fn, is_remote_fn)
                for key, val in cur.items()}
    if (isinstance(cur, six.string_types) and os.path.splitext(cur)[-1]
            and not os.path.exists(cur) and not is_remote_fn(cur)):
        return find_fn(cur) or cur
    return cur
| [
"chapmanb@50mail.com"
] | chapmanb@50mail.com |
9180db9fbe7b19367e0b3ec313c3e0e46559ccf1 | f2c6ecbb99e8e606cd47a02045ad1bb6c881cfbd | /castjeeves/sqltables/TableLoader.py | d301f68460220e1e51f5aa8cda5a7bd17a74a8d7 | [
"BSD-3-Clause"
] | permissive | dkauf42/bayota | 36dc37f5f6fc8072567d970b1d8a379d8fb55b99 | 104ba91746231be10419390a9d7ed8f2593a21bc | refs/heads/master | 2023-04-10T18:22:56.979432 | 2021-11-17T15:54:48 | 2021-11-17T15:54:48 | 180,859,770 | 2 | 0 | NOASSERTION | 2021-11-17T15:54:49 | 2019-04-11T19:03:09 | Python | UTF-8 | Python | false | false | 2,238 | py | import pandas as pd
class TableLoader(object):
    # Write-once container for a fixed set of pandas DataFrames.  The allowed
    # attribute names are declared up front via `tableSet`; each table may be
    # assigned exactly once through addTable() and never reassigned.
    def __init__(self, tableSet):
        # Bypass our own __setattr__ guard (which forbids touching tableSet).
        object.__setattr__(self, "tableSet", set(tableSet))
    def __getattribute__(self, attr):
        # Intercept every attribute read: tableSet itself is hidden, and
        # access to a declared-but-unset table raises a helpful error.
        if attr == "tableSet":
            raise AttributeError("instance <attr>:tableSet is not directly accessible, use <method>:getTblList instead")
        else:
            # This was: tableSet = object.__getattribute__(self, "tableSet")
            # But D.E.Kaufman added this if, then block...
            if attr == '__dict__':
                # __dict__ may be requested before __init__ has run (e.g. by
                # pickling/introspection); fall back to a dummy sentinel list.
                try:
                    tableSet = object.__getattribute__(self, "tableSet")
                except AttributeError:
                    tableSet = ['', '']
            else:
                tableSet = object.__getattribute__(self, "tableSet")
            try:
                item = object.__getattribute__(self, attr)
                if attr in tableSet:
                    return item # pd.DataFrame.copy(item)
                else:
                    return item
            except AttributeError:
                # Distinguish "declared table not yet loaded" from a typo.
                if attr in tableSet:
                    raise AttributeError("use <method>:addTable to add <attr>:{:s}".format(attr))
                else:
                    raise AttributeError("invalid attribute specification")
    def __setattr__(self, attr, value):
        # Enforce write-once semantics: only declared table names may be set,
        # and only if they have not been set before.
        if attr == "tableSet":
            raise AttributeError("<attr>:tableSet cannot be changed")
        tableSet = object.__getattribute__(self, "tableSet")
        if attr in tableSet:
            if hasattr(self, attr):
                raise AttributeError("attribute has already been set and may not be changed")
            else:
                object.__setattr__(self, attr, value)
        else:
            raise AttributeError("invalid attribute specification")
    def getTblList(self):
        # Public, sorted view of the declared table names.
        tableSet = object.__getattribute__(self, "tableSet")
        return sorted(list(tableSet))
    def addTable(self, tblName, tbl):
        # Validate the payload type, then delegate to the guarded __setattr__.
        if not isinstance(tbl, pd.DataFrame):
            raise TypeError("<arg>:tbl should be of type pandas.DataFrame")
        try:
            self.__setattr__(tblName, tbl)
        except AttributeError as err:
            raise err
| [
"dkauf42@gmail.com"
] | dkauf42@gmail.com |
7aed7b9ed202249c59c62c752ec52f0d75b18f27 | f1738cd603e0b2e31143f4ebf7eba403402aecd6 | /ucs/management/univention-directory-manager-modules/scripts/proof_kerberos_deactivation | 7c56f2eaf92fa5f45cb3010e4ca890f1c778bc7b | [] | no_license | m-narayan/smart | 92f42bf90d7d2b24f61915fac8abab70dd8282bc | 1a6765deafd8679079b64dcc35f91933d37cf2dd | refs/heads/master | 2016-08-05T17:29:30.847382 | 2013-01-04T04:50:26 | 2013-01-04T04:50:26 | 7,079,786 | 8 | 6 | null | 2015-04-29T08:54:12 | 2012-12-09T14:56:27 | Python | UTF-8 | Python | false | false | 4,758 | #!/usr/bin/python2.6
# -*- coding: utf-8 -*-
#
# Univention Directory Manager Modules
# sync posix flags to kerberos flags
#
# Copyright 2004-2012 Univention GmbH
#
# http://www.univention.de/
#
# All rights reserved.
#
# The source code of this program is made available
# under the terms of the GNU Affero General Public License version 3
# (GNU AGPL V3) as published by the Free Software Foundation.
#
# Binary versions of this program provided by Univention to you as
# well as other copyrighted, protected or trademarked materials like
# Logos, graphics, fonts, specific documentations and configurations,
# cryptographic keys etc. are subject to a license agreement between
# you and Univention and not subject to the GNU AGPL V3.
#
# In the case you use this program under the terms of the GNU AGPL V3,
# the program is provided in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License with the Debian GNU/Linux or Univention distribution in file
# /usr/share/common-licenses/AGPL-3; if not, see
# <http://www.gnu.org/licenses/>.
# set activation state of kerberos account to same state as posix account
# set password and account exiration date to the same value as shadowExpiry and shadowLastChange
import ldap, re, time
import univention.baseconfig
# Python 2 maintenance script: mirror the posix enable/disable state and the
# shadow expiry fields of every posixAccount onto its Kerberos attributes.
# NOTE(review): performs live LDAP modifications; run with care.
baseConfig=univention.baseconfig.baseConfig()
baseConfig.load()
baseDN=baseConfig['ldap/base']
print "using baseDN",baseDN
# Bind as cn=admin using the machine secret (trailing newline stripped).
lo=ldap.open('localhost', 7389)
bindpw=open('/etc/ldap.secret').read()
if bindpw[-1] == '\n':
	bindpw=bindpw[0:-1]
lo.simple_bind_s("cn=admin,"+baseDN, bindpw)
count_changes = 0
warning = 0
# passwords will only be found in posixAccount
res_pA=lo.search_s(baseDN, ldap.SCOPE_SUBTREE, 'objectClass=posixAccount')
for i in range(0,len(res_pA)):
	dn_pA=res_pA[i][0]
	print dn_pA
	if res_pA[i][1].has_key('objectClass'):
		if 'krb5KDCEntry' in res_pA[i][1]['objectClass']:
			if res_pA[i][1].has_key('userPassword'):
				# A "{crypt}!" prefix marks a disabled posix account.
				_re = re.compile('^\{crypt\}!.*$')
				disabled = _re.match(res_pA[i][1]['userPassword'][0])
				# krb5KDCFlags 254 = disabled, 126 = enabled; REPLACE when the
				# attribute exists, ADD when it has to be created.
				if res_pA[i][1].has_key('krb5KDCFlags'):
					if disabled and not res_pA[i][1]['krb5KDCFlags'][0] == '254':
						modlist = [(ldap.MOD_REPLACE,'krb5KDCFlags','254')]
						lo.modify_s(dn_pA,modlist)
						print " - kerberos disabled"
					elif not disabled and not res_pA[i][1]['krb5KDCFlags'][0] == '126':
						modlist = [(ldap.MOD_REPLACE,'krb5KDCFlags','126')]
						lo.modify_s(dn_pA,modlist)
						print " - kerberos enabled"
					else:
						print " - enable/disable OK"
				else:
					if disabled:
						modlist = [(ldap.MOD_ADD,'krb5KDCFlags','254')]
						lo.modify_s(dn_pA,modlist)
						print " - kerberos initial disabled"
					else:
						modlist = [(ldap.MOD_ADD,'krb5KDCFlags','126')]
						lo.modify_s(dn_pA,modlist)
						print " - kerberos initial enabled"
			else:
				print " - user password not set"
			# shadowExpire is stored in days since the epoch; convert to the
			# generalized-time form krb5ValidEnd expects (YYYYMMDDhhmmssZ).
			if res_pA[i][1].has_key('shadowExpire') and res_pA[i][1]['shadowExpire'][0]:
				userexpiry=time.strftime("%d.%m.%y",time.gmtime((long(res_pA[i][1]['shadowExpire'][0]))*3600*24))
				krb5ValidEnd="%s" % "20"+userexpiry[6:8]+userexpiry[3:5]+userexpiry[0:2]+"000000Z"
				if not res_pA[i][1].has_key('krb5ValidEnd'):
					modlist = [(ldap.MOD_ADD,'krb5ValidEnd',krb5ValidEnd)]
					lo.modify_s(dn_pA,modlist)
					print " - kerberos expiry initial set"
				elif not res_pA[i][1]['krb5ValidEnd'][0] == krb5ValidEnd:
					modlist = [(ldap.MOD_REPLACE,'krb5ValidEnd',krb5ValidEnd)]
					lo.modify_s(dn_pA,modlist)
					print " - kerberos expiry set"
				else:
					print " - kerberos expiry OK"
			else:
				print " - account expire not set"
			# Password expiry = shadowLastChange + shadowMax (both in days),
			# mirrored onto krb5PasswordEnd in the same generalized-time form.
			if res_pA[i][1].has_key('shadowLastChange') and res_pA[i][1].has_key('shadowMax'):
				passwordexpiry=time.strftime("%d.%m.%y",time.gmtime((long(res_pA[i][1]['shadowLastChange'][0])+long(res_pA[i][1]['shadowMax'][0]))*3600*24))
				krb5PasswordEnd="%s" % "20"+passwordexpiry[6:8]+passwordexpiry[3:5]+passwordexpiry[0:2]+"000000Z"
				if not res_pA[i][1].has_key('krb5PasswordEnd'):
					modlist = [(ldap.MOD_ADD,'krb5PasswordEnd',krb5PasswordEnd)]
					lo.modify_s(dn_pA,modlist)
					print "kerberos password end initial set"
				elif not res_pA[i][1]['krb5PasswordEnd'][0] == krb5PasswordEnd:
					modlist = [(ldap.MOD_REPLACE,'krb5PasswordEnd',krb5PasswordEnd)]
					lo.modify_s(dn_pA,modlist)
					print " - kerberos password end set"
				else:
					print " - kerberos password end OK"
			else:
				print " - Password expire not set"
		else:
			print " - no kerberos account"
	else:
		print " - WARNING: no key objectClass found !"
| [
"kartik@debian.org"
] | kartik@debian.org | |
561784c981e82de125085710eae2d409b6574fe5 | 3529ecaa44a53172094ba13498097057c8972723 | /Questiondir/761.employee-free-time/761.employee-free-time_134959127.py | 4ab403cf8042979a9028ef45e7132ecc3206df1a | [] | no_license | cczhong11/Leetcode-contest-code-downloader | 0681f0f8c9e8edd5371fd8d0a1d37dcc368566b6 | db64a67869aae4f0e55e78b65a7e04f5bc2e671c | refs/heads/master | 2021-09-07T15:36:38.892742 | 2018-02-25T04:15:17 | 2018-02-25T04:15:17 | 118,612,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 965 | py | # Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution(object):
    def employeeFreeTime(self, avails):
        """Return the common free intervals across all employees.

        :type avails: List[List[Interval]]
        :rtype: List[Interval]

        Fix: the original relied on ``heapq`` being injected by the judge
        environment; import it explicitly so the code is self-contained.
        NOTE(review): the result is a list of (start, end) tuples, not
        Interval objects, matching the original behavior.
        """
        import heapq  # explicit import; LeetCode injects this globally

        # Min-heap of (next start time, remaining interval list), one entry
        # per employee that still has intervals left.
        schedule = []
        for lst in avails:
            if lst:
                schedule.append((lst[0].start, lst))
        heapq.heapify(schedule)
        freetimes = []
        lastend = 0
        if schedule:
            lastend = schedule[0][0]
        while schedule:
            newstart, newlist = heapq.heappop(schedule)
            # A gap between the merged busy time and the next interval is
            # free for everyone.
            if newstart > lastend:
                freetimes.append((lastend, newstart))
            lastsch = newlist.pop(0)
            lastend = max(lastend, lastsch.end)
            if newlist:
                heapq.heappush(schedule, (newlist[0].start, newlist))
        return freetimes
"tczhong24@gmail.com"
] | tczhong24@gmail.com |
3a63964aaa86ddd207c4953e4157339d539b2660 | e121dcc5d23e225891420e730549b9cc7ebe8e88 | /python/lib/direct/extensions/NurbsSurfaceEvaluator.py | 7cdf5e0a01a26643840d05f6d9cb94eaf7642328 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | PlumpMath/panda3d-3 | 4f4cf7627eddae9b7f30795e0a0657b01fdf670d | 5c0be0e1cd46b422d28d5b81ffb1e8b28c3ac914 | refs/heads/master | 2021-01-25T06:55:36.209044 | 2014-09-29T14:24:53 | 2014-09-29T14:24:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py |
"""
NurbsSurfaceEvaluator-extensions module: contains methods to extend
functionality of the NurbsSurfaceEvaluator class
"""
def getUKnots(self):
    """Returns the U knot vector as a Python list of floats"""
    return [self.getUKnot(i) for i in range(self.getNumUKnots())]
def getVKnots(self):
    """Returns the V knot vector as a Python list of floats"""
    return [self.getVKnot(i) for i in range(self.getNumVKnots())]
def getVertices(self, relTo = None):
    """Returns the vertices as a 2-d Python list of Vec4's, relative
    to the indicated space if given."""
    rows = []
    for ui in range(self.getNumUVertices()):
        # Only pass relTo through when it was actually supplied (truthy),
        # matching the two-argument call otherwise.
        if relTo:
            row = [self.getVertex(ui, vi, relTo)
                   for vi in range(self.getNumVVertices())]
        else:
            row = [self.getVertex(ui, vi)
                   for vi in range(self.getNumVVertices())]
        rows.append(row)
    return rows
| [
"ralf.kaestner@gmail.com"
] | ralf.kaestner@gmail.com |
ceda5f266f4d61d019be258bfcb20c68883dd20a | 648f5af4f4e95b0f7ad4943254abcacfe520c685 | /Labs_4/Scripting labs/cast_list.py | b1643ca7af1b9d7e220d663f19e3e835548a1647 | [
"MIT"
] | permissive | damiso15/we_japa_data_science_lab | 64a1ccbcff10554505dc55172991a9ed920f1295 | ada2a358753e1f1db087d410808524e7546284f6 | refs/heads/master | 2022-11-30T12:57:55.920142 | 2020-08-13T21:20:01 | 2020-08-13T21:20:01 | 283,690,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | # You're going to create a list of the actors who appeared in the television programme Monty
# Python's Flying Circus.
# Write a function called create_cast_list that takes a filename as input and returns a list of
# actors' names. It will be run on the file flying_circus_cast.txt (this information was collected
# from imdb.com). Each line of that file consists of an actor's name, a comma, and then some
# (messy) information about roles they played in the programme. You'll need to extract only the
# name and add it to a list. You might use the .split() method to process each line.
# def create_cast_list(filename):
def create_cast_list(filename):
    """Read `filename` and return the actor names, one per line.

    Each line is "<name>, <messy role info>"; only the text before the
    first comma is kept.
    """
    names = []
    with open(filename, 'r') as source:
        for line in source:
            names.append(line.split(',')[0])
    return names
# Demo usage: run the extractor on the provided data file and print the names.
# NOTE(review): executes at import time; 'flying_circus_cast.txt' must exist
# in the current working directory.
cast_list = create_cast_list('flying_circus_cast.txt')
print(cast_list)
| [
"damiso15@yahoo.com"
] | damiso15@yahoo.com |
28a7e27fedcf4fc6f018822c08f203e5a447e126 | c85aede0797e73dd719646a0f7671594b0d4e4e9 | /docs/support/pcsv_example_2.py | 80e6f2247fa5e47cd8f9fa880a93077fd2edcb53 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mtiid/putil | c0493535ed5ee7694546ee9193cad0a764c440fc | a99c84ee781aa9eb6e45272f95b82ac35648ba4b | refs/heads/master | 2021-01-18T09:05:50.437577 | 2016-01-20T16:01:12 | 2016-01-20T16:01:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,870 | py | # pcsv_example_2.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111,C0410,W0104
import putil.misc, putil.pcsv
def main():
    """Demonstrate putil.pcsv.replace on two temporary CSV files.

    Builds an input file and a replacement file, replaces the "Cost"
    column for selected items with "Rate" values from the replacement
    file, and asserts the merged output is correct.
    """
    ctx = putil.misc.TmpFile
    # Three temp files: input, replacement source, and merged output.
    with ctx() as ifname:
        with ctx() as rfname:
            with ctx() as ofname:
                # Create first (input) data file
                input_data = [
                    ['Item', 'Cost'],
                    [1, 9.99],
                    [2, 10000],
                    [3, 0.10]
                ]
                putil.pcsv.write(ifname, input_data, append=False)
                # Create second (replacement) data file
                replacement_data = [
                    ['Staff', 'Rate', 'Days'],
                    ['Joe', 10, 'Sunday'],
                    ['Sue', 20, 'Thursday'],
                    ['Pat', 15, 'Tuesday']
                ]
                putil.pcsv.write(rfname, replacement_data, append=False)
                # Replace "Cost" column of input file with "Rate" column
                # of replacement file for "Items" 2 and 3 with "Staff" data
                # from Joe and Pat. Save resulting data to another file
                putil.pcsv.replace(
                    ifname=ifname,
                    idfilter=('Cost', {'Item':[1, 3]}),
                    rfname=rfname,
                    rdfilter=('Rate', {'Staff':['Joe', 'Pat']}),
                    ofname=ofname
                )
                # Verify that resulting file is correct
                ref_data = [
                    ['Item', 'Cost'],
                    [1, 10],
                    [2, 10000],
                    [3, 15]
                ]
                obj = putil.pcsv.CsvFile(ofname)
                assert obj.header() == ref_data[0]
                assert obj.data() == ref_data[1:]
if __name__ == '__main__':
    main()
| [
"pmasdev@gmail.com"
] | pmasdev@gmail.com |
2904bf6014e3068e49cc729e4052857b4387ca52 | d99ac626d62c663704444a9cce7e7fc793a9e75e | /crypto_implementations/virgil-crypto-c/wrappers/python/virgil_crypto_lib/foundation/_c_bridge/_vscf_hybrid_private_key.py | 8947131a381161d91493cb49fb7178371e71ba0a | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Experiment5X/CryptoFunctionDetection | 3ab32d5573a249d24db1faf772721bc80b8d905d | dac700193e7e84963943593e36844b173211a8a1 | refs/heads/master | 2023-04-19T09:12:35.828268 | 2021-05-13T22:39:27 | 2021-05-13T22:39:27 | 355,299,557 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,617 | py | # Copyright (C) 2015-2020 Virgil Security, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Lead Maintainer: Virgil Security Inc. <support@virgilsecurity.com>
from virgil_crypto_lib._libs import *
from ctypes import *
from ._vscf_impl import vscf_impl_t
# Opaque ctypes handle mirroring the C `vscf_hybrid_private_key_t` context;
# it is only ever used behind POINTER(), so no fields are declared.
class vscf_hybrid_private_key_t(Structure):
    pass
class VscfHybridPrivateKey(object):
    """Handles a hybrid private key.
    The hybrid private key contains 2 private keys."""

    def __init__(self):
        """Create underlying C context."""
        self._ll = LowLevelLibs()
        self._lib = self._ll.foundation

    def _func(self, name, argtypes, restype):
        """Resolve `name` from the foundation library and declare its ctypes
        signature.

        Centralizes the resolve/argtypes/restype boilerplate that was
        previously duplicated in every wrapper method; behavior is unchanged
        (the signature is still declared on each call).
        """
        c_func = getattr(self._lib, name)
        c_func.argtypes = argtypes
        c_func.restype = restype
        return c_func

    def vscf_hybrid_private_key_new(self):
        return self._func('vscf_hybrid_private_key_new',
                          [], POINTER(vscf_hybrid_private_key_t))()

    def vscf_hybrid_private_key_delete(self, ctx):
        return self._func('vscf_hybrid_private_key_delete',
                          [POINTER(vscf_hybrid_private_key_t)], None)(ctx)

    def vscf_hybrid_private_key_alg_id(self, ctx):
        """Algorithm identifier the key belongs to."""
        return self._func('vscf_hybrid_private_key_alg_id',
                          [POINTER(vscf_hybrid_private_key_t)], c_int)(ctx)

    def vscf_hybrid_private_key_alg_info(self, ctx):
        """Return algorithm information that can be used for serialization."""
        return self._func('vscf_hybrid_private_key_alg_info',
                          [POINTER(vscf_hybrid_private_key_t)],
                          POINTER(vscf_impl_t))(ctx)

    def vscf_hybrid_private_key_len(self, ctx):
        """Length of the key in bytes."""
        return self._func('vscf_hybrid_private_key_len',
                          [POINTER(vscf_hybrid_private_key_t)], c_size_t)(ctx)

    def vscf_hybrid_private_key_bitlen(self, ctx):
        """Length of the key in bits."""
        return self._func('vscf_hybrid_private_key_bitlen',
                          [POINTER(vscf_hybrid_private_key_t)], c_size_t)(ctx)

    def vscf_hybrid_private_key_is_valid(self, ctx):
        """Check that key is valid.
        Note, this operation can be slow."""
        return self._func('vscf_hybrid_private_key_is_valid',
                          [POINTER(vscf_hybrid_private_key_t)], c_bool)(ctx)

    def vscf_hybrid_private_key_extract_public_key(self, ctx):
        """Extract public key from the private key."""
        return self._func('vscf_hybrid_private_key_extract_public_key',
                          [POINTER(vscf_hybrid_private_key_t)],
                          POINTER(vscf_impl_t))(ctx)

    def vscf_hybrid_private_key_first_key(self, ctx):
        """Return first private key."""
        return self._func('vscf_hybrid_private_key_first_key',
                          [POINTER(vscf_hybrid_private_key_t)],
                          POINTER(vscf_impl_t))(ctx)

    def vscf_hybrid_private_key_second_key(self, ctx):
        """Return second private key."""
        return self._func('vscf_hybrid_private_key_second_key',
                          [POINTER(vscf_hybrid_private_key_t)],
                          POINTER(vscf_impl_t))(ctx)

    def vscf_hybrid_private_key_shallow_copy(self, ctx):
        return self._func('vscf_hybrid_private_key_shallow_copy',
                          [POINTER(vscf_hybrid_private_key_t)],
                          POINTER(vscf_hybrid_private_key_t))(ctx)

    def vscf_hybrid_private_key_impl(self, ctx):
        return self._func('vscf_hybrid_private_key_impl',
                          [POINTER(vscf_hybrid_private_key_t)],
                          POINTER(vscf_impl_t))(ctx)
| [
"xmeadamx@gmail.com"
] | xmeadamx@gmail.com |
ca04af47cfd93b5d7ecf1ff6e78f5cbdec860b7e | d485ac12220d6febfe383bde45d55b3160cdc930 | /info_system/migrations/0009_auto_20170130_2303.py | 9f8f2f82e021335ef285819d5d7a7138f7db4229 | [] | no_license | argon2008-aiti/lcidarkuman | 03ef2b2c200ca21b57f7b8089976c8b3a1c03612 | 3e54fffdf9605edd87e7bfce134d0c5203dc72a9 | refs/heads/master | 2021-01-13T03:09:32.388512 | 2019-05-19T14:16:48 | 2019-05-19T14:16:48 | 77,407,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 852 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    # Auto-generated Django migration adjusting field defaults on `member`.
    # NOTE(review): the literal dates below look like frozen snapshots of
    # `date.today()` taken when `makemigrations` ran; they are part of the
    # recorded migration history and must not be edited retroactively.
    dependencies = [
        ('info_system', '0008_auto_20170127_2125'),
    ]
    operations = [
        migrations.AlterField(
            model_name='member',
            name='date_joined',
            field=models.DateField(default=datetime.date(2017, 1, 30)),
        ),
        migrations.AlterField(
            model_name='member',
            name='date_of_birth',
            field=models.DateField(default=datetime.date(2017, 1, 30)),
        ),
        migrations.AlterField(
            model_name='member',
            name='profile',
            field=models.ImageField(default=b'static/profiles/banner1.png', upload_to=b'static/profiles/', blank=True),
        ),
    ]
| [
"yunguta@gmail.com"
] | yunguta@gmail.com |
77fad9c6a8296a55f65e79ad21de98f7209e2477 | f324dba8769c8fb0f23693faa0cf8cba0dd66e0b | /setup.py | 5912fc516eee8aed5bbe84c6e3e67b05b0dc469f | [
"BSD-3-Clause"
] | permissive | sourcery-ai-bot/roles | e3587829ca2870c4f5848cbea4605330cc985383 | e945d4d4a265cc41216cf46b2325d0bba15f03ad | refs/heads/master | 2022-11-05T11:33:27.802375 | 2020-06-18T10:53:16 | 2020-06-18T10:53:16 | 273,213,490 | 0 | 0 | null | 2020-06-18T10:53:09 | 2020-06-18T10:53:09 | null | UTF-8 | Python | false | false | 1,013 | py | """
Setup script for roles module.
"""
# Prefer setuptools; fall back to plain distutils on minimal environments.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
from contextlib import closing
import glob

VERSION = '0.10'

# Fix: a file object is already a context manager, so wrapping it in
# contextlib.closing() was redundant; plain `with open(...)` gives the
# identical close-on-exit behavior.
with open('README.txt') as f:
    doc = f.read()

setup(
    name='roles',
    version=VERSION,
    description='Role based development',
    long_description=doc,
    author='Arjan Molenaar',
    author_email='gaphor@gmail.com',
    url='http://github.com/amolenaar/roles',
    license="BSD License",
    packages = [ 'roles' ],
    keywords="role DCI data context interaction",
    platforms=["All"],
    classifiers=['Development Status :: 4 - Beta',
                 'Intended Audience :: Developers',
                 'License :: OSI Approved :: BSD License',
                 'Natural Language :: English',
                 'Operating System :: OS Independent',
                 'Programming Language :: Python',
                 'Topic :: Software Development :: Libraries']
)
)
#vim:sw=4:et:ai
| [
"gaphor@gmail.com"
] | gaphor@gmail.com |
53592dc4b0ddb1d02dce8614f5215fc71d2939bb | e1429633ac8989e9cee6089c43c4f54e3553cab3 | /UefiTestingPkg/AuditTests/UefiVarLockAudit/Windows/UefiVarAudit.py | c97d8b1543b2852d24a92f95699fbe94f0bcf1ac | [
"BSD-2-Clause"
] | permissive | Perry31/mu_plus | 7bfd4a3c773384ff44df53a794382c7a047b1702 | 4cee2caffa0344517ce713c4066629160f1968d8 | refs/heads/release/20180529 | 2022-07-09T09:08:27.770548 | 2018-10-22T19:51:46 | 2018-10-22T19:51:46 | 154,810,652 | 0 | 0 | BSD-2-Clause | 2022-07-04T15:08:56 | 2018-10-26T09:30:49 | C | UTF-8 | Python | false | false | 5,383 | py | #
# Script to iterate thru an xml file and
# check the UEFI variable read/write properties of a given variable
#
# Copyright (c) 2016, Microsoft Corporation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
##
import os, sys
import argparse
import logging
import datetime
import struct
import hashlib
import shutil
import time
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import Element
from UefiVariablesSupportLib import UefiVariable
#
#main script function
#
def main():
    """Parse CLI options, probe each UEFI variable listed in the input XML
    for OS-level read/write access, and write the annotated XML out.

    Returns 0 on success, a negative error code on bad arguments.
    """
    parser = argparse.ArgumentParser(description='Variable Audit Tool')
    #Output debug log
    parser.add_argument("-l", dest="OutputLog", help="Create an output log file: ie -l out.txt", default=None)
    parser.add_argument("--OutputXml", dest="OutputXml", help="Output Xml file that contains final results", default=None)
    parser.add_argument("--InputXml", dest="InputXml", help="Input Xml file", default=None)
    #Turn on dubug level logging
    parser.add_argument("--debug", action="store_true", dest="debug", help="turn on debug logging level for file log", default=False)
    options = parser.parse_args()
    #setup file based logging if outputReport specified
    if(options.OutputLog):
        if(len(options.OutputLog) < 2):
            logging.critical("the output log file parameter is invalid")
            return -2
        else:
            #setup file based logging
            filelogger = logging.FileHandler(filename=options.OutputLog, mode='w')
            if(options.debug):
                filelogger.setLevel(logging.DEBUG)
            else:
                filelogger.setLevel(logging.INFO)
            # NOTE(review): `formatter` is the module-global created in the
            # __main__ block below; main() only works when run as a script.
            filelogger.setFormatter(formatter)
            logging.getLogger('').addHandler(filelogger)
    logging.info("Log Started: " + datetime.datetime.strftime(datetime.datetime.now(), "%A, %B %d, %Y %I:%M%p" ))
    #Check for required input parameters
    if(not options.InputXml) or (not os.path.isfile(options.InputXml)):
        logging.critical("No Input Xml file specified")
        return -1
    if(not options.OutputXml):
        logging.critical("Output Xml file path not specified")
        return -2
    Uefi = UefiVariable()
    #read in XML file as doc
    XmlFile = ET.parse(options.InputXml)
    XmlRoot = XmlFile.getroot()
    for var in XmlRoot.findall("Variable"):
        name = var.get("Name")
        guid = var.get("Guid")
        # Probe read access, then probe write access by attempting a delete
        # (SetUefiVar with no data); restore the original data if the write
        # unexpectedly succeeded.
        (ReadStatus, Data, ReadErrorString) = Uefi.GetUefiVar(name, guid)
        (WriteSuccess, ErrorCode, WriteErrorString)= Uefi.SetUefiVar(name, guid)
        if(WriteSuccess != 0):
            logging.info("Must Restore Var %s:%s" % (name, guid))
            (RestoreSuccess, RestoreEC, RestoreErrorString) = Uefi.SetUefiVar(name, guid, Data)
            if (RestoreSuccess == 0):
                logging.critical("Restoring failed for Var %s:%s 0x%X ErrorCode: 0x%X %s" % (name, guid, RestoreSuccess, RestoreEC, RestoreErrorString))
        #append
        #<FromOs>
        #<ReadStatus>0x0 Success</ReadStatus>
        #<WriteStatus>0x8000000000000002 Invalid Parameter</WriteStatus>
        ele = Element("FromOs")
        rs = Element("ReadStatus")
        ws = Element("WriteStatus")
        rs.text = "0x%lX" % (ReadStatus)
        if(ReadErrorString is not None):
            rs.text = rs.text + " %s" % ReadErrorString
        ws.text = "0x%lX" % ErrorCode
        if(WriteErrorString is not None):
            ws.text = ws.text + " %s" % WriteErrorString
        ele.append(rs)
        ele.append(ws)
        var.append(ele)
    XmlFile.write(options.OutputXml)
    return 0
if __name__ == '__main__':
    #setup main console as logger
    logger = logging.getLogger('')
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter("%(levelname)s - %(message)s")
    console = logging.StreamHandler()
    console.setLevel(logging.CRITICAL)
    console.setFormatter(formatter)
    logger.addHandler(console)
    #call main worker function
    retcode = main()
    if retcode != 0:
        logging.critical("Failed. Return Code: %i" % retcode)
    #end logging
    logging.shutdown()
    sys.exit(retcode)
| [
"brbarkel@microsoft.com"
] | brbarkel@microsoft.com |
d81dbf76ee1a57f5b2cbec719ab8285289bca57e | a7926ba10e6c3717c27884eaacacfd02e86e0e7e | /0x04-python-more_data_structures/10-best_score.py | db9c1ecd229810b012783f95512468c924d4266a | [] | no_license | Yosri-ctrl/holbertonschool-higher_level_programming | e7516d33a49b7001eab1c33ca3ec236025a81fe5 | 4654b00f8ea7e8e013b131ffd4fc835de2f986fa | refs/heads/master | 2023-01-05T17:03:59.350837 | 2020-11-04T17:40:46 | 2020-11-04T17:40:46 | 259,329,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | #!/usr/bin/python3
def best_score(a_dictionary):
    """Return a key with the biggest integer value.

    Returns None when the dictionary is None or empty.

    Fixes over the original implementation:
    * the running maximum was seeded with 0, so dictionaries whose values
      are all negative never selected any key;
    * an empty dictionary returned "" instead of None;
    * the builtin name `max` was shadowed by a local variable.
    """
    if not a_dictionary:
        return None
    return max(a_dictionary, key=a_dictionary.get)
| [
"yosribouabid@gmail.com"
] | yosribouabid@gmail.com |
a08d5e055b20803571c91a862d0c579d8e4518f4 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2188/60634/257261.py | 03165fc942fd1f7a8effcb92592e622dbddad9a9 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | def equal(s1,s2):
if len(s1) == len(s2):
size = len(s1)
i = 0
while i <= size:
if i == size:
return True
if s1[i] != s2[i]:
return False
i += 1
return False
temp = input().split(" ")
n = int(temp[0])
k = int(temp[1])
A = input()
B = input()
problems = int(input())
for p in range(problems):
temp = input().split(" ")
T = A[int(temp[0])-1:int(temp[1])]
P = B[int(temp[2])-1:int(temp[3])]
count = 0
i = 0
while i <= len(T) - len(P):
if equal(T[i:i+len(P)],P):
count += k - (i + int(temp[0]))
i += len(P) - 1
i += 1
print(count)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
1bbdce025b29c7652d4da8243f6310d769b29477 | a799a105ab2aba39a475bf2ce086405def0351c2 | /src/gluonts/transform/_base.py | 15545292fb649a4c9b0dd15bdcaea3571bfd2db0 | [
"Apache-2.0"
] | permissive | mbohlkeschneider/gluon-ts | d663750d13798624eca5c9d6f12a87e321ce7334 | df4256b0e67120db555c109a1bf6cfa2b3bd3cd8 | refs/heads/master | 2021-11-24T06:09:49.905352 | 2021-10-14T09:30:38 | 2021-10-14T09:30:38 | 192,546,557 | 54 | 10 | Apache-2.0 | 2022-08-31T18:36:44 | 2019-06-18T13:33:36 | Python | UTF-8 | Python | false | false | 6,305 | py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import abc
from typing import Callable, Iterable, Iterator, List
from gluonts.core.component import validated
from gluonts.dataset.common import DataEntry, Dataset
from gluonts.env import env
class Transformation(metaclass=abc.ABCMeta):
"""
Base class for all Transformations.
A Transformation processes works on a stream (iterator) of dictionaries.
"""
@abc.abstractmethod
def __call__(
self, data_it: Iterable[DataEntry], is_train: bool
) -> Iterable[DataEntry]:
pass
def chain(self, other: "Transformation") -> "Chain":
return Chain([self, other])
def __add__(self, other: "Transformation") -> "Chain":
return self.chain(other)
def apply(
self, dataset: Dataset, is_train: bool = True
) -> "TransformedDataset":
return TransformedDataset(dataset, self, is_train=is_train)
class Chain(Transformation):
"""
Chain multiple transformations together.
"""
@validated()
def __init__(self, trans: List[Transformation]) -> None:
self.transformations: List[Transformation] = []
for transformation in trans:
# flatten chains
if isinstance(transformation, Chain):
self.transformations.extend(transformation.transformations)
else:
self.transformations.append(transformation)
def __call__(
self, data_it: Iterable[DataEntry], is_train: bool
) -> Iterable[DataEntry]:
tmp = data_it
for t in self.transformations:
tmp = t(tmp, is_train)
return tmp
class TransformedDataset(Dataset):
"""
A dataset that corresponds to applying a list of transformations to each
element in the base_dataset.
This only supports SimpleTransformations, which do the same thing at
prediction and training time.
Parameters
----------
base_dataset
Dataset to transform
transformations
List of transformations to apply
"""
def __init__(
self,
base_dataset: Dataset,
transformation: Transformation,
is_train=True,
) -> None:
self.base_dataset = base_dataset
self.transformation = transformation
self.is_train = is_train
def __len__(self):
# NOTE this is unsafe when transformations are run with is_train = True
# since some transformations may not be deterministic (instance splitter)
return sum(1 for _ in self)
def __iter__(self) -> Iterator[DataEntry]:
yield from self.transformation(
self.base_dataset, is_train=self.is_train
)
class Identity(Transformation):
def __call__(
self, data_it: Iterable[DataEntry], is_train: bool
) -> Iterable[DataEntry]:
return data_it
class MapTransformation(Transformation):
"""
Base class for Transformations that returns exactly one result per input in the stream.
"""
def __call__(
self, data_it: Iterable[DataEntry], is_train: bool
) -> Iterator:
for data_entry in data_it:
try:
yield self.map_transform(data_entry.copy(), is_train)
except Exception as e:
raise e
@abc.abstractmethod
def map_transform(self, data: DataEntry, is_train: bool) -> DataEntry:
pass
class SimpleTransformation(MapTransformation):
"""
Element wise transformations that are the same in train and test mode
"""
def map_transform(self, data: DataEntry, is_train: bool) -> DataEntry:
return self.transform(data)
@abc.abstractmethod
def transform(self, data: DataEntry) -> DataEntry:
pass
class AdhocTransform(SimpleTransformation):
"""
Applies a function as a transformation
This is called ad-hoc, because it is not serializable.
It is OK to use this for experiments and outside of a model pipeline that
needs to be serialized.
"""
def __init__(self, func: Callable[[DataEntry], DataEntry]) -> None:
self.func = func
def transform(self, data: DataEntry) -> DataEntry:
return self.func(data.copy())
class FlatMapTransformation(Transformation):
"""
Transformations that yield zero or more results per input, but do not
combine elements from the input stream.
"""
@validated()
def __init__(self):
self.max_idle_transforms = max(env.max_idle_transforms, 100)
def __call__(
self, data_it: Iterable[DataEntry], is_train: bool
) -> Iterator:
num_idle_transforms = 0
for data_entry in data_it:
num_idle_transforms += 1
for result in self.flatmap_transform(data_entry.copy(), is_train):
num_idle_transforms = 0
yield result
if num_idle_transforms > self.max_idle_transforms:
raise Exception(
f"Reached maximum number of idle transformation calls.\n"
f"This means the transformation looped over "
f"{self.max_idle_transforms} inputs without returning any "
f"output.\nThis occurred in the following transformation:\n"
f"{self}"
)
@abc.abstractmethod
def flatmap_transform(
self, data: DataEntry, is_train: bool
) -> Iterator[DataEntry]:
pass
class FilterTransformation(FlatMapTransformation):
def __init__(self, condition: Callable[[DataEntry], bool]) -> None:
super().__init__()
self.condition = condition
def flatmap_transform(
self, data: DataEntry, is_train: bool
) -> Iterator[DataEntry]:
if self.condition(data):
yield data
| [
"noreply@github.com"
] | mbohlkeschneider.noreply@github.com |
34e34b57a15f76966c9e27ff6ef2ed51cbc7481b | 441f0b4b4f2016ace7bed37431779b3352b9c2e4 | /Book Introdução Programação com Python/6 - Listas/06.06 - Calculo da media com notas digitadas.py | bb7d337bfbc147548494e910e77d646532c873ca | [] | no_license | AlexGalhardo/Learning-Python | 936b2eae814d148b0b3b77cc76cf81b45fbb4a02 | b710952101a0409f585ba975e2854bf0e0286ac7 | refs/heads/master | 2020-05-19T23:32:49.285710 | 2019-09-04T17:37:27 | 2019-09-04T17:37:27 | 134,312,273 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | ##############################################################################
# Parte do livro Introdução à Programação com Python
# Autor: Nilo Ney Coutinho Menezes
# Editora Novatec (c) 2010-2014
# Primeira edição - Novembro/2010 - ISBN 978-85-7522-250-8
# Primeira reimpressão - Outubro/2011
# Segunda reimpressão - Novembro/1012
# Terceira reimpressão - Agosto/2013
# Segunda edição - Junho/2014 - ISBN 978-85-7522-408-3
# Site: http://python.nilo.pro.br/
#
# Arquivo: capitulo 06\06.06 - Calculo da media com notas digitadas.py
##############################################################################
notas = [0,0,0,0,0]
soma = 0
x = 0
while x < 5:
notas[x] = float(input("Nota %d:" % x))
soma += notas[x]
x += 1
x = 0
while x < 5:
print("Nota %d: %6.2f" % (x, notas[x]))
x += 1
print("Média: %5.2f" % (soma/x))
| [
"aleexgvieira@gmail.com"
] | aleexgvieira@gmail.com |
a44a3e6650365ff9d37092a330edb5c10091ad47 | a5688a923c488414ecffcb92e3405d3876f1889d | /examples/computer_vision/mmdetection_pytorch/configs/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco.py | ab39c49a7335453756504a84235c99c4463cfda5 | [
"Apache-2.0"
] | permissive | armandmcqueen/determined | ae6e7a4d5d8c3fb6a404ed35519643cf33bd08e4 | 251e7093b60a92633b684586ac7a566379442f15 | refs/heads/master | 2023-05-28T17:52:18.915710 | 2021-06-09T23:55:59 | 2021-06-09T23:55:59 | 259,449,481 | 0 | 0 | Apache-2.0 | 2021-04-09T12:13:11 | 2020-04-27T20:47:23 | Go | UTF-8 | Python | false | false | 3,686 | py | _base_ = [
"../_base_/models/cascade_rcnn_r50_fpn.py",
"../_base_/datasets/coco_detection.py",
"../_base_/schedules/schedule_1x.py",
"../_base_/default_runtime.py",
]
# model settings
model = dict(
pretrained="torchvision://resnet101",
backbone=dict(depth=101),
roi_head=dict(
bbox_head=[
dict(
type="SABLHead",
num_classes=80,
cls_in_channels=256,
reg_in_channels=256,
roi_feat_size=7,
reg_feat_up_ratio=2,
reg_pre_kernel=3,
reg_post_kernel=3,
reg_pre_num=2,
reg_post_num=1,
cls_out_channels=1024,
reg_offset_out_channels=256,
reg_cls_out_channels=256,
num_cls_fcs=1,
num_reg_fcs=0,
reg_class_agnostic=True,
norm_cfg=None,
bbox_coder=dict(
type="BucketingBBoxCoder", num_buckets=14, scale_factor=1.7
),
loss_cls=dict(
type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
),
loss_bbox_cls=dict(
type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0
),
loss_bbox_reg=dict(type="SmoothL1Loss", beta=0.1, loss_weight=1.0),
),
dict(
type="SABLHead",
num_classes=80,
cls_in_channels=256,
reg_in_channels=256,
roi_feat_size=7,
reg_feat_up_ratio=2,
reg_pre_kernel=3,
reg_post_kernel=3,
reg_pre_num=2,
reg_post_num=1,
cls_out_channels=1024,
reg_offset_out_channels=256,
reg_cls_out_channels=256,
num_cls_fcs=1,
num_reg_fcs=0,
reg_class_agnostic=True,
norm_cfg=None,
bbox_coder=dict(
type="BucketingBBoxCoder", num_buckets=14, scale_factor=1.5
),
loss_cls=dict(
type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
),
loss_bbox_cls=dict(
type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0
),
loss_bbox_reg=dict(type="SmoothL1Loss", beta=0.1, loss_weight=1.0),
),
dict(
type="SABLHead",
num_classes=80,
cls_in_channels=256,
reg_in_channels=256,
roi_feat_size=7,
reg_feat_up_ratio=2,
reg_pre_kernel=3,
reg_post_kernel=3,
reg_pre_num=2,
reg_post_num=1,
cls_out_channels=1024,
reg_offset_out_channels=256,
reg_cls_out_channels=256,
num_cls_fcs=1,
num_reg_fcs=0,
reg_class_agnostic=True,
norm_cfg=None,
bbox_coder=dict(
type="BucketingBBoxCoder", num_buckets=14, scale_factor=1.3
),
loss_cls=dict(
type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
),
loss_bbox_cls=dict(
type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0
),
loss_bbox_reg=dict(type="SmoothL1Loss", beta=0.1, loss_weight=1.0),
),
]
),
)
| [
"noreply@github.com"
] | armandmcqueen.noreply@github.com |
bfb7ae8370d6c159723df606e7ca1d00215c9bd5 | d1c427249d1161c1f4f848e1de23d95c03ae40a3 | /501_practitioner_rate_input_landing.py | ae9c1309902dd59363e1fbc687283a03b1e721c4 | [] | no_license | Sangee2610/pythonscripts_march1 | 94b80ab3b037793022d114d7cd3604d69ba82147 | 2fb224fc0753beb3d65d873f658cdae247425cf1 | refs/heads/master | 2020-04-26T05:03:00.998024 | 2019-03-01T15:07:46 | 2019-03-01T15:07:46 | 173,321,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | import psycopg2
import config as cfg
conn = cfg.DATABASE_CONNECT
cur = conn.cursor()
import csv
cur.execute("""
DROP TABLE IF EXISTS Landing_Partitioner_Rate;
CREATE TABLE Landing_Partitioner_Rate(
Name text,
ContactKey text,
Contact text,
PayBand text,
Boundary text,
StartDate text,
EndDate text,
Cost text,
Datasource text,
Owner_ text
)
""")
input_file = '/home/baadmin/NCT_ETL/input_files/practitioner_rate.csv'
def data_cleaning_loading(filename):
new_filename = filename.replace(".csv", "_corrected.csv")
f = open(filename, encoding="ISO-8859-1")
g = open(new_filename, "w+", encoding="utf-8")
new_rows = []
changes = { ',' : ''}
for row in csv.reader(f, quotechar='"', delimiter=',',quoting=csv.QUOTE_ALL, skipinitialspace=True): # iterate over the rows in the file
new_row = row # at first, just copy the row
for key, value in changes.items(): # iterate over 'changes' dictionary
new_row = [ x.replace(key, value) for x in new_row ] # make the substitutions
new_rows.append(new_row) # add the modified rows
new_rows = new_rows[1:] #Remove header
for new_row in new_rows:
g.write(str(",".join(new_row)) + "\n")
g.close()
g = open(new_filename)
cur.copy_from(g, 'Landing_Partitioner_Rate', sep=",")
conn.commit()
g.close()
f.close()
data_cleaning_loading(input_file)
| [
"noreply@github.com"
] | Sangee2610.noreply@github.com |
8c46c93984d57edb4696caf91058e8aa7b2ff09e | ba4f99f24a0a13ff20a07d12adfcc4eba73b874a | /setup.py | 6d3e3b0b140952ba5b5052ee8976dfad14dea044 | [
"MIT"
] | permissive | chenwang12/whitebox-python | d65f7cb774218fe1776f939742d5f90d0cc23b39 | 23d88860332bfe498ac2830fb03b8fd5e4755906 | refs/heads/master | 2023-04-10T10:18:52.885997 | 2021-04-21T01:26:44 | 2021-04-21T01:26:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ['Click>=6.0', ]
setup_requirements = [ ]
test_requirements = [ ]
setup(
author="Qiusheng Wu",
author_email='giswqs@gmail.com',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
description="An advanced geospatial data analysis platform ",
entry_points={
'console_scripts': [
'whitebox=whitebox.cli:main',
],
},
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='whitebox',
name='whitebox',
packages=find_packages(include=['whitebox']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/giswqs/whitebox',
version='1.4.1',
zip_safe=False,
)
| [
"giswqs@gmail.com"
] | giswqs@gmail.com |
1224fa8663e7f8ddcd037f1d3789e5caf813d63e | c4b94158b0ac8f1c4f3d535b6cdee5d1639743ce | /Python/377__Combination_Sum_IV.py | 4435fd17e5f9fb491a78890b190d9d7d2eafecaa | [] | no_license | FIRESTROM/Leetcode | fc61ae5f11f9cb7a118ae7eac292e8b3e5d10e41 | 801beb43235872b2419a92b11c4eb05f7ea2adab | refs/heads/master | 2020-04-04T17:40:59.782318 | 2019-08-26T18:58:21 | 2019-08-26T18:58:21 | 156,130,665 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | class Solution(object):
def combinationSum4(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
self.dic = {}
def helper(s):
if s in self.dic:
return self.dic[s]
if s == 0:
return 1
result = 0
for val in nums:
if s - val >= 0:
result += helper(s - val)
self.dic[s] = result
return result
return helper(target)
| [
"junou_cui@berkeley.edu"
] | junou_cui@berkeley.edu |
5359c90102abe3c637b3160593118e340ddb6395 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /outposts_write_1/outpost_create.py | 352d0b4588fd7900394d3db6690e24f3bc11c552 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/outposts/create-outpost.html
if __name__ == '__main__':
"""
delete-outpost : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/outposts/delete-outpost.html
get-outpost : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/outposts/get-outpost.html
list-outposts : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/outposts/list-outposts.html
"""
parameter_display_string = """
# site-id : The ID of the site.
"""
add_option_dict = {}
#######################################################################
# parameter display string
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_one_parameter("outposts", "create-outpost", "site-id", add_option_dict)
| [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
8289c203d520271f1af4b250c4cd28f9e98a894d | d22f8cd1a834f706d2c0cd77a814414cb4650265 | /data/data/models/structures/character/limits.py | f053282a268cf6c670541a5371fa985bd90c5b0e | [
"MIT"
] | permissive | teris1994/L2py | 9e7535935f58d729453f39bee998f21240b85e8b | 07cc5d7c5d52ac4179378b29ef4873b11f6daa0c | refs/heads/master | 2023-09-01T06:21:10.625029 | 2021-10-24T12:48:18 | 2021-10-24T13:21:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | from dataclasses import dataclass, field
from common.dataclass import BaseDataclass
@dataclass
class Limits(BaseDataclass):
inventory: Int32
warehouse: Int32
freight: Int32
sell: Int32
buy: Int32
dwarf_recipe: Int32
common_recipe: Int32
| [
"yurzs@icloud.com"
] | yurzs@icloud.com |
ae0fde02f81e4d5fc05921176c8b982fddf3e2d4 | 7e69ef0295a00d413b79b6c7646ca837e8dcb4fa | /conductor/feeds.py | 27b3f2c14076785163645c18ee493a2b71aefb7d | [
"MIT"
] | permissive | random-labs/conductor | 1d2ac1e2bb4a7d4833636f7a7a8bea432126191c | 547dfb2c2b36c16d828fcaaf11db49d3bdece527 | refs/heads/master | 2021-06-24T21:43:31.372914 | 2017-09-08T20:43:11 | 2017-09-08T20:43:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,165 | py | import time
import traceback
from steem import Steem
from .config import witness
from .markets import Markets
from .utils import unlock_steempy_wallet
steem = Steem()
settings = {
"sleep_time_seconds": 10 * 60,
"minimum_spread_pct": 2.0,
}
def get_last_published_price(witness_name):
my_info = steem.get_witness_by_account(witness_name)
price = 0
if float(my_info["sbd_exchange_rate"]["quote"].split()[0]) != 0:
price = float(my_info["sbd_exchange_rate"]["base"].split()[0]) / float(
my_info["sbd_exchange_rate"]["quote"].split()[0])
return price
def refresh_price_feeds(witness_name, support_peg=False):
print(time.ctime())
markets = Markets(cache_timeout=30)
# old prices
old_adj_price = get_last_published_price(witness_name)
print("Old Price: " + format(old_adj_price, ".3f"))
# new prices
steem_usd = markets.steem_usd_implied()
sbd_usd = markets.sbd_usd_implied()
quote = round(1 / sbd_usd, 3) if support_peg else "1.000"
quote_adj_current_price = round(steem_usd / float(quote), 3)
print('New Price: %s' % quote_adj_current_price)
print('\nCurrent STEEM price: %.3f USD' % steem_usd)
print('Current SBD price: %.3f USD' % sbd_usd)
print('Quote: %s STEEM' % quote)
# publish new price is spread widens
spread = abs(markets.calc_spread(old_adj_price, quote_adj_current_price))
print("\nSpread between prices: %.3f%%" % spread)
if spread > 25:
print("Possibly invalid spread (%.2f%%), ignoring..." % spread)
elif spread > settings['minimum_spread_pct']:
steem.commit.witness_feed_publish(steem_usd, quote=quote, account=witness_name)
print("Updated the witness price feed.")
print('\n\n')
def run_price_feeds(**kwargs):
unlock_steempy_wallet()
while True:
try:
refresh_price_feeds(witness('name'), **kwargs)
time.sleep(settings['sleep_time_seconds'])
except KeyboardInterrupt:
print('Quitting...')
return
except:
print(traceback.format_exc())
time.sleep(10)
if __name__ == '__main__':
pass
| [
"_@furion.me"
] | _@furion.me |
713c2b2c5286ea101228bc49bbc219e3083f413c | e2d22f12f8e540a80d31de9debe775d35c3c5c22 | /blousebrothers/confs/migrations/0027_auto_20161209_1619.py | b224d425b525fa31c181a087f8fe4458290b4a7d | [
"MIT"
] | permissive | sladinji/blousebrothers | 360c3b78ec43379977dbf470e5721e6a695b2354 | 461de3ba011c0aaed3f0014136c4497b6890d086 | refs/heads/master | 2022-12-20T10:24:07.631454 | 2019-06-13T13:17:35 | 2019-06-13T13:17:35 | 66,867,705 | 1 | 0 | NOASSERTION | 2022-12-19T18:15:44 | 2016-08-29T18:04:33 | Python | UTF-8 | Python | false | false | 2,510 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-12-09 16:19
from __future__ import unicode_literals
import blousebrothers.confs.models
from decimal import Decimal
from django.db import migrations, models
import django.db.models.deletion
import image_cropping.fields
class Migration(migrations.Migration):
dependencies = [
('confs', '0026_create_test_for_conferenciers'),
]
operations = [
migrations.CreateModel(
name='AnswerImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', image_cropping.fields.ImageCropField(max_length=255, upload_to=blousebrothers.confs.models.answer_image_directory_path, verbose_name='Image')),
('cropping', image_cropping.fields.ImageRatioField('image', '430x360', adapt_rotation=False, allow_fullsize=False, free_crop=True, help_text=None, hide_image_field=False, size_warning=False, verbose_name='cropping')),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date created')),
('caption', models.CharField(blank=True, max_length=200, verbose_name='Libellé')),
('index', models.PositiveIntegerField(default=0, verbose_name='Ordre')),
],
),
migrations.AddField(
model_name='question',
name='explaination',
field=models.TextField(blank=True, null=True, verbose_name='Remarque globale pour la correction'),
),
migrations.AlterField(
model_name='answer',
name='explaination_image',
field=image_cropping.fields.ImageCropField(blank=True, max_length=255, null=True, upload_to=blousebrothers.confs.models.answer_image_directory_path, verbose_name='Image'),
),
migrations.AlterField(
model_name='conference',
name='price',
field=models.DecimalField(decimal_places=2, default=Decimal('0.5'), help_text='', max_digits=6, verbose_name='Prix de vente'),
),
migrations.AlterField(
model_name='question',
name='question',
field=models.TextField(verbose_name='Enoncé'),
),
migrations.AddField(
model_name='answerimage',
name='answer',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='confs.Answer'),
),
]
| [
"julien.almarcha@gmail.com"
] | julien.almarcha@gmail.com |
6c2f8aa083e3f008c65511b61390b8d865a33b09 | 14373275670c1f3065ce9ae195df142146e2c1a4 | /stubs/openpyxl/openpyxl/utils/units.pyi | 6264bc8263e8765abc985e5b35777a99076350cf | [
"Apache-2.0",
"MIT"
] | permissive | sobolevn/typeshed | eb7af17c06a9722f23c337e6b9a4726223155d58 | d63a82640390a9c130e0fe7d409e8b0b836b7c31 | refs/heads/master | 2023-08-04T05:59:29.447015 | 2023-06-14T21:27:53 | 2023-06-14T21:27:53 | 216,265,622 | 2 | 0 | Apache-2.0 | 2022-02-08T10:40:53 | 2019-10-19T20:21:25 | Python | UTF-8 | Python | false | false | 692 | pyi | from typing_extensions import Final
DEFAULT_ROW_HEIGHT: Final[float]
BASE_COL_WIDTH: Final = 8
DEFAULT_COLUMN_WIDTH: Final = 13
DEFAULT_LEFT_MARGIN: Final[float]
DEFAULT_TOP_MARGIN: Final[float]
DEFAULT_HEADER: Final[float]
def inch_to_dxa(value): ...
def dxa_to_inch(value): ...
def dxa_to_cm(value): ...
def cm_to_dxa(value): ...
def pixels_to_EMU(value): ...
def EMU_to_pixels(value): ...
def cm_to_EMU(value): ...
def EMU_to_cm(value): ...
def inch_to_EMU(value): ...
def EMU_to_inch(value): ...
def pixels_to_points(value, dpi: int = 96): ...
def points_to_pixels(value, dpi: int = 96): ...
def degrees_to_angle(value): ...
def angle_to_degrees(value): ...
def short_color(color): ...
| [
"noreply@github.com"
] | sobolevn.noreply@github.com |
4d59d2813350d10612df8936d180ef40f296eed9 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/f56387ebf1894a488924586759d551df.py | 76000a8d0392cc004c9488d8f5416a627db04e28 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 724 | py | from string import punctuation
whiteSpace = " \t\n"
numbers = "1234567890"
def isCapsLock(stuff):
strippedStuff = ""
for i in stuff:
if i.isalpha():
strippedStuff += i
if strippedStuff == "":
return False
elif strippedStuff.upper() == strippedStuff:
return True
else:
return False
def isNotEnglish(stuff):
toReturn = True
for i in stuff:
if i.isalpha() == False and i.isspace() == False and i in punctuation == False and i in numbers == False:
toReturn = False
return toReturn
def hey(stuff):
if stuff == "" or stuff.isspace():
return "Fine. Be that way!"
elif isCapsLock(stuff):
return "Whoa, chill out!"
elif stuff[(len(stuff)-1)] == "?":
return "Sure."
else:
return "Whatever."
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
7ff052bddfeed588e5781b786468c13fa0e360ff | 41e2cf24f0ff3a11a98bb00e03c598dde35452c4 | /reportview/migrations/0020_auto_20180802_1306.py | dedffeb085b6e16e63c3bef27d3101cbb3aed9dd | [] | no_license | anushamokashi/mob | f5dbedc729073092f94323feca6d95dee24087a2 | 37bc0eb033bc23d37e9d4fb9bb8b2b456553ff7f | refs/heads/master | 2020-04-24T08:36:56.008212 | 2019-02-21T09:09:04 | 2019-02-21T09:09:04 | 171,810,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-08-02 07:36
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('reportview', '0019_auto_20180802_1246'),
]
operations = [
migrations.RemoveField(
model_name='reportprintformataction',
name='action_type',
),
migrations.RemoveField(
model_name='reportprintformataction',
name='htmlfile',
),
migrations.RemoveField(
model_name='reportprintformataction',
name='iconcls',
),
]
| [
"anusha.mokashi@gmail.com"
] | anusha.mokashi@gmail.com |
79b9eedd6f17c01c7de1fa837f1e31fcb1e6ac50 | 03c9cd5bd96874d6117fb17c37ac4d7450c15933 | /django-tutorial/chapter04/orm_field_demo/article/migrations/0005_person_signature.py | 872309e8204b1b99c5fd56c8430551defb6db66c | [] | no_license | atiger808/opencv-tutorial | 603de35e97679d6beae104298ae355edfdd9036a | 2ea9bb3818284fb75f85697e36fde37b6479d1c6 | refs/heads/master | 2020-05-29T23:16:30.462022 | 2019-11-05T10:08:20 | 2019-11-05T10:08:20 | 189,425,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | # Generated by Django 2.0 on 2019-05-14 09:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('article', '0004_auto_20190514_1622'),
]
operations = [
migrations.AddField(
model_name='person',
name='signature',
field=models.TextField(default=''),
preserve_default=False,
),
]
| [
"atiger0614@163.com"
] | atiger0614@163.com |
a3a7782413ebcec1c92fcaa6dae3cb78c21a3113 | 135f967e9dbbc681e031b9b0adbd85a5dbe43649 | /reveal_graph_embedding/embedding/laplacian.py | 8f6b4e431bdb3bf0c320838624481b8e2ee028ab | [
"Apache-2.0"
] | permissive | gm0907/reveal-graph-embedding | 9d1c9501c542b2f473c73b22c3cc6373910ec8ef | 72d4af794536f97b8ede06c0f27f261ea85d8c4b | refs/heads/master | 2021-02-26T23:42:09.514887 | 2019-07-08T11:12:43 | 2019-07-08T11:12:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,602 | py | __author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import numpy as np
import scipy.sparse as spsp
from reveal_graph_embedding.embedding.implicit import get_implicit_combinatorial_adjacency_matrix,\
get_implicit_directed_adjacency_matrix
def get_unnormalized_laplacian(adjacency_matrix):
# Calculate diagonal matrix of node degrees.
degree = spsp.dia_matrix((adjacency_matrix.sum(axis=0), np.array([0])), shape=adjacency_matrix.shape)
degree = degree.tocsr()
# Calculate sparse graph Laplacian.
laplacian = spsp.csr_matrix(-adjacency_matrix + degree, dtype=np.float64)
return laplacian
def get_normalized_laplacian(adjacency_matrix):
# Calculate diagonal matrix of node degrees.
degree = spsp.dia_matrix((adjacency_matrix.sum(axis=0), np.array([0])), shape=adjacency_matrix.shape)
degree = degree.tocsr()
# Calculate sparse graph Laplacian.
adjacency_matrix = spsp.csr_matrix(-adjacency_matrix + degree, dtype=np.float64)
# Calculate inverse square root of diagonal matrix of node degrees.
degree.data = np.real(1/np.sqrt(degree.data))
# Calculate sparse normalized graph Laplacian.
normalized_laplacian = degree*adjacency_matrix*degree
return normalized_laplacian
def get_random_walk_laplacian(adjacency_matrix):
# Calculate diagonal matrix of node degrees.
degree = spsp.dia_matrix((adjacency_matrix.sum(axis=0), np.array([0])), shape=adjacency_matrix.shape)
degree = degree.tocsr()
# Calculate sparse graph Laplacian.
adjacency_matrix = spsp.csr_matrix(-adjacency_matrix + degree, dtype=np.float64)
# Calculate inverse of diagonal matrix of node degrees.
degree.data = np.real(1/degree.data)
# Calculate sparse normalized graph Laplacian.
random_walk_laplacian = degree*adjacency_matrix
return random_walk_laplacian
"""
def get_directed_laplacian(adjacency_matrix, rho=0.2):
number_of_nodes = adjacency_matrix.shape[0]
effective_adjacency_matrix, rw_distribution = get_implicit_directed_adjacency_matrix(adjacency_matrix, rho)
I = spsp.spdiags(rw_distribution, [0], number_of_nodes, number_of_nodes)
theta_matrix = I - effective_adjacency_matrix
return theta_matrix
def get_combinatorial_laplacian(adjacency_matrix, rho=0.2):
number_of_nodes = adjacency_matrix.shape[0]
effective_adjacency_matrix, rw_distribution = get_implicit_combinatorial_adjacency_matrix(adjacency_matrix, rho)
I = spsp.spdiags(rw_distribution, [0], number_of_nodes, number_of_nodes)
theta_matrix = I - effective_adjacency_matrix
return theta_matrix
""" | [
"georgevrizos@gmail.com"
] | georgevrizos@gmail.com |
c76711de951568a3ad9478239d5c98cbf869d606 | 16d32837fe02613774e64c4b19a3fba20de60d3d | /pebble_tool/version.py | 54b0d2159865cd281768546eeee92d416b9eeda7 | [
"MIT"
] | permissive | bboehmke/pebble-tool | c42558d696c9bceed7f283ef1a1f98b2f5e3d1bd | 64caa870714042df601288463272e17e4f4165b4 | refs/heads/master | 2021-01-17T21:22:11.337174 | 2015-12-15T06:43:24 | 2015-12-15T06:43:24 | 48,221,683 | 0 | 0 | null | 2015-12-18T07:45:53 | 2015-12-18T07:45:53 | null | UTF-8 | Python | false | false | 372 | py | version_base = (4, 0, 0)
version_suffix = None
if version_suffix is None:
__version_info__ = version_base
else:
__version_info__ = version_base + (version_suffix,)
__version__ = '{}.{}'.format(*version_base)
if version_base[2] != 0:
__version__ += '.{}'.format(version_base[3])
if version_suffix is not None:
__version__ += '-{}'.format(version_suffix)
| [
"katharine@getpebble.com"
] | katharine@getpebble.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.