| column | dtype | stats |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–281 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 6–116 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k–668M, nullable (⌀) |
| star_events_count | int64 | 0–102k |
| fork_events_count | int64 | 0–38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4–6.02M |
| extension | string | 78 classes |
| content | string | length 2–6.02M |
| authors | list | length 1–1 |
| author | string | length 0–175 |
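The table above mirrors a Hugging Face dataset-viewer schema dump. Below is a minimal sketch for loading and inspecting rows with this schema, assuming the `datasets` library; the dataset path is a placeholder, since the real path is not given in this document:

from datasets import load_dataset

# Placeholder dataset path; substitute the dataset this dump actually came from.
ds = load_dataset("org/dataset-name", split="train", streaming=True)
for row in ds.take(1):
    print(row["repo_name"], row["path"], row["length_bytes"])
    print(row["content"][:200])  # first 200 characters of the file content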
c5517dd7c08f39a902510e7f3cd4029fc8a11a87
|
900e3a70c3264d43a8a478143ec0443b7afc9083
|
/helpers.py
|
d497e49a30802ccb27deb6bfe120b39e0b050c7d
|
[] |
no_license
|
James171/nfl_team_comparison_app
|
285fa5b48f36fda67ecf469cb0ca092e458568c3
|
1428cca60faefca64e111d2e072e6d3c2924a5cc
|
refs/heads/master
| 2021-02-19T01:49:11.382554
| 2020-05-06T19:23:59
| 2020-05-06T19:23:59
| 245,265,062
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,325
|
py
|
import os
import requests
import urllib.parse
from flask import redirect, render_template, request, session
from functools import wraps
# Hide API Key from viewers
api_key = os.environ['API_KEY']
# Lookup function to get hierarchy API information and the team ID
def lookup(symbol):
# Contact API
try:
response = requests.get(f"http://api.sportradar.us/nfl/official/trial/v5/en/league/hierarchy.json?api_key={api_key}")
# print(response.headers)
response.raise_for_status()
except requests.RequestException:
return None
# Parse response
try:
data = response.json()
return data
except (KeyError, TypeError, ValueError):
return None
def lookup_stats(team_id):
# Contact API
try:
response = requests.get(f"http://api.sportradar.us/nfl/official/trial/v5/en/seasons/2019/REG/teams/{team_id}/statistics.json?api_key={api_key}")
# print(response.headers)
print(response)
response.raise_for_status()
except requests.RequestException:
return None
# Parse response
try:
data_stats = response.json()
return data_stats
except (KeyError, TypeError, ValueError):
return None
def usd(value):
"""Format value as USD."""
return f"${value:,.2f}"
|
[
"26070680+James171@users.noreply.github.com"
] |
26070680+James171@users.noreply.github.com
|
ee6f7db289489973bebf80ffdf3c8346f71732cc
|
6d5c5fbbefb6b6dfc398adfc054f09cb95fbdcaf
|
/rh_pathfinding/src/rh_pathfinding/utils/minheap.py
|
e8cbf2a4ee3c61c7adc7c8c6290a9deff3e0d774
|
[
"MIT"
] |
permissive
|
Wuxinxiaoshifu/rh_ros
|
46a8d75d10317aef04c57785653a6b00ddcb2b63
|
e13077060bdfcc231adee9731ebfddadcd8d6b4a
|
refs/heads/master
| 2022-04-11T15:12:38.539565
| 2020-04-09T19:51:46
| 2020-04-09T19:51:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,479
|
py
|
import heapq
class MinHeap:
"""
What you'd expect. There is probably a better one, but this is simple and meets our needs.
"""
def __init__(self):
self._heapList = []
self._inc = 0
def isEmpty(self):
return len(self._heapList) == 0
def __len__(self):
return len(self._heapList)
def push(self, priority, data):
        # self._inc is stored as the 2nd tuple element, after priority, so ties break by insertion order
heapq.heappush(self._heapList, (priority, self._inc, data))
self._inc += 1
def getTop(self):
(priority, dontCare, data) = self._heapList[0]
return data
def pop(self):
(cost, dontCare, data) = heapq.heappop(self._heapList)
return data
def getTopWithPriority(self):
(priority, dontCare, data) = self._heapList[0]
return (priority, data)
def popWithPriority(self):
(priority, dontCare, data) = heapq.heappop(self._heapList)
return (priority, data)
def __iter__(self):
return _heapIter(self)
def __getitem__(self, index):
return self._heapList[index]
class _heapIter:
def __init__(self, heap):
self._heap = heap
self._index = 0
    def next(self):
        if self._index < len(self._heap):
            (priority, dontCare, data) = self._heap[self._index]
            self._index += 1
            return data
        else:
            raise StopIteration()
    # Python 3 iterator protocol; `next` above covers Python 2.
    __next__ = next
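# Illustrative usage (added; not part of the original file):
#   h = MinHeap()
#   h.push(2, "b"); h.push(1, "a")
#   assert h.pop() == "a"   # lowest priority comes out first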
|
[
"stephenpheiffer1@gmail.com"
] |
stephenpheiffer1@gmail.com
|
e18cf06b5b2dc63197823de50c35c4e582ab59e9
|
772be6c5f04273b8e53485128e625d89fa13cb18
|
/algorithms/warmup/Diagonal-difference.py
|
1db5412cd07a5a314f7e4b36645f32ab834dfafc
|
[] |
no_license
|
anandvimal/hackerrank
|
4f714c44756b65886ff56c3c42ed8bc03477f436
|
148221da61f03859035009d307e2955ecf760fe2
|
refs/heads/master
| 2021-01-20T20:08:04.392126
| 2016-06-19T22:58:24
| 2016-06-19T22:58:24
| 59,915,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
import sys
n = int(raw_input().strip())
a = []
for a_i in xrange(n):
a_temp = map(int,raw_input().strip().split(' '))
a.append(a_temp)
x=0
sum1 = 0
sum2 = 0
for i in a:
d1 = i[x]
d2 = i[len(i)-1-x]
#print d1
#print d2
#print " "
sum1 = d1 + sum1
sum2 = d2 + sum2
x+=1
#print sum1
#print sum2
#print " "
print abs(sum1-sum2)
|
[
"colirsweb@gmail.com"
] |
colirsweb@gmail.com
|
b484e91c0ba83b8e1b68afae32e927f0094f3207
|
cb025bce485f2900c588dc6853b58aaba2db8e85
|
/venv/bin/pip2.7
|
6bec2a6dd4dfc14afa4c65d9de1459e799c1d0cc
|
[] |
no_license
|
jovannovkovic/athon
|
04fc0fd0729133e504e30a03e721e8c415a4b803
|
2059160a58dfebdf891097dd5b3bc4bb50daf5bc
|
refs/heads/master
| 2020-06-06T04:21:23.494297
| 2015-04-06T10:05:42
| 2015-04-06T10:05:42
| 30,805,852
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
7
|
#!/home/popara/Desktop/backend/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"popara@gmail.com"
] |
popara@gmail.com
|
6bedde275f4ca48faeadfd4bab076e488b4228ea
|
cdb05cd3d2e90320bdb8469d7a0a3731457afbf6
|
/base/products.py
|
40365ad96ff37abac239c249d1e2016976a4d115
|
[] |
no_license
|
lastdefiance20/Campirit_Website_Example
|
c80a9d06e15b2c855a512c869d4c1ed30a6f835c
|
ba4d80057d77a7b6484eb5c7e88caf3553035d5c
|
refs/heads/master
| 2023-07-15T12:07:40.483139
| 2021-08-31T23:50:44
| 2021-08-31T23:50:44
| 387,822,800
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,575
|
py
|
products = [
{
'_id': '1',
'name': 'Airpods Wireless Bluetooth Headphones',
'image': '/images/airpods.jpg',
'description':
'Bluetooth technology lets you connect it with compatible devices wirelessly High-quality AAC audio offers immersive listening experience Built-in microphone allows you to take calls while working',
'brand': 'Apple',
'category': 'Electronics',
'price': 89.99,
'countInStock': 10,
'rating': 4.5,
'numReviews': 12,
},
{
'_id': '2',
'name': 'iPhone 11 Pro 256GB Memory',
'image': '/images/phone.jpg',
'description':
'Introducing the iPhone 11 Pro. A transformative triple-camera system that adds tons of capability without complexity. An unprecedented leap in battery life',
'brand': 'Apple',
'category': 'Electronics',
'price': 599.99,
'countInStock': 7,
'rating': 4.0,
'numReviews': 8,
},
{
'_id': '3',
'name': 'Cannon EOS 80D DSLR Camera',
'image': '/images/camera.jpg',
'description':
'Characterized by versatile imaging specs, the Canon EOS 80D further clarifies itself using a pair of robust focusing systems and an intuitive design',
'brand': 'Cannon',
'category': 'Electronics',
'price': 929.99,
'countInStock': 5,
'rating': 3,
'numReviews': 12,
},
{
'_id': '4',
'name': 'Sony Playstation 4 Pro White Version',
'image': '/images/playstation.jpg',
'description':
'The ultimate home entertainment center starts with PlayStation. Whether you are into gaming, HD movies, television, music',
'brand': 'Sony',
'category': 'Electronics',
'price': 399.99,
'countInStock': 11,
'rating': 5,
'numReviews': 12,
},
{
'_id': '5',
'name': 'Logitech G-Series Gaming Mouse',
'image': '/images/mouse.jpg',
'description':
'Get a better handle on your games with this Logitech LIGHTSYNC gaming mouse. The six programmable buttons allow customization for a smooth playing experience',
'brand': 'Logitech',
'category': 'Electronics',
'price': 49.99,
'countInStock': 7,
'rating': 3.5,
'numReviews': 10,
},
{
'_id': '6',
'name': 'Amazon Echo Dot 3rd Generation',
'image': '/images/alexa.jpg',
'description':
'Meet Echo Dot - Our most popular smart speaker with a fabric design. It is our most compact smart speaker that fits perfectly into small space',
'brand': 'Amazon',
'category': 'Electronics',
'price': 29.99,
'countInStock': 0,
'rating': 4,
'numReviews': 12,
},
]
|
[
"lastdefiance20@gmail.com"
] |
lastdefiance20@gmail.com
|
352561a6b78afefc483b5c4fb0eb6fdc50bbde25
|
dab17ec8e693ed85e5e88c8da2c8557b100fe479
|
/bak/plot_matrix.py
|
af51422e4752ccf9822d290479e797d8b81a0bdf
|
[] |
no_license
|
chenm2015/homework-alarm
|
de83b384dce4e9321b101138a055156ca13d2660
|
9524833aa07f253980cf004b9b4d58973d2c9fc0
|
refs/heads/master
| 2020-12-14T14:08:34.713901
| 2016-05-04T06:21:31
| 2016-05-04T06:21:31
| 20,043,395
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,919
|
py
|
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mpldates
import subprocess
import ast
from matplotlib.dates import HourLocator, DayLocator
from matplotlib.patches import Ellipse, Rectangle
from matplotlib import cm
from matplotlib import ticker
from matplotlib.colors import LogNorm
from cStringIO import StringIO
def plot_matrix(floc, saveloc):
p = subprocess.Popen(['zcat', floc],stdout=subprocess.PIPE)
f = StringIO(p.communicate()[0])
assert p.returncode == 0
lists = list()
dvs = list()
high = 0
for line in f:
line = line.rstrip('\n')
if line == '':
continue
data = line.split(':')[1]
data = ast.literal_eval(data)
count = 0
for d in data:
if d > 0:
count += 1
if count > 2: # we do not plot dv that is too small
dvs.append(count)
lists.append(data)
now_max = max(data)
if now_max > high:
high = now_max
f.close()
lists = [x for (y,x) in sorted(zip(dvs, lists))]
pfx_quantity = len(lists)
fig = plt.figure(figsize=(10,16))
ax = fig.add_subplot(111)
cax = ax.imshow(lists, interpolation='nearest', aspect='auto', cmap=cm.jet, norm=LogNorm(1,high))
lvls = np.logspace(0, 3, 10)
cbar = fig.colorbar(cax, ticks=lvls)
yticklist = []
count = 0
while True:
if pfx_quantity/200 >= 1:
count += 1
yticklist.append(count*200)
pfx_quantity -= 200
else:
#yticklist.append(count*200+pfx_quantity)
break
plt.yticks(yticklist)
plt.savefig(saveloc, bbox_inches='tight')
plt.clf() # clear the figure
plt.close()
#if __name__ == '__main__':
# plot_matrix('/media/usb/output/20141130_20141201/1417420800.txt.gz')
|
[
"cenmong@gmail.com"
] |
cenmong@gmail.com
|
366ea396e642b9019bb557961a0b903fbe4531e5
|
3ae01569e10db9e4b3036a6e877ef24a3f056179
|
/website/openeuler_release_refresher/obs/util.py
|
1b8b9459e04a65dd9c973feeb9ee6743a3f3af57
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
openeuler-mirror/infrastructure
|
f3bc92bdc8af9285a549045a6180593f7d9e16fe
|
d1020ab3fe7535f75da9e8b24e6221f103422ac0
|
refs/heads/master
| 2023-09-01T18:08:00.704355
| 2023-08-26T18:00:04
| 2023-08-26T18:00:04
| 246,005,363
| 0
| 2
|
Apache-2.0
| 2023-05-23T05:51:28
| 2020-03-09T10:25:26
|
Python
|
UTF-8
|
Python
| false
| false
| 12,786
|
py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Copyright 2019 Huawei Technologies Co.,Ltd.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import re
import base64
import hashlib
import os
from obs import const
from obs import progress
if const.IS_PYTHON2:
import urllib
else:
import urllib.parse as urllib
from obs.ilog import INFO, ERROR
def to_bool(item):
try:
return True if item is not None and str(item).lower() == 'true' else False
except Exception:
return None
def to_int(item):
try:
return int(item)
except Exception:
return None
def to_long(item):
try:
return const.LONG(item)
except Exception:
return None
def to_float(item):
try:
return float(item)
except Exception:
return None
def to_string(item):
try:
return str(item) if item is not None else ''
except Exception:
return ''
def is_valid(item):
return item is not None and item.strip() != ''
class RequestFormat(object):
@staticmethod
def get_pathformat():
return PathFormat()
@staticmethod
def get_subdomainformat():
return SubdomainFormat()
@classmethod
def convert_path_string(cls, path_args, allowdNames=None):
e = ''
if isinstance(path_args, dict):
e1 = '?'
e2 = '&'
for path_key, path_value in path_args.items():
flag = True
if allowdNames is not None and path_key not in allowdNames:
flag = False
if flag:
path_key = encode_item(path_key, '/')
if path_value is None:
e1 += path_key + '&'
continue
e2 += path_key + '=' + encode_item(path_value, '/') + '&'
e = (e1 + e2).replace('&&', '&').replace('?&', '?')[:-1]
return e
def get_endpoint(self, server, port, bucket):
return
def get_pathbase(self, bucket, key):
return
def get_url(self, bucket, key, path_args):
return
class PathFormat(RequestFormat):
def get_server(self, server, bucket):
return server
def get_pathbase(self, bucket, key):
if bucket:
return '/' + bucket + '/' + encode_object_key(key) if key else '/' + bucket
return '/' + encode_object_key(key) if key else '/'
def get_endpoint(self, server, port, bucket):
if port == 80 or port == 443:
return server
return server + ':' + str(port)
def get_url(self, bucket, key, path_args):
path_base = self.get_pathbase(bucket, key)
path_arguments = self.convert_path_string(path_args)
return path_base + path_arguments
def get_full_url(self, is_secure, server, port, bucket, key, path_args):
url = 'https://' if is_secure else 'http://'
url += self.get_endpoint(server, port, bucket)
url += self.get_url(bucket, key, path_args)
return url
class SubdomainFormat(RequestFormat):
def get_server(self, server, bucket):
return bucket + '.' + server if bucket else server
def get_pathbase(self, bucket, key):
if key is None:
return '/'
return '/' + encode_object_key(key)
def get_endpoint(self, server, port, bucket):
if port == 80 or port == 443:
return self.get_server(server, bucket)
return self.get_server(server, bucket) + ':' + str(port)
def get_url(self, bucket, key, path_args):
url = self.convert_path_string(path_args)
return self.get_pathbase(bucket, key) + url
def get_full_url(self, is_secure, server, port, bucket, key, path_args):
url = 'https://' if is_secure else 'http://'
url += self.get_endpoint(server, port, bucket)
url += self.get_url(bucket, key, path_args)
return url
class delegate(object):
def __init__(self, conn):
self.conn = conn
def send(self, data, final=False, stream_id=None):
self.conn.send(data)
def conn_delegate(conn):
return delegate(conn)
def get_readable_entity(readable, chunk_size=65536, notifier=None, auto_close=True):
if notifier is None:
notifier = progress.NONE_NOTIFIER
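    # (Added note) The closure below streams `readable` with HTTP chunked
    # transfer encoding: each chunk is sent as "<hex length>\r\n<data>\r\n",
    # and "0\r\n\r\n" terminates the stream.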
def entity(conn):
try:
while True:
chunk = readable.read(chunk_size)
if not chunk:
conn.send('0\r\n\r\n' if const.IS_PYTHON2 else '0\r\n\r\n'.encode('UTF-8'), final=True)
break
newReadCount = len(chunk)
if newReadCount > 0:
notifier.send(newReadCount)
hex_chunk = hex(len(chunk))[2:]
conn.send(hex_chunk if const.IS_PYTHON2 else hex_chunk.encode('UTF-8'))
conn.send('\r\n' if const.IS_PYTHON2 else '\r\n'.encode('UTF-8'))
conn.send(chunk)
conn.send('\r\n' if const.IS_PYTHON2 else '\r\n'.encode('UTF-8'))
finally:
if hasattr(readable, 'close') and callable(readable.close) and auto_close:
readable.close()
return entity
def get_readable_entity_by_totalcount(readable, totalCount, chunk_size=65536, notifier=None, auto_close=True):
if notifier is None:
notifier = progress.NONE_NOTIFIER
def entity(conn):
try:
readCount = 0
while True:
readCountOnce = chunk_size if totalCount - readCount >= chunk_size else totalCount - readCount
chunk = readable.read(readCountOnce)
newReadCount = len(chunk)
readCount += newReadCount
if newReadCount > 0:
notifier.send(newReadCount)
if readCount >= totalCount:
conn.send(chunk, final=True)
break
conn.send(chunk)
finally:
if hasattr(readable, 'close') and callable(readable.close) and auto_close:
readable.close()
return entity
def get_file_entity(file_path, chunk_size=65536, notifier=None):
if notifier is None:
notifier = progress.NONE_NOTIFIER
def entity(conn):
fileSize = os.path.getsize(file_path)
readCount = 0
with open(file_path, 'rb') as f:
while True:
chunk = f.read(chunk_size)
newReadCount = len(chunk)
if newReadCount > 0:
notifier.send(newReadCount)
readCount += newReadCount
if readCount >= fileSize:
conn.send(chunk, final=True)
break
conn.send(chunk)
return entity
def get_file_entity_by_totalcount(file_path, totalCount, chunk_size=65536, notifier=None):
if notifier is None:
notifier = progress.NONE_NOTIFIER
def entity(conn):
readCount = 0
with open(file_path, 'rb') as f:
while True:
readCountOnce = chunk_size if totalCount - readCount >= chunk_size else totalCount - readCount
chunk = f.read(readCountOnce)
newReadCount = len(chunk)
if newReadCount > 0:
notifier.send(newReadCount)
readCount += newReadCount
if readCount >= totalCount:
conn.send(chunk, final=True)
break
conn.send(chunk)
return entity
def get_file_entity_by_offset_partsize(file_path, offset, partSize, chunk_size=65536, notifier=None):
if notifier is None:
notifier = progress.NONE_NOTIFIER
def entity(conn):
readCount = 0
with open(file_path, 'rb') as f:
f.seek(offset)
while True:
readCountOnce = chunk_size if partSize - readCount >= chunk_size else partSize - readCount
chunk = f.read(readCountOnce)
newReadCount = len(chunk)
if newReadCount > 0:
notifier.send(newReadCount)
readCount += newReadCount
if readCount >= partSize:
conn.send(chunk, final=True)
break
conn.send(chunk)
return entity
def is_ipaddress(item):
return re.match(const.IPv4_REGEX, item)
def md5_encode(unencoded):
m = hashlib.md5()
unencoded = unencoded if const.IS_PYTHON2 else (unencoded.encode('UTF-8') if not isinstance(unencoded, bytes) else unencoded)
m.update(unencoded)
return m.digest()
def base64_encode(unencoded):
unencoded = unencoded if const.IS_PYTHON2 else (unencoded.encode('UTF-8') if not isinstance(unencoded, bytes) else unencoded)
encodeestr = base64.b64encode(unencoded, altchars=None)
return encodeestr if const.IS_PYTHON2 else encodeestr.decode('UTF-8')
def encode_object_key(key):
return encode_item(key, '/~')
def encode_item(item, safe='/'):
return urllib.quote(to_string(item), safe)
def decode_item(item):
return urllib.unquote(item)
def safe_trans_to_utf8(item):
if not const.IS_PYTHON2:
return item
if item is not None:
item = safe_encode(item)
try:
return item.decode('GB2312').encode('UTF-8')
except Exception:
return item
return None
def safe_trans_to_gb2312(item):
if not const.IS_PYTHON2:
return item
if item is not None:
item = safe_encode(item)
try:
return item.decode('UTF-8').encode('GB2312')
except Exception:
return item
return None
def safe_decode(item):
if not const.IS_PYTHON2:
return item
if isinstance(item, str):
try:
item = item.decode('UTF-8')
except:
try:
item = item.decode('GB2312')
except Exception:
item = None
return item
def safe_encode(item):
if not const.IS_PYTHON2:
return item
if isinstance(item, const.UNICODE):
try:
item = item.encode('UTF-8')
except UnicodeDecodeError:
try:
item = item.encode('GB2312')
except Exception:
item = None
return item
def md5_file_encode_by_size_offset(file_path=None, size=None, offset=None, chuckSize=None):
if file_path is not None and size is not None and offset is not None:
m = hashlib.md5()
with open(file_path, 'rb') as fp:
CHUNKSIZE = 65536 if chuckSize is None else chuckSize
fp.seek(offset)
read_count = 0
while read_count < size:
read_size = CHUNKSIZE if size - read_count >= CHUNKSIZE else size - read_count
data = fp.read(read_size)
read_count_once = len(data)
if read_count_once <= 0:
break
m.update(data)
read_count += read_count_once
return m.digest()
def do_close(result, conn, connHolder, log_client=None):
if not result:
close_conn(conn, log_client)
elif result.getheader('connection', '').lower() == 'close':
if log_client:
log_client.log(INFO, 'server inform to close connection')
close_conn(conn, log_client)
elif to_int(result.status) >= 500 or connHolder is None:
close_conn(conn, log_client)
elif hasattr(conn, '_clear') and conn._clear:
close_conn(conn, log_client)
else:
if connHolder is not None:
try:
connHolder['connSet'].put_nowait(conn)
except:
close_conn(conn, log_client)
def close_conn(conn, log_client=None):
try:
if conn:
conn.close()
except Exception as ex:
if log_client:
log_client.log(ERROR, ex)
SKIP_VERIFY_ATTR_TYPE = False
def verify_attr_type(value, allowedAttrType):
if SKIP_VERIFY_ATTR_TYPE:
return True
if isinstance(allowedAttrType, list):
for t in allowedAttrType:
if isinstance(value, t):
return True
return False
return isinstance(value, allowedAttrType)
def lazyCallback(*args, **kwargs):
pass
|
[
"hu.husheng@huawei.com"
] |
hu.husheng@huawei.com
|
d021d36f984ab643b089ddca6cf72adba3e0c21e
|
e3565e1ce607f60745f2a045aae8026661a6b99b
|
/resources/Onyx-1.0.511/py/onyx/grid/griddy.py
|
b7fdb67930c51b30bfc7c426ac2a4ed49d48c2c2
|
[
"Apache-2.0"
] |
permissive
|
eternity668/speechAD
|
4c08d953b2ed06b3357b1c39d8709dd088a2471c
|
f270a1be86372b7044615e4fd82032029e123bc1
|
refs/heads/master
| 2021-01-12T22:10:33.358500
| 2014-02-03T16:03:28
| 2014-02-03T16:03:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,152
|
py
|
###########################################################################
#
# File: griddy.py (directory: ./py/onyx/grid)
# Date: 4-Feb-2009
# Author: Hugh Secker-Walker
# Description: A function for use in testing by gridgo.py
#
# This file is part of Onyx http://onyxtools.sourceforge.net
#
# Copyright 2009 The Johns Hopkins University
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
###########################################################################
"""
>>> True
True
"""
def my_func(a, b, c):
return a, b, c
if __name__ == '__main__':
from onyx import onyx_mainstartup
onyx_mainstartup()
|
[
"nassos@n12mavra.cs.ntua.gr"
] |
nassos@n12mavra.cs.ntua.gr
|
1382cc4d3aa5afb92ec947c4c5b49adc271ec8ad
|
6d05c14eaf6e638d5bf0faf69c74a7f80eefcc82
|
/gaussian_gradient/gaussian_gradient.pyde
|
e638d302c89da26699bfd0df2c48f8aa1e3acec4
|
[] |
no_license
|
npabon/Processing
|
9e21cbfa2eca1afc75fb78761731b1bee1be0571
|
a7a6e2eb341aef14bf262405d221c7ad24666c8b
|
refs/heads/master
| 2020-03-07T22:52:07.923780
| 2018-07-11T13:00:33
| 2018-07-11T13:00:33
| 127,765,952
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,009
|
pyde
|
from gaussian_support import init, run
from palettes import nature_palettes_6_color
import random as rand
add_library('svg')
n=10
points = []
current = []
def setup():
size(800,500)
background(255,255,255)
noStroke()
#blendMode(MULTIPLY)
noLoop()
def draw():
global points, current
#background(255,255,255)
# aztek sunset colors: http://www.color-hex.com/color-palette/21517
darkred = color(62,2,0)
medred = color(255,0,0)
aqua = color(64,244,208)
orange = color(245,158,4)
#filename="gaussian_stone_{}.svg".format(iter)
#beginRecord(SVG, filename)
hy = 0
fill(orange,5)
points = init(n,hy)
run(current, points)
hy = height/4
fill(aqua,5)
points = init(n,hy)
run(current, points)
hy = height/2
fill(medred,5)
points = init(n,hy)
run(current, points)
hy = 3*height/4
fill(darkred,5)
points = init(n,hy)
run(current, points)
|
[
"npabon15@gmail.com"
] |
npabon15@gmail.com
|
9eb02a16cb5679b043e158e4f36ae3ea11a51e80
|
162f0a636cab320ead784b33597e583e38ac432f
|
/1744.py
|
f339b7b48defbece73a4dddc7bee0dbea7c0d161
|
[] |
no_license
|
goodsosbva/BOJ_Greedy
|
fc2450df90f64790f6cc01c168ba7f19ec83e504
|
98d21af254cacf41632a4b40ca9ef643b29bb104
|
refs/heads/main
| 2023-03-31T17:26:33.863396
| 2021-04-06T07:50:57
| 2021-04-06T07:50:57
| 347,081,712
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,641
|
py
|
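# (Added summary) BOJ 1744: maximize the sum of a sequence by grouping numbers
# in pairs. Negatives and zeros are paired in ascending order and positives in
# descending order, multiplying within a pair when that beats adding; 1s are
# added rather than multiplied.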
n = int(input())
sequence = []
negative = []
positive = []
res = 0
for i in range(n):
i = int(input())
sequence.append(i)
for k in sequence:
if k < 0:
negative.append(k)
elif k > 0:
positive.append(k)
else:
negative.append(k)
negative.sort()
positive.sort(reverse=True)
# print(negative)
u = len(negative)
if 0 in negative:
if u % 2 == 0:
for q in range(0, u, 2):
res += negative[q] * negative[q + 1]
else:
for w in range(0, u - 1, 2):
res += negative[w] * negative[w + 1]
else:
if u % 2 == 0:
for q in range(0, u, 2):
res += negative[q] * negative[q + 1]
elif u % 2 != 0 and u != 1:
for w in range(0, u - 1, 2):
res += negative[w] * negative[w + 1]
res += negative[u - 1]
else:
res += negative[0]
# print("음수합:", res)
# print(positive)
v = len(positive)
if 1 in positive:
x = positive.count(1)
# print(x)
if v - 1 > x:
if v % 2 == 0:
for s in range(0, v - x, 2):
res += positive[s] * positive[s + 1]
res += x
else:
for t in range(0, v - x, 2):
res += positive[t] * positive[t + 1]
res += x
else:
for h in positive:
res += h
else:
if v % 2 == 0:
for r in range(0, v, 2):
res += positive[r] * positive[r + 1]
else:
for f in range(0, v - 1, 2):
res += positive[f] * positive[f + 1]
res += positive[v - 1]
print(res)
|
[
"noreply@github.com"
] |
noreply@github.com
|
c6fef081bd46b0cb2875a2870bf64ad4631575c4
|
baffcef29e33658138c43ef358d7399ab3ea2c0d
|
/WORKFLOWS/Tools/NEC/NAL/nal-model/rest/test/unit/test_dcs.py
|
cb088e130c920d2697ba7584fef6500526bdc175
|
[
"Apache-2.0",
"JSON"
] |
permissive
|
openmsa/NO
|
aa7d4ff000875bfcff0baee24555ec16becdb64e
|
24df42ee3927415b552b5e5d7326eecd04ebca61
|
refs/heads/master
| 2020-03-09T23:21:09.657439
| 2019-03-29T06:29:07
| 2019-03-29T06:29:07
| 129,056,267
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,271
|
py
|
import json
import mysql.connector
import os
import sys
import unittest
import urllib.request
import urllib.parse
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../../')
from rest.api import router
from rest.conf import config
class TestSelectAPI(unittest.TestCase):
# Do a test of Select.
ID = 0
def setUp(self):
# Establish a clean test environment.
super(TestSelectAPI, self).setUp()
# Insert test data
self.create_fixtures()
def tearDown(self):
"""Clear the test environment"""
super(TestSelectAPI, self).tearDown()
self.destroy_fixtures()
def create_fixtures(self):
con, cur = self.connect_db()
global extension_info
extension_info = {
'dc_name': 'dc_namexxxxxxxxxx',
'dc_number': 'dc_numberxxxxxxxxxx'
}
# Execute SQL
param_vals = ['test_create_id-0ac6cb428b23', '2016-12-31 23:59:59',
'test_update_id-0ac6cb428b23', '2016-12-31 23:59:59',
0, 'dc_id-dd7e-0ac6cb428b23',
json.dumps(extension_info)]
cur.execute("INSERT INTO WIM_DC_MNG(create_id, create_date, " +
"update_id, update_date, delete_flg, " +
"dc_id, extension_info) VALUES " +
"(%s, %s, %s, %s, %s, %s, %s)", param_vals)
cur.execute('SELECT last_insert_id() FROM WIM_DC_MNG')
global ID
ID = cur.fetchall()[0][0]
self.cut_db(con, cur)
def destroy_fixtures(self):
con, cur = self.connect_db()
# Execute SQL
param_vals = ['test_create_id-0ac6cb428b23']
cur.execute("DELETE FROM WIM_DC_MNG WHERE " +
"create_id = %s", param_vals)
self.cut_db(con, cur)
def connect_db(self):
# Connect Database
con = mysql.connector.connect(
host=getattr(config, 'MYSQL_HOSTNAME', ''),
db=getattr(config, 'MYSQL_DBNAME', ''),
user=getattr(config, 'MYSQL_USERID', ''),
passwd=getattr(config, 'MYSQL_PASSWORD', ''),
buffered=True)
# Set Autocommit Off
con.autocommit = False
# Open Cursor
cur = con.cursor()
return con, cur
def cut_db(self, con, cur):
# Commit Transaction
con.commit()
# Close Cursor
cur.close()
# Close Database
con.close()
def test_select_api(self):
request_params = {
'query': {
'delete_flg': '0', 'ID': ID
},
'resource': 'dcs',
'method': 'GET',
'id': []
}
res = router.Router().routing(request_params)
status = res['status']
res_data = res['message'].decode('utf-8')
res_data = json.loads(res_data)
self.assertEqual(status, '200 OK')
self.assertEqual(len(res_data), 1)
self.assertEqual(res_data[0]['ID'], ID)
self.assertEqual(res_data[0]['create_id'],
'test_create_id-0ac6cb428b23')
self.assertEqual(res_data[0]['update_id'],
'test_update_id-0ac6cb428b23')
self.assertEqual(res_data[0]['delete_flg'], '0')
self.assertEqual(res_data[0].get('extension_info', ''), '')
for key in extension_info:
self.assertEqual(res_data[0].get(key), extension_info[key])
def test_insert_api(self):
insert_params = {
'create_id': 'test_create_id-0ac6cb428b23',
'update_id': 'test_create_id-0ac6cb428b23',
'delete_flg': 0,
'dc_id': 'dc_id-bb6d-6bb9bd380a11',
'dc_name': 'dc_name_B',
'dc_number': 1234
}
request_params = {
'body': insert_params,
'query': {},
'resource': 'dcs',
'method': 'POST',
'id': []
}
res = router.Router().routing(request_params)
status = res['status']
res_data = res['message'].decode('utf-8')
res_data = json.loads(res_data)
# Assertion
self.assertEqual(status, '200 OK')
self.assertEqual(len(res_data), 1)
self.assertTrue('ID' in res_data)
# Assertion(check select)
request_params = {
'query': {
'dc_id': 'dc_id-bb6d-6bb9bd380a11',
},
'resource': 'dcs',
'method': 'GET',
'id': []
}
res = router.Router().routing(request_params)
status = res['status']
res_data = res['message'].decode('utf-8')
res_data = json.loads(res_data)
self.assertEqual(status, '200 OK')
self.assertEqual(len(res_data), 1)
for key in insert_params:
if key == 'delete_flg':
self.assertEqual(res_data[0].get(key), str(insert_params[key]))
else:
self.assertEqual(res_data[0].get(key), insert_params[key])
def test_update_api(self):
update_params = {
'update_id': 'test_update_id-0ac6cb428b23',
'dc_id': 'dc_id-ad4c-4cc6ea276a55',
'dc_name': 'dc_name_C',
'dc_number': 5678
}
request_params = {
'body': update_params,
'query': {},
'resource': 'dcs',
'method': 'PUT',
'id': [ID]
}
res = router.Router().routing(request_params)
status = res['status']
res_data = res['message'].decode('utf-8')
res_data = json.loads(res_data)
# Assertion
self.assertEqual(status, '200 OK')
self.assertEqual(res_data, True)
# Assertion(check select)
request_params = {
'query': {
'dc_id': 'dc_id-ad4c-4cc6ea276a55',
},
'resource': 'dcs',
'method': 'GET',
'id': []
}
res = router.Router().routing(request_params)
status = res['status']
res_data = res['message'].decode('utf-8')
res_data = json.loads(res_data)
self.assertEqual(status, '200 OK')
self.assertEqual(len(res_data), 1)
for key in update_params:
if key == 'delete_flg':
self.assertEqual(res_data[0].get(key), str(update_params[key]))
else:
self.assertEqual(res_data[0].get(key), update_params[key])
def test_delete_api(self):
request_params = {
'body': {},
'query': {},
'resource': 'dcs',
'method': 'DELETE',
'id': [ID]
}
res = router.Router().routing(request_params)
status = res['status']
res_data = res['message'].decode('utf-8')
res_data = json.loads(res_data)
# Assertion
self.assertEqual(status, '200 OK')
self.assertEqual(res_data, True)
# Assertion(Select Check)
con, cur = self.connect_db()
cur.execute("SELECT ID FROM WIM_DC_MNG " +
"WHERE ID = %s", [ID])
self.assertEqual(cur.fetchall(), [])
self.cut_db(con, cur)
|
[
"ofa@ubiqube.com"
] |
ofa@ubiqube.com
|
48cf0f54c8738ea16878d6beb0a2fd2a8d7aa385
|
c50e5af8f72de6ef560ee6c0bbfa756087824c96
|
/刷题/Leetcode/84. 柱状图中最大的矩形/p84_Largest_Rectangle_in_Histogram_暴力.py
|
7430809260718f7c390d48a5c4dc9f9b4dcaa792
|
[] |
no_license
|
binghe2402/learnPython
|
5a1beef9d446d8316aaa65f6cc9d8aee59ab4d1c
|
2b9e21fe4a8eea0f8826c57287d59f9d8f3c87ce
|
refs/heads/master
| 2022-05-27T03:32:12.750854
| 2022-03-19T08:00:19
| 2022-03-19T08:00:19
| 252,106,387
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,012
|
py
|
from typing import List
class Solution:
    # # Enumerate every boundary pair (width) and take the minimum height inside it
# def largestRectangleArea(self, heights: List[int]) -> int:
# area = 0
# for i in range(len(heights)):
# for j in range(i, len(heights)):
# area = max(area, (j-i+1)*min(heights[i:j+1]))
# return area
    # Enumerate heights instead: expand from each bar to both sides, keeping the
    # widest span whose bars are at least as tall as the starting bar.
    # A bar lower than the starting bar marks the boundary on that side.
def largestRectangleArea(self, heights: List[int]) -> int:
area = 0
for i in range(len(heights)):
left = right = i
            # find the left boundary
while left >= 0 and heights[i] <= heights[left]:
left -= 1
            # find the right boundary
while right < len(heights) and heights[i] <= heights[right]:
right += 1
area = max(area, (right - left - 1)*heights[i])
return area
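# Illustrative check (added): Solution().largestRectangleArea([2, 1, 5, 6, 2, 3]) == 10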
|
[
"binghe2402@hotmail.com"
] |
binghe2402@hotmail.com
|
f8e9765b859dd527defd2ce06933a55ecb70e041
|
35fdd5b42b47a1dbe6a25f6fc1865f4e48b842a5
|
/evalml/tests/component_tests/test_catboost_classifier.py
|
1ef6fd41a8656a2914d90172ce42a92330d0a24e
|
[
"BSD-3-Clause"
] |
permissive
|
skvorekn/evalml
|
41e5426f9f7d5ad625c21b74336009894c79c7de
|
2cbfa344ec3fdc0fb0f4a0f1093811135b9b97d8
|
refs/heads/main
| 2023-03-27T01:42:07.691406
| 2021-03-19T18:53:43
| 2021-03-19T18:53:43
| 349,555,689
| 0
| 0
|
BSD-3-Clause
| 2021-03-21T14:57:01
| 2021-03-19T21:08:12
| null |
UTF-8
|
Python
| false
| false
| 837
|
py
|
import pandas as pd
from pytest import importorskip
from evalml.pipelines.components import CatBoostClassifier
from evalml.utils import SEED_BOUNDS
importorskip('catboost', reason='Skipping test because catboost not installed')
def test_catboost_classifier_random_seed_bounds_seed(X_y_binary):
"""ensure catboost's RNG doesn't fail for the min/max bounds we support on user-inputted random seeds"""
X, y = X_y_binary
col_names = ["col_{}".format(i) for i in range(len(X[0]))]
X = pd.DataFrame(X, columns=col_names)
y = pd.Series(y)
clf = CatBoostClassifier(n_estimators=1, max_depth=1, random_seed=SEED_BOUNDS.min_bound)
clf.fit(X, y)
clf = CatBoostClassifier(n_estimators=1, max_depth=1, random_seed=SEED_BOUNDS.max_bound)
fitted = clf.fit(X, y)
assert isinstance(fitted, CatBoostClassifier)
|
[
"noreply@github.com"
] |
noreply@github.com
|
91a113c4ada5b426c5ec2a79294fad93cc6a42ec
|
3003d18727cd4c122d7c8de62a90acba4ec0b168
|
/main.py
|
966f44c686f3c1f14f5752beec8cbdf7a6cbb2b6
|
[] |
no_license
|
kelvin5hart/blind-auction
|
eabd8c7dbf66fe20556e04069f704629d5165b61
|
2e6216d5ec79bfe5059d7bfc6e331507c9163193
|
refs/heads/master
| 2023-03-07T22:58:50.141445
| 2021-02-13T10:12:05
| 2021-02-13T10:12:05
| 338,546,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 558
|
py
|
from replit import clear
import art
print(art.logo)
entries = {}
continueBid = "yes"
while continueBid == "yes":
name = input("What is you name? \n")
bid = int(input("What's your bid? \n$"))
optionNext = input("Are there other bidders? Type 'yes' or 'no' \n").lower()
entries[name] = bid
clear()
print(entries)
if optionNext == "no":
continueBid = "no"
score = 0
for person in entries:
if entries[person] > score:
score = entries[person]
nameOfPerson = person
print(f"Highest bidder is {nameOfPerson} with a bid of ${score}")
|
[
"atemiehartkelvin@rocketmail.com"
] |
atemiehartkelvin@rocketmail.com
|
b7be68b02706b304621a9f728282913df839691a
|
bd0792ec5d61dfc325839d4dd112e6f71f015a46
|
/node_modules/mongoose/node_modules/mongodb/node_modules/bson/build/config.gypi
|
54fab577d66e035f9cd35cc8b00fab0c27acd9fc
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
GeoffreyLong/scheduler
|
d9666760cf6b77f687f6e1a026ceff825743dd53
|
5d30e41c72e5b705c7576088f03f6c9c360ab387
|
refs/heads/master
| 2020-05-05T08:03:38.102170
| 2014-09-14T12:28:57
| 2014-09-14T12:28:57
| 21,985,621
| 0
| 0
| null | 2014-09-14T12:28:57
| 2014-07-18T15:50:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,019
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 47,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "true",
"nodedir": "/home/geoff/.node-gyp/0.10.25",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"always_auth": "",
"user_agent": "node/v0.10.25 linux x64",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"user": "",
"force": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"cache_max": "null",
"userconfig": "/home/geoff/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/home/geoff/tmp",
"depth": "null",
"save_dev": "",
"usage": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr/local",
"registry": "https://registry.npmjs.org/",
"browser": "",
"cache_lock_wait": "10000",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/geoff/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "18",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "true",
"node_version": "v0.10.25",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"strict_ssl": "true",
"username": "",
"dev": "",
"globalconfig": "/usr/local/etc/npmrc",
"init_module": "/home/geoff/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/local/etc/npmignore",
"cache_lock_retries": "10",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"email": "",
"json": ""
}
}
|
[
"Geoffrey.Long@mail.mcgill.ca"
] |
Geoffrey.Long@mail.mcgill.ca
|
e7bb019278a9a0e6f5cca4f0270baf54e8ada1b2
|
8a19822ceaf8096a5a8e59a30919662b95bcb5e4
|
/AzureCluster/eventhubssastoken.py
|
b87c3582bdb56f9091ce3544f4bf82a032f7fcf9
|
[] |
no_license
|
junkaixue/service-fabric-java-quickstart
|
053f60623a424f10e401107690079a5a973d97f8
|
858adc98339d8ecbacc83d9a9e1eae602006fd31
|
refs/heads/master
| 2022-04-09T15:39:11.359255
| 2020-03-25T23:44:44
| 2020-03-25T23:44:44
| 361,007,546
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,073
|
py
|
#!/usr/bin/env python
import sys
import time
import urllib
from urllib.parse import urlparse
import hmac
import hashlib
import base64
def get_auth_token(sb_name, eh_name, sas_name, sas_value):
print(sb_name)
print(eh_name)
print(sas_name)
print(sas_value)
uri = urllib.parse.quote_plus("https://{}.servicebus.windows.net/{}" \
.format(sb_name, eh_name))
sas = sas_value.encode('utf-8')
expiry = str(int(time.time() + 10000))
string_to_sign = (uri + '\n' + expiry).encode('utf-8')
signed_hmac_sha256 = hmac.HMAC(sas, string_to_sign, hashlib.sha256)
signature = urllib.parse.quote(base64.b64encode(signed_hmac_sha256.digest()))
return {"sb_name": sb_name,
"eh_name": eh_name,
"token":'SharedAccessSignature sr={}&sig={}&se={}&skn={}' \
.format(uri, signature, expiry, sas_name)
}
sb_name = sys.argv[1]
eh_name = sys.argv[2]
sas_name = sys.argv[3]
sas_value = sys.argv[4]
print(get_auth_token(sb_name, eh_name, sas_name, sas_value))
|
[
"sudhanvahuruli@gmail.com"
] |
sudhanvahuruli@gmail.com
|
fbd49bfeec9947ef6f83b1e9787a0081f6be9f05
|
57775b4c245723078fd43abc35320cb16f0d4cb6
|
/Data structure/linked-list/delete-node-given-position.py
|
cc4164b336e8f1ad6093479327c26ce5514d4106
|
[] |
no_license
|
farhapartex/code-ninja
|
1757a7292ac4cdcf1386fe31235d315a4895f072
|
168fdc915a4e3d3e4d6f051c798dee6ee64ea290
|
refs/heads/master
| 2020-07-31T16:10:43.329468
| 2020-06-18T07:00:34
| 2020-06-18T07:00:34
| 210,668,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,394
|
py
|
"""
Given a ‘key’, delete the first occurrence of this key in linked list.
To delete a node from linked list, we need to do following steps.
1) Find previous node of the node to be deleted.
2) Change the next of previous node.
3) Free memory for the node to be deleted.
"""
class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def print_list(self):
temp = self.head
while temp:
print(temp.data)
temp = temp.next
def push_front(self, node):
new_node = Node(node)
new_node.next = self.head
self.head = new_node
def insert_after(self, prev_node, new_node):
if prev_node is None:
print("Previous node must be in a LinkedList")
return
new_node = Node(new_node)
new_node.next = prev_node.next
prev_node.next = new_node
def append(self, new_data):
new_node = Node(new_data)
if self.head is None:
self.head = new_node
return
last = self.head
while last.next:
last = last.next
last.next = new_node
def deleteNodeGivenPosition(self, position):
if self.head is None:
return
temp = self.head
if position == 0:
self.head = temp.next
temp = None
return
# Find previous node of the node to be deleted
for i in range(position-1):
temp = temp.next
if temp is None:
break
# If position is more than number of nodes
if temp is None:
return
if temp.next is None:
return
        # temp.next is the node to be deleted;
        # store the pointer to the node after it
        next = temp.next.next
        # unlink the deleted node from the list
        temp.next = next
if __name__ == "__main__":
llist = LinkedList()
llist.append(6)
llist.push_front(10)
llist.push_front(6)
llist.push_front(11)
llist.append(20)
llist.insert_after(llist.head.next, 8)
llist.print_list()
llist.deleteNodeGivenPosition(2)
print("Linked List after Deletion at 2:")
llist.print_list()
|
[
"farhapartex@gmail.com"
] |
farhapartex@gmail.com
|
0f7628c5c4ce70e0322d89e5ace91c558952f845
|
bdb75d503f097c4b10fd0744705b873919e001b0
|
/Question8partB.py
|
79380aae7b2ab6ca0653471180eb0e64a6a76235
|
[] |
no_license
|
joshuamaxion/plc-test2
|
8f4f6c3878462bbc936cbb872efa725c41fb0916
|
3d9c00f5675d926d666d8db330a5629be9068c5a
|
refs/heads/main
| 2023-04-10T02:36:22.413122
| 2021-04-22T20:46:15
| 2021-04-22T20:46:15
| 359,655,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
# Online Python - IDE, Editor, Compiler, Interpreter
int set A{
x=2;
y=5;
z=9;
set C();
}
int set B{
x=1;
z= 16;
}
int set C{
y = 20;
z = 4;
set B();
}
x = 0;
y = 3;
z = 12;
print %(x,y,z)
|
[
"noreply@github.com"
] |
noreply@github.com
|
78cd82f5ea823208cc3cca8afffd365191738c29
|
0e667b514d3b129c1ba4d53d5a70eb66b507f50f
|
/Shop/models.py
|
c2b18b4ce4beee2c2bb37a3383e47957109dbd19
|
[] |
no_license
|
frank9th/AkomShop-DjangoEcommerce
|
ec5c736afbb0019676d56198a191c077f2c731fd
|
4847899f53cd5f2fda4eec93f8ad98d758e602a1
|
refs/heads/main
| 2023-02-26T03:59:20.089902
| 2021-01-08T12:52:57
| 2021-01-08T12:52:57
| 322,301,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,616
|
py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Customer(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, null=True, blank=True)
name = models.CharField(max_length=200, null=True)
email = models.CharField(max_length=200, null=True)
phone = models.CharField(max_length=200, null=True)
def __str__(self):
return self.name
class Product(models.Model):
name = models.CharField(max_length=200, null=True)
price = models.FloatField()
food = models.BooleanField(default=False, null=True, blank=False)
image = models.ImageField(null=True, blank=True)
def __str__(self):
return self.name
@property
def imageURL(self):
try:
url = self.image.url
except:
url = ''
return url
class Order(models.Model):
customer = models.ForeignKey(Customer, on_delete=models.SET_NULL, blank=True, null=True)
date_ordered = models.DateTimeField(auto_now_add=True)
complete = models.BooleanField(default=False, null=True, blank=False)
transaction_id = models.CharField(max_length=200, null=True)
def __str__(self):
return str(self.id)
# total price of items in cart
@property
def get_cart_total(self):
orderitems = self.orderitem_set.all()
total = sum([item.get_total for item in orderitems])
return total
    # total quantity of items in the cart
@property
def get_cart_items(self):
orderitems = self.orderitem_set.all()
total = sum([item.quantity for item in orderitems])
return total
class OrderItem(models.Model):
product = models.ForeignKey(Product, on_delete=models.SET_NULL, blank=True, null=True)
order = models.ForeignKey(Order, on_delete=models.SET_NULL, blank=True, null=True)
quantity = models.IntegerField(default=0, null=True, blank=True)
date_added = models.DateTimeField(auto_now_add=True)
    # function for calculating the total for a product line - this is a property of OrderItem
@property
def get_total(self):
total = self.product.price * self.quantity
return total
class ShippingAddress(models.Model):
customer = models.ForeignKey(Customer, on_delete=models.SET_NULL, blank=True, null=True)
order = models.ForeignKey(Order, on_delete=models.SET_NULL, blank=True, null=True)
address = models.CharField(max_length=200, null=True)
city = models.CharField(max_length=200, null=True)
landmark = models.CharField(max_length=200, null=True)
state = models.CharField(max_length=200, null=True)
date_added = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.address
|
[
"ap.teamservices@gmail.com"
] |
ap.teamservices@gmail.com
|
be4cffb85075a9800f3d78a74574e03d9aa3acb0
|
25223ca66ccd039bd3a67ceeaa0247dd33008ff5
|
/homework/hw-test.py
|
d96b16fa8eb723ffc711481c5f3b0bddef34fa56
|
[] |
no_license
|
joqhuang/si
|
ad160cbf021fe062a87ae935a6e0373ffc8932ad
|
cbb57ca1da9bbba41663a688c704f038b33d9624
|
refs/heads/master
| 2021-05-06T11:04:05.904818
| 2018-01-23T18:44:52
| 2018-01-23T18:44:52
| 114,205,555
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
exam_one = int(input("Input exam grade one: "))
exam_two = int(input("Input exam grade two: "))
exam_three = int(input("Input exam grade three: "))
grades = [exam_one, exam_two, exam_three]
sum = 0
for grade in grades:
sum += grade
avg = sum // len(grades)
if avg >= 90:
letter_grade = "A"
elif avg >= 80:
letter_grade = "B"
elif avg >= 70:
letter_grade = "C"
elif avg >= 60:
letter_grade = "D"
else:
letter_grade = "F"
print("Exam: {}, {}, {}".format(*grades))
print("Average: " + str(avg))
print("Grade: " + letter_grade)
if letter_grade == "F":
print("Student is failing.")
else:
print ("Student is passing.")
|
[
"questionableinsanity@gmail.com"
] |
questionableinsanity@gmail.com
|
b0b3a3821927cc4c14af702c3ae0565c27730f69
|
23dde32f52f7672c61500d344de0923382e81b17
|
/script.service.jogwheel/addon.py
|
20f26ccde9e436587f13e08589d0c5275a5f33fb
|
[] |
no_license
|
spielzeugland/kodi-control-panel
|
5226148dcaca881812a49ed92f5732c347902654
|
755c74f43617a5670a8942274201018908092e37
|
refs/heads/master
| 2021-01-21T15:07:28.373026
| 2018-11-17T21:13:06
| 2018-11-17T22:20:18
| 57,076,527
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,094
|
py
|
import xbmc
import lib.generic.worker as worker
import lib.generic.configuredLogging as configuredLogging
import lib.generic.kodi as kodi
import lib.configuredMenu as configuredMenu
from lib.display import Display
from lib.inputs import Inputs
if __name__ == "__main__":
inputs = None
try:
configuredLogging.configure(configuredLogging.WARNING, kodi.KodiLogHandler(xbmc))
localKodi = kodi.local(xbmc)
queue = worker.createQueue()
inputs = Inputs(queue)
display = Display()
theController = configuredMenu.create(localKodi, display.update)
worker = theController.work(queue)
monitor = localKodi.getMonitor()
while not monitor.abortRequested():
if monitor.waitForAbort(10):
display.writeMessage("Good Bye!")
break
if not worker.is_alive():
localKodi.shutdown()
# TODO exception handling
# except Exception as e:
# display.write("Error")
finally:
if(inputs is not None):
inputs.close()
|
[
"spielzeugland@users.noreply.github.com"
] |
spielzeugland@users.noreply.github.com
|
1e48d5e5be10c8f681d187b11d997a55db54f7a6
|
c221de88d7efcc08b0fe654eded554ec50c9340a
|
/cv/颜色映射.py
|
1c8e41f55781c0cdf01be145d58a96873fbebf5a
|
[] |
no_license
|
daviddych/dnn_application
|
51be77520af2f54344fdebc26e5e11cf2d6a6e58
|
df2ae0860a4f470ec6bf4943802fe905a3bebec3
|
refs/heads/master
| 2020-04-27T13:36:46.100612
| 2019-05-17T09:05:37
| 2019-05-17T09:05:37
| 174,376,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 572
|
py
|
#!/usr/bin/env python
#coding:utf-8
import cv2
import numpy as np
# (b, g, r) --> (b, g * 1.5, r * 1.3), each channel clamped to [0, 255]
def assert_pixel(x):
if x > 255:
x = 255
    elif x < 0:
x = 0
return x
if __name__ == '__main__':
img = cv2.imread('data/cat.jpg', 1)
rows, cols, chs = img.shape
dst = np.zeros((rows, cols, chs), np.uint8)
for i in range(0, rows):
for j in range(0, cols):
(b, g, r) = img[i, j]
dst[i, j] = (b, assert_pixel(g * 1.5), assert_pixel(r * 1.3))
cv2.imshow('dst', dst)
cv2.waitKey(0)
|
[
"287382545@qq.com"
] |
287382545@qq.com
|
7162ab9918f49d29d858a7e54c83899411bb2c2a
|
d4dc405c159b575568044766f756caf7e5ca4c38
|
/app/bin/dltk/core/deployment/params.py
|
d86e5b4ad2951da0c96d9eefb0e8a1b702ea1127
|
[
"ISC",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
tatsunobuMurata/deep-learning-toolkit
|
283824ae01f13ee929d91f3a049848a9a3dd7646
|
84f9c978d9859a96f6ba566737a5c7102738d13c
|
refs/heads/master
| 2023-02-21T19:44:41.919214
| 2021-01-07T11:53:56
| 2021-01-07T11:53:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
__all__ = [
"get_default_param",
]
def get_default_param(name, environment, algorithm=None, runtime=None):
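    # (Added note) Lookup precedence: environment-level value first,
    # then the algorithm's default, then the runtime's default.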
environment_param = environment.get_param(name)
if environment_param is not None:
return environment_param
if algorithm:
return algorithm.get_param(name)
if runtime:
return runtime.get_param(name)
return None
|
[
"hovu96"
] |
hovu96
|
8edbd040378fdcfdb0fb7e75b5cfad27cb05c423
|
4a1438c2e99d3452827351433e75119b62f28c26
|
/holi/wsgi.py
|
316480cbeab3f9f708d3934404d82851300ebe70
|
[] |
no_license
|
taowang6000/holi_Django
|
407747baa0d9a06cd51cff6e65a78fbfe368bc0a
|
48c237b59a3ce7e90ea96bfacfa39975e7eae9cd
|
refs/heads/master
| 2021-08-19T02:34:29.415738
| 2017-11-24T13:56:43
| 2017-11-24T13:56:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
"""
WSGI config for holi project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "holi.settings")
application = get_wsgi_application()
|
[
"tao@TAOs-MacBook-Pro.local"
] |
tao@TAOs-MacBook-Pro.local
|
e7cbec8407c61c7b724171aa967674dbf244853b
|
89bae02f23e787416fda894a128c9abfb4986515
|
/metalearning/allennlp/tests/modules/matrix_attention/cosine_matrix_attention_test.py
|
cff481ba8ea3a77de780b912867c54cef1eb849c
|
[
"Apache-2.0"
] |
permissive
|
asheverdin/multilingual-interference
|
f2e64cebfffc749b080fa64860659922224e6e65
|
7bc1b5918142e3c84bea83c5a7f39e3f245172e9
|
refs/heads/main
| 2023-05-12T13:07:19.997696
| 2021-05-28T22:16:26
| 2021-05-28T22:16:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,518
|
py
|
import torch
from numpy.testing import assert_almost_equal
import numpy
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.matrix_attention import CosineMatrixAttention
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
class TestCosineMatrixAttention(AllenNlpTestCase):
def test_can_init_cosine(self):
legacy_attention = MatrixAttention.from_params(Params({"type": "cosine"}))
isinstance(legacy_attention, CosineMatrixAttention)
def test_cosine_similarity(self):
# example use case: a batch of size 2.
# With a time element component (e.g. sentences of length 2) each word is a vector of length 3.
# It is comparing this with another input of the same type
output = CosineMatrixAttention()(
torch.FloatTensor([[[0, 0, 0], [4, 5, 6]], [[-7, -8, -9], [10, 11, 12]]]),
torch.FloatTensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]),
)
# For the first batch there is
# no correlation between the first words of the input matrix
# but perfect correlation for the second word
# For the second batch there is
# negative correlation for the first words
# correlation for the second word
assert_almost_equal(
output.numpy(), numpy.array([[[0, 0], [0.97, 1]], [[-1, -0.99], [0.99, 1]]]), decimal=2
)
|
[
"lcur0308@test3.irc.sara.nl.irc.sara.nl"
] |
lcur0308@test3.irc.sara.nl.irc.sara.nl
|
9be0e397a6794662e1b56e0a657678f25f26b112
|
16556dbc3a50f627e2441038168a40c2474d8f4d
|
/Session_3_Solved_question/List_Solution/change_list_nth_to_(n+1)th.py
|
b40c2c50e7f5990679ae24fc9f35e548fb3e7cd4
|
[] |
no_license
|
Kratos-28/258349_Daily_Commit
|
73669fc69e8fb36fca8146a8e054077d183e79d8
|
9c3192d99f6342f5fd07c5a455318b08c8bfda97
|
refs/heads/master
| 2023-04-18T07:31:43.215780
| 2021-04-23T08:52:27
| 2021-04-23T08:52:27
| 359,241,594
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
from itertools import zip_longest, chain

def change_list(lst):
    # swap every adjacent pair: [a, b, c, d] -> [b, a, d, c]
    return list(chain.from_iterable(zip_longest(lst[1::2], lst[::2])))
n=[11,22,33,44,55,66,77,88]
print("before changing the list: ",n)
print("Change list to: ",change_list(n))
|
[
"surajarya517@gmail.com"
] |
surajarya517@gmail.com
|
0f40b2b6ad662f88bf819ec5071e79cabccb3e9e
|
280a3f5d22aeaf609264eb1d4006d7fced75a212
|
/hdcrm/migrations/0015_sku_currency.py
|
d4dcbb64f5dae9846ad35462a5b0f5cec5528514
|
[] |
no_license
|
lianhuness/django1
|
9e51f322ef5ca484e69e1a6f2d44caa70629145c
|
53dfd38e940f9571be8174fba0ed43fc1d98ab9e
|
refs/heads/master
| 2021-07-22T04:31:18.985141
| 2017-10-29T15:46:55
| 2017-10-29T15:46:55
| 108,743,007
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-10 15:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hdcrm', '0014_sku_name'),
]
operations = [
migrations.AddField(
model_name='sku',
name='currency',
field=models.CharField(choices=[('USD', '美金'), ('CNY', '人民币')], default='CNY', max_length=5),
),
]
|
[
"lianhuness@gmail.com"
] |
lianhuness@gmail.com
|
bebcffe5ecafc72fe596f81506b37bdc8eedc5cf
|
1dd4c6833ebf731879ad222e57577cfc27db8ea0
|
/queue.py
|
3a6a99af4f82d1e87a24239b44b79b0ff8c266c4
|
[] |
no_license
|
peeblesbrandon/data_strucs_and_algos_in_python_exercises
|
b3a46e99bee2d6c6c2ba09f409b4e1f44c193300
|
77d70cd1bf13f9e7a7e64bfce00a5fd90d87e1b7
|
refs/heads/master
| 2023-01-24T01:17:46.607104
| 2020-11-22T17:10:55
| 2020-11-22T17:10:55
| 275,945,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,647
|
py
|
from colorama import Fore, Back, Style
class Empty(Exception):
pass
class ArrayQueue:
DEFAULT_CAPACITY = 10
def __init__(self):
self._data = [None] * ArrayQueue.DEFAULT_CAPACITY # use Class.global_var to access
self._size = 0 # elements in queue
self._front = 0 # front element in queue
def __len__(self):
return self._size
def is_empty(self):
return self._size == 0
def first(self):
if self.is_empty():
raise Empty('Queue is empty')
return self._data[self._front]
def dequeue(self):
if self.is_empty():
raise Empty('Queue is empty')
answer = self._data[self._front]
self._data[self._front] = None
self._front = (self._front + 1) % len(self._data)
self._size -= 1
if self._size <= len(self._data) // 4:
self._resize(len(self._data) // 2)
return answer
def enqueue(self, e):
if self._size == len(self._data):
self._resize(2 * len(self._data))
avail = (self._front + self._size) % len(self._data) #get next available index to insert element
self._data[avail] = e
self._size += 1
def _resize(self, cap):
old = self._data
self._data = [None] * cap
walk = self._front
for k in range(self._size):
self._data[k] = old[walk]
walk = (walk + 1) % len(old)
self._front = 0
# unit tests
if __name__ == '__main__':
print(Style.DIM, Fore.WHITE, "...running tests", Style.RESET_ALL)
# initialization tests
queue = ArrayQueue()
assert isinstance(queue, ArrayQueue) == True, "Should be of class ArrayQueue"
assert queue.is_empty() == True, "Queue should be initialized as empty"
# enqueue and dequeue tests
queue.enqueue(1)
assert queue.first() == 1, "Front element should be: 1"
for i in range(2,9):
queue.enqueue(i)
assert queue.dequeue() == 1, "Dequeued element should be: 1"
assert queue.first() == 2, "New front element should be: 2"
queue.dequeue()
queue.dequeue()
for i in range(100, 103):
queue.enqueue(i)
assert queue._data[0] == 102, "Elements should have wrapped around -- 0th element should be: 102"
for i in range(5):
queue.enqueue(i)
assert len(queue._data) == 20, "Queue should have resized to 2x default capacity"
for i in range(10):
queue.dequeue()
    assert len(queue._data) == 10, "Queue should have reduced in size by 50%"
# print success message
print(Fore.GREEN, "All tests passed", Style.RESET_ALL)
|
[
"peeblesbrandon@gmail.com"
] |
peeblesbrandon@gmail.com
|
2d18223999ed02bcf8f908978267d515d97287d0
|
59b4d0f5fbfa7795241583e7684486368a825080
|
/cs1440-felt-grayson-assn0/lsn/1-TextProcessing/ex0.py
|
bbac0607d42338291fcfb751a41f65807e513848
|
[] |
no_license
|
grayfelt/CS1440
|
a405ca6e5b7408dd28c05f67b74431e209943d1b
|
ecc466352468081a902829bc9110a90f4b8adb10
|
refs/heads/master
| 2022-12-20T17:34:50.539105
| 2020-10-03T18:00:38
| 2020-10-03T18:00:38
| 295,624,932
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,140
|
py
|
def findWords(sentence):
wordsToReturn = []
# TODO: Return the words in `sentence` that are five characters or less
# for word in sentence...
    for w in sentence:
        length = 0
        for c in w:             # count characters by hand, per the exercise
            length += 1
        if length <= 5:
            wordsToReturn.append(w)
    return wordsToReturn
if __name__ == '__main__':
    provided = [
        "Craftsman",
        "Keep",
        "Reveal",
        "personal",
        "it",
        "harmful",
        "engine",
        "short",
        "friendly",
        "killer",
        "honest",
        "season",
        "and",
        "camera",
        "strange",
        "hiccup",
        "horseshoe",
        "sphere",
        "charismatic",
        "ceiling",
        "sweet",
        "formation",
        "substitute",
        "daughter",
        "perfect"
    ]
words = findWords(provided)
# Prints the `words` list formatted like a sentence.
for i in range(len(words)):
if i != (len(words) - 1):
print(words[i], end=" ")
else:
print(words[i] + "!")
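# An equivalent one-liner for comparison (an added sketch, not part of the
# original exercise): len() replaces the manual character count above.
def findWordsShort(sentence):
    return [w for w in sentence if len(w) <= 5]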
|
[
"noreply@github.com"
] |
noreply@github.com
|
9dafe0bbc395eab1c1bc9a764c84764d4c75733f
|
9e4724734b1de620030ba80f84e36d08172648f5
|
/test_main.py
|
81e8bc9568dd35cc6223d9e1415723b5eb0c5160
|
[] |
no_license
|
Steve-107/pyqt_GUI
|
c351f889bee2f1df17acfd91023c3af47b34848f
|
b03f453a9bfb9b9e1c35799799508a78dd6f3392
|
refs/heads/master
| 2023-05-21T13:57:52.997119
| 2021-06-11T16:45:43
| 2021-06-11T16:45:43
| 375,743,219
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,657
|
py
|
import sys
from MainFrame import Ui_Form
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QMouseEvent, QCursor , QMovie
from sheetstyle import *
class MyMainFrame(QtWidgets.QWidget, Ui_Form):
max_flag = False
mic_status_flag = True
link_status_flag = False
def __init__(self):
super(MyMainFrame, self).__init__()
        self.setupUi(self)
        self.m_flag = False  # drag state; avoids AttributeError before the first press
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
self.m_flag = True
            self.m_Position = event.globalPos() - self.pos()  # mouse position relative to the window
            event.accept()
            self.setCursor(QCursor(Qt.OpenHandCursor))  # change the cursor icon
def mouseMoveEvent(self, QMouseEvent):
        # Qt.LeftButton alone is a truthy constant, so test the actual buttons()
        if QMouseEvent.buttons() == Qt.LeftButton and self.m_flag:
            self.move(QMouseEvent.globalPos() - self.m_Position)  # move the window with the drag
QMouseEvent.accept()
def mouseReleaseEvent(self, QMouseEvent):
self.m_flag = False
self.setCursor(QCursor(Qt.ArrowCursor))
def init_ui():
my_form.setWindowFlag(Qt.FramelessWindowHint)
my_form.setStyleSheet(form_background_style)
my_form.pushButton_min.setStyleSheet(min_btn_style)
my_form.pushButton_mid.setStyleSheet(mid_btn_style)
my_form.pushButton_close.setStyleSheet(close_btn_style)
    my_form.pushButton_light.setStyleSheet(light_btn_style)
    my_form.pushButton_tempature.setStyleSheet(tempature_btn_style)
    # the main tab starts selected, so it gets the "on" image directly
    my_form.pushButton_main.setStyleSheet('QPushButton{'
                                          'border-image: url(icons/Main_on.png)'
                                          '}')
my_form.pushButton_mic.setStyleSheet('QPushButton{'
'border-image: url(icons/mic1.png)'
'}')
my_form.pushButton_link.setStyleSheet('QPushButton{'
'border-image: url(icons/link_close1.png)'
'}')
my_form.groupBox_fjdw.setStyleSheet(radio_btn_style)
my_form.groupBox_dbyd.setStyleSheet(radio_btn_style)
my_form.groupBox_jdms.setStyleSheet(radio_btn_style)
my_form.groupBox_left.setStyleSheet(radio_btn_style)
my_form.groupBox_right.setStyleSheet(radio_btn_style)
my_form.verticalSlider_sx_1.setStyleSheet(horizontal_slider_style)
my_form.horizontalSlider_red.setStyleSheet(horizontal_slider_style)
my_form.horizontalSlider_blue.setStyleSheet(horizontal_slider_style)
my_form.horizontalSlider_green.setStyleSheet(horizontal_slider_style)
my_form.horizontalSlider_cold_hot.setStyleSheet(horizontal_slider_style)
my_form.horizontalSlider_left_bottom.setStyleSheet(horizontal_slider_style)
my_form.horizontalSlider_right_bottom.setStyleSheet(horizontal_slider_style)
my_form.verticalSlider_sx_1.setStyleSheet(vertical_slider_style)
my_form.verticalSlider_sx_2.setStyleSheet(vertical_slider_style)
show_gif()
def show_full_window():
if my_form.max_flag:
my_form.max_flag = False
my_form.showNormal()
else:
my_form.max_flag = True
my_form.showMaximized()
def clear_btn_checked_status():
my_form.pushButton_main.setChecked(False)
my_form.pushButton_light.setChecked(False)
my_form.pushButton_tempature.setChecked(False)
def clicked_main_btn():
clear_btn_checked_status()
my_form.pushButton_main.setStyleSheet('QPushButton{'
'border-image: url(icons/Main_on.png)'
'}')
my_form.pushButton_light.setStyleSheet(light_btn_style)
my_form.pushButton_tempature.setStyleSheet(tempature_btn_style)
my_form.tabWidget.setCurrentIndex(0)
def clicked_light_btn():
clear_btn_checked_status()
my_form.pushButton_main.setStyleSheet(main_btn_style)
my_form.pushButton_light.setStyleSheet('QPushButton{'
'border-image: url(icons/Light_hover.png)'
'}')
my_form.pushButton_tempature.setStyleSheet(tempature_btn_style)
my_form.tabWidget.setCurrentIndex(1)
def clicked_tempature_btn():
clear_btn_checked_status()
my_form.pushButton_main.setStyleSheet(main_btn_style)
my_form.pushButton_light.setStyleSheet(light_btn_style)
my_form.pushButton_tempature.setStyleSheet('QPushButton{'
'border-image: url(icons/Tempature_hover.png)'
'}')
my_form.tabWidget.setCurrentIndex(2)
def clicked_mic_btn():
my_form.mic_status_flag = not my_form.mic_status_flag
if my_form.mic_status_flag:
my_form.pushButton_mic.setStyleSheet('QPushButton{'
'border-image: url(icons/mic1.png)'
'}')
else:
my_form.pushButton_mic.setStyleSheet('QPushButton{'
'border-image: url(icons/mic2.png)'
'}')
def clicked_link_btn():
my_form.link_status_flag = not my_form.link_status_flag
if my_form.link_status_flag:
my_form.pushButton_link.setStyleSheet('QPushButton{'
'border-image: url(icons/link_open1.png)'
'}')
else:
my_form.pushButton_link.setStyleSheet('QPushButton{'
'border-image: url(icons/link_close1.png)'
'}')
def show_gif():
gif = QMovie('test.gif')
my_form.label_gif.setMovie(gif)
gif.start()
def init_signal():
my_form.pushButton_min.clicked.connect(lambda: my_form.showMinimized())
my_form.pushButton_mid.clicked.connect(show_full_window)
my_form.pushButton_close.clicked.connect(lambda: my_form.close())
my_form.pushButton_main.clicked.connect(clicked_main_btn)
my_form.pushButton_light.clicked.connect(clicked_light_btn)
my_form.pushButton_tempature.clicked.connect(clicked_tempature_btn)
my_form.pushButton_mic.clicked.connect(clicked_mic_btn)
my_form.pushButton_link.clicked.connect(clicked_link_btn)
if __name__ == '__main__':
try:
app = QtWidgets.QApplication(sys.argv)
my_form = MyMainFrame()
init_ui()
init_signal()
my_form.show()
sys.exit(app.exec_())
except Exception as e:
print(e)
|
[
"yourwsb@163.com"
] |
yourwsb@163.com
|
d2462ea0d850cd6935ccb6c60eff3fbb00faf7d7
|
07917881310fc81d85a2cbdf27c9b3c4fa03c694
|
/python1812/python_1/17_测试_收发邮件_二维码/代码/04_验证码生成器.py
|
4d493eee3597ce7e1c156d58c53c29845e19966c
|
[] |
no_license
|
zaoyuaner/Learning-materials
|
9bc9a127d1c6478fb6cebbb6371b1fd85427c574
|
1f468a6f63158758f7cbfe7b5df17f51e3205f04
|
refs/heads/master
| 2020-05-18T11:38:45.771271
| 2019-05-20T09:07:44
| 2019-05-20T09:07:44
| 184,384,050
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,363
|
py
|
import datetime
import hashlib
from PIL import ImageFont,ImageDraw,Image
from random import randint
class VerifyCode:
def __init__(self,width=100,height=40,size=4):
"""
        :param width: width of the captcha image
        :param height: height of the captcha image
        :param size: number of characters in the captcha
"""
self.width = width if width > 0 else 100
self.height = height if height > 0 else 40
self.size = size if size > 0 else 4
        self.pen = None  # drawing pen
        self.code = ""   # holds the generated captcha string
# @property
# def code(self):
# return self.__code
# @code.setter
# def code(self,code):
# self.__code = code
def generate(self):
        # 1. create the canvas (values closer to 255 give lighter colors)
        im = Image.new("RGB",(self.width,self.height),self.randColor(160,255))
        # 2. create the pen
        self.pen = ImageDraw.Draw(im)
        # 3. generate the random string
        self.randString()
        # 4. draw the string
        self.__drawCode()
        # 5. draw interference points
        self.__drawPoint()
        # 6. draw interference lines
        self.__drawLine()
        # 7. save the image
        im.save("vc.jpg")
def __drawLine(self):
"""
        Draw interference lines.
:return:
"""
for i in range(6):
start = (randint(1,self.width-1),randint(1,self.height-1))
end = (randint(1,self.width-1),randint(1,self.height-1))
self.pen.line([start,end],fill=self.randColor(50,150),width = 1)
def __drawPoint(self):
"""
        Draw interference points.
:return:
"""
for i in range(200):
x = randint(1,self.width-1)
y = randint(1,self.height-1)
self.pen.point((x,y),fill= self.randColor(30,100))
def __drawCode(self):
"""
        Draw the captcha string.
:return:
"""
myFont = ImageFont.truetype("MSYH.TTF",size=20,encoding="UTF-8")
for i in range(self.size):
            x = 15 + i*(self.width - 20)/self.size # spread the characters evenly
            y = randint(5,10) # random vertical offset
self.pen.text((x,y),self.code[i],fill = self.randColor(0,60),font = myFont)
def randString(self):
"""
        Generate a random string of digits.
:return:
"""
result = ""
for i in range(self.size):
result += str(randint(0,9))
self.code = result
    def randColor(self,low,high): # random color, each RGB channel in [low, high]
return randint(low,high),randint(low,high),randint(low,high)
# class StrCode(VerifyCode):
# def randString(self):
# s1 =hashlib.md5(b"2314").hexdigest()
# print(s1)
# self.code = s1[:self.size]
if __name__ == "__main__":
vc = VerifyCode()
# vc = StrCode()
vc.generate()
print(vc.code)
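# A minimal sketch of the commented-out StrCode idea above: subclass VerifyCode
# and override randString to emit letters instead of digits. The uppercase
# alphabet chosen here is an assumption, not part of the original design.
import string
from random import choice
class LetterCode(VerifyCode):
    def randString(self):
        self.code = "".join(choice(string.ascii_uppercase) for _ in range(self.size))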
|
[
"13510647877@163.com"
] |
13510647877@163.com
|
6efe0840dad330e8780acea33e1d0f9f97bbd1cf
|
503b38aaec3c632b78285f7ca016f13a2c14fcd4
|
/products/forms.py
|
301a034e15ac6296e8317166313e6f1221a93d84
|
[] |
no_license
|
Glitch-dev1/Test
|
f876ddaafccf2f02748001f8a53e43f031649f23
|
a46d375a40cae70b4d10c36bbf9fe16b8a118901
|
refs/heads/master
| 2023-07-02T19:05:47.461216
| 2021-08-08T16:17:07
| 2021-08-08T16:17:07
| 392,296,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
from django import forms
from .models import Product
class ProductForm(forms.ModelForm):
description = forms.CharField(widget=forms.Textarea(attrs = {
"rows": "10",
"cols": "35",
}
)
)
class Meta:
model = Product
fields = [
'name',
'price',
'description',
'image',
]
|
[
"83586216+Glitch-dev1@users.noreply.github.com"
] |
83586216+Glitch-dev1@users.noreply.github.com
|
d236f4d719fe15f5144cc8972bf498f6257cdaaa
|
55d8abdf2008630172e27d22bedd1a9775bc4d36
|
/code/algospot_boggle.py
|
777d62689aaac95b9b3f090c11a3a19ce839e1c6
|
[] |
no_license
|
keeka2/algorithm_study
|
b184fb5fea84ea6c9ca65ee6dde99500b377894b
|
df1183cf6f8cd240e894ae2d968c94804b612abe
|
refs/heads/main
| 2023-04-01T15:12:58.316841
| 2021-04-04T13:58:08
| 2021-04-04T13:58:08
| 344,655,968
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,477
|
py
|
# https://algospot.com/judge/problem/read/BOGGLE
import sys
def solution(board, word_dict, find_word):
dx = [0, 1, 0, -1, 1, 1, -1, -1]
dy = [1, 0, -1, 0, 1, -1, 1, -1]
start_word = find_word[0]
if start_word in word_dict:
stack = word_dict[start_word][:]
idx_stack = [1 for _ in range(len(stack))]
else:
return "NO"
while stack:
cur_x, cur_y = stack.pop()
cur_idx = idx_stack.pop()
if cur_idx == len(find_word):
return "YES"
word = find_word[cur_idx]
if word not in word_dict:
return "NO"
for i in range(8):
nxt_x, nxt_y = cur_x + dx[i], cur_y + dy[i]
if 0 <= nxt_x < 5 and 0 <= nxt_y < 5 and word == board[nxt_x][nxt_y]:
stack.append([nxt_x, nxt_y])
idx_stack.append(cur_idx + 1)
    # stack exhausted without matching the whole word
    return "NO"
c = int(sys.stdin.readline())
for _ in range(c):
board = []
word_dict = {}
for x in range(5):
row = list(sys.stdin.readline().strip())
board.append(row)
for y in range(5):
w = row[y]
if w in word_dict:
word_dict[w].append([x, y])
else:
word_dict[w] = [[x, y]]
for _ in range(int(sys.stdin.readline().strip())):
find_word = sys.stdin.readline().strip()
pos = solution(board, word_dict, find_word)
print(find_word, pos)
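# A self-contained check (an added illustration; board and words are made up):
# build the same board/word_dict structures the stdin loop above builds and
# call solution() directly. Call _demo() instead of piping input to try it.
def _demo():
    rows = ["ABCDE", "FGHIJ", "KLMNO", "PQRST", "UVWXY"]
    demo_board, demo_dict = [], {}
    for x, r in enumerate(rows):
        demo_board.append(list(r))
        for y, ch in enumerate(r):
            demo_dict.setdefault(ch, []).append([x, y])
    print(solution(demo_board, demo_dict, "ABG"))  # A(0,0)->B(0,1)->G(1,1): YES
    print(solution(demo_board, demo_dict, "AZ"))   # no Z on the board: NO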
|
[
"noreply@github.com"
] |
noreply@github.com
|
dbe98b3d3b69f2864a7216c5b3197f0d31cb863a
|
52cb5e2190c7e7c1c3317653c76f5e110358503b
|
/main/models.py
|
caa208ba7ed95183debe7a430eda55b293fcae19
|
[] |
no_license
|
nixiaohui/webT
|
228ea5199481f1becc7fb79f4a78e8c1748003f5
|
eff7c7d4db8cdd1b5038f47a66080f9387a113e2
|
refs/heads/master
| 2023-02-05T15:18:04.460832
| 2019-06-27T04:36:04
| 2019-06-27T04:36:04
| 194,019,916
| 0
| 0
| null | 2023-02-02T06:32:28
| 2019-06-27T03:59:23
|
Python
|
UTF-8
|
Python
| false
| false
| 4,333
|
py
|
from datetime import datetime
from app import db
class Article(db.Model):
__tablename__ = 'articles'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
title = db.Column(db.String(100), nullable=False)
content = db.Column(db.Text, nullable=False)
def __repr__(self):
return '<Article> %r' % self.title
game_users = db.Table(
'game_users',
db.Column('game_id', db.ForeignKey('games.id'), primary_key=True),
db.Column('user_id', db.ForeignKey('users.id'), primary_key=True)
)
# room_users = db.Table(
# 'room_users',
# db.Column('room_id', db.ForeignKey('rooms.id'), primary_key=True),
# db.Column('user_id', db.ForeignKey('users.id'), primary_key=True)
# )
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(20), nullable=False, index=True, unique=True)
email = db.Column(db.String(40), nullable=True, unique=True)
password = db.Column(db.String(256), nullable=False)
    submission_data = db.Column(db.DateTime, default=datetime.now)  # pass the callable, not now() evaluated once at import time
    update_data = db.Column(db.DateTime, default=datetime.now)
games = db.relationship(
'Game',
secondary=game_users,
back_populates='users'
)
# rooms = db.relationship(
# 'Room',
# secondary=room_users,
# back_populates='users'
# )
def __repr__(self):
return '<User> %r' % self.name
class Card(db.Model):
__tablename__ = 'cards'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
suit = db.Column(db.String(10), nullable=False)
value = db.Column(db.Integer, nullable=False)
game_id = db.Column(db.Integer, db.ForeignKey('games.id'), nullable=False)
game = db.relationship('Game', backref=db.backref('cards', lazy=True))
user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
user = db.relationship('User', backref=db.backref('cards', lazy=True))
def __repr__(self):
return '<Card> %r%r' % (self.suit, self.value)
class Room(db.Model):
__tablename__ = 'rooms'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
title = db.Column(db.String(20), nullable=False)
capacity = db.Column(db.Integer, nullable=False, default=9)
small_blind = db.Column(db.Integer, nullable=False, default=2)
buy_in = db.Column(db.Integer, nullable=False, default=400)
is_close = db.Column(db.Boolean, default=False)
password = db.Column(db.String(6))
admin_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
admin = db.relationship('User', backref=db.backref('rooms', lazy=True))
# users = db.relationship(
# 'User',
# secondary=room_users,
# back_populates='rooms'
# )
def __repr__(self):
return '<Room> %r' % self.title
class Seat(db.Model):
__tablename__ = 'seats'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
    user = db.relationship('User', backref=db.backref('seats', lazy=True))
room_id = db.Column(db.Integer, db.ForeignKey('rooms.id'), nullable=False)
    room = db.relationship('Room', backref=db.backref('seats', lazy=True))
seat = db.Column(db.Integer, nullable=False, default=99)
class Game(db.Model):
__tablename__ = 'games'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
round = db.Column(db.Integer, nullable=False, default=99)
room_id = db.Column(db.Integer, db.ForeignKey('rooms.id'), nullable=False)
room = db.relationship('Room', backref=db.backref('games', lazy=True))
users = db.relationship(
'User',
secondary=game_users,
back_populates='games'
)
def __repr__(self):
return '<Game> %r' % self.id
class Pool(db.Model):
__tablename__ = 'pools'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
chips = db.Column(db.Integer, nullable=False)
game_id = db.Column(db.Integer, db.ForeignKey('games.id'), nullable=False)
game = db.relationship('Game', backref=db.backref('pools', lazy=True))
def __repr__(self):
return '<Pool> %r' % self.id
if __name__ == '__main__':
db.create_all()
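# A minimal relationship sketch (illustration only -- it assumes a configured
# Flask app context, which this module does not set itself up with):
def _demo_many_to_many():
    u = User(name="alice", email="a@example.com", password="pw")
    g = Game(round=1, room_id=1)
    g.users.append(u)           # one commit also populates the game_users table
    db.session.add_all([u, g])
    db.session.commit()
    assert g in u.games         # the secondary table keeps both sides in sync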
|
[
"6089870@qq.com"
] |
6089870@qq.com
|
6c967bd79071b400c2a1d94b4ebb7d4f1bb9b7a6
|
b92c9c1c5c7e8cf1d63f20a88515af0a4417e55a
|
/data_structures/linked-list/linked_list.py
|
f4decfa6bc3581a340c41034aae22f1765d78d1d
|
[
"MIT"
] |
permissive
|
glasscharlie/data-structures-and-algorithms
|
8506cd23ec1e1f257ae8066247d0f695b72bc384
|
4546a0606334c6e3156b567d8cc82d39fb183c58
|
refs/heads/master
| 2021-06-24T22:50:46.707322
| 2020-02-14T12:33:07
| 2020-02-14T12:33:07
| 225,463,373
| 0
| 0
|
MIT
| 2021-04-20T21:14:37
| 2019-12-02T20:33:21
|
Python
|
UTF-8
|
Python
| false
| false
| 3,603
|
py
|
class Node:
def __init__(self, value=None):
self.value = value
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def insert(self, value):
new_node = Node(value)
new_node.next = self.head
self.head = new_node
return value
def includes(self, value):
if not self.head:
return False
cur = self.head
while cur:
if cur.value == value:
return True
cur = cur.next
return False
    def to_string(self):
        if not self.head:           # guard: an empty list would crash below
            print("")
            return ""
        value = " "
        cur = self.head
        while cur.next != None:
            value += " " + str(cur.value)
            cur = cur.next
        value += " " + str(cur.value)
        print(value)
        return value
    def insert_before(self, existing_value, value):
        current = self.head
        if not current:
            return False
        if current.value == existing_value:
new_node = Node(value)
new_node.next = current
self.head = new_node
return True
while current.next:
if current.next.value == existing_value:
new_node = Node(value)
new_node.next = current.next
current.next = new_node
return True
current = current.next
def insert_after(self, existing_value, value):
current = self.head
while current:
if current.value == existing_value:
new_node = Node(value)
new_node.next = current.next
current.next = new_node
return True
current = current.next
def append(self, value):
if not self.head:
self.insert(value)
return
else:
current = self.head
while current.next:
current = current.next
current.next = Node(value)
def kth_from_end(self, k):
value_list = []
if self.head:
current = self.head
while current.next:
value_list.append(current.value)
current = current.next
value_list.append(current.value)
if 0 <= k < len(value_list):
return value_list[-(k+1)]
else:
return 'K is out of range'
small_list = LinkedList()
small_list.insert(1)
small_list.insert(2)
small_list.insert(3)
# small_list.insert_after(2, 4)
# small_list.insert_before(2, 5)
# small_list.insert_before(9, 7)
# small_list.insert_after(9,7)
small_list.to_string()
big_list = LinkedList()
big_list.insert(4)
big_list.insert(5)
big_list.insert(6)
big_list.insert(6)
big_list.insert(7)
big_list.to_string()
def merge_list(a_list, b_list):
a_list_curr = a_list.head
b_list_curr = b_list.head
while a_list_curr.next != None and b_list_curr != None:
#save the next address
a_list_next = a_list_curr.next
b_list_next = b_list_curr.next
# make b_list_current next of a_list_curr
b_list_curr.next = a_list_next
a_list_curr.next = b_list_curr
# update current for next iteration
a_list_curr = a_list_next
b_list_curr = b_list_next
    #if a_list doesn't have a next but b_list still has a current node
if b_list_curr:
a_list_curr.next = b_list_curr
return a_list.head
# first list is longer
merge_list(big_list, small_list)
big_list.to_string()
#2nd list is longer
# merge_list(small_list, big_list)
# small_list.to_string()
|
[
"charlie-glass@live.com"
] |
charlie-glass@live.com
|
ca0bf818f5d797fe169d26f5876caf9e6873172e
|
197b10d75ba44b22fca29f8d69c2922b72cb8ca5
|
/ship/api.py
|
ae8690dc1a4da94d2c96e6f66ac78b8994a82a42
|
[] |
no_license
|
webbyfox/py_master
|
c713c87cf4fd7d2697765211cdaefd7b49f96adc
|
e4b3ef5ea618b8f91c363d7f51d0e7b7064762a9
|
refs/heads/master
| 2021-01-11T14:45:15.060075
| 2017-01-27T13:24:58
| 2017-01-27T13:24:58
| 80,209,118
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,519
|
py
|
# -*- coding: utf-8 -*-
from rest_framework import viewsets, mixins, status
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from assessment.auth import TokenAuthSupportQueryString
from .injection_setup import logic
from .serializers import ShipSerializer
class ShipViewSet(
mixins.CreateModelMixin,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet
):
authentication_classes = (TokenAuthSupportQueryString,)
permission_classes = (IsAuthenticated,)
pagination_class = LimitOffsetPagination
serializer_class = ShipSerializer
default_limit = 20
def list(self, request): # pylint: disable=unused-argument
ships = self.get_queryset()
page = self.paginate_queryset(ships)
return self.get_paginated_response(page)
def get_queryset(self):
user = self.request.user
user_ids = [user.id] + self.request.query_params.getlist('user_id')
query_kwargs = {
'user_ids': user_ids,
'id': self.request.query_params.get('id'),
'ids': self.request.query_params.getlist('ids'),
'status': self.request.query_params.get('status'),
'order_by': self.request.query_params.get('order_by'),
}
ships, __ = logic.get_ships(**query_kwargs)
return ships
def create(self, request):
data = self.request.data.copy()
# We want to override the user ID to be the authenticated user.
data['user_id'] = self.request.user.id
serializer = self.serializer_class(data=data)
if serializer.is_valid():
serializer.save()
return Response(status=status.HTTP_204_NO_CONTENT)
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def retrieve(self, request, pk=None):
ships, __ = logic.get_ships(
id=pk,
user_ids=[request.user.id],
)
return Response(self.serializer_class(ships[0]).data)
def update(self, request, pk=None):
raise NotImplementedError(
'Please implement ``ship.api:ShipViewSet.update``'
)
def destroy(self, request, pk=None): # pylint: disable=unused-argument
logic.delete_ship(id=pk)
return Response(status=status.HTTP_204_NO_CONTENT)
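# A hedged sketch of the update() that the NotImplementedError above asks for.
# `logic.update_ship` is hypothetical -- the real logic-layer call is not shown
# in this module -- but the serializer and response flow mirror create():
#
#     def update(self, request, pk=None):
#         serializer = self.serializer_class(data=request.data, partial=True)
#         serializer.is_valid(raise_exception=True)
#         logic.update_ship(           # hypothetical helper, by analogy with
#             id=pk,                    # logic.get_ships / logic.delete_ship
#             user_id=request.user.id,
#             **serializer.validated_data,
#         )
#         return Response(status=status.HTTP_204_NO_CONTENT)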
|
[
"mr.mansuri@gmail.com"
] |
mr.mansuri@gmail.com
|
82ecfd01834d11e1c0de1b980af3a9cafb7d5d79
|
d0fec74acfbfdee1b662736731c1cc988e2ba2ee
|
/problem_44/p044.py
|
45a4578f96261bb8aeac04304edbc1ab5ebc2014
|
[] |
no_license
|
msztylko/project-Euler
|
fdd0cfefbe88b63f6dbd2d08f1cd59270b9e1735
|
b3f5ce828ccc6662c100dd27fa295fc8afa22f6e
|
refs/heads/master
| 2021-11-23T02:50:19.333259
| 2021-10-31T17:52:28
| 2021-10-31T17:52:28
| 195,980,596
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,851
|
py
|
import itertools, sys
if sys.version_info.major == 2:
range = xrange
def compute():
pentanum = PentagonalNumberHelper()
min_d = None # None means not found yet, positive number means found a candidate
# For each upper pentagonal number index, going upward
for i in itertools.count(2):
pent_i = pentanum.term(i)
# If the next number down is at least as big as a found difference, then conclude searching
if min_d is not None and pent_i - pentanum.term(i - 1) >= min_d:
break
# For each lower pentagonal number index, going downward
for j in range(i - 1, 0, -1):
pent_j = pentanum.term(j)
diff = pent_i - pent_j
# If the difference is at least as big as a found difference, then stop testing lower pentagonal numbers
if min_d is not None and diff >= min_d:
break
elif pentanum.is_term(pent_i + pent_j) and pentanum.is_term(diff):
min_d = diff # Found a smaller difference
return str(min_d)
# Provides memoization for generating and testing pentagonal numbers.
class PentagonalNumberHelper(object):
def __init__(self):
self.term_list = [0]
self.term_set = set()
def term(self, x):
assert x > 0
while len(self.term_list) <= x:
n = len(self.term_list)
term = (n * (n * 3 - 1)) >> 1
self.term_list.append(term)
self.term_set.add(term)
return self.term_list[x]
def is_term(self, y):
assert y > 0
while self.term_list[-1] < y:
n = len(self.term_list)
term = (n * (n * 3 - 1)) >> 1
self.term_list.append(term)
self.term_set.add(term)
return y in self.term_set
if __name__ == "__main__":
print(compute())
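# A quick sanity check of the helper (an added illustration): the pentagonal
# numbers n(3n - 1)/2 start 1, 5, 12, 22, 35, and is_term should agree.
def _check_pentagonal():
    p = PentagonalNumberHelper()
    assert [p.term(i) for i in range(1, 6)] == [1, 5, 12, 22, 35]
    assert p.is_term(22) and not p.is_term(23)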
|
[
"marcin.sztylko@gmail.com"
] |
marcin.sztylko@gmail.com
|
d33d0228e5fc83abc26914e524837108513efb50
|
5611e60f5a9fb2b37db7d305f7c126e645bbd78c
|
/defOS/defOS/urls.py
|
7a4b95f3fb5eb89bc17ea891e7c841ca5e059881
|
[] |
no_license
|
defossystem/DefOS
|
d58ea7f4b0f2d65ea7338f232e292f22f3d31274
|
51929ccb5dc34a1bd6d5e0b1114728ab94c03b8f
|
refs/heads/master
| 2021-01-19T12:37:22.529121
| 2017-02-17T18:50:58
| 2017-02-17T18:50:58
| 82,327,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 822
|
py
|
"""defOS URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from defOS.core import urls as core_urls
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'', include(core_urls)),
]
|
[
"matheus.pessoa16@gmail.com"
] |
matheus.pessoa16@gmail.com
|
f14500003a9a3d376ce6ea46eaa9209c444e174e
|
71303c14f31fbc8e674b53b6eb734838cfb392e0
|
/Loop.py
|
bcbb6e1b77afd139ccdf31a6e41f20e642766149
|
[
"MIT"
] |
permissive
|
luiz-fischer/Python
|
b776285d155ace065b99bbbd97e5128619d83d4a
|
03e85c3456bdee675a246a9d78dde17e351ce2f4
|
refs/heads/master
| 2023-01-02T12:21:49.636616
| 2020-11-03T00:47:06
| 2020-11-03T00:47:06
| 309,091,841
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,302
|
py
|
#WHILE
i = 1
while i < 6:
print(i)
i += 1
#The Break Statement
i = 1
while i < 6:
print(i)
if i == 3:
break
i += 1
#The Continue Statement
i = 0
while i < 6:
i += 1
if i == 3:
continue
print(i)
#The Else Statement
i = 1
while i < 6:
print(i)
i += 1
else:
print("i is no longer less than 6")
#FOR
fruits = ["apple", "banana", "cherry"]
for x in fruits:
print(x)
#Looping Through a String
for x in "banana":
print(x)
#The Break Statement
fruits = ["Apple", "banana", "cherry"]
for x in fruits:
print(x)
if x == "banana":
break
#Exit the loop when x is "banana", but this time the break comes before the print
fruits = ["apple", "banana", "cherry"]
for x in fruits:
if x == "banana":
break
print(x)
#The Continue Statement
fruits = ["apple", "banana", "cherry"]
for x in fruits:
if x == "banana":
continue
print(x)
#The Range Function
for x in range(6):
print(x)
for x in range(2, 6):
print(x)
for x in range(2, 30, 3):
print(x)
#Else in For Loop
for x in range(6):
print(x)
else:
print("Finally Finished")
#Nested Loops
adj = ["Red", "Big", "Tasty"]
fruits = ["Apple", "Banana", "Cherry"]
for x in adj:
for y in fruits:
print(x, y)
#The Pass Statement
for x in [0, 1, 2]:
pass
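#Else And Break Together (added note: else runs only if the loop was not broken)
for x in range(6):
    if x == 3:
        break
    print(x)
else:
    print("This never prints, because the loop hit break")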
|
[
"luiz.fischer@alunos.sc.senac.br"
] |
luiz.fischer@alunos.sc.senac.br
|
b3727709256989d10a71597d181a6473bd4cf24f
|
2a227fe131600485707a53791ae2e056063196b8
|
/sshttproxy.py
|
bb0b54da36a6960a5be6e78998160a246ae30964
|
[] |
no_license
|
BharatAntil/sshttproxy
|
f14d0f4f439119ea6c4ea7f26904bc7279e4ef3e
|
4d13996e49520f8938a26025e3ee60294bb754c1
|
refs/heads/master
| 2022-12-20T19:44:03.429456
| 2011-03-08T12:47:35
| 2011-03-08T12:47:35
| 300,006,260
| 0
| 0
| null | 2020-09-30T17:54:32
| 2020-09-30T17:54:31
| null |
UTF-8
|
Python
| false
| false
| 3,285
|
py
|
#!/usr/bin/python
import argparse
import re
import traceback
from collections import defaultdict
import eventlet
from eventlet.green import select
paramiko = eventlet.import_patched('paramiko')
ssh_client_locks = defaultdict(eventlet.semaphore.BoundedSemaphore)
ssh_clients = {}
def get_ssh_client(hostname):
"""Create SSHClient for hostname or return pre-existing client."""
with ssh_client_locks[hostname]:
if hostname not in ssh_clients:
client = paramiko.SSHClient()
client.load_system_host_keys()
client.connect(hostname)
ssh_clients[hostname] = client
return ssh_clients[hostname]
def create_tunnel(local_conn, remote_host, remote_port):
"""Create tunnel for forwarding."""
transport = get_ssh_client(remote_host).get_transport()
channel = transport.open_channel('direct-tcpip',
('localhost', remote_port),
local_conn.getpeername())
if not channel:
raise Exception('Remote host %s refused connection on %s'
% (remote_host, remote_port))
return channel
def forward(conn_a, conn_b):
"""Forward data both ways between connections until one closes."""
conns = conn_a, conn_b
while True:
# Get connections that are ready to read from
for conn in select.select(conns, [], [])[0]:
data = conn.recv(32768)
if len(data) == 0:
return
# Write data to the other connection
conns[1-conns.index(conn)].sendall(data)
def http_error(text):
"""Create HTTP error response."""
return (
'HTTP/1.0 500 Internal Server Error\r\n'
'Content-Length: %d\r\n'
'Content-Type: text/plain\r\n'
'\r\n%s' %
(len(text), text))
def extract_remote_host_port(http_data):
"""
Extract tunnel requirements from HTTP Host header.
Rightmost all-numeric subdomain is treated as the remote port,
everything to the left is treated as the remote host e.g:
remote-host.example.com.8080.forward.localhost
connects to remote-host.example.com on port 8080.
"""
host_header = re.search(r'^Host:\s+(\S+)(\s|$)', http_data,
re.I | re.M).group(1)
match = re.search(r'^(?P<host>.+)\.(?P<port>\d+)\.', host_header)
return match.group('host'), int(match.group('port'))
def connect_to_remote_host(client):
"""Extract remote host details, create tunnel and forward traffic."""
# Grab the first chunk of client data
data = client.recv(1024)
try:
remote_host, remote_port = extract_remote_host_port(data)
server = create_tunnel(client, remote_host, remote_port)
    except Exception:
client.sendall(http_error('Connection failure:\n%s'
% traceback.format_exc()))
client.close()
return
# Send initial chunk to server
server.sendall(data)
# Forward data both ways until one connection closes
forward(client, server)
client.close()
server.close()
def listen(address):
"""Listen for incoming connections and forward to remote hosts"""
listener = eventlet.listen(address)
while True:
client = listener.accept()[0]
eventlet.spawn_n(connect_to_remote_host, client)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--port', '-p', type=int, default=7150,
help='port to listen on')
parser.add_argument('--host', default='localhost',
help='host to listen on')
args = parser.parse_args()
listen((args.host, args.port))
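# Usage sketch (added illustration; the hostnames are made up): with wildcard
# DNS such as *.forward.localhost resolving to this proxy, a request like
#
#   curl -H 'Host: remote-host.example.com.8080.forward.localhost' \
#        http://localhost:7150/
#
# gets tunnelled over SSH to port 8080 on remote-host.example.com.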
|
[
"dave@orman.(none)"
] |
dave@orman.(none)
|
16cdd7293c435d61297667e7acc054a367ccb5c2
|
9fff62d7d6f23bb42a5b489a0ee25ecb6c63c07e
|
/runExp.py
|
59ffa3140c8d81c0cfa7c011d42567368bf10145
|
[] |
no_license
|
fancyqlx/graphJ
|
d488647a2759f72faf7151c5c2930c1eab56fc28
|
979fe309c52e9ab0282d54395ae019692958838c
|
refs/heads/master
| 2020-03-10T07:18:42.418827
| 2018-06-09T09:09:23
| 2018-06-09T09:09:23
| 129,260,299
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
import os
def generateGraph():
os.system("rm -r graphData/*")
os.system("python GenerateGraph.py")
def generateRandomGraph():
os.system("rm -r graphData/random*")
os.system("python GenerateGraph.py")
def compileJavaSource():
os.system("javac -d target/classes/ -cp target/classes/ -s src/main/java/fancyqlx/ src/main/java/fancyqlx/*.java")
def runGirth():
os.system("java -cp target/classes fancyqlx/Girth")
def runTrivalBFS():
os.system("java -cp target/classes fancyqlx/TrivalBFS")
def runBellmanFord():
os.system("java -cp target/classes fancyqlx/BellmanFord")
def runBetweenness():
os.system("java -cp target/classes fancyqlx/Betweenness")
def runStatistic():
os.system("python Statistic.py")
def run():
for i in xrange(0,100):
print "run %d-th experiment" % i
generateGraph()
runGirth()
runTrivalBFS()
runBellmanFord()
runStatistic()
def runBC():
for i in xrange(0,10):
print "run %d-th experiment" % i
generateRandomGraph()
runBetweenness()
runStatistic()
if __name__ == "__main__":
compileJavaSource()
run()
|
[
"fancyqlx@163.com"
] |
fancyqlx@163.com
|
edfcd0b67010b318be752683aea47602efef2e0e
|
9b57429efa72dbfa2ead9ae8d98a148475264aef
|
/dataservice/zmq/UPcomputer_part/data_process_0mqsubsys/codetestfile.py
|
5b92ab166e2e97779e29006953d6456126db19c8
|
[] |
no_license
|
Scottars/nis_website
|
7d78b1ab8647ebf17bc2b020660a56ac6f6a039f
|
2025e428dd65dba06c95738233978604ee011570
|
refs/heads/master
| 2022-03-07T19:04:15.565128
| 2021-01-19T16:03:50
| 2021-01-19T16:03:50
| 218,421,853
| 0
| 0
| null | 2022-03-02T06:49:57
| 2019-10-30T01:58:29
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 155
|
py
|
import struct
# '!f' packs the float 12 as 4 big-endian ("network order") bytes
b = b'exp' + struct.pack('!f', 12)
print(b)
print(b[0:3])
if b[0:3] == b'exp':
    exp_id = struct.unpack('!f', b[3:7])[0]  # bytes 3..6 hold the float
    print(exp_id)  # 12.0
|
[
"ScottarSEEE@hust.edu.cn"
] |
ScottarSEEE@hust.edu.cn
|
81678e4f401442962478ab90127c24b61b21e897
|
c074ce302e0a2a09ebe8b0a94e342380afbaa911
|
/beakjoon_PS/no2579_2.py
|
7c00e40144c179d3cbf2eca5fbd8ec8eb8d546f6
|
[] |
no_license
|
elrion018/CS_study
|
eeea7a48e9e9b116ddf561ebf10633670d305722
|
3d5478620c4d23343ae0518d27920b3211f686fd
|
refs/heads/master
| 2021-06-10T13:35:20.258335
| 2021-04-25T10:12:17
| 2021-04-25T10:12:17
| 169,424,097
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
import sys
N = int(sys.stdin.readline())
stair = []
for _ in range(N):
stair.append(int(sys.stdin.readline()))
dp = [[0, 0] for _ in range(N)]
if N > 2:
dp[0][0] = stair[0]
dp[1][0] = stair[1]
dp[1][1] = stair[0] + stair[1]
dp[2][0] = stair[0] + stair[2]
dp[2][1] = stair[1] + stair[2]
for i in range(2, N):
dp[i][0] = max(dp[i-2][0], dp[i-2][1]) + stair[i]
dp[i][1] = dp[i-1][0] + stair[i]
print(max(dp[N-1][0], dp[N-1][1]))
elif N == 2:
print(stair[0]+stair[1])
elif N == 1:
print(stair[0])
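# A worked check of the recurrence (added illustration): dp[i][0] is the best
# total reaching stair i with a two-step, dp[i][1] with a one-step, so no three
# stairs in a row are ever used. For the classic sample 6 / 10 20 15 25 10 20
# the program prints 75 = 10 + 20 + 25 + 20 (stairs 1, 2, 4 and 6).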
|
[
"elrion018@gmail.com"
] |
elrion018@gmail.com
|
2615d1f8724a3ba4639ef2ce0ddd8f607108b8bc
|
6adbc70bd71c26e1c837509d8d09fffb6f23e418
|
/sieve_of_e.py
|
3e596ebb8f1dafbf3f272186fe5356a11e7c43ad
|
[] |
no_license
|
random-weights/prime_numbers
|
a8892e0aab8fa52ddc46636732fc85d8ad600562
|
dcdccf69d17f33c1b8c6fcfcf66b0f48cbddadd8
|
refs/heads/master
| 2020-06-17T20:47:47.551462
| 2019-07-22T21:36:26
| 2019-07-22T21:36:26
| 196,049,221
| 0
| 0
| null | 2019-07-10T16:08:23
| 2019-07-09T16:54:19
|
Python
|
UTF-8
|
Python
| false
| false
| 2,563
|
py
|
"""
Sieve of Eratosthenes: list all numbers from 2 to n and cross off composites
by making repeated passes over the array. Instead of storing the numbers
themselves, store only a boolean isPrime flag, True by default for every
number. Since the array starts at the integer 2, the number at index i is
i + 2.
"""
import sys,time
import numpy as np
class Primes:
def __init__(self,n):
"""
initializes a numpy boolean array of size n
:param n: integer upto which we want to find prime numbers
"""
#to make sure our array isn't too big for memory allocation
if n <= sys.maxsize:
try:
                # we use a numpy array instead of a Python list because numpy
                # packs each boolean into one byte, not a full Python object
self.ls_primes = np.ones((n),dtype = bool)
print("Array of booleans created")
except Exception as e:
print(e)
self.n = n
self.curr_prime = 2
def epoch(self,multiplier):
"""
always call getNextPrime() after epoch()
:param multiplier: is always the curr_prime
:return: None
"""
multiple = multiplier*2
while multiple <= self.n:
self.ls_primes[multiple - 2] = False
multiple += multiplier
#print("All multiples of {0} crossed off".format(multiplier))
def getNextPrime(self):
"""
will check for next true value in ls_primes after curr_prime
updates instance variable curr_prime after each epoch.
:return: None
"""
index = self.curr_prime - 1
while True:
if self.ls_primes[index]:
break
else:
index += 1
self.curr_prime = index + 2
return self.curr_prime
def getMaxPrime(self):
"""
after all epochs, find largest prime by looking
for True value from end of numpy array working backwards.
:return: largest prime found below self.n
"""
last_index = self.n - 2
while True:
if self.ls_primes[last_index]:
return last_index + 2
else:
last_index += -1
if __name__ == "__main__":
n = int(input("Enter the upper bound: "))
start = time.time()
p = Primes(n)
while p.curr_prime <= n:
p.epoch(p.curr_prime)
p.getNextPrime()
print(p.getMaxPrime())
end = time.time()
print("Total time taken: {0:.4g}".format(end-start))
|
[
"ram.thubati@gmail.com"
] |
ram.thubati@gmail.com
|
71fe34c375dc7c5a2bbdddab6e00a7ceee37f963
|
bb6bacdb86362989fa0a72cfcf8120c8de9828b8
|
/clustering/gaussian_clustering.py
|
75d692ee804289916aed3a31370b29c237879071
|
[] |
no_license
|
platoneko/unique-ai-freshman-mission
|
c6624fde7135f0b834f0483f330024c849f8d330
|
661a0cc2a682429cce42c9f7a180ac4459109c53
|
refs/heads/master
| 2020-03-31T02:07:02.041486
| 2019-04-21T07:57:19
| 2019-04-21T07:57:19
| 151,807,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,803
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
import random
from math import *
def get_gaussian_prob(x, mean, sigma, n):
return exp(-0.5 * np.mat(x - mean) * sigma.I * np.mat(x - mean).T) / ((2 * pi)**(n/2) * np.linalg.det(sigma)**0.5)
def gaussian_clusters(x, k):
m, n = x.shape
mean_array = x[random.sample(list(range(m)), k)]
alpha_array = np.ones(k) / k
sigma_mats = [np.mat(np.eye(n)) / 10 for i in range(k)]
gamma = np.zeros([m, k])
for j in range(m):
for i in range(k):
gamma[j, i] = alpha_array[i] * get_gaussian_prob(x[j], mean_array[i], sigma_mats[i], n)
gamma[j] /= np.sum(gamma[j])
while True:
cur_gamma = gamma.copy()
for i in range(k):
gamma_i = np.sum(gamma[:, i])
mean_array[i] = np.sum(gamma[:, i].reshape([m, 1]) * x, 0) / gamma_i
sigma_mats[i] = np.mat(np.zeros([n, n]))
for j in range(m):
sigma_mats[i] += gamma[j, i] * np.mat(x[j] - mean_array[i]).T * np.mat(x[j] - mean_array[i])
sigma_mats[i] /= gamma_i
alpha_array[i] = gamma_i / m
for j in range(m):
for i in range(k):
gamma[j, i] = alpha_array[i] * get_gaussian_prob(x[j], mean_array[i], sigma_mats[i], n)
gamma[j] /= np.sum(gamma[j])
if np.sum((gamma - cur_gamma)**2) < 1e-5:
break
result = np.zeros(m)
for j in range(m):
result[j] = np.argmax(gamma[j])
return alpha_array, mean_array, sigma_mats, result
def load_watermelons():
x = np.array([[0.697, 0.460],
[0.774, 0.376],
[0.634, 0.264],
[0.608, 0.318],
[0.556, 0.215],
[0.403, 0.237],
[0.481, 0.149],
[0.437, 0.211],
[0.666, 0.091],
[0.243, 0.267],
[0.245, 0.057],
[0.343, 0.099],
[0.639, 0.161],
[0.657, 0.198],
[0.360, 0.370],
[0.593, 0.042],
[0.719, 0.103],
[0.359, 0.188],
[0.339, 0.241],
[0.282, 0.257],
[0.748, 0.232],
[0.714, 0.346],
[0.483, 0.312],
[0.478, 0.437],
[0.525, 0.369],
[0.751, 0.489],
[0.532, 0.472],
[0.473, 0.376],
[0.725, 0.445],
[0.446, 0.459]])
y = np.array([1]*8 + [0]*13 + [1]*9)
return x, y
if __name__ == '__main__':
x, y = load_watermelons()
alpha_array, mean_array, sigma_mats, result = gaussian_clusters(x, k=3)
print(result)
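    # Sanity checks on the EM output (added illustration): the mixture weights
    # form a probability distribution and every point receives one hard label.
    assert abs(np.sum(alpha_array) - 1.0) < 1e-6
    assert len(result) == len(x)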
|
[
"platonekosama@gmail.com"
] |
platonekosama@gmail.com
|
e7c22075cb792d7fe56f635557a0684186338535
|
40face04db4d2ce5fe785262d67ff6fa3c1a455c
|
/canteen_frontend_2.py
|
0bb86a61bc1a5b5df9904282d33f7679041a7f71
|
[
"MIT"
] |
permissive
|
ishaanalmeida/College-Canteen-Management-System
|
1ae505cb9ff8d83733ceb7417325b3560c9c6aa5
|
149ca593b893c01635491d381fd067a76c887516
|
refs/heads/main
| 2023-03-18T07:55:10.597356
| 2021-03-04T20:18:26
| 2021-03-04T20:18:26
| 344,597,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,643
|
py
|
#frontend
from tkinter import *
import tkinter.messagebox
from tkinter import ttk
import canteen_backend_2
color1="SkyBlue1"
color2="ghost white"
options=["NONE",
"CHINESE DOSA","RAWA MASALA DOSA","CHOPSUEY DOSA","SADA DOSA",
"VEG FRANKIE","PANEER FRANKIE","CHEESE FRANKIE","SCHEZWAN FRANKIE",
"CHEESE SANDWICH","CHEESE CHILLI TOAST","VEG CLUB SANDWICH","GRILLED SANDWICH",
"BHEL PURI","SEV PURI","DAHI PAPDI CHAAT","SAMOSA BHEL","RAGDA PURI",
"PAV BHAJI","TAWA PULAO","MATAR PULAO","PANEER TIKKA BIRYANI",
"PANEER TIKKA MASALA","PANEER SHAHI KORMA","PANEER TADKA",
"PANEER GARLIC","VEG CRISPY","MANCHURIAN DRY","BUTTER NAAN","GARLIC NAAN","ROTI",
"METHI PARATHA","PANEER PARATHA","ALOO PARATHA",
"FRUIT SALAD","KHICHIYAMASALA PAPAD","BANANA MILKSHAKE","CHIKOO MILKSHAKE",
"OREO SHAKE","LASSI","BUTTER MILK",
"ORANGE JUICE","FRESH LIME JUICE","PINEAPPLE JUICE","TEA","COFFEE"]
itemlist=dict.fromkeys(options,0)
#print(itemlist)
class Student:
def __init__(self,root):
self.root=root
self.root.title("Canteen ")
self.root.geometry("1350x750+0+0")
self.root.config(bg=color1)
SAPID=StringVar()
username=StringVar()
stream=StringVar()
course=StringVar()
year=StringVar()
password=StringVar()
mobile=StringVar()
address=StringVar()
#functions
def iExit():
            iExit=tkinter.messagebox.askyesno("Canteen","Confirm if you want to exit")
if iExit>0:
root.destroy()
return
def clearData():
self.txtSAPID.delete(0,END)
self.txtuser.delete(0,END)
self.txtstream.delete(0,END)
self.txtyear.delete(0,END)
#self.txtpass.delete(0,END)
def addData():
if(len(SAPID.get())!=0):
countt=0
for i in itemlist.keys():
if i ==password.get():
itemlist[i]+=1
countt=itemlist[i]
canteen_backend_2.addStdRec(SAPID.get(),username.get(),stream.get(),year.get(),password.get(),countt)
studentlist.delete(0,END)
studentlist.insert(END,(SAPID.get(),username.get(),stream.get(),year.get(),password.get()))
            # print(itemlist)
def DisplayData():
studentlist.delete(0,END)
for row in canteen_backend_2.viewData():
studentlist.insert(END,row,str(""))
def StudentRec(event):
global sd
searchStd=studentlist.curselection()[0]
sd=studentlist.get(searchStd)
self.txtSAPID.delete(0,END)
self.txtSAPID.insert(END,sd[1])
self.txtuser.delete(0,END)
self.txtuser.insert(END,sd[2])
self.txtstream.delete(0,END)
self.txtstream.insert(END,sd[3])
self.txtyear.delete(0,END)
self.txtyear.insert(END,sd[4])
def DeleteData():
if(len(SAPID.get())!=0):
canteen_backend_2.deleteRec(sd[0])
clearData()
DisplayData()
def searchDatabase():
studentlist.delete(0,END)
for row in canteen_backend_2.searchData(SAPID.get()):
studentlist.insert(END,row,str(""))
#frames
MainFrame=Frame(self.root,bg=color1)
MainFrame.grid()
TitFrame=Frame(MainFrame,bd=2,padx=54,pady=8,bg=color2,relief=RIDGE)
TitFrame.pack(side=TOP)
self.lblTit=Label(TitFrame,font=('arial',40, 'bold'),text="CANTEEN",bg=color2)
self.lblTit.grid()
ButtonFrame=Frame(MainFrame,bd=2,width=1350,height=70,padx=18,pady=10,bg=color2,relief=RIDGE)
ButtonFrame.pack(side=BOTTOM)
DataFrame=Frame(MainFrame,bd=1,width=1300,height=70,padx=20,pady=20,bg=color1,relief=RIDGE)
DataFrame.pack(side=BOTTOM)
DataFrameLEFT=LabelFrame(DataFrame,bd=1,width=1000,height=600,padx=20,bg=color2,relief=RIDGE,font=('arial',20, 'bold'),text="student info\n")
DataFrameLEFT.pack(side=LEFT)
DataFrameRIGHT=LabelFrame(DataFrame,bd=1,width=450,height=300,padx=31,pady=3,bg=color2,relief=RIDGE,font=('arial',20, 'bold'),text="ORDER LOGS:\n")
DataFrameRIGHT.pack(side=RIGHT)
#dropdownmenu
password.set("NONE")
combo=ttk.Combobox(DataFrameLEFT,values=options,width=15,textvariable=password)
combo.grid(row=4,column=1,columnspan = 2, sticky = 'NSWE', padx = 5, pady = 5)
#labels
self.lblSAPID=Label(DataFrameLEFT,font=('arial',20, 'bold'),text="SAP ID:",padx=2,pady=2,bg=color2)
self.lblSAPID.grid(row=0,column=0,sticky=W)
self.txtSAPID=Entry(DataFrameLEFT,font=('arial',20, 'bold'),textvariable=SAPID,width=39)
self.txtSAPID.grid(row=0,column=1)
self.lbluser=Label(DataFrameLEFT,font=('arial',20, 'bold'),text="Username:",padx=2,pady=2,bg=color2)
self.lbluser.grid(row=1,column=0,sticky=W)
self.txtuser=Entry(DataFrameLEFT,font=('arial',20, 'bold'),textvariable=username,width=39)
self.txtuser.grid(row=1,column=1)
self.lblstream=Label(DataFrameLEFT,font=('arial',20, 'bold'),text="Stream:",padx=2,pady=2,bg=color2)
self.lblstream.grid(row=2,column=0,sticky=W)
self.txtstream=Entry(DataFrameLEFT,font=('arial',20, 'bold'),textvariable=stream,width=39)
self.txtstream.grid(row=2,column=1)
self.lblyear=Label(DataFrameLEFT,font=('arial',20, 'bold'),text="Year:",padx=2,pady=2,bg=color2)
self.lblyear.grid(row=3,column=0,sticky=W)
self.txtyear=Entry(DataFrameLEFT,font=('arial',20, 'bold'),textvariable=year,width=39)
self.txtyear.grid(row=3,column=1)
self.lblpass=Label(DataFrameLEFT,font=('arial',20, 'bold'),text="Your Order:",padx=2,pady=2,bg=color2)
self.lblpass.grid(row=4,column=0,sticky=W)
val=canteen_backend_2.findRecc()
        self.lblrecc=Label(DataFrameLEFT,font=('arial',20, 'bold'),text="Recommended Order:",padx=2,pady=2,bg=color2)
self.lblrecc.grid(row=5,column=0)
self.lblfind=Label(DataFrameLEFT,font=('arial',20, 'bold'),text=val,padx=2,pady=2,bg=color2)
self.lblfind.grid(row=5,column=1)
#scroll
scrollbar=Scrollbar(DataFrameRIGHT)
scrollbar.grid(row=0,column=1,sticky='NS')
scrollbar1=Scrollbar(DataFrameRIGHT,orient='horizontal')
scrollbar1.grid(row=1,column=0,sticky='WE')
studentlist=Listbox(DataFrameRIGHT,width=41,height=16,font=('arial',12, 'bold'),yscrollcommand=scrollbar.set,xscrollcommand=scrollbar1.set)
studentlist.bind('<<ListboxSelect>>',StudentRec)
studentlist.grid(row=0,column=0,padx=8)
scrollbar.config(command=studentlist.yview)
scrollbar1.config(command=studentlist.xview)
#Button
self.btnAddData=Button(ButtonFrame,text="Add New",font=('arial',20, 'bold'),height=1,width=10,bd=4,command=addData)
self.btnAddData.grid(row=0,column=0)
self.btnDisplay=Button(ButtonFrame,text="Display",font=('arial',20, 'bold'),height=1,width=10,bd=4,command=DisplayData)
self.btnDisplay.grid(row=0,column=1)
self.btnClear=Button(ButtonFrame,text="Clear",font=('arial',20, 'bold'),height=1,width=10,bd=4,command=clearData)
self.btnClear.grid(row=0,column=2)
self.btnDelete=Button(ButtonFrame,text="Delete",font=('arial',20, 'bold'),height=1,width=10,bd=4,command=DeleteData)
self.btnDelete.grid(row=0,column=3)
self.btnSearch=Button(ButtonFrame,text="Search",font=('arial',20, 'bold'),height=1,width=10,bd=4,command=searchDatabase)
self.btnSearch.grid(row=0,column=4)
self.btnExit=Button(ButtonFrame,text="Exit",font=('arial',20, 'bold'),height=1,width=10,bd=4,command=iExit)
self.btnExit.grid(row=0,column=6)
if __name__=="__main__":
root=Tk()
application=Student(root)
root.mainloop()
|
[
"noreply@github.com"
] |
noreply@github.com
|
effb09cc5701e16d280b1dbbea9921aead19d8fb
|
a37afb5108edeb0798a4e8439fd811e404627976
|
/boston_housing/visuals.py
|
b3a708e69cd285a27d8cb7ca10c1c1e73d1874e5
|
[] |
no_license
|
Trezcool/udacity-ml
|
19141859d848d49d88e0729291704bea7484e467
|
0abf1fd3a793283a1452569dd6027ecefc98d028
|
refs/heads/master
| 2023-04-27T07:59:57.745635
| 2019-05-08T01:05:42
| 2019-05-08T01:05:42
| 184,679,519
| 0
| 0
| null | 2023-04-23T17:01:50
| 2019-05-03T00:51:57
|
HTML
|
UTF-8
|
Python
| false
| false
| 4,882
|
py
|
###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
warnings.filterwarnings("ignore", category=UserWarning, module="matplotlib")
#
# Display inline matplotlib plots with IPython
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline')
###########################################
import matplotlib.pyplot as pl
import numpy as np
from sklearn.model_selection import learning_curve
from sklearn.model_selection import validation_curve
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import ShuffleSplit, train_test_split
def ModelLearning(X, y):
"""
Calculates the performance of several models with varying sizes of training data.
The learning and testing scores for each model are then plotted.
"""
# Create 10 cross-validation sets for training and testing
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
# Generate the training set sizes increasing by 50
train_sizes = np.rint(np.linspace(1, X.shape[0] * 0.8 - 1, 9)).astype(int)
# Create the figure window
fig = pl.figure(figsize=(10, 7))
# Create three different models based on max_depth
for k, depth in enumerate([1, 3, 6, 10]):
# Create a Decision tree regressor at max_depth = depth
regressor = DecisionTreeRegressor(max_depth=depth)
# Calculate the training and testing scores
sizes, train_scores, test_scores = learning_curve(regressor, X, y, cv=cv, train_sizes=train_sizes, scoring='r2')
# Find the mean and standard deviation for smoothing
train_std = np.std(train_scores, axis=1)
train_mean = np.mean(train_scores, axis=1)
test_std = np.std(test_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
# Subplot the learning curve
ax = fig.add_subplot(2, 2, k + 1)
ax.plot(sizes, train_mean, 'o-', color='r', label='Training Score')
ax.plot(sizes, test_mean, 'o-', color='g', label='Testing Score')
ax.fill_between(sizes, train_mean - train_std, train_mean + train_std, alpha=0.15, color='r')
ax.fill_between(sizes, test_mean - test_std, test_mean + test_std, alpha=0.15, color='g')
# Labels
ax.set_title('max_depth = %s' % depth)
ax.set_xlabel('Number of Training Points')
ax.set_ylabel('Score')
ax.set_xlim([0, X.shape[0] * 0.8])
ax.set_ylim([-0.05, 1.05])
# Visual aesthetics
ax.legend(bbox_to_anchor=(1.05, 2.05), loc='lower left', borderaxespad=0.)
fig.suptitle('Decision Tree Regressor Learning Performances', fontsize=16, y=1.03)
fig.tight_layout()
fig.show()
def ModelComplexity(X, y):
"""
Calculates the performance of the model as model complexity increases.
The learning and testing errors rates are then plotted.
"""
# Create 10 cross-validation sets for training and testing
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
# Vary the max_depth parameter from 1 to 10
max_depth = np.arange(1, 11)
# Calculate the training and testing scores
train_scores, test_scores = validation_curve(DecisionTreeRegressor(), X, y, param_name="max_depth",
param_range=max_depth, cv=cv, scoring='r2')
# Find the mean and standard deviation for smoothing
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
# Plot the validation curve
pl.figure(figsize=(7, 5))
pl.title('Decision Tree Regressor Complexity Performance')
pl.plot(max_depth, train_mean, 'o-', color='r', label='Training Score')
pl.plot(max_depth, test_mean, 'o-', color='g', label='Validation Score')
pl.fill_between(max_depth, train_mean - train_std, train_mean + train_std, alpha=0.15, color='r')
pl.fill_between(max_depth, test_mean - test_std, test_mean + test_std, alpha=0.15, color='g')
# Visual aesthetics
pl.legend(loc='lower right')
pl.xlabel('Maximum Depth')
pl.ylabel('Score')
pl.ylim([-0.05, 1.05])
pl.show()
def PredictTrials(X, y, fitter, data):
""" Performs trials of fitting and predicting data. """
# Store the predicted prices
prices = []
for k in range(10):
# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=k)
# Fit the data
reg = fitter(X_train, y_train)
# Make a prediction
pred = reg.predict([data[0]])[0]
prices.append(pred)
# Result
print("Trial {}: ${:,.2f}".format(k + 1, pred))
# Display price range
print("\nRange in prices: ${:,.2f}".format(max(prices) - min(prices)))
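# A minimal usage sketch (added illustration; synthetic data, not the Boston
# set). Left commented out because this module expects an IPython session --
# the get_ipython() call above fails in a plain interpreter.
# rng = np.random.RandomState(0)
# X_demo = rng.rand(200, 3)
# y_demo = X_demo @ np.array([3.0, -2.0, 1.0]) + 0.1 * rng.randn(200)
# ModelLearning(X_demo, y_demo)
# ModelComplexity(X_demo, y_demo)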
|
[
"kambembotresor@gmail.com"
] |
kambembotresor@gmail.com
|
0a9b0d33a479f994ccc21835586abd03aef43929
|
dfac8913a8f9a7ee9615cd3e86be87cc8f268095
|
/net-tools/coroutineDemo/Gevent_Group.py
|
01d1d9b107b5950d2f2976a49546bcc1ffa2be8d
|
[] |
no_license
|
speence/python-tools
|
f894e4971bce3d274a01dd43f2f8f33a311783b9
|
287aebfd68723c22ed2bae154b30c4fa8a4f56c1
|
refs/heads/master
| 2021-05-12T07:46:50.637131
| 2015-02-03T01:29:09
| 2015-02-03T01:29:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 820
|
py
|
import gevent
from gevent import getcurrent
from gevent.pool import Group
# example 1
def talk(msg):
for i in xrange(3):
print(msg)
def Demo1():
g1 = gevent.spawn(talk, 'bar')
g2 = gevent.spawn(talk, 'foo')
g3 = gevent.spawn(talk, 'fizz')
group = Group()
group.add(g1)
group.add(g2)
group.join()
group.add(g3)
group.join()
# example 2
group = Group()
def hello_from(n):
print('Size of group %s' % len(group))
print('Hello from Greenlet %s' % id(getcurrent()))
group.map(hello_from, xrange(3))
def intensive(n):
gevent.sleep(3 - n)
return 'task', n
print('Ordered')
ogroup = Group()
for i in ogroup.imap(intensive, xrange(3)):
print(i)
print('Unordered')
igroup = Group()
for i in igroup.imap_unordered(intensive, xrange(20)):
print(i)
|
[
"liudepei1129@gmail.com"
] |
liudepei1129@gmail.com
|
4d913b9bc6e6760166c0ea0aa4e1fc5244a33c68
|
b05ae9148e58f421ddc0125318cf65bfbda85264
|
/cms/migrations/0008_auto_20190715_1435.py
|
fe46766d491d6d581aa4c6dfff1755de460341ec
|
[] |
no_license
|
icalunyu/lovisatya
|
f5f072ab29f88d6e7c7708a2f3d9714acd33c8e3
|
6e3ef4128809416711cd941a18b4cab069058e17
|
refs/heads/master
| 2020-06-20T06:39:19.124968
| 2019-07-15T15:48:33
| 2019-07-15T15:48:33
| 197,027,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
# Generated by Django 2.2.3 on 2019-07-15 14:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0007_auto_20190714_1504'),
]
operations = [
migrations.AddField(
model_name='rsvp',
name='phone',
field=models.IntegerField(default=1),
preserve_default=False,
),
migrations.AlterField(
model_name='rsvp',
name='guest',
            field=models.CharField(choices=[('', 'Will you bring a plus one?'), ('notalone', 'Yes, I will.'), ('alone', "No, I'm coming alone.")], max_length=255),
),
]
|
[
"dipinjemangger@El-Capitan.local"
] |
dipinjemangger@El-Capitan.local
|
24fa38cb1a5db921dd96c5f040aa58a9b77b65e4
|
7950e35b32e252690a82faf5aefc06e433e9bd34
|
/cleverhans/serial.py
|
9fc379d763ec15b0686422d9d09b7d66f61d0654
|
[] |
no_license
|
machanic/cleverhans_adversarial_example_gen
|
b717da4b803cec2b67d0fc730392b137d20682d5
|
d5300f8a1228b4c9fe26568a956f06c36df03627
|
refs/heads/master
| 2022-11-21T19:10:01.258478
| 2020-07-21T15:45:03
| 2020-07-21T15:45:03
| 173,035,907
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,230
|
py
|
"""Serialization functionality.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import joblib
import tensorflow as tf
from cleverhans.model import Model
from cleverhans.utils import ordered_union
from cleverhans.utils import safe_zip
class PicklableVariable(object):
"""
A wrapper around a Variable that makes it picklable.
The name of the Variable will not be reliable, only the value. Models
intended to be picklable should identify variables by referencing
Python objects rather than by using TensorFlow's names.
TensorFlow Variables have different values associated with each Session.
For this class, the value associated with the default Session will be used
for both saving and loading, so both operations require that a default
Session has been selected.
Pickle is not secure. Unpickle only files you made yourself.
See cleverhans_tutorials/mnist_tutorial_picklable.py for examples of a
complete model training, pickling, and unpickling process using
PicklableVariable.
See cleverhans.picklable_model for models built using PicklableVariable.
"""
def __init__(self, *args, **kwargs):
self.var = tf.Variable(*args, **kwargs)
def __getstate__(self):
sess = tf.get_default_session()
if sess is None:
raise RuntimeError("PicklableVariable requires a default "
"TensorFlow session")
return {'var': sess.run(self.var)}
def __setstate__(self, d):
self.var = tf.Variable(d['var'])
sess = tf.get_default_session()
if sess is None:
raise RuntimeError("PicklableVariable requires a default "
"TensorFlow session")
sess.run(self.var.initializer)
class NoRefModel(Model):
"""
A Model that can be pickled because it contains no references to any
Variables (e.g. it identifies Variables only by name).
The Model must be able to find all of its Variables via get_vars
for them to be pickled.
Note that NoRefModel may have different Variable names after it is
restored, e.g. if the unpickling is run with a different enclosing
scope. NoRefModel will still work in these circumstances as long
as get_params returns the same order of Variables after unpickling
as it did before pickling.
See also cleverhans.picklable_model for a different, complementary
pickling strategy: models that can be pickled because they use *only*
references to Variables and work regardless of Variable names.
"""
def __getstate__(self):
# Serialize everything except the Variables
out = self.__dict__.copy()
# The base Model class adds this tf reference to self
# We mustn't pickle anything tf, this will need to be
# regenerated after the model is reloaded.
if "_dummy_input" in out:
del out["_dummy_input"]
# Add the Variables
sess = tf.get_default_session()
if sess is None:
raise RuntimeError("NoRefModel requires a default "
"TensorFlow session")
tf_variables = self.get_vars()
out[VARS] = sess.run(tf_variables)
out[VAR_NAMES] = [var.name for var in tf_variables]
return out
def __setstate__(self, d):
tf_variables = d[VARS]
del d[VARS]
tf_variable_names = None
# older joblib files may not have "_tf_variable_names"
if VAR_NAMES in d:
tf_variable_names = d[VAR_NAMES]
del d[VAR_NAMES]
else:
warnings.warn("This joblib file has no " + VAR_NAMES + " field. "
"The field may become required on or after 2019-04-11."
"You can make your file compatible with the new format by"
" loading the file and re-saving it.")
# Deserialize everything except the Variables
self.__dict__ = d
# Deserialize the Variables
sess = tf.get_default_session()
if sess is None:
raise RuntimeError("NoRefModel requires a default "
"TensorFlow session")
cur_vars = self.get_vars()
if len(cur_vars) != len(tf_variables):
print("Model format mismatch")
print("Current model has " + str(len(cur_vars)) + " variables")
print("Saved model has " + str(len(tf_variables)) + " variables")
print("Names of current vars:")
for var in cur_vars:
print("\t" + var.name)
if tf_variable_names is not None:
print("Names of saved vars:")
for name in tf_variable_names:
print("\t" + name)
else:
print("Saved vars use old format, no names available for them")
assert False
found = [False] * len(cur_vars)
if tf_variable_names is not None:
# New version using the names to handle changes in ordering
for value, name in safe_zip(tf_variables, tf_variable_names):
value_found = False
for idx, cur_var in enumerate(cur_vars):
if cur_var.name == name:
assert not found[idx]
value_found = True
found[idx] = True
cur_var.load(value, sess)
break
assert value_found
assert all(found)
else:
# Old version that works if and only if the order doesn't change
for var, value in safe_zip(cur_vars, tf_variables):
var.load(value, sess)
def get_vars(self):
"""
Provides access to the model's Variables.
This may include Variables that are not parameters, such as batch
norm running moments.
:return: A list of all Variables defining the model.
"""
# Catch eager execution and assert function overload.
try:
if tf.executing_eagerly():
raise NotImplementedError("For Eager execution - get_vars "
"must be overridden.")
except AttributeError:
pass
done = False
tried_to_make_params = False
while not done:
# Most models in cleverhans use only trainable variables and do not
# make sure the other collections are updated correctly.
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
self.scope + "/")
# When wrapping other code, such as the CIFAR 10 challenge models,
# we need to make sure we get the batch norm running averages as well
# as the trainable variables.
model_vars = tf.get_collection(tf.GraphKeys.MODEL_VARIABLES,
self.scope + "/")
scope_vars = ordered_union(trainable_vars, model_vars)
if len(scope_vars) > 0:
done = True
else:
assert not tried_to_make_params
tried_to_make_params = True
self.make_params()
# Make sure no variables have been added or removed
if hasattr(self, "num_vars"):
assert self.num_vars == len(scope_vars)
else:
self.num_vars = len(scope_vars)
return scope_vars
def save(filepath, obj):
"""Saves an object to the specified filepath using joblib.
joblib is like pickle but will save NumPy arrays as separate files for
greater efficiency.
:param filepath: str, path to save to
:obj filepath: object to save
"""
joblib.dump(obj, filepath)
def load(filepath):
"""Returns an object stored via `save`
"""
obj = joblib.load(filepath)
return obj
VARS = "_tf_variables"
VAR_NAMES = "_tf_variable_names"
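# Hedged usage sketch (added; not part of the upstream module). Both save()
# and load() of a NoRefModel require a default Session; `make_model` is an
# assumed factory returning any picklable cleverhans model.
def _example_round_trip(make_model, filepath='model.joblib'):
    with tf.Session().as_default():
        model = make_model()
        save(filepath, model)   # __getstate__ runs the Variables in the Session
    with tf.Session().as_default():
        return load(filepath)   # __setstate__ restores Variable values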
|
[
"sharpstill@163.com"
] |
sharpstill@163.com
|
701d80bc1243f7dba692f2859ab68a0de98476a8
|
546d3804d05bdd999fbe499d2aad6b5ad2a1dcf0
|
/타이타닉 데이터 분석 및 시각화.py
|
837da2e28df41c5d46592e5f4f24fb1b203ec4fc
|
[] |
no_license
|
RockhoRockho/Titanic-anlaysis_visualization
|
40237c0bef5b87e8b816ba82baf1006ceb61b262
|
c9a198df9d156fe5af507eb1764b78b162cf3a30
|
refs/heads/main
| 2023-07-23T08:34:32.294521
| 2021-09-03T13:27:20
| 2021-09-03T13:27:20
| 402,753,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,732
|
py
|
#!/usr/bin/env python
# coding: utf-8
# # 타이타닉 데이터 분석 및 시각화
# ----
# In[1]:
import numpy as np
import pandas as pd
import seaborn as sns
# In[2]:
sns.set_style('whitegrid')
# In[3]:
titanic = sns.load_dataset('titanic')
# In[4]:
titanic.head()
# In[5]:
titanic.describe()
# In[6]:
titanic.dropna().describe()
# In[7]:
titanic.var()
# In[8]:
titanic.mad()
# In[9]:
titanic.groupby('class').count()
# In[10]:
sns.countplot(y='class', data=titanic)
# In[11]:
sns.countplot(y='sex', data=titanic)
# In[12]:
sns.countplot(y='alive', data=titanic)
# In[14]:
sns.countplot(y='alone', data=titanic)
# In[15]:
titanic.groupby('class').std()
# In[17]:
titanic.groupby('class')['fare'].median()
# In[18]:
titanic.query('alive == "yes"')
# In[19]:
titanic.query('alive == "yes"').groupby('class').count()
# In[20]:
titanic.groupby('class')['age'].describe()
# In[21]:
titanic.query("alive == 'yes'").groupby('class').describe()
# In[24]:
titanic.groupby('sex')['age'].aggregate([min, np.median, max])
# In[26]:
titanic.query("age > 30").groupby('class').median()
# In[27]:
titanic.query("fare < 20").groupby('class').median()
# In[30]:
titanic.groupby(['class', 'sex'])['age'].mean().unstack()
# In[33]:
sns.catplot(x='sex', y='age',
hue='class', kind='bar',
data=titanic)
# In[34]:
sns.catplot(x='who', y='age',
hue='class', kind='bar',
data=titanic)
# In[36]:
titanic.groupby(['class', 'sex'])['fare'].mean().unstack()
# In[37]:
titanic.groupby(['class', 'who'])['fare'].mean().unstack()
# In[38]:
sns.catplot(x='sex', y='fare',
hue='class', kind='bar',
data=titanic)
# In[39]:
sns.catplot(x='who', y='fare',
hue='class', kind='bar',
data=titanic)
# In[41]:
titanic.groupby(['class', 'sex'])['survived'].mean().unstack()
# In[42]:
titanic.pivot_table('survived', index='class', columns='sex')
# In[43]:
titanic.pivot_table('survived', index='class', columns='who')
# In[44]:
sns.catplot(x='class', y='survived',
hue='sex', kind='bar',
data=titanic)
# In[45]:
sns.catplot(x='class', y='survived',
hue='who', kind='bar',
data=titanic)
# In[46]:
age = pd.cut(titanic['age'], [0, 18, 40, 80])
titanic.pivot_table('survived', ['sex', age], 'class')
# In[47]:
age = pd.cut(titanic['age'], [0, 18, 40, 80])
titanic.pivot_table('survived', ['who', age], 'class')
# In[50]:
fare = pd.qcut(titanic['fare'], 3)
titanic.pivot_table('survived', ['who', age], [fare, 'class'])
# In[51]:
titanic.pivot_table('survived', index='who', columns='class', margins=True)
# In[52]:
sns.catplot(x='class', y='survived',
col='who', kind='bar',
data=titanic)
# In[53]:
titanic.pivot_table('survived', index='deck', columns='class', margins=True)
# In[56]:
sns.countplot(x='deck', data=titanic)
# In[57]:
sns.countplot(y='deck', hue='class', data=titanic)
# In[58]:
sns.catplot(x='survived', y='deck',
hue='class', kind='bar',
data=titanic)
# In[59]:
titanic.pivot_table('survived', index='embark_town', columns='class', margins=True)
# In[60]:
sns.countplot(y='embark_town', data=titanic)
# In[61]:
sns.catplot(x='survived', y='embark_town',
hue='class', kind='bar',
data=titanic)
# In[62]:
sns.catplot(x='sibsp', y='survived',
kind='bar', data=titanic)
# In[63]:
sns.catplot(x='parch', y='survived',
kind='bar', data=titanic)
# In[64]:
sns.catplot(x='alone', y='survived',
kind='bar', data=titanic)
# In[ ]:
|
[
"noreply@github.com"
] |
noreply@github.com
|
3178fff39f09febceb828ec936b3cf022ba96a20
|
d374c0359a80ffc2fbc66fc405a882fe5534ea6f
|
/list01/ex10.py
|
c528e2dd6136606985a2c1b8b5a7d50e4b038a53
|
[
"MIT"
] |
permissive
|
flavio-brusamolin/py-scripts
|
e941e7d55b54e7478f0d8df7fdb425d887821dda
|
d80b9efc7fc1b59a5cf110180efc03cdc839cd23
|
refs/heads/master
| 2022-10-07T17:41:07.339899
| 2020-06-06T06:04:42
| 2020-06-06T06:04:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
distance = float(input('Enter trip distance in Km: '))
print(f'Value: {distance * 0.5 if distance <= 200 else distance * 0.45}')
|
[
"flaviobrusamolin@gec.inatel.br"
] |
flaviobrusamolin@gec.inatel.br
|
aa4c1d64ab5007478c6035cf4a0c3268d542695f
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_tow.py
|
bbb4161da8d142413231367d45dc13fd41964c06
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
# class header
class _TOW():
    def __init__(self):
        self.name = "TOW"
        self.definitions = [u"to pull someone's vehicle using a rope or chain tied to your vehicle: ", u'being pulled along: ']
        self.parents = []
        self.children = []
        self.properties = []
        self.jsondata = {}
        self.specie = 'nouns'
    def run(self, obj1 = [], obj2 = []):
        return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
5665e09c10141bbaec53e616bd41f19c27445136
|
102e7f138f6abd4eefaedb69f48a82b60f7cf193
|
/day21/demon4.py
|
06cad2637aa03bf27ac71587366ff9e38856cb39
|
[] |
no_license
|
Ivan-yyq/livePython-2018
|
33fd455d976d08188f751574e8ee377c52aabac3
|
be18999839a28a322ce58434348b25b62647c65e
|
refs/heads/master
| 2020-03-16T21:36:45.648373
| 2018-07-27T02:58:33
| 2018-07-27T02:58:33
| 129,837,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 917
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2018/5/19 21:27
# @Author  : lingxiangxiang
# @File    : demon4.py
import time
import multiprocessing
from multiprocessing import Value, Array, Manager
def add1(value, number):
    print("start add1 number= {0}".format(number.value))
    for i in range(1, 5):
        with number.get_lock():  # += on a shared Value is read-modify-write, not atomic
            number.value += value
        print("number = {0}".format(number.value))
def add3(value, number):
    print("start add3 number= {0}".format(number.value))
    try:
        for i in range(1, 5):
            with number.get_lock():
                number.value += value
            print("number = {0}".format(number.value))
    except Exception as e:
        raise e
if __name__ == '__main__':
    print("start main")
    number = Value('d', 0)
    p1 = multiprocessing.Process(target=add1, args=(1, number))
    p3 = multiprocessing.Process(target=add3, args=(3, number))
    p1.start()
    p3.start()
    p1.join()  # wait for both workers before exiting
    p3.join()
    print("end main")
|
[
"15625087150@163.com"
] |
15625087150@163.com
|
61e722729eb792ee9f5ba92ae25d075658ce71ff
|
f7947f8a53601f13e26905e837d96e02cc9fc88a
|
/converted/export_char_indices.py.py
|
1468aaf6f723b2dd1a3e01dd9c9283711e3a1a21
|
[] |
no_license
|
Steve0929/TaylorFlow
|
c059db617ce0fdde8bc18606ad60bd4eb4364b40
|
914bdc0e22cf3763444a0650c3ab64066c104f8d
|
refs/heads/master
| 2020-03-22T18:22:20.144280
| 2018-07-27T23:10:08
| 2018-07-27T23:10:08
| 140,455,695
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,439
|
py
|
import numpy as np
import json
with open('C:\\Users\\chimi\\Desktop\\tay.txt') as f:
    text = f.read().lower()
print('Text Corpus length:', len(text))
maxlen = 25
# We sample a new sequence every `step` characters
step = 3
# This holds our extracted sequences
sentences = []
# This holds the targets (the follow-up characters)
next_chars = []
# range(start_value, end_value, step)
for i in range(0, len(text) - maxlen, step):
    sentences.append(text[i: i + maxlen])
    next_chars.append(text[i + maxlen])
print('Number of sequences:', len(sentences))
# List of unique characters in the corpus
chars = sorted(list(set(text)))
print('Unique characters:', len(chars))
# Dictionary mapping unique characters to their index in `chars`
char_indices = dict((char, chars.index(char)) for char in chars)
with open('C:\\Users\\chimi\\Desktop\\file.txt', 'w') as file:
    file.write(json.dumps(char_indices))
# Next, one-hot encode the characters into binary arrays.
print('Vectorization...')
x = np.zeros((len(sentences), maxlen, len(chars)), dtype=bool)  # np.bool is deprecated; plain bool works everywhere
y = np.zeros((len(sentences), len(chars)), dtype=bool)
for i, sentence in enumerate(sentences):
    for t, char in enumerate(sentence):
        x[i, t, char_indices[char]] = 1
    y[i, char_indices[next_chars[i]]] = 1
# To export a trained Keras `model` for TensorFlow.js, uncomment:
# import tensorflowjs as tfjs
#tfjs.converters.save_keras_model(model, 'C:\\Users\\chimi\\Desktop')
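# Round-trip check (added sketch; not in the original script): reload the
# exported mapping and build the inverse index->char table used when sampling.
with open('C:\\Users\\chimi\\Desktop\\file.txt') as f:
    reloaded = json.load(f)
indices_char = {index: char for char, index in reloaded.items()}
assert all(indices_char[i] == c for c, i in char_indices.items())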
|
[
"noreply@github.com"
] |
noreply@github.com
|
cd5736c1eebcc8037098247e603314f43137a4bc
|
43bb3856e40d9ca3d7378b951b6dec9788675313
|
/scripts/main.py
|
20481d0e998ad616fb0b87a11be87482edef6f2b
|
[] |
no_license
|
abhib85/Assignment
|
3aa57c6cb4581a60ef85d204944429b8741d3356
|
a62f7a8812a7d26147c572911305c9398e493aad
|
refs/heads/master
| 2023-03-19T08:49:12.624847
| 2021-03-04T06:06:50
| 2021-03-04T06:06:50
| 344,368,717
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,629
|
py
|
# importing modules
import pandas as pd
from configparser import ConfigParser
import os
import glob
import logging
logging.basicConfig(filename='C:/Users/abhi/PycharmProjects/pythonProject_11feb21/logs/logt', level=logging.DEBUG, filemode='a', format='%(asctime)s:%(levelname)s:%(message)s')
# Folders:
# client  : incoming excel sheets
# network : merged excel files updated from the client folder
# config  : storing all paths
# log     : logging
# scripts : main script
# object creation of ConfigParser() class
config = ConfigParser()
configFilePath = r"C:/Users/abhi/PycharmProjects/pythonProject_11feb21/config/config.cfg"
print(config.read(configFilePath))
# Reading sections of config file
# print(config.sections())
# print(config["path"]["client"])
# print(config["path"]["logs"])
# print(config["path"]["scripts"])
# print(config["path"]["network"])
path_network = config["path"]["network"]
path_client = config["path"]["client"]
path_scripts = config["path"]["scripts"]
path_logs = config["path"]["logs"]
print(path_network)
print(path_client)
print(path_scripts)
print(path_logs)
class Student:
    def students_data(self):
        print("program start")
        current = path_client
        print(current)
        b = os.listdir(current)
        print(b)
        try:
            if len(b) > 1:
                logging.info("Files are greater than 1")
                # chdir to the configured folder, not the literal string "path_client"
                os.chdir(path_client)
                extension = 'csv'
                all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
                print(all_filenames)
                # combine all files in the list (once, not once per file)
                combined_csv = pd.concat([pd.read_csv(f) for f in all_filenames])
                # export to csv
                combined_csv.to_csv(path_network + "combinedf_csv.csv", index=False)
            elif len(b) == 0:
                logging.info("No file exist")
            else:
                logging.info("There is single file")
        except Exception:
            logging.exception("There was some error")
        finally:
            logging.info("Operation Completed")
s = Student()
s.students_data()
#
# print("Importing Modules")
# print("File Exists")
# print("File doesnt exist")
# print("Merging the files")
# print("File merge completed")
# print("End of Program")
# print("Start of Program")
|
[
"abhib85@yahoo.co.in"
] |
abhib85@yahoo.co.in
|
9d189e75fd3269e07584170d02020eb5de7afdea
|
b333ead2d9b2fd64733d32974e53ee9bbeceadb2
|
/apps/oranizations/__init__.py
|
1c36daee004a8ec3cd60f99efaab40a73093c315
|
[] |
no_license
|
Nideayu/bwonline
|
4774518d2105e093195407f2f5e2617f4b053db7
|
aecfc99aad6d122540ab7d1cf9b94ffcfd0b7172
|
refs/heads/master
| 2023-02-24T02:53:12.720462
| 2021-01-20T10:07:26
| 2021-01-20T10:07:26
| 326,538,239
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 59
|
py
|
default_app_config = 'oranizations.apps.OranizationsConfig'
|
[
"1242469403@qq.com"
] |
1242469403@qq.com
|
55fbfc547d984965fbab0a24f74781b31bdce204
|
2a2b36a603e1fefeaca902381b6377a44bb503af
|
/api/tpRequest.py
|
eb5f48ce67b85aa56bddc10b21c8075af4586aab
|
[] |
no_license
|
nenad/TPpy
|
1c3f848df48f4cef4a6dcd3fb01efa401d0457a3
|
8b261cdea8e1d418288397d8e62ad2b9a7246385
|
refs/heads/master
| 2021-05-30T23:26:26.344356
| 2016-04-15T13:14:57
| 2016-04-15T13:14:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,813
|
py
|
import json
from configuration import config
from requests.auth import HTTPBasicAuth
import requests
class TPRequest:
def __init__(self):
self.query = ""
self.entity_id = ""
self.username = config.get('username')
self.password = config.get('password')
self.basicUrl = ""
self.includedProperties = ['Name']
if config.get_project_var('user_id') is None:
self._set_user()
if config.get_project_var('process_id') is None:
self._set_process_id()
def setBasicUrl(self, url):
self.basicUrl = url
def setIncludedProperties(self, properties):
self.includedProperties = properties
def setId(self, entity_id):
        self.entity_id = "/" + str(entity_id)
def setQuery(self, query):
self.query = '&where=' + query
def get(self, url="", return_format='json'):
return_format = "?format=" + return_format
properties = "&include=[" + ",".join(self.includedProperties) + "]"
if url == "":
url = self.basicUrl
return requests.get(url + self.entity_id + return_format + properties + self.query,
auth=HTTPBasicAuth(self.username, self.password))
def post(self, url="", data=""):
if url == "":
url = self.basicUrl
if '?' in url:
url += '&format=json'
else:
url += '?format=json'
properties = "&include=[" + ",".join(self.includedProperties) + "]"
return requests.post(url + properties, data, auth=HTTPBasicAuth(self.username, self.password))
def _set_user(self):
from api.tpApi import TPApi
prev_id = self.entity_id
prev_url = self.basicUrl
prev_incl = self.includedProperties
self.setId('loggeduser')
self.setBasicUrl(TPApi().getEntityTypeURL('Users'))
self.setIncludedProperties(['Id', 'FirstName', 'LastName'])
response = self.get()
data = json.loads(response.content)
config.set_project_var('user_id', data['Id'])
self.setBasicUrl(prev_url)
self.entity_id = prev_id
self.setIncludedProperties(prev_incl)
def _set_process_id(self):
from api.tpApi import TPApi
prev_id = self.entity_id
prev_url = self.basicUrl
prev_incl = self.includedProperties
self.entity_id = ''
self.setBasicUrl(TPApi().getEntityTypeURL('Processes'))
self.setQuery('(Name eq "Kanban")')
self.setIncludedProperties(['Id'])
response = self.get()
data = json.loads(response.content)
config.set_project_var('process_id', data['Items'][0]['Id'])
self.setBasicUrl(prev_url)
self.entity_id = prev_id
self.setIncludedProperties(prev_incl)
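def _example_fetch(entity_id):
    # Hedged usage sketch (added; not in the original file): fetch one entity
    # by id. Assumes config credentials are set and that 'UserStories' is a
    # valid TargetProcess entity type for this instance.
    from api.tpApi import TPApi
    req = TPRequest()
    req.setBasicUrl(TPApi().getEntityTypeURL('UserStories'))
    req.setId(entity_id)
    req.setIncludedProperties(['Id', 'Name'])
    return req.get().json()  # requests.Response -> parsed JSON dict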
|
[
"n.stojanovik@voxteneo.com.mk"
] |
n.stojanovik@voxteneo.com.mk
|
f2e9286044675907e079b6077b71208aafa5528d
|
a9b8f84c55aa64d4721de11e34e6fc300453be1b
|
/public/packages/pymongo/v28/pymongo/common.py
|
7c53646dd809b43309aad38e4e69fa55b96ca912
|
[] |
no_license
|
xuning992/tfty
|
f17273db407bb5ca87f583b114a42eb8e83d67fc
|
20785621b933d2d6bdc293e953710faef4268bf6
|
refs/heads/master
| 2022-12-13T22:39:14.696326
| 2017-11-19T15:23:11
| 2017-11-19T15:23:11
| 111,306,251
| 0
| 0
| null | 2022-07-05T21:08:37
| 2017-11-19T15:11:40
|
Python
|
UTF-8
|
Python
| false
| false
| 27,865
|
py
|
# Copyright 2011-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Functions and classes common to multiple pymongo modules."""
import sys
import warnings
from . import read_preferences
from .auth import MECHANISMS
from .read_preferences import ReadPreference
from .errors import ConfigurationError
from ..bson.binary import (OLD_UUID_SUBTYPE, UUID_SUBTYPE,
JAVA_LEGACY, CSHARP_LEGACY)
HAS_SSL = True
try:
import ssl
except ImportError:
HAS_SSL = False
# Jython 2.7 includes an incomplete ssl module. See PYTHON-498.
if sys.platform.startswith('java'):
HAS_SSL = False
# Defaults until we connect to a server and get updated limits.
MAX_BSON_SIZE = 16 * (1024 ** 2)
MAX_MESSAGE_SIZE = 2 * MAX_BSON_SIZE
MIN_WIRE_VERSION = 0
MAX_WIRE_VERSION = 0
MAX_WRITE_BATCH_SIZE = 1000
# What this version of PyMongo supports.
MIN_SUPPORTED_WIRE_VERSION = 0
MAX_SUPPORTED_WIRE_VERSION = 3
# mongod/s 2.6 and above return code 59 when a
# command doesn't exist. mongod versions previous
# to 2.6 and mongos 2.4.x return no error code
# when a command does exist. mongos versions previous
# to 2.4.0 return code 13390 when a command does not
# exist.
COMMAND_NOT_FOUND_CODES = (59, 13390, None)
def raise_config_error(key, dummy):
"""Raise ConfigurationError with the given key name."""
raise ConfigurationError("Unknown option %s" % (key,))
# Mapping of URI uuid representation options to valid subtypes.
_UUID_SUBTYPES = {
'standard': UUID_SUBTYPE,
'pythonLegacy': OLD_UUID_SUBTYPE,
'javaLegacy': JAVA_LEGACY,
'csharpLegacy': CSHARP_LEGACY
}
def validate_boolean(option, value):
"""Validates that 'value' is 'true' or 'false'.
"""
if isinstance(value, bool):
return value
elif isinstance(value, basestring):
if value not in ('true', 'false'):
raise ConfigurationError("The value of %s must be "
"'true' or 'false'" % (option,))
return value == 'true'
raise TypeError("Wrong type for %s, value must be a boolean" % (option,))
def validate_integer(option, value):
"""Validates that 'value' is an integer (or basestring representation).
"""
if isinstance(value, (int, long)):
return value
elif isinstance(value, basestring):
if not value.isdigit():
raise ConfigurationError("The value of %s must be "
"an integer" % (option,))
return int(value)
raise TypeError("Wrong type for %s, value must be an integer" % (option,))
def validate_positive_integer(option, value):
"""Validate that 'value' is a positive integer.
"""
val = validate_integer(option, value)
if val < 0:
raise ConfigurationError("The value of %s must be "
"a positive integer" % (option,))
return val
def validate_readable(option, value):
"""Validates that 'value' is file-like and readable.
"""
if value is None:
return value
    # First make sure it's a string; in Python 3.3, open(True, 'r') succeeds
# Used in ssl cert checking due to poor ssl module error reporting
value = validate_basestring(option, value)
open(value, 'r').close()
return value
def validate_cert_reqs(option, value):
"""Validate the cert reqs are valid. It must be None or one of the three
values ``ssl.CERT_NONE``, ``ssl.CERT_OPTIONAL`` or ``ssl.CERT_REQUIRED``"""
if value is None:
return value
if HAS_SSL:
if value in (ssl.CERT_NONE, ssl.CERT_OPTIONAL, ssl.CERT_REQUIRED):
return value
raise ConfigurationError("The value of %s must be one of: "
"`ssl.CERT_NONE`, `ssl.CERT_OPTIONAL` or "
"`ssl.CERT_REQUIRED" % (option,))
else:
raise ConfigurationError("The value of %s is set but can't be "
"validated. The ssl module is not available"
% (option,))
def validate_positive_integer_or_none(option, value):
"""Validate that 'value' is a positive integer or None.
"""
if value is None:
return value
return validate_positive_integer(option, value)
def validate_basestring(option, value):
"""Validates that 'value' is an instance of `basestring`.
"""
if isinstance(value, basestring):
return value
raise TypeError("Wrong type for %s, value must be an "
"instance of %s" % (option, basestring.__name__))
def validate_basestring_or_none(option, value):
"""Validates that 'value' is an instance of `basestring` or `None`.
"""
if value is None:
return value
return validate_basestring(option, value)
def validate_int_or_basestring(option, value):
"""Validates that 'value' is an integer or string.
"""
if isinstance(value, (int, long)):
return value
elif isinstance(value, basestring):
if value.isdigit():
return int(value)
return value
raise TypeError("Wrong type for %s, value must be an "
"integer or a string" % (option,))
def validate_positive_float(option, value):
"""Validates that 'value' is a float, or can be converted to one, and is
positive.
"""
err = ConfigurationError("%s must be a positive int or float" % (option,))
try:
value = float(value)
except (ValueError, TypeError):
raise err
# float('inf') doesn't work in 2.4 or 2.5 on Windows, so just cap floats at
# one billion - this is a reasonable approximation for infinity
if not 0 < value < 1e9:
raise err
return value
def validate_timeout_or_none(option, value):
"""Validates a timeout specified in milliseconds returning
a value in floating point seconds.
"""
if value is None:
return value
return validate_positive_float(option, value) / 1000.0
def validate_read_preference(dummy, value):
"""Validate read preference for a ReplicaSetConnection.
"""
if value in read_preferences.modes:
return value
# Also allow string form of enum for uri_parser
try:
return read_preferences.mongos_enum(value)
except ValueError:
raise ConfigurationError("Not a valid read preference")
def validate_tag_sets(dummy, value):
"""Validate tag sets for a ReplicaSetConnection.
"""
if value is None:
return [{}]
if not isinstance(value, list):
raise ConfigurationError((
"Tag sets %s invalid, must be a list") % repr(value))
if len(value) == 0:
raise ConfigurationError((
"Tag sets %s invalid, must be None or contain at least one set of"
" tags") % repr(value))
for tags in value:
if not isinstance(tags, dict):
raise ConfigurationError(
"Tag set %s invalid, must be a dict" % repr(tags))
return value
def validate_auth_mechanism(option, value):
"""Validate the authMechanism URI option.
"""
# CRAM-MD5 is for server testing only. Undocumented,
# unsupported, may be removed at any time. You have
# been warned.
if value not in MECHANISMS and value != 'CRAM-MD5':
raise ConfigurationError("%s must be in "
"%s" % (option, MECHANISMS))
return value
def validate_uuid_representation(dummy, value):
"""Validate the uuid representation option selected in the URI.
"""
if value not in _UUID_SUBTYPES.keys():
raise ConfigurationError("%s is an invalid UUID representation. "
"Must be one of "
"%s" % (value, _UUID_SUBTYPES.keys()))
return _UUID_SUBTYPES[value]
def validate_uuid_subtype(dummy, value):
"""Validate the uuid subtype option, a numerical value whose acceptable
values are defined in bson.binary."""
if value not in _UUID_SUBTYPES.values():
raise ConfigurationError("Not a valid setting for uuid_subtype.")
return value
_MECHANISM_PROPS = frozenset(['SERVICE_NAME'])
def validate_auth_mechanism_properties(option, value):
"""Validate authMechanismProperties."""
value = validate_basestring(option, value)
props = {}
for opt in value.split(','):
try:
key, val = opt.split(':')
if key not in _MECHANISM_PROPS:
raise ConfigurationError("%s is not a supported auth "
"mechanism property. Must be one of "
"%s." % (key, tuple(_MECHANISM_PROPS)))
props[key] = val
except ValueError:
raise ConfigurationError("auth mechanism properties must be "
"key:value pairs like SERVICE_NAME:"
"mongodb, not %s." % (opt,))
return props
# journal is an alias for j,
# wtimeoutms is an alias for wtimeout,
# readpreferencetags is an alias for tag_sets.
VALIDATORS = {
'replicaset': validate_basestring_or_none,
'slaveok': validate_boolean,
'slave_okay': validate_boolean,
'safe': validate_boolean,
'w': validate_int_or_basestring,
'wtimeout': validate_integer,
'wtimeoutms': validate_integer,
'fsync': validate_boolean,
'j': validate_boolean,
'journal': validate_boolean,
'connecttimeoutms': validate_timeout_or_none,
'sockettimeoutms': validate_timeout_or_none,
'waitqueuetimeoutms': validate_timeout_or_none,
'waitqueuemultiple': validate_positive_integer_or_none,
'ssl': validate_boolean,
'ssl_keyfile': validate_readable,
'ssl_certfile': validate_readable,
'ssl_cert_reqs': validate_cert_reqs,
'ssl_ca_certs': validate_readable,
'readpreference': validate_read_preference,
'read_preference': validate_read_preference,
'readpreferencetags': validate_tag_sets,
'tag_sets': validate_tag_sets,
'secondaryacceptablelatencyms': validate_positive_float,
'secondary_acceptable_latency_ms': validate_positive_float,
'auto_start_request': validate_boolean,
'use_greenlets': validate_boolean,
'authmechanism': validate_auth_mechanism,
'authsource': validate_basestring,
'gssapiservicename': validate_basestring,
'authmechanismproperties': validate_auth_mechanism_properties,
'uuidrepresentation': validate_uuid_representation,
'socketkeepalive': validate_boolean
}
_AUTH_OPTIONS = frozenset(['gssapiservicename', 'authmechanismproperties'])
def validate_auth_option(option, value):
"""Validate optional authentication parameters.
"""
lower, value = validate(option, value)
if lower not in _AUTH_OPTIONS:
raise ConfigurationError('Unknown '
'authentication option: %s' % (option,))
return lower, value
def validate(option, value):
"""Generic validation function.
"""
lower = option.lower()
validator = VALIDATORS.get(lower, raise_config_error)
value = validator(option, value)
return lower, value
SAFE_OPTIONS = frozenset([
'w',
'wtimeout',
'wtimeoutms',
'fsync',
'j',
'journal'
])
class WriteConcern(dict):
def __init__(self, *args, **kwargs):
"""A subclass of dict that overrides __setitem__ to
validate write concern options.
"""
super(WriteConcern, self).__init__(*args, **kwargs)
def __setitem__(self, key, value):
if key not in SAFE_OPTIONS:
raise ConfigurationError("%s is not a valid write "
"concern option." % (key,))
key, value = validate(key, value)
super(WriteConcern, self).__setitem__(key, value)
class BaseObject(object):
"""A base class that provides attributes and methods common
to multiple pymongo classes.
SHOULD NOT BE USED BY DEVELOPERS EXTERNAL TO MONGODB.
"""
def __init__(self, **options):
self.__slave_okay = False
self.__read_pref = ReadPreference.PRIMARY
self.__tag_sets = [{}]
self.__secondary_acceptable_latency_ms = 15
self.__safe = None
self.__uuid_subtype = OLD_UUID_SUBTYPE
self.__write_concern = WriteConcern()
self.__set_options(options)
if (self.__read_pref == ReadPreference.PRIMARY
and self.__tag_sets != [{}]):
raise ConfigurationError(
"ReadPreference PRIMARY cannot be combined with tags")
# If safe hasn't been implicitly set by write concerns then set it.
if self.__safe is None:
if options.get("w") == 0:
self.__safe = False
else:
self.__safe = validate_boolean('safe',
options.get("safe", True))
# Note: 'safe' is always passed by Connection and ReplicaSetConnection
# Always do the most "safe" thing, but warn about conflicts.
if self.__safe and options.get('w') == 0:
warnings.warn("Conflicting write concerns: %s. Write concern "
"options were configured, but w=0 disables all "
"other options." % self.write_concern,
UserWarning)
def __set_safe_option(self, option, value):
"""Validates and sets getlasterror options for this
object (Connection, Database, Collection, etc.)
"""
if value is None:
self.__write_concern.pop(option, None)
else:
self.__write_concern[option] = value
if option != "w" or value != 0:
self.__safe = True
def __set_options(self, options):
"""Validates and sets all options passed to this object."""
for option, value in options.iteritems():
if option in ('slave_okay', 'slaveok'):
self.__slave_okay = validate_boolean(option, value)
elif option in ('read_preference', "readpreference"):
self.__read_pref = validate_read_preference(option, value)
elif option in ('tag_sets', 'readpreferencetags'):
self.__tag_sets = validate_tag_sets(option, value)
elif option == 'uuidrepresentation':
self.__uuid_subtype = validate_uuid_subtype(option, value)
elif option in (
'secondaryacceptablelatencyms',
'secondary_acceptable_latency_ms'
):
self.__secondary_acceptable_latency_ms = \
validate_positive_float(option, value)
elif option in SAFE_OPTIONS:
if option == 'journal':
self.__set_safe_option('j', value)
elif option == 'wtimeoutms':
self.__set_safe_option('wtimeout', value)
else:
self.__set_safe_option(option, value)
def __set_write_concern(self, value):
"""Property setter for write_concern."""
if not isinstance(value, dict):
raise ConfigurationError("write_concern must be an "
"instance of dict or a subclass.")
# Make a copy here to avoid users accidentally setting the
# same dict on multiple instances.
wc = WriteConcern()
for k, v in value.iteritems():
# Make sure we validate each option.
wc[k] = v
self.__write_concern = wc
def __get_write_concern(self):
"""The default write concern for this instance.
Supports dict style access for getting/setting write concern
options. Valid options include:
- `w`: (integer or string) If this is a replica set, write operations
will block until they have been replicated to the specified number
or tagged set of servers. `w=<int>` always includes the replica set
primary (e.g. w=3 means write to the primary and wait until
replicated to **two** secondaries). **Setting w=0 disables write
acknowledgement and all other write concern options.**
- `wtimeout`: (integer) Used in conjunction with `w`. Specify a value
in milliseconds to control how long to wait for write propagation
to complete. If replication does not complete in the given
timeframe, a timeout exception is raised.
- `j`: If ``True`` block until write operations have been committed
to the journal. Cannot be used in combination with `fsync`. Prior
to MongoDB 2.6 this option was ignored if the server was running
without journaling. Starting with MongoDB 2.6 write operations will
fail with an exception if this option is used when the server is
running without journaling.
- `fsync`: If ``True`` and the server is running without journaling,
blocks until the server has synced all data files to disk. If the
server is running with journaling, this acts the same as the `j`
option, blocking until write operations have been committed to the
journal. Cannot be used in combination with `j`.
>>> m = pymongo.MongoClient()
>>> m.write_concern
{}
>>> m.write_concern = {'w': 2, 'wtimeout': 1000}
>>> m.write_concern
{'wtimeout': 1000, 'w': 2}
>>> m.write_concern['j'] = True
>>> m.write_concern
{'wtimeout': 1000, 'j': True, 'w': 2}
>>> m.write_concern = {'j': True}
>>> m.write_concern
{'j': True}
>>> # Disable write acknowledgement and write concern
...
>>> m.write_concern['w'] = 0
.. note:: Accessing :attr:`write_concern` returns its value
(a subclass of :class:`dict`), not a copy.
.. warning:: If you are using :class:`~pymongo.connection.Connection`
or :class:`~pymongo.replica_set_connection.ReplicaSetConnection`
make sure you explicitly set ``w`` to 1 (or a greater value) or
:attr:`safe` to ``True``. Unlike calling
:meth:`set_lasterror_options`, setting an option in
:attr:`write_concern` does not implicitly set :attr:`safe`
to ``True``.
"""
# To support dict style access we have to return the actual
# WriteConcern here, not a copy.
return self.__write_concern
write_concern = property(__get_write_concern, __set_write_concern)
def __get_slave_okay(self):
"""DEPRECATED. Use :attr:`read_preference` instead.
.. versionchanged:: 2.1
Deprecated slave_okay.
.. versionadded:: 2.0
"""
return self.__slave_okay
def __set_slave_okay(self, value):
"""Property setter for slave_okay"""
warnings.warn("slave_okay is deprecated. Please use "
"read_preference instead.", DeprecationWarning,
stacklevel=2)
self.__slave_okay = validate_boolean('slave_okay', value)
slave_okay = property(__get_slave_okay, __set_slave_okay)
def __get_read_pref(self):
"""The read preference mode for this instance.
See :class:`~pymongo.read_preferences.ReadPreference` for
available options.
.. versionadded:: 2.1
"""
return self.__read_pref
def __set_read_pref(self, value):
"""Property setter for read_preference"""
self.__read_pref = validate_read_preference('read_preference', value)
read_preference = property(__get_read_pref, __set_read_pref)
def __get_acceptable_latency(self):
"""Any replica-set member whose ping time is within
secondary_acceptable_latency_ms of the nearest member may accept
reads. Defaults to 15 milliseconds.
See :class:`~pymongo.read_preferences.ReadPreference`.
.. versionadded:: 2.3
.. note:: ``secondary_acceptable_latency_ms`` is ignored when talking
to a replica set *through* a mongos. The equivalent is the
localThreshold_ command line option.
.. _localThreshold: http://docs.mongodb.org/manual/reference/mongos/#cmdoption-mongos--localThreshold
"""
return self.__secondary_acceptable_latency_ms
def __set_acceptable_latency(self, value):
"""Property setter for secondary_acceptable_latency_ms"""
self.__secondary_acceptable_latency_ms = (validate_positive_float(
'secondary_acceptable_latency_ms', value))
secondary_acceptable_latency_ms = property(
__get_acceptable_latency, __set_acceptable_latency)
def __get_tag_sets(self):
"""Set ``tag_sets`` to a list of dictionaries like [{'dc': 'ny'}] to
read only from members whose ``dc`` tag has the value ``"ny"``.
To specify a priority-order for tag sets, provide a list of
tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag
set, ``{}``, means "read from any member that matches the mode,
ignoring tags." ReplicaSetConnection tries each set of tags in turn
until it finds a set of tags with at least one matching member.
.. seealso:: `Data-Center Awareness
<http://www.mongodb.org/display/DOCS/Data+Center+Awareness>`_
.. versionadded:: 2.3
"""
return self.__tag_sets
def __set_tag_sets(self, value):
"""Property setter for tag_sets"""
self.__tag_sets = validate_tag_sets('tag_sets', value)
tag_sets = property(__get_tag_sets, __set_tag_sets)
def __get_uuid_subtype(self):
"""This attribute specifies which BSON Binary subtype is used when
storing UUIDs. Historically UUIDs have been stored as BSON Binary
subtype 3. This attribute is used to switch to the newer BSON Binary
subtype 4. It can also be used to force legacy byte order and subtype
compatibility with the Java and C# drivers. See the :mod:`bson.binary`
module for all options."""
return self.__uuid_subtype
def __set_uuid_subtype(self, value):
"""Sets the BSON Binary subtype to be used when storing UUIDs."""
self.__uuid_subtype = validate_uuid_subtype("uuid_subtype", value)
uuid_subtype = property(__get_uuid_subtype, __set_uuid_subtype)
def __get_safe(self):
"""**DEPRECATED:** Use the 'w' :attr:`write_concern` option instead.
Use getlasterror with every write operation?
.. versionadded:: 2.0
"""
return self.__safe
def __set_safe(self, value):
"""Property setter for safe"""
warnings.warn("safe is deprecated. Please use the"
" 'w' write_concern option instead.",
DeprecationWarning, stacklevel=2)
self.__safe = validate_boolean('safe', value)
safe = property(__get_safe, __set_safe)
def get_lasterror_options(self):
"""DEPRECATED: Use :attr:`write_concern` instead.
Returns a dict of the getlasterror options set on this instance.
.. versionchanged:: 2.4
Deprecated get_lasterror_options.
.. versionadded:: 2.0
"""
warnings.warn("get_lasterror_options is deprecated. Please use "
"write_concern instead.", DeprecationWarning,
stacklevel=2)
return self.__write_concern.copy()
def set_lasterror_options(self, **kwargs):
"""DEPRECATED: Use :attr:`write_concern` instead.
Set getlasterror options for this instance.
Valid options include j=<bool>, w=<int/string>, wtimeout=<int>,
and fsync=<bool>. Implies safe=True.
:Parameters:
- `**kwargs`: Options should be passed as keyword
arguments (e.g. w=2, fsync=True)
.. versionchanged:: 2.4
Deprecated set_lasterror_options.
.. versionadded:: 2.0
"""
warnings.warn("set_lasterror_options is deprecated. Please use "
"write_concern instead.", DeprecationWarning,
stacklevel=2)
for key, value in kwargs.iteritems():
self.__set_safe_option(key, value)
def unset_lasterror_options(self, *options):
"""DEPRECATED: Use :attr:`write_concern` instead.
Unset getlasterror options for this instance.
If no options are passed unsets all getlasterror options.
This does not set `safe` to False.
:Parameters:
- `*options`: The list of options to unset.
.. versionchanged:: 2.4
Deprecated unset_lasterror_options.
.. versionadded:: 2.0
"""
warnings.warn("unset_lasterror_options is deprecated. Please use "
"write_concern instead.", DeprecationWarning,
stacklevel=2)
if len(options):
for option in options:
self.__write_concern.pop(option, None)
else:
self.__write_concern = WriteConcern()
def _get_wc_override(self):
"""Get write concern override.
Used in internal methods that **must** do acknowledged write ops.
We don't want to override user write concern options if write concern
is already enabled.
"""
if self.safe and self.__write_concern.get('w') != 0:
return {}
return {'w': 1}
def _get_write_mode(self, safe=None, **options):
"""Get the current write mode.
Determines if the current write is safe or not based on the
passed in or inherited safe value, write_concern values, or
passed options.
:Parameters:
- `safe`: check that the operation succeeded?
- `**options`: overriding write concern options.
.. versionadded:: 2.3
"""
if safe is not None:
warnings.warn("The safe parameter is deprecated. Please use "
"write concern options instead.", DeprecationWarning,
stacklevel=3)
validate_boolean('safe', safe)
# Passed options override collection level defaults.
if safe is not None or options:
if safe or options:
if not options:
options = self.__write_concern.copy()
                # Backwards compatibility edge case. Call getLastError
# with no options if safe=True was passed but collection
# level defaults have been disabled with w=0.
# These should be equivalent:
# Connection(w=0).foo.bar.insert({}, safe=True)
# MongoClient(w=0).foo.bar.insert({}, w=1)
if options.get('w') == 0:
return True, {}
# Passing w=0 overrides passing safe=True.
return options.get('w') != 0, options
return False, {}
# Fall back to collection level defaults.
# w=0 takes precedence over self.safe = True
if self.__write_concern.get('w') == 0:
return False, {}
elif self.safe or self.__write_concern.get('w', 0) != 0:
return True, self.__write_concern.copy()
return False, {}
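def _validate_examples():
    # Added illustration (not part of upstream pymongo): what the generic
    # validate() entry point returns -- option names are lower-cased and
    # millisecond timeouts come back as float seconds.
    assert validate('connectTimeoutMS', '2000') == ('connecttimeoutms', 2.0)
    assert validate('w', 'majority') == ('w', 'majority')
    try:
        validate('bogusOption', 1)
    except ConfigurationError:
        pass  # unknown options raise ConfigurationError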
|
[
"xuning992@163.com"
] |
xuning992@163.com
|
c2c07f41578eba965c4833092c81a399fa745b4d
|
edec4b9eaf88d910a95df2bbca77c9eac44a7a97
|
/58 Next Day.py
|
9835624b858c2672b52a507fb8bb09b59a7dc4e9
|
[
"MIT"
] |
permissive
|
L0ganhowlett/Python_workbook-Ben_Stephenson
|
1babdf1a5517f773b008a78dea35e9ea302204c6
|
ab711257bd2da9b34c6001a8e09d20bfc0114a3f
|
refs/heads/main
| 2023-03-18T19:54:30.725213
| 2021-03-13T09:48:04
| 2021-03-13T09:48:04
| 347,330,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 926
|
py
|
# 58: Next Day
a = input("Enter the date:")  # expected format: YYYY-MM-DD
b = [1, 3, 5, 7, 8, 10, 12]   # months with 31 days
c = [4, 6, 9, 11]             # months with 30 days
d = a.split('-')
year = int(d[0])
month = int(d[1])
date = int(d[2])
# Gregorian leap-year rule: every 4th year, except centuries,
# unless the year is divisible by 400.
if year % 400 == 0:
    x = True
elif year % 100 == 0:
    x = False
elif year % 4 == 0:
    x = True
else:
    x = False
if 1 <= month <= 12 and 1 <= date <= 31:
    if month in b:
        date += 1
        if date > 31:
            month += 1
            date = 1
            if month > 12:  # rolling past December starts a new year
                year += 1
                month = 1
    elif month in c:
        date += 1
        if date > 30:
            month += 1
            date = 1
    elif month == 2:  # February: 29 days in a leap year, otherwise 28
        if x:
            date += 1
            if date > 29:
                month += 1
                date = 1
        else:
            date += 1
            if date > 28:
                month += 1
                date = 1
    print("{0}-{1}-{2}".format(year, month, date))
else:
    print("Invalid input")
|
[
"noreply@github.com"
] |
noreply@github.com
|
606baf852b5d77f9f1bc1b3011ce75d1d4f1b514
|
8ec1858ea1aab27f67c3948464882507de46818b
|
/day1/2.py
|
f01d20b16452fe0c24da495c6af8112bf82126bb
|
[] |
no_license
|
nhuzaa/advent_of_code_y2020
|
534adb822c8cf031513b85a6fd3ea910280f60f9
|
3c4349afc48fbcc85f8be20e5bb7d0f560aca9bd
|
refs/heads/master
| 2023-01-24T10:09:39.906885
| 2020-12-12T09:52:26
| 2020-12-12T09:52:26
| 317,801,054
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,239
|
py
|
'''Advent of Code 2020, day 1 part 2:
find three entries that sum to 2020 and print their product.
'''
def main():
SUMTO = 2020
# num_list = [1,2,3,4,5,6,7,8,9,10,11]
f = open("input2", "r")
data = f.read()
num_list= [int(x) for x in data.splitlines()]
print(num_list)
    # keep only the entries that do not exceed the target sum
less_than_yr = [x for x in num_list if x <= 2020]
#sorting
less_than_yr.sort()
print('####sorted',less_than_yr)
prod_list = []
    # Tries O(n^2) pairs, and each list membership test below is itself O(n),
    # so the worst case is closer to O(n^3) than the O(n) claimed originally.
right_bound = len(less_than_yr) -1
for k in range(0,right_bound):
a = less_than_yr[k]
for i in range(k,right_bound):
c = less_than_yr[i]
b = SUMTO - (a + c)
# print (a , b , c)
# print ('right_bound', right_bound)
if b <= 0:
right_bound -= i -1
break
elif b > 0:
                if b in less_than_yr[k+1:right_bound]:  # O(n) membership test on a list slice
print('b in arry', a , b , c)
prod = a * b * c
prod_list.append(prod)
#answer
print(prod_list)
if __name__ == '__main__':
import time
b = time.time()
main()
e = time.time()
print("execution time:", e-b)
|
[
"nhuzaa@gmail.com"
] |
nhuzaa@gmail.com
|
a569570bf007e78a5309d1fdf90329df076ce608
|
63fb6123aff12e7cfeec5f91e6fb7da371558a39
|
/version2/game/player.py
|
b747394a507f7951e8487bb52a4b59c25d09bcb5
|
[] |
no_license
|
Pratere/Asteriods
|
f4db94dfba794ee2f591db48ff8d24f0cfb94aaf
|
2935966b8029332aadc9c33964665502e2b45646
|
refs/heads/master
| 2020-07-12T02:01:50.559947
| 2019-08-27T11:35:35
| 2019-08-27T11:35:35
| 204,690,114
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,407
|
py
|
import pyglet
import math
from pyglet.window import key
from . import physicalobject, resources
class Player(physicalobject.PhysicalObject):
def __init__(self, *args, **kwargs):
super().__init__(img=resources.player_image, *args, **kwargs)
self.thrust = 300.0
self.rotate_speed = 200.0
self.key_handler = key.KeyStateHandler()
self.engine_sprite = pyglet.sprite.Sprite(img=resources.engine_image, *args, **kwargs)
self.engine_sprite.visible = False
def update(self, dt):
super(Player, self).update(dt)
if self.key_handler[key.LEFT]:
self.rotation -= self.rotate_speed * dt
if self.key_handler[key.RIGHT]:
self.rotation += self.rotate_speed * dt
if self.key_handler[key.UP]:
angle_radians = -math.radians(self.rotation)
force_x = math.cos(angle_radians) * self.thrust * dt
force_y = math.sin(angle_radians) * self.thrust * dt
self.velocity_x += force_x
self.velocity_y += force_y
self.engine_sprite.rotation = self.rotation
self.engine_sprite.x = self.x
self.engine_sprite.y = self.y
self.engine_sprite.visible = True
else:
self.engine_sprite.visible = False
def delete(self):
self.engine_sprite.delete()
super(Player, self).delete()
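def attach_to_window(window, player, rate=1 / 120.0):
    # Added sketch (not in the original file): KeyStateHandler only receives
    # events once it is pushed onto a window, so a caller is expected to wire
    # things up roughly like this and schedule update() at a fixed rate.
    window.push_handlers(player.key_handler)
    pyglet.clock.schedule_interval(player.update, rate)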
|
[
"eliprater@dhcp-10-241-162-60.cp.wireless.private.cam.ac.uk"
] |
eliprater@dhcp-10-241-162-60.cp.wireless.private.cam.ac.uk
|
87e2891c541d13b67c9a58b0d0a406bc9f2443c4
|
8c91511845bd3574e02f999b75e263b152698b99
|
/python/reviProva/Python-master/17_ValorNaFaixa.py
|
1d76c9b772be8727355c521fe531d86461ca7382
|
[
"MIT"
] |
permissive
|
jardelhokage/python
|
9626f6e79a1779f2bef936752cd0c187e5390d0f
|
f2a3a9cb5a51c032e6fa36dcb4af24f8bd390f46
|
refs/heads/master
| 2020-08-11T04:20:46.612128
| 2019-10-11T17:18:21
| 2019-10-11T17:18:21
| 214,490,145
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
# Value within a range
numero = int(input('Enter a number: '))
if numero in range(1, 10):  # true for 1 through 9
    print("The value is within the allowed range!")
else:
    print("The value is outside the allowed range!")
|
[
"jardel.ct@hotmail.com"
] |
jardel.ct@hotmail.com
|
72c9faf376136a45f845898f3b31c3992bebed11
|
e27fa9e15c6ba1bac3ba1161a3e068e89ff05a62
|
/qa_app/utils.py
|
85a8bfae8da275b6bc7a03edf615999e2573f9a4
|
[
"Apache-2.0"
] |
permissive
|
mogaika/qa_app_flask
|
50ea2f6a6fab88dd4ef1d8c85ef0bdf6cf70771f
|
5d7e7105786d6f7d0f44ee17bfc753aaa5e7b32b
|
refs/heads/master
| 2020-06-22T18:38:23.668183
| 2016-11-08T17:40:53
| 2016-11-08T17:40:53
| 74,581,430
| 0
| 0
| null | 2016-11-23T13:54:30
| 2016-11-23T13:54:30
| null |
UTF-8
|
Python
| false
| false
| 3,445
|
py
|
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import datetime
import hashlib
import re
from socket import inet_aton, inet_ntoa, socket
from struct import unpack, pack
from qa_app import models, lm
from flask import current_app as app, g, session, request, abort
from flask_login import current_user
import settings
@app.before_request
def before_request():
g.user = current_user
@app.before_request
def needs_setup():
if request.path.startswith('/static'):
return
@lm.user_loader
def load_user(id):
return models.Users.query.get(int(id))
def authed():
return bool(session.get('id', False))
def unix_time(dt):
return int((dt - datetime.datetime(1970, 1, 1)).total_seconds())
def unix_time_millis(dt):
return unix_time(dt) * 1000
def sha512(string):
return hashlib.sha512(string).hexdigest()
def get_ip():
""" Returns the IP address of the currently in scope request. The approach is to define a list of trusted proxies
(in this case the local network), and only trust the most recently defined untrusted IP address.
Taken from http://stackoverflow.com/a/22936947/4285524 but the generator there makes no sense.
    The trusted_proxies regexes are taken from Ruby on Rails.
This has issues if the clients are also on the local network so you can remove proxies from config.py.
QATrainingFrontend does not use IP address for anything besides cursory tracking of users and it is ill-advised to do much
more than that if you do not know what you're doing.
"""
trusted_proxies = app.config['TRUSTED_PROXIES']
combined = "(" + ")|(".join(trusted_proxies) + ")"
route = request.access_route + [request.remote_addr]
for addr in reversed(route):
if not re.match(combined, addr): # IP is not trusted but we trust the proxies
remote_addr = addr
break
else:
remote_addr = request.remote_addr
return remote_addr
def long2ip(ip_int):
return inet_ntoa(pack('!I', ip_int))
def ip2long(ip):
return unpack('!I', inet_aton(ip))[0]
def init_utils(app):
app.jinja_env.filters['unix_time'] = unix_time
app.jinja_env.filters['unix_time_millis'] = unix_time_millis
app.jinja_env.filters['long2ip'] = long2ip
app.jinja_env.globals.update(template_theme=settings.TEMPLATE)
@app.context_processor
def inject_user():
if session:
return dict(session)
return dict()
@app.before_request
def csrf():
if not session.get('nonce'):
session['nonce'] = sha512(os.urandom(10))
if request.method == "POST":
if session['nonce'] != request.form.get('nonce'):
abort(403)
def is_admin():
if authed():
return settings.USER_ROLE.keys()[settings.USER_ROLE.values().index(session['admin'])]
else:
return False
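# Added sketch (illustrative only; not part of the original app): the shape of
# the TRUSTED_PROXIES setting that get_ip() reads from app.config -- regexes
# for loopback and private ranges, in the spirit of Rails' defaults.
EXAMPLE_TRUSTED_PROXIES = [
    r'^127\.0\.0\.1$', r'^::1$',
    r'^10\.', r'^172\.(1[6-9]|2[0-9]|3[01])\.', r'^192\.168\.',
]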
|
[
"leks.molecul@gmail.com"
] |
leks.molecul@gmail.com
|
9ebf83d033ab7245eb43f26c2e25b455511aebc6
|
94462ad305ce7811a46ed7e14db774a8dd303442
|
/problems/76_minimum_window_substring.py
|
ace30ee4e650ee5d52dd435370d8442f2f43bdee
|
[
"MIT"
] |
permissive
|
lucasheriques/leetcode-solutions
|
dfa7529b36a5bfffe1e05bf2b4d09cec59da8ce5
|
cd36b5df46a75a0cb17569faf9cf56186864f68a
|
refs/heads/master
| 2020-07-04T20:14:52.034387
| 2019-09-21T01:44:05
| 2019-09-21T01:44:05
| 202,401,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 947
|
py
|
from collections import Counter
class Solution:
    def minWindow(self, s: str, t: str) -> str:
        if len(s) < len(t) or not s or not t:
            return ""
        table = Counter(t)    # chars of t still needed, with multiplicity
        counter = len(table)  # distinct chars not yet fully covered
        begin = end = 0
        response = None       # best (begin, end) window found so far
        while end < len(s):
            endchar = s[end]
            if endchar in table:
                table[endchar] -= 1
                if table[endchar] == 0:
                    counter -= 1
            end += 1
            # shrink from the left while the window still covers all of t
            while counter == 0:
                if response is None or response[1] - response[0] > end - begin:
                    response = (begin, end)
                beginchar = s[begin]
                if beginchar in table:
                    table[beginchar] += 1
                    if table[beginchar] > 0:
                        counter += 1
                begin += 1
        # original returned s[0:len(s)] even when no window exists; return "" instead
        return s[response[0]:response[1]] if response else ""
Solution().minWindow("ab", "b")
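# Quick checks (added; not in the original file):
assert Solution().minWindow("ADOBECODEBANC", "ABC") == "BANC"
assert Solution().minWindow("ab", "c") == ""  # no valid window -> empty string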
|
[
"lucasheriques@gmail.com"
] |
lucasheriques@gmail.com
|
cb0b7de7110594cc60db9f4425dd32d4cf0f3dc2
|
50cd2203e4a19e4191c7710d955feb2159391a41
|
/lib/json_exporter.py
|
98345320bf4cf0d47b2c729d57b701d311980453
|
[
"MIT"
] |
permissive
|
phareeser/workouts
|
9555ce0dde9e3c7af4f1afa4cb3f3bff5b7dc8a8
|
36be728402e5829b5f32000ff7d315b96d9f00a2
|
refs/heads/master
| 2023-02-26T05:35:38.553384
| 2021-02-06T08:58:38
| 2021-02-06T08:58:38
| 250,558,148
| 0
| 0
|
MIT
| 2021-02-05T17:16:16
| 2020-03-27T14:37:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,443
|
py
|
# coding=utf-8
from lib.workout_exporter import WorkoutExporter
from lib.workout import Workout, Sport, SportsType, WorkoutsDatabase
import logging
import json
logger = logging.getLogger(__name__)
class JsonExporter(WorkoutExporter):
"""
Exports workouts from a database to a json file
"""
def __init__(self, filename):
logger.info("json exporter initializing ...")
self.json = None
self.filename = filename
def create_session(self):
logger.info("json exporter creating session ...")
try:
self.json = open(self.filename, 'w', encoding='utf-8')
except OSError:
logger.error("json output file could not be accessed")
return False
except TypeError:
logger.error("export filename not correct")
return False
return True
def close_session(self):
logger.info("json exporter closing session ...")
if self.json:
self.json.close()
self.json = None
def export_workouts(self, db):
logger.info("exporting workouts ...")
exported_workouts = 0
workouts = db.session.query(Workout).all()
json_data = []
for workout in workouts:
json_data.append(workout.as_dict(db))
exported_workouts += 1
json.dump(json_data, self.json)
logger.info("{} workouts exported".format(exported_workouts))
|
[
"github@reeses.eu"
] |
github@reeses.eu
|
e15fca6405f366c3103d7c00f519962d002b58d4
|
53244518737c86a8567c1efbd2e4b2d7cfd13d8a
|
/garment_shop/asgi.py
|
e6eb4a04abc24a3940c4414f898990d0a0e43354
|
[] |
no_license
|
artzvrzn/garment_shop
|
073489cc31d0f6aee7a73a17eedeee553275cd47
|
aa8c448b7a59d722e65915d61220a4ba1eec8a97
|
refs/heads/main
| 2023-03-11T08:56:07.686169
| 2021-02-22T18:43:45
| 2021-02-22T18:43:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
ASGI config for garment_shop project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'garment_shop.settings')
application = get_asgi_application()
|
[
"art.zvrzn@gmail.com"
] |
art.zvrzn@gmail.com
|
c7ee17d7cc63737119b7d48ee0b522964c5d1ade
|
17559a4c13ee3d6991b7448315e8f0ea0cd0a74d
|
/add_patient.py
|
1cfa0d3aba60b8e2d1ea16e329eb0af4804ef47f
|
[] |
no_license
|
BhutdaNilesh/HospitalManagamentSystemGUI-PyQt5
|
4a4ba50b3a1b13d1958f1f78c54b4925450b2151
|
8005c6e5e727f099e99657fa9b4c776a27a3f825
|
refs/heads/main
| 2023-08-05T10:30:29.940269
| 2021-09-16T08:54:14
| 2021-09-16T08:54:14
| 406,760,368
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,039
|
py
|
import sys,os
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PIL import Image
import sqlite3
DefaultImg = "xyz.png"
con = sqlite3.connect("Hospital_database.db")
cur = con.cursor()
now = QDateTime.currentDateTime()
dt = now.toString(Qt.ISODate)
rows = cur.execute("SELECT * FROM patients").fetchall()
reg_Id = rows[-1][1] + 1 if rows else 1  # next registration id; start at 1 when the table is empty
# print(reg_Id)
class AddPatient(QWidget):
def __init__(self):
super().__init__()
        ######### Initialising Window ###################
self.setWindowTitle("New Registration")
self.setGeometry(300, 150, 700, 620)
self.setFixedSize(self.size()) # Fixed size of window
self.setWindowIcon(QIcon("icons/logo.png"))
self.UI()
self.show()
def UI(self):
self.widgets()
self.layouts()
def widgets(self):
self.addPatientImg = QLabel()
self.img = QPixmap('icons/xyz.png')
self.addPatientImg.setPixmap(self.img)
self.addPatientImg.setAlignment(Qt.AlignCenter)
self.titleText = QLabel("New Patient")
self.titleText.setAlignment(Qt.AlignCenter)
##########################################################
global reg_Id
self.regText = QLabel("Registration Id: ")
self.regEntry = QLineEdit()
self.regEntry.setFixedWidth(80)
self.regEntry.setText(str(reg_Id))
self.dateTimeText = QLabel("Date & Time: ")
self.dateTimeText.setFixedWidth(160)
self.dateTimeEntry = QLineEdit()
self.dateTimeEntry.setFixedWidth(140)
self.dateTimeEntry.setText(dt)
###########################################################
self.nameText =QLabel("Patient Name: ")
self.namePrefix = QComboBox()
self.namePrefix.setFixedWidth(50)
self.namePrefix.addItems(["Mr.","Mrs."])
self.FnameEntry = QLineEdit()
self.FnameEntry.setFixedWidth(150)
self.FnameEntry.setPlaceholderText("Enter First Name")
self.LnameEntry = QLineEdit()
self.LnameEntry.setFixedWidth(150)
self.LnameEntry.setPlaceholderText("Enter Last Name")
############################################################
self.ageText = QLabel("Age: ")
self.ageEntry = QLineEdit()
self.ageEntry.setPlaceholderText("Enter Age")
self.sexText = QLabel("Sex: ")
self.sexCombo = QComboBox()
self.sexCombo.setFixedWidth(60)
self.sexCombo.addItems(["Male","Female","Others"])
#############################################################
self.addressText = QLabel("Address: ")
self.addressEntry = QLineEdit()
self.addressEntry.setFixedWidth(220)
self.addressEntry.setPlaceholderText("Enter Address")
self.districtText = QLabel("District")
districts = ["Ahmednagar", "Akola", "Amravati", "Aurangabad", "Beed", "Bhandara", "Buldhana", "Chandrapur",
"Dhule", "Gadchiroli", "Gondia", "Hingoli", "Jalgaon", "Jalna", "Kolhapur", "Latur",
"Mumbai City", "Mumbai Suburban", "Nagpur", "Nanded", "Nandurbar", "Nashik", "Osmanabad",
"Palghar", "Parbhani", "Pune",
"Raigad", "Ratnagiri", "Sangli", "Satara", "Sindhudurg", "Solapur", "Thane", "Wardha", "Washim",
"Yavatmal"]
self.districtCombo = QComboBox()
self.districtCombo.setFixedWidth(100)
for dis in districts:
self.districtCombo.addItem(dis)
self.talukaText = QLabel("Taluka: ")
self.talukaEntry = QLineEdit()
##################################################
self.occupationText = QLabel("Occupation: ")
self.occupationEntry = QComboBox()
self.occupationEntry.addItems(["Farmer","Student","Self","Teacher","Gov. Service","Pvt. Service","Engg.","Doctor"])
self.phoneText = QLabel("Mob. No.: ")
self.phoneEntry = QLineEdit()
self.phoneEntry.setPlaceholderText("Enter Phone")
##################################################
self.imgText = QLabel("Image: ")
self.imguploadBtn = QPushButton("Upload")
self.imguploadBtn.clicked.connect(self.uploadImg)
self.referredText = QLabel("Referred by: ")
self.referredEntry = QComboBox()
self.referredEntry.setFixedWidth(120)
self.submitBtn = QPushButton("Submit")
self.submitBtn.clicked.connect(self.addPatientData)
def layouts(self):
self.mainLayout = QVBoxLayout()
self.topLayout = QVBoxLayout()
self.bottomLayout = QGridLayout()
# self.bottomLayout.setVerticalSpacing(20)
# self.bottomLayout.setHorizontalSpacing(5)
self.topFrame = QFrame()
self.bottomFrame = QFrame()
self.topLayout.addWidget(self.titleText)
self.topLayout.addWidget(self.addPatientImg)
self.topFrame.setLayout(self.topLayout)
############### HBox1 ##########################
self.bottomLayout.addWidget(self.regText,0,0)
self.bottomLayout.addWidget(self.regEntry,0,1)
self.bottomLayout.addWidget(self.dateTimeText,0,2)
self.bottomLayout.addWidget(self.dateTimeEntry,0,3)
#####################hbox2##########################
self.bottomLayout.addWidget(self.nameText,1,0)
self.bottomLayout.addWidget(self.namePrefix)
self.bottomLayout.addWidget(self.FnameEntry)
self.bottomLayout.addWidget(self.LnameEntry)
#######################hbox3########################
self.bottomLayout.addWidget(self.sexText,2,0)
self.bottomLayout.addWidget(self.sexCombo,2,1)
self.bottomLayout.addWidget(self.ageText,2,2)
self.bottomLayout.addWidget(self.ageEntry,2,3)
######################hbox3#########################
self.bottomLayout.addWidget(self.addressText,3,0)
self.bottomLayout.addWidget(self.addressEntry)
#######################hbox4#########################
self.bottomLayout.addWidget(self.districtText,4,0)
self.bottomLayout.addWidget(self.districtCombo,4,1)
self.bottomLayout.addWidget(self.talukaText,4,2)
self.bottomLayout.addWidget(self.talukaEntry,4,3)
#################### hbox5###########################
self.bottomLayout.addWidget(self.occupationText,5,0)
self.bottomLayout.addWidget(self.occupationEntry,5,1)
self.bottomLayout.addWidget(self.phoneText,5,2)
self.bottomLayout.addWidget(self.phoneEntry,5,3)
#######################################################
self.bottomLayout.addWidget(self.imgText,6,0)
self.bottomLayout.addWidget(self.imguploadBtn,6,1)
self.bottomLayout.addWidget(self.referredText,6,2)
self.bottomLayout.addWidget(self.referredEntry,6,3)
################ End box ############################
self.bottomLayout.addWidget(self.submitBtn,7,3)
#########################################################
self.bottomFrame.setLayout(self.bottomLayout)
self.mainLayout.addWidget(self.topFrame,35)
self.mainLayout.addWidget(self.bottomFrame,65)
self.setLayout(self.mainLayout)
def uploadImg(self):
global DefaultImg
size = (256,256)
self.filename,ok = QFileDialog.getOpenFileName(self,"Upload Image","","Image files (*.jpg *.png *.jpeg)")
if ok:
DefaultImg = os.path.basename(self.filename)
Img = Image.open(self.filename)
Img = Img.resize(size)
Img.save("patients_imgs/{0}".format(DefaultImg))
def addPatientData(self):
global DefaultImg,reg_Id
regId = self.regEntry.text()
time = self.dateTimeEntry.text()
name = self.namePrefix.currentText() + " " + self.FnameEntry.text() + " " + self.LnameEntry.text()
sex = self.sexCombo.currentText()
age = self.ageEntry.text()
address = self.addressEntry.text()
district = self.districtCombo.currentText()
taluka = self.talukaEntry.text()
occupation = self.occupationEntry.currentText()
phone = self.phoneEntry.text()
refer = self.referredEntry.currentText()
        if name and phone and address and regId != "":
try:
query = ("INSERT INTO 'patients' (registration_id,date_time,patient_name,patient_phone,patient_age,patient_sex,patient_address,district,taluka,occupation,patient_img,referred_by) VALUES (?,?,?,?,?,?,?,?,?,?,?,?)")
cur.execute(query,(regId,time,name,phone,age,sex,address,district,taluka,occupation,DefaultImg,refer))
con.commit()
QMessageBox.information(self,"Info","Patient has been Added")
# reg_Id = reg_Id+1
            except sqlite3.Error:
                QMessageBox.information(self,"Info","Patient has not been Added")
else:
QMessageBox.information(self,"Warning","Fields cannot be empty")
|
[
"bhutdanilesh2517@gmail.com"
] |
bhutdanilesh2517@gmail.com
|
45e451b6f6e00f0071841994037f11b34c3d3553
|
9b0d8477e49f3c201805d3fb68bf9589a4402462
|
/modules/db.py
|
d33491ddc28bc8e0b64ad1ee256fc00e67bce090
|
[] |
no_license
|
Hwangtaewon/utility
|
e2333090ea243d0da5cc2edd07ba89218df1555c
|
cb41375050d1ff13077f0ed6dd451d9c74f8f8f1
|
refs/heads/master
| 2020-12-30T09:04:07.194461
| 2020-01-30T05:31:04
| 2020-01-30T05:31:04
| 238,942,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,018
|
py
|
from .requester import Requester
import json
import base64
class DB(object):
def __init__(self, dbdomain="http://127.0.0.1:8000"):
self.dbdomain = dbdomain
self.requester = Requester()
def get_subdomains_of_company(self, company):
# parameter error handling
if not isinstance(company, str):
print("[!] Error: parameter type error - company type is str")
return None
res = self.requester.requests(self.dbdomain + "/subdomains/company/" + company)
if not res:
print("[!] Get subdomains of company fail")
return None
return json.loads(res.text)
def save_subdomains_of_company(self, company, subdomains, source):
# parameter error handling
if not isinstance(company, str):
print("[!] Error: parameter type error - company type is str")
return None
if not isinstance(source, str):
print("[!] Error: parameter type error - source type is str")
return None
if not isinstance(subdomains, list):
print("[!] Error: parameter type error - company's subdomains type is list")
return None
data = {sub:{"source":[source]} for sub in subdomains}
data = json.dumps(data)
res = self.requester.requests(self.dbdomain + "/subdomains/company/" + company, method="POST", data=data)
if not res:
print("[!] Save subdomains of owner fail")
return None
return res
def delete_subdomains_of_company(self, company, subdomains):
# parameter error handling
if not isinstance(company, str):
print("[!] Error: parameter type error - company type is str")
return None
if not isinstance(subdomains, list):
print("[!] Error: parameter type error - company's subdomains type is list")
return None
data = json.dumps(subdomains)
res = self.requester.requests(self.dbdomain + "/subdomains/company/" + company, method="DELETE", data=data)
if not res:
print("[!] Delete subdomains of owner fail")
return None
return res
def delete_subdomains_of_company_source(self):
pass
def get_subdomains_of_domain(self, domain):
# parameter error handling
if not isinstance(domain, str):
print("[!] Error: parameter type error - domain type is str")
return None
domain = str(base64.b64encode(domain.encode("utf-8")), "utf-8")
res = self.requester.requests(self.dbdomain + "/subdomains/domain/" + domain)
if not res:
print("[!] Get subdomains of domain fail")
return None
return json.loads(res.text)
def save_subdomains_of_domain(self, domain, subdomains, source):
# parameter error handling
if not isinstance(domain, str):
print("[!] Error: parameter type error - domain type is str")
return None
if not isinstance(subdomains, list):
print("[!] Error: parameter type error - owner's subdomains type is list")
return None
domain = str(base64.b64encode(domain.encode("utf-8")), "utf-8")
data = {sub:{"source":[source]} for sub in subdomains}
data = json.dumps(data)
res = self.requester.requests(self.dbdomain + "/subdomains/domain/" + domain, method="POST", data=data)
if not res:
print("[!] Save subdomains of domain fail")
return None
return res
def get_next_target(self):
res = self.requester.requests(self.dbdomain + "/targets", method="GET")
if not res:
print("[!] Get next target fail")
return None
return json.loads(res.text)
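# A minimal, hypothetical usage sketch (host, company name and source are illustrative):
#
#   db = DB(dbdomain='http://127.0.0.1:8000')
#   db.save_subdomains_of_company('acme', ['a.acme.com', 'b.acme.com'], 'crt.sh')
#   subs = db.get_subdomains_of_company('acme')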
|
[
""
] | |
36f1d5f72557f2f12255cef6d2f407bc9c8305f7
|
19f2286819c18c84ef5ba16a7e7a5dcf51a4f338
|
/Day 23 Complete Binary tree.py
|
bbc0231a439eb4ff9ceb9a815a10a065508dd072
|
[] |
no_license
|
abhishek2chikun/200daysofdsa
|
7cbb78d98d89847f404966afa655455280252961
|
932a7f20de5d0ddcd1b22c3bb3d2a093ba437854
|
refs/heads/master
| 2023-01-04T16:04:14.005028
| 2020-10-28T05:02:34
| 2020-10-28T05:02:34
| 291,129,160
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,016
|
py
|
class Node:
def __init__(self,data,left=None,right=None):
self.data=data
self.left=left
self.right=right
def is_Complete(root, n, index):
    # Number the nodes as in an array heap: root at index 0, children of node i
    # at 2i+1 and 2i+2. The tree is complete iff every node's index is below n.
    if root is None:
        return True
    if index >= n:
        return False
    return is_Complete(root.left, n, 2*index+1) and is_Complete(root.right, n, 2*index+2)
def count(root):
if root is None:
return 0
return (1+count(root.left)+count(root.right))
root=Node(1,left=Node(2,left=Node(4),right=Node(5)),right=Node(3))
ind=0
if is_Complete(root, count(root), ind):
    print("Your tree is a Complete Binary Tree")
else:
    print("Not a Complete Binary Tree")
root.right.left = Node(6)
if is_Complete(root, count(root), ind):
    print("Your tree is a Complete Binary Tree")
else:
    print("Not a Complete Binary Tree")
root.left.left.left = Node(7)
if is_Complete(root, count(root), ind):
    print("Your tree is a Complete Binary Tree")
else:
    print("Not a Complete Binary Tree")
|
[
"noreply@github.com"
] |
noreply@github.com
|
61d00c0a43a81a997af12a5df819936ceddd4160
|
fce8394300cd83e29fcf4362caf259e6f7a4813e
|
/src/mod_plot.py
|
69faa320b25b4ae102502c51d87410d76951fd7c
|
[
"MIT"
] |
permissive
|
maxbeauchamp/2020a_SSH_mapping_NATL60
|
1982d7be625aca82d876f2d1240e6ac415c19640
|
04ce00567496c72f8988b78e3f5ac1575b66c42c
|
refs/heads/master
| 2022-12-16T17:50:44.838016
| 2020-09-04T09:47:05
| 2020-09-04T09:47:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,416
|
py
|
import xarray as xr
import numpy
import logging
import matplotlib.pylab as plt
from matplotlib.ticker import ScalarFormatter
def plot_psd_score(ds_psd):
    try:
        nb_experiment = len(ds_psd.experiment)
    except (AttributeError, TypeError):
        nb_experiment = 1
    fig, ax0 = plt.subplots(1, nb_experiment, sharey=True, figsize=(20, 5))
    ax0 = numpy.atleast_1d(ax0)  # plt.subplots returns a bare Axes when nb_experiment == 1
#plt.subplots_adjust(right=0.1, left=0.09)
for exp in range(nb_experiment):
        try:
            ctitle = ds_psd.experiment.values[exp]
        except (AttributeError, IndexError):
            ctitle = ''
ax = ax0[exp]
ax.invert_yaxis()
ax.invert_xaxis()
c1 = ax.contourf(1./(ds_psd.freq_lon), 1./ds_psd.freq_time, (ds_psd.isel(experiment=exp).values),
levels=numpy.arange(0,1.1, 0.1), cmap='RdYlGn', extend='both')
ax.set_xlabel('wavenumber(degree_lon)', fontweight='bold', fontsize=18)
ax0[0].set_ylabel('frequency (days)', fontweight='bold', fontsize=18)
#plt.xscale('log')
ax.set_yscale('log')
ax.grid(linestyle='--', lw=1, color='w')
ax.tick_params(axis='both', labelsize=18)
ax.set_title(f'PSD-based score ({ctitle})', fontweight='bold', fontsize=18)
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter())
c2 = ax.contour(1./(ds_psd.freq_lon), 1./ds_psd.freq_time,(ds_psd.isel(experiment=exp).values), levels=[0.5], linewidths=2, colors='k')
cbar = fig.colorbar(c1, ax=ax, pad=0.01)
cbar.add_lines(c2)
bbox_props = dict(boxstyle="round,pad=0.5", fc="w", ec="k", lw=2)
ax0[-1].annotate('Resolved scales',
xy=(1.2, 0.8),
xycoords='axes fraction',
xytext=(1.2, 0.55),
bbox=bbox_props,
arrowprops=
dict(facecolor='black', shrink=0.05),
horizontalalignment='left',
verticalalignment='center')
ax0[-1].annotate('UN-resolved scales',
xy=(1.2, 0.2),
xycoords='axes fraction',
xytext=(1.2, 0.45),
bbox=bbox_props,
arrowprops=
dict(facecolor='black', shrink=0.05),
horizontalalignment='left',
verticalalignment='center')
plt.show()
|
[
"adeajayi.kunle@gmail.com"
] |
adeajayi.kunle@gmail.com
|
8fc31e37a4a15496375b67b89a219906c69328eb
|
9e287012bf88e56782b36b83803ac6550f077486
|
/algorithims/414.py
|
951ad6124dc9696a3cd9572f26bf8d8d670a8fdd
|
[] |
no_license
|
kmin-jeong/solvong_Algorithms
|
b3255655dd96f70f3ed468644e7feb501c64e535
|
5622ef00e4417eb572009ee7d122946192016850
|
refs/heads/master
| 2023-08-29T12:21:56.659216
| 2020-09-21T14:28:29
| 2020-09-21T14:28:29
| 291,240,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
class Solution:
    def thirdMax(self, nums):
        # distinct values in descending order; fall back to the maximum
        # when there are fewer than three distinct values
        nums = sorted(set(nums), reverse=True)
        if len(nums) < 3:
            answer = nums[0]
        else:
            answer = nums[2]
        return answer
|
[
"desk535353@gmail.com"
] |
desk535353@gmail.com
|
925de84479dcf1d87e11ce81b0c8dc7b15d21acd
|
8a495b823576b5c0bb39decd44575de20b1dc43d
|
/hydrus/client/db/ClientDBFilesStorage.py
|
79d2c246ceddf3d6d68933dbb19b89d0713923d8
|
[
"WTFPL"
] |
permissive
|
Treadder/hydrus
|
f8c11e9798316cc5457497e9bff56236727862e0
|
ca2f5f161214aa6df3900809f9ca18339c3e1f9a
|
refs/heads/master
| 2023-09-06T08:46:05.694446
| 2021-11-17T21:22:27
| 2021-11-17T21:22:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33,781
|
py
|
import collections
import sqlite3
import typing
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusDB
from hydrus.client import ClientConstants as CC
from hydrus.client import ClientSearch
from hydrus.client.db import ClientDBMaster
from hydrus.client.db import ClientDBModule
from hydrus.client.db import ClientDBServices
def GenerateFilesTableNames( service_id: int ) -> typing.Tuple[ str, str, str, str ]:
suffix = str( service_id )
current_files_table_name = 'main.current_files_{}'.format( suffix )
deleted_files_table_name = 'main.deleted_files_{}'.format( suffix )
pending_files_table_name = 'main.pending_files_{}'.format( suffix )
petitioned_files_table_name = 'main.petitioned_files_{}'.format( suffix )
return ( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name )
def GenerateFilesTableName( service_id: int, status: int ) -> str:
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
if status == HC.CONTENT_STATUS_CURRENT:
return current_files_table_name
elif status == HC.CONTENT_STATUS_DELETED:
return deleted_files_table_name
elif status == HC.CONTENT_STATUS_PENDING:
return pending_files_table_name
else:
return petitioned_files_table_name
class DBLocationSearchContext( object ):
def __init__( self, location_search_context: ClientSearch.LocationSearchContext ):
self.location_search_context = location_search_context
self.files_table_name = None
def GetLocationSearchContext( self ) -> ClientSearch.LocationSearchContext:
return self.location_search_context
def GetTableJoinIteratedByFileDomain( self, table_phrase: str ):
if self.location_search_context.IsAllKnownFiles():
return table_phrase
else:
return '{} CROSS JOIN {} USING ( hash_id )'.format( self.files_table_name, table_phrase )
def GetTableJoinLimitedByFileDomain( self, table_phrase: str ):
if self.location_search_context.IsAllKnownFiles():
return table_phrase
else:
return '{} CROSS JOIN {} USING ( hash_id )'.format( table_phrase, self.files_table_name )
class ClientDBFilesStorage( ClientDBModule.ClientDBModule ):
def __init__( self, cursor: sqlite3.Cursor, modules_services: ClientDBServices.ClientDBMasterServices, modules_texts: ClientDBMaster.ClientDBMasterTexts ):
self.modules_services = modules_services
self.modules_texts = modules_texts
ClientDBModule.ClientDBModule.__init__( self, 'client file locations', cursor )
self.temp_file_storage_table_name = None
def _GetInitialTableGenerationDict( self ) -> dict:
return {
'main.local_file_deletion_reasons' : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY, reason_id INTEGER );', 400 )
}
def _GetServiceIndexGenerationDict( self, service_id ) -> dict:
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
index_generation_dict = {}
index_generation_dict[ current_files_table_name ] = [
( [ 'timestamp' ], False, 447 )
]
index_generation_dict[ deleted_files_table_name ] = [
( [ 'timestamp' ], False, 447 ),
( [ 'original_timestamp' ], False, 447 )
]
index_generation_dict[ petitioned_files_table_name ] = [
( [ 'reason_id' ], False, 447 )
]
return index_generation_dict
def _GetServiceTableGenerationDict( self, service_id ) -> dict:
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
return {
current_files_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY, timestamp INTEGER );', 447 ),
deleted_files_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY, timestamp INTEGER, original_timestamp INTEGER );', 447 ),
pending_files_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY );', 447 ),
petitioned_files_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY, reason_id INTEGER );', 447 )
}
def _GetServiceIdsWeGenerateDynamicTablesFor( self ):
return self.modules_services.GetServiceIds( HC.SPECIFIC_FILE_SERVICES )
def AddFiles( self, service_id, insert_rows ):
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
self._ExecuteMany( 'INSERT OR IGNORE INTO {} VALUES ( ?, ? );'.format( current_files_table_name ), ( ( hash_id, timestamp ) for ( hash_id, timestamp ) in insert_rows ) )
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( pending_files_table_name ), ( ( hash_id, ) for ( hash_id, timestamp ) in insert_rows ) )
pending_changed = self._GetRowCount() > 0
return pending_changed
def ClearDeleteRecord( self, service_id, hash_ids ):
deleted_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_DELETED )
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( deleted_files_table_name ), ( ( hash_id, ) for hash_id in hash_ids ) )
num_deleted = self._GetRowCount()
return num_deleted
def ClearFilesTables( self, service_id: int, keep_pending = False ):
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
self._Execute( 'DELETE FROM {};'.format( current_files_table_name ) )
self._Execute( 'DELETE FROM {};'.format( deleted_files_table_name ) )
if not keep_pending:
self._Execute( 'DELETE FROM {};'.format( pending_files_table_name ) )
self._Execute( 'DELETE FROM {};'.format( petitioned_files_table_name ) )
def ClearLocalDeleteRecord( self, hash_ids = None ):
# we delete from everywhere, but not for files currently in the trash
service_ids_to_nums_cleared = {}
local_non_trash_service_ids = self.modules_services.GetServiceIds( ( HC.COMBINED_LOCAL_FILE, HC.LOCAL_FILE_DOMAIN ) )
if hash_ids is None:
trash_current_files_table_name = GenerateFilesTableName( self.modules_services.trash_service_id, HC.CONTENT_STATUS_CURRENT )
for service_id in local_non_trash_service_ids:
deleted_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_DELETED )
self._Execute( 'DELETE FROM {} WHERE hash_id NOT IN ( SELECT hash_id FROM {} );'.format( deleted_files_table_name, trash_current_files_table_name ) )
num_cleared = self._GetRowCount()
service_ids_to_nums_cleared[ service_id ] = num_cleared
self._Execute( 'DELETE FROM local_file_deletion_reasons WHERE hash_id NOT IN ( SELECT hash_id FROM {} );'.format( trash_current_files_table_name ) )
else:
trashed_hash_ids = self.FilterCurrentHashIds( self.modules_services.trash_service_id, hash_ids )
ok_to_clear_hash_ids = set( hash_ids ).difference( trashed_hash_ids )
if len( ok_to_clear_hash_ids ) > 0:
for service_id in local_non_trash_service_ids:
deleted_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_DELETED )
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( deleted_files_table_name ), ( ( hash_id, ) for hash_id in ok_to_clear_hash_ids ) )
num_cleared = self._GetRowCount()
service_ids_to_nums_cleared[ service_id ] = num_cleared
self._ExecuteMany( 'DELETE FROM local_file_deletion_reasons WHERE hash_id = ?;', ( ( hash_id, ) for hash_id in ok_to_clear_hash_ids ) )
return service_ids_to_nums_cleared
def DeletePending( self, service_id: int ):
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
self._Execute( 'DELETE FROM {};'.format( pending_files_table_name ) )
self._Execute( 'DELETE FROM {};'.format( petitioned_files_table_name ) )
def DropFilesTables( self, service_id: int ):
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
self._Execute( 'DROP TABLE IF EXISTS {};'.format( current_files_table_name ) )
self._Execute( 'DROP TABLE IF EXISTS {};'.format( deleted_files_table_name ) )
self._Execute( 'DROP TABLE IF EXISTS {};'.format( pending_files_table_name ) )
self._Execute( 'DROP TABLE IF EXISTS {};'.format( petitioned_files_table_name ) )
def FilterAllCurrentHashIds( self, hash_ids, just_these_service_ids = None ):
if just_these_service_ids is None:
service_ids = self.modules_services.GetServiceIds( HC.SPECIFIC_FILE_SERVICES )
else:
service_ids = just_these_service_ids
current_hash_ids = set()
with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
for service_id in service_ids:
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
hash_id_iterator = self._STI( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, current_files_table_name ) ) )
current_hash_ids.update( hash_id_iterator )
return current_hash_ids
def FilterAllPendingHashIds( self, hash_ids, just_these_service_ids = None ):
if just_these_service_ids is None:
service_ids = self.modules_services.GetServiceIds( HC.SPECIFIC_FILE_SERVICES )
else:
service_ids = just_these_service_ids
pending_hash_ids = set()
with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
for service_id in service_ids:
pending_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PENDING )
hash_id_iterator = self._STI( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, pending_files_table_name ) ) )
pending_hash_ids.update( hash_id_iterator )
return pending_hash_ids
def FilterCurrentHashIds( self, service_id, hash_ids ):
if service_id == self.modules_services.combined_file_service_id:
return set( hash_ids )
with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
current_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, current_files_table_name ) ) )
return current_hash_ids
def FilterPendingHashIds( self, service_id, hash_ids ):
if service_id == self.modules_services.combined_file_service_id:
return set( hash_ids )
with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
pending_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PENDING )
pending_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, pending_files_table_name ) ) )
return pending_hash_ids
def GenerateFilesTables( self, service_id: int ):
table_generation_dict = self._GetServiceTableGenerationDict( service_id )
for ( table_name, ( create_query_without_name, version_added ) ) in table_generation_dict.items():
self._Execute( create_query_without_name.format( table_name ) )
index_generation_dict = self._GetServiceIndexGenerationDict( service_id )
for ( table_name, columns, unique, version_added ) in self._FlattenIndexGenerationDict( index_generation_dict ):
self._CreateIndex( table_name, columns, unique = unique )
def GetAPendingHashId( self, service_id ):
pending_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PENDING )
result = self._Execute( 'SELECT hash_id FROM {};'.format( pending_files_table_name ) ).fetchone()
if result is None:
return None
else:
( hash_id, ) = result
return hash_id
def GetAPetitionedHashId( self, service_id ):
petitioned_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PETITIONED )
result = self._Execute( 'SELECT hash_id FROM {};'.format( petitioned_files_table_name ) ).fetchone()
if result is None:
return None
else:
( hash_id, ) = result
return hash_id
def GetCurrentFilesCount( self, service_id, only_viewable = False ):
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
if only_viewable:
# hashes to mimes
result = self._Execute( 'SELECT COUNT( * ) FROM {} CROSS JOIN files_info USING ( hash_id ) WHERE mime IN {};'.format( current_files_table_name, HydrusData.SplayListForDB( HC.SEARCHABLE_MIMES ) ) ).fetchone()
else:
result = self._Execute( 'SELECT COUNT( * ) FROM {};'.format( current_files_table_name ) ).fetchone()
( count, ) = result
return count
def GetCurrentFilesInboxCount( self, service_id ):
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
result = self._Execute( 'SELECT COUNT( * ) FROM {} CROSS JOIN file_inbox USING ( hash_id );'.format( current_files_table_name ) ).fetchone()
( count, ) = result
return count
def GetCurrentHashIdsList( self, service_id ):
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {};'.format( current_files_table_name ) ) )
return hash_ids
def GetCurrentFilesTotalSize( self, service_id ):
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
# hashes to size
result = self._Execute( 'SELECT SUM( size ) FROM {} CROSS JOIN files_info USING ( hash_id );'.format( current_files_table_name ) ).fetchone()
( count, ) = result
return count
def GetCurrentHashIdsToTimestamps( self, service_id, hash_ids ):
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
rows = dict( self._Execute( 'SELECT hash_id, timestamp FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, current_files_table_name ) ) )
return rows
def GetCurrentTimestamp( self, service_id: int, hash_id: int ):
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
result = self._Execute( 'SELECT timestamp FROM {} WHERE hash_id = ?;'.format( current_files_table_name ), ( hash_id, ) ).fetchone()
if result is None:
return None
else:
( timestamp, ) = result
return timestamp
def GetDeletedFilesCount( self, service_id: int ) -> int:
deleted_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_DELETED )
result = self._Execute( 'SELECT COUNT( * ) FROM {};'.format( deleted_files_table_name ) ).fetchone()
( count, ) = result
return count
def GetDeletionStatus( self, service_id, hash_id ):
        # a file can have a deletion reason recorded while it merely sits in the trash, so we fetch it regardless of the end result
result = self._Execute( 'SELECT reason_id FROM local_file_deletion_reasons WHERE hash_id = ?;', ( hash_id, ) ).fetchone()
if result is None:
file_deletion_reason = 'Unknown deletion reason.'
else:
( reason_id, ) = result
file_deletion_reason = self.modules_texts.GetText( reason_id )
deleted_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_DELETED )
is_deleted = False
timestamp = None
result = self._Execute( 'SELECT timestamp FROM {} WHERE hash_id = ?;'.format( deleted_files_table_name ), ( hash_id, ) ).fetchone()
if result is not None:
is_deleted = True
( timestamp, ) = result
return ( is_deleted, timestamp, file_deletion_reason )
def GetDBLocationSearchContext( self, location_search_context: ClientSearch.LocationSearchContext ):
if not location_search_context.SearchesAnything():
location_search_context = ClientSearch.LocationSearchContext( current_service_keys = [ CC.COMBINED_FILE_SERVICE_KEY ] )
db_location_search_context = DBLocationSearchContext( location_search_context )
if location_search_context.IsAllKnownFiles():
# no table set, obviously
return db_location_search_context
table_names = []
for current_service_key in location_search_context.current_service_keys:
service_id = self.modules_services.GetServiceId( current_service_key )
table_names.append( GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT ) )
for deleted_service_key in location_search_context.deleted_service_keys:
service_id = self.modules_services.GetServiceId( deleted_service_key )
table_names.append( GenerateFilesTableName( service_id, HC.CONTENT_STATUS_DELETED ) )
if len( table_names ) == 1:
table_name = table_names[0]
db_location_search_context.files_table_name = table_name
else:
            # while I could make a VIEW of the UNION SELECT, we'll populate an indexed single-column table to help the query planner later on
            # we're hardcoding the name to this class for now, so there is a limit of one db_location_search_context at a time _for now_
            # we may change this in future to use wrapper temp int tables, we'll see
            # maybe I should stick this guy in 'temp' to live through db connection resets, but it is generally ephemeral and should not linger through vacuum maintenance or anything like that
if self.temp_file_storage_table_name is None:
self.temp_file_storage_table_name = 'mem.temp_file_storage_hash_id'
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY );'.format( self.temp_file_storage_table_name ) )
else:
self._Execute( 'DELETE FROM {};'.format( self.temp_file_storage_table_name ) )
select_query = ' UNION '.join( ( 'SELECT hash_id FROM {}'.format( table_name ) for table_name in table_names ) )
            self._Execute( 'INSERT OR IGNORE INTO {} ( hash_id ) {};'.format( self.temp_file_storage_table_name, select_query ) )
db_location_search_context.files_table_name = self.temp_file_storage_table_name
return db_location_search_context
def GetHashIdsToCurrentServiceIds( self, temp_hash_ids_table_name ):
hash_ids_to_current_file_service_ids = collections.defaultdict( list )
for service_id in self.modules_services.GetServiceIds( HC.SPECIFIC_FILE_SERVICES ):
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
for hash_id in self._STI( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, current_files_table_name ) ) ):
hash_ids_to_current_file_service_ids[ hash_id ].append( service_id )
return hash_ids_to_current_file_service_ids
def GetHashIdsToServiceInfoDicts( self, temp_hash_ids_table_name ):
hash_ids_to_current_file_service_ids_and_timestamps = collections.defaultdict( list )
hash_ids_to_deleted_file_service_ids_and_timestamps = collections.defaultdict( list )
hash_ids_to_pending_file_service_ids = collections.defaultdict( list )
hash_ids_to_petitioned_file_service_ids = collections.defaultdict( list )
for service_id in self.modules_services.GetServiceIds( HC.SPECIFIC_FILE_SERVICES ):
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
for ( hash_id, timestamp ) in self._Execute( 'SELECT hash_id, timestamp FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, current_files_table_name ) ):
hash_ids_to_current_file_service_ids_and_timestamps[ hash_id ].append( ( service_id, timestamp ) )
for ( hash_id, timestamp, original_timestamp ) in self._Execute( 'SELECT hash_id, timestamp, original_timestamp FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, deleted_files_table_name ) ):
hash_ids_to_deleted_file_service_ids_and_timestamps[ hash_id ].append( ( service_id, timestamp, original_timestamp ) )
for hash_id in self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, pending_files_table_name ) ):
hash_ids_to_pending_file_service_ids[ hash_id ].append( service_id )
for hash_id in self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, petitioned_files_table_name ) ):
hash_ids_to_petitioned_file_service_ids[ hash_id ].append( service_id )
return (
hash_ids_to_current_file_service_ids_and_timestamps,
hash_ids_to_deleted_file_service_ids_and_timestamps,
hash_ids_to_pending_file_service_ids,
hash_ids_to_petitioned_file_service_ids
)
def GetNumLocal( self, service_id: int ) -> int:
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
combined_local_current_files_table_name = GenerateFilesTableName( self.modules_services.combined_local_file_service_id, HC.CONTENT_STATUS_CURRENT )
( num_local, ) = self._Execute( 'SELECT COUNT( * ) FROM {} CROSS JOIN {} USING ( hash_id );'.format( current_files_table_name, combined_local_current_files_table_name ) ).fetchone()
return num_local
def GetPendingFilesCount( self, service_id: int ) -> int:
pending_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PENDING )
result = self._Execute( 'SELECT COUNT( * ) FROM {};'.format( pending_files_table_name ) ).fetchone()
( count, ) = result
return count
def GetPetitionedFilesCount( self, service_id: int ) -> int:
petitioned_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PETITIONED )
result = self._Execute( 'SELECT COUNT( * ) FROM {};'.format( petitioned_files_table_name ) ).fetchone()
( count, ) = result
return count
def GetServiceIdCounts( self, hash_ids ) -> typing.Dict[ int, int ]:
with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
service_ids_to_counts = {}
for service_id in self.modules_services.GetServiceIds( HC.SPECIFIC_FILE_SERVICES ):
current_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
# temp hashes to files
( count, ) = self._Execute( 'SELECT COUNT( * ) FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, current_files_table_name ) ).fetchone()
service_ids_to_counts[ service_id ] = count
return service_ids_to_counts
def GetSomePetitionedRows( self, service_id: int ):
petitioned_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PETITIONED )
petitioned_rows = list( HydrusData.BuildKeyToListDict( self._Execute( 'SELECT reason_id, hash_id FROM {} ORDER BY reason_id LIMIT 100;'.format( petitioned_files_table_name ) ) ).items() )
return petitioned_rows
def GetTableJoinIteratedByFileDomain( self, service_id, table_name, status ):
files_table_name = GenerateFilesTableName( service_id, status )
return '{} CROSS JOIN {} USING ( hash_id )'.format( files_table_name, table_name )
def GetTableJoinLimitedByFileDomain( self, service_id, table_name, status ):
files_table_name = GenerateFilesTableName( service_id, status )
return '{} CROSS JOIN {} USING ( hash_id )'.format( table_name, files_table_name )
def GetTablesAndColumnsThatUseDefinitions( self, content_type: int ) -> typing.List[ typing.Tuple[ str, str ] ]:
tables_and_columns = []
        if content_type == HC.CONTENT_TYPE_HASH:
for service_id in self.modules_services.GetServiceIds( HC.SPECIFIC_FILE_SERVICES ):
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
tables_and_columns.extend( [
( current_files_table_name, 'hash_id' ),
( deleted_files_table_name, 'hash_id' ),
( pending_files_table_name, 'hash_id' ),
( petitioned_files_table_name, 'hash_id' )
] )
return tables_and_columns
def GetUndeleteRows( self, service_id, hash_ids ):
deleted_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_DELETED )
with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
rows = self._Execute( 'SELECT hash_id, original_timestamp FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, deleted_files_table_name ) ).fetchall()
return rows
def PendFiles( self, service_id, hash_ids ):
pending_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PENDING )
self._ExecuteMany( 'INSERT OR IGNORE INTO {} ( hash_id ) VALUES ( ? );'.format( pending_files_table_name ), ( ( hash_id, ) for hash_id in hash_ids ) )
def PetitionFiles( self, service_id, reason_id, hash_ids ):
petitioned_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PETITIONED )
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( petitioned_files_table_name ), ( ( hash_id, ) for hash_id in hash_ids ) )
self._ExecuteMany( 'INSERT OR IGNORE INTO {} ( hash_id, reason_id ) VALUES ( ?, ? );'.format( petitioned_files_table_name ), ( ( hash_id, reason_id ) for hash_id in hash_ids ) )
def RecordDeleteFiles( self, service_id, insert_rows ):
deleted_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_DELETED )
now = HydrusData.GetNow()
self._ExecuteMany(
'INSERT OR IGNORE INTO {} ( hash_id, timestamp, original_timestamp ) VALUES ( ?, ?, ? );'.format( deleted_files_table_name ),
( ( hash_id, now, original_timestamp ) for ( hash_id, original_timestamp ) in insert_rows )
)
num_new_deleted_files = self._GetRowCount()
return num_new_deleted_files
def RescindPendFiles( self, service_id, hash_ids ):
pending_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PENDING )
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( pending_files_table_name ), ( ( hash_id, ) for hash_id in hash_ids ) )
def RescindPetitionFiles( self, service_id, hash_ids ):
petitioned_files_table_name = GenerateFilesTableName( service_id, HC.CONTENT_STATUS_PETITIONED )
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( petitioned_files_table_name ), ( ( hash_id, ) for hash_id in hash_ids ) )
def RemoveFiles( self, service_id, hash_ids ):
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( current_files_table_name ), ( ( hash_id, ) for hash_id in hash_ids ) )
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( petitioned_files_table_name ), ( ( hash_id, ) for hash_id in hash_ids ) )
pending_changed = self._GetRowCount() > 0
return pending_changed
def SetFileDeletionReason( self, hash_ids, reason ):
reason_id = self.modules_texts.GetTextId( reason )
self._ExecuteMany( 'REPLACE INTO local_file_deletion_reasons ( hash_id, reason_id ) VALUES ( ?, ? );', ( ( hash_id, reason_id ) for hash_id in hash_ids ) )
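# A standalone sketch of the temp-table pattern used by GetDBLocationSearchContext
# above (hypothetical table names, plain sqlite3): materialising the UNION into an
# indexed single-column table gives later queries something cheap to CROSS JOIN
# against, which a VIEW over the UNION SELECT would not.
#
#   import sqlite3
#   db = sqlite3.connect(':memory:')
#   db.execute('CREATE TABLE IF NOT EXISTS temp_hash_ids ( hash_id INTEGER PRIMARY KEY );')
#   select_query = ' UNION '.join('SELECT hash_id FROM {}'.format(t) for t in table_names)
#   db.execute('INSERT OR IGNORE INTO temp_hash_ids ( hash_id ) {};'.format(select_query))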
|
[
"hydrus.admin@gmail.com"
] |
hydrus.admin@gmail.com
|
5abf6fa46ed937255a0421db8f04f7d3d0691eaa
|
8a85d3dc46c0e397f0b111e4ce785d1b533d8949
|
/patient/models.py
|
d57ef70cc4a668c2ce5292033001df94957488f8
|
[] |
no_license
|
Sidhanta184/project
|
547da6744e9ede8493418ef4ec65bae9522f65f5
|
5659fc7b47d7ce7bdfdbc7e39ee7868c07006102
|
refs/heads/master
| 2020-12-08T09:32:03.545967
| 2020-01-10T02:33:20
| 2020-01-10T02:33:20
| 232,947,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,817
|
py
|
from django.db import models
from django.utils import timezone
from doctor.models import Doctor
from medical.models import Medicine,Medical
from lab.models import Lab,Test
# Create your models here.
gender = (('male','Male'),('female','Female'),('other','Other'))
class Patient(models.Model):
id=models.BigAutoField(primary_key=True)
date = models.DateField(default=timezone.now)
name = models.CharField(max_length=100)
gender = models.CharField(max_length=100,choices=gender)
age = models.IntegerField()
address = models.CharField(max_length=100)
contact_no = models.CharField(max_length=100,blank=True,null=True)
def __str__(self):
return self.name
class DoctorComment(models.Model):
comment = models.TextField()
patient = models.OneToOneField(Patient,on_delete=models.CASCADE)
doctor = models.ForeignKey(Doctor,on_delete=models.CASCADE)
def __str__(self):
return self.comment
class MedicineSuggest(models.Model):
    medicine_name = models.ForeignKey(Medicine,on_delete=models.CASCADE)
    buy_date = models.DateField(default=timezone.now)
    is_paid = models.BooleanField(default=False)
    suggest_by = models.ForeignKey(Doctor,on_delete=models.CASCADE)
    sell_by = models.ForeignKey(Medical,on_delete=models.CASCADE)
    patient = models.ForeignKey(Patient,on_delete=models.CASCADE)
    def __str__(self):
        return str(self.medicine_name)
class TestSuggest(models.Model):
    test = models.ForeignKey(Lab,on_delete=models.CASCADE)
    test_date = models.DateField(default=timezone.now)
    is_paid = models.BooleanField(default=False)
    suggest_by = models.ForeignKey(Doctor,on_delete=models.CASCADE)
    test_by = models.ForeignKey(Medical,on_delete=models.CASCADE)
    patient = models.ForeignKey(Patient,on_delete=models.CASCADE)
    def __str__(self):
        return str(self.test)
|
[
"noreply@github.com"
] |
noreply@github.com
|
1345cc0e0a984974cc45d265fb5e248b561053c2
|
b503e79ccfca67c8114f5bd7a215f5ae993a0ba4
|
/airflow/providers/amazon/aws/sensors/glue.py
|
21a82da9ee9d040fd45ccda5044d467bf7c6b4c3
|
[
"Apache-2.0",
"BSD-3-Clause",
"Python-2.0",
"MIT"
] |
permissive
|
github/incubator-airflow
|
df1d9780f862ea1df8261ea6015dd50a4583f983
|
73f70e00b9fd294057f8ca6b714a85622f6d5dd5
|
refs/heads/gh-2.0.2
| 2023-07-29T18:08:43.140580
| 2022-09-14T18:23:42
| 2022-09-14T18:23:42
| 80,634,006
| 24
| 27
|
Apache-2.0
| 2023-04-18T04:24:36
| 2017-02-01T15:34:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,398
|
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.glue import AwsGlueJobHook
from airflow.sensors.base import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class AwsGlueJobSensor(BaseSensorOperator):
"""
Waits for an AWS Glue Job to reach any of the status below
'FAILED', 'STOPPED', 'SUCCEEDED'
:param job_name: The AWS Glue Job unique name
:type job_name: str
:param run_id: The AWS Glue current running job identifier
:type run_id: str
"""
template_fields = ('job_name', 'run_id')
@apply_defaults
def __init__(self, *, job_name: str, run_id: str, aws_conn_id: str = 'aws_default', **kwargs):
super().__init__(**kwargs)
self.job_name = job_name
self.run_id = run_id
self.aws_conn_id = aws_conn_id
self.success_states = ['SUCCEEDED']
self.errored_states = ['FAILED', 'STOPPED', 'TIMEOUT']
def poke(self, context):
hook = AwsGlueJobHook(aws_conn_id=self.aws_conn_id)
self.log.info("Poking for job run status :for Glue Job %s and ID %s", self.job_name, self.run_id)
job_state = hook.get_job_state(job_name=self.job_name, run_id=self.run_id)
if job_state in self.success_states:
self.log.info("Exiting Job %s Run State: %s", self.run_id, job_state)
return True
elif job_state in self.errored_states:
job_error_message = "Exiting Job " + self.run_id + " Run State: " + job_state
raise AirflowException(job_error_message)
else:
return False
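# A minimal, hypothetical usage sketch (task id, job name and run id are illustrative):
#
#   wait_for_job = AwsGlueJobSensor(
#       task_id='wait_for_glue_job',
#       job_name='my_glue_job',
#       run_id=glue_run_id,
#       poke_interval=60,
#   )
#
# poke() returns True once the run reaches SUCCEEDED, raises AirflowException on
# FAILED, STOPPED or TIMEOUT, and otherwise returns False so the sensor keeps waiting.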
|
[
"noreply@github.com"
] |
noreply@github.com
|
f06a8d2d8170fb9c37ed397039a12bcac68a1fb9
|
5d9b34ff537ca195abe2c4ad2e06f644d026d14e
|
/engine/items/guns/assault_rifle_energy.py
|
219835c0c92a1eb19f7ab51998ce8ddeec1b36b0
|
[
"MIT"
] |
permissive
|
Jawmo/Hope
|
ab61bdef4977ef4aa4a3a40b2995f380b7418d19
|
1d142a712c064c2fb9dc63b38d8c8e65fb089386
|
refs/heads/master
| 2023-04-07T08:56:32.097156
| 2021-04-17T16:32:12
| 2021-04-17T16:32:12
| 224,865,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 894
|
py
|
from engine.global_config import *
from engine.lex import Lex
from engine.items.guns.assault_rifle import Assault_Rifle
from engine.join_tables.join_table_items import *
from engine.suits.suit_templates import alpha_suit_table
class Assault_Rifle_Energy(Assault_Rifle):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def animation_load(self, user, user_input, input_kwargs):
print(user.inventory)
first_person = "You side-load {} into your {} and it immediately spins up, humming with energy.".format(Lex.a_an(user.inventory['l_hand']['contents'].name), user.inventory['r_hand']['contents'].name)
target_person = ""
third_person = "{} side-loads {} into {} {}.".format(user.name, Lex.a_an(user.inventory['l_hand']['contents'].name), Lex.gender(user, "own"), Lex.a_an(user.inventory['r_hand']['contents'].name))
return first_person, target_person, third_person
|
[
"d.jankowski3@gmail.com"
] |
d.jankowski3@gmail.com
|
811812104ba7a303d2a76c887b61da25afa1797c
|
b83f5c023e4a96d47a1d05049969f036c7fc3ed6
|
/server.py
|
b12aef4992969c7c1fba2c9afbcc8c333b902c91
|
[] |
no_license
|
ncss/projects-2016-2
|
bb51c1e533e82c02535577bdb8740294a2056d11
|
15208ba6b9e1b3c5fe71d403a3afc373330431a0
|
refs/heads/master
| 2021-01-24T14:47:25.905307
| 2016-01-11T04:03:16
| 2016-01-11T04:13:11
| 47,598,505
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,535
|
py
|
from tornado.ncss import Server, ncssbook_log
from activities import ActivityInputHandler, activity_dict
from engine.template import render
from profile import ProfileHandler
def landing_handler(response):
response.write(render("landing.html", {'a': 'B'}))
def home_handler(response):
response.write(render("feed.html", {'a': 'B'}))
def register_handler(response):
response.write(render("register.html", {'a': 'B'}))
def profile_handler(response, user_id):
poh = ProfileHandler(user_id)
response.write(render("profile.html", poh.display_profile()))
def input_handler(response):
aih = ActivityInputHandler(activity_dict)
response.write(render("input_activity.html", aih.get_template_data()))
def updateprofile_handler(response):
response.write(render("update_profile.html", {'a': 'B'}))
def template_demo(response):
response.write(render("test.html", {'a': 'B'}))
def search_handler(response):
response.write(render("search_results.html", {'a': 'B'}))
def login_handler(response):
response.write(render("login.html", {'a': 'B'}))
server = Server()
server.register(r"/", landing_handler)
server.register(r"/home/", home_handler)
server.register(r"/register/", register_handler)
server.register(r"/profile/(\d+)/", profile_handler)
server.register(r"/input/", input_handler)
server.register(r"/updateprofile/", updateprofile_handler)
server.register(r"/search/", search_handler)
server.register(r"/template/", template_demo)
server.register(r"/login/", login_handler)
server.run()
|
[
"tomkitson8@gmail.com"
] |
tomkitson8@gmail.com
|
a7356e39311eceaa408bd348ffa485b4aebebfa2
|
965f71c01a0bf9e541252f79ea7918d4f0963567
|
/learnvenv/bin/django-admin
|
bf57aa7fdfc8766ace3f0bc5d442e5e16f685ef2
|
[] |
no_license
|
empathlabs/my-first-blog
|
b5a1fdc804116873184527e95522b70c72864a6d
|
bf69ce796cadb94f300c911f252910b0b7e7d800
|
refs/heads/master
| 2020-04-05T23:23:02.424842
| 2015-09-02T13:36:44
| 2015-09-02T13:36:44
| 41,733,124
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
#!/Users/luis.morais/djangolearning/learnvenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
|
[
"luis.morais@g109916mac.thisisglobal.com"
] |
luis.morais@g109916mac.thisisglobal.com
|
|
33fe40510a1cc856b3402f89f10238ce8935ca07
|
1449894eff0a437e15da2ac79f4c567e63ea5de5
|
/src/EIS_Optical.py
|
2b43f629f4142e01098a59140e9790e73dc80e53
|
[] |
no_license
|
St659/Electrophotonics
|
1b0c32c29dcdaf5087b2d59e6c4a1a9edcbc59ff
|
7386d89ce2b53fe64564db5a88e9888dd506d572
|
refs/heads/master
| 2020-03-22T18:24:13.818695
| 2018-07-26T23:00:28
| 2018-07-26T23:00:28
| 140,457,700
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,616
|
py
|
from src.EIS_Reader import EISPlotter, EISReader
import os
import matplotlib.pyplot as plt
import numpy as np
eis_directory = '../Data/EIS_Optical/EIS'
photonics_directory = '../Data/EIS_Optical/Photonics'
photonics_files = os.listdir(photonics_directory)
print(photonics_files)
wavelength = np.linspace(498.6174,1103.161,3648)
photonics_fig, photonics_plot = plt.subplots()
w,background = np.genfromtxt(os.path.join(photonics_directory,photonics_files[-1]), unpack=True, delimiter=',')
for file in photonics_files[1:-1]:
w, reflectance = np.genfromtxt(os.path.join(photonics_directory,file), unpack=True, delimiter=',')
normalised_reflectance = np.divide(reflectance,background)
photonics_plot.plot(wavelength, normalised_reflectance)
photonics_plot.set_xlim([850,950])
photonics_plot.set_ylim([0,0.6])
photonics_plot.legend(['100 mM', '10 mM', '1 mM', r'100 $\mu $M', r'10 $\mu $M'])
photonics_plot.set_xlabel('Wavelength (nm)')
photonics_plot.set_ylabel('Reflectance')
eis_files = os.listdir(eis_directory)
fig,eis_mag = plt.subplots()
eis_phase = eis_mag.twinx()
for file in eis_files[1:]:
print(file)
eis_reader = EISReader(os.path.join(eis_directory,file), set_cycle=2)
eis_mag.loglog(eis_reader.eis.frequency, eis_reader.eis.magnitude)
eis_phase.semilogx(eis_reader.eis.frequency, eis_reader.eis.phase, '--')
eis_mag.set_xlabel('Frequency (Hz)')
eis_mag.set_ylabel(r'|Z| ($\Omega $)')
eis_mag.legend(['100 mM', '10 mM', '1 mM', r'100 $\mu $M', r'10 $\mu $M'], loc='center right')
eis_phase.set_ylabel(r'$\angle$ ($\degree $)')
plt.show()
#eis_plotter = EISPlotter(eis_directory)
|
[
"st659@nas10-240-93-32.york.ac.uk"
] |
st659@nas10-240-93-32.york.ac.uk
|
98bd390e05f2cf8fcea011d56550a3ea5f828af6
|
5b602529b1a366116cc8c9f439c4b1dcfbc22603
|
/home/migrations/0003_homedata.py
|
e45a9ba11ea94ce2085216ec1e5b3b6a862ee538
|
[] |
no_license
|
kunmi02/blue
|
db9b0a8239a3cbdbdcab25b4a60d2c0ca248701c
|
a38da7adc4d65aea42acddb8bcb0bd244f8795a0
|
refs/heads/master
| 2020-06-20T21:03:06.372316
| 2019-07-16T18:25:15
| 2019-07-16T18:25:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,042
|
py
|
# Generated by Django 2.2.2 on 2019-06-24 15:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0002_create_homepage'),
]
operations = [
migrations.CreateModel(
name='HomeData',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('yearlyloans', models.CharField(default='240,000', max_length=100)),
('minimuminterest', models.CharField(default='15%', max_length=100)),
('maxmonths', models.CharField(default='24', max_length=100)),
('maxloan', models.CharField(default='1,000,000', max_length=100)),
('customerservice', models.CharField(default='24/7', max_length=100)),
('bluehostemail', models.CharField(default='info@bluecredit.com', max_length=100)),
('phone', models.IntegerField(default=234808080808)),
],
),
]
|
[
"godfredakpan@gmail.com"
] |
godfredakpan@gmail.com
|
e762503fb00302d5490dbea646d3b4e384512c36
|
7067f57fd82b025d595f9bbcbe027753aac55cfb
|
/polls/migrations/0001_initial.py
|
0b505bc0450ec2fd7b5655030493d8f94e61aee2
|
[] |
no_license
|
xjli865/Django-PollsApp
|
78dd79dea7c862baab6f537e9bb51825ba49e024
|
57095b5f0b8b6804cbe97ea32dbe936d0689069c
|
refs/heads/master
| 2021-01-20T19:18:32.585509
| 2016-06-23T19:30:00
| 2016-06-23T19:30:00
| 61,830,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,229
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-08 19:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
),
]
|
[
"xjli865@gmail.com"
] |
xjli865@gmail.com
|
0ca4e078ad5be9eb653daec33bd937412976bb6f
|
b3f092ea07ed57f3749241148a2049a0fb1fc0c6
|
/sc_utils/sc_utils.py
|
e736b50267f6740e2a830f2c057f56b769aa85ff
|
[
"MIT"
] |
permissive
|
flribeiro/sc_utils
|
89c19b66eec63c65a831513efc122a30ef1ecd2e
|
1484f4890557ea2bbb7865403ce5f637f7c84577
|
refs/heads/master
| 2023-01-09T02:50:14.018808
| 2019-09-14T15:59:58
| 2019-09-14T15:59:58
| 187,886,062
| 0
| 0
|
NOASSERTION
| 2022-12-26T20:47:38
| 2019-05-21T17:41:36
|
Python
|
UTF-8
|
Python
| false
| false
| 7,169
|
py
|
import os
import re
import string
import argparse
import sys
import uuid
from random import randint
def validar_cpf(ns):
cpf = ns.cpf
"""
Retorna o CPF válido sanitizado ou False.
# CPFs corretos
>>> validar_cpf('123.456.789-09')
'12345678909'
>>> validar_cpf('98765432100')
'98765432100'
>>> validar_cpf(' 123 123 123 87 ')
'12312312387'
# CPFs incorretos
>>> validar_cpf('12345678900')
False
>>> validar_cpf('1234567890')
False
>>> validar_cpf('')
False
>>> validar_cpf(None)
False
"""
cpf = ''.join(re.findall(r'\d', str(cpf)))
    if not cpf or len(cpf) < 11:
        print('Invalid CPF.')
        return False
antigo = [int(d) for d in cpf]
    # Generate a CPF with freshly computed check digits and compare with the input CPF
novo = antigo[:9]
while len(novo) < 11:
resto = sum([v * (len(novo) + 1 - i) for i, v in enumerate(novo)]) % 11
digito_verificador = 0 if resto <= 1 else 11 - resto
novo.append(digito_verificador)
if novo == antigo:
        print(f'Valid CPF: {cpf}')
        return cpf
    print('Invalid CPF.')
    return False
'''
* This function generates a valid CPF number.
* @param {Boolean} formatar - whether the CPF number should be generated with the dots and hyphen.
* @return {String} CPF
*
* Formation rule
*
* A CPF number has exactly 9 digits in its root plus two check digits, which come last.
* A CPF therefore has 11 digits. It is written in the form abcdefghi-jk, or directly as
* abcdefghijk, where the digits cannot all be equal to one another.
*
* abc.def.ghi-jk
*
* j is called the 1st check digit of the CPF number.
*
* k is called the 2nd check digit of the CPF number.
*
* First digit
*
* To obtain j, multiply a, b, c, d, e, f, g, h and i by the corresponding constants
* and add up the products:
*
* S = 10a + 9b + 8c + 7d + 6e + 5f + 4g + 3h + 2i
*
* Divide the sum by 11 and treat the remainder as follows:
*
* if the remainder is 0 or 1, j is 0 (zero)
* if the remainder is 2, 3, 4, 5, 6, 7, 8, 9 or 10, j is 11 - remainder
*
* To obtain k, multiply a, b, c, d, e, f, g, h, i and j by the corresponding constants
* and add up the products:
*
* S = 11a + 10b + 9c + 8d + 7e + 6f + 5g + 4h + 3i + 2j
*
* Divide the sum by 11 and treat the remainder as follows:
*
* if the remainder is 0 or 1, k is 0 (zero)
* if the remainder is 2, 3, 4, 5, 6, 7, 8, 9 or 10, k is 11 - remainder
*
'''
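# A worked example of the rule above (illustrative numbers, not from the original source):
# for the root 111444777, S = 10*1 + 9*1 + 8*1 + 7*4 + 6*4 + 5*4 + 4*7 + 3*7 + 2*7 = 162,
# and 162 % 11 = 8, so j = 11 - 8 = 3; then S = 11*1 + 10*1 + 9*1 + 8*4 + 7*4 + 6*4
# + 5*7 + 4*7 + 3*7 + 2*3 = 204, and 204 % 11 = 6, so k = 11 - 6 = 5,
# giving the valid CPF 111.444.777-35.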
def geradorDeCpf(ns):
formatar = ns.format
    # 9 random digits
arNumeros = []
for i in range(9):
arNumeros.append( randint(0,9) )
    # Compute the first check digit (DV)
somaJ = ( arNumeros[0] * 10 ) + ( arNumeros[1] * 9 ) + ( arNumeros[2] * 8 ) + ( arNumeros[3] * 7 ) + ( arNumeros[4] * 6 ) + ( arNumeros[5] * 5 ) + ( arNumeros[6] * 4 ) + ( arNumeros[7] * 3 ) + ( arNumeros[8] * 2 )
restoJ = somaJ % 11
if ( restoJ == 0 or restoJ == 1 ):
j = 0
else:
j = 11 - restoJ
arNumeros.append( j )
    # Compute the second check digit (DV)
somaK = ( arNumeros[0] * 11 ) + ( arNumeros[1] * 10 ) + ( arNumeros[2] * 9 ) + ( arNumeros[3] * 8 ) + ( arNumeros[4] * 7 ) + ( arNumeros[5] * 6 ) + ( arNumeros[6] * 5 ) + ( arNumeros[7] * 4 ) + ( arNumeros[8] * 3 ) + ( j * 2 )
restoK = somaK % 11
if ( restoK == 0 or restoK == 1 ):
k = 0
else:
k = 11 - restoK
arNumeros.append( k )
cpf = ''.join(str(x) for x in arNumeros)
if formatar:
return cpf[ :3 ] + '.' + cpf[ 3:6 ] + '.' + cpf[ 6:9 ] + '-' + cpf[ 9: ]
else:
return cpf
def geradorDeUuid(ns):
return uuid.uuid4()
def pontes_comandos(ns):
switcher = {
'cartao': f'ssh -L {ns.porta}:10.215.226.71:3306 {ns.user}@bastion-sa-vpc-shared.gcp.luizalabs.com',
'antifraude': f'ssh -L {ns.porta}:10.215.226.72:3306 {ns.user}@bastion-sa-vpc-shared.gcp.luizalabs.com',
'nickfury': f'ssh -L {ns.porta}:10.215.226.45:3306 {ns.user}@bastion-sa-vpc-shared.gcp.luizalabs.com',
        'cdc': 'echo Bridge unavailable.',
        'valecompra': 'echo Bridge unavailable.'
    }
    # the default must be a plain string, not a lambda, since the result is passed to os.system()
    return switcher.get(ns.nome, 'echo Bridge not defined.')
def ponte(ns):
    print(f'Establishing bridge {ns.nome} as user {ns.user} on port {ns.porta}.')
os.system(pontes_comandos(ns))
def estabelece_vpn(ns):
    os.system('sudo vpnc vpn-ML.conf')
def select_generators(ns):
switcher = {
'cpf': geradorDeCpf,
'uuid': geradorDeUuid
}
    # default must accept ns, since func is called as func(ns) below
    func = switcher.get(ns.type, lambda ns: 'Invalid generator type.')
for n in range(ns.num):
print(func(ns))
def parse_calling(choice, ns):
switcher = {
'gen': select_generators,
'val': validar_cpf,
'p': ponte,
'vpn': estabelece_vpn,
}
    # default must accept ns, since func is called as func(ns) below
    func = switcher.get(choice, lambda ns: 'Invalid option.')
func(ns)
def main():
parser = argparse.ArgumentParser(
prog='sc_utils',
        description='CLI app for everyday automation '
                    'tasks for Squad Crédito.',
epilog='Squad Crédito - Luizalabs, 2019.'
)
subparsers = parser.add_subparsers(
        title='subcommands',
        description='valid commands',
        help='additional help',
dest='sp_name'
)
g_parser = subparsers.add_parser(
'gen',
        help='CPF/UUID generator'
)
g_parser.add_argument(
'-t', '--type',
        help='Generate a valid CPF or a UUID',
choices=[
'cpf',
'uuid'
]
)
g_parser.add_argument(
'-n', '--num',
        help='Number of items to generate',
type=int,
default=1
)
g_parser.add_argument(
'-f', '--format',
        help='Format the output of generated CPFs',
action='store_true'
)
v_parser = subparsers.add_parser(
'val',
        help='CPF validator'
)
v_parser.add_argument(
'cpf',
        help='CPF to validate (digits only)',
type=str
)
p_parser = subparsers.add_parser(
'p',
        help='establish one of the bridges'
)
p_parser.add_argument(
'-n', '--nome',
        help='name of the bridge to establish',
choices=['cartao', 'antifraude', 'nickfury', 'cdc', 'valecompra']
)
p_parser.add_argument(
'-p', '--porta',
        help='port used for the bridge connection',
type=int,
default=3390
)
p_parser.add_argument(
'-u', '--user',
        help='user for authentication',
type=str
)
vpn_parser = subparsers.add_parser(
'vpn',
        help='establish the VPN'
)
vpn_parser.add_argument(
'vpn',
action='store_true',
default=True,
help=argparse.SUPPRESS
)
parser.add_argument(
'--version',
action='version',
version='%(prog)s 1.0'
)
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
sys.exit(1)
args = parser.parse_args()
# print(vars(args))
parse_calling(args.sp_name, args)
if __name__ == "__main__":
main()
|
[
"fabricio.ribeiro@luizalabs.com"
] |
fabricio.ribeiro@luizalabs.com
|
8940ba8031a5ac9ba2c91447e1fe6306a34b339d
|
0d03f5bbca543abab5831affc9c776bc536c97f2
|
/fibonacci.py
|
d61db7b4c1c15874a6a9abc643fe04c2a72ddbe8
|
[] |
no_license
|
kawausomando/python-tutorial
|
a77e7488b020cafef5f0f2cc7e5359c7ce78cf6d
|
cccd02fe8ec270a4db7627fd4b2d5ba6037c57d8
|
refs/heads/master
| 2020-09-01T05:40:48.381131
| 2019-12-11T03:55:36
| 2019-12-11T03:55:36
| 218,891,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 123
|
py
|
# Fibonacci series
# multiple assignment is possible, as shown here
a, b = 0, 1
while b < 10:
print(b)
a, b = b, a+b
|
[
"happy.trail0912+github@gmail.com"
] |
happy.trail0912+github@gmail.com
|
28c3ffb6bbd7754e4df12ec0219e684700edecc8
|
fca7013508d883e1f1c0b030aca2fdd69c39f7ce
|
/Dynamic Programming 2/Partition.py
|
d4fe7918b4b1f34e4fc49b06eb0c803a4909fea5
|
[] |
no_license
|
AnshuGit2/AnshuGit2-Algorithmic-Design-and-Techniques
|
4c686e7d8093e0cc6ae1503b51993572a066efb7
|
484a6db184f290cc9652b0cbd7bce0bcada59f1a
|
refs/heads/main
| 2023-08-17T12:57:57.666744
| 2021-01-18T00:16:22
| 2021-01-18T00:16:22
| 329,479,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 851
|
py
|
# Python 3
import numpy
def partitions(W, n, items):
""" Finds if number of partitions having capacity W is >=3
(int, int, list) -> (int) """
count = 0
value = numpy.zeros((W+1, n+1))
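    # value[i][j] = maximum total weight <= i achievable using the first j items
    # (classic 0/1 knapsack without repetition; row = capacity, column = item prefix)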
for i in range(1, W+1):
for j in range(1, n+1):
value[i][j] = value[i][j-1]
if items[j-1]<=i:
temp = value[i-items[j-1]][j-1] + items[j-1]
if temp > value[i][j]:
value[i][j] = temp
if value[i][j] == W: count += 1
if count < 3: print('0')
else: print('1')
if __name__ == '__main__':
n = int(input())
item_weights = [int(i) for i in input().split()]
total_weight = sum(item_weights)
if n<3:
print('0')
elif total_weight%3 != 0:
print('0')
else:
partitions(total_weight//3, n, item_weights)
|
[
"noreply@github.com"
] |
noreply@github.com
|
8ea5abe49cbb899830e548978eaad6cef1c2e987
|
c69e4107617e09db383554481ebd3eb7a3247bea
|
/script_colorcolor.py
|
2a3c52c340fc25e36ade9b9c5df034eeed5d5860
|
[] |
no_license
|
niliafsari/KSP-SN
|
09e4e03da92500598557a953c1429187e3d8f854
|
9b18a2f405060611b61bb3d49283c50cdfe904ee
|
refs/heads/master
| 2020-06-26T21:53:31.372225
| 2018-11-26T16:29:31
| 2018-11-26T16:29:31
| 97,030,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 562
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import U # values for constants
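# B(nu, T, muOnKT) below is proportional to a Planck-like (Bose-Einstein) spectrum,
# nu^3 / (exp(h*nu/(kB*T) - mu/(kB*T)) - 1), where muOnKT = mu/(kB*T) is a dimensionless
# chemical potential; U.Hplanck and U.Kb are assumed to hold h and kB in consistent units.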
B = lambda nu, T, muOnKT: nu**3 * 1./(np.exp(U.Hplanck/U.Kb*nu/T-muOnKT) - 1)
nu_B = U.Clight/(4378*U.Angstrom)
nu_V = U.Clight/(5466*U.Angstrom)
nu_I = U.Clight/(8565*U.Angstrom)
BminusV = lambda T,muOnKT: -2.5*np.log10(B(nu_B,T,muOnKT)/B(nu_V,T,muOnKT) )
VminusI = lambda T,muOnKT: -2.5*np.log10(B(nu_V,T,muOnKT)/B(nu_I,T,muOnKT) )
plt.figure()
for muOnKT in np.arange(0, 1, .1):
    for T in np.arange(4000, 15000, 100):
        # use the loop's muOnKT here; the original passed 0, leaving the outer loop unused
        plt.plot(VminusI(T, muOnKT), BminusV(T, muOnKT), 's')
plt.show()
|
[
"nlr.afsari@gmail.com"
] |
nlr.afsari@gmail.com
|
816ccf13d545d21c6a8991fbbd5db56841a3fd65
|
4eab1bd9e1b00155872e44963a5df0532cb5341f
|
/menus/menuTwo.py
|
ebbc521d4e91603c346648b2c0ccb7a4a9256571
|
[] |
no_license
|
soheilpaper/python-gui
|
9b067467ca41d27092e5817d0a49162b10c37de6
|
4e6bcad319829dd2c0fdc328520a55a7932060c7
|
refs/heads/master
| 2020-12-31T04:29:16.798703
| 2016-04-08T08:41:59
| 2016-04-08T08:41:59
| 55,763,643
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,595
|
py
|
import wx
########################################################################
class MyForm(wx.Frame):
""""""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
wx.Frame.__init__(self, None, title="wx.Menu Tutorial")
self.panel = wx.Panel(self, wx.ID_ANY)
# create the menubar
menuBar = wx.MenuBar()
# create the first menu (starting on left)
carMenu = wx.Menu()
carMenu.Append(101, "&Ford", "An American Automaker")
carMenu.Append(102, "&Nissan", "")
carMenu.Append(103, "&Toyota", "Buy Japanese!")
carMenu.Append(104, "&Close", "Close the application")
# add a picture to a menu
picMenu = wx.Menu()
item = wx.MenuItem(picMenu, wx.ID_ANY, "Snake", "This menu has a picture!")
img = wx.Image('snake32.bmp', wx.BITMAP_TYPE_ANY)
item.SetBitmap(wx.BitmapFromImage(img))
picMenu.AppendItem(item)
# add menus to menubar
menuBar.Append(carMenu, "&Vehicles")
menuBar.Append(picMenu, "&Picture")
        self.SetMenuBar(menuBar)
        # bind the "Close" item (id 104) to onExit; the handler existed but was never wired up
        self.Bind(wx.EVT_MENU, self.onExit, id=104)
#----------------------------------------------------------------------
def onExit(self, event):
""""""
self.Close()
#----------------------------------------------------------------------
# Run the program
if __name__ == "__main__":
app = wx.App(False)
    frame = MyForm()
    frame.Show()
app.MainLoop()
|
[
"soheil_paper@yahoo.com"
] |
soheil_paper@yahoo.com
|
dc695c39f94e3e5033de601ceb8c069fc23d9a58
|
41731334f3f792b731acb807eaf5059f6be6fb3b
|
/Gayathri-Chandrasekaran-individual-project/Code/mywork/modeling_seq2seq.py
|
c8b514440da2b8d335fccf543ae3788ed5891286
|
[
"MIT"
] |
permissive
|
redhairedcelt/Final-Project-Group1
|
591350cdad18360fea9625fca6845f3225315098
|
084ebc51cdaeb3d61a673bdde2214b73a0438b41
|
refs/heads/main
| 2023-03-27T04:25:32.822161
| 2021-03-29T10:08:54
| 2021-03-29T10:08:54
| 307,972,757
| 1
| 3
|
MIT
| 2020-12-23T22:31:01
| 2020-10-28T09:54:50
|
Python
|
UTF-8
|
Python
| false
| false
| 12,263
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from pickle import load
from sklearn.metrics import classification_report, cohen_kappa_score, confusion_matrix
from keras.models import Model
from keras.layers import Dense, Input
from keras.layers import LSTM
# set run name
run_name = 'DL_50'
# set model name
model_name = 'seq2seq'
print(f'Run name is {run_name} and model name is {model_name}.')
# load tokenizer, get vocab_size, and load x, y
tokenizer = load(open(f'/home/ubuntu/Final-Project-Group1/Models/{run_name}_tokenizer.pkl', 'rb'))
vocab_size = len(tokenizer.word_index) + 1
x_train = np.load(f'/home/ubuntu/Final-Project-Group1/Data 2/{run_name}_x_train.npy')
y_train = np.load(f'/home/ubuntu/Final-Project-Group1/Data 2/{run_name}_y_train.npy')
x_test = np.load(f'/home/ubuntu/Final-Project-Group1/Data 2/{run_name}_x_test.npy')
y_test = np.load(f'/home/ubuntu/Final-Project-Group1/Data 2/{run_name}_y_test.npy')
# Undo one hot encoding on target variables
y_train = np.argmax(y_train, axis=1)
y_train = np.reshape(y_train, (len(y_train), -1))
y_test = np.argmax(y_test, axis=1)
y_test = np.reshape(y_test, (len(y_test), -1))
# Combine x and y to be split however works best
x_train = np.concatenate((x_train, y_train), axis=1)
# Do the same for test data
x_test = np.concatenate((x_test, y_test), axis=1)
del y_train, y_test # No longer needed
# Check shapes before splitting
print(x_train.shape)
print(x_test.shape)
print()
# Define length of beginning sequence
split = 49
# Train a model for the chosen split
# Results and models are saved under /home/ubuntu/Final-Project-Group1/Models/ for the current run
seq_beg, other, seq_end = np.split(x_train, [split, split], axis=1) # Split data and analyze shapes
print(other.shape)
del other # remove this useless output
print(seq_beg.shape)
print(seq_end.shape)
print()
# Add special beginning and end tags to training ending sequences
seq_end = np.insert(seq_end, 0, 1111, axis=1)
seq_end = np.insert(seq_end, seq_end.shape[1], 9999, axis=1)
print(seq_end.shape)
print()
# Also split test data and analyze shapes
seq_beg_test, other, seq_end_test = np.split(x_test, [split, split], axis=1)
print(other.shape)
del other
print(seq_beg_test.shape)
print(seq_end_test.shape)
print()
# Add special beginning and end tags to testing ending sequences
seq_end_test = np.insert(seq_end_test, 0, 1111, axis=1)
seq_end_test = np.insert(seq_end_test, seq_end_test.shape[1], 9999, axis=1)
print(seq_end_test.shape)
print()
# Store all unique airport IDs in a list
airports = x_train.flatten().tolist()
airports.append(1111) # Add the special characters so they will be in dictionaries
airports.append(9999)
airports = set(airports)
airports = sorted(list(airports))
# dictionary to index each airport - key is index and value is airport
index_to_airport_dict = {}
# dictionary to get airport given its index - key is airport and value is index
airport_to_index_dict = {}
for k, v in enumerate(airports):
index_to_airport_dict[k] = v
airport_to_index_dict[v] = k
# Get empty numpy arrays to tokenize the training sequences
tokenized_seq_beg = np.zeros(shape=(seq_beg.shape[0], seq_beg.shape[1], len(airports)), dtype='float32')
tokenized_seq_end = np.zeros(shape=(seq_end.shape[0], seq_end.shape[1], len(airports)), dtype='float32')
target_data = np.zeros(shape=(seq_end.shape[0], seq_end.shape[1], len(airports)), dtype='float32')
# Vectorize the beginning and ending sequences for training data
for i in range(seq_beg.shape[0]):
for k, ch in enumerate(seq_beg[i]):
tokenized_seq_beg[i, k, airport_to_index_dict[ch]] = 1
for k, ch in enumerate(seq_end[i]):
tokenized_seq_end[i, k, airport_to_index_dict[ch]] = 1
# decoder_target_data will be ahead by one timestep and will not include the start airport.
if k > 0:
target_data[i, k - 1, airport_to_index_dict[ch]] = 1
# Get empty numpy array to tokenize the beginning test sequences to be fed at evaluation time
tokenized_seq_beg_test = np.zeros(shape=(seq_beg_test.shape[0], seq_beg_test.shape[1], len(airports)), dtype='float32')
# Vectorize the beginning sequences for test data to be fed to encoder
for i in range(seq_beg_test.shape[0]):
for k, ch in enumerate(seq_beg_test[i]):
tokenized_seq_beg_test[i, k, airport_to_index_dict[ch]] = 1
# hyperparameters
N_NEURONS = 256
N_EPOCHS = 6
BATCH_SIZE = 64
# Encoder Model
encoder_input = Input(shape=(None, len(airports)))
encoder_LSTM = LSTM(N_NEURONS, return_state=True)
encoder_outputs, encoder_h, encoder_c = encoder_LSTM(encoder_input)
encoder_states = [encoder_h, encoder_c] # These states are passed to decoder LSTM layer
# Decoder model
decoder_input = Input(shape=(None, len(airports)))
decoder_LSTM = LSTM(N_NEURONS, return_sequences=True, return_state=True)
decoder_out, _, _ = decoder_LSTM(decoder_input, initial_state=encoder_states)
decoder_dense = Dense(len(airports), activation='softmax')
decoder_out = decoder_dense(decoder_out)
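# Training below uses teacher forcing: the decoder is fed the ground-truth ending
# sequence as input and is trained to emit the same sequence shifted one step ahead
# (target_data built above), conditioned on the encoder's final LSTM states.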
model = Model(inputs=[encoder_input, decoder_input], outputs=[decoder_out])
# Run training
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(x=[tokenized_seq_beg, tokenized_seq_end], y=target_data,
batch_size=BATCH_SIZE, epochs=N_EPOCHS, validation_split=.2)
# Visualize training process
plt.plot(history.history['loss'], label='Categorical crossentropy loss (training data)')
plt.plot(history.history['val_loss'], label='Categorical crossentropy loss (validation data)')
plt.title(f'Categorical crossentropy loss for {run_name}_{model_name}')
plt.ylabel('Categorical crossentropy loss value')
plt.yscale('log')
plt.xlabel('No. epoch')
plt.legend(loc="upper left")
plt.show()
# save the model to file
model.save(f'/home/ubuntu/Final-Project-Group1/Models/{run_name}_{model_name}_model.h5')
# save history
df_history = pd.DataFrame.from_dict(history.history)
df_history.to_csv(f'/home/ubuntu/Final-Project-Group1/Models/{run_name}_{model_name}_history.csv')
######################################################################################################################
# Evaluate model with test data
# Encoder inference model
encoder_model_inf = Model(encoder_input, encoder_states)
# Decoder inference model
decoder_state_input_h = Input(shape=(N_NEURONS,))
decoder_state_input_c = Input(shape=(N_NEURONS,))
decoder_input_states = [decoder_state_input_h, decoder_state_input_c]
decoder_out, decoder_h, decoder_c = decoder_LSTM(decoder_input, initial_state=decoder_input_states)
decoder_states = [decoder_h, decoder_c]
decoder_out = decoder_dense(decoder_out)
decoder_model_inf = Model(inputs=[decoder_input] + decoder_input_states,
outputs=[decoder_out] + decoder_states)
def decode_seq(inp_seq):
# Get initial states by feeding beginning of a test sequence to encoder
states_val = encoder_model_inf.predict(inp_seq)
# Set first target sequence to be 1111 (one hot encoded)
decoder_input = np.zeros((1, 1, len(airports)))
decoder_input[0, 0, airport_to_index_dict[1111]] = 1
# Start predicted airplane route with special character
airplane_route = [1111]
stop_condition = False
# Predict the next airports
while not stop_condition:
decoder_out, decoder_h, decoder_c = decoder_model_inf.predict(x=[decoder_input] + states_val)
# Get index of predicted airport
max_val_index = np.argmax(decoder_out[0, -1, :])
sampled_airport = index_to_airport_dict[max_val_index]
# Append predicted airport to list representing predicted sequence
airplane_route.append(sampled_airport)
# If predictions surpass the ending sequence length or model predicts 9999 indicating end of sequence
if (sampled_airport == 9999) or (len(airplane_route) > (seq_end.shape[1] - 1)):
stop_condition = True
# Update predicted airport to be fed to decoder model
decoder_input = np.zeros((1, 1, len(airports)))
decoder_input[0, 0, max_val_index] = 1
# Get states for predicting next character
states_val = [decoder_h, decoder_c]
return airplane_route # Return predicted sequence
cumulative_predictions = [] # To accumulate all predictions
cumulative_actuals = [] # To accumulate all actual labels
cumulative_accuracy = 0
test_sequences = 5000
drops = [1111, 9999] # specify beg and end tags to drop for evaluation
# Loop through test data and feed input sequences to encoder model
loop_count = 0
print('Beginning inference...')
for seq_index in range(test_sequences):
inp_seq = tokenized_seq_beg_test[seq_index]
inp_seq = np.expand_dims(inp_seq, axis=0) # Resize to go into encoder model
pred_airplane_route = decode_seq(inp_seq)
# Drop beginning and end tags before calculating evaluation metrics
pred_airplane_route = [_ for _ in pred_airplane_route if _ not in drops]
actual_airplane_route = seq_end_test[seq_index]
actual_airplane_route = [_ for _ in actual_airplane_route if _ not in drops]
# print('-')
# print('Input sequence:', seq_beg_test[seq_index])
# print('Predicted output sequence:', pred_airplane_route)
# print('Actual output sequence:', actual_airplane_route)
# print('Actual whole sequence', x_test[seq_index])
correct, incorrect = 0, 0 # To keep track of right and wrong predictions
for _ in range(len(actual_airplane_route)):
if pred_airplane_route[_] == actual_airplane_route[_]:
correct += 1
else:
incorrect += 1
# Append predictions and labels to huge lists for classification report
cumulative_predictions.append(pred_airplane_route[_])
cumulative_actuals.append(actual_airplane_route[_])
accuracy = correct / (correct + incorrect)
#print('Test Accuracy', accuracy) # This gives the accuracy on each test sequence
cumulative_accuracy += accuracy # Accumulate accuracy from all test sequences to be averaged later
#loop_count += 1
#print('Processing test sequence ' + str(loop_count) + ' out of ' + str(test_sequences))
######################################################################################################################
# Evaluate model performance on test data
cumulative_accuracy = cumulative_accuracy / test_sequences # Gets accuracy over all test sequences used
print()
# Get classification report
class_report = classification_report(cumulative_actuals, cumulative_predictions, output_dict=True)
print(class_report)
print()
# Get confusion matrix
conf_mat = confusion_matrix(y_true=cumulative_actuals, y_pred=cumulative_predictions)
print(conf_mat)
print()
# Get accuracy
print('Accuracy:', cumulative_accuracy)
# Get Cohens Kappa
ck_score = cohen_kappa_score(cumulative_actuals, cumulative_predictions)
print('Cohens Kappa:', ck_score)
# Get precision
print('Precision:', class_report['weighted avg']['precision'])
# Get recall
print('Recall:', class_report['weighted avg']['recall'])
# Get F1
print('F1:', class_report['weighted avg']['f1-score'])
# Get support
print('Support:', class_report['weighted avg']['support'])
# Create dataframe from classification report
df_class_report = pd.DataFrame(class_report).T.iloc[:-3,:]
df_class_report.sort_values('f1-score', inplace=True)
print(df_class_report)
# Plot the classes (airports) as a scatterplot colored by F1 and sized by total numbed of flights from each airport.
# g = sns.scatterplot(x='precision', y='recall', size='support',
# hue='f1-score', data=df_class_report)
# plt.title("Scatterplot of Model's Precision and Recall, \nColored by F1 Score, Sized by Number of Flights")
# plt.show()
plt.scatter(df_class_report['precision'], df_class_report['recall'], s=df_class_report['support'],
c=df_class_report['f1-score'])
plt.title(f"Scatterplot of {model_name}_{run_name} Precision and Recall, \nColored by F1 Score, Sized by Number of Flights")
plt.show()
# proc log
log_name = f'/home/ubuntu/Final-Project-Group1/Logs/{model_name}'
log = open(log_name, 'a+')
log.write(f'{model_name} for {run_name} scored {cumulative_accuracy} accuracy and {ck_score} cohen_kappa score. \n')
log.close()
|
[
"patrickfmaus@gwu.edu"
] |
patrickfmaus@gwu.edu
|
7358cda3629e79200fe58e47c0f254cdd0af3523
|
1d7ae7f6e7a0df98d92f9ec5f277752d14924a94
|
/fake-very-small-test/wrong_case/pytorch_bike_dqn_test-small-with-former-a-trick.py
|
5916cee0a242517a8af7ef2d2c50f65db11824f0
|
[] |
no_license
|
lindsaymorgan/Mobike-Bike-Sharing-System-Dispatch-Optimization-Using-Reinforcement-Learning
|
1e6b1aa3c64d2ff2e31b5d9dcc4abdc11e10679c
|
6c8a329fae5c2ac8db45a3d8c55b308aae8ad804
|
refs/heads/master
| 2023-05-02T07:39:49.089459
| 2021-05-23T02:26:14
| 2021-05-23T02:26:14
| 279,467,461
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,003
|
py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import matplotlib.pyplot as plt
import pandas as pd
import random
import time
# hyper parameters
EPSILON = 0.85
GAMMA = 0.99
LR = 0.001
MEMORY_CAPACITY = 3000
Q_NETWORK_ITERATION = 2000
BATCH_SIZE = 128
EPISODES = 20000
need = pd.read_csv('../fake_4region_trip_20170510.csv')
ts=int(time.time())
class Env(object):
def __init__(self, region_num, move_amount_limit, eps_num):
self.region_num = region_num
self.move_amount_limit = move_amount_limit
self.action_dim = region_num * (2 * move_amount_limit + 1)
self.obs_dim = 2 * region_num + 1
self.episode_num = eps_num
self.start_region = need.groupby('start_region')
self.end_region = need.groupby('end_region')
self.t_index = {i: str(i) for i in range(eps_num)}
self.out_nums = np.array([self.start_region[str(i)].agg(np.sum) for i in range(eps_num)])
self.in_nums = np.array([self.end_region[str(i)].agg(np.sum) for i in range(eps_num)])
self.t = 0
        self.obs_init = np.array([15, 15, 15, 15, 0, 0, 0, 0, 15, 15, 15, 15, 0, 0])  # bikes in each cell + truck position + bikes on the truck
self.obs_init[-self.region_num-2:-2] -= self.out_nums[0, ]
def init(self):
self.obs = self.obs_init.copy()
self.t = 0
return np.append(self.obs, self.t)
def step(self, action):
        # advance the time step
self.t += 1
if self.t == self.episode_num-1:
done = True
else:
done = False
        self.obs[:self.region_num+2] = self.obs[-self.region_num-2:]  # update the current-state block
region = int(np.floor(action / (2 * self.move_amount_limit + 1)))
move = action % (2 * self.move_amount_limit + 1) - self.move_amount_limit
        # Update the bike distribution
        # Add bikes that rode in during the previous period
self.obs[-self.region_num-2:-2] += self.in_nums[self.t - 1, ]
reward = 0
        # Filter out infeasible cases: if the move is feasible, relocate the bikes and update the truck state; otherwise do nothing
if move + self.obs[-self.region_num-2+region] >= 0 and move <= self.obs[-1] \
and (self.obs[-self.region_num-2+region]- self.out_nums[self.t,region])*move<=0:
self.obs[-self.region_num-2+region] += move
            # Update the truck state
            self.obs[-1] -= move  # bikes on the truck
            self.obs[-2] = region  # truck position
            # Record the action history
            self.obs[-self.region_num-2-1] = move  # number of bikes moved
            self.obs[-self.region_num-2-2] = region  # truck position
self.obs[-self.region_num-2:-2] -= self.out_nums[self.t, ]
reward = np.sum(self.obs[-self.region_num-2:-2][self.obs [-self.region_num-2:-2]< 0])
self.obs[-self.region_num-2:-2][self.obs [-self.region_num-2:-2]< 0] = 0
return np.append(self.obs, self.t), reward, done
class Net(nn.Module):
def __init__(self, NUM_STATES):
super(Net, self).__init__()
EMB_SIZE = 10
OTHER_SIZE = NUM_STATES-2 # fixme: update this value based on the input
self.fc1 = nn.Linear(OTHER_SIZE + EMB_SIZE * 4, 256).cuda()
# self.fc1.weight.data.normal_(0, 0.1)
self.fc2 = nn.Linear(256, 64).cuda()
# self.fc2.weight.data.normal_(0, 0.1)
self.fc3 = nn.Linear(64, 1).cuda()
# self.fc3.weight.data.normal_(0, 0.1)
self.m = nn.Dropout(p=0.2).cuda()
self.emb = nn.Embedding(NUM_STATES, EMB_SIZE).cuda()
def forward(self, x: torch.cuda.FloatTensor, stations: torch.cuda.LongTensor):
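        # embed the discrete station/region ids, flatten, and concatenate them with
        # the continuous features before the fully connected layers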
emb = self.emb(stations).flatten(start_dim=1)
x = torch.cat([x, emb], 1)
x = self.fc1(x)
x = F.relu(x)
# x = self.m(x)
x = self.fc2(x)
x = F.relu(x)
# x = self.m(x)
x = self.fc3(x)
return x
class Dqn():
def __init__(self, NUM_STATES, NUM_ACTIONS, region_num,move_amount_limit, eps_num):
self.eval_net, self.target_net = Net(NUM_STATES), Net(NUM_STATES)
self.target_net.load_state_dict(self.eval_net.state_dict())
self.memory = np.zeros((MEMORY_CAPACITY, NUM_STATES * 2 + 2))
# state, action ,reward and next state
self.memory_counter = 0
self.learn_counter = 0
self.optimizer = optim.Adam(self.eval_net.parameters(), LR)
self.loss = nn.MSELoss()
self.NUM_ACTIONS = NUM_ACTIONS
self.NUM_STATES = NUM_STATES
self.move_amount_limit = move_amount_limit
self.region_num=region_num
self.fig, self.ax = plt.subplots()
self.start_region = need.groupby('start_region')
self.end_region = need.groupby('end_region')
self.t_index = {i: str(i) for i in range(eps_num)}
self.out_nums = np.array([self.start_region[str(i)].agg(np.sum) for i in range(eps_num)])
def store_trans(self, state, action, reward, next_state):
if self.memory_counter % 10 == 0:
print("The experience pool collects {} time experience".format(self.memory_counter))
index = self.memory_counter % MEMORY_CAPACITY
trans = np.hstack((state, [action], [reward], next_state))
self.memory[index,] = trans
self.memory_counter += 1
def choose_action(self, state,EPSILON):
        # note: this returns the action's index, not the action itself
# EPSILON
# state = torch.unsqueeze(torch.FloatTensor(state) ,0)
# feasible action
# print(EPSILON)
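        # epsilon-greedy: with probability 1 - EPSILON act greedily via the network,
        # otherwise sample uniformly from the feasible actions (EPSILON decays in main())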
if random.random() > EPSILON:
action=self.predict(state)
else:
feasible_action = list()
for action in range(self.NUM_ACTIONS):
move = action % (2 * self.move_amount_limit + 1) - self.move_amount_limit
region = int(np.floor(action / (2 * self.move_amount_limit + 1)))
if move + state[-self.region_num - 2 + region] >= 0 and move <= state[-2] and \
(state[-self.region_num-2+region]- self.out_nums[state[-1],region])*move<=0:
feasible_action.append(action)
action = random.choice(feasible_action)
return action
def predict(self, state):
        # note: this returns the action's index, not the action itself
# EPSILON
# feasible action
feasible_action = list()
state_1 = [j for i, j in enumerate(state) if
i not in [self.region_num, self.region_num + 2, 2 * self.region_num + 4]]
state_2 = [j for i, j in enumerate(state) if
i in [self.region_num, self.region_num + 2, 2 * self.region_num + 4]]
tmp_x = list()
tmp_y = list()
for action in range(self.NUM_ACTIONS):
move = action % (2 * self.move_amount_limit + 1) - self.move_amount_limit
region = int(np.floor(action / (2 * self.move_amount_limit + 1)))
if move + state[-self.region_num - 2 + region] >= 0 and move <= state[-2]\
and (state[-self.region_num-2+region]- self.out_nums[state[-1],region])*move<=0:
feasible_action.append(action)
tmp_x.append(np.concatenate([state_1, np.array([move])]))
tmp_y.append(np.concatenate([state_2, np.array([region])]))
x = torch.FloatTensor(tmp_x).cuda()
station = torch.LongTensor(tmp_y).cuda()
action_val = self.target_net.forward(x, station)
        max_indice = [i for i, j in enumerate([i[0] for i in action_val]) if
                      j == np.max([i[0] for i in action_val])]  # indices of the maximum value
        action = feasible_action[random.choice(max_indice)]  # break ties randomly and map back to an action
return action
def plot(self, ax, x):
ax.cla()
ax.set_xlabel("episode")
ax.set_ylabel("total reward")
ax.plot(x, 'b-')
plt.pause(0.000000000000001)
def learn(self):
# learn 100 times then the target network update
if self.learn_counter % Q_NETWORK_ITERATION == 0:
self.target_net.load_state_dict(self.eval_net.state_dict())
self.learn_counter += 1
if self.learn_counter % 50 == 0:
test_x=torch.FloatTensor([[11,12,12,7,0,0,5,5,3,0,0,1,-5],[5,5,3,0,0,0,10,11,0,3,0,2,-10],
[11,12,12,7,0,-1,4,5,3,0,1,1,-5],[10,8,0,3,3,3,8,9,0,0,0,3,-9]]).cuda()
test_station=torch.LongTensor([[0,3,3,0],[3,0,0,0],[0,0,0,0],[1,3,3,0]]).cuda()
action_val = self.target_net.forward(test_x, test_station)
print(np.mean(action_val.cpu().detach().numpy()), file=open(f"result_history/actionless_output_action_value_{ts}.txt", "a"))
sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)
        # slice out (s, a, r, s') transitions
batch_memory = self.memory[sample_index, :]
batch_reward = torch.FloatTensor(batch_memory[:, self.NUM_STATES + 1: self.NUM_STATES + 2]).cuda()
x=torch.FloatTensor(np.delete(batch_memory[:, :self.NUM_STATES],
[self.region_num,self.region_num+2,self.region_num*2+4], 1)).cuda()
move = torch.FloatTensor([[i[0] % (2 * self.move_amount_limit + 1) - self.move_amount_limit] for i in
batch_memory[:, self.NUM_STATES:self.NUM_STATES + 1]]).cuda()
x = torch.cat((x, move), 1)
y=torch.LongTensor(batch_memory[:, [self.region_num,self.region_num+2,self.region_num*2+4]]).cuda()
region = torch.LongTensor([[int(np.floor(i[0] / (2 * self.move_amount_limit + 1)))] for i in
batch_memory[:, self.NUM_STATES:self.NUM_STATES + 1]]).cuda()
y = torch.cat((y, region), 1)
q_eval = self.eval_net(x, y)
tmp_q_next = list()
for state in batch_memory[:, -self.NUM_STATES:]:
feasible_action = list()
m_r_list=list()
for action in range(self.NUM_ACTIONS):
move = action % (2 * self.move_amount_limit + 1) - self.move_amount_limit
region = int(np.floor(action / (2 * self.move_amount_limit + 1)))
if move + state[-self.region_num-2+region] >= 0 and move <= state[-2]\
and (state[-self.region_num-2+region]- self.out_nums[int(state[-1]),region])*move<=0:
feasible_action.append(action)
m_r_list.append((move,region))
tmp_x = list()
tmp_y = list()
            # compute a value for each feasible action
state_1 = [j for i, j in enumerate(state) if
i not in [self.region_num, self.region_num + 2, 2 * self.region_num + 4]]
state_2 = [j for i, j in enumerate(state) if
i in [self.region_num, self.region_num + 2, 2 * self.region_num + 4]]
for move,region in m_r_list:
# move = action % (2 * self.move_amount_limit + 1) - self.move_amount_limit
# region = int(np.floor(action / (2 * self.move_amount_limit + 1)))
tmp_x.append(np.concatenate([state_1, np.array([move])]))
tmp_y.append(np.concatenate([state_2, np.array([region])]))
x = torch.FloatTensor(tmp_x).cuda()
station = torch.LongTensor(tmp_y).cuda()
action_val = self.target_net.forward(x, station)
tmp_q_next.append([float(action_val.max(1)[0].max().cpu().detach().numpy())])
q_next = torch.FloatTensor(tmp_q_next).cuda()
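        # Bellman target: r + gamma * max over feasible a' of Q_target(s', a')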
# q_target = batch_reward + GAMMA*q_next.max(1)[0].view(BATCH_SIZE, 1)
q_target = batch_reward + GAMMA * q_next
loss = self.loss(q_eval, q_target)
print(loss.item(), file=open(f"result_history/actionless_output_loss_{ts}.txt", "a"))
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
    # Evaluate the agent: average the total reward over evaluation episodes (a single episode in the loop below)
def evaluate(self, env, render=False):
eval_reward = []
for i in range(1):
obs = env.init()
episode_reward = 0
while True:
action = self.predict(obs) # 预测动作,只选最优动作
obs, reward, done = env.step(action)
episode_reward += reward
print(f"obs:{obs[:-1]} action:{action} reward:{reward} reward_sum:{episode_reward} t:{obs[-1]}")
print(
f"obs:{obs[:-1]} t:{obs[-1]} region:{int(np.floor(action / (2 * self.move_amount_limit + 1)))} "
f"move:{action % (2 * self.move_amount_limit + 1) - self.move_amount_limit} reward:{reward} "
f"reward_sum:{episode_reward}",
file=open(f"result_action/actionless_output_action_{ts}.txt", "a"))
# if render:
# env.render()
if done:
break
eval_reward.append(episode_reward)
return np.mean(eval_reward)
def main():
eps_num = 5
EPSILON = 0.9
EPS_DECAY = 0.99
env = Env(region_num=4, move_amount_limit=10, eps_num=eps_num)
    NUM_ACTIONS = (2 * env.move_amount_limit + 1) * env.region_num  # moves in [-move_amount_limit, move_amount_limit] for each of the 4 regions
    NUM_STATES = 2*env.region_num + 7  # length of the observation vector (region counts, action history, truck state) plus time
net = Dqn(NUM_STATES, NUM_ACTIONS, env.region_num, env.move_amount_limit, eps_num)
print("The DQN is collecting experience...")
step_counter_list = []
for episode in range(EPISODES):
state = env.init()
step_counter = 0
reward_sum = 0
while True:
step_counter += 1
# env.render()
EPSILON = max(EPSILON * EPS_DECAY, 0.01)
action = net.choose_action(state,EPSILON)
# print("the action is {}".format(action))
next_state, reward, done = env.step(action)
net.store_trans(state, action, reward, next_state)
reward_sum += reward
if net.memory_counter >= 5*BATCH_SIZE:
net.learn()
if done:
print("episode {}, the reward is {}".format(episode, round(reward_sum, 3)))
print(f"{round(reward_sum, 3)}", file=open(f"result_history/actionless_output_result_{ts}.txt", "a"))
if done:
step_counter_list.append(step_counter)
net.plot(net.ax, step_counter_list)
break
state = next_state
print(net.evaluate(env))
if __name__ == '__main__':
main()
|
[
"lindsaymarymorgan@gmail.com"
] |
lindsaymarymorgan@gmail.com
|
950e75b21d13b28624261269aba44aa62cdb8544
|
2283d7ae2d8c6d2a13b5dbf9e13a395600fb7951
|
/baseline/util.py
|
dbb0d914da17af1e362a702b0ec79937c53ac10d
|
[] |
no_license
|
nishanth01/summary_final
|
eebb3df6de939a2af955cb904b489d3d5e6a10e1
|
b8d9413db87de1a0ce46085f1beae94cb976fa57
|
refs/heads/master
| 2020-03-25T23:25:00.052768
| 2018-08-11T09:47:53
| 2018-08-11T09:47:53
| 144,273,733
| 12
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,420
|
py
|
import tensorflow as tf
import time
import os
def load_ckpt(saver, session, hps, ckpt_dir="train"):
while True:
try:
latest_filename = "checkpoint"# if ckpt_dir=="eval" else hps.model_name
ckpt_dir = os.path.join(hps.log_root,ckpt_dir)
ckpt_state = tf.train.get_checkpoint_state(ckpt_dir, latest_filename=latest_filename)
tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)
saver.restore(session, ckpt_state.model_checkpoint_path)
return ckpt_state.model_checkpoint_path
except Exception as e:
# tf.logging.error(e)
# tf.logging.info("Failed to load checkpoint from %s. Sleeping for %i secs...", ckpt_dir, 10)
            # note: re-raising here means the `while True` loop never actually retries
            raise e
def running_avg_loss(loss, running_avg_loss, summary_writer, step, decay=0.99):
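    """Exponential moving average of the loss: avg = decay * avg + (1 - decay) * loss,
    clipped at 12, then written to the summary writer."""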
if running_avg_loss == 0: # on the first iteration just take the loss
running_avg_loss = loss
else:
running_avg_loss = running_avg_loss * decay + (1 - decay) * loss
running_avg_loss = min(running_avg_loss, 12) # clip
loss_sum = tf.Summary()
tag_name = 'running_avg_loss/decay=%f' % (decay)
loss_sum.value.add(tag=tag_name, simple_value=running_avg_loss)
summary_writer.add_summary(loss_sum, step)
#tf.logging.info('running_avg_loss: %f', running_avg_loss)
return running_avg_loss
|
[
"nisnair@cisco.comgit config --global user.name nisnair"
] |
nisnair@cisco.comgit config --global user.name nisnair
|
dceb4693f1c5ce71fce4dde114fe80301261ae5e
|
28f918f2c78fb09e178dcab2ec238423cb84640f
|
/benefits/rs_analysis_mk2.py
|
aff41b82b1cb93b37048ca018bf25d0f241a58cc
|
[] |
no_license
|
toddb8632/geoprocessing
|
6d75adce5e8a0dcead7a7dd8f9a6f5d4b36c5229
|
6fb2c9204e5782fcf04f247a3ba6f67fe447771c
|
refs/heads/master
| 2023-03-16T21:21:11.861754
| 2017-11-30T16:35:00
| 2017-11-30T16:35:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,368
|
py
|
#-------------------------------------------------------------------------------
# Name: WH benefits related
# Purpose:
#
# Author: Yichuan Shi (yichuan.shi@unep-wcmc.org)
#
# Created: 2014/01/21
# Modified: 2014/01/29: corrected area calculation
# 2014/02/03: added forest extent
# Copyright: (c) Yichuan Shi 2014
#-------------------------------------------------------------------------------
import os, sys, codecs
import arcpy, numpy, math
import Yichuan10
from YichuanRAS import *
# in order to work with spatialanalyst
arcpy.CheckOutExtension("Spatial")
arcpy.env.overwriteOutput= True
def carbon(workspace, fc, outputfile, UID, UID_name):
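    """Clip per-region carbon, carbon-error and tree-cover (VCF) rasters to each
    feature in fc and append per-site carbon statistics to outputfile (CSV)."""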
base_rasters = dict()
base_rasters['africa'] = [r"C:\Ys\reg\Africa\carbon_saatchi_111103\africa_carbon_1km.tif",
r"C:\Ys\reg\Africa\carbon_saatchi_111103\africa_carbon_error_1km.tif",
r"C:\Ys\data.gdb\vcf_africa_2000_resample"]
base_rasters['asia'] = [r"C:\Ys\reg\Asia\carbon_saatchi_111103\asia_carbon_1km.tif",
r"C:\Ys\reg\Asia\carbon_saatchi_111103\asia_carbon_error_1km.tif",
r"C:\Ys\data.gdb\vcf_asia_2000_resample"]
base_rasters['america'] = [r"C:\Ys\reg\LatinAmerica\carbon_saatchi_111103\america_carbon_1km.tif",
r"C:\Ys\reg\LatinAmerica\carbon_saatchi_111103\america_carbon_error_1km.tif",
r"C:\Ys\data.gdb\vcf_america_2000_resample"]
# set workspace
if not os.path.exists(workspace):
os.mkdir(workspace)
workspace_clip = workspace + os.sep + 'clip'
if not os.path.exists(workspace_clip):
os.mkdir(workspace_clip)
os.chdir(workspace)
arcpy.env.workspace = workspace_clip
# create logs etc
f = codecs.open(outputfile, 'w', 'utf-8')
# create header:
headerlist = ['region', 'wdpaid', 'pa_name']
for thres in [10, 25, 30]:
headerlist.append('total_carbon' + str(thres))
headerlist.append('forest_area' + str(thres))
headerlist.append('density'+ str(thres))
headerlist.append('abs_error' + str(thres))
headerlist.append('rel_error' + str(thres))
header = ','.join(headerlist) + '\n'
f.write(header)
f.close()
# loop over all rasters
for region in base_rasters.keys():
# get region rasters
raster = arcpy.Raster(base_rasters[region][0])
raster_error = arcpy.Raster(base_rasters[region][1])
raster_forest = arcpy.Raster(base_rasters[region][2])
pa_set = set(Yichuan10.GetUniqueValuesFromFeatureLayer_mk2(fc, UID))
pa_unfinish_set = set()
# PA clipping carbon rasters
with arcpy.da.SearchCursor(fc, (UID, UID_name, 'SHAPE@')) as cursor:
# for each site
for row in cursor:
# if the raster extent contains the feature geom, clip rasters
geom = row[2]
wdpaid = row[0]
pa_name = row[1]
try:
if raster.extent.overlaps(geom) or raster.extent.contains(geom) :
print 'Clip: ' + str(wdpaid)
out_ras = region + '_' + str(wdpaid) +'.tif'
out_ras_error = region + '_' + str(wdpaid) + '_error.tif'
out_ras_forest = region + '_' + str(wdpaid) + '_forest.tif'
# clip
clip_raster(geom, raster, out_ras, 0)
clip_raster(geom, raster_error, out_ras_error, 0)
clip_raster(geom, raster_forest, out_ras_forest, 0)
# output
f = codecs.open(outputfile, 'a', 'utf-8')
if type(pa_name) == type('text') or type(pa_name) == type(u'\xe1'):
line = region + ',' + str(wdpaid) + ',\"' + pa_name + '\",'
else:
line = region + ',' + str(wdpaid) + ',\"' + str(pa_name) + '\",'
all_results = []
# need to specify threshold for forests 10,25,30 tree cover (see data)
for thres in [10, 25, 30]:
                        # calculate carbon: caution - the extents must be identical
result = pa_carbon_mk3(out_ras, out_ras_error, out_ras_forest, thres)
all_results.extend(result)
# write result
line += ','.join([str(x) for x in all_results])
f.write(line)
f.write('\n')
print 'Complete:', str(wdpaid)
else:
print 'Pass:', str(wdpaid)
# finally remove id
pa_set.remove(wdpaid)
except Exception as e:
pa_unfinish_set.add(wdpaid)
Yichuan10.Printboth('Error: ' + str(wdpaid))
Yichuan10.Printboth(str(e))
Yichuan10.Printboth(sys.exc_info()[0])
finally:
print '-----\n'
f.close()
Yichuan10.ExportListToTxt(pa_set, 'log_left.txt')
Yichuan10.ExportListToTxt(pa_unfinish_set, 'log_fail.txt')
def water(workspace, fc, outputfile, fail_log=None):
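    """For each site in fc, clip the monthly ET/PET/precipitation rasters and
    append zonal statistics (totals, areas, means) to outputfile (CSV)."""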
# data
months = map(lambda x: '0'+x, map(str, range(1, 10))) + map(str, range(10, 13))
etdict = dict()
petdict = dict()
pdict = dict()
for month in months:
etdict[month] = r"C:\Ys\water_workspace\avg" + os.sep + 'et_' + month + '.tif'
petdict[month] = r"C:\Ys\water_workspace\avg" + os.sep + 'pet_' + month + '.tif'
pdict[month] = r"C:\Ys\water_workspace\prec" + os.sep + 'prec' + '_' + str(int(month))
inputdict = {'et': etdict,
'pet': petdict,
'p': pdict}
# set workspace
if not os.path.exists(workspace):
os.mkdir(workspace)
workspace_clip = workspace + os.sep + 'clip'
if not os.path.exists(workspace_clip):
os.mkdir(workspace_clip)
# os workspace
os.chdir(workspace)
# arcgis workspace
arcpy.env.workspace = workspace_clip
# create logs etc
f = codecs.open(outputfile, 'w', 'utf-8')
# create header:
headerlist = ['wdpaid',
'pa_name',
'type',
'month',
'total_amount',
'total_amount_sphere',
'total_area',
'total_area_sphere',
'simple_mean',
'true_mean',
'true_mean_sphere',
'count_pixel']
# for assessing result dict
datalist = headerlist[4:]
header = ','.join(headerlist) + '\n'
f.write(header)
f.close()
# loop over all rasters
for raster_type in inputdict.keys():
# get data type
raster_dict = inputdict[raster_type]
# for each month
for month in sorted(raster_dict.keys()):
rasterpath = raster_dict[month]
raster = arcpy.Raster(rasterpath)
pa_set = set(Yichuan10.GetUniqueValuesFromFeatureLayer_mk2(fc, 'wdpaid'))
pa_unfinish_set = set()
#where_clause = '\"wdpaid\" = 900629'
# PA clipping carbon rasters
with arcpy.da.SearchCursor(fc, ('wdpaid', 'name', 'SHAPE@')) as cursor:
# for each site
for row in cursor:
# if the raster extent contains the feature geom, clip rasters
geom = row[2]
wdpaid = row[0]
pa_name = row[1]
try:
if raster.extent.overlaps(geom) or raster.extent.contains(geom) :
print 'Clip: ' + str(wdpaid)
out_ras = raster_type + month + '_' + str(wdpaid) +'.tif'
# clip
clip_raster(geom, rasterpath, out_ras, 0)
# output
f = codecs.open(outputfile, 'a', 'utf-8')
line = str(wdpaid) + ',\"' + pa_name + '\",' + raster_type.upper() + ',' + month + ','
all_results = []
# returns a dictionary
result = ras_each(out_ras)
# add to line list
for each in datalist:
# add a check for et and pet, which have unit of 0.1mm
if raster_type == 'p':
all_results.append(result[each])
# for et and pet
else:
all_results.append(result[each]/10)
# write result
line += ','.join([str(x) for x in all_results])
f.write(line)
f.write('\n')
else:
print 'pass: ' + str(wdpaid)
# finally remove id
pa_set.remove(wdpaid)
except Exception as e:
pa_unfinish_set.add(wdpaid)
Yichuan10.Printboth('Error: ' + str(wdpaid))
Yichuan10.Printboth(str(e))
Yichuan10.Printboth(sys.exc_info()[0])
finally:
print '----\n'
f.close()
Yichuan10.ExportListToTxt(pa_set, 'log_left.txt')
Yichuan10.ExportListToTxt(pa_unfinish_set, 'log_fail.txt')
def forest(workspace, fc, outputfile, fail_log=None):
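    """For each site in fc, clip the Hansen loss-year mosaic and append per-year
    forest-loss pixel counts and areas to outputfile (CSV)."""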
# input data mosaic
rasterpath = r"C:\Ys\Hansen2013\loss_year.gdb\hansenlossyear"
# set workspace
if not os.path.exists(workspace):
os.mkdir(workspace)
workspace_clip = workspace + os.sep + 'clip'
if not os.path.exists(workspace_clip):
os.mkdir(workspace_clip)
# os workspace
os.chdir(workspace)
# arcgis workspace
arcpy.env.workspace = workspace_clip
# create header:
headerlist = ['wdpaid',
'pa_name',
'year',
'count_pixel',
'total_area']
# for assessing result dict
datalist = headerlist[2:]
header = ','.join(headerlist) + '\n'
# create logs etc
if not fail_log:
where_clause = None
f = codecs.open(outputfile, 'w', 'utf-8')
f.write(header)
f.close()
else:
# if with list
where_clause = '\"wdpaid\" in (' + Yichuan10.CreateListFromTxtTable(fail_log) + ')'
raster = arcpy.Raster(rasterpath)
pa_set = set(Yichuan10.GetUniqueValuesFromFeatureLayer_mk2(fc, 'wdpaid'))
pa_unfinish_set = set()
# where_clause = '\"wdpaid\" = 2017'
# PA clipping carbon rasters
with arcpy.da.SearchCursor(fc, ('wdpaid', 'name', 'SHAPE@'), where_clause=where_clause) as cursor:
# for each site
for row in cursor:
# if the raster extent contains the feature geom, clip rasters
geom = row[2]
wdpaid = row[0]
pa_name = row[1]
try:
if raster.extent.overlaps(geom) or raster.extent.contains(geom) :
out_ras = str(wdpaid) +'.tif'
# clip
clip_raster(geom, rasterpath, out_ras, no_data=0)
# output
f = codecs.open(outputfile, 'a', 'utf-8')
# returns a dictionary
result = desforestation(arcpy.env.workspace+os.sep+out_ras, 0)
# add to line list
for year in result:
# for each wdpaid
line = str(wdpaid) + ',\"' + pa_name + '\",'
# year, count pixels, total area
all_results = [year, result[year][0], result[year][1]]
# write result
line += ','.join([str(x) for x in all_results])
f.write(line)
f.write('\n')
# complete
print 'complete: ' + str(wdpaid)
else:
print 'pass: ' + str(wdpaid)
# finally remove id
pa_set.remove(wdpaid)
except Exception as e:
pa_unfinish_set.add(wdpaid)
Yichuan10.Printboth('Error: ' + str(wdpaid))
Yichuan10.Printboth(str(e))
Yichuan10.Printboth(sys.exc_info()[0])
finally:
print '-----\n'
f.close()
Yichuan10.ExportListToTxt(pa_set, 'log_left.txt')
Yichuan10.ExportListToTxt(pa_unfinish_set, 'log_fail.txt')
def desforestation(ras, nodata):
"""input raster path -> return stats"""
"""input raster path -> return stats"""
# get area grid
area_grid = raster_area_lat(ras) # true WGS84 spheroid
# getting numpy object
#ras_np_raw = arcpy.RasterToNumPyArray(ras, ncols = ncols, nrows = nrows, nodata_to_value= nodata)
ras_np_raw = gdal_tif_to_numpy(ras)
# masking data not need as further masked below
# ras_np = numpy.ma.masked_values(ras_np_raw, nodata)
# 0 - no loss, 1 - change in 2000-2001, .. 12 change 2011-2012
years = range(0, 13)
year_dict = dict()
for year in years:
# get subset of the year, i.e. all other valuse are masked
# ras_sub = numpy.ma.masked_not_equal(ras_np_raw, year)
# the mask is useful
ras_sub_mask = numpy.ma.masked_equal(ras_np_raw, year)
# use count (no mask) NOT size (including mask)
# count_pixel = ras_sub.count()
count_pixel = ras_sub_mask.mask.sum()
# True is treated as 1
total_area = (ras_sub_mask.mask * area_grid).sum()
year_dict[year] = [count_pixel, total_area]
return year_dict
def ras_each(ras):
"""input raster path -> return stats"""
param = arcpy.Raster(ras)
nrows = param.height
ncols = param.width
# get area grid
area_grid = raster_area_mk3(ras) # true WGS84 spheroid
area_grid_sphere = raster_area(ras) # based on a sphere
# getting numpy object
ras_np = arcpy.RasterToNumPyArray(ras, ncols = ncols, nrows = nrows, nodata_to_value=0)
print ras_np.shape
# calculate area (0, 1)
ras_np_copy = numpy.copy(ras_np)
ras_np_copy[ras_np_copy!=0] = 1
# count
count = ras_np_copy[ras_np_copy!=0].size
# total
total_amount = (ras_np * area_grid).sum()
total_amount_sphere = (ras_np * area_grid_sphere).sum()
total_area = (ras_np_copy * area_grid).sum()
total_area_sphere = (ras_np_copy * area_grid_sphere).sum()
try:
avg = ras_np[ras_np!=0].mean()
except:
avg = 'NA'
try:
true_avg = total_amount/total_area
except:
true_avg = 'NA'
try:
true_avg_sphere = total_amount_sphere/total_area_sphere
except:
true_avg_sphere = 'NA'
return {'total_amount': total_amount,
'total_amount_sphere': total_amount_sphere,
'total_area': total_area,
'total_area_sphere': total_area_sphere,
'simple_mean': avg,
'true_mean': true_avg,
'true_mean_sphere':true_avg_sphere,
'count_pixel': count}
def pa_carbon_mk3(pa_carbon, pa_carbon_error, pa_forest, thres):
"""assuming the input are already in Mg/km2; 10^6 gram per square km
it returns a tuple of absolute error and relative error - need to take into account
calculation of cell area"""
# convert rasters to numpy arrays
param = arcpy.Raster(pa_carbon)
nrows = param.height
ncols = param.width
carbon = arcpy.RasterToNumPyArray(pa_carbon, ncols = ncols, nrows = nrows, nodata_to_value=0)
carbon_error = arcpy.RasterToNumPyArray(pa_carbon_error, ncols = ncols, nrows = nrows, nodata_to_value=0)
forest = arcpy.RasterToNumPyArray(pa_forest, ncols = ncols, nrows = nrows, nodata_to_value=0)
print carbon.shape, carbon_error.shape, forest.shape
# debug magnitude: original unit is MgC/ha - this is to convert to MgC/km2
carbon = carbon * 100
carbon_error = carbon_error
# calculate total biomass carbon
# this is the area grid
## print forest
# order important
forest[forest<thres] = 0
forest[forest>0] = 1
## # debug:
# pa_area, area for each cell in carbon, unit: sqkm
pa_area = raster_area_mk3(pa_carbon)
# forest area
forest_area = (pa_area * forest).sum()
# total carbon
total_carbon = (carbon * pa_area * forest).sum()
# calculate uncertainty, i.e. standard error, error in data is presented without %
abs_error = math.sqrt((carbon * pa_area * carbon * pa_area * carbon_error * carbon_error * forest / 10000.0).sum())
# density: forest carbon in forested areas (average) unit: Mg/km2
if forest_area:
density = total_carbon/forest_area
else:
density = 'NaN'
# check if total carbon is zero
if total_carbon:
rel_error = abs_error/total_carbon
else:
rel_error = 'NaN'
return (total_carbon, forest_area, density, abs_error, rel_error)
def raster_area(raster_object):
""" this function takes an raster object or path and creates an numpy array with areas per cell
This should be used in conjunction with the numpy array created using ArcGIS's
RasterToNumpyArray
Corrected"""
# nrow is the row number (lat), derived from raster object
    # for each row calculate area A = r^2 * cellsize * (sin(ymin + (i+1)*cellsize) - sin(ymin + i*cellsize))
# authalic radius: assuming same surface area (spheroid) as if the earth was a perfect sphere
# http://en.wikipedia.org/wiki/Earth_radius#Authalic_radius
# make sure this is the arcpy raster object
if not isinstance(raster_object, arcpy.Raster):
raster_object = arcpy.Raster(raster_object)
r = 6371.0072
cellsize = raster_object.meanCellHeight
nrow = raster_object.height
ncol = raster_object.width
ymax = raster_object.extent.YMax
# in rad
cellsize_pi = cellsize * numpy.pi / 180
ymax_pi = ymax * numpy.pi / 180
stack_list = list()
for i in range(nrow):
        # all in radians
y2 = ymax_pi - i*cellsize_pi
y1 = y2 - cellsize_pi
# calculate area
ith_area = (r * r * cellsize_pi) * (numpy.sin(y2) - numpy.sin(y1))
# all same latitude cells have same areas
ith_array = numpy.array([ith_area], 'float32')
# append to list for vstack later
stack_list.append(ith_array)
    # create ndarray using vstack; note the storage order: i = 0 is the top row
    # both in the raster and in the array (cf. the corrected note in raster_area_mk3)
result = numpy.vstack(stack_list)
print result.shape
return result
def raster_area_mk3(raster_object):
""" this function takes an raster object or path and creates an numpy array with areas per cell
This should be used in conjunction with the numpy array created using ArcGIS's
RasterToNumpyArray
Fixed an issue with numpy read order:
    Vstack: a1=[1,2,3], a2=[2,3,4], vstack((a1,a2)) = [[1,2,3], [2,3,4]]
Arcpy.Raster-> Numpy and Numpy-> Raster, all table like storage
test run: 1) size algorithm 2) order """
# trying to calculate true ellipsoidal area
# nrow is the row number (lat), derived from raster object
# eccentricity e
e = 0.081819190842621
# semi-major axis = equatorial radius a
a = 6378.137
# this function calculates the formula in solving integral
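    # The area of a latitude band [phi1, phi2] of longitudinal width dlambda on the
    # ellipsoid is A = (a^2 * (1 - e^2) / 2) * dlambda * (f(phi2) - f(phi1)), where f
    # below is the antiderivative of cos(phi) / (1 - e^2 * sin^2(phi))^2.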
def f(lat):
from numpy import sin, power, log
return sin(lat)/(1-power(e, 2)*power(sin(lat), 2)) + (0.5/e)*log((1+e*sin(lat))/(1-e*sin(lat)))
# make sure this is the arcpy raster object
if not isinstance(raster_object, arcpy.Raster):
raster_object = arcpy.Raster(raster_object)
# height and width measured in number of unit cell
# not to be confused with raster_object.extent.height, which measured in map units
cellsize = raster_object.meanCellHeight
nrow = raster_object.height
ncol = raster_object.width
ymin = raster_object.extent.YMin
ymax = raster_object.extent.YMax # cell upper left corner, not centroid of cell
# in rad
cellsize_pi = cellsize * numpy.pi / 180
ymax_pi = ymax * numpy.pi / 180
stack_list = list()
for i in range(nrow):
        # all in radians: order corrected
y2 = ymax_pi - i*cellsize_pi
y1 = y2 - cellsize_pi
# calculate area
ith_area = 0.5 * a * a * (1 - e * e) * cellsize_pi * (f(y2) - f(y1))
# all same latitude cells have same areas
ith_array = numpy.array([ith_area], 'float32')
# append to list for vstack later
stack_list.append(ith_array)
# create ndarray using vstack; note storage. e.g. i = 0 refers to the top row in raster and in np array
result = numpy.vstack(stack_list)
# free memory not sure needed
del raster_object
print result.shape
return result
# average rasters, for this to happen one needs to check out licence
# mask is needed to prevent NA + number
# issue regarding average data where no-data is present
def arrange_raster():
et_dict = compile_raster('ET')
pet_dict = compile_raster('PET')
outputfolder = r'D:\Yichuan\WH_benefits\data\water\et\avg'
# 12 year average
print 'processing ET:'
for month in sorted(et_dict.keys()):
outras = outputfolder + os.sep + 'et_' + month + '.tif'
raster_average_mk2(et_dict[month], outras)
print 'Finished year: ', month
print 'processing PET:'
for month in sorted(pet_dict.keys()):
outras = outputfolder + os.sep + 'pet_' + month + '.tif'
raster_average_mk2(pet_dict[month], outras)
print 'Finished year: ', month
def raster_average_mk2(rasterobject_list, outras):
# this function improves the previous version in that no value data is considered
from arcpy.sa import Con, SetNull, CellStatistics
n = len(rasterobject_list)
# get mask
rastermask_list = list()
for each in rasterobject_list:
eachmask = Con(each>32760, 1, 0)
rastermask_list.append(eachmask)
sum_mask = CellStatistics(rastermask_list, "SUM")
# flip values and set null for mask
# only do this for pixels having more than 6 NoData
## sum_mask = Con(sum_mask>0, None, 1)
sum_mask = SetNull(sum_mask>6, 1)
# it doesn't honor mask
outras_mask = r"C:\mask_temp.tif"
sum_mask.save(outras_mask)
# average, only operate on those valid values
arcpy.env.mask = outras_mask
# average
avg_raster = CellStatistics(rasterobject_list, "MEAN", "DATA")
avg_raster.save(outras)
# clear mask
arcpy.env.mask = None
def raster_average(rasterobject_list, outras):
from arcpy.sa import Con, SetNull
n = len(rasterobject_list)
# get mask
rastermask_list = list()
for each in rasterobject_list:
eachmask = Con(each>32760, 1, 0)
rastermask_list.append(eachmask)
sum_mask = rastermask_list[0]
for each in rastermask_list[1:]:
sum_mask += each
# flip values and set null for mask
## sum_mask = Con(sum_mask>0, None, 1)
sum_mask = SetNull(sum_mask>0, 1)
# it doesn't honor mask
outras_mask = r"C:\mask_temp.tif"
sum_mask.save(outras_mask)
# average, only operate on those valid values
arcpy.env.mask = outras_mask
## arcpy.env.mask = sum_mask.catalogPath
sum_raster = rasterobject_list[0]
for each in rasterobject_list[1:]:
sum_raster += each
avg_raster = sum_raster/n
avg_raster.save(outras)
# clear mask
arcpy.env.mask = None
# get a dictionary of month - years raster object
def compile_raster(type = 'ET'):
# 'ET' or 'PET'
folder = r"D:\Yichuan\WH_benefits\data\water\et\GEOTIFF_0.05degree"
# 2000, 2001,... 2012
years = map(str, range(2000, 2013))
# 01, 02, ... 12
months = map(lambda x: '0'+x, map(str, range(1, 10))) + map(str, range(10, 13))
monthdict = dict()
# by month
for month in months:
monthdict[month] = list()
for year in years:
rastername = "MOD16A2_" + type + "_0.05deg_GEO_" + year + "M" + month + '.tif'
raster = arcpy.Raster(folder + os.sep + rastername)
monthdict[month].append(raster)
return monthdict
# carbon analysis
def run_carbon():
print 'Run: carbon'
UID = 'wdpaid'
UID_name = 'name'
workspace = r"C:\raster_workspace_3\carbon_wh"
fc = r"C:\Ys\whs_dump_140113.shp"
outputfile = 'result_wh.csv'
carbon(workspace, fc, outputfile, UID, UID_name)
print '---------Finish WH--------'
UID = 'wdpaid'
UID_name = 'name'
workspace = r"C:\raster_workspace_3\carbon_pa"
fc = r"C:\Ys\data.gdb\biome_intersect_pa_dis"
outputfile = 'result_pa.csv'
carbon(workspace, fc, outputfile, UID, UID_name)
print '---------Finish PA--------'
UID = 'OBJECTID'
UID_name = 'name'
workspace = r"C:\raster_workspace_3\carbon_pa_dis"
fc = r"C:\Ys\data.gdb\biome_intersect_pa_dis_complete"
outputfile = 'result_pa_dis.csv'
carbon(workspace, fc, outputfile, UID, UID_name)
print '---------Finish PA-dis -------'
UID = 'OBJECTID'
UID_name = 'BIOME'
workspace = r"C:\raster_workspace_3\carbon_biome"
fc = r"C:\Ys\data.gdb\biome_1237"
outputfile = 'result_biome.csv'
carbon(workspace, fc, outputfile, UID, UID_name)
print '---------Finish all biome----------'
def run_forest():
#forest
print 'Run: forest'
workspace = r"C:\raster_workspace_3\forest"
fc = r"C:\Ys\whs_dump_140113.shp"
outputfile = "resultforest.csv"
forest(workspace, fc, outputfile)
def run_water():
# water
print 'Run: water'
workspace = r"C:\raster_workspace_3\water"
fc = r"C:\Ys\whs_dump_140113.shp"
outputfile = "resultwater.csv"
water(workspace, fc, outputfile)
|
[
"yichuan.shi@outlook.com"
] |
yichuan.shi@outlook.com
|
00b7d3262afb03b29695861e71ac12ee040ea78f
|
047b1d8f93420fc799c6f7214d3ef7f1ed7bb950
|
/students/migrations/0009_auto_20160126_1533.py
|
361b76d043bc8d6daa7644f2249f6eb55051e1f8
|
[] |
no_license
|
alferum/django_beginners
|
922e933c30f292877502d3f1e7c6d685e3bb6de1
|
e96c7d979571b46914a9fab5053d613f764ea868
|
refs/heads/master
| 2021-01-18T21:08:52.457315
| 2016-06-10T17:42:22
| 2016-06-10T17:42:22
| 44,047,521
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 992
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
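# Translator's note: the escaped verbose_name strings below are Ukrainian --
# "Ім'я викладача" means "Teacher's name", "Назва предмету" means "Subject name".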
class Migration(migrations.Migration):
dependencies = [
('students', '0008_auto_20160125_1824'),
]
operations = [
migrations.AlterField(
model_name='exam',
name='teacher',
field=models.CharField(max_length=128, verbose_name="\u0406\u043c'\u044f \u0432\u0438\u043a\u043b\u0430\u0434\u0430\u0447\u0430"),
preserve_default=True,
),
migrations.AlterField(
model_name='exam',
name='title',
field=models.CharField(max_length=128, verbose_name='\u041d\u0430\u0437\u0432\u0430 \u043f\u0440\u0435\u0434\u043c\u0435\u0442\u0443'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='exam',
unique_together=set([('date', 'teacher'), ('title', 'exam_group'), ('date', 'exam_group')]),
),
]
|
[
"alamzaker@yandex.ua"
] |
alamzaker@yandex.ua
|
127f00a61aebf0d77e3d5c34e7f7cf9ee4fa1ff2
|
6a87a432fbdf26c9f7f249b9e93edb89edcec853
|
/ThreeManMorris.py
|
0c1bed418207fe60e741d0060b91904719c34043
|
[] |
no_license
|
mariazverina/ThreeManMorrisDojo
|
88e1811157f9f65248549f22505ff30a5c3e50b7
|
b1305c2228aba00feb0d8a2a6f2cde5f2963391e
|
refs/heads/master
| 2021-01-16T18:18:22.500902
| 2013-10-22T08:58:37
| 2013-10-22T08:58:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,239
|
py
|
'''
Created on 21 Oct 2013
@author: mariaz
'''
import unittest
from collections import namedtuple
class Game(object):
BOARD_SIZE = 3
WHITE_PLAYER = 1
BLACK_PLAYER = 2
def __init__(self):
self._currentPlayer = Game.WHITE_PLAYER
self._board = {}
def current_player(self):
return self._currentPlayer
    def placeStone(self, location):
        self._board[location] = self._currentPlayer
        # alternate turns after each placement
        if self._currentPlayer == Game.WHITE_PLAYER:
            self._currentPlayer = Game.BLACK_PLAYER
        else:
            self._currentPlayer = Game.WHITE_PLAYER
def isWhite(self, location):
return self._board.get(location) == Game.WHITE_PLAYER
def isEmpty(self, location):
        return location not in self._board
def placeTiles(self, slots):
for slot in slots:
self._board[slot.location] = slot.player
def isBlack(self, location):
return self._board.get(location) == Game.BLACK_PLAYER
def moveFromTo(self, fromLocation, toLocation):
self._board[toLocation] = self._board.get(fromLocation)
del self._board[fromLocation]
pass
def isFullRowOfColor(self, rowNum, color):
slots = self._board.iteritems()
whiteSlotsInRow = [1 for location, player in slots if player == color and location.row == rowNum]
return sum(whiteSlotsInRow) == Game.BOARD_SIZE
def isFullColumnOfColor(self, columnNum, color):
slots = self._board.iteritems()
blackSlotsInColumn = [1 for (location, player) in slots if player == color and location.column == columnNum]
return sum(blackSlotsInColumn) == Game.BOARD_SIZE
def isWhiteRow(self, rowNum):
return self.isFullRowOfColor(rowNum, Game.WHITE_PLAYER)
def isFirstDiagonalForPlayer(self, color):
slots = self._board.iteritems()
sorted_player_locations = sorted([location for (location, player) in slots if player == color])
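        # note: namedtuples compare equal to plain tuples, so a player holding
        # exactly one full diagonal sorts to one of the two patterns below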
diagonals = [[(0,0), (1,1), (2,2)], [(0, 2), (1, 1), (2, 0)]]
return sorted_player_locations in diagonals
def winner(self):
for player in [Game.WHITE_PLAYER, Game.BLACK_PLAYER]:
for i in range(3):
if(self.isFullRowOfColor(i, player) or self.isFullColumnOfColor(i, player)):
return player
if self.isFirstDiagonalForPlayer(player):
return player
return None
Location = namedtuple('Location', 'row column')
Slot = namedtuple('Slot', 'location player')
class Test(unittest.TestCase):
def testNewBoardIsEmpty(self):
game = Game()
allLocations = [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)]
for location in allLocations:
self.assertTrue(game.isEmpty(Location(*location)), "Every tile should be empty at the beginning")
# self.assertEqual("...\n...\n...", game.humanRepresenation(), "New boards should be empty")
def testFirstPlayerIsWhite(self):
game = Game()
self.assertEqual(game.current_player(), Game.WHITE_PLAYER, "First player should be white")
def testSecondPlayerIsBlack(self):
game = Game()
game.placeStone(Location(1,1))
self.assertEqual(game.current_player(), Game.BLACK_PLAYER, "Second player should be black")
def testValidFirstMovePlacesATile(self):
game = Game()
game.placeStone(Location(1,1))
self.assertTrue(game.isWhite(Location(1,1)), "Middle should be white now")
self.assertFalse(game.isEmpty(Location(1,1)), "Middle shouldn't be empty")
def testSettingUpABoardInParticularState(self):
game = Game()
game.placeTiles([Slot(Location(0,0), Game.WHITE_PLAYER),
Slot(Location(1,1), Game.BLACK_PLAYER),
Slot(Location(2,2), Game.WHITE_PLAYER)
])
self.assertTrue(game.isWhite(Location(0,0)), "Bottom left corner should be white")
self.assertTrue(game.isBlack(Location(1,1)), "Middle should be black")
self.assertTrue(game.isWhite(Location(2,2)), "Top right corner should be white")
def testEmptyBoardHasNoWhiteNorBlackPiece(self):
game = Game()
self.assertFalse(game.isWhite(Location(1,1)), "Should be empty - not white")
self.assertFalse(game.isBlack(Location(1,1)), "Should be empty - not black")
def testMakingMoveBetweenSlots(self):
game = Game()
game.placeTiles([Slot(Location(0,0), Game.WHITE_PLAYER),
Slot(Location(1,1), Game.BLACK_PLAYER),
Slot(Location(2,2), Game.WHITE_PLAYER),
Slot(Location(0,1), Game.BLACK_PLAYER),
Slot(Location(1,2), Game.WHITE_PLAYER),
Slot(Location(2,0), Game.BLACK_PLAYER),
])
game.moveFromTo(Location(2,2), Location(2,1))
self.assertTrue(game.isEmpty(Location(2,2)), "Departing slot should be empty")
self.assertTrue(game.isWhite(Location(2,1)), "Arrival slot should be white")
def testEmptyBoardHasNoWinner(self):
game = Game()
self.assertEqual(game.winner(), None, "Nobody should be winning at the start")
def testFirstRowIsWhite(self):
game = Game()
game.placeTiles([Slot(Location(0,0), Game.WHITE_PLAYER),
Slot(Location(0,1), Game.WHITE_PLAYER),
Slot(Location(0,2), Game.WHITE_PLAYER),
Slot(Location(1,0), Game.BLACK_PLAYER),
Slot(Location(1,2), Game.BLACK_PLAYER),
Slot(Location(2,1), Game.BLACK_PLAYER),
])
self.assertTrue(game.isWhiteRow(0), "Row 0 should be white")
def testWinnerIsWhite(self):
game = Game()
game.placeTiles([Slot(Location(0,0), Game.WHITE_PLAYER),
Slot(Location(0,1), Game.WHITE_PLAYER),
Slot(Location(0,2), Game.WHITE_PLAYER),
Slot(Location(1,0), Game.BLACK_PLAYER),
Slot(Location(1,2), Game.BLACK_PLAYER),
Slot(Location(2,1), Game.BLACK_PLAYER),
])
self.assertEqual(game.winner(), Game.WHITE_PLAYER, "Winner should be white")
def testWinnerIsBlackByTakingTheFirstColumn(self):
game = Game()
        game.placeTiles([Slot(Location(1,1), Game.WHITE_PLAYER),
Slot(Location(1,2), Game.WHITE_PLAYER),
Slot(Location(2,1), Game.WHITE_PLAYER),
Slot(Location(0,0), Game.BLACK_PLAYER),
Slot(Location(1,0), Game.BLACK_PLAYER),
Slot(Location(2,0), Game.BLACK_PLAYER),
])
self.assertEqual(game.winner(), Game.BLACK_PLAYER, "Column 0 is black")
def testWinnerIsBlackByTakingTheSouthEastDiagonal(self):
game = Game()
game.placeTiles([Slot(Location(1,0), Game.WHITE_PLAYER),
Slot(Location(1,2), Game.WHITE_PLAYER),
Slot(Location(2,1), Game.WHITE_PLAYER),
Slot(Location(0,0), Game.BLACK_PLAYER),
Slot(Location(1,1), Game.BLACK_PLAYER),
Slot(Location(2,2), Game.BLACK_PLAYER),
])
self.assertEqual(game.winner(), Game.BLACK_PLAYER, "Diagonal is black")
def testWinnerIsBlackByTakingTheNorthEastDiagonal(self):
game = Game()
game.placeTiles([Slot(Location(1,0), Game.WHITE_PLAYER),
Slot(Location(1,2), Game.WHITE_PLAYER),
Slot(Location(2,1), Game.WHITE_PLAYER),
Slot(Location(0,2), Game.BLACK_PLAYER),
Slot(Location(1,1), Game.BLACK_PLAYER),
Slot(Location(2,0), Game.BLACK_PLAYER),
])
self.assertEqual(game.winner(), Game.BLACK_PLAYER, "Diagonal is black")
if __name__ == "__main__":
unittest.main()
|
[
"maria.zverina@corp.badoo.com"
] |
maria.zverina@corp.badoo.com
|
99d528e6a75d5e5451e88cab9b20cc94d9bf4a70
|
ff89ab582608c442d223966c077ee47f7a99f3b1
|
/2222.py
|
84133ae9ca260bcbf1d8e419efc2a15db622c4f4
|
[] |
no_license
|
JunPengRen/lll
|
48ac5d9cf7ad8a46a8f4fdb00cd4dde29c70b53a
|
11d76301f1f32a7d05dc4dee2a5f5a2df2886532
|
refs/heads/master
| 2020-03-18T13:38:09.456445
| 2018-05-25T03:45:02
| 2018-05-25T03:45:02
| 134,797,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12
|
py
|
number = 122
|
[
"15110624119@163.com"
] |
15110624119@163.com
|
096a2503c110b091215e0f93241cf12953d85604
|
47832a9ad8f617002edcb585d116f66903a60306
|
/extraction/clean.py
|
8a7fd04bfca95c2af500b26fb4be4f0116fb9c02
|
[] |
no_license
|
Sharad24/multilingual_kws
|
66435298839c5fb19d091208b219881a0d0944d0
|
22b5ddb4bcbad294100637a426a6fcf0465f1140
|
refs/heads/main
| 2023-07-22T13:12:38.055350
| 2021-08-25T19:30:46
| 2021-08-25T19:30:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,676
|
py
|
#%%
from cvutils import Alphabet
from cvutils import Validator
from pathlib import Path
from tqdm import tqdm
import os, multiprocessing, shutil
data = Path('/mnt/disks/std3/compressed/generated/common_voice/frequent_words/')
# langs = os.listdir(data)
langs = ['lt']
# %%
map, nolang = {}, set()
# langs = ['en']
#%%
# for l in tqdm(langs):
# map[l] = {}
# try:
# v = Validator(l)
# except:
# nolang.add(l)
# continue
# words = os.listdir(data / l / 'clips')
# for w in words:
# tw = correct_spelling(w)
# map[l][w] = tw
# out = v.validate(w)
# if out is not None:
# map[l][w] = out
# else:
# print(w)
# map[l][w] = w
# def correct_spelling(word, alphabet):
# return [w for w in word if w in alphabet else '']
#%%
def correct_spelling(w, alphabet):
    # keep only characters that belong to the language's alphabet
    nw = ''.join(c for c in w if c in alphabet)
    if nw == '':
        return ''
    # trim one leading and one trailing apostrophe, if present
    if nw[0] == "'":
        nw = nw[1:]
    if nw and nw[-1] == "'":
        nw = nw[:-1]
    return nw
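# Illustrative behaviour (alphabet assumed for the example only):
#   correct_spelling("don't!", set("abcdefghijklmnopqrstuvwxyz'")) -> "don't"
#   correct_spelling("'tis",   set("abcdefghijklmnopqrstuvwxyz'")) -> "tis"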
# langs = os.listdir(data)
map = {}
novalids, noalphas = [], []
for l in tqdm(langs):
valid, alpha = False, False
map[l] = {}
try:
v = Validator(l)
valid = True
except:
novalids.append(l)
valid = False
try:
alphabet = set(Alphabet(l).get_alphabet())
alpha = True
except:
noalphas.append(l)
alpha = False
words = os.listdir(data / l / 'clips')
if not alpha:
map[l] = {k:k for k in words}
continue
for w in tqdm(words):
nw = w
if valid:
nw_validated = v.validate(nw)
if nw_validated is not None:
nw = nw_validated
nw = correct_spelling(nw, alphabet)
map[l][w] = nw
cleaned = map
import pickle
with open('cleaned_LT.p','wb') as file:
pickle.dump(cleaned, file)
|
[
"sharadchitlangia24sc@gmail.com"
] |
sharadchitlangia24sc@gmail.com
|
82b102860dad12c81b3575f99ab5d3102e7229e3
|
927d23e5fbcbd7001b1007990b9a28014bfb8219
|
/mnist_classification.py
|
373bf1d62d3f945e2554161b608f5dc3b439098b
|
[] |
no_license
|
minar09/tensorflow-practices
|
5822cf784063223bc0a5a62570fa0a5548cf1ef0
|
7982860ce2ec6df0c57a5389711464cbddad89fe
|
refs/heads/master
| 2020-03-28T21:09:32.658650
| 2018-10-08T15:25:08
| 2018-10-08T15:25:08
| 149,133,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,725
|
py
|
#### MNIST classification ###
# Hide the warning messages about CPU/GPU
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Import libraries
import tensorflow as tf
import time
import numpy as np
old_v = tf.logging.get_verbosity()
tf.logging.set_verbosity(tf.logging.ERROR)
# Download/Read MNIST
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Hide the warning messages about deprecations of MNIST data read
tf.logging.set_verbosity(old_v)
# Initialize parameters
t1 = time.time()
num_steps = 5000
batch_size = 128
display_step = 500
n_hidden_1 = 256
n_hidden_2 = 256
n_hidden_3 = 256
num_input = 784
num_classes = 10
# Define placeholder
x = tf.placeholder("float", [None, num_input])
y = tf.placeholder("float", [None, num_classes])
# Define Weight and Bias for linear regression
weights = {
'h1' : tf.Variable(tf.random_normal([num_input, n_hidden_1])),
'h2' : tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'h3' : tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),
    'out' : tf.Variable(tf.random_normal([n_hidden_3, num_classes]))
}
biases = {
'b1' : tf.Variable(tf.random_normal([n_hidden_1])),
'b2' : tf.Variable(tf.random_normal([n_hidden_2])),
'b3' : tf.Variable(tf.random_normal([n_hidden_3])),
'out' : tf.Variable(tf.random_normal([num_classes]))
}
# Initialize the model
def mlp(x):
l1 = tf.nn.relu(tf.add(tf.matmul(x, weights['h1']), biases['b1']))
l2 = tf.nn.relu(tf.add(tf.matmul(l1, weights['h2']), biases['b2']))
l3 = tf.nn.relu(tf.add(tf.matmul(l2, weights['h3']), biases['b3']))
lout = tf.add(tf.matmul(l3, weights['out']), biases['out'])
return lout
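# Shape walk-through of mlp(): x (batch, 784) -> l1 (batch, 256)
# -> l2 (batch, 256) -> l3 (batch, 256) -> lout (batch, 10)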
# Define hypothesis, cost and optimization functions
logits = mlp(x)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(cost)
prediction = tf.nn.softmax(logits)
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Launch graph/Initialize session
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for step in range(1, num_steps+1):
batch_train_images, batch_train_labels = mnist.train.next_batch(batch_size)
sess.run(optimizer, feed_dict={x: batch_train_images, y: batch_train_labels})
if step % display_step == 0 or step == 1:
print("Step " + str(step) + " out of " + str(num_steps))
print("Optimization finished!")
t2 = time.time()
print("Testing accuracy: ", sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})*100, "%")
print("Learning time: " + str(t2-t1) + " seconds")
|
[
"minar09.bd@gmail.com"
] |
minar09.bd@gmail.com
|
5ea8085f35c9778a5a1d4aae6dc84dacc2eb3e30
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/edifact/D08A/MOVINSD08AUN.py
|
546f7086b78b82989b1b35deabc5ccb25f908114
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 1,740
|
py
|
#Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD08AUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 1, MAX: 1},
{ID: 'RFF', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
]},
{ID: 'NAD', MIN: 0, MAX: 9, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 9},
]},
]},
{ID: 'TDT', MIN: 1, MAX: 3, LEVEL: [
{ID: 'LOC', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 1, MAX: 99},
{ID: 'RFF', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 1},
]},
{ID: 'HAN', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'LOC', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'RFF', MIN: 1, MAX: 99},
{ID: 'FTX', MIN: 0, MAX: 9},
{ID: 'MEA', MIN: 0, MAX: 9},
{ID: 'DIM', MIN: 0, MAX: 9},
{ID: 'LOC', MIN: 0, MAX: 9},
{ID: 'NAD', MIN: 1, MAX: 99},
{ID: 'TMP', MIN: 0, MAX: 1, LEVEL: [
{ID: 'RNG', MIN: 0, MAX: 1},
]},
{ID: 'EQD', MIN: 0, MAX: 99, LEVEL: [
{ID: 'EQN', MIN: 0, MAX: 1},
]},
{ID: 'EQA', MIN: 0, MAX: 99, LEVEL: [
{ID: 'EQN', MIN: 0, MAX: 1},
]},
{ID: 'GID', MIN: 0, MAX: 9999, LEVEL: [
{ID: 'GDS', MIN: 0, MAX: 1},
]},
{ID: 'RFF', MIN: 0, MAX: 999, LEVEL: [
{ID: 'DGS', MIN: 1, MAX: 99, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
]},
]},
]},
]},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
1dbae09a882980f310395543b799670a7ed4e9c9
|
8217d63b4f8875598cc8e01c9cdf4c92e35e4e62
|
/tools/pypmmn/pypmmn/pypmmn.py
|
4d38dc5e8ba1cdc93e9037397c8102e80acbeff2
|
[] |
no_license
|
bubbafix/munin-contrib
|
d5bdfa156cbebbec73a3851349859bf7caa137eb
|
b9ec8fbb040808bf4930bea6b065ce5564fbd77d
|
refs/heads/master
| 2021-01-16T19:58:11.309579
| 2013-05-22T09:02:23
| 2013-05-22T09:02:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,683
|
py
|
#!/usr/bin/python
"""
A very simple munin-node written in pure python (no external libraries
required)
"""
from datetime import datetime
from logging.handlers import RotatingFileHandler
from optparse import OptionParser
from os import listdir, access, X_OK, getpid
from os.path import join, isdir, abspath, dirname, exists
from subprocess import Popen, PIPE
from time import sleep
import logging
import socket
import sys
LOG = logging.getLogger(__name__)
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
SESSION_TIMEOUT = 10 # Amount of seconds until an unused session is closed
from daemon import createDaemon
__version__ = '1.0b1'
class CmdHandler(object):
"""
This handler defines the protocol between munin and this munin node.
Each method starting with ``do_`` responds to the corresponding munin
command.
"""
def __init__(self, get_fun, put_fun, options):
"""
Constructor
:param get_fun: The function used to receive a message from munin
:param put_fun: The function used to send a message back to munin
:param options: The command-line options object
"""
self.get_fun = get_fun
self.put_fun = put_fun
self.options = options
def do_version(self, arg):
"""
        Prints the node banner (also sent as the reply to the "version" command).
"""
LOG.debug('Command "version" executed with args: %r' % arg)
self.put_fun('# munin node at %s\n' % (
self.options.host,
))
def do_nodes(self, arg):
"""
Prints this hostname
"""
LOG.debug('Command "nodes" executed with args: %r' % arg)
self.put_fun('%s\n' % self.options.host)
self.put_fun('.\n')
def do_quit(self, arg):
"""
Stops this process
"""
LOG.debug('Command "quit" executed with args: %r' % arg)
sys.exit(0)
def do_list(self, arg):
"""
Print a list of plugins
"""
LOG.debug('Command "list" executed with args: %r' % arg)
try:
LOG.debug('Listing files inside %s' % self.options.plugin_dir)
for filename in listdir(self.options.plugin_dir):
if not access(join(self.options.plugin_dir, filename), X_OK):
LOG.warning('Non-executable plugin %s found!' % filename)
continue
LOG.debug('Found plugin: %s' % filename)
self.put_fun("%s " % filename)
except OSError, exc:
self.put_fun("# ERROR: %s" % exc)
self.put_fun("\n")
def _caf(self, plugin, cmd):
"""
handler for ``config``, ``alert`` and ``fetch``
Calls the plugin with ``cmd`` as only argument.
:param plugin: The plugin name
:param cmd: The command which is to passed to the plugin
"""
plugin_filename = join(self.options.plugin_dir, plugin)
# Sanity checks
if isdir(plugin_filename) or not access(plugin_filename, X_OK):
msg = "# Unknown plugin [%s] for %s" % (plugin, cmd)
LOG.warning(msg)
self.put_fun(msg)
return
# for 'fetch' we don't need to pass a command to the plugin
if cmd == 'fetch':
plugin_arg = ''
else:
plugin_arg = cmd
try:
cmd = [plugin_filename, plugin_arg]
LOG.debug('Executing %r' % cmd)
output = Popen(cmd, stdout=PIPE).communicate()[0]
except OSError, exc:
            LOG.exception('Plugin execution failed: %r' % cmd)
self.put_fun("# ERROR: %s\n" % exc)
return
self.put_fun(output)
self.put_fun('.\n')
def do_alert(self, arg):
"""
Handle command "alert"
"""
LOG.debug('Command "alert" executed with args: %r' % arg)
self._caf(arg, 'alert')
def do_fetch(self, arg):
"""
Handles command "fetch"
"""
LOG.debug('Command "fetch" executed with args: %r' % arg)
self._caf(arg, 'fetch')
def do_config(self, arg):
"""
Handles command "config"
"""
LOG.debug('Command "config" executed with args: %r' % arg)
self._caf(arg, 'config')
def do_cap(self, arg):
"""
Handles command "cap"
"""
LOG.debug('Command "cap" executed with args: %r' % arg)
self.put_fun("cap ")
if self.options.spoolfetch_dir:
self.put_fun("spool")
else:
LOG.debug('No spoolfetch_dir specified. Result spooling disabled')
self.put_fun("\n")
def do_spoolfetch(self, arg):
"""
Handles command "spoolfetch"
"""
LOG.debug('Command "spellfetch" executed with args: %r' % arg)
        output = Popen(['%s/spoolfetch_%s' % (self.options.spoolfetch_dir,
                                              self.options.host),
                        arg], stdout=PIPE).communicate()[0]
self.put_fun(output)
self.put_fun('.\n')
# aliases
do_exit = do_quit
def handle_input(self, line):
"""
Handles one input line and sends any result back using ``put_fun``
"""
line = line.strip()
line = line.split(' ')
cmd = line[0]
if len(line) == 1:
arg = ''
elif len(line) == 2:
arg = line[1]
else:
self.put_fun('# Invalid input: %s\n' % line)
return
if not cmd:
return
func = getattr(self, 'do_%s' % cmd, None)
if not func:
# Give the client a list of supported commands.
commands = [_[3:] for _ in dir(self) if _.startswith('do_')]
self.put_fun("# Unknown command. Supported commands: %s\n" % (
commands))
return
func(arg)
def is_timed_out(self):
return (datetime.now() - self._last_command).seconds > SESSION_TIMEOUT
def reset_time(self):
self._last_command = datetime.now()
def usage(option, opt, value, parser):
"""
Prints the command usage and exits
"""
parser.print_help()
sys.exit(0)
def get_options():
"""
Parses command-line arguments.
"""
parser = OptionParser(add_help_option=False)
parser.add_option('-p', '--port', dest='port',
default=None,
help='TCP Port to listen on. (If not specified, use stdin/stdout)')
parser.add_option('-d', '--plugin-dir', dest='plugin_dir',
default='plugins',
help=('The directory containing the munin-plugins.'
' Default: <current working dir>/plugins'))
parser.add_option('-h', '--host', dest='host',
help=('The hostname which will be reported in the plugins.'
' Default: %s' % socket.gethostname()),
default=socket.gethostname())
parser.add_option('-n', '--no-daemon', dest='no_daemon',
default=False,
action='store_true',
help='Run in foreground. Do not daemonize. '
'Will also enable debug logging to stdout.')
parser.add_option('-l', '--log-dir', dest='log_dir',
default=None,
help='The log folder. Default: disabled')
    parser.add_option('-s', '--spoolfetch-dir', dest='spoolfetch_dir',
default=None,
help='The spoolfetch folder. Default: disabled')
parser.add_option('--help', action='callback', callback=usage,
help='Shows this help')
options, args = parser.parse_args()
# ensure we are using absolute paths (for daemonizing)
if options.log_dir:
options.log_dir = abspath(options.log_dir)
if options.spoolfetch_dir:
options.spoolfetch_dir = abspath(options.spoolfetch_dir)
if options.plugin_dir:
options.plugin_dir = abspath(options.plugin_dir)
return (options, args)
def process_stdin(options):
"""
Process commands by reading from stdin
"""
rfhandler = RotatingFileHandler(
join(abspath(dirname(__file__)), 'log', 'pypmmn.log'),
maxBytes=100 * 1024,
backupCount=5
)
rfhandler.setFormatter(logging.Formatter(LOG_FORMAT))
logging.getLogger().addHandler(rfhandler)
handler = CmdHandler(sys.stdin.read, sys.stdout.write, options)
handler.do_version(None)
LOG.info('STDIN handler opened')
while True:
data = sys.stdin.readline().strip()
if not data:
return
handler.handle_input(data)
def process_socket(options):
"""
Process socket connections.
.. note::
This is not a multithreaded process. So only one connection can be
handled at any given time. But given the nature of munin, this is Good
Enough.
"""
retcode = 0
if options.no_daemon:
# set up on-screen-logging
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(logging.Formatter(LOG_FORMAT))
logging.getLogger().addHandler(console_handler)
else:
# fork fork
retcode = createDaemon()
# set up a rotating file log
rfhandler = RotatingFileHandler(
join(options.log_dir, 'daemon.log'),
maxBytes=100 * 1024,
backupCount=5
)
rfhandler.setFormatter(logging.Formatter(LOG_FORMAT))
logging.getLogger().addHandler(rfhandler)
# write down some house-keeping information
LOG.info('New process PID: %d' % getpid())
pidfile = open(join(options.log_dir, 'pypmmn.pid'), 'w')
pidfile.write(str(getpid()))
pidfile.close()
LOG.info('PID file created in %s' % join(options.log_dir,
'pypmmn.pid'))
LOG.info('Socket handler started.')
host = '' # listens on all addresses TODO: make this configurable
port = int(options.port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
s.listen(1)
LOG.info('Listening on host %r, port %r' % (host, port))
conn, addr = s.accept()
handler = CmdHandler(conn.recv, conn.send, options)
handler.do_version(None)
handler.reset_time()
LOG.info("Accepting incoming connection from %s" % (addr, ))
while True:
data = conn.recv(1024)
if not data.strip():
sleep(1)
if handler.is_timed_out():
LOG.info('Session timeout.')
conn.shutdown(socket.SHUT_RDWR)
conn.close()
LOG.info('Listening on host %r, port %r' % (host, port))
conn, addr = s.accept()
handler.reset_time()
handler.get_fun = conn.recv
handler.put_fun = conn.send
handler.do_version(None)
LOG.info("Accepting incoming connection from %s" % (addr, ))
try:
data = conn.recv(1024)
except socket.error, exc:
LOG.warning("Socket error. Reinitialising.: %s" % exc)
conn, addr = s.accept()
handler.reset_time()
handler.get_fun = conn.recv
handler.put_fun = conn.send
handler.do_version(None)
LOG.info("Accepting incoming connection from %s" % (addr, ))
if data.strip() == 'quit':
LOG.info('Client requested session end. Closing connection.')
conn.shutdown(socket.SHUT_RDWR)
conn.close()
LOG.info('Listening on host %r, port %r' % (host, port))
conn, addr = s.accept()
handler.reset_time()
handler.get_fun = conn.recv
handler.put_fun = conn.send
handler.do_version(None)
LOG.info("Accepting incoming connection from %s" % (addr, ))
continue
handler.handle_input(data)
sys.exit(retcode)
def main():
"""
The main entry point of the application
"""
options, args = get_options()
# Handle logging as early as possible.
if options.log_dir:
if not exists(options.log_dir):
raise IOError('[Errno 2] No such file or directory: %r' % (
options.log_dir))
# set up logging if requested
root_logger = logging.getLogger()
root_logger.setLevel(logging.NOTSET) # TODO: Make configurable
# Start either the "stdin" interface, or the socked daemon. Depending on
# whether a port was given on startup or not.
if not options.port:
process_stdin(options)
else:
process_socket(options)
if __name__ == '__main__':
main()
|
[
"michel@albert.lu"
] |
michel@albert.lu
|
088dc88aa4aeb64878d97237118802a64edf1d5f
|
48db7bebad4309a7bca8b7dec2cc9193551f46a3
|
/returns/_generated/pointfree/bind_io.pyi
|
192094dd7cdfecddb6d6bb7c83451e2b4d7e27ae
|
[
"BSD-2-Clause"
] |
permissive
|
kenjihiraoka/returns
|
bff6196a059d411b6c36f4a2e284e4439d24fd73
|
4589973520d7226b18acd7295d1a9a10ff032759
|
refs/heads/master
| 2022-11-20T13:20:41.094871
| 2020-07-07T08:23:05
| 2020-07-07T08:23:05
| 277,863,697
| 0
| 0
|
BSD-2-Clause
| 2020-07-07T16:09:25
| 2020-07-07T16:09:25
| null |
UTF-8
|
Python
| false
| false
| 1,779
|
pyi
|
from typing import Callable, TypeVar, overload
from typing_extensions import Protocol
from returns.context import RequiresContextFutureResult, RequiresContextIOResult
from returns.future import Future, FutureResult
from returns.io import IO, IOResult
_ValueType = TypeVar('_ValueType', contravariant=True)
_ErrorType = TypeVar('_ErrorType')
_NewValueType = TypeVar('_NewValueType', covariant=True)
_EnvType = TypeVar('_EnvType', contravariant=True)
class _BindIO(Protocol[_ValueType, _NewValueType]):
"""
Helper class to represent type overloads for ret_type based on a value type.
Contains all containers we have.
It does not exist in runtime.
It is also completely removed from typing with the help of the mypy plugin.
"""
@overload
def __call__(
self,
container: RequiresContextIOResult[_EnvType, _ValueType, _ErrorType],
) -> RequiresContextIOResult[_EnvType, _NewValueType, _ErrorType]:
...
@overload
def __call__(
self,
container: RequiresContextFutureResult[
_EnvType, _ValueType, _ErrorType,
],
) -> RequiresContextFutureResult[_EnvType, _NewValueType, _ErrorType]:
...
@overload
def __call__(
self,
container: IOResult[_ValueType, _ErrorType],
) -> IOResult[_NewValueType, _ErrorType]:
...
@overload
def __call__(
self,
container: Future[_ValueType],
) -> Future[_NewValueType]:
...
@overload
def __call__(
self,
container: FutureResult[_ValueType, _ErrorType],
) -> FutureResult[_NewValueType, _ErrorType]:
...
def _bind_io(
function: Callable[[_ValueType], IO[_NewValueType]],
) -> _BindIO[_ValueType, _NewValueType]:
...
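# Usage sketch (illustrative, not part of the stub): given a function
#   def to_upper(s: str) -> IO[str]: ...
# `bind_io(to_upper)` lifts it over any supported container, e.g.
#   bind_io(to_upper)(IOResult.from_value('ab'))  # -> IOResult of 'AB'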
|
[
"mail@sobolevn.me"
] |
mail@sobolevn.me
|
836b76b01fc12ad24e8d5601219287b0420d878d
|
200b0fbe48d87857d12bd737456c0a68432d9fe9
|
/5. mouse_events/more_mouse_events_2.py
|
bea8efeda088eb6659f39993df7e61eeffaf63b5
|
[] |
no_license
|
mclods/gods-eye
|
d5640a2dbe7296040ec08f7d7997c3e2e1155a62
|
70449b82f8c1e3abfbdc8f197abf5e3936584e78
|
refs/heads/master
| 2022-12-13T07:16:12.777312
| 2020-09-04T22:24:39
| 2020-09-04T22:24:39
| 291,174,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 617
|
py
|
# Importing Libraries
import numpy as np
import cv2
def click_event(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        # read the BGR components of the clicked pixel
        blue = img[y, x, 0]
        green = img[y, x, 1]
        red = img[y, x, 2]
        # mark the clicked point and refresh the source window
        cv2.circle(img, (x, y), 3, (0, 0, 255), -1)
        cv2.imshow('image', img)
        # show a swatch window filled with the sampled colour
        myColorImage = np.zeros((512, 512, 3), np.uint8)
        myColorImage[:] = [blue, green, red]
        cv2.imshow('color', myColorImage)
# img = np.zeros((512, 512, 3), np.uint8)
img = cv2.imread('lena.jpg', 1)
cv2.imshow('image', img)
cv2.setMouseCallback('image', click_event)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"47282664+mclods@users.noreply.github.com"
] |
47282664+mclods@users.noreply.github.com
|
06186cc49912bee6062a948f30bf667d3200eda5
|
63b0fb34672015ad6ca7be6a379340c19a20deba
|
/Top teen Programmer/Week2/dprime.py
|
bc97930571f00377f389e64cf24ad422f1ccdc15
|
[] |
no_license
|
Roberto09/Competitive-Programming
|
dbc291bb33f3e61a48bd62407a392ac0e3a6cfd2
|
29fb64c6931e5a27871f75dd7394735e454d5309
|
refs/heads/master
| 2021-05-15T12:45:36.314768
| 2018-05-29T04:39:34
| 2018-05-29T04:39:34
| 108,482,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
import sys
def alg(p):
z=True
for n in range(2,(p/2)+1):
y=float(p)%float(n)
if y != 0:
continue
else:
z=False
break
return z
"""for x in range(ninicial,nfinal):
if alg(x):
print(x)"""
casos=int(sys.stdin.readline())
for caso in range(casos):
respuesta=0
numero=int(sys.stdin.readline())
for x in range(2,numero+1):
if alg(x):
#print(x)
if numero%x==0:
respuesta=respuesta+1
sys.stdout.write(str(respuesta)+'\n')
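
# A faster equivalent (illustrative sketch, not the original solution):
# count distinct prime factors of n directly by trial division in O(sqrt(n)),
# instead of primality-testing every candidate divisor up to n.
def distinct_prime_factors(n):
    count = 0
    d = 2
    while d * d <= n:
        if n % d == 0:
            count += 1
            while n % d == 0:
                n //= d
        d += 1
    if n > 1:
        count += 1
    return count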
|
[
"roberto_garcia_torres@outlook.com"
] |
roberto_garcia_torres@outlook.com
|