blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7c94c4246337922503c3c04bb85a88472f19f303
|
7caa2803db67f5c609865fe0f5c4d24bbbdb4afe
|
/leetcode/621-task-scheduler/main.py
|
ce57a5a949b68ada661a286438c646a3b5b02b77
|
[] |
no_license
|
ataul443/AlgoDaily
|
106fd9e496ede30bfdf223ce54dcac2b14852815
|
b8ae4f80cf162681aaff1ff8ed6e1e4d05f2010d
|
refs/heads/master
| 2022-11-20T10:47:52.683016
| 2020-07-16T13:27:55
| 2020-07-16T13:27:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,729
|
py
|
import heapq
from collections import *
"""
1st approach: maxheap
- similar to lc358
1. count occurence for each task
2. in each iteration
- pop the tasks from maxheap n+1 times
- put the tasks back to the queue with decremented count
3. remove trailing '-'(idle)
Time O(nlog26) -> O(n)
Space O(n)
744 ms, faster than 26.51%
"""
class Solution(object):
    def leastInterval(self, tasks, n):
        """Return the minimum number of CPU intervals needed to run all
        tasks when identical tasks must be at least `n` intervals apart.

        :type tasks: List[str]
        :type n: int
        :rtype: int
        """
        # Tally how many times each task must run.
        freq = Counter(tasks)
        # Max-heap via negated counts; ties break on the task label, so the
        # pop order is fully deterministic.
        heap = [(-cnt, task) for task, cnt in freq.items()]
        heapq.heapify(heap)
        schedule = []
        while heap:
            batch = []
            # Fill one cooling window of n+1 slots, padding with idle '-'
            # whenever fewer distinct tasks remain.
            for _ in range(n + 1):
                if heap:
                    cnt, task = heapq.heappop(heap)
                    schedule.append(task)
                    batch.append((cnt, task))
                else:
                    schedule.append("-")
            # Re-queue every task that still has occurrences left.
            for cnt, task in batch:
                if -cnt > 1:
                    heapq.heappush(heap, (cnt + 1, task))
        # Idle slots at the tail serve no purpose; drop them.
        while schedule and schedule[-1] == "-":
            schedule.pop()
        # The schedule length is the answer.
        return len(schedule)
# Ad-hoc smoke checks: print the minimum schedule length for three
# sample task lists, all with cooldown n=2.
print(Solution().leastInterval(["A", "A", "A", "B", "B", "B"], 2))
print(Solution().leastInterval(["A", "A", "A", "B", "B", "B", "C", "C"], 2))
print(Solution().leastInterval(
    ["A", "A", "A", "A", "A", "B", "B", "B", "C", "C", "C", "D", "D"], 2))
|
[
"chan9118kin@gmail.com"
] |
chan9118kin@gmail.com
|
06c0697c042852553ea8f2603afaca223ce2c5c1
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_81/311.py
|
d316cf2c5a48f65d437f14d31ee4f5b5c78d65a5
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,068
|
py
|
def crash():
    """Deliberately abort execution (debugging helper).

    Raises:
        AssertionError: always.
    """
    # The original `assert 1==2` is silently stripped under `python -O`,
    # turning this into a no-op; raise explicitly so it always fires.
    raise AssertionError
def percentage(arrayWins):
    """Return the fraction of decided games ('1' or '0' entries) that
    are wins ('1')."""
    won = arrayWins.count('1')
    lost = arrayWins.count('0')
    total_decided = won + lost
    return float(won) / total_decided
def average(arrayWins):
    """Return the arithmetic mean of the values in *arrayWins* as a float."""
    total = sum(arrayWins)
    count = len(arrayWins)
    return float(total) / count
# Google Code Jam solution script (Python 2 — note the `print` statement):
# reads a win/loss matrix per case and emits each team's RPI
# (Rating Percentage Index).
fileLoc = '/Users/alarobric/Downloads/'
#fileLoc += 'A-small-attempt0'
#fileLoc += 'A-test'
fileLoc += 'A-large'
f = open(fileLoc+'.in', 'r')
g = open(fileLoc+'.out', 'w')
cases = int(f.readline())
for i in range (1, cases + 1):
    N = int(f.readline())
    #N teams
    winPerc = []  # WP: each team's own winning percentage
    winPercAgainst = [[] for j in range(N)]  # per-opponent WPs with that opponent's games removed
    owp = []   # OWP: opponents' winning percentage
    oowp = []  # OOWP: opponents' opponents' winning percentage
    rpi = []
    wins = []
    # One row per team: '1' win, '0' loss, '.' = never played.
    for j in range(N):
        line = [c for c in f.readline().strip()]
        wins.append(line)
    #print wins
    for k, teamWins in enumerate(wins):
        #print teamWins
        winPerc.append(percentage(teamWins))
        for j in range(N):
            if teamWins[j] == '.':
                # Teams k and j never met; keep a placeholder.
                winPercAgainst[j].append('.')
            else:
                # Team k's WP with the games against team j excluded.
                tempTeamWins = teamWins[:]
                tempTeamWins.pop(j)
                #print "k", k, j, tempTeamWins, percentage(tempTeamWins)
                winPercAgainst[j].append(percentage(tempTeamWins))
    for winPercAgainstSub in winPercAgainst:
        #print "a", winPercAgainstSub
        # Strip the '.' placeholders before averaging into OWP.
        for bob in range(winPercAgainstSub.count('.')):
            winPercAgainstSub.remove('.')
        owp.append(average(winPercAgainstSub))
    for k, teamWins in enumerate(wins):
        oowpTmp = []
        for j in range(N):
            if teamWins[j] == '1' or teamWins[j] == '0':
                oowpTmp.append(owp[j])
        oowp.append(average(oowpTmp))
        # RPI = 0.25*WP + 0.50*OWP + 0.25*OOWP
        rpi.append(0.25 * winPerc[k] + 0.50 * owp[k] + 0.25 * oowp[k])
    #print "end"
    #print winPerc
    #print owp
    #print oowp
    #print rpi
    output = "Case #" + str(i) + ": " + "\n"
    for k in range(N):
        output += str(rpi[k]) + "\n"
    print output
    g.write(output)
#2
#3
#.10
#0.1
#10.
#4
#.11.
#0.00
#01.1
#.10.
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
b02b6f4a8dad54e71517c5a53c23626326299904
|
1a36f8f77ca3d3093b51932f8df15b3c2fad3ae4
|
/mlx/filesystem/local_filesystem.py
|
599111e0fe82594c1cd126f3a05558c81109f986
|
[
"Apache-2.0"
] |
permissive
|
lewfish/mlx
|
3c2477aa2fc53f5522e4e70975f0e430090c3024
|
027decf72bf9d96de3b4de13dcac7b352b07fd63
|
refs/heads/master
| 2020-04-20T13:57:36.831202
| 2019-10-08T00:37:55
| 2019-10-08T00:37:55
| 168,884,084
| 0
| 0
|
Apache-2.0
| 2019-10-08T00:37:56
| 2019-02-02T22:13:28
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,068
|
py
|
import os
import shutil
from datetime import datetime, timezone
import glob
from mlx.filesystem import (FileSystem, NotReadableError)
def make_dir(path, check_empty=False, force_empty=False, use_dirname=False):
    """Make a local directory.

    Args:
        path: path to directory
        check_empty: if True, check that directory is empty
        force_empty: if True, delete files if necessary to make directory
            empty
        use_dirname: if path is a file, use the the parent directory as path

    Raises:
        ValueError if check_empty is True and directory is not empty
    """
    # Optionally resolve a file path down to its parent directory.
    target = os.path.abspath(os.path.dirname(path)) if use_dirname else path
    # Wipe any existing contents when an empty directory was requested.
    if force_empty and os.path.isdir(target):
        shutil.rmtree(target)
    os.makedirs(target, exist_ok=True)
    # any(os.scandir(...)) is True as soon as one entry exists.
    if check_empty and any(os.scandir(target)):
        raise ValueError(
            '{} needs to be an empty directory!'.format(target))
class LocalFileSystem(FileSystem):
    """FileSystem backend for files on the local disk."""

    @staticmethod
    def matches_uri(uri: str, mode: str) -> bool:
        # Local disk is the catch-all backend: it accepts every URI.
        return True

    @staticmethod
    def file_exists(uri: str, include_dir: bool = True) -> bool:
        """Return True if uri is an existing file (or directory when
        include_dir is True)."""
        return (os.path.isfile(uri) or (include_dir and os.path.isdir(uri)))

    @staticmethod
    def read_str(file_uri: str) -> str:
        """Read a text file; raise NotReadableError if it is not a file."""
        if not os.path.isfile(file_uri):
            raise NotReadableError('Could not read {}'.format(file_uri))
        with open(file_uri, 'r') as file_buffer:
            return file_buffer.read()

    @staticmethod
    def read_bytes(file_uri: str) -> bytes:
        """Read a binary file; raise NotReadableError if it is not a file."""
        if not os.path.isfile(file_uri):
            raise NotReadableError('Could not read {}'.format(file_uri))
        with open(file_uri, 'rb') as file_buffer:
            return file_buffer.read()

    @staticmethod
    def write_str(file_uri: str, data: str) -> None:
        """Write text to file_uri, creating parent directories as needed."""
        make_dir(file_uri, use_dirname=True)
        with open(file_uri, 'w') as content_file:
            content_file.write(data)

    @staticmethod
    def write_bytes(file_uri: str, data: bytes) -> None:
        """Write bytes to file_uri, creating parent directories as needed."""
        make_dir(file_uri, use_dirname=True)
        with open(file_uri, 'wb') as content_file:
            content_file.write(data)

    @staticmethod
    def sync_from_dir(src_dir_uri: str,
                      dest_dir_uri: str,
                      delete: bool = False) -> None:
        """Recursively copy src into dest; with delete=True dest is removed
        first so it becomes an exact mirror of src."""
        if src_dir_uri == dest_dir_uri:
            return
        if delete:
            shutil.rmtree(dest_dir_uri)

        # https://stackoverflow.com/a/15824216/841563
        def recursive_overwrite(src, dest):
            # Directories are merged; files are overwritten in place.
            if os.path.isdir(src):
                if not os.path.isdir(dest):
                    os.makedirs(dest)
                for entry in os.scandir(src):
                    recursive_overwrite(entry.path,
                                        os.path.join(dest, entry.name))
            else:
                shutil.copyfile(src, dest)

        recursive_overwrite(src_dir_uri, dest_dir_uri)

    @staticmethod
    def sync_to_dir(src_dir_uri: str, dest_dir_uri: str,
                    delete: bool = False) -> None:
        # For the local backend both sync directions are the same operation.
        LocalFileSystem.sync_from_dir(src_dir_uri, dest_dir_uri, delete)

    @staticmethod
    def copy_to(src_path: str, dst_uri: str) -> None:
        """Copy a local file to dst_uri, creating parent dirs as needed."""
        if src_path != dst_uri:
            make_dir(dst_uri, use_dirname=True)
            shutil.copyfile(src_path, dst_uri)

    @staticmethod
    def copy_from(uri: str, path: str) -> None:
        # NOTE(review): this only validates that `path` exists and never
        # performs a copy. For the local backend local_path() returns the
        # URI unchanged, so uri and path are presumably the same file —
        # confirm against callers before changing this.
        not_found = not os.path.isfile(path)
        if not_found:
            raise NotReadableError('Could not read {}'.format(uri))

    @staticmethod
    def local_path(uri: str, download_dir: str) -> str:
        # A local URI already is a local path; download_dir is unused here.
        path = uri
        return path

    @staticmethod
    def last_modified(uri: str) -> datetime:
        """Return the file's mtime as a timezone-aware UTC datetime."""
        local_last_modified = datetime.utcfromtimestamp(os.path.getmtime(uri))
        return local_last_modified.replace(tzinfo=timezone.utc)

    @staticmethod
    def list_paths(uri, ext=None):
        """List direct children of uri whose names end with ext ('' = all)."""
        if ext is None:
            ext = ''
        return glob.glob(os.path.join(uri, '*' + ext))
|
[
"lewfish@gmail.com"
] |
lewfish@gmail.com
|
84d16dc699f540476901eb0935eba31f39b44c87
|
cace862c1d95f6b85a9750a427063a8b0e5ed49c
|
/binaryapi/ws/chanels/statement.py
|
1feab178b3ce9eabd3e3223fa3549dc1c82fe4f4
|
[] |
no_license
|
HyeongD/binaryapi
|
65486532389210f1ca83f6f2098276ecf984702b
|
e8daa229c04de712242e8e9b79be3b774b409e35
|
refs/heads/master
| 2023-08-29T13:24:58.364810
| 2021-10-26T19:00:59
| 2021-10-26T19:00:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,393
|
py
|
"""Module for Binary statement websocket channel."""
from binaryapi.ws.chanels.base import Base
from typing import Any, Optional, Union
from decimal import Decimal
# https://developers.binary.com/api/#statement
class Statement(Base):
    """Class for Binary statement websocket channel."""

    # Channel name; also used as the request key below.
    name = "statement"

    def __call__(self, action_type: Optional[str] = None, date_from: Optional[int] = None, date_to: Optional[int] = None, description: Optional[int] = None, limit: Optional[Union[int, float, Decimal]] = None, offset: Optional[Union[int, float, Decimal]] = None, passthrough: Optional[Any] = None, req_id: Optional[int] = None):
        """Method to send message to statement websocket channel.
        Statement (request)
        Retrieve a summary of account transactions, according to given search criteria
        :param action_type: [Optional] To filter the statement according to the type of transaction.
        :type action_type: Optional[str]
        :param date_from: [Optional] Start date (epoch)
        :type date_from: Optional[int]
        :param date_to: [Optional] End date (epoch)
        :type date_to: Optional[int]
        :param description: [Optional] If set to 1, will return full contracts description.
        :type description: Optional[int]
        :param limit: [Optional] Maximum number of transactions to receive.
        :type limit: Optional[Union[int, float, Decimal]]
        :param offset: [Optional] Number of transactions to skip.
        :type offset: Optional[Union[int, float, Decimal]]
        :param passthrough: [Optional] Used to pass data through the websocket, which may be retrieved via the `echo_req` output field.
        :type passthrough: Optional[Any]
        :param req_id: [Optional] Used to map request to response.
        :type req_id: Optional[int]
        """
        data = {
            "statement": int(1)
        }
        # NOTE(review): truthiness checks below treat falsy values as unset,
        # so e.g. date_from=0 (epoch start) or description=0 is never sent.
        if action_type:
            data['action_type'] = str(action_type)
        if date_from:
            data['date_from'] = int(date_from)
        if date_to:
            data['date_to'] = int(date_to)
        if description:
            data['description'] = int(description)
        if limit:
            data['limit'] = limit
        if offset:
            data['offset'] = offset
        return self.send_websocket_request(self.name, data, passthrough=passthrough, req_id=req_id)
|
[
"mdn522@gmail.com"
] |
mdn522@gmail.com
|
b95757190f75c244f98cde51fd55a1f6010f586e
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/ecf3f471c55ae18f8a0bbf4c7170104a63eeceb8-<solve>-bug.py
|
e8fa2b2c9ec86842a0502cfe3dd6c00fb1c8305d
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,350
|
py
|
def solve(self):
    """Runs the DifferentialEvolutionSolver.

    Returns
    -------
    res : OptimizeResult
        The optimization result represented as a ``OptimizeResult`` object.
        Important attributes are: ``x`` the solution array, ``success`` a
        Boolean flag indicating if the optimizer exited successfully and
        ``message`` which describes the cause of the termination. See
        `OptimizeResult` for a description of other attributes. If `polish`
        was employed, and a lower minimum was obtained by the polishing,
        then OptimizeResult also contains the ``jac`` attribute.
    """
    (nit, warning_flag) = (0, False)
    status_message = _status_message['success']
    # Energies start out as +inf sentinels; evaluate them only on the
    # first call so repeated solve() calls do not redo the work.
    if np.all(np.isinf(self.population_energies)):
        self._calculate_population_energies()
    for nit in range(1, (self.maxiter + 1)):
        # Each next() advances the solver by one generation; it raises
        # StopIteration once the function-evaluation budget is exhausted.
        try:
            next(self)
        except StopIteration:
            warning_flag = True
            status_message = _status_message['maxfev']
            break
        if self.disp:
            print(('differential_evolution step %d: f(x)= %g' % (nit, self.population_energies[0])))
        convergence = self.convergence
        # A callback returning exactly True requests an early stop.
        if (self.callback and (self.callback(self._scale_parameters(self.population[0]), convergence=(self.tol / convergence)) is True)):
            warning_flag = True
            status_message = 'callback function requested stop early by returning True'
            break
        if ((convergence < self.tol) or warning_flag):
            break
    else:
        # for/else: the loop ran out of iterations without converging.
        status_message = _status_message['maxiter']
        warning_flag = True
    DE_result = OptimizeResult(x=self.x, fun=self.population_energies[0], nfev=self._nfev, nit=nit, message=status_message, success=(warning_flag is not True))
    if self.polish:
        # Refine the best member with L-BFGS-B; adopt it only if better.
        result = minimize(self.func, np.copy(DE_result.x), method='L-BFGS-B', bounds=self.limits.T, args=self.args)
        self._nfev += result.nfev
        DE_result.nfev = self._nfev
        if (result.fun < DE_result.fun):
            DE_result.fun = result.fun
            DE_result.x = result.x
            DE_result.jac = result.jac
            self.population_energies[0] = result.fun
            self.population[0] = self._unscale_parameters(result.x)
    return DE_result
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
d080d3e8d1b6f4511ea71577ee373dcddf86faf3
|
559e6745868358da529c2916180edc90b97f852a
|
/tests/test_connection_serial.py
|
b455ad3694f789772efba6de4bee9a93bb5f2feb
|
[
"MIT"
] |
permissive
|
SwiftyMorgan/msl-equipment
|
712290579e49682337548f8c4294907e9b56d5a3
|
56bc467e97a2a0a60aa6f031dd30bf1d98ebda5c
|
refs/heads/master
| 2020-04-21T14:49:23.902185
| 2019-01-18T18:43:16
| 2019-01-18T18:43:16
| 169,647,690
| 0
| 0
|
MIT
| 2019-02-07T22:08:37
| 2019-02-07T21:46:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,958
|
py
|
import os
import time
import threading
try:
import pty
except ImportError:
pty = None
import pytest
from msl.equipment import EquipmentRecord, ConnectionRecord, Backend, MSLConnectionError
@pytest.mark.skipif(pty is None, reason='pty is not available')
def test_connection_serial_read():
    """Exercise termination-character handling against a pty echo server."""
    term = b'\r\n'

    def echo_server(port):
        # Echo each term-delimited message back; stop on 'SHUTDOWN'.
        while True:
            data = bytearray()
            while not data.endswith(term):
                data.extend(os.read(port, 1))
            if data.startswith(b'SHUTDOWN'):
                break
            os.write(port, data)

    # simulate a Serial port
    master, slave = pty.openpty()
    thread = threading.Thread(target=echo_server, args=(master,))
    thread.start()
    time.sleep(0.5)  # allow some time for the echo server to start
    record = EquipmentRecord(
        connection=ConnectionRecord(
            address='ASRL::' + os.ttyname(slave),
            backend=Backend.MSL,
            properties={
                'read_termination': term,
                'write_termination': term,
                'timeout': 25,
            },
        )
    )
    dev = record.connect()
    assert dev.read_termination == term
    assert dev.write_termination == term
    dev.write('hello')
    assert dev.read() == 'hello'
    # An explicit byte count includes the termination characters.
    n = dev.write('hello')
    assert dev.read(n) == 'hello' + term.decode()
    dev.write('x'*4096)
    assert dev.read() == 'x'*4096
    n = dev.write('123.456')
    with pytest.raises(MSLConnectionError):
        # Asking for more bytes than were echoed must raise.
        dev.read(n+1)
    with pytest.raises(MSLConnectionError):
        dev.read(dev.max_read_size+1)  # requesting more bytes than are maximally allowed
    msg = 'a' * (dev.max_read_size - len(term))
    dev.write(msg)
    assert dev.read() == msg
    dev.write(b'021.3' + term + b',054.2')
    assert dev.read() == '021.3'  # read until first `term`
    assert dev.read() == ',054.2'  # read until second `term`
    dev.write('SHUTDOWN')
|
[
"joe.borbely@gmail.com"
] |
joe.borbely@gmail.com
|
fd8ba7457853c480f4536bb86ddc6b051a090e0a
|
779291cb83ec3cab36d8bb66ed46b3afd4907f95
|
/notebook/2020-02-25_gene_dists.py
|
fb81557f52dcb9ae4b74edaec0aa9f2c2a93caf2
|
[] |
no_license
|
Shengqian95/ncbi_remap
|
ac3258411fda8e9317f3cdf951cc909cc0f1946e
|
3f2099058bce5d1670a672a69c13efd89d538cd1
|
refs/heads/master
| 2023-05-22T06:17:57.900135
| 2020-11-01T17:16:54
| 2020-11-01T17:16:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,105
|
py
|
# %%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from ncbi_remap import plotting
# %%
# "sra_talk"/"sra" are presumably project-installed matplotlib styles —
# TODO confirm they are registered before running this notebook.
plt.style.use(("sra_talk", "sra"))
plt.style.use("sra")
# %%
# FBgn -> gene symbol mapping from the annotation table (and its inverse).
gene_metadata = (
    pd.read_feather(
        "../../larval_gonad/references/gene_annotation_dmel_r6-26.feather",
        columns=["FBgn", "gene_symbol"],
    )
    .set_index("FBgn")
    .squeeze()
)
symbol2fbgn = {v: k for k, v in gene_metadata.items()}
# %%
# TPM gene counts, one row per sample (index) and one column per gene.
gene_expression = pd.read_csv("../output/agg-rnaseq-wf/tpm_gene_counts.tsv", sep="\t", index_col=0)
# %%
def zscore(x):
    """Standardize *x*: subtract its mean and divide by its std deviation."""
    mu = x.mean()
    sigma = x.std()
    return (x - mu) / sigma
# %%
# Z-score dsx expression across samples, then bin into four labeled bins.
dsx = zscore(gene_expression[symbol2fbgn["dsx"]]).rename("dsx")
pd.cut(dsx, bins=4, labels=["low", "low-mid", "mid-high", "high"]).value_counts().map(lambda x: f"{x:,}").to_frame()
# %%
# Index labels (samples) falling into the "high" bin.
pd.cut(dsx, bins=4, labels=["low", "low-mid", "mid-high", "high"]).pipe(lambda x: x[x == "high"]).index.tolist()
# %%
# Density plot of the dsx z-scores.
ax = sns.kdeplot(dsx)
ax.legend_.remove()
ax.set(ylabel="Density", xlabel="Z-score (TPM)")
sns.despine(ax=ax, left=True, right=True)
ax.set_title("dsx", fontstyle="italic")
|
[
"justin.m.fear@gmail.com"
] |
justin.m.fear@gmail.com
|
f6090b4123803ea0f46d3530ec0f174c8b4fa349
|
c4e9b3e5686ed8c6e885aa9f6a72a571f4b33db6
|
/matplotlib_study/multiplot.py
|
1d61e196cd0bb3e007ee0e6d62a3ce621c044f01
|
[] |
no_license
|
WhiteCri/learn-advanced-python
|
c2a081db0f901bb76e470341497014b4384ba803
|
8111cb12e8b05a9168a0236e05f4a6a1cda255eb
|
refs/heads/master
| 2023-04-13T22:55:49.060522
| 2021-04-28T05:00:49
| 2021-04-28T05:00:49
| 345,628,312
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
import numpy as np
import matplotlib.pyplot as plt
# Figure 1: a simple parabola with the default line style.
x = np.linspace(-3, 3, 30)
y = x**2
plt.plot(x, y)
plt.show()
# Figure 2: the same parabola drawn as red dots ('r.').
x = np.linspace(-3, 3, 30)
y = x ** 2
plt.plot(x, y, 'r.')
plt.show()
# Figure 3: sin, cos and -sin overlaid with distinct styles.
plt.plot(x, np.sin(x))
plt.plot(x, np.cos(x), 'r-')
plt.plot(x, -np.sin(x), 'g--')
plt.show()
|
[
"sjrnfu12@naver.com"
] |
sjrnfu12@naver.com
|
5e6416fa20b7c75266a35c0f033ba4e3ad7dab6e
|
20a0bd0a9675f52d4cbd100ee52f0f639fb552ef
|
/config/urls/admin.py
|
aef939fc4dc34e06fb39b702a80393a1c2a7734d
|
[] |
no_license
|
yx20och/bods
|
2f7d70057ee9f21565df106ef28dc2c4687dfdc9
|
4e147829500a85dd1822e94a375f24e304f67a98
|
refs/heads/main
| 2023-08-02T21:23:06.066134
| 2021-10-06T16:49:43
| 2021-10-06T16:49:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,837
|
py
|
from django.conf import settings
from django.contrib import admin
from django.urls import include, path
from django.views import defaults as default_views
from transit_odp.common.utils.custom_error_handlers import (
page_not_found,
permission_denied,
)
from transit_odp.common.views import ComingSoonView
from transit_odp.site_admin.urls import (
account_paths,
agent_paths,
consumer_paths,
metrics_paths,
organisation_paths,
)
from transit_odp.site_admin.views import AdminHomeView
from transit_odp.users.views.auth import InviteOnlySignupView
# Admin-service URL configuration: site-admin dashboards plus the shared
# account/invitation routes.
urlpatterns = [
    path("", AdminHomeView.as_view(), name="home"),
    path(
        "metrics/",
        include(metrics_paths),
    ),
    path("coming_soon/", ComingSoonView.as_view(), name="placeholder"),
    path(
        "",
        include(
            (
                [
                    path("consumers/", include(consumer_paths)),
                    path("organisations/", include(organisation_paths)),
                    path("agents/", include(agent_paths)),
                    # Put account routes here so they share the users namespace
                    path("account/", include(account_paths)),
                ],
                "users",
            ),
            # Note need to add users namespace to be compatible with other service
            namespace="users",
        ),
    ),
    path(
        "account/",
        include(
            [
                # override signup view with invited only signup page
                path(
                    "signup/",
                    view=InviteOnlySignupView.as_view(),
                    name="account_signup",
                ),
                path("", include("config.urls.allauth")),
            ]
        ),
    ),
    path("invitations/", include("config.urls.invitations", namespace="invitations")),
    # Django Admin, use {% url 'admin:index' %}
    # TODO - host route on Admin service
    path(settings.ADMIN_URL, admin.site.urls),
]

if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        path(
            "400/",
            default_views.bad_request,
            kwargs={"exception": Exception("Bad Request!")},
        ),
        path(
            "403/",
            permission_denied,
            kwargs={"exception": Exception("Permission Denied")},
        ),
        path(
            "404/",
            page_not_found,
            kwargs={"exception": Exception("Page not Found")},
        ),
        path("500/", default_views.server_error),
    ]
    if "debug_toolbar" in settings.INSTALLED_APPS:
        import debug_toolbar

        # Put the toolbar first so it matches before the catch-all routes.
        urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
|
[
"ciaran.mccormick@itoworld.com"
] |
ciaran.mccormick@itoworld.com
|
6765e6eccaa7845dd1dfbcce37ca3e18dfc8895e
|
8f0c5cb4938cffb0fb931c9bed6ce3e74d63e342
|
/trydjango/settings.py
|
932475114b7fe2005eb83556f701b6668ca2e373
|
[] |
no_license
|
Mehedi2885/trydjango2
|
acce9b3643769759c62fbc6475c6a3e68b162f1f
|
e1bfe3c14ef2df573d062b60a4a671d74103717b
|
refs/heads/master
| 2022-04-22T20:14:59.813475
| 2020-04-28T20:11:20
| 2020-04-28T20:11:20
| 257,658,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,356
|
py
|
"""
Django settings for trydjango project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j7c_++!-d6=3lx(o^x5g0m(=nc*-ppx%&&l0r27fh#g99_br4_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'products',
'pages',
'blog',
'courses',
'modelQuerySet',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'trydjango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'trydjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'django',
'USER': 'root',
'PASSWORD': 'Hassan2885',
'HOST': 'localhost',
'PORT': '3306',
'OPTIONS': {
'autocommit': True,
},
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"mehedi_45seafarer@yahoo.com"
] |
mehedi_45seafarer@yahoo.com
|
f9fedff9e4d783a6823b9fb25ad4aa897ee8700d
|
556e88a954cf031460ea7fdf3791eb968ca4fbdd
|
/fluent_python/chapter_10/ch10_vector_v3.py
|
6491b78aa2e5ea74affc5864f33b6aa2955b4065
|
[] |
no_license
|
feng-hui/python_books_examples
|
c696243fcb8305be495f44d1a88a02e7f906b7bd
|
e38542db7be927cdaa5d85317a58a13b3a13ae25
|
refs/heads/master
| 2022-03-07T00:37:29.311687
| 2019-09-28T15:15:20
| 2019-09-28T15:15:20
| 122,941,867
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,568
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @time : 2018-11-09 21:44
# @author : feng_hui
# @email : capricorn1203@126.com
import reprlib
from array import array
from math import sqrt
import numbers
class Vector(object):
    """An n-dimensional vector backed by a compact array of doubles."""

    type_code = 'd'
    # Single-letter read-only aliases for the first four components.
    shortcut_names = 'xyzt'

    def __init__(self, components):
        # Store components unboxed in an array('d', ...) for compactness.
        self._components = array(self.type_code, components)

    def __iter__(self):
        return iter(self._components)

    def __len__(self):
        return len(self._components)

    def __getitem__(self, item):
        cls = type(self)
        if isinstance(item, slice):
            # Slicing yields a new Vector, not a bare array slice.
            return cls(self._components[item])
        elif isinstance(item, numbers.Integral):
            return self._components[item]
        else:
            msg = '{cls.__name__} indices must be integers'
            raise TypeError(msg.format(cls=cls))

    def __getattr__(self, item):
        """Resolve x/y/z/t as aliases for components 0-3.

        Raises:
            AttributeError: for any other missing attribute.
        """
        cls = type(self)
        if len(item) == 1:
            pos = cls.shortcut_names.find(item)
            if 0 <= pos < len(self._components):
                return self._components[pos]
        msg = '{.__name__!r} object has no attribute {!r}'
        # BUG FIX: the exception must be raised, not returned. The original
        # `return AttributeError(...)` made e.g. `vec.spam` silently
        # evaluate to an exception object instead of failing.
        raise AttributeError(msg.format(cls, item))

    def __setattr__(self, key, value):
        """Block assignment to single-letter names so the x/y/z/t aliases
        stay read-only and cannot shadow the components."""
        cls = type(self)
        if len(key) == 1:
            if key in cls.shortcut_names:
                error = 'readonly attribute {attr_name!r}'
            elif key.islower():
                error = "can't set attribute 'a' to 'z' in {attr_name!r}"
            else:
                error = ''
            if error:
                msg = error.format(cls_name=cls.__name__, attr_name=key)
                raise AttributeError(msg)
        super().__setattr__(key, value)

    def __repr__(self):
        """Abbreviated repr: reprlib truncates long component lists
        to 'Vector([..., ...])' form."""
        components = reprlib.repr(self._components)
        components = components[components.find('['):-1]
        return 'Vector({})'.format(components)

    def __str__(self):
        return str(tuple(self))

    def __bytes__(self):
        # First byte is the type code, the rest is the raw array data.
        return (
            bytes([ord(self.type_code)]) + bytes(self._components)
        )

    def __eq__(self, other):
        return tuple(self) == tuple(other)

    def __abs__(self):
        return sqrt(sum(x * x for x in self._components))

    def __bool__(self):
        return bool(abs(self))

    @classmethod
    def from_bytes(cls, octets):
        """Alternate constructor: rebuild a Vector from __bytes__ output."""
        type_code = chr(octets[0])
        memv = memoryview(octets[1:]).cast(type_code)
        return cls(memv)
|
[
"982698913@qq.com"
] |
982698913@qq.com
|
b0f80eb2b1f7e213f697b4799f1b0d39d340b773
|
eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7
|
/google/cloud/videointelligence/v1p3beta1/videointelligence-v1p3beta1-py/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/__init__.py
|
a2b915bf9cbce5897c92f4aaabe34406b5a83a45
|
[
"Apache-2.0"
] |
permissive
|
Tryweirder/googleapis-gen
|
2e5daf46574c3af3d448f1177eaebe809100c346
|
45d8e9377379f9d1d4e166e80415a8c1737f284d
|
refs/heads/master
| 2023-04-05T06:30:04.726589
| 2021-04-13T23:35:20
| 2021-04-13T23:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,367
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import StreamingVideoIntelligenceServiceTransport
from .grpc import StreamingVideoIntelligenceServiceGrpcTransport
from .grpc_asyncio import StreamingVideoIntelligenceServiceGrpcAsyncIOTransport
# Compile a registry of transports.
# Keys are the transport names clients pass in their configuration.
_transport_registry = OrderedDict()  # type: Dict[str, Type[StreamingVideoIntelligenceServiceTransport]]
_transport_registry['grpc'] = StreamingVideoIntelligenceServiceGrpcTransport
_transport_registry['grpc_asyncio'] = StreamingVideoIntelligenceServiceGrpcAsyncIOTransport

# Public API of this sub-package.
__all__ = (
    'StreamingVideoIntelligenceServiceTransport',
    'StreamingVideoIntelligenceServiceGrpcTransport',
    'StreamingVideoIntelligenceServiceGrpcAsyncIOTransport',
)
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
e3d5233ed380d8424d31d8ec58827f40bb02cd72
|
38d7109b78e0d009759586f49f506ac25eac6c5f
|
/orca/topology/manager.py
|
87b235f773094269199a88aef782950de6a366a9
|
[
"Apache-2.0"
] |
permissive
|
MoonkiHong/orca
|
19694dfe01a1bfbed9e4911b3c80e66ee78874bc
|
28267c23eff2886f7b22a539c6e77faa2a2a6223
|
refs/heads/master
| 2021-01-14T15:21:15.785498
| 2020-02-23T12:57:25
| 2020-02-23T12:57:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,286
|
py
|
import cotyledon
from orca.graph import drivers as graph_drivers
from orca.graph.graph import Graph
from orca.topology import linker, probe
from orca.topology.alerts.elastalert import manager as es
from orca.topology.alerts.falco import manager as falco
from orca.topology.alerts.prometheus import manager as prom
from orca.topology.infra.istio import manager as istio
from orca.topology.infra.k8s import manager as k8s
from orca.topology.infra.kiali import manager as kiali
class Manager(cotyledon.ServiceManager):
    """Service manager that wires topology probes and linkers to the graph."""

    def __init__(self):
        super().__init__()

    def initialize(self):
        """Build the graph, attach the linker dispatcher, and spawn one
        service per probe from every probe manager."""
        graph = self._init_graph()
        linker_dispatcher = linker.Dispatcher()
        # The dispatcher listens for graph changes and runs matching linkers.
        graph.add_listener(linker_dispatcher)
        probe_managers = [k8s, istio, prom, falco, es, kiali]
        for probe_manager in probe_managers:
            for probe_inst in probe_manager.initialize_probes(graph):
                self.add(probe.ProbeService, workers=1, args=(probe_inst,))
            for linker_inst in probe_manager.initialize_linkers(graph):
                linker_dispatcher.add_linker(linker_inst)

    def _init_graph(self):
        # TODO: read graph backend from config
        graph_client = graph_drivers.DriverFactory.get('neo4j')
        return Graph(graph_client)
|
[
"zurkowski.bartosz@gmail.com"
] |
zurkowski.bartosz@gmail.com
|
275e33e9e41f58b015be34ecee98851acc81ef13
|
540eca7619a4b91424f1d1f269e9ef2c31e2321b
|
/test/functional/abandonconflict.py
|
6919a062c4c70bd4043a8f696a20358c3b68652f
|
[
"MIT"
] |
permissive
|
Roshanthalal/RECAP-Core
|
66406b8327ae233dd507b232e049a9acf10539b1
|
ade84a0a11fe6d3769e7256c8f5117b9480d7e60
|
refs/heads/master
| 2022-11-19T10:05:32.551958
| 2020-07-27T05:03:06
| 2020-07-27T05:03:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,714
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the abandontransaction RPC.
The abandontransaction RPC marks a transaction and all its in-wallet
descendants as abandoned which allows their inputs to be respent. It can be
used to replace "stuck" or evicted transactions. It only works on transactions
which are not included in a block and are not currently in the mempool. It has
no effect on transactions which are already conflicted or abandoned.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class AbandonConflictTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-minrelaytxfee=0.00001"], []]
def run_test(self):
self.nodes[1].generate(100)
sync_blocks(self.nodes)
balance = self.nodes[0].getbalance()
txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
sync_mempools(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
newbalance = self.nodes[0].getbalance()
assert(balance - newbalance < Decimal("0.001")) #no more than fees lost
balance = newbalance
# Disconnect nodes so node0's transactions don't get into node1's mempool
disconnect_nodes(self.nodes[0], 1)
# Identify the 10btc outputs
nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10"))
nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10"))
nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10"))
inputs =[]
# spend 10btc outputs from txA and txB
inputs.append({"txid":txA, "vout":nA})
inputs.append({"txid":txB, "vout":nB})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("14.66668")
outputs[self.nodes[1].getnewaddress()] = Decimal("5")
signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
# Identify the 14.66668btc output
nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.66668"))
#Create a child tx spending AB1 and C
inputs = []
inputs.append({"txid":txAB1, "vout":nAB})
inputs.append({"txid":txC, "vout":nC})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
# In mempool txs from self should increase balance from change
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("30") + Decimal("24.9996"))
balance = newbalance
# Restart the node with a higher min relay fee so the parent tx is no longer in mempool
# TODO: redo with eviction
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
# Verify txs no longer in either node's mempool
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Not in mempool txs from self should only reduce balance
# inputs are still spent, but change not received
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.9996"))
# Unconfirmed received funds that are not in mempool, also shouldn't show
# up in unconfirmed balance
unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
assert_equal(unconfbalance, newbalance)
# Also shouldn't show up in listunspent
assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
balance = newbalance
# Abandon original transaction and verify inputs are available again
# including that the child tx was also abandoned
self.nodes[0].abandontransaction(txAB1)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("30"))
balance = newbalance
# Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.00001"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(self.nodes[0].getbalance(), balance)
# But if its received again then it is unabandoned
# And since now in mempool, the change is available
# But its child tx remains abandoned
self.nodes[0].sendrawtransaction(signed["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("20") + Decimal("14.66668"))
balance = newbalance
# Send child tx again so its unabandoned
self.nodes[0].sendrawtransaction(signed2["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("10") - Decimal("14.66668") + Decimal("24.9996"))
balance = newbalance
# Remove using high relay fee again
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.9996"))
balance = newbalance
# Create a double spend of AB1 by spending again from only A's 10 output
# Mine double spend from node 1
inputs =[]
inputs.append({"txid":txA, "vout":nA})
outputs = {}
outputs[self.nodes[1].getnewaddress()] = Decimal("9.6666")
tx = self.nodes[0].createrawtransaction(inputs, outputs)
signed = self.nodes[0].signrawtransaction(tx)
self.nodes[1].sendrawtransaction(signed["hex"])
self.nodes[1].generate(1)
connect_nodes(self.nodes[0], 1)
sync_blocks(self.nodes)
# Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("20"))
balance = newbalance
# There is currently a minor bug around this and so this test doesn't work. See Issue #7315
# Invalidate the block with the double spend and B's 10 BTC output should no longer be available
# Don't think C's should either
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
newbalance = self.nodes[0].getbalance()
#assert_equal(newbalance, balance - Decimal("10"))
self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
self.log.info("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
self.log.info(str(balance) + " -> " + str(newbalance) + " ?")
if __name__ == '__main__':
AbandonConflictTest().main()
|
[
"shamim.ice.ewu@gmail.com"
] |
shamim.ice.ewu@gmail.com
|
7f2d1e41aec4d8c1e88832f5a46b4b9cbe6b8fa1
|
fe683ebe3cbf794dd41121d67bff86278f2721cf
|
/src/nanocurrency/__init__.py
|
4ed3cec39a3578103a5ea9c514610109bacfda29
|
[
"CC0-1.0"
] |
permissive
|
marcosmmb/pynanocurrency
|
3966ab61dc10c1b892c0fdd3d62c41b7df2a038a
|
dd1a4f093068447f9f2421b708843df4e6eb17c9
|
refs/heads/master
| 2020-04-26T06:44:21.634468
| 2019-03-02T02:45:49
| 2019-03-02T02:45:49
| 173,374,825
| 1
| 0
| null | 2019-03-01T22:11:47
| 2019-03-01T22:11:47
| null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
"""
nanocurrency
~~~~~~~~~~~~
pynanocurrency is a Python library allowing to work with NANO cryptocurrency
functions such as block creation and manipulation, account generation and
proof-of-work validation and solving
"""
from .accounts import *
from .blocks import *
from .exceptions import *
from .units import *
from .work import *
from .util import *
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
[
"jannepulk@gmail.com"
] |
jannepulk@gmail.com
|
e8a25edf6cf55a7d26838fd9f347dbeacacfc73f
|
188fa261446cee9fc1b56029e884c8e74364a7f4
|
/huseinhouse.com/MBTI-Study/soaning/function.py
|
c72af9fc818dc9cea5ec2ca6b50fa4a8414cc176
|
[
"MIT"
] |
permissive
|
huseinzol05/Hackathon-Huseinhouse
|
026cc1346afb127aa2675be94a818ebf35f72bb5
|
6796af2fe02f10d7860ac8db27bd24fa27b8bb01
|
refs/heads/master
| 2021-06-29T23:04:35.027212
| 2020-09-07T04:01:49
| 2020-09-07T04:01:49
| 147,768,669
| 3
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,469
|
py
|
import textract
import re
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import LabelEncoder
import sklearn.datasets
import nltk
nltk.data.path.append('/home/husein/nltk_data/')
from textblob import TextBlob
import random
import collections
from collections import OrderedDict
from fuzzywuzzy import fuzz
import numpy as np
def clearstring(string):
    """Keep only ASCII letters and spaces, collapse runs of spaces,
    and return the result lower-cased."""
    letters_only = re.sub('[^A-Za-z ]+', '', string)
    words = [word.strip() for word in letters_only.split(' ') if word]
    return ' '.join(words).lower()
df = pd.read_csv('processed_mbti.csv')
label = df.type.unique()
labelset = LabelEncoder().fit_transform(df.type)
trainset = df.posts.values
for i in range(trainset.shape[0]):
trainset[i] = ' '.join(trainset[i].split('|||'))
def separate_dataset(trainset):
datastring = []
datatarget = []
for i in range(len(trainset.data)):
data_ = trainset.data[i].split('\n')
data_ = filter(None, data_)
for n in range(len(data_)):
data_[n] = clearstring(data_[n])
datastring += data_
for n in range(len(data_)):
datatarget.append(trainset.target[i])
return datastring, datatarget
job = sklearn.datasets.load_files(container_path = 'jobdescription', decode_error = 'replace')
job.data, job.target = separate_dataset(job)
c = list(zip(job.data, job.target))
random.shuffle(c)
job.data, job.target = zip(*c)
dev_clf = Pipeline([('vect', CountVectorizer(ngram_range=(1, 2))), ('clf', SGDClassifier(loss = 'modified_huber', penalty = 'l2', alpha = 1e-4, n_iter = 100, random_state = 42))])
dev_clf.fit(job.data, job.target)
clf = Pipeline([('vect', CountVectorizer()), ('clf', SGDClassifier(loss = 'modified_huber', penalty = 'l2', alpha = 1e-4, n_iter = 100, random_state = 42))])
clf.fit(trainset, labelset)
def clearstring_pdf(string):
string = re.sub(r'[^\x00-\x7F]', '', string)
string = string.split(' ')
string = filter(None, string)
string = [y.strip() for y in string]
string = ' '.join(string)
return string
def get_detail(text):
text = filter(None, [clearstring_pdf(t) for t in text.split('\n')])
blobs = [TextBlob(i).tags for i in text]
nouns = []
for blob in blobs:
nouns += [b[0] for b in blob if b[1] == 'NNP' or b[1] == 'NN']
nouns = [n.lower() for n in nouns][15:]
prob = dev_clf.predict_proba(text)
prob = np.mean(prob, axis = 0)
dict_prob = {}
for i in range(prob.shape[0]):
dict_prob[job.target_names[i]] = float(prob[i])
personality = clf.predict_proba([' '.join(text)])[0]
unique = np.unique(personality)
loc = np.where(personality == unique[-1])[0]
personalities = []
for i in loc:
personalities += list(label[i])
personalities_unique, personalities_count = np.unique(personalities, return_counts = True)
personalities_count = (personalities_count * 1.0) / np.sum(personalities_count)
counts = collections.Counter(personalities)
new_list = sorted(personalities, key = lambda x: -counts[x])
new_list = ''.join(list(OrderedDict.fromkeys(new_list))[:4])
new_type = label[np.argmax([fuzz.ratio(new_list, i) for i in label])]
nouns_unique, nouns_count = np.unique(nouns, return_counts = True)
return {'developer': dict_prob, 'personality_percent': personalities_count.tolist(), 'personality': personalities_unique.tolist(), 'type': new_type,
'nouns': nouns_unique.tolist(), 'nouns_count': nouns_count.tolist()}
|
[
"husein.zol05@gmail.com"
] |
husein.zol05@gmail.com
|
b0fb2160ae3308b14f82c19fd930021f3db68660
|
b8d286c69d89ea42f532c2784ec2aa1633c57d8f
|
/tests/test_devices/test_snmp_handler_interface.py
|
18fd9b196f28aff00a5d291019c027a4e5ac06f7
|
[
"Apache-2.0"
] |
permissive
|
dirkakrid/cloudshell-networking-devices
|
460f85268998f6403fb1a0567d8303e0e92ace27
|
6e62b8ab4c1d8dbe8a68d6ff2d34094b3b90a548
|
refs/heads/master
| 2021-01-21T19:40:12.388282
| 2017-05-10T13:20:01
| 2017-05-10T13:20:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
import unittest
from cloudshell.devices.snmp_handler_interface import SnmpHandlerInterface
class TestSnmpHandlerInterface(unittest.TestCase):
    """Verify that SnmpHandlerInterface acts as an abstract base class."""
    def setUp(self):
        # Subclass that deliberately implements none of the abstract methods,
        # so instantiating it must fail.
        class TestedClass(SnmpHandlerInterface):
            pass
        self.tested_class = TestedClass
    def test_get_snmp_service(self):
        """Check that instance can't be instantiated without implementation of the "get_snmp_service" method"""
        with self.assertRaisesRegexp(TypeError, "Can't instantiate abstract class TestedClass with "
                                                "abstract methods get_snmp_service"):
            self.tested_class()
|
[
"anton.p@qualisystems.com"
] |
anton.p@qualisystems.com
|
e47193ed11bd9d0fe2046a3291cee676d776ccd3
|
ac8ffabf4d7339c5466e53dafc3f7e87697f08eb
|
/python_solutions/1425.constrained-subsequence-sum.py
|
4038454a87a6aa43af1ce0804ead5dafa93468e9
|
[] |
no_license
|
h4hany/leetcode
|
4cbf23ea7c5b5ecfd26aef61bfc109741f881591
|
9e4f6f1a2830bd9aab1bba374c98f0464825d435
|
refs/heads/master
| 2023-01-09T17:39:06.212421
| 2020-11-12T07:26:39
| 2020-11-12T07:26:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,261
|
py
|
from collections import Counter, defaultdict, OrderedDict, deque
from bisect import bisect_left, bisect_right
from functools import reduce, lru_cache
from typing import List
import itertools
import math
import heapq
import string
true = True
false = False
MIN, MAX = -0x3f3f3f3f, 0x3f3f3f3f
#
# @lc app=leetcode id=1425 lang=python3
#
# [1425] Constrained Subsequence Sum
#
# https://leetcode.com/problems/constrained-subsequence-sum/description/
#
# algorithms
# Hard (43.60%)
# Total Accepted: 6.9K
# Total Submissions: 15.8K
# Testcase Example: '[10,2,-10,5,20]\n2'
#
# Given an integer array nums and an integer k, return the maximum sum of a
# non-empty subsequence of that array such that for every two consecutive
# integers in the subsequence, nums[i] and nums[j], where i < j, the condition
# j - i <= k is satisfied.
#
# A subsequence of an array is obtained by deleting some number of elements
# (can be zero) from the array, leaving the remaining elements in their
# original order.
#
#
# Example 1:
#
#
# Input: nums = [10,2,-10,5,20], k = 2
# Output: 37
# Explanation: The subsequence is [10, 2, 5, 20].
#
#
# Example 2:
#
#
# Input: nums = [-1,-2,-3], k = 1
# Output: -1
# Explanation: The subsequence must be non-empty, so we choose the largest
# number.
#
#
# Example 3:
#
#
# Input: nums = [10,-2,-10,-5,20], k = 2
# Output: 23
# Explanation: The subsequence is [10, -2, -5, 20].
#
#
#
# Constraints:
#
#
# 1 <= k <= nums.length <= 10^5
# -10^4 <= nums[i] <= 10^4
#
#
#
class Solution:
    def constrainedSubsetSum(self, nums: List[int], k: int) -> int:
        """Return the max sum of a non-empty subsequence whose consecutive
        chosen indices are at most k apart.

        nums is updated in place so nums[i] becomes the best constrained sum
        ending at i; a decreasing deque caches the positive candidate sums
        from the previous k positions (front = largest).
        """
        window = deque()
        best = float('-inf')
        for i in range(len(nums)):
            # Extend the best reachable previous sum, if one helps (deque
            # only ever holds positive values).
            if window:
                nums[i] += window[0]
            best = max(best, nums[i])
            # Drop candidates dominated by the new sum (>= or > both work).
            while window and nums[i] >= window[-1]:
                window.pop()
            # Only a positive sum can improve a later element.
            if nums[i] > 0:
                window.append(nums[i])
            # Retire the candidate that just slid out of the k-window.
            while window and i >= k and window[0] == nums[i - k]:
                window.popleft()
        return best
sol = Solution()
# nums = [10,2,-10,5,20], = 2
# nums = [-1,-2,-3], k = 1÷
nums = [10, -2, -10, -5, 20]
k = 2
print(sol.constrainedSubsetSum(nums, k))
|
[
"ssruoz@gmail.com"
] |
ssruoz@gmail.com
|
af815ff89cb5d9b1273a7cc8d98677c1eba8e801
|
51a2fb45db6a074c7bd5af32c8ee8471251436f4
|
/Project/企业微信/PageObject实战/PO/Login_PO.py
|
9d221d18c4bee9c5f5fe323ce5232fbca8a85d1b
|
[] |
no_license
|
JiangHuYiXiao/Web-Autotest-Python
|
c5e2cf61a5a62d132df048d3218dfb973be8784e
|
65b30360337b56b6ca4eba21f729c922f1665489
|
refs/heads/master
| 2021-08-26T07:46:42.957744
| 2021-08-12T02:24:11
| 2021-08-12T02:24:11
| 253,945,694
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
# -*- coding:utf-8 -*-
# @Author : 江湖一笑
# @Time : 2021/2/26 16:01
# @Software : Web-Autotest-Python
# @Python_verison : 3.7
from selenium.webdriver.remote.webdriver import WebDriver
from Project.企业微信.PageObject实战1.PO.Register_PO import Register
class Login():
    """Page object for the WeChat Work login page."""
    def __init__(self,driver:WebDriver): # reuse the previous page object's driver; annotating it as WebDriver enables find_element_* completion
        self._driver = driver
    def scan(self):
        # QR-code scan login — not implemented yet.
        pass
    def click_register(self):
        # Click the "register now" link and hand off to the Register page object.
        self._driver.find_element_by_css_selector('.login_registerBar_link').click()
        return Register(self._driver)
|
[
"1163270704@qq.com"
] |
1163270704@qq.com
|
896e40d4255f32860379afbe8a7112b45b7b29b6
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/tbz5ji3ocwzAeLQNa_11.py
|
fce155cea3aba702b57ec473a17f33d65c5f76ee
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,903
|
py
|
"""
Given a two-dimensional list of `maze` and a list of `directions`. Your task
is to follow the given directions.
* If you can reach the endpoint before all your moves have gone, return `"Finish"`.
* If you hit any walls or go outside the maze border, return `"Dead"`.
* If you find yourself still in the maze after using all the moves, return `"Lost"`.
The maze list will look like this:
maze = [
[1, 1, 1, 1, 1, 1, 1, 1, 0, 1],
[1, 3, 1, 0, 1, 0, 0, 0, 0, 1],
[1, 0, 1, 0, 0, 0, 1, 1, 0, 1],
[1, 0, 1, 1, 1, 1, 1, 0, 0, 1],
[1, 0, 1, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 1, 0, 1, 0, 1, 0, 0, 1],
[1, 0, 1, 0, 1, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 1, 0, 1, 1, 0, 1],
[1, 0, 0, 0, 1, 0, 0, 0, 0, 1],
[1, 1, 1, 0, 1, 1, 1, 1, 2, 1]
]
# 0 = Safe place to walk
# 1 = Wall
# 2 = Start Point
# 3 = Finish Point
# N = North, E = East, W = West and S = South
See the below examples for a better understanding:
### Examples
exit_maze(maze, ["N", "E", "E"]) ➞ "Dead"
# Hitting the wall should return "Dead".
exit_maze(maze, ["N", "N", "N", "E"]) ➞ "Lost"
# Couldn't reach the finish point.
exit_maze(maze, ["N", "W", "W", "W", "N", "N", "N", "N", "W", "W", "S", "S", "S", "S", "W", "W", "N", "N", "N", "N", "N", "N", "N"]) ➞ "Finish"
### Notes
N/A
"""
def Starting_Point(maze):
    """Return (col, line) of the cell containing 2 (the start marker).

    Returns None if the maze has no start cell.
    """
    for i in range(len(maze)):
        # Scan each row by its own length so non-square mazes also work.
        for j in range(len(maze[i])):
            if maze[i][j] == 2:
                return j, i

def exit_maze(maze, directions):
    """Walk the maze from the start cell following `directions`.

    Returns "Finish" if the finish cell (3) is reached, "Dead" on hitting a
    wall (1) or leaving the maze border, and "Lost" if the moves run out
    while still inside the maze.
    """
    col, line = Starting_Point(maze)
    for moves in directions:
        if moves == "N": line -= 1
        elif moves == "S": line += 1
        elif moves == "W": col -= 1
        else: col += 1
        # Bug fix: a negative index wrapped around to the far side of the
        # maze instead of counting as stepping outside the border.
        if line < 0 or col < 0 or line > len(maze) - 1 or col > len(maze[0]) - 1:
            return 'Dead'
        if maze[line][col] == 1:
            return 'Dead'
        elif maze[line][col] == 3:
            return 'Finish'
    return 'Lost'
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
b66bd06abd3bf875afad489cae81330f335e27e8
|
cc78de009a8e7805f9f6a852774e0384b11bfdcb
|
/testcase/api/login/postman_login_post.py
|
18b31c0cc29699bb94201354eab342239c45f319
|
[] |
no_license
|
williamzxl/app_test_many_devices
|
c1806e54c17a84f18a04c3808604633c2deba052
|
dd5434018fadd11d5462903cafaafbb5b387c24a
|
refs/heads/master
| 2020-03-29T21:20:51.239295
| 2019-03-05T03:13:56
| 2019-03-05T03:13:56
| 150,361,766
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 731
|
py
|
import requests
import json
url = "https://proxy.langlib.com/accounts/loginByAccount"
headers = {
'platform': "Android",
'appkey': "CEE_AA8F55B916AB",
'appversion': "10000005",
'appsecret': "3DB5159C-EB1E-47FE-8584-47115EF5E443",
'app': "cee",
'content-length': "55",
'host': "proxy.langlib.com",
'accept-encoding': "gzip",
'Connection': 'Keep-Alive',
'user-agent': "okhttp/3.11.0",
'content-type': "application/json",
'cache-control': "no-cache",
}
body = {"UserCredential": "test_pay@t.com", "Password": "111111"}
response = requests.request("POST", url, headers=headers, json=body)
content = (json.loads(response.text))
print(content.get("AccessToken"))
print(type(content))
|
[
"1053418947@qq.com"
] |
1053418947@qq.com
|
d4656c1a3c9c77ace9534ffe71b6a42d0cfb0ff6
|
97959e8f049e1877931e85b9965fc48e1d7b7616
|
/27.py
|
452de52776bed556ec31c7166f8038ef60774ca9
|
[] |
no_license
|
mahabaleshwarabairi/mahabaleshwarabairi
|
403911a7e9a0ec93730845eea6e27ee55363113f
|
27935ac9165f049c48153455768709e6d8c8a374
|
refs/heads/master
| 2023-04-26T08:26:17.574774
| 2021-06-05T08:36:35
| 2021-06-05T08:36:35
| 351,385,527
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 22 11:13:22 2020
@author: Mahabaleshwara.Bairi
"""
states={
'TN' : {'capital':'Chenai','language':'Tamil'},
'KL': {'capital':'trivandarm','language':'Keralla'},
'KK':{'capital':'BNG','language':'KAN'}
}
labels ={'capital':'capital city',
'language':'spoken language'}
state=input("Please enter state:")
fetch=input("capital (C) or L..?")
if fetch=='C': key='capital'
if fetch=='L':key='language'
if state in states:
print("%s 's %s is %s" %(state,labels[key],states[state][key]))
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
e9a1fae3749f53ba0f93c11b8432510a4e7f8af0
|
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
|
/ml-flask/Lib/site-packages/pandas/tests/util/test_assert_interval_array_equal.py
|
8a537308063a13fd91d44a09de45484ac058bbb0
|
[
"MIT"
] |
permissive
|
YaminiHP/SimilitudeApp
|
8cbde52caec3c19d5fa73508fc005f38f79b8418
|
005c59894d8788c97be16ec420c0a43aaec99b80
|
refs/heads/master
| 2023-06-27T00:03:00.404080
| 2021-07-25T17:51:27
| 2021-07-25T17:51:27
| 389,390,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:82edae2255ec8fcb3927eb50fad5b0eda9ea8dbaaa2cb6dc12188b8a9fac142b
size 2364
|
[
"yamprakash130@gmail.com"
] |
yamprakash130@gmail.com
|
d1b92393bf9fbe4433948c91091bb196969f8bc8
|
8dde6f201657946ad0cfeacab41831f681e6bc6f
|
/1167. Minimum Cost to Connect Sticks.py
|
272df4236aaf86c111809a73d668d5d9118eacac
|
[] |
no_license
|
peraktong/LEETCODE_Jason
|
c5d4a524ba69b1b089f18ce4a53dc8f50ccbb88c
|
06961cc468211b9692cd7a889ee38d1cd4e1d11e
|
refs/heads/master
| 2022-04-12T11:34:38.738731
| 2020-04-07T21:17:04
| 2020-04-07T21:17:04
| 219,398,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
import heapq
class Solution(object):
    def connectSticks(self, sticks):
        """Minimum total cost to merge all sticks into one, where merging
        two sticks of lengths x and y costs x + y.

        Greedy: always combine the two shortest sticks first (min-heap).
        Note: `sticks` is heapified and consumed in place.

        :type sticks: List[int]
        :rtype: int
        """
        total_cost = 0
        heapq.heapify(sticks)
        while len(sticks) > 1:
            combined = heapq.heappop(sticks) + heapq.heappop(sticks)
            total_cost += combined
            heapq.heappush(sticks, combined)
        return total_cost
|
[
"caojunzhi@caojunzhisMBP3.fios-router.home"
] |
caojunzhi@caojunzhisMBP3.fios-router.home
|
02abde4a40072f9b6ee47ca15157b48df39d4a60
|
25dda94672497e3287a7403e283fb279ad171b79
|
/practice/정렬/선택 정렬.py
|
eba9f36bb4a0839f774abd388f00bb2e48c35c01
|
[] |
no_license
|
woorud/Algorithm
|
c94b844e8c96a446c5fdee5c0abb159bfee384d7
|
f5b8e3cf0aea7fc4400e6f5bb0c1531fad93e541
|
refs/heads/master
| 2023-02-23T13:53:28.645036
| 2021-01-29T12:24:23
| 2021-01-29T12:24:23
| 230,908,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
# Selection sort: repeatedly swap the smallest remaining element to the front.
array = [7, 5, 9, 0, 3, 1, 6, 2, 4, 8]
for target in range(len(array)):
    smallest = target
    for candidate in range(target + 1, len(array)):
        if array[candidate] < array[smallest]:
            smallest = candidate
    array[target], array[smallest] = array[smallest], array[target]
print(array)
|
[
"woorud96@gmail.com"
] |
woorud96@gmail.com
|
98a5436bba2136ae470959f0588878666f9736e7
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-elb/huaweicloudsdkelb/v3/model/create_listener_quic_config_option.py
|
ea75a64289e0119f8980032cb31fa3a3892e5e4e
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 5,568
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateListenerQuicConfigOption:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'quic_listener_id': 'str',
'enable_quic_upgrade': 'bool'
}
attribute_map = {
'quic_listener_id': 'quic_listener_id',
'enable_quic_upgrade': 'enable_quic_upgrade'
}
def __init__(self, quic_listener_id=None, enable_quic_upgrade=None):
"""CreateListenerQuicConfigOption
The model defined in huaweicloud sdk
:param quic_listener_id: 监听器关联的QUIC监听器ID。指定的listener id必须已存在,且协议类型为QUIC,不能指定为null,否则与enable_quic_upgrade冲突。 [不支持QUIC。](tag:tm,hws_eu,g42,hk_g42,hcso_dt,dt,dt_test)
:type quic_listener_id: str
:param enable_quic_upgrade: QUIC升级的开启状态。 True:开启QUIC升级; Flase:关闭QUIC升级(默认)。 开启HTTPS监听器升级QUIC监听器能力。 [不支持QUIC。](tag:tm,hws_eu,g42,hk_g42,hcso_dt,dt,dt_test)
:type enable_quic_upgrade: bool
"""
self._quic_listener_id = None
self._enable_quic_upgrade = None
self.discriminator = None
self.quic_listener_id = quic_listener_id
if enable_quic_upgrade is not None:
self.enable_quic_upgrade = enable_quic_upgrade
@property
def quic_listener_id(self):
"""Gets the quic_listener_id of this CreateListenerQuicConfigOption.
监听器关联的QUIC监听器ID。指定的listener id必须已存在,且协议类型为QUIC,不能指定为null,否则与enable_quic_upgrade冲突。 [不支持QUIC。](tag:tm,hws_eu,g42,hk_g42,hcso_dt,dt,dt_test)
:return: The quic_listener_id of this CreateListenerQuicConfigOption.
:rtype: str
"""
return self._quic_listener_id
@quic_listener_id.setter
def quic_listener_id(self, quic_listener_id):
"""Sets the quic_listener_id of this CreateListenerQuicConfigOption.
监听器关联的QUIC监听器ID。指定的listener id必须已存在,且协议类型为QUIC,不能指定为null,否则与enable_quic_upgrade冲突。 [不支持QUIC。](tag:tm,hws_eu,g42,hk_g42,hcso_dt,dt,dt_test)
:param quic_listener_id: The quic_listener_id of this CreateListenerQuicConfigOption.
:type quic_listener_id: str
"""
self._quic_listener_id = quic_listener_id
@property
def enable_quic_upgrade(self):
"""Gets the enable_quic_upgrade of this CreateListenerQuicConfigOption.
QUIC升级的开启状态。 True:开启QUIC升级; Flase:关闭QUIC升级(默认)。 开启HTTPS监听器升级QUIC监听器能力。 [不支持QUIC。](tag:tm,hws_eu,g42,hk_g42,hcso_dt,dt,dt_test)
:return: The enable_quic_upgrade of this CreateListenerQuicConfigOption.
:rtype: bool
"""
return self._enable_quic_upgrade
@enable_quic_upgrade.setter
def enable_quic_upgrade(self, enable_quic_upgrade):
"""Sets the enable_quic_upgrade of this CreateListenerQuicConfigOption.
QUIC升级的开启状态。 True:开启QUIC升级; Flase:关闭QUIC升级(默认)。 开启HTTPS监听器升级QUIC监听器能力。 [不支持QUIC。](tag:tm,hws_eu,g42,hk_g42,hcso_dt,dt,dt_test)
:param enable_quic_upgrade: The enable_quic_upgrade of this CreateListenerQuicConfigOption.
:type enable_quic_upgrade: bool
"""
self._enable_quic_upgrade = enable_quic_upgrade
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateListenerQuicConfigOption):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
2bf79220a4a605d9b665cad56a6c298e21d55040
|
98beeffab0570eb7e4bd2785fc195658e18aa6dd
|
/SRC/common/IO/progressbar_delay.py
|
cfb2f4c580665e91b96fc9caa3cc476d6de797c5
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
shkeshavarz/OOF2
|
27f59bb04775b76ad250ecfd76118b3760647bba
|
0f69f535d040875354cd34e8bbedeae142ff09a3
|
refs/heads/master
| 2021-01-15T15:32:10.713469
| 2016-01-13T14:44:20
| 2016-01-13T14:44:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 863
|
py
|
# -*- python -*-
# $RCSfile: progressbar_delay.py,v $
# $Revision: 1.22 $
# $Author: langer $
# $Date: 2011/02/01 16:38:56 $
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
# Time, in milliseconds, between the time that a progressbar object is
# created and the time that it is installed in the ActivityViewer
# window.
delay = 2000
# Time in milliseconds between progress bar updates.
period = 200
def set_delay(menuitem, milliseconds):
    # Menu callback: update the module-wide delay (ms) between creating a
    # progress bar and installing it.  `menuitem` is unused here; it is
    # presumably part of the menu-callback signature — TODO confirm.
    global delay
    delay = milliseconds
|
[
"lnz5@rosie.nist.gov"
] |
lnz5@rosie.nist.gov
|
698ce85576475ac479f25c8050a6685b8c37d9f0
|
9f8ce7b4b3fd8b2d0be51e559945feded81fb0b1
|
/negotiator2/__init__.py
|
38de5a17832bb501a1dcbfee6bd6900685236cad
|
[] |
no_license
|
zimeon/negotiator2
|
bf82d092492d11435a4db9bbf9f65211cd787f74
|
33dee19cd9fcf5db7cc6872c6608843f3bf1e9c8
|
refs/heads/master
| 2021-01-16T23:22:55.504372
| 2018-02-20T13:52:48
| 2018-02-20T13:52:48
| 95,739,871
| 0
| 0
| null | 2017-11-03T20:17:22
| 2017-06-29T04:53:53
|
Python
|
UTF-8
|
Python
| false
| false
| 286
|
py
|
"""Imports for negotiator2."""
__version__ = '2.1.1'
from .negotiator import AcceptParameters, ContentType, Language, ContentNegotiator
from .memento import BadTimeMap, TimeMap, memento_parse_datetime, memento_datetime_string
from .util import conneg_on_accept, negotiate_on_datetime
|
[
"simeon.warner@cornell.edu"
] |
simeon.warner@cornell.edu
|
57b38b59e6438c0800acb06a2fcf6c7d6f16fc8f
|
e3b9aa9b17ebb55e53dbc4fa9d1f49c3a56c6488
|
/thehive/komand_thehive/actions/get_case/schema.py
|
4fd641e02f199012fe05d6e6f2d54aea73510dcd
|
[
"MIT"
] |
permissive
|
OSSSP/insightconnect-plugins
|
ab7c77f91c46bd66b10db9da1cd7571dfc048ab7
|
846758dab745170cf1a8c146211a8bea9592e8ff
|
refs/heads/master
| 2023-04-06T23:57:28.449617
| 2020-03-18T01:24:28
| 2020-03-18T01:24:28
| 248,185,529
| 1
| 0
|
MIT
| 2023-04-04T00:12:18
| 2020-03-18T09:14:53
| null |
UTF-8
|
Python
| false
| false
| 3,744
|
py
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
DESCRIPTION = "Retrieve a case by ID"
class Input:
ID = "id"
class Output:
CASE = "case"
class GetCaseInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"id": {
"type": "string",
"title": "Case ID",
"description": "Case ID e.g. AV_ajI_oYMfcbXhqb9tS",
"order": 1
}
},
"required": [
"id"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class GetCaseOutput(komand.Output):
    """Output schema for the Get Case action.

    The JSON Schema below is machine-generated; it declares the single
    required ``case`` object returned by TheHive's get-case endpoint.
    """

    schema = json.loads("""
    {
      "type": "object",
      "title": "Variables",
      "properties": {
        "case": {
          "$ref": "#/definitions/case",
          "title": "Case",
          "description": "Get case output",
          "order": 1
        }
      },
      "required": [
        "case"
      ],
      "definitions": {
        "case": {
          "type": "object",
          "title": "case",
          "properties": {
            "_type": {
              "type": "string",
              "title": "Type",
              "description": "Case type",
              "order": 3
            },
            "caseId": {
              "type": "integer",
              "title": "Case ID e.g. AV_ajI_oYMfcbXhqb9tS",
              "description": "Case ID",
              "order": 8
            },
            "createdAt": {
              "type": "integer",
              "title": "Created At",
              "description": "Created at",
              "order": 16
            },
            "createdBy": {
              "type": "string",
              "title": "Created By",
              "description": "Case created by",
              "order": 12
            },
            "customFields": {
              "type": "object",
              "title": "Custom Fields",
              "description": "Case custom fields",
              "order": 14
            },
            "description": {
              "type": "string",
              "title": "Description",
              "order": 17
            },
            "flag": {
              "type": "boolean",
              "title": "Flag",
              "description": "Case flags",
              "order": 10
            },
            "id": {
              "type": "string",
              "title": "ID",
              "description": "ID",
              "order": 15
            },
            "metrics": {
              "type": "object",
              "title": "Metrics",
              "description": "Case metrics",
              "order": 9
            },
            "owner": {
              "type": "string",
              "title": "Owner",
              "description": "Case owner",
              "order": 13
            },
            "severity": {
              "type": "integer",
              "title": "Severity",
              "description": "Case severity",
              "order": 5
            },
            "startDate": {
              "type": "integer",
              "title": "Start Date",
              "description": "Case start date",
              "order": 2
            },
            "status": {
              "type": "string",
              "title": "Status",
              "description": "Case status",
              "order": 1
            },
            "tags": {
              "type": "array",
              "title": "Tags",
              "description": "Case tags",
              "items": {
                "type": "string"
              },
              "order": 6
            },
            "title": {
              "type": "string",
              "title": "Title",
              "description": "Case title",
              "order": 7
            },
            "tlp": {
              "type": "integer",
              "title": "TLP",
              "description": "Traffic Light Protocol level",
              "order": 4
            },
            "user": {
              "type": "string",
              "title": "User",
              "description": "Case user",
              "order": 11
            }
          }
        }
      }
    }
    """)

    def __init__(self):
        # Delegate schema validation to the komand.Output base class.
        super(self.__class__, self).__init__(self.schema)
|
[
"jonschipp@gmail.com"
] |
jonschipp@gmail.com
|
f5b4ce49234aa51d775d4794255d26f32726d67e
|
ce5ce3764e75774c0b7eab47893987b9f311b1b9
|
/.history/moments/models_20210605234329.py
|
2a69731e929875c9c2fe37b29fe4471a44af790c
|
[] |
no_license
|
iSecloud/django-wechat
|
410fb8a23b50dc2343c2f0167bbae560bf6e9694
|
faaff9bb7f9454a63b2f8411d3577169b470baad
|
refs/heads/main
| 2023-05-15T06:53:16.252422
| 2021-06-07T14:00:35
| 2021-06-07T14:00:35
| 374,685,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,452
|
py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class WeChatUser(models.Model):
    """Profile extension of Django's built-in User for the WeChat-style app."""

    # One profile per auth user; deleting the user cascades to the profile.
    user = models.OneToOneField(User, models.CASCADE)
    # Optional personal signature/motto shown on the profile.
    motto = models.CharField(max_length=100, null=True, blank=True)
    # Optional picture reference (presumably an avatar path) — TODO confirm.
    pic = models.CharField(max_length=50, null=True, blank=True)
    # Optional free-form region/location string.
    region = models.CharField(max_length=50, null=True, blank=True)

    def __str__(self):
        return self.user.username
class Status(models.Model):
    """A user-published status/moment post."""

    user = models.ForeignKey(WeChatUser, models.CASCADE)
    text = models.CharField(max_length=280)
    # Optional attached picture reference.
    pic = models.CharField(max_length=100, null=True, blank=True)
    # Set automatically when the row is first created.
    pub_time = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.text

    class Meta:
        # Newest posts first.
        ordering = ['-id']
# Table holding both replies and likes attached to a status.
class Reply(models.Model):
    """A reaction to a Status: either a like ("0") or a comment ("1")."""

    status = models.ForeignKey(Status, models.CASCADE)
    author = models.CharField(max_length=100)
    type = models.CharField(max_length=20, choices=[("0", "like"), ("1", "comment")])
    text = models.CharField(max_length=300, null=True, blank=True)  # empty comments allowed
    at_person = models.CharField(max_length=100, null=True, blank=True)

    def __str__(self):
        mention = "@{}".format(self.at_person) if self.at_person else ""
        return "{}{} says {}".format(self.author, mention, self.text)
|
[
"869820505@qq.com"
] |
869820505@qq.com
|
da954a0ed8caf750124be9eb6582ad919fab6903
|
258c045a25c2a4d2027c5510272b2e40fb7938ca
|
/ellipse_polaire.py
|
680918cb63cc02de291149e3ce3fe6c1e7f16f3c
|
[
"MIT"
] |
permissive
|
harryturr/electron-charging-rings-detector
|
5395791aba2ce2264659b49c9ce4ad14c29c896a
|
7eeefcf2c2be7aba56daf965fe80727887b01eb7
|
refs/heads/master
| 2022-07-07T08:27:15.412680
| 2019-10-29T23:58:57
| 2019-10-29T23:58:57
| 218,405,876
| 0
| 0
|
MIT
| 2022-06-21T23:20:08
| 2019-10-29T23:54:51
|
OpenEdge ABL
|
UTF-8
|
Python
| false
| false
| 3,696
|
py
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import features_automatic_detection
# Number of sample angles used when rasterising an ellipse outline.
nombre_angle = 1000
# The detector works on fixed 256x256 images; these must stay in sync with
# the hard-coded 0..255 bounds checked in tracer_ellipse below.
features_automatic_detection.largeur_max = 256
features_automatic_detection.hauteur_max = 256
def ellipse_conversion(liste):
    """Return [alpha, beta] describing the rotation that axis-aligns the conic.

    `liste` presumably holds the conic coefficients [a, b, c, d, e, f] of
    a*x^2 + 2*b*x*y + c*y^2 + ... — TODO confirm against the caller.
    The function solves a quadratic in beta^2 and returns the root pair
    (alpha, beta) = (sqrt(1 - beta^2), beta) that best cancels the cross term.
    Falls back to [1, 0] (identity rotation) when no root is found.
    """
    # Quadratic in beta^2 whose roots characterise the rotation angle.
    coeff_poly_0 = -4 * liste[1] ** 2 - (liste[0] - liste[2]) ** 2
    coeff_poly_1 = (liste[0] - liste[2]) ** 2 + 4 * liste[1] ** 2
    coeff_poly_2 = -liste[1] ** 2
    cool = np.roots([coeff_poly_0, coeff_poly_1, coeff_poly_2])
    bons_beta = []
    if len(cool) == 0:
        # Degenerate polynomial: no rotation needed.
        return [1, 0]
    else:
        # Each non-negative root of beta^2 yields two candidate betas (+/-).
        if cool[0] >= 0:
            beta_reel_0 = np.sqrt(cool[0])
            beta_reel_1 = -np.sqrt(cool[0])
            bons_beta.append(beta_reel_0)
            bons_beta.append(beta_reel_1)
        if cool[1] >= 0:
            beta_reel_2 = np.sqrt(cool[1])
            beta_reel_3 = -np.sqrt(cool[1])
            bons_beta.append(beta_reel_2)
            bons_beta.append(beta_reel_3)
        # Pick the candidate that minimises the residual cross term.
        index = bons_beta[0]
        absol = abs(
            (liste[0] - liste[2]) * np.sqrt(1 - bons_beta[0] ** 2) * bons_beta[0]
            + liste[1] * (1 - 2 * bons_beta[0] ** 2)
        )
        n = len(bons_beta)
        for p in range(n):
            # NOTE(review): `if abs(bons_beta[p]):` is a truthiness test that
            # merely skips beta == 0 candidates — confirm this is intended.
            if abs(bons_beta[p]):
                value = (liste[0] - liste[2]) * np.sqrt(
                    1 - bons_beta[p] ** 2
                ) * bons_beta[p] + liste[1] * (1 - 2 * bons_beta[p] ** 2)
                absolu = abs(value)
                if absolu < absol:
                    absol = absolu
                    index = bons_beta[p]
        return [np.sqrt(1 - index ** 2), index]
def changement_de_coordonnées_ellipse(liste):
    """Express the conic coefficients in the rotated (axis-aligned) frame.

    Uses the rotation [alpha, beta] from ellipse_conversion; the new cross
    term is zero by construction.
    """
    alpha, beta = ellipse_conversion(liste)
    nouvelles_coordonnees = [
        liste[0] * alpha ** 2 + liste[2] * beta ** 2 - 2 * liste[1] * alpha * beta,
        0,  # cross term vanishes in the rotated frame
        liste[2] * alpha ** 2 + liste[0] * beta ** 2 + 2 * liste[1] * alpha * beta,
        liste[3] * alpha - liste[4] * beta,
        liste[4] * alpha + liste[3] * beta,
        liste[5],
    ]
    return nouvelles_coordonnees
def polaire_apres_rotation(liste):
    """Return the ellipse in polar-like form after rotation.

    Result is [rayon_x, rayon_y, centre_x, centre_y, alpha, beta]: the two
    semi-axis radii and centre in the rotated frame, plus the rotation
    (alpha, beta) needed to map points back to the original frame.
    """
    a, _, c, d, e, f = changement_de_coordonnées_ellipse(liste)
    # Completing the square: K collects the constant after centring.
    k = d ** 2 / a + e ** 2 / c - f
    alpha, beta = ellipse_conversion(liste)
    return [np.sqrt(k / a), np.sqrt(k / c), -d / a, -e / c, alpha, beta]
def tracer_ellipse(liste):
    """Rasterise the ellipse into integer pixel coordinates.

    Samples `nombre_angle` points around the rotated ellipse, rotates each
    back into image coordinates, and keeps only pixels inside the 256x256
    frame. May contain consecutive duplicates (see enlever_doublon).
    """
    rayon_x, rayon_y, centre_x, centre_y, alpha, beta = polaire_apres_rotation(liste)
    pixels = []
    for t in range(nombre_angle):
        angle = 2 * np.pi * t / nombre_angle
        # Point on the axis-aligned ellipse...
        grand_x = centre_x + rayon_x * np.cos(angle)
        grand_y = centre_y + rayon_y * np.sin(angle)
        # ...rotated back into the original image frame.
        x = alpha * grand_x + beta * grand_y
        y = -beta * grand_x + alpha * grand_y
        int_x, int_y = int(x), int(y)
        if 0 <= int_x < 256 and 0 <= int_y < 256:
            pixels.append([int_x, int_y])
    return pixels
def enlever_doublon(liste):
    """Collapse runs of consecutive duplicate entries.

    Only adjacent duplicates are removed; identical values separated by
    other entries are all kept. Returns a new list.
    """
    resultat = []
    for element in liste:
        # Keep an element only when it differs from the last one kept.
        if not resultat or resultat[-1] != element:
            resultat.append(element)
    return resultat
def ellipse_final(liste):
    """Full pipeline: rasterise the conic, then drop consecutive duplicate pixels."""
    return enlever_doublon(tracer_ellipse(liste))
|
[
"griffin.harrisonn@gmail.com"
] |
griffin.harrisonn@gmail.com
|
654e36ee1139fd4e26f34bff8a5ad866723502e8
|
9e2bd8e828d3aeedc9b5034d847a8e1e3a381cfa
|
/rltk/io/adapter/__init__.py
|
75fd0cceb723c9a2948f88da45ca6d33433231d1
|
[
"MIT"
] |
permissive
|
rpatil524/rltk
|
0a55c6d5f02ccf2991dc458fb38a0bf4f0caa151
|
aee10ed5dd561583e60db3373ed82fe1208da1e9
|
refs/heads/master
| 2021-11-01T10:41:12.158504
| 2021-10-06T23:41:52
| 2021-10-06T23:41:52
| 183,799,148
| 0
| 0
|
MIT
| 2021-09-04T03:42:53
| 2019-04-27T16:53:12
|
Python
|
UTF-8
|
Python
| false
| false
| 624
|
py
|
from rltk.io.adapter.key_value_adapter import KeyValueAdapter
from rltk.io.adapter.memory_key_value_adapter import MemoryKeyValueAdapter
from rltk.io.adapter.dbm_key_value_adapter import DbmKeyValueAdapter
from rltk.io.adapter.redis_key_value_adapter import RedisKeyValueAdapter
from rltk.io.adapter.hbase_key_value_adapter import HBaseKeyValueAdapter
from rltk.io.adapter.key_set_adapter import KeySetAdapter
from rltk.io.adapter.memory_key_set_adapter import MemoryKeySetAdapter
from rltk.io.adapter.redis_key_set_adapter import RedisKeySetAdapter
from rltk.io.adapter.leveldb_key_set_adapter import LevelDbKeySetAdapter
|
[
"bigyyx@gmail.com"
] |
bigyyx@gmail.com
|
0e961003dcb191f892a1ebafa66c42a9f3c130d3
|
78011517bc7fe931f736b81297d0603f7dc01819
|
/Python/kettle_set_mode.py
|
bd205cdc3f9d36d16e56196725915e22b9f34f4c
|
[] |
no_license
|
BorisE/RedmondKettle
|
47c040f90be4ccf7cee76720d793b4ab908ccfc3
|
dec016e65a8cd9663719c279ef6bb98fda60f923
|
refs/heads/master
| 2021-05-17T18:17:43.429333
| 2020-04-05T18:08:14
| 2020-04-05T18:08:14
| 250,914,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,614
|
py
|
#!/usr/bin/python
# coding: utf-8
#from kettle.kettleclass import RedmondKettler
#from kettle.logclass import logclass
from kettle.logclass import log
import sys
#Use main wrapper library
from kettle_wrappers_lib import *
#Private part
if __name__ == "__main__":
    # Log the raw command line for troubleshooting.
    log.debug(f"Arguments count: {len(sys.argv)}")
    for i, arg in enumerate(sys.argv):
        log.debug(f"Argument {i:>6}: {arg}")

    # Usage: kettle_set_mode.py mode target_temp duration_correction

    # mode: kettle mode code; the protocol expects a two-character string,
    # so a single digit is left-padded with "0". Defaults to "00".
    try:
        mode = str(sys.argv[1])
        mode = mode if len(str(mode)) == 2 else "0" + str(mode)
    except IndexError:
        mode = "00"

    # target_temp: desired temperature; defaults to 100.
    try:
        target_temp = int(sys.argv[2])
        if mode == "01" or mode == "02":
            target_temp = min(target_temp, 90)  # max allowed 90 in mode 1 & 2
    except (IndexError, ValueError):
        target_temp = 100

    # duration_correction: boil-duration adjustment; defaults to 0.
    try:
        duration_correction = int(sys.argv[3])
    except (IndexError, ValueError):
        duration_correction = 0

    # Connect to the kettle; Setup_Kettler returns a falsy value on failure.
    kettler = Setup_Kettler()
    if kettler:
        log.info("Kettle setup was successfully completed, can proceed with commands further")
        log.info ("Setting kettle parameters: MODE=%s, TARGET_TEMP=%s, DURATION_CORRECTION=%s"%(mode,target_temp,duration_correction))
        mainMethodAnswer = False
        if kettler.sendSetMode(mode, target_temp, duration_correction):
            log.info ("Successfully set")
            mainMethodAnswer = True
        else:
            log.error ("Error setting kettle parameters")
            mainMethodAnswer = False

        # Emit a machine-readable status summary on stdout.
        json_data = Make_status_JSON (kettler, mainMethodAnswer)
        print (json_data)

        kettler.disconnect()
|
[
"email@example.com"
] |
email@example.com
|
5d8c2d955c93665a6a101d866aabccc05a6eec22
|
ed9e1b622dad6b559cd0fe6fa23d6a27f857dc7f
|
/galsim/config/input_powerspectrum.py
|
c80cacd70793e9d12c16b6aa092dc249e98deb81
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
ajwheeler/GalSim
|
40d6f8c64789b601ed2547eefed05f1577592613
|
cf0ef33e5f83da1b13a0617d362d8357056d6f22
|
refs/heads/master
| 2021-01-22T06:14:31.486159
| 2017-04-20T01:20:20
| 2017-04-20T01:20:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,937
|
py
|
# Copyright (c) 2012-2017 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
from __future__ import print_function
import galsim
import math
# This file adds input type nfw_halo and value types PowerSpectrumShear and
# PowerSpectrumMagnification.
# A PowerSpectrum input type requires a special initialization at the start of each image
# to build the shear grid. This is done in SetupPowerSpecrum. There are also a couple of
# parameters that are specific to that step, which we want to ignore when getting the
# initialization kwargs, so we define a special GetPowerSpectrumKwargs function here.
from .input import InputLoader
class PowerSpectrumLoader(InputLoader):
    """InputLoader for the 'power_spectrum' input type.

    Splits PowerSpectrum handling into two phases: `getKwargs` builds the
    object itself, while `setupImage` (re)builds the shear grid once per
    image, sized from the image/tile geometry.
    """

    def getKwargs(self, config, base, logger):
        """Parse the config dict and return the kwargs needed to build the PowerSpectrum object.

        @param config       The configuration dict for 'power_spectrum'
        @param base         The base configuration dict
        @param logger       If given, a logger object to log progress.

        @returns kwargs, safe
        """
        # Ignore these parameters here, since they are for the buildGrid step, not the
        # initialization of the PowerSpectrum object.
        ignore = ['grid_spacing', 'interpolant']
        opt = galsim.PowerSpectrum._opt_params
        return galsim.config.GetAllParams(config, base, opt=opt, ignore=ignore)

    def setupImage(self, input_obj, config, base, logger=None):
        """Set up the PowerSpectrum input object's gridded values based on the
        size of the image and the grid spacing.

        @param input_obj    The PowerSpectrum object to use
        @param config       The configuration dict for 'power_spectrum'
        @param base         The base configuration dict.
        @param logger       If given, a logger object to log progress.
        """
        # Determine the physical grid spacing: explicit config value, or the
        # tile spacing for tiled images.
        if 'grid_spacing' in config:
            grid_spacing = galsim.config.ParseValue(config, 'grid_spacing', base, float)[0]
        elif 'grid_xsize' in base and 'grid_ysize' in base:
            # Then we have a tiled image.  Can use the tile spacing as the grid spacing.
            grid_size = min(base['grid_xsize'], base['grid_ysize'])
            # This size is in pixels, so we need to convert to arcsec using the pixel scale.
            # Note: we use the (max) pixel scale at the image center.  This isn't
            # necessarily optimal, but it seems like the best choice for a non-trivial WCS.
            scale = base['wcs'].maxLinearScale(base['image_center'])
            grid_spacing = grid_size * scale
        else:
            raise AttributeError("power_spectrum.grid_spacing required for non-tiled images")

        # Determine how many grid points are needed to cover the image.
        if 'grid_xsize' in base and base['grid_xsize'] == base['grid_ysize']:
            # PowerSpectrum can only do a square FFT, so make it the larger of the two n's.
            nx_grid = int(math.ceil(base['image_xsize']/base['grid_xsize']))
            ny_grid = int(math.ceil(base['image_ysize']/base['grid_ysize']))
            ngrid = max(nx_grid, ny_grid)
            # Normally that's good, but if tiles aren't square, need to drop through to the
            # second option.
        else:
            image_size = max(base['image_xsize'], base['image_ysize'])
            scale = base['wcs'].maxLinearScale(base['image_center'])
            ngrid = int(math.ceil(image_size * scale / grid_spacing))

        if 'interpolant' in config:
            interpolant = galsim.config.ParseValue(config, 'interpolant', base, str)[0]
        else:
            interpolant = None

        # We don't care about the output here.  This just builds the grid, which we'll
        # access for each object using its position.
        if base['wcs'].isCelestial():
            world_center = galsim.PositionD(0,0)
        else:
            world_center = base['wcs'].toWorld(base['image_center'])
        rng = galsim.config.check_for_rng(base, logger, 'PowerSpectrum')
        input_obj.buildGrid(grid_spacing=grid_spacing, ngrid=ngrid, center=world_center,
                            rng=rng, interpolant=interpolant)

        # Make sure this process gives consistent results regardless of the number of processes
        # being used.
        if not isinstance(input_obj, galsim.PowerSpectrum) and rng is not None:
            # Then input_obj is really a proxy, which means the rng was pickled, so we need to
            # discard the same number of random calls from the one in the config dict.
            rng.discard(input_obj.nRandCallsForBuildGrid())
# Register PowerSpectrumLoader under the 'power_spectrum' input type so that
# config files can declare it in their 'input' section.
from .input import RegisterInputType
RegisterInputType('power_spectrum', PowerSpectrumLoader(galsim.PowerSpectrum))
# There are two value types associated with this: PowerSpectrumShear and
# PowerSpectrumMagnification.
def _GenerateFromPowerSpectrumShear(config, base, value_type):
    """Generate a shear value by sampling the gridded PowerSpectrum input.

    Falls back to zero shear (with a warning) when the sampled shear is
    invalid, e.g. in a strong-lensing regime. Returns (shear, safe=False).
    """
    ps = galsim.config.GetInputObj('power_spectrum', config, base, 'PowerSpectrumShear')

    if 'world_pos' not in base:
        raise ValueError("PowerSpectrumShear requested, but no position defined.")
    pos = base['world_pos']

    # 'num' is the only (optional) parameter accepted here.
    galsim.config.CheckAllParams(config, opt={ 'num' : int })

    try:
        g1, g2 = ps.getShear(pos)
        result = galsim.Shear(g1=g1, g2=g2)
    except KeyboardInterrupt:
        raise
    except Exception:
        import warnings
        warnings.warn("Warning: PowerSpectrum shear is invalid -- probably strong lensing! " +
                      "Using shear = 0.")
        result = galsim.Shear(g1=0, g2=0)
    return result, False
def _GenerateFromPowerSpectrumMagnification(config, base, value_type):
    """Generate a magnification by sampling the gridded PowerSpectrum input.

    Magnifications outside [0, max_mu] are treated as strong lensing and
    clipped to max_mu (default 25) with a warning. Returns (mu, safe=False).
    """
    ps = galsim.config.GetInputObj('power_spectrum', config, base,
                                   'PowerSpectrumMagnification')

    if 'world_pos' not in base:
        raise ValueError("PowerSpectrumMagnification requested, but no position defined.")
    pos = base['world_pos']

    params = galsim.config.GetAllParams(config, base, opt={ 'max_mu' : float, 'num' : int })[0]

    mu = ps.getMagnification(pos)

    max_mu = params.get('max_mu', 25.)
    if not max_mu > 0.:
        raise ValueError(
            "Invalid max_mu=%f (must be > 0) for type = PowerSpectrumMagnification"%max_mu)

    if mu < 0 or mu > max_mu:
        import warnings
        warnings.warn("Warning: PowerSpectrum mu = %f means strong lensing! Using mu=%f"%(
            mu,max_mu))
        mu = max_mu
    return mu, False
# Register the generators above so config files can use
# type = PowerSpectrumShear / PowerSpectrumMagnification; both require a
# 'power_spectrum' entry in the input section.
from .value import RegisterValueType
RegisterValueType('PowerSpectrumShear', _GenerateFromPowerSpectrumShear, [ galsim.Shear ],
                  input_type='power_spectrum')
RegisterValueType('PowerSpectrumMagnification', _GenerateFromPowerSpectrumMagnification, [ float ],
                  input_type='power_spectrum')
|
[
"michael@jarvis.net"
] |
michael@jarvis.net
|
d1a8a52cfbf35438a187599eb96006576f455b17
|
1f2b05dbe818ff922269717389187e5ced71d198
|
/blog/feeds.py
|
1abe7ca5675208ef1cc195782b747a4c6430f792
|
[
"BSD-2-Clause"
] |
permissive
|
Pythonian/suorganizer
|
e665b0c642b62172156bbbd6537485d66709c339
|
c835cf1647b2b980d3eaf744c9dd91f33dec7e33
|
refs/heads/master
| 2022-04-24T10:19:09.722026
| 2020-04-19T09:07:37
| 2020-04-19T09:07:37
| 256,956,925
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 870
|
py
|
from django.contrib.syndication.views import Feed
from django.urls import reverse_lazy
from django.utils.feedgenerator import (
Atom1Feed, Rss201rev2Feed)
from .models import Post
class BasePostFeedMixin():
    """Shared configuration for the blog's Atom and RSS feeds."""

    title = "Latest Startup Organizer Blog Posts"
    link = reverse_lazy('blog_post_list')
    # Atom uses `subtitle`, RSS uses `description`; keep them identical.
    description = subtitle = (
        "Stay up to date on the "
        "hottest startup news.")

    def items(self):
        # uses Post.Meta.ordering
        return Post.objects.published()[:10]

    def item_title(self, item):
        return item.formatted_title()

    def item_description(self, item):
        return item.short_text()

    def item_link(self, item):
        return item.get_absolute_url()
class AtomPostFeed(BasePostFeedMixin, Feed):
    """Atom 1.0 rendering of the latest blog posts."""
    feed_type = Atom1Feed
class Rss2PostFeed(BasePostFeedMixin, Feed):
    """RSS 2.01 rendering of the latest blog posts."""
    feed_type = Rss201rev2Feed
|
[
"prontomaster@gmail.com"
] |
prontomaster@gmail.com
|
9c91e73a5440b8c28e4620927f5a5026b41dba99
|
d49fbd7874b70a93cbc551afed1b87e3e47617a8
|
/django/example/functions/auth.py
|
77a3a3bacddb35d18816e9b9d8c1217f663a1b2b
|
[] |
no_license
|
gitter-badger/tutorials-4
|
bbdbb673e978118f9fec3212baa13f6f99226be0
|
3ce1cdb7c6d26f6df4d6bb94e82f83e8cab9389b
|
refs/heads/master
| 2020-04-04T20:52:28.181616
| 2018-10-28T22:05:17
| 2018-10-28T22:05:17
| 156,264,177
| 0
| 0
| null | 2018-11-05T18:32:17
| 2018-11-05T18:32:16
| null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
from attr import attrib, attrs
from django.contrib.auth import login, password_validation
from django.core.exceptions import ValidationError
def validate_password(raw_password):
    """Check *raw_password* against Django's configured password validators.

    Returns a (bool, error) pair: (True, None) when the password is valid,
    otherwise (False, ValidationError).
    """
    try:
        password_validation.validate_password(raw_password)
    except ValidationError as error:
        return False, error
    return True, None
@attrs
class StoreUserInSession:
    """Command object that logs a user into the current request's session."""

    # The active Django HttpRequest.
    request = attrib()

    def do(self, user):
        # Delegates to django.contrib.auth.login, which writes the session.
        login(self.request, user)
|
[
"proofit404@gmail.com"
] |
proofit404@gmail.com
|
252dcf468e2f7a8486144abbbbd8991296a8ff2c
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/ETFMM_K/YW_ETFMM_SZSJ_403_K.py
|
3b875095e4f9841ff364c4ae9f79cb0c40b3692f
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,078
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_ETFMM_SZSJ_403_K(xtp_test_case):
    """Auto-generated XTP trading test case YW_ETFMM_SZSJ_403_K.

    Scenario: Shenzhen A-share, immediate-or-cancel sell order submitted
    with a wrong business type; the order is expected to be rejected.
    """

    def test_YW_ETFMM_SZSJ_403_K(self):
        title = '深圳A股股票交易日即成剩撤委托卖-错误的业务类型'
        # Expected values for this test case.
        # Expected status is one of: initial, not traded, partially traded,
        # fully traded, partial-cancel reported, partially cancelled,
        # reported pending cancel, cancelled, rejected, cancel-rejected,
        # internally cancelled.
        # xtp_ID and cancel_xtpID default to 0 and need not be changed.
        case_goal = {
            '期望状态': '废单',
            'errorID': 11000370,
            'errorMSG': queryOrderErrorMsg(11000370),
            '是否生成报单': '否',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)

        # Order parameter setup ------------------------------------------
        # Arguments: ticker, market, security type, security status,
        # trading status, side (B buy / S sell), expected status, Api.
        stkparm = QueryStkPriceQty('999999', '2', '14', '2', '0', 'S', case_goal['期望状态'], Api)

        # If fetching the order parameters failed, the test case fails.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            self.assertEqual(rs['用例测试结果'], True)
        else:
            # Deliberately wrong business type (IPOS) to trigger rejection.
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_IPOS'],
                'order_client_id':2,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST_OR_CANCEL'],
                'price': stkparm['随机中间价'],
                'quantity': 200,
                'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
abaf7f138942cdf3f08632884ead902bc339f791
|
9cc325b00adba5f1b1d8334d98fcca5e0b995d3e
|
/setup.py
|
557da98bf3b4a4d3cc652154b1d3263ee63fdf58
|
[] |
no_license
|
voronind/fuzzy-fabric
|
63fb87d92c224c2c27f8fc3da00bcd799eac03e4
|
9afa7426c9ea91be14a706ecbc887432b447615d
|
refs/heads/master
| 2021-05-28T00:43:00.968044
| 2014-08-25T14:35:59
| 2014-08-25T14:35:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,046
|
py
|
# coding=utf8
from setuptools import setup, find_packages
# Distribution metadata and entry points for the fuzzy-fabric package.
setup(
    name='fuzzy-fabric',
    version='0.6.3',
    author='Dmitry Voronin',
    author_email='dimka665@gmail.com',
    url='https://github.com/dimka665/fuzzy-fabric',
    description='Fuzzy Functions For Fabric',
    packages=find_packages(),
    # Ship the bundled project/config templates alongside the code.
    package_data={
        '': [
            'templates/.*',
            'templates/*.*',
            'templates/nginx/*.*',
        ]
    },
    install_requires=[
        'Fabric',
        'virtualenv',
        'virtualenvwrapper',
    ],
    # Installs the `ff` console command.
    entry_points={
        'console_scripts': [
            'ff = fuzzy_fabric.main:main',
        ]
    },
    license='MIT License',
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    keywords='fuzzy functions for fabric',
)
|
[
"dimka665@gmail.com"
] |
dimka665@gmail.com
|
6b5cb6b20a8f5f1ca794c0c3f95dd7536d9baba6
|
d109b64bfa8c80a6ec7d647beeadf9fe1c667fac
|
/class0925/clist.py
|
5480aaf0d2d17a6e7ea731723916f1a863406239
|
[] |
no_license
|
jumbokh/micropython_class
|
d34dd0a2be39d421d3bbf31dbb7bfd39b5f6ac6f
|
950be81582dba970e9c982e2e06fa21d9e9a0fdd
|
refs/heads/master
| 2022-10-10T22:27:02.759185
| 2022-10-01T14:44:31
| 2022-10-01T14:44:31
| 173,898,623
| 4
| 3
| null | 2020-03-31T09:57:23
| 2019-03-05T07:40:38
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 349
|
py
|
# Basic RGB colour palette (roughly the 16 standard VGA colours).
# NOTE(review): [128,0,0] (maroon) appears twice and [0,128,0] (dark green)
# is absent — the second occurrence is presumably a typo; confirm before use.
clist=[[0,0,0],[255,255,255],[255,0,0],[0,255,0],
       [0,0,255],[255,255,0],[0,255,255],[255,0,255],
       [192,192,192,],[128,128,128],[128,0,0],[128,0,0],
       [128,128,0],[0,128,128],[0,0,128]]
# Second palette of hand-picked RGB colours; origin/meaning not evident
# from this file — TODO confirm against callers.
hlist=[[59,96,233],[104,42,67],[213,227,227],[216,18,47],
       [12,238,108],[255,246,58],[236,184,20],[240,93,197],
       [16,173,186],[0,222,255],[59,96,233],[102,98,133]]
|
[
"jumbokh@gmail.com"
] |
jumbokh@gmail.com
|
4a0710140ef441c276e39701404ea8f661acf36a
|
249298bde8b03da659171947b29b8761b7115201
|
/pollux/adaptdl/adaptdl/checkpoint.py
|
ed158092630eecbb9441a42570d4b78a92aa3f4f
|
[
"Apache-2.0"
] |
permissive
|
gudiandian/ElasticFlow
|
cd4ce1f97f17cb878aa79865277ab64fa8ba7f89
|
0ffc17d257f2923de6478c4331ea64d858e7ab53
|
refs/heads/main
| 2023-04-18T14:45:49.998762
| 2023-01-05T13:13:10
| 2023-01-05T13:13:10
| 541,545,148
| 7
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,657
|
py
|
# Copyright 2020 Petuum, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides functionality to Save and load arbitrary state as part of
checkpoint-restart elasticity. The `State` class can be subclassed to define
how to save/load any state to/from persistent storage, so it can be restored
after the current job restarts and resumed from where it left off.
"""
import os
from adaptdl.env import checkpoint_path, replica_rank
# FIXME: Keeping global state like this will result in memory leaks for
# applications which do not restart too often.
_STATES_TO_NAMES = {}
_NAMES_TO_STATES = {}
class State(object):
    """An arbitrary, uniquely named piece of checkpointable state.

    Subclasses override `save`, `load`, and `sync` to define how the state
    is written to / read from persistent storage and kept consistent across
    replicas, so a restarted job can resume where it left off.
    """

    def __init__(self, name):
        """Register this object under the unique identifier *name*.

        The name identifies the saved object in persistent storage, so no
        two `State` objects may share one.

        Arguments:
            name (str): Unique name of this `State` object.

        Raises:
            ValueError: If a `State` object with the given name already exists.
        """
        if name in _NAMES_TO_STATES:
            raise ValueError("State '{}' already exists".format(name))
        # Maintain both directions of the name <-> state mapping.
        _STATES_TO_NAMES[self] = name
        _NAMES_TO_STATES[name] = self

    def save(self, fileobj):
        """Write this state into *fileobj*.

        Override in subclasses; invoked by `save_all_states` / `save_state`
        to persist the state.

        Arguments:
            fileobj (BinaryIO): A binary writable file object.
        """
        pass

    def load(self, fileobj):
        """Read this state back from *fileobj*.

        Override in subclasses; invoked by `load_state` to restore the
        state from persistent storage.

        Arguments:
            fileobj (BinaryIO): A binary readable file object.
        """
        pass

    def sync(self):
        """Synchronize this state across replicas.

        Override in subclasses when consistency must be established before
        saving; invoked by `save_state` prior to `save`.
        """
        pass
def save_all_states():
    """
    Invokes `save_state` on every registered `State` object. This function
    can be used to trigger a global checkpoint and save every `State` in
    the current job.
    """
    for state in _STATES_TO_NAMES:
        save_state(state)
def save_state(state, sync=True):
    """
    Persist a `State` object. When *sync* is True (the default), `State.sync`
    is first invoked on all replicas; `State.save` itself then runs on the
    rank-0 replica only, writing under the checkpoint path if one is set.

    Arguments:
        state (State): The `State` object to save to persistent storage.
        sync (bool): Whether `State.sync` should be invoked.
    """
    if sync:
        state.sync()
    if replica_rank() != 0:
        # Only rank 0 writes the checkpoint file.
        return
    name = _STATES_TO_NAMES[state]
    if checkpoint_path() is not None:
        with open(os.path.join(checkpoint_path(), name), "wb") as f:
            state.save(f)
def load_state(state):
    """
    Restore the given `State` object from persistent storage. If the object
    was previously saved, `State.load` is invoked with a readable file
    object to load from.

    Arguments:
        state (State): `State` object to load from persistent storage.

    Returns:
        `True` if state was previously saved and `State.load` was invoked,
        `False` otherwise.
    """
    root = checkpoint_path()
    if root is None:
        return False
    try:
        with open(os.path.join(root, _STATES_TO_NAMES[state]), "rb") as f:
            state.load(f)
    except FileNotFoundError:
        # Nothing was saved for this state yet.
        return False
    return True
|
[
"gudiandian1998@pku.edu.cn"
] |
gudiandian1998@pku.edu.cn
|
a5ee1872c6c373df25f50d062c448a2600297ef8
|
930c207e245c320b108e9699bbbb036260a36d6a
|
/BRICK-RDFAlchemy/generatedCode/brick/brickschema/org/schema/_1_0_2/Brick/Outside_Air_Flow_Sensor.py
|
68914369d58d176ddefe3473259f05153cfd0d36
|
[] |
no_license
|
InnovationSE/BRICK-Generated-By-OLGA
|
24d278f543471e1ce622f5f45d9e305790181fff
|
7874dfa450a8a2b6a6f9927c0f91f9c7d2abd4d2
|
refs/heads/master
| 2021-07-01T14:13:11.302860
| 2017-09-21T12:44:17
| 2017-09-21T12:44:17
| 104,251,784
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Air_Flow_Sensor import Air_Flow_Sensor
from brick.brickschema.org.schema._1_0_2.Brick.Outside_Air import Outside_Air
class Outside_Air_Flow_Sensor(Air_Flow_Sensor,Outside_Air):
    """Auto-generated Brick 1.0.2 concept: a flow sensor measuring outside air."""
    # RDF type IRI binding this class to the Brick schema term.
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').Outside_Air_Flow_Sensor
|
[
"Andre.Ponnouradjane@non.schneider-electric.com"
] |
Andre.Ponnouradjane@non.schneider-electric.com
|
01c272af2d82ab2ed9b1ab5b5f39606aed3d5c01
|
42c63d5f9c724c99ba93f77bdead51891fcf8623
|
/OpenStack-Mitaka-src/python-manilaclient/manilaclient/v2/share_export_locations.py
|
0d0cb3163ea350a24e7e37fcc6849e45d5fa6087
|
[
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
liyongle/openstack-mitaka
|
115ae819d42ed9bf0922a8c0ab584fa99a3daf92
|
5ccd31c6c3b9aa68b9db1bdafcf1b029e8e37b33
|
refs/heads/master
| 2021-07-13T04:57:53.488114
| 2019-03-07T13:26:25
| 2019-03-07T13:26:25
| 174,311,782
| 0
| 1
| null | 2020-07-24T01:44:47
| 2019-03-07T09:18:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,883
|
py
|
# Copyright 2015 Mirantis inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from manilaclient import api_versions
from manilaclient import base
from manilaclient.openstack.common.apiclient import base as common_base
class ShareExportLocation(common_base.Resource):
    """Resource class for a share export location."""

    def __repr__(self):
        return "<ShareExportLocation: %s>" % self.id

    def __getitem__(self, key):
        # Allow dict-style access to the raw API response fields.
        return self._info[key]
class ShareExportLocationManager(base.ManagerWithFind):
    """Manage :class:`ShareExportLocation` resources."""

    resource_class = ShareExportLocation

    @api_versions.wraps("2.9")
    def list(self, share):
        """List all export locations of a share.

        :param share: share object or its ID.
        """
        share_id = common_base.getid(share)
        return self._list("/shares/%s/export_locations" % share_id,
                          "export_locations")

    @api_versions.wraps("2.9")
    def get(self, share, export_location):
        """Get a single share export location.

        :param share: share object or its ID.
        :param export_location: export location object or its ID.
        """
        share_id = common_base.getid(share)
        export_location_id = common_base.getid(export_location)
        return self._get(
            "/shares/%(share_id)s/export_locations/%(export_location_id)s" % {
                "share_id": share_id,
                "export_location_id": export_location_id}, "export_location")
|
[
"yongle.li@gmail.com"
] |
yongle.li@gmail.com
|
85de194799b259a616254f8b20da8a630ac9d0a6
|
e1eaed6dde62fc54eb317d28dbd18e0740e3e8f3
|
/official/vision/image_classification/efficientnet/tfhub_export.py
|
d3518a1304c8c761cfaabdcc96dead70dd9b0097
|
[
"Apache-2.0"
] |
permissive
|
nlpming/models
|
cf5008d2e66d2b66b6d61423e214f2f9f9fbe472
|
3cbf0748529d787dd09fa3ed031e557f0ddfa268
|
refs/heads/master
| 2021-12-03T03:29:16.042489
| 2021-11-23T14:09:10
| 2021-11-23T14:09:10
| 206,007,973
| 0
| 0
|
Apache-2.0
| 2019-09-03T06:47:46
| 2019-09-03T06:47:46
| null |
UTF-8
|
Python
| false
| false
| 2,317
|
py
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to export TF-Hub SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import tensorflow as tf
from official.vision.image_classification.efficientnet import efficientnet_model
FLAGS = flags.FLAGS
flags.DEFINE_string("model_name", None, "EfficientNet model name.")
flags.DEFINE_string("model_path", None, "File path to TF model checkpoint.")
flags.DEFINE_string("export_path", None,
"TF-Hub SavedModel destination path to export.")
def export_tfhub(model_path, hub_destination, model_name):
"""Restores a tf.keras.Model and saves for TF-Hub."""
model_configs = dict(efficientnet_model.MODEL_CONFIGS)
config = model_configs[model_name]
image_input = tf.keras.layers.Input(
shape=(None, None, 3), name="image_input", dtype=tf.float32)
x = image_input * 255.0
ouputs = efficientnet_model.efficientnet(x, config)
hub_model = tf.keras.Model(image_input, ouputs)
ckpt = tf.train.Checkpoint(model=hub_model)
ckpt.restore(model_path).assert_existing_objects_matched()
hub_model.save(
os.path.join(hub_destination, "classification"), include_optimizer=False)
feature_vector_output = hub_model.get_layer(name="top_pool").get_output_at(0)
hub_model2 = tf.keras.Model(image_input, feature_vector_output)
hub_model2.save(
os.path.join(hub_destination, "feature-vector"), include_optimizer=False)
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
export_tfhub(FLAGS.model_path, FLAGS.export_path, FLAGS.model_name)
if __name__ == "__main__":
app.run(main)
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
31214babd69af863c6c00e938f64103d02fbd00b
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_143/ch20_2020_03_04_19_28_32_953906.py
|
1c8d2050fbb9fd530886f4bcd029d1f07091e621
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
# Pergunta quantos km
km= float(input('quantos km:'))
def P(qts_km):
if qts_km <= 200:
y=qts_km*0.5
return y
else:
y=200*0.5 + (qts_km-200)*0.45
return y
y = P(km)
print ('{0:2f} .format(y))
|
[
"you@example.com"
] |
you@example.com
|
7a6cdd72513023707903c252305e7238ce9bbccf
|
f9f1f887629855bbf12ecb0b7358fed5946b3caa
|
/.history/app_blog_forum/views_20201117201247.py
|
b670732826fc49cafc6e9dd7d657644d12d97833
|
[] |
no_license
|
hibamohi5/blog_forum
|
4f687cee3ca6bdb1d0302b3657a77c01945404b3
|
d6380eb7149355c79276b738da7da94c2ee03570
|
refs/heads/main
| 2023-01-14T18:33:53.043754
| 2020-11-20T01:52:22
| 2020-11-20T01:52:22
| 314,417,118
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,340
|
py
|
from django.shortcuts import render, redirect
from .models import *
from django.contrib import messages
def index(request):
return render(request, "index.html")
def register_new_user(request):
errors = User.objects.user_registration_validator(request.POST)
if len(errors) > 0:
for key, value in errors.items():
error_msg = key + ' - ' + value
messages.error(request, error_msg)
return redirect("/")
else:
first_name_from_post = request.POST['first_name']
last_name_from_post = request.POST['last_name']
email_from_post = request.POST['email']
password_from_post = request.POST['password']
new_user = User.objects.create(
first_name=first_name_from_post,
last_name=last_name_from_post,
email=email_from_post,
password=password_from_post
)
print(new_user.id)
request.session['user_id'] = new_user.id
return redirect('/register/view')
def view_trip(request, trip_id):
if 'user_id' not in request.session:
return redirect("/")
user = User.objects.get(id=request.session['user_id'])
trip = Trip.objects.get(id=trip_id)
context = {
"user": user,
"trip": trip
}
return render(request, "view_trip.html", context)
|
[
"hibamohi5@gmail.com"
] |
hibamohi5@gmail.com
|
ac08592f24581115477dc1e3fe0e6907fc2e9860
|
2941b312fc83ff08f5b5f362cf700e7ff8352cd3
|
/kats/tests/models/test_data_validation.py
|
cf7402fdd68cec674de17cc8bb5223d1b0844e51
|
[
"MIT"
] |
permissive
|
rbagd/Kats
|
5e1ac0b288f1250423921f7ada812c1198c55269
|
4f86a332d0afc790ab1d833fd8ffe6782a8be93b
|
refs/heads/main
| 2023-08-21T20:35:42.141281
| 2021-09-22T09:27:18
| 2021-09-22T09:27:18
| 407,527,411
| 0
| 0
|
MIT
| 2021-09-17T12:11:43
| 2021-09-17T12:11:42
| null |
UTF-8
|
Python
| false
| false
| 1,870
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import os
import pkgutil
import unittest
from unittest import TestCase
import pandas as pd
from kats.consts import TimeSeriesData
from kats.data.utils import load_air_passengers
def load_data(file_name):
ROOT = "kats"
if "kats" in os.getcwd().lower():
path = "data/"
else:
path = "kats/data/"
data_object = pkgutil.get_data(ROOT, path + file_name)
return pd.read_csv(io.BytesIO(data_object), encoding="utf8")
class DataValidationTest(TestCase):
def setUp(self):
self.TSData = load_air_passengers()
def test_data_validation(self) -> None:
# add the extra data point to break the frequency.
extra_point = pd.DataFrame(
[["1900-01-01", 2], ["2020-01-01", 2]], columns=["time", "y"]
)
DATA = self.TSData.to_dataframe()
data_with_extra_point = DATA.copy().append(extra_point)
tsData_with_missing_point = TimeSeriesData(data_with_extra_point)
tsData_with_missing_point.validate_data(
validate_frequency=False, validate_dimension=False
)
tsData_with_missing_point.validate_data(
validate_frequency=False, validate_dimension=True
)
with self.assertRaises(ValueError, msg="Frequency validation should fail."):
tsData_with_missing_point.validate_data(
validate_frequency=True, validate_dimension=False
)
with self.assertRaises(ValueError, msg="Frequency validation should fail."):
tsData_with_missing_point.validate_data(
validate_frequency=True, validate_dimension=True
)
if __name__ == "__main__":
unittest.main()
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
c466642b454d777e22b14c619db8d707e72673fd
|
615e3cdc2c136b2f66b5c553d375823d3580fd08
|
/exercicio/venv/Scripts/easy_install-3.7-script.py
|
d7f8ad6ddd000dbb1b0c6fc876af7b1dc51e5a7a
|
[] |
no_license
|
Android-Ale/PracticePython
|
859a084e224cfb52eed573e38d7d9dc91f405885
|
cab2ac7593deb22e6bb05a95ecd19a8ea2c96b0a
|
refs/heads/master
| 2023-05-06T06:33:36.724569
| 2021-05-15T00:12:06
| 2021-05-15T00:12:06
| 369,307,842
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
#!C:\Users\Alpha\PycharmProjects\exercicio\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
|
[
"alesson9silva@gmail.com"
] |
alesson9silva@gmail.com
|
eeaa45b85f2ad11bc1462bbc6fe5dd16a502735b
|
2c0993aac7ad3848d343ffb4f838a5bda8f740f3
|
/funcation_Python/harshad.py
|
019eb9bed9d0b4ecbe37f410ebf3b95bc61b6fa5
|
[] |
no_license
|
Praveen-Kumar-Bairagi/Python_Logical_Quetions
|
6e20e04bf737f8f4592be76694b54470d5d79e7a
|
4e0f3262cf183f56e5a57157f0593a454923317f
|
refs/heads/master
| 2023-08-22T21:40:36.343811
| 2021-10-24T05:21:43
| 2021-10-24T05:21:43
| 420,594,339
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
def is_harshad_num(num):
num2=num
add=0
while 0<num:
a=num%10
add+=a
num=num//10
if num2%add==0:
return "true"
else:
return "False"
num=int(input("enter the num"))
print(is_harshad_num(num))
|
[
"you@example.com"
] |
you@example.com
|
a4badd11e840906a2c0e96b040d2ad0c1d23965d
|
e631f155b30122d813678fd8dd98004085b9579e
|
/setup.py
|
0d6bb32b8676adacd2c8026933b7605a6ce3bab9
|
[
"MIT"
] |
permissive
|
SidSachdev/pycrunchbase
|
4efd716c58b2bdbee379c5f4d9fd30c310d43502
|
f0a9b945bc5d3f7b3827820bd28a75265c28f756
|
refs/heads/master
| 2020-12-14T18:53:51.512180
| 2015-09-28T01:17:19
| 2015-09-28T01:17:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,791
|
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, print_function
import io
import os
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import relpath
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
return io.open(
join(dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8")
).read()
setup(
name="pycrunchbase",
version="0.3.5",
license="MIT",
description="Python bindings to CrunchBase",
long_description="{0}\n{1}".format(read("README.rst"), re.sub(":obj:`~?(.*?)`", r"``\1``", read("CHANGELOG.rst"))),
author="Ng Zhi An",
author_email="ngzhian@gmail.com",
url="https://github.com/ngzhian/pycrunchbase",
packages=find_packages("src"),
package_dir={"": "src"},
py_modules=[splitext(basename(path))[0] for path in glob("src/*.py")],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: Unix",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Utilities",
],
keywords=[
"crunchbase"
],
install_requires=[
"requests==2.5.1", "six==1.9.0"
],
)
|
[
"ngzhian@gmail.com"
] |
ngzhian@gmail.com
|
9a552b56252d358c502b6dbb7ae63745a4fca22c
|
2452bdfb25628e55190c46694e156bf4b1459cf0
|
/prejudge/views.py
|
1dcd4f1dae49f5c5ed8bf029726d85ea47c4ae9a
|
[] |
no_license
|
bobjiangps/bug_prejudge
|
0da3fbeab9dae1620330d16852d3e8792def56d5
|
aeadd84476de0cf10a0341d694888f768e6c3706
|
refs/heads/master
| 2023-03-01T14:43:44.904473
| 2023-02-13T10:09:59
| 2023-02-13T10:09:59
| 195,923,513
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 746
|
py
|
from prejudge_process import PrejudgeProcess
from rest_framework.views import APIView
from rest_framework.response import Response
class PrejudgeRound(APIView):
def get(self, request, round_id):
p = PrejudgeProcess(round_id=round_id)
result = p.run()
return Response(result)
class PrejudgeScript(APIView):
def get(self, request, round_id, script_id):
p = PrejudgeProcess(round_id=round_id, script_id=script_id)
result = p.run()
return Response(result)
class PrejudgeCase(APIView):
def get(self, request, round_id, script_id, case_id):
p = PrejudgeProcess(round_id=round_id, script_id=script_id, case_id=case_id)
result = p.run()
return Response(result)
|
[
"jbsv43@sina.com"
] |
jbsv43@sina.com
|
81c0be1f662045795975963953f6ca78e7b13dc9
|
e173098f9ecd39bef112432a8bb7ed7fb1209fe9
|
/wfm_client/migrations/0017_auto_20160914_1241.py
|
b48fd76cef7c8e48e380eb28be2f3b3e3694d74f
|
[] |
no_license
|
isaiahiyede/inventory
|
51b639257c14e257ababae047d83caa93b809893
|
cedecc5b6d22d977b4bdac00e5faf775da7382ab
|
refs/heads/master
| 2021-08-29T08:41:02.062763
| 2017-12-13T15:46:57
| 2017-12-13T15:46:57
| 114,137,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,015
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-09-14 12:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wfm_client', '0016_item_item_edited_by'),
]
operations = [
migrations.AlterField(
model_name='item',
name='item_category',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AlterField(
model_name='item',
name='item_desc',
field=models.CharField(blank=True, max_length=150, null=True),
),
migrations.AlterField(
model_name='item',
name='item_name',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AlterField(
model_name='item',
name='item_num',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
|
[
"a.ajibike@zaposta.com"
] |
a.ajibike@zaposta.com
|
d7148b6c3654a2afeb0b18046d3d86b65ce7fde1
|
edd75bcf8c450dfce6b26b92b4fc012b399bd319
|
/Exe41_dividas_com_juros.py
|
83923f6847e4b9e94b7bd00a054b371381f67bc8
|
[
"MIT"
] |
permissive
|
lucaslk122/Exercicios-python-estrutura-de-repeticao
|
ac130cf9f6e78aff50e15e41aa1badfba55d6c8b
|
1f203918e9bb8415128bb69f515240057b118a14
|
refs/heads/main
| 2022-12-20T17:08:43.105838
| 2020-10-20T14:08:15
| 2020-10-20T14:08:15
| 304,305,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,904
|
py
|
print("""Quantidade de Parcelas % de Juros sobre o valor inicial da dívida
1 0
3 10
6 15
9 20
12 25""")
divida = float(input("Digite o valor da sua divida: "))
quantidade_parcelas = int(input("Digite a quantidade de parcelas: "))
if quantidade_parcelas == 1:
print(F"Valor da divida: R${divida}")
print("Valor do juros: 0")
print("Quantidade de parcelas: 1")
print(f"Valor da parcela; R${divida}")
elif quantidade_parcelas == 3:
print(F"Valor da divida: R${divida}")
juros = round((divida * 0.1),2)
parcelas = round(((divida + juros)/ quantidade_parcelas),2 )
print(f"Valor do juros: R${juros}")
print("Numero de parcelas: 3")
print(f"Valor da parcela; R${parcelas}")
elif quantidade_parcelas == 6:
print(F"Valor da divida: R${divida}")
juros = round((divida * 0.15),2)
parcelas = round(((divida + juros)/ quantidade_parcelas),2 )
print(f"Valor do juros: R${juros}")
print(f"Numero de parcelas: {quantidade_parcelas}")
print(f"Valor da parcela; R${parcelas}")
elif quantidade_parcelas == 9:
print(F"Valor da divida: R${divida}")
juros = round((divida * 0.2),2)
parcelas = round(((divida + juros)/ quantidade_parcelas),2 )
print(f"Valor do juros: R${juros}")
print(f"Numero de parcelas: {quantidade_parcelas}")
print(f"Valor da parcela; R${parcelas}")
elif quantidade_parcelas == 12:
print(F"Valor da divida: R${divida}")
juros = round((divida * 0.25),2)
parcelas = round(((divida + juros)/ quantidade_parcelas),2 )
print(f"Valor do juros: R${juros}")
print(f"Numero de parcelas: {quantidade_parcelas}")
print(f"Valor da parcela; R${parcelas}")
else:
print("Opção invalida, reinicie o programa")
|
[
"71664028+lucaslk122@users.noreply.github.com"
] |
71664028+lucaslk122@users.noreply.github.com
|
f1876448cae5a208714bf6e18d72d3522170ef33
|
9576d5a3676b09f3b892083988cfbe6985a9ef4a
|
/resender.py
|
465639185d7a50c7a097a50fe6b58cd6e0360243
|
[] |
no_license
|
ual-cci/music_gen_interaction_RTML
|
da4d2a8c754423d223ca342bac577967e291ad71
|
39419d5bd53ff685a2a9efcf4f373a624c8b28f9
|
refs/heads/master
| 2021-09-04T09:41:55.976149
| 2021-08-16T19:58:54
| 2021-08-16T19:58:54
| 215,372,452
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,150
|
py
|
"""
def resender_function(arr):
pass
"""
from autoencoder.autoencoders_handler import process_using_VAE, to_latent_vector_using_VAE
def resender_function(arr):
# Send one frame only - the last generated one
#send_one_frame(arr)
send_all_gen_frames(arr)
#encode_spectrogram_VAE(arr)
#arr = process_spectrogram_VAE(arr)
# return spectrogram ...
return arr
def process_spectrogram_VAE(arr):
print("processing")
processed = process_using_VAE(arr)
print("HAX OUTPUT === ", processed.shape)
return processed
def encode_spectrogram_VAE(arr):
print("encoding")
latents = to_latent_vector_using_VAE(arr)
print("HAX OUTPUT === ", latents.shape)
sequence_length = 40
to_send = latents #[sequence_length:]
for count, one_frame in enumerate(to_send):
print("HAX OUTPUT === ", one_frame.shape)
if count % 4 == 0:
osc_handler.send_arr(one_frame)
def send_one_frame(arr):
last_spectum = arr[-1]
low_frequencies = last_spectum[0:512]
#print("HAX OUTPUT === ", low_frequencies.shape)
global osc_handler
osc_handler.send_arr(low_frequencies)
def send_all_gen_frames(arr):
# settings/server sequence_length by default on 40
sequence_length = 40
global osc_handler
to_send = arr[sequence_length:]
for count, one_frame in enumerate(to_send):
low_frequencies = one_frame[0:512]
#print("HAX OUTPUT === ", low_frequencies.shape)
if count % 4 == 0:
osc_handler.send_arr(low_frequencies)
# https://github.com/kivy/oscpy
from oscpy.client import OSCClient
class OSCSender(object):
"""
Sends OSC messages from GUI
"""
def send_arr(self,arr):
signal_latent = arr
signal_latent = [float(v) for v in signal_latent]
print("Sending message=", [0, 0, len(signal_latent)])
self.osc.send_message(b'/send_gan_i', [0, 0] + signal_latent)
def __init__(self):
#address = "127.0.0.1"
#port = 8000
address = '0.0.0.0'
port = 8000
self.osc = OSCClient(address, port)
osc_handler = OSCSender()
|
[
"previtus@gmail.com"
] |
previtus@gmail.com
|
7bb2e73006c6ee160b2255fc289034c470d89208
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03126/s820418794.py
|
c6ce4db220fdbf39b8cd22445a417e1ceea7b485
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
n, m = map(int, input().split())
foods = [0] * (m + 1)
for i in range(n):
k, *a = map(int, input().split())
for j in a:
foods[j] += 1
print(foods.count(n))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
f9f75ef1594181d3bfa18a88bdb0291d5d66b770
|
2c565dadbf0f02fe0f08c2b2111bf71140a018dc
|
/convert.py
|
47f7d78c53a21d89a8e24ea0245ae57015705dd7
|
[] |
no_license
|
nottrobin/discoursifier
|
e30396df2c13e9a9481149ac9f2f2cd6e820fddb
|
b803900b676ea56e9bced58044662c35f0616a42
|
refs/heads/master
| 2020-04-22T09:52:55.556816
| 2019-02-12T09:05:08
| 2019-02-12T09:05:08
| 144,270,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,682
|
py
|
#! /usr/bin/env python3
from glob import glob
import markdown
import re
import json
filepaths = glob("**/*.md", recursive=True)
def convert_notifications(content):
"""
Convert old-style notifications:
!!! Note "title":
this is some note contents
Into new style:
[note="title"]
this is some note contents
[/note]
"""
notification_match = (
"!!! (Note|Warning|Positive|Negative|Important|Tip|Information)"
'(?: "([^"]*)")?:?(.*\n(?: .+\n)*)'
)
for match in re.finditer(notification_match, content):
matched_text = match.group(0)
note_type = match.group(1).lower()
title = match.group(2)
body = match.group(3).strip()
if note_type in ["warning", "important"]:
note_type = "caution"
if note_type == "tip":
note_type = "note"
if note_type and body:
body = re.sub("^ ", "", body).replace("\n ", "\n")
options = ""
if note_type != "note":
options = f"={note_type}"
if title:
options = f'{options} title="{title}"'
replacement = f"[note{options}]\n{body}\n[/note]\n"
content = content.replace(matched_text, replacement)
return content
def convert_metadata(content):
"""
Convert Markdown metadata
(See https://python-markdown.github.io/extensions/meta_data/)
"Title" will be added as a <h1>, if there isn't one already
"TODO" will be preserved in `<!-- -->` HTML comments
anything else will be ignored
"""
parser = markdown.Markdown(extensions=["markdown.extensions.meta"])
parser.convert(content)
title = parser.Meta.get("title", [None])[0]
todo = "\n- ".join(parser.Meta.get("todo", []))
content = re.sub("^( *\w.*\n)*", "", content).lstrip()
title_match = re.match("^# ([^\n]+)(.*)$", content, re.DOTALL)
if title_match:
# Prefer the <h1> value to the metadata
title = title_match.groups()[0]
content = title_match.groups()[1].strip()
if todo:
content = f"<!--\nTodo:\n- {todo}\n-->\n\n" + content
return title, content
title_map = {}
# Convert markdown
for path in filepaths:
with open(path) as file_handle:
content = file_handle.read()
content = convert_notifications(content)
title, content = convert_metadata(content)
title_map[path] = title
with open(path, "w") as file_handle:
file_handle.write(content)
# Write title mapping to file
with open("title-map.json", "w") as title_map_file:
json.dump(title_map, title_map_file)
|
[
"robin@robinwinslow.co.uk"
] |
robin@robinwinslow.co.uk
|
491148774ac1fa2b690aa1334fcef76f3d45bf60
|
ac69799f105ec928ecfd5e1aa67062b3e19dead3
|
/sdk/python/tests/compiler/testdata/tekton_pipeline_conf.py
|
481b38418982617f055d134e0b11f7f8a6f2541a
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
kubeflow/kfp-tekton
|
5326373a4056b2b3ad78fc9199cae91c9a084282
|
136e7a93528b1b5845dcab058d46272b15af6c54
|
refs/heads/master
| 2023-08-05T07:25:29.697741
| 2023-08-03T16:04:20
| 2023-08-03T16:04:20
| 217,148,415
| 149
| 111
|
Apache-2.0
| 2023-09-11T18:21:37
| 2019-10-23T20:33:01
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 2,470
|
py
|
# Copyright 2021 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kfp import dsl, components
import kfp_tekton
import json
from kubernetes.client import V1SecurityContext
from kubernetes.client.models import V1Volume, V1PersistentVolumeClaimVolumeSource, \
V1PersistentVolumeClaimSpec, V1ResourceRequirements
def echo_op():
return components.load_component_from_text("""
name: echo
description: echo
implementation:
container:
image: busybox
command:
- sh
- -c
args:
- echo
- Got scheduled
""")()
@dsl.pipeline(
name='echo',
description='echo pipeline'
)
def echo_pipeline():
echo = echo_op()
workspace_json = {'new-ws': {"readOnly": True}}
echo.add_pod_annotation('workspaces', json.dumps(workspace_json))
pipeline_conf = kfp_tekton.compiler.pipeline_utils.TektonPipelineConf()
pipeline_conf.add_pipeline_label('test', 'label')
pipeline_conf.add_pipeline_label('test2', 'label2')
pipeline_conf.add_pipeline_annotation('test', 'annotation')
pipeline_conf.set_security_context(V1SecurityContext(run_as_user=0))
pipeline_conf.set_automount_service_account_token(False)
pipeline_conf.add_pipeline_env('WATSON_CRED', 'ABCD1234')
pipeline_conf.add_pipeline_workspace(workspace_name="new-ws", volume=V1Volume(
name='data',
persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(
claim_name='data-volume')
), path_prefix='artifact_data/')
pipeline_conf.add_pipeline_workspace(workspace_name="new-ws-template",
volume_claim_template_spec=V1PersistentVolumeClaimSpec(
access_modes=["ReadWriteOnce"],
resources=V1ResourceRequirements(requests={"storage": "30Gi"})
))
pipeline_conf.set_generate_component_spec_annotations(False)
if __name__ == "__main__":
from kfp_tekton.compiler import TektonCompiler
TektonCompiler().compile(echo_pipeline, 'echo_pipeline.yaml', tekton_pipeline_conf=pipeline_conf)
|
[
"noreply@github.com"
] |
kubeflow.noreply@github.com
|
aae3482d533ac325cf980331aa0c2e91802bc44c
|
625daac7e73b98935f9fe93e647eb809b48b712e
|
/Arcade/The_Core/isSmooth.py
|
3866fb0c2934491668610ab80dcd586c71aad324
|
[] |
no_license
|
aleksaa01/codefights-codesignal
|
19b2d70779cc60f62511b6f88ae5d049451eac82
|
a57a5589ab2c9d9580ef44900ea986c826b23051
|
refs/heads/master
| 2022-03-15T04:46:40.356440
| 2019-12-08T15:41:37
| 2019-12-08T15:41:37
| 112,034,380
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,226
|
py
|
def isSmooth(arr):
first = arr[0]
last = arr[-1]
if len(arr) % 2 == 0:
middle = arr[len(arr) // 2 - 1] + arr[len(arr) // 2]
else:
middle = arr[len(arr) // 2]
return first == middle == last
"""
We define the middle of the array arr as follows:
if arr contains an odd number of elements, its middle is the element whose index number is the same when counting
from the beginning of the array and from its end;
if arr contains an even number of elements, its middle is the sum of the two elements whose index numbers when
counting from the beginning and from the end of the array differ by one.
An array is called smooth if its first and its last elements are equal to one another and to the middle.
Given an array arr, determine if it is smooth or not.
Example
For arr = [7, 2, 2, 5, 10, 7], the output should be
isSmooth(arr) = true.
The first and the last elements of arr are equal to 7, and its middle also equals 2 + 5 = 7.
Thus, the array is smooth and the output is true.
For arr = [-5, -5, 10], the output should be
isSmooth(arr) = false.
The first and middle elements are equal to -5, but the last element equals 10.
Thus, arr is not smooth and the output is false.
"""
|
[
"some12curious@gmail.com"
] |
some12curious@gmail.com
|
4a2196a9ecc0a0210ca916a9f75a99c30dd18bba
|
c268dcf432f3b7171be6eb307aafbe1bd173285a
|
/reddit2telegram/channels/~inactive/comedynecrophilia/app.py
|
32bb06036f62836443d6cbaa929a8408c8d36c71
|
[
"MIT"
] |
permissive
|
Fillll/reddit2telegram
|
a7162da2cc08c81bcc8078ea4160d4ee07461fee
|
5d8ee3097e716734d55a72f5a16ce3d7467e2ed7
|
refs/heads/master
| 2023-08-09T10:34:16.163262
| 2023-07-30T18:36:19
| 2023-07-30T18:36:19
| 67,726,018
| 258
| 205
|
MIT
| 2023-09-07T02:36:36
| 2016-09-08T17:39:46
|
Python
|
UTF-8
|
Python
| false
| false
| 155
|
py
|
#encoding:utf-8
subreddit = 'comedynecrophilia'
t_channel = '@comedynecrophilia'
def send_post(submission, r2t):
return r2t.send_simple(submission)
|
[
"git@fillll.ru"
] |
git@fillll.ru
|
7a69ab2b1e9995247ed2756773d1d9f7a656de28
|
fcb4a07f27494497ef03e157de7ab50fa4e9375f
|
/core/admin.py
|
e76cfa38f98048dcac69da644072a4683ef1269b
|
[] |
no_license
|
21toffy/IMA
|
2ff452025fad908270d2aab0bafa3ee4c26c7710
|
03770d49578817e1466cedc8e09df1840f5349b0
|
refs/heads/master
| 2023-03-02T02:15:58.778086
| 2021-02-14T16:08:01
| 2021-02-14T16:08:01
| 298,578,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
from django.contrib import admin
from .models import HomeView
from .models import ViewCount
admin.site.register(HomeView)
admin.site.register(ViewCount)
|
[
"oketofoke@gmail.com"
] |
oketofoke@gmail.com
|
7a6a61c44bb4e6e43038823fc2ef01793c1f76ee
|
a6d77d00163b80dfb6d34ee254f9ba049290e43e
|
/fabfile.py
|
c58c9178b11c15c41c67b72a520ac88bcb192822
|
[
"Apache-2.0"
] |
permissive
|
tswicegood/armstrong.core.arm_layout
|
6f1cf6c9dc2c9a4030348fa88972eb8a57682705
|
70850a3068660b51a93816e83ecee73637c781c0
|
refs/heads/master
| 2021-01-16T21:04:18.735887
| 2011-07-20T20:33:59
| 2011-07-20T20:33:59
| 2,080,221
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| true
| false
| 365
|
py
|
from armstrong.dev.tasks import *
settings = {
'DEBUG': True,
'INSTALLED_APPS': (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'armstrong.core.arm_layout',
'lettuce.django',
),
}
tested_apps = ("arm_layout", )
|
[
"development@domain51.com"
] |
development@domain51.com
|
aa4ae4645bd75fa85f73bee597894e82e3e0ac43
|
209c876b1e248fd67bd156a137d961a6610f93c7
|
/python/paddle/fluid/tests/unittests/test_basic_gru_unit_op.py
|
ae3e6dc4f77d2653909e2ea5e62135ab3859e314
|
[
"Apache-2.0"
] |
permissive
|
Qengineering/Paddle
|
36e0dba37d29146ebef4fba869490ecedbf4294e
|
591456c69b76ee96d04b7d15dca6bb8080301f21
|
refs/heads/develop
| 2023-01-24T12:40:04.551345
| 2022-10-06T10:30:56
| 2022-10-06T10:30:56
| 544,837,444
| 0
| 0
|
Apache-2.0
| 2022-10-03T10:12:54
| 2022-10-03T10:12:54
| null |
UTF-8
|
Python
| false
| false
| 4,838
|
py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.core as core
from paddle.fluid.contrib.layers import BasicGRUUnit
from paddle.fluid.executor import Executor
from paddle.fluid import framework
import numpy as np
np.set_seed(123)
SIGMOID_THRESHOLD_MIN = -40.0
SIGMOID_THRESHOLD_MAX = 13.0
EXP_MAX_INPUT = 40.0
def sigmoid(x):
y = np.copy(x)
y[x < SIGMOID_THRESHOLD_MIN] = SIGMOID_THRESHOLD_MIN
y[x > SIGMOID_THRESHOLD_MAX] = SIGMOID_THRESHOLD_MAX
return 1. / (1. + np.exp(-y))
def tanh(x):
y = -2. * x
y[y > EXP_MAX_INPUT] = EXP_MAX_INPUT
return (2. / (1. + np.exp(y))) - 1.
def step(step_in, pre_hidden, gate_w, gate_b, candidate_w, candidate_b):
concat_1 = np.concatenate([step_in, pre_hidden], 1)
gate_input = np.matmul(concat_1, gate_w)
gate_input += gate_b
gate_input = sigmoid(gate_input)
r, u = np.split(gate_input, indices_or_sections=2, axis=1)
r_hidden = r * pre_hidden
candidate = np.matmul(np.concatenate([step_in, r_hidden], 1), candidate_w)
candidate += candidate_b
c = tanh(candidate)
new_hidden = u * pre_hidden + (1 - u) * c
return new_hidden
class TestBasicGRUUnit(unittest.TestCase):
def setUp(self):
self.hidden_size = 5
self.batch_size = 5
def test_run(self):
x = layers.data(name='x', shape=[-1, self.hidden_size], dtype='float32')
pre_hidden = layers.data(name="pre_hidden",
shape=[-1, self.hidden_size],
dtype='float32')
gru_unit = BasicGRUUnit("gru_unit", self.hidden_size)
new_hidden = gru_unit(x, pre_hidden)
new_hidden.persisbale = True
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
place = core.CPUPlace()
exe = Executor(place)
exe.run(framework.default_startup_program())
param_list = fluid.default_main_program().block(0).all_parameters()
# process weight and bias
gate_w_name = "gru_unit/BasicGRUUnit_0.w_0"
gate_b_name = "gru_unit/BasicGRUUnit_0.b_0"
candidate_w_name = "gru_unit/BasicGRUUnit_0.w_1"
candidate_b_name = "gru_unit/BasicGRUUnit_0.b_1"
gate_w = np.array(
fluid.global_scope().find_var(gate_w_name).get_tensor())
gate_w = np.random.uniform(-0.1, 0.1,
size=gate_w.shape).astype('float32')
fluid.global_scope().find_var(gate_w_name).get_tensor().set(
gate_w, place)
gate_b = np.array(
fluid.global_scope().find_var(gate_b_name).get_tensor())
gate_b = np.random.uniform(-0.1, 0.1,
size=gate_b.shape).astype('float32')
fluid.global_scope().find_var(gate_b_name).get_tensor().set(
gate_b, place)
candidate_w = np.array(
fluid.global_scope().find_var(candidate_w_name).get_tensor())
candidate_w = np.random.uniform(
-0.1, 0.1, size=candidate_w.shape).astype('float32')
fluid.global_scope().find_var(candidate_w_name).get_tensor().set(
candidate_w, place)
candidate_b = np.array(
fluid.global_scope().find_var(candidate_b_name).get_tensor())
candidate_b = np.random.uniform(
-0.1, 0.1, size=candidate_b.shape).astype('float32')
fluid.global_scope().find_var(candidate_b_name).get_tensor().set(
candidate_b, place)
step_input_np = np.random.uniform(
-0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')
pre_hidden_np = np.random.uniform(
-0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')
out = exe.run(feed={
'x': step_input_np,
'pre_hidden': pre_hidden_np
},
fetch_list=[new_hidden])
api_out = out[0]
np_out = step(step_input_np, pre_hidden_np, gate_w, gate_b, candidate_w,
candidate_b)
np.testing.assert_allclose(api_out, np_out, rtol=0.0001, atol=0)
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
Qengineering.noreply@github.com
|
ff9ea866ff4b14d685a908c860c705eb0ba43a96
|
0a7223017a2e2f83fa15f5ffbe2355c92ab1e62d
|
/landscapes_assignment/settings.py
|
399c2d01e8c170cc8c50b4e0f117d1558955e1cc
|
[] |
no_license
|
mazurbeam/landscapes
|
7d033dad59e3cbceae1c2ca563de726f5286ae48
|
94085cf5cc8f7f40a93c4d4bb5ab652a87bfa448
|
refs/heads/master
| 2021-01-01T15:26:38.899601
| 2017-07-18T15:43:59
| 2017-07-18T15:43:59
| 97,616,642
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,162
|
py
|
"""
Django settings for landscapes_assignment project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vr1@i&#b6n+5^sc_=z_6@6l3s_lk&szc5h=()e5evp6ks2q32e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.landscapes',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'landscapes_assignment.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'landscapes_assignment.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
|
[
"mazurbeam@gmail.com"
] |
mazurbeam@gmail.com
|
b87776d03301cf9cd4a5dbd6cedc5763d2a60525
|
1375f57f96c4021f8b362ad7fb693210be32eac9
|
/kubernetes/test/test_v1_subject_access_review.py
|
813eff18c4f80272dcd42bfa8903f85d0e4fa9b2
|
[
"Apache-2.0"
] |
permissive
|
dawidfieluba/client-python
|
92d637354e2f2842f4c2408ed44d9d71d5572606
|
53e882c920d34fab84c76b9e38eecfed0d265da1
|
refs/heads/master
| 2021-12-23T20:13:26.751954
| 2017-10-06T22:29:14
| 2017-10-06T22:29:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 909
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_subject_access_review import V1SubjectAccessReview
class TestV1SubjectAccessReview(unittest.TestCase):
""" V1SubjectAccessReview unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1SubjectAccessReview(self):
"""
Test V1SubjectAccessReview
"""
model = kubernetes.client.models.v1_subject_access_review.V1SubjectAccessReview()
if __name__ == '__main__':
unittest.main()
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
af32c19a05a7ba77388de6a5ca7225230bb65d65
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/pdMwiMpYkJkn8WY83_6.py
|
eb07438f90569faa3aa2f1d20414768ac52e5228
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
"""
Write a function that **recursively** determines if a string is a palindrome.
### Examples
is_palindrome("abcba") ➞ True
is_palindrome("b") ➞ True
is_palindrome("") ➞ True
is_palindrome("ad") ➞ False
### Notes
An empty string counts as a palindrome.
"""
def is_palindrome(word):
if len(word) < 2: return True
return word[0] == word[-1] and is_palindrome(word[1:-1])
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
c1225ac0b8b80664c5c7e7d70b5f53f12f8fd153
|
0b3c5260cd5c33a1beccc5710a5d0fd097a5ea15
|
/anchore_engine/db/db_archivedocument.py
|
b13024780094cf487f4e5bfefd708f78d7b91e2c
|
[
"Apache-2.0"
] |
permissive
|
omerlh/anchore-engine
|
fb2d7cb3d8bd259f6c973b450fbaa2c2e00497f0
|
669a0327f8baaee3f5c7c64b482909fe38830d80
|
refs/heads/master
| 2021-09-02T12:48:51.661648
| 2018-01-02T19:26:47
| 2018-01-02T19:26:47
| 116,236,136
| 1
| 0
| null | 2018-01-04T08:41:39
| 2018-01-04T08:41:39
| null |
UTF-8
|
Python
| false
| false
| 4,670
|
py
|
import time
from anchore_engine import db
from anchore_engine.db import ArchiveDocument
# specific DB interface helpers for the 'services' table
def add(userId, bucket, archiveId, documentName, inobj, session=None):
if not session:
session = db.Session
our_result = session.query(ArchiveDocument).filter_by(userId=userId, bucket=bucket,archiveId=archiveId,documentName=documentName).first()
if not our_result:
new_service = ArchiveDocument(userId=userId, bucket=bucket,archiveId=archiveId,documentName=documentName)
new_service.update(inobj)
session.add(new_service)
else:
dbobj = {}
dbobj.update(inobj)
our_result.update(dbobj)
dbobj.clear()
return(True)
def get_all(session=None):
if not session:
session = db.Session
ret = []
our_results = session.query(ArchiveDocument)
for result in our_results:
obj = dict((key,value) for key, value in vars(result).iteritems() if not key.startswith('_'))
ret.append(obj)
return(ret)
def get(userId, bucket, archiveId, session=None):
#session = db.Session()
ret = {}
result = session.query(ArchiveDocument).filter_by(userId=userId, bucket=bucket, archiveId=archiveId).first()
if result:
obj = dict((key,value) for key, value in vars(result).iteritems() if not key.startswith('_'))
ret.update(obj)
return(ret)
def get_byname(userId, documentName, session=None):
if not session:
session = db.Session
ret = {}
result = session.query(ArchiveDocument).filter_by(userId=userId, documentName=documentName).first()
if result:
obj = dict((key,value) for key, value in vars(result).iteritems() if not key.startswith('_'))
ret = obj
return(ret)
def exists(userId, bucket, archiveId, session=None):
if not session:
session = db.Session
ret = {}
result = session.query(ArchiveDocument.userId, ArchiveDocument.bucket, ArchiveDocument.archiveId).filter_by(userId=userId, bucket=bucket, archiveId=archiveId).first()
from anchore_engine.subsys import logger
if result:
for i in range(0, len(result.keys())):
k = result.keys()[i]
ret[k] = result[i]
#obj = dict((key,value) for key, value in vars(result).iteritems() if not key.startswith('_'))
#ret = obj
return(ret)
def list_all(session=None, **dbfilter):
if not session:
session = db.Session
ret = []
results = session.query(ArchiveDocument.bucket, ArchiveDocument.archiveId, ArchiveDocument.userId, ArchiveDocument.record_state_key, ArchiveDocument.record_state_val, ArchiveDocument.created_at, ArchiveDocument.last_updated).filter_by(**dbfilter)
for result in results:
obj = {}
for i in range(0,len(result.keys())):
k = result.keys()[i]
obj[k] = result[i]
if obj:
ret.append(obj)
return(ret)
def list_all_byuserId(userId, session=None, **dbfilter):
if not session:
session = db.Session
ret = []
dbfilter['userId'] = userId
results = session.query(ArchiveDocument.bucket, ArchiveDocument.archiveId, ArchiveDocument.userId, ArchiveDocument.record_state_key, ArchiveDocument.record_state_val, ArchiveDocument.created_at, ArchiveDocument.last_updated).filter_by(**dbfilter)
for result in results:
obj = {}
for i in range(0,len(result.keys())):
k = result.keys()[i]
obj[k] = result[i]
if obj:
ret.append(obj)
return(ret)
def update(userId, bucket, archiveId, documentName, inobj, session=None):
return(add(userId, bucket, archiveId, documentName, inobj, session=session))
def delete_byfilter(userId, remove=True, session=None, **dbfilter):
if not session:
session = db.Session
ret = False
results = session.query(ArchiveDocument).filter_by(**dbfilter)
if results:
for result in results:
if remove:
session.delete(result)
else:
result.update({"record_state_key": "to_delete", "record_state_val": str(time.time())})
ret = True
return(ret)
def delete(userId, bucket, archiveId, remove=True, session=None):
if not session:
session = db.Session
result = session.query(ArchiveDocument).filter_by(userId=userId, bucket=bucket, archiveId=archiveId).first()
if result:
if remove:
session.delete(result)
else:
result.update({"record_state_key": "to_delete", "record_state_val": str(time.time())})
return(True)
|
[
"nurmi@anchore.com"
] |
nurmi@anchore.com
|
079c862affe8e445280aaa0c46eb37b192e9b4c3
|
ffe59803cd35129ea317a53b2b4a754bfb9a200d
|
/longest_common_prefix.py
|
6591f14b796eff634370383a695dd8b356ff4109
|
[] |
no_license
|
codyowl/leetcode
|
78a7a96c54d6592c34987620793eed3dcf1fe1fd
|
706924944c6b8d94a7247de13ffb9b1d715496fd
|
refs/heads/master
| 2020-06-16T10:19:09.477053
| 2019-08-30T13:37:14
| 2019-08-30T13:37:14
| 195,537,374
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,353
|
py
|
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
prefix_first = []
if len(strs) == 1:
for data in strs:
if len(data)>0:
return strs[0][0]
else:
return ""
else:
none_finder = [data for data in strs if len(data)>0]
if len(none_finder) != len(strs):
return ""
else:
for data in strs:
prefix_first.append(data[0])
#check whether atelast the first letter is same or not
if not len(set(prefix_first)) == 1:
return ""
else:
min_length = min([len(d) for d in strs])
# truncating the list based on minimum length of words
trun_list = [data[0:min_length+1] for data in strs]
prefix_list = []
# to get all the letters from words
for i in range(min_length):
prefix_list.append([data[i] for data in trun_list])
final_str = ""
for data in prefix_list:
if len(set(data)) == 1:
final_str += data[0]
else:
break
return final_str
s = Solution()
# tweaked after seeing this input on test case
# print s.longestCommonPrefix(["", ""])
# # tweaked after seeing this input on test case
# print s.longestCommonPrefix([""])
# # tweaked afet seeing this input on test case
# print s.longestCommonPrefix(["", "b"])
print s.longestCommonPrefix(["abab","aba",""])
|
[
"codyowl@gmail.com"
] |
codyowl@gmail.com
|
ccb13a5e3d845bf09eec3fb40555e403ece082c2
|
677d142be25f5904b4ab418ce5ffa1387fe0f695
|
/app/config.py
|
a0f945c9dbd2036423157c7b61e20d7f0375030d
|
[
"MIT"
] |
permissive
|
mfannick/pitch
|
ee2ded6a70d6f26e5fa34f635ca5821766006b20
|
af321e36d2ad23bc2129b4d6cc41c7779bdea063
|
refs/heads/master
| 2022-10-12T16:07:54.439498
| 2019-09-30T07:53:03
| 2019-09-30T07:53:03
| 210,202,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,367
|
py
|
import os
class Config:
'''
General configuration parent class
'''
SECRET_KEY ='Fannick'
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://annick:escofavi@localhost/pitch'
UPLOADED_PHOTOS_DEST ='app/static/photos'
# simple mde configurations
SIMPLEMDE_JS_IIFE = True
SIMPLEMDE_USE_CDN = True
# email configurations
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
@staticmethod
def init_app(app):
pass
class ProdConfig(Config):
'''
Production configuration child class
Args:
Config: The parent configuration class with General configuration settings
'''
pass
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class TestConfig(Config):
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://annick:escofavi@localhost/watchlist_test'
class DevConfig(Config):
'''
Development configuration child class
Args:
Config: The parent configuration class with General configuration settings
'''
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://annick:escofavi@localhost/pitch'
DEBUG = True
config_options = {
'development':DevConfig,
'production':ProdConfig,
'test':TestConfig
}
|
[
"mfannick1@gmail.com"
] |
mfannick1@gmail.com
|
1d6d23ca4b07fa9ea47cddf4a29db00d629a4c56
|
7c081ac61722f11de1758c9701662f85c1bb802c
|
/pysigep/webservices/webservice_base.py
|
270eeb115b16b5f8f51c39007bedbef3ee9fb66f
|
[
"MIT"
] |
permissive
|
trocafone/pysigep
|
8a3ad14febce45bc54aea721b481c87bfcd92f50
|
a899fb85e9195ac8686313e20c8bec7c03bde198
|
refs/heads/develop
| 2021-01-16T22:07:06.093299
| 2016-07-05T13:57:20
| 2016-07-05T13:57:20
| 62,578,970
| 1
| 0
| null | 2016-07-05T13:57:22
| 2016-07-04T17:40:17
|
Python
|
UTF-8
|
Python
| false
| false
| 2,803
|
py
|
# -*- coding: utf-8 -*-
# #############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Michell Stuttgart
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
###############################################################################
import xml.etree.cElementTree as Et
import requests
from pysigep import sigep_exceptions
class WebserviceBase(object):
def __init__(self, url):
self._url = url
@property
def url(self):
return self._url
def request(self, obj_param, ssl_verify=False):
try:
resposta = requests.post(self.url, data=obj_param.get_data(),
headers={'Content-type': 'text/xml'},
verify=ssl_verify)
if not resposta.ok:
msg = self.parse_error(resposta.text.encode('utf8'))
raise sigep_exceptions.ErroValidacaoXML(msg)
# Criamos um response dinamicamente para cada tipo de classe
response = obj_param.response_class_ref()
response.status_code = resposta.status_code
response.encoding = resposta.encoding
response.xml = resposta.text.encode('utf8')
response.body_request = resposta.request.body
return response
except requests.ConnectionError as exc:
raise sigep_exceptions.ErroConexaoComServidor(exc.message)
except requests.Timeout as exc:
raise sigep_exceptions.ErroConexaoTimeOut(exc.message)
except requests.exceptions.RequestException as exc:
raise sigep_exceptions.ErroRequisicao(exc.message)
def parse_error(self, xml):
return Et.fromstring(xml).findtext('.//faultstring')
|
[
"michellstut@gmail.com"
] |
michellstut@gmail.com
|
716098a8f7469e8ffcbdd834a9aae73b196fa55b
|
5efc1623d9e06d9b0caa104630d2b5d7610fb19d
|
/learn/deep_reinforcement_learning_course/deep_q_doom.py
|
d44d9b1f72a46b6b1336e6e4f9410ed328773ead
|
[] |
no_license
|
techyajay/phd
|
20fd01535b5147b7ef86aa19f6683fa01dca4404
|
a1348bb6645a67a1f09aef7155c0db1720291bb6
|
refs/heads/master
| 2020-04-13T19:37:35.060245
| 2018-12-18T13:44:30
| 2018-12-18T13:44:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,915
|
py
|
"""Deep Q learning for the game Doom.
See: https://medium.freecodecamp.org/an-introduction-to-deep-q-learning-lets-play-doom-54d02d8017d8
"""
import pathlib
import typing
import skimage
import vizdoom
from absl import app
from absl import flags
from absl import logging
from labm8 import bazelutil
FLAGS = flags.FLAGS
flags.DEFINE_string(
'doom_config',
str(bazelutil.DataPath(
'phd/learn/deep_reinforcement_learning_course/data/doom_config.cfg')),
'Path to Doom config file.')
flags.DEFINE_string(
'doom_scenario',
str(bazelutil.DataPath(
'phd/learn/deep_reinforcement_learning_course/data/doom_scenario.wad')),
'Path to Doom scenario file.')
def CreateEnvironment(
config_path: typing.Optional[pathlib.Path] = None,
scenario_path: typing.Optional[pathlib.Path] = None
) -> typing.Tuple[None, typing.List[typing.List[int]]]:
"""Create the Doom game environment.
Returns:
A tuple of the environment and action space.
"""
config_path = config_path or FLAGS.doom_config
scenario_path = scenario_path or FLAGS.doom_scenario
game = vizdoom.DoomGame()
game.load_config(config_path)
game.set_doom_scenario_path(scenario_path)
game.init()
left = [1, 0, 0]
right = [0, 1, 0]
shoot = [0, 0, 1]
possible_actions = [left, right, shoot]
return game, possible_actions
def PreprocessFrame(frame):
# Crop the screen (remove the roof because it contains no information).
cropped_frame = frame[30:-10, 30:-30]
# Normalize Pixel Values.
normalized_frame = cropped_frame / 255.0
# Resize.
preprocessed_frame = skimage.transform.resize(normalized_frame, [84, 84])
return preprocessed_frame
def main(argv: typing.List[str]):
"""Main entry point."""
if len(argv) > 1:
raise app.UsageError("Unknown arguments: '{}'.".format(' '.join(argv[1:])))
logging.info('Done.')
if __name__ == '__main__':
app.run(main)
|
[
"chrisc.101@gmail.com"
] |
chrisc.101@gmail.com
|
24bef4da51b7e473f483cd9fe30280a5be5bb0ea
|
d6851011cf9669036078a848a55f3dab7528bbd1
|
/tests/mock_commands/ghdl
|
806201636fad5ce30b2824afa73edbd89f47b119
|
[
"BSD-2-Clause"
] |
permissive
|
msfschaffner/edalize
|
442743f6c2f41da034482b69c5276efd1a6a40ad
|
d6757f36c6fc804f1876c907d0df55485937c0f2
|
refs/heads/master
| 2022-11-30T15:52:48.724111
| 2020-04-13T16:33:20
| 2020-04-16T10:35:00
| 256,582,248
| 1
| 0
|
BSD-2-Clause
| 2020-04-17T18:38:44
| 2020-04-17T18:38:43
| null |
UTF-8
|
Python
| false
| false
| 441
|
#!/usr/bin/env python
import os
import sys
cmd_file = 'analyze.cmd' if sys.argv[1] == '-i' else 'elab-run.cmd'
with open(cmd_file, 'a') as f:
f.write(' '.join(sys.argv[1:]) + '\n')
if sys.argv[1] == '-i':
for arg in sys.argv:
if arg.startswith('--std'):
std = arg.split('=')[1]
output_file = 'work-obj'+std+'.cf'
with open(output_file, 'a'):
os.utime(output_file, None)
|
[
"olof.kindgren@gmail.com"
] |
olof.kindgren@gmail.com
|
|
8232a25f494ea58fe5f4743fe20ae90eea3baca4
|
087f7ec4bb11bca64f29eac49df1104d885067b4
|
/midiscenemanager/midiio.py
|
cf0f27661d1cb8cb11b1990db844f4a85846ac42
|
[
"MIT"
] |
permissive
|
SpotlightKid/midiscenemanager
|
38dd7e5299fc97fae3de6e8ecfde934c26974da0
|
a4f9268ba73f575d5d3313eaf256eb9cebdcbdd0
|
refs/heads/master
| 2023-06-01T15:21:16.520511
| 2023-05-21T14:44:33
| 2023-05-21T14:44:33
| 111,223,358
| 2
| 0
| null | 2017-11-26T15:26:59
| 2017-11-18T17:06:18
|
Python
|
UTF-8
|
Python
| false
| false
| 6,597
|
py
|
# -*- coding: utf-8 -*-
#
# midiio.py
#
"""Wrapper clas for rtmidi.MidiOut to facilitate sending common MIDI events."""
import binascii
import rtmidi
from rtmidi.midiconstants import *
from rtmidi.midiutil import open_midioutput
from .sequencer import SequencerThread
def parse_sysex_string(s):
return binascii.unhexlify(s.replace(' ', ''))
class MidiOutWrapper:
def __init__(self, midi, name, ch=1):
self.channel = ch
self.midi = midi
self.name = name
@property
def midi(self):
return self._midi
@midi.setter
def midi(self, obj):
if hasattr(self, '_midi'):
with self._midi.lock:
self._midi.midiout = obj
else:
self._midi = SequencerThread(obj)
self._midi.start()
def _cleanup(self):
self.midi.stop()
self.midi.midiout.close_port()
def send_channel_message(self, status, data1=None, data2=None, ch=None, delay=0):
"""Send a MIDI channel mode message."""
msg = [(status & 0xf0) | ((ch if ch else self.channel) - 1 & 0xF)]
if data1 is not None:
msg.append(data1 & 0x7F)
if data2 is not None:
msg.append(data2 & 0x7F)
self.midi.add(msg, delta=delay)
def send_system_common_message(self, status=0xF7, data1=None, data2=None):
msg = [status & 0xF7]
if msg[0] in (0xF1, 0xF2, 0xF3):
msg.append(data1 & 0x7F)
if msg[0] == 0xF2:
msg.append(data2 & 0x7F)
self.midi.add(msg, delta=delay)
def send_system_realtime_message(self, status=0xF8):
self.midi.add([status & 0xF7], delta=1)
def send_system_exclusive(self, value=""):
msg = parse_sysex_string(value)
if (msg and msg.startswith(b'\xF0') and msg.endswith(b'\xF7') and
all((val < 128 for val in msg[1:-1]))):
self.midi.add(msg, delta=delay)
else:
raise ValueError("Invalid sysex string: %s", msg)
def send_note_off(self, note=60, velocity=0, ch=None, delay=0):
"""Send a 'Note Off' message."""
self.send_channel_message(NOTE_OFF, note, velocity, ch=ch, delay=delay)
def send_note_on(self, note=60, velocity=127, ch=None, delay=0):
"""Send a 'Note On' message."""
self.send_channel_message(NOTE_ON, note, velocity, ch=ch, delay=delay)
def send_poly_pressure(self, note=60, value=0, ch=None, delay=0):
"""Send a 'Polyphonic Pressure' (Aftertouch) message."""
self.send_channel_message(POLY_PRESSURE, note, value, ch=ch, delay=delay)
def send_control_change(self, cc=0, value=0, ch=None, delay=0):
"""Send a 'Control Change' message."""
self.send_channel_message(CONTROL_CHANGE, cc, value, ch=ch, delay=delay)
def send_program_change(self, program=0, ch=None, delay=0):
"""Send a 'Program Change' message."""
self.send_channel_message(PROGRAM_CHANGE, program, ch=ch, delay=delay)
def send_channel_pressure(self, value=0, ch=None, delay=0):
"""Send a 'Polyphonic Pressure' (Aftertouch) message."""
self.send_channel_message(CHANNEL_PRESSURE, value, ch=ch, delay=delay)
def send_pitch_bend(self, value=8192, ch=None, delay=0):
"""Send a 'Program Change' message."""
self.send_channel_message(PITCH_BEND, value & 0x7f, (value >> 7) & 0x7f, ch=ch,
delay=delay)
def send_bank_select(self, bank=None, msb=None, lsb=None, ch=None, delay=0):
"""Send 'Bank Select' MSB and/or LSB 'Control Change' messages."""
if bank is not None:
msb = (bank << 7) & 0x7F
lsb = bank & 0x7F
if msb is not None:
self.send_control_change(BANK_SELECT_MSB, msb, ch=ch, delay=delay)
if lsb is not None:
self.send_control_change(BANK_SELECT_LSB, lsb, ch=ch, delay=delay)
def send_modulation(self, value=0, ch=None, delay=0):
"""Send a 'Modulation' (CC #1) 'Control Change' message."""
self.send_control_change(MODULATION, value, ch=ch, delay=delay)
def send_breath_controller(self, value=0, ch=None, delay=0):
"""Send a 'Breath Controller' (CC #3) 'Control Change' message."""
self.send_control_change(BREATH_CONTROLLER, value, ch=ch, delay=delay)
def send_foot_controller(self, value=0, ch=None, delay=0):
"""Send a 'Foot Controller' (CC #4) 'Control Change' message."""
self.send_control_change(FOOT_CONTROLLER, value, ch=ch, delay=delay)
def send_channel_volume(self, value=127, ch=None, delay=0):
"""Send a 'Volume' (CC #7) 'Control Change' message."""
self.send_control_change(CHANNEL_VOLUME, value, ch=ch, delay=delay)
def send_balance(self, value=63, ch=None, delay=0):
"""Send a 'Balance' (CC #8) 'Control Change' message."""
self.send_control_change(BALANCE, value, ch=ch, delay=delay)
def send_pan(self, value=63, ch=None, delay=0):
"""Send a 'Pan' (CC #10) 'Control Change' message."""
self.send_control_change(PAN, value, ch=ch, delay=delay)
def send_expression(self, value=127, ch=None, delay=0):
"""Send a 'Expression' (CC #11) 'Control Change' message."""
self.send_control_change(EXPRESSION_CONTROLLER, value, ch=ch, delay=delay)
def send_all_sound_off(self, ch=None, delay=0):
"""Send a 'All Sound Off' (CC #120) 'Control Change' message."""
self.send_control_change(ALL_SOUND_OFF, 0, ch=ch, delay=delay)
def send_reset_all_controllers(self, ch=None, delay=0):
"""Send a 'All Sound Off' (CC #121) 'Control Change' message."""
self.send_control_change(RESET_ALL_CONTROLLERS, 0, ch=ch, delay=delay)
def send_local_control(self, value=1, ch=None, delay=0):
"""Send a 'Local Control On/Off' (CC #122) 'Control Change' message."""
self.send_control_change(EXPRESSION_CONTROLLER, 0, ch=ch, delay=delay)
def send_all_notes_off(self, ch=None, delay=0):
"""Send a 'All Notes Off' (CC #123) 'Control Change' message."""
self.send_control_change(ALL_NOTES_OFF, 0, ch=ch, delay=delay)
# add more convenience methods for other common MIDI events here...
def get_midiout(port, api="UNSPECIFIED"):
api = getattr(rtmidi, 'API_' + api)
midiout, name = open_midioutput(port, api=api, interactive=False, use_virtual=False)
return MidiOutWrapper(midiout, name)
def get_midiout_ports(api="UNSPECIFIED"):
mo = rtmidi.MidiOut(rtapi=getattr(rtmidi, 'API_' + api))
return sorted(mo.get_ports())
|
[
"chris@chrisarndt.de"
] |
chris@chrisarndt.de
|
edaf6548d496e07a077b970fcdf68d7076a424c7
|
716d9e678c884fd9e9f07bbf57c7a0ec684f8255
|
/foodboxes/app_items/migrations/0001_initial.py
|
8934997852b104c017f9a76a5e7ef50064a6e946
|
[] |
no_license
|
arifgafizov/foodboxes_v.2.0
|
e6716ba3ab3c0dd77bac212b90db8d710f46d495
|
1093a520e391fd409ba18bab341d6ffbec1104c7
|
refs/heads/master
| 2023-02-17T21:20:32.491143
| 2021-01-21T19:57:26
| 2021-01-21T19:57:26
| 330,394,733
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 767
|
py
|
# Generated by Django 3.1.5 on 2021-01-10 16:33
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('description', models.TextField()),
('image', models.ImageField(default=None, null=True, upload_to='items_images')),
('weight', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=13)),
],
),
]
|
[
"agafizov@gmail.com"
] |
agafizov@gmail.com
|
0006a8010b47859f57692841a577d972fa0ffe63
|
fcd02cbf7fae38f0b0d6a95deedc49d5993927da
|
/models/backbone/__init__.py
|
5d8d9e12d55ba6f87789786da9b2fb6e2c00b36a
|
[
"Apache-2.0"
] |
permissive
|
Highlightbeast/DBNet.pytorch
|
7d5082c66516319fb8a02855c1a03be6c122a143
|
d95a7dbd37b031f2cf1ca33c63f5658d29803242
|
refs/heads/master
| 2022-09-21T02:51:33.104828
| 2020-06-05T07:28:01
| 2020-06-05T07:28:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 786
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/8/23 21:54
# @Author : zhoujun
__all__ = ['build_backbone']
from .resnet import *
from .resnest import *
from .shufflenetv2 import *
from .MobilenetV3 import MobileNetV3
support_backbone = ['resnet18', 'deformable_resnet18', 'deformable_resnet50',
'resnet50', 'resnet34', 'resnest101', 'resnet152',
'resnest50', 'resnest101', 'resnest200', 'resnest269',
'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0', 'shufflenet_v2_x1_5', 'shufflenet_v2_x2_0',
'MobileNetV3']
def build_backbone(backbone_name, **kwargs):
assert backbone_name in support_backbone, f'all support backbone is {support_backbone}'
backbone = eval(backbone_name)(**kwargs)
return backbone
|
[
"572459439@qq.com"
] |
572459439@qq.com
|
abcd8f62143d26c371224047ec2538617405b71b
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/pg_1627+006/sdB_pg_1627+006_lc.py
|
3d8cb550156a2ce741e67bc7f9c22da1c2ea971c
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[247.399583,0.530394], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_pg_1627+006/sdB_pg_1627+006_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
d06d2e0a911f5010f8561348d3ab54923e923e31
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_059/ch86_2020_06_22_20_42_01_067760.py
|
2a606c4d7a824d5d1b2a4313ad94567cbeff57d6
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 941
|
py
|
with open('criptografado.txt', 'r') as arquivo:
conteudo = arquivo.read()
for i in conteudo:
if i == 's':
with open('criptografado.txt', 'w') as arquivo2:
arquivo2.write(conteudo.replace(i, 'z'))
if i == 'a':
with open('criptografado.txt', 'w') as arquivo2:
arquivo2.write(conteudo.replace(i, 'e'))
if i == 'r':
with open('criptografado.txt', 'w') as arquivo2:
arquivo2.write(conteudo.replace(i, 'b'))
if i == 'b':
with open('criptografado.txt', 'w') as arquivo2:
arquivo2.write(conteudo.replace(i, 'r'))
if i == 'e':
with open('criptografado.txt', 'w') as arquivo2:
arquivo2.write(conteudo.replace(i, 'a'))
if i == 'z':
with open('criptografado.txt', 'w') as arquivo2:
arquivo2.write(conteudo.replace(i, 's'))
|
[
"you@example.com"
] |
you@example.com
|
461ccf1fcf588f87ddd2359e410e5de3eddd855e
|
ad113ffed76e72ed0a881a7a6d6a74ea9021e5bf
|
/tests_compare.py
|
09f259dd46dda07db612be2449e8522ef61f31b4
|
[] |
no_license
|
biletboh/bitexchange
|
03c0bfc04e2f103928c173f014a75b6ceea0def9
|
8d541d6bb82f5e3ff4c71cb65b609503ba6b9417
|
refs/heads/master
| 2021-01-22T10:46:46.717291
| 2017-05-30T14:43:37
| 2017-05-30T14:43:37
| 92,656,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,388
|
py
|
import time
import os
import configparser
from bitfinex.client import TradeClient
from exmoclient import ExmoTradeClient
from compare import compare_exchange
# Set up configuration
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
conf = configparser.ConfigParser()
conf.read(os.path.join(BASE_DIR, 'settings.ini'))
# Get API keys
bitfinex_api = conf.get('bitexchange', 'BITFINEX_API_KEY')
bitfinex_secret = conf.get('bitexchange', 'BITFINEX_API_SECRET')
exmo_api = conf.get('bitexchange', 'EXMO_API_KEY')
exmo_secret = conf.get('bitexchange', 'EXMO_API_SECRET')
# Set up bitfinex Trade Client
tradeclient = TradeClient(bitfinex_api, bitfinex_secret)
# Set up Exmo Trade Client
tradeclient2 = ExmoTradeClient(exmo_api, exmo_secret)
# Simple Tests
print("Run compare algorithm simple tests")
# second exchange is cheaper than first
bitfinex_data = [2300, 2310]
exmo_data = [2000, 2010]
print('Test 1:')
compare_exchange(tradeclient, tradeclient2, bitfinex_data, exmo_data)
# first exchange is cheaper than second
bitfinex_data = [2000, 2010] # data is in a format [bid, ask]
exmo_data = [2300, 2310]
print('Test 2:')
compare_exchange(tradeclient, tradeclient2, bitfinex_data, exmo_data)
# an exchange difference is below 1.5%
bitfinex_data = [2000, 2010]
exmo_data = [2020, 2030]
print('Test 3:')
compare_exchange(tradeclient, tradeclient2, bitfinex_data, exmo_data)
|
[
"biletskyboh@gmail.com"
] |
biletskyboh@gmail.com
|
8db79c9a273b1cf103453d785a36dac40873619a
|
928c53ea78be51eaf05e63f149fb291ec48be73e
|
/Linked_List_Cycle_II.py
|
38b7ef80851bbce59fda03879bef014eaff77462
|
[] |
no_license
|
saurabhchris1/Algorithm-Pratice-Questions-LeetCode
|
35021d8fc082ecac65d7970d9f83f9be904fb333
|
ea4a7d6a78d86c8619f91a75594de8eea264bcca
|
refs/heads/master
| 2022-12-10T16:50:50.678365
| 2022-12-04T10:12:18
| 2022-12-04T10:12:18
| 219,918,074
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,226
|
py
|
# Given a linked list, return the node where the cycle begins.
# If there is no cycle, return null.
#
# There is a cycle in a linked list if there is some node in
# the list that can be reached again by continuously following
# the next pointer. Internally, pos is used to denote the index
# of the node that tail's next pointer is connected to. Note that pos is not passed as a parameter.
# Notice that you should not modify the linked list.
#
# Input: head = [3,2,0,-4], pos = 1
# Output: tail connects to node index 1
# Explanation: There is a cycle in the linked list, where tail connects to the second node.
class Solution:
    """Find the node where a cycle begins in a singly linked list.

    Uses Floyd's tortoise-and-hare: once slow and fast meet inside the
    cycle, restarting one pointer at the head and stepping both one node
    at a time makes them meet exactly at the cycle's entry node.
    """

    def detectCycle(self, head):
        """Return the first node of the cycle, or None if the list is acyclic."""
        if head is None:
            return None
        meeting = self.findIntersection(head)
        if meeting is None:
            return None
        # Advance one pointer from the head and one from the meeting
        # point in lockstep; they converge on the cycle's entry.
        a, b = head, meeting
        while a != b:
            a, b = a.next, b.next
        return a

    def findIntersection(self, head):
        """Return some node inside the cycle (slow/fast meeting point), or None."""
        slow = fast = head
        while fast and fast.next:
            slow, fast = slow.next, fast.next.next
            if slow == fast:
                return fast
        return None
|
[
"saurabhchris1@gmail.com"
] |
saurabhchris1@gmail.com
|
724d42efc164f460aadf422bde792465cd5a9eb8
|
d4129d743b958e6ed71af445c0dd7baa7f2ad6e4
|
/teambeat/admin.py
|
3be24de4fad0c0b5aac5759d5cef320a24ead2c2
|
[] |
no_license
|
astromitts/team-beat
|
f2077bdeaa457bb8cd11094f14a75bdf170a9b0e
|
a49608890e4fe2b238cbec9c0e3d9629aae51c55
|
refs/heads/main
| 2023-08-10T16:11:14.231042
| 2020-12-09T14:20:04
| 2020-12-09T14:20:04
| 319,043,973
| 0
| 0
| null | 2021-09-22T19:42:46
| 2020-12-06T13:48:36
|
Python
|
UTF-8
|
Python
| false
| false
| 439
|
py
|
from django.contrib import admin
from teambeat.models import (
    Organization,
    OrganizationInvitation,
    OrganizationUser,
    Team,
    TeamAdmin,
    TeamMember,
    TeamMemberStatus
)
# Expose the teambeat models in the Django admin with default options
# (no custom ModelAdmin classes are needed for any of them).
admin.site.register(Organization)
admin.site.register(OrganizationUser)
admin.site.register(OrganizationInvitation)
admin.site.register(Team)
admin.site.register(TeamAdmin)
admin.site.register(TeamMember)
admin.site.register(TeamMemberStatus)
|
[
"morinbe@gmail.com"
] |
morinbe@gmail.com
|
f4f16e0aed94288f25b1a7cbcc39162959543704
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4194/codes/1644_2711.py
|
77e78d3042d279f2c36b818186c04f26f7708ed1
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
# Read the available budget, then the quantities and unit prices of
# tickets and bus passes, and report whether the budget covers the total.
valor = float(input("Qual o seu valor disponivel?"))
tickets = int(input("Quantos tickets deseja comprar?"))
vt = float(input("Quanto custa um ticket?"))
passes = int(input("Quantos passes de onibus deseja comprar?"))
vp = float(input("Valor dos passes"))

custo_total = tickets * vt + passes * vp
print("SUFICIENTE" if valor >= custo_total else "INSUFICIENTE")
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
e74457c0c58813616881e9d64c3a8320e2e88c3e
|
d8b1362113e4f3302ab1d04d5f57c1b4c8c44b6a
|
/leetcode_py2/Medium 73. Set Matrix Zeroes.py
|
18cf03f8dbfd6347ba627f0749d6d0ea7e565484
|
[] |
no_license
|
mohki890/Danny-Huang
|
06d12b2e1ac110684cbf114db46079cda5a380eb
|
3eb27b793b5a819f3fb5e166b02e04d593f1bf37
|
refs/heads/master
| 2020-05-16T00:22:11.602739
| 2019-03-29T15:00:14
| 2019-03-29T15:00:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,596
|
py
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
This module is provided by
Authors: hxk11111
Date: 2019/1/5
File: Medium 73. Set Matrix Zeroes.py
"""
'''
Given a m x n matrix, if an element is 0, set its entire row and column to 0. Do it in-place.
Example 1:
Input:
[
[1,1,1],
[1,0,1],
[1,1,1]
]
Output:
[
[1,0,1],
[0,0,0],
[1,0,1]
]
Example 2:
Input:
[
[0,1,2,0],
[3,4,5,2],
[1,3,1,5]
]
Output:
[
[0,0,0,0],
[0,4,5,0],
[0,3,1,0]
]
Follow up:
A straight forward solution using O(mn) space is probably a bad idea.
A simple improvement uses O(m + n) space, but still not the best solution.
Could you devise a constant space solution?
'''
class Solution(object):
    """LeetCode 73: zero out every row and column that contains a 0, in place."""

    def setZeroes(self, matrix):
        """
        :type matrix: List[List[int]]
        :rtype: void Do not return anything, modify matrix in-place instead.

        Two passes: first record which rows/columns contain a zero, then
        blank them.  O(m*n) time, O(m+n) space.  This replaces the
        original approach, which re-scanned an entire row and column
        (marking cells with a "." sentinel) for every zero found —
        O(m*n*(m+n)) in the worst case — and crashed on an empty matrix.
        """
        rows = len(matrix)
        cols = len(matrix[0]) if rows else 0
        zero_rows = set()
        zero_cols = set()
        for r in range(rows):
            for c in range(cols):
                if matrix[r][c] == 0:
                    zero_rows.add(r)
                    zero_cols.add(c)
        for r in range(rows):
            for c in range(cols):
                if r in zero_rows or c in zero_cols:
                    matrix[r][c] = 0
if __name__ == '__main__':
    # Quick manual check using the second example from the problem
    # statement (Python 2 file: note the bare `print` statement below).
    s = Solution()
    l = [
        [0, 1, 2, 0],
        [3, 4, 5, 2],
        [1, 3, 1, 5]
    ]
    s.setZeroes(l)
    print l
|
[
"huangxiangkai@baidu.com"
] |
huangxiangkai@baidu.com
|
a395b580244b142aed88453bc740a4d78ac26421
|
074e2815a0c3dbb03cae346560c27e409a4444e4
|
/drivers/ssd1351/ssd1351_16bit.py
|
9d2c75c185149003b96bc9cac270e8e172672775
|
[
"MIT"
] |
permissive
|
maysrp/micropython-nano-gui
|
9127e1cbb024810ac2920c8227a448d6cf4678b1
|
5dbcc65828106cfb15c544bb86cc7380a9a83c47
|
refs/heads/master
| 2023-01-19T18:06:52.286059
| 2020-11-29T10:26:11
| 2020-11-29T10:26:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,266
|
py
|
# SSD1351_16bit.py MicroPython driver for Adafruit color OLED displays.
# Adafruit 1.5" 128*128 OLED display: https://www.adafruit.com/product/1431
# Adafruit 1.27" 128*96 display https://www.adafruit.com/product/1673
# For wiring details see drivers/ADAFRUIT.md in this repo.
# This driver is based on the Adafruit C++ library for Arduino
# https://github.com/adafruit/Adafruit-SSD1351-library.git
# The MIT License (MIT)
# Copyright (c) 2019 Peter Hinch
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import framebuf
import utime
import gc
import micropython
from uctypes import addressof
import sys
# https://github.com/peterhinch/micropython-nano-gui/issues/2
# The ESP32 does not work reliably in SPI mode 1,1. Waveforms look correct.
# Keep 0,0 on STM as testing was done in that mode.
_bs = 0 if sys.platform == 'esp32' else 1 # SPI bus state
# Initialisation commands in cmd_init:
# 0xfd, 0x12, 0xfd, 0xb1, # Unlock command mode
# 0xae, # display off (sleep mode)
# 0xb3, 0xf1, # clock div
# 0xca, 0x7f, # mux ratio
# 0xa0, 0x74, # setremap 0x74
# 0x15, 0, 0x7f, # setcolumn
# 0x75, 0, 0x7f, # setrow
# 0xa1, 0, # set display start line
# 0xa2, 0, # displayoffset
# 0xb5, 0, # setgpio
# 0xab, 1, # functionselect: serial interface, internal Vdd regulator
# 0xb1, 0x32, # Precharge
# 0xbe, 0x05, # vcommh
# 0xa6, # normaldisplay
# 0xc1, 0xc8, 0x80, 0xc8, # contrast abc
# 0xc7, 0x0f, # Master contrast
# 0xb4, 0xa0, 0xb5, 0x55, # set vsl (see datasheet re ext circuit)
# 0xb6, 1, # Precharge 2
# 0xaf, # Display on
# SPI baudrate: Pyboard can produce 10.5MHz or 21MHz. Datasheet gives max of 20MHz.
# Attempt to use 21MHz failed but might work on a PCB or with very short leads.
class SSD1351(framebuf.FrameBuffer):
    """MicroPython driver for SSD1351-based colour OLEDs (128x128 / 128x96).

    Keeps a full RGB565 frame buffer in RAM and pushes it to the display
    over SPI, one line at a time, in show().
    """

    # Convert r, g, b in range 0-255 to a 16 bit colour value RGB565
    # acceptable to hardware: rrrrrggggggbbbbb
    @staticmethod
    def rgb(r, g, b):
        return ((r & 0xf8) << 5) | ((g & 0x1c) << 11) | (b & 0xf8) | ((g & 0xe0) >> 5)

    def __init__(self, spi, pincs, pindc, pinrs, height=128, width=128):
        """Reset the controller, send the init sequence and clear the screen.

        spi: configured SPI bus.  pincs/pindc/pinrs: chip-select,
        data/command and reset pins (callables taking 0 or 1).
        """
        if height not in (96, 128):
            raise ValueError('Unsupported height {}'.format(height))
        self.spi = spi
        self.rate = 11000000 # See baudrate note above.
        self.pincs = pincs
        self.pindc = pindc # 1 = data 0 = cmd
        self.height = height # Required by Writer class
        self.width = width
        # Save color mode for use by writer_gui (blit)
        self.mode = framebuf.RGB565
        gc.collect()
        # 2 bytes per pixel (RGB565).
        self.buffer = bytearray(self.height * self.width * 2)
        super().__init__(self.buffer, self.width, self.height, self.mode)
        self.mvb = memoryview(self.buffer)
        pinrs(0) # Pulse the reset line
        utime.sleep_ms(1)
        pinrs(1)
        utime.sleep_ms(1)
        # See above comment to explain this allocation-saving gibberish.
        self._write(b'\xfd\x12\xfd\xb1\xae\xb3\xf1\xca\x7f\xa0\x74'\
        b'\x15\x00\x7f\x75\x00\x7f\xa1\x00\xa2\x00\xb5\x00\xab\x01'\
        b'\xb1\x32\xbe\x05\xa6\xc1\xc8\x80\xc8\xc7\x0f'\
        b'\xb4\xa0\xb5\x55\xb6\x01\xaf', 0)
        self.show()
        gc.collect()

    def _write(self, mv, dc):
        """Send a buffer over SPI; dc selects data (1) or command (0) mode."""
        self.spi.init(baudrate=self.rate, polarity=_bs, phase=_bs)
        self.pincs(1)
        self.pindc(dc)
        self.pincs(0)
        self.spi.write(bytes(mv))
        self.pincs(1)

    # Write lines from the framebuf out of order to match the mapping of the
    # SSD1351 RAM to the OLED device.
    def show(self):
        """Copy the frame buffer to the display, line by line."""
        mvb = self.mvb
        bw = self.width * 2 # Width in bytes
        self._write(b'\x5c', 0) # Enable data write
        if self.height == 128:
            for l in range(128):
                l0 = (95 - l) % 128 # 95 94 .. 1 0 127 126 .. 96
                start = l0 * self.width * 2
                self._write(mvb[start : start + bw], 1) # Send a line
        else:
            # 96-row panel: the controller RAM is still addressed as 128
            # lines, so some buffer lines are repeated/remapped.
            for l in range(128):
                if l < 64:
                    start = (63 -l) * self.width * 2 # 63 62 .. 1 0
                elif l < 96:
                    start = 0
                else:
                    start = (191 - l) * self.width * 2 # 127 126 .. 95
                self._write(mvb[start : start + bw], 1) # Send a line
|
[
"peter@hinch.me.uk"
] |
peter@hinch.me.uk
|
6c459b34f8f0ab75e9f04a8db8d170bea67c1736
|
39e647e9ec8524a1cee90ef15f37a3d3bbf8ac43
|
/poet/trunk/pythonLibs/Imaging-1.1.7/Scripts/pilconvert.py
|
71b1fe3830cf45b22540f16176b2ca141291253c
|
[
"LicenseRef-scancode-secret-labs-2011"
] |
permissive
|
AgileAdaptiveTools/POETTools
|
85158f043e73b430c1d19a172b75e028a15c2018
|
60244865dd850a3e7346f9c6c3daf74ea1b02448
|
refs/heads/master
| 2021-01-18T14:46:08.025574
| 2013-01-28T19:18:11
| 2013-01-28T19:18:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,366
|
py
|
#! /usr/local/bin/python
#
# The Python Imaging Library.
# $Id$
#
# convert image files
#
# History:
# 0.1 96-04-20 fl Created
# 0.2 96-10-04 fl Use draft mode when converting images
# 0.3 96-12-30 fl Optimize output (PNG, JPEG)
# 0.4 97-01-18 fl Made optimize an option (PNG, JPEG)
# 0.5 98-12-30 fl Fixed -f option (from Anthony Baxter)
#
import site
import getopt, string, sys
from PIL import Image
def usage():
    """Print pilconvert's command-line help (Python 2) and exit with status 1."""
    print "PIL Convert 0.5/1998-12-30 -- convert image files"
    print "Usage: pilconvert [option] infile outfile"
    print
    print "Options:"
    print
    print " -c <format> convert to format (default is given by extension)"
    print
    print " -g convert to greyscale"
    print " -p convert to palette image (using standard palette)"
    print " -r convert to rgb"
    print
    print " -o optimize output (trade speed for size)"
    print " -q <value> set compression quality (0-100, JPEG only)"
    print
    print " -f list supported file formats"
    sys.exit(1)
if len(sys.argv) == 1:
usage()
try:
opt, argv = getopt.getopt(sys.argv[1:], "c:dfgopq:r")
except getopt.error, v:
print v
sys.exit(1)
format = None
convert = None
options = { }
for o, a in opt:
if o == "-f":
Image.init()
id = Image.ID[:]
id.sort()
print "Supported formats (* indicates output format):"
for i in id:
if Image.SAVE.has_key(i):
print i+"*",
else:
print i,
sys.exit(1)
elif o == "-c":
format = a
if o == "-g":
convert = "L"
elif o == "-p":
convert = "P"
elif o == "-r":
convert = "RGB"
elif o == "-o":
options["optimize"] = 1
elif o == "-q":
options["quality"] = string.atoi(a)
if len(argv) != 2:
usage()
try:
im = Image.open(argv[0])
if convert and im.mode != convert:
im.draft(convert, im.size)
im = im.convert(convert)
if format:
apply(im.save, (argv[1], format), options)
else:
apply(im.save, (argv[1],), options)
except:
print "cannot convert image",
print "(%s:%s)" % (sys.exc_type, sys.exc_value)
|
[
"ssaltzman@mitre.org"
] |
ssaltzman@mitre.org
|
22ee1291f7c8806b76361ad8a451f24e1c1d6079
|
e573161a9d4fc74ef4debdd9cfd8956bdd1d0416
|
/src/products/models/order.py
|
4c07332ebf2b2204eeff6db64f51f087e3f38f77
|
[] |
no_license
|
tanjibpa/rx-verify
|
a11c471afc628524bf95103711102258e6e04c19
|
3947fd2f9a640b422014d1857b9377e42d8961a5
|
refs/heads/main
| 2023-02-23T08:16:43.910998
| 2021-01-23T07:07:43
| 2021-01-23T07:07:43
| 331,103,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
from django.db import models
from base.models import BaseModel
from .raw_material import RawMaterial
from .product import Product
class Order(BaseModel):
    # A production order: manufacture `quantity` units of `product` from a
    # set of raw materials; `approved` tracks whether it has been signed off.
    raw_materials = models.ManyToManyField(RawMaterial)
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    quantity = models.IntegerField(default=1)
    approved = models.BooleanField(default=False)

    class Meta:
        db_table = "orders"
        verbose_name_plural = "Orders"
        verbose_name = "Order"
        # Most recently modified first (updated_at presumably comes from
        # BaseModel — confirm).
        ordering = ["-updated_at"]
|
[
"ikram.tanjib@gmail.com"
] |
ikram.tanjib@gmail.com
|
35418b217181a0478eb1d238b23420bc0c421bbf
|
5154dbf4eee6ea6499957bd1e6b6860abcb3d85a
|
/Face-Recognition/recognize_faces_image.py
|
b53c526e33bc593d8f67f4668ff14dcfb3734ed7
|
[] |
no_license
|
sayands/opencv-implementations
|
876c345a6842335d70b2d9b27e746da5a6fd938f
|
c8f0c7b9dca5e6d874b863bd70e4ec3898f6e7d5
|
refs/heads/master
| 2020-03-19T11:51:54.513186
| 2018-12-22T05:48:48
| 2018-12-22T05:48:48
| 136,481,409
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,537
|
py
|
# import the necessary packages
import face_recognition
import argparse
import pickle
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-e", "--encodings", required = True, help ="Path to serialized db of facial encodings")
ap.add_argument("-i", "--image", required = True, help = "path to input image")
ap.add_argument("-d", "--detection-method", type = str, default = "hog", help ="face detection model to use either hog or cnn")
args = vars(ap.parse_args())
# load the known faces and embedings
print("[INFO] loading encodings...")
data = pickle.loads(open(args["encodings"], "rb").read())
# load the input image and convert it from BGR to RGB
image = cv2.imread(args["image"])
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# detect the (x, y) coordinates of the bounding boxes corresponding
# to each face in the input image, then compute the facial
# embeddings for each face
print("[INFO] recognizing faces...")
boxes = face_recognition.face_locations(rgb, model = args["detection_method"])
encodings = face_recognition.face_encodings(rgb, boxes)
# initialize the list of names for each face detected
names = []
# loop over the facial embeddings
for encoding in encodings:
# attempt to match each face in the input image to our known encodings
matches = face_recognition.compare_faces(data["encodings"], encoding)
name = "Unknown"
# check to see if we have found a match
if True in matches:
# find the indexes of all matched faces then initialise a
# dictionary to count the total no.of times each face was
# matched
matchedIdxs = [i for (i,b) in enumerate(matches) if b]
counts = {}
# loop over the matched indexes and maintain a count for each
# recognised face
for i in matchedIdxs:
name = data["names"][i]
counts[name] = counts.get(name, 0) + 1
# determine the recognized face with the largest number of votes
name = max(counts, key = counts.get)
# update the list of names
names.append(name)
# loop over the recognized faces
for ((top, right, bottom, left), name) in zip(boxes, names):
# draw the predicted face name on the image
cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2 )
y = top - 15 if top - 15 > 15 else top + 15
cv2.putText(image, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)
# show the output image
cv2.imshow("Image", image)
cv2.waitKey(0)
|
[
"sayandsarkar.1997@gmail.com"
] |
sayandsarkar.1997@gmail.com
|
e1c8620a560ca4069bcc80749ec00da1a6b6bace
|
87277cddfc489dd7d3837ffccda2f11bb4ad43cc
|
/py/Task198.py
|
5b0fbec9bf247b0e9b9e05389fafae6b23ba0644
|
[] |
no_license
|
rain-zhao/leetcode
|
22c01d1803af7dd66164a204e6dc718e6bab6f0e
|
8d47147f1c78896d7021aede767b5c659cd47035
|
refs/heads/master
| 2022-05-29T10:54:14.709070
| 2022-05-14T09:38:05
| 2022-05-14T09:38:05
| 242,631,842
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,417
|
py
|
class Solution:
    """House Robber: maximum sum of non-adjacent elements, four equivalent takes."""

    def rob(self, nums: [int]) -> int:
        """Two-state DP table: row i = [best skipping house i, best robbing it]."""
        if not nums:
            return 0
        if len(nums) == 1:
            return nums[0]
        table = [[0, 0] for _ in nums]
        table[0] = [0, nums[0]]
        for i in range(1, len(nums)):
            skipped, robbed = table[i - 1]
            table[i][0] = max(skipped, robbed)
            table[i][1] = skipped + nums[i]
        return max(table[-1][0], table[-1][1])

    def rob2(self, nums: [int]) -> int:
        """One-dimensional DP: best[i] = max loot over houses 0..i."""
        if not nums:
            return 0
        if len(nums) == 1:
            return nums[0]
        best = [0] * len(nums)
        best[0] = nums[0]
        best[1] = max(nums[0], nums[1])
        for i in range(2, len(nums)):
            best[i] = max(best[i - 2] + nums[i], best[i - 1])
        return best[-1]

    def rob3(self, nums: [int]) -> int:
        """Constant space: track best totals with/without robbing the current house."""
        if not nums:
            return 0
        if len(nums) == 1:
            return nums[0]
        robbed, skipped = nums[0], 0
        for value in nums[1:]:
            robbed, skipped = skipped + value, max(robbed, skipped)
        return max(robbed, skipped)

    def rob4(self, nums: [int]) -> int:
        """Constant space: classic two-variable rolling maximum."""
        if not nums:
            return 0
        if len(nums) == 1:
            return nums[0]
        prev = nums[0]
        cur = max(nums[0], nums[1])
        for value in nums[2:]:
            prev, cur = cur, max(prev + value, cur)
        return cur
|
[
"rangeree@foxmail.com"
] |
rangeree@foxmail.com
|
328ae953d1241d177cd632306190037e5ea3a1da
|
1078c61f2c6d9fe220117d4c0fbbb09f1a67f84c
|
/paws/lib/python2.7/site-packages/euca2ools-3.4.1_2_g6b3f62f2-py2.7.egg/EGG-INFO/scripts/euare-servercertlistbypath
|
5273dd6177c560a319e8eaa8198e7b0d60df4f5c
|
[
"MIT"
] |
permissive
|
cirobessa/receitas-aws
|
c21cc5aa95f3e8befb95e49028bf3ffab666015c
|
b4f496050f951c6ae0c5fa12e132c39315deb493
|
refs/heads/master
| 2021-05-18T06:50:34.798771
| 2020-03-31T02:59:47
| 2020-03-31T02:59:47
| 251,164,945
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
#!/media/ciro/LOCALDRV/A_DESENVOLVIMENTO/AWS/receitas/paws/bin/python -tt
# Thin console-script shim: delegates straight to euca2ools'
# ListServerCertificates IAM command.
import euca2ools.commands.iam.listservercertificates
if __name__ == '__main__':
    euca2ools.commands.iam.listservercertificates.ListServerCertificates.run()
|
[
"cirobessa@yahoo.com"
] |
cirobessa@yahoo.com
|
|
581477f217c0de64acb027fb57b07564e4b2d1eb
|
0df898bf192b6ad388af160ecbf6609445c34f96
|
/middleware/backend/app/alembic/versions/20201021_001556_.py
|
ad0a9945ac1de56f57047d383df661ef635fc9e4
|
[] |
no_license
|
sasano8/magnet
|
a5247e6eb0a7153d6bbca54296f61194925ab3dc
|
65191c877f41c632d29133ebe4132a0bd459f752
|
refs/heads/master
| 2023-01-07T10:11:38.599085
| 2020-11-13T02:42:41
| 2020-11-13T02:42:41
| 298,334,432
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
"""empty message
Revision ID: 20201021_001556
Revises: 20201021_001530
Create Date: 2020-10-21 00:15:57.520730
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '20201021_001556'
down_revision = '20201021_001530'
branch_labels = None
depends_on = None
def upgrade():
    """Add a uniqueness constraint on trade_job.name."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Passing None lets Alembic derive the constraint name from the
    # configured naming convention.
    op.create_unique_constraint(None, 'trade_job', ['name'])
    # ### end Alembic commands ###


def downgrade():
    """Drop the uniqueness constraint on trade_job.name."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): dropping with name=None relies on a naming convention
    # being configured so the name can be resolved — confirm.
    op.drop_constraint(None, 'trade_job', type_='unique')
    # ### end Alembic commands ###
|
[
"y-sasahara@ys-method.com"
] |
y-sasahara@ys-method.com
|
e207e8460215041552640cba1dd67c59d79db97c
|
956fd28ea7a7ec83b62cd85691c512e735e60b3a
|
/bin/azure/mgmt/eventgrid/operations/__init__.py
|
e7dcca6122851fa842b7b3ecff4c908c0301d34f
|
[
"MIT"
] |
permissive
|
zdmc23/bash-lambda-layer
|
5517a27809d33801c65504c11f867d0d511b2e1c
|
e762df0189cfb894dab2d96bae1655b8857d5efb
|
refs/heads/master
| 2021-01-05T02:32:20.765963
| 2020-02-16T09:41:47
| 2020-02-16T09:41:47
| 240,846,840
| 0
| 0
|
MIT
| 2020-02-16T06:59:55
| 2020-02-16T06:59:54
| null |
UTF-8
|
Python
| false
| false
| 1,000
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .domains_operations import DomainsOperations
from .domain_topics_operations import DomainTopicsOperations
from .event_subscriptions_operations import EventSubscriptionsOperations
from .operations import Operations
from .topics_operations import TopicsOperations
from .topic_types_operations import TopicTypesOperations
__all__ = [
'DomainsOperations',
'DomainTopicsOperations',
'EventSubscriptionsOperations',
'Operations',
'TopicsOperations',
'TopicTypesOperations',
]
|
[
"191707+zdmc23@users.noreply.github.com"
] |
191707+zdmc23@users.noreply.github.com
|
0e3e892d28c69731125eab1f400dfb5cdf382315
|
e0b5a869c687fea3c9dda138734d25b3c5e68b88
|
/9. Decorators/9. 2 Exercises/Problem 1- Logged.py
|
22f6e5741de525740b650eedb601357d66b590f8
|
[] |
no_license
|
asen-krasimirov/Python-OOP-Course
|
b74de5f83fb3e287cb206d48c3db79d15657c902
|
c6df3830168d8b8d780d4fb4ccfe67d1bb350f7e
|
refs/heads/main
| 2023-02-01T04:09:33.796334
| 2020-12-15T14:56:59
| 2020-12-15T14:56:59
| 309,389,119
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 443
|
py
|
def logged(func):
    """Decorator that replaces *func*'s return value with a report string.

    The report has the form
    ``you called <name>(<args>)\nit returned <result>`` where ``<args>``
    is the tuple of positional arguments followed by the *names* of any
    keyword arguments (the exercise's required display format).

    Bug fix: the original forwarded ``args + tuple(kwargs.keys())``
    positionally to *func*, dropping keyword values and passing their key
    names as extra positional arguments (usually a TypeError).  The
    wrapped function is now invoked with the caller's actual arguments;
    only the displayed tuple keeps the original format.
    """
    def wrapper(*args, **kwargs):
        func_name = func.__name__
        parameters = args + tuple(kwargs.keys())
        result = func(*args, **kwargs)
        return f"you called {func_name}{parameters}\nit returned {result}"
    return wrapper
@logged
def func(*args):
return 3 + len(args)
@logged
def sum_func(a, b):
return a + b
print(func(4, 4, 4))
print(sum_func(1, 4))
|
[
"68907559+asen-krasimirov@users.noreply.github.com"
] |
68907559+asen-krasimirov@users.noreply.github.com
|
edacf9cfce5b069972303074f24fded09f89fe81
|
19c2f173d3d5384710b9271853fc3e1e60a054e2
|
/env/bin/pip
|
28e2a8a511905191e3cd014074c7318b9333df4b
|
[] |
no_license
|
rahonur/hellosisters
|
8cd1332ccb7a347c00383bb643bd14a27f781b9f
|
94da959d4be276e280ca5e1049a1c566523c9d60
|
refs/heads/main
| 2023-01-24T02:57:31.518942
| 2020-11-30T22:53:59
| 2020-11-30T22:53:59
| 314,942,537
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
#!/home/vagrant/src/hello-sisters-master/env/bin/python3.6
# -*- coding: utf-8 -*-
# Auto-generated console-script shim that launches pip inside this
# virtualenv.
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    # Strip a trailing "-script.py"/".pyw"/".exe" suffix from argv[0] so
    # pip's own help and error output shows the clean executable name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"vagrant@vagrant.vm"
] |
vagrant@vagrant.vm
|
|
e2fe714bb3d00b6ae53efd2431b00c1a3ed70786
|
b56ca08eb67163d3ccb02ff0775f59a2d971d910
|
/backend/moderation/migrations/0009_merge_20191003_0725.py
|
f5847052ee467c5c2157709054ed2f2005b4a4ae
|
[] |
no_license
|
globax89/dating-work
|
f23d07f98dcb5efad62a1c91cdb04b1a8ef021f7
|
bb3d09c4e2f48ecd3d73e664ab8e3982fc97b534
|
refs/heads/master
| 2022-12-11T22:45:19.360096
| 2019-10-16T07:01:40
| 2019-10-16T07:01:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
# Generated by Django 2.2.4 on 2019-10-03 07:25
from django.db import migrations
class Migration(migrations.Migration):
    # Merge migration: joins two divergent branches of the moderation
    # app's history into one; intentionally performs no schema changes.
    dependencies = [
        ('moderation', '0008_auto_20190930_2116'),
        ('moderation', '0008_auto_20190930_0958'),
    ]
    operations = [
    ]
|
[
"zdimon77@gmail.com"
] |
zdimon77@gmail.com
|
915597ed60c80192a3ea7c652ca9ce6cd0a4d85d
|
9fcc0fc4e8fe9fd2618ad9506594811685cbe065
|
/lt_cmu.py
|
73895d53c24cfe4949402d2e2e037a74250b57ba
|
[] |
no_license
|
cheezebone/Timit_Phoneme_Recognition
|
36a1617a02449184c1cc6f5f4b91c8f30bf1b20f
|
b7fc8318d160828d03371fee3424ca494387a102
|
refs/heads/master
| 2023-01-19T02:17:11.012204
| 2020-11-23T12:33:06
| 2020-11-23T12:33:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
from persephone import corpus
from persephone import corpus_reader
from persephone import rnn_ctc
# Evaluate a 3-layer, 256-unit CTC RNN phoneme recogniser on the "lt"
# corpus using filterbank+pitch features; the commented-out lines are the
# matching train/transcribe invocations kept for reference.
lt_corpus = corpus.Corpus("fbank_and_pitch", "phonemes", "lt")
lt_corpus = corpus_reader.CorpusReader(lt_corpus, batch_size=32)
model = rnn_ctc.Model("exp_cmu/", lt_corpus, num_layers=3, hidden_size=256)
# model.train(max_epochs=50)
model.eval(restore_model_path="exp_cmu/model/model_best.ckpt")
# model.transcribe(restore_model_path="exp_cmu/model/model_best.ckpt")
|
[
"virtuoso.irfan@gmail.com"
] |
virtuoso.irfan@gmail.com
|
dbffe0154bc07e3e8416502de5c23ab330d083b1
|
7b60c00eb1a45fb8fb58aefaf786a5c29700ed7e
|
/payment_gateways/migrations/0010_auto_20180828_1359.py
|
8d6ce36b12732ee6fe2536c64631d11c09c2feff
|
[] |
no_license
|
kshutashvili/mfo
|
521b126146b7582ca6b56bc6bb605f4aee79dfc2
|
663662dd58ee0faab667d5e9bb463301342cb21a
|
refs/heads/master
| 2022-12-15T20:00:44.759395
| 2019-06-23T17:06:13
| 2019-06-23T17:06:13
| 203,863,751
| 0
| 0
| null | 2022-11-22T02:24:17
| 2019-08-22T19:56:43
|
Python
|
UTF-8
|
Python
| false
| false
| 992
|
py
|
# Generated by Django 2.0.2 on 2018-08-28 10:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payment_gateways', '0009_auto_20180828_1259'),
]
operations = [
migrations.AlterModelOptions(
name='city24payment',
options={'verbose_name': "Транзакция Банк 'Фамільний'", 'verbose_name_plural': "Транзакции Банк 'Фамільний'"},
),
migrations.AlterField(
model_name='city24payment',
name='order_id',
field=models.BigIntegerField(verbose_name="Уникальный идентификатор транзакции Банк 'Фамільний'"),
),
migrations.AlterField(
model_name='city24payment',
name='service_id',
field=models.IntegerField(verbose_name="Номер EF в системе Банк 'Фамільний'"),
),
]
|
[
"phonxis@gmail.com"
] |
phonxis@gmail.com
|
840164d5a14a0caf4c19930e425304128403178c
|
41ede4fd3bfba1bff0166bca7aee80dcf21434c6
|
/suvari/gtk2chain/reverses2/libgsf/actions.py
|
a7de56c2638edd3d8d8dcf8cb916ee27fd666874
|
[] |
no_license
|
pisilinux/playground
|
a7db4b42559a21cc72fd4c8649e0231ab6a3eb3c
|
e4e12fff8a847ba210befc8db7e2af8556c3adf7
|
refs/heads/master
| 2022-08-12T23:03:27.609506
| 2022-08-11T18:28:19
| 2022-08-11T18:28:19
| 8,429,459
| 16
| 22
| null | 2022-08-11T18:28:20
| 2013-02-26T09:37:11
|
Python
|
UTF-8
|
Python
| false
| false
| 678
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import get
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
def setup():
    """Configure libgsf: shared-only build with introspection and PIC."""
    autotools.configure("--disable-static \
                         --enable-introspection \
                         --with-pic")
    # Inject --as-needed into libtool's link command to avoid overlinking.
    pisitools.dosed("libtool"," -shared ", " -Wl,--as-needed -shared ")

def build():
    """Compile the package."""
    autotools.make()

def install():
    """Install into the packaging root and ship the doc files."""
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    pisitools.dodoc("AUTHORS", "BUGS", "COPY*", "HACKING", "NEWS", "README", "TODO")
|
[
"suvarice@gmail.com"
] |
suvarice@gmail.com
|
253e032a7578b0693751dc6c07b7ec26f2937f27
|
3fe96227e910eb3ef13b80ded266da924d32cb86
|
/pump/main/models.py
|
26fe322b093b7cff7fe553bf16ab51637c5d9dc2
|
[] |
no_license
|
akingon/pump
|
b974054c54d12b8efa644a161e3ab9acb9b82601
|
6ce07a9b3faf7e1ed97062f854f57624a896e8a7
|
refs/heads/master
| 2021-01-18T06:32:25.974631
| 2015-05-01T14:27:22
| 2015-05-01T14:27:22
| 31,909,188
| 0
| 0
| null | 2015-03-09T16:18:46
| 2015-03-09T16:18:46
| null |
UTF-8
|
Python
| false
| false
| 3,054
|
py
|
from django.db import models
from django.template import Context
from django.template.loader import get_template
from .criteria import Houghton, ABC, PassFail
class Response(models.Model):
    # One questionnaire submission; q1-q24 store the raw answers as free
    # text ("" when unanswered).
    added = models.DateTimeField(auto_now_add=True)
    # Houghton
    q1 = models.TextField(blank=True, default="")
    q2 = models.TextField(blank=True, default="")
    q3 = models.TextField(blank=True, default="")
    q4 = models.TextField(blank=True, default="")
    q5 = models.TextField(blank=True, default="")
    q6 = models.TextField(blank=True, default="")
    # ABC
    q7 = models.TextField(blank=True, default="")
    q8 = models.TextField(blank=True, default="")
    q9 = models.TextField(blank=True, default="")
    q10 = models.TextField(blank=True, default="")
    q11 = models.TextField(blank=True, default="")
    q12 = models.TextField(blank=True, default="")
    q13 = models.TextField(blank=True, default="")
    q14 = models.TextField(blank=True, default="")
    q15 = models.TextField(blank=True, default="")
    q16 = models.TextField(blank=True, default="")
    q17 = models.TextField(blank=True, default="")
    q18 = models.TextField(blank=True, default="")
    q19 = models.TextField(blank=True, default="")
    q20 = models.TextField(blank=True, default="")
    q21 = models.TextField(blank=True, default="")
    q22 = models.TextField(blank=True, default="")
    # pick up
    q23 = models.TextField(blank=True, default="")
    # look behind
    q24 = models.TextField(blank=True, default="")

    def __unicode__(self):
        # Python 2 / Django 1.x display name.
        return "Response %s" % self.added

    def results(self):
        """Score this response via Scorer and return its results dict."""
        s = Scorer(self)
        return s.results()

    def email_text(self):
        """ body of email version """
        t = get_template("main/response_email.txt")
        c = Context(dict(object=self))
        return t.render(c)
class Scorer(object):
    """Score a Response across the four assessments (Houghton, ABC, two pass/fail items)."""

    def __init__(self, r):
        # Houghton scale: questions 1-6; blank answers are dropped.
        h_values = [r.q1, r.q2, r.q3, r.q4, r.q5, r.q6]
        h_values = [int(v) for v in h_values if v != ""]
        self.h = Houghton(h_values)
        # ABC scale: questions 7-22; blank answers are dropped.
        a_values = [
            r.q7, r.q8, r.q9, r.q10, r.q11, r.q12,
            r.q13, r.q14, r.q15, r.q16, r.q17, r.q18,
            r.q19, r.q20, r.q21, r.q22,
        ]
        a_values = [int(v) for v in a_values if v != ""]
        self.abc = ABC(a_values)
        # Single-question items; an unanswered question defaults to '1'.
        self.pick_up = PassFail(int(r.q23 or '1'))
        self.look_behind = PassFail(int(r.q24 or '1'))

    def number_passed(self):
        """Return how many of the four assessments were passed (0-4)."""
        # Assumes each pass_fail() returns 1 for pass, 0 for fail — defined
        # in .criteria, not visible here.
        return (
            self.h.pass_fail() +
            self.abc.pass_fail() +
            self.pick_up.pass_fail() +
            self.look_behind.pass_fail())

    def percentage_likelihood(self):
        """Map number_passed (0-4) onto a likelihood percentage string."""
        # NOTE(review): percents[0] ('95.6') corresponds to zero passed —
        # confirm the intended direction of this mapping.
        percents = ['95.6', '93.9', '92.1', '81.4', '59.3']
        return percents[self.number_passed()]

    def results(self):
        """Bundle the sub-scores and summary figures into a dict for templates."""
        return dict(
            houghton=self.h,
            abc=self.abc,
            pick_up=self.pick_up,
            look_behind=self.look_behind,
            number_passed=self.number_passed(),
            percentage_likelihood=self.percentage_likelihood(),
        )
|
[
"anders@columbia.edu"
] |
anders@columbia.edu
|
7879ca81348c624f31155cb54edc381e6820a388
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02602/s085786581.py
|
99dcc1901822553bf70ab6c5531a4412172e6f36
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
# For each index i >= k, report whether A[i] is strictly greater than the
# element k positions earlier.
n, k = map(int, input().split())
A = list(map(int, input().split()))
for i in range(k, n):
    print('Yes' if A[i - k] < A[i] else 'No')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
c9de3cec05d3f3046559f4d8e7e7f95a8b23248e
|
78cc4a9de8815bb042b25f70cbea5da2058553e8
|
/src/fetch/fetchclass/grap_userinfo.py
|
bc27d9475706abb115c0787ee0e0262e5e6afa7e
|
[] |
no_license
|
simple2source/fetch_crwal
|
504c6aae18cc6c9520c3a5b6cb5e76fb65500d82
|
390998556f72a2053574e6ad5c58cbf0b850c8c0
|
refs/heads/master
| 2021-01-19T02:47:07.951209
| 2016-06-26T03:57:38
| 2016-06-26T03:57:38
| 52,797,780
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,705
|
py
|
# coding: utf-8
"""
script to send email daily about the grap user info in MySQL grapuser_info table
"""
import MySQLdb
import common
from prettytable import PrettyTable
import datetime, time
import libaccount
sql_config = common.sql_config
def update_num():
try:
sql = """select grap_source, user_name from grapuser_info where account_type = '购买账号'"""
db = MySQLdb.connect(**sql_config)
cursor = db.cursor()
cursor.execute(sql)
data = cursor.fetchall()
for i in data:
a = libaccount.Manage(i[0])
ck_str = a.redis_ck_get(i[1])
a.num_update(i[0], i[1], ck_str)
time.sleep(3)
except Exception as e:
print Exception, e
def grap_info():
db = MySQLdb.connect(**sql_config)
cursor = db.cursor()
sql = """ select grap_source, user_name, account_mark, buy_num, pub_num, expire_time
from grapuser_info """ # where account_type = '购买账号' """
cursor.execute(sql)
data = cursor.fetchall()
x = PrettyTable(['来源', '用户名', '地区', '购买余额', '发布余额', '过期时间'])
for i in data:
ll = list(i)
ll2 = ll[:5] + [str(ll[5])]
x.add_row(ll2)
db.close()
return x.get_html_string(sortby=u"过期时间").encode('utf8')
def eformat(html):
msg_style = """<style type="text/css">
.body{
font-family: Monaco, Menlo, Consolas, "Courier New", "Lucida Sans Unicode", "Lucida Sans", "Lucida Console", monospace;
font-size: 14px;
line-height: 20px;
}
.table{ border-collapse:collapse; border:solid 1px gray; padding:6px}
.table td{border:solid 1px gray; padding:6px}
.color-ok {color: green;}
.color-warning {color: coral;}
.color-error {color: red;}
.bg-ok {background-color: lavender;}
.bg-warning {background-color: yellow;}
.bg-error {background-color: deeppink;}
</style>"""
msg_head = """<html><head><meta charset="utf-8"></head>""" + msg_style + "<body>"
msg = msg_head + """<h2>简历下载账号信息</h2>"""
msg2 = grap_info()
day_list = [str(datetime.datetime.now().date() + datetime.timedelta(days=x)) for x in xrange(-30, 60)]
day_list2 = ['<td>' + x + '</td>' for x in day_list]
for i in day_list2:
msg2 = msg2.replace(i, i.replace('<td>', '<td style="color:red; text-align:right">'))
msg = msg + msg2 + "</body></html>"
msg = msg.replace('<table>', '<table class="table">').replace('<td>', '<td style="text-align:right">').replace('<th>', "<th class='table'>")
# print msg
return msg
if __name__ == '__main__':
data =grap_info()
msg = eformat(data)
common.sendEmail('main', '简历渠道账号信息', msg, msg_type=1, des= 'op')
|
[
"dbawww@126.com"
] |
dbawww@126.com
|
63f1be347c7875fec36366c86232173a42630430
|
3a7412502b89b917f23cda9a3318d2dc4d02185b
|
/panoptes/analysis/panels/events/admin.py
|
c24c66fb0d3a697e3b34747002864d68c5a7f646
|
[
"BSD-2-Clause"
] |
permissive
|
cilcoberlin/panoptes
|
5f0b19d872993bc5c7f51a44c9ccc596fe0a8ab5
|
67d451ea4ffc58c23b5f347bfa5609fa7f853b45
|
refs/heads/master
| 2021-01-21T00:17:42.038637
| 2012-07-10T03:20:47
| 2012-07-10T03:20:47
| 1,660,305
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
from django.contrib import admin
from panoptes.analysis.panels.events.models import LocationCalendar
class LocationCalendarAdmin(admin.ModelAdmin):
list_display = ('name', 'location', 'calendar_id')
ordering = ('location', 'order')
admin.site.register(LocationCalendar, LocationCalendarAdmin)
|
[
"justin.locsei@oberlin.edu"
] |
justin.locsei@oberlin.edu
|
de1964489e7a06b573dd7b0b5646fc231e174d46
|
146cd740649b87032cbbfb97cde6ae486f76230b
|
/venv/lib/python3.6/site-packages/PIL/BdfFontFile.py
|
fc85c8a287d218f13805660da1f2d7bc33c3793c
|
[] |
no_license
|
shellyhuang18/plank-filter-master
|
8b7024c46334062496f05d31eefc618ebae50b4e
|
8993a5b00f45841c3385fe997857bfdd10b71a84
|
refs/heads/master
| 2020-03-30T18:14:45.017957
| 2018-12-27T20:51:25
| 2018-12-27T20:51:25
| 151,490,556
| 0
| 1
| null | 2018-12-19T22:42:26
| 2018-10-03T22:50:58
|
Python
|
UTF-8
|
Python
| false
| false
| 3,119
|
py
|
#
# The Python Imaging Library
# $Id$
#
# bitmap distribution font (bdf) file parser
#
# history:
# 1996-05-16 fl created (as bdf2pil)
# 1997-08-25 fl converted to FontFile driver
# 2001-05-25 fl removed bogus __init__ call
# 2002-11-20 fl robustification (from Kevin Cazabon, Dmitry Vasiliev)
# 2003-04-22 fl more robustification (from Graham Dumpleton)
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1997-2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import print_function
from . import Image, FontFile
# --------------------------------------------------------------------
# parse X Bitmap Distribution Format (BDF)
# --------------------------------------------------------------------
bdf_slant = {
"R": "Roman",
"I": "Italic",
"O": "Oblique",
"RI": "Reverse Italic",
"RO": "Reverse Oblique",
"OT": "Other"
}
bdf_spacing = {
"P": "Proportional",
"M": "Monospaced",
"C": "Cell"
}
def bdf_char(f):
# skip to STARTCHAR
while True:
s = f.readline()
if not s:
return None
if s[:9] == b"STARTCHAR":
break
id = s[9:].strip().decode('ascii')
# load symbol properties
props = {}
while True:
s = f.readline()
if not s or s[:6] == b"BITMAP":
break
i = s.find(b" ")
props[s[:i].decode('ascii')] = s[i+1:-1].decode('ascii')
# load bitmap
bitmap = []
while True:
s = f.readline()
if not s or s[:7] == b"ENDCHAR":
break
bitmap.append(s[:-1])
bitmap = b"".join(bitmap)
[x, y, l, d] = [int(p) for p in props["BBX"].split()]
[dx, dy] = [int(p) for p in props["DWIDTH"].split()]
bbox = (dx, dy), (l, -d-y, x+l, -d), (0, 0, x, y)
try:
im = Image.frombytes("1", (x, y), bitmap, "hex", "1")
except ValueError:
# deal with zero-width characters
im = Image.new("1", (x, y))
return id, int(props["ENCODING"]), bbox, im
##
# Font file plugin for the X11 BDF format.
class BdfFontFile(FontFile.FontFile):
def __init__(self, fp):
FontFile.FontFile.__init__(self)
s = fp.readline()
if s[:13] != b"STARTFONT 2.1":
raise SyntaxError("not a valid BDF file")
props = {}
comments = []
while True:
s = fp.readline()
if not s or s[:13] == b"ENDPROPERTIES":
break
i = s.find(b" ")
props[s[:i].decode('ascii')] = s[i+1:-1].decode('ascii')
if s[:i] in [b"COMMENT", b"COPYRIGHT"]:
if s.find(b"LogicalFontDescription") < 0:
comments.append(s[i+1:-1].decode('ascii'))
while True:
c = bdf_char(fp)
if not c:
break
id, ch, (xy, dst, src), im = c
if 0 <= ch < len(self.glyph):
self.glyph[ch] = xy, dst, src, im
|
[
"shellyhuang81@gmail.com"
] |
shellyhuang81@gmail.com
|
b2e647ca7e61dc983ece0837c25e3743abde5e29
|
e755453c853ae400d94f562ad215b59166b63782
|
/tests/splay_tests/test_contains.py
|
90f77cc4f6c2ffda415a14fbc9008bc8390adea1
|
[
"MIT"
] |
permissive
|
lycantropos/dendroid
|
0cb3e276dd9c476b82b0b7a17c25c2e05616a993
|
fd11c74a395eb791caf803c848805569869080f6
|
refs/heads/master
| 2023-04-07T11:07:55.550796
| 2023-03-27T00:46:03
| 2023-03-27T00:46:03
| 215,369,321
| 0
| 1
|
MIT
| 2020-09-24T05:02:02
| 2019-10-15T18:29:36
|
Python
|
UTF-8
|
Python
| false
| false
| 891
|
py
|
from typing import Tuple
from hypothesis import given
from dendroid.hints import Value
from tests.utils import (BaseSet,
are_keys_equal,
implication,
to_height,
to_max_binary_tree_height,
set_value_to_key)
from . import strategies
@given(strategies.non_empty_sets_with_values)
def test_properties(set_with_value: Tuple[BaseSet, Value]) -> None:
set_, value = set_with_value
assert implication(value in set_,
are_keys_equal(set_value_to_key(set_, value),
set_.tree.root.key))
@given(strategies.non_empty_sets)
def test_accessing_in_order(set_: BaseSet) -> None:
for value in set_:
value in set_
tree = set_.tree
assert to_height(tree) == to_max_binary_tree_height(tree)
|
[
"azatibrakov@gmail.com"
] |
azatibrakov@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.