blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 220
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 257
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
77f5af4c2ac23f8d0dcf8e352325b8f01ef19cd8
|
e2f71bcc6a5cad8d8f6ad96852a1f9446d05f891
|
/code/leetcode/self_dividing_numbers.py
|
e81a6f24af8d503670afcadfa5de5b9e0c8ae834
|
[] |
no_license
|
GodferyChen/LearnPython
|
210d1c150f5f6d5b5f086ec9f77a539f5c1770e1
|
d5cb60a73df946c74b92aa6aeb87c04c4d54788c
|
refs/heads/master
| 2021-09-02T15:06:51.207012
| 2018-01-03T10:28:59
| 2018-01-03T10:28:59
| 106,989,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class Solution:
    """LeetCode 728 — self-dividing numbers.

    A number is self-dividing when it contains no zero digit and every
    digit divides it evenly.
    """

    @staticmethod
    def selfDividingNumbers(left, right):
        """Return an iterator over self-dividing numbers in [left, right]."""
        def is_self_dividing(candidate):
            for ch in str(candidate):
                digit = int(ch)
                # a zero digit disqualifies; so does any non-dividing digit
                if digit == 0 or candidate % digit:
                    return False
            return True

        return filter(is_self_dividing, range(left, right + 1))
if __name__ == '__main__':
    # Renamed from 'dict', which shadowed the builtin of the same name.
    result = Solution().selfDividingNumbers(1, 22)
    print(list(result))
|
[
"chenxh.cz@gmail.com"
] |
chenxh.cz@gmail.com
|
4a59086461f4c14e5aedf3eef1b38135994a18bb
|
7f490476dafd6663c2598db20b1c0422940af701
|
/day4/Atm/conf/settings.py
|
fdaa199b99bae582318b0314c235e3848aded20e
|
[] |
no_license
|
ChacoLv/python-oldboy
|
46bc409ef10ecda70c2c6600b38216ab53c4c99c
|
1c3e18c9ac3e61376927cfa6ce010001d53cdd93
|
refs/heads/master
| 2020-04-05T14:10:45.821224
| 2017-10-14T07:10:39
| 2017-10-14T07:10:39
| 94,796,866
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
# -*- coding:utf-8 -*-
# LC
"""Configuration for the ATM exercise: storage, logging and transaction rules."""
import os
import sys
import logging

# Project root: two directory levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)

# File-backed "database" used by the account storage engine.
DATABASE = {
    'engine': 'file_storage',
    'name': 'accounts',
    'path': "%s/db" % BASE_DIR,
}

LOG_LEVEL = logging.INFO

# Log category -> log file name.
LOG_TYPES = {
    'transaction': 'transactions.log',
    'access': 'access.log',
}

# Per-transaction rules: balance direction, interest rate, receipt flag.
TRANSACTION_TYPE = {
    'repay':    {'action': 'plus',  'interest': 0,    'receipt': 0},
    'withdraw': {'action': 'minus', 'interest': 0.05, 'receipt': 0},
    'transfer': {'action': 'minus', 'interest': 0.05, 'receipt': 1},
    'consume':  {'action': 'minus', 'interest': 0,    'receipt': 1},
}
|
[
"chenglv@live.com"
] |
chenglv@live.com
|
99d9bcb03ca5a0d8950b41223b64796de4b33c4e
|
c0a6bf77a648eda8abc828931ee4f19abff2c8e1
|
/dependencies.py
|
a1047bfd3ef59cbbb08931882b439995de9fe3f2
|
[] |
no_license
|
elujambio/OpenCV
|
bb0b766042301d75c56fe806fe41af531bba870f
|
e2e392833572ca10d24b89d29e5811eb1e203f0d
|
refs/heads/master
| 2020-07-24T01:02:44.435542
| 2017-06-14T18:38:47
| 2017-06-14T18:38:47
| 94,362,035
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
#
#
# DEPENDENCIES
#
#
#
# This program needs the following dependencies
#
#
# 1.- numpy
# pip install numpy
# brew install numpy
# 2.- cv2
# Easiest install is through miniconda
# 3.- matplotlib
# pip install matplotlib
# 4.- FFmpeg
|
[
"eugeniolujambio@gmail.com"
] |
eugeniolujambio@gmail.com
|
382b4289c3b1bb000f2690f9c6f2a63fe5e1583c
|
f33885d6f1e963586f9e7b1e1a46a271d125e2e7
|
/ci/nur/fileutils.py
|
338149b414047c1411f8783359d43a434d120e33
|
[
"MIT"
] |
permissive
|
nix-community/NUR
|
cad821a31d965ade9869c21f03edf9f7bb4cdf02
|
80012e6c2de5ea9c4101948b0d58c745e7813180
|
refs/heads/master
| 2023-09-03T05:05:30.497198
| 2023-09-03T04:32:01
| 2023-09-03T04:32:01
| 123,327,588
| 965
| 385
|
MIT
| 2023-09-12T07:10:52
| 2018-02-28T18:49:50
|
Python
|
UTF-8
|
Python
| false
| false
| 921
|
py
|
import json
import os
import shutil
from contextlib import contextmanager
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Any, Generator, Union
PathType = Union[str, Path]
def to_path(path: PathType) -> Path:
    """Coerce *path* to a ``pathlib.Path``; a ``Path`` passes through unchanged."""
    return path if isinstance(path, Path) else Path(path)
def write_json_file(data: Any, path: PathType) -> None:
    """Atomically write *data* as pretty-printed JSON to *path*.

    The JSON is first dumped to a temporary file in the destination
    directory and then renamed over *path* with ``os.replace`` (atomic on
    POSIX within one filesystem), so readers never observe a half-written
    file.
    """
    path = to_path(path)
    # delete=False: we move the file into place ourselves, so the close-time
    # unlink must not run. (The previous version used the default delete=True
    # and re-created an empty file after the move just so that unlink would
    # not raise.)
    tmp = NamedTemporaryFile(
        mode="w+", prefix=path.name, dir=str(path.parent), delete=False
    )
    try:
        with tmp as tmp_file:
            json.dump(data, tmp_file, indent=4, sort_keys=True)
        os.replace(tmp.name, path)
    except BaseException:
        # Best-effort cleanup of the orphaned temp file before re-raising.
        try:
            os.unlink(tmp.name)
        except OSError:
            pass
        raise
@contextmanager
def chdir(dest: PathType) -> Generator[None, None, None]:
    """Context manager: run the enclosed block with *dest* as the CWD.

    The previous working directory is always restored, even if the body
    raises.
    """
    original_cwd = os.getcwd()
    try:
        os.chdir(dest)
        yield
    finally:
        os.chdir(original_cwd)
|
[
"joerg@thalheim.io"
] |
joerg@thalheim.io
|
a2455184714558aeedd27f30413d548c77e63c4b
|
7e260342bb04eba9bff4289da938e859b8d68b82
|
/contrib/scripts.py
|
d6d2ef643382ab83ba2df65618bc02d78d78ab2f
|
[
"MIT"
] |
permissive
|
christopherjenness/fava
|
72c2d0e201f7792ac32a643be0479fa7623efc27
|
71c25d8a0ae08aa84150e33d464000d0161610ea
|
refs/heads/master
| 2020-04-28T15:29:34.446050
| 2019-03-12T17:58:03
| 2019-03-12T17:58:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,374
|
py
|
#!/usr/bin/env python3
"""Various utilities."""
import json
import os
from beancount.query import query_env
from beancount.query import query_parser
import click
import requests
BASE_PATH = os.path.normpath(
os.path.join(os.path.dirname(__file__), "../fava")
)
LANGUAGES = ["de", "es", "fr", "nl", "pt", "ru", "zh-CN", "sk", "uk"]
@click.group()
def cli():
    """Various utilities."""
    # Click group entry point; subcommands register via @cli.command().
def _env_to_list(attributes):
for name in attributes.keys():
if isinstance(name, tuple):
name = name[0]
yield name
@cli.command()
def generate_bql_grammar_json():
    """Generate a JSON file with BQL grammar attributes.
    The online code editor needs to have the list of available columns,
    functions, and keywords for syntax highlighting and completion.
    Should be run whenever the BQL changes."""
    target_env = query_env.TargetsEnvironment()
    # Column/function names come from beancount's targets environment;
    # tuple keys are reduced to their first element by _env_to_list.
    data = {
        "columns": sorted(set(_env_to_list(target_env.columns))),
        "functions": sorted(set(_env_to_list(target_env.functions))),
        "keywords": sorted({kw.lower() for kw in query_parser.Lexer.keywords}),
    }
    # Written into the static assets consumed by the CodeMirror editor.
    path = os.path.join(
        os.path.dirname(__file__),
        "../fava/static/javascript/codemirror/bql-grammar.json",
    )
    with open(path, "w") as json_file:
        json.dump(data, json_file)
@cli.command()
def download_translations():
    """Fetch updated translations from POEditor.com."""
    token = os.environ.get("POEDITOR_TOKEN")
    if not token:
        raise click.UsageError(
            "The POEDITOR_TOKEN environment variable needs to be set."
        )
    # Grab both catalog formats for every configured language.
    for language in LANGUAGES:
        for catalog_format in ("po", "mo"):
            download_from_poeditor(language, catalog_format, token)
@cli.command()
def upload_translations():
    """Upload .pot message catalog to POEditor.com.

    Raises click.UsageError when POEDITOR_TOKEN is not set.
    """
    token = os.environ.get("POEDITOR_TOKEN")
    if not token:
        raise click.UsageError(
            "The POEDITOR_TOKEN environment variable needs to be set."
        )
    # Plain string literal — the original used an f-string with no placeholder.
    path = os.path.join(BASE_PATH, "translations/messages.pot")
    click.echo(f"Uploading message catalog: {path}")
    data = {
        "api_token": token,
        "id": 90283,
        "updating": "terms",
        "sync_terms": 1,
    }
    # Context manager so the catalog file handle is closed even if the
    # upload raises (the original leaked the open file object).
    with open(path, "rb") as pot_file:
        request = requests.post(
            "https://api.poeditor.com/v2/projects/upload",
            data=data,
            files={"file": pot_file},
        )
    click.echo("Done: " + str(request.json()["result"]["terms"]))
def download_from_poeditor(language, format_, token):
    """Download .{po,mo}-file from POEditor and save to disk.

    :param language: POEditor language code (e.g. "zh-CN").
    :param format_: export format, "po" or "mo".
    :param token: POEditor API token.
    """
    click.echo(f'Downloading .{format_}-file for language "{language}"')
    language_short = language[:2]
    data = {
        "api_token": token,
        "id": 90283,
        "language": language,
        "type": format_,
    }
    # POEditor's export is two-step: request an export, then fetch the URL.
    request = requests.post(
        "https://api.poeditor.com/v2/projects/export", data=data
    )
    url = request.json()["result"]["url"]
    content = requests.get(url).content
    folder = os.path.join(
        BASE_PATH, "translations", language_short, "LC_MESSAGES"
    )
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists / os.makedirs pair.
    os.makedirs(folder, exist_ok=True)
    path = os.path.join(folder, f"messages.{format_}")
    with open(path, "wb") as file_:
        file_.write(content)
    click.echo(f'Downloaded to "{path}"')
if __name__ == "__main__":
    # Dispatch to the click command group when run as a script.
    cli()
|
[
"mail@jakobschnitzer.de"
] |
mail@jakobschnitzer.de
|
e743150c8f84f40c1a8ed70fe9e7fdb1c26ef371
|
1f2c5ba0a545bf6793d9f2fbaf172e3478320701
|
/property/migrations/0003_auto_20200513_2357.py
|
c2c8f284025db10fd1c93f1b7aa1780847fd4872
|
[] |
no_license
|
MusawerAli/DjangoRent
|
537afc4555cf21f9c53384704d1d5a3fc4d2f68e
|
640110e4f63cd39b556467da63470dbde401cfa0
|
refs/heads/master
| 2022-07-07T02:29:43.673984
| 2020-05-15T21:28:57
| 2020-05-15T21:28:57
| 261,726,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
# Generated by Django 3.0.6 on 2020-05-13 23:57
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``memberdetail.last_login`` and alter ``memberdetail.created_at``.

    NOTE(review): the defaults are the literal datetimes captured when
    ``makemigrations`` ran (2020-05-13 23:57), not a per-row timestamp —
    confirm this is intended rather than ``auto_now``/``auto_now_add``.
    """

    dependencies = [
        ('property', '0002_auto_20200513_2129'),
    ]

    operations = [
        migrations.AddField(
            model_name='memberdetail',
            name='last_login',
            field=models.DateTimeField(default=datetime.datetime(2020, 5, 13, 23, 57, 32, 478982)),
        ),
        migrations.AlterField(
            model_name='memberdetail',
            name='created_at',
            field=models.DateTimeField(default=datetime.datetime(2020, 5, 13, 23, 57, 32, 478903)),
        ),
    ]
|
[
"pakjalihouse@gmail.com"
] |
pakjalihouse@gmail.com
|
59778d5cfdb33ed8ffbcd1d7c0f2b05cd15a366d
|
5d22d9b2cb5cad7970c1055aeef55d2e2a5acb8e
|
/py/google/cj2014/round1A/FullBinaryTree.py
|
df737dafe506eb93570aed7b49ecc60662a2dc43
|
[
"MIT"
] |
permissive
|
shhuan/algorithms
|
36d70f1ab23dab881bf1a15573fbca7b2a3f4235
|
2830c7e2ada8dfd3dcdda7c06846116d4f944a27
|
refs/heads/master
| 2021-05-07T14:21:15.362588
| 2017-11-07T08:20:16
| 2017-11-07T08:20:16
| 109,799,698
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,055
|
py
|
# -*- coding: utf-8 -*-
"""
created by huash06 at 2015-04-08 10:48
"""
__author__ = 'huash06'
import sys
import os
import py.lib.Utils as Utils
from datetime import datetime
# sys.stdin = open('input/sample.txt', 'r')
sys.stdin = open('input/B-large-practice.in', 'r')
# sys.stdout = open('output/B-large-practice.out', 'w')
MAXNN = 301
def count_node(graph, node, parent):
    """Count the nodes of the subtree rooted at *node*, entered from *parent*.

    *graph* is an adjacency matrix: graph[a][b] is truthy when a and b are
    connected.
    """
    total = 1  # the node itself
    for neighbor in range(len(graph)):
        if neighbor != parent and graph[node][neighbor]:
            total += count_node(graph, neighbor, node)
    return total
def dfs(graph, node, parent, memo):
    """Size of the largest full binary tree rooted at *node*, entered from *parent*.

    A full binary tree keeps the node itself plus its two largest child
    subtrees; with fewer than two children only the node survives.

    :param graph: adjacency dict {node: [neighbors]} (undirected tree)
    :param node: current subtree root
    :param parent: the node we arrived from (0 for the virtual root)
    :param memo: (N+1)x(N+1) table caching results per (node, parent); -1 = unset
    :return: number of nodes kept in the full binary tree rooted at node
    """
    # Bug fix: the original condition was `== -1 or True`, which made the
    # memo table useless and recomputed every subtree on every call.
    if memo[node][parent] == -1:
        # Track the two largest child results.
        max1 = -1
        max2 = -1
        for child in graph[node]:
            if child != parent:
                nc = dfs(graph, child, node, memo)
                if nc > max1:
                    max2 = max1
                    max1 = nc
                elif nc > max2:
                    max2 = nc
        if max2 == -1:
            # Fewer than two children: only the node itself can be kept.
            memo[node][parent] = 1
        else:
            memo[node][parent] = 1 + max1 + max2
    return memo[node][parent]
# Number of test cases (read from the redirected stdin above).
T = int(sys.stdin.readline())
# Recursive dfs can go as deep as the tree; raise the default limit.
sys.setrecursionlimit(3000)
# start_time = datetime.now()
for ti in range(1, T + 1):
    # Read one tree: N nodes, then N-1 undirected edges.
    N = int(sys.stdin.readline())
    GRAPH = dict()
    for ei in range(1, N+1):
        GRAPH[ei] = list()
    for ni in range(N-1):
        # NOTE(review): T (the test-case count) is shadowed by the edge
        # endpoint here; safe only because range(1, T + 1) above was
        # evaluated once before the loop started.
        S, T = map(int, sys.stdin.readline().strip().split(' '))
        GRAPH[S].append(T)
        GRAPH[T].append(S)
    count = N
    # memo[node][parent] cache shared across all candidate roots.
    memo = [[-1 for c in range(N+1)] for r in range(N+1)]
    for r in range(1, N+1):
        # Deletions required when rooting the full binary tree at r.
        c = N - dfs(GRAPH, r, 0, memo)
        if c < count:
            count = c
    print('Case #{}: {}'.format(ti, count))
# end_time = datetime.now()
# time_cost = end_time-start_time
# print('Time Cost: {}s'.format(time_cost.seconds))
|
[
"shuangquanhuang@gmail.com"
] |
shuangquanhuang@gmail.com
|
927b63df755b0644229f3e3df8678e837b661a28
|
a1abd9c305ca803ea298f3de34e11dbdb03dea03
|
/hackerrank/cutTheSticks.py
|
7224a79bb98682106f6fe9e110023af56fe14da6
|
[] |
no_license
|
bayramtuccar/PythonNote
|
96f80315843208eac359d8f082b3e51a435542e7
|
446650ef395e04175f2877607ef8671f4ce53721
|
refs/heads/master
| 2020-04-08T09:11:58.019490
| 2018-11-26T18:16:28
| 2018-11-26T18:16:28
| 159,211,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
#!/bin/python3
import sys
def cutTheSticks(arr):
    """Repeatedly cut the shortest remaining length from every stick.

    Each round records how many sticks are left, subtracts the minimum
    length from all of them, and discards sticks that reach zero. *arr*
    is modified in place, matching the original behavior.

    :param arr: list of positive stick lengths
    :return: list of stick counts observed before each round of cuts
    """
    counts = []
    # Idiomatic len()/comprehension replace the original arr.__len__() and
    # range(...).__reversed__() dunder calls with in-place deletion.
    while arr:
        counts.append(len(arr))
        shortest = min(arr)
        arr[:] = [length - shortest for length in arr if length > shortest]
    return counts
if __name__ == "__main__":
n = int(input().strip())
arr = list(map(int, input().strip().split(' ')))
result = cutTheSticks(arr)
print("\n".join(map(str, result)))
|
[
"noreply@github.com"
] |
bayramtuccar.noreply@github.com
|
bc89d5b081e0e99dac177fc3c5169ae89ad3b7c1
|
5fbcd2d59b38b5b3d28b0f13c762f0a968a21133
|
/application/scroll.py
|
f19286ad1e181feac83b95a051e3d6395004dfba
|
[] |
no_license
|
Walia666/Alerter
|
1a03e482a2cd05719d601bc22552471eedffc30e
|
83267d0d004c11e331eaf7d7a6145fe40a11cc8a
|
refs/heads/master
| 2020-03-19T07:45:59.185918
| 2018-06-05T08:38:54
| 2018-06-05T08:38:54
| 136,146,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,516
|
py
|
def docsnew(request):
    """Django view: query Elasticsearch for log documents matching a Kibana
    query within a date range, page through all hits with the scroll API,
    and return the selected fields as a downloadable ``fields.csv``.

    NOTE(review): the source had its indentation stripped; the structure
    below is a reconstruction and should be checked against the original
    file. The csv "wb"/"rb" modes indicate Python 2-era code.
    """
    client = Elasticsearch()
    values=[]
    if request.method == 'POST':
        # Form inputs: target log name, comma-separated field list, Kibana
        # query string, and a date range in '%d-%m-%Y %H:%M:%S' format.
        log_field=request.POST['log_field']
        array=request.POST['data']
        kibanaquery=request.POST['kibana_query']
        datefrom = request.POST['datetime_from_name']
        dateto= request.POST['datetime_to_name']
        pattern = '%d-%m-%Y %H:%M:%S'
        datefromsec = int(time.mktime(time.strptime(datefrom, pattern)))
        datetosec=int(time.mktime(time.strptime(dateto, pattern)))
        keyarray=array.split(',')
        # Per-field checkbox state (None/"None" when unticked).
        for item in keyarray:
            marked=request.POST.get(item,)
            values.append(marked)
        newlstvalues = [str(x) for x in values]
        # Convert absolute dates into "seconds before now" offsets for ES.
        current_timestamp=time.time()
        from_date=abs(current_timestamp - datefromsec)
        to_date=abs(current_timestamp- datetosec)
        from_date_int=int(from_date)
        to_date_int=int(to_date)
        # Resolve the ES index and host:port endpoint for this log.
        # Parameterized raw SQL — no injection from log_field.
        objs = Log.objects.raw('SELECT * FROM application_log WHERE log_field = %s', [log_field])
        for obj in objs:
            # If several rows match, the last row's endpoint wins.
            index=obj.index
            ipendpoint=obj.ip_endpoint
            logfield=obj.log_field
            data=ipendpoint.split(":")
            hostmain=data[0]
            portmain=data[1]
            portint=int(portmain)
            client=Elasticsearch(host=hostmain, port=portint)
        document = {
            "size": 100,
            "query": {
                "bool": {
                    "must": [
                        {
                            "query_string": {
                                "query":kibanaquery,
                                "analyze_wildcard": 'true'
                            }
                        },
                        {
                            "range": {
                                "@timestamp": {
                                    "from": "now-"+str(from_date_int)+"s",
                                    "to": "now-"+str(to_date_int)+"s"
                                }
                            }
                        }
                    ],
                    "must_not": []
                }
            },
            "docvalue_fields": [
                "@timestamp"
            ]
        }
        result= client.search(index=index, body=document,scroll='2m')
        sid = result['_scroll_id']
        scroll_size = result['hits']['total']
        scroll=[]
        result1=[]
        newlist=[]
        newlist1=[]
        keyarray1=[]
        keyarray2=[]
        resultlist=[]
        # Pair each requested field name with its checkbox value.
        # NOTE(review): list1 is iterated twice below; zip() is single-use
        # on Python 3, so this only behaves as written on Python 2.
        list1=zip(keyarray,newlstvalues)
        for row in result['hits']['hits']:
            resultx=row["_source"]
            t1=resultx.keys()
            # Keep fields that actually occur in the first page of results.
            for c,d in list1:
                for e in t1:
                    if c == e:
                        newlist.append(c)
                        newlist1.append(d)
        # keyarray1: fields both ticked (value != "None") and present.
        for c,d in zip(newlist,newlist1):
            if d != "None":
                keyarray1.append(c)
        l=len(keyarray1)
        # keyarray2: all ticked fields; keyarray3: ticked but absent in data.
        for c,d in list1:
            if d != "None":
                keyarray2.append(c)
        keyarray3=list(set(keyarray2) - set(keyarray1))
        t3 = result['hits']['total']
        # Drain the remaining pages via the scroll API, collecting the
        # value of every kept field from every hit into a flat list.
        while (scroll_size > 0):
            result = client.scroll(scroll_id = sid, scroll = '2m')
            #zipped list that contains fields of index and its values
            for row in result['hits']['hits']:
                for x in keyarray1:
                    result1=row["_source"][x]
                    resultlist.append(result1)
            sid = result['_scroll_id']
            scroll_size = len(result['hits']['hits'])
        res=len(resultlist)
        # Split the flat value list back into rows of len(keyarray1) items.
        def chunks(l, n):
            for i in range(0, len(l), n):
                yield l[i:i+n]
        splitlist=list(chunks(resultlist, l))
        #adding the field element at the end of the list
        keyarray1.extend(keyarray3)
        import csv
        with open("fields.csv", "wb") as myfile:
            writer = csv.writer(myfile, quoting=csv.QUOTE_ALL)
            writer.writerow(keyarray1)
            for row in splitlist:
                writer.writerow(row)
        #code to read the csv file
        with open('fields.csv', 'rb') as myfile:
            response = HttpResponse(myfile, content_type='text/csv')
            response['Content-Disposition'] = 'attachment; filename=fields.csv'
            return response
    # NOTE(review): on a non-POST request t3/res are unbound at this point —
    # verify against the original file whether this line sits elsewhere.
    return render(request, 'application/docs.html', {'res1':t3,'scroll':res})
|
[
"anshulwaia128@gmail.com"
] |
anshulwaia128@gmail.com
|
d34afd28088c387fc104acc632df1276df76726e
|
b2c070e09bff49241fcff98bcde825cfa96e93ca
|
/HackerEarth/Recursion/SubsetGeneration.py
|
9af011b3289a694f328f9d18d4a03292e2e93f09
|
[
"MIT"
] |
permissive
|
Beryl2208/CI-2
|
dcb1b923f9c4f1f8b167c36c8b22a80522322c53
|
f671292dad2695e37458866442a6b951ba4e1a71
|
refs/heads/master
| 2022-12-26T19:11:28.559911
| 2020-10-06T06:27:51
| 2020-10-06T06:27:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 743
|
py
|
# Subset or Subsequence generation
# Input - "abc", Output - "a", "b", "c", "ab", "ac", "abc", "bc"
# Input - "abcd", Output - "a", "b", "c", "d", "ab", "ac", "ad", "abc", "acd", "abd", "abcd", "bc", "bcd", "bd", "cd"
# "abc" "ab" "ac" "a" "bc" "b" "c" ""
# \ / \ / \ / \ /
# "ab" "a" "b" ""
# \ / \ /
# "a" ""
# \ /
# curr = ""
# Options -
# 1) Consider curr as a part of subset
# 2) Do not consider curr as a part of subset
def Subset(s, index=0, curr=''):
    """Print every subsequence of s[index:] prefixed with *curr*, space-separated.

    Iterative depth-first traversal with an explicit stack; at each
    position the character is either included or excluded, include-first,
    reproducing the original recursive print order exactly.
    """
    stack = [(index, curr)]
    while stack:
        i, prefix = stack.pop()
        if i == len(s):
            print(prefix, end=' ')
            continue
        # Push "exclude" first so "include" is popped (explored) first.
        stack.append((i + 1, prefix))
        stack.append((i + 1, prefix + s[i]))
Subset("abc")
print()
Subset("abcd")
print()
|
[
"AbhiSaphire@github.com"
] |
AbhiSaphire@github.com
|
2641b37d027fbff1ece30b7f2825fb2fcbd20653
|
7950c4faf15ec1dc217391d839ddc21efd174ede
|
/leetcode-cn/0150.0_Evaluate_Reverse_Polish_Notation.py
|
0a7404c8bbd5ea8d7d771e5b14d18c16066b3ef5
|
[] |
no_license
|
lixiang2017/leetcode
|
f462ecd269c7157aa4f5854f8c1da97ca5375e39
|
f93380721b8383817fe2b0d728deca1321c9ef45
|
refs/heads/master
| 2023-08-25T02:56:58.918792
| 2023-08-22T16:43:36
| 2023-08-22T16:43:36
| 153,090,613
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,151
|
py
|
'''
approach: Stack
Time: O(N)
Space: O(N)
执行用时:32 ms, 在所有 Python 提交中击败了60.21%的用户
内存消耗:14.3 MB, 在所有 Python 提交中击败了76.44%的用户
'''
class Solution(object):
    """LeetCode 150 — evaluate Reverse Polish Notation with a stack."""

    def evalRPN(self, tokens):
        """
        Push tokens; whenever the top of the stack is an operator with two
        operands beneath it, reduce the three entries to one value.

        :type tokens: List[str]
        :rtype: int
        """
        stack = []
        for token in tokens:
            stack.append(token)
            while len(stack) >= 3 and stack[-1] in ('+', '-', '*', '/'):
                op = stack.pop()
                rhs = int(stack.pop())
                lhs = int(stack.pop())
                if op == '+':
                    value = lhs + rhs
                elif op == '-':
                    value = lhs - rhs
                elif op == '*':
                    value = lhs * rhs
                else:
                    # Division between two integers truncates toward zero.
                    value = int(lhs * 1.0 / rhs)
                stack.append(value)
        return int(stack[-1])
|
[
"lixiang@rxthinking.com"
] |
lixiang@rxthinking.com
|
0d51a3c5f0b0c6421d7aa1d1e00845b6402846f4
|
aacb6439137be5b0ad694ebb5ecae1844223d0aa
|
/ex001.py
|
643b0abf024e25f17116526058958999be8c92bb
|
[] |
no_license
|
raphaelsmuniz/cursoEmVideoPython
|
3a47507347a79267b1efd6e4dff9fda4bb424a7a
|
08f7baeba3fd9a848adf87b403284da9fba17932
|
refs/heads/master
| 2021-12-15T10:04:53.576411
| 2021-12-08T18:46:30
| 2021-12-08T18:46:30
| 128,147,369
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 83
|
py
|
# Ask the user for their name and greet them (prompt text is Portuguese).
nome = input('Qual é o seu nome? ')
print('É um grande prazer te conhecer', nome)
|
[
"raphaelnsx@gmail.com"
] |
raphaelnsx@gmail.com
|
91b770c32ce191e1618b591a09cd70ba96ab08fe
|
96449d8ba4addbf1758ea923097aaa8b799c9283
|
/chapter 6/in_and_is.py
|
e5a786aed37096bbae3d40b1a5cc8d1592073ea1
|
[] |
no_license
|
Pravin2796/python-practice-
|
446905714f9cb592e091c858719ff045005329a3
|
d8d631ba1ccd7d1b7863d7b0f58099eae93ec54d
|
refs/heads/master
| 2023-07-18T12:10:44.989715
| 2021-08-30T12:29:24
| 2021-08-30T12:29:24
| 401,336,363
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 116
|
py
|
# Demonstrate the `in` membership operator on a list.
# (An earlier commented-out experiment with `is` was removed: the line
# `if (a is a None):` is not valid Python syntax.)
a = [45, 20, 654]
print(45 in a)
|
[
"praviinjd@gmail.com"
] |
praviinjd@gmail.com
|
2a06563b85200a8f318510a6b133096d34966f34
|
a920ae4837161ba75d82efaec118a73f846bab7b
|
/eelbrain/vessels/process.py
|
e5d0644f55af1db1505ee74f7ff4a883853c55f3
|
[] |
no_license
|
kriek197/Eelbrain
|
316b422e7137d32003d9c787c3aee7c444f2a438
|
03c06ba4517307821ff2c1811a3edd34b7274cf0
|
refs/heads/master
| 2020-12-25T09:00:40.915929
| 2012-03-07T16:44:31
| 2012-03-07T16:44:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,394
|
py
|
'''
Created on Feb 24, 2012
@author: christian
'''
import mdp as _mdp
import data as _data
def rm_pca(ds, rm=[], source='MEG', target='MEG'):
    """
    Perform PCA and remove certain components. Use gui.pca to find components
    initially. Algorithm from the gui!

    :param ds: dataset holding the source data; receives the cleaned result
    :param rm: indices of PCA components to remove (must be non-empty)
    :param source: key into ds, or the data object itself
    :param target: key under which the cleaned ndvar is stored in ds
    """
    # NOTE(review): mutable default rm=[] is shared across calls; harmless
    # here since it is only read — but worth confirming.
    if not rm:
        raise ValueError("No components selected")
    if isinstance(source, basestring):  # Python 2 string check
        source = ds[source]
    rm = sorted(rm)
    # Only the first max(rm)+1 components are needed to isolate those in rm.
    n_comp = max(rm) + 1
    # do the pca
    pca = _mdp.nodes.PCANode(output_dim=n_comp)
    for epoch in source.data:
        pca.train(epoch)
    pca.stop_training()
    # remove the components
    n_epochs, n_t, n_sensors = source.data.shape
    data = source.data.copy() # output data
    # take serialized data views for working with the PCANode
    new_data = data.view()
    old_data = source.data.view()
    # reshape the views (flatten epochs x time into one axis)
    new_data.shape = (n_epochs * n_t, n_sensors)
    old_data.shape = (n_epochs * n_t, n_sensors)
    # project the components and remove: zero every component we KEEP, so
    # inverting `proj` yields only the signal carried by components in rm
    proj = pca.execute(old_data)
    for i in xrange(proj.shape[1]):
        if i not in rm:
            proj[:,i] = 0
    rm_comp_data = pca.inverse(proj)
    # subtracting through the view modifies `data` in place
    new_data -= rm_comp_data
    # create the output new ndvar
    dims = source.dims
    properties = source.properties
    ds[target] = _data.ndvar(dims, data, properties, name=target)
|
[
"christian@Christian-Brodbecks-MacBook-Pro.local"
] |
christian@Christian-Brodbecks-MacBook-Pro.local
|
fae49df9e810cfda124a6a5a4f2482b7634a7e3d
|
2b95304dd1d5eb74b7fe55929d04d42ad7617cfe
|
/nursery/migrations/0005_auto_20210109_1707.py
|
5ee8a517166acef6190733ed3c34a6c72c579266
|
[] |
no_license
|
dicegame363/Plant-and-Nursery
|
4e04bb29af74236730e3b75209dac99d1f74d6a0
|
7ad105a37af7f429be6d55fc8160c2ca45e4001c
|
refs/heads/main
| 2023-02-15T21:57:09.375526
| 2021-01-13T05:34:36
| 2021-01-13T05:34:36
| 328,098,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
# Generated by Django 3.1.5 on 2021-01-09 11:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Recreate ``orderproperty.plant`` as a non-null FK to ``nursery.plant``.

    The field is removed and re-added (rather than altered); existing rows
    get the one-off default pk=1, and ``preserve_default=False`` keeps that
    default out of the model afterwards.
    """

    dependencies = [
        ('nursery', '0004_auto_20210109_1701'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='orderproperty',
            name='plant',
        ),
        migrations.AddField(
            model_name='orderproperty',
            name='plant',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='nursery.plant'),
            preserve_default=False,
        ),
    ]
|
[
"dicegamemail@gmail.com"
] |
dicegamemail@gmail.com
|
36e4e2986c5061bf137f8a9132190a868617ee89
|
2205147e87a78f6056188ef20459f193e7601ae6
|
/paillierself.py
|
f314edcf51f4e8039b80935a2e1de186a1e8799b
|
[] |
no_license
|
chenggang12138/decision
|
1f707447d0d6f55f0c38316bfbd7df0a2c7c96f7
|
320dc0667cefa5b537aaac498f2e08eb2966770b
|
refs/heads/main
| 2023-06-02T00:46:26.460597
| 2021-06-22T07:32:25
| 2021-06-22T07:32:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,788
|
py
|
"""Paillier encryption library for partially homomorphic encryption."""
import random
try:
from collections.abc import Mapping
except ImportError:
Mapping = dict
from phe import EncodedNumber
from phe.util import invert, powmod, getprimeover, isqrt
DEFAULT_KEYSIZE = 2048
def generate_paillier_keypair(private_keyring=None, n_length=DEFAULT_KEYSIZE):
    """Generate a Paillier keypair whose modulus is exactly *n_length* bits.

    Two distinct primes of n_length/2 bits are drawn until their product
    has the requested bit length. The private key is registered on
    *private_keyring* when one is supplied.

    :return: tuple (n, public_key, private_key)
    """
    half_bits = n_length // 2
    while True:
        p = getprimeover(half_bits)
        q = p
        while q == p:  # primes must differ
            q = getprimeover(half_bits)
        n = p * q
        if n.bit_length() == n_length:
            break

    public_key = PaillierPublicKey(n)
    private_key = PaillierPrivateKey(public_key, p, q)

    if private_keyring is not None:
        private_keyring.add(private_key)

    return n, public_key, private_key
class PaillierPublicKey(object):
    """Paillier public key.

    Holds the modulus ``n``, the generator ``g = n + 1`` and derived
    values: ``nsquare`` (the ciphertext modulus) and ``max_int`` (about
    n/3, the largest positive value the encoding scheme uses directly).
    """
    def __init__(self, n):
        # g = n + 1 enables the shortcut (n+1)^m = n*m + 1 (mod n^2).
        self.g = n + 1
        self.n = n
        self.nsquare = n * n
        self.max_int = n // 3 - 1

    def __repr__(self):
        # Identify keys by a prefix of their hash rather than exposing n.
        publicKeyHash = hex(hash(self))[2:]
        return "<PaillierPublicKey {}>".format(publicKeyHash[:10])

    def __eq__(self, other):
        return self.n == other.n

    def __hash__(self):
        return hash(self.n)

    def raw_encrypt(self, plaintext, r_value=None):
        """Encrypt an already-encoded int: (g^m * r^n) mod n^2.

        :param plaintext: int to encrypt.
        :param r_value: obfuscation randomness; drawn fresh when None.
        :raises TypeError: if plaintext is not an int.
        """
        if not isinstance(plaintext, int):
            raise TypeError('Expected int type plaintext but got: %s' %
                            type(plaintext))
        if self.n - self.max_int <= plaintext < self.n:
            # Very large plaintext, take a sneaky shortcut using inverses
            neg_plaintext = self.n - plaintext  # = abs(plaintext - nsquare)
            neg_ciphertext = (self.n * neg_plaintext + 1) % self.nsquare
            nude_ciphertext = invert(neg_ciphertext, self.nsquare)
        else:
            # we chose g = n + 1, so that we can exploit the fact that
            # (n+1)^plaintext = n*plaintext + 1 mod n^2
            nude_ciphertext = (self.n * plaintext + 1) % self.nsquare
        r = r_value or self.get_random_lt_n()
        obfuscator = powmod(r, self.n, self.nsquare)
        return (nude_ciphertext * obfuscator) % self.nsquare

    def get_random_lt_n(self):
        """Return a cryptographically random number less than :attr:`n`"""
        return random.SystemRandom().randrange(1, self.n)

    def encrypt(self, value, precision=None, r_value=None):
        """Encode *value* (unless already an EncodedNumber) and encrypt it."""
        if isinstance(value, EncodedNumber):
            encoding = value
        else:
            encoding = EncodedNumber.encode(self, value, precision)
        return self.encrypt_encoded(encoding, r_value)

    def encrypt_encoded(self, encoding, r_value):
        """Encrypt an EncodedNumber, carrying over its exponent."""
        # If r_value is None, obfuscate in a call to .obfuscate() (below)
        obfuscator = r_value or 1
        ciphertext = self.raw_encrypt(encoding.encoding, r_value=obfuscator)
        encrypted_number = EncryptedNumber(self, ciphertext, encoding.exponent)
        if r_value is None:
            encrypted_number.obfuscate()
        return encrypted_number
class PaillierPrivateKey(object):
    """Paillier private key built from the prime factors p, q of the modulus.

    Precomputes the CRT helpers (p_inverse, hp, hq) used by
    :meth:`raw_decrypt` — Paillier's "decryption using
    Chinese-remaindering" variant.
    """
    def __init__(self, public_key, p, q):
        if not p*q == public_key.n:
            raise ValueError('given public key does not match the given p and q.')
        if p == q:
            # check that p and q are different, otherwise we can't compute p^-1 mod q
            raise ValueError('p and q have to be different')
        self.public_key = public_key
        if q < p: #ensure that p < q.
            self.p = q
            self.q = p
        else:
            self.p = p
            self.q = q
        self.psquare = self.p * self.p
        self.qsquare = self.q * self.q
        self.p_inverse = invert(self.p, self.q)
        self.hp = self.h_function(self.p, self.psquare)
        self.hq = self.h_function(self.q, self.qsquare)

    @staticmethod
    def from_totient(public_key, totient):
        """Recover p and q from n and its totient, then build the key.

        Uses p+q = n - totient + 1 and solves the resulting quadratic.
        """
        p_plus_q = public_key.n - totient + 1
        p_minus_q = isqrt(p_plus_q * p_plus_q - public_key.n * 4)
        q = (p_plus_q - p_minus_q) // 2
        p = p_plus_q - q
        if not p*q == public_key.n:
            raise ValueError('given public key and totient do not match.')
        return PaillierPrivateKey(public_key, p, q)

    def __repr__(self):
        pub_repr = repr(self.public_key)
        return "<PaillierPrivateKey for {}>".format(pub_repr)

    def decrypt(self, encrypted_number):
        """Decrypt *encrypted_number* and decode it back to int/float."""
        encoded = self.decrypt_encoded(encrypted_number)
        return encoded.decode()

    def decrypt_encoded(self, encrypted_number, Encoding=None):
        """Decrypt to an EncodedNumber (without decoding).

        :raises TypeError: if not given an EncryptedNumber.
        :raises ValueError: if the ciphertext belongs to another key.
        """
        if not isinstance(encrypted_number, EncryptedNumber):
            raise TypeError('Expected encrypted_number to be an EncryptedNumber'
                            ' not: %s' % type(encrypted_number))
        if self.public_key != encrypted_number.public_key:
            raise ValueError('encrypted_number was encrypted against a '
                             'different key!')
        if Encoding is None:
            Encoding = EncodedNumber
        # be_secure=False: no need to obfuscate a ciphertext we only decrypt.
        encoded = self.raw_decrypt(encrypted_number.ciphertext(be_secure=False))
        return Encoding(self.public_key, encoded,
                        encrypted_number.exponent)

    def raw_decrypt(self, ciphertext):
        """Decrypt a raw int ciphertext via CRT over p and q."""
        if not isinstance(ciphertext, int):
            raise TypeError('Expected ciphertext to be an int, not: %s' %
                            type(ciphertext))
        decrypt_to_p = self.l_function(powmod(ciphertext, self.p-1, self.psquare), self.p) * self.hp % self.p
        decrypt_to_q = self.l_function(powmod(ciphertext, self.q-1, self.qsquare), self.q) * self.hq % self.q
        return self.crt(decrypt_to_p, decrypt_to_q)

    def h_function(self, x, xsquare):
        """Computes the h-function as defined in Paillier's paper page 12,
        'Decryption using Chinese-remaindering'.
        """
        return invert(self.l_function(powmod(self.public_key.g, x - 1, xsquare),x), x)

    def l_function(self, x, p):
        """Computes the L function as defined in Paillier's paper. That is: L(x,p) = (x-1)/p"""
        return (x - 1) // p

    def crt(self, mp, mq):
        """Combine the residues mod p and mod q via the CRT."""
        u = (mq - mp) * self.p_inverse % self.q
        return mp + (u * self.p)

    def __eq__(self, other):
        return self.p == other.p and self.q == other.q

    def __hash__(self):
        return hash((self.p, self.q))
class PaillierPrivateKeyring(Mapping):
    """Mapping from PaillierPublicKey to its matching PaillierPrivateKey."""

    def __init__(self, private_keys=None):
        # Key each private key by its public counterpart.
        self.__keyring = {
            key.public_key: key for key in (private_keys or [])
        }

    def __getitem__(self, key):
        return self.__keyring[key]

    def __len__(self):
        return len(self.__keyring)

    def __iter__(self):
        return iter(self.__keyring)

    def __delitem__(self, public_key):
        del self.__keyring[public_key]

    def add(self, private_key):
        """Register *private_key*, keyed by its public key.

        :raises TypeError: if given anything but a PaillierPrivateKey.
        """
        if not isinstance(private_key, PaillierPrivateKey):
            raise TypeError("private_key should be of type PaillierPrivateKey, "
                            "not %s" % type(private_key))
        self.__keyring[private_key.public_key] = private_key

    def decrypt(self, encrypted_number):
        """Decrypt using whichever stored key matches the ciphertext's public key."""
        matching_key = self.__keyring[encrypted_number.public_key]
        return matching_key.decrypt(encrypted_number)
class EncryptedNumber(object):
def __init__(self, public_key, ciphertext, exponent=0):
self.public_key = public_key
self.__ciphertext = ciphertext
self.exponent = exponent
self.__is_obfuscated = False
if isinstance(self.ciphertext, EncryptedNumber):
raise TypeError('ciphertext should be an integer')
if not isinstance(self.public_key, PaillierPublicKey):
raise TypeError('public_key should be a PaillierPublicKey')
def __add__(self, other):
"""Add an int, float, `EncryptedNumber` or `EncodedNumber`."""
if isinstance(other, EncryptedNumber):
return self._add_encrypted(other)
elif isinstance(other, EncodedNumber):
return self._add_encoded(other)
else:
return self._add_scalar(other)
def __radd__(self, other):
"""Called when Python evaluates `34 + <EncryptedNumber>`
Required for builtin `sum` to work.
"""
return self.__add__(other)
def __mul__(self, other):
"""Multiply by an int, float, or EncodedNumber."""
if isinstance(other, EncryptedNumber):
raise NotImplementedError('Good luck with that...')
if isinstance(other, EncodedNumber):
encoding = other
else:
encoding = EncodedNumber.encode(self.public_key, other)
product = self._raw_mul(encoding.encoding)
exponent = self.exponent + encoding.exponent
return EncryptedNumber(self.public_key, product, exponent)
def __rmul__(self, other):
return self.__mul__(other)
def __sub__(self, other):
return self + (other * -1)
def __rsub__(self, other):
return other + (self * -1)
def __truediv__(self, scalar):
return self.__mul__(1 / scalar)
def ciphertext(self, be_secure=True):
if be_secure and not self.__is_obfuscated:
self.obfuscate()
return self.__ciphertext
def decrease_exponent_to(self, new_exp):
if new_exp > self.exponent:
raise ValueError('New exponent %i should be more negative than '
'old exponent %i' % (new_exp, self.exponent))
multiplied = self * pow(EncodedNumber.BASE, self.exponent - new_exp)
multiplied.exponent = new_exp
return multiplied
def obfuscate(self):
r = self.public_key.get_random_lt_n()
r_pow_n = powmod(r, self.public_key.n, self.public_key.nsquare)
self.__ciphertext = self.__ciphertext * r_pow_n % self.public_key.nsquare
self.__is_obfuscated = True
def _add_scalar(self, scalar):
encoded = EncodedNumber.encode(self.public_key, scalar,
max_exponent=self.exponent)
return self._add_encoded(encoded)
def _add_encoded(self, encoded):
if self.public_key != encoded.public_key:
raise ValueError("Attempted to add numbers encoded against "
"different public keys!")
# In order to add two numbers, their exponents must match.
a, b = self, encoded
if a.exponent > b.exponent:
a = self.decrease_exponent_to(b.exponent)
elif a.exponent < b.exponent:
b = b.decrease_exponent_to(a.exponent)
# Don't bother to salt/obfuscate in a basic operation, do it
# just before leaving the computer.
encrypted_scalar = a.public_key.raw_encrypt(b.encoding, 1)
sum_ciphertext = a._raw_add(a.ciphertext(False), encrypted_scalar)
return EncryptedNumber(a.public_key, sum_ciphertext, a.exponent)
    def _add_encrypted(self, other):
        """Return E(a + b) for two EncryptedNumbers under the same key.

        Relies on the Paillier homomorphic property: the product of two
        ciphertexts decrypts to the sum of their plaintexts.

        Raises:
          ValueError: if the operands were encrypted under different
            public keys.
        """
        if self.public_key != other.public_key:
            raise ValueError("Attempted to add numbers encrypted against "
                             "different public keys!")
        # In order to add two numbers, their exponents must match.
        a, b = self, other
        if a.exponent > b.exponent:
            a = self.decrease_exponent_to(b.exponent)
        elif a.exponent < b.exponent:
            b = b.decrease_exponent_to(a.exponent)
        sum_ciphertext = a._raw_add(a.ciphertext(False), b.ciphertext(False))
        return EncryptedNumber(a.public_key, sum_ciphertext, a.exponent)
def _raw_add(self, e_a, e_b):
return e_a * e_b % self.public_key.nsquare
    def _raw_mul(self, plaintext):
        """Multiply the raw ciphertext by a plaintext int, mod n**2.

        Computes ``c ** plaintext mod n**2``, which decrypts to
        ``plaintext * m``. When the plaintext is close to n (the encoding
        of a negative number), the modular inverse of c is raised to
        ``n - plaintext`` instead, avoiding an enormous exponent.

        Args:
          plaintext (int): integer in ``[0, public_key.n)``.

        Returns:
          int: the product ciphertext, without obfuscation.

        Raises:
          TypeError: if ``plaintext`` is not an int.
          ValueError: if ``plaintext`` is out of range.
        """
        if not isinstance(plaintext, int):
            # NOTE(review): message says "ciphertext" but it is the
            # plaintext argument being checked.
            raise TypeError('Expected ciphertext to be int, not %s' %
                            type(plaintext))
        if plaintext < 0 or plaintext >= self.public_key.n:
            raise ValueError('Scalar out of bounds: %i' % plaintext)
        if self.public_key.n - self.public_key.max_int <= plaintext:
            # Very large plaintext, play a sneaky trick using inverses
            neg_c = invert(self.ciphertext(False), self.public_key.nsquare)
            neg_scalar = self.public_key.n - plaintext
            return powmod(neg_c, neg_scalar, self.public_key.nsquare)
        else:
            return powmod(self.ciphertext(False), plaintext, self.public_key.nsquare)
|
[
"noreply@github.com"
] |
chenggang12138.noreply@github.com
|
1e65974aa90d149f18d9f0db6df93ae6acdfea77
|
2273e24209e914b106482b1d3950e3e340f57ead
|
/netmiko_ex1.py
|
a95c4055c955a7757226a2d337c64aef1b287392
|
[
"Apache-2.0"
] |
permissive
|
bashamshaik/pynet_test8
|
3bf416dc93b801ee4ba902a07e8555dae5f5d7ed
|
e2561b016b9627d9ae31cc8bdcfa005a9c773232
|
refs/heads/master
| 2020-06-24T04:01:41.211031
| 2017-07-13T19:11:28
| 2017-07-13T19:11:28
| 96,918,166
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,176
|
py
|
#!/usr/bin/env python
"""Exercises using Netmiko"""
from __future__ import print_function
from getpass import getpass
from netmiko import ConnectHandler
def save_file(filename, contents):
    """Write *contents* to *filename*, overwriting any existing file."""
    with open(filename, "w") as f:
        f.write(contents)


def main():
    """Connect to each device, show its version, and save its config.

    Fixes relative to the original script:
    * ``getpass`` was stored uncalled, so the function object itself was
      used as the password -- it is now invoked once and reused.
    * ``save_file`` and ``main`` were referenced but never defined
      (NameError at runtime).
    * The device loop now runs only under ``__main__`` instead of as an
      import-time side effect.
    """
    password = getpass()
    pynet_rtr1 = {
        'device_type': 'cisco_ios',
        'ip': '184.105.247.70',
        'username': 'pyclass',
        'password': password,
    }
    pynet_srx = {
        'device_type': 'juniper_junos',
        'ip': '184.105.247.76',
        'username': 'pyclass',
        'password': password,
    }
    for a_device in (pynet_rtr1, pynet_srx):
        net_connect = ConnectHandler(**a_device)
        print("Current Prompt: " + net_connect.find_prompt())
        show_ver = net_connect.send_command("show version")
        print()
        print('#' * 80)
        print(show_ver)
        print('#' * 80)
        print()
        # Pick the vendor-appropriate "show config" command.
        if 'cisco' in a_device['device_type']:
            cmd = "show run"
        elif 'juniper' in a_device['device_type']:
            cmd = "show configuration"
        show_run = net_connect.send_command(cmd)
        filename = net_connect.base_prompt + ".txt"
        print("Save show run output: {}\n".format(filename))
        save_file(filename, show_run)


if __name__ == "__main__":
    main()
|
[
"jdoe@domain.com"
] |
jdoe@domain.com
|
0d049d8ba10dab9d75bd9355eb364b3565a2349b
|
6a7e9e0e9c08132166f566bd88ae1c46ff8f9c0a
|
/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/generic_container.py
|
f6e8fff7ae9f79d55e3c6619b9dd2ff2044fb9c6
|
[
"MIT"
] |
permissive
|
ashirey-msft/azure-sdk-for-python
|
d92381d11c48f194ec9f989f5f803db614fb73f2
|
e04778e13306dad2e8fb044970215bad6296afb6
|
refs/heads/master
| 2020-03-23T06:05:39.283442
| 2018-09-15T00:18:26
| 2018-09-15T00:18:26
| 141,188,192
| 0
| 1
|
MIT
| 2018-07-16T20:02:52
| 2018-07-16T20:02:52
| null |
UTF-8
|
Python
| false
| false
| 2,678
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .protection_container import ProtectionContainer
class GenericContainer(ProtectionContainer):
    """Base class for generic container of backup items.

    AutoRest-generated model -- hand edits are lost on regeneration, so
    this block only carries documentation.

    All required parameters must be populated in order to send to Azure.

    :param friendly_name: Friendly name of the container.
    :type friendly_name: str
    :param backup_management_type: Type of backup management for the
     container. Possible values include: 'Invalid', 'AzureIaasVM', 'MAB',
     'DPM', 'AzureBackupServer', 'AzureSql', 'AzureStorage', 'AzureWorkload',
     'DefaultBackup'
    :type backup_management_type: str or
     ~azure.mgmt.recoveryservicesbackup.models.BackupManagementType
    :param registration_status: Status of registration of the container with
     the Recovery Services Vault.
    :type registration_status: str
    :param health_status: Status of health of the container.
    :type health_status: str
    :param container_type: Required. Constant filled by server.
    :type container_type: str
    :param fabric_name: Name of the container's fabric
    :type fabric_name: str
    :param extended_information: Extended information (not returned in List
     container API calls)
    :type extended_information:
     ~azure.mgmt.recoveryservicesbackup.models.GenericContainerExtendedInfo
    """

    # Fields msrest requires to be present before serialisation.
    _validation = {
        'container_type': {'required': True},
    }

    # msrest (de)serialisation map: attribute name -> wire key and type.
    _attribute_map = {
        'friendly_name': {'key': 'friendlyName', 'type': 'str'},
        'backup_management_type': {'key': 'backupManagementType', 'type': 'str'},
        'registration_status': {'key': 'registrationStatus', 'type': 'str'},
        'health_status': {'key': 'healthStatus', 'type': 'str'},
        'container_type': {'key': 'containerType', 'type': 'str'},
        'fabric_name': {'key': 'fabricName', 'type': 'str'},
        'extended_information': {'key': 'extendedInformation', 'type': 'GenericContainerExtendedInfo'},
    }

    def __init__(self, **kwargs):
        super(GenericContainer, self).__init__(**kwargs)
        self.fabric_name = kwargs.get('fabric_name', None)
        self.extended_information = kwargs.get('extended_information', None)
        # Polymorphic discriminator: a fixed constant for this subtype.
        self.container_type = 'GenericContainer'
|
[
"noreply@github.com"
] |
ashirey-msft.noreply@github.com
|
0ec032d171d3f69969f5f45b107df6415097393f
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/sieve-big-6283.py
|
56c4dc6454c5bcb3750e15efea45835eab1b8d51
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,757
|
py
|
# A resizable list of integers
class Vector(object):
    # Backing storage; replaced with a one-slot list in __init__. `size`
    # counts live elements and may be less than the allocated capacity.
    items: [int] = None
    size: int = 0
    def __init__(self:"Vector"):
        self.items = [0]
    # Returns current capacity
    def capacity(self:"Vector") -> int:
        return len(self.items)
    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector
    def append(self:"Vector", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends many items to end of vector
    def append_all(self:"Vector", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)
    # Removes an item from the middle of vector
    # NOTE(review): `size` is decremented even when idx >= size, which
    # silently shrinks the vector for out-of-range indices -- confirm
    # this is intended by the benchmark generator.
    def remove_at(self:"Vector", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves an item at a given index
    def get(self:"Vector", idx: int) -> int:
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector") -> int:
        return self.size
# A resizable list of integers
class Vector2(object):
    # Benchmark padding: the *2 members and extra parameters duplicate the
    # originals but still operate on `items`/`size` only; items2/size2 and
    # the itemN/idxN arguments are never read.
    items: [int] = None
    items2: [int] = None
    size: int = 0
    size2: int = 0
    def __init__(self:"Vector2"):
        self.items = [0]
    # Returns current capacity
    def capacity(self:"Vector2") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity2(self:"Vector2") -> int:
        return len(self.items)
    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector2") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity2(self:"Vector2") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector
    def append(self:"Vector2", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append2(self:"Vector2", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends many items to end of vector
    def append_all(self:"Vector2", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)
    # Removes an item from the middle of vector
    def remove_at(self:"Vector2", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves an item at a given index
    def get(self:"Vector2", idx: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get2(self:"Vector2", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector2") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length2(self:"Vector2") -> int:
        return self.size
# A resizable list of integers
class Vector3(object):
    # Benchmark padding: the *2/*3 members and extra parameters duplicate
    # the originals but operate on `items`/`size` only; items2/3, size2/3
    # and the extra itemN/idxN arguments are never read.
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    def __init__(self:"Vector3"):
        self.items = [0]
    # Returns current capacity
    def capacity(self:"Vector3") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity2(self:"Vector3") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity3(self:"Vector3") -> int:
        return len(self.items)
    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity2(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity3(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector
    def append(self:"Vector3", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append2(self:"Vector3", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends many items to end of vector
    def append_all(self:"Vector3", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)
    # Removes an item from the middle of vector
    def remove_at(self:"Vector3", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves an item at a given index
    def get(self:"Vector3", idx: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get2(self:"Vector3", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector3") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length2(self:"Vector3") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length3(self:"Vector3") -> int:
        return self.size
# A resizable list of integers
class Vector4(object):
    # Benchmark padding: the *2..*4 members and extra parameters duplicate
    # the originals but operate on `items`/`size` only; the numbered
    # fields and extra itemN/idxN arguments are never read.
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0
    def __init__(self:"Vector4"):
        self.items = [0]
    # Returns current capacity
    def capacity(self:"Vector4") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity2(self:"Vector4") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity3(self:"Vector4") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity4(self:"Vector4") -> int:
        return len(self.items)
    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity2(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity3(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity4(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector
    def append(self:"Vector4", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append2(self:"Vector4", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends many items to end of vector
    def append_all(self:"Vector4", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        for item in new_items:
            self.append(item)
    # Removes an item from the middle of vector
    def remove_at(self:"Vector4", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves an item at a given index
    def get(self:"Vector4", idx: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get2(self:"Vector4", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector4") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length2(self:"Vector4") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length3(self:"Vector4") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length4(self:"Vector4") -> int:
        return self.size
# A resizable list of integers
class Vector5(object):
    # Benchmark padding: the *2..*5 members and extra parameters duplicate
    # the originals but operate on `items`/`size` only; the numbered
    # fields and extra itemN/idxN arguments are never read.
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    items5: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0
    size5: int = 0
    def __init__(self:"Vector5"):
        self.items = [0]
    # Returns current capacity
    def capacity(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity2(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity3(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity4(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity5(self:"Vector5") -> int:
        return len(self.items)
    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity2(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity3(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity4(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity5(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector
    def append(self:"Vector5", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append2(self:"Vector5", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends many items to end of vector
    def append_all(self:"Vector5", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        item5:int = 0
        for item in new_items:
            self.append(item)
    # Removes an item from the middle of vector
    def remove_at(self:"Vector5", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves an item at a given index
    def get(self:"Vector5", idx: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get2(self:"Vector5", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    # FIX: the return annotation was a literal `$Type` placeholder leaked
    # by the benchmark generator (invalid syntax); it is `int`, matching
    # get/get2/get3/get5.
    def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length2(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length3(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length4(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length5(self:"Vector5") -> int:
        return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
    # Capacity doubles until it reaches this threshold, then grows by one.
    doubling_limit:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
    # Benchmark padding: doubling_limit2 and increase_capacity2 duplicate
    # the originals and are never exercised by the driver below.
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector2") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector2") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
    # Benchmark padding: the *2/*3 members duplicate the originals and are
    # never exercised by the driver below.
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
    # Benchmark padding: the *2..*4 members duplicate the originals and
    # are never exercised by the driver below.
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity4(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
    # Benchmark padding: the *2..*5 members duplicate the originals and
    # are never exercised by the driver below.
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    doubling_limit5:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity4(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity5(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
    # Builds a DoublingVector holding i, i+1, ..., j-1 (empty when i >= j).
    v:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
    # Same as vrange; i2/j2 and v2 are unused benchmark-generator padding.
    v:Vector = None
    v2:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
    # Same as vrange; the numbered parameters and locals are unused padding.
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
    # Same as vrange; the numbered parameters and locals are unused padding.
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v4:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
    # Same as vrange; the numbered parameters and locals are unused padding.
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v4:Vector = None
    v5:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
    # In-place: removes every element divisible by an earlier surviving
    # element, so a vector built by vrange(2, n) is reduced to the primes.
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
    # Same as sieve; v2 and the numbered locals are unused padding.
    i:int = 0
    i2:int = 0
    j:int = 0
    j2:int = 0
    k:int = 0
    k2:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
    # Same as sieve; the numbered parameters and locals are unused padding.
    i:int = 0
    i2:int = 0
    i3:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
    # Same as sieve; the numbered parameters and locals are unused padding.
    i:int = 0
    i2:int = 0
    i3:int = 0
    i4:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    j4:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    k4:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
    # Identical to sieve; v2..v5 and the numbered locals are unused padding.
    i:int = 0
    i2:int = 0
    i3:int = 0
    i4:int = 0
    i5:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    j4:int = 0
    j5:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    k4:int = 0
    k5:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Driver: build [2, n), sieve out composites, print the primes below n.
# The *2..*5 variables mirror the vrange2..vrange5 padding and are unused.
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
    print(v.get(i))
    i = i + 1
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
dc0b8088b98d3de65f6b57c8d2a62cb0d9a6c470
|
913d46cbc5abe5556e05b59ffd1b55009a848e32
|
/lanenet-lane-detection/tools/my2train_dense.py
|
2c0f51d4ba0b5671c4a937609deedc2e30051218
|
[
"Apache-2.0"
] |
permissive
|
yeyang1021/Sparse-to-Dense
|
7c3fcd34c1d41e16369788471c0040cdac1dc7fb
|
785c1dc5f05ebf4c267d5c3e46b37463f291e14b
|
refs/heads/master
| 2022-11-27T17:48:00.209011
| 2019-06-06T03:10:20
| 2019-06-06T03:10:20
| 190,501,005
| 1
| 1
| null | 2022-11-21T22:27:55
| 2019-06-06T02:32:59
|
Python
|
UTF-8
|
Python
| false
| false
| 12,068
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time :
# @Author :
# @Site :
# @File : my2train_dense.py
# @IDE: PyCharm Community Edition
"""
训练lanenet模型
"""
import argparse
import math
import os
import os.path as ops
import time
import cv2
import glog as log
import numpy as np
import tensorflow as tf
try:
from cv2 import cv2
except ImportError:
pass
from config import global_config
from lanenet_model import mylanenet1_merge_model
from data_provider import my2_data_processor
CFG = global_config.cfg
VGG_MEAN = [103.939, 116.779, 123.68]
def init_args():
    """Parse the command line arguments used to launch training.

    :return: parsed argparse namespace
    """
    parser = argparse.ArgumentParser()
    # (flag, help text, default) -- all options are plain strings.
    options = [
        ('--dataset_dir', 'The training dataset dir path', None),
        ('--net', 'Which base net work to use', 'vgg'),
        ('--weights_path', 'The pretrained weights path', None),
    ]
    for flag, help_text, default in options:
        if default is None:
            parser.add_argument(flag, type=str, help=help_text)
        else:
            parser.add_argument(flag, type=str, help=help_text, default=default)
    return parser.parse_args()
def train_net(dataset_dir, weights_path=None, net_flag='vgg'):
    """
    Build the LaneNet/dense-depth training graph and run the train/val loop.

    :param dataset_dir: directory containing the ``train.txt`` and ``val.txt``
        index files consumed by my2_data_processor.DataSet
    :param net_flag: choose which base network to use; ``'vgg'`` additionally
        loads vgg16.npy pretrained weights when training from scratch
    :param weights_path: optional checkpoint to restore before training
    :return: None
    """
    train_dataset_file = ops.join(dataset_dir, 'train.txt')
    val_dataset_file = ops.join(dataset_dir, 'val.txt')
    assert ops.exists(train_dataset_file)
    # Per-device loss accumulators: total / dense / x / y losses.
    c = []
    d = []
    e = []
    f = []
    # NOTE(review): this loop builds the graph twice (once per GPU) with the
    # SAME placeholder names, but only the placeholders from the LAST
    # iteration are fed below -- presumably intended as multi-GPU towers;
    # verify whether the /gpu:0 tower is actually reachable at run time.
    for dd in ['/gpu:0', '/gpu:1']:
        with tf.device(dd):
            train_dataset = my2_data_processor.DataSet(train_dataset_file)
            val_dataset = my2_data_processor.DataSet(val_dataset_file)
            # RGB input image, [B, H, W, 3].
            input_tensor1 = tf.placeholder(dtype=tf.float32,
                                           shape=[CFG.TRAIN.BATCH_SIZE, CFG.TRAIN.IMG_HEIGHT,
                                                  CFG.TRAIN.IMG_WIDTH, 3],
                                           name='input_tensor1')
            # Single-channel (sparse depth) input, [B, H, W, 1].
            input_tensor2 = tf.placeholder(dtype=tf.float32,
                                           shape=[CFG.TRAIN.BATCH_SIZE, CFG.TRAIN.IMG_HEIGHT,
                                                  CFG.TRAIN.IMG_WIDTH, 1],
                                           name='input_tensor2')
            # Dense ground-truth label, [B, H, W, 1].
            gt_label = tf.placeholder(dtype=tf.float32,
                                      shape=[CFG.TRAIN.BATCH_SIZE, CFG.TRAIN.IMG_HEIGHT,
                                             CFG.TRAIN.IMG_WIDTH, 1],
                                      name='gt_label')
            # Per-pixel validity mask, [B, H, W, 1].
            mask_label = tf.placeholder(dtype=tf.float32,
                                        shape=[CFG.TRAIN.BATCH_SIZE, CFG.TRAIN.IMG_HEIGHT,
                                               CFG.TRAIN.IMG_WIDTH, 1],
                                        name='mask_label')
            # 4-vector per sample -- presumably camera parameters; TODO confirm
            # against my2_data_processor / the loss implementation.
            cam_label = tf.placeholder(dtype=tf.float32,
                                       shape=[CFG.TRAIN.BATCH_SIZE, 4],
                                       name='cam_label')
            xx_label = tf.placeholder(dtype=tf.float32,
                                      shape=[CFG.TRAIN.BATCH_SIZE, CFG.TRAIN.IMG_HEIGHT,
                                             CFG.TRAIN.IMG_WIDTH, 1],
                                      name='xx_label')
            yy_label = tf.placeholder(dtype=tf.float32,
                                      shape=[CFG.TRAIN.BATCH_SIZE, CFG.TRAIN.IMG_HEIGHT,
                                             CFG.TRAIN.IMG_WIDTH, 1],
                                      name='yy_label')
            # 'train' / 'test' switch consumed by the network (e.g. batch norm).
            phase = tf.placeholder(dtype=tf.string, shape=None, name='net_phase')
            net = mylanenet1_merge_model.LaneNet(net_flag=net_flag, phase=phase)
            # calculate the loss
            compute_ret = net.compute_loss(input_tensor1=input_tensor1, input_tensor2=input_tensor2,
                                           gt_label=gt_label, mask_label=mask_label, cam_label= cam_label, xx_label= xx_label, yy_label = yy_label)
            total_loss = compute_ret['total_loss']
            dense_loss = compute_ret['dense_loss']
            x_loss = compute_ret['x_loss']
            y_loss = compute_ret['y_loss']
            c.append(total_loss)
            d.append(dense_loss)
            e.append(x_loss)
            f.append(y_loss)
    global_step = tf.Variable(0, trainable=False)
    # Exponential LR decay: x0.96 every 5000 steps.
    learning_rate = tf.train.exponential_decay(CFG.TRAIN.LEARNING_RATE, global_step,
                                               5000, 0.96, staircase=True)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    # Run UPDATE_OPS (batch-norm statistics) before each optimizer step.
    # NOTE(review): minimizes only the last tower's total_loss, not the sum.
    with tf.control_dependencies(update_ops):
        optimizer = tf.train.AdamOptimizer(learning_rate= learning_rate).minimize(loss=total_loss,var_list=tf.trainable_variables(),global_step=global_step)
    # Set tf saver
    saver = tf.train.Saver()
    model_save_dir = 'model/culane_lanenet'
    if not ops.exists(model_save_dir):
        os.makedirs(model_save_dir)
    train_start_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    model_name = 'dense_depthnet_{:s}_{:s}.ckpt'.format(net_flag, str(train_start_time))
    model_save_path = ops.join(model_save_dir, model_name)
    # Set tf summary
    tboard_save_path = 'tboard/dense_depthnet/{:s}'.format(net_flag)
    if not ops.exists(tboard_save_path):
        os.makedirs(tboard_save_path)
    train_cost_scalar = tf.summary.scalar(name='train_cost', tensor=total_loss)
    val_cost_scalar = tf.summary.scalar(name='val_cost', tensor=total_loss)
    learning_rate_scalar = tf.summary.scalar(name='learning_rate', tensor=learning_rate)
    train_merge_summary_op = tf.summary.merge([ train_cost_scalar,
                                               learning_rate_scalar])
    val_merge_summary_op = tf.summary.merge([ val_cost_scalar])
    # Set sess configuration
    # NOTE(review): device_count={'GPU': 1} exposes a single GPU even though
    # the graph above targets /gpu:0 and /gpu:1 -- confirm intended setup.
    sess_config = tf.ConfigProto(device_count={'GPU': 1})
    sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TRAIN.GPU_MEMORY_FRACTION
    sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH
    sess_config.gpu_options.allocator_type = 'BFC'
    sess = tf.Session(config=sess_config)
    summary_writer = tf.summary.FileWriter(tboard_save_path)
    summary_writer.add_graph(sess.graph)
    # Set the training parameters
    train_epochs = CFG.TRAIN.EPOCHS
    log.info('Global configuration is as follows:')
    log.info(CFG)
    with tf.device('/cpu:0'):
        # Sum the per-tower losses for reporting ('sum' shadows the builtin).
        sum = tf.add_n(c)
        sum1 = tf.add_n(d)
        sum2 = tf.add_n(e)
        sum3 = tf.add_n(f)
    with sess.as_default():
        tf.train.write_graph(graph_or_graph_def=sess.graph, logdir='',
                             name='{:s}/lanenet_model.pb'.format(model_save_dir))
        if weights_path is None:
            log.info('Training from scratch')
            init = tf.global_variables_initializer()
            sess.run(init)
        else:
            log.info('Restore model from last model checkpoint {:s}'.format(weights_path))
            saver.restore(sess=sess, save_path=weights_path)
        # Load pretrained VGG-16 weights (only when training from scratch).
        if net_flag == 'vgg' and weights_path is None:
            pretrained_weights = np.load(
                './data/vgg16.npy',
                encoding='latin1').item()
            for vv in tf.trainable_variables():
                weights_key = vv.name.split('/')[-3]
                try:
                    weights = pretrained_weights[weights_key][0]
                    _op = tf.assign(vv, weights)
                    sess.run(_op)
                except Exception as e:
                    # Variables without a matching VGG layer are left as-is.
                    continue
        train_cost_time_mean = []
        val_cost_time_mean = []
        for epoch in range(train_epochs):
            # training part
            t_start = time.time()
            gt_imgs, depth_labels, gt_labels, mask_labels, gt_cams, gt_x, gt_y = train_dataset.next_batch(CFG.TRAIN.BATCH_SIZE)
            phase_train = 'train'
            _, c, d_loss, x_l, y_l, train_summary, = \
                sess.run([optimizer, sum, sum1, sum2, sum3,
                          train_merge_summary_op],
                         feed_dict={input_tensor1: gt_imgs,
                                    input_tensor2: depth_labels,
                                    gt_label: gt_labels,
                                    mask_label: mask_labels,
                                    cam_label: gt_cams,
                                    xx_label: gt_x,
                                    yy_label: gt_y,
                                    phase: phase_train})
            # Abort on divergence.
            if math.isnan(c) :
                log.error('cost is: {:.5f}'.format(c))
                return
            cost_time = time.time() - t_start
            train_cost_time_mean.append(cost_time)
            summary_writer.add_summary(summary=train_summary, global_step=epoch)
            # validation part
            gt_imgs_val, depth_labels_val, gt_labels_val, mask_labels_val, cam_val, gt_x_val, gt_y_val \
                = val_dataset.next_batch(CFG.TRAIN.VAL_BATCH_SIZE)
            phase_val = 'test'
            t_start_val = time.time()
            c_val, d_loss_val, x_l_val, y_l_val, val_summary = \
                sess.run([sum, sum1, sum2, sum3, val_merge_summary_op],
                         feed_dict={input_tensor1: gt_imgs_val,
                                    input_tensor2: depth_labels_val,
                                    gt_label: gt_labels_val,
                                    mask_label: mask_labels_val,
                                    cam_label: cam_val,
                                    xx_label: gt_x_val,
                                    yy_label: gt_y_val,
                                    phase: phase_val})
            summary_writer.add_summary(val_summary, global_step=epoch)
            cost_time_val = time.time() - t_start_val
            val_cost_time_mean.append(cost_time_val)
            if epoch % CFG.TRAIN.DISPLAY_STEP == 0:
                log.info('Epoch: {:d} total_loss= {:6f}'
                         'dense_loss= {:6f}, x_loss= {:6f}, y_loss= {:6f}'
                         ' mean_cost_time= {:5f}s '.
                         format(epoch + 1, c, d_loss, x_l, y_l, np.mean(train_cost_time_mean)))
                train_cost_time_mean.clear()
            if epoch % CFG.TRAIN.TEST_DISPLAY_STEP == 0:
                log.info('Epoch_Val: {:d} total_loss= {:6f} '
                         'dense_loss= {:6f}, x_loss= {:6f}, y_loss= {:6f}'
                         'mean_cost_time= {:5f}s '.
                         format(epoch + 1, c_val, d_loss_val, x_l_val, y_l_val, np.mean(val_cost_time_mean)))
                val_cost_time_mean.clear()
            # Periodic checkpoint.
            if epoch % 2000 == 0:
                saver.save(sess=sess, save_path=model_save_path, global_step=epoch)
        sess.close()
        return
if __name__ == '__main__':
    # Script entry point: parse CLI arguments and launch training.
    # init args
    args = init_args()
    # train lanenet
    train_net(args.dataset_dir, args.weights_path, net_flag=args.net)
|
[
"noreply@github.com"
] |
yeyang1021.noreply@github.com
|
89387841556725ab30f6a4260ab7846497b4b594
|
6a72f47a5e49b4e9b74a2e6c4ca2591cdf7b7f03
|
/snake.py
|
0b33fa36662a2f46898f9bb56452710861de4f3f
|
[
"MIT"
] |
permissive
|
ritik-gupta/modern-snake-game-opencv
|
a54f0a3f5ae7179a2bf37b49a971f04fc5f2b7aa
|
053fdbe13eee37258e1aeae6fafef0f4e7f21847
|
refs/heads/master
| 2021-09-22T11:22:57.002569
| 2018-09-09T13:19:24
| 2018-09-09T13:19:24
| 148,025,233
| 8
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,079
|
py
|
import cv2
import numpy as np
from time import time
import random
import math
import webcolors
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
# Load the apple sprite with its alpha channel; split it into a BGR image
# plus mask / inverted mask so it can be composited onto each frame.
apple = cv2.imread("apple.png", -1)
apple_mask = apple[:, :, 3]
apple_mask_inv = cv2.bitwise_not(apple_mask)
apple = apple[:, :, 0:3]
apple = cv2.resize(apple, (40, 40), interpolation=cv2.INTER_AREA)
apple_mask = cv2.resize(apple_mask, (40, 40), interpolation=cv2.INTER_AREA)
apple_mask_inv = cv2.resize(apple_mask_inv, (40, 40), interpolation=cv2.INTER_AREA)
# Overlay canvas (same size as the webcam frame) and the default camera.
blank_img = np.zeros((480, 640, 3), np.uint8)
video = cv2.VideoCapture(0)
# Morphology kernels used to clean up the colour mask.
kernel_erode = np.ones((4, 4), np.uint8)
kernel_close = np.ones((15, 15), np.uint8)
# The player chooses which colour object steers the snake (CSS colour name).
color = input("Enter color: ")
rgb = webcolors.name_to_rgb(color)
red = rgb.red
blue = rgb.blue
green = rgb.green
lower_upper = []
def color_convert(r, bl, g):
    """Convert an RGB colour to ``[lower, upper]`` HSV threshold bounds
    (hue +/- 10) suitable for ``cv2.inRange``.

    Fix: the original appended both bounds into the module-level
    ``lower_upper`` list on EVERY call, so the list grew without bound
    (this function runs once per frame via detect_color) while only the
    first two entries were ever read.  A fresh two-element list is now
    built and returned on each call; the return value is unchanged.
    """
    co = np.uint8([[[bl, g, r]]])
    hsv_color = cv2.cvtColor(co, cv2.COLOR_BGR2HSV)
    hue = hsv_color[0][0][0]
    # NOTE(review): hue - 10 can go negative for reds near hue 0; kept as in
    # the original behaviour.
    return [[hue - 10, 100, 100], [hue + 10, 255, 255]]
def detect_color(h):
    """Return a binary mask of pixels in ``h`` (an HSV frame) whose hue lies
    within +/-10 of the user-selected colour, cleaned with an erosion and a
    morphological close."""
    # NOTE(review): recomputes the colour bounds every frame even though
    # red/green/blue never change after startup -- could be hoisted.
    lu = color_convert(red, blue, green)
    lower = np.array(lu[0])
    upper = np.array(lu[1])
    mask = cv2.inRange(h, lower, upper)
    mask = cv2.erode(mask, kernel_erode, iterations=1)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel_close)
    return mask
def orientation(p, q, r):
    """Orientation of the ordered point triplet (p, q, r).

    Returns 0 for collinear, 1 for clockwise, 2 for counter-clockwise,
    based on the sign of the 2D cross product of (q - p) and (r - q).
    """
    cross = int((q[1] - p[1]) * (r[0] - q[0]) - (q[0] - p[0]) * (r[1] - q[1]))
    if cross > 0:
        return 1
    if cross < 0:
        return 2
    return 0
def intersect(p, q, r, s):
    """Return True when segment p-q crosses segment r-s.

    Uses the standard orientation test: the segments intersect when the
    endpoints of each segment lie on opposite sides of the other segment.
    (Collinear-overlap special cases are not handled, as in the original.)
    """
    return (orientation(p, q, r) != orientation(p, q, s)
            and orientation(r, s, p) != orientation(r, s, q))
# ---- Game state -------------------------------------------------------
# q: set once the first tracked point appears; temp: cleared on self-collision.
start_time = int(time())
q, snake_len, score, temp = 0, 200, 0, 1
point_x, point_y = 0, 0
last_point_x, last_point_y, dist, length = 0, 0, 0, 0
points = []        # snake body polyline (most recent point last)
list_len = []      # per-segment lengths, parallel to points
random_x = random.randint(10, 550)
random_y = random.randint(10, 400)
a, b, c, d = [], [], [], []
# ---- Main game loop: one iteration per webcam frame -------------------
while 1:
    xr, yr, wr, hr = 0, 0, 0, 0
    ret, frame = video.read()
    frame = cv2.flip(frame, 1)
    # Remember the first valid tracked position as the initial head.
    if q == 0 and point_x != 0 and point_y != 0:
        last_point_x = point_x
        last_point_y = point_y
        q = 1
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = detect_color(hsv)
    # finding contours
    _, contour, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # drawing rectangle around the accepted blob
    # Scan up to 10 contours and keep the first with area > 2000 px;
    # the bare except swallows IndexError when fewer contours exist.
    try:
        for i in range(0, 10):
            xr, yr, wr, hr = cv2.boundingRect(contour[i])
            if (wr*hr) > 2000:
                break
    except:
        pass
    cv2.rectangle(frame, (xr, yr), (xr + wr, yr + hr), (0, 0, 255), 2)
    # Head position = centre of the tracked blob.
    point_x = int(xr+(wr/2))
    point_y = int(yr+(hr/2))
    dist = int(math.sqrt(pow((last_point_x - point_x), 2) + pow((last_point_y - point_y), 2)))
    # Extend the snake only when the head moved more than 5 px (jitter filter).
    if point_x != 0 and point_y != 0 and dist > 5:
        list_len.append(dist)
        length += dist
        last_point_x = point_x
        last_point_y = point_y
        points.append([point_x, point_y])
    # Trim the tail until total length is back under snake_len.
    if length >= snake_len:
        for i in range(len(list_len)):
            length -= list_len[0]
            list_len.pop(0)
            points.pop(0)
            if length <= snake_len:
                break
    # Redraw the snake onto a fresh overlay each frame.
    blank_img = np.zeros((480, 640, 3), np.uint8)
    for i, j in enumerate(points):
        if i == 0:
            continue
        cv2.line(blank_img, (points[i-1][0], points[i-1][1]), (j[0], j[1]), (blue, green, red), 5)
    cv2.circle(blank_img, (last_point_x, last_point_y), 5, (10, 200, 150), -1)
    # Apple eaten: head inside the 40x40 apple box -> score and respawn.
    if random_x < last_point_x < (random_x + 40) and random_y < last_point_y < (random_y + 40):
        score += 1
        random_x = random.randint(10, 550)
        random_y = random.randint(10, 400)
    frame = cv2.add(frame, blank_img)
    # Composite the apple sprite onto the frame using its alpha masks.
    roi = frame[random_y:random_y+40, random_x:random_x+40]
    img_bg = cv2.bitwise_and(roi, roi, mask=apple_mask_inv)
    img_fg = cv2.bitwise_and(apple, apple, mask=apple_mask)
    dst = cv2.add(img_bg, img_fg)
    frame[random_y:random_y + 40, random_x:random_x + 40] = dst
    cv2.putText(frame, str("Score - "+str(score)), (250, 450), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
    # Self-collision: does the newest segment (a-b) cross any older segment?
    if len(points) > 5:
        b = points[len(points)-2]
        a = points[len(points)-1]
        for i in range(len(points)-3):
            c = points[i]
            d = points[i+1]
            if intersect(a, b, c, d) and len(c) != 0 and len(d) != 0:
                temp = 0
                break
        if temp == 0:
            break
    cv2.imshow("frame", frame)
    # Grow the snake by 40 px roughly every 2 seconds.
    if (int(time())-start_time) > 1:
        snake_len += 40
        start_time = int(time())
    key = cv2.waitKey(1)
    if key == 27:
        break
# ---- Game over screen -------------------------------------------------
video.release()
cv2.destroyAllWindows()
cv2.putText(frame, str("Game Over!"), (100, 230), font, 3, (255, 0, 0), 3, cv2.LINE_AA)
cv2.putText(frame, str("Press any key to Exit."), (180, 260), font, 1, (255, 200, 0), 2, cv2.LINE_AA)
cv2.imshow("frame", frame)
k = cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"gritik95@gmail.com"
] |
gritik95@gmail.com
|
09fc27c0d2db4456503b39b6e34088fd5e8e840b
|
a553c852772514ee392a359cb306128381a85b6c
|
/mysite/mysite/settings.py
|
c49fceaff2b3e63c7da2ad4c85e6ad2c32742546
|
[] |
no_license
|
YenHengLin/login-bootdjango
|
0ba78cc55eff7e75c0063121de01106e504ed71c
|
9b0fb9e8a5ef4fc16536edc0867e4c5c678a1e69
|
refs/heads/master
| 2022-12-19T01:50:32.986362
| 2020-08-28T21:27:39
| 2020-08-28T21:27:39
| 289,205,928
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,101
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# it from the environment (e.g. os.environ) before any production deployment.
SECRET_KEY = '-z7-a+7kf$k38xo1%d^u!j5tm^ws@isvyrikdqgbcqnl10jdda'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'login.apps.LoginConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"NE6081098@gs.ncku.edu.tw"
] |
NE6081098@gs.ncku.edu.tw
|
e98613e9520a652001c93b17ff4600e2c67e99bf
|
0db8032fc10ffee088736dc19f5e766a8d4bc72d
|
/02-array-seq/list_slice.py
|
942778ef6bdd142fba0099be3b61dd1730cc3b73
|
[
"MIT"
] |
permissive
|
niyunsheng/fluent-python
|
9ddec1ed09b819b3a2d920c3b1bc092094719150
|
26d91e56a58c7423253066f65ffb03b90d822e86
|
refs/heads/master
| 2022-10-22T22:27:05.787416
| 2020-06-13T15:42:29
| 2020-06-13T15:42:29
| 256,383,732
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
# Build a list of lists (each row is an independent list object)
board = [['_'] * 3 for _ in range(3)]
print(board)
board[1][2] = 'X'
print(board)
# The code above is equivalent to:
board = []
for i in range(3):
    row=['_'] * 3
    board.append(row)
# The approach below is WRONG, because the outer list actually contains
# 3 references to the SAME inner list
board = [['_'] * 3 ]*3
print(board)
board[1][2] = 'X'
print(board)
# The code above is equivalent to:
row=['_'] * 3
board = []
for i in range(3):
    board.append(row)
|
[
"ni-ys13@tsinghua.org.cn"
] |
ni-ys13@tsinghua.org.cn
|
c63b1fa4e04f7ce429a6950d384a4e64e36fe1b8
|
3017e7f0e8cd99469c7c98ec8a4b9b75d39c0c2f
|
/pythonkitabi/ingilizce/ch11/mia/splitspace.py
|
61f80bf9bd1d710ebaf5c34572bcbad903ed206a
|
[] |
no_license
|
Rmys/projects
|
de6cb9d5d3f027d98c812647369d1e487d902c4b
|
60ce197bc1fb7ad3fa31f12559b74ee450b69df1
|
refs/heads/master
| 2020-03-19T15:36:11.603931
| 2011-09-16T00:15:34
| 2011-09-16T00:15:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 869
|
py
|
"""
splitspace.py - splitter view manager for the mdi framework
copyright: (C) 2001, Boudewijn Rempt
email: boud@rempt.xs4all.nl
"""
from qt import *
from resources import TRUE, FALSE
class SplitSpace(QSplitter):
    """Splitter-based view manager exposing a QWorkspace/MDI-like API.

    cascade/tile are deliberate no-ops because a splitter lays its child
    views out itself.  (Python 2 / PyQt-era code.)
    """
    def __init__(self, *args):
        # Python 2 idiom: apply(f, t) == f(*t); forwards ctor args to QSplitter.
        apply(QSplitter.__init__,(self, ) + args)
        # Ordered list of the child views managed by this splitter.
        self.views=[]
    def addView(self, view):
        self.views.append(view)
    def removeView(self, view): pass
    def activeWindow(self):
        # Return the focused view, falling back to the first registered one.
        # NOTE(review): raises IndexError when no views are registered.
        for view in self.views:
            if view.hasFocus():
                return view
        return self.views[0]
    def cascade(self): pass
    def windowList(self):
        return self.views
    def tile(self): pass
    def canCascade(self):
        return FALSE
    def canTile(self):
        return FALSE
    def activateWindow(self, view):
        view.setFocus()
|
[
"ismail@users.noreply.github.com"
] |
ismail@users.noreply.github.com
|
a34d3b1af0338b070bb96765121b7d9ea124b37d
|
03fa51b0145bbc68dd17ce5a50374dbb1881ca7c
|
/fingerTransferTMAEfunctions.py
|
4b9bc18a34cfc4210f952c75709b5af1c4a9733e
|
[] |
no_license
|
SDAMcIntyre/sidewaysOptaconStim
|
4f1eb225b9fbbdf4a6e9915c56c9169c3893ab1f
|
8c4095efdcfe55a196478c6e21e2f34d40bcfe55
|
refs/heads/master
| 2020-05-04T21:40:55.415185
| 2015-02-23T07:08:29
| 2015-02-23T07:08:29
| 26,157,645
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,564
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 12 14:33:59 2014
@author: sarahmcintyre
"""
import optacon
import optaconSideways
from psychopy import data
import numpy
import os
single_presentation = optaconSideways.single_presentation
zeros = numpy.zeros
sign = numpy.sign
def stim_set(presentationTime, stepDuration, standard, comparison, exptFolder, exptName, nReps=1):
    """Build the full stimulus set for a 2-IFC ISOI comparison experiment and
    write the stimulus-list and Optacon protocol files to exptFolder.

    (Python 2 source -- note the print statement at the end.)
    standard/comparison ISOIs are mirrored with negated values so each ISOI is
    tested in both movement directions; sign encodes direction, magnitude the
    inter-stimulus onset interval.
    """
    # Mirror both lists with negated values (direction reversal).
    standard = [standard, -standard]
    comparison+=[x*-1 for x in comparison]
    if not os.path.exists(exptFolder):
        os.makedirs(exptFolder)
    # Full crossing: every comparison x standard x standard-side combination.
    stimCombinations = [{'compISOI':compISOI, 'stndISOI':stndISOI, 'standardPosition':standardPosition} for
                        compISOI in comparison for stndISOI in standard for standardPosition in ['left','right']]
    trials = data.TrialHandler(stimCombinations,nReps=nReps,method='sequential')
    trials.data.addDataType('blockNo')
    stimList = []
    repList = []
    nameList = []
    blockList = []
    blockNo = 1
    for thisTrial in trials:
        blockNo += 1 #starts at 2 because 1 is reserved for lead time
        trials.data.add('blockNo', blockNo)
        stndName = 'STNDISOI_'+str(thisTrial['stndISOI'])
        compName = 'COMPISOI_'+str(thisTrial['compISOI'])
        # Index 0 = left side, index 1 = right side.
        isoiLR = [0,0]
        stepVectorLR = [0,0]
        if thisTrial['standardPosition'] == 'left':
            stndPos = 0
            compPos = 1
            name = [stndName+'_'+compName]
        else:
            stndPos = 1
            compPos = 0
            name = [compName+'_'+stndName]
        # Magnitude -> ISOI, sign -> movement direction.
        isoiLR[stndPos] = abs(thisTrial['stndISOI'])
        isoiLR[compPos] = abs(thisTrial['compISOI'])
        stepVectorLR[stndPos] = sign(thisTrial['stndISOI'])
        stepVectorLR[compPos] = sign(thisTrial['compISOI'])
        stim, rep = single_presentation(presDur=presentationTime,
                        stepDur=stepDuration,
                        isoi = isoiLR,
                        rowsToUse=range(0,6), #assumes Optacon is sideways
                        colsToUse=[range(18,24),range(0,6)], #first is left, second is right; depends on Optacon orientation in eperiment
                        stepVector = stepVectorLR,
                        randomPos=[False,False], spread=[True,True]
                        )
        name = name*len(stim)
        stimList += stim
        repList += rep
        nameList += name
        blockList += [blockNo] * (len(stim))
    trials.saveAsText(fileName=exptFolder+exptName+'_stimList',
                      stimOut=['compISOI','stndISOI','standardPosition'],
                      dataOut=['blockNo_raw'],
                      appendFile=False)
    optacon.write_protocol_file(fileName=exptFolder+exptName+'_protocol',
                                stimList=stimList,
                                stimRep=repList,
                                blockList=blockList,
                                stimName=nameList)
    print 'created files in folder \"'+exptFolder+'\":\n'+exptName+'_stimList.dlm\n'+exptName+'_protocol.txt'
|
[
"sdamcintyre@gmail.com"
] |
sdamcintyre@gmail.com
|
26689f9b99d8bd56db24a4aeb5febdc30e9b5fc8
|
21f2f5aebdd72b2b12ad7691caf2fc0940ed81c7
|
/CSV_WebAPI/Q1.py
|
556aa4ea8f1ebc788b30360ac97ceae679103255
|
[] |
no_license
|
seelem27/TCP-IP-Network-Application-Development
|
4c17e8ab9c19f7ce005d0e47ceec0051d05f3815
|
53fe62ef175160ac87c517bb0460fb2b182d0f60
|
refs/heads/master
| 2020-08-28T19:59:13.515182
| 2019-10-27T04:59:19
| 2019-10-27T04:59:19
| 217,806,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 779
|
py
|
import http.client
import json
import csv
HOST = 'localhost'
PORT = 5000
print('### Connecting to {}:{}'.format(HOST, PORT))
conn = http.client.HTTPConnection(HOST, PORT)
# POST each row of Q1.csv as a JSON station record to the local API,
# reusing one HTTP connection for all requests.
with open('Q1.csv') as file:
    stations = csv.DictReader(file)
    for station in stations:
        station = dict(station)
        print('### Sending HTTP Request')
        conn.request('POST', '/api/stations', json.dumps(station), {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
        })
        print('### HTTP Response Received')
        response = conn.getresponse()
        # 201 Created is the success status for this endpoint.
        if response.status == 201:
            result = json.loads(response.read())
            print(result)
        else:
            print('### ERROR: {}'.format(response.status))
|
[
"dzyenlem@gmail.com"
] |
dzyenlem@gmail.com
|
3efb1c41585980c718232ed289499bfcb00fe969
|
9b852d13a5cb849b42a56959d80af13de9124381
|
/predict.py
|
e26395c9906cfde3f2441f74a58b53d51a8b11a6
|
[
"MIT"
] |
permissive
|
HungUnicorn/udacity-flower-image-classifier
|
086228f689fe70f34a62c90be50330482288df93
|
9625d3e94f178cd6090168f5a97c8d278cebc5c8
|
refs/heads/master
| 2022-09-19T03:08:45.251131
| 2020-06-01T13:22:26
| 2020-06-01T13:22:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,682
|
py
|
import argparse
import json
from util import process_image, load_model
import torch
def predict():
    """CLI entry point: load an image and a saved model, then print the
    top-k class names with their probabilities."""
    args = cli()
    device = torch.device("cuda" if args.gpu else "cpu")
    print(f'Device: {device}')
    image = process_image(args.image_path)
    # NOTE(review): args.checkpoint is parsed by cli() but never passed to
    # load_model() -- presumably load_model should receive it; verify
    # against util.load_model's signature.
    model = load_model()
    with open(args.category_names, 'r') as f:
        cat_to_name = json.load(f)
    top_ps, top_class = _predict(model, image, args.top_k, device)
    for i, c in enumerate(top_class):
        print(f"Prediction {i + 1}: "
              f"{cat_to_name[c]} .. "
              f"({100.0 * top_ps[i]:.3f}%)")
def cli():
    """Define and parse the prediction command line.

    Positional: image_path, checkpoint.
    Options: --top_k (int, default 1), --category_names (JSON mapping file),
    --gpu (flag).
    """
    parser = argparse.ArgumentParser()
    for positional in ("image_path", "checkpoint"):
        parser.add_argument(positional)
    parser.add_argument("--top_k", type=int, default=1)
    parser.add_argument("--category_names", default="cat_to_name.json")
    parser.add_argument("--gpu", action="store_true")
    return parser.parse_args()
def _predict(model, image, topk, device):
tensor_image = torch.from_numpy(image).type(torch.FloatTensor)
tensor_image = tensor_image.unsqueeze_(0)
model.to(device)
model.eval()
with torch.no_grad():
print(f'Device: {device}')
output = model(tensor_image.to(device)).cpu()
probs = torch.exp(output)
top_p, top_class = probs.topk(topk, dim=1)
top_p = top_p.numpy()[0]
top_class = top_class.numpy()[0]
idx_to_class = {val: key for key, val in
model.class_to_idx.items()}
top_class = [idx_to_class[i] for i in top_class]
return top_p, top_class
if __name__ == "__main__":
predict()
|
[
"hungchang@hungchang.fritz.box"
] |
hungchang@hungchang.fritz.box
|
48ad1087d1425fbf659db1aec546c48a22425705
|
5491e80f7dc72a8091b16c26a5cfee93381ee30d
|
/Challenge202E_I_AM_BENDER_Binary_To_Text/challenge202E.py
|
a35a3a4915220b1f0ced3a8f61896c03fca380db
|
[] |
no_license
|
tuipopenoe/DailyProgrammer
|
87167c2ae275c40c3b1a30ae14497a3289f8797f
|
8d42947b576b78456fa72cdf5b886cff9f32b769
|
refs/heads/master
| 2016-09-05T21:13:30.805504
| 2015-10-16T02:57:20
| 2015-10-16T02:57:20
| 21,139,505
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
#!/usr/bin/env python
# Tui Popenoe
# challenge202E.py - Binary to String
import sys
import binascii
def i_am_bender(binary):
    """Decode a string of '0'/'1' characters into the bytes it represents.

    Fix: the original passed ``'%x' % int(binary, 2)`` straight to
    ``binascii.unhexlify``, which raises ``binascii.Error`` whenever the hex
    form has an odd number of digits (e.g. '101' -> '5').  The hex string is
    now zero-padded to an even length first, which also makes '0' decode to
    b'\\x00' instead of crashing.
    """
    hex_digits = '%x' % int(binary, 2)
    if len(hex_digits) % 2:
        hex_digits = '0' + hex_digits
    return binascii.unhexlify(hex_digits)
def main():
    """Decode the binary string given as argv[1], or read it from stdin."""
    source = sys.argv[1] if len(sys.argv) > 1 else sys.stdin.read()
    print(i_am_bender(source))
if __name__ == '__main__':
main()
|
[
"tuipopenoe@gmail.com"
] |
tuipopenoe@gmail.com
|
3c0a2790de6ddfcdd8adb3a0d60a8fc2ac185316
|
9e5c5745c9ffd4ac1b71303559d97b4730047389
|
/zaj5/zad1.py
|
ec785fb3fbc89eb248b6992ba31380791095ce36
|
[] |
no_license
|
Wladaaa/python
|
6e12c1a313f7531fb1f999eeb70d5cc2a06966c4
|
9b6cdd26cb56740c4ae77c741a1623d8f32f4000
|
refs/heads/master
| 2018-09-19T13:29:41.601791
| 2018-06-06T09:39:58
| 2018-06-06T09:39:58
| 123,256,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 443
|
py
|
#!/usr/bin/env python
#encoding: utf-8
# NOTE(review): Python 2 source (print statements); input() evaluates the
# typed expression, so ``width`` is expected to be an int column width.
# Wraps/centres the lines of text.txt to the given width.
width = input()
f = open("text.txt")
res=""
k=1
for line in f:
    line = line.strip()
    if len(line)>width:
        # Long line: emit it in centred chunks of ``width`` characters.
        for i in range(len(line)):
            if(k<=width):
                k+=1
                res=res+line[i]
            else:
                k=2
                print res.center(width)
                res = line[i]
    else: print line.center(width-len(line))
# Flush whatever is left in the chunk buffer.
print res.center(width)
|
[
"noreply@github.com"
] |
Wladaaa.noreply@github.com
|
a01ab751ec102dc7d935fcfbf2e45baffaef1dc6
|
ad19cff460a1f28c2b63f7ea66d13695fe8742a5
|
/orders/migrations/0001_initial.py
|
e79b19914736d02e3c1dd92507a6b477bb678b25
|
[] |
no_license
|
OsamaAburideh/E-Commerce-website
|
172f8ef3a6c7bf71b2ac674e766cf0cefeb103b0
|
87847fcdd19775f4065384ac8f2054bc9a1eae3f
|
refs/heads/master
| 2022-12-26T17:52:33.446521
| 2020-10-06T09:07:39
| 2020-10-06T09:07:39
| 301,666,169
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,809
|
py
|
# Generated by Django 2.2.5 on 2019-12-04 16:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the orders app: Order and OrderItem tables
    (auto-generated by Django 2.2.5)."""
    initial = True
    dependencies = [
        ('shop', '0002_auto_20191107_1145'),
    ]
    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=50)),
                ('last_name', models.CharField(max_length=50)),
                ('email', models.EmailField(max_length=254)),
                ('address', models.CharField(max_length=50)),
                ('postal_code', models.CharField(max_length=50)),
                ('city', models.CharField(max_length=50)),
                ('created', models.DateTimeField(auto_now_add=True)),
                # NOTE(review): 'updated' uses auto_now_add (set only on
                # creation) -- presumably auto_now was intended; fixing it
                # requires a new migration, not an edit here.
                ('updated', models.DateTimeField(auto_now_add=True)),
                ('paid', models.BooleanField(default=False)),
            ],
            options={
                'ordering': ('created',),
            },
        ),
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('price', models.DecimalField(decimal_places=2, max_digits=10)),
                ('quantity', models.PositiveIntegerField(default=1)),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='orders.Order')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_items', to='shop.Product')),
            ],
        ),
    ]
|
[
"hamis9474@gmail.com"
] |
hamis9474@gmail.com
|
704bab0f499353c6dd95d262b807b096e058052b
|
7cd6b93e953141981d453cd984f9bc7f55803fbd
|
/tests/entity/first_table_entity.py
|
bd220d628ab736d894672307d2843ef7e8df0c15
|
[
"MIT"
] |
permissive
|
xiaolingzi/lingorm-python
|
e2db4164c1b32d19defa7528a48d54e81d835e7f
|
4b614bac1d6427010d7b355e1f67b0bbff52edbc
|
refs/heads/master
| 2022-12-24T12:01:43.556077
| 2020-09-30T07:25:28
| 2020-09-30T07:25:28
| 86,041,922
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 670
|
py
|
from lingorm.mapping import *
class FirstTableEntity(ORMEntity):
    """ORM entity mapped to the ``first_table`` table (lingorm mapping).

    Field declarations describe the columns; the constructor accepts the
    same names as keyword arguments and leaves missing ones as None.
    """
    __table__ = "first_table"
    __database__ = ""
    # Auto-generated integer primary key.
    id = Field(field_name="id", field_type="int",
               is_primary=True, is_generated=True)
    first_name = Field(field_name="first_name", field_type="string", length="45")
    first_number = Field(field_name="first_number", field_type="int")
    first_time = Field(field_name="first_time", field_type="datetime")
    def __init__(self, **kwargs):
        self.id = kwargs.get("id")
        self.first_name = kwargs.get("first_name")
        self.first_number = kwargs.get("first_number")
        self.first_time = kwargs.get("first_time")
|
[
"xlne@foxmail.com"
] |
xlne@foxmail.com
|
9022778a28d5b1a4ebe981f1821582811b688f2e
|
5394eb884ad815f68f99d240f015986fc5235a5d
|
/civic/civic.py
|
d0825a26f134f23ca1db12637dbd1c3b68f39b24
|
[] |
no_license
|
lisabang/civic-annotator
|
4938830d63c9830401e12eceaeb24185af8ae8d8
|
4739c367e7fe403c6e4c844d5b4593d8bc53f36d
|
refs/heads/master
| 2020-04-01T11:48:55.688806
| 2018-10-16T06:44:23
| 2018-10-16T06:44:23
| 153,178,879
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,447
|
py
|
import sys
from cravat import BaseAnnotator
from cravat import constants, InvalidData
from pyliftover import LiftOver
import sqlite3
import requests
import json
import os
class CravatAnnotator(BaseAnnotator):
    """CRAVAT annotator that decorates variants with CIViC data."""
    def setup(self):
        """Download all CIViC variant records once and index them by
        'chrom:pos:ref:alt', with positions lifted from GRCh37 to GRCh38."""
        r = requests.get('https://civicdb.org/api/variants?count=5000&page=1')
        variants=json.loads(r.text)['records']
        # NOTE(review): chain path is looked up under the 'hg19' key --
        # presumably an hg19->hg38 chain; verify in cravat.constants.
        lifter = LiftOver(constants.liftover_chain_paths['hg19'])
        vdict = {}
        for variant in variants:
            chrom_37 = variant['coordinates']['chromosome']
            pos_37 = variant['coordinates']['start']
            if chrom_37 is None or pos_37 is None: continue
            new_coords = lifter.convert_coordinate("chr" + chrom_37, int(pos_37))
            if len(new_coords) > 0:
                chrom_38 = new_coords[0][0].replace('chr','')
                pos_38 = new_coords[0][1]
            else:
                # No liftover mapping -- skip this variant.
                continue
            ref = variant['coordinates']['reference_bases']
            alt = variant['coordinates']['variant_bases']
            toks = [chrom_38, pos_38, ref, alt]
            if None not in toks:
                vkey = ':'.join(map(str, toks))
                vdict[vkey] = variant
            else:
                continue
        # Lookup table used by annotate().
        self.civicdata = vdict
    def annotate(self, input_data, secondary_data=None):
        """Return CIViC annotations for one variant, or {} when unknown.

        NOTE(review): mutates input_data['chrom'] in place (strips the
        leading 'chr') to match the keys built in setup(); also performs
        one extra HTTP request per matched variant to fetch evidence.
        """
        input_data["chrom"]=input_data["chrom"][3:]
        out={}
        var_key = ":".join([input_data["chrom"],str(input_data["pos"]),input_data["ref_base"],input_data["alt_base"]])
        match=self.civicdata.get(var_key, False)
        if match:
            out["description"]=match['description']
            out["clinical_a_score"]=match['civic_actionability_score']
            civic_id = match['id']
            out['link'] = 'https://civicdb.org/links/variant/'+str(civic_id)
            evidence_link = 'https://civicdb.org/api/variants/{civic_id}/evidence_items?count=5000&page=1'.format(civic_id=civic_id)
            r = requests.get(evidence_link)
            d = json.loads(r.text)
            diseases = {x['disease']['display_name'] for x in d['records']}
            if len(diseases) > 0:
                out['diseases'] = ', '.join(sorted(list(diseases)))
        return out
    def cleanup(self):
        # Nothing to release: setup() holds only an in-memory dict.
        pass
if __name__ == '__main__':
    # Run the annotator directly from the command line (args parsed by BaseAnnotator).
    annotator = CravatAnnotator(sys.argv)
    annotator.run()
|
[
"kmoad@insilico.us.com"
] |
kmoad@insilico.us.com
|
f925da5ed13558e612f1387d8b63e570d0355070
|
85d92ec15fa5319fa360435c43e4efbf71275ac6
|
/conv_layer.py
|
66105fea7b276777a53bc6e91318d1e667ad8b7c
|
[] |
no_license
|
YvesAugusto/resnet_first_blocks
|
2750c493631cf71664df773601160b43bb71969b
|
583357a314b4bec7d503046c10fdb34cbd235060
|
refs/heads/main
| 2023-02-02T21:31:45.846185
| 2020-12-16T13:55:17
| 2020-12-16T13:55:17
| 302,890,892
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 961
|
py
|
import tensorflow.compat.v1 as tf
# Run in TF1-style graph mode; the layer code below uses explicit sessions.
tf.compat.v1.disable_eager_execution()
import numpy as np
def init_filter(d, maps_input, maps_output, stride):
    """Return a He-initialized conv kernel of shape (d, d, maps_input, maps_output) as float32.

    `stride` is accepted for signature compatibility but does not affect the weights.
    """
    scale = np.sqrt(2.0 / (d * d * maps_input))
    weights = np.random.randn(d, d, maps_input, maps_output)
    return (weights * scale).astype(np.float32)
class ConvLayer:
    """A TF1-style 2D convolution layer with bias (no activation)."""

    def __init__(self, d, maps_input, maps_output, stride=2, padding='VALID'):
        # d: square kernel size; maps_input/maps_output: channel counts.
        self.W = tf.Variable(init_filter(d, maps_input, maps_output, stride))
        self.bias = tf.Variable(np.zeros(maps_output, dtype=np.float32))
        self.stride = stride
        self.padding = padding

    def forward(self, X):
        # X is presumably an NHWC batch tensor — TODO confirm with callers.
        X = tf.nn.conv2d(
            X,
            self.W,
            strides=[self.stride, self.stride],
            padding=self.padding
        )
        X = X + self.bias
        return X

    def copyFromKerasLayers(self, layer):
        # Copy kernel and bias weights out of a trained Keras layer.
        # NOTE(review): self.session is never assigned in this class — it must
        # be set externally before calling this method; confirm the owner does so.
        W, bias = layer.get_weights()
        op1 = self.W.assign(W)
        op2 = self.bias.assign(bias)
        self.session.run((op1, op2))

    def get_params(self):
        # Trainable variables of this layer.
        return [self.W, self.bias]
|
[
"noreply@github.com"
] |
YvesAugusto.noreply@github.com
|
f7ba672b11d8d99d484ebed80a07211c25e5f2e1
|
e0915e453dc9ccab98381b96f476bcfb34f51b8f
|
/tests/test_transformations/manual_test_affine.py
|
5ec086b996293db3653d066f5e3043141fa920e2
|
[
"MIT"
] |
permissive
|
AsajuHuishi/hylfm-net
|
a1f34b451ccb5bc71e056b2f3eb29efa486db603
|
baf447a75a30b67cd2929ab2a4427afb3d5e0f78
|
refs/heads/master
| 2023-01-13T04:22:40.633352
| 2020-11-20T13:45:53
| 2020-11-20T13:45:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,809
|
py
|
import collections
from hylfm.datasets import ZipDataset, get_dataset_from_info, get_tensor_info
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
def compare_slices(sample, title, *names):
    """Display the named 2D slices of *sample* side by side under a common *title*."""
    figure, axis_row = plt.subplots(ncols=len(names))
    figure.suptitle(title)
    for tensor_name, axis in zip(names, axis_row):
        image = axis.imshow(sample[tensor_name].squeeze())
        axis.set_title(f"{tensor_name}")
        # Attach a per-axis vertical colorbar without stealing space from the image.
        locator = make_axes_locatable(axis)
        colorbar_axis = locator.append_axes("right", size="5%", pad=0.05)
        figure.colorbar(image, cax=colorbar_axis, orientation="vertical")
    plt.show()
def manual_test_this():
    """Load the beads reference dataset and visually compare ls_reg vs ls_trf max-projections."""
    meta = {
        "nnum": 19,
        "z_out": 49,
        "scale": 4,
        "shrink": 8,
        "interpolation_order": 2,
        "z_ls_rescaled": 241,
        "pred_z_min": 0,
        "pred_z_max": 838,
        "crop_names": ["wholeFOV"],
    }  # z_min full: 0, z_max full: 838; 60/209*838=241; 838-10/209*838=798
    tag = "heart_static.beads_ref_wholeFOV"
    # Same three tensors as before, built in one pass instead of one variable each.
    infos = {name: get_tensor_info(tag, name, meta=meta) for name in ("ls", "ls_trf", "ls_reg")}
    dataset = ZipDataset(
        collections.OrderedDict(
            (name, get_dataset_from_info(info=info, cache=True)) for name, info in infos.items()
        )
    )
    sample = dataset[0]
    compare_slices({"ls_reg": sample["ls_reg"].max(2), "ls_trf": sample["ls_trf"].max(2)}, "lala", "ls_reg", "ls_trf")
if __name__ == "__main__":
    # Manual (non-pytest) visual check; requires a display backend and the dataset.
    manual_test_this()
|
[
"thefynnbe@gmail.com"
] |
thefynnbe@gmail.com
|
eed24e362210d27d3b99aa46c67d4679c0bd7265
|
d77ca3d15691d209e4d16748f6035efdd6dd094d
|
/参考答案/Task4.py
|
5fc03ab1c41c0ba3021062707ca9ecb162005825
|
[] |
no_license
|
3927o/W2OnlineWinterCampus_Python
|
45772f79e79c0ed73a9af041678d9630178244db
|
8251c405a3fde8513fe7ae4595e9fa78210fccbe
|
refs/heads/master
| 2023-03-10T01:49:03.960152
| 2021-03-02T07:53:39
| 2021-03-02T07:53:39
| 335,962,358
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,338
|
py
|
import math
# Problem1
# eval() parses the string into a dict literal which print() then shows.
# NOTE(review): eval on untrusted input is unsafe; acceptable here only
# because the literal is hard-coded.
print(eval("{'name': 'xiaoming'}"))
# Problem2
def 解一元二次方程(a, b, c):
    """Solve a*x^2 + b*x + c = 0 over the reals.

    Prints the result like the original and additionally returns it:
    (x1, x2) for two roots, (x,) for a double root, None when there is
    no real solution.

    Fixes two bugs in the original:
    - math.sqrt was taken before the sign check, so a negative discriminant
      raised ValueError and the "No solution" branch was unreachable;
    - `(-b + delta) / 2*a` divides by 2 and then *multiplies* by a; the
      quadratic formula requires division by (2*a).
    """
    discriminant = b * b - 4 * a * c
    if discriminant < 0:
        print("No solution")
        return None
    root = math.sqrt(discriminant)
    x1 = (-b + root) / (2 * a)
    x2 = (-b - root) / (2 * a)
    if discriminant > 0:
        print(f"x1={x1}, x2={x2}")
        return (x1, x2)
    print(f"x={x1}")
    return (x1,)
# Problem3
def calu(*operands, op=0):
    """
    Chained arithmetic over a sequence of real numbers.

    :param operands: the numbers to combine
    :param op: 0: addition, 1: subtraction, 2: multiplication, 3: division
    :return: the computed result (0 for an unrecognised op)

    Fix: subtraction now mirrors division — start from the first operand and
    subtract the rest.  The original started from 0, so calu(1,2,3,4,op=1)
    returned -10 instead of 1-2-3-4 = -8, inconsistent with the op=3 branch.
    Also guards op=3 against an empty argument list (was an IndexError).
    """
    ans = 0
    if op == 0:
        ans = sum(operands)
    elif op == 1:
        if operands:
            ans = operands[0]
            for value in operands[1:]:
                ans -= value
    elif op == 2:
        ans = 1
        for value in operands:
            ans *= value
    elif op == 3:
        if operands:
            ans = operands[0]
            for value in operands[1:]:
                ans /= value
    else:
        print("Wrong parameter")
    print(f"ans is {ans}")
    return ans
# Problem4
def show_info(name, age, **extra):
    """Print a person's name and age, then every extra keyword field, one per line."""
    print(f"name: {name}")
    print(f"age: {age}")
    # Iterate key/value pairs directly instead of indexing back into the dict.
    for key, value in extra.items():
        print(f"{key}: {value}")
# Demo calls exercising the three functions above.
解一元二次方程(1, 2, 1)
calu(1, 2, 3, 4)
calu(1, 2, 3, 4, op=1)
calu(1, 2, 3, 4, op=2)
calu(1, 2, 3, 4, op=3)
show_info("lin", 19, school="fzu")
|
[
"1624497311@qq.com"
] |
1624497311@qq.com
|
909907147f166591898af0551a064a3edf97bb2a
|
5630227ae646a79dba45ab0b07b5026c6a53b86d
|
/code kata/factorialll.py
|
870c67328426b832d80bfbc39207f32952bf3778
|
[] |
no_license
|
vigneshpriya/GUVI
|
79d5b7a06b116529fb709f4e1a1c79ae4c3f714d
|
5915fae12f2cad939bfe9501478d211b815cf4aa
|
refs/heads/master
| 2020-06-03T00:07:44.393425
| 2019-06-24T06:06:55
| 2019-06-24T06:06:55
| 191,355,055
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 81
|
py
|
# Read a non-negative integer from stdin and print its factorial.
number = int(input())
product = 1
for factor in range(2, number + 1):
    product *= factor
print(product)
|
[
"noreply@github.com"
] |
vigneshpriya.noreply@github.com
|
5f91841d99dce028ef4112a7f1b5929f5529de42
|
729aa3af1e6de25c0e46192ef62aaf77cc622979
|
/comentarios/models.py
|
68e967afb7853be71fb6423710c8f2e8619ff015
|
[] |
no_license
|
xuting1108/API-Pontos-Tur-sticos
|
8b583869006b8570c44eebfc885bb3db7eff4f1d
|
7a01434e806a7b3b1409f7c490071ba682525ad3
|
refs/heads/master
| 2022-11-19T15:09:48.057402
| 2020-06-15T21:38:00
| 2020-06-15T21:38:00
| 267,150,058
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
from django.db import models
from django.contrib.auth.models import User
class Comentario(models.Model):
    """A user-authored comment with timestamp and moderation flag."""
    usuario = models.ForeignKey(User, on_delete=models.CASCADE)
    comentarios = models.TextField()
    # Creation time, set automatically on insert.
    data = models.DateTimeField(auto_now_add=True)
    # Comments are approved by default.
    aprovado = models.BooleanField(default=True)

    def __str__(self):
        # Show the author's username (e.g. in admin listings).
        return self.usuario.username
|
[
"xuting1108@hotmail.com"
] |
xuting1108@hotmail.com
|
5a102571e72e68d6537181a5c74ddaaa6cc4da2c
|
51a80d87fd9f009d8b97573288d6ebd0b420b995
|
/baekjoon/DP/실3-2579.py
|
d80de7b2ec3eb6eb4b90c942f29c8b74388aa257
|
[] |
no_license
|
kim-ellen821/mycoding
|
02858509c9bf0892d81d73f01e838c762644c3cf
|
20c03c529d07f0a776fd909a60cabaca3a8fcc34
|
refs/heads/master
| 2023-08-15T08:06:50.872463
| 2021-04-01T09:19:43
| 2021-10-13T13:02:25
| 353,621,725
| 0
| 0
| null | 2021-04-01T09:19:44
| 2021-04-01T08:00:52
|
Python
|
UTF-8
|
Python
| false
| false
| 322
|
py
|
# Staircase-score DP (Baekjoon 2579): dp[i] = best score ending on stair i.
# Fix: the original assigned dp[2] whenever n > 1, which raised IndexError
# for n == 2 (dp has only indices 0..1); dp[2] needs its own n > 2 guard.
n = int(input())
stairs = [int(input()) for _ in range(n)]
dp = [0 for _ in range(n)]
dp[0] = stairs[0]
if n > 1:
    dp[1] = stairs[0] + stairs[1]
if n > 2:
    dp[2] = max(stairs[0] + stairs[2], stairs[1] + stairs[2])
# Either came from i-2, or from i-3 via i-1 (cannot step three in a row).
for i in range(3, n):
    dp[i] = max(dp[i-3] + stairs[i-1] + stairs[i], dp[i-2] + stairs[i])
print(dp[n-1])
|
[
"ellenkim821@gmail.com"
] |
ellenkim821@gmail.com
|
d5e1d94b0f4269311fc4634072447854264afac3
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/CDqMdrTvfn2Wa8igp_16.py
|
12713c2aa2161258166fab90eabe089a4b047990
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
"""
Create a function that returns the next element in an **arithmetic sequence**.
In an arithmetic sequence, each element is formed by adding the same constant
to the previous element.
### Examples
next_element([3, 5, 7, 9]) ➞ 11
next_element([-5, -6, -7]) ➞ -8
next_element([2, 2, 2, 2, 2]) ➞ 2
### Notes
All input arrays will contain **integers only**.
"""
def next_element(lst):
    """Return the next term of the arithmetic sequence *lst*.

    The common difference is lst[-1] - lst[-2], so the next term is
    2*lst[-1] - lst[-2].  Requires at least two elements.
    """
    return 2 * lst[-1] - lst[-2]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
65da41f502db3348a39743f88ed5359debb87fa3
|
6489f80c1bc2c51f41c186c260a3370f899acd20
|
/hpm_generator/trans_hpm_dict.py
|
fd85253aeac1f3c8790ad0c5d78ac4bb781d9dcc
|
[] |
no_license
|
Este1le/Auto-tuning
|
16bc316359d97c3dfff0e41c5aad9a18122b7b25
|
856b6176af770b200897f56b7a46f6699402ef28
|
refs/heads/master
| 2020-04-13T21:27:57.516254
| 2019-09-16T17:19:07
| 2019-09-16T17:19:07
| 163,456,912
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 614
|
py
|
"""
Hyperparameter dictionary for Transformer Neural Machine Translation models.
"""
# Hyperparameter search space, grouped by pipeline stage.
hpm_dict = {
    # Preprocessing
    'bpe_symbols': [10000, 30000, 50000],
    # Model architecture
    'num_embed': ["\"256:256\"", "\"512:512\"", "\"1024:1024\""],
    'num_layers': ["\"2:2\"", "\"4:4\""],
    # Training configuration
    'batch_size': [2048, 4096],
    'initial_learning_rate': [0.0003, 0.0006, 0.001],
    # Transformer
    'transformer_model_size': [256, 512, 1024],
    'transformer_attention_heads': [8, 16],
    'transformer_feed_forward_num_hidden': [1024, 2048],
}
|
[
"xuanzhang@jhu.edu"
] |
xuanzhang@jhu.edu
|
342fb839f815e73042d5f54cba60d89c396936c7
|
8e9ea4e7300a51ad3251526e5a1dfdcef7e9bff5
|
/Tasks/Kostin-Rozhkov/PyTasks/11.py
|
b86436bbe2740c82fd5a976abab4e01421fcb218
|
[] |
no_license
|
BlackenedJustice/testRepo
|
2baff560ed2d9d7de38bb1a0f813ac044c2bf217
|
81fa3893bfd4bba61d81928528b48255aaafdd33
|
refs/heads/master
| 2020-05-19T17:42:58.062510
| 2019-07-15T07:30:26
| 2019-07-15T07:30:26
| 185,140,879
| 6
| 30
| null | 2019-08-03T10:52:56
| 2019-05-06T06:58:20
|
Python
|
UTF-8
|
Python
| false
| false
| 204
|
py
|
# Position on a 109 km circular track after t hours at speed v.
print("Введите скорость v и время t: ")
speed, hours = (int(token) for token in input().split())
print("Через {} часов Вася окажется на {} километре".format(hours, speed * hours % 109))
|
[
"fulstocky@gmail.com"
] |
fulstocky@gmail.com
|
a040259d98517f83a0d490fdf920c7ed7c432d74
|
95e0e0d7e024db0c359f566ebee6a2a0480b228f
|
/2_15.py
|
96c4ca692c1cb81b891e4812bbb65bed0b75ca26
|
[] |
no_license
|
ChaYe001/interface
|
0067f85d5e1f208b757ef92a63777e6cb3e4f814
|
a5715e209b46ce6fd06b89d3ffbf882075532852
|
refs/heads/master
| 2023-06-17T23:19:53.457665
| 2021-07-09T10:04:47
| 2021-07-09T10:04:47
| 264,167,089
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,871
|
py
|
#__author__ = 'Bo.Gu'
# -*- coding: utf-8 -*-
#!/usr/bin/python3
import os
import numpy as np
import codecs
def caca(score, num):
    """Evaluate a face-liveness threshold over a per-frame score log.

    :param score: threshold in [0, 1]; a frame counts as a hit when its
        parsed value is in [score, 1].
    :param num: number of hit frames required to call a subject a real face.

    Reads 'Trueface_0604_2_15.txt.txt'; lines containing '(D)' start a new
    subject.  Prints accuracy stats per (score, num) combination.
    NOTE(review): `count += 0` in the per-frame hit branch looks like it was
    meant to be `count += 1` — left unchanged here; confirm before relying
    on these numbers.
    """
    for filename in ['Trueface_0604_2_15.txt.txt']:
        f = codecs.open(filename,mode='r',encoding='utf-8')
        line = f.readlines()
        count1 = 0
        count = 0.0
        face = 1
        noresult = -1
        realface = 0
        for i in range(len(line)):
            d = line[i].split()
            arr = np.array(d)
            st1r = ''.join(arr)
            # The value after the '2倍:' marker is the frame's score (5 chars).
            thre = st1r.split('2倍:')[1][0:5]
            thre = float(thre)
            # print(thre)
            if '(D)' in st1r:
                # '(D)' marks the start of a new subject: close out the
                # previous one, then score this first frame.
                if count <num and count1 < 5:
                    noresult += 1
                face += 1
                count = 0
                count1 = 1
                if 1>=float(thre) >= float(score):
                # if 2*thre**2/(thre**2 + score**2)-1 >= 0:
                # # print('D_'+ str(thre))
                    count+=1
                else:
                    count1+=1
                if count >= num:
                    continue
            if count1 > 5:
                # More than 5 misses: stop evaluating this subject.
                continue
            # if thre > 1:
            #     print(thre)
            if 1>=float(thre) >= float(score):
                # print(thre)
            # if 2*thre**2/(thre**2 + score**2)-1 >= 0:
                # print('T_'+ str(thre))
                count += 0
            # print(count)
            if count >= num:
                # print(thre)
                realface+=1
                continue
            if count <num and count1 < 5:
                noresult += 1
        acc = realface / face *100
        # print(face)
        acc = round(acc,2)
        score = round(score, 2)
        acc2 = noresult / face *100
        acc2 = round(acc2, 2)
        acc3 = (face - realface - noresult) / (face - noresult) * 100
        acc3 = round(acc3, 2)
        # Positive-sample files report true-face accuracy; anything else is
        # treated as a negative-sample file.
        if filename == 'Trueface_0604_2_15.txt.txt' or filename == 'Trueface_0531.txt':
            if acc >=10:
                print('阈值: ' + str(score) + '帧数:' + str(num) + '——真脸人数:' + str(realface) +
                      '——无结果数量:' + str(noresult) + ' 正样本_' + str(filename) + '——正确率:' + str(acc))
        else:
            if acc <=20:
                print('阈值: ' + str(score) + '帧数:' + str(num) + '——假脸人数:' + str(face - realface - noresult) + '——无结果数量:' + str(noresult) +
                      ' 负样本_' + str(filename) + '——正确率:' + str(100-acc-acc2) + '--无结果为假:' + str(100 -acc) + '--去除无结果:' + str(acc3))
        f.close()
# print('---------------------------------------------------------------')
# Sweep thresholds 0.50–0.97 in 0.01 steps with required frame count num=1.
for score in range(50, 98):
    for num in range(1,2):
        caca(score/100, num)
# caca(0.7, 1)
|
[
"787715780@qq.com"
] |
787715780@qq.com
|
b7b7a1616c5953738e9174b20cb9c545135de13f
|
c38d524359ae6d2683ab306a0e4c6585072c1c17
|
/Tree_search_node.py
|
65688658a3140ba249d91be37b291482ae139484
|
[] |
no_license
|
ohade2014/Hurricane-Evacuation-Problem-A-
|
8092492ac67aac3acbe150d38d0e0d23ed1533f8
|
c7a4a300e83bbf9fac7b520485d5e5713c626815
|
refs/heads/master
| 2021-02-12T20:38:03.789288
| 2020-03-03T12:33:25
| 2020-03-03T12:33:25
| 244,628,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 695
|
py
|
class Tree_search_node:
    """A node in the evacuation search tree, ordered by heuristic value `h`."""

    def __init__(self, loc, num_of_people, people_rescued, time, terminated, shelters, peop, ppl_vex, h, info):
        """Capture the full search state; `childrens` starts empty and is filled during expansion."""
        self.location = loc
        self.people_rescue_now = num_of_people
        self.people_rescued = people_rescued
        self.time = time
        self.terminated = terminated
        self.childrens = []
        self.shelters = shelters
        self.peop = peop
        self.ppl_vex = ppl_vex
        self.h = h
        self.info = info

    def __lt__(self, other):
        """Order by heuristic first, breaking ties by location (keeps ordering total)."""
        return (self.h, self.location) < (other.h, other.location)
|
[
"noreply@github.com"
] |
ohade2014.noreply@github.com
|
07872d994013a1adcc497126aa1cd055dd02639e
|
254a6554b297b2a78619714ddc1b9fe36ee0c5dc
|
/app.py
|
cef0d6770296b2101cfcc4350e82dd49fa8899d2
|
[] |
no_license
|
copperstick6/SeeGreen-GHome
|
a39d928aca12069eb616bee770b9fc7f27ec32c0
|
bce5bacc759489483b9fd1a9f13c21c273fabf28
|
refs/heads/master
| 2021-01-19T23:52:43.478458
| 2017-04-22T12:10:38
| 2017-04-22T12:10:38
| 89,043,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,423
|
py
|
import logging
from random import randint
from flask import Flask, render_template, request, redirect
from flask_ask import Ask, statement, question, session
app = Flask(__name__)
# Mount the Alexa skill handler at the web root.
ask = Ask(app, "/")
logging.getLogger("flask_ask").setLevel(logging.DEBUG)
@ask.launch
def start():
    """Skill launch handler: greet the user with the 'welcome' template."""
    return question(render_template('welcome'))
@ask.intent("YesIntent")
def next_round():
    """Begin a round: reset the session answer counter and ask the round prompt."""
    session.attributes['answers'] = 1
    return question(render_template('round'))
@ask.intent("MoveIntent")
def initRound():
    """Advance the session answer counter and ask the first guess prompt."""
    session.attributes['answers'] = 1 + int(session.attributes['answers'])
    return question(render_template('Guess1'))
@ask.intent("AnswerIntent", convert={'first': int})
def answer(first):
    """Handle a numbered answer; only valid once the round counter reached 2.

    Fixes in this rewrite: the original did not compile — `render question`
    and `render_question` were invalid syntax, the `input == 4` branch had
    no body, and it shadowed the builtin `input`.  Choice 4 is mapped to the
    recycling response like 2 and 3 — NOTE(review): confirm the intended
    template for choice 4.
    """
    if int(session.attributes['answers']) < 2:
        return statement("Invalid instruction")
    choice = int(first)
    if choice == 1:
        return question(render_template('compostResponse'))
    if choice in (2, 3, 4):
        return question(render_template('recyclingResponse'))
    if choice == 5:
        return question(render_template('win'))
    return statement("Invalid instruction")
@ask.intent("CompostIntent")
def compost():
    """Compost intent handler.

    The original file had this decorator with no function beneath it (a
    syntax error); this stub answers with the compost template.
    NOTE(review): confirm the intended response for this intent.
    """
    return question(render_template('compostResponse'))


if __name__ == '__main__':
    app.run(debug=True)
|
[
"copperstick6@gmail.com"
] |
copperstick6@gmail.com
|
acbe56204565b2430875764921acfc9bfbe415f6
|
8e93241d733ee81a47886cd630c1330538fb13c9
|
/app/core/models.py
|
996643c753830fac246be5f142b07fc80718a438
|
[] |
no_license
|
piuart/app-hc
|
b92f7eb440f32acff8e1809e60cc51164ef54277
|
b962dc8e76ead46b8a586d9a19ba99b387df4f32
|
refs/heads/main
| 2023-04-15T19:35:11.240143
| 2021-04-23T12:36:34
| 2021-04-23T12:36:34
| 359,742,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
from django.db import models
from django.conf import settings
class BaseModel(models.Model):
    """Abstract base model adding creation/update audit fields."""
    # Who created the row and when (nullable so non-user writes are allowed).
    user_creation = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='user_creations',
                                      null=True, blank=True, )
    date_creation = models.DateTimeField(auto_now_add=True, null=True, blank=True)
    # Who last updated the row and when (auto_now refreshes on every save).
    user_updated = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='user_updated',
                                     null=True, blank=True)
    date_updated = models.DateTimeField(auto_now=True, null=True, blank=True)

    class Meta:
        # No table of its own; concrete models inherit these fields.
        abstract = True
|
[
"jorge.diazdorta@gmail.com"
] |
jorge.diazdorta@gmail.com
|
7f48901ffb7957bdfddcefdc91daa54ac94994a7
|
10ab15571c44a1c6f115b6145d45e14eba2c82d4
|
/judges/generic/tests/OK/solutions/solution.py
|
41a54231e987ecfc0d4065ec30664ecd2cd66764
|
[
"MIT"
] |
permissive
|
kolejka/kolejka-judge
|
d669b04b0f011a4a8df75165b5c3da2834356807
|
0614c861c08d3196b4c5e7c0e7bf9aae92a9845f
|
refs/heads/master
| 2022-11-07T17:05:32.094084
| 2022-11-02T14:13:51
| 2022-11-02T14:13:51
| 199,710,634
| 2
| 1
|
MIT
| 2021-10-08T20:03:14
| 2019-07-30T18:58:58
|
Python
|
UTF-8
|
Python
| false
| false
| 126
|
py
|
import sys

# Echo back `total` lines from stdin, stripping surrounding whitespace
# and terminating each with a single newline.
total = int(sys.stdin.readline())
for _ in range(total):
    sys.stdout.write(sys.stdin.readline().strip() + '\n')
|
[
"grzegorz.gutowski@uj.edu.pl"
] |
grzegorz.gutowski@uj.edu.pl
|
df8e2b17597057dd4520430460658f9392acb0fb
|
508f741288cd122b5a668db83ce187a2f6ffb527
|
/slope/forms.py
|
4268ce50b1ec33e05a80cc5a9b9e010d69b75132
|
[] |
no_license
|
uzbhasan/csms4-api
|
dec3eae61717ee437b3259b5806999efebf56d2a
|
6b453379a087235dd275967a4957443b111b13a0
|
refs/heads/master
| 2022-12-29T18:28:29.597808
| 2020-10-22T16:31:48
| 2020-10-22T16:31:48
| 292,186,924
| 0
| 0
| null | 2020-10-22T16:31:49
| 2020-09-02T05:23:44
|
Python
|
UTF-8
|
Python
| false
| false
| 2,669
|
py
|
from django import forms
from django.core.exceptions import ValidationError
from django.utils import timezone
from company.models import Company
from slope.models import Slope, Order
class SlopeCreateUpdateAdminForm(forms.ModelForm):
    """
    A form for creating/updating new slope by admin panel.
    """
    # Restrict the choices to active expert companies (type=False).
    company = forms.ModelChoiceField(queryset=Company.objects.all().filter(is_inactive=False, type=False))

    class Meta:
        model = Slope
        fields = ('name', 'lat', 'lng', 'address', 'announced_at', 'deadline', 'company')

    def clean_company(self):
        # A slope must belong to an expert company (type is falsy).
        company = self.cleaned_data.get('company')
        if not company:
            raise ValidationError('Slope must have a company')
        if company and company.type:
            raise ValidationError('The company must be an expert')
        return company

    def clean(self):
        # Check that the "announced_at" and "deadline" values are mutually
        # consistent (translated from the original Uzbek comment).
        cleaned_data = super().clean()
        announced_at = cleaned_data.get('announced_at')
        deadline = cleaned_data.get('deadline')
        if announced_at and deadline is None:
            self.add_error('deadline', 'Announced slopes must have a deadline')
        if announced_at is None and deadline:
            self.add_error('deadline', 'Unannounced slopes should not have a deadline')
        if announced_at and deadline and announced_at > deadline:
            self.add_error('deadline', 'The deadline of the order may not be less than the date of announce')
class OrderCreateUpdateAdminForm(forms.ModelForm):
    """
    A form for creating/updating new order by admin panel.
    """
    # Restrict the choices to active engineer companies (type=True).
    company = forms.ModelChoiceField(queryset=Company.objects.all().filter(is_inactive=False, type=True))

    class Meta:
        model = Order
        fields = ('slope', 'company', 'deadline')

    def clean_company(self):
        # An order must be assigned to an engineer company (type is truthy).
        company = self.cleaned_data.get('company')
        if not company:
            raise ValidationError('Slope must have a company')
        if company and not company.type:
            raise ValidationError('The company must be an engineer')
        return company

    def clean(self):
        # The deadline must not lie in the past (translated from the
        # original Uzbek comment).
        cleaned_data = super().clean()
        now = timezone.now()
        deadline = cleaned_data.get('deadline')
        if deadline and now > deadline:
            self.add_error(
                'deadline',
                f'The deadline of the order may not be less than the current time (current time: { now })'
            )
|
[
"uzbhasan@gmail.com"
] |
uzbhasan@gmail.com
|
2f1945306ffdac1bf6be7627f999c03df7d3cbb9
|
75a3e0432327b3f2bd7cbf68fe59cd9babab5f35
|
/AL_USDMaya/0.26.0/package.py
|
4437e09099a953eb52a772da065d7d98540aac11
|
[
"MIT"
] |
permissive
|
cundesi/open-source-rez-packages
|
b6ff9100cfd4156c0c43d4760ed6b77a0b641bd5
|
28d272db59b5eaa18c625ce9f35ea47c141f778f
|
refs/heads/master
| 2020-03-23T08:16:54.639021
| 2018-07-06T04:50:31
| 2018-07-06T04:50:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 712
|
py
|
# -*- coding: utf-8 -*-
name = 'al_usdmaya'
version = '0.26.0'

# Runtime requirements resolved by rez when the package is used.
requires = [
    'qt-5.6'
]
build_requires = ['cmake-3.2']
# Single supported build variant.
variants = [
    ['platform-linux', 'arch-x86_64', 'maya-2017', 'usd-0.8.3']
]

def commands():
    # Environment changes applied inside a resolved rez context.
    # env.MAYA_SCRIPT_PATH.append("{this.root}/share/usd/plugins/usdMaya/resources/")
    # env.MAYA_SHELVES_ICONS = "{this.root}/share/usd/plugins/usdMaya/resources/"
    # env.MAYA_SHELF_PATH.append("{this.root}/share/usdplugins/usdMaya/resources/")
    env.MAYA_PLUG_IN_PATH.append("{this.root}/plugin")
    env.PYTHONPATH.append("{this.root}/lib/python")
    # env.XBMLANGPATH.append("{this.root}/plugins/usdMaya/resources/")

timestamp = 1515875738
format_version = 2
|
[
"daniel.flood-1@uts.edu.au"
] |
daniel.flood-1@uts.edu.au
|
4591ae12de8f049e5e206fe3071156c3140089ff
|
e431b0938da36d2a400e7a4b2e8849a27bc83f17
|
/test/Python/Test_Gasmix.py
|
52533662301f312fc3113601e14e0e4004c5cfd1
|
[] |
no_license
|
abhikv/MAGBOLTZ-py
|
c9541a98114d16c3e8b947a9b4bd36fed65bdf06
|
50ed30dd8ccafddce96a5650d9bdc9bfed6dbc9f
|
refs/heads/master
| 2020-04-19T03:26:17.293723
| 2019-01-23T03:57:35
| 2019-01-23T03:57:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,100
|
py
|
import sys
import warnings
import time
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
sys.path.append('../../src/Scripts/Python')
from Gasmix import Gasmix
t1 = time.time()
TestObj = Gasmix()
# Gas selection: six slots, only slot 0 used — presumably gas id 2; confirm
# against the Gasmix documentation.
NGS = []
for i in range(6):
    NGS.append(0)
NGS[0]=2
EROOT = []
QT1 = []
QT2 = []
QT3 = []
QT4 = []
DEN = [1 for i in range(4000)]
DENS = 0
NGAS = 1
NSTEP = 4000
NANISO = 2
# Energy grid: 4000 cell-centred steps of width ESTEP up to EFINAL = 0.5.
ESTEP = 1.25e-4
EG = [6.25e-5 + i * (ESTEP) for i in range(4000)]
EFINAL = 0.5
AKT = 2.6037269846599997e-2
ARY = 0
TEMPC = 0
TORR = 0
IPEN = 0
TestObj.setCommons(NGS, EG, EROOT, QT1, QT2, QT3, QT4, DEN, DENS, NGAS, NSTEP,
                   NANISO, ESTEP, EFINAL, AKT, ARY, TEMPC, TORR, IPEN)
if __name__ == '__main__':
    TestObj.Run()
    # NOTE(review): Gases[0] is printed twice (first and last line); one of
    # them may have been meant to be Gases[5] — confirm intent.
    print(TestObj.Gases[0].Q[0][0])
    print(TestObj.Gases[1].Q[0][0])
    print(TestObj.Gases[2].Q[0][0])
    print(TestObj.Gases[3].Q[0][0])
    print(TestObj.Gases[4].Q[0][0])
    print(TestObj.Gases[0].Q[0][0])
    print("hi")
    t2 = time.time()
    print("time:")
    print(t2 - t1)
|
[
"atoumbashar@gmail.com"
] |
atoumbashar@gmail.com
|
4f729df74aa3cb8e7f8acf86cf08033467732bf3
|
5982a9c9c9cb682ec9732f9eeb438b62c61f2e99
|
/Problem_234/my_bad_solution.py
|
d6896b10334da48b8afeefb2a9c1fcca30a0b44b
|
[] |
no_license
|
chenshanghao/LeetCode_learning
|
6fdf98473be8f2240dd86d5586bbd1bbb95d6b0c
|
acf2395f3b946054009d4543f2a13e83402323d3
|
refs/heads/master
| 2021-10-23T05:23:01.970535
| 2019-03-15T05:08:54
| 2019-03-15T05:08:54
| 114,688,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def isPalindrome(self, head):
        """
        :type head: ListNode
        :rtype: bool

        Collect the list's values, then compare against their reverse.
        """
        values = []
        node = head
        while node:
            values.append(node.val)
            node = node.next
        return values == values[::-1]
|
[
"21551021@zju.edu.cn"
] |
21551021@zju.edu.cn
|
d1b1f4eb1db480a94e0fb5a36e25b96368b38a44
|
efe946e6fab19f948aa81a36b67d56feae322094
|
/nn/io.py
|
bdbc60c8afccadd949b9fbdb9855a044e09b9e90
|
[] |
no_license
|
krivacic/BMI203_final
|
f3a148b51c99cd2dd742d4eeadf0c7c1f6195ec7
|
537c8e79e3865df9588a08009baa7ea5dc6978f8
|
refs/heads/master
| 2021-01-23T03:53:27.907540
| 2017-03-25T06:30:24
| 2017-03-25T06:30:24
| 86,131,770
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,494
|
py
|
import numpy as np
"""
A note on data parsing:
Most of this is self-explanatory. Each basepair is a 4-bit byte consisting of a single 1 and three 0s.
What I want to point out is that for the negative test set, I am just taking every 17 basepairs as one
unit of data, rather than every possible 17-basepair stretch. I tried this at first; the datafile alone
is over 200 mb, and training the neural network was impossible.
"""
def parse(l):
    """One-hot encode sequences: each A/T/G/C becomes a 4-bit code, other characters are skipped."""
    codes = {
        'A': [0, 0, 0, 1],
        'T': [0, 0, 1, 0],
        'G': [0, 1, 0, 0],
        'C': [1, 0, 0, 0],
    }
    encoded = []
    for item in l:
        row = []
        for ch in item:
            row.extend(codes.get(ch, []))
        encoded.append(row)
    return np.array(encoded)
def get_data(filename):
    """Read *filename* line by line and one-hot encode each line (4 bits per base).

    The body duplicated parse() character-for-character; delegate to it so
    the A/T/G/C encoding lives in one place.  Trailing newlines are ignored
    exactly as before (they are not in the code table).
    """
    with open(filename) as f:
        X = parse(f)
    #print("Finished parsing positives")
    return X
"""
To be run only once. Makes a file that has every possible 17-length read of the negative data.
"""
def get_negatives():
    """One-off preprocessing: split the negative FASTA into 17-base chunks.

    Reads 'yeast-upstream-1k-negative.fa' and writes one str(list) per
    17-base chunk to 'negative_formatted_less.txt'.  Chunks shorter than
    17 bases are dropped.

    Fix: the output file was opened without ever being closed (resource
    leak); both files now use `with`.
    """
    temp = [[]]
    i = 0
    with open("yeast-upstream-1k-negative.fa") as f:
        for line in f:
            if line[0:1] == '>':
                # Header line: start collecting a new sequence.
                temp.append([])
                i += 1
            elif line[0:1] != '>':
                for ch in line:
                    if ch != '\n':
                        temp[i].extend(ch)
    X = []
    k = 0
    for seq in temp:
        n = 0
        # Preserved from the original: one (possibly empty) chunk slot is
        # appended per character; the len>16 filter below discards the rest.
        for _ in seq:
            X.append([])
            X[k].extend(seq[n:n + 17])
            n += 17
            k += 1
    new = [s for s in X if len(s) > 16]
    with open('negative_formatted_less.txt', 'w') as out:
        for item in new:
            out.write("%s\n" % item)
def read_negatives(path='negative_formatted_less.txt'):
    """Read back the formatted negatives: one Python list literal per line.

    Fix: replaces eval() with ast.literal_eval — the file content is list
    literals, and literal_eval parses those without executing arbitrary
    code.  The path is parameterized with the old name as default, so
    existing callers are unchanged.
    """
    import ast  # local import keeps the module's import block untouched
    X = []
    with open(path, 'r') as inf:
        for line in inf:
            X.append(ast.literal_eval(line))
    #print("Finished reading negatives")
    return X
"""
X1 = get_data()
l = read_negatives()
X = parse(l)
print("done parsing negatives")
print(X1)
print(X)
"""
|
[
"krivacic@gmail.com"
] |
krivacic@gmail.com
|
52152c8ace1241e6d3feec3091389b8c3c65fe7e
|
3ee456a106601cfa0f7b0ad751bf3114207c4afd
|
/src/homepage/urls.py
|
548a905f60b8eb0a08f2cd5c95a25cd21460fe12
|
[] |
no_license
|
jamhodor/webtest
|
f6d5c84ece23c63ee5791ee9f0aa0c8ee06fcae2
|
bd6c14bb77e3a0512e8feecaa5b6e51f73372634
|
refs/heads/master
| 2022-12-14T23:23:17.240239
| 2019-02-17T11:18:47
| 2019-02-17T11:18:47
| 133,946,982
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 151
|
py
|
from django.urls import path
from . import views
urlpatterns = [
    # Site root -> home view.
    path('', views.home, name="home"),
    # /tiny -> tinymce view.
    path('tiny', views.tinymce, name="tiny")
]
|
[
"silvani.services@gmail.com"
] |
silvani.services@gmail.com
|
67086c4670dfe4cb66c73ee192fb47a5a8183bcf
|
4597f9e8c2772f276904b76c334b4d181fa9f839
|
/Python/Compare-Version-Numbers.py
|
85b753029af257cf562da8fb4d2fb870da2c0e73
|
[] |
no_license
|
xxw1122/Leetcode
|
258ee541765e6b04a95e225284575e562edc4db9
|
4c991a8cd024b504ceb0ef7abd8f3cceb6be2fb8
|
refs/heads/master
| 2020-12-25T11:58:00.223146
| 2015-08-11T02:10:25
| 2015-08-11T02:10:25
| 40,542,869
| 2
| 6
| null | 2020-09-30T20:54:57
| 2015-08-11T13:21:17
|
C++
|
UTF-8
|
Python
| false
| false
| 872
|
py
|
class Solution:
    def compareVersion(self, version1, version2):
        """Compare two dotted version strings numerically.

        :param version1: a string like "1.2.10"
        :param version2: a string like "1.2"
        :return: -1 if version1 < version2, 1 if greater, 0 if equal

        Cleanup: str.split('.') already returns the whole string when there
        is no dot, so the original find('.') branching was dead weight, and
        the manual int-conversion loops collapse to comprehensions.
        """
        a = [int(part) for part in version1.split('.')]
        b = [int(part) for part in version2.split('.')]
        # Pad the shorter list with zeros so "1.0" compares equal to "1".
        width = max(len(a), len(b))
        a += [0] * (width - len(a))
        b += [0] * (width - len(b))
        if a < b:
            return -1
        elif a > b:
            return 1
        else:
            return 0
|
[
"jiangyi0425@gmail.com"
] |
jiangyi0425@gmail.com
|
93d6bca660ec3009959a781aec2acb3664d4890f
|
17bc840981c87626f4de91017ee1d9d1a650510c
|
/cnn8.py
|
6ef34b7d4c542623a84c39efa8c8c1da928c993d
|
[] |
no_license
|
mhusseinsh/RaceCarControl
|
6e0bb99d96cb9d15b78a2945c0724fdadc2f038d
|
af58251311b001e6f521826946da57e7f26ccb8f
|
refs/heads/master
| 2021-09-10T13:41:25.034934
| 2018-03-27T01:34:01
| 2018-03-27T01:34:01
| 112,090,949
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,017
|
py
|
#!/usr/bin/python
# import the necessary packages
from skimage.measure import compare_ssim
import argparse
import imutils
import cv2
##
import os
import sys
import csv
import numpy as np
import tensorflow as tf
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import keras.backend as K
from keras.preprocessing.image import img_to_array, load_img
from keras.models import Sequential, model_from_json
from keras.optimizers import Adam, SGD
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Dropout, Flatten, BatchNormalization
from keras.utils import np_utils
from keras.utils import plot_model
from keras.callbacks import EarlyStopping
from tensorflow.python import debug as tf_debug
# global variables
cursor_index = 0
images = list()       # loaded image arrays (filled by process_dataset)
labels_list = list()  # label rows matching `images` (filled by process_dataset)
count = 0             # reused by split_train_valid to hold the validation indices
fps = 0
fig = 0 # figure object for viewing frames and navigation
# function that handels csv data supported
# -----------------------------------------------------------------------------
def process_dataset(images_path, labels_path):
    """Populate the global `images` and `labels_list` from a labels CSV and an image folder.

    :param images_path: directory containing .jpg files named after CSV column 0
    :param labels_path: CSV whose rows are (image_name, ?, c2, c3, c4); the
        name column is dropped after the matching image is loaded
        # NOTE(review): column meaning inferred from the indexing — confirm
        # against the actual data file.

    Perf fix: the original rescanned os.listdir(images_path) once per label
    (O(labels * files)); the directory is now indexed once by file stem.
    Labels with no matching .jpg still remain in labels_list, matching the
    original behaviour.
    """
    global images
    global labels_list
    with open(labels_path) as f:
        read_file = csv.reader(f, delimiter=',')
        for row in read_file:
            labels_list.append([row[0], row[2], row[3], row[4]])
    # One pass over the directory: map file stem -> filename.
    jpg_by_stem = {
        os.path.splitext(filename)[0]: filename
        for filename in os.listdir(images_path)
        if filename.endswith(".jpg")
    }
    for entry in labels_list:
        filename = jpg_by_stem.get(entry[0])
        if filename is not None:
            images.append(img_to_array(load_img(images_path + '/' + filename)))
    for row in labels_list:
        del row[0]
def split_train_valid():
    """Shuffle the global dataset and split it 90% train / 10% validation.

    Side effect: stores the validation indices in the global `count`.
    Returns (train_data, train_labels, valid_data, valid_labels).
    """
    global count
    order = np.random.permutation(len(images))
    cut = int(0.9 * len(images))
    train_idx = order[0:cut]
    valid_idx = order[cut:len(images)]
    count = valid_idx
    train_data = [images[i] for i in train_idx]
    train_labels = [labels_list[i] for i in train_idx]
    valid_data = [images[i] for i in valid_idx]
    valid_labels = [labels_list[i] for i in valid_idx]
    return train_data, train_labels, valid_data, valid_labels
class cnn():
    # Thin holder for a Keras Sequential model; layers are added by the caller.
    def __init__(self):
        self.model = Sequential()
def train_network():
    """Build, train, persist and plot a small regression CNN.

    Uses split_train_valid() for the data, fits for one epoch, serializes the
    model to model_test.json / model_test.h5, and prepares accuracy/loss
    figures (fig1/fig2) from the Keras history.

    NOTE(review): the function relied on a large amount of commented-out
    experimental code (extra conv blocks, batch-norm, SGD, early stopping);
    that dead code has been removed — the executed statements are unchanged.
    """
    x, y, x_val, y_val = split_train_valid()
    print(len(x[0]))
    print(x[0].shape)
    rows = x[0].shape[0]
    cols = x[0].shape[1]
    channels = x[0].shape[2]
    print(np.array(x).shape)
    y = np.array(y).astype(float)
    y_val = np.array(y_val).astype(float)
    cnn_m = cnn()
    # Conv stack: 128 -> 64 -> 32 -> 16 filters, 3x3 kernels, ReLU.
    cnn_m.model.add(Conv2D(128, kernel_size=3, kernel_initializer='random_uniform', bias_initializer='zeros', padding='same', data_format="channels_last", input_shape=(rows, cols, channels)))
    cnn_m.model.add(Activation('relu'))
    cnn_m.model.add(MaxPooling2D(pool_size=(2, 2)))
    cnn_m.model.add(Conv2D(64, kernel_size=3, kernel_initializer='random_uniform', bias_initializer='zeros', padding='same'))
    cnn_m.model.add(Activation('relu'))
    cnn_m.model.add(Conv2D(32, kernel_size=3, kernel_initializer='random_uniform', bias_initializer='zeros', padding='same'))
    cnn_m.model.add(Activation('relu'))
    cnn_m.model.add(Conv2D(16, kernel_size=3, kernel_initializer='random_uniform', bias_initializer='zeros', padding='same'))
    cnn_m.model.add(Activation('relu'))
    cnn_m.model.add(Dropout(0.2))
    # Regression head: two dense layers down to 3 output values.
    cnn_m.model.add(Flatten())
    cnn_m.model.add(Dense(64))
    cnn_m.model.add(Activation('relu'))
    cnn_m.model.add(Dense(32))
    cnn_m.model.add(Activation('relu'))
    cnn_m.model.add(Dropout(0.2))
    cnn_m.model.add(Dense(3))
    adam = Adam(lr=0.001, decay=0.0005)
    cnn_m.model.compile(loss='mean_squared_error', optimizer=adam, metrics=['accuracy'])
    history = cnn_m.model.fit(np.array(x), y, epochs=1, batch_size=16, validation_data=(np.asarray(x_val), y_val), shuffle=True)
    # Persist the trained model: architecture as JSON, weights as HDF5.
    model_json = cnn_m.model.to_json()
    with open("model_test.json", "w") as json_file:
        json_file.write(model_json)
    cnn_m.model.save_weights("model_test.h5")
    print("Saved model to disk")
    print(history.history.keys())
    print(count)
    # Accuracy curve over epochs (train vs validation).
    fig1 = plt.figure()
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    # Loss curve over epochs (train vs validation).
    fig2 = plt.figure()
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
# # function responsible for processing the video and extracting the frames
# # -----------------------------------------------------------------------------
# def process_video(video_path):#, timestamp, events_id):
# # variables initialization
# global count
# global fps
# success = True
# # creating directory for new images
# video_name = os.path.basename(os.path.splitext(video_path)[0])
# print("Start processing: " + video_name)
# if not os.path.isdir(video_name):
# os.mkdir(video_name)
# # initialize captured video and retrieve the frames per second for the video
# vidcap = cv2.VideoCapture(video_path)
# fps = vidcap.get(cv2.CAP_PROP_FPS)
# success, image = vidcap.read() # images are numpy.ndarray
# # saving video frames into images list
# while success:
# images.append((timestamp_list[count], image))
# # save frame as JPEG file, uncomment if needed
# # cv2.imwrite(os.path.join(video_name, "frame%d.jpg" % count), image)
# count += 1
# success, image = vidcap.read()
# print("finished reading %d frames" %count)
# main function
# -----------------------------------------------------------------------------
def main():
    """Entry point: expects <images_dir> <labels_csv> as the two CLI args."""
    if len(sys.argv) != 3:
        print('Wrong number of arguments')
        return
    process_dataset(sys.argv[1], sys.argv[2])
    train_network()


if __name__ == '__main__':
    main()
|
[
"mh806@tf.uni-freiburg.de"
] |
mh806@tf.uni-freiburg.de
|
9a75061cef61e257b2fb8173f4084eba8aaf6596
|
5b1d10b56af48aa2eef4b5735fe74de0b7f22982
|
/freelancer/forms.py
|
767432c850982fbc2ec984197204747c1eb31750
|
[] |
no_license
|
rishabh-1004/econnect
|
70fbc7458bba9fa5215c246a7e0e0b1db8ed2584
|
ca1976b260c8d1e795773ced5e6ff006d11fba48
|
refs/heads/master
| 2020-04-27T15:59:47.692991
| 2019-03-08T04:17:49
| 2019-03-08T04:17:49
| 115,212,282
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,413
|
py
|
from django import forms
from freelancer.models import FreelancerProfile,Test
class FreelancerProfileForm(forms.ModelForm):
    """ModelForm for FreelancerProfile.

    The profile is edited through many individual optional text inputs
    (college1..3, school1..2, job1..4, internship1..4, project1..4,
    skill1..4, research1..4); clean() joins each group into a single
    comma-separated string under the plural key (jobs, college, school,
    internships, projects, skills, research) for the model to store.
    """
    # --- Education -----------------------------------------------------------
    college1 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    college2 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    college3 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    school1 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    school2 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    # --- Work experience -----------------------------------------------------
    job1 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    job2 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    job3 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    job4 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    internship1 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    internship2 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    internship3 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    internship4 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    # --- Projects / skills / research ---------------------------------------
    project1 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    project2 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    project3 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    project4 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    skill1 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    skill2 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    skill3 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    skill4 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    research1 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    research2 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    research3 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    research4 = forms.CharField(required=False, max_length=200, widget=forms.TextInput(
        attrs={'class': 'form-control col-md-6', 'placeholder': 'Sample text',}))
    # Free-text "about you" area.
    # NOTE(review): 'column' is not a standard HTML textarea attribute
    # ('cols' presumably intended) — confirm against the template.
    additional = forms.CharField(widget=forms.Textarea(
        attrs={'class': 'form-control', 'placeholder': 'About You', 'rows': "5", 'column': "30",}))
    picture = forms.ImageField(help_text="Upload image: ", required=False)

    class Meta:
        model = FreelancerProfile
        fields = [
            'picture', 'phone', 'school1', 'school2', 'college1', 'college2', 'college3', 'job1', 'job2', 'job3', 'job4', 'internship1',
            'internship2', 'internship3', 'internship4', 'project1', 'project2', 'project3', 'project4', 'skill1',
            'skill2', 'skill3', 'skill4', 'research1', 'research2', 'research3', 'research4', 'additional',
        ]

    def clean(self):
        # Join each group of per-item inputs into a single comma-separated
        # string stored under the plural key. Empty optional fields yield
        # empty segments (e.g. "a,,,").
        # NOTE(review): super().clean() is never called, so ModelForm's own
        # cleaning is skipped — confirm this is intentional.
        self.cleaned_data["jobs"] = self.cleaned_data['job1'] + "," + self.cleaned_data['job2'] + "," + self.cleaned_data['job3'] + "," + self.cleaned_data['job4']
        self.cleaned_data["college"] = self.cleaned_data['college1'] + "," + self.cleaned_data['college2'] + "," + self.cleaned_data['college3']
        self.cleaned_data["school"] = self.cleaned_data['school1'] + "," + self.cleaned_data['school2']
        self.cleaned_data["internships"] = self.cleaned_data['internship1'] + "," + self.cleaned_data['internship2'] + "," + self.cleaned_data['internship3'] + "," + self.cleaned_data['internship4']
        self.cleaned_data["projects"] = self.cleaned_data['project1'] + "," + self.cleaned_data['project2'] + "," + self.cleaned_data['project3'] + "," + self.cleaned_data['project4']
        self.cleaned_data["skills"] = self.cleaned_data['skill1'] + "," + self.cleaned_data['skill2'] + "," + self.cleaned_data['skill3'] + "," + self.cleaned_data['skill4']
        self.cleaned_data["research"] = self.cleaned_data['research1'] + "," + self.cleaned_data['research2'] + "," + self.cleaned_data['research3'] + "," + self.cleaned_data['research4']
        return self.cleaned_data
class TestForm(forms.ModelForm):
    """Small test form that aggregates four job inputs into the `jobs` field."""
    job1 = forms.CharField(max_length=120)
    job2 = forms.CharField(max_length=120)
    job3 = forms.CharField(max_length=120)
    job4 = forms.CharField(max_length=120)

    class Meta:
        model = Test
        fields = [
            'jobs', 'job1', 'job2', 'job3', 'job4', 'school',
        ]

    def clean(self):
        # BUG FIX: the original expression used a bare comma between job2 and
        # job3, which built a 2-tuple ('j1j2', 'j3j4') instead of a string.
        # Join all four with commas, mirroring FreelancerProfileForm.clean().
        self.cleaned_data["jobs"] = (
            self.cleaned_data['job1'] + "," + self.cleaned_data['job2'] + ","
            + self.cleaned_data['job3'] + "," + self.cleaned_data['job4']
        )
        return self.cleaned_data
|
[
"rishabh.sharma@tryscribe.com"
] |
rishabh.sharma@tryscribe.com
|
4163616ca559754ae87d91b4c6028ae70537bcbb
|
52984f7f9241a14313d4ab8b4bce17df8b28cbed
|
/blog/views.py
|
419f5eebf831be063e865b49d2dddddb7e8f9d80
|
[] |
no_license
|
aevans1910/happy-me
|
703e3c7f0829d7f0c6059af62c57240432d54ae4
|
fe4b6012db935c33d4135780b6c846683d7056c2
|
refs/heads/master
| 2021-09-26T04:42:58.680987
| 2020-03-07T03:03:02
| 2020-03-07T03:03:02
| 244,538,436
| 0
| 0
| null | 2021-09-22T18:42:04
| 2020-03-03T04:06:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,413
|
py
|
from django.shortcuts import render
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic import CreateView
from django.http import HttpResponseRedirect
from django.urls import reverse_lazy
from blog.models import Posts
from blog.forms import PostsForm
class PostsListView(ListView):
    """Renders a list of all the Posts."""
    model = Posts

    def get(self, request):
        """GET a list of Posts."""
        context = {'posts': self.get_queryset().all()}
        return render(request, 'blog/list.html', context)
class PostsDetailView(DetailView):
    """Renders a specific post based on its slug."""
    model = Posts

    def get(self, request, slug):
        """Return a specific blog post by slug (case-insensitive match)."""
        matching_post = self.get_queryset().get(slug__iexact=slug)
        return render(request, 'blog/post.html', {'post': matching_post})
class PostsCreateView(CreateView):
    """Shows the new-post form and handles its submission."""

    def get(self, request, *args, **kwargs):
        """Render an empty post form."""
        return render(request, 'blog/new.html', {'form': PostsForm()})

    def post(self, request, *args, **kwargs):
        """Validate and save the submitted post, then redirect to the list.

        On validation failure the bound form (with errors) is re-rendered.
        """
        form = PostsForm(request.POST)
        if form.is_valid():
            form.save()  # return value was previously bound to an unused local
            return HttpResponseRedirect(reverse_lazy('blog-list-page'))
        return render(request, 'blog/new.html', {'form': form})
|
[
"ariane.evans@student.makeschool.com"
] |
ariane.evans@student.makeschool.com
|
9fb2b348d1dece719559d459a60ef6d70f2c461d
|
af0195a1489600d2f6f82aaec5f2026da10b91f8
|
/overSpeeding.py
|
31192019bb68583a8b3b725f7b2dd7a9076ec97f
|
[] |
no_license
|
AnupKumarPanwar/Traffic-violation-detection
|
7736ee29da57f4f7ba666cfa1365a7d110b53481
|
366bb489a9cbeedb4ac73e66f4491e8d54ae476d
|
refs/heads/master
| 2021-04-15T13:25:04.142666
| 2018-03-30T06:29:26
| 2018-03-30T06:29:26
| 126,157,226
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,005
|
py
|
import numpy as np
import cv2

# Speed-estimation script: detect a car with a Haar cascade, track it with a
# KCF tracker, and estimate speed from the vertical pixels covered between
# entering and leaving the frame, divided by the number of frames tracked.

has_detected = False
tracker = cv2.TrackerKCF_create()
haar_face_cascade = cv2.CascadeClassifier('./cars.xml')
cap = cv2.VideoCapture('http://192.168.5.165:8080/video')
# Rolling window of the last 10 detection results (1 = tracked, 0 = lost).
detection_array = [0] * 10
image_count = 0
enterFrame = False   # car has entered the measurement zone
exitFrame = False    # car has left the measurement zone
enterFrameY = 0      # y coordinate when tracking became stable
lastFrameY = 0       # most recent stable y coordinate
totalFrames = 0      # frames elapsed since the car entered

while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.IMREAD_COLOR)
    cars = haar_face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    cv2.line(gray, (0, 400), (500, 400), (0, 0, 255), 20)
    if enterFrame:
        totalFrames += 1
    ok, bbox = tracker.update(gray)
    if ok:
        # Tracking succeeded: draw the box and record a hit in the window.
        p1 = (int(bbox[0]), int(bbox[1]))
        p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
        cv2.rectangle(gray, p1, p2, (255, 0, 0), 2, 1)
        detection_array = detection_array[1:]
        detection_array.append(1)
        # Only trust the position after 10 consecutive successful updates.
        if 0 not in detection_array:
            lastFrameY = int(bbox[1])
            if not enterFrame:
                enterFrameY = int(bbox[1])
                enterFrame = True
                print("Enter frame : ", str(enterFrameY))
    else:
        # Tracking lost: record a miss; 10 consecutive misses means the car
        # has left the frame, so report the estimated speed.
        detection_array = detection_array[1:]
        detection_array.append(0)
        has_detected = False
        if 1 not in detection_array and not exitFrame:
            if enterFrame:
                exitFrameY = lastFrameY
                exitFrame = True
                pxCovered = exitFrameY - enterFrameY
                print("Px covered : " + str(pxCovered))
                print("Total Frames : " + str(totalFrames))
                # BUG FIX: the precision argument was outside round(), i.e.
                # str(round(x), 2), which raises TypeError at runtime.
                # NOTE(review): 21 is presumably a px-to-speed calibration
                # constant — confirm its units.
                print("Speed : " + str(round(abs(pxCovered * 21 / totalFrames), 2)))
    # (Re)initialise the tracker when exactly one car is detected and we are
    # not already tracking one.
    if len(cars) == 1:
        for (x, y, w, h) in cars:
            bbox = (x, y, w, h)
            if has_detected == False:
                tracker = cv2.TrackerKCF_create()
                has_detected = True
                ok = tracker.init(gray, bbox)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    cv2.imshow('frame', gray)

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
|
[
"1anuppanwar@gmail.com"
] |
1anuppanwar@gmail.com
|
bd4bfd2045243258a2936d602e25e747bd5817ce
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_quivered.py
|
ae5ecb9ccecdd6d0e423ea42fa27b78863065fdc
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
from xai.brain.wordbase.nouns._quiver import _QUIVER
# class header
class _QUIVERED(_QUIVER, ):
    # Word entry for the noun form "quivered"; all behaviour comes from _QUIVER.
    def __init__(self,):
        _QUIVER.__init__(self)
        self.name = "QUIVERED"  # upper-case surface form of the word
        self.specie = 'nouns'   # word class of this entry
        self.basic = "quiver"   # lemma / base form
        self.jsondata = {}      # extra metadata, empty by default
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
434c4bd312a9abd7b4c412e91f46470e4d93787a
|
3151fabc3eb907d6cd1bb17739c215a8e95a6370
|
/storagetest/pkgs/pts/compilebench/__init__.py
|
2b4e431708e278479b68217206765020f8856961
|
[
"MIT"
] |
permissive
|
txu2k8/storage-test
|
a3afe96dc206392603f4aa000a7df428d885454b
|
62a16ec57d619f724c46939bf85c4c0df82ef47c
|
refs/heads/master
| 2023-03-25T11:00:54.346476
| 2021-03-15T01:40:53
| 2021-03-15T01:40:53
| 307,604,046
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,040
|
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@file : __init__.py.py
@Time : 2020/11/12 18:27
@Author: Tao.Xu
@Email : tao.xu2008@outlook.com
"""
from .compile_bench import *
__all__ = ['CompileBench']
"""
compilebench
==============
https://oss.oracle.com/~mason/compilebench/
https://openbenchmarking.org/test/pts/compilebench
Compilebench tries to age a filesystem by simulating some of the disk IO
common in creating, compiling, patching, stating and reading kernel trees.
It indirectly measures how well filesystems can maintain directory locality
as the disk fills up and directories age.
This current test is setup to use the makej mode with 10 initial directories
Quick and dirty usage: (note the -d option changed in 0.6)
1. Untar compilebench
2. run commands:
./compilebench -D some_working_dir -i 10 -r 30
./compilebench -D some_working_dir -i 10 --makej
./compilebench -D some_working_dir -i 10 --makej -d /dev/xxx -t trace_file
./compilebench --help for more
"""
if __name__ == '__main__':
pass
|
[
"tao.xu2008@outlook.com"
] |
tao.xu2008@outlook.com
|
623e4cf25c34849f6d992b333e824ebbbd7a03eb
|
afcc21cdc0369127f0bb4e543cceee2e6c523354
|
/util.py
|
11d298bb9f99e64a1626671f0ce86b8c4f0fde61
|
[] |
no_license
|
p13i/Segmentally-Boosted-HMMs
|
60ea6b46c0228bbf7522ee4a9bdc0237c5ede3a0
|
e26a7108886233fc89f43dbbee6aafb785f7201c
|
refs/heads/master
| 2020-05-07T16:22:37.978992
| 2018-01-30T22:38:27
| 2018-01-30T22:38:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,424
|
py
|
# sliding window for 1 second
import sys
import numpy as np
from scipy import stats
import math
import Constants
# data : nd array num_samples * num_features
# function handle : calls this function on all frames and concatenates the result in the output
# sliding_size : number of seconds in sliding window if index_time is False else the number of samples in window
# overlap " between 0 -100 ; percentage overlap between consecutive frames
# if index_time = True the first column must be the timestamp column and should have time in milli seconds
def sliding_window(data, labels, sliding_window_size, overlap, function_handle, index_time=False):
    """Apply function_handle over overlapping windows of `data`.

    Args:
        data: ndarray of shape (num_samples, num_features).
        labels: per-sample labels; each window's label is the mode of its span.
        sliding_window_size: window length in samples (seconds if index_time,
            per the original author's note).
        overlap: percentage overlap between consecutive windows, 0-100.
        function_handle: called on each window; its outputs are stacked.
        index_time: if True the first column is a millisecond timestamp —
            NOTE(review): that branch is not implemented here; the function
            then returns (None, None).

    Returns:
        (output, output_labels) ndarrays, or (None, None) on invalid input.
    """
    if (sliding_window_size < 0 or overlap < 0 or overlap > 100
            or Constants.smaller_sliding_window > data.shape[0]):
        print("incorrect input formatting")
        return (None, None)
    # Fall back to the smaller window when the data is short.
    if data.shape[0] > Constants.smaller_sliding_window and data.shape[0] < Constants.sliding_window_size:
        sliding_window_size = Constants.smaller_sliding_window
    r, c = data.shape
    # (removed dead code: an empty `if r > 1000: pass` branch)
    print("Shape of input array:", data.shape)
    temp = function_handle(data[0:sliding_window_size, :])
    output = None
    output_labels = None
    if not index_time:
        # Step between window starts, derived from the overlap percentage.
        overlap_samples = int(np.floor(sliding_window_size * ((100.0 - overlap) / 100.0)))
        print(overlap_samples)
        num_rows_output = int((r - sliding_window_size) / overlap_samples) + 1
        output = np.zeros((num_rows_output, temp.shape[0]))
        output_labels = np.zeros(num_rows_output)
        start = 0
        stop = sliding_window_size
        rindex = 0
        while stop < r:
            window = data[start:stop, :]
            output[rindex, :] = function_handle(window)
            m = stats.mode(labels[start:stop], axis=None)
            output_labels[rindex] = m[0]
            rindex += 1
            start = start + overlap_samples
            stop = start + sliding_window_size
    return output, output_labels
def sliding_window_v2(data, labels, sliding_window_size, overlap, function_handle, index_time=False):
    # Variant of sliding_window that keeps every window label-pure: when a
    # window spans a label boundary, the window start is advanced to the first
    # sample of the newest label before extracting features.
    # Exits the process on invalid arguments instead of returning (None, None).
    if (sliding_window_size < 0 or overlap < 0 or overlap > 100 or sliding_window_size > data.shape[0]):
        print("incorrect input formatting")
        sys.exit(1)
    r, c = data.shape
    print("Shape of input array:", data.shape)
    # Probe call to size the feature dimension of the output.
    temp = function_handle(data[0:sliding_window_size, :])
    output = output_labels = None
    if not index_time:
        print(temp.shape[0])
        print(int(np.floor(sliding_window_size * ((100.0 - overlap) / 100.0))))
        # Over-allocate rows (ceil of samples / step, +1); unused all-zero rows
        # are masked out after the loop.
        output = np.zeros((int(np.ceil(r / int(np.floor(sliding_window_size * ((100.0 - overlap) / 100.0))))) + 1, temp.shape[0]))
        output_labels = np.zeros((int(np.ceil(r / int(np.floor(sliding_window_size * ((100.0 - overlap) / 100.0))))) + 1))
        start = 0
        stop = sliding_window_size - 1
        rindex = 0
        while start < r - 10:
            label_window = labels[start:stop]
            unique_labels = np.unique(label_window)
            if len(unique_labels) != 1:
                # Window crosses a label boundary: skip forward to the first
                # occurrence of the newest label.
                # @TODO this could be dangerous
                new_label = label_window[-1]
                start = start + np.min(np.where(label_window == new_label)[0])
                stop = start + sliding_window_size
                print("In If")
                print("start:", start)
                print("stop:", stop)
            window = data[start:stop, :]
            features = function_handle(window)
            output[rindex, :] = features
            # NOTE(review): label_window is not re-sliced after the boundary
            # adjustment above, so this is the label of the pre-adjustment
            # window start — confirm that is intended.
            output_labels[rindex] = label_window[0]
            start = start + int(np.floor(sliding_window_size * ((100.0 - overlap) / 100.0)))
            stop = start + sliding_window_size
            rindex += 1
            print("start:", start)
            print("stop:", stop)
        # Drop the over-allocated, never-written (all-zero) rows.
        mask = np.all(output == 0, axis=1)
        output_labels = output_labels[~mask]
        output = output[~mask, :]
    return output, output_labels
|
[
"vmurahari3@gatech.edu"
] |
vmurahari3@gatech.edu
|
b5dcb853f578c7877f1e458fff4ed77f236bb315
|
5d293f040ae1a86e8da752b3fa5c6823504fc3ca
|
/RaspberryPi_Therm/plataform-device.py
|
345df366aace20865e42925f422b83e30b776673
|
[] |
no_license
|
rzarref/konker-devices
|
8c99ce829853b9d2cc636a123a5e5d59c4546809
|
109dfcc049fe4c5e79775952fb923d5ceaf5e030
|
refs/heads/master
| 2020-12-02T16:43:56.666694
| 2017-02-04T17:33:16
| 2017-02-04T17:33:16
| 96,576,301
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,019
|
py
|
#! /usr/bin/python3
import paho.mqtt.client as mqtt
import json
import os
import threading
from threading import Timer
import time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11, GPIO.IN)
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
### Temperature Sensor DS18B20
## Nota: mudar o diretório de 28-0415a444b2ff para o nome visto em /sys/bus/w1/devices/
base_dir = '/sys/bus/w1/devices/28-0415a444b2ff/'
device_file = base_dir + '/w1_slave'
def read_temp_raw():
    """Return the raw lines of the DS18B20 1-wire sysfs file (`device_file`).

    Uses a context manager so the file is closed even if the read raises
    (the original opened and closed the handle manually).
    """
    with open(device_file, 'r') as f:
        return f.readlines()
def read_temp():
    """Poll the DS18B20 until the CRC line ends in 'YES', then return the
    temperature in degrees Celsius (None if the 't=' marker is missing)."""
    lines = read_temp_raw()
    # Retry until the sensor reports a valid CRC on the first line.
    while not lines[0].strip().endswith('YES'):
        time.sleep(0.2)
        lines = read_temp_raw()
    marker = lines[1].find('t=')
    if marker != -1:
        # Value after 't=' is millidegrees Celsius.
        return float(lines[1][marker + 2:]) / 1000.0
### End of Temperature Sensor DS18B20
### MQTT code
def on_connect(client, userdata, rc):
    # MQTT connect callback: log the result code and subscribe to this
    # device's inbound topic ("sub/<dev_name>/sub").
    print("Connected with result code "+str(rc))
    mqttc.subscribe("sub/" + dev_name + "/sub")
def on_message(mqttc, userdata, msg):
    # MQTT message callback: decode the JSON payload and stash its fields in
    # module globals for the rest of the script.
    json_data = msg.payload.decode('utf-8')
    print("Message received: "+json_data)
    global firmware_ver, measure_type, measure_value, measure_unit
    data = json.loads(json_data)
    firmware_ver = data.get("fw")        # firmware version (None if absent)
    measure_type = data.get("metric")    # metric name, e.g. "temperature"
    measure_value = data.get("value")    # numeric reading
    measure_unit = data.get("unit")      # unit string, e.g. "Celsius"
# --- MQTT client setup -------------------------------------------------------
# Device credentials and broker address — fill in before deployment.
dev_name = ""
passkey = ""
ip = ""
mqttc = mqtt.Client("Konker" + dev_name)
mqttc.username_pw_set(dev_name, passkey)
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.connect(ip, 1883)  # 1883 = default unencrypted MQTT port
mqttc.loop_start()       # start the network loop in a background thread
### End of MQTT code
# Millisecond-resolution UNIX timestamp helper.
current_milli_time = lambda: int(round(time.time() * 1000))
def TempMessage():
    # Publish the current DS18B20 reading to "pub/<dev_name>/temperature",
    # then re-arm a 10 s one-shot Timer so this repeats indefinitely.
    # NOTE(review): nothing in the visible script calls TempMessage() to start
    # the cycle — confirm the entry point.
    temp = read_temp()
    (rc, mid) = mqttc.publish("pub/"+ dev_name +"/temperature", json.dumps({"ts": current_milli_time(), "metric": "temperature", "value": temp, "unit": "Celsius"}))
    print(rc)  # publish result code (0 == MQTT_ERR_SUCCESS)
    t = threading.Timer(10.0, TempMessage)
    t.start()
|
[
"noreply@github.com"
] |
rzarref.noreply@github.com
|
7741d2640a25fdf9bfc3c4d3a9f38b475e4ced61
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02753/s804894259.py
|
636735cbd5323d345ac8e012b55a33a9143478c1
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 94
|
py
|
# Sort the input's characters; with three characters, first == third exactly
# when all of them are equal.
chars = sorted(input())
print("No" if chars[0] == chars[2] else "Yes")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
8334889f54f837ede8e0bea0e4beb142a561347e
|
cf2bfe1cba8a8069ea5f7dd19338140dbcb785fd
|
/bcs/rsa.py
|
2004e5058f35cb8e9d50c8432c28373274d53dcc
|
[] |
no_license
|
mirrorbrain/m2
|
7f67057bbd748e978f18cdbe64fd27cd3b703263
|
166ebcf9be8720fc595771e4fcdfa9af89e46049
|
refs/heads/master
| 2021-04-09T11:18:00.252505
| 2019-05-14T14:09:29
| 2019-05-14T14:09:29
| 125,514,880
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,650
|
py
|
#!/usr/bin/env python
#Auteurs
#Nogues Mael
#Grandmontagne Mathieu
import random
from random import randrange
# A bigint-capable integer square root is used because math.sqrt loses
# precision beyond ~256-bit integers.
def exact_sqrt(x):
    """Calculate the square root of an arbitrarily large integer.

    The result of exact_sqrt(x) is a tuple (a, r) such that a**2 + r = x, where
    a is the largest integer such that a**2 <= x, and r is the "remainder". If
    x is a perfect square, then r will be zero.

    The algorithm used is the "long-hand square root" algorithm, as described at
    http://mathforum.org/library/drmath/view/52656.html

    Tobin Fricke 2014-04-23
    Max Planck Institute for Gravitational Physics
    Hannover, Germany
    """
    N = 0  # Problem so far
    a = 0  # Solution so far

    # We'll process the number two bits at a time, starting at the MSB
    L = x.bit_length()
    L += (L % 2)  # Round up to the next even number

    # FIX: range() instead of the Python-2-only xrange(); iteration semantics
    # are identical on Python 2 and this now also runs on Python 3.
    for i in range(L, -1, -1):
        # Get the next group of two bits
        n = (x >> (2 * i)) & 0b11

        # Check whether we can reduce the remainder
        if ((N - a * a) << 2) + n >= (a << 2) + 1:
            b = 1
        else:
            b = 0

        a = (a << 1) | b  # Concatenate the next bit of the solution
        N = (N << 2) | n  # Concatenate the next bit of the problem

    return (a, N - a * a)
# Modular exponentiation: return (a^n) % m using square-and-multiply, O(log n).
def modular_exp(a, n, m):
    """Return (a ** n) % m without materialising the full power."""
    result = 1
    base = a
    exponent = n
    while exponent != 0:
        if exponent & 1:
            result = (base * result) % m
        base = (base * base) % m
        exponent //= 2
    return result
# Fermat-style pseudo-primality test: p passes when b^(p-1) ≡ 1 (mod p)
# for every witness b in {2, 3, 5, 7} (same bases as the original PGP-style check).
def prim(p):
    return all(modular_exp(w, p - 1, p) == 1 for w in (2, 3, 5, 7))
# Return a probable prime of n bits, using trial residues mod 3/5/7 plus the
# Fermat test in prim() (PGP-style primality search).
def prim_rand(n):
    # Odd candidate with the top bit set, so it really has n bits.
    candidate = 2 * (random.getrandbits(n - 2) + pow(2, n - 1)) + 1
    residues = [candidate % 3, candidate % 5, candidate % 7]
    # Step by 2 (stay odd), updating the small residues incrementally.
    while 0 in residues or not prim(candidate):
        candidate += 2
        residues = [(r + 2) % m for r, m in zip(residues, (3, 5, 7))]
    return candidate
# Return the inverse of a modulo n (iterative extended Euclid, tracking only
# the coefficient of a, reduced mod n at every step).
def modular_inv(a, n):
    divisor = a
    dividend = n
    quotient = dividend // divisor
    remainder = dividend - quotient * divisor
    prev_coef = 1
    coef = -quotient % n
    while remainder != 0:
        dividend = divisor
        divisor = remainder
        quotient = dividend // divisor
        remainder = dividend - quotient * divisor
        next_coef = (prev_coef - quotient * coef) % n
        prev_coef = coef
        coef = next_coef
    return prev_coef
# Build the RSA parameter tuple (e, n, p, q, d) from two size-bit primes.
def keygenerator(size):
    p = q = 0
    # Draw two distinct random primes.
    while p == q:
        p = prim_rand(size)
        q = prim_rand(size)
    n = p * q
    totient = (p - 1) * (q - 1)
    e = (2 ** 16) + 1
    d = modular_inv(e, totient)  # d = e^-1 mod phi(n)
    while d < 0:                 # normalise into [0, phi(n))
        d = d + totient
    # Degenerate pair (d out of range, or e its own inverse): try again.
    if d >= totient or d == e:
        e, n, p, q, d = keygenerator(size)
    return e, n, p, q, d
#####################################TRAPPE#####################################
def fermat(n):
if n & 1 == 0 :
return [n >> 1, 2]
temp, _ = exact_sqrt(n)
x = long(temp)
if x * x == n :
return [x, x]
x += 1
while True :
y2 = x * x - n
temp, _ = exact_sqrt(y2)
y = long(temp)
if y * y == y2 :
break
else:
x += 1
return x - y, x + y
def factor_n(n, a):
    """Reduce n modulo the trapdoor value a and Fermat-factor the residue."""
    return fermat(n % a)
def exploit_trap(p_trap, q_trap, a):
    """Rebuild the full primes from their trapdoor residues mod a.

    Each prime is the smallest a*k + residue (k >= 1) that passes prim().
    Returns (n, p, q) with n = p * q.
    """
    def lift(residue):
        # Walk k = 1, 2, ... until a*k + residue is (pseudo)prime.
        k = 1
        candidate = a * k + residue
        while not prim(candidate):
            k += 1
            candidate = a * k + residue
        return candidate

    p = lift(p_trap)
    q = lift(q_trap)
    return p * q, p, q
def search_keys(p, q, d):
    """Recover the matching exponent: the inverse of d modulo phi(n)."""
    totient = (p - 1) * (q - 1)
    return modular_inv(d, totient)
def keygenerator_trap(size):
    """Generate a trapdoored RSA tuple (e, n, p, q, d, a).

    The trapdoor `a` is a random (3*size/4)-bit value; each prime is built as
    a*k + residue from a (size/4)-bit prime residue, so knowing `a` lets an
    attacker factor n via factor_n()/exploit_trap().

    FIX: use // for the bit-size computations. Under Python 2 `/` on ints was
    already floor division (behaviour unchanged); under Python 3 it yields a
    float and random.getrandbits()/prim_rand() would fail.
    """
    a = random.getrandbits(3 * size // 4)
    p = q = 0
    # Draw two distinct primes congruent to small prime residues mod a.
    while p == q:
        p_trap = prim_rand(size // 4)
        k = 1
        while True:
            p = a * k + p_trap
            if prim(p):
                break
            k += 1
        q_trap = prim_rand(size // 4)
        k = 1
        while True:
            q = a * k + q_trap
            if prim(q):
                break
            k += 1
    n = p * q
    phi_n = (p - 1) * (q - 1)
    e = (2 ** 16) + 1
    d = modular_inv(e, phi_n)  # d = e^-1 mod phi(n)
    while d < 0:               # normalise into [0, phi(n))
        d = d + phi_n
    # Degenerate pair: retry from scratch.
    if (d >= phi_n) or (d == e):
        e, n, p, q, d, a = keygenerator_trap(size)
    return e, n, p, q, d, a
################################################################################
#genere les tuples cle publique , cle prive
def key(size):
    """Return the RSA key pair as ((e, n) public, (d, n) private)."""
    pub_exp, modulus, _p, _q, priv_exp = keygenerator(size)
    return (pub_exp, modulus), (priv_exp, modulus)
def encrypt(pk, plaintext):
    """Encrypt `plaintext` with the RSA key pk = (exponent, modulus).

    Returns a list of integers, one per character: c = ord(char)^exp mod n.
    Bug fix: the original unpacked pk into (d, n) but then used the global
    variable `e` leaked from the calling scope; it only worked by accident.
    We now use the exponent actually contained in pk.
    """
    exp, n = pk
    # encrypt the ASCII code of every character
    cipher = [modular_exp(ord(char), exp, n) for char in plaintext]
    return cipher
def decrypt(pk, ciphertext):
    """Decrypt `ciphertext` (list of ints) with the RSA key pk = (exponent, modulus).

    Returns a list of character codes: m = c^exp mod n.
    Bug fix: the original unpacked pk into (e, n) but then used the global
    variable `d` leaked from the calling scope; use the unpacked exponent.
    """
    exp, n = pk
    plain = [(modular_exp(char, exp, n)) for char in ciphertext]
    return plain
# Demo driver (Python 2 only: print statements, raw_input).
# Generates a trapdoored key, then exploits the trapdoor to recover the
# private key from the public one, and finally round-trips a message.
if __name__ == '__main__':
    message = raw_input("entrer un message a chifrer : ")
    size = int(raw_input("entrer une taille pour p et q : "))
    e, n, p, q, d, a = keygenerator_trap(size)
    # NOTE: d is published and e kept private here (roles deliberately swapped)
    print "public : ", d, ", ", n , "\nprivate : ", e, ", ", n
    print "p : ", p, "\nq : ", q
    print "####################################################################"
    # attacker side: use the trapdoor value `a` to refactor n
    p_trap, q_trap = factor_n(n, a)
    n, p, q = exploit_trap(p_trap, q_trap, a)
    e = search_keys(p, q, d)
    print "Exploitation de la trappe"
    print "\nFactorisation de N\'"
    print "p\' : ", p_trap
    print "q\' : ", q_trap
    print "\nFactorisation de N"
    print "p : ", p
    print "q : ", q, "\n"
    print "####################################################################"
    print "Production des clefs RSA"
    public = [d , n]
    print "Clef publique : "
    print "n : ", public[1]
    print "d : ", public[0]
    private = [e, n]
    print "\nClef privee : "
    print "n : ", private[1]
    print "e : ", private[0]
    # round-trip the user's message through the recovered key pair
    encrypted_msg = encrypt(public, message)
    decrypted_msg = decrypt(private, encrypted_msg)
    print "\n Utilisation des clefs"
    print "chiffre : ", encrypted_msg
    print "message : ", ''.join(map(chr, decrypted_msg)), "\n"
    print "####################################################################"
# messagetab = [ord(char) for char in message] # on converti le message initiale en ascci
# print "message initial"
# print messagetab
#
# encrypted_msg = encrypt(public, message)#chiffrement du message
# print "message chiffre"
# print encrypted_msg
#
# print "message dechifre"
# decrypt_msg = decrypt(private, encrypted_msg)# dechifrement du message
# print decrypt_msg
#
# if messagetab == decrypt_msg : #si le tableau ascii du dechifrement est identique au tableau du message initial le chiffrement/dechifrement et fonctionnel
# print "chifrement/dechifrement fonctionnel"
# else :
# print "erreur lors du dechiffrement"
|
[
"mael.nogues@gmail.com"
] |
mael.nogues@gmail.com
|
25db6853c20d38eb7685a70d89d5c574e83abf94
|
586a3c12e6ecfb6d9a5292a46e840de57deb1df1
|
/li_regress.py
|
b139fe6443410e7094eb22f01d30a0113064333b
|
[] |
no_license
|
kount002/Anticlone
|
b4df3e2587c5c2e59b888a691c655e2b248338ef
|
3552240568edd217e311387e2502b636bb956afb
|
refs/heads/master
| 2020-04-16T17:43:41.984928
| 2018-09-18T13:50:52
| 2018-09-18T13:50:52
| 61,560,050
| 0
| 0
| null | 2016-06-30T17:34:46
| 2016-06-20T15:48:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,052
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 19 13:34:33 2016
Allows to work with clone counts from different pikles
Need to supply path to two pickled pandas libraries
Usage: lib_combine.py ~/anal/path2 ~/anal/path2
Requires explore_graph.py in the same folder to work
"""
import matplotlib
matplotlib.use('Agg')
import sys
import pickle
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
from sklearn import linear_model
import re
def open_lib(path):
    """Load a pickled object (a pandas DataFrame in this pipeline) from *path*.

    Returns the unpickled object, or None when loading fails.
    Improvement: the original used a bare ``except:`` that swallowed every
    error (including KeyboardInterrupt); catch only load-related failures
    and report the actual error.
    """
    try:
        with open(path, 'rb') as f:
            lib = pickle.load(f)
    except (OSError, IOError, EOFError, pickle.UnpicklingError) as err:
        print('Cannot open', path, '\n check path', '-', err)
        return None
    print('Read', path)
    return lib
def norm_f(df):
    """Normalise a count DataFrame in place and return it.

    Zeros become NaN everywhere, and every column except 'Annotation'
    is log10-transformed.
    """
    value_cols = [col for col in df.columns if col != 'Annotation']
    df.replace(0, np.nan, inplace=True)  # zeros cannot be log-transformed
    df[value_cols] = np.log10(df[value_cols])
    return df
##############main##############
#path1=sys.argv[1]
#path2=sys.argv[2]
#parameters
# --- pipeline configuration (hard-coded paths to pickled clone-count DataFrames)
path2='/home/kount002/anal/human/miphage/clone_count/clone_count_df.pkl'
path1='/home/kount002/anal/human/phm_diversity/custom_prime/clone_count/clone_count_df.pkl'
keep1=[] #empty will use all columns
keep2=['A16000', 'Annotation'] #empty will use all columns
# join the two pickled DataFrames into one table
df1=open_lib(path1)
df2=open_lib(path2)
print('Columns from right table:', df1.columns)
print('Columns from left table:', df2.columns)
if not keep1:
    keep1=df1.columns
if not keep2:
    keep2=df2.columns
df=df1[keep1].join(df2[keep2], how='outer', lsuffix='l')
df['Annotation'].fillna(df.Annotationl, inplace=True) #collapse the two Annotation columns into one
del(df['Annotationl'])
# select sample columns named EKddd (but not EKdddN)
mod=(lambda x: True if re.search(r'EK[0-9]{3}(?!N)', x) else False)
xl=[x for x in df.columns if mod(x)]
dfr=df.copy(deep=True)  # keep the raw (unfiltered) counts for prediction
#remove low values and log transform before regression; use filtered values for the model, raw ones for prediction
df=np.log10(df[xl+['A16000']])
df.replace(np.nan, 1, inplace=True)
dfm=df.loc[(df.iloc[:,:4]>1).any(axis=1),:] #keep rows with at least one value over 1
# multiple linear regression: predict A16000 from the EK sample columns
dfy=dfm.A16000
dfx=dfm[xl]
model=linear_model.LinearRegression(fit_intercept=False)
model.fit(dfx, dfy)
coef=model.coef_
print('Coefficients for ', dfx.columns, 'are', coef)
# predicted value = product over samples of count**coef
dfpy=dfr.iloc[:,:3].pow(coef, axis=1).product(axis=1)
#sum((expression count)**coef)
dfpy=pd.DataFrame(dfpy)
dfpy.columns=['Regress']
dfpy['Summed']=dfr.iloc[:,:3].sum(axis=1)
dfpy=dfpy.join(dfr[['A16000', 'Annotation']], how='inner')
# persist the combined prediction table
with open('560_561_570LR.pkl','wb') as f:
    #pickle.dump(pd.DataFrame(dfpy), f)
    pickle.dump(dfpy, f)
# scatter plot: observed A16000 vs summed counts (log-log)
dfpy.replace(np.nan, 0, inplace=True)
#py=model.predict(df.iloc[:,:3]) #adjust for reduced counts after small coef
plt.figure(figsize=(6,6))
plt.scatter(np.log10(dfr['A16000']), np.log10(dfpy['Summed']))
plt.savefig('Scatter_test.png')
|
[
"kount002@gmail.com"
] |
kount002@gmail.com
|
b280a2a7d4766e6375a02765b3244e920e0b405b
|
a0eb6744e6f7f509b96d21f0bc8b3f8387f6861c
|
/notebook/list_2d_sort.py
|
ed70c8ed858f38ef3ada5a56ba0468b997f515fc
|
[
"MIT"
] |
permissive
|
nkmk/python-snippets
|
a6c66bdf999502e52f4795a3074ced63bf440817
|
f9dd286a9cf93f474e20371f8fffc4732cb3c4d5
|
refs/heads/master
| 2023-08-03T04:20:05.606293
| 2023-07-26T13:21:11
| 2023-07-26T13:21:11
| 98,900,570
| 253
| 77
|
MIT
| 2020-10-25T01:12:53
| 2017-07-31T14:54:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,885
|
py
|
# Demo script: the many ways to sort a 2-D list in Python
# (plain sorted(), per-row sort, per-column sort via zip, NumPy, key
# functions, operator.itemgetter, multi-key sorts, and pandas).
# Expected output is shown in the comment after each statement.
import pprint

l_2d = [[20, 3, 100], [1, 200, 30], [300, 10, 2]]

pprint.pprint(l_2d, width=20)
# [[20, 3, 100],
#  [1, 200, 30],
#  [300, 10, 2]]

# sorted() on a list of lists compares rows lexicographically
pprint.pprint(sorted(l_2d), width=20)
# [[1, 200, 30],
#  [20, 3, 100],
#  [300, 10, 2]]

# sort inside each row
pprint.pprint([sorted(l) for l in l_2d], width=20)
# [[3, 20, 100],
#  [1, 30, 200],
#  [2, 10, 300]]

# sort inside each column: transpose, sort rows, transpose back
pprint.pprint([list(x) for x in zip(*[sorted(l) for l in zip(*l_2d)])], width=20)
# [[1, 3, 2],
#  [20, 10, 30],
#  [300, 200, 100]]

import numpy as np

print(np.sort(l_2d))
# [[  3  20 100]
#  [  1  30 200]
#  [  2  10 300]]

print(np.sort(l_2d, axis=0))
# [[  1   3   2]
#  [ 20  10  30]
#  [300 200 100]]

print(type(np.sort(l_2d)))
# <class 'numpy.ndarray'>

print(np.sort(l_2d).tolist())
# [[3, 20, 100], [1, 30, 200], [2, 10, 300]]

print(type(np.sort(l_2d).tolist()))
# <class 'list'>

# ragged rows cannot be turned into an ndarray
l_2d_error = [[1, 2], [3, 4, 5]]

# print(np.sort(l_2d_error))
# ValueError: setting an array element with a sequence. The requested array has an inhomogeneous shape after 1 dimensions. The detected shape was (2,) + inhomogeneous part.

# sort rows by a chosen column with a key function
pprint.pprint(sorted(l_2d, key=lambda x: x[1]), width=20)
# [[20, 3, 100],
#  [300, 10, 2],
#  [1, 200, 30]]

pprint.pprint(sorted(l_2d, key=lambda x: x[2]), width=20)
# [[300, 10, 2],
#  [1, 200, 30],
#  [20, 3, 100]]

import operator

# operator.itemgetter is the idiomatic (and faster) key function
pprint.pprint(sorted(l_2d, key=operator.itemgetter(1)), width=20)
# [[20, 3, 100],
#  [300, 10, 2],
#  [1, 200, 30]]

pprint.pprint(sorted(l_2d, key=operator.itemgetter(2)), width=20)
# [[300, 10, 2],
#  [1, 200, 30],
#  [20, 3, 100]]

# multi-key sorting when the first column ties
l_2d_dup = [[1, 3, 100], [1, 200, 30], [1, 3, 2]]

pprint.pprint(l_2d_dup, width=20)
# [[1, 3, 100],
#  [1, 200, 30],
#  [1, 3, 2]]

pprint.pprint(sorted(l_2d_dup), width=20)
# [[1, 3, 2],
#  [1, 3, 100],
#  [1, 200, 30]]

pprint.pprint(sorted(l_2d_dup, key=operator.itemgetter(0, 2)), width=20)
# [[1, 3, 2],
#  [1, 200, 30],
#  [1, 3, 100]]

pprint.pprint(sorted(l_2d_dup, key=lambda x: (x[0], x[2])), width=20)
# [[1, 3, 2],
#  [1, 200, 30],
#  [1, 3, 100]]

import pandas as pd

df = pd.DataFrame(l_2d_dup, columns=['A', 'B', 'C'], index=['X', 'Y', 'Z'])
print(df)
#    A    B    C
# X  1    3  100
# Y  1  200   30
# Z  1    3    2

print(df.sort_values('C'))
#    A    B    C
# Z  1    3    2
# Y  1  200   30
# X  1    3  100

print(df.sort_values('Z', axis=1))
#    A    C    B
# X  1  100    3
# Y  1   30  200
# Z  1    2    3

print(df.sort_values(['A', 'C']))
#    A    B    C
# Z  1    3    2
# Y  1  200   30
# X  1    3  100

df = pd.DataFrame(l_2d_dup)
print(df)
#    0    1    2
# 0  1    3  100
# 1  1  200   30
# 2  1    3    2

print(df.sort_values(2))
#    0    1    2
# 2  1    3    2
# 1  1  200   30
# 0  1    3  100

print(df.sort_values(2, axis=1))
#    0    2    1
# 0  1  100    3
# 1  1   30  200
# 2  1    2    3

print(df.sort_values([0, 2]))
#    0    1    2
# 2  1    3    2
# 1  1  200   30
# 0  1    3  100
|
[
"nkmk.on@gmail.com"
] |
nkmk.on@gmail.com
|
b58af860125842d181ec3ff20c4bc93e788bb891
|
913653e54fc434b50ac1445be1f8537107e25678
|
/MVC/Main.py
|
49144281594225b35231db4456ffbcda3afdb4a0
|
[] |
no_license
|
Rafael-Marinho/ES3_Cota-es
|
90b9baeb1881bacfa4b3e19df7f82e3b9e88d4d5
|
ec8d3721c5e5289ded14928e4bb1cecdc07c7ff0
|
refs/heads/master
| 2021-08-23T03:47:17.703921
| 2017-12-03T01:54:44
| 2017-12-03T01:54:44
| 105,531,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 473
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 8 de ago de 2017
@author: Rafael Marinho
'''
from MVC import View
from MVC import Controller
'''
# O "Main" tem a função de iniciar o programa,
# e pra isso ele busca os valores das cotações
# das moedas e da IBOVESPA, e prontamente
# chama a interface principal do usuário.
'''
Controller.ControllerSearch.Sources.ControllerSources(None, '3')
View.Observer.View.menu(None)
|
[
"noreply@github.com"
] |
Rafael-Marinho.noreply@github.com
|
cbd6a1d2b982c797f5f2c66a32486edd99e62acf
|
b27954c892897fbe0186464eb2460745b7eeaa86
|
/API Test App/ConcertAPI/env/bin/easy_install
|
fca567f124fa8685c1172b52d0aff85bfe7a8b81
|
[] |
no_license
|
bounswe/bounswe2017group2
|
c7ef702b663460ada41ab0a334273074df9739bd
|
5e3bd50228faf8eb327d21588c2854f6215973f9
|
refs/heads/master
| 2021-01-22T03:57:46.299748
| 2019-08-16T14:15:17
| 2019-08-16T14:15:17
| 81,464,305
| 8
| 6
| null | 2019-08-16T14:15:18
| 2017-02-09T15:26:15
|
Python
|
UTF-8
|
Python
| false
| false
| 278
|
#!/Users/berkkocabagli/ConcertAPI/ConcertAPI/env/bin/python3.6
# -*- coding: utf-8 -*-
# Auto-generated setuptools console-script wrapper for easy_install:
# strips a trailing "-script.py(w)" / ".exe" from argv[0] and delegates
# to setuptools' easy_install main().
import re
import sys

from setuptools.command.easy_install import main

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"kberkkocabagli@gmail.com"
] |
kberkkocabagli@gmail.com
|
|
8c5ed7790f16d81a0c36ea704e83ed858dde2f9b
|
71cb8d9eb437a9faf330931f3713ba5dc688405d
|
/analyze_data.py
|
20d0de521c42d33cccb15314658cdb6ae2767102
|
[
"MIT"
] |
permissive
|
mattare2/perceptual-acoustic-similarity
|
294d967ab2cd47120d33e650f7488d37cec199ca
|
eced010ee2d1a36c6052c8afd1b8c4af709dc418
|
refs/heads/master
| 2021-01-18T11:26:36.763005
| 2015-04-21T07:21:22
| 2015-04-21T07:21:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,408
|
py
|
import csv
import os
from functools import partial
from acousticsim.main import acoustic_similarity_mapping
from acousticsim.helper import get_vowel_points
from acousticsim.praat.wrapper import (to_pitch_praat, to_formants_praat,
to_intensity_praat, to_mfcc_praat)
from acousticsim.distance.point import point_distance, euclidean
from acousticsim.distance.dct import dct_distance
from acousticsim.distance.dtw import dtw_distance
from acousticsim.distance.xcorr import xcorr_distance
# Experiment layout: Praat binary plus model/shadower speaker directories,
# each split by gender.  NOTE: these are machine-specific absolute paths.
praat_path = r'C:\Users\michael\Documents\Praat\praatcon.exe'
data_dir = r'C:\Users\michael\Documents\Data\ATI_new'
model_dir = os.path.join(data_dir, 'Models')
shadower_dir = os.path.join(data_dir, 'Shadowers')
# speaker ids discovered from the directory structure at import time
female_models = os.listdir(os.path.join(model_dir,'Female'))
male_models = os.listdir(os.path.join(model_dir,'Male'))
female_shadowers = os.listdir(os.path.join(shadower_dir,'Female'))
male_shadowers = os.listdir(os.path.join(shadower_dir,'Male'))
## Representations
# MFCC (acousticsim)
# MFCC (Praat)
# Formants (Praat)
# Intensity (Praat)
# Pitch (Praat)
# AmpEnvs (acousticsim)
## Distance functions
# DTW
# XCorr
# DCT
# Vowel midpoint
# Vowel third
def callback(*args):
    """Progress callback handed to acousticsim: echo whatever it reports."""
    print(*args)
# Praat-backed representation builders with the executable path pre-bound,
# so they share the signature of the pure-Python representation functions.
praat_mfcc = partial(to_mfcc_praat, praat_path )
praat_formants = partial(to_formants_praat, praat_path)
praat_intensity = partial(to_intensity_praat, praat_path )
praat_pitch = partial(to_pitch_praat, praat_path )
def midpoint_distance(rep_one, rep_two):
    """Point distance between two representations at their vowel midpoints.

    The vowel interval for each representation is read from the TextGrid
    sitting next to its source file (tier 'Vowel', label 'V').
    """
    base, _ = os.path.splitext(rep_one._filepath)
    one_textgrid = base + '.TextGrid'
    begin, end = get_vowel_points(one_textgrid, tier_name = 'Vowel', vowel_label = 'V')
    if begin is None or end is None:
        print(one_textgrid)
    point_one = begin + ((end - begin)/2)
    base, _ = os.path.splitext(rep_two._filepath)
    two_textgrid = base + '.TextGrid'
    begin, end = get_vowel_points(two_textgrid, tier_name = 'Vowel', vowel_label = 'V')
    if begin is None or end is None:
        # Bug fix: the original printed one_textgrid here, hiding which
        # TextGrid was actually missing its vowel annotation.
        print(two_textgrid)
    point_two = begin + ((end - begin)/2)
    return point_distance(rep_one, rep_two, point_one, point_two)
def third_distance(rep_one, rep_two):
    """Point distance between two representations at one third of each vowel."""
    points = []
    for rep in (rep_one, rep_two):
        base, _ = os.path.splitext(rep._filepath)
        begin, end = get_vowel_points(base + '.TextGrid',
                                      tier_name = 'Vowel', vowel_label = 'V')
        points.append(begin + ((end - begin)/3))
    return point_distance(rep_one, rep_two, points[0], points[1])
def vowel_dist(dist_func, rep_one, rep_two):
    """Apply dist_func to just the vowel span of each representation."""
    spans = []
    for rep in (rep_one, rep_two):
        base, _ = os.path.splitext(rep._filepath)
        begin, end = get_vowel_points(base + '.TextGrid',
                                      tier_name = 'Vowel', vowel_label = 'V')
        spans.append(rep[begin, end])
    return dist_func(spans[0], spans[1])
def duration_distance(rep_one, rep_two):
    """Euclidean distance between (onset, vowel, coda) duration triples.

    Missing vowel boundaries fall back to the start/end of the recording.
    """
    triples = []
    for rep in (rep_one, rep_two):
        base, _ = os.path.splitext(rep._filepath)
        begin, end = get_vowel_points(base + '.TextGrid',
                                      tier_name = 'Vowel', vowel_label = 'V')
        if begin is None:
            begin = 0
        if end is None:
            end = rep._duration
        triples.append([begin, end - begin, rep._duration - end])
    return euclidean(triples[0], triples[1])
# Vowel-restricted variants of the three whole-signal distance functions.
vowel_dtw = partial(vowel_dist,dtw_distance)
vowel_dct = partial(vowel_dist,dct_distance)
vowel_xcorr = partial(vowel_dist,xcorr_distance)
def load_axb():
    """Parse axb.txt into (baseline, model, shadowed) wav-path triples.

    Speaker ids are the last three characters of the Shadower/Model columns;
    gender subdirectories are resolved from the module-level speaker lists.
    Returns the triples deduplicated.
    """
    path_mapping = list()
    with open(os.path.join(data_dir,'axb.txt'),'r') as f:
        reader = csv.DictReader(f, delimiter = '\t')
        for line in reader:
            shadower = line['Shadower'][-3:]
            model = line['Model'][-3:]
            word = line['Word']
            # model production path (gender decides the subdirectory)
            if model in female_models:
                model_path = os.path.join(model_dir, 'Female',model, '{}_{}.wav'.format(model,word))
            else:
                model_path = os.path.join(model_dir, 'Male',model, '{}_{}.wav'.format(model,word))
            # shadower baseline and shadowed productions
            if shadower in female_shadowers:
                baseline_path = os.path.join(shadower_dir, 'Female',shadower, '{}_{}_baseline.wav'.format(shadower,word))
                shadowed_path = os.path.join(shadower_dir, 'Female',shadower, '{}_{}_shadowing{}.wav'.format(shadower,word, model))
            else:
                baseline_path = os.path.join(shadower_dir, 'Male',shadower, '{}_{}_baseline.wav'.format(shadower,word))
                shadowed_path = os.path.join(shadower_dir, 'Male',shadower, '{}_{}_shadowing{}.wav'.format(shadower,word, model))
            path_mapping.append((baseline_path, model_path, shadowed_path))

    return list(set(path_mapping))
def output_acousticsim(path_mapping, output, output_filename):
    """Write a tab-separated report of baseline->model and shadowed->model distances.

    `output` maps (filename, model filename) pairs to distance values.
    """
    with open(output_filename, 'w') as out_file:
        writer = csv.writer(out_file, delimiter = '\t')
        writer.writerow(['Shadower', 'Model', 'Word', 'BaseToModel', 'ShadToModel'])
        for baseline_path, model_path, shadowed_path in path_mapping:
            baseline_name = os.path.basename(baseline_path)
            model_name = os.path.basename(model_path)
            shadowed_name = os.path.basename(shadowed_path)
            # model files are named <model>_<word>.wav
            model, word = os.path.splitext(model_name)[0].split('_')
            writer.writerow([shadowed_name[:3], model, word,
                             output[(baseline_name, model_name)],
                             output[(shadowed_name, model_name)]])
def get_mfcc_dtw(path_mapping):
    """MFCC representation + DTW distance over the given path mapping."""
    return acoustic_similarity_mapping(path_mapping,
                                       rep = 'mfcc', match_function = 'dtw',
                                       use_multi=True, num_cores = 6)
def get_mfcc_vowel_mid(path_mapping):
    """MFCC representation + vowel-midpoint distance over the path mapping."""
    return acoustic_similarity_mapping(path_mapping,
                                       rep = 'mfcc', match_function = midpoint_distance,
                                       use_multi=True, num_cores = 6,
                                       call_back = callback)
def convert_path_mapping(path_mapping):
    """Flatten (baseline, model, shadowed) triples into unique (x, model) pairs."""
    pairs = {(baseline, model) for baseline, model, _ in path_mapping}
    pairs |= {(shadowed, model) for _, model, shadowed in path_mapping}
    return list(pairs)
def calc_asim(path_mapping, rep, match_func, cache = None):
    """Run one (representation, distance) similarity pass.

    Returns (pair -> distance mapping, representation cache for reuse).
    """
    result, rep_cache = acoustic_similarity_mapping(
        path_mapping, rep = rep, match_function = match_func,
        use_multi=True, num_cores = 4, cache = cache, return_rep = True)
    return result, rep_cache
if __name__ == '__main__':
    # every representation builder to evaluate (string = acousticsim builtin,
    # callable = Praat-backed partial)
    rep_dict = {'mfcc': 'mfcc',
                'mfcc_praat':praat_mfcc,
                'ampenv': 'envelopes',
                'pitch_praat': praat_pitch,
                'intensity_praat': praat_intensity,
                'formants_praat': praat_formants
                }
    # every distance function to evaluate against each representation
    dist_dict = {'dtw': 'dtw',
                'dct': 'dct',
                'xcorr': 'xcorr',
                'dtw_vowel': vowel_dtw,
                'dct_vowel': vowel_dct,
                'xcorr_vowel': vowel_xcorr,
                'midpoint': midpoint_distance,
                'third': third_distance}
    path_mapping = load_axb()
    for_asim = convert_path_mapping(path_mapping)
    # cross product of representations x distances; the rep cache is reused
    # across distances, and finished combinations are skipped on restart
    for k,v in rep_dict.items():
        cache = None
        for k2,v2 in dist_dict.items():
            if os.path.exists('{}_{}.txt'.format(k, k2)):
                continue
            print(k, k2)
            asim, cache = calc_asim(for_asim, v, v2, cache = cache)
            output_acousticsim(path_mapping, asim, '{}_{}.txt'.format(k, k2))
    # duration distance (NOTE: uses `v` and `cache` left over from the loop)
    asim, cache = calc_asim(for_asim, v, duration_distance, cache = cache)
    output_acousticsim(path_mapping, asim, 'segmental_duration.txt')
|
[
"michael.e.mcauliffe@gmail.com"
] |
michael.e.mcauliffe@gmail.com
|
e93bfd5399e5ab1d1e5fa8e1374a7859d94a0446
|
512b388a53022f561e2375b4621f78572d3b4f04
|
/clients/migrations/0010_auto_20200904_1044.py
|
cb1046a194005d2c79ecd0cc9708388a797fa99b
|
[] |
no_license
|
Madoka09/Worker15
|
006d5ac44dc55c3ae7f72d3b8300f3567395cdff
|
181012d309052b2df3d4ef99a197e8acef73a185
|
refs/heads/master
| 2023-03-24T05:29:02.060796
| 2021-03-16T21:56:21
| 2021-03-16T21:56:21
| 336,394,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
# Generated by Django 3.0.4 on 2020-09-04 15:44
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ClientsAddress.altern_phone to alternate_phone."""

    # must follow the previous clients migration
    dependencies = [
        ('clients', '0009_auto_20200903_2132'),
    ]

    operations = [
        migrations.RenameField(
            model_name='clientsaddress',
            old_name='altern_phone',
            new_name='alternate_phone',
        ),
    ]
|
[
"personal.galvan.francisco@gmail.com"
] |
personal.galvan.francisco@gmail.com
|
a4a1fc8eb31f932680b72e4e0a52588a352db967
|
739c75868a776d8e245214a7f0bec77fbd4842cb
|
/src/main.py
|
2db044388ca6996d5bb09898c2adfff7a048ad5a
|
[] |
no_license
|
herman-d/python-playground
|
cdc114f74d14cf7ef3b259105f9c14b029994a52
|
c87c76dac68aafd3147c2a5b4c7ab963d9a3d70f
|
refs/heads/main
| 2023-02-18T23:01:09.792736
| 2021-01-17T03:32:29
| 2021-01-17T03:32:29
| 330,306,373
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
import sample_pb2
import sys

# Protobuf round-trip demo: serialize a Sample message to sample.bin,
# read it back and print it.
output = sample_pb2.Sample()
output.field_one = "test"
output.field_two = 999

print("output to sample.bin")
# Bug fix: the original called `f.close` without parentheses, so the
# written file was never explicitly closed; use context managers instead.
with open("sample.bin", "wb") as f:
    f.write(output.SerializeToString())

print("input from sample.bin")
with open("sample.bin", "rb") as f:
    # renamed from `input`, which shadowed the builtin
    parsed = sample_pb2.Sample()
    parsed.ParseFromString(f.read())

print("output")
print(parsed)
|
[
"herman.ng@zendesk.com"
] |
herman.ng@zendesk.com
|
98f7d82d2aeffaa9258b3f75f8faf29c0d5ff00a
|
41e91e6321cdaa61a2675436de54e8f951f42a57
|
/home/admin.py
|
50e6847e6fd38fde417d32b23d2dcca676906b5a
|
[] |
no_license
|
tot-samij88/dkp-django
|
b33d66aa6e3a4f5d2d282129857f29217e2ce255
|
af5690f20a69c517c989326f634723f1dd0606ea
|
refs/heads/master
| 2023-01-02T12:00:00.057399
| 2020-10-16T02:19:39
| 2020-10-16T02:19:39
| 304,496,751
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 724
|
py
|
from django.contrib import admin
from .models import FirstSlide
from .models import NextSlide
class FirstSlideAdmin(admin.ModelAdmin):
    """Admin changelist config for FirstSlide: show id/title/subtext,
    link through the text columns, toggle is_published inline."""
    list_display = (
        'id',
        'title',
        'sub_text',
        'is_published'
    )
    list_display_links = (
        'id',
        'title',
        'sub_text',
    )
    list_editable = ('is_published',)
class NextSlideAdmin(admin.ModelAdmin):
    """Admin changelist config for NextSlide (mirrors FirstSlideAdmin)."""
    list_display = (
        'id',
        'title',
        'sub_text',
        'is_published'
    )
    list_display_links = (
        'id',
        'title',
        'sub_text',
    )
    list_editable = ('is_published',)


# register both slide models with their admin configurations
admin.site.register(FirstSlide, FirstSlideAdmin)
admin.site.register(NextSlide, NextSlideAdmin)
|
[
"mizeravladik@gmail.com"
] |
mizeravladik@gmail.com
|
e8c5090580a34ad557e421bca450abaa32b7813c
|
8c1fc7f897b83d0367605836439c7d05673d623b
|
/part1/get_capabilities.py
|
43de71ef5ca06f28fd8fdb46c5ab5ed5df256fe8
|
[] |
no_license
|
inwk6312-summer2018/netconf-yang-tutorial-priyankshah95
|
071872c364c9065df8d97c75e97b071313867d31
|
cae171de7d73a922667cb8ad9a844c5f7e99a2fd
|
refs/heads/master
| 2020-03-17T11:34:39.921447
| 2018-05-17T17:26:48
| 2018-05-17T17:26:48
| 133,556,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 482
|
py
|
from ncclient import manager
import sys
# NETCONF target device.  NOTE(review): credentials are hard-coded here —
# consider environment variables or a config file for anything non-lab.
HOST = '10.1.98.176'
PORT = 830
USER = 'cisco1'
PASS = 'cisco1'

def main():
    """Connect over NETCONF and print the device's advertised capabilities."""
    with manager.connect(host=HOST,port=PORT,username=USER,password=PASS, hostkey_verify = False, device_params = {'name':'default'},look_for_keys=False,allow_agent=False) as m:
        print("***Below listed are the remote devices capabilities***")
        # strip the ?module=...&revision=... query part of each capability URI
        for capability in m.server_capabilities:
            print(capability.split('?')[0])

if __name__=='__main__':
    sys.exit(main())
|
[
"priyank.shah@dal.ca"
] |
priyank.shah@dal.ca
|
e8ad7c6fc7df5ae3504281a89fd22b8eadb6cdef
|
b75918b2ac1dfaf2c1219f40d63004900c9338b1
|
/tests/conftest.py
|
bdc760d2cb724f587868a0e459829b3640bca13f
|
[] |
no_license
|
solashirai/ExplainableCourseRecommender
|
e0f036da9814a0187daa5635da0ff2f86386026d
|
6a2795cfc4536548ac3679b3d23b953e55a50a37
|
refs/heads/main
| 2023-04-14T14:27:36.054830
| 2021-04-19T02:29:48
| 2021-04-19T02:29:48
| 302,346,189
| 1
| 0
| null | 2021-04-18T16:13:48
| 2020-10-08T13:17:44
|
Python
|
UTF-8
|
Python
| false
| false
| 14,309
|
py
|
import pytest
from escore.models import *
from escore.services.course import GraphCourseQueryService
from frex.stores import LocalGraph, RemoteGraph
from escore.utils.path import DATA_DIR
from escore.pipeline import RecommendCoursesPipeline
from rdflib import URIRef, Namespace
# RDF namespace holding all individuals (courses, requirements, users)
# referenced by the fixtures below.
individual_ns = Namespace(
    "https://tw.rpi.edu/ontology-engineering/oe2020/course-recommender-individuals/"
)
@pytest.fixture(scope="session")
def course_graph() -> LocalGraph:
    """Session-wide local RDF graph loaded from the project's turtle files."""
    return LocalGraph(
        file_paths=(
            (DATA_DIR / "courses.ttl").resolve(),
            (DATA_DIR / "scheduled_courses.ttl").resolve(),
            (DATA_DIR / "rpi_departments.ttl").resolve(),
            (DATA_DIR / "parsed_grad_requirements.ttl").resolve(),
            (DATA_DIR / "users.ttl").resolve(),
        )
    )
@pytest.fixture(scope="session")
def course_qs(course_graph) -> GraphCourseQueryService:
    """Course query service backed by the session course graph."""
    return GraphCourseQueryService(queryable=course_graph)
@pytest.fixture(scope="session")
def course_rec_pipe(course_qs) -> RecommendCoursesPipeline:
    """Recommendation pipeline wired to the session query service."""
    return RecommendCoursesPipeline(course_query_service=course_qs)
@pytest.fixture(scope="session")
def pl_course(csci_dept_code, csci_dept):
    """Fixture: the CSCI-4430 Programming Languages course individual."""
    return Course(
        uri=individual_ns['crs938c5b7e20ea7e1620a2dd6329e6f0af274b46c3'],
        course_code=CourseCode(
            uri=individual_ns['crsCodeed2eaaf90b6625c9f6a5731e3f1a933357cd88b2'],
            name="CSCI-4430",
            department_code=csci_dept_code,
            course_level=4430.0,
            cross_listed=tuple()
        ),
        name="Programming Languages",
        credits=4,
        department=csci_dept,
        description="This course is a study of the important concepts found in current programming languages. "
                    "Topics include language processing (lexical analysis, parsing, type-checking, interpretation "
                    "and compilation, run-time environment), the role of abstraction (data abstraction and control "
                    "abstraction), programming paradigms (procedural, functional, object-oriented, logic-oriented, "
                    "generic), and formal language definition.",
        special_tags=frozenset(),
        # URIs of Data Structures / FOCS-style prerequisite courses
        required_prerequisites=frozenset({
            individual_ns['crsd930192130a654416bffd45ce16415ee608df66d'],
            individual_ns['crs9797fa54cb6f077d0e7cf31e23bdbafbbe00e8af']
        }),
        corequisites=frozenset(),
        recommended_prerequisites=frozenset(),
        topics=frozenset({
            TopicArea(
                uri=individual_ns['topic00001'],
                name='placeholder for topic',
                sub_topic_of=frozenset(),
                discipline='placeholder discipline'
            ),
        }
        ),
        offering_terms=("FALL",),
        offering_period="ANNUAL"
    )
@pytest.fixture(scope="session")
def csci_dept_code(csci_dept):
    """Fixture: the CSCI department code individual."""
    return DepartmentCode(
        uri=individual_ns['dptc0026'],
        name="CSCI",
        department=csci_dept
    )
@pytest.fixture(scope="session")
def csci_dept():
    """Fixture: the Computer Science department individual."""
    return Department(
        uri=individual_ns['dpt0026'],
        name="Computer Science",
        offered_major_uris=tuple(),
        offered_degree_uris=tuple()
    )
@pytest.fixture(scope="session")
def csci_major(csci_dept):
    """Fixture: the Computer Science major individual."""
    return Major(
        uri=individual_ns['majCSCI'],
        name="Computer Science Major",
        department=csci_dept
    )
@pytest.fixture(scope="session")
def csci_top_level_req(csci_dept):
    """Fixture: the 128-credit top-level CS degree requirement.

    Its sub-requirements (same URI set as share_credits_with) are the
    individual category requirements of the degree.
    """
    return Requirement(
        uri=individual_ns['reqfd44455d4e7c62e5f83dde9ab7da8583adbfd31e'],
        fulfilled_by_requirement_uris=frozenset(),
        sub_requirement_uris=frozenset({
            individual_ns['req5f790f12a27e66b3f8c6534a79003cb5910d7fde'],
            individual_ns['req78e077dfe6014ee50f8fac4e06b2ae06333cc271'],
            individual_ns['reqa323d5f642970db3393d958ea2e8c6510032e1e2'],
            individual_ns['reqae4289fd7815ec563f927cc14309b63a797ab630'],
            individual_ns['reqbbf0c827d4009fdd91575d3974c3e9be28909b6c'],
            individual_ns['reqe29978dd3d6ce495c371fda071f87f6c36f0739f'],
        }),
        share_credits_with_requirement_uris=frozenset({
            individual_ns['req5f790f12a27e66b3f8c6534a79003cb5910d7fde'],
            individual_ns['req78e077dfe6014ee50f8fac4e06b2ae06333cc271'],
            individual_ns['reqa323d5f642970db3393d958ea2e8c6510032e1e2'],
            individual_ns['reqae4289fd7815ec563f927cc14309b63a797ab630'],
            individual_ns['reqbbf0c827d4009fdd91575d3974c3e9be28909b6c'],
            individual_ns['reqe29978dd3d6ce495c371fda071f87f6c36f0739f'],
        }),
        restriction_requirement_uris=frozenset(),
        requires_credits=128,
        course_code_restriction=CourseCodeRestriction(
            uri=individual_ns['ccr79a36c1af79f9c7271a61771aab09de994fccd4f'],
            valid_course_code_names=frozenset(),
            required_special_tag_names=frozenset(),
            valid_department_code_names=frozenset(),
        )
    )
@pytest.fixture(scope="session")
def csci_option_req():
    """Fixture: the 16-credit CSCI-4000-level 'option' sub-requirement."""
    return Requirement(
        uri=individual_ns['req78e077dfe6014ee50f8fac4e06b2ae06333cc271'],
        requires_credits=16,
        # credits may also count toward the top-level requirement
        share_credits_with_requirement_uris=frozenset({
            individual_ns['reqfd44455d4e7c62e5f83dde9ab7da8583adbfd31e'],
        }),
        sub_requirement_uris=frozenset(),
        restriction_requirement_uris=frozenset(),
        fulfilled_by_requirement_uris=frozenset(),
        course_code_restriction=CourseCodeRestriction(
            uri=individual_ns['ccr5a9b245b2af51a7b021a10d532b88f33418a97ca'],
            valid_course_code_names=frozenset(),
            required_special_tag_names=frozenset(),
            valid_department_code_names=frozenset({'CSCI'}),
            min_level=4000
        )
    )
@pytest.fixture(scope="session")
def csci_bs_deg(csci_major, csci_top_level_req):
    """Fixture: the BS in Computer Science degree individual."""
    return Degree(
        uri=individual_ns['degBSInCSCI'],
        name='BS in Computer Science',
        major=csci_major,
        requirements=(csci_top_level_req,)
    )
@pytest.fixture(scope="session")
def owen_pos(csci_major, csci_bs_deg):
    """Fixture: a fully populated plan of study for test user 'owen'.

    The completed course-section and course URI sets are hashes lifted from
    the project's users.ttl data; no courses are ongoing or planned.
    """
    return PlanOfStudy(
        uri=individual_ns['pos9a8e6844c6ecbac12f9f92da68ac51c5bd67704f'],
        class_year=2021,
        planned_major=csci_major,
        planned_degree=csci_bs_deg,
        completed_course_sections=frozenset({
            individual_ns["crsSec0838fe4beedeff7709d32d16ca67c9aa2373dba7"],
            individual_ns["crsSec0cf0d1a768ef7b1d580ac0aaf258257b8c766ecb"],
            individual_ns["crsSec0d060d8550b4d97fa0aa0188e75a213e37114cb5"],
            individual_ns["crsSec1d571602ec11f8e32dcde3b985cb277b68b7abb5"],
            individual_ns["crsSec40567fef852031bad43995aa8cab7c4877bc0a02"],
            individual_ns["crsSec4d3630ed52401a5362753db61595b8e1aec66bd8"],
            individual_ns["crsSec5241e24de4b9d40df379b7916e4698ac81354f6f"],
            individual_ns["crsSec5fd627bdf533aefd6f25ebb995fccc08e57f8dc2"],
            individual_ns["crsSec615e6c5aee4bbf92e6e193f86346602825bba571"],
            individual_ns["crsSec663dda052cc6e9647d255c294c71409b1883963f"],
            individual_ns["crsSec6a1c91448f2bdb49b519784e470a68c37318b45c"],
            individual_ns["crsSec79431f36805f7d501cc79356e3f69b26340e1d98"],
            individual_ns["crsSec8102566ff399c31b30351decb38ba3893db8e2f5"],
            individual_ns["crsSec8281ac09fc60458b13bdfef54b75f0b8e771837e"],
            individual_ns["crsSec8bb40720e14ff5d40a16d71efbfab65bbcd742eb"],
            individual_ns["crsSec99b5492130e02e1dcb08692178a020c1c2444195"],
            individual_ns["crsSecbc29e94fcaa333888baa92efb31dad194e1718b6"],
            individual_ns["crsSecc4b387e96f764565a80950390b36235fc00eabf1"],
            individual_ns["crsSeccb117aa26ddc5cf711c70466adcc656492e8a464"],
            individual_ns["crsSecce866dba24b0cdf1e707f40e0ee7fbb8de068406"],
            individual_ns["crsSecd5c95ece2b749c2e0beb1d2bfde0e23e5ad45d93"],
            individual_ns["crsSece04b10767b92aa4d53eb5a5b044ef13673b49448"],
            individual_ns["crsSece405364a6acf6b819c02915a204114f26ff8551f"],
            individual_ns["crsSecf5a9dafe85e39b30bdbd45b3371eeefd7520569d"],
            individual_ns["crsSecf603c709ea539acc6b9bb842d574c3d9eb7c17fa"],
            individual_ns["crsSecf7b40623128f286084d451d67cc7fb4b60b11c94"],
            individual_ns["crsSecf8b3e82fd2f512b3db0727642c6a1b7153581d47"],
            individual_ns["crsSecfb9210e5ca6bd4844b7bf9bdf1cb1c5956f81d08"],
        }),
        completed_courses=frozenset({
            individual_ns["crsafed9cb99a22f3c1c24a461212de74c061147fdc"],
            individual_ns["crsd13b01ead0fba8b4aa112ce4a06999a774cf7b2d"],
            individual_ns["crs16512f1cf1a0772c4b025c3d6ec1edcd0d8fe1fb"],
            individual_ns["crsfb2686b704f12418fbb57e79c573d4bb0fd2f418"],
            individual_ns["crsbb2f79ec60f43618cd25567f87e71171d29aee83"],
            individual_ns["crs3040f719acb6d5f911e4a1e0efdae1aab16e71d5"],
            individual_ns["crs76deeb1ecf1123e7b7b6918afd3e7e9c65a5bbdc"],
            individual_ns["crsa9004db87efa99687062b8819ace3f59d4e235cd"],
            individual_ns["crs8e3b954b259c3b7c341a8839f81fb05deeff68ea"],
            individual_ns["crs938c5b7e20ea7e1620a2dd6329e6f0af274b46c3"],
            individual_ns["crs667378d70c52e4a84617225e20e380eb49540f42"],
            individual_ns["crsd930192130a654416bffd45ce16415ee608df66d"],
            individual_ns["crs11d22a217c292f1bd278d88b96fa770c9a6fa207"],
            individual_ns["crs66ece4f97b7ad555666d9477af785bcaa7a40e8a"],
            individual_ns["crs547b5ccb36b817d3e2df2a96a09aa18f678bc4e0"],
            individual_ns["crs4b79ba1b9717a21b3aff7a7d656a471eea21448a"],
            individual_ns["crs0f4511984f6fb0682b0185c2dc94b50dbc4efd2a"],
            individual_ns["crs70c201e1b37def5c83e4458b044028e8a44f91c7"],
            individual_ns["crs9797fa54cb6f077d0e7cf31e23bdbafbbe00e8af"],
            individual_ns["crs1f544a878959fae04cb9d08b258e527007df5491"],
            individual_ns["crs61c14eb096ee7002039fb8baee948b4495f08440"],
            individual_ns["crsb195823511b1f4a6f4b656734aab626993defec6"],
            individual_ns["crs8aabf92b49dce005f10db4d14605ad4d5eb920d7"],
            individual_ns["crs2a22ca2e61da1be778732a493f944011f5b30519"],
            individual_ns["crs72de52b44f46d5b08b2917495701f202699880ca"],
            individual_ns["crsc746a794a800d873f1e5deff86c0c58e25f94848"],
            individual_ns["crs622f7a32272ea2f04599f688790c2571325b949a"],
            individual_ns["crs7c03aa6fefaf99476e8158ef5943f5ee91ee6146"],
        }),
        ongoing_course_sections=frozenset(),
        planned_courses=frozenset(),
    )
@pytest.fixture(scope="session")
def placeholder_advisor():
    """Fixture: a stand-in advisor shared by all student fixtures."""
    return Advisor(
        uri=individual_ns['PLACEHOLDER-ADVISOR-URI'],
        name="Placeholder advisor name",
        advises_student_uris=tuple()
    )
@pytest.fixture(scope="session")
def owen_student(owen_pos, placeholder_advisor):
    """Fixture: test student 'owen' with a full study plan, interested in
    the semantic web."""
    return Student(
        uri=individual_ns['usrowen'],
        study_plan=owen_pos,
        name="owen",
        class_year=2021,
        topics_of_interest=frozenset({TopicArea(
            uri=individual_ns['hardcodedUserInterest'],
            name='semantic web',
            sub_topic_of=frozenset(),
            discipline="placeholder discipline",
        )}),
        registered_courses=frozenset(),
        advisor=placeholder_advisor,
    )
@pytest.fixture(scope="session")
def blank_student(placeholder_advisor, csci_major, csci_bs_deg):
    # Student with an empty plan of study (no completed/ongoing/planned
    # courses) — useful as a baseline for recommendation tests.
    # NOTE(review): bs1 and bs2 below reuse this same 'blank_user' URI and
    # name; confirm the URI collision is intentional.
    return Student(
        uri=individual_ns['blank_user'],
        study_plan=PlanOfStudy(
            uri=individual_ns['blank_user_pos'],
            class_year=2023,
            planned_major=csci_major,
            planned_degree=csci_bs_deg,
            completed_courses=frozenset({}),
            completed_course_sections=frozenset({}),
            ongoing_course_sections=frozenset(),
            planned_courses=frozenset(),
        ),
        name="blank",
        class_year=2023,
        topics_of_interest=frozenset({TopicArea(
            uri=individual_ns['hardcodedUserInterest'],
            name='ontology engineering',
            sub_topic_of=frozenset(),
            discipline="placeholder discipline",
        )}),
        registered_courses=frozenset(),
        advisor=placeholder_advisor,
    )
@pytest.fixture(scope="session")
def bs2(placeholder_advisor, csci_major, csci_bs_deg):
    # Identical to blank_student except the single topic of interest is
    # 'artificial intelligence'. Shares the 'blank_user' URI with
    # blank_student and bs1 — TODO confirm that is intended.
    return Student(
        uri=individual_ns['blank_user'],
        study_plan=PlanOfStudy(
            uri=individual_ns['blank_user_pos'],
            class_year=2023,
            planned_major=csci_major,
            planned_degree=csci_bs_deg,
            completed_courses=frozenset({}),
            completed_course_sections=frozenset({}),
            ongoing_course_sections=frozenset(),
            planned_courses=frozenset(),
        ),
        name="blank",
        class_year=2023,
        topics_of_interest=frozenset({TopicArea(
            uri=individual_ns['hardcodedUserInterest'],
            name='artificial intelligence',
            sub_topic_of=frozenset(),
            discipline="placeholder discipline",
        )}),
        registered_courses=frozenset(),
        advisor=placeholder_advisor,
    )
@pytest.fixture(scope="session")
def bs1(placeholder_advisor, csci_major, csci_bs_deg):
    # Identical to blank_student except the single topic of interest is
    # 'machine learning'. Shares the 'blank_user' URI with blank_student
    # and bs2 — TODO confirm that is intended.
    return Student(
        uri=individual_ns['blank_user'],
        study_plan=PlanOfStudy(
            uri=individual_ns['blank_user_pos'],
            class_year=2023,
            planned_major=csci_major,
            planned_degree=csci_bs_deg,
            completed_courses=frozenset({}),
            completed_course_sections=frozenset({}),
            ongoing_course_sections=frozenset(),
            planned_courses=frozenset(),
        ),
        name="blank",
        class_year=2023,
        topics_of_interest=frozenset({TopicArea(
            uri=individual_ns['hardcodedUserInterest'],
            name='machine learning',
            sub_topic_of=frozenset(),
            discipline="placeholder discipline",
        )}),
        registered_courses=frozenset(),
        advisor=placeholder_advisor,
    )
|
[
"solashakashirai@gmail.com"
] |
solashakashirai@gmail.com
|
c8bf10335c7c1e07b2176c968917ab7c4d5ace34
|
0f3a0be642cd6a2dd792c548cf7212176761e9b1
|
/pywps_services/r_mult.py
|
9910ee9228a37f667c6a73112163cb45b3e7d2ec
|
[] |
no_license
|
huhabla/wps-grass-bridge
|
63a5d60735d372e295ec6adabe527eec9e72635a
|
aefdf1516a7517b1b745ec72e2d2481a78e10017
|
refs/heads/master
| 2021-01-10T10:10:34.246497
| 2014-01-22T23:40:58
| 2014-01-22T23:40:58
| 53,005,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,141
|
py
|
# ################################################ #
# This process was generated using GrassXMLtoPyWPS #
# Author: Soeren Gebbert #
# Mail: soerengebbert <at> googlemail <dot> com #
# ################################################ #
from pywps.Process import WPSProcess
from PyWPSGrassModuleStarter import PyWPSGrassModuleStarter
class r_mult(WPSProcess):
    """WPS wrapper exposing the GRASS GIS module r.mult (raster multiplication).

    Generated by GrassXMLtoPyWPS; the long argument lists mirror the module's
    XML interface description and should not be hand-edited.
    """

    def __init__(self):
        WPSProcess.__init__(self, identifier = 'r.mult', title = 'Multiplies a raster map with one or more raster maps', version = 1, statusSupported = True, storeSupported = True, metadata = [{'type': 'simple', 'title': 'raster'}, {'type': 'simple', 'title': 'math'}], abstract = 'http://grass.osgeo.org/grass70/manuals/html70_user/r.mult.html')
        # Literal and complex inputs
        self.addComplexInput(identifier = 'inputs', title = 'Raster maps to multiply', minOccurs = 1, maxOccurs = 1024, formats = [{'mimeType': 'image/tiff'}, {'mimeType': 'image/geotiff'}, {'mimeType': 'application/geotiff'}, {'mimeType': 'application/x-geotiff'}, {'mimeType': 'image/png'}, {'mimeType': 'image/gif'}, {'mimeType': 'image/jpeg'}, {'mimeType': 'application/x-erdas-hfa'}, {'mimeType': 'application/netcdf'}, {'mimeType': 'application/x-netcdf'}])
        self.addLiteralInput(identifier = 'grass_resolution_ns', title = 'Resolution of the mapset in north-south direction in meters or degrees', abstract = 'This parameter defines the north-south resolution of the mapset in meter or degrees, which should be used to process the input and output raster data. To enable this setting, you need to specify north-south and east-west resolution.', minOccurs = 0, maxOccurs = 1, type = type(0.0), allowedValues = '*')
        self.addLiteralInput(identifier = 'grass_resolution_ew', title = 'Resolution of the mapset in east-west direction in meters or degrees', abstract = 'This parameter defines the east-west resolution of the mapset in meters or degrees, which should be used to process the input and output raster data. To enable this setting, you need to specify north-south and east-west resolution.', minOccurs = 0, maxOccurs = 1, type = type(0.0), allowedValues = '*')
        self.addLiteralInput(identifier = 'grass_band_number', title = 'Band to select for processing (default is all bands)', abstract = 'This parameter defines band number of the input raster files which should be processed. As default all bands are processed and used as single and multiple inputs for raster modules.', minOccurs = 0, maxOccurs = 1, type = type(0), allowedValues = '*')
        # complex outputs
        self.addComplexOutput(identifier = 'output', title = 'The result of the mathematical operation', formats = [{'mimeType': 'image/tiff'}, {'mimeType': 'image/geotiff'}, {'mimeType': 'application/geotiff'}, {'mimeType': 'application/x-geotiff'}, {'mimeType': 'application/x-erdas-hfa'}, {'mimeType': 'application/netcdf'}, {'mimeType': 'application/x-netcdf'}])

    def execute(self):
        # Delegate the actual GRASS module invocation to the shared starter,
        # which maps the WPS inputs/outputs onto the module's CLI.
        starter = PyWPSGrassModuleStarter()
        starter.fromPyWPS("r.mult", self.inputs, self.outputs, self.pywps)

if __name__ == "__main__":
    # Direct invocation for debugging only; normally PyWPS drives the process.
    process = r_mult()
    process.execute()
|
[
"soerengebbert@23da3d23-e2f9-862c-be8f-f61c6c06f202"
] |
soerengebbert@23da3d23-e2f9-862c-be8f-f61c6c06f202
|
f18aa238412ae3b90eb7279e821825886faaa8c3
|
60ee4380a9a56c8d6e883b47b1c856646095e234
|
/lintcode/Easy/109_Triangle.py
|
4f67ff01d874c514efbaec5eae8390d1971be15e
|
[
"MIT"
] |
permissive
|
Rhadow/leetcode
|
e3d143d619a949a5ae6dc1b107b04d29675bdc31
|
43209626720321113dbfbac67b3841e6efb4fab3
|
refs/heads/master
| 2020-12-18T22:32:18.523143
| 2018-05-11T05:35:51
| 2018-05-11T05:35:51
| 42,924,724
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
class Solution:
    """
    @param triangle: a list of lists of integers.
    @return: An integer, minimum path sum.
    """
    def minimumTotal(self, triangle):
        """Return the minimum top-to-bottom path sum of the triangle.

        Rolling 1-D DP: res[j] holds the best path sum reaching column j of
        the current row. O(n) extra space for n rows, O(n^2) time.
        """
        # BUG FIX: the original referenced sys.maxint without importing sys
        # (NameError at runtime; maxint is also gone in Python 3). Use an
        # infinite sentinel instead.
        INF = float('inf')
        # Copy the first row so the caller's triangle is not mutated
        # (the original aliased triangle[0] and wrote into it).
        res = list(triangle[0])
        for i in range(1, len(triangle)):
            # Walk right-to-left so res[j-1] still holds the previous row.
            for j in range(len(triangle[i]) - 1, -1, -1):
                # Path arriving from directly above (same column) ...
                from_same = triangle[i][j] + (res[j] if j < len(res) else INF)
                # ... or from the upper-left neighbour.
                from_left = triangle[i][j] + (res[j - 1] if j - 1 >= 0 else INF)
                if j >= len(res):
                    res.append(min(from_same, from_left))
                else:
                    res[j] = min(from_same, from_left)
        return min(res)
|
[
"howard3433@gmail.com"
] |
howard3433@gmail.com
|
9a45f1acb0c54622917608aaf57f4470f9c80e65
|
1a204b3a937af1b600ef44d176aedf006c2cb59c
|
/venv/bin/pycodestyle
|
0d16705078b4dafb3f6334f06d5a3977284bd469
|
[] |
no_license
|
stevenkeezer/pythontodo-r
|
6c6ba8ba893bc21ed19f0becde4994f4793d74de
|
fab7783d94526fefb310ca1a246bfe66b1a14e60
|
refs/heads/master
| 2020-09-07T19:14:16.927777
| 2019-11-11T02:52:22
| 2019-11-11T02:52:22
| 220,887,568
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
#!/Users/stevenkeezer/Documents/Sierra/pythontodo-p/venv/bin/python3.7
# -*- coding: utf-8 -*-
# Auto-generated console-script shim: strips the setuptools wrapper suffix
# from argv[0], then hands control to pycodestyle's CLI entry point.
import re
import sys

from pycodestyle import _main

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(_main())
|
[
"stevengkeezer@gmail.com"
] |
stevengkeezer@gmail.com
|
|
2d51dc8a47690b543abd5f2196e6d22032e34caf
|
de3b77cb0927f28cbd85e9142c2dfd7c8be7c27e
|
/tests/migrations/015_user_demographics_up.py
|
9e08957363737d8cf8968f4a19885fea3c67bec4
|
[
"MIT"
] |
permissive
|
LoansBot/database
|
f3dcbccde59fdb80c876d2612f250662946588e6
|
eeaed26c2dcfdf0f9637b47ebe15cd1e000d8cc4
|
refs/heads/master
| 2021-07-02T22:07:18.683278
| 2021-06-02T04:09:38
| 2021-06-02T04:09:38
| 239,400,935
| 0
| 1
|
MIT
| 2021-06-02T04:14:31
| 2020-02-10T01:06:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,166
|
py
|
import unittest
import helper
class UpTest(unittest.TestCase):
    """Checks that migration 015 created every user-demographics table."""

    @classmethod
    def setUpClass(cls):
        # One shared connection/cursor for the whole class; rolled back on exit.
        cls.connection = helper.setup_connection()
        cls.cursor = cls.connection.cursor()

    @classmethod
    def tearDownClass(cls):
        cls.cursor.close()
        cls.connection.rollback()
        helper.teardown_connection(cls.connection)

    def tearDown(self):
        # Keep tests isolated: discard anything a test may have touched.
        self.connection.rollback()

    def _assert_table_exists(self, table_name):
        # Shared assertion so each test reads as a single line.
        self.assertTrue(helper.check_if_table_exist(self.cursor, table_name))

    def test_user_demographics_exist(self):
        self._assert_table_exists('user_demographics')

    def test_user_demographic_lookups_exist(self):
        self._assert_table_exists('user_demographic_lookups')

    def test_user_demographic_views_exist(self):
        self._assert_table_exists('user_demographic_views')

    def test_user_demographic_history_exist(self):
        self._assert_table_exists('user_demographic_history')

if __name__ == '__main__':
    unittest.main()
|
[
"noreply@github.com"
] |
LoansBot.noreply@github.com
|
85a9890e0d57ba8ddc27db43d7b86627e0d843a4
|
179a739d5f4d672b461ecbe88af285e946f898af
|
/tools/m_map_funcs.py
|
5350dbed340d764c5742771b1625debfe92295ec
|
[] |
no_license
|
farisawan-2000/papermario
|
38e2ef57ce9099202e064ab9b3fb582bb6df8218
|
3a918a952b1a7ef326c76b03b0d6af26100ab650
|
refs/heads/master
| 2023-02-23T07:02:55.730335
| 2021-01-28T08:39:38
| 2021-01-28T08:39:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,049
|
py
|
#!/usr/bin/python3
import os
import sys
import subprocess
from pathlib import Path

# Prefix references to shared world/common symbols inside the nonmatching
# world .s files with the owning map's directory name.

script_dir = os.path.dirname(os.path.realpath(__file__))
root_dir = os.path.abspath(os.path.join(script_dir, ".."))
src_dir = root_dir + "/src/"
asm_dir = root_dir + "/asm/"

# Base names (without ".inc.c") of every shared world source file.
common_files = [
    fname[:-6]
    for _walk_root, _walk_dirs, walk_files in os.walk(os.path.join(src_dir, "world", "common"))
    for fname in walk_files
    if fname.endswith(".inc.c")
]

for walk_root, _subdirs, walk_files in os.walk(os.path.join(asm_dir, "nonmatchings", "world")):
    # The map name is the parent directory of the function's folder.
    prefix = Path(walk_root).parent.name + "_"
    for fname in walk_files:
        if not fname.endswith(".s"):
            continue
        asm_path = os.path.join(walk_root, fname)
        with open(asm_path) as handle:
            original_text = handle.read()
        updated_text = original_text
        # The leading space restricts matches to whole symbol references.
        for common in common_files:
            updated_text = updated_text.replace(" " + common, " " + prefix + common)
        # Rewrite only changed files, forcing Unix newlines.
        if updated_text != original_text:
            with open(asm_path, "w", newline="\n") as handle:
                handle.write(updated_text)
|
[
"ethteck@gmail.com"
] |
ethteck@gmail.com
|
225fa3db85cd8e94446ef380a7fa4851213b66d9
|
1e7aa28fddc02226757ca7d0b253b8bf83ce1e9c
|
/princess.py
|
dc6e013749a161b980c1cf3d8cf0ea8f586b826a
|
[] |
no_license
|
cherianb59/puzzles
|
7c4bb85d98cc66438fb19eef5e3b6bd7ce5b5706
|
bbba3a20581807c078e5ca2d55a90fad317d93fd
|
refs/heads/main
| 2023-03-30T19:02:27.386411
| 2021-04-01T07:25:38
| 2021-04-01T07:25:38
| 353,611,346
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,907
|
py
|
DOOR_LENGTH = 7
DEPTH_LENGTH = 10

# array[i] == 1 iff the princess might still be behind door i (0-based) given
# the doors checked so far; `position` is the 1-based door checked this round.
# The princess moves to an adjacent door every night, so a door is possible
# next round iff a neighbour was possible and it is not the door being opened.
def step(array, position):
    previous = array[:]
    # leftmost door: only reachable from door index 1
    array[0] = int(previous[1] and position != 1)
    # interior doors: reachable from either neighbour
    for idx in range(1, len(previous) - 1):
        array[idx] = int((previous[idx - 1] | previous[idx + 1]) and position != idx + 1)
    # rightmost door: only reachable from its single neighbour
    array[DOOR_LENGTH - 1] = int(previous[DOOR_LENGTH - 2] and position != DOOR_LENGTH)
    return array
def try_sequence(sequence):
    # Simulate the prince opening doors in `sequence`. If every door becomes
    # impossible within DEPTH_LENGTH rounds the sequence is a guaranteed win;
    # record it in the module-level `success` list, otherwise in `fail`.
    possible = [1] * DOOR_LENGTH   # the princess can start anywhere
    no_doors = [0] * DOOR_LENGTH
    depth = 0
    while depth < DEPTH_LENGTH and possible != no_doors:
        possible = step(possible, sequence[depth])
        depth = depth + 1
    if possible == no_doors:
        print(sequence)
        success.append(sequence)
    else:
        fail.append(sequence)
# Recursively enumerate every door sequence of length DEPTH_LENGTH (door
# numbers 1..DOOR_LENGTH), testing each completed sequence in turn.
def loop(length, array):
    if length == DEPTH_LENGTH:
        try_sequence(array)
        return
    for door in range(1, DOOR_LENGTH + 1):
        array[length] = door
        loop(length + 1, array)
# Seed the shared sequence buffer, run the exhaustive search over all
# DOOR_LENGTH ** DEPTH_LENGTH orderings, then report how many succeeded.
sequence = [0] * DEPTH_LENGTH
success = []
fail = []
loop(0, sequence)
print(len(success))
|
[
"cherian.b59@gmail.com"
] |
cherian.b59@gmail.com
|
1d34bc71726742a51fd0ca6d27499f22e06c5e75
|
8e1a2833732102e5e421c34f98240de9dfa3172f
|
/reference_scripts/client_1.py
|
4a8305bfd8d4e4932bd711bcdd03f88698ecd407
|
[] |
no_license
|
NoPainsNoGains33/Network-Security
|
6f9306548d907276502c8e279eb73feafd15507a
|
39af41d7bea91e1b6289e2c9d4904c227481aa10
|
refs/heads/main
| 2021-02-16T07:44:56.987954
| 2019-12-05T06:26:20
| 2019-12-05T06:26:20
| 244,981,879
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,565
|
py
|
import socket
import sys
import time
import base64
import argparse
import os
import json
# import the message type
from message_head_pb2 import MESS
from threading import Thread
# to handle Ctrl+C
from signal import signal, SIGINT
# Command-line interface: server endpoint and this client's username.
arg_parser = argparse.ArgumentParser(description="Client-side application script to communicate via P2P chat")
arg_parser.add_argument("-sp", type=int, help="Port address for the server", dest="server_port", required=True)
arg_parser.add_argument("-sip", type=str, help="IP address for the server", dest="server_ip", required=True)
arg_parser.add_argument("-u", type=str, help="Username for the client", dest="username", required=True)
args = arg_parser.parse_args()

# UDP socket bound to an OS-chosen ephemeral port; shared by both threads.
message_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
message_socket.bind((socket.gethostname(), 0))

# store the list of users on the network when requested from the server;
# maps username -> (host, port) address pair as received from the server
AVAILABLE_USERS = dict()

# use stdout to print output over 'print', to avoid carriage returns in the prompts
def printout(text):
    sys.stdout.write(text)
    sys.stdout.flush()

# signal handler to kill the client: raising here lets the main thread's
# except clause perform socket cleanup before exiting
def kill_client(kill_code, frame):
    raise Exception

# advertise that the server is either coming online on the network, or
# confirm that it is still online (also answers server USER_POLL messages)
def sign_in():
    client_message = MESS()
    client_message.type = MESS.Type.SIGN_IN
    client_message.payload = args.username
    message_socket.sendto(client_message.SerializeToString(), (args.server_ip, args.server_port))
# method body for the thread to send chat commands
def user_send():
    """Read commands from stdin forever and emit the matching UDP datagrams.

    Python 2 source (raw_input, `except Exception, e`). Supported commands:
      * "list"                  -- ask the server for the signed-in users
      * "send <peer> <message>" -- send <message> directly to <peer>
    """
    try:
        # create a protobuf message for chat commands (reused each loop)
        command_message = MESS()
        global AVAILABLE_USERS
        while True:
            command = raw_input()
            # command type: list
            # description: retrieve a list of all the clients currently on
            # the network, from the server
            if (command == "list"):
                command_message.type = MESS.Type.LIST
                message_socket.sendto(command_message.SerializeToString(), (args.server_ip, args.server_port))
                continue;
            # command type: send
            # description: send a chat message to another client in the
            # form 'send <peer-name> <message>'
            if (("send" in command)):
                if (command.split()[1] in AVAILABLE_USERS.keys()):
                    command_message.type = MESS.Type.SEND
                    # prepend the message payload, i.e, the message to send, with the sender name
                    command_message.payload = command.replace("send " + command.split()[1], args.username)
                    message_socket.sendto(command_message.SerializeToString(), (
                        AVAILABLE_USERS[command.split()[1]][0], AVAILABLE_USERS[command.split()[1]][1]))
                    printout("+> ")
                    continue;
                # if the peer is no longer in the network, i.e, stale entry in AVAILABLE_USERS
                else:
                    printout("Sorry, this user is not on the network.\n")
                    printout("+> ")
            # if command entered is invalid
            else:
                printout("Sorry, invalid command\n")
                printout("+> ")
    # should catch network exceptions
    except Exception, e:
        # BUG(review): `print` and the message below are two separate
        # statements, so only a blank line is printed and the formatted
        # error text is a no-op expression.
        print
        "Exception occurred!: " + str(e)
        sys.exit(1)
def user_receive():
    """Receive datagrams forever: sign in on startup, then dispatch on the
    message type (USER_LIST / SEND / USER_POLL). Python 2 source."""
    try:
        global AVAILABLE_USERS
        # sign in and register with server when client is booted
        sign_in()
        printout("+> ")
        received_message = MESS()
        while True:
            data, node_address = message_socket.recvfrom(4096)
            received_message.ParseFromString(data)
            # when the client receives a list of available users from the server on bootup
            if (received_message.type == MESS.Type.USER_LIST):
                AVAILABLE_USERS = json.loads(received_message.payload)
                printout("<- Signed In Users: " + ", ".join(AVAILABLE_USERS.keys()) + "\n")
                printout("+> ")
            # when the client sends a message to another peer/client;
            # refresh the sender's address so our replies reach them
            if (received_message.type == MESS.Type.SEND):
                sender = received_message.payload.split()[0]
                AVAILABLE_USERS[sender] = node_address
                # prepare the sender id to display along with received mesage
                sender_string = "<From " + str(node_address[0]) + ":" + str(node_address[1]) + ":" + sender + ">:"
                printout("\n<- " + sender_string + received_message.payload.replace(sender, "") + "\n")
                printout("+> ")
            # server liveness poll: answer by re-registering
            if (received_message.type == MESS.Type.USER_POLL):
                sign_in()
    except Exception, e:
        # BUG(review): as in user_send, `print` and the message are separate
        # statements, so the error text is never actually shown.
        print
        "Sorry, an error occurred: " + str(e)
        sys.exit(1)
if __name__ == "__main__":
    try:
        # call the signal handler in the main thread (Ctrl+C -> kill_client)
        signal(SIGINT, kill_client)
        # thread to handle received socket messages
        receive_thread = Thread(target=user_receive, args=[])
        # thread to send socket messages
        send_thread = Thread(target=user_send, args=[])
        # start the thread
        receive_thread.start()
        send_thread.start()
        # keep the main thread alive to maintain context and catch the exit signal
        while True:
            time.sleep(0.5)
    # handle the exit condition (kill_client raises into this block)
    except Exception, e:
        printout("\nClient exited manually.")
        message_socket.close()
        # os._exit terminates immediately, taking the worker threads with it
        os._exit(0)
|
[
"dsilva.r@husky.neu.edu"
] |
dsilva.r@husky.neu.edu
|
4e7988d4ea8ba41f28f5989c68714cda98909800
|
2eabb719792dd552fb98850a571f2d7742bd8530
|
/10.Sorting&BS/MinMaxDivision.py
|
24177f567ebeee33c17bf52b89fe63d025233119
|
[] |
no_license
|
mjmingd/study_algorithm
|
93772b6849ba4afac61d594a2f6e52bbf0439858
|
29cb49a166a1dfd19c39613a0e9895c545a6bfe9
|
refs/heads/master
| 2022-03-26T11:57:23.383292
| 2020-01-03T15:07:36
| 2020-01-03T15:07:36
| 197,862,178
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,382
|
py
|
'''
Codility - MinMaxDivision
# similar question : LeetCode 410 (Split Array Largest Sum)
You are given integers K, M and a non-empty array A consisting of N integers. Every element of the array is not greater than M.
You should divide this array into K blocks of consecutive elements. The size of the block is any integer between 0 and N. Every element of the array should belong to some block.
The sum of the block from X to Y equals A[X] + A[X + 1] + ... + A[Y]. The sum of empty block equals 0.
The large sum is the maximal sum of any block.
For example, you are given integers K = 3, M = 5 and array A such that:
A[0] = 2
A[1] = 1
A[2] = 5
A[3] = 1
A[4] = 2
A[5] = 2
A[6] = 2
The array can be divided, for example, into the following blocks:
[2, 1, 5, 1, 2, 2, 2], [], [] with a large sum of 15;
[2], [1, 5, 1, 2], [2, 2] with a large sum of 9;
[2, 1, 5], [], [1, 2, 2, 2] with a large sum of 8;
[2, 1], [5, 1], [2, 2, 2] with a large sum of 6.
The goal is to minimize the large sum. In the above example, 6 is the minimal large sum.
Write a function:
def solution(K, M, A)
that, given integers K, M and a non-empty array A consisting of N integers, returns the minimal large sum.
For example, given K = 3, M = 5 and array A such that:
A[0] = 2
A[1] = 1
A[2] = 5
A[3] = 1
A[4] = 2
A[5] = 2
A[6] = 2
the function should return 6, as explained above.
Write an efficient algorithm for the following assumptions:
N and K are integers within the range [1..100,000];
M is an integer within the range [0..10,000];
each element of array A is an integer within the range [0..M].
Copyright 2009–2019 by Codility Limited. All Rights Reserved. Unauthorized copying, publication or disclosure prohibited.
'''
def IsValid(A, K, sumLimit):
    """Return True iff A can be split into at most K consecutive blocks whose
    individual sums never exceed sumLimit (greedy left-to-right packing)."""
    current_sum = 0
    extra_blocks = 0  # blocks opened after the first one
    for value in A:
        if current_sum + value > sumLimit:
            # close the current block and start a new one with this value
            current_sum = value
            extra_blocks = extra_blocks + 1
        else:
            current_sum = current_sum + value
        if extra_blocks >= K:
            return False
    return True

def solution(K, M, A):
    """Return the minimal possible "large sum" when dividing A into K blocks.

    Binary search over the answer between max(A) (every element alone fits)
    and sum(A) (everything in one block), using IsValid as the predicate.
    time complexity : O(N*log(sum(A)))
    space complexity : O(1)
    """
    lo = max(A)
    hi = sum(A)
    if K == 1:
        return hi          # a single block must hold everything
    if K >= len(A):
        return lo          # each element can sit in its own block
    while lo <= hi:
        candidate = (lo + hi) // 2
        if IsValid(A, K, candidate):
            hi = candidate - 1
        else:
            lo = candidate + 1
    return lo
|
[
"noreply@github.com"
] |
mjmingd.noreply@github.com
|
1f25eaacf5c9ccac5ef060cdcaf3e75712ac30ba
|
4cc285b0c585241ff4404087e6fbb901195639be
|
/NeuralNetworkNumbers/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/nn/__init__.py
|
422cec64251f38906def1dec89cf3e9f3c1cb091
|
[] |
no_license
|
strazhg/NeuralNetworksPython
|
815542f4ddbb86e918e657f783158f8c078de514
|
15038e44a5a6c342336c119cdd2abdeffd84b5b1
|
refs/heads/main
| 2023-04-16T18:51:29.602644
| 2021-04-27T14:46:55
| 2021-04-27T14:46:55
| 361,944,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:f9f44f6062a76ea4edc6b57e9980c88ed09cd53ee57337d2e7cebd8696fc0e2f
size 6611
|
[
"golubstrazh@gmail.com"
] |
golubstrazh@gmail.com
|
d7a8c18dd4736731eb4b2c57480333040f07c6da
|
ffc18cc92b55ef9867a588a2ea0c6c7cbffc9f8a
|
/venv/bin/gunicorn_paster
|
c0c8a22f3ee2dd511293e1bf2b0385802e7c6aad
|
[] |
no_license
|
dculibrk/pathgameweb
|
4e3c1a13e5045e4af602bf6c4339636b8bd32e05
|
40482d325a4e38c73bc26222f0ecf7776a8ceb16
|
refs/heads/master
| 2023-02-06T07:15:24.821085
| 2020-06-08T16:48:57
| 2020-06-08T16:48:57
| 178,581,137
| 0
| 0
| null | 2023-02-02T03:25:13
| 2019-03-30T16:08:21
|
Python
|
UTF-8
|
Python
| false
| false
| 274
|
#!/Users/duba/Documents/work/CG/game/deploy_heroku/venv/bin/python3.6
# -*- coding: utf-8 -*-
# Auto-generated console-script shim: strips the setuptools wrapper suffix
# from argv[0], then hands control to gunicorn's paster entry point.
import re
import sys

from gunicorn.app.pasterapp import run

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
|
[
"duba@Dubravkos-MacBook-Pro.local"
] |
duba@Dubravkos-MacBook-Pro.local
|
|
d4e081e72a8ec5a06360e56051a15f3f071d84e0
|
9563128aa1ed41b4eeb4f252f5fcfcb732f94fc4
|
/2- PYTHON V4/3- Django 2.2v4/1- Django Intro/3- Dojo Survey/dojo_survey_app/views.py
|
03fa8bc39de883f21fe95dee83f6c2cea848bf62
|
[] |
no_license
|
alexbarg3000/Coding-Dojo
|
dcaa7e4ad4b17dd6008b2f1035ab746831f23fc4
|
aaa44820a26281e7d84f8c4d35d7ea9772230d93
|
refs/heads/main
| 2023-03-21T08:36:40.837760
| 2021-03-15T02:56:45
| 2021-03-15T02:56:45
| 347,510,909
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 889
|
py
|
from django.shortcuts import render, HttpResponse, redirect
def index(request):
    """Serve the survey form page."""
    template_name = "index.html"
    return render(request, template_name)
def create_user(request):
    """Render the survey results page from the submitted form data.

    BUG FIX: non-POST requests (e.g. visiting the URL directly) previously
    produced no HttpResponse, which Django reports as an error; they are now
    redirected back to the form.
    """
    if request.method != 'POST':
        # guard clause: only form submissions carry the survey fields
        return redirect('/')
    # getlist() because multiple semester checkboxes may be selected
    semesters = request.POST.getlist('semester')
    context = {
        "name_on_template": request.POST['name'],
        "location_on_template": request.POST['location'],
        "language_on_template": request.POST['language'],
        "comment_on_template": request.POST['comment'],
        "preference_on_template": request.POST['preference'],
        "semester_on_template": semesters,
    }
    return render(request, "show.html", context)
|
[
"alexbarg3000@yahoo.com"
] |
alexbarg3000@yahoo.com
|
c3084d05deb4895e75b4f3656c9386de01067a15
|
ee6c5924bca5ad4df64c6922129d600ae3e1ba71
|
/info_Instagram.py
|
725ff71e88a0df89e7bada35a956061c37bf3195
|
[] |
no_license
|
jev0m/information-div0m
|
f30520f912225ab10f95fd95efe6faaaee314df2
|
447c33588199f610992d7d3421afbf3ccf39e871
|
refs/heads/main
| 2023-03-21T13:52:24.434171
| 2021-03-19T12:59:45
| 2021-03-19T12:59:45
| 349,423,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,661
|
py
|
import requests
import pyfiglet

## By Xcode & @xcodeon1
## By Twitter : @Matrix0700

# ANSI colour escape codes for terminal output (red / green / blue)
R = '\033[31m'
G = '\033[32m'
B = '\033[34m'

print(R+" @xcodeon1")
br = pyfiglet.figlet_format("Info.py")
print(R+br)

user = input(B+"username :")
print(R+"-"*40)

# Instagram private-API account lookup endpoint. The cookie and the
# signed_body signature below are hard-coded captures from one session and
# may stop working at any time — TODO confirm they are still accepted.
url = "https://i.instagram.com:443/api/v1/users/lookup/"
cookies = {"mid": "XOSINgABAAG1IDmaral3noOozrK0rrNSbPuSbzHq"}
headers = {"Connection": "close", "X-IG-Connection-Type": "WIFI", "X-IG-Capabilities": "3R4=",
           "Accept-Language": "ar-AE",
           "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
           "User-Agent": "Instagram 99.4.0 S3od_al3nzi (Dmaral3noOoz)",
           "Accept-Encoding": "gzip, deflate"}
data = {"signed_body": "35a2d547d3b6ff400f713948cdffe0b789a903f86117eb6e2f3e573079b2f038.{\"q\":\"%s\"}" % user }

# NOTE(review): `re` here is the HTTP response object, not the stdlib regex
# module (which is not imported in this file).
re = requests.post(url, headers=headers, cookies=cookies, data=data)
info = re.json()
# print(info)
print(G+"Username :"+user)
# NOTE(review): the else-branch labels below do not match their condition
# ("Sms_Sent : True" is printed when email_sent is truthy) — likely a
# copy-paste slip in the original.
if info['email_sent'] == False :
    print(G+"Email_Sent : False")
else:
    print("Sms_Sent : True")
if info['sms_sent'] == False :
    print(G+"sms_Sent : False")
else:
    print("sms : True")
def emailPhoneIsuue(info):
    """Print the obfuscated recovery email/phone hints from the lookup payload.

    Cleanup: the original had stray no-op string-literal statements
    ('obfuscated_email' / 'obfuscated_phone') after each `except KeyError`;
    they did nothing and are removed. Behaviour is otherwise unchanged:
    a missing key prints nothing, a present-but-empty phone prints "oh".
    """
    # Missing or empty email -> print nothing (dict.get replaces try/KeyError).
    email_hint = info.get('obfuscated_email')
    if email_hint:
        print(G + "His Phone Email Is : " + email_hint)
    # Phone: only a present-but-falsy value prints "oh"; a missing key is silent.
    try:
        if info['obfuscated_phone']:
            print(G + "His Phone number Is: " + info['obfuscated_phone'])
        else:
            print("oh")
    except KeyError:
        pass
# Print the recovery hints for the fetched payload, then a footer rule.
emailPhoneIsuue(info)
print(R+"-"*40)
print("\n")
|
[
"noreply@github.com"
] |
jev0m.noreply@github.com
|
008e4b6abdfc8b439322dd825a7295685d2716fa
|
ad9bce4809d413b2595b2a7eb85ef6dbc2660016
|
/botinterface/nonebotconfig_example.py
|
f7c2f1bf0db797f9a7d6b15ebf21b7daed14cb83
|
[
"MIT"
] |
permissive
|
bothbot/awesome-bothbot
|
7fabee78d24ed92685769492596d8b57321aa40a
|
764998897f4482614121b0a5a9205312febfb2a9
|
refs/heads/master
| 2022-12-14T02:20:36.603647
| 2020-09-16T01:24:18
| 2020-09-16T01:24:18
| 294,954,817
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
# -*- coding: UTF-8 -*-
from nonebot.default_config import *

# Additional nonebot settings can be overridden in this file.

# Global administrator user ids.
# NOTE(review): with only commented-out entries inside, this literal is an
# empty dict, not a set — harmless while empty, but add real ids as a set.
SUPERUSERS = {
    #12345,
    #...,
}
# Command prefix characters (half-width and full-width exclamation marks).
COMMAND_START = {'!', '!'}
# nonebot debug switch
DEBUG = False
# Message shown while a command session is already running.
SESSION_RUNNING_EXPRESSION = '您有命令正在运行!'
|
[
"1362941473@qq.com"
] |
1362941473@qq.com
|
4728245652c5aa72351604934b2ca40e9612e261
|
1e44f7826fc872a480400b9f6f4658fc48d688ab
|
/Functions/Python/MIND/__init__.py
|
2eb6e9d1916786ecefe974d8ea5c95504295f414
|
[] |
no_license
|
hsokooti/RegUn
|
220e01f5957a81efb39b1351c5d1ebbc44622f82
|
f029d61e1146af2992ae71d0f59c6e881db95aad
|
refs/heads/master
| 2022-05-04T05:10:38.485534
| 2022-04-21T21:42:49
| 2022-04-21T21:42:49
| 151,310,972
| 25
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 98
|
py
|
# Re-export the public names of the MIND submodules as the package API.
from .pyMIND import *
from .search_region import *

__version__ = "0.1.0"
__author__ = "hsokooti"
|
[
"h.sokooti_oskooyi@lumc.nl"
] |
h.sokooti_oskooyi@lumc.nl
|
d7590593aba400293172eda8319a4f422cb65f7f
|
6655650f052a3b140f02de41b7393a24cd23747a
|
/D_D/getPDF.py
|
fb3b0e8242ff5eb0b62cc71c0b0243156728f832
|
[] |
no_license
|
bivasmaiti26/census2001auto
|
5238286934c4bd82405aaf3b2830bfebe6ce2162
|
6c1a2d7e96d80d2b7e75d7a1096611bc4ef9b844
|
refs/heads/master
| 2020-04-22T18:48:40.621253
| 2018-01-08T15:10:47
| 2018-01-08T15:10:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 538
|
py
|
import urllib
def downloadPDF(state_code):
    # Download every district datasheet PDF for one state from
    # censusindia.gov.in, probing district numbers 01, 02, ... until a
    # retrieval raises. Python 2 code: urllib.urlretrieve moved to
    # urllib.request in Python 3.
    # getting the zero-padded string for the state code
    if (state_code<10):
        string_state='0'+str(state_code)
    else:
        string_state=str(state_code)
    i=1
    while True:
        # getting the zero-padded string for the district code
        if (i<10):
            string_district='0'+str(i)
        else:
            string_district=str(i)
        try:
            urllib.urlretrieve ("http://censusindia.gov.in/Dist_File/datasheet-"+string_state+string_district+".pdf","district"+string_district+".pdf")
        # NOTE(review): the bare except treats ANY failure (network down,
        # DNS error, Ctrl+C) as "no more districts" and stops the loop.
        except:
            break
        i=i+1

# Daman & Diu
code=25
downloadPDF(code)
|
[
"preetskhalsa97@gmail.com"
] |
preetskhalsa97@gmail.com
|
3bbd40f0544e8ea2cab309d60a3303790e349651
|
ad915d13a5984798cbafb19bbf0fdf5842a78a08
|
/pyfinnotech/tests/test_client_credential_token.py
|
97c28261c8d42aae191f5f7d6db395ea5e561383
|
[] |
no_license
|
mahdi13/pyfinnotech
|
1158aa8f0d636810e4fb76c030d907ccd973a0d5
|
4803faa6e2a99a11afc946b39a0e923018f84047
|
refs/heads/master
| 2022-06-05T03:57:44.635283
| 2022-05-16T12:23:16
| 2022-05-16T12:23:16
| 240,782,995
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,221
|
py
|
from pyfinnotech.tests.helper import ApiClientTestCase
# Card-number strings expected to fail validation (wrong length /
# non-numeric prefix). NOTE(review): not referenced anywhere in this
# module's visible code — possibly left over or used elsewhere.
client_invalid_mock_cards = [
    '000000000000000',
    'A000000000000000',
]
class ClientCredentialTestCase(ApiClientTestCase):
    """Exercises the client-credential token lifecycle of the API client.

    Cleanup: the original repeated the same six assertIsNotNone lines in both
    tests, including a duplicated `scopes` assertion; the shared checks are
    factored into one helper and the duplicate is dropped.
    """

    def _assert_credential_populated(self, client_credential):
        # Every token-metadata field must be present on a usable credential.
        self.assertIsNotNone(client_credential.token)
        self.assertIsNotNone(client_credential.refresh_token)
        self.assertIsNotNone(client_credential.creation_date)
        self.assertIsNotNone(client_credential.life_time)
        self.assertIsNotNone(client_credential.scopes)

    def test_fetch_client_credential(self):
        """A freshly fetched credential carries all token metadata."""
        self._assert_credential_populated(self.api_client.client_credential)

    def test_refresh_client_credential(self):
        """Refreshing an existing credential repopulates all token metadata."""
        client_credential = self.api_client.client_credential
        client_credential.refresh(self.api_client)
        self._assert_credential_populated(client_credential)

    def test_revoke_client_credential(self):
        # TODO: revocation flow not implemented yet
        pass
|
[
"mahdi13.1373@gmail.com"
] |
mahdi13.1373@gmail.com
|
addd10e6193e7e8522a2c5f729c47c0dba75866f
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/sieve-big-5429.py
|
f003b6f244e54ff69c91f1a5eb2bd1fb0dbdf743
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,755
|
py
|
# A resizable list of integers (ChocoPy-style source; annotation syntax kept)
class Vector(object):
    items: [int] = None
    size: int = 0

    def __init__(self:"Vector"):
        self.items = [0]

    # Returns current capacity (length of the backing list)
    def capacity(self:"Vector") -> int:
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector, growing the backing list if full
    def append(self:"Vector", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Removes an item from the middle of vector; out-of-range indexes are a no-op
    def remove_at(self:"Vector", idx: int) -> object:
        # BUG FIX: the original only rejected negative indexes, so a call
        # with idx >= size skipped the shift loop but still decremented
        # size, silently dropping the last element.
        if idx < 0 or idx >= self.size:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index
    def get(self:"Vector", idx: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector") -> int:
        return self.size
# A resizable list of integers.
# NOTE(review): the *2 fields and methods appear to be synthetic duplicates
# (this file comes from a generated benchmark suite); the "2" variants ignore
# their extra parameters and operate on `items`/`size` like the originals.
class Vector2(object):
    items: [int] = None
    items2: [int] = None
    size: int = 0
    size2: int = 0

    def __init__(self:"Vector2"):
        self.items = [0]

    # Returns current capacity
    def capacity(self:"Vector2") -> int:
        return len(self.items)

    # Returns current capacity (duplicate; also reads `items`, not `items2`)
    def capacity2(self:"Vector2") -> int:
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector2") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element (duplicate of the above)
    def increase_capacity2(self:"Vector2") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector
    def append(self:"Vector2", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2 is ignored)
    def append2(self:"Vector2", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector2", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2 is ignored)
    def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)

    # Removes an item from the middle of vector.
    # NOTE(review): as in Vector, idx >= size skips the shift loop but still
    # decrements size, dropping the last element — confirm if intentional.
    def remove_at(self:"Vector2", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2 is ignored)
    def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index
    def get(self:"Vector2", idx: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2 is ignored)
    def get2(self:"Vector2", idx: int, idx2: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector2") -> int:
        return self.size

    # Retrieves the current size of the vector (duplicate)
    def length2(self:"Vector2") -> int:
        return self.size
# A resizable list of integers.
# Width-3 clone of Vector (generated benchmark code): every member is
# repeated with suffixes 2 and 3. The suffixed fields and extra
# parameters are never read — every method works on the primary
# items/size pair, which appears intentional for compiler stress-testing.
class Vector3(object):
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    def __init__(self:"Vector3"):
        # Begin with a single (unused) slot.
        self.items = [0]
    # Number of slots available in the backing storage
    def capacity(self:"Vector3") -> int:
        return len(self.items)
    # Duplicate of capacity()
    def capacity2(self:"Vector3") -> int:
        return len(self.items)
    # Duplicate of capacity()
    def capacity3(self:"Vector3") -> int:
        return len(self.items)
    # Adds one slot to the backing storage; reports the new capacity
    def increase_capacity(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity()
    def increase_capacity2(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity()
    def increase_capacity3(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Stores one value after the last used slot, growing first if full
    def append(self:"Vector3", item: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Same as append(); item2 is ignored
    def append2(self:"Vector3", item: int, item2: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Same as append(); item2 and item3 are ignored
    def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends every value of new_items, in order
    def append_all(self:"Vector3", new_items: [int]) -> object:
        value:int = 0
        for value in new_items:
            self.append(value)
    # Same as append_all(); new_items2 is ignored
    def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
        value:int = 0
        value2:int = 0
        for value in new_items:
            self.append(value)
    # Same as append_all(); the extra lists are ignored
    def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        value:int = 0
        value2:int = 0
        value3:int = 0
        for value in new_items:
            self.append(value)
    # Deletes the value at position idx by shifting its successors left
    def remove_at(self:"Vector3", idx: int) -> object:
        if idx < 0:
            return
        while idx + 1 < self.size:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Same as remove_at(); idx2 is ignored
    def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx + 1 < self.size:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Same as remove_at(); idx2 and idx3 are ignored
    def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx + 1 < self.size:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Value stored at position idx
    def get(self:"Vector3", idx: int) -> int:
        return self.items[idx]
    # Same as get(); idx2 is ignored
    def get2(self:"Vector3", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Same as get(); idx2 and idx3 are ignored
    def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]
    # Number of values currently stored
    def length(self:"Vector3") -> int:
        return self.size
    # Duplicate of length()
    def length2(self:"Vector3") -> int:
        return self.size
    # Duplicate of length()
    def length3(self:"Vector3") -> int:
        return self.size
# A resizable list of integers.
# Width-4 clone of Vector (generated benchmark code): every member is
# repeated with suffixes 2..4. The suffixed fields and extra parameters
# are never read — every method works on the primary items/size pair,
# which appears intentional for compiler stress-testing.
class Vector4(object):
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0
    def __init__(self:"Vector4"):
        # Begin with a single (unused) slot.
        self.items = [0]
    # Number of slots available in the backing storage
    def capacity(self:"Vector4") -> int:
        return len(self.items)
    # Duplicate of capacity()
    def capacity2(self:"Vector4") -> int:
        return len(self.items)
    # Duplicate of capacity()
    def capacity3(self:"Vector4") -> int:
        return len(self.items)
    # Duplicate of capacity()
    def capacity4(self:"Vector4") -> int:
        return len(self.items)
    # Adds one slot to the backing storage; reports the new capacity
    def increase_capacity(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity()
    def increase_capacity2(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity()
    def increase_capacity3(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity()
    def increase_capacity4(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Stores one value after the last used slot, growing first if full
    def append(self:"Vector4", item: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Same as append(); item2 is ignored
    def append2(self:"Vector4", item: int, item2: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Same as append(); the extra arguments are ignored
    def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Same as append(); the extra arguments are ignored
    def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends every value of new_items, in order
    def append_all(self:"Vector4", new_items: [int]) -> object:
        value:int = 0
        for value in new_items:
            self.append(value)
    # Same as append_all(); new_items2 is ignored
    def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
        value:int = 0
        value2:int = 0
        for value in new_items:
            self.append(value)
    # Same as append_all(); the extra lists are ignored
    def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        value:int = 0
        value2:int = 0
        value3:int = 0
        for value in new_items:
            self.append(value)
    # Same as append_all(); the extra lists are ignored
    def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        value:int = 0
        value2:int = 0
        value3:int = 0
        value4:int = 0
        for value in new_items:
            self.append(value)
    # Deletes the value at position idx by shifting its successors left
    def remove_at(self:"Vector4", idx: int) -> object:
        if idx < 0:
            return
        while idx + 1 < self.size:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Same as remove_at(); idx2 is ignored
    def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx + 1 < self.size:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Same as remove_at(); the extra arguments are ignored
    def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx + 1 < self.size:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Same as remove_at(); the extra arguments are ignored
    def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        while idx + 1 < self.size:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Value stored at position idx
    def get(self:"Vector4", idx: int) -> int:
        return self.items[idx]
    # Same as get(); idx2 is ignored
    def get2(self:"Vector4", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Same as get(); the extra arguments are ignored
    def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]
    # Same as get(); the extra arguments are ignored
    def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]
    # Number of values currently stored
    def length(self:"Vector4") -> int:
        return self.size
    # Duplicate of length()
    def length2(self:"Vector4") -> int:
        return self.size
    # Duplicate of length()
    def length3(self:"Vector4") -> int:
        return self.size
    # Duplicate of length()
    def length4(self:"Vector4") -> int:
        return self.size
# A resizable list of integers.
# Width-5 clone of Vector (generated benchmark code): every member is
# repeated with suffixes 2..5. The suffixed fields and extra parameters
# are never read — every method works on the primary items/size pair.
# FIX: append_all4 contained a corrupted local declaration ("$TypedVar = 0",
# a syntax error); restored to "item2:int = 0" to match append_all2/append_all5.
class Vector5(object):
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    items5: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0
    size5: int = 0
    def __init__(self:"Vector5"):
        # Start with a single (unused) slot.
        self.items = [0]
    # Returns current capacity
    def capacity(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity (duplicate)
    def capacity2(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity (duplicate)
    def capacity3(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity (duplicate)
    def capacity4(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity (duplicate)
    def capacity5(self:"Vector5") -> int:
        return len(self.items)
    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element (duplicate)
    def increase_capacity2(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element (duplicate)
    def increase_capacity3(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element (duplicate)
    def increase_capacity4(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element (duplicate)
    def increase_capacity5(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector
    def append(self:"Vector5", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector (item2 is ignored)
    def append2(self:"Vector5", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector (extra arguments are ignored)
    def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector (extra arguments are ignored)
    def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector (extra arguments are ignored)
    def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends many items to end of vector
    def append_all(self:"Vector5", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector (new_items2 is ignored)
    def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector (extra lists are ignored)
    def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector (extra lists are ignored)
    def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector (extra lists are ignored)
    def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        item5:int = 0
        for item in new_items:
            self.append(item)
    # Removes an item from the middle of vector
    def remove_at(self:"Vector5", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector (idx2 is ignored)
    def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector (extra arguments are ignored)
    def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector (extra arguments are ignored)
    def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector (extra arguments are ignored)
    def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves an item at a given index
    def get(self:"Vector5", idx: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index (idx2 is ignored)
    def get2(self:"Vector5", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index (extra arguments are ignored)
    def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index (extra arguments are ignored)
    def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index (extra arguments are ignored)
    def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector (duplicate)
    def length2(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector (duplicate)
    def length3(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector (duplicate)
    def length4(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector (duplicate)
    def length5(self:"Vector5") -> int:
        return self.size
# A vector variant that trades memory for speed: capacity is doubled on
# each grow until doubling_limit is reached, then grows linearly.
class DoublingVector(Vector):
    doubling_limit:int = 1000   # capacity threshold beyond which growth is one-slot
    # Overridden growth policy: double while small, +1 per grow afterwards
    def increase_capacity(self:"DoublingVector") -> int:
        if (self.doubling_limit // 2 >= self.capacity()):
            # Still below the limit: duplicate the whole backing list.
            self.items = self.items + self.items
        else:
            # Limit reached: fall back to one-slot growth.
            self.items = self.items + [0]
        return self.capacity()
# A vector variant that trades memory for speed (width-2 benchmark clone).
# Only the primary doubling_limit is ever consulted.
class DoublingVector2(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    # Overridden growth policy: double while small, +1 per grow afterwards
    def increase_capacity(self:"DoublingVector2") -> int:
        if (self.doubling_limit // 2 >= self.capacity()):
            # Still below the limit: duplicate the whole backing list.
            self.items = self.items + self.items
        else:
            # Limit reached: fall back to one-slot growth.
            self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity()
    def increase_capacity2(self:"DoublingVector2") -> int:
        if (self.doubling_limit // 2 >= self.capacity()):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()
# A vector variant that trades memory for speed (width-3 benchmark clone).
# Only the primary doubling_limit is ever consulted.
class DoublingVector3(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    # Overridden growth policy: double while small, +1 per grow afterwards
    def increase_capacity(self:"DoublingVector3") -> int:
        if (self.doubling_limit // 2 >= self.capacity()):
            # Still below the limit: duplicate the whole backing list.
            self.items = self.items + self.items
        else:
            # Limit reached: fall back to one-slot growth.
            self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity()
    def increase_capacity2(self:"DoublingVector3") -> int:
        if (self.doubling_limit // 2 >= self.capacity()):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity()
    def increase_capacity3(self:"DoublingVector3") -> int:
        if (self.doubling_limit // 2 >= self.capacity()):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()
# A vector variant that trades memory for speed (width-4 benchmark clone).
# Only the primary doubling_limit is ever consulted.
class DoublingVector4(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    # Overridden growth policy: double while small, +1 per grow afterwards
    def increase_capacity(self:"DoublingVector4") -> int:
        if (self.doubling_limit // 2 >= self.capacity()):
            # Still below the limit: duplicate the whole backing list.
            self.items = self.items + self.items
        else:
            # Limit reached: fall back to one-slot growth.
            self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity()
    def increase_capacity2(self:"DoublingVector4") -> int:
        if (self.doubling_limit // 2 >= self.capacity()):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity()
    def increase_capacity3(self:"DoublingVector4") -> int:
        if (self.doubling_limit // 2 >= self.capacity()):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity()
    def increase_capacity4(self:"DoublingVector4") -> int:
        if (self.doubling_limit // 2 >= self.capacity()):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()
# A vector variant that trades memory for speed (width-5 benchmark clone).
# Only the primary doubling_limit is ever consulted.
class DoublingVector5(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    doubling_limit5:int = 1000
    # Overridden growth policy: double while small, +1 per grow afterwards
    def increase_capacity(self:"DoublingVector5") -> int:
        if (self.doubling_limit // 2 >= self.capacity()):
            # Still below the limit: duplicate the whole backing list.
            self.items = self.items + self.items
        else:
            # Limit reached: fall back to one-slot growth.
            self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity()
    def increase_capacity2(self:"DoublingVector5") -> int:
        if (self.doubling_limit // 2 >= self.capacity()):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity()
    def increase_capacity3(self:"DoublingVector5") -> int:
        if (self.doubling_limit // 2 >= self.capacity()):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity()
    def increase_capacity4(self:"DoublingVector5") -> int:
        if (self.doubling_limit // 2 >= self.capacity()):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity()
    def increase_capacity5(self:"DoublingVector5") -> int:
        if (self.doubling_limit // 2 >= self.capacity()):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()
# Builds a DoublingVector holding the integers i, i+1, ..., j-1
def vrange(i:int, j:int) -> Vector:
    v:Vector = None
    v = DoublingVector()
    while not (i >= j):
        v.append(i)
        i = i + 1
    return v
# Builds a DoublingVector holding i..j-1 (the extra arguments are ignored)
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
    v:Vector = None
    v2:Vector = None
    v = DoublingVector()
    while not (i >= j):
        v.append(i)
        i = i + 1
    return v
# Builds a DoublingVector holding i..j-1 (the extra arguments are ignored)
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v = DoublingVector()
    while not (i >= j):
        v.append(i)
        i = i + 1
    return v
# Builds a DoublingVector holding i..j-1 (the extra arguments are ignored)
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v4:Vector = None
    v = DoublingVector()
    while not (i >= j):
        v.append(i)
        i = i + 1
    return v
# Builds a DoublingVector holding i..j-1 (the extra arguments are ignored)
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v4:Vector = None
    v5:Vector = None
    v = DoublingVector()
    while not (i >= j):
        v.append(i)
        i = i + 1
    return v
# Deletes, in place, every later element of v that is divisible by an
# earlier one — a rough (quadratic) cousin of the Sieve of Eratosthenes.
def sieve(v:Vector) -> object:
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k != 0:
                # Not a multiple of k: keep it and move on.
                j = j + 1
            else:
                # Multiple of k: drop it; the next element slides into j.
                v.remove_at(j)
        i = i + 1
# Same sieve as sieve(); the extra vector argument is ignored
def sieve2(v:Vector, v2:Vector) -> object:
    i:int = 0
    i2:int = 0
    j:int = 0
    j2:int = 0
    k:int = 0
    k2:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k != 0:
                j = j + 1
            else:
                v.remove_at(j)
        i = i + 1
# Same sieve as sieve(); the extra vector arguments are ignored
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
    i:int = 0
    i2:int = 0
    i3:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k != 0:
                j = j + 1
            else:
                v.remove_at(j)
        i = i + 1
# Same sieve as sieve(); the extra vector arguments are ignored
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
    i:int = 0
    i2:int = 0
    i3:int = 0
    i4:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    j4:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    k4:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k != 0:
                j = j + 1
            else:
                v.remove_at(j)
        i = i + 1
# Same sieve as sieve(); the extra vector arguments are ignored
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
    i:int = 0
    i2:int = 0
    i3:int = 0
    i4:int = 0
    i5:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    j4:int = 0
    j5:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    k4:int = 0
    k5:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k != 0:
                j = j + 1
            else:
                v.remove_at(j)
        i = i + 1
# Input parameter: sieve the integers in [2, n)
n:int = 50
# NOTE(review): n2..n5, v2..v5 and i2..i5 below are unused padding —
# this file is generated compiler-benchmark code.
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch: build [2, n) five times, but only sieve the first copy
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print the surviving values (the primes below n), one per line
while i < v.length():
    print(v.get(i))
    i = i + 1
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
ff8d820c965642aa2c3f657c8f50044852250de7
|
95dd3f021a03d408e93b40f498e58c7b07abb796
|
/gans/cgan.py
|
4a16c4d40226b0c455b3cbcfd51a9789fdd5e258
|
[
"MIT"
] |
permissive
|
er-Bot/gans
|
18e1c46352c7b2d7591eb3ca0e7335bb97ea5e69
|
fc19446750e10896dd3b1746b0ccb3c4d3b5ed8d
|
refs/heads/main
| 2023-03-15T09:04:20.926340
| 2021-03-09T00:57:57
| 2021-03-09T00:57:57
| 345,826,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,594
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm.auto import tqdm
from torchvision.utils import make_grid
import matplotlib.pyplot as plt
__all__ = ["Discriminator", "Generator", "CGAN"]
criterion = nn.BCEWithLogitsLoss()
hidden_dim = 128
class Discriminator(nn.Module):
    """MLP discriminator for a conditional GAN.

    Scores a flattened image concatenated with its (one-hot) label and
    returns one unnormalised logit per sample — pair with the
    module-level BCEWithLogitsLoss ``criterion``.
    """
    def __init__(self, in_dim):
        # in_dim: flattened image size plus the label vector width.
        super(Discriminator, self).__init__()
        # Funnel 4h -> 2h -> h -> 1 with LeakyReLU and dropout between layers.
        self.model = nn.Sequential(
            nn.Linear(in_dim, 4 * hidden_dim),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(4 * hidden_dim, 2 * hidden_dim),
            nn.Dropout(0.4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(2 * hidden_dim, hidden_dim),
            nn.Dropout(0.4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(hidden_dim, 1)
        )
    def forward(self, x, y):
        """Return logits for (image x, condition y), joined on the last axis."""
        d_in = torch.cat((x, y), -1)
        return self.model(d_in)
class Generator(nn.Module):
    """MLP generator for a conditional GAN.

    Maps a latent noise vector concatenated with a (one-hot) label to a
    flattened image; the final Sigmoid keeps pixel values in [0, 1].
    """
    def __init__(self, in_dim, out_dim):
        # in_dim: noise size + label width; out_dim: flattened image size.
        super(Generator, self).__init__()
        # Expand h -> 2h -> 4h -> 8h with BatchNorm+ReLU, then project to out_dim.
        self.model = nn.Sequential(
            nn.Linear(in_dim, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_dim, 2 * hidden_dim),
            nn.BatchNorm1d(2 * hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(2 * hidden_dim, 4 * hidden_dim),
            nn.BatchNorm1d(4 * hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(4 * hidden_dim, 8 * hidden_dim),
            nn.BatchNorm1d(8 * hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(8 * hidden_dim, out_dim),
            nn.Sigmoid()
        )
    def forward(self, z, y):
        """Return a fake image for (noise z, condition y), joined on the last axis."""
        g_in = torch.cat((z, y), -1)
        return self.model(g_in)
class CGAN:
    """Conditional GAN trainer: wires Generator and Discriminator together,
    runs the alternating update loop, and periodically writes sample grids,
    loss curves and resumable checkpoints.

    Call setup() (fresh run) or load_state() (resume) before train().
    """
    def __init__(self):
        # Only selects the device; models are built lazily in setup()/load_state().
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # img_size of the form (1, w, h) e.g. for MNIST it's (1, 28, 28)
    def setup(self, z_dim, n_classes, img_size, lr, betas):
        """Build fresh networks and Adam optimisers.

        z_dim: latent noise width; n_classes: label-vocabulary size;
        img_size: (channels, width, height); lr/betas: Adam hyperparameters.
        """
        self.z_dim = z_dim
        self.n_classes = n_classes
        self.img_size = img_size
        assert len(img_size) == 3, 'size sould be of format : (channel, width, heigt)'
        # NOTE(review): x_dim ignores img_size[0] — assumes single-channel images.
        x_dim = img_size[1] * img_size[2]
        self.generator = Generator(z_dim + n_classes, x_dim).to(self.device)
        self.discriminator = Discriminator(x_dim + n_classes).to(self.device)
        self.g_opt = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=betas)
        self.d_opt = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=betas)
        self.d_loss_history = []
        self.g_loss_history = []
        # Fixed noise batch so sample grids stay comparable across epochs.
        self.z = self.noise(100)
        self.start_epoch = 0
    def load_state(self, path):
        """Restore everything train() saved, for resuming a run.

        NOTE(review): torch.load unpickles the file and can execute
        arbitrary code — only load checkpoints you trust.
        """
        state = torch.load(path, map_location=self.device)
        self.z_dim = state['z_dim']
        self.n_classes = state['n_classes']
        self.img_size = state['img_size']
        self.generator = state['gen']
        self.discriminator = state['disc']
        self.g_opt = state['g_opt']
        self.d_opt = state['d_opt']
        self.d_loss_history = state['d_loss_history'].tolist()
        self.g_loss_history = state['g_loss_history'].tolist()
        self.z = state['z']
        self.start_epoch = state['start_epoch']
    def noise(self, n):
        """Sample n standard-normal latent vectors on the model device."""
        return torch.randn(n, self.z_dim, device=self.device)
    def show_images(self, images, figsize=(10, 10), nrow=10, show=False, path='.'):
        """Arrange flattened images into a grid; save to `path` unless show=True."""
        img_unflat = images.detach().cpu().view(-1, *self.img_size)
        img_grid = make_grid(img_unflat, nrow=nrow)
        plt.figure(figsize=figsize)
        plt.imshow(img_grid.permute(1, 2, 0).squeeze())
        if not show:
            plt.savefig(path)
        else:
            plt.show()
        plt.close(None)
    def get_discriminator_loss(self, real, labels, batch_size):
        """BCE loss for the discriminator: fakes -> 0, reals -> 1.

        The generator output is detach()ed so only D receives gradients.
        """
        noise = self.noise(batch_size)
        fake_image_gen = self.generator(noise, labels)
        fake_image_pred = self.discriminator(fake_image_gen.detach(), labels)
        fake_image_loss = criterion(fake_image_pred, torch.zeros_like(fake_image_pred))
        real_image_pred = self.discriminator(real, labels)
        real_image_loss = criterion(real_image_pred, torch.ones_like(real_image_pred))
        disc_loss = (fake_image_loss + real_image_loss) / 2
        return disc_loss
    def get_generator_loss(self, labels, batch_size):
        """BCE loss for the generator: it wants D to label its fakes as real (1)."""
        noise = self.noise(batch_size)
        fake_image_gen = self.generator(noise, labels)
        fake_image_pred = self.discriminator(fake_image_gen, labels)
        gen_loss = criterion(fake_image_pred, torch.ones_like(fake_image_pred))
        return gen_loss
    def one_hot(self, labels):
        """One-hot encode integer class labels and move them to the device."""
        return F.one_hot(labels, self.n_classes).to(self.device)
    def train(self, dataloader, n_epochs, display_step=1, save_step=50, path='.'):
        """Alternating D/G training loop with periodic visuals and checkpoints.

        Every display_step epochs: print last losses, save a 10x10 sample
        grid (10 per class) and a binned loss plot. Every save_step epochs:
        save a full resumable checkpoint under `path`.
        """
        for epoch in range(self.start_epoch, n_epochs + 1):
            for real, labels in tqdm(dataloader):
                batch_size = len(real)
                real = real.view(batch_size, -1).to(self.device) # flatten to (B, w*h)
                y = self.one_hot(labels)
                """ Update discriminator """
                self.d_opt.zero_grad()
                disc_loss = self.get_discriminator_loss(real, y, batch_size)
                disc_loss.backward()
                self.d_opt.step()
                self.d_loss_history += [disc_loss.item()]
                """ Update generator """
                self.g_opt.zero_grad()
                gen_loss = self.get_generator_loss(y, batch_size)
                gen_loss.backward()
                self.g_opt.step()
                self.g_loss_history += [gen_loss.item()]
            ### Some visuals ###
            if epoch % display_step == 0:
                print(f"Epoch {epoch}: G_loss = {self.g_loss_history[-1]}, D_loss = {self.d_loss_history[-1]}")
                # Labels 0..9, ten of each, paired with the fixed noise batch.
                yy = self.one_hot(torch.arange(0, 100, 1)//10)
                generated = self.generator(self.z, yy)
                self.show_images(generated, path=path+'/sample-%04d.png'%epoch)
                # loss functions, averaged over bins of step_bins iterations
                step_bins = 20
                n_example = (len(self.d_loss_history) // step_bins) * step_bins
                plt.clf()
                plt.figure(figsize=(10, 5))
                plt.plot(
                    range(n_example // step_bins),
                    torch.Tensor(self.g_loss_history[:n_example]).view(-1, step_bins).mean(1),
                    label="Generator loss"
                )
                plt.plot(
                    range(n_example // step_bins),
                    torch.Tensor(self.d_loss_history[:n_example]).view(-1, step_bins).mean(1),
                    label="Discriminator loss"
                )
                plt.legend()
                plt.savefig(path+'/loss-%04d.png'%epoch)
                plt.close(None)
            ### Model saving ###
            if epoch % save_step == 0:
                state = {
                    'z_dim': self.z_dim,
                    'n_classes': self.n_classes,
                    'img_size': self.img_size,
                    'gen': self.generator,
                    'disc': self.discriminator,
                    'd_opt': self.d_opt,
                    'g_opt': self.g_opt,
                    'd_loss_history': torch.Tensor(self.d_loss_history),
                    'g_loss_history': torch.Tensor(self.g_loss_history),
                    'z': self.z,
                    'start_epoch': epoch + 1,
                }
                torch.save(state, path+'/cgan-%04d.h5'%epoch)
|
[
"jammalenneiym@gmail.com"
] |
jammalenneiym@gmail.com
|
65b6df87c504933ef7b372aea5898baeca87f534
|
f2307fcc2c42e38d7a7699354cbc5db0fa737c3e
|
/client.py
|
eeae53b82571534ff7806cc06be583209ca3a887
|
[] |
no_license
|
Ahmed-Masoud/HTTP-SERVER
|
6b852406376c8c7361951a241eb3a4e063aea9ae
|
a25e997ab942de24b552dd6a3ef6a30ce3e32ac1
|
refs/heads/master
| 2021-01-20T09:55:20.521193
| 2016-10-30T21:43:35
| 2016-10-30T21:43:35
| 72,087,361
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,215
|
py
|
import socket
# Minimal command-line client for a custom GET/POST file protocol.
# NOTE(review): this is Python 2 code — raw_input() and sending str over a
# socket both fail on Python 3. A port would use input() and encode/decode
# bytes at the socket boundary; verify against how the matching server
# frames its responses.
# create a TCP/IPv4 socket object
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# request input from user, expected as: METHOD FILENAME HOST [PORT]
request = raw_input('Enter a file request: ')
# parse input to get the method, file name, host and optional port number
my_list = request.split(" ")
method = my_list[0]
fileName = my_list[1]
host = my_list[2]
if len(my_list) == 4:
    port = int(my_list[3])
else:
    port = 80
# connection to hostname on the port.
try:
    s.connect((host, port))
except:
    # NOTE(review): the bare except hides the real failure (DNS error,
    # refusal, timeout); catching socket.error and printing it would help.
    print("connection refused !!")
    quit()
if method == "GET":
    # Send the raw request line, then read status followed by the body.
    s.send(request)
    response_code = s.recv(512)
    print("Status Code : "+response_code+"\n")
    if response_code == "404 Not Found":
        quit()
    # Stream the body into a local file of the requested name, 512B at a time.
    text_file = open(fileName, "w+")
    while True:
        serverMsg = s.recv(512)
        if not serverMsg:
            break
        print(serverMsg)
        text_file.write(serverMsg)
    text_file.close()
elif method == "POST":
    # Upload: confirm the local file exists before sending the request.
    try:
        f = open(fileName, 'r')
    except:
        print("No Such File !!")
        quit()
    s.send(request)
    response_code = s.recv(512)
    print("Status Code : "+response_code+"\n")
    # Ship the file to the server in <=512-byte line chunks.
    while True:
        data = f.readline(512)
        if not data:
            break
        s.send(data)
    f.close()
|
[
"ahmedmasoud@AhmedMasoud.local"
] |
ahmedmasoud@AhmedMasoud.local
|
8735a84fe481b35e5e0bc98c2b53e6a0f1d8876c
|
cb1c4fbf7ce4addaf5092b4951a34717a09b5584
|
/helloworld/hello_world.py
|
03f16b8b2e2ed3fca69ab0b536179c29d3c73f60
|
[] |
no_license
|
neetfreek/python-brushup
|
25c06c2a08929ba34a1422c40ece9470e97541fa
|
e6d8dc09eeb4fd51f040d23f4afbf3f9690e0466
|
refs/heads/master
| 2023-06-13T01:58:26.160531
| 2021-07-01T13:07:17
| 2021-07-01T13:07:17
| 374,714,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 608
|
py
|
from _datetime import datetime
def hello_world():
    """Say hello world, prefixed by a greeting for the current time of day."""
    _print_time_based_hello_world()
# Print good morning/afternoon/evening and hello world.
def _print_time_based_hello_world():
    """Emit one greeting line composed from the current local time."""
    greeting = _get_message_for_current_time()
    print(f"{greeting}, and hello, world!")
# Return a suitable message based on the current time.
def _get_message_for_current_time():
hour_now = datetime.now().hour
if hour_now < 12:
return "Good morning"
elif hour_now < 18:
return "Good afternoon"
else:
return "Good evening"
hello_world()
|
[
"jonathan.widdowson1@gmail.com"
] |
jonathan.widdowson1@gmail.com
|
6a244e5d202b43213040fc14188fe4cf309356c2
|
a7b78ab632b77d1ed6b7e1fa46c33eda7a523961
|
/src/foreign_if/python/UT/src/eigen/test_049.py
|
558da13e6a88495e2835d12cb1b59571e2a9938d
|
[
"BSD-2-Clause"
] |
permissive
|
frovedis/frovedis
|
80b830da4f3374891f3646a2298d71a3f42a1b2d
|
875ae298dfa84ee9815f53db5bf7a8b76a379a6f
|
refs/heads/master
| 2023-05-12T20:06:44.165117
| 2023-04-29T08:30:36
| 2023-04-29T08:30:36
| 138,103,263
| 68
| 13
|
BSD-2-Clause
| 2018-12-20T10:46:53
| 2018-06-21T01:17:51
|
C++
|
UTF-8
|
Python
| false
| false
| 926
|
py
|
#!/usr/bin/env python
# Smoke test: frovedis eigsh() on a symmetric CSR matrix with which='SM'
# (smallest-magnitude eigenvalues) should complete without raising.
import sys
from frovedis.exrpc.server import FrovedisServer
from frovedis.linalg import eigsh
from scipy.sparse import csr_matrix
desc = "Testing eigsh() for csr_matrix and which = 'SM': "
# initializing the Frovedis server
argvs = sys.argv
argc = len(argvs)
if argc < 2:
    print ('Please give frovedis_server calling command as the first argument \n'
           '(e.g. "mpirun -np 2 /opt/nec/frovedis/ve/bin/frovedis_server")')
    quit()
FrovedisServer.initialize(argvs[1])
# sample square symmetric sparse matrix (6x6)
mat = csr_matrix([[ 2.,-1., 0., 0.,-1., 0.], [-1., 3.,-1., 0.,-1., 0.],
                  [ 0.,-1., 2.,-1., 0., 0.], [ 0., 0.,-1., 3.,-1.,-1],
                  [-1.,-1., 0.,-1., 3., 0.], [ 0., 0., 0.,-1., 0., 1.]])
try:
    eigen_vals, eigen_vecs = eigsh(mat, k = 3, which = 'SM')
    print(desc, "Passed")
except:
    # NOTE(review): bare except hides the actual failure reason; consider
    # catching Exception and printing it alongside "Failed".
    print(desc, "Failed")
# Shut the server down whether the test passed or failed.
FrovedisServer.shut_down()
|
[
"takuy_araki@nec.com"
] |
takuy_araki@nec.com
|
622e8c48fd724272dd0e5b469fa3f82e3007373f
|
5a2b017c19cf937a79727072026729b3328b75ed
|
/python_fundamentals/venv/Scripts/pip-script.py
|
432f702cf3c08256c6ce57ee71eac4c899cfbe74
|
[] |
no_license
|
barstow123/python_stack
|
3efbf0e827c0270958ea3a6c6ce090a6a8383faa
|
bf530bac8cd9b18f6ad1ace7b3f15e294e4259da
|
refs/heads/master
| 2022-12-21T20:25:40.600932
| 2018-10-22T17:01:49
| 2018-10-22T17:01:49
| 151,271,753
| 0
| 1
| null | 2022-12-14T09:22:36
| 2018-10-02T14:48:19
|
Python
|
UTF-8
|
Python
| false
| false
| 465
|
py
|
#!"C:\Users\abars\Documents\Sublime programs\School Projects\python_stack\python_fundamentals\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
# NOTE: setuptools-generated console-script shim; regenerated on reinstall —
# do not edit by hand.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" wrapper suffix so pip sees a clean argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
|
[
"39165000+barstow123@users.noreply.github.com"
] |
39165000+barstow123@users.noreply.github.com
|
107f871602e5d1be87f6b15c3d28bfc62bf8fb3b
|
696bfb83e741b0ada656c633038098c5a4dcc78a
|
/manage.py
|
234ce79ac54748a952e4022175d094416f017afd
|
[] |
no_license
|
Smorta/Planning-Prom
|
dee5cb4186f27de596ee375bce78afd6243fb695
|
169c62f911ef3d3a6f32c949dad4592ca1072e38
|
refs/heads/main
| 2023-06-10T01:48:07.473233
| 2021-07-02T08:57:01
| 2021-07-02T08:57:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks for the GestioPro Django project."""
    # Point Django at this project's settings before anything imports them.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'GestioPro.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as err:
        hint = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        # Keep the original import failure attached as the cause.
        raise ImportError(hint) from err
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
|
[
"noreply@github.com"
] |
Smorta.noreply@github.com
|
6bfdd66d686ef7efaf2da0136d5f0114fbfc0a74
|
6af78ebddf5354d0a61612e36e0ae22304be9210
|
/venv/Scripts/pip3.8-script.py
|
b1bc705446e2069def9bdcf9c1bd95c06b274905
|
[] |
no_license
|
webclinic017/finance-9
|
f87f537c2965836e4025898bffad9beb111f07f4
|
d26b35e4186f7d4b1b1fced055950223e4017d25
|
refs/heads/master
| 2023-01-13T04:31:03.737958
| 2020-11-22T05:08:11
| 2020-11-22T05:08:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
#!C:\Users\pjk14\Desktop\Python\venv\finance\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==20.2.3','console_scripts','pip3.8'
# NOTE: setuptools-generated console-script shim; regenerated on reinstall —
# do not edit by hand.
__requires__ = 'pip==20.2.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" wrapper suffix so pip sees a clean argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==20.2.3', 'console_scripts', 'pip3.8')()
    )
|
[
"pjk1413@gmail.com"
] |
pjk1413@gmail.com
|
4e4d077903b600f877450602ac08672e8025466d
|
aad25c18666ccd481501e25116003d96456504d1
|
/todo/admin.py
|
2477a7e2a8b7e6469e91a93a2d2b3d76a0f39431
|
[] |
no_license
|
exuberantcoyote/mgt659-todo-django
|
b70059eb6d038675366289a7428e3c54e3544023
|
7ebe46a0101c91ce1671781a16673f50d62d3a61
|
refs/heads/master
| 2021-01-10T12:08:12.018637
| 2016-02-19T01:57:54
| 2016-02-19T01:57:54
| 51,622,129
| 0
| 0
| null | 2016-02-15T21:47:49
| 2016-02-12T22:58:52
|
Python
|
UTF-8
|
Python
| false
| false
| 150
|
py
|
from django.contrib import admin
# Register your models here.
from . import models
# Expose the User and Task models in the Django admin site.
admin.site.register(models.User)
admin.site.register(models.Task)
|
[
"trenttolley@gmail.com"
] |
trenttolley@gmail.com
|
178985974999fd81c154f0575eef230e8d729aa0
|
cf23c35b7ea10655c3d072e2b7215ab56041f814
|
/Ottieni_matrici_Rettificazione.py
|
757bbf80df25e4f892899acad34a6d2c2d94ad5c
|
[] |
no_license
|
GiuseppeCannata/StereoVision
|
4e6a8129dade58cc68a1f3c1ffdddf91a47f2482
|
f82c4d64509f923c8e1b46be443520da9cbc0f14
|
refs/heads/master
| 2021-07-17T10:27:23.295334
| 2020-05-14T21:04:38
| 2020-05-14T21:04:38
| 156,020,484
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,867
|
py
|
import numpy as np
import cv2
"""
Modulo per ottenere le matrici di rettificazione
"""
# === CONFIGURATION =======================================================
Folder_save_calib = '/home/giuseppe/Scrivania/ResultCalib' # folder where the calibration results (matrices) are stored
Folder_to_save_rect = '/home/giuseppe/Scrivania/ResultRect' # folder where the rectification results will be saved
# Paths of the two images to rectify (placeholders — fill in before running)
nome_img_left = "/home/giuseppe/Scrivania/_ "
nome_img_right = "/home/giuseppe/Scrivania/_ "
# =======================================================================
def leggi_da_file_matrici(folder, nome_file):
    """Load and return a matrix stored in NumPy .npy format at folder + nome_file."""
    return np.load(folder + nome_file)
def Salva_su_file(folder, nome_file, elemento_da_salvare):
    """Persist *elemento_da_salvare* in .npy format at folder + nome_file."""
    destination = folder + nome_file
    np.save(destination, elemento_da_salvare)
im_left = cv2.imread(nome_img_left)
im_right = cv2.imread(nome_img_right)
# READ THE CALIBRATION MATRICES
mtx_left = leggi_da_file_matrici(Folder_save_calib , "/Matrice_Intrinseca_Sx.npy")
mtx_right = leggi_da_file_matrici(Folder_save_calib , "/Matrice_Intrinseca_Dx.npy")
dist_left = leggi_da_file_matrici(Folder_save_calib , "/Matrice_Distorsione_Sx.npy")
dist_right = leggi_da_file_matrici(Folder_save_calib , "/Matrice_Distorsione_Dx.npy")
R = leggi_da_file_matrici(Folder_save_calib , "/R.npy")
T = leggi_da_file_matrici(Folder_save_calib , "/T.npy")
# GENERATE THE RECTIFICATION MATRICES
R1 = np.zeros((3,3)) # R1 --> output 3x3 matrix, rectification transform (rotation matrix) for the first camera
R2 = np.zeros((3,3)) # R2 --> Output 3x3 rectification transform (rotation matrix) for the second camera.
# The left and right views of the stereo camera are shifted relative to each
# other along the x axis (and may show a small vertical offset along y).
# In the rectified images the corresponding epipolar lines of the left and
# right cameras are horizontal and share the same y coordinate, so matching
# (homologous) points lie on the same row.
P1 = np.zeros((3,4)) # output 3x4 projection matrix for the first camera
P2 = np.zeros((3,4)) # output 3x4 projection matrix for the second camera
Q = np.zeros((4,4)) # output 4x4 disparity-to-depth mapping matrix
cv2.stereoRectify( mtx_left, dist_left, mtx_right, dist_right, (640,480), R, T, R1, R2, P1, P2, Q, flags = cv2.CALIB_ZERO_DISPARITY)
# CALIB_ZERO_DISPARITY makes the principal points of the two cameras coincide
# (cy1 == cy2), so objects at infinity end up with zero disparity.
# Remember: nearby objects have high disparity, distant objects low disparity.
# SAVE THE RECTIFICATION MATRICES
Salva_su_file(Folder_to_save_rect, "/Matrice_R1.npy", R1)
Salva_su_file(Folder_to_save_rect, "/Matrice_R2.npy", R2)
Salva_su_file(Folder_to_save_rect, "/Matrice_P1.npy", P1)
Salva_su_file(Folder_to_save_rect, "/Matrice_P2.npy", P2)
|
[
"thebloodofjazz29@gmail.com"
] |
thebloodofjazz29@gmail.com
|
7ca78d0c38317f6f641d8132aba60941648200ba
|
de0cabc94e287cec4ff07c186cc6c708eab168e2
|
/flea/translate.py
|
14576b0fd88fb755aa69c497aa33c9551ea9cca9
|
[
"MIT"
] |
permissive
|
chudym2/flea-pipeline
|
453536af2838c6f25c30765f4f4465da5a9d6591
|
2bb29d793a1c35a2f344cca70d1f3b768a5760cc
|
refs/heads/master
| 2022-03-18T13:51:30.068639
| 2018-08-31T20:24:53
| 2018-08-31T20:24:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,045
|
py
|
#!/usr/bin/env python
"""
Translate DNA reads from a fasta file.
"""
import sys
import click
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC, Gapped
from flea.util import insert_gaps
def _translate(record, gapped=False):
    """Return a copy of *record* with its DNA sequence translated to protein.

    When ``gapped`` is true, gaps are removed before translation and then
    re-inserted (as '---' per codon gap) so the protein stays aligned.
    """
    out = record[:]
    if not gapped:
        out.seq = record.seq.translate()
        return out
    protein = record.seq.ungap('-').translate()
    realigned = insert_gaps(str(record.seq), str(protein), '---', '-')
    out.seq = Seq(realigned, alphabet=Gapped(IUPAC.IUPACProtein))
    return out
def translate(infile, outfile, gapped=False):
    """Translate DNA fasta records from *infile*, writing protein fasta to *outfile*."""
    # Allow '-' characters in the input alphabet only when gaps are expected.
    alphabet = Gapped(IUPAC.ambiguous_dna) if gapped else IUPAC.ambiguous_dna
    records = SeqIO.parse(infile, "fasta", alphabet=alphabet)
    translated = (_translate(rec, gapped) for rec in records)
    SeqIO.write(translated, outfile, "fasta")
@click.command()
@click.option('-g', '--gapped', is_flag=True, help='allow gaps')
def main(gapped):
    # CLI entry point: translate FASTA on stdin to protein FASTA on stdout.
    translate(sys.stdin, sys.stdout, gapped)
if __name__ == "__main__":
    main()
|
[
"kemal@kemaleren.com"
] |
kemal@kemaleren.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.