Dataset schema:

| Column | Type | Range / Classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–281 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 6–116 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k–668M (nullable) |
| star_events_count | int64 | 0–102k |
| fork_events_count | int64 | 0–38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4–6.02M |
| extension | string | 78 classes |
| content | string | length 2–6.02M |
| authors | list | length 1 |
| author | string | length 0–175 |
a695bc06ce684d2ee84feed4520602f58645f929
|
c4991e2c816b97f36cdd6de8f3fe235e72b8ac1c
|
/laporan/helper_scripts/cleancsv.py
|
0dd91afe1126239f52d8737882e5e64d020486b4
|
[] |
no_license
|
Capstone-Project-B21-CAP0113/ml-tf
|
6bfc5a831b46273ae7e79530cacc411584d6226b
|
a5fe62687e0aa8b420c0b6baf3d81756cc26cedf
|
refs/heads/main
| 2023-05-10T02:51:25.779101
| 2021-06-09T06:45:39
| 2021-06-09T06:45:39
| 368,582,369
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
import csv
import os
import string
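# Clean the raw "laporan" CSV: lowercase each text field, strip punctuation,
# and keep only rows whose label is not "invalid" before re-writing the file.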
filepath = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(filepath, "..", "raw_data", "laporan.csv")
targetname = os.path.join(filepath, "..", "cleaned_data", "laporan.csv")
rows = []
with open(filename, "r", newline="", encoding="ISO-8859-1") as file:
reader = csv.reader(file, delimiter=";")
for row in reader:
text, label = row
text = text.lower().strip().translate(str.maketrans("", "", string.punctuation))
label = label.strip().lower()
if label != "invalid":
rows.append([text, label])
with open(targetname, "w+", newline="", encoding="ISO-8859-1") as file:
writer = csv.writer(file, delimiter=";")
for row in rows:
writer.writerow(row)
|
[
"mrizki.agungp@gmail.com"
] |
mrizki.agungp@gmail.com
|
62944c9463507cf794cdc4d07af1d651c8746200
|
16a1e6c3e07c60e855500f492dc48c59df7977c1
|
/src/simple_blog/migrations/0005_auto_20190703_1213.py
|
ad9c84e5ff12b71ccf50642eae856edce0a94fb6
|
[] |
no_license
|
polly-morphism/django_learn
|
fc0d5ebe3697423715e6a3ec5bee115893edd0da
|
62666d534ee755f66c7ef45a059a250b69dc7bda
|
refs/heads/master
| 2023-04-30T00:51:47.406880
| 2019-07-04T14:17:42
| 2019-07-04T14:17:42
| 194,562,810
| 0
| 0
| null | 2023-04-21T20:40:27
| 2019-06-30T21:26:09
|
Python
|
UTF-8
|
Python
| false
| false
| 392
|
py
|
# Generated by Django 2.0.2 on 2019-07-03 12:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('simple_blog', '0004_auto_20190702_1025'),
]
operations = [
migrations.AlterField(
model_name='blogpost',
name='title',
field=models.CharField(max_length=120),
),
]
|
[
"polinasirko@gmail.com"
] |
polinasirko@gmail.com
|
8efeda44d905898ff678ae343caf148717963d54
|
38c606ed14564591c1aa6e65c7dab255aebf76f9
|
/0x11-python-network_1/5-hbtn_header.py
|
2c76d103cd07681c5295f2c7dd0ea62d4798e81a
|
[] |
no_license
|
camilooob/holbertonschool-higher_level_programming
|
d7cee708b308bed86fcc384d7451de26fa9cafaa
|
db9b6760e7e4998c5f00a4f2cfeb17ec14e44cab
|
refs/heads/master
| 2020-09-29T00:19:25.736344
| 2020-05-15T01:34:32
| 2020-05-15T01:34:32
| 226,900,553
| 1
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
#!/usr/bin/python3
"""the package request"""
if __name__ == "__main__":
import requests
import sys
r = requests.get(sys.argv[1])
print(r.headers.get('X-Request-Id'))
|
[
"camilobaq@hotmail.com"
] |
camilobaq@hotmail.com
|
ec38fded91da8c42d88d6db25141cbe9973e26e2
|
716fd93c7de69e10e54ed1efef60bd91b7c2f118
|
/main/ByteFeatureExtraction.py
|
b7a82b52ba385b87489c439418c47005d888d7c6
|
[] |
no_license
|
sharminpathan/microsoft-malware-classsification-challenge
|
bba58b7ae86c24ff053a417d00a8f54e5b92f8aa
|
d1749817fbcd0205a3598aa21d724b7d12f192af
|
refs/heads/master
| 2021-06-08T15:13:18.400086
| 2016-09-23T18:42:55
| 2016-09-23T18:42:55
| 71,841,253
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,672
|
py
|
from __future__ import print_function
from pyspark.sql import SQLContext, Row, SparkSession
import sys
from pyspark.sql.types import *
import re
import os
# sc = SparkContext(conf=SparkConf().setAppName("MalwareClassifier"))
spark = SparkSession \
.builder \
.appName("OpcodeExtraction") \
.getOrCreate()
sc = spark.sparkContext
sqlContext = SQLContext(sc)
##############################################
#Method for preprocessing byte features from testing dataset
##############################################
def cleanDoc(bytefileData):
# Removing unwanted items from the list.
filteredFile = re.sub("\?|\n|\r", "", bytefileData)
# Removing line pointers.
removePointer = [word.encode('utf-8') for word in filteredFile.split() if len(word) < 3]
return removePointer
def getTrainingData(keyId,accessKey,dataset,label):
# =========================================================================
# Access Key and secret key necessary to read data from Amazon S3
# =========================================================================
sc._jsc.hadoopConfiguration().set('fs.s3n.awsAccessKeyId', keyId)
sc._jsc.hadoopConfiguration().set('fs.s3n.awsSecretAccessKey', accessKey)
hashFileData = sc.textFile("s3n://eds-uga-csci8360/data/project2/labels/"+dataset+".txt").map(
lambda doc: doc.encode("utf-8").strip())
entirehashFileData = hashFileData.zipWithIndex().map(lambda doc: (doc[1], doc[0])).cache()
# =========================================================================
# Reading (hashcode).bytes file from s3:
# Cleaning of the data through function cleanDoc()
# =========================================================================
byteFile = hashFileData.map(lambda doc: ("s3n://eds-uga-csci8360/data/project2/binaries/" + doc + ".bytes"))
filePath = byteFile.reduce(lambda str1, str2: str1 + "," + str2)
byteFileCollect = sc.wholeTextFiles(filePath,36)
cleanFile = byteFileCollect.map(lambda doc: (doc[0].encode('utf-8'), cleanDoc(doc[1])))
wholeTextFileNameRDD = cleanFile.map(lambda (x, y): (os.path.splitext(os.path.basename(x))[0], y))
print("Step 2 done")
# =========================================================================
# Reading label file from s3
# =========================================================================
labelData = sc.textFile("s3n://eds-uga-csci8360/data/project2/labels/"+label+".txt").map(
lambda doc: doc.encode("utf-8").strip()).cache()
entireLabelData = labelData.zipWithIndex().map(lambda doc: (doc[1], doc[0]))
print("Step 3 done")
# =========================================================================
# Joining RDD's of HashFile,Label and content
# =========================================================================
hashFileLablePair = entirehashFileData.join(entireLabelData, numPartitions=2)
hashFileLableRDD = hashFileLablePair.values()
hashFileLableRDDPair = hashFileLableRDD.keyBy(lambda line: line[0]).mapValues(lambda line: line[1])
dataSet = hashFileLableRDDPair.join(wholeTextFileNameRDD, numPartitions=2)
finalDataSetRDD = dataSet.map(lambda (x, y): (x, y[0], y[1]))
print("Step 4 done")
# =========================================================================
# creating DATAFRAME
# =========================================================================
schemaString = "hashcodefile label content"
fields = [StructField("hashcodefile", StringType(), True), StructField("label", StringType(), True),
StructField("content", ArrayType(StringType(), False), True)]
schema = StructType(fields)
schemaByte = spark.createDataFrame(finalDataSetRDD, schema)
schemaByte.write.parquet("trainingData.parquet")
print("Training data preprocessing completed")
#########################################################
#Method for parquet file generation of Testing dataset
#########################################################
def getTestingData(keyId,accessKey,dataset):
# =========================================================================
# Access Key and secret key necessary to read data from Amazon S3
# =========================================================================
sc._jsc.hadoopConfiguration().set('fs.s3n.awsAccessKeyId', keyId)
sc._jsc.hadoopConfiguration().set('fs.s3n.awsSecretAccessKey',accessKey)
# =========================================================================
# Reading training file from s3
# =========================================================================
hashFileTestData = sc.textFile("s3n://eds-uga-csci8360/data/project2/labels/"+dataset+".txt").map(
lambda doc: doc.encode("utf-8").strip())
# =========================================================================
# Reading (hashcode).bytes file from s3
# Read bytes file from s3 and stored it in RDD format (Filename, FileData)
# Cleaning of the data through function cleanDoc()
# =========================================================================
byteTestFile = hashFileTestData.map(lambda doc: ("s3n://eds-uga-csci8360/data/project2/binaries/" + doc + ".bytes"))
testFilePath = byteTestFile.reduce(lambda str1, str2: str1 + "," + str2)
byteTestFileCollect = sc.wholeTextFiles(testFilePath, 36)
cleanTestFile = byteTestFileCollect.map(lambda doc: (doc[0].encode('utf-8'), cleanDoc(doc[1])))
wholeTestTextFileNameRDD = cleanTestFile.map(lambda (x, y): (os.path.splitext(os.path.basename(x))[0], y))
# =========================================================================
# creating DATAFRAME
# =========================================================================
schemaString = "hashcodefile label features"
fields = [StructField("hashcodefile", StringType(), True),
StructField("features", ArrayType(StringType(), False), True)]
schema = StructType(fields)
schemaTestByte = spark.createDataFrame(wholeTestTextFileNameRDD, schema)
# =========================================================================
# Reading and writing to Parquet file file from s3
# =========================================================================
schemaTestByte.write.parquet("byteTestFile.parquet")
def main(args):
if(args[2]=='Testing'):
        getTestingData(keyId=args[0],accessKey=args[1],dataset=args[4])
if(args[3]=='Training'):
getTrainingData(keyId=args[0],accessKey=args[1],dataset=args[4],label=args[5])
if __name__ == "__main__":
main(sys.argv)
|
[
"noreply@github.com"
] |
noreply@github.com
|
e3f8fa6106a55397778e7a92beef1263e413f784
|
794a797aa4991455f563e895c8d7fcc1ee027d4b
|
/web_proj/users/views.py
|
e59a449601f1a39c6edd480ba30902a4e5553ab2
|
[] |
no_license
|
INT31302/destroy-django
|
95501f063550cfce46addef433024564e5217fd5
|
2784cdc92418ddf8db9db7ecaab86f26b6281946
|
refs/heads/master
| 2021-01-04T10:37:29.215056
| 2020-02-26T11:49:18
| 2020-02-26T11:49:18
| 240,507,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,674
|
py
|
from django.shortcuts import render, redirect
from .forms import UserLoginForm, UserCreationForm
from django.contrib.auth import get_user_model
from django.contrib import auth
from django.http import HttpResponseRedirect
from django.urls import reverse
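# Session-based auth views: login() authenticates with user_id/password,
# logout() ends the session, and register() creates a user via UserCreationForm.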
def login(request):
if request.method == "GET":
context = {
'form': UserLoginForm()
}
return render(request, 'users/login.html', context)
if request.method == "POST":
user_id = request.POST.get('user_id')
password = request.POST.get('password')
user = auth.authenticate(request, user_id=user_id, password=password)
if user is not None:
auth.login(request, user)
return HttpResponseRedirect(reverse('main'))
else:
context = {
'form': UserLoginForm(),
'error': 'username or password is incorrect'
}
return render(request, 'users/login.html', context)
def logout(request):
auth.logout(request)
return redirect('main')
def register(request):
if request.method == "GET":
context = {
'form': UserCreationForm()
}
return render(request, 'users/register.html', context)
if request.method == "POST":
User = get_user_model()
form = UserCreationForm(request.POST)
if form.is_valid():
user = form.save()
auth.login(request, user)
return HttpResponseRedirect(reverse('main'))
else:
context = {
'form': UserCreationForm()
}
return render(request, 'users/register.html', context)
|
[
"tkdwo287@gmail.com"
] |
tkdwo287@gmail.com
|
a65c10c99ffc0188afff812eb4dd157e925471de
|
62389833015a808eb204265745cd77d97e40fc66
|
/tests/nodeos_voting_test.py
|
720b4150211714c89d63892d1dce3aec417a00f8
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
amadeobrands/eos
|
9b9c1597608e85505a4d6ded91c27fd8231bf8f3
|
74c6c27e9555aa093355625fceba86f2a072d9b9
|
refs/heads/master
| 2020-03-21T16:45:25.245770
| 2018-06-26T18:53:08
| 2018-06-26T18:53:08
| 138,791,617
| 1
| 0
|
MIT
| 2018-06-26T20:47:21
| 2018-06-26T20:47:21
| null |
UTF-8
|
Python
| false
| false
| 930
|
py
|
#!/usr/bin/env python3
import testUtils
import decimal
import re
###############################################################
# nodeos_voting_test
# --dump-error-details <Upon error print etc/eosio/node_*/config.ini and var/lib/node_*/stderr.log to stdout>
# --keep-logs <Don't delete var/lib/node_* folders upon test completion>
###############################################################
Print=testUtils.Utils.Print
errorExit=testUtils.Utils.errorExit
cmdError=testUtils.Utils.cmdError
from core_symbol import CORE_SYMBOL
# create TestState with all common flags, except --mongodb
testState=testUtils.TestState("--mongodb")
args = testState.parse_args()
testState.cluster=testUtils.Cluster(walletd=True)
testState.pnodes=4
testState.totalNodes=4
testState.totalProducers=testState.pnodes*21
try:
testState.start(testUtils.Cluster(walletd=True))
testState.success()
finally:
testState.end()
exit(0)
|
[
"johnc@objectcomputing.com"
] |
johnc@objectcomputing.com
|
858cc9748de831e9a2a85e7db00bd03e602606c5
|
dccac0b6a21d2110fa6045b1d6a64c22280104ce
|
/givinggraph/companycause/company_cause_pickler.py
|
41d1db121175a378b19d9bb82c835275a130efdb
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
erichilarysmithsr/givinggraph
|
cf3319e823c3d1c15b021430fdfe26b4719acb2e
|
9f48a8767d9672079a616676a1c29054672fb8d3
|
refs/heads/master
| 2021-01-20T17:03:47.775235
| 2020-08-31T12:20:55
| 2020-08-31T12:20:55
| 35,964,712
| 0
| 1
|
MIT
| 2020-08-31T12:20:56
| 2015-05-20T18:00:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,537
|
py
|
#!/usr/bin/env python
#
# Description: This takes the company-cause relationship
# data and pickles it for the classifier to run on.
#
import argparse
from collections import defaultdict
import io
import re
import pickle
import string
company_words = set()
punct_re = re.compile('[%s]' % re.escape(string.punctuation))
def read_causes(filename):
global company_words
causes = set()
co2causes = defaultdict(lambda: set())
for line in io.open(filename, mode='rt'):
parts = line.strip().split('\t')
co2causes[parts[2]].add(parts[7])
causes.add(parts[7])
company_words |= set(do_tokenize(parts[2]))
return co2causes, causes
def do_tokenize(s):
s = punct_re.sub(' ', s.lower())
s = re.sub('\s+', ' ', s)
return s.strip().split()
def tokenize(s):
global company_words
toks = do_tokenize(s)
return [t for t in toks if t not in company_words]
def read_pages(filename, co2causes):
"""Read company web page file, retaining only those in co2causes"""
co2page = dict()
for line in io.open(filename, mode='rt', encoding='latin_1'):
parts = line.strip().split('\t')
if parts[1] in co2causes and len(parts) > 2:
co2page[parts[1]] = ' '.join([parts[i] for i in range(3,len(parts),2)])
return co2page
if __name__ == '__main__':
global company_words
ap = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
ap.add_argument('--homepages',
metavar='HOMEPAGES',
default='company_aboutus.tsv',
help='file in format company_name<TAB>web_text')
ap.add_argument('--causes',
metavar='CAUSES',
default='company_causes.tsv',
help='file in format company_name<TAB>cause . Note that companies may appear more than once.')
args = ap.parse_args()
company2causes, causes = read_causes(args.causes)
print 'read %d companies with causes' % len(company2causes.keys())
company2page = read_pages(args.homepages, company2causes)
    print 'read %d homepages' % len(company2page.keys())
# Pickle results
pickle.dump((dict(company2causes),causes,company2page,company_words),open('company_cause_results.p','wb'))
|
[
"atqamar@gmail.com"
] |
atqamar@gmail.com
|
fb6fe6a61827c33a91fb6298e582ac17bc9a7dcd
|
f03fe75e6506394ef337b24a9c546f5a09e4e0ba
|
/mountapi/http/response.py
|
6bd7275b1ad3f57acd9e2018899fc0a58089f2e0
|
[
"MIT"
] |
permissive
|
pszpetkowski/mountAPI
|
d75911faab4bd06f749612bb1f21f026003aca5e
|
a9b47fa316857797ed98dbc7717f62f4f75ab07b
|
refs/heads/master
| 2022-01-20T11:55:16.840465
| 2018-05-24T19:52:46
| 2018-05-24T19:52:46
| 134,129,912
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,346
|
py
|
import time
from wsgiref.handlers import format_date_time
from mountapi.http.exceptions import HttpClientError
from mountapi.http.status import Status, HTTP_200_OK
from mountapi.lib import json
class Response:
def __init__(self, content=None, status: Status = None):
if isinstance(content, str):
self._content = content.encode()
elif isinstance(content, dict):
self._content = json.dumps(content).encode()
elif content is not None:
raise ValueError('Response can only be made using str or dict')
else:
self._content = None
self._status: Status = status or HTTP_200_OK
@classmethod
def from_result(cls, result):
if isinstance(result, Response):
return result
else:
return Response(result)
@classmethod
def from_http_client_error(cls, e: HttpClientError):
return Response(e.message, e.status)
def to_bytes(self) -> bytes:
return (
b'HTTP/1.1 %i %b\r\n'
b'Date: %b\r\n'
b'Connection: closed\r\n'
b'Content-Type: application/json\r\n\r\n'
b'%b' % (
self._status.code, self._status.reason,
format_date_time(time.time()).encode(),
self._content,
)
)
|
[
"piotr.szpetkowski@pyquest.space"
] |
piotr.szpetkowski@pyquest.space
|
dc366e5f9d3e23b0a058bfa1e95c81120b0bb442
|
c32ae13373f89c40496059d4d9fb18b9db9d320d
|
/Algorithms/AL_prim_algorithm.py
|
d2ceeb2393601b011920ea25c6bd77bfebf4ed7b
|
[] |
no_license
|
Roha-Lee/data-structure-and-algorithm-study
|
b65cfedc7ae21208ad4620bfaf448f087c35a1d4
|
27728070baea859ab6f9b611c20758ae68f4a3be
|
refs/heads/master
| 2023-08-21T06:26:04.314742
| 2021-10-05T15:59:08
| 2021-10-05T15:59:08
| 404,923,666
| 1
| 0
| null | 2021-10-05T15:59:09
| 2021-09-10T01:48:57
|
Python
|
UTF-8
|
Python
| false
| false
| 2,126
|
py
|
import heapq
from collections import defaultdict
from heapdict import heapdict
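# Two Prim's-algorithm variants: prim() pushes candidate edges onto a binary heap,
# while prim_advanced() keeps per-vertex keys in a heapdict so they can be lowered in place.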
def prim(graph):
new_edges = []
adjacent_edges = defaultdict(list)
for w, s, t in graph['edges']:
adjacent_edges[s].append((w, s, t))
adjacent_edges[t].append((w, t, s))
queue = []
nodes = graph['vertices']
curr = nodes[0]
linked = {curr}
while True:
for item in adjacent_edges[curr]:
if item[2] not in linked:
heapq.heappush(queue, item)
edge = heapq.heappop(queue)
while edge[2] in linked:
edge = heapq.heappop(queue)
linked.add(edge[2])
curr = edge[2]
new_edges.append(edge)
if len(linked) == len(nodes):
break
return {'vertices': graph['vertices'], 'edges': new_edges}
def prim_advanced(graph):
new_edges = []
queue = heapdict()
start_v = graph['vertices'][0]
from_where = dict()
adjacent_edges = defaultdict(list)
for w, s, t in graph['edges']:
adjacent_edges[s].append((w, s, t))
adjacent_edges[t].append((w, t, s))
for vertice in graph['vertices']:
queue[vertice] = float('Inf')
from_where[vertice] = None
queue[start_v] = 0
from_where[start_v] = start_v
while queue:
node, curr_weight = queue.popitem()
new_edges.append((curr_weight, from_where[node], node))
for w, s, t in adjacent_edges[node]:
if t in queue and queue[t] > w:
queue[t] = w
from_where[t] = s
return {'vertices': graph['vertices'], 'edges': new_edges[1:]}
if __name__ == '__main__':
graph = {
'vertices': ['A','B','C','D','E','F','G'],
'edges': [
(7, 'A', 'B'),
(5, 'A', 'D'),
(8, 'B', 'C'),
(9, 'B', 'D'),
(5, 'C', 'E'),
(7, 'D', 'E'),
(6, 'D', 'F'),
(5, 'E', 'C'),
(8, 'E', 'F'),
(9, 'E', 'G'),
(11, 'F', 'G'),
]
}
print(prim_advanced(graph))
|
[
"rohagru@gmail.com"
] |
rohagru@gmail.com
|
6d5f990d8d3028f01181e6c34b162bd29b142c84
|
58035ab2fd07cb8e8951d0ef6190e3a5e0083e7d
|
/Intermediário/aula_41_excessoes_try_except/aula_41_try_except_condicional.py
|
c76b737bb835dec996e8f05e76922d8e9b513ae9
|
[] |
no_license
|
lavinomenezes/Curso-python-3-do-zero-ao-avancado
|
4fae6c2efee0b74c1012e6fdcc9f834289ba329e
|
2da94bfa32d6f178e91b07a08a7f94972d4444c5
|
refs/heads/main
| 2023-08-16T09:55:09.466218
| 2021-10-05T12:17:15
| 2021-10-05T12:17:15
| 413,801,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
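# Try to convert the input to int, then to float; return None if neither conversion succeeds.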
def conv(v):
try:
v = int(v)
return v
except ValueError:
try:
v = float(v)
return v
except:
pass
n = conv(input())
if n is not None:
print(n * 5)
else:
print("Isso não é numero")
|
[
"lprogramacao@protonmail.com"
] |
lprogramacao@protonmail.com
|
0332091c980a247b508924dc4e03710be5f08839
|
b0856a2d66cc4c71705b8c16c169848070294cf6
|
/removeDupSortedArray.py
|
3f60a7ee970822ff9418506693aa240504fabb51
|
[] |
no_license
|
jfriend08/LeetCode
|
9e378ff015edc3102a4785b0832cf0eeb09f5fc2
|
f76d3cf2e7fd91767f80bd60eed080a7bad06e62
|
refs/heads/master
| 2021-01-21T19:28:25.354537
| 2016-01-15T04:53:11
| 2016-01-15T04:53:11
| 28,518,328
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 832
|
py
|
'''
Follow up for "Remove Duplicates":
What if duplicates are allowed at most twice?
For example,
Given sorted array nums = [1,1,1,2,2,3],
Your function should return length = 5, with the first five elements of nums being 1, 1, 2, 2 and 3.
It doesn't matter what you leave beyond the new length.
'''
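# Approach below: count occurrences of each value, then rebuild a list that keeps
# at most two copies per value and return its length (nums itself is not modified in place).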
class Solution(object):
def removeDuplicates(self, nums):
if not nums:
return 0
mapCount = {}
maxNum = nums[-1]
for num in nums:
try:
mapCount[num] += 1
except:
mapCount[num] = 1
res = []
for num in xrange(maxNum+1):
if num in mapCount:
res += ( [num] if mapCount[num]==1 else [num, num])
return len(res)
sol = Solution()
print sol.removeDuplicates([1,1,1,2,2,3])
print sol.removeDuplicates([])
|
[
"ys486@cornell.edu"
] |
ys486@cornell.edu
|
03690b2f621b493ce7076a3b1f0858c93e63b9bf
|
84825a00091d9d3bb6a1f57b79904d9614cefe95
|
/train_ignore.py
|
6f1686b92e3c08040e27993860ba4869ef98860d
|
[] |
no_license
|
jkooy/segmentation-ignore
|
3039db6d23f3b569ff0159f341c5f03938f8dd3b
|
e0f35b1214ada8391346e15df7abff526592e3ed
|
refs/heads/master
| 2022-12-15T08:07:11.943541
| 2020-09-13T22:56:25
| 2020-09-13T22:56:25
| 288,622,525
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,163
|
py
|
import argparse
import os
import numpy as np
from tqdm import tqdm
from mypath import Path
from dataloaders import make_data_loader
from modeling.sync_batchnorm.replicate import patch_replication_callback
from modeling.deeplab import *
from utils import SegmentationLosses
from utils.calculate_weights import calculate_weigths_labels
from utils.lr_scheduler import LR_Scheduler
from utils.saver import Saver
from utils.summaries import TensorboardSummary
from utils.metrics import Evaluator
from weight_net import *
import copy
import torch
class Trainer(object):
def __init__(self, args):
self.args = args
# Define Saver
self.saver = Saver(args)
self.saver.save_experiment_config()
# Define Tensorboard Summary
self.summary = TensorboardSummary(self.saver.experiment_dir)
self.writer = self.summary.create_summary()
# Define Dataloader
kwargs = {'num_workers': args.workers, 'pin_memory': True}
self.train_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(args, **kwargs)
# for VOC self.test_loader is None
# Define network
model = DeepLab(num_classes=self.nclass,
backbone=args.backbone,
output_stride=args.out_stride,
sync_bn=args.sync_bn,
freeze_bn=args.freeze_bn)
v_model = v_DeepLab(num_classes=self.nclass,
backbone=args.backbone,
output_stride=args.out_stride,
sync_bn=args.sync_bn,
freeze_bn=args.freeze_bn)
self.vnet = VNet(1, 100, 1).cuda()
train_params = [{'params': model.get_1x_lr_params(), 'lr': args.lr},
{'params': model.get_10x_lr_params(), 'lr': args.lr * 10}]
v_model_train_params = [{'params': model.get_1x_lr_params(), 'lr': args.lr},
{'params': model.get_10x_lr_params(), 'lr': args.lr * 10}]
# Define Optimizer
optimizer = torch.optim.SGD(train_params, momentum=args.momentum,
weight_decay=args.weight_decay, nesterov=args.nesterov)
self.optimizer_v_model = torch.optim.SGD(v_model_train_params, momentum=args.momentum,
weight_decay=args.weight_decay, nesterov=args.nesterov)
self.optimizer_vnet = torch.optim.Adam(self.vnet.params(), 1e-3,
weight_decay=1e-4)
# Define Criterion
# whether to use class balanced weights
if args.use_balanced_weights:
classes_weights_path = os.path.join(Path.db_root_dir(args.dataset), args.dataset + '_classes_weights.npy')
if os.path.isfile(classes_weights_path):
weight = np.load(classes_weights_path)
else:
weight = calculate_weigths_labels(args.dataset, self.train_loader, self.nclass)
weight = torch.from_numpy(weight.astype(np.float32))
else:
weight = None
self.criterion = SegmentationLosses(weight=weight, cuda=args.cuda).build_loss(mode=args.loss_type)
self.valcriterion = SegmentationLosses(weight=weight, cuda=args.cuda).build_loss('ce')
self.model, self.v_model, self.optimizer = model, v_model, optimizer
# Define Evaluator
self.evaluator = Evaluator(self.nclass)
# Define lr scheduler
self.scheduler = LR_Scheduler(args.lr_scheduler, args.lr,
args.epochs, len(self.train_loader))
# Using cuda
if args.cuda:
self.model = torch.nn.DataParallel(self.model, device_ids=self.args.gpu_ids)
patch_replication_callback(self.model)
self.model = self.model.cuda()
# Resuming checkpoint
self.best_pred = 0.0
if args.resume is not None:
if not os.path.isfile(args.resume):
raise RuntimeError("=> no checkpoint found at '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
if args.cuda:
self.model.module.load_state_dict(checkpoint['state_dict'])
else:
self.model.load_state_dict(checkpoint['state_dict'])
if not args.ft:
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.best_pred = checkpoint['best_pred']
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
# Clear start epoch if fine-tuning
if args.ft:
args.start_epoch = 0
def training(self, epoch):
train_loss = 0.0
val_loader_iter = iter(self.val_loader)
self.model.train()
tbar = tqdm(self.train_loader)
num_img_tr = len(self.train_loader)
for i, sample in enumerate(tbar):
image, target = sample['image'], sample['label']
if self.args.cuda:
image, target = image.cuda(), target.cuda()
self.v_model.load_state_dict(self.model.state_dict())
output = self.v_model(image)
cost = self.criterion(output, target)
cost_v = torch.reshape(cost, (-1, 1))
v_lambda = self.vnet(cost_v.data)
l_f_v = torch.sum(cost_v * v_lambda) / len(cost_v)
self.v_model.zero_grad()
            grads = torch.autograd.grad(l_f_v, (self.v_model.params()), create_graph=True)
            v_lr = self.args.lr * ((0.1 ** int(epoch >= 80)) * (0.1 ** int(epoch >= 100)))  # For ResNet32
            self.v_model.update_params(lr_inner=v_lr, source_params=grads)
del grads
# phase 2. pixel weights step
try:
sample_val = next(val_loader_iter)
except StopIteration:
val_loader_iter = iter(self.val_loader)
sample_val = next(val_loader_iter)
inputs_val, targets_val = sample_val['image'], sample_val['label']
if self.args.cuda:
inputs_val, targets_val = inputs_val.cuda(), targets_val.cuda()
y_g_hat = self.v_model(inputs_val)
l_g_meta = self.valcriterion(y_g_hat, targets_val)
self.optimizer_vnet.zero_grad()
l_g_meta.backward()
self.optimizer_vnet.step()
# phase 1. network weight step (w)
output = self.model(image)
cost = self.criterion(output, target)
cost_v = torch.reshape(cost, (-1, 1))
with torch.no_grad():
v_new = self.vnet(cost_v)
loss = torch.sum(cost_v * v_new) / len(cost_v)
self.scheduler(self.optimizer, i, epoch, self.best_pred)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
train_loss += loss.item()
tbar.set_description('Train loss: %.3f' % (train_loss / (i + 1)))
self.writer.add_scalar('train/total_loss_iter', loss.item(), i + num_img_tr * epoch)
# Show 10 * 3 inference results each epoch
if i % (num_img_tr // 10) == 0:
global_step = i + num_img_tr * epoch
self.summary.visualize_image(self.writer, self.args.dataset, image, target, output, global_step)
self.writer.add_scalar('train/total_loss_epoch', train_loss, epoch)
print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0]))
print('Loss: %.3f' % train_loss)
if self.args.no_val:
# save checkpoint every epoch
is_best = False
self.saver.save_checkpoint({
'epoch': epoch + 1,
'state_dict': self.model.module.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_pred': self.best_pred,
}, is_best)
def validation(self, epoch):
self.model.eval()
self.evaluator.reset()
tbar = tqdm(self.val_loader, desc='\r')
test_loss = 0.0
for i, sample in enumerate(tbar):
image, target = sample['image'], sample['label']
if self.args.cuda:
image, target = image.cuda(), target.cuda()
with torch.no_grad():
output = self.model(image)
loss = self.valcriterion(output, target)
test_loss += loss.item()
tbar.set_description('Test loss: %.3f' % (test_loss / (i + 1)))
pred = output.data.cpu().numpy()
target = target.cpu().numpy()
pred = np.argmax(pred, axis=1)
# Add batch sample into evaluator
self.evaluator.add_batch(target, pred)
# Fast test during the training
Acc = self.evaluator.Pixel_Accuracy()
Acc_class = self.evaluator.Pixel_Accuracy_Class()
mIoU = self.evaluator.Mean_Intersection_over_Union()
FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union()
self.writer.add_scalar('val/total_loss_epoch', test_loss, epoch)
self.writer.add_scalar('val/mIoU', mIoU, epoch)
self.writer.add_scalar('val/Acc', Acc, epoch)
self.writer.add_scalar('val/Acc_class', Acc_class, epoch)
self.writer.add_scalar('val/fwIoU', FWIoU, epoch)
print('Validation:')
print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0]))
print("Acc:{}, Acc_class:{}, mIoU:{}, fwIoU: {}".format(Acc, Acc_class, mIoU, FWIoU))
print('Loss: %.3f' % test_loss)
new_pred = mIoU
if new_pred > self.best_pred:
is_best = True
self.best_pred = new_pred
self.saver.save_checkpoint({
'epoch': epoch + 1,
'state_dict': self.model.module.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_pred': self.best_pred,
}, is_best)
def main():
parser = argparse.ArgumentParser(description="PyTorch DeeplabV3Plus Training")
parser.add_argument('--backbone', type=str, default='resnet',
choices=['resnet', 'xception', 'drn', 'mobilenet'],
help='backbone name (default: resnet)')
parser.add_argument('--out-stride', type=int, default=16,
                        help='network output stride (default: 16)')
parser.add_argument('--dataset', type=str, default='pascal',
choices=['pascal', 'coco', 'cityscapes'],
help='dataset name (default: pascal)')
parser.add_argument('--use-sbd', action='store_true', default=True,
help='whether to use SBD dataset (default: True)')
parser.add_argument('--workers', type=int, default=4,
metavar='N', help='dataloader threads')
parser.add_argument('--base-size', type=int, default=513,
help='base image size')
parser.add_argument('--crop-size', type=int, default=513,
help='crop image size')
parser.add_argument('--sync-bn', type=bool, default=None,
help='whether to use sync bn (default: auto)')
parser.add_argument('--freeze-bn', type=bool, default=False,
help='whether to freeze bn parameters (default: False)')
parser.add_argument('--loss-type', type=str, default='ce',
choices=['ce', 'focal', 'pw'],
help='loss func type (default: ce)')
# training hyper params
parser.add_argument('--epochs', type=int, default=None, metavar='N',
help='number of epochs to train (default: auto)')
parser.add_argument('--start_epoch', type=int, default=0,
metavar='N', help='start epochs (default:0)')
parser.add_argument('--batch-size', type=int, default=None,
metavar='N', help='input batch size for \
training (default: auto)')
parser.add_argument('--test-batch-size', type=int, default=None,
metavar='N', help='input batch size for \
testing (default: auto)')
parser.add_argument('--use-balanced-weights', action='store_true', default=False,
help='whether to use balanced weights (default: False)')
# optimizer params
parser.add_argument('--lr', type=float, default=None, metavar='LR',
help='learning rate (default: auto)')
parser.add_argument('--lr-scheduler', type=str, default='poly',
choices=['poly', 'step', 'cos'],
help='lr scheduler mode: (default: poly)')
parser.add_argument('--momentum', type=float, default=0.9,
metavar='M', help='momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=5e-4,
metavar='M', help='w-decay (default: 5e-4)')
parser.add_argument('--nesterov', action='store_true', default=False,
help='whether use nesterov (default: False)')
# cuda, seed and logging
parser.add_argument('--no-cuda', action='store_true', default=
False, help='disables CUDA training')
parser.add_argument('--gpu-ids', type=str, default='0',
help='use which gpu to train, must be a \
comma-separated list of integers only (default=0)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
# checking point
parser.add_argument('--resume', type=str, default=None,
help='put the path to resuming file if needed')
parser.add_argument('--checkname', type=str, default=None,
help='set the checkpoint name')
# finetuning pre-trained models
parser.add_argument('--ft', action='store_true', default=False,
help='finetuning on a different dataset')
# evaluation option
parser.add_argument('--eval-interval', type=int, default=1,
                        help='evaluation interval (default: 1)')
parser.add_argument('--no-val', action='store_true', default=False,
help='skip validation during training')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
try:
args.gpu_ids = [int(s) for s in args.gpu_ids.split(',')]
except ValueError:
raise ValueError('Argument --gpu_ids must be a comma-separated list of integers only')
if args.sync_bn is None:
if args.cuda and len(args.gpu_ids) > 1:
args.sync_bn = True
else:
args.sync_bn = False
# default settings for epochs, batch_size and lr
if args.epochs is None:
epoches = {
'coco': 30,
'cityscapes': 200,
'pascal': 50,
}
args.epochs = epoches[args.dataset.lower()]
if args.batch_size is None:
args.batch_size = 4 * len(args.gpu_ids)
if args.test_batch_size is None:
args.test_batch_size = args.batch_size
if args.lr is None:
lrs = {
'coco': 0.1,
'cityscapes': 0.01,
'pascal': 0.007,
}
args.lr = lrs[args.dataset.lower()] / (4 * len(args.gpu_ids)) * args.batch_size
if args.checkname is None:
args.checkname = 'deeplab-' + str(args.backbone)
print(args)
torch.manual_seed(args.seed)
trainer = Trainer(args)
print('Starting Epoch:', trainer.args.start_epoch)
print('Total Epoches:', trainer.args.epochs)
for epoch in range(trainer.args.start_epoch, trainer.args.epochs):
trainer.training(epoch)
if not trainer.args.no_val and epoch % args.eval_interval == (args.eval_interval - 1):
trainer.validation(epoch)
trainer.writer.close()
if __name__ == "__main__":
main()
|
[
"609431036@qq.com"
] |
609431036@qq.com
|
6986e4a3aefa6b38a25901843b1e0235afa0a0f9
|
f99a238019b2c6344b310ffa7b1cad74f696a34c
|
/app/configurations/serializer.py
|
7eaafc21b05ce0dbd4d6528fda55b3229604ea8d
|
[] |
no_license
|
alexandrealfa/Medicall_Api
|
b4df47afbb56eae0d4be0d53fd9859dbc57b246b
|
193263f72d86e839286d2de4a37fc47925c199f6
|
refs/heads/master
| 2023-04-05T23:06:21.194557
| 2021-04-27T15:42:49
| 2021-04-27T15:42:49
| 360,715,381
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
from flask import Flask
from flask_marshmallow import Marshmallow
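# Shared Marshmallow instance, bound to the Flask app in init_app() (application-factory style).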
ma = Marshmallow()
def init_app(app: Flask):
ma.init_app(app)
|
[
"Profissional@alexandrealfa.com"
] |
Profissional@alexandrealfa.com
|
97ca3f7c7b2929ed35618e50cc05f7800801eb49
|
4c6927247a3a5fcc73d6101a3cb905f85f03c7a8
|
/Models/train_classifier.py
|
8088da0f280d084f9ed4330a493c7e2d2106351b
|
[
"Unlicense"
] |
permissive
|
PZebarth/Python-ML-Pipeline
|
2c0ca9ea4b44b388b0ba83f44a9653f5967fa32e
|
b12b32db850c95298b225638f7a32e54e5d1221f
|
refs/heads/main
| 2023-02-10T05:34:53.559108
| 2021-01-05T19:18:28
| 2021-01-05T19:18:28
| 325,661,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,565
|
py
|
import sys
from sqlalchemy import create_engine
import pandas as pd
import nltk
nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger'])
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.decomposition import TruncatedSVD
import pickle
def load_data(database_filepath):
'''
Loads sql database from filepath.
Inputs:
database_filepath - filepath string
Outputs:
X - input variable from database
    Y - output variables from database
category_names - category names of output variables from database
'''
# creating engine
engine = create_engine(f'sqlite:///{database_filepath}')
# creating dataframe from sql database
df = pd.read_sql_table('messages_disaster', con=engine)
# input variable
X = df.message
    # output variables
Y = df.drop(['id','message','original','genre'], axis=1)
# category names of output variables
category_names = Y.columns
return X, Y, category_names
def tokenize(text):
'''
Takes text input and returns tokenized and lemmatized list of words in
lower case with white space stripped.
Input:
text - string
Output:
clean_tokens - list of strings
'''
# tokenize text
tokens = word_tokenize(text)
# initiate lemmatizer
lemmatizer = WordNetLemmatizer()
# iterate through each token
clean_tokens = []
for tok in tokens:
# lemmatize, normalize case, and remove leading/trailing white space
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
def build_model():
'''
Creates model from cross validation of parameters for pipeline.
Output:
cv - cross validation model
'''
# instantiating pipeline
pipeline = Pipeline([
#('features', FeatureUnion([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(RandomForestClassifier()))
#]))
])
# creating parameters for cross validation
parameters = {'clf__estimator__max_depth': [2, 4],
'clf__estimator__n_estimators': [5, 10],
'clf__estimator__min_samples_split': [2, 3]}
# performing cross validation on pipeline
cv = GridSearchCV(pipeline, param_grid = parameters)
    return cv
def evaluate_model(model, X_test, Y_test, category_names):
'''
    Evaluates the model by printing a classification report for each output category.
'''
# creating predictions on test data input variables
Y_pred = model.predict(X_test)
# classification report for each category
print(classification_report(Y_test, Y_pred, target_names = category_names))
def save_model(model, model_filepath):
'''
Saves model in pickle format.
Input:
model - supervised machine learning model
model_filepath - filepath string
'''
with open(model_filepath, 'wb') as f:
pickle.dump(model, f)
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
evaluate_model(model, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
7c1474662eb7c545cc6e3b85fcff298c9b0e3059
|
9ec5cf9f52ff107ca4596629d193718d3082ad9a
|
/user_interface.py
|
becd87cd17a8adfe48f3ef749918162b4ce039bf
|
[
"MIT"
] |
permissive
|
rnandon/RPSLS
|
28672af059c2fe11c7862bf97fe41cbb8a2414f4
|
d70581e3abd6c30c3878bb509a9bef5e25a83619
|
refs/heads/main
| 2023-06-30T19:31:21.954791
| 2021-07-21T14:10:18
| 2021-07-21T14:10:18
| 386,705,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,448
|
py
|
### IMPORTS
### ================================
from time import sleep
# User Interface class: Handles all interactions with terminal and user
class User_Interface:
### INIT METHODS
### ======================================================================
def __init__(self, menu_width=80, options_width=64, border_thickness=3):
# Basic definitions
self.border_character = '*'
self.separator = '||'
self.menu_width = menu_width
self.options_width = options_width
self.border_thickness = border_thickness
self.build_custom_strings()
def build_custom_strings(self):
# Custom widths
self.main_between_border_space = self.menu_width - (2 * self.border_thickness)
self.secondary_between_border_space = self.options_width - (2 * self.border_thickness)
self.left_cell_width = (self.main_between_border_space - len(self.separator)) // 2
self.right_cell_width = self.main_between_border_space - len(self.separator) - self.left_cell_width
# Creating custom string blocks
self.main_pad = '\t\t'
self.secondary_pad = '\t\t\t'
self.end = '\n'
self.left_main_border = f'{self.main_pad}{self.border_character * self.border_thickness}'
self.right_main_border = f'{self.border_character * self.border_thickness}{self.end}'
self.left_secondary_border = f'{self.secondary_pad}{self.border_character * self.border_thickness}'
self.right_secondary_border = self.right_main_border
self.main_full_bar = f'{self.main_pad}{self.border_character * self.menu_width}{self.end}'
self.main_empty_bar = f'{self.left_main_border}{" " * self.main_between_border_space}{self.right_main_border}'
self.secondary_full_bar = f'{self.secondary_pad}{self.border_character * self.options_width}{self.end}'
self.secondary_empty_bar = f'{self.left_secondary_border}{" " * self.options_width}{self.right_secondary_border}'
### DISPLAY METHODS
### ======================================================================
def display_welcome(self, game_name):
self.refresh()
welcome_screen = self.get_welcome_screen(game_name)
welcome_options = self.get_welcome_options()
print(welcome_screen)
user_selection = self.verify_inputs(welcome_options, ['1', '2'])
if user_selection == '2':
self.refresh()
self.display_rules(game_name)
return user_selection
def display_player_number_prompt(self):
player_options = self.get_player_number_options()
user_selection = self.verify_inputs(player_options, ['1', '2'])
return user_selection
def get_player_number_options(self):
player_number_options = f'{self.main_full_bar}'
player_number_options += f'{self.main_empty_bar}'
player_number_options += f'{self.main_full_bar}'
player_number_options += f'{self.left_main_border}{self.center_value_in_space("HOW MANY PEOPLE ARE PLAYING?", self.main_between_border_space)}{self.right_main_border}'
player_number_options += f'{self.left_main_border}{self.center_value_in_space("1 OR 2", self.main_between_border_space)}{self.right_main_border}'
player_number_options += f'{self.main_empty_bar}'
player_number_options += f'{self.main_full_bar}'
return player_number_options
def display_rules(self, game_name):
formatted_rules = self.get_rules(["Rock crushes Scissors",
"Scissors cuts Paper",
"Paper covers Rock",
"Rock crushes Lizard",
"Lizard poisons Spock",
"Spock smashes Scissors",
"Scissors decapitates Lizard",
"Lizard eats Paper",
"Paper disproves Spock",
"Spock vaporizes Rock",
"",
"Press enter when you're done"])
input(formatted_rules)
self.display_welcome(game_name)
def display_game_screen(self):
# Display options
option_string = "Please select an option: \n\n"
option_string += ''' 1. Rock
2. Paper
3. Scissors
4. Lizard
5. Spock'''
# Format game screen and options menu
game_screen = self.get_game_screen()
# Display the screen and get user input back
#print(game_screen)
user_options = ['1', '2', '3', '4', '5']
options_values = ['Rock', 'Paper', 'Scissors', 'Lizard', 'Spock']
user_selection = self.verify_inputs(game_screen, user_options)
selected_value = options_values[int(user_selection) - 1]
return selected_value
def display_winner(self, winner):
winner_screen = self.get_winner_screen(winner)
print(winner_screen)
return 0
def display_results(self, player1_gesture, player2_gesture, winner):
message = f'{self.secondary_full_bar}'
message += f'{self.left_secondary_border}{self.center_value_in_space(f"Player 1 chose {player1_gesture}.", self.secondary_between_border_space)}{self.right_secondary_border}'
message += f'{self.left_secondary_border}{self.center_value_in_space(f"Player 2 chose {player2_gesture}.", self.secondary_between_border_space)}{self.right_secondary_border}'
if winner:
message += f'{self.left_secondary_border}{self.center_value_in_space(f"{winner} wins the round!", self.secondary_between_border_space)}{self.right_secondary_border}'
else:
message += f'{self.left_secondary_border}{self.center_value_in_space("Draw!", self.secondary_between_border_space)}{self.right_secondary_border}'
message += f'{self.secondary_full_bar}'
print(message)
sleep(1)
def display_restart(self):
restart_screen = self.get_restart_screen()
user_selection = self.verify_inputs(restart_screen, ['y', 'n'])
return user_selection
def display_exit(self):
exit_screen = self.get_exit_screen()
print(exit_screen)
def refresh(self):
print(f'{self.end * 100}')
### STRING FORMATTING METHODS
### =======================================================================
def get_welcome_screen(self, game_name):
# Top of welcome screen w/ 2 empty lines before game name
welcome_screen = f'{self.main_full_bar}'
welcome_screen += f'{self.main_empty_bar}'
welcome_screen += f'{self.main_full_bar}'
welcome_screen += f'{self.main_empty_bar}'
welcome_screen += f'{self.main_empty_bar}'
# Add each part of the game name on a new line
for part in game_name:
line_content = self.center_value_in_space(part, self.main_between_border_space)
welcome_screen += f'{self.left_main_border}{line_content}{self.right_main_border}'
# Two empty lines followed by two full width bars of the border character
welcome_screen += f'{self.main_empty_bar}'
welcome_screen += f'{self.main_empty_bar}'
welcome_screen += f'{self.main_full_bar}'
welcome_screen += f'{self.main_full_bar}'
return welcome_screen
def get_welcome_options(self):
# Top of options, single full bar + title line
welcome_options = f'{self.secondary_full_bar}'
welcome_options += f'{self.left_secondary_border}{self.center_value_in_space("ARE YOU READY TO BEGIN?", self.secondary_between_border_space)}{self.right_secondary_border}'
# Options
welcome_options += f'{self.left_secondary_border}{self.center_value_in_space("1. START GAME", self.secondary_between_border_space)}{self.right_secondary_border}'
welcome_options += f'{self.left_secondary_border}{self.center_value_in_space("2. SHOW RULES", self.secondary_between_border_space)}{self.right_secondary_border}'
# Bottom of options, single full bar and push input area over
welcome_options += f'{self.secondary_full_bar}'
welcome_options += f'{self.end}{self.end}{self.secondary_pad}'
return welcome_options
def get_rules(self, rules):
# Top of rules screen w/ 1 empty line
rules_screen = f'{self.main_full_bar}'
rules_screen += f'{self.left_main_border}{self.center_value_in_space("RPSLS - RULES", self.main_between_border_space)}{self.right_main_border}'
rules_screen += f'{self.main_full_bar}'
rules_screen += f'{self.main_empty_bar}'
# Add each rule on a new line
for rule in rules:
line_content = self.center_value_in_space(rule, self.main_between_border_space)
rules_screen += f'{self.left_main_border}{line_content}{self.right_main_border}'
# One empty line followed by two full width bars of the border character
rules_screen += f'{self.main_empty_bar}'
rules_screen += f'{self.main_full_bar}'
rules_screen += f'{self.main_full_bar}'
return rules_screen
def get_game_screen(self):
# Get the content for the title row
title_row_width = self.menu_width - (2 * self.border_thickness)
title_row = self.center_value_in_space("RPSLS", title_row_width)
# Title bar - Title row between two full bars
game_screen = f'{self.main_full_bar}'
game_screen += f'{self.left_main_border}{title_row}{self.right_main_border}'
game_screen += f'{self.main_full_bar}'
user_options = ['1', '2', '3', '4', '5']
options_values = ['Rock', 'Paper', 'Scissors', 'Lizard', 'Spock']
for i in range(5):
current_line = f'{self.left_main_border}{self.center_value_in_space(f"{user_options[i]}. {options_values[i]}", self.main_between_border_space)}{self.right_main_border}'
game_screen += current_line
game_screen += f'{self.main_empty_bar}'
game_screen += f'{self.main_full_bar}'
game_screen += f'{self.main_full_bar}'
return game_screen
def get_game_options(self, option_name, options):
# Top of options, single full bar + title line
game_options = f'{self.secondary_full_bar}'
game_options += f'{self.left_secondary_border}{self.center_value_in_space(option_name, self.secondary_between_border_space)}{self.right_secondary_border}'
# Format and add options
for i in range(len(options)):
option = options[i]
current_line = f'{self.left_secondary_border}{self.center_value_in_space(option, self.secondary_between_border_space)}{self.right_secondary_border}'
game_options += current_line
# Bottom of options, single full bar and push input area over
game_options += f'{self.secondary_full_bar}'
game_options += f'{self.end}{self.end}{self.secondary_pad}'
return game_options
def get_winner_screen(self, winner):
# Top of screen
winner_screen = f'{self.main_full_bar}'
winner_screen += f'{self.main_empty_bar}'
winner_screen += f'{self.left_main_border}{self.center_value_in_space(f"{winner} WINS THE GAME!!!", self.main_between_border_space)}{self.right_main_border}'
winner_screen += f'{self.main_empty_bar}'
winner_screen += f'{self.main_full_bar}'
return winner_screen
def get_restart_screen(self):
# Top of screen, full bar + empty bar
restart_screen = f'{self.main_full_bar}'
restart_screen += f'{self.main_empty_bar}'
# Middle of screen, format message into empty lines
restart_message = ['WOULD YOU', 'LIKE TO', 'PLAY AGAIN?', 'Y/N']
for part in restart_message:
current_line = f'{self.left_main_border}{self.center_value_in_space(part, self.main_between_border_space)}{self.right_main_border}'
restart_screen += current_line
# Bottom of screen, empty bar + full bar
restart_screen += f'{self.main_empty_bar}'
restart_screen += f'{self.main_full_bar}'
return restart_screen
def get_exit_screen(self):
# Top of screen, full bar + empty bar
exit_screen = f'{self.main_full_bar}'
exit_screen += f'{self.main_empty_bar}'
# Middle of screen, format message into empty lines
exit_message = ['THANKS', 'FOR', 'PLAYING!']
for part in exit_message:
current_line = f'{self.left_main_border}{self.center_value_in_space(part, self.main_between_border_space)}{self.right_main_border}'
exit_screen += current_line
# Bottom of screen, empty bar + full bar
exit_screen += f'{self.main_empty_bar}'
exit_screen += f'{self.main_full_bar}'
return exit_screen
def get_option_values(self, data):
options = []
# Using numeric selection values, used for list indices after verification
selections = [f'{i+1}' for i in range(len(data))]
# Add each option to the list
for i in range(len(data)):
current_option = f'{i + 1}: {data[i].name}'
options.append(current_option)
return (selections, options)
### AUXILIARY METHODS
### ======================================================================
def format_cell_data(self, left_data, right_data):
max_data_length = max(len(left_data), len(right_data)) # Make sure to account for different lengths of data
left_data_length = len(left_data)
right_data_length = len(right_data)
left_formatted = []
right_formatted = []
for i in range(max_data_length):
# Avoid going out of range
left_current_data = ''
right_current_data = ''
if i < left_data_length:
left_current_data = left_data[i]
if i < right_data_length:
right_current_data = right_data[i]
# Left cell formatting
if left_current_data:
# Format data if there is some
left_formatted.append(self.center_value_in_space(f'NAME: {left_current_data.name}', self.left_cell_width))
status_line = f' HEALTH: {left_current_data.get_health()} POWER: {left_current_data.get_resource()}'
left_formatted.append(f'{status_line}{" " * (self.left_cell_width - len(status_line))}')
else:
# Fill with empty lines
left_formatted.append(" " * self.left_cell_width)
left_formatted.append(" " * self.left_cell_width)
# Right cell formatting
if right_current_data:
# Format data if there is some
right_formatted.append(self.center_value_in_space(f'NAME: {right_current_data.name}', self.right_cell_width))
status_line = f' HEALTH: {right_current_data.get_health()} ENERGY: {right_current_data.get_resource()}'
right_formatted.append(f'{status_line}{" " * (self.right_cell_width - len(status_line))}')
else:
# Fill with empty lines
right_formatted.append(" " * self.right_cell_width)
right_formatted.append(" " * self.right_cell_width)
return (left_formatted, right_formatted)
def verify_inputs(self, message, options):
# Take in array of valid options and reprompt the user until a valid option is selected
valid_selection = False
user_input = ""
while not valid_selection:
user_input = input(message).lower()
if user_input in options:
valid_selection = True
return user_input
def center_value_in_space(self, value, total_width):
# Format value to be centered in line
left_pad = (total_width - len(value)) // 2
right_pad = total_width - left_pad - len(value)
return f'{" " * left_pad}{value}{" " * right_pad}'
### TESTING
### ======================================================================
def tests(self):
print("Testing internal variables")
print(self.left_main_border)
print(self.left_secondary_border)
print(self.right_main_border)
print(self.right_secondary_border)
print(self.main_full_bar)
print(self.main_empty_bar)
self.display_welcome(['WELCOME TO', 'ROBOTS', 'VS.', 'DINOSAURS'])
# test = User_Interface()
# test.tests()
|
[
"ryan@ryannd.com"
] |
ryan@ryannd.com
|
e34031ead30b061df8ad2a49abc53d039f59b9a8
|
3ddf743c71a63ab50f05585bcaeb1beb616c7952
|
/bims/_struct.py
|
8d77d6ff4cd903e6f0cb9e86c4b06022563c9a4e
|
[] |
no_license
|
lyreal666/pythonNotes
|
c75ef715243777f403eef36bb200641d046ca439
|
817a596a002f8d7dee14846fb7c4c089fb0224ee
|
refs/heads/master
| 2020-03-18T09:09:40.879883
| 2018-06-07T15:50:55
| 2018-06-07T15:50:55
| 134,547,817
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,874
|
py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import struct
import base64
__author__ = 'Ly'
'''
Use the struct module to work with binary byte strings.
The BMP format stores data little-endian; the file header is laid out in this order:
two bytes: 'BM' for a Windows bitmap, 'BA' for an OS/2 bitmap;
one 4-byte integer: size of the bitmap;
one 4-byte integer: reserved, always 0;
one 4-byte integer: offset of the actual image data;
one 4-byte integer: number of bytes in the header;
one 4-byte integer: image width;
one 4-byte integer: image height;
one 2-byte integer: always 1;
one 2-byte integer: number of colors.
'''
bin_bytes = struct.pack('>I', 1234)
print(bin_bytes)
int_data = struct.unpack('>I', bin_bytes)
print(int_data)
# Exercise
def bmp_info(data):
rs = struct.unpack('<cc', data[0:2])
fm = rs[0] + rs[1]
print(fm)
    if fm == b'BM' or fm == b'BA':
width = struct.unpack('<I', data[18:22])[0]
height = struct.unpack('<I', data[22:26])[0]
color = struct.unpack('<H', data[28:30])[0]
print(width, height, color)
return {
'width': width,
'height': height,
'color': color
}
else:
return None
bmp_data = base64.b64decode('Qk1oAgAAAAAAADYAAAAoAAAAHAAAAAoAAAABA'
'BAAAAAAADICAAASCwAAEgsAAAAAAAAAAAAA/3//'
'f/9//3//f/9//3//f/9//3//f/9//3//f/9//3//f'
'/9//3//f/9//3//f/9//3//f/9//3//f/9/AHwAfAB'
'8AHwAfAB8AHwAfP9//3//fwB8AHwAfAB8/3//f/9/AH'
'wAfAB8AHz/f/9//3//f/9//38AfAB8AHwAfAB8AHwAfA'
'B8AHz/f/9//38AfAB8/3//f/9//3//fwB8AHz/f/9//3/'
'/f/9//3//f/9/AHwAfP9//3//f/9/AHwAfP9//3//fwB8'
'AHz/f/9//3//f/9/AHwAfP9//3//f/9//3//f/9//38AfA'
'B8AHwAfAB8AHwAfP9//3//f/9/AHwAfP9//3//f/9//38'
'AfAB8/3//f/9//3//f/9//3//fwB8AHwAfAB8AHwAfAB'
'8/3//f/9//38AfAB8/3//f/9//3//fwB8AHz/f/9//3/'
'/f/9//3//f/9/AHwAfP9//3//f/9/AHwAfP9//3//fwB'
'8AHz/f/9/AHz/f/9/AHwAfP9//38AfP9//3//f/9/AHwA'
'fAB8AHwAfAB8AHwAfAB8/3//f/9/AHwAfP9//38AfAB8AH'
'wAfAB8AHwAfAB8/3//f/9//38AfAB8AHwAfAB8AHwAfAB8'
'/3//f/9/AHwAfAB8AHz/fwB8AHwAfAB8AHwAfAB8AHz/f'
'/9//3//f/9//3//f/9//3//f/9//3//f/9//3//f/9//'
'3//f/9//3//f/9//3//f/9//3//f/9//38AAA==')
# Test
bi = bmp_info(bmp_data)
assert bi['width'] == 28
assert bi['height'] == 10
assert bi['color'] == 16
print('ok')
|
[
"2713151713@qq.com"
] |
2713151713@qq.com
|
8867f39a15ecd8cd0dd71e88553af2701a2d68cf
|
afe99c7c6130c69ddf9fc5a648dca362e0d246df
|
/helloFlask.py
|
3fab0e62835b0b087ba62d46a4340e9cb306e0b0
|
[] |
no_license
|
mrscorrigan/helloFlask
|
6603e752800146b3199bb36011e08b3058e36281
|
8b98a8d32cd346f0fbc6d6140eb03c14fbb00e62
|
refs/heads/master
| 2021-01-11T00:14:26.807894
| 2016-10-11T13:33:03
| 2016-10-11T13:33:03
| 70,572,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
from flask import Flask, render_template
app = Flask(__name__)
countries = [{
'Country' : 'Ireland',
'Capital' : 'Dublin',
'Population' : '4.5 million'},
{'Country' : 'France',
'Capital' : 'Paris',
'Population' : '60 million'},
{'Country' : 'England',
'Capital' : 'London',
'Population' : '65 million'
}]
@app.route('/')
def get_index():
return render_template("index.html")
@app.route('/country')
def get_country():
return render_template("countries.html", data=countries)
if __name__ == '__main__':
app.run()
|
[
"stephen.corrigan@live.co.uk"
] |
stephen.corrigan@live.co.uk
|
fb697305a58694c4deb40e0b22f339d6d2a99677
|
25e7b6238bb4b5b219dabd019b5db74c87e7b09a
|
/quantist3/ok/tests/test_change_data_form.py
|
04d051361f1ee3306e5c66b823105a79ac22a1f0
|
[] |
no_license
|
pan-cai/quantist3
|
33976db1e1780db992334f34fdecf7bf952f2908
|
7cf71c4a64704cb00c0457ca56729d020a0f7739
|
refs/heads/master
| 2021-05-05T17:06:25.161287
| 2018-01-20T16:38:55
| 2018-01-20T16:38:55
| 117,344,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
# -*- coding: utf-8 -*-
from unittest import TestCase
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
import pandas as pd
import global_list as gl
class TestChangeDataForm(TestCase):
def test_forex_basic(self):
data = gl.TEST_FOREX_XAUUSDD_DATA
data = pd.read_csv(data)
print(data[:3])
|
[
"liupan8910@163.com"
] |
liupan8910@163.com
|
172e64c35f84f0536cb29864b0cfe3bec9409520
|
768f5f74ef4fbb03cf6d0e052b18b0775e0f1250
|
/node_modules/socket.io-client/node_modules/ws/build/config.gypi
|
12d98e954983376d9d8db4b1f7960c0e2ee61f2d
|
[
"MIT"
] |
permissive
|
gnanagowthaman/SailAngularzur
|
f6ca1f3ff9805a5e8eba99fec04b1beb32f8fbf7
|
90e2953c3f0f05c4aad97c1604261ae84ee27e90
|
refs/heads/master
| 2020-03-12T16:23:22.843392
| 2018-07-07T13:42:03
| 2018-07-07T13:42:03
| 130,714,891
| 0
| 0
| null | 2018-07-07T13:42:04
| 2018-04-23T15:05:48
| null |
UTF-8
|
Python
| false
| false
| 5,254
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"gas_version": "2.23",
"host_arch": "x64",
"icu_data_file": "icudt60l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt60l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "60",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 57,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "so.57",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "false",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "/home/vahai/.node-gyp/8.10.0",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"ham_it_up": "",
"legacy_bundling": "",
"sign_git_tag": "",
"user_agent": "npm/5.8.0 node/v8.10.0 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"allow_same_version": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "1000",
"prefer_online": "",
"force": "",
"only": "",
"read_only": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"timing": "",
"userconfig": "/home/vahai/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"package_lock_only": "",
"save_dev": "",
"usage": "",
"metrics_registry": "https://registry.npmjs.org/",
"otp": "",
"package_lock": "true",
"progress": "true",
"https_proxy": "",
"save_prod": "",
"cidr": "",
"onload_script": "",
"sso_type": "oauth",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"dry_run": "",
"prefix": "/home/vahai/.nvm/versions/node/v8.10.0",
"scope": "",
"browser": "",
"cache_lock_wait": "10000",
"ignore_prepublish": "",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/vahai/.npm",
"send_metrics": "",
"global_style": "",
"ignore_scripts": "",
"version": "",
"local_address": "",
"viewer": "man",
"node_gyp": "/home/vahai/.nvm/versions/node/v8.10.0/lib/node_modules/npm/node_modules/npm-lifecycle/node_modules/node-gyp/bin/node-gyp.js",
"prefer_offline": "",
"color": "true",
"no_proxy": "",
"fetch_retry_mintimeout": "10000",
"maxsockets": "50",
"offline": "",
"sso_poll_frequency": "500",
"umask": "0002",
"fetch_retry_maxtimeout": "60000",
"logs_max": "10",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"access": "",
"also": "",
"save": "true",
"unicode": "",
"long": "",
"production": "",
"searchlimit": "20",
"unsafe_perm": "true",
"auth_type": "legacy",
"node_version": "8.10.0",
"tag": "latest",
"git_tag_version": "true",
"commit_hooks": "true",
"script_shell": "",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/home/vahai/.nvm/versions/node/v8.10.0/etc/npmrc",
"init_module": "/home/vahai/.npm-init.js",
"parseable": "",
"globalignorefile": "/home/vahai/.nvm/versions/node/v8.10.0/etc/npmignore",
"cache_lock_retries": "10",
"searchstaleness": "900",
"node_options": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": ""
}
}
|
[
"gnana@vahaitech.com"
] |
gnana@vahaitech.com
|
bfe8e4bc295bbf5a06577105e22905e15b024ebe
|
1aec3c93eaa1fc271ea80141a3a41a24cd60c8d9
|
/mcrouter/test/test_loadbalancer_route.py
|
854b6970f6db6a6b80bfcb4620c6a999a5ebd5a3
|
[
"BSD-3-Clause"
] |
permissive
|
boboozy/mcrouter
|
810859b997ea2c687c67723a3ad94aa88e93b746
|
d78f599bd3887a87d5785422a25e3ac07b0de169
|
refs/heads/master
| 2021-07-25T09:52:09.175808
| 2017-11-04T01:02:51
| 2017-11-04T01:11:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,743
|
py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from mcrouter.test.MCProcess import Mcrouter
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestLoadBalancerRoute(McrouterTestCase):
config = './mcrouter/test/test_loadbalancer_route.json'
null_route_config = './mcrouter/test/test_nullroute.json'
mcrouter_server_extra_args = ['--server-load-interval-ms=50']
extra_args = []
def setUp(self):
self.mc = []
for _i in range(8):
self.mc.append(Mcrouter(self.null_route_config,
extra_args=self.mcrouter_server_extra_args))
self.add_server(self.mc[_i])
self.mcrouter = self.add_mcrouter(
self.config,
extra_args=self.extra_args)
def test_loadbalancer(self):
n = 20000
for i in range(0, n):
key = 'someprefix:{}:|#|id=123'.format(i)
self.assertTrue(not self.mcrouter.get(key))
self.assertTrue(self.mcrouter.stats()['cmd_get_count'] > 0)
lblrc = 'load_balancer_load_reset_count'
self.assertEqual(int(self.mcrouter.stats("all")[lblrc]), 0)
sum = 0
for i in range(8):
self.assertTrue(self.mc[i].stats()['cmd_get_count'] > 0)
sum += int(self.mc[i].stats()['cmd_get_count'])
self.assertEqual(sum, n)
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
a8988a72dbe29d4bb33b7047b87fdca681431144
|
0cdf1a68a2c92fccb9bee72caded43bd529d19fa
|
/myblog/bin/easy_install-3.6
|
ecf6cebedf88a1b974e412782c2a044b8072ea6a
|
[] |
no_license
|
nicolett85/momlife
|
9a0fb0660d0be904a6118d6cb32ff609a7900ac5
|
a6caf8e5148daef6a5d0b62b240aa2bbee53681d
|
refs/heads/master
| 2020-04-21T08:20:36.887276
| 2019-03-11T18:23:04
| 2019-03-11T18:23:04
| 169,416,734
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
6
|
#!/Users/nim/Momlife/myblog/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"nicole.melzer1@freenet.de"
] |
nicole.melzer1@freenet.de
|
15c9096c932868854571f4061ed0c3a68eec026e
|
6efc2eb23678741263da7ac6bd868a9f3a37d38b
|
/01.stock_investment/05.chart_analysis/test_boto3/test_s3_download.py
|
363a8247b52c0222ee245d335c3d2c697ee9f4c1
|
[] |
no_license
|
predora005/business-research
|
c6272b129353a302673cf8a13c1629b5ade4a50e
|
96743cc6a0b592c87e6d0f2de341fc3bbb3ef3b1
|
refs/heads/main
| 2023-06-18T08:08:24.537951
| 2021-07-22T04:19:09
| 2021-07-22T04:19:09
| 314,985,045
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,270
|
py
|
# coding: utf-8
import boto3
import tempfile
##################################################
# Main
##################################################
if __name__ == '__main__':
BUCKET_NAME = ''
OBJECT_NAME1 = 'dir1/file1.txt'
FILE_NAME1 = 'file1.txt'
OBJECT_NAME2 = 'dir1/file2.txt'
FILE_NAME2 = 'file2.txt'
OBJECT_NAME3 = 'dir2/file3.csv'
FILE_NAME3 = 'file3.csv'
OBJECT_NAME4 = 'dir2/file4.txt'
FILE_NAME4 = 'file4.txt'
##############################
s3 = boto3.resource('s3')
s3.Bucket(BUCKET_NAME).download_file(OBJECT_NAME1, FILE_NAME1)
##############################
# The download_file method
s3 = boto3.client('s3')
s3.download_file(BUCKET_NAME, OBJECT_NAME2, FILE_NAME2)
##############################
s3 = boto3.resource('s3')
bucket = s3.Bucket(BUCKET_NAME)
with open(FILE_NAME3, 'wb') as f:
bucket.download_fileobj(OBJECT_NAME3, f)
##############################
# The download_fileobj method
s3 = boto3.client('s3')
with open(FILE_NAME4, 'wb') as f:
#with tempfile.NamedTemporaryFile(mode='wb') as f:
s3.download_fileobj(BUCKET_NAME, OBJECT_NAME4, f)
print(f.name)
print(f.tell())
|
[
"46834065+predora005@users.noreply.github.com"
] |
46834065+predora005@users.noreply.github.com
|
f0e7916ceda6be72c1bab7f4e085da19a24d89ff
|
d8a50cbc09cef1c99ba1b2d9b3c2d90502ffd2c8
|
/Demo/app.py
|
c8b6217d5693a46a874f1066cf71b9b73cb05773
|
[] |
no_license
|
NgTuanLoc/Melbourne-Housing-Prediction
|
82466bf72418600a8e7517e62b6e408602b30a5d
|
b322d508fcbe010397930bd970333106b8882980
|
refs/heads/master
| 2023-04-05T04:57:04.602587
| 2021-04-20T15:25:01
| 2021-04-20T15:25:01
| 359,858,596
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,553
|
py
|
import numpy as np
import pandas as pd
from flask import Flask, request, jsonify, render_template
import pickle
from model import SVM, RF, KNN, full_pipeline, data, features, test, SVM_Grid, KNN_Grid, RF_Random, convert
app = Flask(__name__)
@app.route('/index')
@app.route('/')
def home():
return render_template('index.html')
@app.route('/predict',methods=['POST'])
def predict():
X = [item for item in request.form.values()]
X = [float(X[i]) if X[i].isnumeric() else np.nan for i in range(8) ] + [str(X[i]) for i in range(8, 15)]
X = pd.DataFrame(data=[X], columns=features)
item = data.head(1)
for i in features:
item[i] = X[i]
final_features = full_pipeline.transform(item)
prediction_svm = convert(SVM_Grid.best_estimator_.predict(final_features))
prediction_knn = convert(KNN_Grid.best_estimator_.predict(final_features))
prediction_rf = convert(RF_Random.predict(final_features))
return render_template('index.html', prediction_svm="(SVM) House's Price should be {}".format(prediction_svm), prediction_knn="(KNN) House's Price should be {}".format(prediction_knn), prediction_rf="(RF) House's Price should be {}".format(prediction_rf) )
@app.route('/results',methods=['POST'])
def results():
data = request.get_json(force=True)
prediction = RF.predict(full_pipeline.transform(np.array(list(data.values()))))
output = prediction
return jsonify(output)
if __name__ == "__main__":
app.run(debug=True)
|
[
"18521011@gm.uit.edu.vn"
] |
18521011@gm.uit.edu.vn
|
a98daa0410363b639ee81fc77a48ba3c678abf66
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/insights/get_guest_diagnostics_settings_association.py
|
3440cdd68c76aa4250f607aaf13bbb8ba2ffb7dc
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212
| 2021-11-15T12:43:41
| 2021-11-15T12:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,562
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetGuestDiagnosticsSettingsAssociationResult',
'AwaitableGetGuestDiagnosticsSettingsAssociationResult',
'get_guest_diagnostics_settings_association',
'get_guest_diagnostics_settings_association_output',
]
@pulumi.output_type
class GetGuestDiagnosticsSettingsAssociationResult:
"""
Virtual machine guest diagnostic settings resource.
"""
def __init__(__self__, guest_diagnostic_settings_name=None, id=None, location=None, name=None, tags=None, type=None):
if guest_diagnostic_settings_name and not isinstance(guest_diagnostic_settings_name, str):
raise TypeError("Expected argument 'guest_diagnostic_settings_name' to be a str")
pulumi.set(__self__, "guest_diagnostic_settings_name", guest_diagnostic_settings_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="guestDiagnosticSettingsName")
def guest_diagnostic_settings_name(self) -> str:
"""
The guest diagnostic settings name.
"""
return pulumi.get(self, "guest_diagnostic_settings_name")
@property
@pulumi.getter
def id(self) -> str:
"""
Azure resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Azure resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Azure resource type
"""
return pulumi.get(self, "type")
class AwaitableGetGuestDiagnosticsSettingsAssociationResult(GetGuestDiagnosticsSettingsAssociationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetGuestDiagnosticsSettingsAssociationResult(
guest_diagnostic_settings_name=self.guest_diagnostic_settings_name,
id=self.id,
location=self.location,
name=self.name,
tags=self.tags,
type=self.type)
def get_guest_diagnostics_settings_association(association_name: Optional[str] = None,
resource_uri: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGuestDiagnosticsSettingsAssociationResult:
"""
Virtual machine guest diagnostic settings resource.
API Version: 2018-06-01-preview.
:param str association_name: The name of the diagnostic settings association.
:param str resource_uri: The fully qualified ID of the resource, including the resource name and resource type.
"""
__args__ = dict()
__args__['associationName'] = association_name
__args__['resourceUri'] = resource_uri
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:insights:getGuestDiagnosticsSettingsAssociation', __args__, opts=opts, typ=GetGuestDiagnosticsSettingsAssociationResult).value
return AwaitableGetGuestDiagnosticsSettingsAssociationResult(
guest_diagnostic_settings_name=__ret__.guest_diagnostic_settings_name,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
tags=__ret__.tags,
type=__ret__.type)
@_utilities.lift_output_func(get_guest_diagnostics_settings_association)
def get_guest_diagnostics_settings_association_output(association_name: Optional[pulumi.Input[str]] = None,
resource_uri: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGuestDiagnosticsSettingsAssociationResult]:
"""
Virtual machine guest diagnostic settings resource.
API Version: 2018-06-01-preview.
:param str association_name: The name of the diagnostic settings association.
:param str resource_uri: The fully qualified ID of the resource, including the resource name and resource type.
"""
...
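# Illustrative usage sketch (the association name and resource URI below are
# placeholders, not values taken from this SDK's documentation):
#
#   assoc = get_guest_diagnostics_settings_association(
#       association_name="myAssociation",
#       resource_uri="/subscriptions/<sub-id>/resourceGroups/<rg>/providers/Microsoft.Compute/virtualMachines/<vm>",
#   )
#   pulumi.export("settingsName", assoc.guest_diagnostic_settings_name)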
|
[
"noreply@github.com"
] |
noreply@github.com
|
59fe0e859dc6987503f7f78594e9789a09d02ae2
|
cc60064828984edca97af87427159981e89f582d
|
/torch/_dynamo/output_graph.py
|
aee9bfebcf4d9f2f436a509da52fcbe1f879468d
|
[
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
d4l3k/pytorch
|
fdd28e089aa77c1cd897da02c9dd765d53f5ab05
|
11890156e7d10d1fb72c41a38a1b94fc27004d2b
|
refs/heads/master
| 2023-04-16T01:22:23.749295
| 2023-04-04T13:09:25
| 2023-04-06T01:51:10
| 153,366,388
| 0
| 0
|
NOASSERTION
| 2018-10-16T23:14:39
| 2018-10-16T23:14:38
| null |
UTF-8
|
Python
| false
| false
| 34,350
|
py
|
import collections
import copy
import functools
import itertools
import logging
import operator
import re
import sys
import traceback
from dataclasses import dataclass
from typing import Any, Dict, List, NamedTuple, Optional, OrderedDict, Set, Union
import torch._guards
import torch._logging
import torch.nn
from torch import fx
from torch._guards import (
Checkpointable,
Guard,
GuardsCheckpointState,
Source,
TracingContext,
)
from torch.fx.experimental.symbolic_shapes import ShapeEnv
from . import config, logging as torchdynamo_logging, variables
from .backends.registry import CompiledFn, CompilerFn
from .bytecode_transformation import (
create_call_function,
create_instruction,
Instruction,
unique_id,
)
from .codegen import PyCodegen
from .exc import BackendCompilerFailed, unimplemented
from .guards import GuardBuilder
from .mutation_guard import is_dynamic_nn_module
from .side_effects import SideEffects
from .source import (
ConstantSource,
DeterministicAlgorithmsSource,
is_constant_source,
LocalSource,
ParamBufferSource,
ShapeEnvSource,
)
from .utils import (
assert_no_fake_params_or_buffers,
checkpoint_params,
CleanupHook,
clone_inputs,
count_calls,
counters,
dynamo_timed,
format_graph_code,
format_graph_tabular,
same,
)
from .variables.base import VariableTracker
from .variables.builder import GraphArg, TrackedFake, VariableBuilder, wrap_fx_proxy
from .variables.nn_module import NNModuleVariable
from .variables.tensor import (
SymNodeVariable,
TensorVariable,
UnspecializedPythonVariable,
)
log = logging.getLogger(__name__)
graph_tabular_log = torch._logging.getArtifactLogger(__name__, "graph")
graph_code_log = torch._logging.getArtifactLogger(__name__, "graph_code")
class OutputGraphState(NamedTuple):
graphargs: List[GraphArg]
tracked_fakes: List[TrackedFake]
guard_state: GuardsCheckpointState
nn_modules: Optional[Dict[str, torch.nn.Module]]
param_name_to_source: Optional[Dict[str, Source]]
side_effects: SideEffects
timestamp: int
def diff(self, other: "OutputGraphState", *, prefix: str = "") -> Optional[str]:
for k in self._fields:
if k == "guard_state":
r = self.guard_state.diff(other.guard_state)
if r is not None:
return r
continue
elif k == "side_effects":
r = self.side_effects.diff(other.side_effects)
if r is not None:
return r
continue
sv = getattr(self, k)
ov = getattr(other, k)
if sv != ov:
return f"{prefix}{k} mismatch: {sv} != {ov}"
return None
# Back compat .guards api
@property
def guards(self):
return self.guard_state.dynamo_guards
@functools.lru_cache(None)
def _step_logger():
return torchdynamo_logging.get_step_logger(log)
@dataclass
class GraphCompileReason:
"""Stores why a given output graph was compiled; i.e. what caused the graph break."""
reason: str
user_stack: List[traceback.FrameSummary]
def _get_gen_rand_values_fn(random_calls):
def _gen_rand_values():
return [fn(*args, **kwargs) for fn, args, kwargs in random_calls]
return _gen_rand_values
class FakeRootModule(torch.nn.Module):
"""Trick the constructor of fx.GraphModule"""
def __init__(self, nn_modules: Dict[str, torch.nn.Module]):
super().__init__()
for k, v in nn_modules.items():
setattr(self, k, v)
def __repr__(self):
return "FakeRootModule(...)"
class WrapperBackend:
def __init__(self, backend: CompilerFn, original_example_inputs):
self.backend: CompilerFn = backend
self.original_example_inputs = original_example_inputs
@property
def example_inputs(self):
return clone_inputs(self.original_example_inputs)
def __call__(self, gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]):
self.restore = checkpoint_params(gm)
self.gm = gm
copy_gm = copy.deepcopy(self.gm)
self.candidate = self.backend(copy_gm, self.original_example_inputs)
if self.candidate is None or self.candidate is self.gm.forward:
return self.gm.forward
if not config.verify_correctness:
return self.candidate
# if verify_correctness=True
try:
correct = self.gm.forward(*self.example_inputs)
result = self.candidate(*self.example_inputs)
# TODO: replace `same` function with the one in testing
if same(correct, result):
return self.candidate
raise RuntimeError(f"incorrect results of backend {self}")
return self.gm.forward
except Exception:
log.exception("error in verify_correctness")
raise
finally:
self.restore()
class OutputGraph(fx.Tracer, Checkpointable[OutputGraphState]):
"""
Wrapper class to hold outputs of InstructionTranslator. Mainly the
generated fx.Graph.
"""
def __init__(
self,
f_globals: Dict[str, Any],
code_options: Dict[str, Any],
compiler_fn: CompilerFn,
root_tx,
export: bool,
export_constraints,
):
super().__init__()
self.graph = torch.fx.Graph()
self.graphargs: List[GraphArg] = []
self.export = export
self.export_constraints = export_constraints
# In export mode, we force the shape_env to strictly disallow any constraining
# of the user marked dynamic dims
fake_mode = torch._subclasses.FakeTensorMode(
shape_env=ShapeEnv(
allow_scalar_outputs=config.capture_scalar_outputs,
allow_dynamic_output_shape_ops=config.capture_dynamic_output_shape_ops,
)
if config.dynamic_shapes
else None,
# TODO (tmanlaibaatar) Remove this once we always lift params and buffers
allow_non_fake_inputs=True if self.export else False,
)
self.tracing_context: TracingContext = TracingContext(fake_mode)
if config.dynamic_shapes:
# Register a SHAPE_ENV guard to make sure we setup shape guards
# that show up in ShapeEnv
self.guards.add(ShapeEnvSource().make_guard(GuardBuilder.SHAPE_ENV))
self.guards.add(
DeterministicAlgorithmsSource().make_guard(
GuardBuilder.DETERMINISTIC_ALGORITHMS
)
)
# tracked_fakes says where any tensor that was wrapped to fake came
# from. It is similar to GraphArg, in that all GraphArgs will get
# added to TrackedFakes, but TrackedFakes also contains
# GraphArgs that got pruned, and things like Tensor attributes which
# aren't explicit graph inputs. Used by shape guard
self.tracked_fakes: List[TrackedFake] = []
# Although we prune unused graphargs before sending graphs to
# compilers, we may have legitimately triggered shape guards
# on "unused" inputs that we must keep track of. So after
# remove_unused_graphargs is called, orig_graphargs and
# graphargs no longer alias; orig_graphargs is the original
# graphargs, and graphargs is the pruned list. Guard creation
# should use original graphargs.
self.orig_graphargs: List[GraphArg] = self.graphargs
self.nn_modules: Optional[Dict[str, torch.nn.Module]] = dict()
# Stores the full fqn of a param or buffer to the relevant source.
self.param_name_to_source: Optional[Dict[str, Source]] = dict()
self.side_effects = SideEffects()
self.code_options = dict(code_options)
self.output_instructions: List[Instruction] = []
# used to track nodes that are added between calls of copy_graphstate
# and restore_graphstate
self.timestamp = 0
# Node => computed real value (see utils.get_real_value)
self.real_value_cache: Dict[fx.Node, torch.Tensor] = {}
# Not checkpointed
self.compiler_fn: CompilerFn = compiler_fn
self.root_globals = f_globals
self.root_tx = root_tx
from torch._dynamo.symbolic_convert import InstructionTranslatorBase
self._current_tx: List[InstructionTranslatorBase] = []
self.cleanups: List[CleanupHook] = []
self.should_exit = False
self.random_values_var = None
self.initial_random_state = ()
self.unspec_variable_map: Dict[str, UnspecializedPythonVariable] = {}
# Enables creating unique node names by tracking
# all current placeholder node names
self.name_to_input: OrderedDict[
str, Optional[fx.Proxy]
] = collections.OrderedDict()
@property
def output(self):
return self
@property
def fake_mode(self):
return self.root_tx.fake_mode
@property
def shape_env(self):
return self.tracing_context.fake_mode.shape_env
@property
def guards(self) -> Set[Guard]:
return self.tracing_context.guards_context.dynamo_guards
def push_tx(self, tx):
self._current_tx.append(tx)
def pop_tx(self):
return self._current_tx.pop()
@property
def current_tx(self):
return self.root_tx if not self._current_tx else self._current_tx[-1]
def copy_graphstate(self) -> OutputGraphState:
"""Create a checkpoint of the current state by copying everything"""
assert self.nn_modules is not None
assert self.param_name_to_source is not None
guards_graph_state = self.tracing_context.guards_context.copy_graphstate()
state = OutputGraphState(
list(self.graphargs),
list(self.tracked_fakes),
guards_graph_state,
dict(self.nn_modules),
dict(self.param_name_to_source),
self.side_effects.clone(),
self.timestamp,
)
self.timestamp += 1
return state
def restore_graphstate(self, state: OutputGraphState):
"""Restore a checkpoint created by self.copy_graphstate()"""
(
self.graphargs,
self.tracked_fakes,
guards_state,
self.nn_modules,
self.param_name_to_source,
self.side_effects,
self.timestamp,
) = state
self.tracing_context.guards_context.restore_graphstate(guards_state)
# FX deepcopy doesn't work for a partially created graph, so just remove new nodes
removed_nodes = 0
for node in reversed(list(self.graph.nodes)):
if node.meta["creation_timestamp"] > self.timestamp:
# Erasing node alone does not remove the meta information
# So, remove the help tensor explicitly
if "example_value" in node.meta:
del node.meta["example_value"]
self.remove_node(node)
self.real_value_cache.pop(node, None)
removed_nodes += 1
log.debug(f"restore_graphstate: removed {removed_nodes} nodes")
def add_grapharg(self, arg: GraphArg):
curr_pos = len(self.graphargs)
self.graphargs.append(arg)
def count_calls(self):
return count_calls(self.graph)
def get_submodule(self, keys):
assert keys
obj = self.nn_modules
for k in keys.split("."):
if isinstance(obj, dict):
obj = obj[k]
else:
obj = getattr(obj, k)
return obj
def create_graph_input(self, name, type_expr=None):
# unique
if name in self.name_to_input:
for i in itertools.count():
if f"{name}_{i}" not in self.name_to_input:
name = f"{name}_{i}"
break
if self.name_to_input:
prev_name = next(reversed(self.name_to_input))
ctx = self.graph.inserting_after(self.name_to_input[prev_name])
else:
ctx = self.graph.inserting_before(None)
with ctx:
proxy = self.create_proxy("placeholder", name, (), {}, type_expr=type_expr)
self.name_to_input[name] = proxy.node
return proxy
def new_var(self, name="tmp"):
existing = set(self.code_options["co_varnames"])
for i in itertools.count():
var = f"___{name}_{i}"
if var not in existing:
self.code_options["co_varnames"] += (var,)
return var
def update_co_names(self, name):
"""Ensure self.code_options.co_names contains name"""
if name not in self.code_options["co_names"]:
self.code_options["co_names"] += (name,)
@staticmethod
def module_has_hooks(mod, only_check_unsupported=False):
supported_hooks = [
"_forward_pre_hooks",
"_forward_hooks",
]
unsupported_hooks = [
"_backward_pre_hooks",
"_backward_hooks",
"_state_dict_pre_hooks",
"_state_dict_hooks",
"_load_state_dict_pre_hooks",
"_load_state_dict_post_hooks",
]
check_hooks = unsupported_hooks
if not only_check_unsupported:
check_hooks += supported_hooks
return any(len(getattr(mod, x)) > 0 for x in check_hooks if hasattr(mod, x))
def register_attr_or_module(
self,
target: Union[torch.nn.Module, torch.Tensor, Any],
*names,
**options,
):
if is_dynamic_nn_module(target):
return variables.UnspecializedNNModuleVariable(target, **options)
options = dict(options)
options["guards"] = set(options.get("guards", []))
assert "source" in options
source = options["source"]
assert not isinstance(source, ParamBufferSource)
if isinstance(target, torch.Tensor):
if not is_constant_source(source):
options["guards"].add(source.make_guard(GuardBuilder.TENSOR_MATCH))
def wrap_name(module_key):
assert self.param_name_to_source is not None
self.param_name_to_source[module_key] = source
return wrap_fx_proxy(
self.root_tx,
self.create_proxy("get_attr", module_key, tuple(), {}),
example_value=target,
**options,
)
elif isinstance(target, torch.nn.Module):
assert isinstance(target, torch.nn.Module)
if self.module_has_hooks(target, only_check_unsupported=True):
torch._logging.warning_once(
log, "nn.Module hooks are not fully supported, they may be ignored"
)
options["guards"].add(source.make_guard(GuardBuilder.NN_MODULE))
def wrap_name(module_key):
return NNModuleVariable(type(target), module_key, **options)
elif isinstance(target, (torch.SymInt, torch.SymFloat)):
# HACKY CODE REGION BEGIN
# WE ARE PIGGYBACKING ON EXISTING INFRA TO REGISTER ATTRS
# This ultimately gets written to self.nn_modules, which is unfortunate
# Attrs that are tensors and symints and such need to be migrated to have their
# own storage
# alas, this is like this for now
def wrap_name(module_key):
return SymNodeVariable.create(
self,
self.create_proxy("get_attr", module_key, tuple(), {}),
sym_num=target,
**options,
)
# HACKY CODE REGION END
else:
def wrap_name(module_key):
self.output.update_co_names(module_key)
self.root_globals[module_key] = target
return VariableBuilder(self, ConstantSource(source_name=module_key))(
target
)
assert self.nn_modules is not None
for k, v in self.nn_modules.items():
if v is target:
# it already exists
return wrap_name(k)
# create a new unique name
name = "_".join(map(str, names))
# Strip the guard lookup L/G access
name = re.sub(r"^[GL]\['?(.*?)'?\]$", r"\1", name)
# e.g. replace abc.xyz[123].qkv with abc.xyz_123.qkv
name = re.sub(r"\[(\d+)\]", r"_\g<1>", name)
# e.g. replace abc.xyz_123.qkv with abc_xyz_123_qkv
name = re.sub(r"[^a-zA-Z0-9]", "_", name)
if not name or not name[0].isalpha():
name = "sub" + name
base = name
for i in itertools.count():
if name not in self.nn_modules:
self.nn_modules[name] = target
if isinstance(target, torch.nn.Module):
def register_leaf_name(leaf_name):
assert self.param_name_to_source is not None
new_source = ParamBufferSource(source, leaf_name)
new_name = f"{name}.{leaf_name}"
self.param_name_to_source[new_name] = new_source
# annoying, but there are cases when we do not have parameters
# see test_nn_moduledict_contains
if hasattr(target, "_parameters"):
for leaf_name, _ in target.named_parameters(
remove_duplicate=False
):
register_leaf_name(leaf_name)
if hasattr(target, "_buffers"):
for leaf_name, _ in target.named_buffers(
remove_duplicate=False
):
register_leaf_name(leaf_name)
return wrap_name(name)
name = f"{base}_{i}"
raise AssertionError("unreachable")
def compile_subgraph(
self, tx, partial_convert=False, reason: Optional[GraphCompileReason] = None
):
"""
Generate a subgraph to continue execution on user code.
Automatically restore live variables.
"""
from .eval_frame import disable
self.partial_convert = partial_convert
self.compile_subgraph_reason = reason
log.debug(f"COMPILING GRAPH due to {reason}")
if not all(block.can_restore() for block in tx.block_stack):
unimplemented("compile_subgraph with block_depth != 0")
prefix_insts: List[Instruction] = []
if sys.version_info >= (3, 11):
# prefix instructions (Python 3.11+)
for inst in tx.prefix_insts:
if inst.opname == "MAKE_CELL":
prefix_insts.append(
create_instruction("MAKE_CELL", argval=inst.argval)
)
elif inst.opname == "COPY_FREE_VARS":
prefix_insts.append(
create_instruction(
"COPY_FREE_VARS", arg=len(tx.code_options["co_freevars"])
)
)
else:
prefix_insts.append(inst)
def append_prefix_insts():
self.add_output_instructions(prefix_insts)
prefix_insts.clear()
for block in reversed(tx.block_stack):
block.exit(tx)
tx.prune_dead_locals()
stack_values = list(tx.stack)
assert self.nn_modules is not None
root = FakeRootModule(self.nn_modules)
# Add all the local vars to the "stack" so restore at the end
restore_vars = []
val_to_names: OrderedDict[
VariableTracker, List[str]
] = collections.OrderedDict()
if stack_values:
val_to_names[stack_values[-1]] = list()
for k, v in tx.symbolic_locals.items():
# Note! this explicitly uses .local_name for matching
# Failure to do so will cause spurious registrations in val_to_names.
# This will in turn result in spurious variables showing up in the graph.
# This was very tricky to debug. For an example, dump the graph at call_user_compiler
# while running test_subgraphs.py
if isinstance(v.source, LocalSource) and v.source.local_name == k:
continue # no need to restore initial state
if v not in val_to_names:
val_to_names[v] = list()
val_to_names[v].append(k)
for v in val_to_names.keys():
restore_vars.extend(val_to_names[v])
stack_values.extend([v] * len(val_to_names[v]))
# to handle random calls
if len(tx.random_calls) > 0:
append_prefix_insts()
random_calls_instructions = []
self.random_values_var = self.new_var("random_values")
rand_fn_name = unique_id("__gen_rand_values")
rand_fn = disable(_get_gen_rand_values_fn(tx.random_calls))
self.install_global(rand_fn_name, rand_fn)
codegen = PyCodegen(tx, root)
random_calls_instructions.extend(
[
codegen.create_load_global("random", True, add=True),
codegen.create_load_attr("setstate"),
codegen.create_load_const(tx.output.initial_random_state),
]
+ create_call_function(1, False),
)
random_calls_instructions.extend(
codegen.load_function_name(rand_fn_name, True)
)
random_calls_instructions.extend(create_call_function(0, False))
random_calls_instructions.append(
codegen.create_store(tx.output.random_values_var),
)
self.add_output_instructions(random_calls_instructions)
if (
stack_values
and all(
not isinstance(v, UnspecializedPythonVariable) for v in stack_values
)
and all(isinstance(x, TensorVariable) for x in stack_values)
and len(set(stack_values)) == len(stack_values)
and self.side_effects.is_empty()
):
append_prefix_insts()
# optimization to generate better code in a common case
self.add_output_instructions(
self.compile_and_call_fx_graph(tx, list(reversed(stack_values)), root)
+ [create_instruction("UNPACK_SEQUENCE", arg=len(stack_values))]
)
else:
graph_output_var = self.new_var("graph_out")
pass1 = PyCodegen(tx, root, graph_output_var)
self.side_effects.codegen_save_tempvars(pass1)
pass1.foreach(stack_values)
self.side_effects.codegen_update_mutated(pass1)
# one more time now that we have established tempvars
pass2 = PyCodegen(
tx,
root,
graph_output_var,
tempvars={val: None for val, count in pass1.uses.items() if count > 1},
)
self.side_effects.codegen_save_tempvars(pass2)
pass2.foreach(stack_values)
self.side_effects.codegen_update_mutated(pass2)
output = []
if count_calls(self.graph) != 0 or len(pass2.graph_outputs) != 0:
output.extend(
self.compile_and_call_fx_graph(tx, pass2.graph_output_vars(), root)
)
if len(pass2.graph_outputs) != 0:
output.append(pass2.create_store(graph_output_var))
else:
output.append(create_instruction("POP_TOP"))
append_prefix_insts()
self.add_output_instructions(output + pass2.get_instructions())
# restore all the live local vars
self.add_output_instructions(
[PyCodegen(tx).create_store(var) for var in reversed(restore_vars)]
)
def compile_and_call_fx_graph(self, tx, rv, root):
"""
Generate code from self.graph and return the Instruction()s to
call that generated code.
"""
from .eval_frame import disable
assert isinstance(rv, list)
assert isinstance(root, FakeRootModule)
for output in rv:
self.guards.update(output.guards)
self.create_node(
"output", "output", (self.create_arg(tuple(x.as_proxy() for x in rv)),), {}
)
self.remove_unused_graphargs()
ncalls = count_calls(self.graph)
counters["stats"]["calls_captured"] += ncalls
# free a bit of memory
for node in self.graph.nodes:
if "example_value" in node.meta:
del node.meta["example_value"]
self.real_value_cache.clear()
gm = fx.GraphModule(root, self.graph)
gm.recompile()
gm.compile_subgraph_reason = self.compile_subgraph_reason
name = unique_id("__compiled_fn")
assert_no_fake_params_or_buffers(gm)
compiled_fn = self.call_user_compiler(gm)
compiled_fn = disable(compiled_fn)
counters["stats"]["unique_graphs"] += 1
self.install_global(name, compiled_fn)
graph_code_log.debug(format_graph_code(name, gm))
graph_tabular_log.debug(format_graph_tabular(name, gm))
cg = PyCodegen(tx)
cg.make_call_generated_code(name)
return cg.get_instructions()
@dynamo_timed(phase_name="backend_compile")
def call_user_compiler(self, gm: fx.GraphModule) -> CompiledFn:
tot = 0
placeholders = []
for node in gm.graph.nodes:
if node.op in ("call_function", "call_method", "call_module"):
tot += 1
if node.op == "placeholder":
placeholders.append(node)
torch._dynamo.utils.increment_op_count(tot)
assert len(placeholders) == len(self.graphargs)
for pl, arg in zip(placeholders, self.graphargs):
pl._dynamo_source = arg.source
gm._param_name_to_source = self.param_name_to_source
try:
name = (
self.compiler_fn.__name__
if hasattr(self.compiler_fn, "__name__")
else ""
)
_step_logger()(logging.INFO, f"calling compiler function {name}")
compiler_fn = self.compiler_fn
# WrapperBackend needs real inputs, for now, to verify correctness
if config.verify_correctness:
compiler_fn = WrapperBackend(compiler_fn, self.example_inputs())
# NOTE: [Real Tensors in Accuracy Evaluation]
#
# Today, tensors are passed to backends as fake at compile time. See the .fake_example_inputs()
# call to compiler_fn below. At runtime, backends use real tensors.
#
# This should be a strong invariant we hold across all backends,
# and generally, it is. However, for accuracy evaluation, we need real tensors at compile time,
# for now, due to the unfortunate setup described below.
#
# Due to the nature of how we invoke comparison as a backend in two different ways:
#
# (1) Less bad, but still worth rewriting, WrapperBackend above, which takes
# real inputs for its ctor. see the config.verify_correctness above.
#
# (2) More bad, and very worth rewriting, the minifier installs accuracy comparison as
# a true backend, and therefore needs to be compiled with real inputs. This is made trickier
# by the fact that the minifier will spawn new processes during minification. As such, we have
# created a global flag, MINIFIER_SPAWNED, that should be set IF AND ONLY IF this run was spawned
# as part of accuracy minification. This flag is not a contract, and ideally will not be here long.
#
# The longer term PoR is to:
# (A) Rewrite the minifier accuracy evaluation and verify_correctness code to share the same
# correctness and accuracy logic, so as not to have two different ways of doing the same thing.
#
# (B) Refactor minifier accuracy backend to do its comparison fully at runtime, so as not to need to
# pass real tensors to it at compile time.
is_top_level_minifying = (
config.repro_after is not None and config.repro_level == 4
)
if torch._dynamo.debug_utils.MINIFIER_SPAWNED or is_top_level_minifying:
# Disable the tracing context so we don't pick up the ambient
# fake tensor mode
with torch._guards.tracing(None):
compiled_fn = compiler_fn(gm, self.example_inputs())
elif config.DO_NOT_USE_legacy_non_fake_example_inputs:
compiled_fn = compiler_fn(gm, self.example_inputs())
else:
compiled_fn = compiler_fn(gm, self.fake_example_inputs())
_step_logger()(logging.INFO, f"done compiler function {name}")
assert callable(compiled_fn), "compiler_fn did not return callable"
except Exception as e:
raise BackendCompilerFailed(self.compiler_fn, e).with_traceback(
e.__traceback__
) from None
return compiled_fn
def fake_example_inputs(self) -> List[torch.Tensor]:
result = []
for arg in self.graphargs:
example = arg.get_fake_examples()
if example is not None:
result.extend(example)
else:
# Fallback, in case fake_tensor was not set
# Particularly for graph args that are not tensors
result.extend(arg.get_examples())
return result
def example_inputs(self) -> List[torch.Tensor]:
result = []
for arg in self.graphargs:
result.extend(arg.get_examples())
return result
def remove_unused_graphargs(self) -> None:
for node in reversed(list(self.graph.nodes)):
if len(list(node.users)) == 0:
if node.op == "get_attr":
self.remove_node(node)
elif node.op == "call_function" and node.target is operator.getitem:
self.remove_node(node)
expanded_graphargs = []
for arg in self.graphargs:
expanded_graphargs.extend([arg] * len(arg))
arg.uses = 0
for node, arg in zip(self.graph.nodes, expanded_graphargs):
assert node.op == "placeholder"
arg.uses += len(node.users)
for node, arg in list(zip(self.graph.nodes, expanded_graphargs)):
if arg.uses == 0:
log.debug(f"REMOVE UNUSED GRAPHARG {arg.source.name()}")
if "example_value" in node.meta:
del node.meta["example_value"]
self.remove_node(node)
self.real_value_cache.pop(node, None)
self.graphargs = [arg for arg in self.graphargs if arg.uses > 0]
def add_output_instructions(self, prefix: List[Instruction]) -> None:
"""
We call this on the creation of a new compiled subgraph that is inserted
before user code.
"""
self.output_instructions.extend(prefix)
self.should_exit = True
def install_global(self, name, value) -> None:
self.cleanups.append(CleanupHook.create(self.root_globals, name, value))
def cleanup(self) -> None:
# There is a reference cycle between tracer and OutputGraph, causing
# some of the tensor objects to be held alive for longer than necessary.
self.root_tx = None
# Note: generated fx graph will hold a reference to the nn_module,
# So depending on the backend they may not be released
self.nn_modules = None
self.param_name_to_source = None
# Cleanup graphargs
for graph_arg in self.graphargs:
graph_arg.erase()
for node in self.graph.nodes:
if "example_value" in node.meta:
del node.meta["example_value"]
self.real_value_cache.clear()
self.name_to_input.clear()
self.side_effects.keepalive = []
def create_proxy(
self,
kind,
target,
args,
kwargs,
name=None,
type_expr=None,
proxy_factory_fn=None,
):
rv = super().create_proxy(
kind, target, args, kwargs, name, type_expr, proxy_factory_fn
)
# append stack trace to fx node
tx = self.current_tx
nn_module_stack = tx.nn_module_stack
if nn_module_stack:
rv.node.meta["nn_module_stack"] = nn_module_stack.copy()
if kind in {"call_function", "call_method"}:
rv.node.meta["source_fn"] = target
elif kind == "call_module":
# For modules we store the class
rv.node.meta["source_fn"] = rv.node.meta["nn_module_stack"][target][1]
frame_summaries: List[traceback.FrameSummary] = []
while tx:
frame_summaries.append(tx.frame_summary())
tx = getattr(tx, "parent", None)
# Reverse the frame_summaries, such that the innermost frame is at the last
frame_summaries.reverse()
# official from_list stub doesn't have new-style type
msgs = traceback.StackSummary.from_list(frame_summaries).format() # type: ignore[arg-type]
rv.node.stack_trace = "".join(msgs)
return rv
def create_node(self, *args, **kwargs):
node = super().create_node(*args, **kwargs)
node.meta["creation_timestamp"] = self.timestamp
return node
# Note: we did not override erase_node since
# we call self.graph.erase_node elsewhere
def remove_node(self, node):
self.graph.erase_node(node)
self.name_to_input.pop(node.name, None)
|
[
"pytorchmergebot@users.noreply.github.com"
] |
pytorchmergebot@users.noreply.github.com
|
2c198ce9caa80d3848e36c87c340082b71dfce04
|
4d37628a27c5a50a70fa06f78be346223c37ade0
|
/jobs/migrations.py
|
88f61681fe41fbe0da93397de6760842a9ab4e57
|
[
"MIT"
] |
permissive
|
vinissimus/jobs
|
93dbc0fd2c755b63d685165996b27a260e5e367c
|
6e15749465f7da44e4dc0ad2f520ea6f7fbb67fe
|
refs/heads/master
| 2023-01-01T01:29:50.332671
| 2020-10-23T15:27:49
| 2020-10-23T15:27:49
| 281,219,465
| 7
| 0
|
MIT
| 2020-10-23T15:31:47
| 2020-07-20T20:30:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,978
|
py
|
from .utils import setup_stdout_logging
from pathlib import Path
import asyncio
import asyncpg
import glob
import logging
import sys
import typing
logger = logging.getLogger("jobs")
current = Path(__file__)
def get_migrations_path() -> Path:
return current.parent / "sql"
def get_available():
files: typing.Dict[int, str] = {}
path = str(get_migrations_path())
for item in glob.glob(f"{path}/*.up.sql"):
file = item.replace(path + "/", "")
version = int(file.split("_")[0])
files[version] = file
return files
def load_migration(name: str):
file = get_migrations_path() / name
with file.open() as f:
return f.read()
async def migrate(db: asyncpg.Connection = None):
migrations = get_available()
try:
current = await db.fetchval("SELECT migration FROM jobs.migrations")
except asyncpg.exceptions.UndefinedTableError:
current = 0
logger.info("Current migration %s", current)
applied = current
async with db.transaction():
for avail in sorted(list(migrations.keys())):
if avail > current:
logger.info("Appling migration %s", migrations[avail])
data = load_migration(migrations[avail])
await db.execute(data)
applied = avail
if applied != current:
logger.info("Update migrations history version: %s", applied)
await db.execute("update jobs.migrations set migration=$1", applied)
else:
logger.info("No migrations applied. Your db it's at latest version")
async def main(dsn: str):
db = await asyncpg.connect(dsn=dsn)
await migrate(db)
usage = """
run it with:
job-migrations postgresql://xxx:xxxx@localhost:5432/db
"""
def run():
if len(sys.argv) != 2:
print(usage)
sys.exit(1)
setup_stdout_logging()
dsn = sys.argv[1]
asyncio.run(main(dsn))
if __name__ == "__main__":
run()
|
[
"jordic@gmail.com"
] |
jordic@gmail.com
|
88cd8b0357888139ff786f741b6c8b37d83347f8
|
85ba5ef5e07b2afc1f043448fa99922f95c4051f
|
/newproject/mainapp/migrations/0005_auto_20201031_1120.py
|
11fa3ca10c01884771561faf1f41f726f0f765b8
|
[
"Apache-2.0"
] |
permissive
|
Floou/new-django-project
|
62367a7490b162d5f17360005a90f53dcf8ff810
|
b16af7b1d75ab32a5c18c406b965e13af4a465b5
|
refs/heads/main
| 2023-01-16T05:40:52.558224
| 2020-11-24T10:22:46
| 2020-11-24T10:22:46
| 305,136,987
| 0
| 0
|
Apache-2.0
| 2020-11-26T10:33:49
| 2020-10-18T15:45:38
|
Python
|
UTF-8
|
Python
| false
| false
| 3,144
|
py
|
# Generated by Django 2.2 on 2020-10-31 06:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0004_auto_20201030_2245'),
]
operations = [
migrations.RemoveField(
model_name='match',
name='tool',
),
migrations.AddField(
model_name='match',
name='rebounds',
field=models.IntegerField(default=0, verbose_name='Подборы'),
),
migrations.AlterField(
model_name='match',
name='block_shot',
field=models.IntegerField(default=0, verbose_name='Блокшоты'),
),
migrations.AlterField(
model_name='match',
name='broadcast',
field=models.IntegerField(default=0, verbose_name='Передачи'),
),
migrations.AlterField(
model_name='match',
name='interceptions',
field=models.IntegerField(default=0, verbose_name='Перехваты'),
),
migrations.AlterField(
model_name='match',
name='result_match',
field=models.IntegerField(default=0, verbose_name='Результат матча'),
),
migrations.AlterField(
model_name='match',
name='win_guest',
field=models.BooleanField(default=False, verbose_name='Победа гостей'),
),
migrations.AlterField(
model_name='match',
name='win_owner',
field=models.BooleanField(default=True, verbose_name='Победа хозяев'),
),
migrations.AlterField(
model_name='player',
name='name',
field=models.CharField(max_length=128, verbose_name='Имя'),
),
migrations.AlterField(
model_name='player',
name='number',
field=models.IntegerField(default=0, verbose_name='Номер'),
),
migrations.AlterField(
model_name='player',
name='position',
field=models.CharField(max_length=128, verbose_name='Позиция'),
),
migrations.AlterField(
model_name='player',
name='surname',
field=models.CharField(max_length=128, verbose_name='Фамилия'),
),
migrations.AlterField(
model_name='team',
name='defeat',
field=models.IntegerField(default=0, verbose_name='Поражения'),
),
migrations.AlterField(
model_name='team',
name='name_team',
field=models.CharField(max_length=128, verbose_name='Команда'),
),
migrations.AlterField(
model_name='team',
name='win',
field=models.IntegerField(default=0, verbose_name='Победы'),
),
migrations.AlterField(
model_name='trainer',
name='surname',
field=models.CharField(max_length=128, verbose_name='Фамилия'),
),
]
|
[
"holygalaxy444@gmail.com"
] |
holygalaxy444@gmail.com
|
eec8945dbcbb6005896f2f79e0af6bfef503477a
|
7d73f11e797e1cc5eaed09a49c0e5fa3fc505242
|
/sess/Kwant.py
|
650ca558dfa043b72ca17500dd48fffb51dd06ff
|
[] |
no_license
|
philetus/heks
|
2596a702804c12b7b07978f5aa3be60cf9fd66c5
|
6844c188b4602e59e11dfa240b0b31505bb32f97
|
refs/heads/master
| 2021-01-10T20:07:47.361066
| 2013-12-18T21:04:20
| 2013-12-18T21:04:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
from Gleff_s import Gleff_s
class Kwant:
"""container for one or more leeff nods representing a quantity
"""
def __init__(self, rent):
self.rent = rent
self.ked_s = []
self.esy_leeff = False
self.trgr_d = False
# default and acceptable children
self.trgr_ked = Gleff_s.d # dasyo
self.kan_hasy = set([Gleff_s.t, Gleff_s.d, Gleff_s.f])
|
[
"philetus@gmail.com"
] |
philetus@gmail.com
|
b77fb5f005efdd5c4f2ca65558fcaf8e4c8faa64
|
c09f257d720201e46f78cd2b6a8341cb6068b18f
|
/admtooCore/migrations/0010_userclass_group.py
|
7cbc3e80291e87303663c0961bb2ebfdff3c356b
|
[] |
no_license
|
sxpert/admlabo
|
53010e37c4af19dbed8e4e54ecf44314c8978ddd
|
08c331087febce50a564da6b27dcd778c1626353
|
refs/heads/master
| 2021-01-11T06:52:45.883845
| 2017-06-01T11:57:45
| 2017-06-01T11:57:45
| 71,981,404
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('admtooCore', '0009_newuser_os_lang'),
]
operations = [
migrations.AddField(
model_name='userclass',
name='group',
field=models.ForeignKey(blank=True, to='admtooCore.Group', null=True),
preserve_default=True,
),
]
|
[
"raphael.jacquot@obs.ujf-grenoble.fr"
] |
raphael.jacquot@obs.ujf-grenoble.fr
|
174155d348b686c6c3722940f778241c84ea97ad
|
1a824a3e0a1d9969686f9d7783ba6b652d813e89
|
/main.py
|
160116be674b7171301545c2f68b3400f48f5ef1
|
[
"MIT"
] |
permissive
|
murilocg/ActivityAnalysisOfCodeReview
|
282db5675fded29f96118cd605e6af86d28cfd6f
|
ad0f14bf93838e7ec4e6204a6361310c119c4bb0
|
refs/heads/main
| 2023-03-29T22:53:26.159007
| 2021-04-11T02:24:14
| 2021-04-11T02:24:14
| 354,158,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,316
|
py
|
import app.extract_data.process_repositories as process_repositories
import app.extract_data.process_pull_requests as process_pull_requests
import app.report.create_report as create_report
import app.filter_data.filter_pull_requests as filter_pull_requests
import os
def main(repo_first, repo_limit, pr_first, token):
if not os.path.exists('tmp'):
os.mkdir('tmp')
process_repositories.start(repo_first, repo_limit, token)
process_pull_requests.start(repo_limit, pr_first, token)
filter_pull_requests.start(repo_limit)
create_report.start(repo_limit)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Analyze code review activity across GitHub repositories')
parser.add_argument('--t', metavar='path', required=True, help='Authentication token for the GitHub API')
parser.add_argument('--rf', metavar='path', required=True, help='How many repositories retrieve per query')
parser.add_argument('--rl', metavar='path', required=True, help='Maximum number of repositories that will be analyzed')
parser.add_argument('--pf', metavar='path', required=True, help='How many pull requests retrieve per query')
args = parser.parse_args()
main(repo_first=int(args.rf.strip()), repo_limit=int(args.rl.strip()), pr_first=int(args.pf.strip()), token=args.t.strip())
|
[
"murilo.costa@dtidigital.com.br"
] |
murilo.costa@dtidigital.com.br
|
f80e0eb67f0790a4fdf274aeb6c73eb6e9eec19b
|
cdc996370837c00003296556afdb33e2f2fee884
|
/devel_scripts/launcher.py
|
5237995d7e1aaac822ae3a4d546bf7b117644b25
|
[] |
no_license
|
capitaneanu/borunte_robot
|
1d4f14aadb2aa9e041ea0fdccc85d424cf155fb2
|
85e8765cbfae879f297c5254733a2dea48daeba0
|
refs/heads/master
| 2022-09-15T03:09:14.062484
| 2020-05-20T17:39:01
| 2020-05-20T17:39:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,218
|
py
|
# -*- coding: utf-8 -*-
import os
import shlex
import sys
import time
TIMEOUT = 1.0
processes = [] # type: List[sh.RunningCommand]
class ProgramTerminated(Exception):
pass
def check_pid(pid):
""" Check For the existence of a unix pid. """
try:
os.kill(pid, 0)
except OSError:
return False
else:
return True
def start_process(command, line):
"""
:type command: sh.Command
"""
processes.append(
command(shlex.split(line), _out=sys.stdout, _err=sys.stderr, _bg=True)
)
time.sleep(TIMEOUT)
def terminate_processes():
for process in processes:
if process is None:
continue
try:
process.terminate()
except OSError:
pass
process.wait()
def check_processes():
for process in processes:
if process is None:
continue
if not check_pid(process.pid):
raise ProgramTerminated()
def wait_loop():
try:
while True:
check_processes()
time.sleep(TIMEOUT)
except KeyboardInterrupt:
pass
except ProgramTerminated:
print('A program terminated, stopping other processes.')
|
[
"mail@roessler.systems"
] |
mail@roessler.systems
|
1fcab9c53a7ede0ecb7dfb6ee6e2ec6b84a8d078
|
0f44be0680ccbc2f8f96abfe97f5d1a094cd6e98
|
/erokov.py
|
494d719d07af47089ded91dc77709f24a452c75e
|
[] |
no_license
|
kimihito/erokov
|
f75bf3199531af17a700dac854f00df19b59d3c1
|
32390edbade3d84f8be87367654ff1f6c229ca62
|
refs/heads/master
| 2016-09-05T23:20:41.926046
| 2012-08-27T10:33:11
| 2012-08-27T10:33:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,019
|
py
|
#!/usr/bin/env python
# coding: utf-8
# A script that generates AV (adult video) titles with a Markov chain
import random
import MeCab
def wakati(text):
t = MeCab.Tagger("-Owakati")
m = t.parse(text)
result = m.split(" ")
return result
if __name__ == "__main__":
filename = "title_sort_uniq.txt"
wordlist = []
src = open(filename,"r").read().split("\n")
for tmpsrc in src:
wordlist += wakati(tmpsrc)
erokov = {}
w1 = ""
w2 = ""
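    # Build a second-order Markov chain: each (w1, w2) bigram maps to the list of
    # words observed to follow it in the source titles.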
for word in wordlist:
if w1 and w2:
if (w1,w2) not in erokov:
erokov[(w1,w2)] = []
erokov[(w1,w2)].append(word)
w1,w2 = w2, word
count = 0
sentence = ""
w1,w2 = random.choice(erokov.keys())
while count < 11:
try:
tmp = random.choice(erokov[(w1,w2)])
sentence += tmp
w1,w2 = w2, tmp
count += 1
        except KeyError:
            # Dead-end bigram with no recorded continuation: stop instead of looping forever.
            print "Error!"
            break
print sentence
|
[
"tatsurotamashiro@gmail.com"
] |
tatsurotamashiro@gmail.com
|
0b95e3e1bbb3f55d15b1c6c094ba74b27204d4e7
|
1a5bdc323b1d13022cabdfe2519207b95d152448
|
/h_computation.py
|
1fcbb6e7a29abb6a1d305c3b39926951a763200f
|
[] |
no_license
|
sampathgunasena/hydrogen_computation
|
04467b83720970afb4aaad986b7b67e2104cdcea
|
b45dea4d32942a8f016f6a10086a540fcc26d371
|
refs/heads/master
| 2023-01-24T09:58:16.361907
| 2020-12-11T16:32:14
| 2020-12-11T16:32:14
| 320,627,897
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,160
|
py
|
########################################################################################
#
#script that solves the Schrodinger equation via the generalised eigenvalue decomposition method
#
#######################################################################################
import numpy as np
from scipy import linalg
import math
from matplotlib import pyplot as plt
# We will use atomic units throughout the code
alpha = np.array([13.00773, 1.962079, 0.444529, 0.1219492])
#alpha=np.random.rand(4)
#creating multi-dim arrays to hold values for overlap, kinetic, potential matrices
S = np.zeros((4,4))
T = np.zeros((4,4))
V = np.zeros((4,4))
#Compute the overlap, kinetic and potential energy matrices
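# For s-type Gaussians exp(-alpha_i * r^2) these matrix elements have closed forms,
# which is exactly what the loop below implements:
#   S_ij = (pi / (alpha_i + alpha_j))^(3/2)
#   T_ij = 3 * alpha_i * alpha_j * pi^(3/2) / (alpha_i + alpha_j)^(5/2)
#   V_ij = -2 * pi / (alpha_i + alpha_j)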
for i in range(len(alpha)):
for j in range(len(alpha)):
S[i,j] = (math.pi/(alpha[i]+alpha[j]))**(3./2.)
T[i,j] = 3.*(alpha[i]*alpha[j]*math.pi**(3./2.))/(alpha[i]+alpha[j])**(5./2.)
V[i,j] = -2.*math.pi/(alpha[i]+alpha[j])
#creating the Hamiltonian matrix
H = T + V
#Solve the generalized eigenvalue problem H c = E S c
# val and vec are arrays with the eigenvalues and eigenvectors
# The eigenvector corresponding to val[i] is the column vec[:,i].
# eigenvalue => energy, corresponding eigenvector => c coefficients
val, vec = linalg.eig(H,S)
print("Eigenvalues: ", val.real)
print("Eigenvectors: ", vec.real)
# Print the ground state energy
print("Ground State Energy: ", val.real.min())
# Index of the ground state eigenvalue and eigenvector
imin = val.real.argmin()
# Ground state eigenvector, i.e. the gaussian coefficients of the ground state wavefunction
vec_ground = np.atleast_1d(vec[:,imin])
print("Ground State Eigenvector: ", vec_ground)
#Normalize the ground state wavefunction.
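# In this non-orthogonal basis the norm is <psi|psi> = sum_ij c_i * c_j * S_ij,
# which the double loop below accumulates before rescaling the coefficients.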
norm = 0.0
for i in range(len(alpha)):
for j in range(len(alpha)):
norm = norm + vec_ground[i] * vec_ground[j] * S[i,j]
vec_ground = vec_ground / math.sqrt(norm)
print("Normalized eigen vector: ", vec_ground)
#Plot the numerical wavefunction, the exact solution, and the individual Gaussians.
x = np.linspace(0, 5, 100)
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
ax1.plot(x, 1/math.sqrt(np.pi)*np.exp(-x), linewidth=2, color='red', label='Exact')
ax1.plot(x, abs(vec_ground[0]*np.exp(-1.*alpha[0]*x*x) + vec_ground[1]*np.exp(-1.*alpha[1]*x*x) + vec_ground[2]*np.exp(-1.*alpha[2]*x*x) + vec_ground[3]*np.exp(-1.*alpha[3]*x*x)), linewidth=2, color='blue', label='Computational')
ax1.plot(x, abs(vec_ground[0]*np.exp(-1.*alpha[0]*x*x)), linewidth=1, color='black', label=r"$c_1 exp(-\alpha_1r^2)$")
ax1.plot(x, abs(vec_ground[1]*np.exp(-1.*alpha[1]*x*x)), linewidth=1, color='black', label=r"$c_2 exp(-\alpha_2r^2)$")
ax1.plot(x, abs(vec_ground[2]*np.exp(-1.*alpha[2]*x*x)), linewidth=1, color='black', label=r"$c_3 exp(-\alpha_3r^2)$")
ax1.plot(x, abs(vec_ground[3]*np.exp(-1.*alpha[3]*x*x)), linewidth=1, color='black', label=r"$c_4 exp(-\alpha_4r^2)$")
plt.title('Ground State Energy: %.6f hartree' % val.real.min())
plt.xlabel('r (bohr)')
plt.ylabel('1/bohr^3')
plt.legend(loc='upper right')
ax1.text(0.18, 0.6, r"$\Psi = \sum_i c_i exp(-\alpha_ir^2)$", transform=ax1.transAxes, fontsize=14,
verticalalignment='top')
plt.show()
|
[
"sampathprasadg@gmail.com"
] |
sampathprasadg@gmail.com
|
624e6493ba366cde8a495ba0effb21374417bbd1
|
4d0213e588149b9fa86fbe35faea8657052d9254
|
/setup.py
|
27b1a3f4ad3e7c71aeb236803df30c35aed1ff6d
|
[
"Apache-2.0"
] |
permissive
|
Pyligent/gen-efficientnet-pytorch
|
1e492dec87fa33458e452472c65ed0f7afd1a876
|
b3bc163478737924f508978a6f0c96e07046e025
|
refs/heads/master
| 2020-12-14T15:51:36.930259
| 2019-10-30T22:31:10
| 2019-10-30T22:31:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,688
|
py
|
""" Setup
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
exec(open('geffnet/version.py').read())
setup(
name='geffnet',
version=__version__,
description='(Generic) EfficientNets for PyTorch',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/rwightman/gen-efficientnet-pytorch',
author='Ross Wightman',
author_email='hello@rwightman.com',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
# Note that this is a string of words separated by whitespace, not a list.
keywords='pytorch pretrained models efficientnet mixnet mobilenetv3 mnasnet',
packages=find_packages(exclude=['data']),
install_requires=['torch >= 1.1', 'torchvision'],
python_requires='>=3.6',
)
|
[
"rwightman@gmail.com"
] |
rwightman@gmail.com
|
1a936a9e7953688d553a7624af7c366260d7accf
|
e9946f265bf832d94f4af6e2315a51a2bba30e30
|
/node_modules/mongoose/node_modules/mongodb/node_modules/kerberos/build/config.gypi
|
3fcf9d17ffc9169de57e8f73a00108318176f919
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
scharton/swagwise-skeleton
|
7882806bc0db1f548c02680e570391f9824a95e0
|
1d50baab1215825b39980f4c4260ed028ca20cff
|
refs/heads/master
| 2021-01-21T00:35:51.575782
| 2014-07-31T04:07:05
| 2014-07-31T04:07:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,031
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 1,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"nodedir": "/var/root/.node-gyp/0.10.29",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"shell": "/bin/sh",
"parseable": "",
"shrinkwrap": "true",
"email": "",
"init_license": "ISC",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"npat": "",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"spin": "true",
"cache_lock_retries": "10",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/var/root/.npm-init.js",
"userconfig": "/var/root/.npmrc",
"node_version": "v0.10.29",
"user": "",
"save": "true",
"editor": "vi",
"tag": "latest",
"global": "",
"optional": "true",
"username": "",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/var/root/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/1.4.14 node/v0.10.29 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"umask": "18",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/root/tmp",
"unsafe_perm": "",
"link": "",
"prefix": "/usr/local"
}
}
|
[
"scharton@gmail.com"
] |
scharton@gmail.com
|
85f5723a1b5f6ace5827d42b8a4f999504cb1d52
|
92e3840c3b799a8dffffa2aa5fc3ee9c4c3e3bea
|
/src/apps/video_tags/classification/en_vtag_process.py
|
58b8abccecb8387790fdc8ab9f5d4b6aab2f13de
|
[] |
no_license
|
ZouJoshua/nlp_server
|
a5c1de32b1fcce769fd70af71425897f0dd03abf
|
ef53a3dc5856aff5e6ba8ad449f0b21962acbd80
|
refs/heads/master
| 2022-12-05T14:51:45.816542
| 2019-09-19T10:32:54
| 2019-09-19T10:32:54
| 173,694,625
| 0
| 0
| null | 2022-11-21T21:38:00
| 2019-03-04T07:24:12
|
Python
|
UTF-8
|
Python
| false
| false
| 19,874
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author : Joshua
@Time : 2019/3/4 12:26
@File : predict.py
@Desc    : Indian English video tag processing
"""
from pyquery import PyQuery
import re
import logging
import json
import requests
from nltk import ne_chunk, pos_tag, word_tokenize
from nltk.tree import Tree
class EnProcess(object):
def __init__(self, vtag2kwline, fix2list, word2fix, kw2vtag, stopwords, logger=None):
self.fix2list = fix2list
self.vtag2kwline = vtag2kwline
self.kw2vtag = kw2vtag
self.stopwords = stopwords
self.word2fix = word2fix
if logger:
self.log = logger
else:
self.log = logging.getLogger("nlp_v_tags_process")
self.log.setLevel(logging.INFO)
def get_cleaned_tags(self, title, taglist):
self.log.info("Processing en video tag of taglist:【{}】".format(taglist))
newtaglist = []
        # keep tags with tf >= 5
resultdict = {}
oldtagdict = {}
title_lower = title.lower()
tmp_title = ''
tmp_title2 = ''
old_tagdeleteset = set()
for vtag in taglist:
vtag = vtag.lower()
token = vtag.split(' ')
if len(token) == 1:
tmp_title2 += vtag + ' '
if title_lower.find(tmp_title2.strip()) >= 0:
tmp_title = tmp_title2
old_tagdeleteset.add(vtag)
continue
else:
break
else:
break
taglist2 = []
if tmp_title != '' and len(tmp_title.strip().split(' ')) >= 2:
# print(title_lower)
# print(tmp_title.strip())
for vtag in taglist:
vtag = vtag.lower()
if vtag not in old_tagdeleteset:
taglist2.append(vtag)
else:
taglist2 = taglist
taglist = taglist2
for vtag in taglist:
vtag = vtag.lower()
if vtag.endswith('video') or vtag.endswith('song') or vtag.endswith('movie') or vtag.endswith('show'):
vtag = vtag + 's'
if vtag not in oldtagdict:
oldtagdict[vtag] = 1
else:
oldtagdict[vtag] += 1
vtag2, cresultdict, details = self.trim_vtag(vtag)
# print(title)
# print(vtag+'==>'+'#'.join(vtag2))
# for debug_word in details:
# print('\t'+debug_word)
for k, v in cresultdict.items():
if k not in resultdict:
resultdict[k] = v
else:
resultdict[k].extend(v)
newtaglist.extend(vtag2)
# newtaglist process
x2list = []
x2dict = {}
mergetaglist = []
mergetagdict = {}
tmp_title = tmp_title.strip()
if tmp_title != '' and len(tmp_title.split(' ')) >= 2:
if tmp_title not in mergetagdict:
mergetaglist.append((tmp_title, 30, 'onegramemerge'))
mergetagdict[tmp_title] = 'onegramemerge'
for ntag in newtaglist:
ntag = ntag.strip()
if ntag != '' and ntag not in self.fix2list:
if ntag not in x2dict:
x2dict[ntag] = 1
else:
x2dict[ntag] += 1
x2list.append(ntag)
# step0:title split
pattern1 = r'(\||\-\s{1}|\s{1}\-|\(|\)|\?|!|–\s{1}|\s{1}–|│|' \
r'\"|\'\s{1}|\s{1}\'|‘\s{1}|\s{1}‘|’\s{1}|\s{1}’|:|\s{1}\[|\]\s{1}|~|\/\s{1}|\s{1}\/|🔴|•)'
res = re.compile(pattern1, flags=0)
title2 = res.sub("#", title.lower())
for trunk in title2.split('#'):
trunk = trunk.strip()
if trunk == '': continue
ntaglist = []
foundit = 0
if trunk in self.vtag2kwline:
if self.vtag2kwline[trunk][4] == 0 and self.vtag2kwline[trunk][0] >= 2:
ntaglist.append(trunk)
foundit = 1
if foundit == 0 and trunk in self.kw2vtag:
tagset = self.kw2vtag[trunk]
for ntag in tagset:
if ntag in self.vtag2kwline:
if self.vtag2kwline[ntag][4] == 0 and self.vtag2kwline[ntag][0] >= 2:
ntaglist.append(ntag)
for xtag in ntaglist:
if xtag not in mergetagdict:
mergetaglist.append((xtag, 25, 'trunk'))
mergetagdict[xtag] = 'trunk'
# if trunk in title_split_tag and trunk not in mergetagdict:
# trunkres = title_split_tag[trunk]
# mergetaglist.append((trunkres, 25, 'trunk'))
# mergetagdict[trunkres] = 'trunk'
# step1:
for k, v in x2dict.items():
if v >= 2 and k not in mergetagdict:
mergetaglist.append((k, 10 * v, 'tf>=2'))
mergetagdict[k] = 'tf>=2'
# step2:
step2_dict = {}
for x in x2list:
for y in x2list:
if len(x) < len(y) and x in oldtagdict and (y.startswith(x + ' ') or y.endswith(' ' + x)):
if x not in step2_dict:
step2_dict[x] = 1 + len(x.split(' '))
else:
step2_dict[x] += 1 + len(x.split(' '))
sortedtstep2_dict = sorted(step2_dict.items(), key=lambda k: k[1], reverse=True)
for k, v in sortedtstep2_dict:
if v >= 3:
if k not in mergetagdict:
mergetagdict[k] = 'fix'
mergetaglist.append((k, v, 'fix'))
        # step3: the rest of x2list
step3dict = {}
for k in x2list:
ff = 0
if k in self.vtag2kwline:
ff = 1
elif title.lower().strip().startswith(k) or title.lower().strip().endswith(k):
ff = 1
else:
pass
if ff == 0: continue
if k not in step3dict:
step3dict[k] = ff
else:
step3dict[k] += ff
sortedtstep3_dict = sorted(step3dict.items(), key=lambda k: k[1], reverse=True)
for k, v in sortedtstep3_dict:
if k not in mergetagdict:
mergetagdict[k] = 'x2'
if len(mergetaglist) < 7:
mergetaglist.append((k, v, 'x2'))
# step4: type period lang
for k, vlist in resultdict.items():
max_dict = {}
for v in vlist:
v = v.strip()
if v not in max_dict:
max_dict[v] = 1
else:
max_dict[v] += 1
sortedmax_dict = sorted(max_dict.items(), key=lambda k: k[1], reverse=True)
if k == 'period':
for kk, vv in sortedmax_dict:
if kk not in ['best', 'top', 'latest', 'updates', 'today', 'new']:
ptag = kk
if ptag != '' and ptag not in mergetagdict:
mergetagdict[ptag] = 'ptag'
mergetaglist.append(('p_' + ptag, 0.5, 'ptag'))
break
if k == 'lang':
for kk, vv in sortedmax_dict:
ltag = kk
if ltag != '' and ltag not in mergetagdict:
mergetagdict[ltag] = 'ltag'
mergetaglist.append(('l_' + ltag, 0.5, 'ltag'))
break
if k == 'type':
if len(sortedmax_dict) > 0:
cc_tag = sortedmax_dict[0][0]
if cc_tag != '' and cc_tag not in mergetagdict:
mergetagdict[cc_tag] = 'ttag'
mergetaglist.append((cc_tag, 0.5, 'ttag'))
for kk, vv in sortedmax_dict:
if len(kk.split(' ')) >= 2:
if kk != '' and kk not in mergetagdict:
mergetagdict[kk] = 'ttag'
mergetaglist.append((kk, 0.5, 'ttag'))
return [item[0] for item in mergetaglist]
def extract_tag(self, title, text):
self.log.info("extracting tags from title and text...")
mergetaglist = []
mergetagdict = {}
lasttaglist = []
pattern1 = r'(\||\-\s{1}|\s{1}\-|\(|\)|\?|!|–\s{1}|\s{1}–|│|' \
r'\"|\'\s{1}|\s{1}\'|‘\s{1}|\s{1}‘|’\s{1}|\s{1}’|:|\s{1}\[|\]\s{1}|~|\/\s{1}|\s{1}\/|🔴|•)'
res = re.compile(pattern1, flags=0)
title2 = res.sub("#", title)
title2_lower = title2.lower()
if text.startswith(title):
text = text[len(title):]
text2 = text.replace('\\n', ' #')
pattern_http = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
pattern_replace = re.compile(r'(▬|=)')
text2 = pattern_http.sub("#", text2)
text2 = pattern_replace.sub("#", text2)
text2_lower = text2.lower()
text2_ner_list = self.get_continuous_chunks(text2)
debug_list1 = []
debug_list2 = []
title_nerlist = []
for title_trunk in title2.split('#'):
title_trunk = title_trunk.strip()
title_trunk_lower = title_trunk.lower()
if title_trunk == '': continue
if text2_lower.find(title_trunk_lower) >= 0 and title_trunk != title2:
debug_list1.append(title_trunk_lower)
if title_trunk_lower in self.vtag2kwline:
if title_trunk_lower not in mergetagdict:
mergetaglist.append([title_trunk_lower, 'title_trunk_vtag'])
mergetagdict[title_trunk_lower] = None
elif title_trunk_lower in self.kw2vtag:
for vtag in self.kw2vtag[title_trunk_lower]:
if vtag not in mergetagdict:
mergetaglist.append([title_trunk_lower, 'title_trunk_kw'])
mergetagdict[title_trunk_lower] = None
# debug_list2.append(title_trunk_lower)
title_trunk_list = self.get_continuous_chunks(title_trunk)
title_nerlist.extend(title_trunk_list)
tfdict = {}
for trunk in title_nerlist:
trunk_lower = trunk.lower()
if trunk_lower == '': continue
if trunk_lower in self.stopwords: continue
n = len(trunk_lower.split(' '))
x = 1.5
if n >= 2:
x = 2
if trunk_lower not in tfdict:
tfdict[trunk_lower] = x
else:
tfdict[trunk_lower] += x
for trunk in text2_ner_list:
trunk_lower = trunk.lower()
if trunk_lower in self.stopwords: continue
if trunk_lower == '': continue
if trunk_lower not in tfdict:
tfdict[trunk_lower] = 1
else:
tfdict[trunk_lower] += 1
sorted_tfdict = sorted(tfdict.items(), key=lambda k: k[1], reverse=True)
sorted_tfdict2 = [x for x in sorted_tfdict if x[1] >= 2]
for c_tag, c_tf in sorted_tfdict2:
if c_tag in self.vtag2kwline or len(c_tag.split(' ')) >= 2:
if c_tag not in mergetagdict:
mergetaglist.append([c_tag, 'tf_vtag'])
mergetagdict[c_tag] = None
for i, (tag, reason) in enumerate(mergetaglist):
if i >= 5: break
lasttaglist.append(tag)
return lasttaglist
def trim_vtag(self, inputline):
# inputline = 'latest news 2019 news 2018'
inputraw = inputline
resultdict = {}
details = []
        # 1. pre-cleaning
inputline = inputline.replace('#', ' ')
inputtoken = []
for w in inputline.split(' '):
w = w.strip()
if w != '':
inputtoken.append(w)
inputline = ' '.join(inputtoken)
details.append(inputraw + '0==>' + inputline)
        # 2. pre-check: is it in vtag2kwline or not
c_tag = []
if inputline in self.vtag2kwline:
c_tag = [inputline]
elif inputline in self.kw2vtag:
c_tag = list(self.kw2vtag[inputline])
if len(c_tag) >= 1:
details.append(inputline + '1==>' + '#'.join(c_tag))
return c_tag, resultdict, details
else:
pass
        # phrases with fewer than 2 tokens are not processed; return directly
if len(inputtoken) < 2:
details.append(inputline + '2==>' + inputline)
return [inputline], resultdict, details
else:
pass
        # 3. trim1 process: period trim - strip time-related words and modifying adverbials
pattern_period = r'^top\s{1}\d.\s{1}|^best|^best of|^hit|2015|2016|2017|2018|2019|latest|updates|today| new$|new released|^new '
res_period = re.compile(pattern_period, flags=0)
res1 = res_period.sub('', inputline.strip())
res1_tokens = []
for w in res1.split(' '):
w = w.strip()
if w != '':
res1_tokens.append(w)
res1 = ' '.join(res1_tokens)
res1findall = res_period.findall(inputline.strip())
resultdict['period'] = res1findall
details.append(inputline + '3==>' + res1)
        # 3. pre-check: is it in vtag2kwline or not
c_tag = []
if res1 in self.vtag2kwline:
c_tag = [res1]
elif res1 in self.kw2vtag:
c_tag = list(self.kw2vtag[res1])
if len(c_tag) >= 1:
details.append(inputline + '4==>' + '#'.join(c_tag))
return c_tag, resultdict, details
else:
pass
        # phrases with fewer than 2 tokens are not processed; return directly
if len(res1_tokens) < 2:
details.append(inputline + '5==>' + inputline)
return [inputline], resultdict, details
else:
pass
# 4.trim2 process: language trim
res1 = res1.replace('in english', 'english')
res1 = res1.replace('in hindi', 'hindi')
res1 = res1.replace('in hind', 'hindi')
res1 = res1.replace('in hinid', 'hindi')
res1 = res1.replace('in telugu', 'telugu')
res1 = res1.replace('in tamil', 'tamil')
res1 = res1.replace('in malayalam', 'malayalam')
res1 = res1.replace('in bhojpuri', 'bhojpuri')
res1 = res1.replace('in punjabi', 'punjabi')
res1 = res1.replace('bangla', 'bengali')
res1 = res1.replace('in bengali', 'bengali')
res1 = res1.replace('in marathi', 'marathi')
res1 = res1.replace('in kannada', 'kannada')
res1 = res1.replace('in gujarati', 'gujarati')
res1 = res1.replace('in rajasthani', 'rajasthani')
res1 = res1.replace('haryanavi', 'haryanvi')
res1 = res1.replace('in haryanvi', 'haryanvi')
res1 = res1.replace('in assamese', 'assamese')
res1 = res1.replace('in bodo', 'bodo')
res1 = res1.replace('in dogri', 'dogri')
res1 = res1.replace('in kashmiri', 'kashmiri')
res1 = res1.replace('in konkani', 'konkani')
res1 = res1.replace('in maithili', 'maithili')
res1 = res1.replace('in manipuri', 'manipuri')
res1 = res1.replace('in nepali', 'nepali')
res1 = res1.replace('in odia', 'odia')
res1 = res1.replace('in sanskrit', 'sanskrit')
res1 = res1.replace('in santali', 'santali')
res1 = res1.replace('in sindhi', 'sindhi')
res1 = res1.replace('in urdu', 'urdu')
        # 4. pre-check: is it in vtag2kwline or not
c_tag = []
if res1 in self.vtag2kwline:
c_tag = [res1]
elif res1 in self.kw2vtag:
c_tag = list(self.kw2vtag[res1])
if len(c_tag) >= 1:
details.append(res1 + '6==>' + '#'.join(c_tag))
return c_tag, resultdict, details
else:
pass
        # phrases with fewer than 2 tokens are not processed; return directly
if len(res1.split(' ')) < 2:
details.append(res1 + '7==>' + res1)
return [res1], resultdict, details
else:
pass
pattern_lang = r'english|hindi|telugu|tamil|malayalam|' \
r'bhojpuri|punjabi|bengali|marathi|kannada|' \
r'gujarati|rajasthani|haryanvi|assamese|bodo|' \
r'dogri|kashmiri|konkani|maithili|manipuri|nepali|' \
r'odia|sanskrit|santali|sindhi|urdu|haryanavi'
res_lang = re.compile(pattern_lang, flags=0)
res2 = res_lang.sub('', res1.strip())
res2_tokens = []
for w in res2.split(' '):
w = w.strip()
if w != '':
res2_tokens.append(w)
res2 = ' '.join(res2_tokens)
if res2.endswith('video') or res2.endswith('song') or res2.endswith('movie') or res2.endswith('show'):
res2 = res2 + 's'
res2findall = res_lang.findall(res1.strip())
resultdict['lang'] = res2findall
details.append(res1 + '8==>' + res2)
        # 4. pre-check: is it in vtag2kwline or not
c_tag = []
if res2 in self.vtag2kwline:
c_tag = [res2]
elif res2 in self.kw2vtag:
c_tag = list(self.kw2vtag[res2])
if len(c_tag) > 1:
details.append(res2 + '9==>' + '#'.join(c_tag))
return c_tag, resultdict, details
else:
pass
        # phrases with two or fewer tokens are not processed; return directly
if len(res2_tokens) < 2:
details.append(res1 + '10==>' + res1)
return [res1], resultdict, details
else:
pass
# 5.trim3 process: type
# trim2: type
word = res2
word2 = word
resultdict['type'] = []
for k, v in self.word2fix.items():
if word.find(k + ' ') >= 0 or word.find(' ' + k) >= 0 or word == k:
word2 = word.replace(k, '').strip()
resultdict['type'].append(k)
word = word2
if word2 in self.word2fix:
word2 = ''
res3_tokens = []
for x in word2.split(' '):
if x != '' and x != 's':
res3_tokens.append(x)
res3 = ' '.join(res3_tokens)
        # 5. pre-check: is it in vtag2kwline or not
c_tag = []
if res3 in self.vtag2kwline:
c_tag = [res3]
elif res3 in self.kw2vtag:
c_tag = list(self.kw2vtag[res3])
if len(c_tag) > 1:
details.append(res3 + '11==>' + '#'.join(c_tag))
return c_tag, resultdict, details
else:
pass
        # phrases with two or fewer tokens are not processed; return directly
if len(res3_tokens) < 2:
details.append(res2 + '12==>' + res2)
return [res2], resultdict, details
else:
pass
details.append(res3 + '13==>' + res3)
return [res3], resultdict, details
def get_continuous_chunks(self, text):
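        # Collapse NLTK's ne_chunk parse tree into a flat list: contiguous named-entity
        # leaves are joined into single strings, while other tokens are kept as-is.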
chunked = ne_chunk(pos_tag(word_tokenize(text)))
continuous_chunk = []
current_chunk = []
for i in chunked:
if type(i) == Tree:
current_chunk.append(" ".join([token for token, pos in i.leaves()]))
elif current_chunk:
continuous_chunk.append(" ".join(current_chunk))
continuous_chunk.append(i[0])
current_chunk = []
else:
continuous_chunk.append(i[0])
continue
if current_chunk:
continuous_chunk.append(" ".join(current_chunk))
current_chunk = []
return continuous_chunk
|
[
"joshua_zou@163.com"
] |
joshua_zou@163.com
|
e3fccd35bcac0946969cbb7de0a9f8057ab2c8ee
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/BsPlanInviteConfig.py
|
f6e1ed05d6ca34d93fd5c47f5c13a9c375717bbe
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,362
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class BsPlanInviteConfig(object):
def __init__(self):
self._end_time = None
self._start_time = None
@property
def end_time(self):
return self._end_time
@end_time.setter
def end_time(self, value):
self._end_time = value
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self, value):
self._start_time = value
def to_alipay_dict(self):
params = dict()
if self.end_time:
if hasattr(self.end_time, 'to_alipay_dict'):
params['end_time'] = self.end_time.to_alipay_dict()
else:
params['end_time'] = self.end_time
if self.start_time:
if hasattr(self.start_time, 'to_alipay_dict'):
params['start_time'] = self.start_time.to_alipay_dict()
else:
params['start_time'] = self.start_time
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = BsPlanInviteConfig()
if 'end_time' in d:
o.end_time = d['end_time']
if 'start_time' in d:
o.start_time = d['start_time']
return o
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
44a5e08d9cac25512e3bde356d613603a2b8634d
|
efbaa5ed71ff8d18eb4634554ade11388eab354d
|
/pytenable-tio-agent-search.py
|
0e84276a17b3e5f4739c1ae78b1ea458ef091c5c
|
[] |
no_license
|
ThisTooShallXSS/tio_automation
|
9595f19450f97e13c125a8f620f9b52cd9c5497f
|
e1a607d1c7eea2b58b258cd37561a8a963187918
|
refs/heads/master
| 2022-09-18T12:59:31.719196
| 2022-09-07T15:36:59
| 2022-09-07T15:36:59
| 127,326,575
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,020
|
py
|
""" --------------------------------------------------------------------------------------------------------------------
PyTenable - Tenable.io Search Agent Details by Agent Name
Before you can run this, you must generate an API Key that can be used for authentication.
You can generate the API key using these steps: https://docs.tenable.com/tenableio/Content/Settings/GenerateAPIKey.htm
To install pyTenable:
pip3 install pytenable
Usage:
$ python3 pytenable-tio-agent-search.py
$ python3 pytenable-tio-agent-search.py <AgentName>
$ python3 pytenable-tio-agent-search.py Garys-Laptop
# ------------------------------------------------------------------------------------------------------------------ """
from tenable.io import TenableIO
tio = TenableIO('REPLACE_THIS_WITH_YOUR_ACCESS_KEY', 'REPLACE_THIS_WITH_YOUR_SECRET_KEY')
# Uncomment the 2 logging lines below to see ERROR or DEBUG logs in stdout while running the script.
#
#import logging
#logging.basicConfig(level=logging.ERROR)
""" --------------------------------------------------------------------------------------------------------------------
Function: find_agent
Input: The hostname or full agent name as a str()
Output: The resulting agent.iterator from pytenable for any matching agents to the name filter.
Notes: This func assumes you've only matched a single agent for the name you provided. Needs to be
expanded upon if you know there is likely to be multiple agents for a given name.
# ------------------------------------------------------------------------------------------------------------------ """
def find_agent(name):
    found_agent = 0
    for agent in tio.agents.list(('name', 'match', name)):
        found_agent = agent
    return found_agent  # 0 is returned when no agent matched the name filter
""" --------------------------------------------------------------------------------------------------------------------
Function: user_input_agent_name
Input: User input from input() in CLI, or receives "agent_name" as an argument of the script.
Output: Returns a str() of the desired agent name we're looking for.
Notes: This doesn't verify/regex to ensure proper formatting or naming conventions. Just makes sure its not null.
# ------------------------------------------------------------------------------------------------------------------ """
def user_input_agent_name():
import sys
agent_name = ""
try:
agent_name = sys.argv[1] # Try to use the arg passed in with the script.
except:
agent_name = input("Please provide your Agent name: ") # Allow user input if no args were passed in.
if len(agent_name) == 0: # If the user hits enter/doesn't provide input, we will bail after returning an invalid name.
print("This is an invalid agent name!")
return agent_name
""" --------------------------------------------------------------------------------------------------------------------
Function: get_uuid_from_tenableid
Input: Receives the cleaned up tenable_id, which is not the same as a TIO asset UUID. Uses the agent name to
verify that we found the right agent based on the tenable_id.
Output: None, this is the function that outputs to CLI the information about the agent asset.
Notes: There could be more than one asset matched with the filter for the tenable_id. This is not accounted for.
# ------------------------------------------------------------------------------------------------------------------ """
def get_uuid_from_tenableid(tenable_id, agent_name):
agents = tio.v3.explore.assets.search_host(
filter={
"and": [
{
"property": "tenable_id",
"operator": "eq",
"value": [
tenable_id
]
},
{
"property": "types",
"operator": "eq",
"value": "host"
}
]
},
limit=2, sort=[('last_observed', 'asc')])
return agents
""" --------------------------------------------------------------------------------------------------------------------
Function: search_for_agent_info
Input: None, this is our entry point.
Output: This main function outputs the data in CLI when a matching agent is found.
Notes:
# ------------------------------------------------------------------------------------------------------------------ """
def search_for_agent_info():
from pprint import pprint
tenable_id = 0 # Set this to zero, so we know if the search has failed.
agent_name = user_input_agent_name() # Retrieve the agent_name from the CLI or argv[1]
if len(agent_name) >= 3: # Adding some basic length check to ensure the name is valid before proceeding.
agent_info = find_agent(agent_name)
tenable_id = agent_info['uuid'] # The find_agent() func only returns a portion of the data we need about the agent.
# We store the "uuid" of the agent, which is really the tenable_id with hyphens.
    if tenable_id == 0:  # By this point, if tenable_id is still zero then the find_agent() func failed.
        print("No matching agent found, or your spelling was off.")
        return  # Bail out; there is no tenable_id to look up.
if len(tenable_id) == 36: # The agent UUID has hyphens, while the tenable_id does not.
tenable_id = tenable_id.replace('-', '') # Remove the hyphens to convert the agent UUID to a tenable_id.
print("Agent tenable_id: ", tenable_id) # Outputs the re-formatted agent tenable_id to CLI.
agent_info = get_uuid_from_tenableid(tenable_id, agent_name) # This func returns a larger blob of asset details using the tenable_id.
for agent in agent_info: # Ideally, this only loops once because you only matched a single agent.
pprint(agent) # This pretty-prints the json data about the matching agent. POC code to show how to get to the data.
if __name__ == '__main__':
search_for_agent_info()
|
[
"dhewitt@tenable.com"
] |
dhewitt@tenable.com
|
f7a62a6bfdeea9f804803f5f08804e7de81f0233
|
6e0b343068262087c683df6104b42f39121d4237
|
/tagger.py
|
ec6b8ec73497d38319eaeb0d539429446b007bb8
|
[
"Apache-2.0"
] |
permissive
|
jay-booker/tagger
|
15a2d5fb77ac0395b40a451649e6aac2b29de485
|
db11745e7692b502ba13b07429c0877272e2b90f
|
refs/heads/master
| 2020-12-30T15:28:58.338207
| 2017-05-13T01:34:07
| 2017-05-13T01:34:07
| 91,140,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 915
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import os.path
from shutil import copy
from numpy import *
def taggerTest():
    classList = {}  # raw_input() answers keyed by file index
for i in range(1,10):
file_path = ur'blog/%d.txt'%i
        docList = open(file_path).read()  # open the text file
        classList[i] = raw_input()
        if classList[i] == '1':
dest_dir = ur'result/market'
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
copy(file_path, dest_dir)
        elif classList[i] == '2':
dest_dir = ur'result/plate'
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
copy(file_path, dest_dir)
else:
dest_dir = ur'result/simple'
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
copy(file_path, dest_dir)
if __name__ == '__main__':
taggerTest()
|
[
"2621356581@qq.com"
] |
2621356581@qq.com
|
6cbf68cfefcf531b80bbf4328d132845e0942789
|
aa52f9f3a4610ea80e79f975d59f96b0ceb0a964
|
/users/migrations/0003_remove_user_username.py
|
0c338afcb3c1db7fee2a82aa4c5d6ecf268d8ea9
|
[] |
no_license
|
FaisKhan/hostipalCRM
|
a57b991ff293b1147fca7276d00db51660f12ad9
|
30243ec4866172ce638c8752b74c566975b71b0f
|
refs/heads/master
| 2023-03-17T12:35:02.284249
| 2021-03-21T19:34:58
| 2021-03-21T19:34:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
# Generated by Django 3.1.7 on 2021-03-21 18:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20210105_1952'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='username',
),
]
|
[
"pankajmandrai22@gmail.com"
] |
pankajmandrai22@gmail.com
|
8442d347a65050e0a2f5b3af4c5780d32da6872a
|
e1c75b98e7dd5e7e07ee6171c76366b7f0b202a7
|
/l4project/exercises_system_project/exerciser/tests.py
|
7db201187334d06150f6511115ae4a5cc4bf165c
|
[] |
no_license
|
emivulpe/weave_novo
|
f318bdf902faa58949e30ea0e3c9b263cdc38021
|
6939937538efcecc2c7e969a995eb2c63efba93b
|
refs/heads/master
| 2021-03-12T22:18:48.868267
| 2015-03-27T17:08:51
| 2015-03-27T17:08:51
| 32,998,746
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,133
|
py
|
from django.test import TestCase, Client
from exerciser.models import Application, User, Teacher, Step, Group, AcademicYear, Student, Question, Option
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils.importlib import import_module
#imports for views
from django.core.urlresolvers import reverse
"""
# models test
class ApplicationTest(TestCase):
def test_application_creation(self):
app = Application.objects.get_or_create(name = 'test app')[0]
self.assertTrue(isinstance(app, Application))
self.assertEqual(app.__unicode__(), app.name)
class IndexViewTests(TestCase):
def test_index_view_with_no_applications(self):
response = self.client.get(reverse('index'))
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(response.context['applications'], [])
def test_index_view_with_applications(self):
response = self.client.get(reverse('index'))
app = Application.objects.get_or_create(name = 'test app')[0]
self.assertEqual(response.status_code, 200)
self.assertEqual((response.context['applications'] >= 0), True)
class LogInfoDbTests(TestCase):
def setUp(self):
# Setup Test User
user = User.objects.create_user(
username='test user',
password='password'
)
teacher = Teacher.objects.get_or_create(user = user)[0]
app = Application.objects.get_or_create(name = 'test app')[0]
step = Step.objects.get_or_create(application = app, order = 1)[0]
year = AcademicYear.objects.get_or_create(start = 2014)[0]
group = Group.objects.get_or_create(teacher = teacher, academic_year = year, name = 'test group')[0]
student = Student.objects.get_or_create(teacher=teacher,group=group,student_id = 'test student')[0]
def test_log_info_db_valid(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
        store.save() # we need to make load() work, or the cookie is worthless
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'teacher': 'test user', 'year':2014, 'group':'test group', 'student': 'test student'})
session.save()
response = c.post(reverse('log_info_db'), {'time': 20, 'step': 1, 'direction' : 'next', 'example_name':'test app'})
self.assertEqual(response.status_code, 200)
def test_log_info_db_invalid_data(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
        store.save() # we need to make load() work, or the cookie is worthless
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'teacher': 'test user', 'group' : 'test group', 'year':2014})
session.save()
response = c.post(reverse('log_info_db'), {'time': 20, 'step': 1, 'direction' : 'back', 'example_name':'invalid app'})
self.assertEqual(response.status_code, 200)
def test_log_info_db_invalid_key(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
        store.save() # we need to make load() work, or the cookie is worthless
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'teacher': 'test user'})
session.save()
response = c.post(reverse('log_info_db'), {'invalid key': 20, 'step': 1, 'direction' : 'next', 'example_name':'test app'})
self.assertEqual(response.status_code, 200)
class LogQuestionInfoDbTests(TestCase):
def setUp(self):
# Setup Test User
user = User.objects.create_user(
username='test user',
password='password'
)
teacher = Teacher.objects.get_or_create(user = user)[0]
app = Application.objects.get_or_create(name = 'test app')[0]
step = Step.objects.get_or_create(application = app, order = 1)[0]
year = AcademicYear.objects.get_or_create(start = 2014)[0]
group = Group.objects.get_or_create(teacher = teacher, academic_year = year, name = 'test group')[0]
student = Student.objects.get_or_create(teacher=teacher,group=group,student_id = 'test student')[0]
question = Question.objects.get_or_create(application = app, step = step, question_text = 'test question')[0]
option = Option.objects.get_or_create(question = question, number = 1, content = 'test option')[0]
def test_log_question_info_db_valid(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
        store.save() # we need to make load() work, or the cookie is worthless
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'teacher': 'test user', 'year':2014, 'group':'test group', 'student': 'test student'})
session.save()
response = c.post(reverse('log_question_info_db'), {'time': 20, 'step': 1, 'direction' : 'next', 'example_name':'test app','answer':'test option','multiple_choice':'true'})
self.assertEqual(response.status_code, 200)
def test_log_question_info_db_invalid_data(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'teacher': 'test user', 'group' : 'test group', 'year':2014})
session.save()
response = c.post(reverse('log_question_info_db'), {'time': 20, 'step': 1, 'direction' : 'next', 'example_name':'invalid app','answer':'test option','multiple_choice':'true'})
self.assertEqual(response.status_code, 200)
def test_log_question_info_db_invalid_option(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'teacher': 'test user', 'group' : 'test group', 'year':2014})
session.save()
response = c.post(reverse('log_question_info_db'), {'time': 20, 'step': 1, 'direction' : 'next', 'example_name':'test app','answer':'invalid option','multiple_choice':'true'})
self.assertEqual(response.status_code, 200)
def test_log_question_info_db_invalid_key(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
        store.save() # we need to make load() work, or the cookie is worthless
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'teacher': 'test user'})
session.save()
response = c.post(reverse('log_question_info_db'), {'invalid key': 20, 'step': 1, 'direction' : 'next', 'example_name':'test app','answer':'test option','multiple_choice':'true'})
self.assertEqual(response.status_code, 200)
class StudentGroupListTests(TestCase):
def setUp(self):
# Setup Test User
user = User.objects.create_user(
username='test user',
password='password'
)
teacher = Teacher.objects.get_or_create(user = user)[0]
year = AcademicYear.objects.get_or_create(start = 2014)[0]
group = Group.objects.get_or_create(teacher = teacher, academic_year = year, name = 'test group')[0]
def test_student_group_list_valid(self):
c = Client()
response = c.get(reverse('student_group_list'), {'teacher': 'test user', 'year':2014, 'group':'test group'})
self.assertEqual(response.status_code, 200)
def test_student_group_list_invalid_data(self):
c = Client()
response = c.get(reverse('student_group_list'), {'teacher': 'invalid user', 'year':2014, 'group':'test group'})
self.assertEqual(response.status_code, 200)
def test_student_group_list_invalid_key(self):
c = Client()
response = c.get(reverse('student_group_list'), {'invalid': 'test user', 'year':2014, 'group':'test group'})
self.assertEqual(response.status_code, 200)
class CreateGroupTests(TestCase):
def setUp(self):
# Setup Test User
user = User.objects.create_user(
username='test user',
password='password'
)
teacher = Teacher.objects.get_or_create(user = user)[0]
year = AcademicYear.objects.get_or_create(start = 2014)[0]
group = Group.objects.get_or_create(teacher = teacher, academic_year = year, name = 'test group')[0]
def test_create_group_valid(self):
c = Client()
response = c.post(reverse('create_group'), {'teacher': 'test user', 'year':2014, 'group':'new group','num_students':10})
self.assertEqual(response.status_code, 200)
def test_create_group_invalid_data(self):
c = Client()
response = c.post(reverse('create_group'), {'teacher': 'invalid user', 'year':2014, 'group':'test group','num_students':10})
self.assertEqual(response.status_code, 200)
def test_create_group_invalid_key(self):
c = Client()
response = c.post(reverse('create_group'), {'invalid keys': 'test user', 'year':2014, 'group':'test group','num_students':10})
self.assertEqual(response.status_code, 200)
class DeleteGroupTests(TestCase):
def setUp(self):
# Setup Test User
user = User.objects.create_user(
username='test user',
password='password'
)
teacher = Teacher.objects.get_or_create(user = user)[0]
year = AcademicYear.objects.get_or_create(start = 2014)[0]
group = Group.objects.get_or_create(teacher = teacher, academic_year = year, name = 'test group')[0]
def test_delete_group_valid(self):
c = Client()
response = c.post(reverse('delete_group'), {'teacher': 'test user', 'year':2014, 'group':'test group'})
self.assertEqual(response.status_code, 200)
def test_delete_group_invalid_data(self):
c = Client()
response = c.post(reverse('delete_group'), {'teacher': 'invalid user', 'year':2014, 'group':'test group'})
self.assertEqual(response.status_code, 200)
def test_delete_group_invalid_key(self):
c = Client()
response = c.post(reverse('delete_group'), {'invalid keys': 'test user', 'year':2014, 'group':'test group'})
self.assertEqual(response.status_code, 200)
class UpdateGroupTests(TestCase):
def setUp(self):
# Setup Test User
user = User.objects.create_user(
username='test user',
password='password'
)
teacher = Teacher.objects.get_or_create(user = user)[0]
year = AcademicYear.objects.get_or_create(start = 2014)[0]
group = Group.objects.get_or_create(teacher = teacher, academic_year = year, name = 'test group')[0]
def test_update_group_valid(self):
c = Client()
response = c.post(reverse('update_group'), {'teacher': 'test user', 'year':2014, 'group':'test group','num_students':10})
self.assertEqual(response.status_code, 200)
def test_update_group_invalid_data(self):
c = Client()
response = c.post(reverse('update_group'), {'teacher': 'invalid user', 'year':2014, 'group':'test group','num_students':10})
self.assertEqual(response.status_code, 200)
def test_update_group_invalid_key(self):
c = Client()
response = c.post(reverse('update_group'), {'invalid keys': 'test user', 'year':2014, 'group':'test group','num_students':10})
self.assertEqual(response.status_code, 200)
class RegisterGroupWithSessionTests(TestCase):
def setUp(self):
# Setup Test User
user = User.objects.create_user(
username='test user',
password='password'
)
teacher = Teacher.objects.get_or_create(user = user)[0]
year = AcademicYear.objects.get_or_create(start = 2014)[0]
group = Group.objects.get_or_create(teacher = teacher, academic_year = year, name = 'test group')[0]
def test_register_group_with_session_valid(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'teacher': 'test user', 'year':2014})
session.save()
response = c.post(reverse('register_group_with_session'), {'group' : 'test group'})
self.assertEqual(response.status_code, 200)
def test_register_group_with_session_invalid_data(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'teacher': 'test user', 'year':2014})
session.save()
response = c.post(reverse('register_group_with_session'), {'group' : 'invalid group'})
self.assertEqual(response.status_code, 200)
def test_register_group_with_session_invalid_key(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'teacher': 'test user', 'year':2014})
session.save()
response = c.post(reverse('register_group_with_session'), {'invalid key' : 'test group'})
self.assertEqual(response.status_code, 200)
class SaveSessionIdsTests(TestCase):
def test_save_session(self):
c = Client()
response = c.post(reverse('save_session_ids'), {})
self.assertEqual(response.status_code, 302)
class RegisterTeacherWithSessionTests(TestCase):
def setUp(self):
# Setup Test User
user = User.objects.create_user(
username='test user',
password='password'
)
teacher = Teacher.objects.get_or_create(user = user)[0]
def test_register_teacher_with_session_valid(self):
c = Client()
response = c.post(reverse('register_teacher_with_session'), {'teacher' : 'test user'})
self.assertEqual(response.status_code, 200)
def test_register_teacher_with_session_invalid_data(self):
c = Client()
response = c.post(reverse('register_teacher_with_session'), {'teacher' : 'invalid teacher'})
self.assertEqual(response.status_code, 200)
def test_register_teacher_with_session_invalid_key(self):
c = Client()
response = c.post(reverse('register_teacher_with_session'), {'invalid key' : 'test user'})
self.assertEqual(response.status_code, 200)
class RegisterStudentWithSessionTests(TestCase):
def setUp(self):
# Setup Test User
user = User.objects.create_user(
username='test user',
password='password'
)
teacher = Teacher.objects.get_or_create(user = user)[0]
year = AcademicYear.objects.get_or_create(start = 2014)[0]
group = Group.objects.get_or_create(teacher = teacher, academic_year = year, name = 'test group')[0]
student = Student.objects.get_or_create(teacher = teacher, group = group, student_id = 'test student')[0]
def test_register_student_with_session_valid(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'teacher': 'test user', 'year':2014,'group':'test group'})
session.save()
response = c.post(reverse('register_student_with_session'), {'student' : 'test student'})
self.assertEqual(response.status_code, 200)
def test_register_student_with_session_invalid_data(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'teacher': 'test user', 'year':2014,'group':'test group'})
session.save()
response = c.post(reverse('register_student_with_session'), {'student' : 'invalid student'})
self.assertEqual(response.status_code, 200)
def test_register_student_with_session_invalid_key(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'teacher': 'test teacher','year':2014,'group':'test group'})
session.save()
response = c.post(reverse('register_student_with_session'), {'invalid key' : 'test student'})
self.assertEqual(response.status_code, 200)
class GetGroupsForYearTests(TestCase):
def setUp(self):
# Setup Test User
user = User.objects.create_user(
username='test user',
password='password'
)
teacher = Teacher.objects.get_or_create(user = user)[0]
year = AcademicYear.objects.get_or_create(start = 2014)[0]
group = Group.objects.get_or_create(teacher = teacher, academic_year = year, name = 'test group')[0]
def test_get_groups_for_year_valid(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'teacher': 'test user'})
session.save()
response = c.post(reverse('get_groups_for_year'), {'year' : 2014})
self.assertEqual(response.status_code, 200)
def test_get_groups_for_year_invalid_key(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'teacher': 'test user'})
session.save()
response = c.post(reverse('get_groups_for_year'), {'invalid key' : 2014})
self.assertEqual(response.status_code, 200)
def test_get_groups_for_year_invalid_teacher(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'teacher': 'invalid user'})
session.save()
response = c.post(reverse('get_groups_for_year'), {'year' : 2014})
self.assertEqual(response.status_code, 200)
def test_get_groups_for_year_invalid_year(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'teacher': 'test user'})
session.save()
response = c.post(reverse('get_groups_for_year'), {'year' : 2015})
self.assertEqual(response.status_code, 200)
class RegisterYearWithSessionTests(TestCase):
def setUp(self):
# Setup Test User
user = User.objects.create_user(
username='test user',
password='password'
)
teacher = Teacher.objects.get_or_create(user = user)[0]
year = AcademicYear.objects.get_or_create(start = 2014)[0]
def test_register_year_with_session_valid(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'teacher': 'test user'})
session.save()
response = c.post(reverse('register_year_with_session'), {'year' : 2014})
self.assertEqual(response.status_code, 200)
def test_register_year_with_session_invalid_year(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'teacher': 'test user'})
session.save()
response = c.post(reverse('register_year_with_session'), {'year' : 2015})
self.assertEqual(response.status_code, 200)
def test_register_year_with_session_invalid_key(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'teacher': 'test user'})
session.save()
response = c.post(reverse('register_year_with_session'), {'invalid key' : 2014})
self.assertEqual(response.status_code, 200)
class ResetSessionTests(TestCase):
def test_reset_session_valid(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'teacher': 'test user', 'year' : 2014, 'group' : 'test group', 'student': 'test student', 'student_registered': True})
session.save()
response = c.post(reverse('reset_session'), {})
self.assertEqual(response.status_code, 302)
class DelSessionVariableTests(TestCase):
def test_del_session_variable_valid(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'delete me': 'delete me'})
session.save()
response = c.post(reverse('del_session_variable'), {'to_delete': 'delete me'})
self.assertEqual(response.status_code, 302)
def test_del_session_variable_invalid_key(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'to_delete': 'delete me'})
session.save()
response = c.post(reverse('del_session_variable'), {'bad key': 'delete me'})
self.assertEqual(response.status_code, 302)
class GetStudentsTests(TestCase):
def setUp(self):
# Setup Test User
user = User.objects.create_user(
username='test user',
password='password'
)
teacher = Teacher.objects.get_or_create(user = user)[0]
year = AcademicYear.objects.get_or_create(start = 2014)[0]
group = Group.objects.get_or_create(teacher = teacher, academic_year = year, name = 'test group')[0]
def test_get_students_valid(self):
c = Client()
c.login(username='test user',password='password')
response = c.get(reverse('get_students'), {'group' : 'test group', 'year' : 2014})
self.assertEqual(response.status_code, 200)
def test_get_students_invalid(self):
c = Client()
response = c.get(reverse('get_students'), {'wrong key' : 'test group', 'year' : 2014})
self.assertEqual(response.status_code, 200)
def test_get_students_invalid_year(self):
c = Client()
c.login(username='test user',password='password')
response = c.get(reverse('get_students'), {'group' : 'test group', 'year' : 2015})
self.assertEqual(response.status_code, 200)
"""
class GetQuestionDataTests(TestCase):
def setUp(self):
# Setup Test User
user = User.objects.create_user(
username='test user',
password='password'
)
teacher = Teacher.objects.get_or_create(user = user)[0]
year = AcademicYear.objects.get_or_create(start = 2014)[0]
group = Group.objects.get_or_create(teacher = teacher, academic_year = year, name = 'test group')[0]
student = Student.objects.get_or_create(teacher = teacher, group = group, student_id = 'test student')[0]
application = Application.objects.get_or_create(name = 'test application')[0]
step = Step.objects.get_or_create(application = application, order = 1)[0]
question = Question.objects.get_or_create(application = application, step = step, question_text = 'test question')[0]
def test_get_question_data_missing_key(self):
print "here222"
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'student_registered': True})
session.save()
response = c.get(reverse('get_question_data'), {'year' : 2014, 'group' : 'test group', 'step' : 1, 'question' : 'test question', 'student':'test student'})
print response.content
self.assertEqual(response.status_code, 200)
print "there222"
def test_get_question_data_invalid(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'student_registered': True})
session.save()
response = c.get(reverse('get_question_data'), {'year' : 2014,'app_name' : 'invalid application', 'group' : 'test group', 'step' : 1, 'question' : 'test question', 'student':'test student'})
self.assertEqual(response.status_code, 200)
class UpdateTimeGraphTests(TestCase):
def setUp(self):
# Setup Test User
user = User.objects.create_user(
username='user',
password='password'
)
teacher = Teacher.objects.get_or_create(user = user)[0]
print teacher
year = AcademicYear.objects.get_or_create(start = 2014)[0]
group = Group.objects.get_or_create(teacher = teacher, academic_year = year, name = 'test group')[0]
student = Student.objects.get_or_create(teacher = teacher, group = group, student_id = 'test student')[0]
application = Application.objects.get_or_create(name = 'test application')[0]
step = Step.objects.get_or_create(application = application, order = 1)[0]
question = Question.objects.get_or_create(application = application, step = step, question_text = 'test question')[0]
def test_update_time_graph_valid(self):
print "here"
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'student_registered': True})
session.save()
response = c.get(reverse('update_time_graph'), {'year' : 2014,'app_name' : 'test application', 'group' : 'test group', 'student':'test student'})
self.assertEqual(response.status_code, 200)
print "there"
def test_update_time_graph_invalid(self):
c = Client()
c.login(username='test user',password='password')
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
c.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
session = c.session
session.update({'student_registered': True})
session.save()
response = c.get(reverse('update_time_graph'), {'year' : 2014,'app_name' : 'test application', 'group' : 'test group', 'step' : 1, 'question' : 'test question', 'student':'test student'})
self.assertEqual(response.status_code, 200)
|
[
"emivulpe@abv.bg"
] |
emivulpe@abv.bg
|
03213cca6b6982658fae655340fce5d5c656b08d
|
1d88f6dcd0689c8b7a5f8e42cadac8f46837a353
|
/database_setup.py
|
21d18dfba463e6069a54919aa56ba6626cfb5e89
|
[] |
no_license
|
Aarthyravi/Item-catalog
|
2d61f1b1b1eaa65e5a14cf0ae598d474733ffb5f
|
0bb1df314295c6ae8585f3f7fc7b996367bf9e84
|
refs/heads/master
| 2021-01-20T14:50:04.246072
| 2017-06-12T19:54:38
| 2017-06-12T19:54:38
| 90,681,271
| 2
| 1
| null | 2017-05-10T16:38:55
| 2017-05-08T23:25:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,744
|
py
|
import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref
from sqlalchemy import create_engine
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
email = Column(String(250), nullable=False)
picture = Column(String(250))
class Restaurant(Base):
__tablename__ = 'restaurant'
name = Column(String(80), nullable=False)
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('user.id'))
user = relationship(User)
@property
def serialize(self):
return{
'id': self.id,
'name': self.name,
}
class MenuItem(Base):
__tablename__ = 'menu_item'
name = Column(String(80), nullable=False)
id = Column(Integer, primary_key=True)
course = Column(String(250))
description = Column(String(250))
price = Column(String(8))
restaurant_id = Column(Integer, ForeignKey('restaurant.id'))
restaurant = relationship(Restaurant, backref=backref('menu_item',
cascade='all, delete'))
user_id = Column(Integer, ForeignKey('user.id'))
user = relationship(User)
@property
def serialize(self):
return{
'name': self.name,
'description': self.description,
'id': self.id,
'price': self.price,
'course': self.course,
}
engine = create_engine('sqlite:///restaurantmenuwithusers.db')
Base.metadata.create_all(engine)
|
[
"noreply@github.com"
] |
noreply@github.com
|
3ecd4acf486810b559bb1eb756b9b32e70f99558
|
f05084e33f189c3ca16982a6704fa808831fa71a
|
/yayo/cart/views.py
|
5009693a7231de1285c2d32c1e33dd096dbdca83
|
[] |
no_license
|
willyowi/Yayo-maluku-shop
|
f7581ae4c5ca0a1dc6a9daa92701d0965d27914c
|
7c8844bd3cbd97fdac01f991b45ca55b5f419c38
|
refs/heads/master
| 2021-01-06T16:06:36.397007
| 2020-02-18T15:20:46
| 2020-02-18T15:20:46
| 241,390,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 890
|
py
|
from django.shortcuts import render,redirect,get_object_or_404
from django.views.decorators.http import require_POST
from shop.models import Product
from .cart import Cart
from .forms import CartAddProductForm
# Create your views here.
@require_POST
def cart_add(request, product_id):
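# Validate the posted quantity with CartAddProductForm, then add or update the product in the session-based cart.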
cart = Cart(request)
product = get_object_or_404(Product, id=product_id)
form = CartAddProductForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
cart.add(product=product,quantity=cd['quantity'],update_quantity=cd['update'])
return redirect('cart:cart_detail')
def cart_remove(request,product_id):
cart = Cart(request)
product = get_object_or_404(Product, id=product_id)
cart.remove(product)
return redirect('cart:cart_detail')
def cart_detail(request):
cart = Cart(request)
return render(request, 'cart/detail.html', {'cart': cart})
|
[
"wilsonowino1@gmail.com"
] |
wilsonowino1@gmail.com
|
78af662df05acb6737539f48c49fead8f92cfc09
|
d59267485f570cf6b2150e54855b481c43f9e7f7
|
/src/leave_tutor_maker.py
|
e2c86286243f15d48e092787f61baa08b2f91ab1
|
[
"MIT"
] |
permissive
|
NKUST-ITC/NKUST-AP-API
|
d22d044884862fa48f37dec377801d4865807a3b
|
96b5961170fb99f87490be9abdf869a8556c25d3
|
refs/heads/master
| 2021-06-18T14:31:20.491382
| 2021-01-09T02:58:40
| 2021-01-09T02:58:40
| 188,676,351
| 7
| 6
|
MIT
| 2021-03-31T19:08:04
| 2019-05-26T11:49:30
|
Python
|
UTF-8
|
Python
| false
| false
| 844
|
py
|
from utils.leave_tool import teacher_list
import json
if __name__ == "__main__":
campus_list = [
{'id': 1, "campus_name": '建工校區'},
{'id': 2, "campus_name": '燕巢校區'},
{'id': 3, "campus_name": '第一校區'},
{'id': 4, "campus_name": '楠梓校區'},
{'id': 5, "campus_name": '旗津校區'}
]
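# For each campus, fetch the teacher list with the supplied NKUST credentials and dump the combined result to res.json.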
data = {'data': []}
username = input('NKUST account : ')
password = input('NKUST password : ')
for i in campus_list:
_temp_ = {
"campusName": i['campus_name'],
"department": None
}
res = teacher_list(
username=username,
password=password,
campus_id=i['id']
)
_temp_['department'] = res
data['data'].append(_temp_)
with open('res.json', 'w') as fp:
    fp.write(json.dumps(data))
|
[
"wow@taki.dog"
] |
wow@taki.dog
|
2a19cd96099adcbad5170c2df54d9cead8f36f92
|
043a2ac159574b58beb2ca8f574d22dd00ffb9ec
|
/message.py
|
1022b2b8a456bb2b22fd47fefd3545daa12bc0ed
|
[] |
no_license
|
shirleyng92/shirley-py18
|
f963319d07d9d6ccbc742111bbc0056706e268a5
|
3f142cc216db6759c49be35e656e38c51a1572b2
|
refs/heads/master
| 2020-04-03T14:19:28.906846
| 2018-11-23T01:57:17
| 2018-11-23T01:57:17
| 155,317,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 30 12:04:20 2018
@author: shirley.ng
"""
message = "Test Message"
print(message.lower())
print(message.upper())
print(message.swapcase())
|
[
"kah.shirley@gmail.com"
] |
kah.shirley@gmail.com
|
9f2c888cefcb36f53f24c59107c582549cd9639f
|
ed56e7d42fc4ed4cbbb6c8d987a3de01b09b19d7
|
/mmpdblib/fragment_io.py
|
65ed409752943039634c6e887b6d0b9c3da2ebc9
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
rnaimehaom/mmpdb_v2
|
b3b5a9d6c44bdbe6ab70254bd61acfa85d50e945
|
b9fec0a21e9c3d5ad26fee938aa58a1467b72b73
|
refs/heads/main
| 2023-07-17T08:09:07.062883
| 2021-08-16T13:01:36
| 2021-08-16T13:01:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,575
|
py
|
# mmpdb - matched molecular pair database generation and analysis
#
# Copyright (c) 2015-2017, F. Hoffmann-La Roche Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of F. Hoffmann-La Roche Ltd. nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function, absolute_import
import sys
import json
import re
import itertools
from . import __version__ as mmpdblib_version
from . import config
from . import fileio
from . import fragment_algorithm
from . import reporters
from .fragment_types import FragmentRecord, FragmentErrorRecord, FragmentFormatError
from ._compat import basestring
SOFTWARE = "mmpdb-" + mmpdblib_version
##### Read
parse_max_heavies_value = config.positive_int_or_none
parse_max_rotatable_bonds_value = config.positive_int_or_none
parse_min_heavies_per_const_frag = config.nonnegative_int
parse_min_heavies_total_const_frag = config.nonnegative_int
def parse_num_cuts_value(value):
if value not in ("1", "2", "3"):
raise ValueError("must be '1', '2', or '3'")
return int(value)
def parse_method_value(value):
if value not in ("chiral",):
if value in ("hussain", "dalke"):
raise ValueError("'chiral' is supported in mmpdb v2, not %r" % (value,))
raise ValueError("must be 'chiral'")
return value
class FragmentReader(object):
def __init__(self, metadata, options, reader, location):
self.version = metadata["version"]
self.software = metadata["software"]
self.options = options
self._reader = reader
self.location = location
def __iter__(self):
if self._reader is None:
raise ValueError("I/O operation on closed file")
return self._reader
def __next__(self):
if self._reader is None:
raise ValueError("I/O operation on closed file")
return next(self._reader)
next = __next__
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def close(self):
reader = self._reader
if reader is None:
return
self._reader = None
reader.close()
def read_fragment_records(source):
if source is None:
infile = fileio.open_input(None)
filename = "<stdin>"
close = None
elif isinstance(source, basestring):
infile = fileio.open_input(source)
filename = source
close = infile.close
else:
infile = source
filename = getattr(source, "name", "<unknown>")
close = None
location = fileio.Location(filename)
location.save(record_format="fragment")
line_reader = enumerate(infile, 1)
line_reader, metadata, options, options_dict = _get_options(line_reader, location)
reader = _read_fragment_records(line_reader, close, location, options_dict)
x = next(reader)
assert x == "ready"
return FragmentReader(metadata, options, reader, location)
_json_loads = None
_json_module_name = None
def get_json_loads():
global _json_loads, _json_module_name
if _json_loads is None:
# Timings reported for a fragment file with 37177 lines
# (35634 "RECORD" and 1534 "IGNORE" records.)
try:
# 40.05 seconds
import ujson
_json_loads = ujson.decode
_json_module_name = "ujson"
except ImportError:
try:
# 41.85 seconds
import cjson
_json_loads = cjson.decode
_json_module_name = "cjson"
except ImportError:
# 55.5 seconds
_json_loads = json.loads
_json_module_name = "json"
return _json_loads
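# Validators for the OPTION key/value pairs that appear in the fragment file header.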
_option_parser = {
"cut_smarts": str,
"max_heavies": parse_max_heavies_value,
"max_rotatable_bonds": parse_max_rotatable_bonds_value,
"method": parse_method_value,
"num_cuts": parse_num_cuts_value,
"rotatable_smarts": str,
"salt_remover": str,
"min_heavies_per_const_frag": parse_min_heavies_per_const_frag,
"min_heavies_total_const_frag": parse_min_heavies_total_const_frag
}
def _get_options(line_reader, location):
options_dict = {}
options = config.FragmentOptions(**config.DEFAULT_FRAGMENT_OPTIONS.to_dict())
version = None
software = None
lineno = 0
loads = get_json_loads()
for lineno, line in line_reader:
try:
fields = loads(line)
except ValueError as err:
if lineno == 1:
raise FragmentFormatError("The input does not appear to be a fragment file",
location)
raise
#fields = line.rstrip("\n").split("\t")
if version is None:
if len(fields) == 2 and fields[0] == "VERSION":
version = fields[1]
if version != "mmpdb-fragment/2":
location.save(lineno=lineno)
raise FragmentFormatError("This reader only supports version 'mmpdb-fragment/2', not version %r"
% (version,), location)
else:
location.save(lineno=lineno)
raise FragmentFormatError("Missing VERSION from first line in the file", location)
continue
if len(fields) == 2 and fields[0] == "SOFTWARE":
software = fields[1]
continue
if len(fields) != 3 or fields[0] != "OPTION":
# Push back the current line
line_reader = itertools.chain([(lineno, line)], line_reader)
return line_reader, {"version": version, "software": software}, options, options_dict
_, name, value_str = fields
if name not in _option_parser:
location.save(lineno=lineno)
raise FragmentFormatError("Unknown OPTION %r" % (name,), location)
if name in options_dict and options_dict[name][1] != value_str:
location.save(lineno=lineno)
raise FragmentFormatError("OPTION %s already set to %r on line %d"
% (name, options_dict[name][1], options_dict[name][0]),
location)
parser = _option_parser[name]
try:
value = parser(value_str)
except ValueError as err:
location.save(lineno=lineno)
raise FragmentFormatError("Cannot understand option %s (%r): %s"
% (name, value_str, err), location)
setattr(options, name, value)
options_dict[name] = (lineno, value_str)
# No remaining data
if version is None:
location.save(lineno=lineno)
raise FragmentFormatError("Missing required VERSION line")
    return iter([]), {"version": version, "software": software}, options, options_dict
def _read_fragment_records(line_reader, close, location, options_dict):
recno = 0
lineno = 0
line = None
def get_recno():
return recno
def get_lineno():
return lineno
def get_record():
return line
location.register(get_recno=get_recno,
get_lineno=get_lineno,
get_record=get_record,
)
loads = get_json_loads()
Fragmentation = fragment_algorithm.Fragmentation
yield "ready"
try:
for lineno, line in line_reader:
try:
fields = loads(line)
except ValueError as err:
                # the JSON decoder reports "line 1" for single-line input; rewrite it to the real file line number
                message = str(err).replace("line 1", "line %d" % (lineno,))
                err.args = (message,) + err.args[1:]
raise
label = fields[0]
if label == "RECORD":
assert label == "RECORD"
recno += 1
try:
_, id, input_smiles, num_normalized_heavies, normalized_smiles, fragment_fields_list = fields
except ValueError:
raise FragmentFormatError("Expected 7 fields on RECORD line, not %d" % (len(fields),),
location)
try:
# This is the hot spot for the reader. About 70% of the time is spent here.
fragmentations = [Fragmentation(*fragment_fields) for fragment_fields in fragment_fields_list]
except TypeError:
# Try to report the error a bit more nicely:
for fragment_i, fragment_fields in enumerate(fragment_fields_list):
if len(fragment_fields) != 10:
raise FragmentFormatError("Expected fragment[%d] with 10 fields, not %d (%r)"
% (fragment_i, len(fragment_fields), fragment_fields),
location)
raise AssertionError # I don't know what caused this error
yield FragmentRecord(
id, input_smiles, num_normalized_heavies, normalized_smiles, fragmentations)
continue
if label == "IGNORE":
try:
_, id, input_smiles, errmsg = fields
except ValueError:
raise FragmentFormatError("Expected 4 fields on IGNORE line, not %d" % (len(fields),),
location)
yield FragmentErrorRecord(id, input_smiles, errmsg)
continue
### It looks like this code isn't used.
## if label == "OPTION":
## try:
## _, name, value_str = fields
## except ValueError:
## raise FragmentFormatError("Expected 3 fields on OPTION line, not %d" % (len(fields),),
## location)
## if name not in options_dict:
## print("options_dict", options_dict)
## raise FragmentFormatError("Cannot set the new option %r" % (name,), location)
## old_lineno, old_value_str = options_dict[name]
## if old_value_str != value_str:
## raise FragmentFormatError("Cannot modify option %r from %r to %r "
## % (name, old_value_str, value_str), location)
## continue
raise FragmentFormatError("Unknown label %r" % (label,), location)
finally:
location.save(recno=get_recno(),
lineno=get_lineno(),
record=None)
if close is not None:
close()
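# In-memory table of fragment records keyed by record id, filled by load_cache() below.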
class FileCache(object):
def __init__(self, table, options):
self.table = table
self.options = options
def get(self, name):
return self.table.get(name)
def suggest_faster_json(reporter):
loads = get_json_loads()
if _json_module_name == "json":
reporter.warning("Neither ujson nor cjson installed. Falling back to Python's slower built-in json decoder.")
def load_cache(filename, reporter):
reporter = reporters.get_reporter(reporter)
table = {}
suggest_faster_json(reporter)
with read_fragment_records(filename) as reader:
for record in reporter.progress(reader, "Loading cache record"):
table[record.id] = record
return FileCache(table, reader.options)
##### Write
def get_fragment_sort_key(frag):
return (frag.num_cuts,
frag.variable_symmetry_class, frag.variable_num_heavies, frag.variable_smiles,
frag.constant_num_heavies, frag.constant_smiles, frag.constant_with_H_smiles,
frag.attachment_order
)
class FragmentWriter(object):
def __init__(self, filename, outfile, options):
self.filename = filename
self._outfile = outfile
self.options = options
def close(self):
self._outfile.close()
def __enter__(self):
return self
def __exit__(self, *args):
self._outfile.close()
def write_version(self):
json.dump(("VERSION", "mmpdb-fragment/2"), self._outfile)
self._outfile.write("\n")
json.dump(("SOFTWARE", SOFTWARE), self._outfile)
self._outfile.write("\n")
def write_options(self, options):
for k, v in sorted(options.to_text_settings()):
if "\n" in k or "\r" in k or "\t" in k or " " in k:
raise ValueError("Unsupported whitespace in key %r" % (k,))
if "\n" in v or "\r" in v or "\t" in v:
raise ValueError("Unsupported whitespace in %s value %r" % (k, v))
json.dump(("OPTION", k, v), self._outfile)
self._outfile.write("\n")
def write_records(self, fragment_records):
outfile = self._outfile
for rec in fragment_records:
if rec.errmsg:
json.dump(("IGNORE", rec.id, rec.input_smiles, rec.errmsg), outfile)
outfile.write("\n")
else:
fragment_fields = []
record = ("RECORD", rec.id, rec.input_smiles, rec.num_normalized_heavies,
rec.normalized_smiles, fragment_fields)
fragmentations = sorted(rec.fragments, key = get_fragment_sort_key)
for frag in fragmentations:
fragment_fields.append((
frag.num_cuts, frag.enumeration_label,
frag.variable_num_heavies, frag.variable_symmetry_class, frag.variable_smiles,
frag.attachment_order, frag.constant_num_heavies, frag.constant_symmetry_class,
frag.constant_smiles, frag.constant_with_H_smiles,
))
json.dump(record, outfile)
outfile.write("\n")
_wildcard_pat = re.compile( re.escape("[*]")
+ "|"
+ re.escape("*"))
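# Rewrite each wildcard atom ("*" or "[*]") in a SMILES as a numbered attachment point "[*:n]", following the given attachment order.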
def relabel(smiles, order=None):
input_smiles = smiles
input_order = order
if order is None:
order = list(range(smiles.count("*")))
else:
assert not isinstance(order[0], int), ("Fix this for Python 3", order)
order = [int(c) for c in order]
def add_isotope_tag_to_wildcard(m):
return "[*:%d]" % (order.pop(0)+1,)
return _wildcard_pat.sub(add_isotope_tag_to_wildcard, smiles)
class FragInfoWriter(object):
def __init__(self, filename, outfile, options):
self.filename = filename
self._outfile = outfile
self.options = options
def close(self):
self._outfile.close()
def __enter__(self):
return self
def __exit__(self, *args):
self._outfile.close()
def write_version(self):
self._outfile.write("FORMAT mmpdb-fraginfo/2\n")
self._outfile.write("SOFTWARE " + SOFTWARE + "\n")
def write_options(self, options):
for k, v in sorted(options.to_text_settings()):
if "\n" in k or "\r" in k or "\t" in k or " " in k:
raise ValueError("Unsupported whitespace in key %r" % (k,))
if "\n" in v or "\r" in v or "\t" in v:
raise ValueError("Unsupported whitespace in %s value %r" % (k, v))
self._outfile.write("OPTION %s=%s\n" % (k, v))
def write_records(self, fragment_records):
outfile = self._outfile
for rec in fragment_records:
if rec.errmsg:
outfile.write("IGNORE id=%r %r errmsg=%r\n"
% (rec.id, rec.input_smiles, rec.errmsg))
else:
outfile.write("RECORD id=%r %r #heavies=%d #fragmentations=%d\n"
% (rec.id, rec.input_smiles, rec.num_normalized_heavies, len(rec.fragments)))
fragmentations = sorted(rec.fragments, key = get_fragment_sort_key)
for frag in fragmentations:
reaction = "variable %s // constant %s" % (
relabel(frag.variable_smiles, frag.attachment_order),
relabel(frag.constant_smiles))
outfile.write(" FRAG #cuts=%d enum_label=%s %s\n"
" variable: #heavies=%d symm_class=%s %s attachment_order=%s\n"
" constant: #heavies=%d symm_class=%s %s H-smiles=%s\n"
% (frag.num_cuts, frag.enumeration_label, reaction,
frag.variable_num_heavies, frag.variable_symmetry_class,
frag.variable_smiles, frag.attachment_order,
frag.constant_num_heavies, frag.constant_symmetry_class,
frag.constant_smiles, frag.constant_with_H_smiles
))
def open_fragment_writer(filename, options, format_hint=None):
if format_hint is not None and format_hint not in ("fragments", "fragments.gz", "fraginfo", "fraginfo.gz"):
raise ValueError("Unsupported format_hint: %r" % (format_hint,))
outfile = fileio.open_output(filename, format_hint)
if format_hint is None:
if filename is None:
format_hint = "fragment"
else:
lc_filename = filename.lower()
if ( lc_filename.endswith(".fraginfo.gz")
or lc_filename.endswith(".fraginfo")):
format_hint = "fraginfo"
else:
format_hint = "fragment"
if "fraginfo" in format_hint:
writer = FragInfoWriter(filename, outfile, options)
else:
writer = FragmentWriter(filename, outfile, options)
writer.write_version()
writer.write_options(options)
return writer
|
[
"seritaka@gmail.com"
] |
seritaka@gmail.com
|
6ebc2e58e5cbf4e6730008505a1a7ceab1780374
|
f497c8c504334f69f5569b48ef1bb5cc0305d009
|
/chapter03_SyntaxBestPractices_abovetheClassLevel/skip__init__.py
|
6e2e97564f6459d67cc29a3eccf5946f81b505c4
|
[] |
no_license
|
gloryfromca/ExpertPythonProgramming
|
8c70df8ac62ce8547934322b178b69455c73bf18
|
50c14847182c29d5323b03f9d8b3ef1288084a38
|
refs/heads/master
| 2020-03-07T03:26:06.019407
| 2018-03-30T12:20:33
| 2018-03-30T12:20:33
| 127,235,239
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
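# __init__ only runs when __new__ returns an instance of the class: the first NonZero prints "value is: 1" before 1,
# while the second returns a plain int, so __init__ is skipped and only 1 is printed.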
class NonZero(int):
def __new__(cls, value):
return super().__new__(cls, value) if value != 0 else None
def __init__(self, value):
print("value is: " + str(value))
print(NonZero(1))
print('=============================')
class NonZero(int):
def __new__(cls, value):
return value if value != 0 else None
def __init__(self, value):
print("value is: " + str(value))
print(NonZero(1))
|
[
"huizhang1995@gmail.com"
] |
huizhang1995@gmail.com
|
6b1a01203f4a242049e9bd1a1259b9c32fc96e5b
|
dd3e3d677b3234524860dce3e92aa256b20bd725
|
/updated_web_sever/src_new/manage.py
|
1827f5d696e03eba91733261aeb9697847a33cc7
|
[] |
no_license
|
trabelsim/SKSR
|
3e042a5b4ccf46920ae13e3f0569e43ac1e5d5e1
|
81184e02e471af3b2b9819f2603009a38a105c5d
|
refs/heads/master
| 2022-11-12T05:19:36.857571
| 2020-06-25T10:37:34
| 2020-06-25T10:37:34
| 255,897,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sksr.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
a0de098b7fb337afcf4e32da975420b2f2bc696d
|
343463189f96335e565b6a3a2c6849561b7bd2ac
|
/pdw_008_get_full_inherited_dict.py
|
a3ded763e82c03ee5f4d12ce2d495306ed383d7b
|
[] |
no_license
|
acbalanza/PythonDoesWhat
|
3b5831921fc9c6ec879926730d128ebabcef453b
|
70547163b4a8171cada6415910b225fc823af621
|
refs/heads/master
| 2021-05-26T14:33:46.427022
| 2013-08-29T06:55:34
| 2013-08-29T06:55:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 988
|
py
|
pdw_id = 8
title = "Get full inherited __dict__"
author = "Kurt"
pub_date = (2010, 12, 6, 10, 57)
tags = ('dict', 'inheritance')
"""
``__dict__`` (or ``vars()``) is great, but sometimes you want to get more
than just a given instance's members. For instance, in:
"""
class A(object):
testA = "A"
testB = "A"
testb = "A"
class B(A):
testB = "B"
testb = "B"
"""
it might be nice to get dictionary access to the ``testA`` class member.
Here's a function that could do that:
"""
def get_full_dict(obj):
parent_dicts = [obj.__dict__] + \
[cls.__dict__ for cls in obj.__class__.__mro__ if cls.__name__ != "object"]
return reduce(lambda a,b: dict(b.items() + a.items()), parent_dicts, {})
"""
Let's test it out:
>>> b = B()
>>> b.testb = "b"
>>> b_dict = get_full_dict(b)
>>> b_dict["testA"]
'A'
>>> b_dict["testB"]
'B'
>>> b_dict["testb"]
'b'
For real use, you probably want to use the ``inspect`` module's ``getmembers()`` function.
"""
|
[
"makuro@gmail.com"
] |
makuro@gmail.com
|
a30781f84b1feca4e4a793f1a648138952c65940
|
b2cefb7a2a83aa93ee1b15a780b5ddf6c498215b
|
/examples/nlp/duplex_text_normalization/data/data_split.py
|
3b053a34419980bc0351c55707a288cbdab02f16
|
[
"Apache-2.0"
] |
permissive
|
VahidooX/NeMo
|
bfde8c9b48c818342a9c6290fb9dee62fafeca38
|
866cc3f66fab3a796a6b74ef7a9e362c2282a976
|
refs/heads/main
| 2023-07-23T19:13:39.948228
| 2022-04-29T21:51:54
| 2022-04-29T21:51:54
| 227,733,473
| 1
| 2
|
Apache-2.0
| 2022-09-15T15:30:13
| 2019-12-13T01:55:21
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,238
|
py
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script creates data splits of the Google Text Normalization dataset
of the format mentioned in the `text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization.rst>`.
USAGE Example:
1. Download the Google TN dataset from https://www.kaggle.com/google-nlu/text-normalization
2. Unzip the English subset (e.g., by running `tar zxvf en_with_types.tgz`). Then there will be a folder named `en_with_types`.
3. Run this script
# python data_split.py \
--data_dir=en_with_types/ \
--output_dir=data_split/ \
--lang=en
In this example, the split files will be stored in the `data_split` folder.
The folder should contain three subfolders `train`, 'dev', and `test` with `.tsv` files.
"""
from argparse import ArgumentParser
from os import listdir, mkdir
from os.path import isdir, isfile, join
from tqdm import tqdm
from nemo.collections.nlp.data.text_normalization import constants
# Local Constants
TEST_SIZE_EN = 100002
TEST_SIZE_RUS = 100007
def read_google_data(data_file: str, lang: str, split: str):
"""
The function can be used to read the raw data files of the Google Text Normalization
dataset (which can be downloaded from https://www.kaggle.com/google-nlu/text-normalization)
Args:
data_file: Path to the data file. Should be of the form output-xxxxx-of-00100
lang: Selected language.
split: data split
Return:
data: list of examples
"""
data = []
cur_classes, cur_tokens, cur_outputs = [], [], []
with open(data_file, 'r', encoding='utf-8') as f:
for linectx, line in tqdm(enumerate(f)):
es = line.strip().split('\t')
if split == "test":
# For the results reported in the paper "RNN Approaches to Text Normalization: A Challenge":
# + For English, the first 100,002 lines of output-00099-of-00100 are used for the test set
# + For Russian, the first 100,007 lines of output-00099-of-00100 are used for the test set
if lang == constants.ENGLISH and linectx == TEST_SIZE_EN:
break
if lang == constants.RUSSIAN and linectx == TEST_SIZE_RUS:
break
if len(es) == 2 and es[0] == '<eos>':
data.append((cur_classes, cur_tokens, cur_outputs))
# Reset
cur_classes, cur_tokens, cur_outputs = [], [], []
continue
# Remove _trans (for Russian)
if lang == constants.RUSSIAN:
es[2] = es[2].replace('_trans', '')
# Update the current example
assert len(es) == 3
cur_classes.append(es[0])
cur_tokens.append(es[1])
cur_outputs.append(es[2])
return data
if __name__ == '__main__':
parser = ArgumentParser(description='Preprocess Google text normalization dataset')
parser.add_argument('--data_dir', type=str, required=True, help='Path to folder with data')
parser.add_argument('--output_dir', type=str, default='preprocessed', help='Path to folder with preprocessed data')
parser.add_argument(
'--lang', type=str, default=constants.ENGLISH, choices=constants.SUPPORTED_LANGS, help='Language'
)
args = parser.parse_args()
# Create the output dir (if not exist)
if not isdir(args.output_dir):
mkdir(args.output_dir)
mkdir(args.output_dir + '/train')
mkdir(args.output_dir + '/dev')
mkdir(args.output_dir + '/test')
for fn in sorted(listdir(args.data_dir))[::-1]:
fp = join(args.data_dir, fn)
if not isfile(fp):
continue
if not fn.startswith('output'):
continue
# Determine the current split
split_nb = int(fn.split('-')[1])
if split_nb < 90:
cur_split = "train"
elif split_nb < 95:
cur_split = "dev"
elif split_nb == 99:
cur_split = "test"
data = read_google_data(data_file=fp, lang=args.lang, split=cur_split)
# write out
output_file = join(args.output_dir, f'{cur_split}', f'{fn}.tsv')
print(fp)
print(output_file)
output_f = open(output_file, 'w', encoding='utf-8')
for inst in data:
cur_classes, cur_tokens, cur_outputs = inst
for c, t, o in zip(cur_classes, cur_tokens, cur_outputs):
output_f.write(f'{c}\t{t}\t{o}\n')
output_f.write('<eos>\t<eos>\n')
print(f'{cur_split}_sentences: {len(data)}')
|
[
"noreply@github.com"
] |
noreply@github.com
|
0218e149fe0ab10e2548c94f6a93ff8e3d23e1de
|
227807bb34a91b94e4c832ae14bb21fe6297b456
|
/src/python/layers/neuralode.py
|
9656da6157bc8a11c7d9ad4cd2fe5b4038275469
|
[] |
no_license
|
connorzl/DeformCoSeg
|
386af79704d8ceb1d6da23670b8ec7e1b10f61d9
|
981d001212eb3446fcee606e3a5ada3196d17e3c
|
refs/heads/master
| 2023-02-05T01:12:05.305998
| 2020-09-08T00:07:37
| 2020-09-08T00:07:37
| 288,631,182
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,904
|
py
|
import torch
from torch import nn
from torchdiffeq import odeint
import numpy as np
class ODEFunc(nn.Module):
def __init__(self):
super(ODEFunc, self).__init__()
m = 50
nlin = nn.ReLU()
self.net = nn.Sequential(
nn.Linear(4, m),
nlin,
nn.Linear(m, m),
nlin,
nn.Linear(m, m),
nlin,
nn.Linear(m, 3),
)
for m in self.net.modules():
if isinstance(m, nn.Linear):
nn.init.normal_(m.weight, mean=0, std=1e-1)
nn.init.constant_(m.bias, val=0)
def forward(self, t, y):
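# broadcast the scalar time t across the batch and append it to the state so the learned velocity field can be time-dependent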
new_t = t.repeat(y.shape[0],1)
yt = torch.cat((y,new_t), 1)
res = self.net(yt - 0.5)
return res
#return self.net(yt-0.5)
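# Thin wrapper around torchdiffeq.odeint that integrates ODEFunc from t=0 to t=1 (forward) and from t=1 back to t=0 (inverse).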
class NeuralODE():
def __init__(self, device=torch.device('cpu')):
super(NeuralODE, self).__init__()
self.timing = torch.from_numpy(np.array([0, 1]).astype('float32'))
self.timing_inv = torch.from_numpy(np.array([1, 0]).astype('float32'))
self.timing = self.timing.to(device)
self.timing_inv = self.timing_inv.to(device)
self.func = ODEFunc()
self.func = self.func.to(device)
self.device = device
def to_device(self, device):
self.func = self.func.to(device)
self.timing = self.timing.to(device)
self.timing_inv = self.timing_inv.to(device)
self.device = device
def parameters(self):
return self.func.parameters()
def forward(self, u):
y = odeint(self.func, u, self.timing)[1]
return y
def inverse(self, u):
return odeint(self.func, u, self.timing_inv)[1]
def integrate(self, u, t1, t2, device):
new_time = torch.from_numpy(np.array([t1,t2]).astype('float32')).to(device)
return odeint(self.func, u, new_time)[1]
|
[
"hjwdzh@gmail.com"
] |
hjwdzh@gmail.com
|
540590ef128c7fc98cb5a28c475cbf774c51ff24
|
d96787f92bd86c8d8bcf01a4e7ec8f7feec24194
|
/kattis/nizovi/solution.py
|
9c70b792c4c93471dd6c04868b1338089c92b9f2
|
[] |
no_license
|
iandioch/solutions
|
133cbc3af58fadcde0b2e981fb0e7d05801070a7
|
8b3e458b3c01179ddf776bfbb897f263f22f3693
|
refs/heads/master
| 2023-04-09T03:39:16.952817
| 2023-03-15T20:00:53
| 2023-03-15T20:00:53
| 47,693,495
| 48
| 40
| null | 2019-10-22T14:52:59
| 2015-12-09T13:36:55
|
Python
|
UTF-8
|
Python
| false
| false
| 721
|
py
|
s = input()
curr = ''
indent = 0
lines = []
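# Re-indent the one-line nested array literal: each '{' opens a new level (+2 spaces), each '}' closes one,
# and every comma-terminated element goes on its own line.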
for c in s:
curr += c
if curr == '{':
lines.append('{}{}'.format(' '*indent, c))
curr = ''
indent += 2
elif curr.endswith('}') or curr.endswith('},'):
d = curr.find('}')
if len(curr[:d]) > 0:
lines.append('{}{}'.format(' '*indent, curr[:d]))
indent -= 2
lines.append('{}{}'.format(' '*indent, curr[d:]))
curr = ''
elif curr[-1] == ',':
lines.append('{}{}'.format(' '*indent, curr))
curr = ''
# remove commas trailing after }s
for j in range(len(lines)-1, -1, -1):
if lines[j].strip() == ',':
del lines[j]
lines[j-1] += ','
print('\n'.join(lines))
|
[
"iandioch11@gmail.com"
] |
iandioch11@gmail.com
|
a52afe00239a068bf21f59ef14158190e2601cab
|
ea7d223d83134f02d92d7771a02a64196fbcbdc7
|
/CreditCards/views/auth.py
|
ecc06036ac8a489b3093161a92012aa7ee7fb198
|
[] |
no_license
|
vishalkallem/Personal-Manager
|
5d58864b42ee2ac97d78ebfa752ef006c8a5d36d
|
0f04ae4dc5da2b54c71830257674808a8370076a
|
refs/heads/master
| 2020-04-11T04:08:10.046578
| 2018-12-12T14:49:43
| 2018-12-12T14:49:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,322
|
py
|
from django.views import View
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth import login, authenticate, logout
from django.contrib.auth.models import User
from CreditCards.forms.auth import *
class SignUpController(View):
def get(self, request):
form = SignUpForm()
context = {
'form': form,
'title': 'Sign Up | Credit Cards'
}
return render(request=request, template_name='CreditCards/signup.html', context=context)
def post(self, request):
form = SignUpForm(request.POST)
if form.is_valid():
User.objects.create_user(**form.cleaned_data)
user = authenticate(request=request, username=form.cleaned_data['username'],
password=form.cleaned_data['password'])
if user is not None:
login(request, user)
return redirect('CreditCards:all_cards')
else:
messages.error(request, "Invalid Credentials entered! Please try again!")
return render(request=request, template_name='CreditCards/signup.html', context={'form': form})
class LoginController(View):
def get(self, request):
if request.user.is_authenticated:
return redirect('CreditCards:all_cards')
login_form = LoginForm
context = {
'form': login_form,
'title': 'Login | Credit Cards'
}
return render(request=request, template_name="CreditCards/login.html", context=context)
def post(self, request):
login_form = LoginForm(request.POST)
if login_form.is_valid():
user = authenticate(request=request, username=login_form.cleaned_data['username'],
password=login_form.cleaned_data['password'])
if user is not None:
login(request, user)
return redirect('CreditCards:all_cards')
else:
messages.error(request, "Invalid Credentials entered! Please try again!")
return redirect('CreditCards:login')
def logout_user(request):
logout(request)
return redirect('CreditCards:login')
|
[
"noreply@github.com"
] |
noreply@github.com
|
744e247595723089f2db9932761d4a68b5fa65c8
|
8abcd291bb0e83457593c33aa904efaae8193778
|
/activemq-mon.py
|
83e45d2790ad49f50c93c7730d293a3e6d24ec53
|
[] |
no_license
|
btguys/activeMQ-mon
|
4b46abce0e3b8038deff75da5dd080831c2441c8
|
c60ac06e90e64b0a79af931a80ebf4aa6af0a560
|
refs/heads/master
| 2021-01-16T21:37:41.273862
| 2016-07-14T09:46:47
| 2016-07-14T09:46:47
| 63,322,092
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,597
|
py
|
#!/bin/env python
#-*- coding:utf-8 -*-
__author__ = 'qiuyongjie'
import string, xml.dom.minidom, sys, urllib2, base64, json, time, socket
ip = "127.0.0.1"
endpoint = socket.gethostname()
step = 60
ts = int(time.time())
tag = ''
keys = ('size','consumerCount','enqueueCount','dequeueCount')
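# Per-queue attributes scraped from the ActiveMQ admin XML; each becomes one metric pushed to the local agent at the end of the script.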
request = urllib2.Request("http://%s:8161/admin/xml/queues.jsp" %ip)
base64string = base64.b64encode('admin:admin')
request.add_header("Authorization", "Basic %s" % base64string)
result = urllib2.urlopen(request)
xmlStr = string.replace(result.read(),'\t', '')
xmlStr = string.replace(xmlStr,'\n', '')
data = xml.dom.minidom.parseString(xmlStr)
queues = data.getElementsByTagName("queues")[0]
p = []
for queue in queues.childNodes:
for key in keys:
q = {}
q["endpoint"] = endpoint
q["timestamp"] = ts
q["step"] = step
q["counterType"]= "GAUGE"
q["metric"] = "activemq.%s" % key
q["tags"] = "queuename=%s,%s" % (queue.getAttribute('name'),tag)
q["value"] = int(queue.getElementsByTagName("stats")[0].getAttribute(key))
p.append(q)
#print json.dumps(p, indent=4)
method = "POST"
handler = urllib2.HTTPHandler()
opener = urllib2.build_opener(handler)
url = 'http://127.0.0.1:1988/v1/push'
request = urllib2.Request(url, data=json.dumps(p) )
request.add_header("Content-Type",'application/json')
request.get_method = lambda: method
try:
connection = opener.open(request)
except urllib2.HTTPError,e:
connection = e
# check. Substitute with appropriate HTTP code.
if connection.code == 200:
print connection.read()
else:
print '{"err":1,"msg":"%s"}' % connection
|
[
"yjqiu@cctechhk.com"
] |
yjqiu@cctechhk.com
|
907a3117714649a6d18e4ff188d89b213ab23196
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/DaVinci_v39r1/tuplemaking/misidrestripping2015/runssmisidrestripping2012.py
|
990ae9cec1042354b70a1ce99da16007f43cbb9e
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,035
|
py
|
# $Id: $
# Test your line(s) of the stripping
#
# NOTE: Please make a copy of this file for your testing, and do NOT change this one!
#
##use CommonParticlesArchive
#from CommonParticlesArchive import CommonParticlesArchiveConf
#CommonParticlesArchiveConf().redirect("stripping21r1p1a")
#
#from Gaudi.Configuration import *
#from Configurables import DaVinci
#from StrippingConf.Configuration import StrippingConf
#
#
## Tighten Trk Chi2 to <3
#from CommonParticles.Utils import DefaultTrackingCuts
#DefaultTrackingCuts().Cuts = { "Chi2Cut" : [ 0, 3 ],
# "CloneDistCut" : [5000, 9e+99 ] }
#
##
##Raw event juggler to split Other/RawEvent into Velo/RawEvent and Tracker/RawEvent
##
#from Configurables import RawEventJuggler
#juggler = RawEventJuggler( DataOnDemand=True, Input=2.0, Output=4.2 )
#
##
##Fix for TrackEff lines
##
#from Configurables import DecodeRawEvent
#DecodeRawEvent().setProp("OverrideInputs",4.2)
#
## Specify the name of your configuration
#confname="B23MuNu" #FOR USERS
#
## NOTE: this will work only if you inserted correctly the
## default_config dictionary in the code where your LineBuilder
## is defined.
#from StrippingSelections import buildersConf
#confs = buildersConf()
#
#from StrippingSelections.Utils import lineBuilder, buildStreamsFromBuilder
##confs[confname]["CONFIG"]["SigmaPPi0CalPrescale"] = 0.5 ## FOR USERS, YOU ONLY NEED TO QUICKLY MODIFY CutName and NewValue (no need to recompile the package but please update the default_config before committing)
#streams = buildStreamsFromBuilder(confs,confname)
#
##clone lines for CommonParticles overhead-free timing
#print "Creating line clones for timing"
#for s in streams:
# for l in s.lines:
# if "_TIMING" not in l.name():
# cloned = l.clone(l.name().strip("Stripping")+"_TIMING")
# s.appendLines([cloned])
#
##define stream names
#leptonicMicroDSTname = 'Leptonic'
#charmMicroDSTname = 'Charm'
#pidMicroDSTname = 'PID'
#bhadronMicroDSTname = 'Bhadron'
#mdstStreams = [ leptonicMicroDSTname,charmMicroDSTname,pidMicroDSTname,bhadronMicroDSTname ]
#dstStreams = [ "BhadronCompleteEvent", "CharmCompleteEvent", "CharmToBeSwum", "Dimuon",
# "EW", "Semileptonic", "Calibration", "MiniBias", "Radiative" ]
#
#stripTESPrefix = 'Strip'
#
#from Configurables import ProcStatusCheck
#
#from PhysConf.Filters import LoKi_Filters
#flts = LoKi_Filters(VOID_Code = "( TrSource(TrSOURCE('/Event/Rec/Track/Best', TrLONG))"\
# " >> ( sum( TrPT,TrP < 1 * TeV ) > 1 * TeV ) )" ,
# VOID_Preambulo = ["from LoKiTracks.decorators import *" ,
# "from LoKiCore.functions import * ",
# "from GaudiKernel.SystemOfUnits import *"])
#filterBadEvents = GaudiSequencer("BadEventFilter",
# ModeOR = True,
# Members = [ flts.sequencer("GECFilter"),
# ProcStatusCheck() ] )
#streamFilter = { 'default' : filterBadEvents,
# 'MiniBias' : ProcStatusCheck() }
#
#
#sc = StrippingConf( Streams = streams,
# MaxCandidates = 2000,
# AcceptBadEvents = False,
# BadEventSelection = streamFilter,
# TESPrefix = stripTESPrefix,
# ActiveMDSTStream = True,
# Verbose = True,
# DSTStreams = dstStreams,
# MicroDSTStreams = mdstStreams )
#
from Configurables import DecayTreeTuple, FilterDesktop,CombineParticles,FitDecayTrees, TupleToolRecoStats, TupleToolTrigger, TupleToolTISTOS, CondDB
from DecayTreeTuple.Configuration import *
#ADDED for BDT reason
#from Configurables import LoKi__Hybrid__TupleTool
#from Configurables import LoKi__Hybrid__Tool as MyFactory
#mf = MyFactory("HybridFactory")
#mf.Modules.append( 'LoKiPhysMC.decorators' )
tuple = DecayTreeTuple("B_Tuple")
#tuple.Inputs = [location]
tuple.Inputs = ["/Event/Semileptonic/Phys/B23MuNu_TriFakeMuLine/Particles"]
#tuple.Inputs = ["Phys/DecayTreeFitterB"]
tuple.ToolList = [
"TupleToolKinematic",
"TupleToolEventInfo",
"TupleToolRecoStats",
"TupleToolANNPID"
]
tuple.addBranches({ # remove all "^" except where needed.
"Jpsi" : "^(J/psi(1S) -> mu+ mu-)",
"mu1" : " J/psi(1S) -> ^mu+ mu-",
"mu2" : " J/psi(1S) -> mu+ ^mu-"
})
tuple.Jpsi.ToolList += [ "TupleToolTISTOS" ]
tuple.Jpsi.addTool( TupleToolTISTOS, name = "TupleToolTISTOS" )
tuple.Jpsi.TupleToolTISTOS.Verbose = True
tuple.Jpsi.TupleToolTISTOS.TriggerList = [
"L0DiMuonDecision"
, "L0MuonDecision"
, "L0HadronDecision"
, "Hlt1TrackAllL0Decision"
, "Hlt1TrackMuonDecision"
, "Hlt1DiMuonHighMassDecision"
, "Hlt1SingleMuonHighPTDecision"
, "Hlt2TopoMu2BodyBBDTDecision"
, "Hlt2TopoMu3BodyBBDTDecision"
, "Hlt2Topo2BodyBBDTDecision"
, "Hlt2Topo3BodyBBDTDecision"
, "Hlt2DiMuonDetachedJPsiDecision"
, "Hlt2DiMuonDetachedDecision"
, "Hlt2SingleMuonDecision"
, "Hlt2DiMuonDetachedHeavyDecision"
]
LoKi_All1=tuple.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_All")
LoKi_All1.Variables = {
'MINIPCHI2' : "MIPCHI2DV(PRIMARY)",
'MINIP' : "MIPDV(PRIMARY)",
'ETA' : 'ETA',
'PHI' : 'PHI'
}
LoKi_Jpsi1=tuple.Jpsi.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_Bplus")
LoKi_Jpsi1.Variables = {
'TAU' : "BPVLTIME()",
'DIRA_OWNPV' : "BPVDIRA",
'FD_CHI2' : "BPVVDCHI2",
'ENDVERTEX_CHI2' : "VFASPF(VCHI2/VDOF)",
'X_travelled' : "VFASPF(VX)-BPV(VX)",
'Y_travelled' : "VFASPF(VY)-BPV(VY)",
'Z_travelled' : "VFASPF(VZ)-BPV(VZ)",
'P_Parallel' : "BPVDIRA*P",
'P_Perp' : "sin(acos(BPVDIRA))*P",
'BPVVDZ' : "BPVVDZ",
'Corrected_Mass' : "BPVCORRM"
}
LoKi_mu11=tuple.mu1.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_mu1")
LoKi_mu11.Variables = {
'PIDmuLoki' : "PIDmu",
'PIDKLoki' : "PIDK",
'PIDpLoki' : "PIDp",
'ghost' : "TRGHP",
'TRACK_CHI2' : "TRCHI2DOF",
'NNK' : "PPINFO(PROBNNK)",
'NNpi' : "PPINFO(PROBNNpi)",
'NNmu' : "PPINFO(PROBNNmu)",
'isMuonLoose' : "switch(ISMUONLOOSE,1,0)",
'isMuonLoki' : "switch(ISMUON,1,0)",
'inMuon' : "switch(INMUON,1,0)",
'nShared' : "PPINFO(LHCb.ProtoParticle.MuonNShared,-1000)"
}
LoKi_mu22=tuple.mu2.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_mu2")
LoKi_mu22.Variables = {
'PIDmuLoki' : "PIDmu",
'PIDKLoki' : "PIDK",
'PIDpLoki' : "PIDp",
'ghost' : "TRGHP",
'TRACK_CHI2' : "TRCHI2DOF",
'NNK' : "PPINFO(PROBNNK)",
'NNpi' : "PPINFO(PROBNNpi)",
'NNmu' : "PPINFO(PROBNNmu)",
'isMuonLoose' : "switch(ISMUONLOOSE,1,0)",
'isMuonLoki' : "switch(ISMUON,1,0)",
'inMuon' : "switch(INMUON,1,0)",
'nShared' : "PPINFO(LHCb.ProtoParticle.MuonNShared,-1000)"
}
tuple.Decay = "J/psi(1S) -> ^mu+ ^mu-"
tuple2 = DecayTreeTuple("B_Tuple2")
tuple2.Inputs = ["/Event/Semileptonic/Phys/B23MuNu_TriFakeMuLine/Particles"]
tuple2.ToolList = [
"TupleToolKinematic",
"TupleToolEventInfo",
"TupleToolRecoStats",
"TupleToolPid",
"TupleToolANNPID"
]
tuple2.addBranches({ # remove all "^" except where needed.
"Bplus" : "^([B+ -> (J/psi(1S) -> mu+ mu-) mu+]CC)",
"mu1" : "[B+ -> (J/psi(1S) -> ^mu+ mu-) mu+]CC",
"mu2" : "[B+ -> (J/psi(1S) -> mu+ ^mu-) mu+]CC ",
"mu3" : "[B+ -> (J/psi(1S) -> mu+ mu-) ^mu+]CC ",
})
tuple2.Bplus.ToolList += [ "TupleToolTISTOS" ]
tuple2.Bplus.addTool( TupleToolTISTOS, name = "TupleToolTISTOS" )
tuple2.Bplus.TupleToolTISTOS.Verbose = True
tuple2.Bplus.TupleToolTISTOS.TriggerList = [
"L0DiMuonDecision"
, "L0MuonDecision"
, "L0HadronDecision"
, "Hlt1TrackAllL0Decision"
, "Hlt1TrackMuonDecision"
, "Hlt1DiMuonHighMassDecision"
, "Hlt1SingleMuonHighPTDecision"
, "Hlt2TopoMu2BodyBBDTDecision"
, "Hlt2TopoMu3BodyBBDTDecision"
, "Hlt2Topo2BodyBBDTDecision"
, "Hlt2Topo3BodyBBDTDecision"
, "Hlt2DiMuonDetachedJPsiDecision"
, "Hlt2DiMuonDetachedDecision"
, "Hlt2SingleMuonDecision"
, "Hlt2DiMuonDetachedHeavyDecision"
]
LoKi_All=tuple2.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_All")
LoKi_All.Variables = {
'MINIPCHI2' : "MIPCHI2DV(PRIMARY)",
'MINIP' : "MIPDV(PRIMARY)",
'ETA' : 'ETA',
'PHI' : 'PHI'
}
LoKi_Bplus=tuple2.Bplus.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_Bplus")
LoKi_Bplus.Variables = {
'TAU' : "BPVLTIME()",
'DIRA_OWNPV' : "BPVDIRA",
'FD_CHI2' : "BPVVDCHI2",
'ENDVERTEX_CHI2' : "VFASPF(VCHI2/VDOF)",
'X_travelled' : "VFASPF(VX)-BPV(VX)",
'Y_travelled' : "VFASPF(VY)-BPV(VY)",
'Z_travelled' : "VFASPF(VZ)-BPV(VZ)",
'P_Parallel' : "BPVDIRA*P",
'P_Perp' : "sin(acos(BPVDIRA))*P",
'BPVVDZ' : "BPVVDZ",
'Corrected_Mass' : "BPVCORRM"
}
LoKi_mu1=tuple2.mu1.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_mu1")
LoKi_mu1.Variables = {
'PIDmuLoki' : "PIDmu",
'PIDKLoki' : "PIDK",
'PIDpLoki' : "PIDp",
'ghost' : "TRGHP",
'TRACK_CHI2' : "TRCHI2DOF",
'NNK' : "PPINFO(PROBNNK)",
'NNpi' : "PPINFO(PROBNNpi)",
'NNmu' : "PPINFO(PROBNNmu)",
'isMuonLoose' : "switch(ISMUONLOOSE,1,0)",
'isMuonLoki' : "switch(ISMUON,1,0)",
'inMuon' : "switch(INMUON,1,0)",
'nShared' : "PPINFO(LHCb.ProtoParticle.MuonNShared,-1000)"
}
LoKi_mu2=tuple2.mu2.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_mu2")
LoKi_mu2.Variables = {
'PIDmuLoki' : "PIDmu",
'PIDKLoki' : "PIDK",
'PIDpLoki' : "PIDp",
'ghost' : "TRGHP",
'TRACK_CHI2' : "TRCHI2DOF",
'NNK' : "PPINFO(PROBNNK)",
'NNpi' : "PPINFO(PROBNNpi)",
'NNmu' : "PPINFO(PROBNNmu)",
'isMuonLoose' : "switch(ISMUONLOOSE,1,0)",
'isMuonLoki' : "switch(ISMUON,1,0)",
'inMuon' : "switch(INMUON,1,0)",
'nShared' : "PPINFO(LHCb.ProtoParticle.MuonNShared,-1000)"
}
LoKi_mu3=tuple2.mu3.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_mu3")
LoKi_mu3.Variables = {
'PIDmuLoki' : "PIDmu",
'PIDKLoki' : "PIDK",
'PIDpLoki' : "PIDp",
'ghost' : "TRGHP",
'TRACK_CHI2' : "TRCHI2DOF",
'NNK' : "PPINFO(PROBNNK)",
'NNpi' : "PPINFO(PROBNNpi)",
'NNmu' : "PPINFO(PROBNNmu)",
'isMuonLoose' : "switch(ISMUONLOOSE,1,0)",
'isMuonLoki' : "switch(ISMUON,1,0)",
'inMuon' : "switch(INMUON,1,0)",
'nShared' : "PPINFO(LHCb.ProtoParticle.MuonNShared,-1000)"
}
tuple2.Decay = "[B+ -> ^(J/psi(1S) -> ^mu+ ^mu-) ^mu+]CC" #^J/psi(1S)->
from DecayTreeTuple.Configuration import *
from Configurables import TupleToolVertexMisppMuMu
tuple2.Bplus.addTool(TupleToolVertexMisppMuMu)
tuple2.Bplus.ToolList+=["TupleToolVertexMisppMuMu"]
from DecayTreeTuple.Configuration import *
from Configurables import TupleToolVertexMispmMuMu
tuple2.Bplus.addTool(TupleToolVertexMispmMuMu)
tuple2.Bplus.ToolList+=["TupleToolVertexMispmMuMu"]
from DecayTreeTuple.Configuration import *
from Configurables import TupleToolVertexMismpMuMu
tuple2.Bplus.addTool(TupleToolVertexMismpMuMu)
tuple2.Bplus.ToolList+=["TupleToolVertexMismpMuMu"]
from DecayTreeTuple.Configuration import *
from Configurables import TupleToolVertexMisMuMuMu
tuple2.Bplus.addTool(TupleToolVertexMisMuMuMu)
tuple2.Bplus.ToolList+=["TupleToolVertexMisMuMuMu"]
from DecayTreeTuple.Configuration import *
from Configurables import TupleToolSallyvs3
tuple2.Bplus.addTool(TupleToolSallyvs3)
tuple2.Bplus.ToolList+=["TupleToolSallyvs3"]
#tuple2.Decay = "[B+ -> ^(J/psi(1S) -> ^mu+ ^mu-) ^mu+]CC" #^J/psi(1S)->
from DecayTreeTuple.Configuration import *
from Configurables import TupleToolApplypMuIsolation
tuple2.Bplus.addTool(TupleToolApplypMuIsolation)
tuple2.Bplus.TupleToolApplypMuIsolation.OutputSuffix="_weights"
tuple2.Bplus.TupleToolApplypMuIsolation.WeightsFile="weights_110614_Lc_pX.xml"
tuple2.Bplus.ToolList+=["TupleToolApplypMuIsolation"]
#Mysterious things to make isolation work
name="TupleToolApplypMuIsolation"
from Configurables import ChargedProtoParticleMaker
veloprotos = ChargedProtoParticleMaker(name+"ProtoPMaker")
veloprotos.Inputs = ["Rec/Track/Best"]
veloprotos.Output = "Rec/ProtoP/myProtoPMaker/ProtoParticles"
from Configurables import DaVinci
DaVinci().appendToMainSequence( [ veloprotos ])
from Gaudi.Configuration import *
from Configurables import ProtoParticleCALOFilter, CombinedParticleMaker,NoPIDsParticleMaker
from CommonParticles.Utils import *
algorithm = NoPIDsParticleMaker('StdNoPIDsVeloPions', Particle = 'pion', )
algorithm.Input = "Rec/ProtoP/myProtoPMaker/ProtoParticles"
selector = trackSelector ( algorithm , trackTypes = ['Velo'] )
locations = updateDoD ( algorithm )
DaVinci().appendToMainSequence( [ algorithm ])
from Configurables import TimingAuditor, SequencerTimerTool
TimingAuditor().addTool(SequencerTimerTool,name="TIMER")
TimingAuditor().TIMER.NameSize = 60
from Configurables import AuditorSvc, ChronoAuditor
AuditorSvc().Auditors.append( ChronoAuditor("Chrono") )
#from Configurables import StrippingReport
#sr = StrippingReport(Selections = sc.selections())
#from Configurables import AlgorithmCorrelationsAlg
#ac = AlgorithmCorrelationsAlg(Algorithms = list(set(sc.selections())))
#DaVinci().HistogramFile = 'DV_stripping_histosnew2.root'
DaVinci().TupleFile = "B23MuNuFakeSS.root"
#DaVinci().HistogramFile = 'DVHistosnshared.root'
#DaVinci().TupleFile = "DVTuplesnshared.root"
#DaVinci().EvtMax = 10000
DaVinci().PrintFreq = 2000
#DaVinci().UserAlgorithms = [ tuple ]
#DaVinci().appendToMainSequence( [ tuple ] )
DaVinci().appendToMainSequence( [ tuple2 ] )
#DaVinci().appendToMainSequence( [ tuple2 ] )
#DaVinci().appendToMainSequence( [ sc.sequence() ] )
#DaVinci().appendToMainSequence( [ tuple] )
#DaVinci().appendToMainSequence( [ tuple2] )
#DaVinci().appendToMainSequence( [ sr ] )
#DaVinci().appendToMainSequence( [ ac ] )
#DaVinci().appendToMainSequence( [ tuple] )
#DaVinci().appendToMainSequence( [ tuple2] )
DaVinci().DataType = "2012"
DaVinci().InputType = "DST"
DaVinci().Lumi = True
DaVinci().Simulation = False
# change the column size of timing table
from Configurables import TimingAuditor, SequencerTimerTool
TimingAuditor().addTool(SequencerTimerTool,name="TIMER")
TimingAuditor().TIMER.NameSize = 60
#NTupleSvc().Output = ["FILE1 DATAFILE='trythis.root' TYP='ROOT' OPT='NEW'"]
MessageSvc().Format = "% F%60W%S%7W%R%T %0W%M"
#from GaudiConf import IOHelper
#IOHelper().inputFiles(['./00050733_00021988_1.semileptonic.dst'], clear=True)
# database
#DaVinci().DDDBtag = "dddb-20120831"
#DaVinci().CondDBtag = "cond-20121008"
#DaVinci().Lumi = True
# input file
#importOptions("$STRIPPINGSELECTIONSROOT/tests/data/Reco15a_Run164668.py")
#importOptions("$STRIPPINGSELECTIONSROOT/tests/data/Reco14_Run125113.py")
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
2e618ec30de1abe5e786ff02bf4e9c6a5555c288
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2364/60623/305882.py
|
64207e9aa3a3c85e563c2b8fdf16e3f809b357a9
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 55
|
py
|
a=input()
if a=='100':
print(10)
else:
print(a)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
c07ea095d26e9c98fab8360a7d68827391dce5d3
|
26f7f766ef8393ad31ca620818fa5c6c630d1ec7
|
/students/migrations/0011_auto_20210814_1736.py
|
dde06d0745a46ac8c836c84fa7e7bad46c41a385
|
[] |
no_license
|
jemgithub0418/cvs
|
8621dde853edaca44cbac9d50c4a8ddab60f2ee8
|
d19344391d1d097f1859b07dd342ef1516d5b8ff
|
refs/heads/master
| 2023-07-11T23:08:57.780206
| 2021-08-16T08:13:08
| 2021-08-16T08:13:08
| 353,946,962
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,126
|
py
|
# Generated by Django 3.1.7 on 2021-08-14 09:36
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('students', '0010_auto_20210814_1727'),
]
operations = [
migrations.AddField(
model_name='studentprofile',
name='LRN_or_student_number',
field=models.CharField(default=13213216, max_length=55),
preserve_default=False,
),
migrations.AddField(
model_name='studentprofile',
name='contact_number',
field=models.CharField(blank=True, max_length=15, null=True),
),
migrations.AddField(
model_name='studentprofile',
name='date_of_birth',
field=models.DateField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='studentprofile',
name='middle_name',
field=models.CharField(blank=True, max_length=55, null=True),
),
]
|
[
"jeremijaredramos.gmail.com"
] |
jeremijaredramos.gmail.com
|
b6e7ed8e10f237b72cc2b014204ea9dd5c8549b9
|
8eb45e774eed886192f2088d0f692c3fe0c1fe3d
|
/test_store.py
|
15bd836a3139b404a3811db0d468b887bfa99892
|
[
"Apache-2.0"
] |
permissive
|
C-EO/snapstore
|
cbc2e548fdb62b8194bc1d99fe7d21a4c71b7e98
|
6d54a2c491701f18ee736149c16ba3f4e3958160
|
refs/heads/master
| 2022-01-08T22:29:57.730413
| 2018-10-29T20:32:06
| 2018-10-29T20:32:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,797
|
py
|
import store
import unittest
from flask import json
class StoreTestCase(unittest.TestCase):
def setUp(self):
# TODO: setup fixture data, test revision bumps
store.app.config['TESTING'] = True
self.c = store.app.test_client()
self.headers = {
'X-Ubuntu-Series': 16,
'X-Ubuntu-Architecture': 'amd64'
}
def tearDown(self):
pass
def test_hello(self):
r = self.c.get('/')
assert 'Hello' in r.data
def test_details_ok(self):
''' snap install bar '''
r = self.c.get('/api/v1/snaps/details/bar')
j = json.loads(r.data)
assert j['package_name'] == 'bar'
def test_details_empty(self):
''' snap install xyzzy '''
r = self.c.get('/api/v1/snaps/details/xyzzy', headers=self.headers)
j = json.loads(r.data)
assert 'No such package' in j['errors']
def test_search_old_install_path(self):
''' snap install bar (<= snapd 2.0.??) '''
r = self.c.get('/api/v1/search?q=package_name:"bar"')
j = json.loads(r.data)
assert j['_embedded']['clickindex:package'][0]['package_name'] == 'bar'
def test_search_all(self):
''' snap find '''
r = self.c.get('/api/v1/search?q=')
j = json.loads(r.data)
assert len(j['_embedded']['clickindex:package']) == 3
def test_search_partial(self):
''' snap find ba '''
r = self.c.get('/api/v1/search?q=ba')
j = json.loads(r.data)
assert len(j['_embedded']['clickindex:package']) == 2
def test_search_exact(self):
''' snap find foobar25 '''
r = self.c.get('/api/v1/search?q=foobar25')
j = json.loads(r.data)
assert j['_embedded']['clickindex:package'][0]['package_name'] == 'foobar25'
def test_metadata_local(self):
''' snap refresh (>= snapd 2.0.??)
with only snaps from our local repo '''
r = self.c.post('/api/v1/snaps/metadata',
data=json.dumps({'snaps': [
{'snap_id': 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxbar',
'revision': 1, 'confinement': 'strict'}],
"fields": ["download_url", "revision"]}),
headers=self.headers)
j = json.loads(r.data)
assert len(j['_embedded']['clickindex:package']) == 1
def test_metadata_remote(self):
''' snap refresh (>= snapd 2.0.??)
with only snaps from upstream repo '''
r = self.c.post('/api/v1/snaps/metadata',
data=json.dumps({'snaps': [
{'snap_id': 'mVyGrEwiqSi5PugCwyH7WgpoQLemtTd6',
'revision': 1, 'confinement': 'strict'}],
"fields": ["download_url", "revision"]}),
headers=self.headers)
j = json.loads(r.data)
assert len(j['_embedded']['clickindex:package']) == 1
def test_metadata_mixed(self):
''' snap refresh (>= snapd 2.0.??)
with snaps from both local and remote '''
r = self.c.post('/api/v1/snaps/metadata',
data=json.dumps({'snaps': [
{'snap_id': 'mVyGrEwiqSi5PugCwyH7WgpoQLemtTd6',
'revision': 1, 'confinement': 'strict'},
{'snap_id': 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxbar',
'revision': 1, 'confinement': 'strict'}
],
"fields": ["download_url", "revision"]}),
headers=self.headers)
j = json.loads(r.data)
assert len(j['_embedded']['clickindex:package']) == 2
if __name__ == '__main__':
unittest.main()
|
[
"bret.barker@canonical.com"
] |
bret.barker@canonical.com
|
50cdb57985ff7079984d9965a6af7e120f8fcc61
|
6d941762888f76cd55cb1efae8a3c3d1c064b76d
|
/dbtrigger/config/__init__.py
|
747d5855e2f0a87decd0e4d84d8145ec2606cb7c
|
[
"MIT"
] |
permissive
|
julienc91/dbtrigger
|
ac551914d34b100e7122cc3e96c91190cf51e89d
|
d06916a019641377bf3d45b2e8e38399643450db
|
refs/heads/master
| 2020-03-31T18:49:24.309774
| 2018-10-28T19:42:06
| 2018-10-28T19:42:06
| 152,474,063
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
# -*- coding: utf-8 -*-
import appdirs
from .config import Settings
settings = Settings(appdirs.user_data_dir('dbtrigger'))
|
[
"git@julienc.io"
] |
git@julienc.io
|
54b5ff124fe95b4e02d8269ba0f78709c44407b9
|
d3aa3b4cae0db74140607e896b3e8134a83614d1
|
/main.py
|
181530a47064374934257761f8b9fbbcbf4de7ca
|
[
"Apache-2.0"
] |
permissive
|
luosolo/SuPyPlex
|
6629eff3cc442fa8d510aab721d474c7c24d77c9
|
7c7f8c8dd41c1da9eec1936549f59d7005955168
|
refs/heads/main
| 2023-01-04T02:43:29.510414
| 2020-10-28T21:58:56
| 2020-10-28T21:58:56
| 308,093,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
from supyplex.game import SuPyplex
from os import path
ROOT_DIR = path.dirname(path.abspath("main.py"))
if __name__ == '__main__':
game = SuPyplex(ROOT_DIR)
game.setup()
game.main_loop()
|
[
"sandro.labruzzo@isti.cnr.it"
] |
sandro.labruzzo@isti.cnr.it
|
b02a3215d5c955daec98e2db06f5171974b90720
|
05ec80585e500eb75baade82bada8f0c5a2a76dc
|
/Backtracking/GenerateIP.py
|
4b339410a665caec82a4815768bb4049c6a8bab4
|
[] |
no_license
|
NenadPantelic/GeeksforGeeks-Must-Do-Interview-preparation
|
24477da148d4b9fe8113f669f21984d081327563
|
180c6b1bc6a4b6e1b44c409c220368b391b672b8
|
refs/heads/master
| 2021-01-05T19:16:22.436554
| 2020-11-15T14:39:02
| 2020-11-15T14:39:02
| 241,113,139
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,550
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 25 21:19:12 2020
@author: nenad
"""
def is_valid(ip, pos,segments):
len_flag = True
# ip is not valid in form 0x e.g 121.03.22.234
if len(ip) > 1 and ip[0] == "0":
len_flag = False
# check ip length, value and if ip's part is already checked
return len_flag and len(ip) > 0 and 0 <= int(ip) <= 255 and segments[pos] == False
def genIP(string):
ips = []
n = len(string)
segments = [False] * n
solve(string, n, 0, ips, segments, [])
print(ips)
def solve(string, n, pos, ips,segments, ip):
# ip has 4 parts
if len(ip) == 4:
        # if we reached the end of the string being processed
if pos>=n:
ips.append(".".join(ip))
return
# one part of ip has length from 1 to 3, both inclusive
for i in range(1,min(4, n-pos+1)):
# take substring as ip's quartette
substr = string[pos:pos+i]
# if ip is valid
if is_valid(substr, pos,segments):
# mark that char as used
segments[pos] = True
# check the rest of the string - can we form the rest of ip from that substring
solve(string, n, pos+i, ips, segments, ip + [substr])
# backtrack if we can't do that
segments[pos] = False
return
# Test 0
string = "1111"
genIP(string)
# Test 1
string = "11211"
genIP(string)
# Test 2
string = "112112"
genIP(string)
# Test 3
string = "25500255"
genIP(string)
|
[
"nenadpantelickg@gmail.com"
] |
nenadpantelickg@gmail.com
|
b632edb4abed10644c2eca37adee10ff3ebf2a1e
|
080397d0e6d573ef6d7eb9c2bc6b1b5787cfe0d1
|
/tests/twitter_learning_journal/builders/test_cache_path_builder.py
|
2dfeb176343ff67367981b17880cefdbe6d09dac
|
[
"Beerware"
] |
permissive
|
DEV3L/twitter-learning-journal
|
ecd0eb922e369b10fd6e039d652eed7078601139
|
a51d22a60a3d1249add352d8357975a7f2db585c
|
refs/heads/master
| 2021-09-20T17:27:11.157096
| 2018-08-13T11:58:34
| 2018-08-13T11:58:34
| 114,556,953
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 244
|
py
|
from app.twitter_learning_journal.builders.cache_path_builder import build_cache_path
def test_build_cache_path():
expected_cache_path = './data/pickle/tweets/test'
assert expected_cache_path == build_cache_path(sub_directory='test')
|
[
"jus.beall@gmail.com"
] |
jus.beall@gmail.com
|
2141ff44b91cb0e955de2264973b3405ba521111
|
e5dcd8f886c7db7f66446fdbcb239f1075fcdb57
|
/airline/users/views.py
|
fc478d817399aee4373f4f75d194e5b1ff808da4
|
[
"MIT"
] |
permissive
|
avulaankith/Django-Codes
|
bf4f40816c6ad4d599bfa28cdfbf01a9caa508b9
|
e4216f6a51b5baa745d5a0214afcaf024d048f44
|
refs/heads/main
| 2023-07-14T18:12:16.855718
| 2021-09-02T03:46:03
| 2021-09-02T03:46:03
| 402,280,475
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,055
|
py
|
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
# Create your views here.
def index(request):
if not request.user.is_authenticated:
return HttpResponseRedirect(reverse("login"))
return render(request, "users/user.html")
def login_view(request):
if request.method == "POST":
username = request.POST["username"]
password = request.POST["password"]
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return HttpResponseRedirect(reverse("index"))
else:
return render(request, "users/login.html", {
"message": "Invalid credentials."
})
else:
return render(request, "users/login.html")
def logout_view(request):
logout(request)
return render(request, "users/login.html", {
"message": "Logged out."
})
|
[
"avulaankith@gmail.com"
] |
avulaankith@gmail.com
|
dc9388fcc7ecf66dabb9bc64fe98c2f689c370d6
|
20176bf4fbd8aec139c7b5a27f2c2e155e173e6e
|
/data/all-pratic/Anusha Koila/print_odd_num.py
|
d290d9c8c6e77770c4fb451217c46810fd11629d
|
[] |
no_license
|
githubjyotiranjan/pytraining
|
4ac4a1f83cc4270e2939d9d32c705019c5bc61c5
|
8b50c4ab7848bd4cbfdfbc06489768d577289c66
|
refs/heads/master
| 2020-03-19T06:22:20.793296
| 2018-06-15T20:08:11
| 2018-06-15T20:08:11
| 136,013,642
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
#print odd numbers
#raise exception when negative numbers are inputted
num = int(input("Enter a number :"))
if num < 0:
    raise ValueError("Negative numbers not allowed")
print("ODD numbers list :\n ")
for i in range(1, num):
    if i % 2 != 0:
        print(i)
|
[
"jsatapathy007@gmail.com"
] |
jsatapathy007@gmail.com
|
87a581165b5f12912053dbd78b6fc1deb72afe4c
|
771f79ef8115d3f0d9efd0e2b3514bb5c8ecca8a
|
/AvgMark.py
|
da64dee9590229e34e9ea084f918a212b75bc4fc
|
[] |
no_license
|
harris112/UniProjs
|
031139c90ea6b99b409d72f9063160535064c48d
|
87b422cc327f53535cd7029c911b040a86bc32b8
|
refs/heads/master
| 2021-05-15T16:55:42.366567
| 2018-03-08T09:01:46
| 2018-03-08T09:01:46
| 107,562,165
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
name = input('Enter student name please:')
mark1 = int(input('Enter first mark:'))
mark2 = int(input('Enter second mark:'))
mark3 = int(input('Enter third mark:'))
mark4 = int(input('Enter fourth mark:'))
mark5 = int(input('Enter fifth mark:'))
sum = mark1+mark2+mark3+mark4+mark5
average = sum / 5
print ( ' Average marks of ' + name + ' is ' + str(average))
|
[
"U1774154@unimail.hud.ac.uk"
] |
U1774154@unimail.hud.ac.uk
|
09211b6e271fa9c3294ac4db7a0b0daa4b1b7b19
|
a75015a0755e1e619889dce12fcf4128e8267b01
|
/irtokz/indic_tokenizer.py
|
51e11c4f08aa39f8311a0bc7e39c9a6c8cbcba74
|
[
"MIT"
] |
permissive
|
ltrc/indic-tokenizer
|
12ec2011359f45069067f4248145da4a3e901793
|
a7b25f396e41390100ce4b2b86a7c1a6ab744afe
|
refs/heads/master
| 2021-01-18T05:09:31.308492
| 2018-10-25T11:11:10
| 2018-10-25T11:11:10
| 43,815,728
| 3
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,712
|
py
|
#!/usr/bin/env python
# -*- coding=utf-8 -*-
from __future__ import division, unicode_literals
import re
import os.path
class IndicTokenizer():
def __init__(self, lang='hin', split_sen=False):
self.lang = lang
self.split_sen = split_sen
file_path = os.path.dirname(os.path.abspath(__file__))
self.urd = lang in ['urd', 'kas']
if lang == 'asm':
self.lang = 'ben'
if lang in ["mar", "nep", "bod", "kok"]:
self.lang = 'hin'
# load nonbreaking prefixes from file
self.NBP = dict()
with open('%s/data/NONBREAKING_PREFIXES' % file_path) as fp:
for line in fp:
if line.startswith('#'):
continue
if '#NUMERIC_ONLY#' in line:
line = line.replace('#NUMERIC_ONLY#', '').split()[0]
self.NBP[line] = 2
else:
self.NBP[line.strip()] = 1
# precompile regexes
self.fit()
def fit(self):
# remove junk characters
self.junk = re.compile('[\x00-\x1f]')
        # separate out on Latin-1 supplementary characters
        self.latin = re.compile('([\xa1-\xbf\xd7\xf7])')
        # separate out on general unicode punctuations except "’"
        self.upunct = re.compile('([\u2012-\u2018\u201a-\u206f])')
        # separate out on unicode mathematical operators
        self.umathop = re.compile('([\u2200-\u2211\u2213-\u22ff])')
        # separate out on unicode fractions
        self.ufrac = re.compile('([\u2150-\u2160])')
        # separate out on unicode superscripts and subscripts
        self.usupsub = re.compile('([\u2070-\u209f])')
        # separate out on unicode currency symbols
        self.ucurrency = re.compile('([\u20a0-\u20cf])')
        # separate out all "other" ASCII special characters
self.specascii = re.compile(r'([\\!@#$%^&*()_+={\[}\]|";:<>?`~/])')
# keep multiple dots together
self.multidot = re.compile(r'(\.\.+)([^\.])')
if self.urd:
# keep multiple dots (urdu-dots) together
self.multidot_urd = re.compile('(\u06d4\u06d4+)([^\u06d4])')
else:
# keep multiple purna-viram together
self.multiviram = re.compile('(\u0964\u0964+)([^\u0964])')
# keep multiple purna deergh-viram together
self.multidviram = re.compile('(\u0965\u0965+)([^\u0965])')
# split contractions right (both "'" and "’")
self.numcs = re.compile("([0-9\u0966-\u096f])'s")
self.aca = re.compile(
"([a-zA-Z\u0080-\u024f])'([a-zA-Z\u0080-\u024f])")
self.acna = re.compile(
"([a-zA-Z\u0080-\u024f])'([^a-zA-Z\u0080-\u024f])")
self.nacna = re.compile(
"([^a-zA-Z\u0080-\u024f])'([^a-zA-Z\u0080-\u024f])")
self.naca = re.compile(
"([^a-zA-Z0-9\u0966-\u096f\u0080-\u024f])"
"'([a-zA-Z\u0080-\u024f])")
# multiple hyphens
self.multihyphen = re.compile('(-+)')
# restore multi-dots
self.restoredots = re.compile(r'(DOT)(\1*)MULTI')
if self.urd:
self.restoreudots = re.compile(r'(DOTU)(\1*)MULTI')
else:
self.restoreviram = re.compile(r'(PNVM)(\1*)MULTI')
self.restoredviram = re.compile(r'(DGVM)(\1*)MULTI')
# split sentences
if self.urd:
self.splitsenur1 = re.compile(
' ([.?\u06d4]) '
'([\u0617-\u061a\u0620-\u065f\u066e-\u06d3'
'\u06d5\u06fa-\u06ffA-Z\(\{\[<])')
self.splitsenur2 = re.compile(
' ([.?\u06d4]) ([\)\}\]\'"> ]+) ')
else:
self.splitsenir1 = re.compile(
' ([|.?\u0964\u0965]) ([\u0900-\u0d7fA-Z\(\{\[<])')
self.splitsenir2 = re.compile(
' ([|.?\u0964\u0965]) ([\)\}\]\'"> ]+) ')
def normalize(self, text):
"""
Performs some common normalization, which includes:
- Removal of Byte order mark, word joiner, etc.
- Removal of ZERO_WIDTH_NON_JOINER and ZERO_WIDTH_JOINER
- ZERO_WIDTH_SPACE and NO_BREAK_SPACE replaced by spaces
        - Unicode Punctuation replaced with ASCII
"""
text = text.replace('\u00A0', ' ') # NO_BREAK_SPACE
text = text.replace('\u00AD', '') # SOFT_HYPHEN
text = text.replace('\u2060', '') # WORD_JOINER
text = text.replace('\u200A', ' ') # H_SP
text = text.replace('\u200B', ' ') # ZERO_WIDTH_SPACE
text = text.replace('\u200C', '') # ZERO_WIDTH_NON_JOINER
text = text.replace('\u200D', '') # ZERO_WIDTH_JOINER
text = text.replace('\u200E', '') # LEFT_TO_RIGHT_MARK
text = text.replace('\u200F', '') # RIGHT_TO_LEFT_MARK
text = text.replace('\uFEFF', '') # BYTE_ORDER_MARK
text = text.replace('\uFFFE', '') # BYTE_ORDER_MARK_2
text = re.sub('[\u2010\u2043]', '-', text) # hyphen
text = re.sub('[\u2018\u2019]', "'", text) # single quotes
text = re.sub('[\u201c\u201d]', '"', text) # double quotes
return text
def tokenize_prefixes(self, text):
words = text.split()
text_len = len(words) - 1
text = str()
for i, word in enumerate(words):
if word.endswith('.'):
dotless = word[:-1]
if dotless.isdigit():
word = dotless + ' .'
elif ('.' in dotless and re.search('[a-zA-Z]', dotless)) or \
self.NBP.get(dotless, 0) == 1 or \
(i < text_len and words[i + 1][0].islower()):
pass
elif self.NBP.get(dotless, 0) == 2 and \
(i < text_len and words[i + 1][0].isdigit()):
pass
elif i < text_len and words[i + 1][0].isdigit():
pass
else:
word = dotless + ' .'
text += "%s " % word
return ' %s ' % text
def tokenize_by_script(self, text, digits, letters,
lang, special_ch=''):
if lang != self.lang:
return text
        # separate out "," except for Indic and ASCII digits
text = re.sub('([^0-9%s]),' % digits, r'\1 , ', text)
text = re.sub(',([^0-9%s])' % digits, r' , \1', text)
# separate out on Indic letters followed by non-Indic letters
text = re.sub(
'([%s])([^%s-])' % (letters, letters),
r'\1 \2',
text)
text = re.sub(
'([^%s-])([%s])' % (letters, letters),
r'\1 \2',
text)
        # separate out Indic special chars
if special_ch:
text = re.sub('([%s])' % special_ch, r' \1 ', text)
# separate out hyphens
text = re.sub(
'(-?[0-9%s]-+[0-9%s]-?){,}' % (digits, digits),
lambda m: r'%s' % (m.group().replace('-', ' - ')),
text)
# separate out hyphens not in between alphabets
text = re.sub(
r'(.)-([^a-zA-Z%s])' % letters,
r'\1 - \2',
text)
text = re.sub(
r'([^a-zA-Z%s])-(.)' % letters,
r'\1 - \2',
text)
return text
def tokenize(self, text):
text = self.normalize(text)
text = ' %s ' % (text)
# remove junk characters
text = self.junk.sub('', text)
        # separate out on Latin-1 supplementary characters
        text = self.latin.sub(r' \1 ', text)
        # separate out on general unicode punctuations except "’"
        text = self.upunct.sub(r' \1 ', text)
        # separate out on unicode mathematical operators
        text = self.umathop.sub(r' \1 ', text)
        # separate out on unicode fractions
        text = self.ufrac.sub(r' \1 ', text)
        # separate out on unicode superscripts and subscripts
        text = self.usupsub.sub(r' \1 ', text)
        # separate out on unicode currency symbols
        text = self.ucurrency.sub(r' \1 ', text)
        # separate out all "other" ASCII special characters
text = self.specascii.sub(r' \1 ', text)
# keep multiple dots together
text = self.multidot.sub(lambda m: r' %sMULTI %s' % (
'DOT' * len(m.group(1)), m.group(2)), text)
if self.urd:
# keep multiple dots (urdu-dots) together
text = self.multidot_urd.sub(lambda m: r' %sMULTI %s' % (
'DOTU' * len(m.group(1)), m.group(2)), text)
else:
# keep multiple purna-viram together
text = self.multiviram.sub(lambda m: r' %sMULTI %s' % (
'PNVM' * len(m.group(1)), m.group(2)), text)
# keep multiple purna deergh-viram together
text = self.multidviram.sub(lambda m: r' %sMULTI %s' % (
'DGVM' * len(m.group(1)), m.group(2)), text)
# split contractions right (both "'" and "’")
text = self.nacna.sub(r"\1 ' \2", text)
text = self.naca.sub(r"\1 ' \2", text)
text = self.acna.sub(r"\1 ' \2", text)
text = self.aca.sub(r"\1 '\2", text)
text = self.numcs.sub(r"\1 's", text)
text = text.replace("''", " ' ' ")
        # separate out hyphens
text = self.multihyphen.sub(lambda m: r'%s' % ' '.join(m.group(1)),
text)
# handle non-breaking prefixes
text = self.tokenize_prefixes(text)
# tokenize by language script
text = self.tokenize_by_script(text, '\u0966-\u096f',
'\u0900-\u0963\u0970-\u097f', 'hin')
text = self.tokenize_by_script(text, '\u09e6-\u09ef',
'\u0980-\u09e3\u09f0-\u09ff', 'ben',
special_ch='\u09f2\u09f3\u09fa\u09fb')
text = self.tokenize_by_script(text, '\u0ae6-\u0aef',
'\u0A80-\u0AE3\u0Af0-\u0Aff', 'guj',
special_ch='\u0AD0\u0AF1')
text = self.tokenize_by_script(text, '\u0d66-\u0d6f',
'\u0D00-\u0D63\u0D73-\u0D7f', 'mal',
special_ch='\u0d73\u0d74\u0d75')
text = self.tokenize_by_script(text, '\u0a66-\u0a6f',
'\u0A00-\u0A63\u0A70-\u0A7f', 'pan')
text = self.tokenize_by_script(text, '\u0c66-\u0c6f',
'\u0c00-\u0c63\u0c70-\u0c7f', 'tel',
special_ch='\u0c78-\u0c7f')
text = self.tokenize_by_script(text, '\u0be6-\u0bef',
'\u0B80-\u0Be3\u0Bf3-\u0Bff', 'tam',
special_ch='\u0bd0\u0bf3-\u0bff')
text = self.tokenize_by_script(text, '\u0ce6-\u0cef',
'\u0C80-\u0Ce3\u0Cf1-\u0Cff', 'kan')
text = self.tokenize_by_script(text, '\u0b66-\u0b6f',
'\u0B00-\u0B63\u0B70-\u0B7f', 'ori',
special_ch='\u0B72-\u0B77')
if self.urd:
            # separate out Urdu full-stop (۔)
text = re.sub('([\u0600-\u06ff])(\u06d4 )', r'\1 \2', text)
text = re.sub('( \u06d4)([\u0600-\u06ff])', r'\1 \2', text)
            # separate out Urdu comma i.e., "،" except for Urdu digits
text = re.sub(
'([^0-9\u0660-\u0669\u06f0-\u06f9])(\u060C)',
r'\1 \2 ',
text)
text = re.sub(
'(\u060C)([^0-9\u0660-\u0669\u06f0-\u06f9])',
r' \1 \2',
text)
# separate out on Urdu letters followed by non-Urdu letters
# and vice-versa
text = re.sub(
'([\u0617-\u061a\u0620-\u065f\u066e-\u06d3\u06d5'
'\u06fa-\u06ff\ufe70-\ufeff\ufb50-\ufdff])'
'([^\u0617-\u061a\u0620-\u065f\u066e-\u06d3\u06d5'
'\u06fa-\u06ff\ufe70-\ufeff\ufb50-\ufdff'
'\u06d4\u066b-])',
r'\1 \2',
text)
text = re.sub(
'([^\u0617-\u061a\u0620-\u065f\u066e-\u06d3\u06d5'
'\u06fa-\u06ff\ufe70-\ufeff\ufb50-\ufdff'
'\u06d4\u066b-])'
'([\u0617-\u061a\u0620-\u065f\u066e-\u06d3\u06d5\u06fa-\u06ff'
'\ufe70-\ufeff\ufb50-\ufdff])',
r'\1 \2',
text)
# separate out on every other special character
text = re.sub(
'([\u0600-\u0607\u0609\u060a\u060d\u060e\u0610-\u0614'
'\u061b-\u061f\u066a\u066c\u066d\u06dd\u06de\u06e9])',
r' \1 ',
text)
# separate out hyphens
text = re.sub(
'(-?[0-9\u0660-\u0669\u06f0-\u06f9]-+'
'[0-9\u0660-\u0669\u06f0-\u06f9]-?){,}',
lambda m: r'%s' % (m.group().replace('-', ' - ')),
text)
text = re.sub(
'(.)-([^a-zA-Z\u0617-\u061a\u0620-\u065f\u066e-\u06d3'
'\u06d5\u06fa-\u06ff\ufe70-\ufeff\ufb50-\ufdff])',
r'\1 - \2',
text)
text = re.sub(
'([^a-zA-Z\u0617-\u061a\u0620-\u065f\u066e-\u06d3\u06d5'
'\u06fa-\u06ff\ufe70-\ufeff\ufb50-\ufdff])-(.)',
r'\1 - \2',
text)
text = text.split()
text = ' '.join(text)
# restore multiple dots, purna virams and deergh virams
text = self.restoredots.sub(lambda m: r'.%s' %
('.' * int((len(m.group(2))) / 3)),
text)
if self.urd:
text = self.restoreudots.sub(lambda m: '\u06d4%s' % (
'\u06d4' * int((len(m.group(2))) / 4)), text)
else:
text = self.restoreviram.sub(lambda m: '\u0964%s' % (
'\u0964' * int((len(m.group(2))) / 4)), text)
text = self.restoredviram.sub(lambda m: '\u0965%s' % (
'\u0965' * int((len(m.group(2))) / 4)), text)
# split sentences
if self.split_sen:
if self.urd:
text = self.splitsenur1.sub(r' \1\n\2', text)
text = self.splitsenur2.sub(r' \1 \2\n', text)
else:
text = self.splitsenir1.sub(r' \1\n\2', text)
text = self.splitsenir2.sub(r' \1 \2\n', text)
return text
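# --- Usage sketch (added for illustration; not part of the original module) ---
# Assumes the module is importable as laid out above, with its data/NONBREAKING_PREFIXES
# file in place, since the constructor reads it.
if __name__ == '__main__':
    tok = IndicTokenizer(lang='hin', split_sen=True)
    print(tok.tokenize('यह एक वाक्य है। दूसरा वाक्य?'))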
|
[
"bhatirshad127@gmail.com"
] |
bhatirshad127@gmail.com
|
0e624691d23c9e9340509f22109a49d05cecac24
|
02f6458703527b013f3d280a08fd4db897d6a9c2
|
/read_lemmas_CHILDES_by_dir.py
|
3a20a54187bbc7ca6e21392f615eb18770de0877
|
[] |
no_license
|
jkodner05/method
|
fc36b19f95c186281c10461689b066f36e8a7826
|
8f9f7ecd43199ea0d143b023527da6493d160f80
|
refs/heads/master
| 2020-04-23T05:27:31.508730
| 2020-01-08T01:18:32
| 2020-01-08T01:18:32
| 170,940,750
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,812
|
py
|
import argparse, re, os
from os.path import basename
from collections import defaultdict
import statistics
import matplotlib.pyplot as plt
exclude_speakers = set(["CHI", "ROS", "DAV", "ARR", "DAN", "JEN"])
exclude_lemmas = set(["be","not","me","us","you","dog","house","eye","bowl","nose","finger","boot","boat","bicycle","toy","station","zipper","channel","lunch","case","arm","clock","key","spoon","crayon","sock","glove","chicken","shadow","powder","pot","head","market","diaper","toast"])
def plot_lemmas_by_numtypes(numtypes_by_lemma, outfname, language):
numtypes_by_lemma = sorted(numtypes_by_lemma.items(), key=lambda kv: kv[1], reverse=True)
x = []
numbins = 0
for lemma, numtypes in numtypes_by_lemma:
if numtypes > 0:
x.extend([numbins]*numtypes)
numbins += 1
if numbins < 2:
return
fig, ax = plt.subplots(figsize=(12,12))
n, bins, patches = ax.hist(x, numbins)
for i, patch in enumerate(patches):
patch.set_facecolor("goldenrod")
fontsize = 40
fig.suptitle("CHILDES " + language, fontsize=40)
ax.tick_params(labelsize=20)
ax.set_xlabel('Ranked Lemmas', fontsize=fontsize)
ax.set_ylabel('Infl Form Type Count', fontsize=fontsize)
plt.savefig("outputs/" + outfname)
plt.close(fig)
def parse_file(fname, freqsbymorph, morphsbytype, lemmasbyfeat, POSset, language):
textlineregex = re.compile(r"^\*[A-Z][A-Z][A-Z]:")
morphlineregex = re.compile(r"^%mor:")
poses = set([])
with open(fname, "r") as f:
speaker = ""
for line in f:
if textlineregex.match(line.strip()):
textline = line.strip()
speaker = textline[0:4]
for excl in exclude_speakers:
if excl in speaker:
continue
if morphlineregex.match(line.strip()):
rawwords = line.strip()[6:].split(" ")
for word in rawwords:
parts = word.split("~")
for part in parts:
lemma = word.split("|")[-1].split("&")[0].split("-")[0]
feats = "."
try:
feats = word.split("|")[-1].split("-")[1]
if "=" in feats:
feats = feats.split("=")[0]
except:
pass
if lemma in exclude_lemmas:
continue
POS = part.split("|")[0]
if not POSset or POS in POSset:
# if "english" in language.lower() and "pl" in feats.lower():
# continue
# print(part, lemma, feats)
# print lemma, POS, POSset
freqsbymorph[part] += 1
morphsbytype[lemma].add(part)
lemmasbyfeat[feats].add(lemma)
# morphsbytype[lemma+"_"+POS].add(part)
def count_types(basedir, POSset, language):
freqsbymorph = defaultdict(int)
morphsbytype = defaultdict(lambda : set([]))
lemmasbyfeat = defaultdict(lambda : set([]))
for subdir, dirs, files in os.walk(basedir):
for fname in files:
if ".cha" in fname:
parse_file(os.path.join(subdir, fname), freqsbymorph, morphsbytype, lemmasbyfeat, POSset, language)
return dict(freqsbymorph), dict(morphsbytype), dict(lemmasbyfeat)
def combine_freqs_bytype(freqsbymorph, morphsbytype, infl):
freqsbytype = {}
for word, morphs in morphsbytype.items():
freqsbytype[word] = 0
for morph in morphs:
freqsbytype[word] += freqsbymorph[morph]
freqsbytypefiltered = {}
if infl:
for word, morphs in morphsbytype.items():
hasinfl = False
for morph in morphs:
if infl in morph.lower():
hasinfl = True
if hasinfl:
freqsbytypefiltered[word] = freqsbytype[word]
else:
freqsbytypefiltered = freqsbytype
return freqsbytypefiltered
def sort_types(freqsbymorph, minfreq):
filtered_types = {}
for word, freq in freqsbymorph.items():
if freq >= minfreq:
filtered_types[word] = freq
return sorted(filtered_types.items(), key=lambda kv: kv[1], reverse=True)
def writeout(outfname, sortedtypes):
with open(outfname, "w") as f:
for word, freq in sortedtypes:
f.write("%s\t%s\n" % (word, freq))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = "Get type frequencies from Brown Corpus")
parser.add_argument("inputdir", nargs="?", help="Brown base directory or subdirectory")
parser.add_argument("outfile", nargs="?", help="file to write output to")
parser.add_argument("--pos", nargs="+", help="pos list", type=str)
    parser.add_argument("--minfreq", nargs="?", help="min frequency", type=int, default=0)
parser.add_argument("--rankcutoff", nargs="?", help="rank cutoff", type=int, default=1000000)
parser.add_argument("--infl", nargs="?", help="all lemmas must attest this inflectional category", type=str, default="")
parser.add_argument("--language", nargs="?", type=str, default ="")
args = parser.parse_args()
POSset = None
if args.pos:
POSset = set(args.pos)
freqsbymorph, morphsbytype, lemmasbyfeat = count_types(args.inputdir, POSset, args.language)
freqsbytype = combine_freqs_bytype(freqsbymorph, morphsbytype, args.infl.lower())
sortedtypes = sort_types(freqsbytype, args.minfreq)
sortedtypes = sortedtypes[0:min(len(sortedtypes),args.rankcutoff)]
writeout(args.outfile, sortedtypes)
print("# Tokens", sum(freqsbymorph.values()))
print("# Types", len(sortedtypes))
morphsbytype_noPOS = {lemma:set([morph.split("|")[1].replace("&","-") for morph in morphs]) for lemma, morphs in morphsbytype.items()}
nummorphsbytype = {lemma:len(morphs) for lemma, morphs in morphsbytype_noPOS.items()}
print("Max", max(nummorphsbytype.values()))
print("Mean", statistics.mean(nummorphsbytype.values()))
print("Median", statistics.median(nummorphsbytype.values()))
plot_lemmas_by_numtypes(nummorphsbytype,basename(args.outfile).replace(".txt",".png"), args.language)
print("# Feats", len(lemmasbyfeat))
numlemmasbyfeat = {feat:len(lemmas) for feat, lemmas in lemmasbyfeat.items()}
print("Max", max(numlemmasbyfeat.values()))
print("Mean", statistics.mean(numlemmasbyfeat.values()))
print("Median", statistics.median(numlemmasbyfeat.values()))
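    # Example invocation (illustrative only; the corpus directory, POS tags and
    # output path below are hypothetical and depend on the local CHILDES layout):
    #   python read_lemmas_CHILDES_by_dir.py CHILDES/Eng-NA/Brown brown_types.txt \
    #       --pos v part --minfreq 2 --language English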
|
[
"jkodner05@yahoo.com"
] |
jkodner05@yahoo.com
|
e751ef226fef8f821e13fe4da35609a2cd119307
|
6b1567f9c6a98f978274750c2f96fee81362d2ea
|
/if语句.py
|
babcdd0cde8e6ae479abd22dc6facb46f852916d
|
[] |
no_license
|
Cunning96/gittest
|
6f843b2a517c91215a49056291d98a2fb14cbbd9
|
0aae2bcf69e3cc69b59ddc8e5c6ce96e8be54e38
|
refs/heads/master
| 2022-11-14T00:00:25.848656
| 2020-07-14T08:23:18
| 2020-07-14T08:23:18
| 279,516,882
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,301
|
py
|
"""
Chapter 5: if statements (combines the textbook material (lines 6-249) and the video course (lines 253 to the end))
"""
# Part 1: textbook content
# 5.1 Simple if statements
cars = ['audi', 'volkswagen', 'subaru', 'nissan', 'toyota', 'geely', 'bmw', 'skoda', 'buick', 'chevrolet']
for car in cars:
    if car == 'geely':  # if the current manufacturer is geely
        print(car.upper())  # print it in all upper case
    else:  # otherwise
        print(car.title())  # print it with only the first letter capitalized
# Conditional tests (True & False)
car = 'geely'  # "=" assigns a value
car == 'geely'  # "==" asks whether car is equal to 'geely'
print(car == 'geely')
# This check is case sensitive
car = 'Geely'
car.lower() == 'geely'
print(car.lower() == 'geely')
# Testing for inequality
requesting = 'mushrooms'
if requesting != 'fish':
    print("here is the fish.")  # requesting is not 'fish', so this branch runs; replacing mushrooms with fish would make the test False
else:
    print("we have no the goods that you requesting.")
# Numerical comparisons and range checks
age = 19  # each of the following comparisons evaluates to True or False; the value only shows in an interactive shell
age < 21
print(age < 21)
age <= 21
print(age <= 21)
age > 21
print(age > 21)
age >= 21
print(age >= 21)
# Testing multiple conditions (and, or)
age_0 = 22
age_1 = 18
age_0 >= 21 and age_1 <= 21
print(age_0 >= 21 and age_1 <= 21)
age_0 = 18
age_0 >= 21 or age_1 >= 21
print(age_0 >= 21 or age_1 >= 21)
# Check whether a specific value is already in a list (e.g. checking whether a username is already registered when signing up) ---- keywords: in and not in
requestings = ['mushrooms', 'onions', 'pineapple', 'cheese']
'onions' in requestings
print('onions' in requestings)
if 'onions' in requestings:
print("yes,onions is added.")
else:
print("please add onions!")
'pepperoni' in requestings
print('pepperoni' in requestings)
# Check whether a specific value is NOT in a list ---- keyword: not in
banned_users = ['link', 'lily', 'black']
'andrew' not in banned_users
print('andrew' not in banned_users)
if 'andrew' not in banned_users:
print("you can post response if you wish.")
# Boolean expressions (is a game running, may a user edit certain site content, tracking program state, etc.)
game_active = True
can_edit = False
# Practice 1
car = 'nissan'
print("is car == 'NISSAN'? I predict True")
print(car.upper() == 'NISSAN')
print("\ncar = 'audi'? I predict False")
print(car == 'adui')
# Practice 2
value1 = 21
value2 = 22
print(value2 == value1)
print(value1 >= 20 and value2 <= 30)
print(value1 <= 20 and value2 >= 100)
print(value1 > 10 or value2 < 23)
# Practice 3
cities = ['yujing', 'ruilin', 'qicang', 'wenyang', 'changxiu', 'xuanyun', 'tanchuan', 'yunji', 'linghan']
print('yinhuang' in cities)
print('changxiu' in cities)
if 'ninghai' not in cities:
cities.insert(4, 'ninghai')
print(cities)
# if-else statements (evaluation stops at the first matching branch; else may be omitted)
age = 20
if age >= 18:
print("you are old enough to vote.")
print("have you registered to vote yet?")
else:
print("you are too young to vote!")
# if-elif-else statements (for checking more than two cases; evaluation stops at the first match; else may be omitted)
age = 20
if age <= 4:
print("free!")
elif age > 4 and age < 18:
print("cost $5")
else:
    print("cost $10")  # a more concise version follows
"""
4 < age: free
4 < age < 18: $5
18 >= age: $10
age > 65: $5
"""
age = 20
if age <= 4:
price = 0
elif age < 65:
price = 10
else:
price = 5
print("Your admission cost is $" + str(price) + ".\n")  # this version is more concise, but reads less naturally
# A chain of plain if statements suits cases where several conditions may match (every condition is still checked after one matches)
requesting = ['cheese', 'beef', 'squid', 'scallop', 'flounder']
if 'mushrooms' in requesting:
print("add mushrooms")
if 'scallop' in requesting:
print("add scallop")
if 'pork' in requesting:
print("add pork")
# Practice 1 Page 76
alien_colors = ['green', 'yellow', 'red', 'purple']
for alien_color in alien_colors:
if alien_color == 'green':
print("well down,you got 5 points!")
# Practice 2
alien_colors = ['green', 'yellow', 'red']
for alien_color in alien_colors:
if alien_color == 'yellow':
print()
else:
print("perfect,you got 10 points!")
if alien_color == 'red':
print()
# Practice 3
alien_colors = ['green', 'yellow', 'red']
for alien_color in alien_colors:
if alien_color == 'yellow':
print("perfect!you got 10 points!")
elif alien_color == 'green':
print("well down!you got 5 points!")
else:
print("excllent!you got 20 points")
# Practice 4
age = 26
if age < 2:
print("baby")
elif age < 4:
print("b-chil")
elif age < 14:
print("child")
elif age < 20:
print("young man")
elif age < 65:
print("adult")
else:
print("old man")
# Practice 5
favorite_fruits = ['coconut', 'banana', 'pine']
if 'coconut' in favorite_fruits:
print("Yes")
if 'apple' in favorite_fruits:
print()
if 'pine' in favorite_fruits:
print("Yes")
if 'banana' in favorite_fruits:
print("hole yes!")
if 'orange' in favorite_fruits:
print()
# Using if statements with lists
drinks = ['pepsi', 'juice', 'sprite', 'fenta', 'hot coco']
for drink in drinks:
if drink == 'sprite':
print("sorry,the sprite is out of.")
else:
print("drink is preparing.")
"""
This approach could be used in a restaurant ordering system to tell customers or staff about stock.
It only reports what the administrator has already assigned to the stock variables.
It cannot check inventory by itself and report errors; reporting actual stock levels needs extra code,
i.e. when the count of 'sprite' is 0, print("sprite is out of.")
"""
# Check whether a list is empty
order_list = []
if order_list:
for food in order_list:
print("\nyour order is confirmed.")
else:
print("please choose at least 1 food.")
# Comparing lists (a match is True, otherwise the else branch runs)
"""
Match customer requests against what the shop can supply
"""
available_requestings = ['beef', 'wine', 'duck', 'cookie', 'hamburger',
'fish', 'sashimi']
customs_requestings = ['apple', 'banana', 'pine', 'beef', 'cookie']
for requesting in customs_requestings:
if requesting in available_requestings:
print(requesting + " is already preparing.")
else:
print("we don't have " + requesting + ".")
# Practice 1 Page79 -80
names = ['admin', 'jesse', 'luke', 'aderson', 'hulk']
for name in names:
if name == 'admin':
print("Hello" + name + " would you like to see a status report?")
else:
print("Hello," + name + " thank you for logging in again.")
# Practice 2
names = []
if names:
for name in names:
if name == 'admin':
print("Hello" + name + " would you like to see a status report?")
else:
print("please type in a name.")
# Practice 3
current_users = ['jesse', 'luke', 'aderson', 'hulk', 'lawson']
new_users = ['jack', 'chen', 'yao', 'luke', 'HULK']
for user in new_users:
if user.lower() in current_users:
print(user + " is used.")
else:
print(user + " can be used.")
# Practice 4
numbers = list(range(1, 10))  # material from Chapter 4
for value in numbers:
if value == 1:
print("1st")
elif value == 2:
print("2nd")
elif value == 3:
print("3rd")
else:
print(str(value) + "th")
"""
Part 2: video course content
Comparing magnitudes
Example: find the largest of 5000, 2000, 9000 and 1000
"""
n0 = 5000
n1 = 2000
n2 = 9000
n3 = 1000
max_value = n0  # max_value holds the running maximum; add helper variables like this when the existing ones are not enough
if n1 > max_value:
    max_value = n1
if n2 > max_value:
    max_value = n2
if n3 > max_value:
    max_value = n3
print("MAX is " + str(max_value))
# Determine the number of days in a month
month = int(input("month:"))
if month < 1 or month > 12:
print("Please tab correct numbers.(1-12)")
elif month == 2:
print("28 days")
elif month == 4 or month == 6 or month == 9 or month == 11:
print("30 days")
else:
print("31 days")
# Determine the season
season = input("season:")
if season == "春":
print("February,March,April")
elif season == "夏":
print("May,June,July,August")
elif season == "秋":
print("September,October")
elif season == "冬":
print("November,December,January")
else:
print("it's not the current earth seasons.")
# Calculate with two numbers and an operator. Read in the numbers and the operator; if the operator is not one of + - * /, report an input error, otherwise print the result of the operation
number1 = int(input("number1:"))
number2 = int(input("number2:"))
opreator = input("opreator:")
if opreator == "+":
value = number1 + number2
print(value)
elif opreator == "-":
value = number1 - number2
print(value)
elif opreator == "*":
value = number1 * number2
print(value)
elif opreator == "/":
value = number1 / number2
print(value)
else:
print("error:please check number and opreator.")
# Exercise 1
month = int(input("month:"))
if month < 1 or month > 12:
print("incorrect type.")
elif month == 1 or month > 10:
print("冬")
elif month >= 9:
print("秋")
elif month >= 5:
print("夏")
else:
print("春")
# Exercise 2
years = int(input("年龄:"))
if years < 0:
print("错误")
elif years < 2:
human = "婴儿"
elif years < 13:
human = "儿童"
elif years < 20:
human = "青年"
elif years < 65:
human = "成年"
elif years <= 150:
human = "老年"
else:
print("it's not possible!")
print(human)
# Exercise 3
tall = float(input("身高:"))
weight = float(input("体重:"))
BMI = weight / tall ** 2  # BMI is weight (kg) divided by the square of height (m)
if BMI < 18.5:
health = "偏瘦"
elif BMI < 24:
health = "正常"
elif BMI < 28:
health = "超重"
elif BMI < 30:
health = "I度肥胖"
elif BMI < 40:
health = "II度肥胖"
else:
health = "III度肥胖"
print(health)
# This can be rewritten as a conditional expression, e.g. "偏瘦" if BMI < 18.5 else "肥胖" (shown only as an illustration, see the sketch below; it does not cover all the categories in this exercise)
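# A minimal runnable sketch of that conditional-expression form (added for
# illustration; it collapses the categories down to just two):
health_short = "偏瘦" if BMI < 18.5 else "不偏瘦"
print(health_short)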
numbers = list(range(3, 11))
for value in numbers:
if value > 3 and value < 10:
print(value)
|
[
"lirongji96@163.com"
] |
lirongji96@163.com
|
3142d5009781ba0f8c34ac388e740b211d9b0c54
|
84e419d3f4c410d4ab7ecba48bc812e1a18f2471
|
/assignment1/trainer.py
|
71f72cac2a066db2926f7524ea014f43f8712b55
|
[] |
no_license
|
ludvikka/tdt4265-exercise-1
|
f2836858c4e1915926151fc0793f2319709ac1ca
|
1e7ec7531ca5f3a6c858a36e78d744678a5ef131
|
refs/heads/master
| 2023-03-05T21:20:15.237629
| 2021-02-05T12:57:50
| 2021-02-05T12:57:50
| 333,704,896
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,838
|
py
|
import numpy as np
import utils
# NO NEED TO CHANGE THIS CODE
class BaseTrainer:
def __init__(
self,
model,
learning_rate: float,
batch_size: int,
shuffle_dataset: bool,
X_train: np.ndarray, Y_train: np.ndarray,
X_val: np.ndarray, Y_val: np.ndarray,) -> None:
"""
Initialize the trainer responsible for performing the gradient descent loop.
"""
self.X_train = X_train
self.Y_train = Y_train
self.X_val = X_val
self.Y_val = Y_val
self.learning_rate = learning_rate
self.batch_size = batch_size
self.model = model
self.shuffle_dataset = shuffle_dataset
    def validation_step(self):
        """
        Perform a validation step to evaluate the model at the current step for the validation set.
        Also calculates the current accuracy of the model on the train set.
        Returns:
            loss (float): cross entropy loss over the validation set
            accuracy_train (float): accuracy over the train set
            accuracy_val (float): accuracy over the validation set
        """
        pass
    def train_step(self, X_batch, Y_batch):
        """
        Perform forward, backward and gradient descent step here.
        Args:
            X_batch: one batch of images
            Y_batch: one batch of labels
        Returns:
            loss value (float) on batch
        """
        pass
def train(
self,
num_epochs: int):
"""
Training loop for model.
Implements stochastic gradient descent with num_epochs passes over the train dataset.
Returns:
train_history: a dictionary containing loss and accuracy over all training steps
val_history: a dictionary containing loss and accuracy over a selected set of steps
"""
# Utility variables
num_batches_per_epoch = self.X_train.shape[0] // self.batch_size
num_steps_per_val = num_batches_per_epoch // 5
# A tracking value of loss over all training steps
train_history = dict(
loss={},
accuracy={}
)
val_history = dict(
loss={},
accuracy={}
)
global_step = 0
counter = 0
lowest_val_loss = 1
for epoch in range(num_epochs):
train_loader = utils.batch_loader(
self.X_train, self.Y_train, self.batch_size, shuffle=self.shuffle_dataset)
for X_batch, Y_batch in iter(train_loader):
loss = self.train_step(X_batch, Y_batch)
# Track training loss continuously
train_history["loss"][global_step] = loss
# Track validation loss / accuracy every time we progress 20% through the dataset
if global_step % num_steps_per_val == 0:
val_loss, accuracy_train, accuracy_val = self.validation_step()
train_history["accuracy"][global_step] = accuracy_train
val_history["loss"][global_step] = val_loss
val_history["accuracy"][global_step] = accuracy_val
# TODO (Task 2d): Implement early stopping here.
# You can access the validation loss in val_history["loss"]
if (val_loss>lowest_val_loss):
counter = counter + 1
else:
lowest_val_loss = val_loss
counter = 0
if (counter == 10):
break
global_step += 1
if (counter == 10):
print("Stopped at epoch:", epoch)
break
return train_history, val_history
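# --- Illustrative sketch only (not part of the assignment skeleton above) ---
# A concrete trainer is expected to subclass BaseTrainer and fill in the two stubs.
# The model interface used here (forward / backward / w / grad) and the
# cross_entropy_loss helper are assumptions and may differ from the real assignment code.
class ExampleTrainer(BaseTrainer):
    def train_step(self, X_batch, Y_batch):
        outputs = self.model.forward(X_batch)
        self.model.backward(X_batch, outputs, Y_batch)
        # plain stochastic gradient descent update
        self.model.w = self.model.w - self.learning_rate * self.model.grad
        return cross_entropy_loss(Y_batch, outputs)  # hypothetical loss helper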
|
[
"ludkasbo@gmail.com"
] |
ludkasbo@gmail.com
|
7a96d50e392eac40cdadc6ce8f77f3935ba7d551
|
4ee22e60f72add3993b8f5ca9cb004eec862a2ae
|
/PTransE/doc2vec_transformation/paths_from_PCRA_output.py
|
3c367cfa6d3502c69bf92d0524a515cb8d917917
|
[
"MIT"
] |
permissive
|
darvid7/KB2E
|
037a6f6563e7ad5a0aed38ed14c53fffc236680d
|
589f87f1d33935c7dbeea73e1177d5d079d7e844
|
refs/heads/master
| 2020-03-19T16:35:07.137829
| 2018-09-01T12:06:31
| 2018-09-01T12:06:31
| 136,720,941
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,755
|
py
|
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--path")
parser = parser.parse_args()
print(os.getcwd())
# PCRA_OUTPUT_PATH = "../data/train_pra_sample_1000_lines.txt"
paths = []
with open(parser.path, "r") as fh:
header_toggle = True
for line in fh:
line = line.split()
if header_toggle:
entity_1_mid, entity_2_mid, relation_id_as_str = line
header_toggle = False
else:
num_relation_paths = int(line[0])
cur_index = 1
for i in range(num_relation_paths):
# For each relation count, relation path, confidence level.
relation_count = int(line[cur_index])
relations_on_path = []
cur_index += 1
for r in range(relation_count):
# relations_on_path_count = int(line[cur_index])
# cur_index += 1
# for j in range(relations_on_path_count):
relations_on_path.append(line[cur_index])
cur_index += 1
# Skip over confidence.
cur_index += 1
if relation_count == 1:
path = "%s %s %s"
paths.append(path % (entity_1_mid, relations_on_path[0], entity_2_mid))
elif relation_count == 2: # Relation count == 2, shouldn't be any other options.
path = "%s %s missing_intermediate_entity %s %s"
paths.append(path % (entity_1_mid, relations_on_path[0], relations_on_path[1], entity_2_mid))
else:
print("WTF")
header_toggle = True
for path in paths:
print(path)
|
[
"david.anthony.lei@gmail.com"
] |
david.anthony.lei@gmail.com
|
e96605d4527a4551d1105f8932434a99310e65b9
|
561c590ec93131ceb58c21912a375b6e0d50bedb
|
/jiang_fenci/hmm_segment/segment/model.py
|
f02feafaf66f8f9e98368fd143fd2570a3590bb7
|
[] |
no_license
|
chuanfanyoudong/nlp_learn
|
3607555e59789240afd6c4a9620cc6e678e0afb3
|
9fbb6781640ab9aba561dc2de0066a1f1e5882a0
|
refs/heads/master
| 2020-04-07T13:25:16.118562
| 2019-04-24T07:18:33
| 2019-04-24T07:18:33
| 158,406,684
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,045
|
py
|
# -*- coding: utf-8 -*-
"""
SEGMENT
--------
Wraps an HMM word-segmentation model
"""
import numpy as np
from hmmlearn.hmm import MultinomialHMM
from jiang_fenci.hmm_segment.segment.corpus import get_corpus
__model = None
class Segment:
def __init__(self):
self.corpus = get_corpus()
self.states, self.init_p = self.get_init_state()
self.trans_p = self.get_trans_state()
self.vocabs, self.emit_p = self.get_emit_state()
self.model = self.get_model()
def get_init_state(self):
"""
        Get the initial-state probabilities and convert them to the form the hmm model expects
"""
states = ['S', 'B', 'M', 'E']
init_state = self.corpus.get_state('init')
init_p = np.array([init_state[s] for s in states])
return states, init_p
def get_trans_state(self):
"""
        Get the transition probabilities and convert them to the form the hmm model expects
"""
trans_state = self.corpus.get_state('trans')
trans_p = np.array([[trans_state[s][ss] for ss in self.states] for s in self.states])
return trans_p
def get_emit_state(self):
"""
        Get the emission probabilities and convert them to the form the hmm model expects
"""
emit_state = self.corpus.get_state('emit')
vocabs = []
for s in self.states:
vocabs.extend([k for k, v in emit_state[s].items()])
vocabs = list(set(vocabs))
emit_p = np.array([[emit_state[s][w] for w in vocabs] for s in self.states])
return vocabs, emit_p
def get_model(self):
"""
        Initialize the hmm model
"""
model = MultinomialHMM(n_components=len(self.states))
model.startprob_ = self.init_p
model.transmat_ = self.trans_p
model.emissionprob_ = self.emit_p
return model
def pre_process(self, word):
"""
        Handle characters not seen in training
"""
if word in self.vocabs:
return self.vocabs.index(word)
else:
return len(self.vocabs)-1
def cut(self, sentence):
"""
        Segment a sentence into words
"""
seen_n = np.array([[self.pre_process(w) for w in sentence]]).T
log_p, b = self.model.decode(seen_n, algorithm='viterbi')
# print(len(sentence),len(b))
# print(sentence,b)
#print(self.states,len(b))
states = list(map(lambda x: self.states[int(x)], b))
#print(type(states),states)
cut_sentence = ''
for index in range(len(list(states))):
# print(list(states))
if list(states)[index] in ('S', 'E'):
cut_sentence += sentence[index]+' '
else:
cut_sentence += sentence[index]
return cut_sentence
@staticmethod
def stats(cut_corpus, gold_corpus):
"""
        Precision, recall and F1
"""
success_count = 0
cut_count = 0
gold_count = 0
for index in range(len(cut_corpus)):
cut_sentence = cut_corpus[index].split(' ')
gold_sentence = gold_corpus[index].split(' ')
cut_count += len(cut_sentence)
gold_count += len(gold_sentence)
for word in cut_sentence:
if word in gold_sentence:
success_count += 1
recall = float(success_count)/float(gold_count)
precision = float(success_count)/float(cut_count)
f1 = (2*recall*precision)/(recall+precision)
return [precision, recall, f1]
def test(self):
"""
        Evaluate segmentation on the test corpus
"""
test_corpus = self.corpus.get_test_corpus('test')
gold_corpus = [sentence.replace(' ', ' ').strip() for sentence in self.corpus.get_test_corpus('test_gold') if sentence]
cut_corpus = [self.cut(sentence).strip() for sentence in test_corpus if sentence]
result = self.stats(cut_corpus, gold_corpus)
# print(result)
return result
def get_model():
"""
    Fetch the singleton model instance
"""
global __model
if not __model:
__model = Segment()
return __model
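# Usage sketch (illustrative only; it relies on the corpus files that get_corpus() loads):
#   seg = get_model()
#   print(seg.cut('今天天气不错'))
#   print(seg.test())   # -> [precision, recall, f1]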
|
[
"qaz3762541@163.com"
] |
qaz3762541@163.com
|
57188da6617341c1573abc4a7a65fe48de96e55e
|
9a8d2d226560d63f0d7df2545efe26bf395db06c
|
/conditions.py
|
a3be6428a68704486084651ac18ac10fd4e995aa
|
[] |
no_license
|
Fakhruddin90/python-tutorial
|
49235e767dc616640a48d4bb3ca8e1b41dd4d468
|
54fc5669a1c8157985190c387c3a4137d98f6729
|
refs/heads/master
| 2022-12-01T06:40:46.243746
| 2020-08-13T09:37:22
| 2020-08-13T09:37:22
| 287,238,328
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 112
|
py
|
x = 28
if x > 0:
print("x is positive")
elif x < 0:
print("x is negative")
else:
print("x is zero")
|
[
"Fakhruddin90.com"
] |
Fakhruddin90.com
|
e5f5194a4d099c2be8675f621b9cf47c993456c2
|
762b088015430f246c2fbc8992b46d220b4d7023
|
/String/intToRoman.py
|
194b33c72477d4cbfc240b5bb7779a2bff68178e
|
[] |
no_license
|
its-sachin/InterviewBit
|
3f137f960702ea31e510b4a54386f41273e0e3c4
|
3f83bce9a4e2bc285d1a75e41e6de5122b5ed470
|
refs/heads/master
| 2023-06-25T09:40:06.422635
| 2021-07-23T10:33:23
| 2021-07-23T10:33:23
| 382,411,938
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 556
|
py
|
class Solution:
# @param A : integer
# @return a strings
def intToRoman(self, num: int)->str:
d = {1: ['I', 'X', 'C', 'M'], 2: ['II', 'XX', 'CC', 'MM'], 3: ['III', 'XXX', 'CCC', 'MMM'], 4: ['IV', 'XL', 'CD'], 5: ['V', 'L', 'D'], 6: ['VI', 'LX', 'DC'], 7: ['VII', 'LXX', 'DCC'], 8: ['VIII', 'LXXX', 'DCCC'], 9: ['IX', 'XC', 'CM']}
ans = ""
i=0
while(num!=0):
digit = num%10
if(digit!=0):
ans = d.get(digit)[i]+ans
i+=1
num=num//10
return ans
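# Quick check of the lookup-table approach above (added for illustration):
#   Solution().intToRoman(1994) -> 'MCMXCIV'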
|
[
"67999058+its-sachin@users.noreply.github.com"
] |
67999058+its-sachin@users.noreply.github.com
|
938eaa007e714c01f2aa6135f7480b71167d0efc
|
5a3ab0a36cea526c7900fe33db57429eed8a857c
|
/main_solid_cubes_surface.py
|
81d80ca709c50842e324801e59497c772e052ce6
|
[] |
no_license
|
LLizardi/cubes
|
71aad819f6ca64ecc57c4131eaa22b7c48f3e416
|
de3028cd50ac4bebe3f405dc07c589af50f991c5
|
refs/heads/master
| 2020-05-07T05:44:59.480315
| 2019-04-09T04:21:14
| 2019-04-09T04:21:14
| 180,283,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,627
|
py
|
from math import pi, atan, log, sqrt
import numpy as np
import warnings
warnings.filterwarnings('ignore')
from datetime import datetime as dt
import sys
from itertools import combinations,permutations
from class_cube import cube
def create_mesh(N):
cubes = []
if N<=1:
cubes.append(cube())
return cubes
N = (N-1)/2
for x in np.arange(-N,N + 0.1,1):
for y in np.arange(-N,N + 0.1,1):
for z in np.arange(-N,N + 0.1,1):
cubes.append(cube([x,y,z]))
return cubes
def main(N_min,N_max):
print('{0} {1} {2} {3} {4} {5}'.format('Radius','Total_Energy','Energy_per_Particle','Number_of_Particle','Faces_Touching','Energy_of_central_cube'))
xb = 0.5
lista = range(N_min,N_max)
lista3 = [i**3 for i in range(N_min,N_max)]
cc = 0
for N in lista3:
cubes = create_mesh(lista[cc])
indexs2 = combinations(np.arange(N,dtype=np.int),2)
faces = np.sum(np.array([cubes[i].are_touching(cubes[j]) for i,j in indexs2]))
energy = 0.
energy_norm = 0.
radii = [i.distance_corner_origin() for i in cubes]
central_energy = 0.
print('{0:.5f} {1:.5f} {2:.5f} {3:3.0f} {4:3.0f} {5:.5f}'.format(max(radii),energy,energy_norm,N,faces,central_energy))
cc += 1
return True
if __name__ == "__main__":
inicio = dt.now()
    # This calculation considers the origin at the center of a cube
main(1,30)
fin = dt.now()
print('Elapsed Time: ',fin-inicio)
|
[
"noreply@github.com"
] |
noreply@github.com
|
93196c7e4c3d9aee7a600a779e6f089b06a181e0
|
13eae91d078c8b88c990bb6da1b9cdb8e3648b76
|
/cogs/Downloader/lib/fontTools/misc/macRes.py
|
e8b3cbc20ed28d5048adec1ba0a12c560f11c715
|
[] |
no_license
|
skylarr1227/skybizzle
|
98303c99a5ea897469e381e06dcda3725d6500d6
|
63c38995437d6880bd9bf0de52d406c904cbbd24
|
refs/heads/master
| 2023-05-13T00:12:46.827511
| 2019-11-12T01:03:45
| 2019-11-12T01:03:45
| 221,097,000
| 0
| 1
| null | 2023-05-07T06:22:44
| 2019-11-12T00:40:38
|
Python
|
UTF-8
|
Python
| false
| false
| 6,591
|
py
|
""" Tools for reading Mac resource forks. """
from fontTools.misc.py23 import *
import struct
from fontTools.misc import sstruct
from collections import OrderedDict
try:
from collections.abc import MutableMapping
except ImportError:
from UserDict import DictMixin as MutableMapping
class ResourceError(Exception):
pass
class ResourceReader(MutableMapping):
def __init__(self, fileOrPath):
self._resources = OrderedDict()
if hasattr(fileOrPath, 'read'):
self.file = fileOrPath
else:
try:
# try reading from the resource fork (only works on OS X)
self.file = self.openResourceFork(fileOrPath)
self._readFile()
return
except (ResourceError, IOError):
# if it fails, use the data fork
self.file = self.openDataFork(fileOrPath)
self._readFile()
@staticmethod
def openResourceFork(path):
if hasattr(path, "__fspath__"): # support os.PathLike objects
path = path.__fspath__()
with open(path + '/..namedfork/rsrc', 'rb') as resfork:
data = resfork.read()
infile = BytesIO(data)
infile.name = path
return infile
@staticmethod
def openDataFork(path):
with open(path, 'rb') as datafork:
data = datafork.read()
infile = BytesIO(data)
infile.name = path
return infile
def _readFile(self):
self._readHeaderAndMap()
self._readTypeList()
def _read(self, numBytes, offset=None):
if offset is not None:
try:
self.file.seek(offset)
except OverflowError:
raise ResourceError("Failed to seek offset ('offset' is too large)")
if self.file.tell() != offset:
raise ResourceError('Failed to seek offset (reached EOF)')
try:
data = self.file.read(numBytes)
except OverflowError:
raise ResourceError("Cannot read resource ('numBytes' is too large)")
if len(data) != numBytes:
raise ResourceError('Cannot read resource (not enough data)')
return data
def _readHeaderAndMap(self):
self.file.seek(0)
headerData = self._read(ResourceForkHeaderSize)
sstruct.unpack(ResourceForkHeader, headerData, self)
# seek to resource map, skip reserved
mapOffset = self.mapOffset + 22
resourceMapData = self._read(ResourceMapHeaderSize, mapOffset)
sstruct.unpack(ResourceMapHeader, resourceMapData, self)
self.absTypeListOffset = self.mapOffset + self.typeListOffset
self.absNameListOffset = self.mapOffset + self.nameListOffset
def _readTypeList(self):
absTypeListOffset = self.absTypeListOffset
numTypesData = self._read(2, absTypeListOffset)
self.numTypes, = struct.unpack('>H', numTypesData)
absTypeListOffset2 = absTypeListOffset + 2
for i in range(self.numTypes + 1):
resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
resType = tostr(item['type'], encoding='mac-roman')
refListOffset = absTypeListOffset + item['refListOffset']
numRes = item['numRes'] + 1
resources = self._readReferenceList(resType, refListOffset, numRes)
self._resources[resType] = resources
def _readReferenceList(self, resType, refListOffset, numRes):
resources = []
for i in range(numRes):
refOffset = refListOffset + ResourceRefItemSize * i
refData = self._read(ResourceRefItemSize, refOffset)
res = Resource(resType)
res.decompile(refData, self)
resources.append(res)
return resources
def __getitem__(self, resType):
return self._resources[resType]
def __delitem__(self, resType):
del self._resources[resType]
def __setitem__(self, resType, resources):
self._resources[resType] = resources
def __len__(self):
return len(self._resources)
def __iter__(self):
return iter(self._resources)
def keys(self):
return self._resources.keys()
@property
def types(self):
return list(self._resources.keys())
def countResources(self, resType):
"""Return the number of resources of a given type."""
try:
return len(self[resType])
except KeyError:
return 0
def getIndices(self, resType):
numRes = self.countResources(resType)
if numRes:
return list(range(1, numRes+1))
else:
return []
def getNames(self, resType):
"""Return list of names of all resources of a given type."""
return [res.name for res in self.get(resType, []) if res.name is not None]
def getIndResource(self, resType, index):
"""Return resource of given type located at an index ranging from 1
to the number of resources for that type, or None if not found.
"""
if index < 1:
return None
try:
res = self[resType][index-1]
except (KeyError, IndexError):
return None
return res
def getNamedResource(self, resType, name):
"""Return the named resource of given type, else return None."""
name = tostr(name, encoding='mac-roman')
for res in self.get(resType, []):
if res.name == name:
return res
return None
def close(self):
if not self.file.closed:
self.file.close()
class Resource(object):
def __init__(self, resType=None, resData=None, resID=None, resName=None,
resAttr=None):
self.type = resType
self.data = resData
self.id = resID
self.name = resName
self.attr = resAttr
def decompile(self, refData, reader):
sstruct.unpack(ResourceRefItem, refData, self)
# interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
self.dataOffset, = struct.unpack('>L', bytesjoin([b"\0", self.dataOffset]))
absDataOffset = reader.dataOffset + self.dataOffset
dataLength, = struct.unpack(">L", reader._read(4, absDataOffset))
self.data = reader._read(dataLength)
if self.nameOffset == -1:
return
absNameOffset = reader.absNameListOffset + self.nameOffset
nameLength, = struct.unpack('B', reader._read(1, absNameOffset))
name, = struct.unpack('>%ss' % nameLength, reader._read(nameLength))
self.name = tostr(name, encoding='mac-roman')
ResourceForkHeader = """
> # big endian
dataOffset: L
mapOffset: L
dataLen: L
mapLen: L
"""
ResourceForkHeaderSize = sstruct.calcsize(ResourceForkHeader)
ResourceMapHeader = """
> # big endian
attr: H
typeListOffset: H
nameListOffset: H
"""
ResourceMapHeaderSize = sstruct.calcsize(ResourceMapHeader)
ResourceTypeItem = """
> # big endian
type: 4s
numRes: H
refListOffset: H
"""
ResourceTypeItemSize = sstruct.calcsize(ResourceTypeItem)
ResourceRefItem = """
> # big endian
id: h
nameOffset: h
attr: B
dataOffset: 3s
reserved: L
"""
ResourceRefItemSize = sstruct.calcsize(ResourceRefItem)
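# Usage sketch (illustrative only; 'Example.dfont' is a hypothetical resource-fork file):
#   reader = ResourceReader('Example.dfont')
#   print(reader.types)                     # e.g. ['sfnt', 'FOND', ...]
#   for res in reader.get('sfnt', []):
#       print(res.id, res.name, len(res.data))
#   reader.close()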
|
[
"skylarr1227@gmail.comgit config --global user.email skylarr1227@gmail.com"
] |
skylarr1227@gmail.comgit config --global user.email skylarr1227@gmail.com
|
af9bfb5814f5f4141fc5fd9980c003da790129c1
|
2dbd4a34f6da93c0e70e8517971672a010db93dc
|
/py_m/lexer_.py
|
2bc01b0157aa225fd69bd537af1b174f584f269a
|
[] |
no_license
|
kawain/copy_interpreter
|
44eebe43c6b9ddefa94066577dcd5779a933f426
|
94e7a6d5d03b528b9138c17a5a6828f6332fa98d
|
refs/heads/master
| 2023-04-26T02:51:46.457263
| 2021-05-22T07:48:52
| 2021-05-22T07:48:52
| 356,544,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,363
|
py
|
from token_ import TokenType, Token
class Lexer:
    """Lexical analyzer"""
def __init__(self, input, position=0, next_position=0, ch=""):
self.input = input
self.position = position
self.next_position = next_position
self.ch = ch
self.size = len(self.input)
self.read_char()
def read_char(self):
if self.next_position >= self.size:
self.ch = ""
else:
self.ch = self.input[self.next_position]
self.position = self.next_position
self.next_position += 1
def skip_whitespace(self):
while self.ch == " " or self.ch == "\t" or self.ch == "\n" or self.ch == "\r":
self.read_char()
@staticmethod
def is_letter(v):
if v.isalpha():
return True
elif v == "_":
return True
else:
return False
@staticmethod
def is_digit(v):
if v.isdigit():
return True
elif v == '.':
return True
else:
return False
def peek_char(self):
if self.next_position >= self.size:
return ""
else:
return self.input[self.next_position]
def read_identifier(self):
position = self.position
while self.is_letter(self.ch):
self.read_char()
return self.input[position:self.position]
def read_number(self):
position = self.position
while self.is_digit(self.ch):
self.read_char()
return self.input[position:self.position]
def read_string(self):
position = self.position + 1
while True:
self.read_char()
if self.ch == '"' or self.ch == "":
break
return self.input[position:self.position]
def next_token(self):
tok = Token()
self.skip_whitespace()
if self.ch == "=":
if self.peek_char() == "=":
self.read_char()
tok.token_type = TokenType.EQ
tok.literal = "=="
else:
tok.token_type = TokenType.ASSIGN
tok.literal = "="
elif self.ch == "+":
tok.token_type = TokenType.PLUS
tok.literal = self.ch
elif self.ch == "-":
tok.token_type = TokenType.MINUS
tok.literal = self.ch
elif self.ch == "!":
if self.peek_char() == "=":
self.read_char()
tok.token_type = TokenType.NOT_EQ
tok.literal = "!="
else:
tok.token_type = TokenType.BANG
tok.literal = "!"
elif self.ch == "/":
tok.token_type = TokenType.SLASH
tok.literal = self.ch
elif self.ch == "*":
tok.token_type = TokenType.ASTERISK
tok.literal = self.ch
elif self.ch == "<":
tok.token_type = TokenType.LT
tok.literal = self.ch
elif self.ch == ">":
tok.token_type = TokenType.GT
tok.literal = self.ch
elif self.ch == ";":
tok.token_type = TokenType.SEMICOLON
tok.literal = self.ch
elif self.ch == ",":
tok.token_type = TokenType.COMMA
tok.literal = self.ch
elif self.ch == "{":
tok.token_type = TokenType.LBRACE
tok.literal = self.ch
elif self.ch == "}":
tok.token_type = TokenType.RBRACE
tok.literal = self.ch
elif self.ch == "(":
tok.token_type = TokenType.LPAREN
tok.literal = self.ch
elif self.ch == ")":
tok.token_type = TokenType.RPAREN
tok.literal = self.ch
elif self.ch == '"':
tok.token_type = TokenType.STRING
tok.literal = self.read_string()
elif self.ch == "[":
tok.token_type = TokenType.LBRACKET
tok.literal = self.ch
elif self.ch == "]":
tok.token_type = TokenType.RBRACKET
tok.literal = self.ch
elif self.ch == "":
tok.token_type = TokenType.EOF
tok.literal = ""
else:
if self.is_letter(self.ch):
tok.literal = self.read_identifier()
tok.token_type = tok.lookup_ident(tok.literal)
return tok
elif self.is_digit(self.ch):
literal = self.read_number()
if literal.count(".") == 0:
tok.token_type = TokenType.INT
tok.literal = literal
return tok
elif literal.count(".") == 1:
tok.token_type = TokenType.FLOAT
tok.literal = literal
return tok
else:
tok.token_type = TokenType.ILLEGAL
tok.literal = literal
else:
tok.token_type = TokenType.ILLEGAL
tok.literal = self.ch
self.read_char()
return tok
def __str__(self):
return "Lexer()"
if __name__ == "__main__":
pass
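A hedged usage sketch of the lexer above; it assumes the token_ module from the same repository is importable, exactly as the file's own import requires.
# walk a small source string and print every token up to EOF
lexer = Lexer("foo = 1 + 23;")
tok = lexer.next_token()
while tok.token_type != TokenType.EOF:
    print(tok.token_type, tok.literal)
    tok = lexer.next_token()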
|
[
"unknown@example.com"
] |
unknown@example.com
|
d4cb57f250e733e13d0676e9b5d25d710f3cafad
|
7f52bb7c3a5ed3be6821306137c5217362d06dc3
|
/manage.py
|
3b50f6ea9203cf02974bbf61451c2a74f68e4d63
|
[] |
no_license
|
payush/cristianoronaldoyopmailcom-307
|
547f36250cf3c9c94bdea0fe8c7a1e3e1194294a
|
d2f2a1f76ab354e391bab8a628782c80a3b1c97a
|
refs/heads/master
| 2020-03-23T14:24:53.428495
| 2018-07-20T06:44:00
| 2018-07-20T06:44:00
| 141,674,090
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 828
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cristianoronaldoyopmailcom_307.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"ayushpuroheet@gmail.com"
] |
ayushpuroheet@gmail.com
|
70938badd7297e124107dca01ead60ecba9b844c
|
fdb48a4812dc17c4a591b4c30a7d71231a0fba7c
|
/score.py
|
320331b76f5dcd2ab242dff34f96de1339f8bd40
|
[
"MIT"
] |
permissive
|
s-a-nersisyan/PP_miRNA_arrays
|
ee3312b9cccf917f8375a022ef72d33a5b0c36a1
|
6c68fa3b4a22232684fd9f2e77d3529012dea2bc
|
refs/heads/master
| 2020-07-29T08:52:14.591943
| 2020-02-16T05:57:54
| 2020-02-16T05:57:54
| 209,736,358
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,111
|
py
|
# This script inputs a pre-MIRNA - miRNA expression table (see README) and
# calculates a score for each row
import sys
import pickle
import numpy as np
from scipy.stats import spearmanr
def main(input_fname):
# Open input file and skip the header
in_f = open(input_fname)
in_f.readline()
# Load prepared miRBase data
name_to_MI, name_to_MIMAT, MI_to_MIMAT = pickle.load(open("miRBase/miRBase.pkl", "rb"))
# First, collect pre-miRNA and miRNA expression values
pre_miRNA_expressions = {}
mature_miRNA_expressions = {}
for l in in_f:
split = l.split("\t")
id_ = split[0]
expressions = list(map(float, split[1:]))
if "MIMAT" in id_:
mature_miRNA_expressions[id_] = expressions
elif "MI" in id_:
pre_miRNA_expressions[id_] = expressions
# Now compute some maximal values for normalizing
max_mature_expression = max(map(lambda expressions: np.median(expressions), mature_miRNA_expressions.values()))
max_MIMAT = max(map(lambda id_: int(id_[5:]), mature_miRNA_expressions.keys()))
# Now make a table with scores
print("pre-miRNA\tmiRNA\tScore") # Header
for MI in pre_miRNA_expressions:
for MIMAT in MI_to_MIMAT.get(MI, []):
x = pre_miRNA_expressions[MI]
y = mature_miRNA_expressions.get(MIMAT)
if not y:
continue
mature_median = np.median(y)
MIMAT_num = int(MIMAT[5:])
corr_obj = spearmanr(x, y)
R = corr_obj[0]
normalized_expression = mature_median / max_mature_expression
normalized_MIMAT = 1 - MIMAT_num / max_MIMAT
normalized_R = (R + 1) / 2
weights = [0.5, 0.3, 0.2]
score = 0
for v, w in zip([normalized_expression, normalized_MIMAT, normalized_R], weights):
score += v*w
print("\t".join([MI, MIMAT, str(score)]))
if __name__ == "__main__":
if len(sys.argv) != 2:
print('''Usage: python score.py input_table.txt''')
else:
main(sys.argv[1])
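A small worked example of the weighted score above, with illustrative component values rather than numbers from any real expression table.
# normalized_expression, normalized_MIMAT, normalized_R are each in [0, 1]
components = [0.8, 0.5, 0.6]
weights = [0.5, 0.3, 0.2]
score = sum(v * w for v, w in zip(components, weights))
print(score)  # 0.5*0.8 + 0.3*0.5 + 0.2*0.6 = 0.67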
|
[
"s.a.nersisyan@gmail.com"
] |
s.a.nersisyan@gmail.com
|
d8b395d8ac0a6477ee1af27e443fb18de8465eb8
|
e6f79a62e811c0752f0701ef7e21c0bac42764e8
|
/DAC/num2dac.py
|
bca05838342d5bd71a5099e1ce3fc58050c350cc
|
[] |
no_license
|
KseniaYurko/DAC-ADC
|
4caf0e22a97236f1762e36033ddfe22208bf58e6
|
503cdd244e86743bf8810dab40b880774420c891
|
refs/heads/master
| 2023-05-06T16:40:52.145441
| 2021-05-23T16:24:32
| 2021-05-23T16:24:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
import RPi.GPIO as GPIO
import time
N = [10, 9, 11, 5, 6, 13, 19, 26]
GPIO.setmode(GPIO.BCM)
GPIO.setup(N, GPIO.OUT)
def num2dac(num):
    # assumed reconstruction: the original line(s) here were garbled to a stray "c";
    # express the value as an 8-bit binary string, one character per output pin
    binary = bin(num)[2:].zfill(8)
    for i in range(8):
        GPIO.output(N[i], int(binary[num - 1 - i]))
num2dac(3)
|
[
"iurko.kk@phystech.edu"
] |
iurko.kk@phystech.edu
|
b4035d14d48d4b7424181bca29f863136400598d
|
1cf5b9efcaa6de40513174d400621bfa8f1d3ed4
|
/pythonBackend/MyStreamListener.py
|
2123076d4f50ddc5a7c27e396fff5c1d7d2e4566
|
[] |
no_license
|
edfilbasan/gotsentiment
|
212a812ee95f16613aa978b25d9560c5da496d61
|
7540743a0309073fdb08ad6c73df137dc8a2698e
|
refs/heads/master
| 2023-01-14T03:37:18.131126
| 2020-02-17T23:15:56
| 2020-02-17T23:15:56
| 181,374,344
| 3
| 1
| null | 2022-12-10T11:21:47
| 2019-04-14T22:04:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,800
|
py
|
import tweepy
import csv
import re
#override tweepy.StreamListener to add logic to on_status
class MyStreamListener(tweepy.StreamListener):
tweet = {}
idSelf = 0
# Receives tweets, operates on them (more operations to come?)
def on_status(self, status):
try:
if (hasattr(status,'extended_tweet')):
tweetText = self.clean_tweet(status.extended_tweet['full_text'])
else:
tweetText = self.clean_tweet(status.text)
created_at = status.created_at
id = status.id
if(re.search('[a-zA-Z]', tweetText)):
# print('NUM stream tweets: ' + str(self.idSelf))
# print("\n")
self.idSelf += 1
self.tweet["tweet"] = tweetText
self.tweet["id"] = id
self.tweet["sequence"] = self.idSelf
self.tweet["created_at"] = created_at
with open('#testThread10.csv', 'a', newline='') as csv_file:
writer = csv.DictWriter(csv_file, self.tweet.keys())
writer.writerow(self.tweet)
except Exception as e:
print(">>>>Encountered Exception Tweet: %s" % str(e))
pass
return True
def on_disconnect(self, notice):
"""Called when twitter sends a disconnect notice Disconnect
codes are listed here: https://dev.twitter.com/docs/streaming-
apis/messages#Disconnect_messages_disconnect """
print(notice)
print('DISCONNECTED')
return
def on_stall_warning(self, status):
print("stall warning")
print(status)
return True
def on_error(self, status_code):
print("Encountered error with status code:" + repr(status_code))
return True
def clean_tweet(self, tweet):
'''
Utility function to clean tweet text by removing links, special characters
using simple regex statements.
'''
return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
|
[
"ntm32@cornell.edu"
] |
ntm32@cornell.edu
|
8582899f0b6cd0f7f1531c256890a8f8e05394a0
|
af9d2d87f3ea36d5d42e2d56c8e8c730d674b6f7
|
/Breadth_First_Search_Practice/513 Find Bottom Left Tree Value/solution.py
|
685f40ef680943453861fbed7bb5a356cc2989d0
|
[] |
no_license
|
ShiqinHuo/LeetCode_Notes
|
6830a3e8d7afdd76c257fc28f0029a3d5ec1abf1
|
f3ab388d2d47aac36e2f96156f149d447151878a
|
refs/heads/master
| 2021-05-25T09:17:04.838015
| 2020-03-24T13:29:26
| 2020-03-24T13:29:26
| 126,961,451
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def findBottomLeftValue(self, root):
"""
:type root: TreeNode
:rtype: int
"""
current_row = [root]
result = root.val
while current_row:
next_row = []
for node in current_row:
if (node.left):
next_row.append(node.left)
if (node.right):
next_row.append(node.right)
result = current_row[0].val
current_row = next_row
return result
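A quick hedged check of the traversal; the TreeNode class is only sketched in the comment above, so a minimal version is defined here for the example.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

# tree:  2        bottom-left value is 1
#       / \
#      1   3
root = TreeNode(2)
root.left = TreeNode(1)
root.right = TreeNode(3)
print(Solution().findBottomLeftValue(root))  # 1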
|
[
"ShiqinHuo@gmail.com"
] |
ShiqinHuo@gmail.com
|
47e01a8d79922beb1795fe91191db98c2627286b
|
61a8f496dbe1880398b0156940b1789ddfe8e081
|
/Week_7_Lab/Advanced/q10.py
|
3c2bfdedd51b4b0239a8993191d0ee3ac329def6
|
[] |
no_license
|
luca2849/CM1103-Problem-Solving-With-Python
|
e369cdc032249e3625ae5dbbd926703e20d11dd9
|
a10b7ee6b972b23528a983dd7fff78d097c08465
|
refs/heads/master
| 2020-04-04T15:07:01.179113
| 2018-12-13T12:07:19
| 2018-12-13T12:07:19
| 156,024,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
def rec_power(a, n):
    # if n is 1 then return a
    if n == 1:
        return a
    # recursively call this function for n//2 and call it factor
    factor = rec_power(a, n // 2)
    # if n is even return the square of factor
    if n % 2 == 0:
        return factor * factor
    # if n is odd then return the square of factor multiplied by a
    return factor * factor * a
print(rec_power(10, 4))
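Repeated squaring needs only O(log n) multiplications; two hedged spot checks of the corrected function above.
assert rec_power(2, 8) == 256
assert rec_power(3, 5) == 243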
|
[
"="
] |
=
|
a6c3cf2f1f9a3458d0b562aef5935f76de142de7
|
1956883d52e4019bbf8bd7bbc3744cdd1376e128
|
/abutton.py
|
96ea6f9b2354e36232ba86e55ad6e83e85bbaeda
|
[
"MIT"
] |
permissive
|
Otumian-empire/tkinter-basic-gui
|
5d7f7c697e9ac40f34b598b166186733d0931202
|
8a561fde8f770119bc3108511388371b1bdcabf5
|
refs/heads/master
| 2020-06-18T13:41:17.248470
| 2019-07-15T18:51:40
| 2019-07-15T18:51:40
| 196,320,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
from tkinter import *
root = Tk()
x = 0
def increase():
global x
x += 1
label.configure(text=x)
def decrease():
global x
x -= 1
label.configure(text=x)
label = Label(text=x)
sendbutton = Button(text="increase", command=increase)
deletebutton = Button(text="decrease", command=decrease)
sendbutton.grid()
label.grid()
deletebutton.grid()
mainloop()
|
[
"popecan1000@gmail.com"
] |
popecan1000@gmail.com
|
a5f03b6460749d35d829fc1dc57d5916aea8e21e
|
29200a862498323d77b18666bb9eb2a1554d5209
|
/bfresample.py
|
00002bf1251658dc142eda6104b03bb238225361
|
[] |
no_license
|
arms22/bfdump
|
f278416eeb4227b43a688734004fea6af6601ab2
|
1e665bcfeb53125e2525daec62398bbba29e411c
|
refs/heads/master
| 2020-05-01T02:51:07.921521
| 2019-03-23T01:30:14
| 2019-03-23T01:30:14
| 177,230,110
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,464
|
py
|
import sys
import argparse
import pandas as pd
parser = argparse.ArgumentParser(description="")
parser.add_argument('csv', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument("--rule", dest='rule', type=str, default='1T')
parser.add_argument("--index_col", dest='index_col', type=str, default='exec_date')
args = parser.parse_args()
df = pd.read_csv(args.csv, index_col=args.index_col, parse_dates=True)
df.sort_index(inplace=True)
ohlc = df['price'].resample(args.rule).ohlc()
volume = df['size'].resample(args.rule).sum()
buy_count = (df['side']=='BUY') * 1
sell_count = (df['side']=='SELL') * 1
imbalance = buy_count - sell_count
vwap = df['price']*df['size']
buy_count = buy_count.resample(args.rule).sum()
sell_count = sell_count.resample(args.rule).sum()
trades = buy_count + sell_count
imbalance = imbalance.resample(args.rule).sum()
vwap = vwap.resample(args.rule).sum()/volume
variance = df['price'].resample(args.rule).var()
stdev = df['price'].resample(args.rule).std()
average = df['price'].resample(args.rule).mean()
sell = df[df['side']=='SELL']
sell_volume = sell['size'].resample(args.rule).sum()
buy = df[df['side']=='BUY']
buy_volume = buy['size'].resample(args.rule).sum()
volume_imbalance = buy_volume-sell_volume
if args.index_col != 'exec_date':
exec_date = pd.to_datetime(df['exec_date'])
delay = (df.index - exec_date).dt.total_seconds()
delay = delay.resample(args.rule).last()
else:
delay = trades*0
sell_accepted_at = pd.to_datetime(df['sell_child_order_acceptance_id'].str[3:], format='%Y%m%d-%H%M%S-%f')
buy_accepted_at = pd.to_datetime(df['buy_child_order_acceptance_id'].str[3:], format='%Y%m%d-%H%M%S-%f')
sell_delay = (df.index - sell_accepted_at).dt.total_seconds()
buy_delay = (df.index - buy_accepted_at).dt.total_seconds()
market_delay = (df['side']=='SELL')*sell_delay + (df['side']=='BUY')*buy_delay
market_delay = market_delay.resample(args.rule).median()
data_ohlc = pd.DataFrame({'open': ohlc.open, 'high': ohlc.high, 'low': ohlc.low, 'close': ohlc.close, 'volume':volume,
'variance':variance, 'stdev':stdev, 'average':average, 'vwap':vwap,
'buy_count':buy_count, 'sell_count':sell_count, 'trades':trades, 'imbalance':imbalance,
'buy_volume':buy_volume, 'sell_volume':sell_volume,'volume_imbalance':volume_imbalance,
'delay':delay, 'market_delay':market_delay})
data_ohlc = data_ohlc.dropna()
print(data_ohlc.to_csv())
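A minimal, self-contained sketch of the per-rule OHLC and volume aggregation this script relies on, using synthetic trades rather than the exchange dump it expects.
import pandas as pd

idx = pd.date_range("2019-01-01 00:00:00", periods=6, freq="20S")
demo = pd.DataFrame({"price": [100, 101, 99, 102, 103, 101],
                     "size": [1, 2, 1, 1, 3, 2]}, index=idx)
demo_ohlc = demo["price"].resample("1T").ohlc()    # open/high/low/close per minute
demo_volume = demo["size"].resample("1T").sum()    # traded size per minute
print(demo_ohlc.join(demo_volume))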
|
[
"arms22@gmail.com"
] |
arms22@gmail.com
|
8b61414def568b0fb5a70aa3d6138923df7e26b9
|
e3fe2b429116b428f7ce2577e388934f574d3e4e
|
/models.py
|
e8e7f6bdc7f90a953e270aab665a01a1760b234b
|
[] |
no_license
|
FutureWL/Imooc-Python-Flask-Video-Artcms
|
8875d5c2799c735f4183d354fa381ea7287b0d09
|
baa6a14aa47dd0c46720fcce93cbc95b23117e30
|
refs/heads/master
| 2022-12-21T13:26:56.156609
| 2019-04-17T09:18:13
| 2019-04-17T09:18:13
| 181,664,216
| 0
| 0
| null | 2022-12-08T04:59:25
| 2019-04-16T10:05:23
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,519
|
py
|
# coding:utf8
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import check_password_hash
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "mysql+pymysql://root:root@localhost:8889/artcms_pro"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
db = SQLAlchemy(app)
"""
用户模型
1.编号
2.账号
3.密码
4.注册时间
"""
class User(db.Model):
__tablename__ = "user"
id = db.Column(db.Integer, primary_key=True) # 编号
name = db.Column(db.String(20), nullable=False) # 账号
pwd = db.Column(db.String(100), nullable=False) # 密码
addtime = db.Column(db.DateTime, nullable=False) # 注册时间
def __repr__(self):
return "<User %r>" % self.name
def check_pwd(self, pwd):
return check_password_hash(self.pwd, pwd)
"""
文章模型
1.编号
2.标题
3.分类
4.作者
5.封面
6.内容
7.发布时间
"""
class Art(db.Model):
__tablename__ = "art"
id = db.Column(db.Integer, primary_key=True) # 编号
title = db.Column(db.String(100), nullable=False) # 标题
cate = db.Column(db.Integer, nullable=False) # 分类
user_id = db.Column(db.Integer, nullable=False) # 作者
logo = db.Column(db.String(100), nullable=False) # 封面
content = db.Column(db.Text, nullable=False) # 内容
addtime = db.Column(db.DateTime, nullable=False) # 发布时间
def __repr__(self):
return "<Art %r>" % self.title
if __name__ == '__main__':
db.create_all()
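For context, a hedged sketch of how a password would be stored so that check_pwd() above can verify it; werkzeug's generate_password_hash is assumed as the counterpart of check_password_hash.
from werkzeug.security import generate_password_hash

demo_user = User(name="demo", pwd=generate_password_hash("s3cret"))
print(demo_user.check_pwd("s3cret"))  # True
print(demo_user.check_pwd("wrong"))   # False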
|
[
"624263934@qq.com"
] |
624263934@qq.com
|
40bb0128d71f1ed4d0776ae4e076f7440e468d39
|
1f8f423532070a9b49bad7b15af6cd12d0d3e5d0
|
/app/src/applications/auth/query/AvlanAuthQuery.py
|
17ef5e1a3063861b3a694771e6074e435b172010
|
[] |
no_license
|
r2r-dev/avlan-docker
|
357ce99db914660a0ffdb2022ee139f213e5514b
|
1fe6b045bc9d03cbc81b69431e98af45958358d0
|
refs/heads/master
| 2021-06-12T00:54:19.901286
| 2017-01-29T22:22:32
| 2017-01-29T22:22:32
| 68,092,562
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 953
|
py
|
from src.applications.base.query.AvlanBaseQuery import AvlanBaseQuery
from src.applications.auth.storage.AvlanTokenStorage import AvlanTokenStorage
class AvlanAuthQuery(AvlanBaseQuery):
def create_token(self, value, date):
token = AvlanTokenStorage()
token.value = value
token.expirationDate = date
self._dao.save(token)
return token
def get_token(self, **kwargs):
conditions = []
values = []
for key, value in kwargs.items():
condition = "`{0:s}` = %s".format(key)
conditions.append(condition)
values.append(value)
where_clause = " AND ".join(conditions)
tokens = self._dao.list_where(
where_clause=where_clause,
clazz=AvlanTokenStorage,
arg_list=values
)
# TODO check if unique results
if len(tokens) > 0:
return tokens[0]
return None
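A small standalone illustration of the WHERE clause that get_token() assembles from its keyword arguments (string handling only, no database involved).
kwargs_demo = {"value": "abc123", "expirationDate": "2017-01-01"}
conditions_demo = ["`{0:s}` = %s".format(key) for key in kwargs_demo]
print(" AND ".join(conditions_demo))   # e.g. `value` = %s AND `expirationDate` = %s
print(list(kwargs_demo.values()))      # bound parameters handed to the DAO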
|
[
"artur.stachecki@gmail.com"
] |
artur.stachecki@gmail.com
|
fd77865f99149986c2796021bc34b0517e28cb94
|
9b5d40bc51370f7a1d86834735f5238476d91c20
|
/beacon.py
|
2e2999e55f483502ae0d39959cab4c5fdd78e991
|
[] |
no_license
|
rodolfo-r-a-da-silva/live-telemetry-viewer-pyqt
|
e702b60be22151ed68eada6d31ba449957cf3731
|
9c9bee1f495f05f8f35535f9f0e6d70855d6a265
|
refs/heads/master
| 2023-03-26T14:25:26.409490
| 2021-03-22T16:49:20
| 2021-03-22T16:49:20
| 350,517,491
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 63
|
py
|
class Functions():
def __init__(self):
print("b")
|
[
"artur.chagas@usp.br"
] |
artur.chagas@usp.br
|
faebb5a0349e309e0695aa79d9415aaf1078fc69
|
39f98e5a10547b17311ddecc8acfde520a0bd866
|
/paper-code/configs/__init__.py
|
f36d121b1c00decd7637617fdc1528d362e7da9e
|
[] |
no_license
|
metapost/APROX-Robust-Stochastic-Optimization-Algorithms
|
75010ab324d5dd8a33950d4bc65bf9c0e8858066
|
1ebcd302ef5baea5eec833da8e913b8ade3cad96
|
refs/heads/master
| 2021-05-19T17:09:06.704476
| 2019-11-07T01:04:51
| 2019-11-07T01:04:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 59
|
py
|
from .configs import *
from .settings import apply_setting
|
[
"hilal.asi94@gmail.com"
] |
hilal.asi94@gmail.com
|
51f52e800e8f09fc08922501bc56b4bfc8eb4896
|
a049b9ebe0ecaf6cb5bca728d31acdf8d0a3a737
|
/M3_multi_mirror_mirror.py
|
fea6de6d7499d4c57d6b47e4c9a3b7e5284c3e30
|
[] |
no_license
|
mistajuliax/MACHIN3tools
|
330ccf7136f6323195cb2ab5fd9a450da181e6f3
|
c3115433be1aa68fea17453c1c1f666a51cb44d6
|
refs/heads/master
| 2022-06-22T18:48:08.204318
| 2017-04-18T00:05:04
| 2017-04-18T00:05:04
| 88,886,365
| 0
| 0
| null | 2017-04-20T16:18:40
| 2017-04-20T16:18:40
| null |
UTF-8
|
Python
| false
| false
| 3,002
|
py
|
bl_info = {
"name": "Multi Mirror Mirror",
"author": "MACHIN3",
"version": (0, 1),
"blender": (2, 77, 0),
"location": "Spacebar Menu/ Shift + Alt + X/Y/Z",
"description": "Mirror Mirror Tool, but allows mirroring of multiple objects at once.",
"warning": "",
"wiki_url": "",
"category": "Mesh"}
# SETTINGS
buttonx = "X"
buttony = "Y"
buttonz = "Z"
press = "PRESS"
ctrl = False
alt = True
shift = True
import bpy
class MultiMirrorMirrorX(bpy.types.Operator):
bl_idname = "machin3.multi_mirror_mirrorx"
bl_label = "MACHIN3: Multi Mirror Mirror X"
def execute(self, context):
multi_mirror_mirror(bpy.ops.object.mirror_mirror_x)
return {'FINISHED'}
class MultiMirrorMirrorY(bpy.types.Operator):
bl_idname = "machin3.multi_mirror_mirrory"
bl_label = "MACHIN3: Multi Mirror Mirror Y"
def execute(self, context):
multi_mirror_mirror(bpy.ops.object.mirror_mirror_y)
return {'FINISHED'}
class MultiMirrorMirrorZ(bpy.types.Operator):
bl_idname = "machin3.multi_mirror_mirrorz"
bl_label = "MACHIN3: Multi Mirror Mirror Z"
def execute(self, context):
multi_mirror_mirror(bpy.ops.object.mirror_mirror_z)
return {'FINISHED'}
def multi_mirror_mirror(mirrormirrortool):
activeobj = bpy.context.scene.objects.active
selection = bpy.context.selected_objects
selection.remove(activeobj)
for obj in selection:
bpy.ops.object.select_all(action='DESELECT')
bpy.data.objects[obj.name].select = True
bpy.data.objects[activeobj.name].select = True
bpy.context.scene.objects.active = activeobj
mirrormirrortool()
# DECALmachine support (u mirror for parallax and for info decals!)
if "decal" in obj.name or "info" in obj.name:
for mod in obj.modifiers:
if "mirror" in mod.name.lower():
mod.use_mirror_u = True
for obj in selection:
bpy.data.objects[obj.name].select = True
bpy.data.objects[activeobj.name].select = True
bpy.context.scene.objects.active = activeobj
def register():
bpy.utils.register_class(MultiMirrorMirrorX)
bpy.utils.register_class(MultiMirrorMirrorY)
bpy.utils.register_class(MultiMirrorMirrorZ)
wm = bpy.context.window_manager
km = wm.keyconfigs.addon.keymaps.new(name='Object Mode', space_type='EMPTY')
kmi = km.keymap_items.new(MultiMirrorMirrorX.bl_idname, buttonx, press, ctrl=ctrl, alt=alt, shift=shift)
kmi = km.keymap_items.new(MultiMirrorMirrorY.bl_idname, buttony, press, ctrl=ctrl, alt=alt, shift=shift)
kmi = km.keymap_items.new(MultiMirrorMirrorZ.bl_idname, buttonz, press, ctrl=ctrl, alt=alt, shift=shift)
def unregister():
bpy.utils.unregister_class(MultiMirrorMirrorX)
bpy.utils.unregister_class(MultiMirrorMirrorY)
bpy.utils.unregister_class(MultiMirrorMirrorZ)
# TODO: properly unregister keymap and keymap_items
if __name__ == "__main__":
register()
|
[
"social@machin3.io"
] |
social@machin3.io
|
81042f16ac1b22ab0924fb904a3ab405642edbb9
|
d644194bb4da2a4e7bc3f79268530790d9995e47
|
/events/utils/FileService.py
|
177b8cd46bb69ff709bd4adc9e2d7568d3303975
|
[] |
no_license
|
lhalam/event_manager
|
d6294237f11f5c75f196f0db737897dc8c0ba3d0
|
e3bd2949f49080639eaca30ac242d9a98aa71f20
|
refs/heads/dev
| 2021-01-11T04:00:45.877314
| 2016-12-29T19:57:34
| 2016-12-29T19:57:34
| 71,260,068
| 1
| 1
| null | 2016-12-29T19:57:35
| 2016-10-18T15:01:24
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,162
|
py
|
from django.views.generic.base import View
from boto.s3.key import Key
from boto.s3.connection import S3Connection
from events import local_settings
class FileManager(View):
CONN = S3Connection(local_settings.ACCESS_KEY_ID, local_settings.AWS_SECRET_ACCESS_KEY)
@staticmethod
def delete_by_key(key):
bucket = FileManager.CONN.get_bucket(local_settings.NAME_PROFILES_BUCKET)
k = FileManager.get_key_bucket()
k.key = key
bucket.delete_key(k)
return True
@staticmethod
def get_href(key, bucket_name=local_settings.NAME_PROFILES_BUCKET):
return FileManager.CONN.generate_url(expires_in=0,
method="GET",
bucket=bucket_name,
key=key,
query_auth=False,
force_http=True
)
@staticmethod
def get_key_bucket():
bucket = FileManager.CONN.get_bucket(local_settings.NAME_PROFILES_BUCKET)
return Key(bucket)
|
[
"grizly.vl@gmail.com"
] |
grizly.vl@gmail.com
|
9678a91e6b91dca0a5b441f54375da06394cbea3
|
4981cddfc4c9a7c3d8a2ec93592707a572e4e4b4
|
/Project/game_class.py
|
5449f5a2b8a4f49ec05089b6d50713cfa266b309
|
[] |
no_license
|
JaeHyeon-Yu/Project
|
ec9c7b5b6c52babb714c771ae2f9639815bcf171
|
1aff96a8528ef039875bd3204b98006f770066bc
|
refs/heads/master
| 2020-04-04T02:03:06.464788
| 2018-12-02T12:38:13
| 2018-12-02T12:38:13
| 155,687,229
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 127
|
py
|
from pico2d import *
from card import *
from player import *
from monster import *
from background import *
from hpmp import *
|
[
"43131666+JaeHyeon-Yu@users.noreply.github.com"
] |
43131666+JaeHyeon-Yu@users.noreply.github.com
|
8ec3bf12cacdc47c54db00c9ea91520d09aa8fc4
|
747f759311d404af31c0f80029e88098193f6269
|
/addons/email_account/wizard/wizard_send_email.py
|
2ad2aa3b9779c2e69e1d813316fd08d361ac84a4
|
[] |
no_license
|
sgeerish/sirr_production
|
9b0d0f7804a928c0c582ddb4ccb7fcc084469a18
|
1081f3a5ff8864a31b2dcd89406fac076a908e78
|
refs/heads/master
| 2020-05-19T07:21:37.047958
| 2013-09-15T13:03:36
| 2013-09-15T13:03:36
| 9,648,444
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 79
|
py
|
/home/openerp/production/extra-addons/email_account/wizard/wizard_send_email.py
|
[
"geerish@omerp.net"
] |
geerish@omerp.net
|
adbd024cf5420b91b9fdfbf7e1bfc06c9100629a
|
983f6d96e10643421816beb9204c9cc1a5a93b50
|
/HASHGRAPH/continuous_send.py
|
ea721e46ccab70751b749725f4cc2c1c7c9ad344
|
[] |
no_license
|
USAFACyberPower/Hashgraph
|
9865f5f9a4269cb37dae8332b309384f7a92e906
|
1e7baef7022157c0f4daf6efb55bfe833e52b573
|
refs/heads/master
| 2022-12-17T11:04:17.080277
| 2020-09-10T17:01:39
| 2020-09-10T17:01:39
| 294,466,382
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 533
|
py
|
#!/usr/bin/env python3
import socket
import random
HOST = ['192.168.20.182', '192.168.20.230'] # The server's hostname or IP address
PORT = 65432 # The port used by the server
while True:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s_host = 0
s_send = str(random.random())
try:
s.connect((HOST[s_host], PORT))
s.sendall(s_send.encode('ascii'))
data = s.recv(1024)
print("sent!")
#print('Data sent successfully:', repr(data))
except:
print("Invalid input \n")
|
[
"59588023+ZelfDread@users.noreply.github.com"
] |
59588023+ZelfDread@users.noreply.github.com
|
4c5aa5950353440cacb41eae8812b9ebad525a8f
|
536656cd89e4fa3a92b5dcab28657d60d1d244bd
|
/chrome/test/enterprise/e2e/policy/extension_blacklist/extension_blacklist.py
|
d14b00fa20cb00fb2767361a0b60407fe2824f33
|
[
"BSD-3-Clause"
] |
permissive
|
ECS-251-W2020/chromium
|
79caebf50443f297557d9510620bf8d44a68399a
|
ac814e85cb870a6b569e184c7a60a70ff3cb19f9
|
refs/heads/master
| 2022-08-19T17:42:46.887573
| 2020-03-18T06:08:44
| 2020-03-18T06:08:44
| 248,141,336
| 7
| 8
|
BSD-3-Clause
| 2022-07-06T20:32:48
| 2020-03-18T04:52:18
| null |
UTF-8
|
Python
| false
| false
| 2,267
|
py
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
from chrome_ent_test.infra.core import environment, before_all, test
from infra import ChromeEnterpriseTestCase
@environment(file="../policy_test.asset.textpb")
class ExtensionInstallBlacklistTest(ChromeEnterpriseTestCase):
"""Test the ExtensionInstallBlacklist policy.
https://cloud.google.com/docs/chrome-enterprise/policies/?policy=ExtensionInstallBlacklist"""
@before_all
def setup(self):
self.InstallChrome('client2012')
self.InstallWebDriver('client2012')
def installExtension(self, url):
args = ['--url', url, '--text_only', '--wait', '5']
dir = os.path.dirname(os.path.abspath(__file__))
logging.info('Opening page: %s' % url)
output = self.RunWebDriverTest('client2012',
os.path.join(dir, '../install_extension.py'),
args)
return output
@test
def test_ExtensionBlacklist_all(self):
extension = '*'
self.SetPolicy('win2012-dc', r'ExtensionInstallBlacklist\1', extension,
'String')
self.RunCommand('client2012', 'gpupdate /force')
logging.info('Disabled extension install for ' + extension)
test_url = 'https://chrome.google.com/webstore/detail/google-hangouts/nckgahadagoaajjgafhacjanaoiihapd'
output = self.installExtension(test_url)
self.assertIn('blocked', output)
@test
def test_ExtensionBlacklist_hangout(self):
extension = 'nckgahadagoaajjgafhacjanaoiihapd'
self.SetPolicy('win2012-dc', r'ExtensionInstallBlacklist\1', extension,
'String')
self.RunCommand('client2012', 'gpupdate /force')
logging.info('Disabled extension install for ' + extension)
test_url = 'https://chrome.google.com/webstore/detail/google-hangouts/nckgahadagoaajjgafhacjanaoiihapd'
output = self.installExtension(test_url)
self.assertIn('blocked', output)
positive_test_url = 'https://chrome.google.com/webstore/detail/grammarly-for-chrome/kbfnbcaeplbcioakkpcpgfkobkghlhen'
output = self.installExtension(positive_test_url)
self.assertNotIn('blocked', output)
|
[
"pcding@ucdavis.edu"
] |
pcding@ucdavis.edu
|
afdc9e6a0d519cd1b27adb70b9fb7fc4ecc25f8f
|
bd829ad194fb5d7610cbe062103dfebdfc791f4e
|
/tests/conftest.py
|
45dccd041cba9511fd325dade231012f031f687c
|
[] |
no_license
|
avracadabra/api-gateway
|
df9e25d01dd56d7d9e20ead9fdd20aaddc568ddf
|
4e30e3f3d2235278cd78bdb0c3a961c8073f0879
|
refs/heads/master
| 2022-08-10T06:55:51.909344
| 2020-05-10T12:54:13
| 2020-05-10T12:54:13
| 260,641,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
import pytest
from starlette.testclient import TestClient
from avracadabra.api.app import create_app
@pytest.fixture(name="client", scope="session")
def fixture_test_client():
with TestClient(create_app()) as client:
yield client
|
[
"pverkest@anybox.fr"
] |
pverkest@anybox.fr
|
af8010a1e412e867091d19bae06ae1b90c345783
|
f993e252fc740471e71a6748685988fc0b5f2e34
|
/backend/driver/migrations/0001_initial.py
|
66dea496859c52c65d3e087e3245db062a3abc77
|
[] |
no_license
|
crowdbotics-apps/cobros-app-22778
|
b9b9561d693fc979de0af693ffa9e4ca4d57873d
|
0774fc76d1b8b484790ed1ec070c1f6455905c65
|
refs/heads/master
| 2023-01-12T16:46:05.469345
| 2020-11-19T17:57:14
| 2020-11-19T17:57:14
| 314,314,056
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,540
|
py
|
# Generated by Django 2.2.17 on 2020-11-19 17:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("delivery_order", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="DriverProfile",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("photo", models.URLField()),
("timestamp_created", models.DateTimeField(auto_now_add=True)),
("last_updated", models.DateTimeField(auto_now=True)),
("details", models.TextField(blank=True, null=True)),
(
"user",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="driverprofile_user",
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="DriverOrder",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("timestamp_created", models.DateTimeField(auto_now_add=True)),
(
"driver",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="driverorder_driver",
to="driver.DriverProfile",
),
),
(
"order",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="driverorder_order",
to="delivery_order.Order",
),
),
],
),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
57234362fb224659d519596c67ccad380e195e2b
|
dba6d074ceed96714e1cd32834d7ceafcd1bb08b
|
/tweet.py
|
e9dfc685fc4d7222e0a57238264b42bfc5e3d2fb
|
[] |
no_license
|
cosu/gvbalert
|
6de6b61501d7de523b98f066ddc897008a7b86c2
|
fb75716b708fa2fa8da513168e15d2b3b23cf760
|
refs/heads/master
| 2021-01-11T17:52:02.081730
| 2017-04-05T21:35:38
| 2017-04-05T21:35:38
| 79,852,829
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,562
|
py
|
import attr
from parsing import extract_event_type, extract_lines, extract_ride_type, extract_reason, remove_links, extract_destination
DISTURBANCE = {'en': ['disturbance'], 'nl': ['verstoring']}
DELAY = {'en': ['delay'], 'nl': ['langzaam', 'vertraging', 'dienstregeling']}
CROWDED = {'en': 'crowded', 'nl': ['druk']}
RECOVERED = {'en': 'recovered', 'nl': ''}
UNKNOWN = {'en': 'n/a', 'nl': ['n/a']}
OTHER = {'en': 'other', 'nl': ['other']}
DETOUR = {'en': 'detour', 'nl': ['omleiding']}
EVENT_TYPES = [DISTURBANCE, DELAY, CROWDED, RECOVERED, UNKNOWN, DETOUR]
@attr.s
class Tweet(object):
event_type = attr.ib()
text = attr.ib()
created_at = attr.ib()
id = attr.ib()
lines = attr.ib()
ride_type = attr.ib()
destination = attr.ib()
reason = attr.ib()
@classmethod
def from_tweet(cls, tweet):
"""
:param tweet: the tweet
:type
:return:
:rtype Tweet
"""
cleaned_text = remove_links(tweet.text.lower())
event_type = extract_event_type(cleaned_text)
reason = extract_reason(cleaned_text)
lines = extract_lines(cleaned_text)
ride_type = extract_ride_type(cleaned_text)
destination = extract_destination(cleaned_text)
return cls(created_at=tweet.created_at,
id=tweet.id,
text=tweet.text,
event_type=event_type,
reason=reason,
lines=lines,
ride_type=ride_type,
destination=destination)
|
[
"cosu@cosu.ro"
] |
cosu@cosu.ro
|
c0ed17888c60fcab123c626b184ea63afe1eb11f
|
504d143a2b3172b5f59ecb0c9ce6912d7cdc669d
|
/football-tracker/main.py
|
4fd23e35415af58cf930dbe20fde04a31f4d46c1
|
[] |
no_license
|
xzebra/aplc-ucode19
|
786f049b4e6ce91fad6f0e62f02dc9384412228a
|
67a6d8b2d2d7b5c859a888233d5fd6de0ee7f857
|
refs/heads/master
| 2020-05-01T08:24:16.470712
| 2019-03-24T09:28:54
| 2019-03-24T09:28:54
| 177,377,892
| 0
| 0
| null | 2019-03-24T06:14:14
| 2019-03-24T06:14:13
| null |
UTF-8
|
Python
| false
| false
| 2,679
|
py
|
import boto3
import base64
import cv2
import csv
client = boto3.client('rekognition')
REDUCTION = 0
ball_movement = []
def save_ball_data():
with open('lame_bolas.csv', mode='w') as csvfile:
writer = csv.writer(csvfile)
#writer.writerow(['X', 'Y', 'Height', 'Width'])
for movement in ball_movement:
writer.writerow([str(movement[i]) for i in range(4)])
def face_recon(jpg_bytes, frame):
height, width, layers = frame.shape
faces = client.detect_faces(Image={'Bytes':jpg_bytes}, Attributes=['ALL'])
# Draw rectangle around faces
for face in faces['FaceDetails']:
cv2.rectangle(frame,
(int(face['BoundingBox']['Left']*width),
int(face['BoundingBox']['Top']*height)),
(int((face['BoundingBox']['Left']+face['BoundingBox']['Width'])*width),
int((face['BoundingBox']['Top']+face['BoundingBox']['Height'])*height)),
(0,0,255), 2)
def track_ball(jpg_bytes, frame):
response = client.detect_labels(
Image={
'Bytes': jpg_bytes,
},
MaxLabels=123,
MinConfidence=50,
)
#print([r['Name'] for r in response['Labels']])
height, width, layers = frame.shape
# Soccer Ball detected
for r in response['Labels']:
if r['Name'] == 'Soccer Ball':
for i in r['Instances']:
x = i['BoundingBox']['Left']*width
x_width = i['BoundingBox']['Width']*width
y = i['BoundingBox']['Top']*height
y_height = i['BoundingBox']['Height']*height
pos = [int(x + x_width/2), int(y + y_height/2)]
ball_movement.append([pos[0], pos[1], x_width, y_height])
cv2.circle(frame, (pos[0],pos[1]), 10, (0,0,255), -1)
def labelFrame(jpg, frame, video):
jpg_bytes = jpg.tobytes()
track_ball(jpg_bytes, frame)
#face_recon(jpg_bytes, frame)
# Display the resulting frame
video.write(frame)
def labelVideo(video_name):
cap = cv2.VideoCapture(video_name)
success, frame = cap.read()
height, width, layers = frame.shape
video = cv2.VideoWriter('video2.mp4', cv2.VideoWriter_fourcc(*'avc1'), 20.0, (width, height))
while success:
success, buf = cv2.imencode('.jpg', frame)
labelFrame(buf, frame, video)
success, frame = cap.read()
cap.release()
video.release()
cv2.destroyAllWindows()
save_ball_data()
#labelFrame(open('data/example.jpg', 'rb').read())
labelVideo('data/Hd3.mp4')
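For reference, a tiny standalone sketch of the box-to-pixel-centre arithmetic used in track_ball(), with a made-up bounding box and frame size.
width_px, height_px = 1280, 720                      # hypothetical frame size
box = {"Left": 0.25, "Top": 0.5, "Width": 0.1, "Height": 0.2}
x = box["Left"] * width_px                           # 320.0
y = box["Top"] * height_px                           # 360.0
x_width = box["Width"] * width_px                    # 128.0
y_height = box["Height"] * height_px                 # 144.0
print((int(x + x_width / 2), int(y + y_height / 2))) # (384, 432)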
|
[
"jromanosp@hotmail.com"
] |
jromanosp@hotmail.com
|
67991a4734fbf184aa111ce23f7c1989d0e99200
|
5b9b682f24f61cfc54dab88cf02d67f97d6c318f
|
/0x0C-python-almost_a_circle/models/square.py
|
d9356838c00a7d4614ae74f9e0f057ac28c7ddd8
|
[] |
no_license
|
Lvmvr22/alx-higher_level_programming
|
90cab81ebeca4b852138ed1a7b853d1493fbcacc
|
db76df49f35cf304f1910ea48cea72bba624b43a
|
refs/heads/main
| 2023-07-13T06:35:25.729309
| 2021-08-24T11:23:16
| 2021-08-24T11:23:16
| 362,140,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,751
|
py
|
#!/usr/bin/python3
# square.py
"""Defines the square class
"""
from models.rectangle import Rectangle
class Square(Rectangle):
"""Class Square inherits from Rectangle
Args:
size, x=0, y=0, id=None
    Raises:
"""
def __init__(self, size, x=0, y=0, id=None):
"""Intiates the square object
"""
super().__init__(size, size, x, y, id)
def __str__(self):
"""string representation of the square object
"""
return "[Square] ({}) {}/{} - {}".format(self.id,
self._Rectangle__x,
self._Rectangle__y,
self._Rectangle__width)
@property
def size(self):
"""returns the size of the object
"""
return self._Rectangle__width
@size.setter
def size(self, size):
"""sets the new width and height
"""
self.width = size
self.height = size
def update(self, *args, **kwargs):
"""Update the Rectangle.
Args:
*args (ints): New attribute values.
- 1st argument represents id attribute
- 2nd argument represents size attribute
- 3rd argument represents x attribute
- 4th argument represents y attribute
**kwargs (dict): New key/value pairs of attributes.
"""
new_args = [self.id,
self._Rectangle__width,
self._Rectangle__x, self._Rectangle__y]
if len(args) == 0 or args is None:
if len(kwargs) == 0:
return
else:
try:
new_args[0] = kwargs['id']
except KeyError:
pass
try:
new_args[1] = kwargs['size']
except KeyError:
pass
try:
new_args[2] = kwargs['x']
except KeyError:
pass
try:
new_args[3] = kwargs['y']
except KeyError:
pass
else:
for x in range(len(args)):
if x < len(new_args):
new_args[x] = args[x]
self.__init__(new_args[1],
new_args[2],
new_args[3],
new_args[0])
def to_dictionary(self):
"""Returns a dictionary representation of the the
object"""
obj_dic = {
"id": self.id,
"size": self.width,
"x": self.x,
"y": self.y
}
return obj_dic
|
[
"lamarnyairo@gmail.com"
] |
lamarnyairo@gmail.com
|
14536dfa37f4ea568e6c92a11016ff172bc3a4b4
|
210958516fa1354dbf4027b5602e448be54449b9
|
/python_codes/manipulate_plot_q_s.py
|
85f10009c7eea20f5c689197d57e28513acfd436
|
[] |
no_license
|
Somayeh91/PSPL-Plus-Gaussian-Fit
|
f3527f0a735aa32c6a05d1405ee97c44e92345a2
|
40905cb1616ef5089fcd178440aeedb73a1ce7fc
|
refs/heads/master
| 2022-12-01T13:11:06.207735
| 2020-08-17T17:02:16
| 2020-08-17T17:02:16
| 288,234,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,455
|
py
|
import glob,os,sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mat
from numpy import *
import re
import scipy.stats as st
from os.path import expanduser
import cmath
import scipy.optimize as op
import time
import gzip
import pandas as pd
def med_med (true,fitted):
temp = fitted - true
return (np.median(np.abs(temp-np.median(temp))))
def rms (true,fitted):
temp = fitted - true
return np.sqrt((np.sum(temp**2))/len(temp))
start = time.time()
home = os.path.expanduser("~")
direc = os.listdir(".")
'''
name = 'alllc_second_run_A_>_1.CSV'
#Library/Mobile Documents/com~apple~CloudDocs/Microlensing/OSU trip/Matt/result_file/
tempdata = home+'/Desktop/trial_runs/alllce1_A_>_1_fails/'+str(name)
'''
name = 'alllc_result_v2.CSV'
#Library/Mobile Documents/com~apple~CloudDocs/Microlensing/OSU trip/Matt/result_file/
tempdata = home+'/Desktop/trial_runs/'+str(name)
df = pd.read_csv(tempdata)
df['u0_true'] = np.abs(df['u0_true'])
df['u0_fitted'] = np.abs(df['u0_fitted'])
#(df['chi_2_2']>-22500)& (df['s_fitted']<5) &
#df = df[((df['f_s_true']*( (2 + df['u0_true']**2) / (df['u0_true']*np.sqrt(4 + df['u0_true']**2)) ) + (1-df['f_s_true']) )>1.5) & (df['u0_true']>0.1) ]
#df = df[ (df['chi_2_2']>-25000) & (df['s_fitted']<5)&((df['f_s_true']*( (2 + df['u0_true']**2) / (df['u0_true']*np.sqrt(4 + df['u0_true']**2)) ) + (1-df['f_s_true']) )>1.1)]
color = ['#e41a1c', '#f781bf', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628','#e7298a', '#e6ab02']
print len(df)
df['q_fitted_log'][df['q_fitted_log']>0] = np.log10( 1/df['q_fitted'][df['q_fitted_log']>0] )
df['q_fitted'][df['q_fitted_log']>0] = ( 1/df['q_fitted'][df['q_fitted_log']>0] )
u = df['u0_true']
df['max_A'] = (2+u**2)/(u*np.sqrt(4+u**2))
df = df[df['s_fitted']<10][np.abs(df['u0_fitted'])>0.045] #02/20/2018
#df_new_del_t = df[(np.abs(df['t0_fitted']-df['tp_fitted'])>1) | ((-2*df['chi_2_1']/41039)>1.04) | (np.abs((-2*df['chi_2_1']/41039)-(-2*df['chi_2_2']/41039))>0.02)]
##df_new_del_t = df[(np.abs(df['t0_fitted']-df['tp_fitted'])>1) & ((-2*df['chi_2_2']/41039)>1.003) ]
df_new_del_t = df[np.abs(df['u0_fitted'])>0.045]
df_new_ampl = df[(np.abs(df['t0_fitted']-df['tp_fitted'])>1)]
df_new_ampl_del_t = df_new_del_t[(df_new_del_t['ampl_fitted']>0.016) | (df_new_del_t['ampl_fitted']<-0.016)]
# cut-offs for s_fitted
low_cut = 0.28
high_cut = 5
err_1 = 0.25
err_2 = 0.1
err_1_q = 1
x1 = df['s_fitted']/df['s_true']
y1 = df['ampl_fitted']
x1_name ='Ratio of fitted separation over true separation'
y1_name = 'Amplitude of the planetary perturbation'
# Regionalizing the plot of s_fitted vs s_true
s_deg = (np.abs(df['s_true']-np.sqrt((df['s_true']-(1/df['s_true']))**2 + 4)))
df_precise = df[(df['s_fitted']>df['s_true']-0.1) & (df['s_fitted']< df['s_true']+0.1)]
df_s_equal_1 = (df [ (df['s_fitted']<1.05) & (df['s_fitted']>0.95) ][ (df['s_fitted'] > df['s_true']+0.25) | (df['s_fitted'] < df['s_true']-0.25) ]
[ (df['s_fitted'] < s_deg - 0.2) | (df['s_fitted'] > s_deg +0.2) ]).reset_index(drop=True)
s_deg_ = (df[ (df['s_fitted'] < s_deg+0.2 ) & (df['s_fitted'] > s_deg-0.2 )] [ (df['s_fitted']>1.05) | (df['s_fitted']<0.95) ]
[ (df['s_fitted'] > df['s_true']+0.25) | (df['s_fitted'] < df['s_true']-0.25) ]).reset_index(drop=True)
s_err_up = (df [ (df['s_fitted'] < df['s_true']+0.25 ) & (df['s_fitted'] > df['s_true']+0.1 )]).reset_index(drop=True)
s_err_down = (df [ (df['s_fitted'] < df['s_true']-0.1 ) & (df['s_fitted'] > df['s_true']-0.25 )]).reset_index(drop=True)
#[ (df['s_fitted']>1.05) | (df['s_fitted']<0.95) ][ (df['s_fitted'] > s_deg+0.2) | (df['s_fitted'] < s_deg-0.2) ]
s_scatter_1 = (df[ (df['s_fitted']> s_deg+0.2) & (df['s_fitted']> df['s_true']+0.25) ]).reset_index(drop=True)
s_scatter_2 = (df[ (df['s_fitted']> s_deg+0.2) & (df['s_fitted']< df['s_true']-0.25) ][(df['s_fitted']>1.05) | (df['s_fitted']<0.95)]).reset_index(drop=True)
s_scatter_3 = (df[ (df['s_fitted']< s_deg-0.2) & (df['s_fitted']< df['s_true']-0.25) ]).reset_index(drop=True)
s_scatter_4 = (df[ (df['s_fitted']< s_deg-0.2) & (df['s_fitted']> df['s_true']+0.25) ][(df['s_fitted']>1.05) | (df['s_fitted']<0.95)]).reset_index(drop=True)
#df['x_c'] = np.sqrt( df['u0_fitted']**2 + np.abs(df['tp']-df['t0_fitted'])**2/(df['tE_fitted']) )
plt.close('all')
plt.figure(1)
plt.rc('axes',edgecolor='black')
plt.rcParams['axes.facecolor'] = 'white'
plt.title('Fitted ' + str(y1_name) + ' vs True ' + str(x1_name) + ' for ' + str(len(df)) + ' targets', size=20)
plt.plot (x1,y1,'b.',label='_nolegend_',markersize=8 , alpha = 0.4)
#plt.plot (x1,x1,'g-',label='_nolegend_')
#plt.text(max(df['s_true'])-1.5, (high_cut-low_cut)/2, 'Median Absolute Deviation = '+str(round(med_med(df['s_true'],df['s_fitted']),3)),size=15)
#plt.axis([0,5,0,5])
#plt.legend()
plt.xlabel(str(x1_name),size=20)
plt.ylabel(str(y1_name),size=20)
plt.grid()
plt.figure(2)
plt.title('Fitted ' + str(y1_name) + ' vs True ' + str(x1_name) + ' for ' + str(len(df)) + ' targets', size=20)
plt.plot (df['q_true_log'],df['q_fitted_log'],'b.',markersize=10,label='_nolegend_',alpha=0.4)
plt.plot (df['q_true_log'],df['q_true_log'],'g-',label='_nolegend_')
#plt.plot (df['q_true_log'],df['q_true_log']+1,'y-.',label = '_nolegend_')
#plt.plot (df['q_true_log'],df['q_true_log']-1,'y-.',label = '_nolegend_')
#plt.plot (df['q_true_log'],df['q_true_log']-2.5,'y--', label = '_nolegend_')
#plt.plot (df['q_true_log'][df['s_fitted']<low_cut],df['q_fitted_log'][df['s_fitted']<low_cut],'.',color='red',markeredgecolor='none',markersize=10, label = '_nolegend_')
#plt.plot (df['q_true_log'][df['s_fitted']>high_cut],df['q_fitted_log'][df['s_fitted']>high_cut],'.',color='red',markeredgecolor='none',markersize=10,label = 'Failure')
#plt.plot (df['q_true_log'][(df['s_fitted']<high_cut) & ( df['s_fitted']> df['s_true']+err_1 ) ],df['q_fitted_log'][(df['s_fitted']<high_cut) & ( df['s_fitted']> df['s_true']+err_1 ) ],'.',color = 'orange',markersize=10, label = 's_true + 0.25 < s_fitted < 2.5')
#plt.plot (df['q_true_log'][(df['s_fitted']<df['s_true']-err_1) & ( df['s_fitted']> low_cut ) ],df['q_fitted_log'][(df['s_fitted']<df['s_true']-err_1) & ( df['s_fitted']> low_cut ) ],'.',color = 'orange',markersize=10, label = '0.28 < s_fitted < s_true - 0.25')
plt.plot (df['q_true'],df['q_true']+err_2,'g--')
#plt.plot (df['q_true_log'][(df['s_fitted']<df['s_true']+err_1) & ( df['s_fitted']> df['s_true']+err_2 ) ],df['q_fitted_log'][(df['s_fitted']<df['s_true']+err_1) & ( df['s_fitted']>df['s_true']+err_2) ],'.',color = '#6BFF33',markersize=10, label = 's_true + 0.1 < s_fitted < s_true + 0.25')
#plt.plot (df['q_true_log'][(df['s_fitted']<df['s_true']-err_2) & ( df['s_fitted']> df['s_true']-err_1 ) ],df['q_fitted_log'][(df['s_fitted']<df['s_true']-err_2) & ( df['s_fitted']>df['s_true']-err_1) ],'.',color = '#6BFF33',markersize=10, label = 's_true - 0.25 < s_fitted < s_true - 0.1')
#plt.text(min(df['q_fitted_log'])+1, (min(df['q_fitted_log'])-max(df['q_fitted_log']))/2, 'Median Absolute Deviation = '+str(round(med_med(df['q_true_log'],df['q_fitted_log']),3)),size=15)
plt.axis([min(df['q_fitted_log']), max(df['q_fitted_log']), min(df['q_fitted_log']), max(df['q_fitted_log'])])
plt.xlabel(str(x1_name),size=15)
plt.ylabel(str(y1_name),size=15)
#plt.legend()
plt.grid()
'''
plt.figure(3)
plt.rc('axes',edgecolor='black')
plt.rcParams['axes.facecolor'] = 'white'
plt.title('Fitted Pojected Separation vs True Projected Separation for '+str(len(df_new_del_t))+' targets',size=15)
plt.plot (df_new_del_t['s_true'],df_new_del_t['s_fitted'],'b.',label='_nolegend_',markersize=8 , alpha = 0.4)
plt.plot (df_new_del_t['s_true'],df_new_del_t['s_true'],'g-',label='_nolegend_')
plt.axis([0,5,0,5])
plt.grid()
plt.figure(6)
plt.rc('axes',edgecolor='black')
plt.rcParams['axes.facecolor'] = 'white'
plt.title('Fitted Pojected Separation vs True Projected Separation for '+str(len(df_new_del_t))+' targets',size=15)
plt.plot (df_new_del_t['q_true_log'],df_new_del_t['q_fitted_log'],'b.',label='_nolegend_',markersize=8 , alpha = 0.4)
plt.plot (df_new_del_t['q_true_log'],df_new_del_t['q_true_log'],'g-',label='_nolegend_')
plt.axis([min(df_new_del_t['q_fitted_log']), max(df_new_del_t['q_fitted_log']), min(df_new_del_t['q_fitted_log']), max(df_new_del_t['q_fitted_log'])])
plt.grid()
plt.figure(4)
plt.rc('axes',edgecolor='black')
plt.rcParams['axes.facecolor'] = 'white'
plt.title('Fitted Pojected Separation vs True Projected Separation for df_new_ampl',size=15)
plt.plot (df_new_ampl['s_true'],df_new_ampl['s_fitted'],'b.',label='_nolegend_',markersize=8 , alpha = 0.4)
plt.plot (df_new_ampl['s_true'],df_new_ampl['s_true'],'g-',label='_nolegend_')
plt.axis([0,5,0,5])
plt.figure(5)
plt.rc('axes',edgecolor='black')
plt.rcParams['axes.facecolor'] = 'white'
plt.title('Fitted Pojected Separation vs True Projected Separation for df_new_ampl',size=15)
plt.plot (df_new_ampl['q_true_log'],df_new_ampl['q_fitted_log'],'b.',label='_nolegend_',markersize=8 , alpha = 0.4)
plt.plot (df_new_ampl['q_true_log'],df_new_ampl['q_true_log'],'g-',label='_nolegend_')
plt.axis([min(df_new_ampl['q_fitted_log']), max(df_new_ampl['q_fitted_log']), min(df_new_ampl['q_fitted_log']), max(df_new_ampl['q_fitted_log'])])
plt.figure(7)
plt.rc('axes',edgecolor='black')
plt.rcParams['axes.facecolor'] = 'white'
plt.title('Fitted Pojected Separation vs True Projected Separation for df_new_ampl_del_t',size=10)
plt.plot (df_new_ampl_del_t['s_true'],df_new_ampl_del_t['s_fitted'],'b.',label='_nolegend_',markersize=8 , alpha = 0.4)
plt.plot (df_new_ampl_del_t['s_true'],df_new_ampl_del_t['s_true'],'g-',label='_nolegend_')
plt.axis([0,5,0,5])
plt.figure(8)
plt.rc('axes',edgecolor='black')
plt.rcParams['axes.facecolor'] = 'white'
plt.title('Fitted Pojected Separation vs True Projected Separation for df_new_ampl_del_t',size=10)
plt.plot (df_new_ampl_del_t['q_true_log'],df_new_ampl_del_t['q_fitted_log'],'b.',label='_nolegend_',markersize=8 , alpha = 0.4)
plt.plot (df_new_ampl_del_t['q_true_log'],df_new_ampl_del_t['q_true_log'],'g-',label='_nolegend_')
plt.axis([min(df_new_ampl_del_t['q_fitted_log']), max(df_new_ampl_del_t['q_fitted_log']), min(df_new_ampl_del_t['q_fitted_log']), max(df_new_ampl_del_t['q_fitted_log'])])
'''
plt.figure(3)
plt.rc('axes',edgecolor='black')
plt.rcParams['axes.facecolor'] = 'white'
plt.title('Fitted log(Projected Separation) vs True log(Projected Separation) for ' +str(len(df))+ ' targets',size = 15)
plt.plot (df['s_true'],df['s_fitted'],'b.',label='_nolegend_',markersize=8,alpha=0.4)
plt.plot (df['s_true'],df['s_true'],'g-',label='_nolegend_')
#plt.plot (df['s_true_log'],df['s_true_log']+err_1,'y-.',label='_nolegend_')
#plt.plot (df['s_true_log'],df['s_true_log']-err_1,'y-.',label='_nolegend_')
#plt.plot (df['s_true_log'][df['s_fitted']<low_cut],df['s_fitted_log'][df['s_fitted']<low_cut],'r.',markersize=8,label = '_nolegend_')
#plt.plot (df['s_true_log'][df['s_fitted']>high_cut],df['s_fitted_log'][df['s_fitted']>high_cut],'r.',markersize=8, label = '_nolegend_')
#plt.plot (df['s_true_log'][(df['s_fitted']<high_cut) & ( df['s_fitted']> df['s_true']+err_1 ) ],df['s_fitted_log'][(df['s_fitted']<high_cut)
# & ( df['s_fitted']> df['s_true']+err_1 ) ],'.',color = 'orange',markeredgecolor='black',markersize=8,label = 's_true + 0.25 < s_fitted < 2.5')
#plt.plot (df['s_true_log'][(df['s_fitted']<df['s_true']-err_1) & ( df['s_fitted']> low_cut ) ],df['s_fitted_log'][(df['s_fitted']<df['s_true']-err_1)
# & ( df['s_fitted']> low_cut ) ],'.',color = 'orange',markeredgecolor='black', markersize=8, label = '0.28 < s_fitted < s_true - 0.25')
#plt.plot (df['s_true_log'],df['s_true_log']-err_2,'y-.',label='_nolegend_')
#plt.plot (df['s_true_log'],df['s_true_log']+err_2,'y-.',label='_nolegend_')
#plt.plot (df['s_true_log'][(df['s_fitted']<df['s_true']+err_1) & ( df['s_fitted']> df['s_true']+err_2 ) ],df['s_fitted_log'][(df['s_fitted']<df['s_true']+err_1)
# & ( df['s_fitted']>df['s_true']+err_2) ],'.',markersize=8,color = '#6BFF33',markeredgecolor='black', label = 's_true + 0.1 < s_fitted < s_true + 0.25')
#plt.plot (df['s_true_log'][(df['s_fitted']<df['s_true']-err_2) & ( df['s_fitted']> df['s_true']-err_1 ) ],df['s_fitted_log'][(df['s_fitted']<df['s_true']-err_2)
# & ( df['s_fitted']>df['s_true']-err_1) ],'.',markersize=8,color = '#6BFF33',markeredgecolor='black', label = 's_true - 0.25 < s_fitted < s_true - 0.1')
#plt.axis([min(df['s_true']), max(df['s_true']), low_cut, high_cut])
#plt.legend()
plt.xlabel('s_true_log',size = 15)
plt.ylabel('s_fitted_log', size =15)
plt.grid()
plt.figure(4)
plt.plot(df['s_true'],df['s_true'],'g-')
plt.plot(df_s_equal_1['s_true'],df_s_equal_1['s_fitted'],'k.',alpha=0.7)
plt.plot(df['s_true'],s_deg,'g.',alpha=0.4)
plt.plot(s_deg_['s_true'],s_deg_['s_fitted'],'k.',alpha=0.7)
plt.plot(s_err_up['s_true'],s_err_up['s_fitted'],'c.',alpha=0.4 )
plt.plot(s_err_down['s_true'],s_err_down['s_fitted'],'c.',alpha=0.4 )
plt.plot(s_scatter_1['s_true'],s_scatter_1['s_fitted'],'r.',alpha=0.7)
plt.plot(s_scatter_2['s_true'],s_scatter_2['s_fitted'],'r.',alpha=0.7)
plt.plot(s_scatter_3['s_true'],s_scatter_3['s_fitted'],'r.',alpha=0.7)
plt.plot(s_scatter_4['s_true'],s_scatter_4['s_fitted'],'r.',alpha=0.7)
'''
plt.figure(4)
plt.plot(df['q_fitted_log'],np.abs(df['s_fitted_log']),'b.',alpha=0.5)
plt.figure(5)
plt.plot(df['q_true_log'],np.abs(df['s_true_log']),'r.',alpha=0.5)
'''
plt.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|