| column | dtype | range / values |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 2–616 |
| content_id | string | length 40–40 |
| detected_licenses | list | 0–69 items |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | 1–1 items |
| author_id | string | length 0–212 |

The sample rows below follow this schema, one field value at a time, with bare `|` lines marking field boundaries; the content field carries the full source file.
fc51adaae6182f44f71b251e187601052c9c9286
|
06d5e774f6494de2ebedf196b9ef72eb8f1c5181
|
/Homework 1/PythonReview4.py
|
aad86b5f8fea3b47df6a9feca1d6ff507a84917c
|
[] |
no_license
|
tjmann95/ECEC
|
36b8220b6d82771051c6685d0a57d9c7ff20a812
|
72f061e1b36c36a6d4d962cae8ebcde84d461c1c
|
refs/heads/master
| 2021-05-12T04:30:06.084136
| 2018-03-15T19:09:23
| 2018-03-15T19:09:23
| 117,163,653
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,147
|
py
|
print "Python review Part 4 - Calculations and Math Functions"
# import the math module as follows.
import math
# Get help on the math module by uncommenting this statement.
# help(math)
message1 = """\n1. Find the absolute value of the floating point number -2.34 using a method from the math module.
You must not use the __builtin__ method abs(), but a similar math module method starting with f.
"""
print message1
x = -2.34
# Add your code for exercise 1 here. Be sure to print out the results.
message2 = """\n2. A right triangle has legs a = 5.0, b = 12.0
Find the hypotenuse c using the sqrt() method from the math module.
"""
print message2
a = 5.0
b = 12.0
# Add your code for exercise 2 here. Be sure to print out the results.
message3 = """\n\n3. Find the angle B opposite the longer side b using the relation:
sin(B) = b/c
"""
print message3
# Add your code for exercise 3 here. Use the asin() function, which by definition returns radians.
message4 = """\n\n4. Express the angle B in degrees using math.degrees()
"""
print message4
# Add your code for exercise 4 here.
message5 = """\n\n5. Express 1000 (base 10) in base 2 using the built-in bin() command. (bin, short for binary).
"""
print message5
# Add your code for exercise 5 here.
message6 = """\n\n6a. Convert the binary number n2 = '10100100010000100000' back to base 10 using the built-in int() command
with an optional second argument to specify the base.
"""
print message6
n2 = "10100100010000100000"
# Add your code for exercise 6a here.
print "b. Check you work by converting back to base 2 using bin()."
# Add your code for exercise 6b here.
# * * * EXERCISE 7 * * *
message7a = """\n\n7a. Using math.log() with two arguments, find the log of 10,000,000 in base 1000. (one thousand)
"""
print message7a
# Add your code for exercise 7a here. Be sure to print out the answer using a complete sentence.
message7b = """\n\n7b. Using math.pow() find 1000 to the seven thirds power. Beware of the integer division!
"""
print message7b
# Add your code for exercise 7b here. Be sure to print out the answer using a complete sentence.
|
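The exercise prompts above already name the calls to use (math.fabs, math.sqrt, math.asin, math.degrees, the built-ins bin() and int(s, 2), math.log with a base argument, and math.pow). A minimal solution sketch, written in Python 2 to match the file above; it is illustrative and not part of the repository file:

import math

print math.fabs(-2.34)                    # 1. absolute value via the math module -> 2.34
c = math.sqrt(5.0 ** 2 + 12.0 ** 2)       # 2. hypotenuse of the 5-12-13 right triangle -> 13.0
print c
B = math.asin(12.0 / c)                   # 3. angle opposite the longer leg, in radians
print B
print math.degrees(B)                     # 4. the same angle in degrees (about 67.4)
print bin(1000)                           # 5. '0b1111101000'
n10 = int("10100100010000100000", 2)      # 6a. back to base 10 -> 672800
print n10
print bin(n10)                            # 6b. round-trip check
print math.log(10000000, 1000)            # 7a. log base 1000 of 10,000,000 -> about 2.333
print math.pow(1000, 7.0 / 3)             # 7b. 7.0/3 keeps the exponent a float, avoiding integer division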
[
"tjmann95@gmail.com"
] |
tjmann95@gmail.com
|
24ff41d2a1eba0d4aeb39f218e38e523886b82b1
|
f53798259b750548eac332086f5df5300d2b2254
|
/swig/python/python_rpyc_server.py
|
17fd5553900a8218692d0aa1854516636e6d5bad
|
[
"BSD-2-Clause"
] |
permissive
|
chen--oRanGe/thrill
|
2948511734189486f34116c9ada4c962e642c6ee
|
f272348479cd1c9b4226300048959a95c31429b3
|
refs/heads/master
| 2021-01-16T17:08:38.455264
| 2015-09-27T16:21:14
| 2015-09-27T16:21:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,492
|
py
|
#!/usr/bin/env python
##########################################################################
# swig/python/python_rpyc_server.py
#
# Part of Project Thrill.
#
# Copyright (C) 2015 Timo Bingmann <tb@panthema.net>
#
# All rights reserved. Published under the BSD-2 license in the LICENSE file.
##########################################################################
import sys
import marshal
import types
import rpyc
import thrill
class RpcDIA():
def __init__(self, dia):
self._dia = dia
def AllGather(self):
return self._dia.AllGather()
def Size(self):
return self._dia.Size()
def Map(self, map_function):
code1 = marshal.loads(map_function)
func1 = types.FunctionType(code1, globals())
return RpcDIA(self._dia.Map(func1))
def ReduceBy(self, key_extractor, reduce_function):
code1 = marshal.loads(key_extractor)
func1 = types.FunctionType(code1, globals())
code2 = marshal.loads(reduce_function)
func2 = types.FunctionType(code2, globals())
return RpcDIA(self._dia.ReduceBy(func1, func2))
class RpcContext():
def __init__(self, host_ctx, my_host_rank):
self._ctx = thrill.PyContext(host_ctx, my_host_rank)
def Generate(self, generator_function, size):
code1 = marshal.loads(generator_function)
function1 = types.FunctionType(code1, globals())
return RpcDIA(self._ctx.Generate(function1, size))
def Distribute(self, array):
return RpcDIA(self._ctx.Distribute(array))
class MyService(rpyc.Service):
def on_connect(self):
# code that runs when a connection is created
        # (to init the service, if needed)
print("hello client")
pass
def on_disconnect(self):
# code that runs when the connection has already closed
# (to finalize the service, if needed)
print("client disconnected")
pass
def exposed_Create(self, my_host_rank, endpoints):
print("Creating thrill context for rank",
my_host_rank, "endpoints", endpoints)
host_ctx = thrill.HostContext(my_host_rank, endpoints, 1)
return RpcContext(host_ctx, 0)
if __name__ == "__main__":
from rpyc.utils.server import ThreadedServer
t = ThreadedServer(MyService, port=int(sys.argv[1]),
protocol_config={"allow_public_attrs": True})
t.start()
##########################################################################
|
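The service above exposes Create and ships user functions across the wire as marshalled code objects. A hypothetical client-side sketch; the host, port, and endpoint values are placeholders, and it assumes the server above is already running against a working thrill build:

import marshal
import rpyc

def square(x):
    return x * x

# Connect to a server started as: python python_rpyc_server.py 18861
conn = rpyc.connect("localhost", 18861, config={"allow_public_attrs": True})
ctx = conn.root.Create(0, ["localhost:10000"])      # exposed_Create -> RpcContext
dia = ctx.Distribute([1, 2, 3, 4])                  # wrap a small array in a DIA
squared = dia.Map(marshal.dumps(square.__code__))   # functions travel as marshalled code objects
print(squared.AllGather())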
[
"tbgit@panthema.net"
] |
tbgit@panthema.net
|
f2bb534fa1b683ba85fc3a83e9e250269fa4c85b
|
7b1de4a2607e3125b719c499a05bf6e2d3ec532d
|
/exceptions/chaining_demo.py
|
714dc5ff28adfb14c75345702632fc8819a3e118
|
[] |
no_license
|
ganqzz/sandbox_py
|
61345ac7bddb09081e02decb78507daa3030c1e8
|
cc9e1ecca2ca99f350a3e2c3f51bbdb5eabc60e1
|
refs/heads/master
| 2022-12-01T21:54:38.461718
| 2021-09-04T03:47:14
| 2021-09-04T03:47:14
| 125,375,767
| 0
| 1
| null | 2023-04-16T00:55:51
| 2018-03-15T14:00:47
|
Python
|
UTF-8
|
Python
| false
| false
| 830
|
py
|
def func():
raise ValueError('from func()')
# set __cause__ = None
def demo1():
try:
func()
except ValueError:
raise RuntimeError('from demo1()')
# set __cause__ = e
def demo2():
try:
func()
except ValueError as e:
raise RuntimeError('from demo2()') from e
# set __cause__ = None, and suppress chaining
def demo3():
try:
func()
except ValueError:
raise RuntimeError('from demo3()') from None
def run_demo(f):
print('---', f.__name__)
try:
f()
except Exception as e:
print(e)
print('__context__:', repr(e.__context__))
print('__cause__:', repr(e.__cause__))
print()
if __name__ == "__main__":
# demo1()
# demo2()
# demo3()
run_demo(demo1)
run_demo(demo2)
run_demo(demo3)
|
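For reference, the three demos differ only in what lands in __context__ and __cause__; on Python 3.7+ the script prints roughly:

--- demo1
from demo1()
__context__: ValueError('from func()')
__cause__: None

--- demo2
from demo2()
__context__: ValueError('from func()')
__cause__: ValueError('from func()')

--- demo3
from demo3()
__context__: ValueError('from func()')
__cause__: None

Note that demo1 and demo3 look identical through these two attributes alone; the difference is e.__suppress_context__ (True only for demo3), which controls whether a printed traceback shows the "During handling of the above exception..." chain.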
[
"ganqzz@users.noreply.github.com"
] |
ganqzz@users.noreply.github.com
|
a70df48b3d89a7e34053a6ef0765c32639ea0b8b
|
e6dab5aa1754ff13755a1f74a28a201681ab7e1c
|
/.parts/lib/django-1.4/tests/regressiontests/localflavor/it/tests.py
|
cce0b49576318774ae32ed146df39c7063a0b7b5
|
[] |
no_license
|
ronkagan/Euler_1
|
67679203a9510147320f7c6513eefd391630703e
|
022633cc298475c4f3fd0c6e2bde4f4728713995
|
refs/heads/master
| 2021-01-06T20:45:52.901025
| 2014-09-06T22:34:16
| 2014-09-06T22:34:16
| 23,744,842
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 111
|
py
|
/home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.4/tests/regressiontests/localflavor/it/tests.py
|
[
"ron.y.kagan@gmail.com"
] |
ron.y.kagan@gmail.com
|
6575fec3682a6d786f060de88588345e6a9bbe2d
|
6521d29e4bd3a5178f64809c99695fdf1db25cd8
|
/metadata.py
|
02cc815fe8dc204aad5e2833bba323d90cff179a
|
[] |
no_license
|
xdcesc/rnn_kws
|
d00b7bd76fc2cdeed625ab39a766a864c0b63a64
|
1a3a49086c5988f549dba1276d69ca41af9fe5d7
|
refs/heads/master
| 2022-05-19T08:54:33.516194
| 2020-04-05T19:27:59
| 2020-04-05T19:27:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,229
|
py
|
"""
Converts raw TIMIT data into a pickle dump which can be used during training
"""
import numpy as np
import pickle
import os
import utils
import json
from utils import listdir
class timit_data():
def __init__(self, type_, config_file):
self.config = config_file
self.mode = type_
self.db_path = config_file['dir']['dataset']
# fold phones in list to the phone which is the key e.g. 'ao' is 'collapsed' into 'aa'
self.replacement = utils.replacement_dict()
feature_dim = self.config['n_fbank'] + self.config['n_mfcc']
self.pkl_name = self.db_path + self.mode + '_rnn_ctc_' + str(feature_dim) + '.pkl'
self.win_len, self.win_step = config_file['window_size'], config_file['window_step']
# Generate and store pickle dump
def gen_pickle(self):
# Return if already exists
if os.path.exists(self.pkl_name):
print("Found pickle dump for", self.mode)
with open(self.pkl_name, 'rb') as f:
return pickle.load(f)
print("Generating pickle dump for", self.mode)
list_features, list_phones = [], []
base_pth = self.db_path + self.mode
all_phones = set()
# Phone distribution is used to calculate weights
num_distribution = {}
# Iterate over entire dataset
for dialect in sorted(listdir(base_pth)):
print("Dialect:", dialect)
for speaker_id in sorted(listdir(os.path.join(base_pth, dialect))):
data = sorted(os.listdir(os.path.join(base_pth, dialect, speaker_id)))
wav_files = [x for x in data if x.split('.')[-1] == 'wav'] # all the .wav files
for wav_file in wav_files:
if wav_file in ['SA1.wav', 'SA2.wav']:
continue
wav_path = os.path.join(base_pth, dialect, speaker_id, wav_file)
final_vec = utils.read_wav(wav_path, winlen=self.config['window_size'],
winstep=self.config['window_step'],
fbank_filt=self.config['n_fbank'], mfcc_filt=self.config['n_mfcc'])
phenome_path = wav_path[:-3] + 'PHN' # file which contains the phenome location data
# phones in current wav file
cur_phones = []
with open(phenome_path, 'r') as f:
a = f.readlines()
for phenome in a:
s_e_i = phenome[:-1].split(' ') # start, end, phenome_name e.g. 0 5432 'aa'
start, end, ph = int(s_e_i[0]), int(s_e_i[1]), s_e_i[2]
# collapse into father phone
for father, list_of_sons in self.replacement.items():
if ph in list_of_sons:
ph = father
break
# update distribution
all_phones.add(ph)
if ph not in num_distribution.keys():
num_distribution[ph] = 0
num_distribution[ph] += 1
cur_phones.append(ph)
# Append current recording to the main list
list_features.append(final_vec)
list_phones.append(cur_phones)
# Each item in to_return is a list corresponding to a single recording
# Each recording is in turn a list of tuples of (ph, feature_vector) for each frame
if self.mode == 'TRAIN':
# Normalise feature vectors
np_arr = np.concatenate(list_features, axis=0)
print(np_arr.shape)
np_mean = np.mean(np_arr, axis=0)
np_std = np.std(np_arr, axis=0)
# np_mean = np.zeros(np_mean.shape)
# np_std = np.ones(np_std.shape)
print("Mean:", np_mean, "\nStd. Dev:", np_std)
# Weights are inversely proportional to number of phones encountered
num_distribution = {k: 1 / v for k, v in num_distribution.items()}
total_ph = sum(num_distribution.values())
num_distribution = {k: v / total_ph for k, v in num_distribution.items()}
# Dump mapping from id to phone. Used to convert NN output back to the phone it predicted
phones_to_id = {}
for ph in sorted(all_phones):
phones_to_id[ph] = (len(phones_to_id), num_distribution[ph])
phones_to_id['PAD'] = (len(phones_to_id), 0)
# Dump this mapping
fname = self.config['dir']['dataset'] + 'phone_mapping.json'
with open(fname, 'w') as f:
json.dump(phones_to_id, f)
to_return = list(zip(list_features, list_phones))
# Dump database
with open(self.pkl_name, 'wb') as f:
pickle.dump(to_return, f)
print("Dumped pickle")
return to_return
if __name__ == '__main__':
config_file = {'dir': {'dataset': '../datasets/TEST/'}, 'feat_dim': 38}
a = timit_data('TEST', config_file)
a.gen_pickle()
|
[
"vaidya.mithilesh@gmail.com"
] |
vaidya.mithilesh@gmail.com
|
24e9ee9d54dea93e03ce5075db2d76c34a09ff41
|
005ecccc021f2c11f3e847204a1fa6fb410ea071
|
/sumofdigits.py
|
05f3dbcc19547a37476d6d46602af6006eb56b2f
|
[] |
no_license
|
pythoncrackers/My-Pet-Projects
|
7f86729a35dd3f4db26d9f8ae7a2639bfd847002
|
58d67a872a8ca695f50c87be37e189d05776c9c2
|
refs/heads/master
| 2020-03-15T01:20:59.431507
| 2018-01-16T16:43:39
| 2018-01-16T16:43:39
| 131,891,757
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
number = eval(input("Enter a number between 0 and 1000: "))
unitdigit = number % 10
print("The units digit is: " + str(unitdigit))
tensdigit = (number // 10) % 10
print("The tens digit is: " + str(tensdigit))
hundredsdigit = (number // 10) // 10
print("The hundreds digit is: " + str(hundredsdigit))
sum = unitdigit + tensdigit + hundredsdigit
print("The sum of the digits is " + str(sum))
|
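The same place-value arithmetic can be written with divmod; an illustrative variant (not part of the repository file) that also keeps the hundreds digit in the 0-9 range for an input of exactly 1000:

number = int(input("Enter a number between 0 and 1000: "))
rest, units = divmod(number, 10)     # rest = number // 10, units = number % 10
rest, tens = divmod(rest, 10)
hundreds = rest % 10
print("The sum of the digits is " + str(units + tens + hundreds))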
[
"noreply@github.com"
] |
pythoncrackers.noreply@github.com
|
0623b8d9d3e3ae2c7e57ef1e836c62f6734a8628
|
0c3879648d76661007d11cec86f04a805c3052d7
|
/manage.py
|
aaa5d3cfeb1dd2a39f5101a8e1f54334bc24bae8
|
[] |
no_license
|
arona14/sondage
|
421e24ba076f09472fbf96c30d303173f520f79e
|
eeb1df6370a7febbf0e910b44f56497064378d68
|
refs/heads/master
| 2020-03-17T14:54:33.143085
| 2018-05-16T16:40:53
| 2018-05-16T16:40:53
| 133,690,652
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 805
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sondage.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"arona@ctsfares.com"
] |
arona@ctsfares.com
|
7b390631d83e27aba0ba74c9430946cd74309628
|
ca3bd539970db1203e920c0eea63dcba5709ee33
|
/example_47/2_merge_text_files.py
|
52f491b1a9c480c7f92a5e1a81ee7afb750ff734
|
[] |
no_license
|
shawnbae/RPA
|
855ca534e5ae472b794b970f28d03ba09158e956
|
49850f7e2172224aa79f538f04d119bb9f1a7f59
|
refs/heads/master
| 2023-04-27T12:14:55.175489
| 2021-05-16T13:35:17
| 2021-05-16T13:35:17
| 328,898,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,568
|
py
|
#-*-coding:utf-8
import time
import os
# Print a message saying the job is starting.
print("Process Start")
# Record the time at which the job starts.
start_time = time.time()
# Name of the folder holding the files to be merged into one.
directory = "personal_info"
# Define the name of the output file.
outfile_name = "merged_ID.txt"
# Create the output file. An empty text file is created.
out_file = open(outfile_name, 'w')
# List the contents of the folder.
input_files = os.listdir(directory)
# Read the folder's contents one by one and merge them.
# Take the file names stored in input_files one at a time.
for filename in input_files:
    # Occasionally non-text files may be mixed in; filter them out.
    if ".txt" not in filename:
        continue
    # If it really is a text file, read it.
    file = open(directory + "/" + filename)
    # Load the file's contents as a string.
    content = file.read()
    # Write the contents into the output file.
    out_file.write(content + "\n\n")
    # Close the file that was just read.
    file.close()
# Close the output file.
out_file.close()
# Print a message saying the job is done.
print("Process Done.")
# Print how many seconds the whole job took.
end_time = time.time()
print("The Job Took " + str(end_time - start_time) + " seconds.")
|
[
"tngks0315@naver.com"
] |
tngks0315@naver.com
|
94002f0974151a37f02ed60d8ad4ad3e5621c075
|
7ba9f32bc68f127c4851e7fe74d11e03ff185a3c
|
/pythonCapstoneProjects/nextPrimeNumber.py
|
5ab3a38b64eef2446c3116cdc9617191fde1db30
|
[] |
no_license
|
Hrishabkumr/Python
|
3a8fad08a28c4828fa6317bb63b1cc0c6fcbaa39
|
aff832ebe50de7aa1f7e83c76207c9fd73c079b9
|
refs/heads/main
| 2023-04-08T21:26:42.374066
| 2021-04-17T13:45:39
| 2021-04-17T13:45:39
| 358,887,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
# Next Prime Number
# Generate prime numbers until
# the user chooses to stop
def isPrime(x):
"""
Checks whether the given
number x is prime or not
"""
if x == 2:
return True
if x % 2 == 0:
return False
for i in range(3, int(x**0.5)+1, 2):
if x % i == 0:
return False
return True
def genPrime(currentPrime):
"""
Returns the next prime
after the currentPrime
"""
newPrime = currentPrime + 1
while True:
if not isPrime(newPrime):
newPrime += 1
else:
break
return newPrime
def main(): # Wrapper function
currentPrime = 2
while True:
answer = input('Would you like to see the next prime? (Y/N) ')
if answer.lower().startswith('y'):
print(currentPrime)
currentPrime = genPrime(currentPrime)
else:
break
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
Hrishabkumr.noreply@github.com
|
168beaa8e57e3cab35e09d0b82d0e2cc748723dd
|
243e0c5c721ef4053f5f6b2a581bc8da4f5c2623
|
/s3-trigger-lambda.py
|
aa5c2233536e8e01e3e35b197eeeb0056cadc9e7
|
[] |
no_license
|
JARADES-M/image-detector-cli-lambda
|
07924ae583ad6d6004cfd4eea766039ed6739aa0
|
a6171c0f8d4f283a445c5c31a9e2521eff00ca56
|
refs/heads/main
| 2023-06-28T04:40:36.486097
| 2021-07-19T11:56:56
| 2021-07-19T11:56:56
| 387,447,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,031
|
py
|
#Script to create lambda function using aws
import boto3
from urllib.parse import unquote_plus
def label_function(bucket, name):
"""This takes an S3 bucket and a image name!"""
print(f"This is the bucketname {bucket} !")
print(f"This is the imagename {name} !")
rekognition = boto3.client("rekognition")
response = rekognition.detect_labels(
Image={
"S3Object": {
"Bucket": bucket,
"Name": name,
}
},
)
labels = response["Labels"]
print(f"I found these labels {labels}")
return labels
def lambda_handler(event, context):
"""This is a computer vision lambda handler"""
print(f"This is my S3 event {event}")
for record in event["Records"]:
bucket = record["s3"]["bucket"]["name"]
print(f"This is my bucket {bucket}")
key = unquote_plus(record["s3"]["object"]["key"])
print(f"This is my key {key}")
my_labels = label_function(bucket=bucket, name=key)
return my_labels
|
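lambda_handler above walks event["Records"] in the standard S3 put-notification shape, so a local smoke test can fake that event; the bucket and key names below are placeholders, and the Rekognition call still needs valid AWS credentials:

fake_event = {
    "Records": [
        {
            "s3": {
                "bucket": {"name": "my-image-bucket"},
                "object": {"key": "photos/cat+1.jpg"},  # S3 URL-encodes keys, hence unquote_plus above
            }
        }
    ]
}
print(lambda_handler(fake_event, context=None))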
[
"jaradesm@gmail.com"
] |
jaradesm@gmail.com
|
b8ad77ebbc0f8d213a39e817e72baccde8bfd65f
|
112f02c4be5176907766f7546de7d5d57a2df2af
|
/tutorial/tutorial_56.py
|
aea22de47ee4c3870ffbc5ddf5b27264f1cb2d8c
|
[] |
no_license
|
ankitsingh03/code-python
|
010efdcf157d5411f81b6fbfca74f8b36e3ea263
|
7fd33b9e7f269e3042bdb13a47a26a3da87a68bc
|
refs/heads/master
| 2023-03-25T10:48:23.282822
| 2021-03-18T06:43:27
| 2021-03-18T06:43:27
| 289,693,369
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
name = input("enter your name : ")
i=0
temp = ""
while i<len(name):
if name[i] not in temp:
temp+=name[i]
print(f"{name[i]} : {name.count(name[i])}")
i+=1
|
[
"65447864+ankitsingh03@users.noreply.github.com"
] |
65447864+ankitsingh03@users.noreply.github.com
|
e6936c726159533a94b2699b7b1de4cf9f9a84fa
|
36ad2d376030fbfa5a099973ac8fe1acfd590225
|
/interview_questions/prog025_fb2_trees_from_string.py
|
fac3b8b334509bbb94c2a17f14decf8f1a54b22d
|
[] |
no_license
|
ronaldjuarez/garage
|
254f189099eac70798036363dcd0aa437b7ff70d
|
3fb0a1b71b9e073084589d15eed24b020d563a58
|
refs/heads/master
| 2022-11-14T03:55:34.443669
| 2022-11-06T04:19:47
| 2022-11-06T04:19:47
| 190,933,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 605
|
py
|
def findSubsequence(v, target):
sum = 0
leftIndex = 0
for i in range(len(v)):
sum += v[i]
if sum < target:
continue
elif sum == target:
return True
else:
for j in range(leftIndex, len(v)):
sum -= v[j]
if sum == target:
return True
elif sum < target:
leftIndex = j + 1
break
return False
v = 'node1 (node2, node3, node4 (node6 node7),node8 (node9 node10 node11))'
target = 6
print(findSubsequence(v,target))
|
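findSubsequence is a sliding-window check for a contiguous run of numbers summing to target, so it expects a list of numbers; a quick numeric check, separate from the committed driver above (which passes the tree string instead):

print(findSubsequence([1, 2, 3, 4], 6))    # True: 1 + 2 + 3
print(findSubsequence([1, 2, 3, 4], 11))   # False: no contiguous run sums to 11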
[
"rmjch91@gmail.com"
] |
rmjch91@gmail.com
|
946f998fb7bd65a5fcb5e888f5d6107a563013d8
|
216039e6c419349c25201bf5bb4edc2b12c71cef
|
/config.py
|
00a909a7f2f36c025e88f5e242cfcac2d35fc9b3
|
[] |
no_license
|
gloriaodipo/MyDiary-v1
|
6fe03ed318df4e88811d5845eb639e6b26619145
|
92386a6863c843945f4064654ba82b0afcc84f2e
|
refs/heads/master
| 2022-12-10T10:25:52.058895
| 2018-07-31T20:12:59
| 2018-07-31T20:12:59
| 140,820,686
| 0
| 2
| null | 2022-05-25T01:27:18
| 2018-07-13T08:36:27
|
Python
|
UTF-8
|
Python
| false
| false
| 519
|
py
|
'''Set up environment specific configurations'''
import os
class Config():
'''Parent configuration class'''
DEBUG = False
class Development(Config):
'''Configuration for development environment'''
DEBUG = True
class Testing(Config):
'''Configuration for testing environment'''
DEBUG = True
class Production(Config):
'''Configuration for production environment'''
DEBUG = False
app_config = {
'development': Development,
'testing': Testing,
'production': Production
}
|
[
"gloriaodipo@gmail.com"
] |
gloriaodipo@gmail.com
|
10b1131f1db5cefed204613e153ecc03d1a09ee3
|
d47f5f59fc322aa2a82ea1c3a15f39b200dd95b2
|
/bioinformatics_1/week_1/computing_frequencies.py
|
e8f5fabf0b6ca4a603575bdccc2ae3e7e537d4b5
|
[] |
no_license
|
nayanika2304/BioInformatics
|
baefb229e02397e06c634df44b82e72e9a235c77
|
977219bf4f3e4583b91df6308828d15bb1ad148d
|
refs/heads/master
| 2023-01-01T05:24:58.401988
| 2020-10-20T12:52:30
| 2020-10-20T12:52:30
| 295,566,560
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,819
|
py
|
def pattern_to_number(pattern):
if len(pattern) == 0:
return 0
symbol_to_number = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
n = len(pattern)
prefix = pattern[:n - 1]
symbol = pattern[n - 1]
return 4 * pattern_to_number(prefix) + symbol_to_number[symbol]
def computing_frequencies(text, k):
frequency_array = []
n = len(text)
for i in range(4 ** k):
frequency_array.append(0)
for i in range(n - k + 1):
pattern = text[i:i + k]
j = pattern_to_number(pattern)
frequency_array[j] = frequency_array[j] + 1
result = ""
for item in frequency_array:
result = result + " " + str(item)
return result
pattern="CGGCGTTGGAGTGGAAAA"
print(pattern_to_number(pattern))
#print(computing_frequencies(pattern,7))
#PatternToNumber(AGT) = 4 · PatternToNumber(AG) + SymbolToNumber(T) = 8 + 3 = 11
# where SymbolToNumber(symbol) is the function transforming symbols A, C, G, and T into the respective integers 0, 1, 2, and 3.
# patternToNumber = ATGCAA
# A=0 C=1 G=2 T=3
# 032100
# (4^5 *0=0)+(4^4 *3=768)+(4^3 *2=128)+(4^2 *1=16)+ (4^1 *0=0)+ (4^0 *0=0)=912
#numberToPattern
# To go backward from a base-anything number, you divide the final number (5437 in this case) by the base, 4, k = 7 times, keeping track of the remainder:
#
#
# 5437 / 4 = 1359 R 1
# 1359 / 4 = 339 R 3
# 339 / 4 = 84 R 3
# 84 / 4 = 21 R 0
# 21/4 = 5 R 1
# 5/4 = 1 R 1
# 1/4 = 0 R 1
# Take the remainders from the bottom up and you get:
#
# 1110331, corresponding lexicographically to CCCAGGC
#
# Similarly we can look at going backward from 912 (from previous question) to ATGCAA (k = 6) in the same way:
#
# 912/4 = 228 R 0
# 228/4 = 57 R 0
# 57/4 = 14 R 1
# 14/4 = 3 R 2
# 3/4 = 0 R 3
# 0/4 = 0 R 0
# Bottom up we get 032100 corresponding to ATGCAA.
|
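The comment block above works numberToPattern out by repeated division, but the file only implements the forward direction; a sketch of the inverse (not part of the repository file) that reproduces the 912 -> ATGCAA example:

def number_to_pattern(index, k):
    number_to_symbol = ['A', 'C', 'G', 'T']
    pattern = ''
    for _ in range(k):
        index, remainder = divmod(index, 4)  # peel base-4 digits off the right end
        pattern = number_to_symbol[remainder] + pattern
    return pattern

print(number_to_pattern(912, 6))                            # ATGCAA
print(number_to_pattern(pattern_to_number("ATGCAA"), 6))    # round trip back to ATGCAA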
[
"nayanikabhargava1993@gmail.com"
] |
nayanikabhargava1993@gmail.com
|
b58fec6a84bedcfdb8045e1cb4635d7e6cb3a8a0
|
cf480cdc547d6714038b1691c768ec2b6ee12d81
|
/PracticaSSDD/Downloader.py
|
c82d59aaeb74e62e4989bc563863372380dd532c
|
[] |
no_license
|
sergiosb99/SistemasDistribuidos
|
dd5f3f6a88b6fecb313353ff79e4a0afa97d4a80
|
476e6b22d9c8964881a61ae0f4e44e9143a603cd
|
refs/heads/master
| 2022-04-02T04:06:30.543889
| 2019-12-14T18:41:27
| 2019-12-14T18:41:27
| 218,734,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,622
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import Ice, IceStorm
Ice.loadSlice('trawlnet.ice')
import TrawlNet
import hashlib
try:
import youtube_dl
import os
except ImportError:
print('ERROR: do you have installed youtube-dl library?')
sys.exit(1)
class NullLogger:
def debug(self, msg):
pass
def warning(self, msg):
pass
def error(self, msg):
pass
_YOUTUBEDL_OPTS_ = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
'logger': NullLogger()
}
def download_mp3(url, destination='./'):
'''
Synchronous download from YouTube
'''
options = {}
task_status = {}
def progress_hook(status):
task_status.update(status)
options.update(_YOUTUBEDL_OPTS_)
options['progress_hooks'] = [progress_hook]
options['outtmpl'] = os.path.join(destination, '%(title)s.%(ext)s')
with youtube_dl.YoutubeDL(options) as youtube:
youtube.download([url])
filename = task_status['filename']
# BUG: filename extension is wrong, it must be mp3
filename = filename[:filename.rindex('.') + 1]
return filename + options['postprocessors'][0]['preferredcodec']
def computeHash(filename):
'''SHA256 hash of a file'''
fileHash = hashlib.sha256()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
fileHash.update(chunk)
return fileHash.hexdigest()
class DownloaderI(TrawlNet.Downloader):
def __init__(self):
self.publisher = None
def addDownloadTask(self, link, current = None):
try:
print("Download request: %s" %link)
sys.stdout.flush()
fileInfo = TrawlNet.FileInfo()
fileInfo.name = download_mp3(link)
fileInfo.hash = computeHash(fileInfo.name)
self.publisher.newFile(fileInfo)
return fileInfo
except TrawlNet.DownloadError:
print("Download failed")
return 1
class Downloader(Ice.Application):
def get_topic_manager(self):
key = 'IceStorm.TopicManager.Proxy'
proxy = self.communicator().propertyToProxy(key)
if proxy is None:
print("property", key,"not set")
return None
return IceStorm.TopicManagerPrx.checkedCast(proxy)
def run(self, args):
self.broker = self.communicator()
self.sirviente = DownloaderI()
self.adapter = self.broker.createObjectAdapter("DownloaderAdapter")
self.proxy = self.adapter.addWithUUID(self.sirviente)
print(self.proxy)
sys.stdout.flush()
self.adapter.activate()
###### PUBLISHER UPDATE EVENT #######
        # Notifies subscribers when files have been downloaded
topic_mgr = self.get_topic_manager()
if not topic_mgr:
print('Invalid Proxy')
return 2
topic_name = "UpdateEvents"
try:
topic = topic_mgr.retrieve(topic_name)
except IceStorm.NoSuchTopic:
print("No such topic found, creating")
topic = topic_mgr.create(topic_name)
publisher_event = topic.getPublisher()
updateEvent = TrawlNet.UpdateEventPrx.uncheckedCast(publisher_event)
self.sirviente.publisher = updateEvent
self.shutdownOnInterrupt()
self.broker.waitForShutdown()
return 0
downloader = Downloader()
sys.exit(downloader.main(sys.argv))
|
[
"noreply@github.com"
] |
sergiosb99.noreply@github.com
|
ec5652e19dafe91ba994ad3ce471eadb6ef08f7e
|
39eeb8ee756d5cebdfeed5dee7b57eb770fa075d
|
/src/amfe/solver/initializer.py
|
d4342eecfc009ec566010e888408ff17cca2e5c5
|
[
"BSD-3-Clause"
] |
permissive
|
c-meyer/AMfe
|
9ca37535316b502fdc6731904d99287ae4cff824
|
61658db4f00858da4b4c6ba295ce66fd3ca9d324
|
refs/heads/master
| 2023-06-13T03:44:45.482814
| 2023-06-07T12:08:23
| 2023-06-07T12:08:23
| 187,845,356
| 0
| 0
|
NOASSERTION
| 2023-06-07T12:08:07
| 2019-05-21T13:41:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,120
|
py
|
#
# Copyright (c) 2018 TECHNICAL UNIVERSITY OF MUNICH, DEPARTMENT OF MECHANICAL ENGINEERING, CHAIR OF APPLIED MECHANICS,
# BOLTZMANNSTRASSE 15, 85748 GARCHING/MUNICH, GERMANY, RIXEN@TUM.DE.
#
# Distributed under 3-Clause BSD license. See LICENSE file for more information.
#
import numpy as np
__all__ = ['NullAccelerationInitializer',
'LinearAccelerationInitializer'
]
class NullAccelerationInitializer:
def __init__(self):
return
def get_acceleration(self, t0, q0, dq0):
return np.zeros_like(q0)
class LinearAccelerationInitializer:
def __init__(self, M, f_int, f_ext, K, D, solve_func, solve_function_kwargs):
self.M = M
self.f_int = f_int
self.f_ext = f_ext
self.K = K
self.D = D
self.solve_function = solve_func
self.solve_function_kwargs = solve_function_kwargs
def get_acceleration(self, t0, q0, dq0):
A = self.M(q0, dq0, t0)
b = self.f_ext(q0, dq0, t0) - self.D(q0, dq0, t0) @ dq0 - self.f_int(q0, dq0, t0)
return self.solve_function(A, b, **self.solve_function_kwargs)
|
[
"christian.meyer@tum.de"
] |
christian.meyer@tum.de
|
50cbc0a6b7378fde63f8deb76fd0bda5440b65e5
|
583d03a6337df9f1e28f4ef6208491cf5fb18136
|
/dev4qx/madeira-stub/handlers/stub/niukou.py
|
6b0e4e26edb01f71cb86b882a9492992f2eca35c
|
[] |
no_license
|
lescpsn/lescpsn
|
ece4362a328f009931c9e4980f150d93c4916b32
|
ef83523ea1618b7e543553edd480389741e54bc4
|
refs/heads/master
| 2020-04-03T14:02:06.590299
| 2018-11-01T03:00:17
| 2018-11-01T03:00:17
| 155,309,223
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,845
|
py
|
import json
import logging
import tornado
import tornado.web
from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop
request_log = logging.getLogger("madeira.request")
class NiukouOrderHandler(tornado.web.RequestHandler):
@tornado.gen.coroutine
def post(self):
try:
order_id = self.get_argument("OutTradeNo")
master_test = self.application.sentinel.master_for('madeira', db=3)
            r2 = r1 = master_test.hget('result:' + order_id, 'result')  # read the order status from redis, e.g. r2 = r1 = '100,00;成功' ("success")
            if ',' in r1:
                r1, r2 = r1.split(',')  # r1 = "100", r2 = "00;成功"
data = {"HEADER":{"SEQNO":"Q2015101209294910063131","SECERTKEY":"713B242546AA7239A572AE1E2103A777","APPID":"QuXun","TIMESTAMP":"20151012092949276","VERSION":"V1.0"},"MSGBODY":{"CONTENT":{"ORDERID":"144461347935975","EXTORDER":order_id},"RESP":{"RCODE":"00","RMSG":"OK"}}}
self.finish(json.dumps(data))
if r1 == '0':
IOLoop.current().call_later(10, niukou_callback, order_id, r2)
except Exception:
request_log.exception('FAIL')
def niukou_callback(order_id, result):
if ';' in result:
result = result.split(';')[0]
body = {"HEADER":{"VERSION":"V1.1","TIMESTAMP":'',"SEQNO":'',"APPID":"QuXun","SECERTKEY":"E4CF8702097BF3D3EFF03DF3ACFDEE5E"},"MSGBODY":{"CONTENT":{"ORDERID":"144461587745723","EXTORDER":order_id,"STATUS":"\u6210\u529f","CODE":"0"}}}
body = json.dumps(body)
url = 'http://localhost:8899/callback/niukou'
http_client = AsyncHTTPClient()
try:
request_log.info('YFLOW CALLBACK\n%s', body)
http_client.fetch(url, method='POST', body=body)
except Exception:
request_log.exception('FAIL')
finally:
http_client.close()
|
[
"lescpsn@aliyun.com"
] |
lescpsn@aliyun.com
|
4669336116ce7e560e82aa2f2fc0cf729f1a23d2
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/workdocs_write_f/comment_delete.py
|
79616abb93b0670f4aec69800235ff70fde5d896
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
"""
create-comment : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/workdocs/create-comment.html
describe-comments : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/workdocs/describe-comments.html
"""
write_parameter("workdocs", "delete-comment")
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
e92faf948492965950a7cb2a76b1626fd524149b
|
29e4561c6c84856fc1a9afa955a7be3ed1c03914
|
/arvancloud_ddns/cli.py
|
0e39c9b8b1d70b115fd3bd04e26c028682748eb9
|
[
"MIT"
] |
permissive
|
yazdan/ar-ddns
|
eef211c545a7def29553cf59dcfd907f25248995
|
b8fa8e1fb68533a18c0793880513d189fc915452
|
refs/heads/main
| 2023-03-06T15:11:24.163976
| 2021-02-16T10:16:48
| 2021-02-16T10:16:48
| 339,356,661
| 0
| 0
|
MIT
| 2021-02-16T10:10:22
| 2021-02-16T10:10:21
| null |
UTF-8
|
Python
| false
| false
| 757
|
py
|
import click
from arvancloud_ddns.arvancloud_dns_api import ArvanCloudDNSAPI
from arvancloud_ddns.utils import sync_public_ip
@click.command()
@click.option('--domain', required=True)
@click.option('--api-key', required=True)
@click.option('--cloud', is_flag=True,
help='by activating this option, the usage will be calculated based on four variables: Input traffic, Output traffic, HTTP/HTTPS requests, and the number of Cache Purge requests.')
@click.option('--dry-run', is_flag=True,
help='Use the --dry-run option to run arvancloud-ddns without changing your dns.')
def cli(domain, api_key, dry_run=False, cloud=False):
ar = ArvanCloudDNSAPI(api_key, domain)
ar.setup_records()
sync_public_ip(ar, cloud=cloud)
|
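The click options above are the whole interface; an illustrative in-process invocation (the domain and API key values are placeholders):

from arvancloud_ddns.cli import cli

# standalone_mode=False keeps click from calling sys.exit() after the command returns
cli(["--domain", "example.com", "--api-key", "YOUR_ARVAN_API_KEY", "--dry-run"], standalone_mode=False)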
[
"touhid.arastu@gmail.com"
] |
touhid.arastu@gmail.com
|
f95a4aa88f57289ef80b62ef84d6b9d5d9906074
|
050a01af15654c0708c2e747def7c33fe54cbe02
|
/delivery_order/migrations/0001_initial.py
|
b9d564b5771452e38c9a53435e0538f295bc3d57
|
[] |
no_license
|
crowdbotics-apps/coddwebsite-17461
|
5d38d10294e5a9892028d11122174e9600790ac8
|
eb9f22e52ec3c0c18fef55597c9e8aa3bf7cfe2d
|
refs/heads/master
| 2023-05-13T13:28:47.125601
| 2020-05-27T17:32:07
| 2020-05-27T17:32:07
| 267,378,023
| 0
| 0
| null | 2021-06-10T09:23:01
| 2020-05-27T17:01:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,609
|
py
|
# Generated by Django 2.2.12 on 2020-05-27 17:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('menu', '0001_initial'),
('delivery_user_profile', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Bill',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('total_amount', models.FloatField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('contact_info', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bill_contact_info', to='delivery_user_profile.ContactInfo')),
('profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bill_profile', to='delivery_user_profile.Profile')),
],
),
migrations.CreateModel(
name='PaymentMethod',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('detail', models.TextField()),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField()),
('total_price', models.FloatField()),
('status', models.CharField(max_length=20)),
('notes', models.TextField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('bill', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_bill', to='delivery_order.Bill')),
('item_variant', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='order_item_variant', to='menu.ItemVariant')),
('payment_method', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='order_payment_method', to='delivery_order.PaymentMethod')),
('profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='order_profile', to='delivery_user_profile.Profile')),
],
),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
a4a415836a73c4b26dcef8193f52936e7df8c02a
|
f68710d7a8228805ab19430d72cefd6bbf1c4b91
|
/src/routes/challenge.py
|
e1d954d670e6d9f4edf787dce1f4adc16e6579be
|
[] |
no_license
|
quokkateam/quokka-api
|
1aae2dd9694b09ff426fc8defcc8dd1d6536f016
|
081f22fe3bf81aee18cca05283384c4899923b88
|
refs/heads/master
| 2023-01-21T08:21:52.559310
| 2020-12-03T01:12:46
| 2020-12-03T01:12:46
| 100,311,727
| 0
| 0
| null | 2017-09-18T05:19:33
| 2017-08-14T21:42:08
|
Python
|
UTF-8
|
Python
| false
| false
| 7,013
|
py
|
from flask_restplus import Resource, fields
from src.routes import namespace, api
from src.helpers.user_helper import current_user
from src.helpers.prize_helper import format_prizes
from src.helpers.sponsor_helper import format_sponsors
from src.helpers.challenge_helper import format_challenges, current_week_num
from operator import attrgetter
from src.challenges import universal_challenge_info
from datetime import datetime, timedelta
from src import dbi, logger
from src.models import Challenge
from src.helpers.error_codes import CHALLENGE_NOT_EXIST, INVALID_CHALLENGE_ACCESS
update_challenge_section_model = api.model('Challenge', {
'id': fields.Integer(required=True),
'text': fields.String(required=True),
'points': fields.Integer(required=True)
})
# TODO: Validate JSON field types for 'suggestions' and 'challenges' below
# update_suggestions_model = api.model('Challenge', {
# 'id': fields.Integer(required=True),
# 'suggestions': fields.String(required=True)
# })
# update_challenges_model = api.model('Challenge', {
# 'challenges': fields.String(required=True),
# 'startDate': fields.String(required=True)
# })
@namespace.route('/challenge/<int:week_num>')
class GetChallenge(Resource):
"""Fetch data for a school's challenge page by week number"""
@namespace.doc('get_challenge')
def get(self, week_num):
user = current_user()
if not user:
return '', 403
school = user.school
week_index = week_num - 1
# Get challenges for school, sorted by date
challenges = sorted(school.active_challenges(), key=attrgetter('start_date'))
if week_num < 1 or week_num > len(challenges):
return {'error': 'Challenge does not exist', 'code': CHALLENGE_NOT_EXIST}, 400
curr_week_num = current_week_num(challenges)
# if this is a future week and the user isn't an admin, prevent access
if week_num > curr_week_num and not user.is_admin:
return {'error': 'Week not yet available to access', 'code': INVALID_CHALLENGE_ACCESS}, 400
# Find the challenge requested by week index
challenge = challenges[week_index]
if week_index == 0:
prev_habit = None
next_habit = {
'weekNum': 2,
'name': challenges[1].name
}
elif week_index == len(challenges) - 1:
prev_habit = {
'weekNum': week_index,
'name': challenges[week_index - 1].name
}
next_habit = None
else:
prev_habit = {
'weekNum': week_index,
'name': challenges[week_index - 1].name
}
next_habit = {
'weekNum': week_num + 1,
'name': challenges[week_num].name
}
# if this is the current week and the user isn't an admin, he/she shouldn't have a link to the next week yet
if week_num == curr_week_num and not user.is_admin:
next_habit = None
universal_challenge = universal_challenge_info.get(challenge.slug)
resp = {
'id': challenge.id,
'habit': {
'name': challenge.name,
'slug': challenge.slug,
'icon': universal_challenge['icon'],
'dates': {
'start': datetime.strftime(challenge.start_date, '%m/%d/%Y'),
'end': datetime.strftime(challenge.end_date, '%m/%d/%Y')
}
},
'overview': universal_challenge['overview'],
'challenge': {
'text': challenge.text,
'points': challenge.points
},
'prizes': format_prizes(challenge.active_prizes()),
'sponsors': format_sponsors(school.sponsors),
'suggestions': challenge.suggestions,
'adjHabits': {
'prev': prev_habit,
'next': next_habit
},
'links': universal_challenge['links'],
'extraInfo': universal_challenge['extra_info']
}
return resp
@namespace.route('/challenge/challenge')
class UpdateChallengeSection(Resource):
"""Save the text and points for a weekly challenge"""
@namespace.doc('update_challenge_section')
@namespace.expect(update_challenge_section_model, validate=True)
def put(self):
user = current_user()
if not user or not user.is_admin:
return '', 403
challenge = dbi.find_one(Challenge, {'id': api.payload['id']})
if not challenge:
logger.error('No challenge found for id: {}'.format(api.payload['id']))
return 'Challenge required to update text and points', 500
dbi.update(challenge, {
'text': api.payload['text'],
'points': api.payload['points'] or 0
})
return {'text': challenge.text, 'points': challenge.points}
@namespace.route('/challenge/suggestions')
class UpdateSuggestions(Resource):
"""Save the suggestions for a weekly challenge"""
@namespace.doc('update_suggestions')
# @namespace.expect(update_suggestions_model, validate=True)
def put(self):
user = current_user()
if not user or not user.is_admin:
return '', 403
challenge = dbi.find_one(Challenge, {'id': api.payload['id']})
if not challenge:
logger.error('No challenge found for id: {}'.format(api.payload['id']))
return 'Challenge required to update text and points', 500
dbi.update(challenge, {'suggestions': api.payload['suggestions']})
return {'suggestions': challenge.suggestions}
@namespace.route('/challenges')
class RestfulChallenges(Resource):
"""Fetch all challenges for a school"""
@namespace.doc('get_challenges')
def get(self):
user = current_user()
if not user:
return '', 403
# Get challenges for school, sorted by date
challenges = sorted(user.school.active_challenges(), key=attrgetter('start_date'))
curr_week_num = current_week_num(challenges)
challenges_data = format_challenges(challenges, user, curr_week_num=curr_week_num)
resp = {
'weekNum': curr_week_num,
'challenges': challenges_data
}
return resp
@namespace.doc('update_challenges')
# @namespace.expect(update_challenges_model, validate=True)
def put(self):
user = current_user()
if not user or not user.is_admin:
return '', 403
try:
start_date = datetime.strptime(api.payload['startDate'], '%m/%d/%y')
except:
return 'Invalid start date', 500
challenge_slugs = [c['slug'] for c in api.payload['challenges']]
school = user.school
challenges = dbi.find_all(Challenge, {
'school': user.school,
'slug': challenge_slugs
})
i = 0
for slug in challenge_slugs:
challenge = [c for c in challenges if c.slug == slug][0]
if i > 0:
start_date = start_date + timedelta(days=7)
end_date = start_date + timedelta(days=6)
dbi.update(challenge, {'start_date': start_date, 'end_date': end_date})
i += 1
challenges = sorted(school.active_challenges(), key=attrgetter('start_date'))
curr_week_num = current_week_num(challenges)
challenges_data = format_challenges(challenges, user, curr_week_num=curr_week_num)
resp = {
'weekNum': curr_week_num,
'challenges': challenges_data
}
return resp
|
[
"benwhittle31@gmail.com"
] |
benwhittle31@gmail.com
|
f3287cdf45f3d65183544c35aca6db06772c239b
|
bd55c7d73a95caed5f47b0031264ec05fd6ff60a
|
/apps/nchat/migrations/0012_auto_20191113_1447.py
|
b7df57dbc71a1d5e13e95d92c30ea5bd1f8098ea
|
[] |
no_license
|
phonehtetpaing/ebdjango
|
3c8610e2d96318aff3b1db89480b2f298ad91b57
|
1b77d7662ec2bce9a6377690082a656c8e46608c
|
refs/heads/main
| 2023-06-26T13:14:55.319687
| 2021-07-21T06:04:58
| 2021-07-21T06:04:58
| 381,564,118
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
# Generated by Django 2.0.5 on 2019-11-13 05:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nchat', '0011_enduser'),
]
operations = [
migrations.AddField(
model_name='enduser',
name='app_id',
field=models.CharField(default=1, max_length=256, verbose_name='app_id'),
preserve_default=False,
),
migrations.AddField(
model_name='enduser',
name='owner_id',
field=models.IntegerField(default=1, verbose_name='owner_id'),
preserve_default=False,
),
]
|
[
"phonehtetpaing1221@gmail.com"
] |
phonehtetpaing1221@gmail.com
|
c1aa620b136310e6ed28bf61e29b75a3d21e84e8
|
76fceb0a152caacbe056d5ac437351e52a77b360
|
/Django_website/music/migrations/0001_initial.py
|
aad5fd5b58076a28e9327167a7f918a79368f4e6
|
[] |
no_license
|
wuyiaishang/django_workplace
|
b4f4e3c32f61491af5f19afd4bb5c664b1cb2bbc
|
f9f0269026a31c1f8bdee3ec9ba8d5eb28002737
|
refs/heads/master
| 2021-01-22T08:13:25.245736
| 2017-02-25T16:22:05
| 2017-02-25T16:22:05
| 81,884,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,238
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-14 22:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('artist', models.CharField(max_length=250)),
('album_title', models.CharField(max_length=500)),
('genre', models.CharField(max_length=100)),
('album_logo', models.CharField(max_length=1000)),
],
),
migrations.CreateModel(
name='Song',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file_type', models.CharField(max_length=10)),
('song_title', models.CharField(max_length=250)),
('album', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='music.Album')),
],
),
]
|
[
"lijh3737ca@gmail.com"
] |
lijh3737ca@gmail.com
|
a3cedf4a666cc934634ed802f992230d70f6b79b
|
ffb2eafcedbc0b2db5337bcac71d26eafa881228
|
/recsys.py
|
0bd37e9bd793e4b93d6535b5487636ce8bdc0b2e
|
[] |
no_license
|
williamcottrell72/RecSys
|
ac7ddb0a5ee637b9c0a66310a5e928ce5400f436
|
92f3fd67b6283f3f3012f103340236d4add786dd
|
refs/heads/master
| 2020-03-27T11:56:41.143034
| 2018-12-25T20:59:53
| 2018-12-25T20:59:53
| 146,516,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,311
|
py
|
# Here is the code for the recommender system. This consists of a deep auto-encoder with a
#'fit' and 'predict' method like sklearn.
import torch
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import pickle as pkl
import torch.autograd as autograd
import torch.nn as nn
#Below we have a helper function just so we don't have to initialize the create_recsys
#dimensions by hand.
def create_recsys(matrix,dropout=.1,latent_features=4,max_iter=100,lr=.001,epochs=5,temperature=1,batch_size=500):
return recsys(matrix,matrix.shape[0],matrix.shape[1],latent_features,dropout,max_iter,epochs,temperature,lr,\
batch_size=batch_size)
class recsys(nn.Module):
def __init__(self,ratings=None,users=100,sites=1000,latent_features=10,\
dropout=.1,max_iter=10,epochs=4,temperature=1,lr=.01,batch_size=500,\
losses=None):
super(recsys,self).__init__()
self.users=users
self.sites=sites
self.dropout=nn.Dropout(p=dropout)
self.max_iter=max_iter
self.lr=lr
self.batch_size=batch_size
self.temperature=temperature
self.ratings=ratings
self.losses=None
self.epochs=epochs
self.linear1=nn.Linear(sites,latent_features)
self.linear2=nn.Linear(latent_features,latent_features)
self.linear3=nn.Linear(latent_features,sites)
# The input x should have shape (number_of_users,sites)
# Ratings needs to be a torch tensor of the same shape as x.
# def get_mask(self,ratings=None):
# try:
# if ratings==None:
# pass
# except:
# self.ratings=ratings
# mask=[]
# for i in range(len(self.ratings)):
# mask.append([0 if math.isnan(self.ratings[i,j]) else 1 for j in range(len(self.ratings[0]))])
# return torch.tensor(mask)
def imputer(self,x=None):
#Need to make a function which takes in a ratings array and returns
#an initial best guess. For now I'll just mask the unkown variables
#print(type(self.ratings))
try:
if x==None:
ratings=self.ratings
except:
ratings=x
ratings[np.isnan(ratings)] = 0
return torch.tensor(ratings).float()
def forward(self,x):
x=self.imputer(x)
x=self.linear1(x)
x=torch.tanh(x)
x=self.linear2(x.float())
x=self.dropout(x.float())
x=torch.tanh(x)
x=self.linear3(x.float())
return x
#Should reconsider loss function. In particular, need to
#think about the denominator. Perhaps better options...
def custom_loss(self,x,y):
ct=0
for i in range(len(x)):
if (torch.norm(x[i])==0) or (torch.norm(y[i])==0):
pass
else:
ct+=1-(x[i]@y[i])/(torch.norm(x[i])*torch.norm(y[i]))
return ct/len(x)
def predict(self,x):
x=self.imputer(x)
return self.forward(x)
def fit(self,ratings=None):
try:
if ratings==None:
ratings=self.ratings
except:
pass
ratings_clean=self.imputer(ratings)
loss_function=nn.MSELoss()
f= open('data/losses','w+')
losses=[]
for i in range(1,self.epochs+1):
optimizer = optim.Adam(self.parameters(),lr=self.lr/i)
print(f'Epoch {i}')
sample_indices=np.random.choice(range(len(ratings_clean)),self.batch_size,replace=False)
sample=ratings_clean[sample_indices]
#print(sample_indices)
for _ in range(self.max_iter):
optimizer.zero_grad()
out = self.forward(sample)
#out = self.forward(ratings_clean)
#loss = loss_function(out,ratings_clean) #This one works!
loss = self.custom_loss(out,sample) #This one works
#loss = self.custom_loss(out,ratings_clean)
losses.append(float(loss.detach().numpy()))
f.write(str(loss.detach().numpy())+',')
loss.backward(retain_graph=True)
optimizer.step()
self.losses=losses
f.close()
|
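The module's opening comment promises sklearn-style fit/predict; an illustrative end-to-end run (the matrix shape and hyperparameters are invented, and note that fit() writes its loss log to data/losses):

import os
import numpy as np

os.makedirs('data', exist_ok=True)                  # fit() opens data/losses for writing
ratings = np.random.rand(100, 50).astype(np.float32)
ratings[np.random.rand(100, 50) < 0.8] = np.nan     # treat ~80% of entries as unrated
model = create_recsys(ratings, latent_features=4, max_iter=10, epochs=2, batch_size=32)
model.fit()
predicted = model.predict(ratings.copy())           # dense (users x sites) reconstruction
print(predicted.shape)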
[
"williamcottrell72@gmail.com"
] |
williamcottrell72@gmail.com
|
0fe45a842cf883f20681fad8051fa1660930b421
|
ab32ef99b14883391da76f8e64c420fdc4c6e25b
|
/config.py
|
ab8f0e8d695d29df8da4836b39559ac74ca678fb
|
[] |
no_license
|
perryraskin/shoppimon-tests
|
21a6e3e45ebaea5774082d00a8eadab16650ef73
|
5ecd453d620de7a20ffb6f359de634efd9a225be
|
refs/heads/master
| 2021-01-19T04:24:38.367725
| 2016-10-10T04:52:14
| 2016-10-10T04:52:14
| 63,161,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
"""Configuration handling
"""
import yaml
import boto3
__config = None
def get_config(path):
global __config
if path.startswith("s3"):
# Use Amazon S3
s3 = boto3.resource('s3')
bucket_name = path.split("/")[2]
key_name = path.split(bucket_name[-1])
# Download object at bucket-name with key-name to tmp.txt
s3.download_file(bucket_name, key_name, "tmp.yaml")
if __config is None:
with open("tmp.yaml", 'r') as stream:
__config = yaml.load(stream)
else:
if __config is None:
with open(path, 'r') as stream:
__config = yaml.load(stream)
return __config
|
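A variant sketch of the same idea that parses the bucket and key explicitly and uses the boto3 client's download_file; the path format and names are assumptions (s3://bucket/key/...):

import boto3
import yaml

def load_s3_yaml(path):
    """Download a YAML file referenced as s3://bucket/key... and parse it."""
    parts = path.split("/")                # ['s3:', '', '<bucket>', '<key part>', ...]
    bucket_name = parts[2]
    key_name = "/".join(parts[3:])
    boto3.client("s3").download_file(bucket_name, key_name, "tmp.yaml")
    with open("tmp.yaml") as stream:
        return yaml.safe_load(stream)      # safe_load avoids executing arbitrary YAML tags

# config = load_s3_yaml("s3://my-config-bucket/app/config.yaml")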
[
"perryraskin@gmail.com"
] |
perryraskin@gmail.com
|
c477b8b68db052eb74041eaadaadcfcda5a16a9c
|
18974b976a9f8afb042f60396a6afacc8ea0636d
|
/producer_server.py
|
4cecf20b8652b947e69a217ba5b6669eaa665378
|
[] |
no_license
|
ajschaeper/frisco_crime_stats
|
158e581612871ee18b35f28b4f69d27d48392d4d
|
de22a1cc62a970a054d0731b42f76a6395f3dc69
|
refs/heads/master
| 2022-11-14T05:51:13.359337
| 2020-06-29T12:53:14
| 2020-06-29T12:58:02
| 275,379,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 616
|
py
|
from kafka import KafkaProducer
import json
import time
class ProducerServer(KafkaProducer):
def __init__(self, input_file, topic, **kwargs):
super().__init__(**kwargs)
self.input_file = input_file
self.topic = topic
def generate_data(self):
with open(self.input_file) as fd:
data = json.load(fd)
for item in data:
message = self.dict_to_binary(item)
self.send(self.topic, message)
time.sleep(1)
def dict_to_binary(self, json_dict):
return json.dumps(json_dict).encode("utf-8")
|
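ProducerServer is just KafkaProducer plus a JSON replay loop; an illustrative setup in which the broker address, topic, and input file name are all placeholders:

producer = ProducerServer(
    input_file="service_calls.json",       # hypothetical JSON array of records
    topic="service-calls",                 # hypothetical topic name
    bootstrap_servers="localhost:9092",
    client_id="producer-demo",
)
producer.generate_data()                   # one message per record, one second apart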
[
"alex@schaeper.io"
] |
alex@schaeper.io
|
7c459fe159f5fd2dcb8c050a691a2fbbc7836477
|
3ad2eb5cefe7ba0c497445de6fbd698c58c03453
|
/game_of_life.py
|
2c669e02174a1d914d25778156e30f43cc1b871f
|
[] |
no_license
|
MaxAdau/game_of_life
|
cd6c3103186e5297970c4566ff51811a6a9e2138
|
74d6fc0aa8d3dc720850c5fb4877d8302b7acbc2
|
refs/heads/master
| 2020-09-28T02:43:29.395889
| 2019-12-08T13:17:45
| 2019-12-08T13:17:45
| 226,670,358
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,208
|
py
|
import tkinter as tk
from time import sleep
# For debugging purpose
from time import time
prev = time()
def delta(txt):
global prev
print(txt)
print('delta = {}'.format(time() - prev))
prev = time()
class Grid:
"""
This class implement the grid that contains all the cells in the game
"""
def __init__(self, canvas, width, height, cell_size):
self.cell_size = cell_size
self.canvas = canvas
# Variable calculated from default ones
self.nbr_cell_width = int(width / cell_size)
self.nbr_cell_height = int(height / cell_size)
# List of all cells in the grid
self.cells = []
def generate_grid(self):
"""
        Generate a grid of empty rectangles
Also instantiate cells that will be used in the game
"""
# Caching method to avoid method lookup in loop
create_rect = self.canvas.create_rectangle
# Loop in Y
for grid_y in range(0, self.nbr_cell_height):
y1 = grid_y * self.cell_size
y2 = grid_y * self.cell_size + self.cell_size
self.cells.append([])
# Loop in X
for grid_x in range(0, self.nbr_cell_width):
x1 = grid_x * self.cell_size
x2 = grid_x * self.cell_size + self.cell_size
# Create the rectangle using canvas and store the rectangle id in the cell
rect_id = create_rect(x1, y1, x2, y2, fill='white')
self.cells[grid_y].append(Cell(grid_x, grid_y, self.canvas, rect_id))
# def clear(self):
# """
# Clear the current grid
# """
# for y, row in enumerate(self.cells):
# for x, cell in enumerate(row):
# # Get the neighbors of a cell, then define the next status
# cell = cells[y][x]
# cell.next_gen = False
# cell.is_alive = False
# cell.draw_cell()
def compute_next_grid(self):
"""
Iterate over all cells and set their next_gen attribute
"""
# Iterate over cells
for y, row in enumerate(self.cells):
for x, cell in enumerate(row):
# Get the neighbors of a cell, then define the next status
nbr_neighbors = self._get_alive_neighbors(cell)
self.cells[y][x].next_gen = self._apply_rules(cell, nbr_neighbors)
def swap_status(self, tkevent):
"""
Change the status of a cell
Called from a click
"""
# Get the position on the grid from the tkinter coordinates
x, y = self._xy_to_grid_idx(tkevent.x, tkevent.y)
# Change the current status of the cell and draw it
cell = self.cells[y][x]
cell.is_alive = not cell.is_alive
cell.draw_cell()
def draw_next_gen(self):
"""
Update the cell.isActivate attribute
and display the grid using the updated value
"""
for y, row in enumerate(self.cells):
for x, cell in enumerate(row):
cell = self.cells[y][x]
cell.is_alive = cell.next_gen
cell.draw_cell()
def _apply_rules(self, cell, nbr_neighbors):
"""
Calculate the next value of a specific cell, either
return True if the cell will be alive
return False if not
"""
# Rules of the game
# Any live cell with two or three neighbors survives
if cell.is_alive and nbr_neighbors in [2, 3]:
return True
# Any dead cell with three live neighbors becomes a live cell
elif not cell.is_alive and nbr_neighbors == 3:
return True
# All other live cells die in the next generation
else:
return False
def _get_alive_neighbors(self, cell):
"""
Return the number of cells alives in the vicinity of the given cell
"""
# Coordinates to apply to current cell to get all 8 neighbors
neighbors = [(-1, -1), (0, -1), (1, -1),
(-1, 0), (1, 0),
(-1, 1), (0, 1), (1, 1)]
# Iterate over neighbors and get the is_alive status
nbr_neighbors = 0
for coordinates in neighbors:
adjusted_x = cell.grid_x + coordinates[0]
adjusted_y = cell.grid_y + coordinates[1]
# Try to get the neighbors
if adjusted_x >= 0 and adjusted_y >= 0:
try:
neighbor = self.cells[adjusted_y][adjusted_x].is_alive
if neighbor:
nbr_neighbors += 1
# We get an error while searching for out of range cells, not a problem
except IndexError:
pass
return nbr_neighbors
def _xy_to_grid_idx(self, x, y):
"""
Translate a x and y tkinter coordinates in a grid xy position
Trick : I use int() to round down my coordinates
"""
return (int(x / self.cell_size),
int(y / self.cell_size))
def _debug(self):
"""
Print all cells and their status
"""
for y, row in enumerate(self.cells):
for x, cell in enumerate(row):
print('cell {} in {}'.format(cell.get_xy(), (x, y)))
class Cell:
"""
This class represent a cell as intended in the game of life
"""
def __init__(self, grid_x, grid_y, canvas, rect_id):
# Position of the cell in the grid
self.grid_x = grid_x
self.grid_y = grid_y
# The canvas used to perform graphical magic
self.canvas = canvas
self.rect_id = rect_id
# Used for the game logic
self.is_alive = False
self.next_gen = False
# Automatically display the cell when instantiate
self.draw_cell()
def draw_cell(self):
"""
Draw the cell
"""
# Cell is alive
if not self.is_alive:
color = 'white'
# Cell is dead
else:
color = 'black'
# Update the rectangle color
self.canvas.itemconfig(self.rect_id, fill=color)
def get_xy(self):
"""
Return x and y in a tuple
"""
return self.grid_x, self.grid_y
def __str__(self):
return '{} : {} --> {}'.format((self.grid_x, self.grid_y),
self.is_alive, self.next_gen)
class Game:
"""
This class contains the game logic
"""
def __init__(self):
# Game default values
width = 600
height = 600
cell_size = 20
# Instantiation of the main windows
self.root = tk.Tk()
        self.root.title("My game of life ! \\o/")
# Instantiation of the frame on which the canvas will be added
# pack() organizes widgets in blocks before placing them in the parent widget
# Without it, the main windows will remain at default size
# https://www.tutorialspoint.com/python/tk_pack.htm
self.frame = tk.Frame(self.root, width=width, height=height)
self.frame.pack()
# Instantiation of the Canvas
# The Canvas widget provides structured graphics facilities for Tkinter
self.canvas = tk.Canvas(self.frame, width=width, height=height)
self.canvas.pack()
# Place buttons and link functions to them
start_button = tk.Button(self.root, text="Start game", command=self.start)
start_button.pack(side=tk.LEFT)
stop_button = tk.Button(self.root, text="Stop it", command=self.stop)
stop_button.pack(side=tk.RIGHT)
# restart_button = tk.Button(self.root, text="Restart", command=self.game_loop)
# restart_button.pack(side=tk.BOTTOM)
# For debug purpose only
# debug_button = tk.Button(self.root, text="Next loop", command=self.game_loop)
# debug_button.pack(side = tk.BOTTOM)
# self.idx = 0
# Create the grid and generate the visible rectangles
self.grid = Grid(self.canvas, width, height, cell_size)
self.grid.generate_grid()
# link the left click action to the swap status function
self.canvas.bind("<Button-1>", self.grid.swap_status)
# Launch the main loop
self.root.mainloop()
def start(self):
"""
Start the game
"""
        # I don't want a prankster changing the status of cells while the game is running
self.canvas.unbind("<Button-1>")
self.game_loop()
def game_loop(self):
"""
Contains the main loop of the game
"""
print('\n---------new_loop---------')
delta('New_loop')
self.grid.compute_next_grid()
delta('After grid.compute_next_grid()')
self.grid.draw_next_gen()
delta('After grid.draw_next_gen')
self.root.after(200, self.game_loop)
delta('After root.after')
# Debug only
# self.idx += 1
# if self.idx == 100:
# self.root.destroy()
def stop(self):
self.root.destroy()
if __name__ == '__main__':
game = Game()
| ["maxadau@gmail.com"] | maxadau@gmail.com |
22574b86c78c4efca091cdc1afb98c19ab3875b9 | 13601ffa0540d448b009ea4ca0f14bf93e78f113 | /macro/myans/lab4_machine_learning_student.py | 9f7ba335e17a0ee4feb2a231429479145ded91cc | [] | no_license | jwl2006/edx_spark | 63caddc4ceff303b127132ade86e8253bc07c061 | 77e5c6b1b491eb17572cdb37aa18f6da01b83092 | refs/heads/master | 2021-05-02T00:52:16.682959 | 2017-01-09T18:09:45 | 2017-01-09T18:09:45 | 78,454,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49,207 | py |
# coding: utf-8
# version 1.0.2
# # **Introduction to Machine Learning with Apache Spark**
# ## **Predicting Movie Ratings**
# #### One of the most common uses of big data is to predict what users want. This allows Google to show you relevant ads, Amazon to recommend relevant products, and Netflix to recommend movies that you might like. This lab will demonstrate how we can use Apache Spark to recommend movies to a user. We will start with some basic techniques, and then use the [Spark MLlib][mllib] library's Alternating Least Squares method to make more sophisticated predictions.
# #### For this lab, we will use a subset of 500,000 ratings that we have included for you in your VM (and on Databricks) from the [movielens 10M stable benchmark rating dataset](http://grouplens.org/datasets/movielens/). However, the same code you write will work for the full dataset, or their latest dataset of 21 million ratings.
# #### In this lab:
# #### *Part 0*: Preliminaries
# #### *Part 1*: Basic Recommendations
# #### *Part 2*: Collaborative Filtering
# #### *Part 3*: Predictions for Yourself
# #### As mentioned during the first Learning Spark lab, think carefully before calling `collect()` on any datasets. When you are using a small dataset, calling `collect()` and then using Python to get a sense for the data locally (in the driver program) will work fine, but this will not work when you are using a large dataset that doesn't fit in memory on one machine. Solutions that call `collect()` and do local analysis that could have been done with Spark will likely fail in the autograder and not receive full credit.
# [mllib]: https://spark.apache.org/mllib/
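# #### For example (an illustration only, using a small throwaway RDD): prefer an RDD action such as
# #### `count()` over collecting every record back to the driver just to inspect or measure it.
# In[ ]:
tinyRDD = sc.parallelize(range(1000))
print 'count() runs on the workers      : %s' % tinyRDD.count()
print 'len(collect()) runs in the driver: %s' % len(tinyRDD.collect())  # fine for tiny data, will not scale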
# ### Code
# #### This assignment can be completed using basic Python and pySpark Transformations and Actions. Libraries other than math are not necessary. With the exception of the ML functions that we introduce in this assignment, you should be able to complete all parts of this homework using only the Spark functions you have used in prior lab exercises (although you are welcome to use more features of Spark if you like!).
# In[1]:
import sys
import os
from test_helper import Test
baseDir = os.path.join('data')
inputPath = os.path.join('cs100', 'lab4', 'small')
ratingsFilename = os.path.join(baseDir, inputPath, 'ratings.dat.gz')
moviesFilename = os.path.join(baseDir, inputPath, 'movies.dat')
# ### **Part 0: Preliminaries**
# #### We read in each of the files and create an RDD consisting of parsed lines.
# #### Each line in the ratings dataset (`ratings.dat.gz`) is formatted as:
# #### `UserID::MovieID::Rating::Timestamp`
# #### Each line in the movies (`movies.dat`) dataset is formatted as:
# #### `MovieID::Title::Genres`
# #### The `Genres` field has the format
# #### `Genres1|Genres2|Genres3|...`
# #### The format of these files is uniform and simple, so we can use Python [`split()`](https://docs.python.org/2/library/stdtypes.html#str.split) to parse their lines.
# #### Parsing the two files yields two RDDS
# * #### For each line in the ratings dataset, we create a tuple of (UserID, MovieID, Rating). We drop the timestamp because we do not need it for this exercise.
# * #### For each line in the movies dataset, we create a tuple of (MovieID, Title). We drop the Genres because we do not need them for this exercise.
# In[2]:
numPartitions = 2
rawRatings = sc.textFile(ratingsFilename).repartition(numPartitions)
rawMovies = sc.textFile(moviesFilename)
def get_ratings_tuple(entry):
""" Parse a line in the ratings dataset
Args:
entry (str): a line in the ratings dataset in the form of UserID::MovieID::Rating::Timestamp
Returns:
tuple: (UserID, MovieID, Rating)
"""
items = entry.split('::')
return int(items[0]), int(items[1]), float(items[2])
def get_movie_tuple(entry):
""" Parse a line in the movies dataset
Args:
entry (str): a line in the movies dataset in the form of MovieID::Title::Genres
Returns:
tuple: (MovieID, Title)
"""
items = entry.split('::')
return int(items[0]), items[1]
ratingsRDD = rawRatings.map(get_ratings_tuple).cache()
moviesRDD = rawMovies.map(get_movie_tuple).cache()
ratingsCount = ratingsRDD.count()
moviesCount = moviesRDD.count()
print 'There are %s ratings and %s movies in the datasets' % (ratingsCount, moviesCount)
print 'Ratings: %s' % ratingsRDD.take(3)
print 'Movies: %s' % moviesRDD.take(3)
assert ratingsCount == 487650
assert moviesCount == 3883
assert moviesRDD.filter(lambda (id, title): title == 'Toy Story (1995)').count() == 1
assert (ratingsRDD.takeOrdered(1, key=lambda (user, movie, rating): movie)
== [(1, 1, 5.0)])
# #### In this lab we will be examining subsets of the tuples we create (e.g., the top rated movies by users). Whenever we examine only a subset of a large dataset, there is the potential that the result will depend on the order we perform operations, such as joins, or how the data is partitioned across the workers. What we want to guarantee is that we always see the same results for a subset, independent of how we manipulate or store the data.
# #### We can do that by sorting before we examine a subset. You might think that the most obvious choice when dealing with an RDD of tuples would be to use the [`sortByKey()` method][sortbykey]. However this choice is problematic, as we can still end up with different results if the key is not unique.
# #### Note: It is important to use the [`unicode` type](https://docs.python.org/2/howto/unicode.html#the-unicode-type) instead of the `string` type as the titles are in unicode characters.
# #### Consider the following example, and note that while the sets are equal, the printed lists are usually in different order by value, *although they may randomly match up from time to time.*
# #### You can try running this multiple times. If the last assertion fails, don't worry about it: that was just the luck of the draw. And note that in some environments the results may be more deterministic.
# [sortbykey]: https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.sortByKey
# In[3]:
tmp1 = [(1, u'alpha'), (2, u'alpha'), (2, u'beta'), (3, u'alpha'), (1, u'epsilon'), (1, u'delta')]
tmp2 = [(1, u'delta'), (2, u'alpha'), (2, u'beta'), (3, u'alpha'), (1, u'epsilon'), (1, u'alpha')]
oneRDD = sc.parallelize(tmp1)
twoRDD = sc.parallelize(tmp2)
oneSorted = oneRDD.sortByKey(True).collect()
twoSorted = twoRDD.sortByKey(True).collect()
print oneSorted
print twoSorted
assert set(oneSorted) == set(twoSorted) # Note that both lists have the same elements
assert twoSorted[0][0] < twoSorted.pop()[0] # Check that it is sorted by the keys
assert oneSorted[0:2] != twoSorted[0:2] # Note that the subset consisting of the first two elements does not match
# #### Even though the two lists contain identical tuples, the difference in ordering *sometimes* yields a different ordering for the sorted RDD (try running the cell repeatedly and see if the results change or the assertion fails). If we only examined the first two elements of the RDD (e.g., using `take(2)`), then we would observe different answers - **that is a really bad outcome as we want identical input data to always yield identical output**. A better technique is to sort the RDD by *both the key and value*, which we can do by combining the key and value into a single string and then sorting on that string. Since the key is an integer and the value is a unicode string, we can use a function to combine them into a single unicode string (e.g., `unicode('%.3f' % key) + ' ' + value`) before sorting the RDD using [sortBy()][sortby].
# [sortby]: https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.sortBy
# In[7]:
def sortFunction(tuple):
""" Construct the sort string (does not perform actual sorting)
Args:
tuple: (rating, MovieName)
Returns:
sortString: the value to sort with, 'rating MovieName'
"""
key = unicode('%.3f' % tuple[0])
value = tuple[1]
return (key + ' ' + value)
print oneRDD.sortBy(sortFunction, True).collect()
print twoRDD.sortBy(sortFunction, True).collect()
# #### If we just want to look at the first few elements of the RDD in sorted order, we can use the [takeOrdered][takeordered] method with the `sortFunction` we defined.
# [takeordered]: https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.takeOrdered
# In[8]:
oneSorted1 = oneRDD.takeOrdered(oneRDD.count(),key=sortFunction)
twoSorted1 = twoRDD.takeOrdered(twoRDD.count(),key=sortFunction)
print 'one is %s' % oneSorted1
print 'two is %s' % twoSorted1
assert oneSorted1 == twoSorted1
# ### **Part 1: Basic Recommendations**
# #### One way to recommend movies is to always recommend the movies with the highest average rating. In this part, we will use Spark to find the name, number of ratings, and the average rating of the 20 movies with the highest average rating and more than 500 reviews. We want to filter out movies with high ratings but fewer than or equal to 500 reviews, because movies with few reviews may not have broad appeal to everyone.
# #### **(1a) Number of Ratings and Average Ratings for a Movie**
# #### Using only Python, implement a helper function `getCountsAndAverages()` that takes a single tuple of (MovieID, (Rating1, Rating2, Rating3, ...)) and returns a tuple of (MovieID, (number of ratings, averageRating)). For example, given the tuple `(100, (10.0, 20.0, 30.0))`, your function should return `(100, (3, 20.0))`
# In[13]:
# TODO: Replace <FILL IN> with appropriate code
# First, implement a helper function `getCountsAndAverages` using only Python
def getCountsAndAverages(IDandRatingsTuple):
""" Calculate average rating
Args:
IDandRatingsTuple: a single tuple of (MovieID, (Rating1, Rating2, Rating3, ...))
Returns:
tuple: a tuple of (MovieID, (number of ratings, averageRating))
"""
id = IDandRatingsTuple[0]
ratings = IDandRatingsTuple[1]
num = len(ratings)
avg = float(sum(ratings))/len(ratings)
return (id, (num, avg))
# In[14]:
# TEST Number of Ratings and Average Ratings for a Movie (1a)
Test.assertEquals(getCountsAndAverages((1, (1, 2, 3, 4))), (1, (4, 2.5)),
'incorrect getCountsAndAverages() with integer list')
Test.assertEquals(getCountsAndAverages((100, (10.0, 20.0, 30.0))), (100, (3, 20.0)),
'incorrect getCountsAndAverages() with float list')
Test.assertEquals(getCountsAndAverages((110, xrange(20))), (110, (20, 9.5)),
'incorrect getCountsAndAverages() with xrange')
# #### **(1b) Movies with Highest Average Ratings**
# #### Now that we have a way to calculate the average ratings, we will use the `getCountsAndAverages()` helper function with Spark to determine movies with highest average ratings.
# #### The steps you should perform are:
# * #### Recall that the `ratingsRDD` contains tuples of the form (UserID, MovieID, Rating). From `ratingsRDD` create an RDD with tuples of the form (MovieID, Python iterable of Ratings for that MovieID). This transformation will yield an RDD of the form: `[(1, <pyspark.resultiterable.ResultIterable object at 0x7f16d50e7c90>), (2, <pyspark.resultiterable.ResultIterable object at 0x7f16d50e79d0>), (3, <pyspark.resultiterable.ResultIterable object at 0x7f16d50e7610>)]`. Note that you will only need to perform two Spark transformations to do this step.
# * #### Using `movieIDsWithRatingsRDD` and your `getCountsAndAverages()` helper function, compute the number of ratings and average rating for each movie to yield tuples of the form (MovieID, (number of ratings, average rating)). This transformation will yield an RDD of the form: `[(1, (993, 4.145015105740181)), (2, (332, 3.174698795180723)), (3, (299, 3.0468227424749164))]`. You can do this step with one Spark transformation
# * #### We want to see movie names, instead of movie IDs. To `moviesRDD`, apply RDD transformations that use `movieIDsWithAvgRatingsRDD` to get the movie names for `movieIDsWithAvgRatingsRDD`, yielding tuples of the form (average rating, movie name, number of ratings). This set of transformations will yield an RDD of the form: `[(1.0, u'Autopsy (Macchie Solari) (1975)', 1), (1.0, u'Better Living (1998)', 1), (1.0, u'Big Squeeze, The (1996)', 3)]`. You will need to do two Spark transformations to complete this step: first use the `moviesRDD` with `movieIDsWithAvgRatingsRDD` to create a new RDD with Movie names matched to Movie IDs, then convert that RDD into the form of (average rating, movie name, number of ratings). These transformations will yield an RDD that looks like: `[(3.6818181818181817, u'Happiest Millionaire, The (1967)', 22), (3.0468227424749164, u'Grumpier Old Men (1995)', 299), (2.882978723404255, u'Hocus Pocus (1993)', 94)]`
# In[16]:
# TODO: Replace <FILL IN> with appropriate code
# From ratingsRDD with tuples of (UserID, MovieID, Rating) create an RDD with tuples of
# the (MovieID, iterable of Ratings for that MovieID)
movieIDsWithRatingsRDD = (ratingsRDD
.map(lambda x:(x[1], x[2])).groupByKey())
print 'movieIDsWithRatingsRDD: %s\n' % movieIDsWithRatingsRDD.take(3)
# Using `movieIDsWithRatingsRDD`, compute the number of ratings and average rating for each movie to
# yield tuples of the form (MovieID, (number of ratings, average rating))
movieIDsWithAvgRatingsRDD = movieIDsWithRatingsRDD.map(getCountsAndAverages)
print 'movieIDsWithAvgRatingsRDD: %s\n' % movieIDsWithAvgRatingsRDD.take(3)
# To `movieIDsWithAvgRatingsRDD`, apply RDD transformations that use `moviesRDD` to get the movie
# names for `movieIDsWithAvgRatingsRDD`, yielding tuples of the form
# (average rating, movie name, number of ratings)
movieNameWithAvgRatingsRDD = (moviesRDD
.join(movieIDsWithAvgRatingsRDD).map(lambda x: (x[1][1][1], x[1][0], x[1][1][0])))
print 'movieNameWithAvgRatingsRDD: %s\n' % movieNameWithAvgRatingsRDD.take(3)
# In[17]:
# TEST Movies with Highest Average Ratings (1b)
Test.assertEquals(movieIDsWithRatingsRDD.count(), 3615,
'incorrect movieIDsWithRatingsRDD.count() (expected 3615)')
movieIDsWithRatingsTakeOrdered = movieIDsWithRatingsRDD.takeOrdered(3)
Test.assertTrue(movieIDsWithRatingsTakeOrdered[0][0] == 1 and
len(list(movieIDsWithRatingsTakeOrdered[0][1])) == 993,
'incorrect count of ratings for movieIDsWithRatingsTakeOrdered[0] (expected 993)')
Test.assertTrue(movieIDsWithRatingsTakeOrdered[1][0] == 2 and
len(list(movieIDsWithRatingsTakeOrdered[1][1])) == 332,
'incorrect count of ratings for movieIDsWithRatingsTakeOrdered[1] (expected 332)')
Test.assertTrue(movieIDsWithRatingsTakeOrdered[2][0] == 3 and
len(list(movieIDsWithRatingsTakeOrdered[2][1])) == 299,
'incorrect count of ratings for movieIDsWithRatingsTakeOrdered[2] (expected 299)')
Test.assertEquals(movieIDsWithAvgRatingsRDD.count(), 3615,
'incorrect movieIDsWithAvgRatingsRDD.count() (expected 3615)')
Test.assertEquals(movieIDsWithAvgRatingsRDD.takeOrdered(3),
[(1, (993, 4.145015105740181)), (2, (332, 3.174698795180723)),
(3, (299, 3.0468227424749164))],
'incorrect movieIDsWithAvgRatingsRDD.takeOrdered(3)')
Test.assertEquals(movieNameWithAvgRatingsRDD.count(), 3615,
'incorrect movieNameWithAvgRatingsRDD.count() (expected 3615)')
Test.assertEquals(movieNameWithAvgRatingsRDD.takeOrdered(3),
[(1.0, u'Autopsy (Macchie Solari) (1975)', 1), (1.0, u'Better Living (1998)', 1),
(1.0, u'Big Squeeze, The (1996)', 3)],
'incorrect movieNameWithAvgRatingsRDD.takeOrdered(3)')
# #### **(1c) Movies with Highest Average Ratings and more than 500 reviews**
# #### Now that we have an RDD of the movies with highest average ratings, we can use Spark to determine the 20 movies with highest average ratings and more than 500 reviews.
# #### Apply a single RDD transformation to `movieNameWithAvgRatingsRDD` to limit the results to movies with ratings from more than 500 people. We then use the `sortFunction()` helper function to sort by the average rating to get the movies in order of their rating (highest rating first). You will end up with an RDD of the form: `[(4.5349264705882355, u'Shawshank Redemption, The (1994)', 1088), (4.515798462852263, u"Schindler's List (1993)", 1171), (4.512893982808023, u'Godfather, The (1972)', 1047)]`
# In[18]:
# TODO: Replace <FILL IN> with appropriate code
# Apply an RDD transformation to `movieNameWithAvgRatingsRDD` to limit the results to movies with
# ratings from more than 500 people. We then use the `sortFunction()` helper function to sort by the
# average rating to get the movies in order of their rating (highest rating first)
movieLimitedAndSortedByRatingRDD = (movieNameWithAvgRatingsRDD
.filter(lambda x: x[2] > 500)
.sortBy(sortFunction, False))
print 'Movies with highest ratings: %s' % movieLimitedAndSortedByRatingRDD.take(20)
# In[19]:
# TEST Movies with Highest Average Ratings and more than 500 Reviews (1c)
Test.assertEquals(movieLimitedAndSortedByRatingRDD.count(), 194,
'incorrect movieLimitedAndSortedByRatingRDD.count()')
Test.assertEquals(movieLimitedAndSortedByRatingRDD.take(20),
[(4.5349264705882355, u'Shawshank Redemption, The (1994)', 1088),
(4.515798462852263, u"Schindler's List (1993)", 1171),
(4.512893982808023, u'Godfather, The (1972)', 1047),
(4.510460251046025, u'Raiders of the Lost Ark (1981)', 1195),
(4.505415162454874, u'Usual Suspects, The (1995)', 831),
(4.457256461232604, u'Rear Window (1954)', 503),
(4.45468509984639, u'Dr. Strangelove or: How I Learned to Stop Worrying and Love the Bomb (1963)', 651),
(4.43953006219765, u'Star Wars: Episode IV - A New Hope (1977)', 1447),
(4.4, u'Sixth Sense, The (1999)', 1110), (4.394285714285714, u'North by Northwest (1959)', 700),
(4.379506641366224, u'Citizen Kane (1941)', 527), (4.375, u'Casablanca (1942)', 776),
(4.363975155279503, u'Godfather: Part II, The (1974)', 805),
(4.358816276202219, u"One Flew Over the Cuckoo's Nest (1975)", 811),
(4.358173076923077, u'Silence of the Lambs, The (1991)', 1248),
(4.335826477187734, u'Saving Private Ryan (1998)', 1337),
(4.326241134751773, u'Chinatown (1974)', 564),
(4.325383304940375, u'Life Is Beautiful (La Vita \ufffd bella) (1997)', 587),
(4.324110671936759, u'Monty Python and the Holy Grail (1974)', 759),
(4.3096, u'Matrix, The (1999)', 1250)], 'incorrect sortedByRatingRDD.take(20)')
# #### Using a threshold on the number of reviews is one way to improve the recommendations, but there are many other good ways to improve quality. For example, you could weight ratings by the number of ratings.
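# #### As a small illustration (not part of the lab; the helper below and its name are our own), one
# #### common weighting scheme is a shrunken average that pulls movies with few ratings toward a global
# #### prior rating:
# In[ ]:
def weightedRating(avgRating, numRatings, globalAvg=3.5, priorWeight=25.0):
    """ Shrink a movie's average rating toward globalAvg when it has few ratings """
    return (numRatings * avgRating + priorWeight * globalAvg) / (numRatings + priorWeight)
# A perfect 5.0 from only 3 raters now ranks below a 4.4 average from 1000 raters
print 'weighted rating for 5.0 over 3 ratings   : %s' % weightedRating(5.0, 3)
print 'weighted rating for 4.4 over 1000 ratings: %s' % weightedRating(4.4, 1000)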
# ## **Part 2: Collaborative Filtering**
# #### In this course, you have learned about many of the basic transformations and actions that Spark allows us to apply to distributed datasets. Spark also exposes some higher level functionality; in particular, Machine Learning using a component of Spark called [MLlib][mllib]. In this part, you will learn how to use MLlib to make personalized movie recommendations using the movie data we have been analyzing.
# #### We are going to use a technique called [collaborative filtering][collab]. Collaborative filtering is a method of making automatic predictions (filtering) about the interests of a user by collecting preferences or taste information from many users (collaborating). The underlying assumption of the collaborative filtering approach is that if a person A has the same opinion as a person B on an issue, A is more likely to have B's opinion on a different issue x than to have the opinion on x of a person chosen randomly. You can read more about collaborative filtering [here][collab2].
# #### The image below (from [Wikipedia][collab]) shows an example of predicting a user's rating using collaborative filtering. At first, people rate different items (like videos, images, games). After that, the system makes predictions about a user's rating for an item the user has not rated yet. These predictions are built upon the existing ratings of other users who have similar ratings to the active user. For instance, in the image below the system has made the prediction that the active user will not like the video.
# *(image omitted: collaborative filtering example from Wikipedia — predicting the active user's rating from similar users' ratings)*
# [mllib]: https://spark.apache.org/mllib/
# [collab]: https://en.wikipedia.org/?title=Collaborative_filtering
# [collab2]: http://recommender-systems.org/collaborative-filtering/
# #### For movie recommendations, we start with a matrix whose entries are movie ratings by users (shown in red in the diagram below). Each column represents a user (shown in green) and each row represents a particular movie (shown in blue).
# #### Since not all users have rated all movies, we do not know all of the entries in this matrix, which is precisely why we need collaborative filtering. For each user, we have ratings for only a subset of the movies. With collaborative filtering, the idea is to approximate the ratings matrix by factorizing it as the product of two matrices: one that describes properties of each user (shown in green), and one that describes properties of each movie (shown in blue).
# *(image omitted: ratings matrix approximated as the product of a users matrix and a movies matrix)*
# #### We want to select these two matrices such that the error for the users/movie pairs where we know the correct ratings is minimized. The [Alternating Least Squares][als] algorithm does this by first randomly filling the users matrix with values and then optimizing the values of the movies matrix such that the error is minimized. Then, it holds the movies matrix constant and optimizes the values of the users matrix. This alternation between which matrix to optimize is the reason for the "alternating" in the name.
# #### This optimization is what's being shown on the right in the image above. Given a fixed set of user factors (i.e., values in the users matrix), we use the known ratings to find the best values for the movie factors using the optimization written at the bottom of the figure. Then we "alternate" and pick the best user factors given fixed movie factors.
# #### For a simple example of what the users and movies matrices might look like, check out the [videos from Lecture 8][videos] or the [slides from Lecture 8][slides]
# [videos]: https://courses.edx.org/courses/BerkeleyX/CS100.1x/1T2015/courseware/00eb8b17939b4889a41a6d8d2f35db83/3bd3bba368be4102b40780550d3d8da6/
# [slides]: https://courses.edx.org/c4x/BerkeleyX/CS100.1x/asset/Week4Lec8.pdf
# [als]: https://en.wikiversity.org/wiki/Least-Squares_Method
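# #### To make the factorization idea concrete, here is a tiny hand-made example (ours, not part of the
# #### lab): a 2 x 3 ratings matrix written exactly as the product of a 2 x 1 users matrix and a 1 x 3
# #### movies matrix (rank 1). ALS searches for such factors automatically, minimizing the error on the
# #### known ratings only.
# In[ ]:
usersFactors = [[2.0], [1.0]]        # one row of factors per user (rank 1)
moviesFactors = [[2.5, 1.5, 2.0]]    # one row of factors, one column per movie (rank 1)
approxRatings = [[sum(u * m for u, m in zip(userRow, movieCol))
                  for movieCol in zip(*moviesFactors)]
                 for userRow in usersFactors]
print 'Approximated ratings matrix: %s' % approxRatings  # [[5.0, 3.0, 4.0], [2.5, 1.5, 2.0]]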
# #### **(2a) Creating a Training Set**
# #### Before we jump into using machine learning, we need to break up the `ratingsRDD` dataset into three pieces:
# * #### A training set (RDD), which we will use to train models
# * #### A validation set (RDD), which we will use to choose the best model
# * #### A test set (RDD), which we will use for our experiments
# #### To randomly split the dataset into multiple groups, we can use the pySpark [randomSplit()](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.randomSplit) transformation. `randomSplit()` takes a set of splits and a seed and returns multiple RDDs.
# In[20]:
trainingRDD, validationRDD, testRDD = ratingsRDD.randomSplit([6, 2, 2], seed=0L)
print 'Training: %s, validation: %s, test: %s\n' % (trainingRDD.count(),
validationRDD.count(),
testRDD.count())
print trainingRDD.take(3)
print validationRDD.take(3)
print testRDD.take(3)
assert trainingRDD.count() == 292716
assert validationRDD.count() == 96902
assert testRDD.count() == 98032
assert trainingRDD.filter(lambda t: t == (1, 914, 3.0)).count() == 1
assert trainingRDD.filter(lambda t: t == (1, 2355, 5.0)).count() == 1
assert trainingRDD.filter(lambda t: t == (1, 595, 5.0)).count() == 1
assert validationRDD.filter(lambda t: t == (1, 1287, 5.0)).count() == 1
assert validationRDD.filter(lambda t: t == (1, 594, 4.0)).count() == 1
assert validationRDD.filter(lambda t: t == (1, 1270, 5.0)).count() == 1
assert testRDD.filter(lambda t: t == (1, 1193, 5.0)).count() == 1
assert testRDD.filter(lambda t: t == (1, 2398, 4.0)).count() == 1
assert testRDD.filter(lambda t: t == (1, 1035, 5.0)).count() == 1
# #### After splitting the dataset, your training set has about 293,000 entries and the validation and test sets each have about 97,000 entries (the exact number of entries in each dataset varies slightly due to the random nature of the `randomSplit()` transformation).
# #### **(2b) Root Mean Square Error (RMSE)**
# #### In the next part, you will generate a few different models, and will need a way to decide which model is best. We will use the [Root Mean Square Error](https://en.wikipedia.org/wiki/Root-mean-square_deviation) (RMSE) or Root Mean Square Deviation (RMSD) to compute the error of each model. RMSE is a frequently used measure of the differences between values (sample and population values) predicted by a model or an estimator and the values actually observed. The RMSD represents the sample standard deviation of the differences between predicted values and observed values. These individual differences are called residuals when the calculations are performed over the data sample that was used for estimation, and are called prediction errors when computed out-of-sample. The RMSE serves to aggregate the magnitudes of the errors in predictions for various times into a single measure of predictive power. RMSE is a good measure of accuracy, but only to compare forecasting errors of different models for a particular variable and not between variables, as it is scale-dependent.
# #### The RMSE is the square root of the average value of the square of `(actual rating - predicted rating)` for all users and movies for which we have the actual rating. Versions of Spark MLlib beginning with Spark 1.4 include a [RegressionMetrics](https://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.evaluation.RegressionMetrics) module that can be used to compute the RMSE. However, since we are using Spark 1.3.1, we will write our own function.
# #### Write a function to compute the sum of squared error given `predictedRDD` and `actualRDD` RDDs. Both RDDs consist of tuples of the form (UserID, MovieID, Rating)
# #### Given two ratings RDDs, *x* and *y* of size *n*, we define RMSE as follows: $ RMSE = \sqrt{\frac{\sum_{i = 1}^{n} (x_i - y_i)^2}{n}}$
# #### To calculate RMSE, the steps you should perform are:
# * #### Transform `predictedRDD` into the tuples of the form ((UserID, MovieID), Rating). For example, tuples like `[((1, 1), 5), ((1, 2), 3), ((1, 3), 4), ((2, 1), 3), ((2, 2), 2), ((2, 3), 4)]`. You can perform this step with a single Spark transformation.
# * #### Transform `actualRDD` into the tuples of the form ((UserID, MovieID), Rating). For example, tuples like `[((1, 2), 3), ((1, 3), 5), ((2, 1), 5), ((2, 2), 1)]`. You can perform this step with a single Spark transformation.
# * #### Using only RDD transformations (you only need to perform two transformations), compute the squared error for each *matching* entry (i.e., the same (UserID, MovieID) in each RDD) in the reformatted RDDs - do *not* use `collect()` to perform this step. Note that not every (UserID, MovieID) pair will appear in both RDDs - if a pair does not appear in both RDDs, then it does not contribute to the RMSE. You will end up with an RDD with entries of the form $ (x_i - y_i)^2$ You might want to check out Python's [math](https://docs.python.org/2/library/math.html) module to see how to compute these values
# * #### Using an RDD action (but **not** `collect()`), compute the total squared error: $ SE = \sum_{i = 1}^{n} (x_i - y_i)^2 $
# * #### Compute *n* by using an RDD action (but **not** `collect()`), to count the number of pairs for which you computed the total squared error
# * #### Using the total squared error and the number of pairs, compute the RMSE. Make sure you compute this value as a [float](https://docs.python.org/2/library/stdtypes.html#numeric-types-int-float-long-complex).
# #### Note: Your solution must only use transformations and actions on RDDs. Do _not_ call `collect()` on either RDD.
# In[22]:
# TODO: Replace <FILL IN> with appropriate code
import math
def computeError(predictedRDD, actualRDD):
""" Compute the root mean squared error between predicted and actual
Args:
predictedRDD: predicted ratings for each movie and each user where each entry is in the form
(UserID, MovieID, Rating)
actualRDD: actual ratings where each entry is in the form (UserID, MovieID, Rating)
Returns:
        RMSE (float): computed RMSE value
"""
# Transform predictedRDD into the tuples of the form ((UserID, MovieID), Rating)
predictedReformattedRDD = predictedRDD.map(lambda x: ((x[0],x[1]), x[2]))
# Transform actualRDD into the tuples of the form ((UserID, MovieID), Rating)
actualReformattedRDD = actualRDD.map(lambda x: ((x[0], x[1]), x[2]))
# Compute the squared error for each matching entry (i.e., the same (User ID, Movie ID) in each
    # RDD) in the reformatted RDDs using RDD transformations - do not use collect()
squaredErrorsRDD = (predictedReformattedRDD
.join(actualReformattedRDD).map(lambda x:(x, (x[1][0]-x[1][1])**2)))
# Compute the total squared error - do not use collect()
totalError = squaredErrorsRDD.values().sum()
# Count the number of entries for which you computed the total squared error
numRatings = squaredErrorsRDD.count()
    # Using the total squared error and the number of entries, compute the RMSE
return math.sqrt(float(totalError)/numRatings)
# sc.parallelize turns a Python list into a Spark RDD.
testPredicted = sc.parallelize([
(1, 1, 5),
(1, 2, 3),
(1, 3, 4),
(2, 1, 3),
(2, 2, 2),
(2, 3, 4)])
testActual = sc.parallelize([
(1, 2, 3),
(1, 3, 5),
(2, 1, 5),
(2, 2, 1)])
testPredicted2 = sc.parallelize([
(2, 2, 5),
(1, 2, 5)])
testError = computeError(testPredicted, testActual)
print 'Error for test dataset (should be 1.22474487139): %s' % testError
testError2 = computeError(testPredicted2, testActual)
print 'Error for test dataset2 (should be 3.16227766017): %s' % testError2
testError3 = computeError(testActual, testActual)
print 'Error for testActual dataset (should be 0.0): %s' % testError3
# In[23]:
# TEST Root Mean Square Error (2b)
Test.assertTrue(abs(testError - 1.22474487139) < 0.00000001,
'incorrect testError (expected 1.22474487139)')
Test.assertTrue(abs(testError2 - 3.16227766017) < 0.00000001,
'incorrect testError2 result (expected 3.16227766017)')
Test.assertTrue(abs(testError3 - 0.0) < 0.00000001,
'incorrect testActual result (expected 0.0)')
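# #### For reference only: on Spark 1.4 or newer (not the version used in this lab), the
# #### `RegressionMetrics` module mentioned above can compute the same value. A minimal sketch, with a
# #### helper name of our own choosing:
# In[ ]:
def computeErrorWithRegressionMetrics(predictedRDD, actualRDD):
    """ RMSE via pyspark.mllib.evaluation.RegressionMetrics (requires Spark >= 1.4) """
    from pyspark.mllib.evaluation import RegressionMetrics
    predPairs = predictedRDD.map(lambda x: ((x[0], x[1]), x[2]))
    actualPairs = actualRDD.map(lambda x: ((x[0], x[1]), x[2]))
    # join on (UserID, MovieID) and keep the (predicted, actual) rating pairs
    return RegressionMetrics(predPairs.join(actualPairs).values()).rootMeanSquaredError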
# #### **(2c) Using ALS.train()**
# #### In this part, we will use the MLlib implementation of Alternating Least Squares, [ALS.train()](https://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.recommendation.ALS). ALS takes a training dataset (RDD) and several parameters that control the model creation process. To determine the best values for the parameters, we will use ALS to train several models, and then we will select the best model and use the parameters from that model in the rest of this lab exercise.
# #### The process we will use for determining the best model is as follows:
# * #### Pick a set of model parameters. The most important parameter to `ALS.train()` is the *rank*, which is the number of rows in the Users matrix (green in the diagram above) or the number of columns in the Movies matrix (blue in the diagram above). (In general, a lower rank will mean higher error on the training dataset, but a high rank may lead to [overfitting](https://en.wikipedia.org/wiki/Overfitting).) We will train models with ranks of 4, 8, and 12 using the `trainingRDD` dataset.
# * #### Create a model using `ALS.train(trainingRDD, rank, seed=seed, iterations=iterations, lambda_=regularizationParameter)` with these parameters: an RDD consisting of tuples of the form (UserID, MovieID, rating) used to train the model, an integer rank (4, 8, or 12), a number of iterations to execute (we will use 5 for the `iterations` parameter), and a regularization coefficient (we will use 0.1 for the `regularizationParameter`).
# * #### For the prediction step, create an input RDD, `validationForPredictRDD`, consisting of (UserID, MovieID) pairs that you extract from `validationRDD`. You will end up with an RDD of the form: `[(1, 1287), (1, 594), (1, 1270)]`
# * #### Using the model and `validationForPredictRDD`, we can predict rating values by calling [model.predictAll()](https://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.recommendation.MatrixFactorizationModel.predictAll) with the `validationForPredictRDD` dataset, where `model` is the model we generated with ALS.train(). `predictAll` accepts an RDD with each entry in the format (userID, movieID) and outputs an RDD with each entry in the format (userID, movieID, rating).
# * #### Evaluate the quality of the model by using the `computeError()` function you wrote in part (2b) to compute the error between the predicted ratings and the actual ratings in `validationRDD`.
# #### Which rank produces the best model, based on the RMSE with the `validationRDD` dataset?
# #### Note: It is likely that this operation will take a noticeable amount of time (around a minute in our VM); you can observe its progress on the [Spark Web UI](http://localhost:4040). Probably most of the time will be spent running your `computeError()` function, since, unlike the Spark ALS implementation (and the Spark 1.4 [RegressionMetrics](https://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.evaluation.RegressionMetrics) module), this does not use a fast linear algebra library and needs to run some Python code for all 100k entries.
# In[24]:
# TODO: Replace <FILL IN> with appropriate code
from pyspark.mllib.recommendation import ALS
validationForPredictRDD = validationRDD.map(lambda x: (x[0], x[1]))
seed = 5L
iterations = 5
regularizationParameter = 0.1
ranks = [4, 8, 12]
errors = [0, 0, 0]
err = 0
tolerance = 0.03
minError = float('inf')
bestRank = -1
bestIteration = -1
for rank in ranks:
model = ALS.train(trainingRDD, rank, seed=seed, iterations=iterations,
lambda_=regularizationParameter)
predictedRatingsRDD = model.predictAll(validationForPredictRDD)
error = computeError(predictedRatingsRDD, validationRDD)
errors[err] = error
err += 1
print 'For rank %s the RMSE is %s' % (rank, error)
if error < minError:
minError = error
bestRank = rank
print 'The best model was trained with rank %s' % bestRank
# In[25]:
# TEST Using ALS.train (2c)
Test.assertEquals(trainingRDD.getNumPartitions(), 2,
'incorrect number of partitions for trainingRDD (expected 2)')
Test.assertEquals(validationForPredictRDD.count(), 96902,
'incorrect size for validationForPredictRDD (expected 96902)')
Test.assertEquals(validationForPredictRDD.filter(lambda t: t == (1, 1907)).count(), 1,
'incorrect content for validationForPredictRDD')
Test.assertTrue(abs(errors[0] - 0.883710109497) < tolerance, 'incorrect errors[0]')
Test.assertTrue(abs(errors[1] - 0.878486305621) < tolerance, 'incorrect errors[1]')
Test.assertTrue(abs(errors[2] - 0.876832795659) < tolerance, 'incorrect errors[2]')
# #### **(2d) Testing Your Model**
# #### So far, we used the `trainingRDD` and `validationRDD` datasets to select the best model. Since we used these two datasets to determine what model is best, we cannot use them to test how good the model is - otherwise we would be very vulnerable to [overfitting](https://en.wikipedia.org/wiki/Overfitting). To decide how good our model is, we need to use the `testRDD` dataset. We will use the `bestRank` you determined in part (2c) to create a model for predicting the ratings for the test dataset and then we will compute the RMSE.
# #### The steps you should perform are:
# * #### Train a model, using the `trainingRDD`, `bestRank` from part (2c), and the parameters you used in part (2c): `seed=seed`, `iterations=iterations`, and `lambda_=regularizationParameter` - make sure you include **all** of the parameters.
# * #### For the prediction step, create an input RDD, `testForPredictingRDD`, consisting of (UserID, MovieID) pairs that you extract from `testRDD`. You will end up with an RDD of the form: `[(1, 1287), (1, 594), (1, 1270)]`
# * #### Use [myModel.predictAll()](https://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.recommendation.MatrixFactorizationModel.predictAll) to predict rating values for the test dataset.
# * #### For validation, use the `testRDD` and your `computeError` function to compute the RMSE between `testRDD` and the `predictedTestRDD` from the model.
# * #### Evaluate the quality of the model by using the `computeError()` function you wrote in part (2b) to compute the error between the predicted ratings and the actual ratings in `testRDD`.
# In[27]:
# TODO: Replace <FILL IN> with appropriate code
myModel = ALS.train(trainingRDD, bestRank, seed=seed, iterations=iterations,
lambda_=regularizationParameter)
testForPredictingRDD = testRDD.map(lambda x: (x[0], x[1]))
predictedTestRDD = myModel.predictAll(testForPredictingRDD)
testRMSE = computeError(testRDD, predictedTestRDD)
print 'The model had a RMSE on the test set of %s' % testRMSE
# In[28]:
# TEST Testing Your Model (2d)
Test.assertTrue(abs(testRMSE - 0.87809838344) < tolerance, 'incorrect testRMSE')
# #### **(2e) Comparing Your Model**
# #### Looking at the RMSE for the results predicted by the model versus the values in the test set is one way to evaluate the quality of our model. Another way to evaluate the model is to evaluate the error from a test set where every rating is the average rating for the training set.
# #### The steps you should perform are:
# * #### Use the `trainingRDD` to compute the average rating across all movies in that training dataset.
# * #### Use the average rating that you just determined and the `testRDD` to create an RDD with entries of the form (userID, movieID, average rating).
# * #### Use your `computeError` function to compute the RMSE between the `testRDD` and the `testForAvgRDD` that you just created.
# In[29]:
# TODO: Replace <FILL IN> with appropriate code
trainingAvgRating = float(trainingRDD.map(lambda x: x[2]).sum())/trainingRDD.count()
print 'The average rating for movies in the training set is %s' % trainingAvgRating
testForAvgRDD = testRDD.map(lambda x: (x[0], x[1], trainingAvgRating))
testAvgRMSE = computeError(testRDD, testForAvgRDD)
print 'The RMSE on the average set is %s' % testAvgRMSE
# In[30]:
# TEST Comparing Your Model (2e)
Test.assertTrue(abs(trainingAvgRating - 3.57409571052) < 0.000001,
'incorrect trainingAvgRating (expected 3.57409571052)')
Test.assertTrue(abs(testAvgRMSE - 1.12036693569) < 0.000001,
'incorrect testAvgRMSE (expected 1.12036693569)')
# #### You now have code to predict how users will rate movies!
# ## **Part 3: Predictions for Yourself**
# #### The ultimate goal of this lab exercise is to predict what movies to recommend to yourself. In order to do that, you will first need to add ratings for yourself to the `ratingsRDD` dataset.
# #### **(3a) Your Movie Ratings**
# #### To help you provide ratings for yourself, we have included the following code to list the names and movie IDs of the 50 highest-rated movies from `movieLimitedAndSortedByRatingRDD`, which we created in Part 1 of the lab.
# In[31]:
print 'Most rated movies:'
print '(average rating, movie name, number of reviews)'
for ratingsTuple in movieLimitedAndSortedByRatingRDD.take(50):
print ratingsTuple
# #### The user ID 0 is unassigned, so we will use it for your ratings. We set the variable `myUserID` to 0 for you. Next, create a new RDD `myRatingsRDD` containing at least 10 of your own movie ratings. Each entry should be formatted as `(myUserID, movieID, rating)` (i.e., each entry should be formatted in the same way as `trainingRDD`). As in the original dataset, ratings should be between 1 and 5 (inclusive). If you have not seen at least 10 of these movies, you can increase the parameter passed to `take()` in the above cell until there are 10 movies that you have seen (or you can also guess what your rating would be for movies you have not seen).
# In[32]:
# TODO: Replace <FILL IN> with appropriate code
myUserID = 0
# Note that the movie IDs are the *last* number on each line. A common error was to use the number of ratings as the movie ID.
myRatedMovies = [
(myUserID, 2115, 4.5), # Indiana Jones and the Temple of Doom
(myUserID, 480, 4), # Jurassic Park
(myUserID, 1377, 3.8), # Batman Returns
(myUserID, 648, 4), # Mission Impossible
(myUserID, 2571, 4.8), # Matrix
(myUserID, 1198, 5), # Raiders of the Lost Ark
(myUserID, 1580, 3.6), # Men In Black
(myUserID, 1219, 4.5), # Psycho
(myUserID, 589, 3.2), # Terminator 2
(myUserID, 1097, 4) # ET
# The format of each line is (myUserID, movie ID, your rating)
# For example, to give the movie "Star Wars: Episode IV - A New Hope (1977)" a five rating, you would add the following line:
# (myUserID, 260, 5),
]
myRatingsRDD = sc.parallelize(myRatedMovies)
print 'My movie ratings: %s' % myRatingsRDD.take(10)
# #### **(3b) Add Your Movies to Training Dataset**
# #### Now that you have ratings for yourself, you need to add your ratings to the `training` dataset so that the model you train will incorporate your preferences. Spark's [union()](http://spark.apache.org/docs/latest/api/python/pyspark.rdd.RDD-class.html#union) transformation combines two RDDs; use `union()` to create a new training dataset that includes your ratings and the data in the original training dataset.
# In[33]:
# TODO: Replace <FILL IN> with appropriate code
trainingWithMyRatingsRDD = trainingRDD.union(myRatingsRDD)
print ('The training dataset now has %s more entries than the original training dataset' %
(trainingWithMyRatingsRDD.count() - trainingRDD.count()))
assert (trainingWithMyRatingsRDD.count() - trainingRDD.count()) == myRatingsRDD.count()
# #### **(3c) Train a Model with Your Ratings**
# #### Now, train a model with your ratings added and the parameters you used in part (2c): `bestRank`, `seed=seed`, `iterations=iterations`, and `lambda_=regularizationParameter` - make sure you include **all** of the parameters.
# In[35]:
# TODO: Replace <FILL IN> with appropriate code
myRatingsModel = ALS.train(trainingWithMyRatingsRDD, bestRank, seed=seed, iterations=iterations,
lambda_=regularizationParameter)
# #### **(3d) Check RMSE for the New Model with Your Ratings**
# #### Compute the RMSE for this new model on the test set.
# * #### For the prediction step, we reuse `testForPredictingRDD`, consisting of (UserID, MovieID) pairs that you extracted from `testRDD`. The RDD has the form: `[(1, 1287), (1, 594), (1, 1270)]`
# * #### Use `myRatingsModel.predictAll()` to predict rating values for the `testForPredictingRDD` test dataset, set this as `predictedTestMyRatingsRDD`
# * #### For validation, use the `testRDD` and your `computeError` function to compute the RMSE between `testRDD` and the `predictedTestMyRatingsRDD` from the model.
# In[36]:
# TODO: Replace <FILL IN> with appropriate code
predictedTestMyRatingsRDD = myRatingsModel.predictAll(testForPredictingRDD)
testRMSEMyRatings = computeError(testRDD, predictedTestMyRatingsRDD)
print 'The model had a RMSE on the test set of %s' % testRMSEMyRatings
# #### **(3e) Predict Your Ratings**
# #### So far, we have only used the `predictAll` method to compute the error of the model. Here, use `predictAll` to predict what ratings you would give to the movies that you did not already provide ratings for.
# #### The steps you should perform are:
# * #### Use the Python list `myRatedMovies` to transform the `moviesRDD` into an RDD with entries that are pairs of the form (myUserID, Movie ID) and that does not contain any movies that you have rated. This transformation will yield an RDD of the form: `[(0, 1), (0, 2), (0, 3), (0, 4)]`. Note that you can do this step with one RDD transformation.
# * #### For the prediction step, use the input RDD, `myUnratedMoviesRDD`, with myRatingsModel.predictAll() to predict your ratings for the movies.
# In[ ]:
# TODO: Replace <FILL IN> with appropriate code
# Use the Python list myRatedMovies to transform the moviesRDD into an RDD with entries that are pairs of the form (myUserID, Movie ID) and that does not contain any movies that you have rated.
myUnratedMoviesRDD = (moviesRDD
                      .filter(lambda movie: movie[0] not in [rated[1] for rated in myRatedMovies])
                      .map(lambda movie: (myUserID, movie[0])))
# Use the input RDD, myUnratedMoviesRDD, with myRatingsModel.predictAll() to predict your ratings for the movies
predictedRatingsRDD = myRatingsModel.predictAll(myUnratedMoviesRDD)
# #### **(3f) Predict Your Ratings**
# #### We have our predicted ratings. Now we can print out the 25 movies with the highest predicted ratings.
# #### The steps you should perform are:
# * #### From Parts (1b) and (1c), we know that we should look at movies with a reasonable number of reviews (e.g., more than 75 reviews). You can experiment with a lower threshold, but fewer ratings for a movie may yield higher prediction errors. Transform `movieIDsWithAvgRatingsRDD` from Part (1b), which has the form (MovieID, (number of ratings, average rating)), into an RDD of the form (MovieID, number of ratings): `[(2, 332), (4, 71), (6, 442)]`
# * #### We want to see movie names, instead of movie IDs. Transform `predictedRatingsRDD` into an RDD with entries that are pairs of the form (Movie ID, Predicted Rating): `[(3456, -0.5501005376936687), (1080, 1.5885892024487962), (320, -3.7952255522487865)]`
# * #### Use RDD transformations with `predictedRDD` and `movieCountsRDD` to yield an RDD with tuples of the form (Movie ID, (Predicted Rating, number of ratings)): `[(2050, (0.6694097486155939, 44)), (10, (5.29762541533513, 418)), (2060, (0.5055259373841172, 97))]`
# * #### Use RDD transformations with `predictedWithCountsRDD` and `moviesRDD` to yield an RDD with tuples of the form (Predicted Rating, Movie Name, number of ratings), _for movies with more than 75 ratings._ For example: `[(7.983121900375243, u'Under Siege (1992)'), (7.9769201864261285, u'Fifth Element, The (1997)')]`
# In[37]:
# TODO: Replace <FILL IN> with appropriate code
# Transform movieIDsWithAvgRatingsRDD from part (1b), which has the form (MovieID, (number of ratings, average rating)), into an RDD of the form (MovieID, number of ratings)
movieCountsRDD = movieIDsWithAvgRatingsRDD.map(lambda x: (x[0], x[1][0]))
# Transform predictedRatingsRDD into an RDD with entries that are pairs of the form (Movie ID, Predicted Rating)
predictedRDD = predictedRatingsRDD.map(lambda x: (x[1], x[2]))
# Use RDD transformations with predictedRDD and movieCountsRDD to yield an RDD with tuples of the form (Movie ID, (Predicted Rating, number of ratings))
predictedWithCountsRDD = (predictedRDD
.join(movieCountsRDD))
# Use RDD transformations with predictedWithCountsRDD and moviesRDD to yield an RDD with tuples of the form (Predicted Rating, Movie Name, number of ratings), for movies with more than 75 ratings
ratingsWithNamesRDD = (predictedWithCountsRDD
.filter(lambda x: x[1][1] > 75)
.join(moviesRDD)
.map(lambda x: (x[1][0][0], x[1][1], x[1][0][1])))
predictedHighestRatedMovies = ratingsWithNamesRDD.takeOrdered(25, key=lambda x: -x[0])
print ('My highest rated movies as predicted (for movies with more than 75 reviews):\n%s' %
'\n'.join(map(str, predictedHighestRatedMovies)))
# In[ ]:
| ["wanghao313@gmail.com"] | wanghao313@gmail.com |
fe67af41766db65e264adb0e06c55f078b4eb952 | 4d8b9f5533671b15e124bfa025d672297384b434 | /tests/unit/common/test_ansible_common.py | 1ef8eee5f8bb756e57092dc2f2318b9a25b932be | ["Apache-2.0", "CC-BY-4.0"] | permissive | Kryndex/yardstick | 6c37c4f752589c523be761e3980f7ca2c8fac798 | c191b305790b4528868725db82d1af6c0d95e22b | refs/heads/master | 2021-05-13T14:12:54.917653 | 2018-01-08T18:21:47 | 2018-01-08T18:21:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,700 | py |
# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import tempfile
from collections import defaultdict
import mock
import unittest
from six.moves.configparser import ConfigParser
from six.moves import StringIO
from yardstick.common import ansible_common
PREFIX = 'yardstick.common.ansible_common'
class OverwriteDictTestCase(unittest.TestCase):
def test_overwrite_dict_cfg(self):
c = ConfigParser(allow_no_value=True)
d = {
"section_a": "empty_value",
"section_b": {"key_c": "Val_d", "key_d": "VAL_D"},
"section_c": ["key_c", "key_d"],
}
ansible_common.overwrite_dict_to_cfg(c, d)
# Python3 and Python2 convert empty values into None or ''
# we don't really care but we need to compare correctly for unittest
self.assertTrue(c.has_option("section_a", "empty_value"))
self.assertEqual(sorted(c.items("section_b")), [('key_c', 'Val_d'), ('key_d', 'VAL_D')])
self.assertTrue(c.has_option("section_c", "key_c"))
self.assertTrue(c.has_option("section_c", "key_d"))
class FilenameGeneratorTestCase(unittest.TestCase):
@mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
def test__handle_existing_file(self, mock_tmp):
ansible_common.FileNameGenerator._handle_existing_file("/dev/null")
def test_get_generator_from_file(self):
ansible_common.FileNameGenerator.get_generator_from_filename("/dev/null", "", "", "")
def test_get_generator_from_file_middle(self):
ansible_common.FileNameGenerator.get_generator_from_filename("/dev/null", "", "",
"null")
def test_get_generator_from_file_prefix(self):
ansible_common.FileNameGenerator.get_generator_from_filename("/dev/null", "", "null",
"middle")
class AnsibleNodeTestCase(unittest.TestCase):
def test_ansible_node(self):
ansible_common.AnsibleNode()
def test_ansible_node_len(self):
a = ansible_common.AnsibleNode()
len(a)
def test_ansible_node_repr(self):
a = ansible_common.AnsibleNode()
repr(a)
def test_ansible_node_iter(self):
a = ansible_common.AnsibleNode()
for _ in a:
pass
def test_is_role(self):
a = ansible_common.AnsibleNode()
self.assertFalse(a.is_role("", default="foo"))
def test_ansible_node_get_tuple(self):
a = ansible_common.AnsibleNode({"name": "name"})
self.assertEqual(a.get_tuple(), ('name', a))
def test_gen_inventory_line(self):
a = ansible_common.AnsibleNode(defaultdict(str))
self.assertEqual(a.gen_inventory_line(), "")
def test_ansible_node_delitem(self):
a = ansible_common.AnsibleNode({"name": "name"})
del a['name']
def test_ansible_node_getattr(self):
a = ansible_common.AnsibleNode({"name": "name"})
self.assertEqual(getattr(a, "nosuch", None), None)
class AnsibleNodeDictTestCase(unittest.TestCase):
def test_ansible_node_dict(self):
n = ansible_common.AnsibleNode
ansible_common.AnsibleNodeDict(n, {})
def test_ansible_node_dict_len(self):
n = ansible_common.AnsibleNode
a = ansible_common.AnsibleNodeDict(n, {})
len(a)
def test_ansible_node_dict_repr(self):
n = ansible_common.AnsibleNode
a = ansible_common.AnsibleNodeDict(n, {})
repr(a)
def test_ansible_node_dict_iter(self):
n = ansible_common.AnsibleNode
a = ansible_common.AnsibleNodeDict(n, {})
for _ in a:
pass
def test_ansible_node_dict_get(self):
n = ansible_common.AnsibleNode
a = ansible_common.AnsibleNodeDict(n, {})
self.assertIsNone(a.get(""))
def test_gen_inventory_lines_for_all_of_type(self):
n = ansible_common.AnsibleNode
a = ansible_common.AnsibleNodeDict(n, {})
self.assertEqual(a.gen_inventory_lines_for_all_of_type(""), [])
def test_gen_inventory_lines(self):
n = ansible_common.AnsibleNode
a = ansible_common.AnsibleNodeDict(n, [{
"name": "name", "user": "user", "password": "PASS",
"role": "role",
}])
self.assertEqual(a.gen_all_inventory_lines(),
["name ansible_ssh_pass=PASS ansible_user=user"])
class AnsibleCommonTestCase(unittest.TestCase):
def test_get_timeouts(self):
self.assertAlmostEquals(ansible_common.AnsibleCommon.get_timeout(-100), 1200.0)
def test__init__(self):
ansible_common.AnsibleCommon({})
def test_reset(self):
a = ansible_common.AnsibleCommon({})
a.reset()
def test_do_install_no_dir(self):
a = ansible_common.AnsibleCommon({})
self.assertRaises(OSError, a.do_install, '', '')
def test_gen_inventory_dict(self):
nodes = [{
"name": "name", "user": "user", "password": "PASS",
"role": "role",
}]
a = ansible_common.AnsibleCommon(nodes)
a.gen_inventory_ini_dict()
self.assertEqual(a.inventory_dict, {
'nodes': ['name ansible_ssh_pass=PASS ansible_user=user'],
'role': ['name']
})
def test_deploy_dir(self):
a = ansible_common.AnsibleCommon({})
self.assertRaises(ValueError, getattr, a, "deploy_dir")
def test_deploy_dir_set(self):
a = ansible_common.AnsibleCommon({})
a.deploy_dir = ""
def test_deploy_dir_set_get(self):
a = ansible_common.AnsibleCommon({})
a.deploy_dir = "d"
self.assertEqual(a.deploy_dir, "d")
@mock.patch('{}.open'.format(PREFIX))
def test__gen_ansible_playbook_file_list(self, mock_open):
d = tempfile.mkdtemp()
try:
a = ansible_common.AnsibleCommon({})
a._gen_ansible_playbook_file(["a"], d)
finally:
os.rmdir(d)
@mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
@mock.patch('{}.open'.format(PREFIX))
def test__gen_ansible_inventory_file(self, mock_open, mock_tmp):
nodes = [{
"name": "name", "user": "user", "password": "PASS",
"role": "role",
}]
d = tempfile.mkdtemp()
try:
a = ansible_common.AnsibleCommon(nodes)
a.gen_inventory_ini_dict()
inv_context = a._gen_ansible_inventory_file(d)
with inv_context:
c = StringIO()
inv_context.write_func(c)
self.assertIn("ansible_ssh_pass=PASS", c.getvalue())
finally:
os.rmdir(d)
@mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
@mock.patch('{}.open'.format(PREFIX))
def test__gen_ansible_playbook_file_list_multiple(self, mock_open, mock_tmp):
d = tempfile.mkdtemp()
try:
a = ansible_common.AnsibleCommon({})
a._gen_ansible_playbook_file(["a", "b"], d)
finally:
os.rmdir(d)
@mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
@mock.patch('{}.Popen'.format(PREFIX))
@mock.patch('{}.open'.format(PREFIX))
def test_do_install_tmp_dir(self, mock_open, mock_popen, mock_tmp):
mock_popen.return_value.communicate.return_value = "", ""
mock_popen.return_value.wait.return_value = 0
d = tempfile.mkdtemp()
try:
a = ansible_common.AnsibleCommon({})
a.do_install('', d)
finally:
os.rmdir(d)
@mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
@mock.patch('{}.Popen'.format(PREFIX))
@mock.patch('{}.open'.format(PREFIX))
def test_execute_ansible_check(self, mock_open, mock_popen, mock_tmp):
mock_popen.return_value.communicate.return_value = "", ""
mock_popen.return_value.wait.return_value = 0
d = tempfile.mkdtemp()
try:
a = ansible_common.AnsibleCommon({})
a.execute_ansible('', d, ansible_check=True, verbose=True)
finally:
os.rmdir(d)
|
[
"ross.b.brattain@intel.com"
] |
ross.b.brattain@intel.com
|
61ec4c4c2b7bf39519d500c748f9da10ac0d9188
|
ec2cd2e651fe91c1c5f5f95a97211d3c17c3ae93
|
/tests/test_files/event_manager_test.py
|
53ce346c8b3b1b86b4c60b80fa0918ac4f73cf0e
|
[
"MIT"
] |
permissive
|
HackdYourShit/github-automation
|
1b92afd03cc16593898b88a605916ccb85ebb3bc
|
31e60695d25171453d62bc96151fb45129d0a421
|
refs/heads/master
| 2022-08-29T22:14:01.441351
| 2020-05-24T13:59:13
| 2020-05-24T13:59:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,481
|
py
|
import json
import os
from copy import deepcopy
from github_automation.core.issue.issue import Issue
from github_automation.core.project.project import Project, ProjectColumn
from github_automation.management.configuration import Configuration
from github_automation.management.event_manager import EventManager
MOCK_FOLDER_PATH = os.path.join(os.getcwd(), "tests", "mock_data")
def test_loading_event_manager():
issue_id = "=asdf=sdf="
title = "issue name"
labels = ["HighEffort", "Low", "bug", "test"]
assignee = "ronykoz"
issue = {
"projectCards": {
"nodes": [
{
"id": "id=",
"project": {
"number": 1,
"columns": {
"nodes": [
{
"name": "testing"
}
]
}
}
},
{
"id": "id2=",
"project": {
"number": 2,
"columns": {
"nodes": [
{
"name": "Queue"
}
]
}
}
}
]
},
"comments": {
"nodes": [
{
"author": {
"login": "ronykoz"
},
"body": "comment 1",
"createdAt": "2019-03-19T12:24:27Z"
},
{
"author": {
"login": "ronykoz"
},
"body": "second comment",
"createdAt": "2019-03-19T12:27:53Z"
},
{
"author": {
"login": "ronykoz"
},
"body": "third comment",
"createdAt": "2019-03-19T12:52:08Z"
}
]
},
"timelineItems": {
"__typename": "IssueTimelineItemsConnection",
"nodes": [
{
"__typename": "LabeledEvent",
"label": {
"name": labels[0]
},
"createdAt": "2019-03-15T12:40:22Z"
},
{
"__typename": "LabeledEvent",
"label": {
"name": labels[1]
},
"createdAt": "2019-03-17T13:59:27Z"
},
{
"__typename": "LabeledEvent",
"label": {
"name": labels[2]
},
"createdAt": "2019-04-08T10:48:02Z"
},
{
"willCloseTarget": True,
"source": {
"__typename": "PullRequest",
"state": "OPEN",
"isDraft": False,
"assignees": {
"nodes": [
{
"login": "test"
},
{
"login": "test2"
}
]
},
"labels": {
"nodes": [
{
"name": "label"
}
]
},
"reviewRequests": {
"totalCount": 0
},
"reviews": {
"totalCount": 3
},
"number": 1,
"reviewDecision": "APPROVED"
}
}
]
},
"title": title,
"id": issue_id,
"number": 1,
"milestone": {
"title": "test"
},
"labels": {
"edges": [
{
"node": {
"name": labels[0]
}
},
{
"node": {
"name": labels[1]
}
},
{
"node": {
"name": labels[2]
}
},
{
"node": {
"name": labels[3]
}
}
]
},
"assignees": {
"edges": [
{
"node": {
"login": assignee
}
}
]
}
}
event = {
"action": "some action",
"issue": {
"number": 1
}
}
class mock_client(object):
def get_issue(*args, **kwargs):
return {
"repository": {
"issue": issue
}
}
client = mock_client()
manager = EventManager(os.path.join(MOCK_FOLDER_PATH, 'conf.ini'), client=client, event=json.dumps(event))
issue_object = manager.get_issue_object()
assert issue_object.number == 1
assert issue_object.title == title
def test_get_prev_column():
event = {
"action": "some action",
"issue": {
"number": 1
}
}
project_layout = {
"repository": {
"project": {
"columns": {
"edges": [
{
"cursor": 1,
"node": {
"name": "Queue"
}
},
{
"cursor": 2,
"node": {
"name": "In progress"
}
}
]
}
}
}
}
class MockClient(object):
def get_project_layout(*args, **kwargs):
return project_layout
client = MockClient()
manager = EventManager(os.path.join(MOCK_FOLDER_PATH, 'conf.ini'), client=client,
event=json.dumps(event))
manager.config = Configuration(os.path.join(MOCK_FOLDER_PATH, 'conf.ini'))
manager.config.load_properties()
assert manager.get_prev_column_cursor("Queue") == ""
assert manager.get_prev_column_cursor("In progress") == 1
def test_load_project_column():
event = {
"action": "some action",
"issue": {
"number": 1
}
}
project_layout = {
"repository": {
"project": {
"columns": {
"edges": [
{
"cursor": 1,
"node": {
"name": "Queue"
}
},
{
"cursor": 2,
"node": {
"name": "In progress"
}
}
]
}
}
}
}
project_column1 = {
"repository": {
"project": {
"columns": {
"nodes": [
{
"name": "Queue",
"id": "id",
"cards": {
"pageInfo": {
"endCursor": "A",
"hasNextPage": True
},
"edges": [
{
"cursor": "A",
"node": {
"note": None,
"state": "CONTENT_ONLY",
"id": "id=",
"content": {
"id": "id=",
"number": 1,
"title": "title",
"labels": {
"edges": [
{
"node": {
"name": "one"
}
},
{
"node": {
"name": "two"
}
},
{
"node": {
"name": "three"
}
}
]
}
}
}
}
]
},
}
]
}
}
}
}
project_column1_no_after = deepcopy(project_column1)
project_column1_no_after['repository']['project']['columns']['nodes'][0]['cards']['pageInfo']['hasNextPage'] = False
project_column2 = {
"repository": {
"project": {
"columns": {
"nodes": [
{
"name": "In progress",
"id": "id",
"cards": {
"pageInfo": {
"endCursor": "B",
"hasNextPage": True
},
"edges": [
{
"cursor": "B",
"node": {
"note": None,
"state": "CONTENT_ONLY",
"id": "cardid2=",
"content": {
"id": "id2=",
"number": 2,
"title": "title2",
"labels": {
"edges": [
{
"node": {
"name": "one"
}
},
{
"node": {
"name": "two"
}
},
{
"node": {
"name": "three"
}
}
]
}
}
}
}
]
},
}
]
}
}
}
}
project_column2_no_after = deepcopy(project_column2)
project_column2_no_after['repository']['project']['columns']['nodes'][0]['cards']['pageInfo']['hasNextPage'] = False
issue_id = "=asdf=sdf="
title = "issue name"
labels = ["test", "Low", "bug", "Testing"]
assignee = "ronykoz"
issue = {
"repository": {
"issue": {
"projectCards": {
"nodes": [
{
"id": "idadded=",
"project": {
"number": 1,
"columns": {
"nodes": [
{
"name": "testing"
}
]
}
}
},
{
"id": "id2=",
"project": {
"number": 2,
"columns": {
"nodes": [
{
"name": "Queue"
}
]
}
}
}
]
},
"comments": {
"nodes": [
{
"author": {
"login": "ronykoz"
},
"body": "comment 1",
"createdAt": "2019-03-19T12:24:27Z"
},
{
"author": {
"login": "ronykoz"
},
"body": "second comment",
"createdAt": "2019-03-19T12:27:53Z"
},
{
"author": {
"login": "ronykoz"
},
"body": "third comment",
"createdAt": "2019-03-19T12:52:08Z"
}
]
},
"timelineItems": {
"__typename": "IssueTimelineItemsConnection",
"nodes": [
{
"__typename": "LabeledEvent",
"label": {
"name": labels[0]
},
"createdAt": "2019-03-15T12:40:22Z"
},
{
"__typename": "LabeledEvent",
"label": {
"name": labels[1]
},
"createdAt": "2019-03-17T13:59:27Z"
},
{
"__typename": "LabeledEvent",
"label": {
"name": labels[2]
},
"createdAt": "2019-04-08T10:48:02Z"
}
]
},
"title": title,
"id": issue_id,
"number": 1,
"milestone": {
"title": "test"
},
"labels": {
"edges": [
{
"node": {
"name": labels[0]
}
},
{
"node": {
"name": labels[1]
}
},
{
"node": {
"name": labels[2]
}
},
{
"node": {
"name": labels[3]
}
}
]
},
"assignees": {
"edges": [
{
"node": {
"login": assignee
}
}
]
}
}
}
}
class MockClient(object):
def add_issues_to_project(self, **kwargs):
return {
"addProjectCard": {
"cardEdge": {
"node": {
"id": "idadded="
}
}
}
}
def add_to_column(self, **kwargs):
return
def move_to_specific_place_in_column(self, **kwargs):
return
def get_issue(*args, **kwargs):
return issue
def delete_project_card(*args, **kwargs):
return
def get_project_layout(*args, **kwargs):
return project_layout
def get_first_column_issues(*args, **kwargs):
if 'start_cards_cursor' in kwargs:
return project_column1_no_after
return project_column1
def get_column_issues(*args, **kwargs):
if 'start_cards_cursor' in kwargs:
return project_column2_no_after
return project_column2
client = MockClient()
manager = EventManager(os.path.join(MOCK_FOLDER_PATH, 'conf.ini'), client=client, event=json.dumps(event))
manager.config = Configuration(os.path.join(MOCK_FOLDER_PATH, 'conf.ini'))
manager.config.load_properties()
project1 = manager.load_project_column("Queue")
assert project1.columns["Queue"].name == "Queue"
assert len(project1.columns["Queue"].cards) == 2
assert project1.columns["Queue"].cards[0].issue.title == "title"
assert project1.columns["Queue"].cards[0].id == "id="
project2 = manager.load_project_column("In progress")
assert project2.columns["In progress"].name == "In progress"
assert len(project2.columns["In progress"].cards) == 2
assert project2.columns["In progress"].cards[0].issue.title == "title2"
assert project2.columns["In progress"].get_card_id("id2=") == "cardid2="
def test_event_manager_flow(mocker):
config = Configuration(os.path.join(MOCK_FOLDER_PATH, 'conf.ini'))
config.load_properties()
project_object = Project(name="project_name",
columns={
"In progress": ProjectColumn(id="some id", name='In progress', cards=[])
},
config=config
)
mocker.patch.object(EventManager, "get_issue_object", return_value=Issue(
id="1",
title="this is a test title",
number=1,
assignees=["ronykoz"],
labels=['test', 'Testing', 'bug']
))
mocker.patch.object(EventManager, "load_project_column",
return_value=project_object
)
class MockClient(object):
def add_issues_to_project(*args, **kwargs):
return {
"addProjectCard": {
"cardEdge": {
"node": {
"id": "1"
}
}
}
}
def add_to_column(self, **kwargs):
return
def move_to_specific_place_in_column(self, **kwargs):
return
client = MockClient()
manager = EventManager(os.path.join(MOCK_FOLDER_PATH, 'conf.ini'), client=client,
event=json.dumps({"text": "text"}))
manager.run()
assert len(project_object.get_all_issue_ids()) == 1
|
[
"rkozakish@paloaltonetworks.com"
] |
rkozakish@paloaltonetworks.com
|
a247744ab29c25a75bebe261f0975740848265b7
|
803ed08c0cc3a76e48bdea08323c2be3ffa9f0ef
|
/consoleme/handlers/v2/resources.py
|
75380eb8825948b5cecfcb9dd957981f23f001d7
|
[
"Apache-2.0"
] |
permissive
|
rhnasc/consoleme
|
c6ec4d75ecba3c132d174c88811d44af11b18029
|
18962418fdc8466dc85547bd0cba100f1c00d7c1
|
refs/heads/master
| 2023-04-03T10:04:58.424303
| 2021-04-15T20:00:33
| 2021-04-15T20:00:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,146
|
py
|
import sys
from datetime import datetime, timedelta
import sentry_sdk
import ujson as json
from policy_sentry.util.arns import parse_arn
from consoleme.config import config
from consoleme.exceptions.exceptions import MustBeFte, ResourceNotFound
from consoleme.handlers.base import BaseAPIV2Handler, BaseMtlsHandler
from consoleme.lib.account_indexers import get_account_id_to_name_mapping
from consoleme.lib.auth import can_admin_policies
from consoleme.lib.aws import fetch_resource_details
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_url_for_resource
from consoleme.lib.web import handle_generic_error_response
from consoleme.models import WebResponse
log = config.get_logger()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
aws = get_plugin_by_name(config.get("plugins.aws", "default_aws"))()
group_mapping = get_plugin_by_name(
config.get("plugins.group_mapping", "default_group_mapping")
)()
auth = get_plugin_by_name(config.get("plugins.auth", "default_auth"))()
internal_policies = get_plugin_by_name(
config.get("plugins.internal_policies", "default_policies")
)()
class ResourceDetailHandler(BaseAPIV2Handler):
async def get(self, account_id, resource_type, region=None, resource_name=None):
if not self.user:
return
if config.get("policy_editor.disallow_contractors", True) and self.contractor:
if self.user not in config.get(
"groups.can_bypass_contractor_restrictions", []
):
raise MustBeFte("Only FTEs are authorized to view this page.")
read_only = False
can_save_delete = (can_admin_policies(self.user, self.groups),)
account_id_for_arn: str = account_id
if resource_type == "s3":
account_id_for_arn = ""
arn = f"arn:aws:{resource_type}:{region or ''}:{account_id_for_arn}:{resource_name}"
stats.count(
"ResourcePolicyEditHandler.get", tags={"user": self.user, "arn": arn}
)
log_data = {
"user": self.user,
"ip": self.ip,
"function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
"message": "Incoming request",
"user-agent": self.request.headers.get("User-Agent"),
"request_id": self.request_uuid,
"arn": arn,
}
log.debug(log_data)
error = ""
try:
resource_details = await fetch_resource_details(
account_id, resource_type, resource_name, region
)
except Exception as e:
sentry_sdk.capture_exception()
log.error({**log_data, "error": e}, exc_info=True)
resource_details = None
error = str(e)
if not resource_details:
self.send_error(
404,
message=(
f"Unable to retrieve the specified {resource_type} resource: "
f"{account_id}/{resource_name}/{region}. {error}",
),
)
return
# TODO: Get S3 errors for s3 buckets only, else CT errors
yesterday = (datetime.today() - timedelta(days=1)).strftime("%Y%m%d")
s3_query_url = None
if resource_type == "s3":
s3_query_url = config.get("s3.bucket_query_url")
all_s3_errors = None
if s3_query_url:
s3_query_url = s3_query_url.format(
yesterday=yesterday, bucket_name=f"'{resource_name}'"
)
s3_error_topic = config.get("redis.s3_errors", "S3_ERRORS")
all_s3_errors = self.red.get(s3_error_topic)
s3_errors = []
if all_s3_errors:
s3_errors = json.loads(all_s3_errors).get(arn, [])
account_ids_to_name = await get_account_id_to_name_mapping()
# TODO(ccastrapel/psanders): Make a Swagger spec for this
self.write(
dict(
arn=arn,
resource_details=resource_details,
account_id=account_id,
account_name=account_ids_to_name.get(account_id, None),
read_only=read_only,
can_save_delete=can_save_delete,
s3_errors=s3_errors,
error_url=s3_query_url,
config_timeline_url=resource_details.get("config_timeline_url"),
)
)
class GetResourceURLHandler(BaseMtlsHandler):
"""consoleme CLI resource URL handler. Parameters accepted: arn."""
def initialize(self):
self.user: str = None
self.eligible_roles: list = []
async def get(self):
"""
        /api/v2/get_resource_url - Endpoint used to get a URL from an ARN
---
get:
description: Get the resource URL for ConsoleMe, given an ARN
responses:
200:
description: Returns a URL generated from the ARN in JSON form
400:
description: Malformed Request
403:
description: Forbidden
"""
self.user: str = self.requester["email"]
arn: str = self.get_argument("arn", None)
log_data = {
"function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
"user": self.user,
"arn": arn,
"message": "Generating URL for resource",
"user-agent": self.request.headers.get("User-Agent"),
"request_id": self.request_uuid,
}
log.debug(log_data)
stats.count("GetResourceURL.get", tags={"user": self.user})
if not arn:
generic_error_message: str = "Missing required parameter"
errors = ["arn is a required parameter"]
await handle_generic_error_response(
self, generic_error_message, errors, 404, "missing_data", log_data
)
return
try:
# parse_arn will raise an exception on invalid arns
parse_arn(arn)
resource_url = await get_url_for_resource(arn)
if not resource_url:
raise ValueError("This resource type is currently not supported")
except (ResourceNotFound, ValueError) as e:
generic_error_message: str = "Unsupported data"
errors = [str(e)]
await handle_generic_error_response(
self, generic_error_message, errors, 404, "invalid_data", log_data
)
return
except Exception as e:
generic_error_message: str = "Malformed data"
errors = [str(e)]
await handle_generic_error_response(
self, generic_error_message, errors, 404, "malformed_data", log_data
)
return
res = WebResponse(
status="success",
status_code=200,
message="Successfully generated URL for ARN",
data={"url": resource_url},
)
self.write(res.json())
await self.finish()
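# Illustrative request/response sketch (added for clarity; the exact payload shape is an
# assumption, not taken from the project docs):
#   GET /api/v2/get_resource_url?arn=arn:aws:s3:::example-bucket
# would return the WebResponse built above as JSON, roughly
#   {"status": "success", "status_code": 200, "message": "...", "data": {"url": "..."}}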
|
[
"noreply@github.com"
] |
rhnasc.noreply@github.com
|
8c53e53d5632bf2fe4e729680440e518b110775a
|
1308e72f0f9a481ae667000f8109ff4a0f4ab848
|
/app/main/database_helper_functions.py
|
f86ee44f7e80d0a40d34beccf3a948ccac908ec9
|
[
"MIT"
] |
permissive
|
charlestondance/amoslims
|
0ebab70a54367c8b6e1cf25fa1354ffdf23f5570
|
c1d051db3e88a92644446744a9027c5699f52b02
|
refs/heads/master
| 2022-12-08T05:13:24.977140
| 2018-07-12T16:13:10
| 2018-07-12T16:13:10
| 140,725,152
| 0
| 0
|
MIT
| 2022-12-08T02:16:20
| 2018-07-12T14:34:14
|
Python
|
UTF-8
|
Python
| false
| false
| 9,320
|
py
|
__author__ = 'dmcclymo'
import csv
from ..models import project_task, PartsBatchTable, PartsTable, ytk_job_master, ytk_job_master_level2, part_dna_sizes
from .. import db
def upload_csv_check_keys(filename):
    # read the rows up front so the file can be closed before the data is returned
    with open(filename) as csvfile:
        reader = csv.DictReader(csvfile)
        dict_keys = reader.fieldnames
        rows = list(reader)
    return dict_keys, rows
def write_pending_job_tasks_qc(unique_job_id):
# set the tasks to pending
item_add = project_task(unique_job_id=unique_job_id, task='uploaddesign', status="pending", locked=0)
db.session.add(item_add)
item_add = project_task(unique_job_id=unique_job_id, task='uploadjobmaster', status="pending", locked=0)
db.session.add(item_add)
item_add = project_task(unique_job_id=unique_job_id, task='clipqc', status="pending", locked=0)
db.session.add(item_add)
item_add = project_task(unique_job_id=unique_job_id, task='stitchqc', status="pending", locked=0)
db.session.add(item_add)
item_add = project_task(unique_job_id=unique_job_id, task='uploadjmp', status="pending", locked=0)
db.session.add(item_add)
item_add = project_task(unique_job_id=unique_job_id, task='clip_qc_echo', status="pending", locked=0)
db.session.add(item_add)
item_add = project_task(unique_job_id=unique_job_id, task='stitch_qc_echo', status="pending", locked=0)
db.session.add(item_add)
db.session.commit()
def upload_parts_to_table(filename):
all_ok = True
PART_KEYS_CHECK = ['part_name', 'part_class', 'part_type', 'relative_position', 'level', 'offset', 'project_number',
'job_set', 'sequence', 'composite_part', 'external_id', 'part_method', 'part_number',
'storage_location_id', 'storage_plate_barcode']
dict_keys, reader = upload_csv_check_keys(filename)
# check dict keys match PART_KEYS_CHECK
if sorted(dict_keys) == sorted(PART_KEYS_CHECK):
for row in reader:
            # check whether the part exists: if the part number is zero, create a new part number, otherwise just create a new batch
if row['part_number'] == str(0):
# new part
new_part_number = return_new_part_number()
padded_part = paded_part_number(new_part_number=new_part_number)
add_part = PartsTable(part_id="LDF-"+padded_part, part_name=row['part_name'],
part_class=row['part_class'],
part_type=row['part_type'], relative_position=row['relative_position'],
level=row['level'], offset=row['offset'],
project_number=row['project_number'],
job_set=row['job_set'], sequence=row['sequence'],
composite_part=row['composite_part'],
external_id=row['external_id'], part_method=row['part_method'],
part_number=new_part_number)
add_batch = PartsBatchTable(storage_plate_barcode=row['storage_plate_barcode'],
storage_location_id=row['storage_location_id'], part_id="LDF-"+padded_part,
part_number=new_part_number, batch_number=1)
db.session.add(add_part)
db.session.add(add_batch)
else:
                # existing part, so only create a new batch
padded_part = paded_part_number(int(row['part_number']))
new_batch_number = return_new_batch_number(int(row['part_number']))
add_batch = PartsBatchTable(storage_plate_barcode=row['storage_plate_barcode'],
storage_location_id=row['storage_location_id'], part_id="LDF-"+padded_part,
part_number=int(row['part_number']), batch_number=new_batch_number)
db.session.add(add_batch)
db.session.commit()
else:
all_ok = False
print(sorted(dict_keys))
print(sorted(PART_KEYS_CHECK))
return all_ok
def upload_sizes_to_table(filename):
all_ok = True
PART_KEYS_CHECK = ['part_id', 'enzyme', 'size', 'cargo_number']
dict_keys, reader = upload_csv_check_keys(filename)
# check dict keys match PART_KEYS_CHECK
if sorted(dict_keys) == sorted(PART_KEYS_CHECK):
for row in reader:
add_size = part_dna_sizes(part_id=row['part_id'], enzyme=row['enzyme'], size=int(row['size']),
cargo_number=int(row['cargo_number']))
db.session.add(add_size)
db.session.commit()
else:
all_ok = False
return all_ok
def return_new_part_number():
# this function checks the part database and returns the highest number +1 or 1
sql = "SELECT part_number FROM partstable ORDER BY part_number DESC "
query_table = db.engine.execute(sql)
query_table = query_table.fetchone()
if query_table:
part_number = query_table.part_number+1
else:
part_number = 1
return part_number
def paded_part_number(new_part_number):
    # pad the part number with leading zeros to six characters and return it as a string
    return str(new_part_number).zfill(6)
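# Added note (illustrative, not in the original code): the padding above yields a fixed
# six-character suffix, e.g.
#   paded_part_number(42)      -> "000042"
#   paded_part_number(1234567) -> "1234567"  (numbers already six digits or longer are left as-is)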
def return_new_batch_number(part_number):
# this function checks the part database and returns the highest number +1 or 1
sql = "SELECT batch_number from partsbatchtable WHERE part_number=%s order by batch_number DESC "
query_table = db.engine.execute(sql, part_number)
query_table = query_table.fetchone()
return query_table.batch_number+1
def upload_jobmaster_processed1_ytk(filename, jobname, filename_for_db):
KEYS_CHECK = ['Part Id', 'Sample Number', 'Storage Location Id', 'Storage Plate Barcode', 'Unique Job Id',
'Job Master Barcode', 'Job Master Well ID']
job_master_error_flag = []
dict_keys, reader = upload_csv_check_keys(filename)
    if sorted(dict_keys) != sorted(KEYS_CHECK):
        job_master_error_flag.append('key_error')
        return job_master_error_flag
for row in reader:
update_jobmaster = ytk_job_master.query.filter_by(part_id=row['Part Id'], sample_number=row['Sample Number'],
unique_job_id=row['Unique Job Id']).first()
if update_jobmaster and row['Unique Job Id'] == jobname:
print("True")
update_jobmaster.job_master_well_id = row['Job Master Well ID']
update_jobmaster.job_master_barcode = row['Job Master Barcode']
update_jobmaster.uploaded_filename = filename_for_db
db.session.commit()
else:
print('False')
job_master_error_flag.append([row['Part Id'], row['Sample Number'], row['Unique Job Id']])
return job_master_error_flag
def upload_jobmaster_processed2_ytk(filename, jobname, filename_for_db):
KEYS_CHECK = ['Part Id', 'Sample Number', 'Storage Location Id', 'Storage Plate Barcode', 'Unique Job Id',
'Job Master Barcode', 'Job Master Well ID']
job_master_error_flag = []
dict_keys, reader = upload_csv_check_keys(filename)
    if sorted(dict_keys) != sorted(KEYS_CHECK):
        job_master_error_flag.append('key_error')
        return job_master_error_flag
for row in reader:
update_jobmaster = ytk_job_master_level2.query.filter_by(part_id=row['Part Id'],
sample_number=row['Sample Number'],
unique_job_id=row['Unique Job Id'], ).first()
if update_jobmaster and row['Unique Job Id'] == jobname:
update_jobmaster.job_master2_well_id = row['Job Master Well ID']
update_jobmaster.job_master2_barcode = row['Job Master Barcode']
update_jobmaster.uploaded_filename = filename_for_db
db.session.commit()
else:
print('False')
job_master_error_flag.append([row['Part Id'], row['Sample Number'], row['Unique Job Id']])
return job_master_error_flag
def upload_uploadicelink_to_table(filename):
# this function takes the entries file from ice and adds it to the sequence space in the parts database
all_ok = True
PART_KEYS_CHECK = ['Part ID', 'Name', 'Alias']
dict_keys, reader = upload_csv_check_keys(filename)
# check dict keys match PART_KEYS_CHECK
if sorted(dict_keys) == sorted(PART_KEYS_CHECK):
for row in reader:
find_part = PartsTable.query.filter_by(part_id=row['Name']).first()
if find_part:
find_part.sequence = row['Part ID']
db.session.add(find_part)
else:
all_ok = False
else:
all_ok = False
if all_ok == True:
db.session.commit()
else:
db.session.remove()
return all_ok
def search_compounds():
return PartsTable.query.distinct(PartsTable.project_number)
|
[
"dave.mcclymo@gmail.com"
] |
dave.mcclymo@gmail.com
|
766dc1460335ad2b4aed0afa0307aaee0bb4c0c6
|
bb49ca8887b7a6b8867165ab26b045f1d5bcf7a7
|
/leet_code/easy/leet_rotate.py
|
73b73359f15ee903dd3bea66cdd4651e9bbc97b7
|
[] |
no_license
|
albusdunble1/Leetcode
|
118a91e164e17fd54d794998e8cccd73f9f39289
|
980d22f3c8ee6487175252eeba5f4ceb6152c294
|
refs/heads/master
| 2020-06-25T09:35:16.729899
| 2019-11-01T14:09:06
| 2019-11-01T14:09:06
| 199,273,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
class Solution(object):
def rotatedDigits(self, N):
"""
:type N: int
:rtype: int
"""
invalid = ['3','4','7']
special = ['2','5','6','9']
output = 0
for i in range(N):
v_found = False
for num in str(i+1):
if num in invalid:
v_found = False
break
elif num in special:
v_found = True
if v_found:
output += 1
return output
#https://leetcode.com/problems/rotated-digits/
|
[
"weisheng_rules@hotmail.com"
] |
weisheng_rules@hotmail.com
|
d6370664c9b03230710844b5cbacf4874d0f3bae
|
4f349c31220d839aaaf5d850069ad315b8d67aef
|
/withdraw_universities/urls.py
|
355abaf5511fe20cc8ffb57bcf8f7326bf2cc6fd
|
[] |
no_license
|
DiZiNnEs/withdraw-universities
|
beec276aae8ab76f640416639f4f33a498c4da61
|
f18645bed8ef0e67843ad64c6eb003ddfae61dcb
|
refs/heads/main
| 2023-01-03T20:30:47.463936
| 2020-11-03T18:11:22
| 2020-11-03T18:11:22
| 307,993,570
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 814
|
py
|
"""withdraw_universities URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('university.urls')),
]
|
[
"dizinnes228@gmail.com"
] |
dizinnes228@gmail.com
|
1386635f3be2709e50108baa9d6570782444859d
|
dae76f7fe5e99ff6b323604710786da93e0c9bbb
|
/myweb/settings.py
|
c9f53dcd3736652d28bf40e09091fe7763064645
|
[] |
no_license
|
Jaxien/myweb
|
7198fd1695e25ef028182fb174086547d6768e6c
|
8908b703547797c92ca777864a274deb3212d71b
|
refs/heads/master
| 2020-12-15T12:04:06.740495
| 2020-01-20T12:29:37
| 2020-01-20T12:29:37
| 235,087,526
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,706
|
py
|
"""
Django settings for myweb project.
Generated by 'django-admin startproject' using Django 3.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h3wp37ot8$&vgr#$^8%pepa#v8=k^w&123ydzj(f5894j7agbo'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'localhost', '.marss.icu']
# Application definition
PAGINATION_SETTINGS = {
    'PAGE_RANGE_DISPLAYED': 4, # total number of pages to show around the current page in the pagination bar (split evenly on both sides, so use an even number)
    'MARGIN_PAGES_DISPLAYED': 2, # number of pages shown at the start and the end of the pagination bar
    'SHOW_FIRST_PAGE_WHEN_INVALID': True, # show the first page when a non-existent page is requested
}
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
'comments.apps.CommentsConfig',
'pure_pagination'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myweb.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myweb.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'myweb',
'USER': 'root',
'PASSWORD': 'zhangyang123',
'HOST': 'localhost',
'PORT': '3306'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"1076440430@qq.com"
] |
1076440430@qq.com
|
7e8c8bbb115ec5384d4fb25f7c18d1ffbcfe4bcb
|
a31a2a6d1bbb34db890df93f2398d4295d229ef6
|
/index/migrations/0010_auto_20190520_0929.py
|
7a33bf18bbe7607d23f8295f2d9e910711a6ebe5
|
[] |
no_license
|
anandrathidev/propsearch
|
31f915230ffb122dd49f3ee75e9f516091ca47bf
|
0aaa7174a5a0f78e26834887fd963c5ea6e1bd04
|
refs/heads/master
| 2020-05-22T03:53:22.230389
| 2019-06-01T08:46:24
| 2019-06-01T08:46:24
| 186,221,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
# Generated by Django 2.0 on 2019-05-20 08:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('index', '0009_auto_20190520_0923'),
]
operations = [
migrations.AlterField(
model_name='poster',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"anandrathi.dev@gmail.com"
] |
anandrathi.dev@gmail.com
|
9b23207f97a22370cd8f9bdf558239b8ffdc88bc
|
627250a71e879fd5ff6a1c215333d1559b28f878
|
/vtt2srt.py
|
728473e3a1299171765480b4dd79f8611dcfb415
|
[
"Apache-2.0"
] |
permissive
|
KorvinSilver/vtt2srt
|
c6261317e43d2538030b7729c8d3f9986cb124af
|
18c3fdd95c4435930d4acb3218123f9bd90f429b
|
refs/heads/master
| 2021-05-05T01:20:57.485291
| 2018-01-30T21:07:00
| 2018-01-30T21:07:00
| 119,588,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,169
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Project: WebVTT Document to SubRib Subtitle converter
Copyright 2018, Korvin F. Ezüst
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import re
import sys
__author__ = "Korvin F. Ezüst"
__copyright__ = "Copyright (c) 2018, Korvin F. Ezüst"
__license__ = "Apache 2.0"
__version__ = "1.0"
__email__ = "dev@korvin.eu"
__status__ = "Working"
parser = argparse.ArgumentParser(
description="Convert a WebVTT Document to a SubRip Subtitle")
parser.add_argument("file", help="vtt file to convert")
args = parser.parse_args()
# Read VTT file
with open(args.file) as f:
lines = f.readlines()
# Find a dot in a string matching e.g. 12:34:56.789
# and replace it with a comma
regex = r"(?<=\d\d:\d\d:\d\d)\.(?=\d\d\d)"
lines = [re.sub(regex, ",", i.rstrip()) for i in lines]
# Find everything in line after a string matching e.g. 12:34:56,789
# and delete it
regex = r"(?<=\d\d:\d\d:\d\d\,\d\d\d --> \d\d:\d\d:\d\d\,\d\d\d).*"
lines = [re.sub(regex, "", i.rstrip()) for i in lines]
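# Illustrative effect of the two substitutions above on a hypothetical cue line:
#   "00:00:01.000 --> 00:00:04.000 position:10% align:start"
# becomes
#   "00:00:01,000 --> 00:00:04,000"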
# Replace multiple blank lines with a single blank line
sbl = []
for i in range(len(lines[:-1])):
if lines[i] == "" and lines[i+1] == "":
continue
else:
sbl.append(lines[i])
if lines[-1] != "":
sbl.append(lines[-1])
# Place a number before each time code (number empty lines)
enum = enumerate(sbl)
next(enum)
nel = [str(i) or "\n" + str(next(enum)[0]) for i in sbl]
# Remove WebVTT headers if any
for i, item in enumerate(nel):
if item == "\n1":
break
# Write SRT file
with open(args.file + ".srt", "w") as f:
f.write("\n".join(nel[i:]))
print("SRT file created:", args.file + ".srt")
|
[
"dev@korvin.eu"
] |
dev@korvin.eu
|
20e932bdbe6e53b65a545e07fa0ff488f64d3406
|
5c75a5827f39cfeec64cc2518d670019a56f22e2
|
/slam/Python_code/create_sync.py
|
bd49f08e32602a9e954bef0af24d035e9737be24
|
[] |
no_license
|
vascofaraujo/uav-mapping-venv
|
ac439672500b83c38d4aeaceece069ac6f6f5cff
|
e81ba7ff02b41cb2726bbb44c53d1d721aa59868
|
refs/heads/main
| 2023-07-11T11:50:31.951327
| 2021-08-13T22:46:29
| 2021-08-13T22:46:29
| 386,804,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,837
|
py
|
import sys
import os
import numpy
def read_file_list(filename):
"""
    Description
    Reads a file such as rgb.txt or depth.txt and returns its timestamped entries,
    so that each timestamp can later be matched with the closest one.
    File format:
    Each line is "stamp d1 d2 d3 ...", where stamp denotes the timestamp (to be matched)
    and "d1 d2 d3 ..." is arbitrary data
"""
file = open(filename)
data = file.read()
lines = data.replace(","," ").replace("\t"," ").split("\n")
list = [[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"]
list = [(float(l[0]),l[1:]) for l in list if len(l)>1]
return dict(list)
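# Illustrative example (hypothetical line, not from the dataset): an input line
#   "1305031102.175304 rgb/1305031102.175304.png"
# becomes the dict entry {1305031102.175304: ['rgb/1305031102.175304.png']}.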
path=os.getcwd() + "/rgbd_dataset_freiburg2_pioneer_slam/"
first_list = read_file_list(path + 'rgb.txt')
aux1=list(first_list.keys())
del first_list
second_list = read_file_list(path + 'depth.txt')
aux2=list(second_list.keys())
del second_list
offset=0
max_difference=0.02
potential_matches = [(abs(a - (b + offset)),a, b)
for a in aux1
for b in aux2
if abs(a - (b + offset)) < max_difference]
potential_matches.sort()
matches=[]
for diff, a, b in potential_matches:
if a in aux1 and b in aux2:
aux1.remove(a)
aux2.remove(b)
matches.append((a, b))
matches.sort()
output_file="sync.txt"
f=open(output_file,'w')
for i in range(len(matches)):
aux3=str(matches[i][0])
aux4=str(matches[i][1])
while(1):
if(len(aux3) <17):
aux3= aux3 + '0'
else:
break
while(1):
if(len(aux4) <17):
aux4= aux4 + '0'
else:
break
f.write(aux3+ " " +aux4 + "\n")
f.close()
print("Matches are written into filename " + output_file)
|
[
"renato.loureiro@tecnico.ulisboa.pt"
] |
renato.loureiro@tecnico.ulisboa.pt
|
77d02e1aa64689aee2757324174ae8f04bc5ba2b
|
8173d7fc2ed474e85c064461700562b2260166a5
|
/Face Detection and Attendence Project/Attendence Using Face Detection.py
|
4f1644b6816447f4cb914bdfbec1ab473b8275f0
|
[] |
no_license
|
TusharDimri/Face-Detection-and-Attendence
|
f94f5ea57219952a2994fee59a63d6019bf43342
|
374e096c81f454725631685a0474f00b8a17284d
|
refs/heads/master
| 2023-01-23T11:22:00.948373
| 2020-11-29T12:34:18
| 2020-11-29T12:34:18
| 316,947,376
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,155
|
py
|
import cv2
import numpy as np
import face_recognition
import os
path = "Attendence Images"
images = []
classNames = []
mylist = os.listdir(path)
# print(mylist)
for cl in mylist:
current_Image = cv2.imread(f'{path}/{cl}')
images.append(current_Image)
classNames.append(os.path.splitext(cl)[0])
# print(classNames)
# Now we will create a function that will compute the encodings for us
def find_encodings(images):
encodeList = []
for img in images:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # Converting the Image from BGR to RGB
encode = face_recognition.face_encodings(img)[0]
encodeList.append(encode)
return encodeList
encodings_known = find_encodings(images)
# print(len(encodings_known))
print("Encoding Complete")
# Now we will take images from the webcam and check whether they match any of the known images
cap = cv2.VideoCapture(0)
while True:
success, image = cap.read()
    imageSmall = cv2.resize(image, (0,0), None, 0.25, 0.25) # (0, 0) means we have not specified an output pixel size
# We will reduce the image size to one fourth its original size to speed up the process
imageSmall = cv2.cvtColor(imageSmall, cv2.COLOR_BGR2RGB)
facesLoc= face_recognition.face_locations(imageSmall)
    # We find the locations of the faces, as there may be more than one face in the webcam image
encodingsFace = face_recognition.face_encodings(imageSmall, facesLoc)
for encodeFace, faceLoc in zip(encodingsFace, facesLoc):
matches = face_recognition.compare_faces(encodings_known, encodeFace)
faceDis = face_recognition.face_distance(encodings_known, encodeFace)
# print(faceDis, matches)
        # compare_faces checks the webcam encoding against every known encoding and returns a list of True/False values
        # face_distance returns the distance between the webcam encoding and each known encoding
        # Now we find the lowest distance (where matches is True) to decide whether the webcam image matches any known image
matchIndex = np.argmin(faceDis)
if matches[matchIndex]:
name = classNames[matchIndex].capitalize()
print(name)
y1, x2, y2, x1 = faceLoc
y1, x2, y2, x1 = y1*4, x2*4, y2*4, x1*4
cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.rectangle(image, (x1, y2 - 35), (x2 , y2), (0, 255, 0), cv2.FILLED)
cv2.putText(image, name, (x1+6, y2-6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
cv2.imshow('Webcam', image)
cv2.waitKey(1)
else:
name = 'Unknown'
print(name)
y1, x2, y2, x1 = faceLoc
y1, x2, y2, x1 = y1*4, x2*4, y2*4, x1*4
cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.rectangle(image, (x1, y2 - 35), (x2 , y2), (0, 255, 0), cv2.FILLED)
cv2.putText(image, name, (x1+6, y2-6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
cv2.imshow('Webcam', image)
cv2.waitKey(1)
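# --- Added sketch for clarity (not part of the original script) ---
# The matching step above reduces to: compare_faces yields one True/False per known
# encoding, face_distance yields one distance per known encoding, and the smallest
# distance whose flag is True gives the identity. Roughly:
#   faceDis = face_recognition.face_distance(encodings_known, encodeFace)
#   matchIndex = np.argmin(faceDis)
#   name = classNames[matchIndex] if matches[matchIndex] else "Unknown"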
|
[
"tushar.dimri22@gmail.com"
] |
tushar.dimri22@gmail.com
|
49e1a325afdc513a993dda6191c506211225dc7e
|
1921cd057e809d4f847f75ea5a8394518cfc3a19
|
/venv/bin/pip3.6
|
8adfc1010e0c2ad1c849c2463cfbecdf4895bf86
|
[] |
no_license
|
AshwinChandlapur/Learn_Flask
|
7820da89c0f101d97405ddac0c02bd02000d8a40
|
2294c9a197b5ab7e37254f6c3e8c475e45367b0c
|
refs/heads/master
| 2020-04-08T20:56:30.119416
| 2018-11-29T21:48:50
| 2018-11-29T21:48:50
| 159,722,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
6
|
#!/Users/ashwinchandlapur/Desktop/Learn_Flask/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.6'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.6')()
)
|
[
"ashwinchandlapur@gmail.com"
] |
ashwinchandlapur@gmail.com
|
fb9b679a11eb9c744907db626a9f6f8e52a5756a
|
b9db91bdb30ba99aad8bbea251e5e1e8c2a7fa45
|
/opt/src/aoj/itp1/7_b.py
|
839084e701a5b92b98f95369fb7f3d92fbcc2450
|
[] |
no_license
|
jacoloves/python_tool
|
682c3a91b535f15f1f8c9299e9b4c9ccbd5eea79
|
93ba5de17a727d6ccf9c67e4bca37ea502d06e5d
|
refs/heads/master
| 2021-03-01T04:25:49.581952
| 2021-01-27T13:52:50
| 2021-01-27T13:52:50
| 245,753,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
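# Added note: this script reads pairs (n, x) until "0 0" is entered and, for each pair,
# counts the combinations of three distinct integers between 1 and n that sum to x.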
arr = []
while True:
n, x = map(int, input().split())
if n == 0 and x == 0:
break
arr.append([n, x])
for i in range(len(arr)):
n = arr[i][0]
x = arr[i][1]
num = 0
for j in range(1, n-1):
x2 = x-j
for k in range(j+1, n):
x3 = x2-k
for l in range(k+1, n+1):
x4 = x3-l
if x4 == 0:
num += 1
print(num)
|
[
"5511068t@gmail.com"
] |
5511068t@gmail.com
|
4f1873b7edecc8b3be6649316dcba834b743f50e
|
de7127deabd34e17473fb94f48e033f482535ca7
|
/virt/bin/markdown2
|
2f0becd3611f94bc2b1edf4b5c86a622fa7aa217
|
[
"MIT"
] |
permissive
|
annstella/One_Min_Pitch
|
a50d855423ad02fb46e8b6765c16cbf9d7a6e6ff
|
86cd2426061df502adaffbf544589d54653df00c
|
refs/heads/master
| 2020-03-28T05:54:11.687201
| 2018-09-17T08:00:08
| 2018-09-17T08:00:08
| 147,802,293
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
#!/home/annstella/Documents/core/One_Min_Pitch/virt/bin/python3.6
import sys
from os.path import join, dirname, exists
# Use the local markdown2.py if we are in the source tree.
source_tree_markdown2 = join(dirname(__file__), "..", "lib", "markdown2.py")
if exists(source_tree_markdown2):
sys.path.insert(0, dirname(source_tree_markdown2))
try:
from markdown2 import main
finally:
del sys.path[0]
else:
from markdown2 import main
if __name__ == "__main__":
sys.exit( main(sys.argv) )
|
[
"annstellawangui@gmail.com"
] |
annstellawangui@gmail.com
|
|
d1c08dba97980d8459ea9694cb5106fb768f6b2c
|
d3c2307c4aa6d7af1902b073a19c9fa245ccf386
|
/Part2/reformat.py
|
a48f97b05a4ebbda61287a9bbe2802e156edf13a
|
[] |
no_license
|
cadams22/Data-Mining-WEKA-Project
|
366d4a4f59e23ffb4a606ab86f96e8cdbd5bcdb8
|
773f67117eb3bd5a590abf5be2f463518bf41350
|
refs/heads/master
| 2021-01-02T09:13:54.201228
| 2013-12-23T05:44:25
| 2013-12-23T05:44:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
import csv
def format():
f = open('answers.csv', 'r')
fc = csv.reader(f)
w = open('maddie.csv', 'w')
wc = csv.writer(w)
wc.writerow(['id ', ' friends'])
count = 5001
for line in fc:
for st in line:
for c in range(0, len(st)-1):
if st[c] == ':':
print (st[c+1])
wc.writerow([count, str(st[c+1])])
count = count + 1
break
print(count)
format()
|
[
"courtney@Courtney-Adams-Lenovo-IdeaPad-P400-Touch.(none)"
] |
courtney@Courtney-Adams-Lenovo-IdeaPad-P400-Touch.(none)
|
cdd51f82c64b8411ee0fda99daf82f9125ad9d10
|
0673b2d9d1af1e55fd16064bc08989b58a07df5b
|
/examples/webclassifier.py
|
2eb10e1ccf17e67862944388594032c068fa1980
|
[] |
no_license
|
daniele-sartiano/nlptf
|
ba5faa3334c1cb89f960ba9fb270a4cf4422e55b
|
7accabe171d43458ca5555d22c188a6ac59f0a04
|
refs/heads/master
| 2021-01-15T15:25:30.755238
| 2016-06-17T16:53:25
| 2016-06-17T16:53:25
| 48,031,442
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,782
|
py
|
#!/usr/bin/env python
import sys
import os
sys.path.append('../nlptf')
import argparse
from nlptf.reader import Word2VecReader, WebContentReader
from nlptf.models.estimators import WordEmbeddingsEstimator, ConvWordEmbeddingsEstimator, RNNWordEmbeddingsEstimator, MultiRNNWordEmbeddingsEstimator, WordEmbeddingsEstimatorNC
from nlptf.extractors import FieldExtractor, CapitalExtractor
from nlptf.classifier.classifier import WordEmbeddingsClassifier
import tensorflow as tf
from sklearn.metrics import f1_score
ESTIMATORS = {
'conv': WordEmbeddingsEstimatorNC,
}
OPTIMIZERS = {
'adam': tf.train.AdamOptimizer,
'grad': tf.train.GradientDescentOptimizer
}
def main():
parser = argparse.ArgumentParser(description='Named Entity Recognition with TensorFlow')
subparsers = parser.add_subparsers()
parser_train = subparsers.add_parser('train')
parser_train.set_defaults(which='train')
parser_train.add_argument('-e', '--epochs', help='epochs number', type=int, required=True)
parser_train.add_argument('-l', '--learning-rate', help='learning rate', type=float, required=True)
parser_train.add_argument('-o', '--optimizer', help='optimizer', type=str, required=True, choices=OPTIMIZERS.keys())
parser_tag = subparsers.add_parser('tag')
parser_tag.set_defaults(which='tag')
parser_collect_data = subparsers.add_parser('collect')
parser_collect_data.set_defaults(which='collect')
parser_collect_data.add_argument('-d', '--directory', help='directory', type=str, required=True)
parser_collect_data.add_argument('-i', '--input-file', help='input file', type=str, required=False)
parser_score = subparsers.add_parser('score')
parser_score.set_defaults(which='score')
parser_score.add_argument('-p', '--predicted', help='predicted file', type=str, required=True)
parser_score.add_argument('-g', '--gold', help='gold file', type=str, required=True)
# common arguments
for p in (parser_train, parser_tag):
p.add_argument('-m', '--model', help='model-file', type=str, required=True)
p.add_argument('-r', '--reader-file', help='reader file', type=str, required=True)
p.add_argument('-w', '--word-embeddings', help='word embeddings', type=str, required=False)
p.add_argument('-et', '--word-embeddings-type', help='word embeddings type', type=str, required=False)
p.add_argument('-i', '--input-file', help='input file', type=str, required=False)
p.add_argument('-t', '--type', help='estimator type', type=str, required=True, choices=ESTIMATORS.keys())
p.add_argument('-nl', '--num-layers', help='number layers for multi rnn estimator', type=int, required=False)
p.add_argument('-f', '--feats-conf', help='add the feats in the conf number', type=int, required=False)
args = parser.parse_args()
try:
infile = open(args.input_file) if args.input_file is not None else sys.stdin
except:
pass
if args.which == 'collect':
with infile as f:
for line in f:
domain, agro, categories = line.strip().split('\t')
# TODO: skipping multi-categories
if ',' in categories:
continue
cfile = os.path.join(args.directory, domain[0], domain[1], domain[2], domain, 'content.txt')
try:
content = open(cfile).read()
except:
print >> sys.stderr, '%s not found in %s' % (domain, cfile)
continue
words = ' '.join([word.strip() for word in content.split()])
if words:
print '%s\t%s\t%s' % (domain, categories, words)
elif args.which == 'train':
reader = WebContentReader(infile, separator='\t')
params = {
'epochs': args.epochs,
'learning_rate': args.learning_rate,
'name_model': args.model,
'word_embeddings_file': args.word_embeddings,
'reader_file': args.reader_file,
'optimizer': OPTIMIZERS[args.optimizer],
'num_layers': args.num_layers
}
classifier = WordEmbeddingsClassifier(reader, [], ESTIMATORS[args.type], **params)
classifier.train()
elif args.which == 'tag':
lines = sys.stdin.readlines()
        reader = WebContentReader(lines, separator='\t')
extractors = []
params = {
'name_model': args.model,
'word_embeddings_file': args.word_embeddings,
'reader_file': args.reader_file,
'num_layers': args.num_layers
}
classifier = WordEmbeddingsClassifier(reader, extractors, ESTIMATORS[args.type], **params)
predicted = classifier.predict()
print >> sys.stderr, 'l predicted', len(predicted), 'l lines', len(lines)
labels_idx_rev = {v:k for k,v in reader.vocabulary[reader.getPosition('LABEL')].items()}
i = 0
for line in lines:
line = line.strip()
if line:
print '%s\t%s' % (line.split()[0], labels_idx_rev[predicted[i]])
i += 1
else:
print
elif args.which == 'score':
gold_dict = {}
for line in open(args.gold):
domain, label = line.strip().split('\t')[:2]
gold_dict[domain] = label
y_true = []
y_pred = []
for line in open(args.predicted):
domain, label = line.strip().split('\t')[:2]
y_pred.append(int(label))
y_true.append(int(gold_dict[domain]))
print f1_score(y_true, y_pred, average='macro')
if __name__ == '__main__':
main()
|
[
"daniele.sartiano@gmail.com"
] |
daniele.sartiano@gmail.com
|
0fb8aecc1e085928517bf48eb9143d99c4f68e6e
|
77e325cbf2530a4c8d579f68d2e3faad5bbaf1b6
|
/CFModel.py
|
e5d6686e21773066596b5653cbb3ffaef12ccd26
|
[
"MIT"
] |
permissive
|
v-mostafapour/fb15k-akbc
|
4a37b0f8e85c7431d8dd3af7de46dfe06fa64f04
|
c38588617f0ba9629960ea919f35852f0d2e4e3d
|
refs/heads/master
| 2021-01-02T23:00:22.895166
| 2016-08-30T07:09:08
| 2016-08-30T07:09:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,022
|
py
|
# CFModel.py
#
# A simple implementation of matrix factorization for collaborative filtering
# expressed as a Keras Sequential model. This code is based on the approach
# outlined in [Alkahest](http://www.fenris.org/)'s blog post
# [Collaborative Filtering in Keras](http://www.fenris.org/2016/03/07/collaborative-filtering-in-keras).
#
# License
# - MIT.
import numpy as np
from keras.layers import Embedding, Reshape, Merge
from keras.models import Sequential
class CFModel(Sequential):
def __init__(self, n_users, m_items, k_factors, **kwargs):
P = Sequential()
P.add(Embedding(n_users, k_factors, input_length=1))
P.add(Reshape((k_factors,)))
Q = Sequential()
Q.add(Embedding(m_items, k_factors, input_length=1))
Q.add(Reshape((k_factors,)))
super(CFModel, self).__init__(**kwargs)
self.add(Merge([P, Q], mode='dot', dot_axes=1))
def rate(self, user_id, item_id):
return self.predict([np.array([user_id]), np.array([item_id])])[0][0]
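# Minimal usage sketch (added; assumes the old Keras 1.x Sequential/Merge API used above,
# with illustrative sizes and hyperparameters that are not taken from the original post):
#   model = CFModel(n_users=1000, m_items=1700, k_factors=20)
#   model.compile(loss='mse', optimizer='adamax')
#   model.fit([user_ids, item_ids], ratings, nb_epoch=10, validation_split=0.1)
#   model.rate(42, 7)   # predicted rating of item 7 by user 42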
|
[
"bradley.p.allen@gmail.com"
] |
bradley.p.allen@gmail.com
|
768c450ca224720b22ca1f7dd6fe8defdb3d29a9
|
a8d08c8fc600ac98e6e3007631d77fe47e68c5c7
|
/updateTaxID2.py
|
44d018842ce34df7adf5db97ef5b2cb29ef2d4c2
|
[] |
no_license
|
jsh58/metagen
|
55ce824907892d760ab14a6a2fe01c4468312c43
|
85f0184053b18017c212486b78a2d6af8c812f72
|
refs/heads/master
| 2018-10-06T15:50:43.071624
| 2018-08-20T14:03:43
| 2018-08-20T14:03:43
| 120,953,077
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,568
|
py
|
#!/usr/bin/python
# JMG 12/2017
# Update merged and deleted taxonomic IDs.
import sys
import gzip
def openRead(filename):
'''
Open filename for reading. '-' indicates stdin.
'.gz' suffix indicates gzip compression.
'''
if filename == '-':
return sys.stdin
try:
if filename[-3:] == '.gz':
f = gzip.open(filename, 'rb')
else:
f = open(filename, 'rU')
except IOError:
sys.stderr.write('Error! Cannot open %s for reading\n' % filename)
sys.exit(-1)
return f
def openWrite(filename):
'''
Open filename for writing. '-' indicates stdout.
'.gz' suffix indicates gzip compression.
'''
if filename == '-':
return sys.stdout
try:
if filename[-3:] == '.gz':
f = gzip.open(filename, 'wb')
else:
f = open(filename, 'w')
except IOError:
sys.stderr.write('Error! Cannot open %s for writing\n' % filename)
sys.exit(-1)
return f
def main():
args = sys.argv[1:]
if len(args) < 4:
sys.stderr.write('Usage: python updateTaxID2.py <mergedIDs> ' \
+ '<deletedIDs> <out> [<in>]+\n')
sys.exit(-1)
# load merged taxIDs to dict
d = dict()
f = openRead(args[0])
for line in f:
spl = line.rstrip().split('|')
if len(spl) < 2:
sys.stderr.write('Error! Poorly formatted record in merged file\n')
sys.exit(-1)
d[spl[0].strip()] = spl[1].strip()
if f != sys.stdin:
f.close()
# load deleted taxIDs to dict
f2 = openRead(args[1])
for line in f2:
spl = line.rstrip().split('|')
d[spl[0].strip()] = '0' # assign deleted to tax ID '0'
  if f2 != sys.stdin:
f2.close()
# open output file
merge = printed = 0
fOut = openWrite(args[2])
# parse input files, write output on the fly
for arg in args[3:]:
fIn = openRead(arg)
# parse header
accIdx = taxIdx = -1
spl = fIn.readline().rstrip().split('\t')
try:
accIdx = spl.index('accession.version')
taxIdx = spl.index('taxid')
except ValueError:
sys.stderr.write('Error! Cannot find header value '
+ '(\'accession.version\' or \'taxid\')')
sys.exit(-1)
# parse input file, produce output
for line in fIn:
spl = line.rstrip().split('\t')
if spl[taxIdx] in d:
spl[taxIdx] = d[spl[taxIdx]]
merge += 1
fOut.write(spl[accIdx] + '\t' + spl[taxIdx] + '\n')
printed += 1
if fIn != sys.stdin:
fIn.close()
fOut.close()
sys.stderr.write('Records written: %d\n' % printed)
sys.stderr.write(' Updated: %d\n' % merge)
if __name__ == '__main__':
main()
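# Added note (illustrative, assuming NCBI-style dump files): a merged.dmp line such as
#   "12\t|\t74109\t|"
# remaps old taxID '12' to '74109', while IDs listed in the deleted file are remapped to '0'.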
|
[
"jsh58@wildcats.unh.edu"
] |
jsh58@wildcats.unh.edu
|
d18985bf92c950ffcf456b5ef4e4d773d7f1208e
|
36dbd31536a4084db83d12b2bd12a9f22f4da636
|
/geomdl/elements.py
|
952f45421aee3bd3bc94e18a97dc66b2d65fa7a8
|
[
"Python-2.0",
"MIT"
] |
permissive
|
Hgser/NURBS-Python
|
75d38a21721d9afd3d5f8491bf8ba56d71a2285a
|
ced4debdf4fc13afce9b830a2962da2789e5c45b
|
refs/heads/master
| 2020-04-27T00:42:17.632484
| 2019-02-28T05:21:20
| 2019-02-28T05:21:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,863
|
py
|
"""
.. module:: elements
:platform: Unix, Windows
:synopsis: Provides classes representing geometry and topology entities
.. moduleauthor:: Onur Rauf Bingol <orbingol@gmail.com>
"""
import abc
import copy
import six
from ._utilities import export
@six.add_metaclass(abc.ABCMeta)
class AbstractEntity(object):
""" Abstract base class for all geometric entities. """
def __init__(self, *args, **kwargs):
self._id = int(kwargs.get('id', 0)) # element identifier
self._data = [] # data storage array
def __copy__(self):
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
def __deepcopy__(self, memo):
# Don't copy self reference
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
# Copy all other attributes
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
return result
def __len__(self):
return len(self._data)
def __getitem__(self, key):
return self._data[key]
def __iter__(self):
return iter(self._data)
def __reversed__(self):
return reversed(self._data)
@property
def id(self):
""" Identifier for the geometric entity.
It must be an integer number, otherwise the setter will raise a *ValueError*.
:getter: Gets the identifier
:setter: Sets the identifier
:type: int
"""
return self._id
@id.setter
def id(self, value):
if not isinstance(value, int):
raise ValueError("Identifier value must be an integer")
self._id = value
@export
class Vertex(AbstractEntity):
""" 3-dimensional Vertex entity with spatial and parametric position. """
def __init__(self, *args, **kwargs):
super(Vertex, self).__init__(*args, **kwargs)
self.data = [float(arg) for arg in args] if args else [0.0, 0.0, 0.0] # spatial coordinates
self._uv = [0.0, 0.0] # parametric coordinates
self._inside = False # flag for trimming
def __str__(self):
return "Vertex " + str(self._id) + " " + str(self._data)
__repr__ = __str__
def __cmp__(self, other):
return (self.id > other.id) - (self.id < other.id)
def __eq__(self, other):
return self.id == other.id
def __ne__(self, other):
return self.id != other.id
def __lt__(self, other):
return self.id < other.id
def __le__(self, other):
return self.id <= other.id
def __gt__(self, other):
return self.id > other.id
def __ge__(self, other):
return self.id >= other.id
def __nonzero__(self):
# For Python 2 compatibility
return self.__bool__()
def __bool__(self):
# For Python 3 compatibility
return self.inside
def __add__(self, other):
if not isinstance(other, self.__class__):
raise TypeError("Can only add Vertex objects")
res_data = [0.0 for _ in range(3)]
for idx in range(3):
res_data[idx] = self.data[idx] + other.data[idx]
res_uv = [0.0 for _ in range(2)]
for idx in range(2):
res_uv[idx] = self.uv[idx] + other.uv[idx]
res_val = self.__class__()
res_val.data = res_data
res_val.uv = res_uv
return res_val
def __sub__(self, other):
if not isinstance(other, self.__class__):
raise TypeError("Can only subtract Vertex objects")
res_data = [0.0 for _ in range(3)]
for idx in range(3):
res_data[idx] = self.data[idx] - other.data[idx]
res_uv = [0.0 for _ in range(2)]
for idx in range(2):
res_uv[idx] = self.uv[idx] - other.uv[idx]
res_val = self.__class__()
res_val.data = res_data
res_val.uv = res_uv
return res_val
def __div__(self, other):
return self.__truediv__(other)
def __truediv__(self, other):
if not isinstance(other, (float, int)):
raise TypeError("Can only divide by a float or an integer")
res_data = [0.0 for _ in range(3)]
for idx in range(3):
res_data[idx] = self.data[idx] / float(other)
res_uv = [0.0 for _ in range(2)]
for idx in range(2):
res_uv[idx] = self.uv[idx] / float(other)
res_val = self.__class__()
res_val.data = res_data
res_val.uv = res_uv
return res_val
@property
def x(self):
""" x-component of the vertex
:getter: Gets the x-component of the vertex
:setter: Sets the x-component of the vertex
:type: float
"""
return self._data[0]
@x.setter
def x(self, value):
self._data[0] = float(value)
@property
def y(self):
""" y-component of the vertex
:getter: Gets the y-component of the vertex
:setter: Sets the y-component of the vertex
:type: float
"""
return self._data[1]
@y.setter
def y(self, value):
self._data[1] = float(value)
@property
def z(self):
""" z-component of the vertex
:getter: Gets the z-component of the vertex
:setter: Sets the z-component of the vertex
:type: float
"""
return self._data[2]
@z.setter
def z(self, value):
self._data[2] = float(value)
@property
def u(self):
""" Parametric u-component of the vertex
:getter: Gets the u-component of the vertex
:setter: Sets the u-component of the vertex
:type: float
"""
return self._uv[0]
@u.setter
def u(self, value):
self._uv[0] = float(value)
@property
def v(self):
""" Parametric v-component of the vertex
:getter: Gets the v-component of the vertex
:setter: Sets the v-component of the vertex
:type: float
"""
return self._uv[1]
@v.setter
def v(self, value):
self._uv[1] = float(value)
@property
def uv(self):
""" Parametric (u,v) pair of the vertex
:getter: Gets the uv-component of the vertex
:setter: Sets the uv-component of the vertex
:type: list, tuple
"""
return tuple(self._uv)
@uv.setter
def uv(self, value):
if not isinstance(value, (list, tuple)):
raise TypeError("UV data input must be a list or tuple")
if len(value) != 2:
raise ValueError("UV must have 2 components")
self._uv = list(value)
@property
def inside(self):
""" Inside-outside flag
:getter: Gets the flag
:setter: Sets the flag
:type: bool
"""
return self._inside
@inside.setter
def inside(self, value):
self._inside = bool(value)
@property
def data(self):
""" (x,y,z) components of the vertex.
:getter: Gets the 3-dimensional components
:setter: Sets the 3-dimensional components
"""
return tuple(self._data)
@data.setter
def data(self, value):
if not isinstance(value, (list, tuple)):
raise TypeError("Vertex data must be a list or tuple")
if len(value) != 3:
raise ValueError("Vertex can only store 3 components")
# Convert to float
self._data = [float(val) for val in value]
@export
class Triangle(AbstractEntity):
""" Triangle entity which represents a triangle composed of vertices. """
def __init__(self, *args, **kwargs):
super(Triangle, self).__init__(*args, **kwargs)
self._inside = False # flag for trimming
if args:
self.add_vertex(*args)
def __str__(self):
return "Triangle " + str(self._id)
__repr__ = __str__
@property
def vertices(self):
""" Vertices of the triangle
:getter: Gets the list of vertices
:type: tuple
"""
return tuple(self._data)
@property
def vertices_raw(self):
""" Vertices which generates a closed triangle
Adds the first vertex as a last element of the return value (good for plotting)
:getter: Gets the list of vertices
:type: list
"""
v_raw = []
for v in self._data:
v_raw.append(v.data)
# Add the first vertex data as a last element (for plotting modules)
if len(self._data) > 0:
v_raw.append(self._data[0].data)
return v_raw
@property
def vertices_uv(self):
""" Parametric coordinates of the triangle vertices
:getter: Gets the parametric coordinates of the vertices
:type: list
"""
data = self.vertices
res = [data[idx].uv for idx in range(3)]
return res
@property
def edges(self):
""" Edges of the triangle
        :getter: Gets the list of vertices that generate the edges of the triangle
:type: list
"""
data = self.vertices_raw
res = [[] for _ in range(3)]
for idx in range(3):
res[idx] = [data[idx], data[idx + 1]]
return res
@property
def vertex_ids(self):
""" Vertex indices
Vertex numbering starts from 1.
:getter: Gets the vertex indices
:type: list
"""
v_idx = []
for v in self._data:
v_idx.append(v.id)
return v_idx
@property
def vertex_ids_zero(self):
""" Zero-indexed vertex indices
Vertex numbering starts from 0.
:getter: Gets the vertex indices
:type: list
"""
v_idx = []
for v in self._data:
v_idx.append(v.id - 1)
return v_idx
@property
def inside(self):
""" Inside-outside flag
:getter: Gets the flag
:setter: Sets the flag
:type: bool
"""
return self._inside
@inside.setter
def inside(self, value):
self._inside = bool(value)
def add_vertex(self, *args):
""" Adds vertices to the Triangle object.
        This method takes a single vertex or a list of vertices as its function arguments.
"""
if len(self._data) > 2:
raise ValueError("Cannot add more vertices")
res = []
for arg in args:
if isinstance(arg, Vertex):
res.append(arg)
else:
raise TypeError("Input must be a Vertex object")
self._data = res
@export
class Quad(AbstractEntity):
""" Quad entity which represents a quadrilateral structure composed of vertices. """
def __init__(self, *args, **kwargs):
super(Quad, self).__init__(*args, **kwargs)
if args:
self.data = args
def __str__(self):
return "Quad " + str(self._id) + " V: " + str(self._data)
__repr__ = __str__
@property
def data(self):
""" Vertex indices.
:getter: Gets the vertex indices
:setter: Sets the vertex indices
"""
return tuple(self._data)
@data.setter
def data(self, value):
if not isinstance(value, (list, tuple)):
raise TypeError("Input data must be a list or tuple")
if len(value) != 4:
raise ValueError("Quad can only have 4 vertices")
# Convert to int
self._data = [int(val) for val in value]
def add_vertex(self, *args):
""" Adds vertices to the Quad object.
        This method takes a single vertex or a list of vertices as its function arguments.
"""
if len(self._data) > 3:
raise ValueError("Cannot add more vertices")
res = []
for arg in args:
if isinstance(arg, Vertex):
res.append(arg.id)
else:
raise TypeError("Input must be a Vertex object")
self._data = res
@export
class Face(AbstractEntity):
""" Representation of Face entity which is composed of triangles or quads. """
def __init__(self, *args, **kwargs):
super(Face, self).__init__(*args, **kwargs)
if args:
self.add_triangle(*args)
def __str__(self):
return "Face " + str(self._id)
__repr__ = __str__
@property
def triangles(self):
""" Triangles of the face
:getter: Gets the list of triangles
:type: tuple
"""
return tuple(self._data)
def add_triangle(self, *args):
""" Adds triangles to the Face object.
        This method takes a single triangle or a list of triangles as its function arguments.
"""
res = []
for arg in args:
if isinstance(arg, Triangle):
res.append(arg)
else:
raise TypeError("Input must be a Triangle object")
self._data = res
@export
class Body(AbstractEntity):
""" Representation of Body entity which is composed of faces. """
def __init__(self, *args, **kwargs):
super(Body, self).__init__(*args, **kwargs)
if args:
self.add_face(*args)
def __str__(self):
return "Body " + str(self._id)
__repr__ = __str__
@property
def faces(self):
""" Faces of the body
:getter: Gets the list of faces
:type: tuple
"""
return tuple(self._data)
def add_face(self, *args):
""" Adds faces to the Body object.
        This method takes a single face or a list of faces as its function arguments.
"""
res = []
for arg in args:
if isinstance(arg, Face):
res.append(arg)
else:
raise TypeError("Input must be a Face object")
self._data = res
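# --- Illustrative usage (added sketch, not part of the original module) ---
# A minimal example of how the entities above compose, assuming AbstractEntity
# (defined earlier in this module) ignores extra positional arguments: three
# Vertex objects form a Triangle, Triangles form a Face, and Faces form a Body.
if __name__ == "__main__":
    v1 = Vertex(0.0, 0.0, 0.0)
    v2 = Vertex(1.0, 0.0, 0.0)
    v3 = Vertex(0.0, 1.0, 0.0)
    v1.id, v2.id, v3.id = 1, 2, 3
    tri = Triangle(v1, v2, v3)
    tri.id = 1
    face = Face(tri)
    body = Body(face)
    print(tri.vertex_ids)   # [1, 2, 3]
    print(len(body.faces))  # 1
    mid = (v1 + v2) / 2.0   # vertex arithmetic returns a new Vertex
    print(mid.data)         # (0.5, 0.0, 0.0)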
|
[
"orbingol@gmail.com"
] |
orbingol@gmail.com
|
2e0165ab267c593ebe689367b9fa417db54b4727
|
a9d9b934acd8f7bd26f35f0d764fd7e7ee33ebe4
|
/stock prices.py
|
8c98a488ebfeb1956528da84838f13909daf518e
|
[] |
no_license
|
jmkhat/Pandas
|
bf5ac10a91785656e5eba05ce9ddb4ac5fd2d547
|
a61b46d1a18073c06506aa2ff331f95cedb12e6f
|
refs/heads/master
| 2021-04-28T19:52:00.726226
| 2018-02-20T02:36:19
| 2018-02-20T02:36:19
| 121,909,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
print("hello world")
import pandas as pd
import datetime
import pandas_datareader.data as web
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
start = datetime.datetime(2014,1,1)
end = datetime.datetime(2017,1,1)
df=web.DataReader("XOM",'google',start,end)
print(df.head())
df['Adj Close'].plot()
plt.show()
|
[
"noreply@github.com"
] |
jmkhat.noreply@github.com
|
bacd5c10967e22cb2e03eb54ce3045346fa32f5e
|
fba45f3289a6de51eb7a9bfbee90d566181963b5
|
/pagemat/lib/python3.6/site-packages/paypal/standard/pdt/admin.py
|
d68d7ccb506406c13ca5c7216b0f32afb93123cd
|
[
"MIT"
] |
permissive
|
bharatpurohit97/PageMatrix
|
abb580787aecf656e5ff27f0c9d75e89f16e905d
|
66ab9b1dd365a34f86dba110fe97c32cb7137bf2
|
refs/heads/master
| 2022-12-12T01:50:47.230219
| 2018-12-19T09:20:05
| 2018-12-19T09:20:05
| 162,409,793
| 1
| 0
|
MIT
| 2022-12-08T02:28:13
| 2018-12-19T08:54:22
|
Python
|
UTF-8
|
Python
| false
| false
| 3,710
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from paypal.standard.pdt.models import PayPalPDT
# ToDo: How similar is this to PayPalIPNAdmin? Could we just inherit from one common admin model?
class PayPalPDTAdmin(admin.ModelAdmin):
date_hierarchy = 'payment_date'
fieldsets = (
(None, {
"fields":
['flag',
'txn_id',
'txn_type',
'payment_status',
'payment_date',
'transaction_entity',
'reason_code',
'pending_reason',
'mc_gross',
'mc_fee',
'auth_status',
'auth_amount',
'auth_exp',
'auth_id',
],
}),
("Address", {
"description": "The address of the Buyer.",
'classes': ('collapse',),
"fields":
['address_city',
'address_country',
'address_country_code',
'address_name',
'address_state',
'address_status',
'address_street',
'address_zip',
],
}),
("Buyer", {
"description": "The information about the Buyer.",
'classes': ('collapse',),
"fields":
['first_name',
'last_name',
'payer_business_name',
'payer_email',
'payer_id',
'payer_status',
'contact_phone',
'residence_country'
],
}),
("Seller", {
"description": "The information about the Seller.",
'classes': ('collapse',),
"fields":
['business',
'item_name',
'item_number',
'quantity',
'receiver_email',
'receiver_id',
'custom',
'invoice',
'memo',
],
}),
("Subscriber", {
"description": "The information about the Subscription.",
'classes': ('collapse',),
"fields":
['subscr_id',
'subscr_date',
'subscr_effective',
],
}),
("Recurring", {
"description": "Information about recurring Payments.",
"classes": ("collapse",),
"fields":
['profile_status',
'initial_payment_amount',
'amount_per_cycle',
'outstanding_balance',
'period_type',
'product_name',
'product_type',
'recurring_payment_id',
'receipt_id',
'next_payment_date',
],
}),
("Admin", {
"description": "Additional Info.",
"classes": ('collapse',),
"fields":
['test_ipn',
'ipaddress',
'query',
'flag_code',
'flag_info',
],
}),
)
list_display = ["__unicode__",
"flag",
"invoice",
"custom",
"payment_status",
"created_at",
]
search_fields = ["txn_id",
"recurring_payment_id",
]
admin.site.register(PayPalPDT, PayPalPDTAdmin)
|
[
"006.rajpurohit@gmail.com"
] |
006.rajpurohit@gmail.com
|
93e41cf770f4a8b381887fe281d932200adcaed5
|
ed9ad4660757cc842461aea831f63c397ed48385
|
/mapper2.py
|
8aec053ea8baec88f5dc040e3bf0826512a65724
|
[] |
no_license
|
Dhonveli/data-creent
|
a66d03259416bac7377b9a22f1074f55105da9d1
|
00d3eac0b363500e32f3961c8da5db6d07b25617
|
refs/heads/master
| 2020-04-04T02:49:31.308885
| 2018-11-20T16:53:21
| 2018-11-20T16:53:21
| 155,697,561
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,119
|
py
|
#!/usr/bin/env python
import os
import sys
import argparse
import shutil
import csv
# Parses the command line arguments of this program
def parse_args():
# Creates an object to parse the command line parameters
parser = argparse.ArgumentParser(description="This program maps genes name to nessra code",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Positional parameters
# Optional parameters
parser.add_argument("-lgn", "--LocalGeneNetwork", nargs=1,
help="Folder containing LGN files", type=str)
parser.add_argument("-sgn", "--SingleGeneExpansionList", nargs=1,
help="csv file containing gene list downloaded from OpenTarget", type=str)
parser.add_argument("-a", "--annotation", help="annotation file from LBM1819",
type=str, default="hgnc_cc_zero_filtered_anno.csv")
parser.add_argument("-r", "--csvboinc", help="csv file from https://gene.disi.unitn.it/test/gene_h.php",
type=str, default="genehome PC-IM history.csv")
parser.add_argument(
"-n", "--numbgenes", help="number of transcripts to run on NESSRA", type=int, default=50)
# Parses and returns the object containing the params
return parser.parse_args()
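# Example invocation (hypothetical file names, added for illustration; the -a and
# -r defaults below match the argparse defaults defined above):
#   python mapper2.py -lgn ./lgn_folder -sgn opentargets_genes.csv \
#       -a hgnc_cc_zero_filtered_anno.csv -r "genehome PC-IM history.csv" -n 50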
def load_files(args):
global map_gene_to_anno
global map_code_to_anno
global map_code_to_args
with open(args.annotation) as file:
# Discard the first line
file.readline()
        # Read the csv (the csv module is needed here because the fields
        # are wrapped in double quotes and would be hard to parse otherwise)
for line in csv.reader(file, quotechar='"', delimiter=',',
quoting=csv.QUOTE_ALL, skipinitialspace=True):
            # Extract the gene name and the isoform code
name = line[10]
code = line[0]
            # If the gene is not in the map yet,
            # create the associated list
            if name not in map_gene_to_anno:
                map_gene_to_anno[name] = []
            # Add this row to the map
            map_gene_to_anno[name].append(line)
            # If the code is not in the map yet,
            # create the associated list
            if code not in map_code_to_anno:
                map_code_to_anno[code] = []
            # Add this row to the map
map_code_to_anno[code].append(line)
    # Load the file of genes already run on boinc
    with open(args.csvboinc) as file:
        # Parse it as csv
csvfile = csv.reader(file, quotechar='"', delimiter=',',
quoting=csv.QUOTE_ALL, skipinitialspace=True)
        # Grab the header; it will be reused for the already-runned.csv file
        header_runned = next(csvfile)
        # Iterate line by line
        for line in csvfile:
            # Keep only the entries that start with 'T',
            # because that means they are single gene expansions
if line[2].startswith("T"):
                # Take the isoform code
                code = line[2].split('-')[0]
                # Insert it into the map
if code not in map_code_to_args:
map_code_to_args[code] = []
map_code_to_args[code].append(line)
def process_lgn(gene_list):
global map_code_to_anno
global map_gene_to_anno
global map_code_to_args
global csvsummary
global csvrunned
list_gene = [line.strip() for line in open(gene_list) if line.strip()]
with open(output_folder + '/hs-' + os.path.basename(gene_list) + '.txt', 'w') as filelgn:
filelgn.write('from,to')
        # open the lgn summary file
        # iterate over the genes
        for gene in list_gene:
            # check whether they are annotated
if gene not in map_gene_to_anno:
csvsummary.writerow(
[None, gene, "lgn", "not annotated",""])
else:
for isoform in map_gene_to_anno[gene]:
code = isoform[0]
                    # write this code into the lgn
                    filelgn.write('\n' + code + ',' + code)
                    # Decide the name of the onegene files
                    filename = code + '-' + gene + '.txt'
                    # check whether it has already been run
if code in map_code_to_args:
for row in map_code_to_args[code]:
csvrunned.writerow(row)
csvsummary.writerow(
[code, gene, "lgn", "already run",""])
                    # Otherwise write the file to be sent to walter
else:
csvsummary.writerow(
[code, gene, "lgn", "to be run","HIGH"])
with open(output_folder + '/' + filename, 'w') as file:
file.write('from,to\n' +
code + ',' + code)
def process_sgn(gene_list):
global map_code_to_anno
global map_gene_to_anno
global map_code_to_args
global csvsummary
global csvrunned
global numbgenes
gene_list.readline()
gene_rank = {}
for line in gene_list:
tmp = line.split(",")
gene_rank[tmp[0]] = (float(tmp[3]) + float(tmp[4])) / 2
sorted_gene = sorted(gene_rank, key=gene_rank.get, reverse=True)
for gene in sorted_gene:
if numbgenes <= 0:
break
else:
if gene not in map_gene_to_anno:
csvsummary.writerow(
[None, gene, str(gene_rank[gene]), "not annotated",""])
else:
for isoform in map_gene_to_anno[gene]:
code = isoform[0]
                    # Decide the name of the onegene files
                    filename = code + '-' + gene + '.txt'
                    # check whether it has already been run
if code in map_code_to_args:
for row in map_code_to_args[code]:
csvrunned.writerow(row)
csvsummary.writerow(
[code, gene, str(gene_rank[gene]), "already run",""])
                    # Otherwise write the file to be sent to walter
else:
numbgenes -= 1
with open(output_folder + '/' + filename, 'w') as file:
file.write('from,to\n' +
code + ',' + code)
if gene_rank[gene] > 0.6:
csvsummary.writerow(
[code, gene, str(gene_rank[gene]), "to be run","HIGH"])
else:
csvsummary.writerow(
[code, gene, str(gene_rank[gene]), "to be run","LOW"])
# Map genes to codes
map_gene_to_anno = {}
map_code_to_anno = {}
map_code_to_args = {}
if __name__ == "__main__":
# Parses command line parameters
args = parse_args()
load_files(args)
numbgenes = args.numbgenes
input = False
if os.path.exists("output_folder"):
shutil.rmtree("output_folder")
    # Create the output folder
    os.mkdir("output_folder")
    # Normalize the path
output_folder = os.path.normpath(
'output_folder') # removes redundant separator
    # open the summary file
    with open(output_folder + '/summary.csv', "w") as summary:
        # write the header
csvsummary = csv.writer(summary, quotechar='"', delimiter=',',
quoting=csv.QUOTE_ALL)
csvsummary.writerow(["ID", "GENE", "SCORE", "STATUS","PRIORITY"])
        # open already-runned
with open(output_folder + '/already-runned.csv', 'w') as filerunned:
csvrunned = csv.writer(filerunned, quotechar='"', delimiter=',',
quoting=csv.QUOTE_ALL)
csvrunned.writerow(["ID","ORG","LGN","EXP","LastUpd","alpha","tsize","iter","nPC","nWU"])
if args.LocalGeneNetwork is not None:
input = True
args.LocalGeneNetwork = os.path.normpath(args.LocalGeneNetwork[0])
                for name in os.listdir(args.LocalGeneNetwork):  # extract the individual lgn files
filename = args.LocalGeneNetwork + '/' + name
if os.path.isfile(filename):
                        process_lgn(filename)  # process the lgn
if args.SingleGeneExpansionList is not None:
input = True
args.SingleGeneExpansionList = os.path.normpath(
args.SingleGeneExpansionList[0])
with open(args.SingleGeneExpansionList, "r") as filesg:
process_sgn(filesg)
else:
if input == False:
raise Exception("No input was given!")
|
[
"giordano.alvari@gmail.com"
] |
giordano.alvari@gmail.com
|
8682fa1487d079c5604e8026a25f58cf69be0495
|
bd0b15a650d532ab95cfe945b8c59fb588d4ed7b
|
/download_pretrainings.py
|
cb8d9dd4bce32ddaebaf23656fec0a9710309e32
|
[
"MIT"
] |
permissive
|
rortegagit/isaaq-anon
|
98ec32d2a0e4a2d654a1073fa2af627716f090ae
|
954206fe84d7db31bec01dc9338af5c6433974c1
|
refs/heads/master
| 2022-11-06T23:57:10.344051
| 2020-07-03T11:37:27
| 2020-07-03T11:37:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
import zipfile
from google_drive_downloader import GoogleDriveDownloader as gdd
from tqdm import tqdm
import requests
gdd.download_file_from_google_drive(file_id='1cQEjNIb11eOL4ZPKKvXPvdx9OVL324Zp',
dest_path='./checkpoints.zip')
with zipfile.ZipFile("checkpoints.zip", 'r') as zip_ref:
zip_ref.extractall(".")
|
[
"noreply@github.com"
] |
rortegagit.noreply@github.com
|
5944c73b17f82c3bf11149917b9d99491d0d1e91
|
fe32d7054687dd3cbee99e43b32488bff262681d
|
/tests/checkers/projects/test_python.py
|
df3e48d7ae2e84ed26b25acdbb5315f67579dd4e
|
[
"Apache-2.0"
] |
permissive
|
whwkong/verse
|
106d61f4a3a6bbabab1cdd7583c909fa48717214
|
0dc25222c309c780afee5cc6d5293858e5ead08e
|
refs/heads/master
| 2021-06-14T16:31:48.729895
| 2017-04-04T19:20:39
| 2017-04-04T19:20:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,275
|
py
|
"""
Test `checkers.projects.python` file
"""
import pytest
from checkers import base
from checkers.projects import python
class TestPythonVersionChecker:
"""
Test `python.PythonVersionChecker` class
"""
@pytest.fixture
def instance(self):
return python.PythonVersionChecker()
def test_class_inheritance(self, instance):
"""Test class inheritance"""
assert isinstance(instance, base.BaseVersionChecker)
assert isinstance(instance, base.GitHubVersionChecker)
def test_class_properties(self, instance):
"""Test class properties"""
assert instance.name == 'Python'
assert instance.slug == 'python'
assert instance.homepage == 'https://www.python.org/'
assert instance.repository == 'https://github.com/python/cpython'
class TestAnsibleVersionChecker:
"""
Test `python.AnsibleVersionChecker` class
"""
@pytest.fixture
def instance(self):
return python.AnsibleVersionChecker()
def test_class_inheritance(self, instance):
"""Test class inheritance"""
assert isinstance(instance, base.BaseVersionChecker)
assert isinstance(instance, base.GitHubVersionChecker)
def test_class_properties(self, instance):
"""Test class properties"""
assert instance.name == 'Ansible'
assert instance.slug == 'ansible'
assert instance.homepage == 'https://www.ansible.com/'
assert instance.repository == 'https://github.com/ansible/ansible'
class TestCeleryVersionChecker:
"""
Test `python.CeleryVersionChecker` class
"""
@pytest.fixture
def instance(self):
return python.CeleryVersionChecker()
def test_class_inheritance(self, instance):
"""Test class inheritance"""
assert isinstance(instance, base.BaseVersionChecker)
assert isinstance(instance, base.GitHubVersionChecker)
def test_class_properties(self, instance):
"""Test class properties"""
assert instance.name == 'Celery'
assert instance.slug == 'celery'
assert instance.homepage == 'http://www.celeryproject.org/'
assert instance.repository == 'https://github.com/celery/celery'
class TestDjangoVersionChecker:
"""
Test `python.DjangoVersionChecker` class
"""
@pytest.fixture
def instance(self):
return python.DjangoVersionChecker()
def test_class_inheritance(self, instance):
"""Test class inheritance"""
assert isinstance(instance, base.BaseVersionChecker)
assert isinstance(instance, base.GitHubVersionChecker)
def test_class_properties(self, instance):
"""Test class properties"""
assert instance.name == 'Django'
assert instance.slug == 'django'
assert instance.homepage == 'https://www.djangoproject.com/'
assert instance.repository == 'https://github.com/django/django'
class TestDjangoRESTFrameworkVersionChecker:
"""
Test `python.DjangoRESTFrameworkVersionChecker` class
"""
@pytest.fixture
def instance(self):
return python.DjangoRESTFrameworkVersionChecker()
def test_class_inheritance(self, instance):
"""Test class inheritance"""
assert isinstance(instance, base.BaseVersionChecker)
assert isinstance(instance, base.GitHubVersionChecker)
def test_class_properties(self, instance):
"""Test class properties"""
assert instance.name == 'Django REST Framework'
assert instance.slug == 'django-rest-framework'
assert instance.homepage == 'http://www.django-rest-framework.org/'
assert (
instance.repository ==
'https://github.com/tomchristie/django-rest-framework'
)
class TestFlaskVersionChecker:
"""
Test `python.FlaskVersionChecker` class
"""
@pytest.fixture
def instance(self):
return python.FlaskVersionChecker()
def test_class_inheritance(self, instance):
"""Test class inheritance"""
assert isinstance(instance, base.BaseVersionChecker)
assert isinstance(instance, base.GitHubVersionChecker)
def test_class_properties(self, instance):
"""Test class properties"""
assert instance.name == 'Flask'
assert instance.slug == 'flask'
assert instance.homepage == 'http://flask.pocoo.org/'
assert instance.repository == 'https://github.com/pallets/flask'
class TestGunicornVersionChecker:
"""
Test `python.GunicornVersionChecker` class
"""
@pytest.fixture
def instance(self):
return python.GunicornVersionChecker()
def test_class_inheritance(self, instance):
"""Test class inheritance"""
assert isinstance(instance, base.BaseVersionChecker)
assert isinstance(instance, base.GitHubVersionChecker)
def test_class_properties(self, instance):
"""Test class properties"""
assert instance.name == 'Gunicorn'
assert instance.slug == 'gunicorn'
assert instance.homepage == 'http://gunicorn.org/'
assert instance.repository == 'https://github.com/benoitc/gunicorn'
class TestRequestsVersionChecker:
"""
Test `python.RequestsVersionChecker` class
"""
@pytest.fixture
def instance(self):
return python.RequestsVersionChecker()
def test_class_inheritance(self, instance):
"""Test class inheritance"""
assert isinstance(instance, base.BaseVersionChecker)
assert isinstance(instance, base.GitHubVersionChecker)
def test_class_properties(self, instance):
"""Test class properties"""
assert instance.name == 'Requests'
assert instance.slug == 'python-requests'
assert instance.homepage == 'http://docs.python-requests.org/'
assert (
instance.repository ==
'https://github.com/kennethreitz/requests'
)
def test_class_normalize_tag_name_method(self, instance):
"""Test class `_normalize_tag_name()` method"""
assert instance._normalize_tag_name('2.0') == ''
assert instance._normalize_tag_name('v2.0.0') == 'v2.0.0'
def test_class_get_versions_method(self, mocker, instance):
"""Test class `get_versions()` method"""
mocked_get_github_tags = mocker.patch.object(
instance, '_get_github_tags',
)
assert instance.get_versions() == mocked_get_github_tags.return_value
mocked_get_github_tags.assert_called_once_with(
normalize_func=instance._normalize_tag_name,
)
class TestScrapyVersionChecker:
"""
Test `python.ScrapyVersionChecker` class
"""
@pytest.fixture
def instance(self):
return python.ScrapyVersionChecker()
def test_class_inheritance(self, instance):
"""Test class inheritance"""
assert isinstance(instance, base.BaseVersionChecker)
assert isinstance(instance, base.GitHubVersionChecker)
def test_class_properties(self, instance):
"""Test class properties"""
assert instance.name == 'Scrapy'
assert instance.slug == 'scrapy'
assert instance.homepage == 'https://scrapy.org/'
assert instance.repository == 'https://github.com/scrapy/scrapy'
|
[
"pawel.ad@gmail.com"
] |
pawel.ad@gmail.com
|
02c5d9cafd5ef6b75cda0007078953c73b767feb
|
dba22ccd9f1e69cc3fc2fe3971cccadf9015c1c2
|
/prac_02/exceptions_demo.py
|
3395f62976fac2ac2be4dea3f8d2ec74229e0319
|
[] |
no_license
|
RyanHonorica1408/CP1404
|
d7cd443dcc9e1de44f1a181f496c6bd9ce20ebb5
|
d90ff1cb2f33dd9751e0957b38eb7a51706cd000
|
refs/heads/master
| 2020-03-27T19:04:35.463157
| 2018-10-31T13:22:14
| 2018-10-31T13:22:25
| 146,964,246
| 0
| 0
| null | 2018-10-15T13:54:38
| 2018-09-01T03:32:17
|
Python
|
UTF-8
|
Python
| false
| false
| 294
|
py
|
try:
numerator = int(input("Enter the numerator: "))
denominator = int(input("Enter the denominator: "))
    fraction = numerator / denominator
    print(fraction)
except ValueError:
print("Numerator and denominator must be valid numbers!")
except ZeroDivisionError:
print("Cannot divide by zero!")
print("Finished.")
|
[
"ryan.tiomico@my.jcu.edu.au"
] |
ryan.tiomico@my.jcu.edu.au
|
0df9b535a01d9194a2be299758e45ec123a5d24e
|
5b38eb5309dc9f360b74a6dc849b871e9c9ece47
|
/problems/arrays/tests/three_sum_test.py
|
b0a3e4cea62ebf1451de25ad73940be952131b21
|
[] |
no_license
|
Beomus/py-dsa
|
f80e060b90ceb2c3fb8c3ad61f2553fb15c45800
|
e883bcd2e72b8909a401175a8b7ee52f0b680b17
|
refs/heads/master
| 2023-06-11T15:03:58.659641
| 2021-07-01T02:37:47
| 2021-07-01T02:37:47
| 378,858,293
| 0
| 1
| null | 2021-06-22T05:34:33
| 2021-06-21T08:22:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,208
|
py
|
import unittest
from ..three_sum import threeSum
class TestThreeSum(unittest.TestCase):
def test_case_1(self):
array = [12, 3, 1, 2, -6, 5, -8, 6]
target = 0
output = [[-8, 2, 6], [-8, 3, 5], [-6, 1, 5]]
self.assertEqual(threeSum(array, target), output)
def test_case_2(self):
array = [1, 2, 3]
target = 6
output = [[1, 2, 3]]
self.assertEqual(threeSum(array, target), output)
def test_case_3(self):
array = [1, 2, 3]
target = 7
output = []
self.assertEqual(threeSum(array, target), output)
def test_case_4(self):
array = [12, 3, 1, 2, -6, 5, 0, -8, -1, 6, -5]
target = 0
output = [
[-8, 2, 6],
[-8, 3, 5],
[-6, 0, 6],
[-6, 1, 5],
[-5, -1, 6],
[-5, 0, 5],
[-5, 2, 3],
[-1, 0, 1],
]
self.assertEqual(threeSum(array, target), output)
def test_case_5(self):
array = [1, 2, 3, 4, 5, 6, 7, 8, 9, 15]
target = 5
output = []
self.assertEqual(threeSum(array, target), output)
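# --- Reference sketch (added; the real ..three_sum module is not shown here) ---
# One implementation consistent with the expectations above: sort the input,
# then for each index walk two pointers towards each other to complete the target.
def three_sum_sketch(array, target):
    array = sorted(array)
    triplets = []
    for i in range(len(array) - 2):
        left, right = i + 1, len(array) - 1
        while left < right:
            current = array[i] + array[left] + array[right]
            if current == target:
                triplets.append([array[i], array[left], array[right]])
                left += 1
                right -= 1
            elif current < target:
                left += 1
            else:
                right -= 1
    return triplets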
if __name__ == "__main__":
unittest.main()
|
[
"trunghau60@gmail.com"
] |
trunghau60@gmail.com
|
98e60f84759f1dabfe64292e06d96f5801a51c88
|
ed60a26caa718cae99f97217e6664e5a23ce3d45
|
/networkaccessmanager.py
|
f4b8737e2d2bd79571b271b5ee020b61bb1201e2
|
[] |
no_license
|
leonmvd/pdokservicesplugin
|
47580e290c2ea686541c90e6c6c6a9bc9cd5d524
|
00ea86d49037e27dee7db443de932c0ca9168b81
|
refs/heads/master
| 2021-08-28T15:17:41.441931
| 2017-11-17T13:26:52
| 2017-11-17T13:26:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,313
|
py
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
An httplib2 replacement that uses QgsNetworkAccessManager
https://github.com/boundlessgeo/lib-qgis-commons/blob/master/qgiscommons2/network/networkaccessmanager.py
---------------------
Date : August 2016
Copyright : (C) 2016 Boundless, http://boundlessgeo.com
Email : apasotti at boundlessgeo dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
import sys
import os
sys.path.append(os.path.dirname(__file__))
from future import standard_library
standard_library.install_aliases()
#from builtins import str
#from builtins import object
__author__ = 'Alessandro Pasotti'
__date__ = 'August 2016'
import re
import urllib.request, urllib.error, urllib.parse
from PyQt4.QtCore import pyqtSlot, QUrl, QEventLoop, QTimer, QCoreApplication
from PyQt4.QtNetwork import QNetworkRequest, QNetworkReply
from qgis.core import QgsNetworkAccessManager, QgsMessageLog
# FIXME: ignored
DEFAULT_MAX_REDIRECTS = 4
class RequestsException(Exception):
pass
class RequestsExceptionTimeout(RequestsException):
pass
class RequestsExceptionConnectionError(RequestsException):
pass
class RequestsExceptionUserAbort(RequestsException):
pass
class Map(dict):
"""
Example:
m = Map({'first_name': 'Eduardo'}, last_name='Pool', age=24, sports=['Soccer'])
"""
def __init__(self, *args, **kwargs):
super(Map, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in arg.items():
self[k] = v
if kwargs:
for k, v in kwargs.items():
self[k] = v
def __getattr__(self, attr):
return self.get(attr)
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __setitem__(self, key, value):
super(Map, self).__setitem__(key, value)
self.__dict__.update({key: value})
def __delattr__(self, item):
self.__delitem__(item)
def __delitem__(self, key):
super(Map, self).__delitem__(key)
del self.__dict__[key]
class Response(Map):
pass
class NetworkAccessManager(object):
"""
    This class mimics httplib2 by using QgsNetworkAccessManager for all
network calls.
    The return value is a tuple of (response, content), the first being an
instance of the Response class, the second being a string that contains
the response entity body.
Parameters
----------
debug : bool
verbose logging if True
exception_class : Exception
Custom exception class
Usage 1 (blocking mode)
-----
::
nam = NetworkAccessManager(authcgf)
try:
(response, content) = nam.request('http://www.example.com')
except RequestsException, e:
# Handle exception
pass
Usage 2 (Non blocking mode)
-------------------------
::
        NOTE! in non-blocking mode the call returns immediately;
        it's up to the caller to manage listeners in that case
nam = NetworkAccessManager(authcgf)
try:
nam.request('http://www.example.com', blocking=False)
nam.reply.finished.connect(a_signal_listener)
except RequestsException, e:
# Handle exception
pass
Get response using method:
    nam.httpResult() that returns a dictionary with keys:
        'status' - http code coming from reply.attribute(QNetworkRequest.HttpStatusCodeAttribute)
        'status_code' - http code coming from reply.attribute(QNetworkRequest.HttpStatusCodeAttribute)
'status_message' - reply message string from reply.attribute(QNetworkRequest.HttpReasonPhraseAttribute)
'content' - bytearray returned from reply
'ok' - request success [True, False]
        'headers' - Dictionary containing the reply headers
        'reason' - formatted message string with reply.errorString()
        'exception' - the exception returned during execution
"""
def __init__(self, authid=None, disable_ssl_certificate_validation=False, exception_class=None, debug=False):
self.disable_ssl_certificate_validation = disable_ssl_certificate_validation
self.authid = authid
self.reply = None
self.debug = debug
self.exception_class = exception_class
self.on_abort = False
self.blocking_mode = False
self.http_call_result = Response({
'status': 0,
'status_code': 0,
'status_message': '',
'content' : '',
'ok': False,
'headers': {},
'reason': '',
'exception': None,
})
def msg_log(self, msg):
if self.debug:
QgsMessageLog.logMessage(msg, "NetworkAccessManager")
def httpResult(self):
return self.http_call_result
def request(self, url, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None, blocking=True):
"""
Make a network request by calling QgsNetworkAccessManager.
redirections argument is ignored and is here only for httplib2 compatibility.
"""
self.msg_log(u'http_call request: {0}'.format(url))
self.blocking_mode = blocking
req = QNetworkRequest()
        # Avoid double quoting from QUrl
url = urllib.parse.unquote(url)
req.setUrl(QUrl(url))
if headers is not None:
            # This fixes a weird error with compressed content not being correctly
# inflated.
# If you set the header on the QNetworkRequest you are basically telling
# QNetworkAccessManager "I know what I'm doing, please don't do any content
# encoding processing".
# See: https://bugs.webkit.org/show_bug.cgi?id=63696#c1
try:
del headers['Accept-Encoding']
except KeyError:
pass
for k, v in list(headers.items()):
self.msg_log("Setting header %s to %s" % (k, v))
req.setRawHeader(k, v)
# if self.authid:
# self.msg_log("Update request w/ authid: {0}".format(self.authid))
# QgsAuthManager.instance().updateNetworkRequest(req, self.authid)
if self.reply is not None and self.reply.isRunning():
self.reply.close()
if method.lower() == 'delete':
func = getattr(QgsNetworkAccessManager.instance(), 'deleteResource')
else:
func = getattr(QgsNetworkAccessManager.instance(), method.lower())
# Calling the server ...
# Let's log the whole call for debugging purposes:
self.msg_log("Sending %s request to %s" % (method.upper(), req.url().toString()))
self.on_abort = False
headers = {str(h): str(req.rawHeader(h)) for h in req.rawHeaderList()}
for k, v in list(headers.items()):
self.msg_log("%s: %s" % (k, v))
if method.lower() in ['post', 'put']:
if isinstance(body, file):
body = body.read()
self.reply = func(req, body)
else:
self.reply = func(req)
# if self.authid:
# self.msg_log("Update reply w/ authid: {0}".format(self.authid))
# QgsAuthManager.instance().updateNetworkReply(self.reply, self.authid)
        # necessary to trap the local timeout managed by QgsNetworkAccessManager
# calling QgsNetworkAccessManager::abortRequest
QgsNetworkAccessManager.instance().requestTimedOut.connect(self.requestTimedOut)
self.reply.sslErrors.connect(self.sslErrors)
self.reply.finished.connect(self.replyFinished)
self.reply.downloadProgress.connect(self.downloadProgress)
        # block in blocking mode, otherwise return immediately;
        # it's up to the caller to manage listeners in non-blocking mode
if not self.blocking_mode:
return (None, None)
# Call and block
self.el = QEventLoop()
self.reply.finished.connect(self.el.quit)
# Catch all exceptions (and clean up requests)
try:
self.el.exec_(QEventLoop.ExcludeUserInputEvents)
except Exception as e:
raise e
if self.reply:
self.reply.finished.disconnect(self.el.quit)
# emit exception in case of error
if not self.http_call_result.ok:
if self.http_call_result.exception and not self.exception_class:
raise self.http_call_result.exception
else:
raise self.exception_class(self.http_call_result.reason)
return (self.http_call_result, self.http_call_result.content)
@pyqtSlot()
def downloadProgress(self, bytesReceived, bytesTotal):
"""Keep track of the download progress"""
#self.msg_log("downloadProgress %s of %s ..." % (bytesReceived, bytesTotal))
pass
@pyqtSlot()
def requestTimedOut(self, reply):
"""Trap the timeout. In Async mode requestTimedOut is called after replyFinished"""
        # adapt http_call_result based on the received qgs timer timeout signal
self.exception_class = RequestsExceptionTimeout
self.http_call_result.exception = RequestsExceptionTimeout("Timeout error")
@pyqtSlot()
def replyFinished(self):
err = self.reply.error()
httpStatus = self.reply.attribute(QNetworkRequest.HttpStatusCodeAttribute)
httpStatusMessage = self.reply.attribute(QNetworkRequest.HttpReasonPhraseAttribute)
self.http_call_result.status_code = httpStatus
self.http_call_result.status = httpStatus
self.http_call_result.status_message = httpStatusMessage
for k, v in self.reply.rawHeaderPairs():
self.http_call_result.headers[str(k)] = str(v)
self.http_call_result.headers[str(k).lower()] = str(v)
if err != QNetworkReply.NoError:
# handle error
# check if errorString is empty, if so, then set err string as
# reply dump
if re.match('(.)*server replied: $', self.reply.errorString()):
errString = self.reply.errorString() + self.http_call_result.content
else:
errString = self.reply.errorString()
# check if self.http_call_result.status_code is available (client abort
# does not produce http.status_code)
if self.http_call_result.status_code:
msg = "Network error #{0}: {1}".format(
self.http_call_result.status_code, errString)
else:
msg = "Network error: {0}".format(errString)
self.http_call_result.reason = msg
self.http_call_result.ok = False
self.msg_log(msg)
# set return exception
if err == QNetworkReply.TimeoutError:
self.http_call_result.exception = RequestsExceptionTimeout(msg)
elif err == QNetworkReply.ConnectionRefusedError:
self.http_call_result.exception = RequestsExceptionConnectionError(msg)
elif err == QNetworkReply.OperationCanceledError:
# request abort by calling NAM.abort() => cancelled by the user
if self.on_abort:
self.http_call_result.exception = RequestsExceptionUserAbort(msg)
else:
self.http_call_result.exception = RequestsException(msg)
else:
self.http_call_result.exception = RequestsException(msg)
# overload exception to the custom exception if available
if self.exception_class:
self.http_call_result.exception = self.exception_class(msg)
else:
# Handle redirections
redirectionUrl = self.reply.attribute(QNetworkRequest.RedirectionTargetAttribute)
if redirectionUrl is not None and redirectionUrl != self.reply.url():
if redirectionUrl.isRelative():
redirectionUrl = self.reply.url().resolved(redirectionUrl)
msg = "Redirected from '{}' to '{}'".format(
self.reply.url().toString(), redirectionUrl.toString())
self.msg_log(msg)
self.reply.deleteLater()
self.reply = None
self.request(redirectionUrl.toString())
# really end request
else:
msg = "Network success #{0}".format(self.reply.error())
self.http_call_result.reason = msg
self.msg_log(msg)
ba = self.reply.readAll()
self.http_call_result.content = bytes(ba)
self.http_call_result.ok = True
# Let's log the whole response for debugging purposes:
self.msg_log("Got response %s %s from %s" % \
(self.http_call_result.status_code,
self.http_call_result.status_message,
self.reply.url().toString()))
for k, v in list(self.http_call_result.headers.items()):
self.msg_log("%s: %s" % (k, v))
if len(self.http_call_result.content) < 1024:
self.msg_log("Payload :\n%s" % self.http_call_result.content)
else:
self.msg_log("Payload is > 1 KB ...")
# clean reply
if self.reply is not None:
if self.reply.isRunning():
self.reply.close()
self.msg_log("Deleting reply ...")
# Disconnect all slots
self.reply.sslErrors.disconnect(self.sslErrors)
self.reply.finished.disconnect(self.replyFinished)
self.reply.downloadProgress.disconnect(self.downloadProgress)
self.reply.deleteLater()
self.reply = None
else:
self.msg_log("Reply was already deleted ...")
@pyqtSlot()
def sslErrors(self, ssl_errors):
"""
Handle SSL errors, logging them if debug is on and ignoring them
if disable_ssl_certificate_validation is set.
"""
if ssl_errors:
for v in ssl_errors:
self.msg_log("SSL Error: %s" % v.errorString())
if self.disable_ssl_certificate_validation:
self.reply.ignoreSslErrors()
@pyqtSlot()
def abort(self):
"""
Handle request to cancel HTTP call
"""
if (self.reply and self.reply.isRunning()):
self.on_abort = True
self.reply.abort()
|
[
"richard@duif.net"
] |
richard@duif.net
|
e8fb7c4b15125ffbf91656ba6e26fa0b454304bb
|
2ccba7b17b3ce15efa627ef25ff1a1e23c4b1dbd
|
/Week 02/PSet02 - problem_3.py
|
95c7a03fbbaca44e1d0bb79106a4f6e45941938b
|
[
"MIT"
] |
permissive
|
andresmachado/edx-mit-6.00
|
ecf62954fbc2f77ad1e14e2e179e5c011ad50b1c
|
cbc9b1947116433d7f2a0b47935af648b3828702
|
refs/heads/master
| 2020-12-03T07:45:29.696290
| 2016-09-16T12:44:39
| 2016-09-16T12:44:39
| 67,264,380
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,550
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 16 09:31:26 2016
@author: andre
# edX MITx 6.00.1x
# Introduction to Computer Science and Programming Using Python
# Problem Set 2, problem 3
# Use bisection search to make the program faster
# The following variables contain values as described below:
# balance - the outstanding balance on the credit card
# annualInterestRate - annual interest rate as a decimal
# Monthly interest rate = (Annual interest rate) / 12.0
# Monthly payment lower bound = Balance / 12
# Monthly payment upper bound = (Balance x (1 + Monthly interest rate)^12) / 12.0
# Problem Summary: Use bisection search to search for the smallest monthly payment
# to the cent such that we can pay off the entire balance within a year.
"""
# Test Cases, comment out before submitting for grading
#Test Case 1
balance = 320000
annualInterestRate = 0.2
monthly_interest_rate = (annualInterestRate / 12.0)
payment_lower = (balance / 12)
payment_upper = (balance * ((1 + monthly_interest_rate)**12)) / 12.0
original_balance = balance
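# Worked example of the bounds for this test case (approximate values, added for clarity):
#   monthly_interest_rate = 0.2 / 12 ≈ 0.016667
#   payment_lower = 320000 / 12 ≈ 26666.67
#   payment_upper = 320000 * (1.016667 ** 12) / 12 ≈ 32517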
while balance != 0.00:
# Set value for thePayment to midpoint of lower and upper
payment = (payment_lower + payment_upper) / 2
# Reset balance each time through while loop
balance = original_balance
for i in range(1,13):
balance = (balance - payment) * (1 + monthly_interest_rate)
if balance > 0:
payment_lower = payment
elif balance < 0:
payment_upper = payment
balance = round(balance, 2)
print("Lowest Payment:", round(payment,2))
|
[
"csantos.machado@gmail.com"
] |
csantos.machado@gmail.com
|
0b14f4c050f42e06cf573a1f84e62522ac65add4
|
c7d91529db199322e39e54fe4051a75704ea843e
|
/华为题库/最小覆盖串.py
|
df725d28bca625b4f4f23c73033173ff5af73345
|
[] |
no_license
|
2226171237/Algorithmpractice
|
fc786fd47aced5cd6d96c45f8e728c1e9d1160b7
|
837957ea22aa07ce28a6c23ea0419bd2011e1f88
|
refs/heads/master
| 2020-12-26T07:20:37.226443
| 2020-09-13T13:31:05
| 2020-09-13T13:31:05
| 237,431,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,644
|
py
|
'''
Given a string S and a string T, find the minimum substring of S that contains all the characters of T.
Example:
Input: S = "ADOBECODEBANC", T = "ABC"
Output: "BANC"
Source: LeetCode (LeetCode-CN)
Link: https://leetcode-cn.com/problems/minimum-window-substring
The copyright belongs to LeetCode. For commercial reprints please contact the official authorization; for non-commercial reprints please cite the source.
'''
from collections import Counter
class Solution:
def minWindow(self, s: str, t: str) -> str:
'''
        Two pointers, sliding window
:param s:
:param t:
:return:
'''
needs=Counter(t)
need_matchs=len(needs)
        match=0  # how many required characters are fully matched
window={}
left,right=0,0
start=0
minLens=2**32
while right<len(s):
ch=s[right]
            if needs[ch]:  # this character needs to be matched
window[ch]=window.get(ch,0)+1
                if window[ch]==needs[ch]:  # this character is now fully matched
match+=1
right+=1
            while match==need_matchs:  # everything matched; keep moving the left edge right until it no longer matches
                if right-left<minLens:  # update the minimum substring
start=left
minLens=right-left
ch=s[left]
if needs[ch]:
window[ch]-=1
                    if window[ch]<needs[ch]:  # a mismatch appeared
match-=1
left+=1
return '' if minLens==2**32 else s[start:start+minLens]
if __name__ == '__main__':
S=Solution()
print(S.minWindow("cabwefgewcwaefgcf","cae"))
|
[
"2226171237@qq.com"
] |
2226171237@qq.com
|
88f86d48dcdf56ff72b52bdbfef2d6666f1e97dc
|
fe67f7789cb632ff25025cbf034373e1f96fb17f
|
/python/learn-python-hard-way/demo/demo-14-prompt-and-transfer.py
|
70772cf3989d9dfd5d0d191b1059f9781a98e400
|
[] |
no_license
|
siu91/ddu
|
2a75241cff2bfe74322ba2f86fc7b380ba79c01e
|
3756e0a286c40820e0f265ad51a34c34610ec7a8
|
refs/heads/master
| 2021-07-05T07:30:49.874509
| 2017-09-28T08:59:48
| 2017-09-28T08:59:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 575
|
py
|
# -- coding: utf-8 --
#
# python ex14.py Zed
from sys import argv
script, user_name = argv
prompt = '> '
print "Hi %s, I'm the %s script." % (user_name, script)
print "I'd like to ask you a few questions."
print "Do you like me %s?" % user_name
likes = raw_input(prompt)
print "Where do you live %s?" % user_name
lives = raw_input(prompt)
print "What kind of computer do you have?"
computer = raw_input(prompt)
print """
Alright, so you said %r about liking me.
You live in %r. Not sure where that is.
And you have a %r computer. Nice.
""" % (likes, lives, computer)
|
[
"gshiwen@gmail.com"
] |
gshiwen@gmail.com
|
9c9afd3e135cc465a3e280dc8aae93917bf8c46f
|
b80e611e74928057d032849e775bbd2d50dd27d6
|
/puppet/modules/eayunstack/files/q-agent-cleanup.py
|
1fb4a0c23ef72af9406005c1af1c1741a61f7571
|
[
"Apache-2.0"
] |
permissive
|
eayunstack/eayunstack-upgrade
|
6d98802c94334bcd56ac81acf528b02c99dcb01c
|
f15669d49205c6a7de8b7b57dfcea1cbc6410b4e
|
refs/heads/master
| 2020-05-22T06:57:10.312773
| 2017-12-07T00:47:35
| 2017-12-07T00:47:35
| 48,354,515
| 1
| 12
|
Apache-2.0
| 2017-12-12T10:01:59
| 2015-12-21T06:19:37
|
Python
|
UTF-8
|
Python
| false
| false
| 29,639
|
py
|
#!/usr/bin/env python
import re
import time
import os
import sys
import random
import string
import json
import argparse
import logging
import logging.handlers
import shlex
import subprocess
import StringIO
import socket
from neutronclient.neutron import client as q_client
from keystoneclient.v2_0 import client as ks_client
from keystoneclient.apiclient.exceptions import NotFound as ks_NotFound
LOG_NAME = 'q-agent-cleanup'
API_VER = '2.0'
PORT_ID_PART_LEN = 11
TMP_USER_NAME = 'tmp_neutron_admin'
def get_authconfig(cfg_file):
# Read OS auth config file
rv = {}
stripchars=" \'\""
with open(cfg_file) as f:
for line in f:
rg = re.match(r'\s*export\s+(\w+)\s*=\s*(.*)',line)
if rg :
#Use shlex to unescape bash shell escape characters
value = "".join(x for x in
shlex.split(rg.group(2).strip(stripchars)))
rv[rg.group(1).strip(stripchars)] = value
return rv
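# Example openrc content parsed by get_authconfig (illustrative values only; the
# variable names are the ones this script reads further below):
#   export OS_USERNAME='admin'
#   export OS_PASSWORD='secret'
#   export OS_TENANT_NAME='admin'
#   export OS_AUTH_URL='http://127.0.0.1:5000/v2.0'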
class NeutronCleaner(object):
PORT_NAME_PREFIXES_BY_DEV_OWNER = {
'network:dhcp': 'tap',
'network:router_gateway': 'qg-',
'network:router_interface': 'qr-',
}
PORT_NAME_PREFIXES = {
# contains tuples of prefixes
'dhcp': (PORT_NAME_PREFIXES_BY_DEV_OWNER['network:dhcp'],),
'l3': (
PORT_NAME_PREFIXES_BY_DEV_OWNER['network:router_gateway'],
PORT_NAME_PREFIXES_BY_DEV_OWNER['network:router_interface']
)
}
BRIDGES_FOR_PORTS_BY_AGENT ={
'dhcp': ('br-int',),
'l3': ('br-int', 'br-ex'),
}
PORT_OWNER_PREFIXES = {
'dhcp': ('network:dhcp',),
'l3': ('network:router_gateway', 'network:router_interface')
}
NS_NAME_PREFIXES = {
'dhcp': 'qdhcp',
'l3': 'qrouter',
'lbaas': 'qlbaas'
}
AGENT_BINARY_NAME = {
'dhcp': 'neutron-dhcp-agent',
'l3': 'neutron-l3-agent',
'ovs': 'neutron-openvswitch-agent',
'lbaas': 'neutron-lbaas-agent'
}
CMD__list_ovs_port = ['ovs-vsctl', 'list-ports']
CMD__remove_ovs_port = ['ovs-vsctl', '--', '--if-exists', 'del-port']
CMD__remove_ip_addr = ['ip', 'address', 'delete']
CMD__ip_netns_list = ['ip', 'netns', 'list']
CMD__ip_netns_exec = ['ip', 'netns', 'exec']
RE__port_in_portlist = re.compile(r"^\s*\d+\:\s+([\w-]+)\:") # 14: tap-xxxyyyzzz:
def __init__(self, openrc, options, log=None):
self.log = log
self.auth_config = openrc
self.options = options
self.agents = {}
self.debug = options.get('debug')
self.RESCHEDULING_CALLS = {
'dhcp': self._reschedule_agent_dhcp,
'l3': self._reschedule_agent_l3,
'lbaas': self._reschedule_agent_lbaas
}
self._token = None
self._keystone = None
self._client = None
self._need_cleanup_tmp_admin = False
def __del__(self):
if self._need_cleanup_tmp_admin and self._keystone and self._keystone.username:
try:
self._keystone.users.delete(self._keystone.users.find(username=self._keystone.username))
except:
                # if we get an exception while cleaning the temporary account -- no harm done
pass
def generate_random_passwd(self, length=13):
chars = string.ascii_letters + string.digits + '!@#$%^&*()'
random.seed = (os.urandom(1024))
return ''.join(random.choice(chars) for i in range(length))
@property
def keystone(self):
if self._keystone is None:
ret_count = self.options.get('retries', 1)
tmp_passwd = self.generate_random_passwd()
while True:
if ret_count <= 0:
self.log.error(">>> Keystone error: no more retries for connect to keystone server.")
sys.exit(1)
try:
a_token = self.options.get('auth-token')
a_url = self.options.get('admin-auth-url')
if a_token and a_url:
self.log.debug("Authentication by predefined token.")
# create keystone instance, authorized by service token
ks = ks_client.Client(
token=a_token,
endpoint=a_url,
)
service_tenant = ks.tenants.find(name='services')
                        # get the first valid auth url
auth_url = ks.endpoints.findall(
service_id=ks.services.find(type='identity').id
)[0].internalurl
# find and re-create temporary rescheduling-admin user with random password
try:
user = ks.users.find(username=TMP_USER_NAME)
ks.users.delete(user)
except ks_NotFound:
# user not found, it's OK
pass
user = ks.users.create(TMP_USER_NAME, tmp_passwd, tenant_id=service_tenant.id)
ks.roles.add_user_role(user, ks.roles.find(name='admin'), service_tenant)
# authenticate newly-created tmp neutron admin
self._keystone = ks_client.Client(
username=user.username,
password=tmp_passwd,
tenant_id=user.tenantId,
auth_url=auth_url,
)
self._need_cleanup_tmp_admin = True
else:
self.log.debug("Authentication by given credentionals.")
self._keystone = ks_client.Client(
username=self.auth_config['OS_USERNAME'],
password=self.auth_config['OS_PASSWORD'],
tenant_name=self.auth_config['OS_TENANT_NAME'],
auth_url=self.auth_config['OS_AUTH_URL'],
)
break
except Exception as e:
                    errmsg = str(e.message).strip()  # str() needed, because keystone may use an int as the exception message
if re.search(r"Connection\s+refused$", errmsg, re.I) or \
re.search(r"Connection\s+timed\s+out$", errmsg, re.I) or\
re.search(r"Lost\s+connection\s+to\s+MySQL\s+server", errmsg, re.I) or\
re.search(r"Service\s+Unavailable$", errmsg, re.I) or\
re.search(r"'*NoneType'*\s+object\s+has\s+no\s+attribute\s+'*__getitem__'*$", errmsg, re.I) or \
re.search(r"No\s+route\s+to\s+host$", errmsg, re.I):
self.log.info(">>> Can't connect to {0}, wait for server ready...".format(self.auth_config['OS_AUTH_URL']))
time.sleep(self.options.sleep)
else:
self.log.error(">>> Keystone error:\n{0}".format(e.message))
raise e
ret_count -= 1
return self._keystone
@property
def token(self):
if self._token is None:
self._token = self._keystone.auth_token
#self.log.debug("Auth_token: '{0}'".format(self._token))
#todo: Validate existing token
return self._token
@property
def client(self):
if self._client is None:
self._client = q_client.Client(
API_VER,
endpoint_url=self.keystone.endpoints.find(
service_id=self.keystone.services.find(type='network').id
).adminurl,
token=self.token,
)
return self._client
def _neutron_API_call(self, method, *args):
ret_count = self.options.get('retries')
while True:
if ret_count <= 0:
self.log.error("Q-server error: no more retries for connect to server.")
return []
try:
rv = method (*args)
break
except Exception as e:
errmsg = str(e.message).strip()
if re.search(r"Connection\s+refused", errmsg, re.I) or\
re.search(r"Connection\s+timed\s+out", errmsg, re.I) or\
re.search(r"Lost\s+connection\s+to\s+MySQL\s+server", errmsg, re.I) or\
re.search(r"503\s+Service\s+Unavailable", errmsg, re.I) or\
re.search(r"No\s+route\s+to\s+host", errmsg, re.I):
self.log.info("Can't connect to {0}, wait for server ready...".format(self.keystone.service_catalog.url_for(service_type='network')))
time.sleep(self.options.sleep)
else:
self.log.error("Neutron error:\n{0}".format(e.message))
raise e
ret_count -= 1
return rv
def _get_agents(self,use_cache=True):
return self._neutron_API_call(self.client.list_agents)['agents']
def _get_routers(self, use_cache=True):
return self._neutron_API_call(self.client.list_routers)['routers']
def _get_networks(self, use_cache=True):
return self._neutron_API_call(self.client.list_networks)['networks']
def _list_networks_on_dhcp_agent(self, agent_id):
return self._neutron_API_call(self.client.list_networks_on_dhcp_agent, agent_id)['networks']
def _list_routers_on_l3_agent(self, agent_id):
return self._neutron_API_call(self.client.list_routers_on_l3_agent, agent_id)['routers']
def _list_l3_agents_on_router(self, router_id):
return self._neutron_API_call(self.client.list_l3_agent_hosting_routers, router_id)['agents']
def _list_dhcp_agents_on_network(self, network_id):
return self._neutron_API_call(self.client.list_dhcp_agent_hosting_networks, network_id)['agents']
def _list_orphaned_networks(self):
networks = self._get_networks()
self.log.debug("_list_orphaned_networks:, got list of networks {0}".format(json.dumps(networks,indent=4)))
orphaned_networks = []
for network in networks:
if len(self._list_dhcp_agents_on_network(network['id'])) == 0:
orphaned_networks.append(network['id'])
self.log.debug("_list_orphaned_networks:, got list of orphaned networks {0}".format(orphaned_networks))
return orphaned_networks
def _list_orphaned_routers(self):
routers = self._get_routers()
self.log.debug("_list_orphaned_routers:, got list of routers {0}".format(json.dumps(routers,indent=4)))
orphaned_routers = []
for router in routers:
if len(self._list_l3_agents_on_router(router['id'])) == 0:
orphaned_routers.append(router['id'])
self.log.debug("_list_orphaned_routers:, got list of orphaned routers {0}".format(orphaned_routers))
return orphaned_routers
def _add_network_to_dhcp_agent(self, agent_id, net_id):
return self._neutron_API_call(self.client.add_network_to_dhcp_agent, agent_id, {"network_id": net_id})
def _add_router_to_l3_agent(self, agent_id, router_id):
return self._neutron_API_call(self.client.add_router_to_l3_agent, agent_id, {"router_id": router_id})
def _remove_router_from_l3_agent(self, agent_id, router_id):
return self._neutron_API_call(self.client.remove_router_from_l3_agent, agent_id, router_id)
def _list_pools_on_lbaas_agent(self, agent_id):
return self._neutron_API_call(self.client.list_pools_on_lbaas_agent, agent_id)['pools']
def _add_pool_to_lbaas_agent(self, agent_id, pool_id):
return self._neutron_API_call(self.client.add_pool_to_lb_agent, agent_id, {"pool_id": pool_id})
def _get_agents_by_type(self, agent, use_cache=True):
self.log.debug("_get_agents_by_type: start.")
rv = self.agents.get(agent, []) if use_cache else []
if not rv:
agents = self._get_agents(use_cache=use_cache)
for i in agents:
if i['binary'] == self.AGENT_BINARY_NAME.get(agent):
rv.append(i)
from_cache = ''
else:
from_cache = ' from local cache'
self.log.debug("_get_agents_by_type: end, {0} rv: {1}".format(from_cache, json.dumps(rv, indent=4)))
return rv
def __collect_namespaces_for_agent(self, agent):
cmd = self.CMD__ip_netns_list[:]
self.log.debug("Execute command '{0}'".format(' '.join(cmd)))
process = subprocess.Popen(
cmd,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
rc = process.wait()
if rc != 0:
self.log.error("ERROR (rc={0}) while execution {1}".format(rc, ' '.join(cmd)))
return []
# filter namespaces by given agent type
netns = []
stdout = process.communicate()[0]
for ns in StringIO.StringIO(stdout):
ns = ns.strip()
self.log.debug("Found network namespace '{0}'".format(ns))
if ns.startswith("{0}-".format(self.NS_NAME_PREFIXES[agent])):
netns.append(ns)
return netns
def __collect_ports_for_namespace(self, ns):
cmd = self.CMD__ip_netns_exec[:]
cmd.extend([ns, 'ip', 'l', 'show'])
self.log.debug("Execute command '{0}'".format(' '.join(cmd)))
process = subprocess.Popen(
cmd,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
rc = process.wait()
if rc != 0:
self.log.error("ERROR (rc={0}) while execution {1}".format(rc, ' '.join(cmd)))
return []
ports = []
stdout = process.communicate()[0]
for line in StringIO.StringIO(stdout):
pp = self.RE__port_in_portlist.match(line)
if not pp:
continue
port = pp.group(1)
if port != 'lo':
self.log.debug("Found port '{0}'".format(port))
ports.append(port)
return ports
def _cleanup_ports(self, agent):
self.log.debug("_cleanup_ports: start.")
# get namespaces list
netns = self.__collect_namespaces_for_agent(agent)
# collect ports from namespace
ports = []
for ns in netns:
ports.extend(self.__collect_ports_for_namespace(ns))
# iterate by port_list and remove port from OVS
for port in ports:
cmd = self.CMD__remove_ovs_port[:]
cmd.append(port)
if self.options.get('noop'):
self.log.info("NOOP-execution: '{0}'".format(' '.join(cmd)))
else:
self.log.debug("Execute command '{0}'".format(' '.join(cmd)))
process = subprocess.Popen(
cmd,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
rc = process.wait()
if rc != 0:
self.log.error("ERROR (rc={0}) while execution {1}".format(rc, ' '.join(cmd)))
self.log.debug("_cleanup_ports: end.")
return True
def _reschedule_agent_dhcp(self, agent_type):
self.log.debug("_reschedule_agent_dhcp: start.")
agents = {
'alive': [],
'dead': []
}
        # collect the network list from dead DHCP agents
dead_networks = []
for agent in self._get_agents_by_type(agent_type):
if agent['alive']:
self.log.info("found alive DHCP agent: {0}".format(agent['id']))
agents['alive'].append(agent)
else:
# dead agent
self.log.info("found dead DHCP agent: {0}".format(agent['id']))
agents['dead'].append(agent)
for net in self._list_networks_on_dhcp_agent(agent['id']):
dead_networks.append(net)
if dead_networks and agents['alive']:
            # get network-ID list of networks already attached to the alive agent
lucky_ids = set()
map(
lambda net: lucky_ids.add(net['id']),
self._list_networks_on_dhcp_agent(agents['alive'][0]['id'])
)
# add dead networks to alive agent
for net in dead_networks:
if net['id'] not in lucky_ids:
# attach network to agent
self.log.info("attach network {net} to DHCP agent {agent}".format(
net=net['id'],
agent=agents['alive'][0]['id']
))
if not self.options.get('noop'):
self._add_network_to_dhcp_agent(agents['alive'][0]['id'], net['id'])
#if error:
# return
# remove dead agents if need (and if found alive agent)
if self.options.get('remove-dead'):
for agent in agents['dead']:
self.log.info("remove dead DHCP agent: {0}".format(agent['id']))
if not self.options.get('noop'):
self._neutron_API_call(self.client.delete_agent, agent['id'])
orphaned_networks=self._list_orphaned_networks()
self.log.info("_reschedule_agent_dhcp: rescheduling orphaned networks")
if orphaned_networks and agents['alive']:
for network in orphaned_networks:
self.log.info("_reschedule_agent_dhcp: rescheduling {0} to {1}".format(network,agents['alive'][0]['id']))
if not self.options.get('noop'):
self._add_network_to_dhcp_agent(agents['alive'][0]['id'], network)
self.log.info("_reschedule_agent_dhcp: ended rescheduling of orphaned networks")
self.log.debug("_reschedule_agent_dhcp: end.")
def _reschedule_agent_l3(self, agent_type):
self.log.debug("_reschedule_agent_l3: start.")
agents = {
'alive': [],
'dead': []
}
        # collect router-list from dead L3 agents
dead_routers = [] # array of tuples (router, agentID)
for agent in self._get_agents_by_type(agent_type):
if agent['alive']:
self.log.info("found alive L3 agent: {0}".format(agent['id']))
agents['alive'].append(agent)
else:
# dead agent
self.log.info("found dead L3 agent: {0}".format(agent['id']))
agents['dead'].append(agent)
map(
lambda rou: dead_routers.append((rou, agent['id'])),
self._list_routers_on_l3_agent(agent['id'])
)
self.log.debug("L3 agents in cluster: {ags}".format(ags=json.dumps(agents, indent=4)))
self.log.debug("Routers, attached to dead L3 agents: {rr}".format(rr=json.dumps(dead_routers, indent=4)))
if dead_routers and agents['alive']:
            # get router-ID list of routers already attached to the alive agent
lucky_ids = set()
map(
lambda rou: lucky_ids.add(rou['id']),
self._list_routers_on_l3_agent(agents['alive'][0]['id'])
)
# remove dead agents after rescheduling
for agent in agents['dead']:
self.log.info("remove dead L3 agent: {0}".format(agent['id']))
if not self.options.get('noop'):
self._neutron_API_call(self.client.delete_agent, agent['id'])
# move routers from dead to alive agent
for rou in filter(lambda rr: not(rr[0]['id'] in lucky_ids), dead_routers):
# self.log.info("unschedule router {rou} from L3 agent {agent}".format(
# rou=rou[0]['id'],
# agent=rou[1]
# ))
# if not self.options.get('noop'):
# self._remove_router_from_l3_agent(rou[1], rou[0]['id'])
# #todo: if error:
# #
self.log.info("schedule router {rou} to L3 agent {agent}".format(
rou=rou[0]['id'],
agent=agents['alive'][0]['id']
))
if not self.options.get('noop'):
self._add_router_to_l3_agent(agents['alive'][0]['id'], rou[0]['id'])
orphaned_routers=self._list_orphaned_routers()
self.log.info("_reschedule_agent_l3: rescheduling orphaned routers")
if orphaned_routers and agents['alive']:
for router in orphaned_routers:
self.log.info("_reschedule_agent_l3: rescheduling {0} to {1}".format(router,agents['alive'][0]['id']))
if not self.options.get('noop'):
self._add_router_to_l3_agent(agents['alive'][0]['id'], router)
self.log.info("_reschedule_agent_l3: ended rescheduling of orphaned routers")
self.log.debug("_reschedule_agent_l3: end.")
def _reschedule_agent_lbaas(self, agent_type):
self.log.debug("_reschedule_agent_lbaas: start.")
agents = {
'alive': [],
'dead': []
}
# collect pool-list from dead Lbaas agent
dead_pools = [] # array of tuples (pool, agentID)
host_name = socket.gethostname()
for agent in self._get_agents_by_type(agent_type):
if agent['alive'] and (agent['host'] != host_name or not self.options.get('remove-self')):
self.log.info("found alive Lbaas agent: {0}".format(agent['id']))
agents['alive'].append(agent)
else:
# dead agent
self.log.info("found dead Lbaas agent: {0}".format(agent['id']))
agents['dead'].append(agent)
map(
lambda pool: dead_pools.append((pool, agent['id'])),
self._list_pools_on_lbaas_agent(agent['id'])
)
self.log.debug("Lbaas agents in cluster: {ags}".format(ags=json.dumps(agents, indent=4)))
self.log.debug("Pools, attached to dead Lbaas agents: {pool}".format(pool=json.dumps(dead_pools, indent=4)))
if dead_pools and agents['alive']:
            # get pool-ID list of pools already attached to the alive agent
lucky_ids = set()
map(
lambda pool: lucky_ids.add(pool['id']),
self._list_pools_on_lbaas_agent(agents['alive'][0]['id'])
)
# remove dead agents after rescheduling
for agent in agents['dead']:
self.log.info("remove dead Lbaas agent: {0}".format(agent['id']))
if not self.options.get('noop'):
self._neutron_API_call(self.client.delete_agent, agent['id'])
# move lbaas pool from dead to alive agent
for pool in filter(lambda pool: not(pool[0]['id'] in lucky_ids), dead_pools):
self.log.info("schedule pool {pool} to Lbaas agent {agent}".format(
pool=pool[0]['id'],
agent=agents['alive'][0]['id']
))
if not self.options.get('noop'):
self._add_pool_to_lbaas_agent(agents['alive'][0]['id'], pool[0]['id'])
self.log.debug("_reschedule_agent_lbaas: end.")
def _remove_self(self,agent_type):
self.log.debug("_remove_self: start.")
for agent in self._get_agents_by_type(agent_type):
if agent['host'] == socket.gethostname():
self.log.info("_remove_self: deleting our own agent {0} of type {1}".format(agent['id'],agent_type))
if not self.options.get('noop'):
self._neutron_API_call(self.client.delete_agent, agent['id'])
self.log.debug("_remove_self: end.")
def _reschedule_agent(self, agent):
self.log.debug("_reschedule_agents: start.")
task = self.RESCHEDULING_CALLS.get(agent, None)
if task:
            task(agent)
self.log.debug("_reschedule_agents: end.")
def do(self, agent):
if self.options.get('cleanup-ports'):
self._cleanup_ports(agent)
if self.options.get('reschedule'):
self._reschedule_agent(agent)
if self.options.get('remove-self'):
self._remove_self(agent)
# if self.options.get('remove-agent'):
# self._cleanup_agents(agent)
def _test_healthy(self, agent_list, hostname):
rv = False
for agent in agent_list:
if agent['host'] == hostname and agent['alive']:
return True
return rv
def test_healthy(self, agent_type):
rc = 9 # OCF_FAILED_MASTER, http://www.linux-ha.org/doc/dev-guides/_literal_ocf_failed_master_literal_9.html
agentlist = self._get_agents_by_type(agent_type)
for hostname in self.options.get('test-hostnames'):
if self._test_healthy(agentlist, hostname):
return 0
return rc
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Neutron network node cleaning tool.')
parser.add_argument("-c", "--auth-config", dest="authconf", default="/root/openrc",
help="Authenticating config FILE", metavar="FILE")
parser.add_argument("-t", "--auth-token", dest="auth-token", default=None,
help="Authenticating token (instead username/passwd)", metavar="TOKEN")
parser.add_argument("-u", "--admin-auth-url", dest="admin-auth-url", default=None,
help="Authenticating URL (admin)", metavar="URL")
parser.add_argument("--retries", dest="retries", type=int, default=50,
help="try NN retries for API call", metavar="NN")
parser.add_argument("--sleep", dest="sleep", type=int, default=2,
help="sleep seconds between retries", metavar="SEC")
parser.add_argument("-a", "--agent", dest="agent", action="append",
help="specyfy agents for cleaning", required=True)
parser.add_argument("--cleanup-ports", dest="cleanup-ports", action="store_true", default=False,
help="cleanup ports for given agents on this node")
parser.add_argument("--remove-self", dest="remove-self", action="store_true", default=False,
help="remove ourselves from agent list")
parser.add_argument("--activeonly", dest="activeonly", action="store_true", default=False,
help="cleanup only active ports")
parser.add_argument("--reschedule", dest="reschedule", action="store_true", default=False,
help="reschedule given agents")
parser.add_argument("--remove-dead", dest="remove-dead", action="store_true", default=False,
help="remove dead agents while rescheduling")
parser.add_argument("--test-alive-for-hostname", dest="test-hostnames", action="append",
help="testing agent's healthy for given hostname")
parser.add_argument("--external-bridge", dest="external-bridge", default="br-ex",
help="external bridge name", metavar="IFACE")
parser.add_argument("--integration-bridge", dest="integration-bridge", default="br-int",
help="integration bridge name", metavar="IFACE")
parser.add_argument("-l", "--log", dest="log", action="store",
help="log file or logging.conf location")
parser.add_argument("--noop", dest="noop", action="store_true", default=False,
help="do not execute, print to log instead")
parser.add_argument("--debug", dest="debug", action="store_true", default=False,
help="debug")
args = parser.parse_args()
# if len(args) != 1:
# parser.error("incorrect number of arguments")
# parser.print_help() args = parser.parse_args()
#setup logging
if args.debug:
_log_level = logging.DEBUG
else:
_log_level = logging.INFO
if not args.log:
# log config or file not given -- log to console
LOG = logging.getLogger(LOG_NAME) # do not move to UP of file
_log_handler = logging.StreamHandler(sys.stdout)
_log_handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
LOG.addHandler(_log_handler)
LOG.setLevel(_log_level)
elif args.log.split(os.sep)[-1] == 'logging.conf':
# setup logging by external file
import logging.config
logging.config.fileConfig(args.log)
LOG = logging.getLogger(LOG_NAME) # do not move to UP of file
else:
# log to given file
LOG = logging.getLogger(LOG_NAME) # do not move to UP of file
LOG.addHandler(logging.handlers.WatchedFileHandler(args.log))
LOG.setLevel(_log_level)
LOG.info("Started: {0}".format(' '.join(sys.argv)))
cleaner = NeutronCleaner(get_authconfig(args.authconf), options=vars(args), log=LOG)
rc = 0
if vars(args).get('test-hostnames'):
rc = cleaner.test_healthy(args.agent[0])
else:
for i in args.agent:
cleaner.do(i)
LOG.debug("End.")
sys.exit(rc)
#
###
|
[
"tangch318@gmail.com"
] |
tangch318@gmail.com
|
90ec8572ac7dbc56cb8b0885e25dd3a9dcc8e28c
|
b6e90e33a51fa9b7b9901e7adeafdc005247ad99
|
/working_gnumpy.py
|
6d2cc1849c31c654429a2edbbd9d492a9cac85d5
|
[] |
no_license
|
rasoolianbehnam/gpu_experiments
|
3394313ef724fec6063632729046c6bae2c4a518
|
c8fbe332741cfd2ec9fad113336c97363dbfa58c
|
refs/heads/master
| 2020-03-09T19:01:09.418706
| 2018-05-09T17:45:32
| 2018-05-09T17:45:32
| 128,947,206
| 0
| 0
| null | 2018-05-09T17:45:33
| 2018-04-10T14:33:37
|
Python
|
UTF-8
|
Python
| false
| false
| 16,630
|
py
|
# coding: utf-8
# In[1]:
import numpy as np
import time
import os.path

# required by the sparse-matrix, PyTorch and TensorFlow code paths below
import torch
import tensorflow as tf
from scipy.sparse import lil_matrix, coo_matrix, csr_matrix, eye
def mat_pow(a, k, b):
if k == 0:
m = np.eye(a.shape[0])
b = m * 1.
elif k % 2:
m, b = mat_pow(a, k-1, b)
m = a.dot(m)
b += a
else:
m, b = mat_pow(a, k // 2, b)
m = m.dot(m)
b += a
print(k)
return m, b
def convert_sparse_matrix_to_sparse_tensor(coo):
indices = np.mat([coo.row, coo.col]).transpose()
return tf.SparseTensor(indices, coo.data, coo.shape)
def save_sparse_csr(filename, array):
np.savez(filename, data=array.data, \
indices=array.indices, indptr=array.indptr, shape=array.shape)
def load_sparse_csr(filename):
loader = np.load(filename)
return csr_matrix((loader['data'], loader['indices'], loader['indptr']), shape=loader['shape'])
class poisson_vectorized:
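    # Builds explicit matrix operators for a fixed number of relaxation sweeps
    # of a 3-D Poisson solve: the 'poisson' kernel applies one sweep to the
    # flattened potential V, the 'g' kernel folds in the source term, and
    # A (the kernel raised to num_iterations) together with B (the running sum
    # of its powers) let all sweeps be evaluated at once as A.dot(V) - B.dot(g).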
def __init__(self, n1, n2, n3, w, num_iterations=40, h=1e-3, method='ndarray',\
upper_lim=3, want_cuda=False):
self.n1 = n1
self.n2 = n2
self.n3 = n3
self.imax = n1 - upper_lim
self.jmax = n2 - upper_lim
self.kmax = n3 - upper_lim
self.method = method
self.kernels = {}
#keys = ['poisson', 'x_gradient', 'y_gradient', 'average_x']
keys = ['poisson', 'g']
if self.method == 'lil' or method == 'coo1':
for key in keys:
self.kernels[key] = lil_matrix((n1*n2*n3, n1*n2*n3), dtype='float64')
elif method == 'coo':
for key in keys:
self.kernels[key] = np.zeros((n1*n2*n3, n1*n2*n3), dtype='float64')
elif self.method == 'ndarray':
for key in keys:
self.kernels[key] = np.zeros((n1*n2*n3, n1*n2*n3), dtype='float64')
print("method used is %s"%(self.method))
self.w = w
self.h = h
self.h2 = h**2
self.num_iterations = num_iterations
print("Starting to create kernels...")
for I in range(0, n1*n2*n3):
k = I % n3
s1 = (I - k) // n3
j = s1 % n2
i = (s1 - j) // n2
#print(I, i, j, k)
if (i >= 1 and i < self.imax+1 and
j >= 1 and j < self.jmax+1 and
k >= 1 and k < self.kmax-1):
#print(I, i, j, k)
if (I % 1000 == 0):
print("%d / %d"%(I, n1*n2*n3))
self.kernels['poisson'][I, :] =self.w*1./6* \
( self.kernels['poisson'][I-1, :] \
+ self.kernels['poisson'][I-n3, :] \
+ self.kernels['poisson'][I-n3*n2, :])
self.kernels['poisson'][I, I+1] += self.w*1./6
self.kernels['poisson'][I, I+n3] += self.w*1./6
self.kernels['poisson'][I, I+n2*n3] += self.w*1./6
self.kernels['poisson'][I, I] += 1 - self.w
# g[I] +=self.w*1./6* \
# ( g[I-1]
# + g[I-self.n3]
# + g[I-self.n3*self.n2])
self.kernels['g'][I, :] = self.w * 1./6*\
(self.kernels['g'][I-1,:] \
+self.kernels['g'][I-self.n3, :] \
+self.kernels['g'][I-self.n3*self.n2, :] \
)
self.kernels['g'][I, I] += 1
# self.kernels['x_gradient'][I, I] = -1
# self.kernels['x_gradient'][I, I+n2*n3] = 1
#
# self.kernels['y_gradient'][I, I] = -1
# self.kernels['y_gradient'][I, I + n3] = 1
# elif (i >= 2 and i < self.imax+1 and
# j >= 1 and j < self.jmax and
# k >= 1 and k < self.kmax-1):
# self.kernels['average_x'][I, I] = 1
# self.kernels['average_x'][I, I+n3] = 1
# self.kernels['average_x'][I, I-n2*n3] = 1
# self.kernels['average_x'][I, I+n3-n2*n3] = 1
else:
self.kernels['poisson'][I, I] = 1
print("Finished creating kernels.")
if self.method == 'coo':
print("Starting coo conversion")
for key in self.kernels:
self.kernels[key] = coo_matrix(self.kernels[key], (n1*n2*n3, n1*n2*n3), dtype='float64')
# uncomment in order to make fast1 work
# fast1 is less efficient that fast due to chain matrix multiplicaiton order
# rule.
if self.method == 'coo':
self.B = lil_matrix(eye(n1*n2*n3, n1*n2*n3), dtype='float64')
elif self.method == 'ndarray':
self.B = np.eye(n1*n2*n3, n1*n2*n3)
#A_file_name = 'self_A_%d_%d_%d_%d_coo.npz'%(n1, n2, n3, self.num_iterations)
#B_file_name = 'self_B_%d_%d_%d_%d_coo.npz'%(n1, n2, n3, self.num_iterations)
#A_loaded = False
#if os.path.isfile(A_file_name):
# print("A file exitst.")
# self.A = load_sparse_csr(A_file_name)
# A_loaded = True
#B_loaded = False
#if os.path.isfile(B_file_name):
# print("B file exitst.")
# self.B = load_sparse_csr(B_file_name)
# B_loaded = True
#if A_loaded and B_loaded:
# print("A and B has been found!!")
# if self.method == 'ndarray':
# self.A = self.A.todense()
# self.B = self.B.todense()
#else:
# self.A = self.kernels['poisson']
# for kk in range(self.num_iterations-1):
# print(kk)
# self.B += self.A
# self.A = self.kernels['poisson'].dot(self.A)
# print('converting large A and B to coo format to store')
# self.B = coo_matrix(self.B, (n1*n2*n3, n1*n2*n3), dtype='float64')
# self.A = coo_matrix(self.B, (n1*n2*n3, n1*n2*n3), dtype='float64')
# print("saving self.B: ")
# save_sparse_csr(B_file_name, self.B.tocsr())
# print("saving self.A: ")
# save_sparse_csr(A_file_name, self.A.tocsr())
print("calculating matrix powers")
Afound = Bfound = False
A_file_name = 'self_A_%d_%d_%d_%d_ndarray.npy'%(n1, n2, n3, self.num_iterations)
B_file_name = 'self_B_%d_%d_%d_%d_ndarray.npy'%(n1, n2, n3, self.num_iterations)
if os.path.isfile(A_file_name):
print("%s exists"%(A_file_name))
self.A = np.load(A_file_name).astype('float64')
Afound = True
if os.path.isfile(B_file_name):
print("%s exists"%(B_file_name))
self.B = np.load(B_file_name).astype('float64')
Bfound = True
if (not Afound and not Bfound):
self.A = self.kernels['poisson']
for kk in range(self.num_iterations-1):
if (kk % 10 == 0):
print(kk)
self.B += self.A
self.A = self.kernels['poisson'].dot(self.A)
np.save(A_file_name, self.A)
np.save(B_file_name, self.B)
print("converting to torch...")
self.Atorch = torch.from_numpy(self.A)
self.Btorch = torch.from_numpy(self.B)
self.kernels['g_torch'] = torch.from_numpy(self.kernels['g'])
if torch.cuda.is_available() and want_cuda:
print("cuda available in pv")
self.Atorch = self.Atorch.cuda()
self.Btorch = self.Btorch.cuda()
self.kernels['g_torch'] = self.kernels['g_torch'].cuda()
print("finished!!")
def poisson_fast_one_loop(self, V, g):
out = V
g = self.w * self.h2 * g / 6
for I in range(0, self.n1*self.n2*self.n3):
k = I % self.n3
s1 = (I - k) // self.n3
j = s1 % self.n2
i = (s1 - j) // self.n2
#print(I, i, j, k)
if (i >= 1 and i < self.imax+1 and
j >= 1 and j < self.jmax+1 and
k >= 1 and k < self.kmax-1):
#print(I, i, j, k)
g[I] +=self.w*1./6* \
( g[I-1]
+ g[I-self.n3]
+ g[I-self.n3*self.n2])
else:
g[I] = 0
for kk in range(self.num_iterations):
out = self.kernels['poisson'].dot(out) - g
return out
def poisson_fast_one_loop_gpu(self, V, g, A, sess):
out = V
g = self.w * self.h2 * g / 6
for kk in range(self.num_iterations):
out = tf.sparse_tensor_dense_matmul(A, out) - g
#out = tf.matmul(A, out) - g
sess.run(out)
return sess, out
#can be numerically unstable
def poisson_fast_no_loop_old(self, V, g):
g = self.w * self.h2 * g / 6.
for I in range(0, self.n1*self.n2*self.n3):
k = I % self.n3
s1 = (I - k) // self.n3
j = s1 % self.n2
i = (s1 - j) // self.n2
#print(I, i, j, k)
if (i >= 1 and i < self.imax+1 and
j >= 1 and j < self.jmax+1 and
k >= 1 and k < self.kmax-1):
#print(I, i, j, k)
g[I] +=self.w*1./6* \
( g[I-1]
+ g[I-self.n3]
+ g[I-self.n3*self.n2])
else:
g[I] = 0
return self.A.dot((V)) \
- self.B.dot(g) \
#can be numerically unstable
def poisson_fast_no_loop(self, V, g):
g = self.w * self.h2 * g / 6.
g = self.kernels['g'].dot(g)
return self.A.dot((V)) \
- self.B.dot(g) \
def poisson_fast_no_loop_torch(self, V, g):
g = g * self.w * self.h2 / 6.
g = torch.mm(self.kernels['g_torch'], g)
return torch.mm(self.Atorch, V) - torch.mm(self.Btorch, g)
def poisson_fast_no_loop_gpu(self, V, g):
g = self.w * self.h2 * g / 6.
for I in range(0, self.n1*self.n2*self.n3):
k = I % self.n3
s1 = (I - k) // self.n3
j = s1 % self.n2
i = (s1 - j) // self.n2
#print(I, i, j, k)
if (i >= 1 and i < self.imax+1 and
j >= 1 and j < self.jmax+1 and
k >= 1 and k < self.kmax-1):
#print(I, i, j, k)
g[I] +=self.w*1./6* \
( g[I-1]
+ g[I-self.n3]
+ g[I-self.n3*self.n2])
Atf = tf.placeholder(tf.float64, shape=[self.n1*self.n2*self.n3, self.n1*self.n2*self.n3])
Btf = tf.placeholder(tf.float64, shape=[self.n1*self.n2*self.n3, self.n1*self.n2*self.n3])
Vtf = tf.placeholder(tf.float64, shape=[self.n1*self.n2*self.n3, 1])
gtf = tf.placeholder(tf.float64, shape=[self.n1*self.n2*self.n3, 1])
out = tf.matmul(Atf, Vtf) - tf.matmul(Btf, gtf)
with tf.Session() as sess:
out = sess.run(out, feed_dict = {Atf:self.A, Btf:self.B, Vtf:V, gtf:g})
return out
#@jit
def poisson_brute2(self, V, g):
for kk in range(self.num_iterations):
temp = V * 1.
for I in range(0, self.n1*self.n2*self.n3):
k = I % self.n3
s1 = (I - k) // self.n3
j = s1 % self.n2
i = (s1 - j) // self.n2
if (i*j*k==0 or k >= self.kmax - 1 or j >= self.jmax - 1 or i >= self.imax - 1):
V[k + self.n3 * (j + self.n2 * i)] = 0
continue
r = temp[I-1] / 6.+ temp[I+1] / 6.+ temp[I+self.n3] / 6.+ temp[I-self.n3] / 6.+ temp[I+self.n2*self.n3] / 6. + temp[I-self.n2*self.n3] / 6. - temp[I] - self.h2 * g[I] / 6.
r = self.w * r
V[I] += r
return V
#@jit
def poisson_brute_main_flat(self, V, g):
for kk in range(self.num_iterations):
temp = V
for I in range(0, self.n1*self.n2*self.n3):
k = I % self.n3
s1 = (I - k) // self.n3
j = s1 % self.n2
i = (s1 - j) // self.n2
if (i*j*k==0 or k >= self.kmax - 1 or j >= self.jmax + 1 or i >= self.imax + 1):
continue
r = temp[I-1] / 6.+ temp[I+1] / 6.+ temp[I+self.n3] / 6.+ temp[I-self.n3] / 6.+ temp[I+self.n2*self.n3] / 6. + temp[I-self.n2*self.n3] / 6. - temp[I] - self.h2 * g[I] / 6.
r = self.w * r
V[I] += r
return V
#@jit
def poisson_brute_main(self, V, g):
for kk in range(self.num_iterations):
temp = V
for i in range(1, self.imax+1):
for j in range(1, self.jmax+1):
for k in range(1, self.kmax-1):
r = temp[i+1, j, k] / 6. + temp[i-1, j, k] / 6. + temp[i, j+1, k] / 6. + temp[i, j-1, k] / 6. + temp[i ,j, k+1] / 6. + temp[i, j, k-1] / 6. - temp[i, j, k] - self.h2 * g[i, j, k] / 6.
r = self.w * r
V[i, j, k] += r
return V
def poisson_brute_vectorized(self, temp, g):
for kk in range(self.num_iterations):
r = self.w * (( temp[2:self.imax, 1:self.jmax-1, 1:self.kmax-1] \
+ temp[0:self.imax-2, 1:self.jmax-1, 1:self.kmax-1] \
+ temp[1:self.imax-1, 2:self.jmax, 1:self.kmax-1] \
+ temp[1:self.imax-1, 0:self.jmax-2, 1:self.kmax-1] \
+ temp[1:self.imax-1, 1:self.jmax-1, 2:self.kmax ] \
+ temp[1:self.imax-1, 1:self.jmax-1, 0:self.kmax-2])) / 6. \
+ (1-self.w) * temp[1:self.imax-1, 1:self.jmax-1, 1:self.kmax-1] \
- self.w * self.h2 * g[1:self.imax-1, 1:self.jmax-1, 1:self.kmax-1] / 6.
temp[1:self.imax-1, 1:self.jmax-1, 1:self.kmax-1] = r
return temp
def poisson_brute3_gpu(self, V, g, sess):
out = tf.Variable(tf.float64, V)
#for kk in range(self.num_iterations):
# out[1:self.imax-1, 1:self.jmax-1, 1:self.kmax-1].assign(out[1:self.imax-1, 1:self.jmax-1, 1:self.kmax-1] + 1)
sess.run(out)
return sess, out
def apply_kernel(self, V, key):
return self.kernels[key].dot(V)
def electric_field_elements(self, V):
out = np.zeros_like(V)
out[1:self.imax+1, :, :] = (V[1:self.imax+1, :, :] - V[2:self.imax+2, :, :]) / self.h
return out
def average_x(self, V, out):
for j in range(1, self.jmax):
out[1, j, :] = V[1, j, :] + V[1, j+1, :] + V[self.imax, j, :] + \
V[self.imax, j+1, :]
def average_x_fast(self, V, out):
out[1, 1:self.jmax, :] = V[1, 1:self.jmax] + V[1, 2:self.jmax+1, :] \
+ V[self.imax, 1:self.jmax, :] + V[self.imax, 2:self.jmax+1, :]
def rel_error(x, y):
""" re-turns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
def max_rel_error_loc(x, y):
""" re-turns relative error """
a = np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y)))
return a
def stat_diff(V1, V2, text=""):
print("%-30s relative error"%text, rel_error(V1, V2))
#print(np.where(np.abs(V1 - V2) > 20))
def main():
imax = 20
jmax = 16
kmax = 8
upper_lim = 1
n1 = imax + upper_lim
n2 = jmax + upper_lim
n3 = kmax + upper_lim
w = 1.843
pv = poisson_vectorized(n1, n2, n3, w=w, num_iterations=40, method='coo'\
, upper_lim=upper_lim)
V1 = (np.random.rand(n1 * n2 * n3) * 30 + 1).astype('float64')
g = (np.random.rand(n1 * n2 * n3) * 2e5 + -1e5).astype('float64')
for I in range(0, n1*n2*n3):
k = I % n3
s1 = (I - k) // n3
j = s1 % n2
i = (s1 - j) // n2
#print(I, i, j, k)
if (i*j*k==0 or k >= kmax - 1 or j >= jmax - 1 or i >= imax - 1):
g[I] = 0
V1[I] = 0
V1_reshaped = V1.reshape(n1, n2, n3)
g_reshaped = g.reshape(n1, n2, n3)
print("Starting poisson fast")
start = time.time()
    a = pv.poisson_fast_no_loop(V1 * 1., g)
print("Time taken: %f"%(time.time() - start))
print("Starting poisson brute 2")
start = time.time()
    b = pv.poisson_brute2(V1 * 1., g)
print("Time taken: %f"%(time.time() - start))
    stat_diff(a, b.reshape(n1 * n2 * n3), "fast vs brute")
if __name__ == "__main__":
main()
|
[
"bzr0014@kraken.cse.eng.auburn.edu"
] |
bzr0014@kraken.cse.eng.auburn.edu
|
24e11c32adef3f1cddfbf7017889d30421ebfc66
|
73fa310f7b0d4ecb97d4f7550f6baa85df16516c
|
/setup.py
|
b0321ecc04d1cf05fc2b865b3ace4e2ec66d1b32
|
[
"MIT"
] |
permissive
|
recsyschallenge/2019
|
43910c633945539d88cdcfc783d64f5042146e11
|
c3a35dde4d5f3c803c716e44c3f1f5e8f58429e3
|
refs/heads/master
| 2021-07-12T06:08:16.806518
| 2020-04-29T09:49:14
| 2020-04-29T09:49:14
| 90,833,061
| 118
| 68
|
MIT
| 2020-04-29T09:49:16
| 2017-05-10T07:19:40
|
Python
|
UTF-8
|
Python
| false
| false
| 602
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup file for baseline_algorithm.
Use setup.cfg to configure your project.
This file was generated with PyScaffold 3.1.
PyScaffold helps you to put up the scaffold of your new Python project.
Learn more under: https://pyscaffold.org/
"""
import sys
from pkg_resources import require, VersionConflict
from setuptools import setup
try:
require('setuptools>=38.3')
except VersionConflict:
print("Error: version of setuptools is too old (<38.3)!")
sys.exit(1)
if __name__ == "__main__":
setup(use_pyscaffold=True)
|
[
"jens.adamczak@trivago.com"
] |
jens.adamczak@trivago.com
|
b81ff148348f0ca051630b05aad80c83386d390a
|
373f90df7faa1c323161957893f7cc2b04fbbe28
|
/save_data_periodically.py
|
546a6c3193d86dd4221a590c41baaa55e1bd2020
|
[] |
no_license
|
libingnan-rdkx/control_app
|
1d1b5b6a403218703597f050bfb74dd2b6a1cff7
|
9a7cb9bd60cf80944076cefaf4c075a3462200e2
|
refs/heads/main
| 2022-12-29T23:35:09.404241
| 2020-10-16T20:17:31
| 2020-10-16T20:17:31
| 304,729,483
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,747
|
py
|
import json
# import struct as st
import pandas as pd
import numpy as np
import paho.mqtt.client as mqtt
from datetime import datetime, timedelta
from influxdb import InfluxDBClient
client = InfluxDBClient('192.168.1.56', 8086, 'root', 'root', 'apc_db')
jianwen_var_names = (['AI'+str(i)+'_APC' for i in range(33,45)] +
['AI'+str(i)+'_APC' for i in range(49,59)])
xietiao_var_names = ['AI'+str(i)+'_APC' for i in range(1,33)]
tuoxiao_var_names = ['AI'+str(i)+'_APC' for i in range(61,75)]
es_var_names = ['AO'+str(i)+'_APC' for i in range(17,26)]
class MqttMessageBus():
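    # Subscribes to DCS data over MQTT; when the "save data" bit is set in the
    # incoming message it periodically (every `duration` minutes) queries the
    # last window of samples from InfluxDB and dumps each variable group to CSV.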
def __init__(self):
self.mqttc = mqtt.Client(client_id='save_data_periodically')
self.counter = 0
self.saveDataOn = 0
self.flag = True
self.duration = 480 # minutes
def on_connect(self, mqttc, obj, flags, rc):
print("Client is connected to mqtt broker, rc: " + str(rc))
def on_message(self, mqttc, obj, msg):
dcsmsg = json.loads(msg.payload.decode('utf-8'))
self.saveDataOn = self.get_bit_val(dcsmsg['DI3'], 15)
if self.saveDataOn:
now = datetime.utcnow()
minutes_num = now.hour * 60 + now.minute
if self.flag and minutes_num % self.duration == 0:
print('save data:', now + timedelta(hours=8))
self.saveData(now)
self.flag = False
if minutes_num % self.duration == 1:
self.flag = True
def on_publish(self, mqttc, obj, mid):
print("on_publish, mid: " + str(mid))
def on_subscribe(self, mqttc, obj, mid, granted_qos):
print("on_subscribe: " + str(mid) + " " + str(granted_qos))
def on_log(self, mqttc, obj, level, string):
print('on_log:', string)
def connect(self, host, port=1883, keepalive=60):
self.mqttc.on_connect = self.on_connect
self.mqttc.on_message = self.on_message
# self.mqttc.on_publish = self.on_publish
self.mqttc.on_subscribe = self.on_subscribe
# Uncomment to enable debug messages
# mqttc.on_log = on_log
self.mqttc.connect(host, port, keepalive)
def subscribe(self, topic, qos=0):
self.mqttc.subscribe(topic, qos)
self.mqttc.loop_forever()
def saveData(self, now):
self.saveDataToCSV(jianwen_var_names, now, 'dcs_data', 'jianwen')
self.saveDataToCSV(xietiao_var_names, now, 'dcs_data', 'xietiao')
self.saveDataToCSV(tuoxiao_var_names, now, 'dcs_data', 'tuoxiao')
self.saveDataToCSV(es_var_names, now, 'command_data', 'exci_sig')
def saveDataToCSV(self, var_names, now, measurement, fnsuffix):
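        # Query InfluxDB for the given variables over the last `duration`
        # minutes and write the result to a timestamped CSV file.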
datetimestr = []
col_data = []
var_str = ','.join(var_names)
where_str = "where time>=$startTime and time<=$endTime"
query = "select {} from {} {}".format(var_str, measurement, where_str)
timeZone = 8 # 8 hours
duration = self.duration
endTimeStamp = now
startTimeStamp = endTimeStamp - timedelta(minutes=duration)
startTime = startTimeStamp.strftime('%Y-%m-%dT%H:%M:%SZ')
endTime = endTimeStamp.strftime('%Y-%m-%dT%H:%M:%SZ')
startTime = startTime[:17]+'00Z'
endTime = endTime[:17]+'00Z'
# startTime = '2020-06-11T12:00:00Z'
# endTime = '2020-06-12T00:00:00Z'
bind_params = {'startTime': startTime, 'endTime': endTime}
results = client.query(query, bind_params=bind_params).get_points() # result is an iterator
for rs in results:
datetimestr.append(rs['time'])
for name in var_names:
col_data.append(round(rs[name]))
data = np.array(col_data).reshape(-1, len(var_names))
df = pd.DataFrame(data,columns=var_names, index=datetimestr)
print('data length',len(data))
st = (startTimeStamp + timedelta(hours=timeZone)).strftime('%Y%m%d-%H%M%S')
et = (endTimeStamp + timedelta(hours=timeZone)).strftime('%Y%m%d-%H%M%S')
fn = '/home/czyd/data/{}_{}_{}.csv'.format(st, et, fnsuffix)
# fn = '{}_{}_{}.csv'.format(st, et, fnsuffix)
df.to_csv(fn, sep=',', header=True, index=True)
def get_bit_val(self, byte, index):
"""
get the bit value of an integer at index from right to left starting from 0
"""
if byte & (1 << index):
return 1
else:
return 0
if __name__ == "__main__":
mmb = MqttMessageBus()
mqtthost = '192.168.1.56'
port = 1883
keepalive = 60
mmb.connect(mqtthost, port, keepalive)
mmb.subscribe('apcdata')
|
[
"noreply@github.com"
] |
libingnan-rdkx.noreply@github.com
|
534a6d3743ebc5084d7a4381efa5f146340deebe
|
5c6bdc1915d56f1fee9b66a45365cefd097ff1f4
|
/challenge_3.py
|
645cd85ef5cd8e4cdba1fe3b01314768a428c6e6
|
[] |
no_license
|
chandanmanjunath/LearnByexample
|
534a9e880453c316f4168c4b234165d935d2dac7
|
52351f7fba57ac0d0f13edb44c537131af860b60
|
refs/heads/master
| 2021-05-07T17:29:10.852798
| 2017-10-29T12:28:58
| 2017-10-29T12:28:58
| 108,732,377
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
if __name__ == '__main__':
a = int(raw_input())
b = int(raw_input())
if (a>=1 and a<=pow(10,10)) and (b>=1 and b<=pow(10,10)) :
print a+b
print a-b
print a*b
|
[
"mchandanhegde@gmail.com"
] |
mchandanhegde@gmail.com
|
98e4d65023487abe3e1d25487d510bec8a565b46
|
84a0e742eeb89016f419b13329a4e6a1828e4d31
|
/001_IntroductionToCS&ProgrammingUsingPython/Extra_Problems/oop_fraction.py
|
235020581d1f7a8ddb21abd3e0d787229b39d430
|
[
"MIT"
] |
permissive
|
dalalsunil1986/Computer-Science-Degree
|
e85736c8c705bb82d897519cf2339ff638bc1b5f
|
e2c73f35cc48bbcc2a5cc0ddc6867fd0787c6dd9
|
refs/heads/master
| 2023-03-16T18:37:31.954245
| 2020-02-24T17:08:47
| 2020-02-24T17:08:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,064
|
py
|
"""
@author: Anirudh Sharma
"""
class Fraction(object):
def __init__(self, numerator, denominator):
assert type(numerator) == int and type(denominator) == int
self.numerator = numerator
self.denominator = denominator
def __str__(self):
return str(self.numerator) + "/" + str(self.denominator)
def __add__(self, other):
n = self.numerator * other.denominator + other.numerator * self.denominator
d = self.denominator * other.denominator
return Fraction(n, d)
def __sub__(self, other):
n = self.numerator * other.denominator - other.numerator * self.denominator
d = self.denominator * other.denominator
return Fraction(n, d)
def __float__(self):
return self.numerator / self.denominator
def inverse(self):
return Fraction(self.denominator, self.numerator)
a = Fraction(1, 2)
b = Fraction(2, 3)
plus = a + b
print(plus)
minus = a - b
print(minus)
f = float(a)
print(f)
r = Fraction.inverse(b)
print(r)
|
[
"anirudh03sharma@gmail.com"
] |
anirudh03sharma@gmail.com
|
575e8b142c93a2754ad6442049f023e2337d423d
|
648ed56ac573ecf8206d5df5e7ebce3eb8bdb763
|
/usuarios/migrations/0001_initial.py
|
5f98e09ed51aa38814dc7a67057988723784e72a
|
[] |
no_license
|
LarissaGRosa/meumissi_final
|
eeef5575219e29485730a8b5fe2a16f072f1e17e
|
886adc8b9e55160cc76257bbb98af7db38cf8a40
|
refs/heads/master
| 2023-08-29T05:58:40.584910
| 2019-12-02T12:07:45
| 2019-12-02T12:07:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 765
|
py
|
# Generated by Django 2.2.6 on 2019-10-21 21:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Perfil',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('foto', models.ImageField(blank=True, upload_to='imagem_perfil')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"meumissi@gmail.com"
] |
meumissi@gmail.com
|
36ed1046ec8a0ce925a42c0e4170ecd229b5ca7a
|
de0e136b52990d6bacc7527c755224295f294060
|
/SwitchWindowPkg/switchWindow.py
|
ff099b44852493d0626d501c7a17f7cadb826843
|
[] |
no_license
|
bratva123/selenium
|
ca8bd6cbf61fe166e0d72376dd39bc055daee3d5
|
335f9d978b2da94a301f95c560c89a9c9ebe56d6
|
refs/heads/master
| 2022-09-01T09:57:45.997142
| 2020-05-21T02:59:27
| 2020-05-21T02:59:27
| 265,733,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,225
|
py
|
from selenium import webdriver
import time
class SwitchWindow():
def test(self):
driver = webdriver.Chrome();
driver.maximize_window()
baseUrl = "https://letskodeit.teachable.com/pages/practice"
driver.get(baseUrl)
driver.implicitly_wait(4)
        # find the parent handle -> main window
parentHandle = driver.current_window_handle
print(parentHandle)
        # find the "open window" button and click it
openWindow = driver.find_element_by_id("openwindow")
openWindow.click()
time.sleep(3)
        # find all the handles; there should be two handles after clicking the open window button
handles = driver.window_handles
for handle in handles:
print("handle : "+ handle)
            if handle != parentHandle:
driver.switch_to_window(handle)
print("switched to windows : "+handle)
inputBox = driver.find_element_by_id("search-courses")
inputBox.send_keys("python")
time.sleep(3)
driver.close()
# inputBox = driver.find_element_by_id("search-courses")
# inputBox.send_keys("python")
sw = SwitchWindow()
sw.test()
|
[
"lavkr0403@gmail.com"
] |
lavkr0403@gmail.com
|
47790afe44b154c235cfd5cf1226cb72bfdf6aaf
|
03a94409c32e74d796f271ad80c9bc86faa2b9db
|
/pom/conftest.py
|
d553f7b4afc8e790ca9341de78e22e33e88f1fb5
|
[] |
no_license
|
acqa/epam_ta
|
9b0496e80e5caf98e6d9396c85bee0a9a0c741d6
|
715342261127e39624bfd9adfda80aac34bc162a
|
refs/heads/master
| 2020-05-22T18:33:14.610831
| 2019-06-10T19:55:57
| 2019-06-10T19:55:57
| 186,474,104
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,161
|
py
|
from app.application import App
import pytest
import os.path
import time
import os
#testdata
base_url = "http://v999140x.beget.tech"
login ="test_user"
password ="12345"
@pytest.fixture(scope = 'session')
def app(request):
fixture = App(base_url = base_url)
fixture.login_page.fix_login(login = login, password = password)
fixture.post_page.fix_close_advice_popup()
yield fixture
fixture.destroy()
# @pytest.fixture(scope="function", autouse=True)
# def take_screenshot_when_failure(request):
#
# def tear_down():
# path = os.path.dirname(__file__)
# # if request.node.rep_call.failed:
# fixture.wd.save_screenshot("%s/log/screen/scr_%s.png" % (path, time.time()))
# request.addfinalizer(tear_down)
# yield
# @pytest.fixture(autouse=True, scope="function")
# def screenshot_on_failure(request):
# def fin():
# driver = App(base_url = base_url)
# attach = driver.get_screenshot_as_png()
# if request.node.rep_setup.failed:
# allure.attach(request.function.__name__, attach, allure.attach_type.PNG)
# elif request.node.rep_setup.passed:
# if request.node.rep_call.failed:
# allure.attach(request.function.__name__, attach, allure.attach_type.PNG)
# request.addfinalizer(fin)
@pytest.fixture(autouse=True, scope='session')
def generate_allure_report():
"""Генерирует HTML отчет из результатов теста"""
yield
os.system("allure generate -c ../log/allure/result -o ../log/allure/report")
@pytest.fixture(autouse=True, scope='session')
def footer_session_scope():
"""Сообщает время в конце session(сеанса)."""
yield
now = time.time()
print('--')
print('finished : {}'.format(time.strftime('%d %b %X', time.localtime(now))))
print('-----------------')
@pytest.fixture(autouse=True)
def footer_function_scope():
"""Сообщает продолжительность теста после каждой функции."""
start = time.time()
yield
stop = time.time()
delta = stop - start
print('\ntest duration : {:0.3} seconds'.format(delta))
|
[
"reg@ac11.ru"
] |
reg@ac11.ru
|
6b4ab0a7e10c34f653dd28cfdf289ca292364259
|
7e4425342a4d7e0f40978af17091f32d2712c79c
|
/Day_36_01_Word2VecBasic.py
|
06bed965a1102af98a5115949451121c9d0eb08e
|
[] |
no_license
|
yunhui21/CB_Ai_NLP
|
eca3da00c6c9615c8737b50d2c5ebe8dd1e3ba8a
|
b66ecc24abfd988fc9e7f19fa1941826b1bf38a4
|
refs/heads/master
| 2023-01-07T14:21:26.758030
| 2020-11-16T05:57:30
| 2020-11-16T05:57:30
| 291,835,156
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,299
|
py
|
# Day_36_01_Word2VecBasic.py
# one-hot vector: encodes each word as an index over as many classes as there
# are words - in practice the number of classes becomes far too large.
'''
skipgram: predict the surrounding (context) words from the target word;
the CBOW-style variant below predicts the target word from its context.
'''
# Find the end position.
# From the full range of positions, remove only the target.
def extrast(token_count, target, window_size ):
start = max(target - window_size, 0)
end = min(target + window_size + 1, token_count)
return [i for i in range(start, end) if i != target]
def show_dataset(tokens, window_size, is_skipgram):
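    # For every target position print its context indices, then either the
    # (target, context) word pairs (skip-gram) or the context words followed
    # by the target word (CBOW-style).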
token_count = len(tokens)
for target in range(token_count):
surround = extrast(token_count, target, window_size)
print(target, surround, end='')
        # Problem:
        # print the words that the surround indices point to.
if is_skipgram:
# print(list([zip([target] * len(surround), surround)]))
print([(tokens[t], tokens[s]) for t, s in zip([target] * len(surround), surround)])
else:
print([tokens[i] for i in surround], tokens[target])
tokens = ['the', 'quick', 'brown', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog']
# show_dataset(tokens, 1, is_skipgram=True)
# # show_dataset(tokens, 1, is_skimgram= False )
show_dataset(tokens, 2, is_skipgram=True)
print()
show_dataset(tokens, 2, is_skipgram=False)
|
[
"yunhui21@gmail.com"
] |
yunhui21@gmail.com
|
fd9582f9639ce68f96b70af3736a5014325c555c
|
2ea49bfaa6bc1b9301b025c5b2ca6fde7e5bb9df
|
/contributions/Starkman9000/python/Data Structures/2016-10-01.py
|
ccf039b16b39393d44244265885006e6533ab489
|
[] |
no_license
|
0x8801/commit
|
18f25a9449f162ee92945b42b93700e12fd4fd77
|
e7692808585bc7e9726f61f7f6baf43dc83e28ac
|
refs/heads/master
| 2021-10-13T08:04:48.200662
| 2016-12-20T01:59:47
| 2016-12-20T01:59:47
| 76,935,980
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 123
|
py
|
Double ended queues with `deque`
`Module`s everywhere!
Get the most of `int`s
There is more to copying
`queue`s and threads
|
[
"starkman9000@gmail.com"
] |
starkman9000@gmail.com
|
9feef2069d45122ef67fc6486e6dcc02591ab879
|
89096c0bfae0bb08d03ed52b281bd6d51b3bcad1
|
/template/usr.share.vim.vim73.plugin.js-beautify/python/jsbeautifier/tests/testjsbeautifier.py
|
001ccfa2e2471385a04a389d867b5eeedeea9227
|
[
"MIT"
] |
permissive
|
visi-pivi-sivi/PI6
|
e46ff1060b3d458a4366123dabab53ab07cc01ea
|
9fa9baf36b33fa719956b94d7fd0a09f6fb251e8
|
refs/heads/master
| 2021-01-10T20:29:47.314852
| 2019-12-31T03:01:20
| 2019-12-31T03:01:20
| 23,481,403
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 53,083
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import unittest
import jsbeautifier
class TestJSBeautifier(unittest.TestCase):
def test_unescape(self):
# Test cases contributed by <chrisjshull on GitHub.com>
test_fragment = self.decodesto
bt = self.bt
bt('"\\\\s"'); # == "\\s" in the js source
bt("'\\\\s'"); # == '\\s' in the js source
bt("'\\\\\\s'"); # == '\\\s' in the js source
bt("'\\s'"); # == '\s' in the js source
bt('"•"');
bt('"—"');
bt('"\\x41\\x42\\x43\\x01"', '"\\x41\\x42\\x43\\x01"');
bt('"\\u2022"', '"\\u2022"');
bt('a = /\s+/')
#bt('a = /\\x41/','a = /A/')
bt('"\\u2022";a = /\s+/;"\\x41\\x42\\x43\\x01".match(/\\x41/);','"\\u2022";\na = /\s+/;\n"\\x41\\x42\\x43\\x01".match(/\\x41/);')
bt('"\\x22\\x27",\'\\x22\\x27\',"\\x5c",\'\\x5c\',"\\xff and \\xzz","unicode \\u0000 \\u0022 \\u0027 \\u005c \\uffff \\uzzzz"', '"\\x22\\x27", \'\\x22\\x27\', "\\x5c", \'\\x5c\', "\\xff and \\xzz", "unicode \\u0000 \\u0022 \\u0027 \\u005c \\uffff \\uzzzz"');
self.options.unescape_strings = True
bt('"\\x41\\x42\\x43\\x01"', '"ABC\\x01"');
bt('"\\u2022"', '"\\u2022"');
bt('a = /\s+/')
bt('"\\u2022";a = /\s+/;"\\x41\\x42\\x43\\x01".match(/\\x41/);','"\\u2022";\na = /\s+/;\n"ABC\\x01".match(/\\x41/);')
bt('"\\x22\\x27",\'\\x22\\x27\',"\\x5c",\'\\x5c\',"\\xff and \\xzz","unicode \\u0000 \\u0022 \\u0027 \\u005c \\uffff \\uzzzz"', '"\\"\'", \'"\\\'\', "\\\\", \'\\\\\', "\\xff and \\xzz", "unicode \\u0000 \\" \' \\\\ \\uffff \\uzzzz"');
self.options.unescape_strings = False
def test_beautifier(self):
test_fragment = self.decodesto
bt = self.bt
bt('');
bt('return .5');
test_fragment(' return .5');
bt('a = 1', 'a = 1');
bt('a=1', 'a = 1');
bt("a();\n\nb();", "a();\n\nb();");
bt('var a = 1 var b = 2', "var a = 1\nvar b = 2");
bt('var a=1, b=c[d], e=6;', 'var a = 1,\n b = c[d],\n e = 6;');
bt('a = " 12345 "');
bt("a = ' 12345 '");
bt('if (a == 1) b = 2;', "if (a == 1) b = 2;");
bt('if(1){2}else{3}', "if (1) {\n 2\n} else {\n 3\n}");
bt('if(1||2);', 'if (1 || 2);');
bt('(a==1)||(b==2)', '(a == 1) || (b == 2)');
bt('var a = 1 if (2) 3;', "var a = 1\nif (2) 3;");
bt('a = a + 1');
bt('a = a == 1');
bt('/12345[^678]*9+/.match(a)');
bt('a /= 5');
bt('a = 0.5 * 3');
bt('a *= 10.55');
bt('a < .5');
bt('a <= .5');
bt('a<.5', 'a < .5');
bt('a<=.5', 'a <= .5');
bt('a = 0xff;');
bt('a=0xff+4', 'a = 0xff + 4');
bt('a = [1, 2, 3, 4]');
bt('F*(g/=f)*g+b', 'F * (g /= f) * g + b');
bt('a.b({c:d})', "a.b({\n c: d\n })");
bt('a.b\n(\n{\nc:\nd\n}\n)', "a.b({\n c: d\n })");
bt('a=!b', 'a = !b');
bt('a?b:c', 'a ? b : c');
bt('a?1:2', 'a ? 1 : 2');
bt('a?(b):c', 'a ? (b) : c');
bt('x={a:1,b:w=="foo"?x:y,c:z}', 'x = {\n a: 1,\n b: w == "foo" ? x : y,\n c: z\n}');
bt('x=a?b?c?d:e:f:g;', 'x = a ? b ? c ? d : e : f : g;');
bt('x=a?b?c?d:{e1:1,e2:2}:f:g;', 'x = a ? b ? c ? d : {\n e1: 1,\n e2: 2\n} : f : g;');
bt('function void(void) {}');
bt('if(!a)foo();', 'if (!a) foo();');
bt('a=~a', 'a = ~a');
bt('a;/*comment*/b;', "a; /*comment*/\nb;");
bt('a;/* comment */b;', "a; /* comment */\nb;");
test_fragment('a;/*\ncomment\n*/b;', "a;\n/*\ncomment\n*/\nb;"); # simple comments don't get touched at all
bt('a;/**\n* javadoc\n*/b;', "a;\n/**\n * javadoc\n */\nb;");
test_fragment('a;/**\n\nno javadoc\n*/b;', "a;\n/**\n\nno javadoc\n*/\nb;");
bt('a;/*\n* javadoc\n*/b;', "a;\n/*\n * javadoc\n */\nb;"); # comment blocks detected and reindented even w/o javadoc starter
bt('if(a)break;', "if (a) break;");
bt('if(a){break}', "if (a) {\n break\n}");
bt('if((a))foo();', 'if ((a)) foo();');
bt('for(var i=0;;) a', 'for (var i = 0;;) a');
bt('for(var i=0;;)\na', 'for (var i = 0;;)\n a');
bt('a++;', 'a++;');
bt('for(;;i++)a()', 'for (;; i++) a()');
bt('for(;;i++)\na()', 'for (;; i++)\n a()');
bt('for(;;++i)a', 'for (;; ++i) a');
bt('return(1)', 'return (1)');
bt('try{a();}catch(b){c();}finally{d();}', "try {\n a();\n} catch (b) {\n c();\n} finally {\n d();\n}");
bt('(xx)()'); # magic function call
bt('a[1]()'); # another magic function call
bt('if(a){b();}else if(c) foo();', "if (a) {\n b();\n} else if (c) foo();");
bt('switch(x) {case 0: case 1: a(); break; default: break}', "switch (x) {\n case 0:\n case 1:\n a();\n break;\n default:\n break\n}");
bt('switch(x){case -1:break;case !y:break;}', 'switch (x) {\n case -1:\n break;\n case !y:\n break;\n}');
bt('a !== b');
bt('if (a) b(); else c();', "if (a) b();\nelse c();");
bt("// comment\n(function something() {})"); # typical greasemonkey start
bt("{\n\n x();\n\n}"); # was: duplicating newlines
bt('if (a in b) foo();');
bt('var a, b;');
# bt('var a, b');
bt('{a:1, b:2}', "{\n a: 1,\n b: 2\n}");
bt('a={1:[-1],2:[+1]}', 'a = {\n 1: [-1],\n 2: [+1]\n}');
bt('var l = {\'a\':\'1\', \'b\':\'2\'}', "var l = {\n 'a': '1',\n 'b': '2'\n}");
bt('if (template.user[n] in bk) foo();');
bt('{{}/z/}', "{\n {}\n /z/\n}");
bt('return 45', "return 45");
bt('If[1]', "If[1]");
bt('Then[1]', "Then[1]");
bt('a = 1e10', "a = 1e10");
bt('a = 1.3e10', "a = 1.3e10");
bt('a = 1.3e-10', "a = 1.3e-10");
bt('a = -1.3e-10', "a = -1.3e-10");
bt('a = 1e-10', "a = 1e-10");
bt('a = e - 10', "a = e - 10");
bt('a = 11-10', "a = 11 - 10");
bt("a = 1;// comment", "a = 1; // comment");
bt("a = 1; // comment", "a = 1; // comment");
bt("a = 1;\n // comment", "a = 1;\n// comment");
bt('a = [-1, -1, -1]');
# The exact formatting these should have is open for discussion, but they are at least reasonable
bt('a = [ // comment\n -1, -1, -1\n]');
bt('var a = [ // comment\n -1, -1, -1\n]');
bt('a = [ // comment\n -1, // comment\n -1, -1\n]');
bt('var a = [ // comment\n -1, // comment\n -1, -1\n]');
bt('o = [{a:b},{c:d}]', 'o = [{\n a: b\n }, {\n c: d\n }\n]');
bt("if (a) {\n do();\n}"); # was: extra space appended
bt("if (a) {\n// comment\n}else{\n// comment\n}", "if (a) {\n // comment\n} else {\n // comment\n}"); # if/else statement with empty body
bt("if (a) {\n// comment\n// comment\n}", "if (a) {\n // comment\n // comment\n}"); # multiple comments indentation
bt("if (a) b() else c();", "if (a) b()\nelse c();");
bt("if (a) b() else if c() d();", "if (a) b()\nelse if c() d();");
bt("{}");
bt("{\n\n}");
bt("do { a(); } while ( 1 );", "do {\n a();\n} while (1);");
bt("do {} while (1);");
bt("do {\n} while (1);", "do {} while (1);");
bt("do {\n\n} while (1);");
bt("var a = x(a, b, c)");
bt("delete x if (a) b();", "delete x\nif (a) b();");
bt("delete x[x] if (a) b();", "delete x[x]\nif (a) b();");
bt("for(var a=1,b=2)d", "for (var a = 1, b = 2) d");
bt("for(var a=1,b=2,c=3) d", "for (var a = 1, b = 2, c = 3) d");
bt("for(var a=1,b=2,c=3;d<3;d++)\ne", "for (var a = 1, b = 2, c = 3; d < 3; d++)\n e");
bt("function x(){(a||b).c()}", "function x() {\n (a || b).c()\n}");
bt("function x(){return - 1}", "function x() {\n return -1\n}");
bt("function x(){return ! a}", "function x() {\n return !a\n}");
# a common snippet in jQuery plugins
bt("settings = $.extend({},defaults,settings);", "settings = $.extend({}, defaults, settings);");
bt('{xxx;}()', '{\n xxx;\n}()');
bt("a = 'a'\nb = 'b'");
bt("a = /reg/exp");
bt("a = /reg/");
bt('/abc/.test()');
bt('/abc/i.test()');
bt("{/abc/i.test()}", "{\n /abc/i.test()\n}");
bt('var x=(a)/a;', 'var x = (a) / a;');
bt('x != -1', 'x != -1');
bt('for (; s-->0;)t', 'for (; s-- > 0;) t');
bt('for (; s++>0;)u', 'for (; s++ > 0;) u');
bt('a = s++>s--;', 'a = s++ > s--;');
bt('a = s++>--s;', 'a = s++ > --s;');
bt('{x=#1=[]}', '{\n x = #1=[]\n}');
bt('{a:#1={}}', '{\n a: #1={}\n}');
bt('{a:#1#}', '{\n a: #1#\n}');
test_fragment('"incomplete-string');
test_fragment("'incomplete-string");
test_fragment('/incomplete-regex');
test_fragment('{a:1},{a:2}', '{\n a: 1\n}, {\n a: 2\n}');
test_fragment('var ary=[{a:1}, {a:2}];', 'var ary = [{\n a: 1\n }, {\n a: 2\n }\n];');
test_fragment('{a:#1', '{\n a: #1'); # incomplete
test_fragment('{a:#', '{\n a: #'); # incomplete
test_fragment('}}}', '}\n}\n}'); # incomplete
test_fragment('<!--\nvoid();\n// -->', '<!--\nvoid();\n// -->');
test_fragment('a=/regexp', 'a = /regexp'); # incomplete regexp
bt('{a:#1=[],b:#1#,c:#999999#}', '{\n a: #1=[],\n b: #1#,\n c: #999999#\n}');
bt("a = 1e+2");
bt("a = 1e-2");
bt("do{x()}while(a>1)", "do {\n x()\n} while (a > 1)");
bt("x(); /reg/exp.match(something)", "x();\n/reg/exp.match(something)");
test_fragment("something();(", "something();\n(");
test_fragment("#!she/bangs, she bangs\nf=1", "#!she/bangs, she bangs\n\nf = 1");
test_fragment("#!she/bangs, she bangs\n\nf=1", "#!she/bangs, she bangs\n\nf = 1");
test_fragment("#!she/bangs, she bangs\n\n/* comment */", "#!she/bangs, she bangs\n\n/* comment */");
test_fragment("#!she/bangs, she bangs\n\n\n/* comment */", "#!she/bangs, she bangs\n\n\n/* comment */");
test_fragment("#", "#");
test_fragment("#!", "#!");
bt("function namespace::something()");
test_fragment("<!--\nsomething();\n-->", "<!--\nsomething();\n-->");
test_fragment("<!--\nif(i<0){bla();}\n-->", "<!--\nif (i < 0) {\n bla();\n}\n-->");
bt('{foo();--bar;}', '{\n foo();\n --bar;\n}');
bt('{foo();++bar;}', '{\n foo();\n ++bar;\n}');
bt('{--bar;}', '{\n --bar;\n}');
bt('{++bar;}', '{\n ++bar;\n}');
# Handling of newlines around unary ++ and -- operators
bt('{foo\n++bar;}', '{\n foo\n ++bar;\n}');
bt('{foo++\nbar;}', '{\n foo++\n bar;\n}');
# This is invalid, but harder to guard against. Issue #203.
bt('{foo\n++\nbar;}', '{\n foo\n ++\n bar;\n}');
# regexps
bt('a(/abc\\/\\/def/);b()', "a(/abc\\/\\/def/);\nb()");
bt('a(/a[b\\[\\]c]d/);b()', "a(/a[b\\[\\]c]d/);\nb()");
test_fragment('a(/a[b\\[', "a(/a[b\\["); # incomplete char class
# allow unescaped / in char classes
bt('a(/[a/b]/);b()', "a(/[a/b]/);\nb()");
bt('a=[[1,2],[4,5],[7,8]]', "a = [\n [1, 2],\n [4, 5],\n [7, 8]\n]");
bt('a=[[1,2],[4,5],function(){},[7,8]]',
"a = [\n [1, 2],\n [4, 5],\n function() {},\n [7, 8]\n]");
bt('a=[[1,2],[4,5],function(){},function(){},[7,8]]',
"a = [\n [1, 2],\n [4, 5],\n function() {},\n function() {},\n [7, 8]\n]");
bt('a=[[1,2],[4,5],function(){},[7,8]]',
"a = [\n [1, 2],\n [4, 5],\n function() {},\n [7, 8]\n]");
bt('a=[b,c,function(){},function(){},d]',
"a = [b, c,\n function() {},\n function() {},\n d\n]");
bt('a=[a[1],b[4],c[d[7]]]', "a = [a[1], b[4], c[d[7]]]");
bt('[1,2,[3,4,[5,6],7],8]', "[1, 2, [3, 4, [5, 6], 7], 8]");
bt('[[["1","2"],["3","4"]],[["5","6","7"],["8","9","0"]],[["1","2","3"],["4","5","6","7"],["8","9","0"]]]',
'[\n [\n ["1", "2"],\n ["3", "4"]\n ],\n [\n ["5", "6", "7"],\n ["8", "9", "0"]\n ],\n [\n ["1", "2", "3"],\n ["4", "5", "6", "7"],\n ["8", "9", "0"]\n ]\n]');
bt('{[x()[0]];indent;}', '{\n [x()[0]];\n indent;\n}');
bt('return ++i', 'return ++i');
bt('return !!x', 'return !!x');
bt('return !x', 'return !x');
bt('return [1,2]', 'return [1, 2]');
bt('return;', 'return;');
bt('return\nfunc', 'return\nfunc');
bt('catch(e)', 'catch (e)');
bt('var a=1,b={foo:2,bar:3},{baz:4,wham:5},c=4;', 'var a = 1,\n b = {\n foo: 2,\n bar: 3\n }, {\n baz: 4,\n wham: 5\n }, c = 4;');
bt('var a=1,b={foo:2,bar:3},{baz:4,wham:5},\nc=4;', 'var a = 1,\n b = {\n foo: 2,\n bar: 3\n }, {\n baz: 4,\n wham: 5\n },\n c = 4;');
# inline comment
bt('function x(/*int*/ start, /*string*/ foo)', 'function x( /*int*/ start, /*string*/ foo)');
# javadoc comment
bt('/**\n* foo\n*/', '/**\n * foo\n */');
bt('{\n/**\n* foo\n*/\n}', '{\n /**\n * foo\n */\n}');
bt('var a,b,c=1,d,e,f=2;', 'var a, b, c = 1,\n d, e, f = 2;');
bt('var a,b,c=[],d,e,f=2;', 'var a, b, c = [],\n d, e, f = 2;');
bt('function() {\n var a, b, c, d, e = [],\n f;\n}');
bt('do/regexp/;\nwhile(1);', 'do /regexp/;\nwhile (1);'); # hmmm
bt('var a = a,\na;\nb = {\nb\n}', 'var a = a,\n a;\nb = {\n b\n}');
bt('var a = a,\n /* c */\n b;');
bt('var a = a,\n // c\n b;');
bt('foo.("bar");'); # weird element referencing
bt('if (a) a()\nelse b()\nnewline()');
bt('if (a) a()\nnewline()');
bt('a=typeof(x)', 'a = typeof(x)');
bt('var a = function() {\n return null;\n},\n b = false;');
bt('var a = function() {\n func1()\n}');
bt('var a = function() {\n func1()\n}\nvar b = function() {\n func2()\n}');
self.options.jslint_happy = True
bt('x();\n\nfunction(){}', 'x();\n\nfunction () {}');
bt('function () {\n var a, b, c, d, e = [],\n f;\n}');
bt('switch(x) {case 0: case 1: a(); break; default: break}',
"switch (x) {\ncase 0:\ncase 1:\n a();\n break;\ndefault:\n break\n}");
bt('switch(x){case -1:break;case !y:break;}',
'switch (x) {\ncase -1:\n break;\ncase !y:\n break;\n}');
test_fragment("// comment 1\n(function()", "// comment 1\n(function ()"); # typical greasemonkey start
bt('var o1=$.extend(a);function(){alert(x);}', 'var o1 = $.extend(a);\n\nfunction () {\n alert(x);\n}');
bt('a=typeof(x)', 'a = typeof (x)');
self.options.jslint_happy = False
bt('switch(x) {case 0: case 1: a(); break; default: break}',
"switch (x) {\n case 0:\n case 1:\n a();\n break;\n default:\n break\n}");
bt('switch(x){case -1:break;case !y:break;}',
'switch (x) {\n case -1:\n break;\n case !y:\n break;\n}');
test_fragment("// comment 2\n(function()", "// comment 2\n(function()"); # typical greasemonkey start
bt("var a2, b2, c2, d2 = 0, c = function() {}, d = '';", "var a2, b2, c2, d2 = 0,\n c = function() {}, d = '';");
bt("var a2, b2, c2, d2 = 0, c = function() {},\nd = '';", "var a2, b2, c2, d2 = 0,\n c = function() {},\n d = '';");
bt('var o2=$.extend(a);function(){alert(x);}', 'var o2 = $.extend(a);\n\nfunction() {\n alert(x);\n}');
bt('{"x":[{"a":1,"b":3},7,8,8,8,8,{"b":99},{"a":11}]}', '{\n "x": [{\n "a": 1,\n "b": 3\n },\n 7, 8, 8, 8, 8, {\n "b": 99\n }, {\n "a": 11\n }\n ]\n}');
bt('{"1":{"1a":"1b"},"2"}', '{\n "1": {\n "1a": "1b"\n },\n "2"\n}');
bt('{a:{a:b},c}', '{\n a: {\n a: b\n },\n c\n}');
bt('{[y[a]];keep_indent;}', '{\n [y[a]];\n keep_indent;\n}');
bt('if (x) {y} else { if (x) {y}}', 'if (x) {\n y\n} else {\n if (x) {\n y\n }\n}');
bt('if (foo) one()\ntwo()\nthree()');
bt('if (1 + foo() && bar(baz()) / 2) one()\ntwo()\nthree()');
bt('if (1 + foo() && bar(baz()) / 2) one();\ntwo();\nthree();');
self.options.indent_size = 1;
self.options.indent_char = ' ';
bt('{ one_char() }', "{\n one_char()\n}");
bt('var a,b=1,c=2', 'var a, b = 1,\n c = 2');
self.options.indent_size = 4;
self.options.indent_char = ' ';
bt('{ one_char() }', "{\n one_char()\n}");
self.options.indent_size = 1;
self.options.indent_char = "\t";
bt('{ one_char() }', "{\n\tone_char()\n}");
bt('x = a ? b : c; x;', 'x = a ? b : c;\nx;');
self.options.indent_size = 4;
self.options.indent_char = ' ';
self.options.preserve_newlines = False;
bt('var\na=dont_preserve_newlines;', 'var a = dont_preserve_newlines;');
# make sure the blank line between function definitions stays
# even when preserve_newlines = False
bt('function foo() {\n return 1;\n}\n\nfunction foo() {\n return 1;\n}');
bt('function foo() {\n return 1;\n}\nfunction foo() {\n return 1;\n}',
'function foo() {\n return 1;\n}\n\nfunction foo() {\n return 1;\n}'
);
bt('function foo() {\n return 1;\n}\n\n\nfunction foo() {\n return 1;\n}',
'function foo() {\n return 1;\n}\n\nfunction foo() {\n return 1;\n}'
);
self.options.preserve_newlines = True;
bt('var\na=do_preserve_newlines;', 'var\na = do_preserve_newlines;')
bt('// a\n// b\n\n// c\n// d')
bt('if (foo) // comment\n{\n bar();\n}')
self.options.keep_array_indentation = False;
bt("a = ['a', 'b', 'c',\n 'd', 'e', 'f']",
"a = ['a', 'b', 'c',\n 'd', 'e', 'f'\n]");
bt("a = ['a', 'b', 'c',\n 'd', 'e', 'f',\n 'g', 'h', 'i']",
"a = ['a', 'b', 'c',\n 'd', 'e', 'f',\n 'g', 'h', 'i'\n]");
bt("a = ['a', 'b', 'c',\n 'd', 'e', 'f',\n 'g', 'h', 'i']",
"a = ['a', 'b', 'c',\n 'd', 'e', 'f',\n 'g', 'h', 'i'\n]");
bt('var x = [{}\n]', 'var x = [{}]');
bt('var x = [{foo:bar}\n]', 'var x = [{\n foo: bar\n }\n]');
bt("a = ['something',\n 'completely',\n 'different'];\nif (x);",
"a = ['something',\n 'completely',\n 'different'\n];\nif (x);");
bt("a = ['a','b','c']", "a = ['a', 'b', 'c']");
bt("a = ['a', 'b','c']", "a = ['a', 'b', 'c']");
bt("x = [{'a':0}]",
"x = [{\n 'a': 0\n }\n]");
# this is not great, but is accurate
bt('{a([[a1]], {b;});}',
'{\n a([\n [a1]\n ], {\n b;\n });\n}');
bt("a();\n [\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ].toString();",
"a();\n[\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n].toString();");
bt("function() {\n Foo([\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ]);\n}",
"function() {\n Foo([\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ]);\n}");
self.options.keep_array_indentation = True;
bt("a = ['a', 'b', 'c',\n 'd', 'e', 'f']");
bt("a = ['a', 'b', 'c',\n 'd', 'e', 'f',\n 'g', 'h', 'i']");
bt("a = ['a', 'b', 'c',\n 'd', 'e', 'f',\n 'g', 'h', 'i']");
bt('var x = [{}\n]', 'var x = [{}\n]');
bt('var x = [{foo:bar}\n]', 'var x = [{\n foo: bar\n }\n]');
bt("a = ['something',\n 'completely',\n 'different'];\nif (x);");
bt("a = ['a','b','c']", "a = ['a', 'b', 'c']");
bt("a = ['a', 'b','c']", "a = ['a', 'b', 'c']");
bt("x = [{'a':0}]",
"x = [{\n 'a': 0\n }]");
bt('{a([[a1]], {b;});}',
'{\n a([[a1]], {\n b;\n });\n}');
bt("a();\n [\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ].toString();",
"a();\n [\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ].toString();");
bt("function() {\n Foo([\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ]);\n}",
"function() {\n Foo([\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ]);\n}");
self.options.keep_array_indentation = False;
bt('a = //comment\n/regex/;');
test_fragment('/*\n * X\n */');
test_fragment('/*\r\n * X\r\n */', '/*\n * X\n */');
bt('if (a)\n{\nb;\n}\nelse\n{\nc;\n}', 'if (a) {\n b;\n} else {\n c;\n}');
bt('var a = new function();');
test_fragment('new function');
self.options.brace_style = 'expand';
bt('//case 1\nif (a == 1)\n{}\n//case 2\nelse if (a == 2)\n{}');
bt('if(1){2}else{3}', "if (1)\n{\n 2\n}\nelse\n{\n 3\n}");
bt('try{a();}catch(b){c();}catch(d){}finally{e();}',
"try\n{\n a();\n}\ncatch (b)\n{\n c();\n}\ncatch (d)\n{}\nfinally\n{\n e();\n}");
bt('if(a){b();}else if(c) foo();',
"if (a)\n{\n b();\n}\nelse if (c) foo();");
bt("if (a) {\n// comment\n}else{\n// comment\n}",
"if (a)\n{\n // comment\n}\nelse\n{\n // comment\n}"); # if/else statement with empty body
bt('if (x) {y} else { if (x) {y}}',
'if (x)\n{\n y\n}\nelse\n{\n if (x)\n {\n y\n }\n}');
bt('if (a)\n{\nb;\n}\nelse\n{\nc;\n}',
'if (a)\n{\n b;\n}\nelse\n{\n c;\n}');
test_fragment(' /*\n* xx\n*/\n// xx\nif (foo) {\n bar();\n}',
' /*\n * xx\n */\n // xx\n if (foo)\n {\n bar();\n }');
bt('if (foo)\n{}\nelse /regex/.test();');
bt('if (foo) /regex/.test();');
bt('if (a)\n{\nb;\n}\nelse\n{\nc;\n}', 'if (a)\n{\n b;\n}\nelse\n{\n c;\n}');
test_fragment('if (foo) {', 'if (foo)\n{');
test_fragment('foo {', 'foo\n{');
test_fragment('return {', 'return {'); # return needs the brace.
test_fragment('return /* inline */ {', 'return /* inline */ {');
# test_fragment('return\n{', 'return\n{'); # can't support this?, but that's an improbable and extreme case anyway.
test_fragment('return;\n{', 'return;\n{');
bt("throw {}");
bt("throw {\n foo;\n}");
bt('var foo = {}');
bt('if (foo) bar();\nelse break');
bt('function x() {\n foo();\n}zzz', 'function x()\n{\n foo();\n}\nzzz');
bt('a: do {} while (); xxx', 'a: do {} while ();\nxxx');
bt('var a = new function();');
bt('var a = new function() {};');
bt('var a = new function a()\n {};');
test_fragment('new function');
bt("foo({\n 'a': 1\n},\n10);",
"foo(\n {\n 'a': 1\n },\n 10);");
self.options.brace_style = 'collapse';
bt('//case 1\nif (a == 1) {}\n//case 2\nelse if (a == 2) {}');
bt('if(1){2}else{3}', "if (1) {\n 2\n} else {\n 3\n}");
bt('try{a();}catch(b){c();}catch(d){}finally{e();}',
"try {\n a();\n} catch (b) {\n c();\n} catch (d) {} finally {\n e();\n}");
bt('if(a){b();}else if(c) foo();',
"if (a) {\n b();\n} else if (c) foo();");
bt("if (a) {\n// comment\n}else{\n// comment\n}",
"if (a) {\n // comment\n} else {\n // comment\n}"); # if/else statement with empty body
bt('if (x) {y} else { if (x) {y}}',
'if (x) {\n y\n} else {\n if (x) {\n y\n }\n}');
bt('if (a)\n{\nb;\n}\nelse\n{\nc;\n}',
'if (a) {\n b;\n} else {\n c;\n}');
test_fragment(' /*\n* xx\n*/\n// xx\nif (foo) {\n bar();\n}',
' /*\n * xx\n */\n // xx\n if (foo) {\n bar();\n }');
bt('if (foo) {} else /regex/.test();');
bt('if (foo) /regex/.test();');
bt('if (a)\n{\nb;\n}\nelse\n{\nc;\n}', 'if (a) {\n b;\n} else {\n c;\n}');
test_fragment('if (foo) {', 'if (foo) {');
test_fragment('foo {', 'foo {');
test_fragment('return {', 'return {'); # return needs the brace.
test_fragment('return /* inline */ {', 'return /* inline */ {');
# test_fragment('return\n{', 'return\n{'); # can't support this?, but that's an improbable and extreme case anyway.
test_fragment('return;\n{', 'return; {');
bt("throw {}");
bt("throw {\n foo;\n}");
bt('var foo = {}');
bt('if (foo) bar();\nelse break');
bt('function x() {\n foo();\n}zzz', 'function x() {\n foo();\n}\nzzz');
bt('a: do {} while (); xxx', 'a: do {} while ();\nxxx');
bt('var a = new function();');
bt('var a = new function() {};');
bt('var a = new function a() {};');
test_fragment('new function');
bt("foo({\n 'a': 1\n},\n10);",
"foo({\n 'a': 1\n },\n 10);");
self.options.brace_style = "end-expand";
bt('//case 1\nif (a == 1) {}\n//case 2\nelse if (a == 2) {}');
bt('if(1){2}else{3}', "if (1) {\n 2\n}\nelse {\n 3\n}");
bt('try{a();}catch(b){c();}catch(d){}finally{e();}',
"try {\n a();\n}\ncatch (b) {\n c();\n}\ncatch (d) {}\nfinally {\n e();\n}");
bt('if(a){b();}else if(c) foo();',
"if (a) {\n b();\n}\nelse if (c) foo();");
bt("if (a) {\n// comment\n}else{\n// comment\n}",
"if (a) {\n // comment\n}\nelse {\n // comment\n}"); # if/else statement with empty body
bt('if (x) {y} else { if (x) {y}}',
'if (x) {\n y\n}\nelse {\n if (x) {\n y\n }\n}');
bt('if (a)\n{\nb;\n}\nelse\n{\nc;\n}',
'if (a) {\n b;\n}\nelse {\n c;\n}');
test_fragment(' /*\n* xx\n*/\n// xx\nif (foo) {\n bar();\n}',
' /*\n * xx\n */\n // xx\n if (foo) {\n bar();\n }');
bt('if (foo) {}\nelse /regex/.test();');
bt('if (foo) /regex/.test();');
bt('if (a)\n{\nb;\n}\nelse\n{\nc;\n}', 'if (a) {\n b;\n}\nelse {\n c;\n}');
test_fragment('if (foo) {', 'if (foo) {');
test_fragment('foo {', 'foo {');
test_fragment('return {', 'return {'); # return needs the brace.
test_fragment('return /* inline */ {', 'return /* inline */ {');
# test_fragment('return\n{', 'return\n{'); # can't support this?, but that's an improbable and extreme case anyway.
test_fragment('return;\n{', 'return; {');
bt("throw {}");
bt("throw {\n foo;\n}");
bt('var foo = {}');
bt('if (foo) bar();\nelse break');
bt('function x() {\n foo();\n}zzz', 'function x() {\n foo();\n}\nzzz');
bt('a: do {} while (); xxx', 'a: do {} while ();\nxxx');
bt('var a = new function();');
bt('var a = new function() {};');
bt('var a = new function a() {};');
test_fragment('new function');
bt("foo({\n 'a': 1\n},\n10);",
"foo({\n 'a': 1\n },\n 10);");
self.options.brace_style = 'collapse';
bt('a = <?= external() ?> ;'); # not the most perfect thing in the world, but you're the weirdo beautifying php mix-ins with a javascript beautifier
bt('a = <%= external() %> ;');
test_fragment('roo = {\n /*\n ****\n FOO\n ****\n */\n BAR: 0\n};');
test_fragment("if (zz) {\n // ....\n}\n(function");
self.options.preserve_newlines = True;
bt('var a = 42; // foo\n\nvar b;')
bt('var a = 42; // foo\n\n\nvar b;')
bt("var a = 'foo' +\n 'bar';");
bt("var a = \"foo\" +\n \"bar\";");
bt('"foo""bar""baz"', '"foo"\n"bar"\n"baz"')
bt("'foo''bar''baz'", "'foo'\n'bar'\n'baz'")
bt("{\n get foo() {}\n}")
bt("{\n var a = get\n foo();\n}")
bt("{\n set foo() {}\n}")
bt("{\n var a = set\n foo();\n}")
bt("var x = {\n get function()\n}")
bt("var x = {\n set function()\n}")
bt("var x = set\n\nfunction() {}", "var x = set\n\n function() {}")
bt('<!-- foo\nbar();\n-->')
bt('<!-- dont crash')
bt('for () /abc/.test()')
bt('if (k) /aaa/m.test(v) && l();')
bt('switch (true) {\n case /swf/i.test(foo):\n bar();\n}')
bt('createdAt = {\n type: Date,\n default: Date.now\n}')
bt('switch (createdAt) {\n case a:\n Date,\n default:\n Date.now\n}')
bt('return function();')
bt('var a = function();')
bt('var a = 5 + function();')
bt('{\n foo // something\n ,\n bar // something\n baz\n}')
bt('function a(a) {} function b(b) {} function c(c) {}', 'function a(a) {}\n\nfunction b(b) {}\n\nfunction c(c) {}')
bt('3.*7;', '3. * 7;')
bt('import foo.*;', 'import foo.*;') # actionscript's import
test_fragment('function f(a: a, b: b)') # actionscript
bt('foo(a, function() {})');
bt('foo(a, /regex/)');
bt('/* foo */\n"x"');
self.options.break_chained_methods = False
self.options.preserve_newlines = False
bt('foo\n.bar()\n.baz().cucumber(fat)', 'foo.bar().baz().cucumber(fat)');
bt('foo\n.bar()\n.baz().cucumber(fat); foo.bar().baz().cucumber(fat)', 'foo.bar().baz().cucumber(fat);\nfoo.bar().baz().cucumber(fat)');
bt('foo\n.bar()\n.baz().cucumber(fat)\n foo.bar().baz().cucumber(fat)', 'foo.bar().baz().cucumber(fat)\nfoo.bar().baz().cucumber(fat)');
bt('this\n.something = foo.bar()\n.baz().cucumber(fat)', 'this.something = foo.bar().baz().cucumber(fat)');
bt('this.something.xxx = foo.moo.bar()');
bt('this\n.something\n.xxx = foo.moo\n.bar()', 'this.something.xxx = foo.moo.bar()');
self.options.break_chained_methods = False
self.options.preserve_newlines = True
bt('foo\n.bar()\n.baz().cucumber(fat)', 'foo\n .bar()\n .baz().cucumber(fat)');
bt('foo\n.bar()\n.baz().cucumber(fat); foo.bar().baz().cucumber(fat)', 'foo\n .bar()\n .baz().cucumber(fat);\nfoo.bar().baz().cucumber(fat)');
bt('foo\n.bar()\n.baz().cucumber(fat)\n foo.bar().baz().cucumber(fat)', 'foo\n .bar()\n .baz().cucumber(fat)\nfoo.bar().baz().cucumber(fat)');
bt('this\n.something = foo.bar()\n.baz().cucumber(fat)', 'this\n .something = foo.bar()\n .baz().cucumber(fat)');
bt('this.something.xxx = foo.moo.bar()');
bt('this\n.something\n.xxx = foo.moo\n.bar()', 'this\n .something\n .xxx = foo.moo\n .bar()');
self.options.break_chained_methods = True
self.options.preserve_newlines = False
bt('foo\n.bar()\n.baz().cucumber(fat)', 'foo.bar()\n .baz()\n .cucumber(fat)');
bt('foo\n.bar()\n.baz().cucumber(fat); foo.bar().baz().cucumber(fat)', 'foo.bar()\n .baz()\n .cucumber(fat);\nfoo.bar()\n .baz()\n .cucumber(fat)');
bt('foo\n.bar()\n.baz().cucumber(fat)\n foo.bar().baz().cucumber(fat)', 'foo.bar()\n .baz()\n .cucumber(fat)\nfoo.bar()\n .baz()\n .cucumber(fat)');
bt('this\n.something = foo.bar()\n.baz().cucumber(fat)', 'this.something = foo.bar()\n .baz()\n .cucumber(fat)');
bt('this.something.xxx = foo.moo.bar()');
bt('this\n.something\n.xxx = foo.moo\n.bar()', 'this.something.xxx = foo.moo.bar()');
self.options.break_chained_methods = True
self.options.preserve_newlines = True
bt('foo\n.bar()\n.baz().cucumber(fat)', 'foo\n .bar()\n .baz()\n .cucumber(fat)');
bt('foo\n.bar()\n.baz().cucumber(fat); foo.bar().baz().cucumber(fat)', 'foo\n .bar()\n .baz()\n .cucumber(fat);\nfoo.bar()\n .baz()\n .cucumber(fat)');
bt('foo\n.bar()\n.baz().cucumber(fat)\n foo.bar().baz().cucumber(fat)', 'foo\n .bar()\n .baz()\n .cucumber(fat)\nfoo.bar()\n .baz()\n .cucumber(fat)');
bt('this\n.something = foo.bar()\n.baz().cucumber(fat)', 'this\n .something = foo.bar()\n .baz()\n .cucumber(fat)');
bt('this.something.xxx = foo.moo.bar()');
bt('this\n.something\n.xxx = foo.moo\n.bar()', 'this\n .something\n .xxx = foo.moo\n .bar()');
self.options.break_chained_methods = False
self.options.preserve_newlines = False
self.options.preserve_newlines = False
self.options.wrap_line_length = 0
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
test_fragment('foo.bar().baz().cucumber((fat && "sassy") || (leans\n&& mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap\n.but_this_can\n' +
'if (wraps_can_occur && inside_an_if_block) that_is_\n.okay();',
# expected #
'foo.bar().baz().cucumber((fat && "sassy") || (leans && mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap.but_this_can\n' +
'if (wraps_can_occur && inside_an_if_block) that_is_.okay();')
self.options.wrap_line_length = 70
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
test_fragment('foo.bar().baz().cucumber((fat && "sassy") || (leans\n&& mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap\n.but_this_can\n' +
'if (wraps_can_occur && inside_an_if_block) that_is_\n.okay();',
# expected #
'foo.bar().baz().cucumber((fat && "sassy") || (leans && mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap.but_this_can\n' +
'if (wraps_can_occur && inside_an_if_block) that_is_.okay();');
self.options.wrap_line_length = 40
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
test_fragment('foo.bar().baz().cucumber((fat && "sassy") || (leans\n&& mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap\n.but_this_can\n' +
'if (wraps_can_occur && inside_an_if_block) that_is_\n.okay();',
# expected #
'foo.bar().baz().cucumber((fat &&\n' +
' "sassy") || (leans && mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap\n' +
' .but_this_can\n' +
'if (wraps_can_occur &&\n' +
' inside_an_if_block) that_is_.okay();');
self.options.wrap_line_length = 41
# NOTE: wrap is only best effort - line continues until next wrap point is found.
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
test_fragment('foo.bar().baz().cucumber((fat && "sassy") || (leans\n&& mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap\n.but_this_can\n' +
'if (wraps_can_occur && inside_an_if_block) that_is_\n.okay();',
# expected #
'foo.bar().baz().cucumber((fat && "sassy") ||\n' +
' (leans && mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap\n' +
' .but_this_can\n' +
'if (wraps_can_occur &&\n' +
' inside_an_if_block) that_is_.okay();');
self.options.wrap_line_length = 45
# NOTE: wrap is only best effort - line continues until next wrap point is found.
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
test_fragment('{\n' +
' foo.bar().baz().cucumber((fat && "sassy") || (leans\n&& mean));\n' +
' Test_very_long_variable_name_this_should_never_wrap\n.but_this_can\n' +
' if (wraps_can_occur && inside_an_if_block) that_is_\n.okay();\n' +
'}',
# expected #
'{\n' +
' foo.bar().baz().cucumber((fat && "sassy") ||\n' +
' (leans && mean));\n' +
' Test_very_long_variable_name_this_should_never_wrap\n' +
' .but_this_can\n' +
' if (wraps_can_occur &&\n' +
' inside_an_if_block) that_is_.okay();\n' +
'}');
self.options.preserve_newlines = True
self.options.wrap_line_length = 0
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
test_fragment('foo.bar().baz().cucumber((fat && "sassy") || (leans\n&& mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap\n.but_this_can\n' +
'if (wraps_can_occur && inside_an_if_block) that_is_\n.okay();',
# expected #
'foo.bar().baz().cucumber((fat && "sassy") || (leans && mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap\n' +
' .but_this_can\n' +
'if (wraps_can_occur && inside_an_if_block) that_is_\n' +
' .okay();');
self.options.wrap_line_length = 70
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
test_fragment('foo.bar().baz().cucumber((fat && "sassy") || (leans\n&& mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap\n.but_this_can\n' +
'if (wraps_can_occur && inside_an_if_block) that_is_\n.okay();',
# expected #
'foo.bar().baz().cucumber((fat && "sassy") || (leans && mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap\n' +
' .but_this_can\n' +
'if (wraps_can_occur && inside_an_if_block) that_is_\n' +
' .okay();');
self.options.wrap_line_length = 40
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
test_fragment('foo.bar().baz().cucumber((fat && "sassy") || (leans\n&& mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap\n.but_this_can\n' +
'if (wraps_can_occur && inside_an_if_block) that_is_\n.okay();',
# expected #
'foo.bar().baz().cucumber((fat &&\n' +
' "sassy") || (leans && mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap\n' +
' .but_this_can\n' +
'if (wraps_can_occur &&\n' +
' inside_an_if_block) that_is_\n' +
' .okay();');
self.options.wrap_line_length = 41
# NOTE: wrap is only best effort - line continues until next wrap point is found.
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
test_fragment('foo.bar().baz().cucumber((fat && "sassy") || (leans\n&& mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap\n.but_this_can\n' +
'if (wraps_can_occur && inside_an_if_block) that_is_\n.okay();',
# expected #
'foo.bar().baz().cucumber((fat && "sassy") ||\n' +
' (leans && mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap\n' +
' .but_this_can\n' +
'if (wraps_can_occur &&\n' +
' inside_an_if_block) that_is_\n' +
' .okay();');
self.options.wrap_line_length = 45
# NOTE: wrap is only best effort - line continues until next wrap point is found.
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
test_fragment('{\n' +
' foo.bar().baz().cucumber((fat && "sassy") || (leans\n&& mean));\n' +
' Test_very_long_variable_name_this_should_never_wrap\n.but_this_can\n' +
' if (wraps_can_occur && inside_an_if_block) that_is_\n.okay();\n' +
'}',
# expected #
'{\n' +
' foo.bar().baz().cucumber((fat && "sassy") ||\n' +
' (leans && mean));\n' +
' Test_very_long_variable_name_this_should_never_wrap\n' +
' .but_this_can\n' +
' if (wraps_can_occur &&\n' +
' inside_an_if_block) that_is_\n' +
' .okay();\n' +
'}');
self.options.wrap_line_length = 0
self.options.preserve_newlines = False
bt('if (foo) // comment\n bar();');
bt('if (foo) // comment\n (bar());');
bt('if (foo) // comment\n (bar());');
bt('if (foo) // comment\n /asdf/;');
bt('foo = {\n x: y, // #44\n w: z // #44\n}');
bt('switch (x) {\n case "a":\n // comment on newline\n break;\n case "b": // comment on same line\n break;\n}');
# these aren't ready yet.
#bt('if (foo) // comment\n bar() /*i*/ + baz() /*j\n*/ + asdf();');
bt('if\n(foo)\nif\n(bar)\nif\n(baz)\nwhee();\na();', 'if (foo) if (bar) if (baz) whee();\na();');
bt('if\n(foo)\nif\n(bar)\nif\n(baz)\nwhee();\nelse\na();', 'if (foo) if (bar) if (baz) whee();\n else a();');
bt('if (foo)\nbar();\nelse\ncar();', 'if (foo) bar();\nelse car();');
bt('if (foo) if (bar) if (baz) whee();\na();');
bt('if (foo) a()\nif (bar) if (baz) whee();\na();');
bt('if (options)\n' +
' for (var p in options)\n' +
' this[p] = options[p];',
'if (options) for (var p in options) this[p] = options[p];');
bt('function f(a, b, c,\nd, e) {}',
'function f(a, b, c, d, e) {}');
bt('function f(a,b) {if(a) b()}function g(a,b) {if(!a) b()}',
'function f(a, b) {\n if (a) b()\n}\n\nfunction g(a, b) {\n if (!a) b()\n}');
bt('function f(a,b) {if(a) b()}\n\n\n\nfunction g(a,b) {if(!a) b()}',
'function f(a, b) {\n if (a) b()\n}\n\nfunction g(a, b) {\n if (!a) b()\n}');
# This is not valid syntax, but we still want to behave reasonably and avoid side effects
bt('(if(a) b())(if(a) b())',
'(\n if (a) b())(\n if (a) b())');
bt('(if(a) b())\n\n\n(if(a) b())',
'(\n if (a) b())\n(\n if (a) b())');
bt("if\n(a)\nb();", "if (a) b();");
bt('var a =\nfoo', 'var a = foo');
bt('var a = {\n"a":1,\n"b":2}', "var a = {\n \"a\": 1,\n \"b\": 2\n}");
bt("var a = {\n'a':1,\n'b':2}", "var a = {\n 'a': 1,\n 'b': 2\n}");
bt('var a = /*i*/ "b";');
bt('var a = /*i*/\n"b";', 'var a = /*i*/ "b";');
bt('var a = /*i*/\nb;', 'var a = /*i*/ b;');
bt('{\n\n\n"x"\n}', '{\n "x"\n}');
bt('if(a &&\nb\n||\nc\n||d\n&&\ne) e = f', 'if (a && b || c || d && e) e = f');
bt('if(a &&\n(b\n||\nc\n||d)\n&&\ne) e = f', 'if (a && (b || c || d) && e) e = f');
test_fragment('\n\n"x"', '"x"');
bt('a = 1;\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nb = 2;',
'a = 1;\nb = 2;');
self.options.preserve_newlines = True
bt('if (foo) // comment\n bar();');
bt('if (foo) // comment\n (bar());');
bt('if (foo) // comment\n (bar());');
bt('if (foo) // comment\n /asdf/;');
bt('foo = {\n x: y, // #44\n w: z // #44\n}');
bt('switch (x) {\n case "a":\n // comment on newline\n break;\n case "b": // comment on same line\n break;\n}');
# these aren't ready yet.
# bt('if (foo) // comment\n bar() /*i*/ + baz() /*j\n*/ + asdf();');
bt('if\n(foo)\nif\n(bar)\nif\n(baz)\nwhee();\na();', 'if (foo)\n if (bar)\n if (baz)\n whee();\na();');
bt('if\n(foo)\nif\n(bar)\nif\n(baz)\nwhee();\nelse\na();', 'if (foo)\n if (bar)\n if (baz)\n whee();\n else\n a();');
bt('if (foo) bar();\nelse\ncar();', 'if (foo) bar();\nelse\n car();');
bt('if (foo) if (bar) if (baz) whee();\na();');
bt('if (foo) a()\nif (bar) if (baz) whee();\na();');
bt('if (options)\n' +
' for (var p in options)\n' +
' this[p] = options[p];');
bt('function f(a, b, c,\nd, e) {}',
'function f(a, b, c,\n d, e) {}');
bt('function f(a,b) {if(a) b()}function g(a,b) {if(!a) b()}',
'function f(a, b) {\n if (a) b()\n}\n\nfunction g(a, b) {\n if (!a) b()\n}');
bt('function f(a,b) {if(a) b()}\n\n\n\nfunction g(a,b) {if(!a) b()}',
'function f(a, b) {\n if (a) b()\n}\n\n\n\nfunction g(a, b) {\n if (!a) b()\n}');
# This is not valid syntax, but we still want to behave reasonably and avoid side effects
bt('(if(a) b())(if(a) b())',
'(\n if (a) b())(\n if (a) b())');
bt('(if(a) b())\n\n\n(if(a) b())',
'(\n if (a) b())\n\n\n(\n if (a) b())');
bt("if\n(a)\nb();", "if (a)\n b();");
bt('var a =\nfoo', 'var a =\n foo');
bt('var a = {\n"a":1,\n"b":2}', "var a = {\n \"a\": 1,\n \"b\": 2\n}");
bt("var a = {\n'a':1,\n'b':2}", "var a = {\n 'a': 1,\n 'b': 2\n}");
bt('var a = /*i*/ "b";');
bt('var a = /*i*/\n"b";', 'var a = /*i*/\n "b";');
bt('var a = /*i*/\nb;', 'var a = /*i*/\n b;');
bt('{\n\n\n"x"\n}', '{\n\n\n "x"\n}');
bt('if(a &&\nb\n||\nc\n||d\n&&\ne) e = f', 'if (a &&\n b ||\n c || d &&\n e) e = f');
bt('if(a &&\n(b\n||\nc\n||d)\n&&\ne) e = f', 'if (a &&\n (b ||\n c || d) &&\n e) e = f');
test_fragment('\n\n"x"', '"x"');
# this behavior differs between js and python: max_preserve_newlines defaults to unlimited in js, 10 in python
bt('a = 1;\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nb = 2;',
'a = 1;\n\n\n\n\n\n\n\n\n\nb = 2;');
self.options.max_preserve_newlines = 8;
bt('a = 1;\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nb = 2;',
'a = 1;\n\n\n\n\n\n\n\nb = 2;');
# Test the option to have spaces within parens
self.options.space_in_paren = False
bt('if(p) foo(a,b)', 'if (p) foo(a, b)');
bt('try{while(true){willThrow()}}catch(result)switch(result){case 1:++result }',
'try {\n while (true) {\n willThrow()\n }\n} catch (result) switch (result) {\n case 1:\n ++result\n}');
bt('((e/((a+(b)*c)-d))^2)*5;', '((e / ((a + (b) * c) - d)) ^ 2) * 5;');
bt('function f(a,b) {if(a) b()}function g(a,b) {if(!a) b()}',
'function f(a, b) {\n if (a) b()\n}\n\nfunction g(a, b) {\n if (!a) b()\n}');
bt('a=[];',
'a = [];');
bt('a=[b,c,d];',
'a = [b, c, d];');
bt('a= f[b];',
'a = f[b];');
self.options.space_in_paren = True
bt('if(p) foo(a,b)', 'if ( p ) foo( a, b )');
bt('try{while(true){willThrow()}}catch(result)switch(result){case 1:++result }',
'try {\n while ( true ) {\n willThrow( )\n }\n} catch ( result ) switch ( result ) {\n case 1:\n ++result\n}');
bt('((e/((a+(b)*c)-d))^2)*5;', '( ( e / ( ( a + ( b ) * c ) - d ) ) ^ 2 ) * 5;');
bt('function f(a,b) {if(a) b()}function g(a,b) {if(!a) b()}',
'function f( a, b ) {\n if ( a ) b( )\n}\n\nfunction g( a, b ) {\n if ( !a ) b( )\n}');
bt('a=[ ];',
'a = [ ];');
bt('a=[b,c,d];',
'a = [ b, c, d ];');
bt('a= f[b];',
'a = f[ b ];');
self.options.space_in_paren = False
# Test that e4x literals are passed through when the e4x option is enabled
bt('xml=<a b="c"><d/><e>\n foo</e>x</a>;', 'xml = < a b = "c" > < d / > < e >\n foo < /e>x</a > ;');
self.options.e4x = True
bt('xml=<a b="c"><d/><e>\n foo</e>x</a>;', 'xml = <a b="c"><d/><e>\n foo</e>x</a>;');
# Handles messed up tags, as long as it isn't the same name
# as the root tag. Also handles tags of same name as root tag
# as long as nesting matches.
bt('xml=<a x="jn"><c></b></f><a><d jnj="jnn"><f></a ></nj></a>;',
'xml = <a x="jn"><c></b></f><a><d jnj="jnn"><f></a ></nj></a>;');
# If xml is not terminated, the remainder of the file is treated
# as part of the xml-literal (passed through unaltered)
test_fragment('xml=<a></b>\nc<b;', 'xml = <a></b>\nc<b;');
self.options.e4x = False
# START tests for issue 241
bt('obj\n' +
' .last({\n' +
' foo: 1,\n' +
' bar: 2\n' +
' });\n' +
'var test = 1;');
bt('obj\n' +
' .last(function() {\n' +
' var test;\n' +
' });\n' +
'var test = 1;');
bt('obj.first()\n' +
' .second()\n' +
' .last(function(err, response) {\n' +
' console.log(err);\n' +
' });');
# END tests for issue 241
def decodesto(self, input, expectation=None):
self.assertEqual(
jsbeautifier.beautify(input, self.options), expectation or input)
# if the expected is different from input, run it again
# expected output should be unchanged when run twice.
if expectation is not None:
self.assertEqual(
jsbeautifier.beautify(expectation, self.options), expectation)
def wrap(self, text):
return self.wrapregex.sub(' \\1', text)
def bt(self, input, expectation=None):
expectation = expectation or input
self.decodesto(input, expectation)
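# For the default 4-space indent, the same pair is additionally checked wrapped
# inside a block ('{ ... foo=bar;}') to verify the indented form beautifies to the
# matching indented expectation.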
if self.options.indent_size == 4 and input:
wrapped_input = '{\n%s\nfoo=bar;}' % self.wrap(input)
wrapped_expect = '{\n%s\n foo = bar;\n}' % self.wrap(expectation)
self.decodesto(wrapped_input, wrapped_expect)
@classmethod
def setUpClass(cls):
options = jsbeautifier.default_options()
options.indent_size = 4
options.indent_char = ' '
options.preserve_newlines = True
options.jslint_happy = False
options.keep_array_indentation = False
options.brace_style = 'collapse'
options.indent_level = 0
options.break_chained_methods = False
cls.options = options
cls.wrapregex = re.compile('^(.+)$', re.MULTILINE)
if __name__ == '__main__':
unittest.main()
|
[
"dearwill@outlook.com"
] |
dearwill@outlook.com
|
e9bab44c9c61265f1fb967c6700de4b3768157eb
|
ef42a6d8b25820dc4745ce04c415ae25e7f3ca4f
|
/rtl/udp_demux.py
|
416bd0aa54358922b90574cb21bf1da7ac17d32a
|
[
"MIT"
] |
permissive
|
sinamyth/verilog-ethernet
|
af363edad4b503584c1f4605c251c907fe03ec38
|
cf6a01fffeda33b0748f942532ad91e945d4903f
|
refs/heads/master
| 2021-01-19T17:38:43.580775
| 2017-07-22T18:07:23
| 2017-07-22T18:07:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,466
|
py
|
#!/usr/bin/env python
"""
Generates a UDP demux with the specified number of ports
"""
from __future__ import print_function
import argparse
import math
from jinja2 import Template
def main():
parser = argparse.ArgumentParser(description=__doc__.strip())
parser.add_argument('-p', '--ports', type=int, default=4, help="number of ports")
parser.add_argument('-n', '--name', type=str, help="module name")
parser.add_argument('-o', '--output', type=str, help="output file name")
args = parser.parse_args()
try:
generate(**args.__dict__)
except IOError as ex:
print(ex)
exit(1)
def generate(ports=4, name=None, output=None):
if name is None:
name = "udp_demux_{0}".format(ports)
if output is None:
output = name + ".v"
print("Opening file '{0}'...".format(output))
output_file = open(output, 'w')
print("Generating {0} port UDP demux {1}...".format(ports, name))
select_width = int(math.ceil(math.log(ports, 2)))
t = Template(u"""/*
Copyright (c) 2014-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
// Language: Verilog 2001
`timescale 1ns / 1ps
/*
* UDP {{n}} port demultiplexer
*/
module {{name}}
(
input wire clk,
input wire rst,
/*
* UDP frame input
*/
input wire input_udp_hdr_valid,
output wire input_udp_hdr_ready,
input wire [47:0] input_eth_dest_mac,
input wire [47:0] input_eth_src_mac,
input wire [15:0] input_eth_type,
input wire [3:0] input_ip_version,
input wire [3:0] input_ip_ihl,
input wire [5:0] input_ip_dscp,
input wire [1:0] input_ip_ecn,
input wire [15:0] input_ip_length,
input wire [15:0] input_ip_identification,
input wire [2:0] input_ip_flags,
input wire [12:0] input_ip_fragment_offset,
input wire [7:0] input_ip_ttl,
input wire [7:0] input_ip_protocol,
input wire [15:0] input_ip_header_checksum,
input wire [31:0] input_ip_source_ip,
input wire [31:0] input_ip_dest_ip,
input wire [15:0] input_udp_source_port,
input wire [15:0] input_udp_dest_port,
input wire [15:0] input_udp_length,
input wire [15:0] input_udp_checksum,
input wire [7:0] input_udp_payload_tdata,
input wire input_udp_payload_tvalid,
output wire input_udp_payload_tready,
input wire input_udp_payload_tlast,
input wire input_udp_payload_tuser,
/*
* UDP frame outputs
*/
{%- for p in ports %}
output wire output_{{p}}_udp_hdr_valid,
input wire output_{{p}}_udp_hdr_ready,
output wire [47:0] output_{{p}}_eth_dest_mac,
output wire [47:0] output_{{p}}_eth_src_mac,
output wire [15:0] output_{{p}}_eth_type,
output wire [3:0] output_{{p}}_ip_version,
output wire [3:0] output_{{p}}_ip_ihl,
output wire [5:0] output_{{p}}_ip_dscp,
output wire [1:0] output_{{p}}_ip_ecn,
output wire [15:0] output_{{p}}_ip_length,
output wire [15:0] output_{{p}}_ip_identification,
output wire [2:0] output_{{p}}_ip_flags,
output wire [12:0] output_{{p}}_ip_fragment_offset,
output wire [7:0] output_{{p}}_ip_ttl,
output wire [7:0] output_{{p}}_ip_protocol,
output wire [15:0] output_{{p}}_ip_header_checksum,
output wire [31:0] output_{{p}}_ip_source_ip,
output wire [31:0] output_{{p}}_ip_dest_ip,
output wire [15:0] output_{{p}}_udp_source_port,
output wire [15:0] output_{{p}}_udp_dest_port,
output wire [15:0] output_{{p}}_udp_length,
output wire [15:0] output_{{p}}_udp_checksum,
output wire [7:0] output_{{p}}_udp_payload_tdata,
output wire output_{{p}}_udp_payload_tvalid,
input wire output_{{p}}_udp_payload_tready,
output wire output_{{p}}_udp_payload_tlast,
output wire output_{{p}}_udp_payload_tuser,
{% endfor %}
/*
* Control
*/
input wire enable,
input wire [{{w-1}}:0] select
);
reg [{{w-1}}:0] select_reg = {{w}}'d0, select_next;
reg frame_reg = 1'b0, frame_next;
reg input_udp_hdr_ready_reg = 1'b0, input_udp_hdr_ready_next;
reg input_udp_payload_tready_reg = 1'b0, input_udp_payload_tready_next;
{% for p in ports %}
reg output_{{p}}_udp_hdr_valid_reg = 1'b0, output_{{p}}_udp_hdr_valid_next;
{%- endfor %}
reg [47:0] output_eth_dest_mac_reg = 48'd0, output_eth_dest_mac_next;
reg [47:0] output_eth_src_mac_reg = 48'd0, output_eth_src_mac_next;
reg [15:0] output_eth_type_reg = 16'd0, output_eth_type_next;
reg [3:0] output_ip_version_reg = 4'd0, output_ip_version_next;
reg [3:0] output_ip_ihl_reg = 4'd0, output_ip_ihl_next;
reg [5:0] output_ip_dscp_reg = 6'd0, output_ip_dscp_next;
reg [1:0] output_ip_ecn_reg = 2'd0, output_ip_ecn_next;
reg [15:0] output_ip_length_reg = 16'd0, output_ip_length_next;
reg [15:0] output_ip_identification_reg = 16'd0, output_ip_identification_next;
reg [2:0] output_ip_flags_reg = 3'd0, output_ip_flags_next;
reg [12:0] output_ip_fragment_offset_reg = 13'd0, output_ip_fragment_offset_next;
reg [7:0] output_ip_ttl_reg = 8'd0, output_ip_ttl_next;
reg [7:0] output_ip_protocol_reg = 8'd0, output_ip_protocol_next;
reg [15:0] output_ip_header_checksum_reg = 16'd0, output_ip_header_checksum_next;
reg [31:0] output_ip_source_ip_reg = 32'd0, output_ip_source_ip_next;
reg [31:0] output_ip_dest_ip_reg = 32'd0, output_ip_dest_ip_next;
reg [15:0] output_udp_source_port_reg = 16'd0, output_udp_source_port_next;
reg [15:0] output_udp_dest_port_reg = 16'd0, output_udp_dest_port_next;
reg [15:0] output_udp_length_reg = 16'd0, output_udp_length_next;
reg [15:0] output_udp_checksum_reg = 16'd0, output_udp_checksum_next;
// internal datapath
reg [7:0] output_udp_payload_tdata_int;
reg output_udp_payload_tvalid_int;
reg output_udp_payload_tready_int_reg = 1'b0;
reg output_udp_payload_tlast_int;
reg output_udp_payload_tuser_int;
wire output_udp_payload_tready_int_early;
assign input_udp_hdr_ready = input_udp_hdr_ready_reg;
assign input_udp_payload_tready = input_udp_payload_tready_reg;
{% for p in ports %}
assign output_{{p}}_udp_hdr_valid = output_{{p}}_udp_hdr_valid_reg;
assign output_{{p}}_eth_dest_mac = output_eth_dest_mac_reg;
assign output_{{p}}_eth_src_mac = output_eth_src_mac_reg;
assign output_{{p}}_eth_type = output_eth_type_reg;
assign output_{{p}}_ip_version = output_ip_version_reg;
assign output_{{p}}_ip_ihl = output_ip_ihl_reg;
assign output_{{p}}_ip_dscp = output_ip_dscp_reg;
assign output_{{p}}_ip_ecn = output_ip_ecn_reg;
assign output_{{p}}_ip_length = output_ip_length_reg;
assign output_{{p}}_ip_identification = output_ip_identification_reg;
assign output_{{p}}_ip_flags = output_ip_flags_reg;
assign output_{{p}}_ip_fragment_offset = output_ip_fragment_offset_reg;
assign output_{{p}}_ip_ttl = output_ip_ttl_reg;
assign output_{{p}}_ip_protocol = output_ip_protocol_reg;
assign output_{{p}}_ip_header_checksum = output_ip_header_checksum_reg;
assign output_{{p}}_ip_source_ip = output_ip_source_ip_reg;
assign output_{{p}}_ip_dest_ip = output_ip_dest_ip_reg;
assign output_{{p}}_udp_source_port = output_udp_source_port_reg;
assign output_{{p}}_udp_dest_port = output_udp_dest_port_reg;
assign output_{{p}}_udp_length = output_udp_length_reg;
assign output_{{p}}_udp_checksum = output_udp_checksum_reg;
{% endfor %}
// mux for output control signals
reg current_output_udp_hdr_valid;
reg current_output_udp_hdr_ready;
reg current_output_tvalid;
reg current_output_tready;
always @* begin
case (select_reg)
{%- for p in ports %}
{{w}}'d{{p}}: begin
current_output_udp_hdr_valid = output_{{p}}_udp_hdr_valid;
current_output_udp_hdr_ready = output_{{p}}_udp_hdr_ready;
current_output_tvalid = output_{{p}}_udp_payload_tvalid;
current_output_tready = output_{{p}}_udp_payload_tready;
end
{%- endfor %}
default: begin
current_output_udp_hdr_valid = 1'b0;
current_output_udp_hdr_ready = 1'b0;
current_output_tvalid = 1'b0;
current_output_tready = 1'b0;
end
endcase
end
always @* begin
select_next = select_reg;
frame_next = frame_reg;
input_udp_hdr_ready_next = input_udp_hdr_ready_reg & ~input_udp_hdr_valid;
input_udp_payload_tready_next = 1'b0;
{%- for p in ports %}
output_{{p}}_udp_hdr_valid_next = output_{{p}}_udp_hdr_valid_reg & ~output_{{p}}_udp_hdr_ready;
{%- endfor %}
output_eth_dest_mac_next = output_eth_dest_mac_reg;
output_eth_src_mac_next = output_eth_src_mac_reg;
output_eth_type_next = output_eth_type_reg;
output_ip_version_next = output_ip_version_reg;
output_ip_ihl_next = output_ip_ihl_reg;
output_ip_dscp_next = output_ip_dscp_reg;
output_ip_ecn_next = output_ip_ecn_reg;
output_ip_length_next = output_ip_length_reg;
output_ip_identification_next = output_ip_identification_reg;
output_ip_flags_next = output_ip_flags_reg;
output_ip_fragment_offset_next = output_ip_fragment_offset_reg;
output_ip_ttl_next = output_ip_ttl_reg;
output_ip_protocol_next = output_ip_protocol_reg;
output_ip_header_checksum_next = output_ip_header_checksum_reg;
output_ip_source_ip_next = output_ip_source_ip_reg;
output_ip_dest_ip_next = output_ip_dest_ip_reg;
output_udp_source_port_next = output_udp_source_port_reg;
output_udp_dest_port_next = output_udp_dest_port_reg;
output_udp_length_next = output_udp_length_reg;
output_udp_checksum_next = output_udp_checksum_reg;
if (input_udp_payload_tvalid & input_udp_payload_tready) begin
// end of frame detection
if (input_udp_payload_tlast) begin
frame_next = 1'b0;
end
end
if (~frame_reg & enable & input_udp_hdr_valid & ~current_output_udp_hdr_valid & ~current_output_tvalid) begin
// start of frame, grab select value
frame_next = 1'b1;
select_next = select;
input_udp_hdr_ready_next = 1'b1;
case (select)
{%- for p in ports %}
{{w}}'d{{p}}: output_{{p}}_udp_hdr_valid_next = 1'b1;
{%- endfor %}
endcase
output_eth_dest_mac_next = input_eth_dest_mac;
output_eth_src_mac_next = input_eth_src_mac;
output_eth_type_next = input_eth_type;
output_ip_version_next = input_ip_version;
output_ip_ihl_next = input_ip_ihl;
output_ip_dscp_next = input_ip_dscp;
output_ip_ecn_next = input_ip_ecn;
output_ip_length_next = input_ip_length;
output_ip_identification_next = input_ip_identification;
output_ip_flags_next = input_ip_flags;
output_ip_fragment_offset_next = input_ip_fragment_offset;
output_ip_ttl_next = input_ip_ttl;
output_ip_protocol_next = input_ip_protocol;
output_ip_header_checksum_next = input_ip_header_checksum;
output_ip_source_ip_next = input_ip_source_ip;
output_ip_dest_ip_next = input_ip_dest_ip;
output_udp_source_port_next = input_udp_source_port;
output_udp_dest_port_next = input_udp_dest_port;
output_udp_length_next = input_udp_length;
output_udp_checksum_next = input_udp_checksum;
end
input_udp_payload_tready_next = output_udp_payload_tready_int_early & frame_next;
output_udp_payload_tdata_int = input_udp_payload_tdata;
output_udp_payload_tvalid_int = input_udp_payload_tvalid & input_udp_payload_tready;
output_udp_payload_tlast_int = input_udp_payload_tlast;
output_udp_payload_tuser_int = input_udp_payload_tuser;
end
always @(posedge clk) begin
if (rst) begin
select_reg <= {{w}}'d0;
frame_reg <= 1'b0;
input_udp_hdr_ready_reg <= 1'b0;
input_udp_payload_tready_reg <= 1'b0;
{%- for p in ports %}
output_{{p}}_udp_hdr_valid_reg <= 1'b0;
{%- endfor %}
end else begin
select_reg <= select_next;
frame_reg <= frame_next;
input_udp_hdr_ready_reg <= input_udp_hdr_ready_next;
input_udp_payload_tready_reg <= input_udp_payload_tready_next;
{%- for p in ports %}
output_{{p}}_udp_hdr_valid_reg <= output_{{p}}_udp_hdr_valid_next;
{%- endfor %}
end
output_eth_dest_mac_reg <= output_eth_dest_mac_next;
output_eth_src_mac_reg <= output_eth_src_mac_next;
output_eth_type_reg <= output_eth_type_next;
output_ip_version_reg <= output_ip_version_next;
output_ip_ihl_reg <= output_ip_ihl_next;
output_ip_dscp_reg <= output_ip_dscp_next;
output_ip_ecn_reg <= output_ip_ecn_next;
output_ip_length_reg <= output_ip_length_next;
output_ip_identification_reg <= output_ip_identification_next;
output_ip_flags_reg <= output_ip_flags_next;
output_ip_fragment_offset_reg <= output_ip_fragment_offset_next;
output_ip_ttl_reg <= output_ip_ttl_next;
output_ip_protocol_reg <= output_ip_protocol_next;
output_ip_header_checksum_reg <= output_ip_header_checksum_next;
output_ip_source_ip_reg <= output_ip_source_ip_next;
output_ip_dest_ip_reg <= output_ip_dest_ip_next;
output_udp_source_port_reg <= output_udp_source_port_next;
output_udp_dest_port_reg <= output_udp_dest_port_next;
output_udp_length_reg <= output_udp_length_next;
output_udp_checksum_reg <= output_udp_checksum_next;
end
// output datapath logic
reg [7:0] output_udp_payload_tdata_reg = 8'd0;
{%- for p in ports %}
reg output_{{p}}_udp_payload_tvalid_reg = 1'b0, output_{{p}}_udp_payload_tvalid_next;
{%- endfor %}
reg output_udp_payload_tlast_reg = 1'b0;
reg output_udp_payload_tuser_reg = 1'b0;
reg [7:0] temp_udp_payload_tdata_reg = 8'd0;
reg temp_udp_payload_tvalid_reg = 1'b0, temp_udp_payload_tvalid_next;
reg temp_udp_payload_tlast_reg = 1'b0;
reg temp_udp_payload_tuser_reg = 1'b0;
// datapath control
reg store_udp_payload_int_to_output;
reg store_udp_payload_int_to_temp;
reg store_udp_payload_temp_to_output;
{% for p in ports %}
assign output_{{p}}_udp_payload_tdata = output_udp_payload_tdata_reg;
assign output_{{p}}_udp_payload_tvalid = output_{{p}}_udp_payload_tvalid_reg;
assign output_{{p}}_udp_payload_tlast = output_udp_payload_tlast_reg;
assign output_{{p}}_udp_payload_tuser = output_udp_payload_tuser_reg;
{% endfor %}
// enable ready input next cycle if output is ready or the temp reg will not be filled on the next cycle (output reg empty or no input)
assign output_udp_payload_tready_int_early = current_output_tready | (~temp_udp_payload_tvalid_reg & (~current_output_tvalid | ~output_udp_payload_tvalid_int));
always @* begin
// transfer sink ready state to source
{%- for p in ports %}
output_{{p}}_udp_payload_tvalid_next = output_{{p}}_udp_payload_tvalid_reg;
{%- endfor %}
temp_udp_payload_tvalid_next = temp_udp_payload_tvalid_reg;
store_udp_payload_int_to_output = 1'b0;
store_udp_payload_int_to_temp = 1'b0;
store_udp_payload_temp_to_output = 1'b0;
if (output_udp_payload_tready_int_reg) begin
// input is ready
if (current_output_tready | ~current_output_tvalid) begin
// output is ready or currently not valid, transfer data to output
{%- for p in ports %}
output_{{p}}_udp_payload_tvalid_next = output_udp_payload_tvalid_int & (select_reg == {{w}}'d{{p}});
{%- endfor %}
store_udp_payload_int_to_output = 1'b1;
end else begin
// output is not ready, store input in temp
temp_udp_payload_tvalid_next = output_udp_payload_tvalid_int;
store_udp_payload_int_to_temp = 1'b1;
end
end else if (current_output_tready) begin
// input is not ready, but output is ready
{%- for p in ports %}
output_{{p}}_udp_payload_tvalid_next = temp_udp_payload_tvalid_reg & (select_reg == {{w}}'d{{p}});
{%- endfor %}
temp_udp_payload_tvalid_next = 1'b0;
store_udp_payload_temp_to_output = 1'b1;
end
end
always @(posedge clk) begin
if (rst) begin
{%- for p in ports %}
output_{{p}}_udp_payload_tvalid_reg <= 1'b0;
{%- endfor %}
output_udp_payload_tready_int_reg <= 1'b0;
temp_udp_payload_tvalid_reg <= 1'b0;
end else begin
{%- for p in ports %}
output_{{p}}_udp_payload_tvalid_reg <= output_{{p}}_udp_payload_tvalid_next;
{%- endfor %}
output_udp_payload_tready_int_reg <= output_udp_payload_tready_int_early;
temp_udp_payload_tvalid_reg <= temp_udp_payload_tvalid_next;
end
// datapath
if (store_udp_payload_int_to_output) begin
output_udp_payload_tdata_reg <= output_udp_payload_tdata_int;
output_udp_payload_tlast_reg <= output_udp_payload_tlast_int;
output_udp_payload_tuser_reg <= output_udp_payload_tuser_int;
end else if (store_udp_payload_temp_to_output) begin
output_udp_payload_tdata_reg <= temp_udp_payload_tdata_reg;
output_udp_payload_tlast_reg <= temp_udp_payload_tlast_reg;
output_udp_payload_tuser_reg <= temp_udp_payload_tuser_reg;
end
if (store_udp_payload_int_to_temp) begin
temp_udp_payload_tdata_reg <= output_udp_payload_tdata_int;
temp_udp_payload_tlast_reg <= output_udp_payload_tlast_int;
temp_udp_payload_tuser_reg <= output_udp_payload_tuser_int;
end
end
endmodule
""")
output_file.write(t.render(
n=ports,
w=select_width,
name=name,
ports=range(ports)
))
print("Done")
if __name__ == "__main__":
main()
|
[
"alex@alexforencich.com"
] |
alex@alexforencich.com
|
8b0f79b33468d21fb8ca5b0bb2e158c37b34f9c2
|
0db038dc04fe5998539c362164c78a868d7db5d7
|
/MPC_Mppi_Main.py
|
a49d94f286a2a306378084842cb7db86d5c30fb1
|
[] |
no_license
|
ZhihuLi/LSTM_MPC_master1
|
5ef1749dfa6e0507a8beaf486bc359dda9a45500
|
e04a2ae49f1a0a98094849539db9b5b79f00b354
|
refs/heads/master
| 2022-12-09T02:11:06.951203
| 2020-09-01T04:25:45
| 2020-09-01T04:25:45
| 291,895,192
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,280
|
py
|
import matplotlib.pyplot as plt
from MPC_Mppi import MPPI
mppi = MPPI(512, 200, 0.5)
"""
The target shape is defined over 1500 time steps
"""
STEP_LIMIT = 600
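# Number of MPPI control steps rolled out in mppi_main below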
def get_real_shape():
h_real, w_real = mppi.get_real_shape()
return h_real, w_real
def mppi_main(h_target_list, w_target_list):
h_target_list_ = h_target_list
w_target_list_ = w_target_list
# get shape information from environment
h_real, w_real = get_real_shape() # get the current shape; before welding starts the parameters are set to mid-range values [8, 12], which are used to predict the shape
print("real height and width: ", h_real, w_real)
mppi.trajectory_set_goal(h_target_list_, w_target_list_)
mppi.Delta_reset()
# mppi.trajectory_update_state(h_real, w_real)
# rollout with mppi algo
for step in range(STEP_LIMIT):
if (step % 10 == 0):
print("step: ", step)
mppi.compute_cost(step)
target_action_Wf_list, target_action_Rs_list, target_action_Wf, target_action_Rs = mppi.compute_noise_action()
mppi.trajectory_update_shape(target_action_Wf, target_action_Rs)
mppi.Delta_update()
if step <= 200:
mppi.Delta_reset()
mppi.cost_clear()
h_real_list, w_real_list = mppi.get_real_shape_list()
Wf_real_list, Rs_real_list = mppi.get_real_parameter_list()
plt.figure(0)
plt.plot(h_real_list)
plt.plot(h_target_list_)
plt.xlim((0, 2000))
plt.ylim(0, 5)
plt.xlabel('time (s)')
# plt.ylabel('Height(mm)')
plt.ylabel('Height(mm)')
plt.title('Control result')
plt.figure(1)
plt.plot(w_real_list)
plt.plot(w_target_list_)
plt.xlim((0, 2000))
plt.ylim(0, 10)
plt.xlabel('time (s)')
# plt.ylabel('Height(mm)')
plt.ylabel('Width(mm)')
plt.title('Control result')
plt.show()
plt.figure(2)
plt.plot(Wf_real_list)
plt.plot(Rs_real_list)
plt.xlim((0, 2000))
plt.ylim(0, 30)
plt.xlabel('time (s)')
# plt.ylabel('Height(mm)')
# plt.ylabel('Width(mm)')
plt.title('Control result')
plt.show()
if __name__ == '__main__':
h_target_list = [2]*1700
w_target_list = [6]*1700
mppi_main(h_target_list, w_target_list)
|
[
"18810723878@163.com"
] |
18810723878@163.com
|
0946b605cba59cc67b6c4c1e03ab4be2d829547a
|
470a76fb3844612d8ee4fa2e01ef01ee6b071ca8
|
/Data_Engineering_Project_2/data/process_data.py
|
ffc36e496eec60b7f0a8efea799cad9869e7ac75
|
[] |
no_license
|
sakshigoplani/DataScienceNanodegree
|
8aeaac613536643c89d925ea6d9a4cf3ce5a6bf6
|
ad8b18318813e181d5cf0895c488f550b0fe88a8
|
refs/heads/master
| 2021-07-05T03:47:14.283910
| 2020-12-03T19:50:27
| 2020-12-03T19:50:27
| 207,171,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,094
|
py
|
############################################################################
# ##
# ##
# Author: Sakshi Haresh Goplani ##
# Project: Data Engineering - Disaster Response Pipeline ##
# Email: sakshigoplani9@gmail.com ##
# ##
############################################################################
""" Data Builder Utility
This script takes in the paths where the CSV files reside. It goes
over the data, cleans/processes it and saves the clean data in a SQL table.
Usage: python process_data.py <messages_filepath> <categories_filepath>
<database_filepath> <tablename>
"""
import os
import sys
import logging
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
# METADATA #
__version__ = 3.6
__author__ = 'Sakshi Haresh Goplani'
################
def argument_sanitization(messages_filepath, categories_filepath):
""" Validate file paths
Args:
messages_filepath (string): Path to messages.csv
categories_filepath (string): Path to categories.csv
Returns:
N/A
"""
if not os.path.isfile(messages_filepath):
logger.error("{} is not valid".format(messages_filepath))
if not os.path.isfile(categories_filepath):
logger.error("{} is not valid".format(categories_filepath))
def load_data(messages_filepath, categories_filepath):
""" Read and Merge data
Args:
messages_filepath (string): Path to messages.csv
categories_filepath (string): Path to categories.csv
Returns:
df (pandas dataframe): Merged messages and categories dataframe
"""
# Confirm paths are valid and read in datasets into dataframe
argument_sanitization(messages_filepath, categories_filepath)
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
# Merge Messages and Categories dataframes into single dataframe
df = messages.merge(categories, how='outer', on='id')
return df
def clean_data(df):
""" Clean Data into relevant columns
Args:
df (pandas dataframe): Merged messages and categories dataframe
Returns:
df (pandas dataframe): Clean dataframe
"""
# Create a dataframe of the individual category columns
categories = df['categories'].str.split(';', expand=True)
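# Illustrative: a raw value like 'related-1;request-0;offer-0' is split into one
# column per category name, with the trailing digit kept as the value below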
# Select the first row of the categories dataframe
row = categories.iloc[0, :]
# Extract a list of new column names for categories
category_colnames = list(map(lambda x: x.split('-')[0], row))
categories.columns = category_colnames
# Convert category values to numeric 0/1 values
for column in categories:
categories[column] = categories[column].str.split('-').str.get(1)
categories[column] = pd.to_numeric(categories[column])
# Replace categories column in df with new category columns
df = df.drop('categories', axis=1)
# Concat cleansed categories with df
df = pd.concat([df, categories], axis=1)
# Drop duplicates
df.drop_duplicates(subset=['id'], keep='first', inplace=True)
return df
def save_data(df, database_filename, tablename):
""" Save data to SQL Database
Args:
df (pandas dataframe): Final cleansed dataframe
database_filename (string): Name of the DB File
tablename (string): Name of the table to create in DB
Returns:
N/A
"""
# Build a SQLite connection URL from the database file path
engine = create_engine('sqlite:///{}'.format(database_filename))
df.to_sql(tablename, engine, index=False)
def main():
if len(sys.argv) == 5:
messages_filepath, \
categories_filepath, \
database_filepath, \
tablename = sys.argv[1:]
logger.info(" Loading data...\n MESSAGES: {}\n CATEGORIES: {} "
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
logger.info(" Cleaning data...")
df = clean_data(df)
logger.info(" Saving data...\n DATABASE: {}\n TABLE NAME: {} "
.format(database_filepath, tablename))
save_data(df, database_filepath, tablename)
logger.info(" Cleaned data saved to database!")
else:
logger.error(
"""
Please provide the filepaths of the messages and categories
datasets as the first and second argument respectively, as
well as the filepath of the database to save the cleaned data
to as the third argument. \n\nExample: python process_data.py
disaster_messages.csv disaster_categories.csv
DisasterResponse.db
"""
)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
main()
|
[
"sakshigoplani9@gmail.com"
] |
sakshigoplani9@gmail.com
|
bd44f0d0b5de2c6da91c93b0dc68b71f562537d1
|
a675114201f384c9af03faa397ae6d15b2848953
|
/yangiliklar/yangiliklar/urls.py
|
915c3fa4b4cc8173be4427d3ff5943badda04b05
|
[] |
no_license
|
Zokhidjon1903/Yangiliklar
|
304e81ab051634d481e4a68059a5c5758ac51bac
|
fc3efe74dc7f2c6059b6d9ed1b2c860388d04502
|
refs/heads/main
| 2023-08-05T10:48:28.848235
| 2021-09-17T06:47:40
| 2021-09-17T06:47:40
| 404,962,630
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 974
|
py
|
"""yangiliklar URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('news.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"z.kaytarov@gmail.com"
] |
z.kaytarov@gmail.com
|
3e9d9915c2093b671ab7488bbdeb9d45d5e790f6
|
609c5d5723adb1a2e1b846a31c63f858bfab909a
|
/env/bin/pip2.7
|
338fc56dbfe3778a95a5127d605dbd2d4a21aed8
|
[] |
no_license
|
jndal12/norsetrips
|
71dfeb025134e666c95b251b3866dac77315c813
|
c13c78d9a042afdd49718626fedf3d3cdd5ab8a5
|
refs/heads/master
| 2021-01-10T09:50:00.463756
| 2015-10-15T19:55:30
| 2015-10-15T19:55:30
| 43,389,757
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
7
|
#!/Users/navaju01/Senior-Project-/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
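# Strip any '-script.pyw' or '.exe' suffix from argv[0] before delegating to pip's main()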
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"navaju01@luther.edu"
] |
navaju01@luther.edu
|
7a750c6e46c33e61126aeaf5efa498ab4b428e69
|
467018e22a423cf43fb219725aca2da489a97369
|
/mysite/blog/models.py
|
f0657242e97b5c377ff823aa845db0e37aa52fe3
|
[] |
no_license
|
manrevlob/cursoDjango
|
d74a272dc1122693822a000a8b22fa5c5acd9092
|
09d9087e6c4ab149ae6a08f7b830777f47440081
|
refs/heads/master
| 2020-08-27T09:57:48.479016
| 2020-03-19T11:23:54
| 2020-03-19T11:23:54
| 217,325,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 309
|
py
|
from django.db import models
# Create your models here.
class Post(models.Model):
title = models.CharField(max_length=200)
text = models.TextField()
class Comment(models.Model):
name = models.CharField(max_length=50)
comment = models.TextField()
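# Plain integer holding the related Post's id (kept as an IntegerField rather than a ForeignKey)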
post_id = models.IntegerField(default=0)
|
[
"manuel.revillalobo@plexus.local"
] |
manuel.revillalobo@plexus.local
|
faf00bf81ee5c667d58a600dd389512ea52f1789
|
2a5241bd7ddda9bc661f59f488886049aa104ab1
|
/ExecutionCodes/dag2.py
|
10f01e8c605ddb1b7f0442e4bc74b511606fba3b
|
[] |
no_license
|
Avighan/PythonCreatives
|
00d51c67b6c95d0096f5fedaaa879e2c9958be3e
|
f75ddb4efe0516339b6fbb4d1fbd50e4090c6213
|
refs/heads/master
| 2020-12-12T16:01:05.564287
| 2020-02-05T22:55:32
| 2020-02-05T22:55:32
| 234,167,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,697
|
py
|
import sys
import os
sys.path.append(os.path.join(os.path.abspath('.'), ''))
sys.path.append(os.path.join(os.path.abspath('..'), ''))
sys.path.append(os.path.join(os.path.dirname(__file__), '.'))
sys.path.append(os.path.join(os.path.dirname(__file__)))
from script import TemplateExecutor as te
import pdb
from datetime import datetime, timedelta
from script import TemplateCreator as temp_create
from script import Template1Functions_for_Airflow as tfunc
from script import Airflow_dag_creation as dagcreate
from airflow.operators import PythonOperator, BashOperator, DummyOperator
from airflow.models import DAG
seven_days_ago = datetime.combine(
datetime.today() - timedelta(7), datetime.min.time())
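# i.e. midnight seven days ago, used as the default start_date for the DAG below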
args = {'owner': 'DLTeam', 'start_date': seven_days_ago, }
execute = te.TemplateExecute()
flow = execute.load_from_file('Template_Airflow.pkl')
params = {'SetData': {'folderpath': './Example2/',
'data_files': ['./datasets/diabetes.csv'],
'file_type': 'csv',
'y': 'Outcome',
'model_type': 'Classification',
'model_to_run': 'XGBClassifier',
'hyperparameters': {'penalty': ['l1', 'l2'], 'C': [1.0, 2.0, 5.0]},
'metrics_to_check': ['Accuracy', 'Confusion'],
'outputPath': './'}}
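# Node parameters injected into the loaded template flow via update_node() below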
flow = execute.update_node(params)
execute.update_parameters('folderpath', './Example2/')
# execute.execute_template(flow)
dag = DAG(dag_id='AutoML2', default_args=args, schedule_interval=None)
dagimplement = dagcreate.Dagtaskexecute(template_flow=flow, dag=dag)
dagimplement.create_dag()
|
[
"noreply@github.com"
] |
Avighan.noreply@github.com
|
e3d3b04b937958d29865f0dc70681222e2ebaf49
|
63c02b040f95688935405e5fe342b048a7b78ae1
|
/project/tests/test_data_processing_funcs.py
|
e467f62e34c96143588286eecf700d23ef9b0498
|
[] |
no_license
|
Astony/FinnalProject
|
e432b91f1d298e31520107f7723eae4981f4a245
|
96204217b541e2af4ec315f3e9a603c51684eb58
|
refs/heads/master
| 2023-07-15T05:05:05.465114
| 2021-08-31T20:54:33
| 2021-08-31T20:54:33
| 399,906,901
| 0
| 0
| null | 2021-08-31T15:50:46
| 2021-08-25T17:40:50
| null |
UTF-8
|
Python
| false
| false
| 1,291
|
py
|
import pandas as pd
from data_processing import calc_central, city_center_coord, define_address
def mock_geocoder(*args):
"""Mock function for geocoder to avoid requests to API"""
return "Test address"
def test_define_addresses_function():
"""Test that define_addresses_function add address that was got by geocoder"""
test_df = pd.DataFrame({"Latitude": [100], "Longitude": [0]})
geocoder = mock_geocoder
result_df = define_address(test_df, 1, geocoder)
assert result_df.Address.item() == "Test address"
assert result_df.Latitude.item() == 100
def test_calc_central_coord():
"""Test that calc_central return the middle between to coordinates"""
test_df = pd.DataFrame({"Latitude": [60, 80], "Longitude": [100, 100]})
assert calc_central(test_df) == (70, 100)
def test_get_cities_countries_central_coord():
"""Test city_center_coord return the central coordinates for 2 hotels in one city"""
test_df = pd.DataFrame(
{
"Latitude": [60, 80],
"Longitude": [100, 100],
"City": ["Fakecity", "Fakecity"],
"Country": ["fakecountry", "fakecountry"],
}
)
assert city_center_coord(test_df) == (
["Fakecity"],
["fakecountry"],
[(70, 100)],
)
|
[
"savrushkin.aa@gmail.com"
] |
savrushkin.aa@gmail.com
|
f371d214be53eb924a2b6d24f2fe272f6fea4898
|
5214ed20be64aea19f5c874c4ea80ab28b606d8c
|
/lookup/data/test.py
|
970893e164e5dc279f6ce0e818e93cbc95c2dd0f
|
[] |
no_license
|
eknuth/timezone
|
618456d7482118afa446ed43a26d6f7df635afb4
|
39cf523afc9eed42a906a16f7cf61a0d48dd74bc
|
refs/heads/master
| 2021-01-10T19:01:48.587584
| 2010-04-16T10:47:11
| 2010-04-16T10:47:11
| 607,268
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
import urllib2
from urllib import urlencode
f = open('cities.txt')
for line in f:
(city, lon, lat) = line.split("\t")
url = 'http://timezone-lookup.com/'
args = {'coords': "%s,%s" % (lon, lat)}
full_url = '%s?%s' % (url, urlencode(args))
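# e.g. http://timezone-lookup.com/?coords=<lon>%2C<lat> (urlencode escapes the comma)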
try:
response = urllib2.urlopen(full_url)
print response.read()
except:
print "Couldn't find %s" % city
|
[
"eknuth@macbookpro.local"
] |
eknuth@macbookpro.local
|
9b1a4eab14b591308b564b66521f5014c2f337fe
|
3cdf86a62c6075e5d070b0a8e47823d5c8dff35e
|
/unbalanced_dataset/under_sampling/nearmiss.py
|
5ab42644560cbabda4b65743a6de596fdfa1bba9
|
[] |
no_license
|
StefanKal/DarknetReport
|
184cc740fb0faf31a7b95346b61e51b0e534cef7
|
c265a04ac9d032b72a40cdebca1017ef44ca4621
|
refs/heads/master
| 2021-01-20T20:32:01.989340
| 2016-07-26T16:02:15
| 2016-07-26T16:02:15
| 61,836,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,200
|
py
|
"""Class to perform under-sampling based on nearmiss methods."""
from __future__ import print_function
from __future__ import division
import numpy as np
from collections import Counter
from sklearn.utils import check_X_y
from sklearn.neighbors import NearestNeighbors
from .under_sampler import UnderSampler
class NearMiss(UnderSampler):
"""Class to perform under-sampling based on NearMiss methods.
Parameters
----------
ratio : str or float, optional (default='auto')
        If 'auto', the ratio will be defined automatically to balance
        the dataset. Otherwise, the ratio corresponds to the number
        of samples in the minority class over the number of samples
        in the majority class.
return_indices : bool, optional (default=False)
Either to return or not the indices which will be selected from
the majority class.
random_state : int or None, optional (default=None)
Seed for random number generation.
verbose : bool, optional (default=True)
Boolean to either or not print information about the processing
version : int, optional (default=1)
Version of the NearMiss to use. Possible values
are 1, 2 or 3.
size_ngh : int, optional (default=3)
Size of the neighbourhood to consider to compute the
average distance to the minority point samples.
ver3_samp_ngh : int, optional (default=3)
        The NearMiss-3 algorithm starts with a re-sampling phase. This
        parameter corresponds to the number of neighbours selected to
        create the subset in which the selection will be performed.
n_jobs : int, optional (default=-1)
The number of thread to open when it is possible.
**kwargs : keywords
Parameter to use for the Nearest Neighbours object.
Attributes
----------
ratio_ : str or float, optional (default='auto')
        If 'auto', the ratio will be defined automatically to balance
        the dataset. Otherwise, the ratio corresponds to the number
        of samples in the minority class over the number of samples
        in the majority class.
rs_ : int or None, optional (default=None)
Seed for random number generation.
min_c_ : str or int
The identifier of the minority class.
max_c_ : str or int
The identifier of the majority class.
stats_c_ : dict of str/int : int
        A dictionary in which the number of occurrences of each class is
reported.
Notes
-----
The methods are based on [1]_.
    The class supports multiple classes.
References
----------
.. [1] I. Mani, I. Zhang. "kNN approach to unbalanced data distributions:
a case study involving information extraction," In Proceedings of
workshop on learning from imbalanced datasets, 2003.
"""
def __init__(self, ratio='auto', return_indices=False, random_state=None,
verbose=True, version=1, size_ngh=3, ver3_samp_ngh=3,
n_jobs=-1, **kwargs):
"""Initialisation of clustering centroids object.
Parameters
----------
ratio : str or float, optional (default='auto')
            If 'auto', the ratio will be defined automatically to balance
            the dataset. Otherwise, the ratio corresponds to the number
            of samples in the minority class over the number of samples
            in the majority class.
return_indices : bool, optional (default=False)
Either to return or not the indices which will be selected from
the majority class.
random_state : int or None, optional (default=None)
Seed for random number generation.
verbose : bool, optional (default=True)
Boolean to either or not print information about the processing
version : int, optional (default=1)
Version of the NearMiss to use. Possible values
are 1, 2 or 3.
size_ngh : int, optional (default=3)
Size of the neighbourhood to consider to compute the
average distance to the minority point samples.
ver3_samp_ngh : int, optional (default=3)
            The NearMiss-3 algorithm starts with a re-sampling phase. This
            parameter corresponds to the number of neighbours selected to
            create the subset in which the selection will be performed.
n_jobs : int, optional (default=-1)
The number of thread to open when it is possible.
**kwargs : keywords
Parameter to use for the Nearest Neighbours object.
Returns
-------
None
"""
super(NearMiss, self).__init__(ratio=ratio,
return_indices=return_indices,
random_state=random_state,
verbose=verbose)
# Assign the parameter of the element of this class
# Check that the version asked is implemented
if not (version == 1 or version == 2 or version == 3):
            raise ValueError('UnbalancedData.NearMiss: there are only 3 '
                             'versions available with parameter version=1/2/3')
self.version = version
self.size_ngh = size_ngh
self.ver3_samp_ngh = ver3_samp_ngh
self.n_jobs = n_jobs
self.kwargs = kwargs
def fit(self, X, y):
"""Find the classes statistics before to perform sampling.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : ndarray, shape (n_samples, )
Corresponding label for each sample in X.
Returns
-------
self : object,
Return self.
"""
# Check the consistency of X and y
X, y = check_X_y(X, y)
super(NearMiss, self).fit(X, y)
return self
def _selection_dist_based(self, X, y, dist_vec, num_samples, key,
sel_strategy='nearest'):
"""Select the appropriate samples depending of the strategy selected.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Original samples.
y : ndarray, shape (n_samples, )
Associated label to X.
dist_vec : ndarray, shape (n_samples, )
            The distance matrix to the nearest neighbour.
num_samples: int
The desired number of samples to select.
key : str or int,
The target class.
sel_strategy : str, optional (default='nearest')
Strategy to select the samples. Either 'nearest' or 'farthest'
Returns
-------
X_sel : ndarray, shape (num_samples, n_features)
Selected samples.
y_sel : ndarray, shape (num_samples, )
The associated label.
idx_sel : ndarray, shape (num_samples, )
The list of the indices of the selected samples.
"""
# Compute the distance considering the farthest neighbour
dist_avg_vec = np.sum(dist_vec[:, -self.size_ngh:], axis=1)
# Sort the list of distance and get the index
if sel_strategy == 'nearest':
sort_way = False
elif sel_strategy == 'farthest':
sort_way = True
else:
raise NotImplementedError
sorted_idx = sorted(range(len(dist_avg_vec)),
key=dist_avg_vec.__getitem__,
reverse=sort_way)
# Select the desired number of samples
sel_idx = sorted_idx[:num_samples]
return (X[y == key][sel_idx], y[y == key][sel_idx],
np.nonzero(y == key)[0][sel_idx])
def transform(self, X, y):
"""Resample the dataset.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : ndarray, shape (n_samples, )
Corresponding label for each sample in X.
Returns
-------
X_resampled : ndarray, shape (n_samples_new, n_features)
The array containing the resampled data.
y_resampled : ndarray, shape (n_samples_new)
The corresponding label of `X_resampled`
idx_under : ndarray, shape (n_samples, )
            If `return_indices` is `True`, an array containing the indices
            of the samples which have been selected is also returned.
"""
# Check the consistency of X and y
X, y = check_X_y(X, y)
super(NearMiss, self).transform(X, y)
# Start with the minority class
X_min = X[y == self.min_c_]
y_min = y[y == self.min_c_]
# All the minority class samples will be preserved
X_resampled = X_min.copy()
y_resampled = y_min.copy()
        # Compute the number of samples needed
if self.ratio_ == 'auto':
num_samples = self.stats_c_[self.min_c_]
else:
num_samples = int(self.stats_c_[self.min_c_] / self.ratio_)
# If we need to offer support for the indices
if self.return_indices:
idx_under = np.nonzero(y == self.min_c_)[0]
# For each element of the current class, find the set of NN
# of the minority class
# Call the constructor of the NN
nn_obj = NearestNeighbors(n_neighbors=self.size_ngh,
n_jobs=self.n_jobs,
**self.kwargs)
        # Fit the minority class since we want to know the distance
        # to these points
nn_obj.fit(X[y == self.min_c_])
        # Loop over the other classes to under-sample them
for key in self.stats_c_.keys():
            # Skip the minority class
if key == self.min_c_:
continue
# Get the samples corresponding to the current class
sub_samples_x = X[y == key]
sub_samples_y = y[y == key]
if self.version == 1:
# Find the NN
dist_vec, idx_vec = nn_obj.kneighbors(
sub_samples_x,
n_neighbors=self.size_ngh)
# Select the right samples
sel_x, sel_y, idx_tmp = self._selection_dist_based(
X,
y,
dist_vec,
num_samples,
key,
sel_strategy='nearest')
elif self.version == 2:
# Find the NN
dist_vec, idx_vec = nn_obj.kneighbors(
sub_samples_x,
n_neighbors=self.stats_c_[self.min_c_])
# Select the right samples
sel_x, sel_y, idx_tmp = self._selection_dist_based(
X,
y,
dist_vec,
num_samples,
key,
sel_strategy='nearest')
elif self.version == 3:
# We need a new NN object to fit the current class
nn_obj_cc = NearestNeighbors(n_neighbors=self.ver3_samp_ngh,
n_jobs=self.n_jobs,
**self.kwargs)
nn_obj_cc.fit(sub_samples_x)
# Find the set of NN to the minority class
dist_vec, idx_vec = nn_obj_cc.kneighbors(X_min)
# Create the subset containing the samples found during the NN
# search. Linearize the indexes and remove the double values
idx_vec = np.unique(idx_vec.reshape(-1))
# Create the subset
sub_samples_x = sub_samples_x[idx_vec, :]
sub_samples_y = sub_samples_y[idx_vec]
# Compute the NN considering the current class
dist_vec, idx_vec = nn_obj.kneighbors(
sub_samples_x,
n_neighbors=self.size_ngh)
sel_x, sel_y, idx_tmp = self._selection_dist_based(
X,
y,
dist_vec,
num_samples,
key,
sel_strategy='farthest')
else:
raise NotImplementedError
# If we need to offer support for the indices selected
if self.return_indices:
idx_under = np.concatenate((idx_under, idx_tmp), axis=0)
X_resampled = np.concatenate((X_resampled, sel_x), axis=0)
y_resampled = np.concatenate((y_resampled, sel_y), axis=0)
if self.verbose:
print("Under-sampling performed: {}".format(Counter(y_resampled)))
# Check if the indices of the samples selected should be returned too
if self.return_indices:
# Return the indices of interest
return X_resampled, y_resampled, idx_under
else:
return X_resampled, y_resampled
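# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original
# module). It assumes the UnderSampler base class wires up fit()/transform()
# as used above, and it builds a small synthetic imbalanced dataset.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    rng = np.random.RandomState(42)
    # 200 majority samples around the origin, 20 minority samples shifted by +3
    X = np.vstack([rng.randn(200, 2), rng.randn(20, 2) + 3.])
    y = np.array([0] * 200 + [1] * 20)
    nm = NearMiss(version=1, size_ngh=3, verbose=True)
    nm.fit(X, y)
    X_res, y_res = nm.transform(X, y)
    print("Resampled class counts: {}".format(Counter(y_res)))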
|
[
"stefan.kalchmair@gmail.com"
] |
stefan.kalchmair@gmail.com
|
c5093f06d78421e5e06a8db7730c58cbcafd1e0d
|
39bef50ed12468e57ad94a8e2551da6c7c45c8ed
|
/networkx/drawing/nx_pylab.py
|
b96ab87ca39c0e2b1a8f38fc8a0858575319078d
|
[] |
no_license
|
biancini/Rorschach-Test-Platform
|
b1a5dfdbe5a15a68ce4dcf66887346fbf2e94169
|
7ae68e1054637046278325eaa419b23f09b420d3
|
refs/heads/master
| 2020-05-17T11:00:13.889678
| 2012-04-11T16:31:19
| 2012-04-11T16:31:19
| 3,789,381
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,792
|
py
|
"""
**********
Matplotlib
**********
Draw networks with matplotlib (pylab).
See Also
--------
matplotlib: http://matplotlib.sourceforge.net/
pygraphviz: http://networkx.lanl.gov/pygraphviz/
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2004-2010 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['draw',
'draw_networkx',
'draw_networkx_nodes',
'draw_networkx_edges',
'draw_networkx_labels',
'draw_networkx_edge_labels',
'draw_circular',
'draw_random',
'draw_spectral',
'draw_spring',
'draw_shell',
'draw_graphviz']
import networkx as nx
from networkx.drawing.layout import shell_layout,\
circular_layout,spectral_layout,spring_layout,random_layout
def draw(G, pos=None, ax=None, hold=None, **kwds):
"""Draw the graph G with Matplotlib (pylab).
Draw the graph as a simple representation with no node
labels or edge labels and using the full Matplotlib figure area
and no axis labels by default. See draw_networkx() for more
full-featured drawing that allows title, axis labels etc.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in specified Matplotlib axes.
hold : bool, optional
Set the Matplotlib hold state. If True subsequent draw
commands will be added to the current axes.
**kwds : optional keywords
See networkx.draw_networkx() for a description of optional keywords.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
See Also
--------
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
Notes
-----
This function has the same name as pylab.draw and pyplot.draw
so beware when using
>>> from networkx import *
since you might overwrite the pylab.draw function.
Good alternatives are:
With pylab:
>>> import pylab as P #
>>> import networkx as nx
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G) # networkx draw()
>>> P.draw() # pylab draw()
With pyplot
>>> import matplotlib.pyplot as plt
>>> import networkx as nx
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G) # networkx draw()
>>> plt.draw() # pyplot draw()
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
"""
try:
import matplotlib.pylab as pylab
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
cf=pylab.gcf()
cf.set_facecolor('w')
if ax is None:
if cf._axstack() is None:
ax=cf.add_axes((0,0,1,1))
else:
ax=cf.gca()
# allow callers to override the hold state by passing hold=True|False
b = pylab.ishold()
h = kwds.pop('hold', None)
if h is not None:
pylab.hold(h)
try:
draw_networkx(G,pos=pos,ax=ax,**kwds)
ax.set_axis_off()
pylab.draw_if_interactive()
except:
pylab.hold(b)
raise
pylab.hold(b)
return
def draw_networkx(G, pos=None, with_labels=True, **kwds):
"""Draw the graph G using Matplotlib.
Draw the graph with Matplotlib with options for node positions,
labeling, titles, and many other drawing features.
See draw() for simple drawing without labels or axes.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
with_labels : bool, optional (default=True)
Set to True to draw labels on the nodes.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional (default G.nodes())
Draw only specified nodes
edgelist : list, optional (default=G.edges())
Draw only specified edges
node_size : scalar or array, optional (default=300)
Size of nodes. If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats, (default='r')
Node color. Can be a single color format string,
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string, optional (default='o')
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8'.
alpha : float, optional (default=1.0)
The node transparency
cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of nodes
vmin,vmax : float, optional (default=None)
Minimum and maximum for node colormap scaling
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
width : float, optional (default=1.0)
Line width of edges
edge_color : color string, or array of floats (default='r')
Edge color. Can be a single color format string,
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
    edge_cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of edges
edge_vmin,edge_vmax : floats, optional (default=None)
Minimum and maximum for edge colormap scaling
    style : string, optional (default='solid')
        Edge line style (solid|dashed|dotted|dashdot)
    labels : dictionary, optional (default=None)
Node labels in a dictionary keyed by node of text labels
font_size : int, optional (default=12)
Font size for text labels
font_color : string, optional (default='k' black)
Font color string
font_weight : string, optional (default='normal')
Font weight
font_family : string, optional (default='sans-serif')
Font family
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
>>> import pylab
    >>> limits=pylab.axis('off') # turn off axis
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pylab as pylab
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if pos is None:
pos=nx.drawing.spring_layout(G) # default to spring layout
node_collection=draw_networkx_nodes(G, pos, **kwds)
edge_collection=draw_networkx_edges(G, pos, **kwds)
if with_labels:
draw_networkx_labels(G, pos, **kwds)
pylab.draw_if_interactive()
def draw_networkx_nodes(G, pos,
nodelist=None,
node_size=300,
node_color='r',
node_shape='o',
alpha=1.0,
cmap=None,
vmin=None,
vmax=None,
ax=None,
linewidths=None,
**kwds):
"""Draw the nodes of the graph G.
This draws only the nodes of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional
Draw only specified nodes (default G.nodes())
node_size : scalar or array
Size of nodes (default=300). If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats
Node color. Can be a single color format string (default='r'),
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8' (default='o').
alpha : float
The node transparency (default=1.0)
cmap : Matplotlib colormap
Colormap for mapping intensities of nodes (default=None)
vmin,vmax : floats
Minimum and maximum for node colormap scaling (default=None)
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nodes=nx.draw_networkx_nodes(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pylab as pylab
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax=pylab.gca()
if nodelist is None:
nodelist=G.nodes()
if not nodelist or len(nodelist)==0: # empty nodelist, no drawing
return None
try:
xy=numpy.asarray([pos[v] for v in nodelist])
except KeyError as e:
raise nx.NetworkXError('Node %s has no position.'%e)
except ValueError:
raise nx.NetworkXError('Bad value in node positions.')
node_collection=ax.scatter(xy[:,0], xy[:,1],
s=node_size,
c=node_color,
marker=node_shape,
cmap=cmap,
vmin=vmin,
vmax=vmax,
alpha=alpha,
linewidths=linewidths)
# pylab.axes(ax)
pylab.sci(node_collection)
node_collection.set_zorder(2)
return node_collection
def draw_networkx_edges(G, pos,
edgelist=None,
width=1.0,
edge_color='k',
style='solid',
alpha=None,
edge_cmap=None,
edge_vmin=None,
edge_vmax=None,
ax=None,
arrows=True,
**kwds):
"""Draw the edges of the graph G.
This draws only the edges of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
edgelist : collection of edge tuples
Draw only specified edges(default=G.edges())
width : float
Line width of edges (default =1.0)
edge_color : color string, or array of floats
Edge color. Can be a single color format string (default='r'),
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
style : string
        Edge line style (default='solid') (solid|dashed|dotted|dashdot)
alpha : float
The edge transparency (default=1.0)
    edge_cmap : Matplotlib colormap
Colormap for mapping intensities of edges (default=None)
edge_vmin,edge_vmax : floats
Minimum and maximum for edge colormap scaling (default=None)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
arrows : bool, optional (default=True)
For directed graphs, if True draw arrowheads.
Notes
-----
For directed graphs, "arrows" (actually just thicker stubs) are drawn
at the head end. Arrows can be turned off with keyword arrows=False.
Yes, it is ugly but drawing proper arrows with Matplotlib this
way is tricky.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edges=nx.draw_networkx_edges(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib
import matplotlib.pylab as pylab
import matplotlib.cbook as cb
from matplotlib.colors import colorConverter,Colormap
from matplotlib.collections import LineCollection
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax=pylab.gca()
if edgelist is None:
edgelist=G.edges()
if not edgelist or len(edgelist)==0: # no edges!
return None
# set edge positions
edge_pos=numpy.asarray([(pos[e[0]],pos[e[1]]) for e in edgelist])
if not cb.iterable(width):
lw = (width,)
else:
lw = width
if not cb.is_string_like(edge_color) \
and cb.iterable(edge_color) \
and len(edge_color)==len(edge_pos):
if numpy.alltrue([cb.is_string_like(c)
for c in edge_color]):
# (should check ALL elements)
# list of color letters such as ['k','r','k',...]
edge_colors = tuple([colorConverter.to_rgba(c,alpha)
for c in edge_color])
elif numpy.alltrue([not cb.is_string_like(c)
for c in edge_color]):
# If color specs are given as (rgb) or (rgba) tuples, we're OK
if numpy.alltrue([cb.iterable(c) and len(c) in (3,4)
for c in edge_color]):
edge_colors = tuple(edge_color)
else:
# numbers (which are going to be mapped with a colormap)
edge_colors = None
else:
raise ValueError('edge_color must consist of either color names or numbers')
else:
if cb.is_string_like(edge_color) or len(edge_color)==1:
edge_colors = ( colorConverter.to_rgba(edge_color, alpha), )
else:
            raise ValueError('edge_color must be a single color or list of exactly m colors where m is the number of edges')
edge_collection = LineCollection(edge_pos,
colors = edge_colors,
linewidths = lw,
antialiaseds = (1,),
linestyle = style,
transOffset = ax.transData,
)
edge_collection.set_zorder(1) # edges go behind nodes
ax.add_collection(edge_collection)
# Note: there was a bug in mpl regarding the handling of alpha values for
# each line in a LineCollection. It was fixed in matplotlib in r7184 and
# r7189 (June 6 2009). We should then not set the alpha value globally,
# since the user can instead provide per-edge alphas now. Only set it
# globally if provided as a scalar.
if cb.is_numlike(alpha):
edge_collection.set_alpha(alpha)
if edge_colors is None:
if edge_cmap is not None:
assert(isinstance(edge_cmap, Colormap))
edge_collection.set_array(numpy.asarray(edge_color))
edge_collection.set_cmap(edge_cmap)
if edge_vmin is not None or edge_vmax is not None:
edge_collection.set_clim(edge_vmin, edge_vmax)
else:
edge_collection.autoscale()
pylab.sci(edge_collection)
arrow_collection=None
if G.is_directed() and arrows:
# a directed graph hack
# draw thick line segments at head end of edge
# waiting for someone else to implement arrows that will work
arrow_colors = edge_colors
a_pos=[]
p=1.0-0.25 # make head segment 25 percent of edge length
for src,dst in edge_pos:
x1,y1=src
x2,y2=dst
dx=x2-x1 # x offset
dy=y2-y1 # y offset
d=numpy.sqrt(float(dx**2+dy**2)) # length of edge
if d==0: # source and target at same position
continue
if dx==0: # vertical edge
xa=x2
ya=dy*p+y1
if dy==0: # horizontal edge
ya=y2
xa=dx*p+x1
else:
theta=numpy.arctan2(dy,dx)
xa=p*d*numpy.cos(theta)+x1
ya=p*d*numpy.sin(theta)+y1
a_pos.append(((xa,ya),(x2,y2)))
arrow_collection = LineCollection(a_pos,
colors = arrow_colors,
linewidths = [4*ww for ww in lw],
antialiaseds = (1,),
transOffset = ax.transData,
)
arrow_collection.set_zorder(1) # edges go behind nodes
ax.add_collection(arrow_collection)
# update view
minx = numpy.amin(numpy.ravel(edge_pos[:,:,0]))
maxx = numpy.amax(numpy.ravel(edge_pos[:,:,0]))
miny = numpy.amin(numpy.ravel(edge_pos[:,:,1]))
maxy = numpy.amax(numpy.ravel(edge_pos[:,:,1]))
w = maxx-minx
h = maxy-miny
padx, pady = 0.05*w, 0.05*h
corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
ax.update_datalim( corners)
ax.autoscale_view()
# if arrow_collection:
return edge_collection
def draw_networkx_labels(G, pos,
labels=None,
font_size=12,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
ax=None,
**kwds):
"""Draw node labels on the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
font_size : int
Font size for text labels (default=12)
font_color : string
Font color string (default='k' black)
font_family : string
Font family (default='sans-serif')
font_weight : string
Font weight (default='normal')
alpha : float
The text transparency (default=1.0)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> labels=nx.draw_networkx_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pylab as pylab
import matplotlib.cbook as cb
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax=pylab.gca()
if labels is None:
labels=dict( (n,n) for n in G.nodes())
# set optional alignment
horizontalalignment=kwds.get('horizontalalignment','center')
verticalalignment=kwds.get('verticalalignment','center')
text_items={} # there is no text collection so we'll fake one
for n, label in labels.items():
(x,y)=pos[n]
if not cb.is_string_like(label):
label=str(label) # this will cause "1" and 1 to be labeled the same
t=ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
transform = ax.transData,
clip_on=True,
)
text_items[n]=t
return text_items
def draw_networkx_edge_labels(G, pos,
edge_labels=None,
label_pos=0.5,
font_size=10,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
bbox=None,
ax=None,
rotate=True,
**kwds):
"""Draw edge labels.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
alpha : float
The text transparency (default=1.0)
edge_labels : dictionary
Edge labels in a dictionary keyed by edge two-tuple of text
labels (default=None). Only labels for the keys in the dictionary
are drawn.
label_pos : float
Position of edge label along edge (0=head, 0.5=center, 1=tail)
font_size : int
        Font size for text labels (default=10)
font_color : string
Font color string (default='k' black)
font_weight : string
Font weight (default='normal')
font_family : string
Font family (default='sans-serif')
bbox : Matplotlib bbox
Specify text box shape and colors.
clip_on : bool
Turn on clipping at axis boundaries (default=True)
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edge_labels=nx.draw_networkx_edge_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
"""
try:
import matplotlib.pylab as pylab
import matplotlib.cbook as cb
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax=pylab.gca()
if edge_labels is None:
labels=dict( ((u,v), d) for u,v,d in G.edges(data=True) )
else:
labels = edge_labels
text_items={}
for (n1,n2), label in labels.items():
(x1,y1)=pos[n1]
(x2,y2)=pos[n2]
(x,y) = (x1 * label_pos + x2 * (1.0 - label_pos),
y1 * label_pos + y2 * (1.0 - label_pos))
if rotate:
angle=numpy.arctan2(y2-y1,x2-x1)/(2.0*numpy.pi)*360 # degrees
# make label orientation "right-side-up"
if angle > 90:
angle-=180
if angle < - 90:
angle+=180
# transform data coordinate angle to screen coordinate angle
xy=numpy.array((x,y))
trans_angle=ax.transData.transform_angles(numpy.array((angle,)),
xy.reshape((1,2)))[0]
else:
trans_angle=0.0
# use default box of white with white border
if bbox is None:
bbox = dict(boxstyle='round',
ec=(1.0, 1.0, 1.0),
fc=(1.0, 1.0, 1.0),
)
if not cb.is_string_like(label):
label=str(label) # this will cause "1" and 1 to be labeled the same
# set optional alignment
horizontalalignment=kwds.get('horizontalalignment','center')
verticalalignment=kwds.get('verticalalignment','center')
t=ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
rotation=trans_angle,
transform = ax.transData,
bbox = bbox,
zorder = 1,
clip_on=True,
)
text_items[(n1,n2)]=t
return text_items
def draw_circular(G, **kwargs):
"""Draw the graph G with a circular layout."""
draw(G,circular_layout(G),**kwargs)
def draw_random(G, **kwargs):
"""Draw the graph G with a random layout."""
draw(G,random_layout(G),**kwargs)
def draw_spectral(G, **kwargs):
"""Draw the graph G with a spectral layout."""
draw(G,spectral_layout(G),**kwargs)
def draw_spring(G, **kwargs):
"""Draw the graph G with a spring layout."""
draw(G,spring_layout(G),**kwargs)
def draw_shell(G, **kwargs):
"""Draw networkx graph with shell layout."""
nlist = kwargs.get('nlist', None)
    if nlist is not None:
del(kwargs['nlist'])
draw(G,shell_layout(G,nlist=nlist),**kwargs)
def draw_graphviz(G, prog="neato", **kwargs):
"""Draw networkx graph with graphviz layout."""
pos=nx.drawing.graphviz_layout(G,prog)
draw(G,pos,**kwargs)
def draw_nx(G,pos,**kwds):
"""For backward compatibility; use draw or draw_networkx."""
draw(G,pos,**kwds)
# fixture for nose tests
def setup_module(module):
#from nose import SkipTest
try:
import matplotlib as mpl
mpl.use('PS',warn=False)
import pylab
except:
raise nx.NetworkXError("matplotlib not available")
|
[
"andrea.biancini@gmail.com"
] |
andrea.biancini@gmail.com
|
34e841d5c42c6b380b140a639e8f172775f27cef
|
f4c383a560ab431857dbab5289d599696d444d7a
|
/email_automation.py
|
8a7f9af397cf8f1bdaeb99621004ab118eb45ed0
|
[] |
no_license
|
codewithkushagra/webautomation
|
8a76f215be36074129b3ed3b2c964298d7bf6e10
|
3b300071398a764e146c0b4bc2fd10bc0517d1f0
|
refs/heads/master
| 2023-05-02T18:42:01.504806
| 2021-05-26T08:00:56
| 2021-05-26T08:00:56
| 370,952,769
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
import smtplib
server=smtplib.SMTP_SSL("smtp.gmail.com",465)
list_emial=["mehuljaiswal2012@gmail.com","kushagraagra008@gmail.com",]
server.login("kushagraagra008@gmail.com","25march2020l")
server.sendmail("kushagraagra008@gmail.com",list_emial,"Hey Naresh, This email is generated using python")
server.quit()
|
[
"kushagraagra008@gmail.com"
] |
kushagraagra008@gmail.com
|
74561a66252dde406de5ceda8055f0be1640a2cd
|
ea94c9baf90064c3b81750298f8d328316c3eea0
|
/app.py
|
627e6e2a04dace40be920fabd31e2a6b7e9bc2c9
|
[] |
no_license
|
SanUni2020/Site
|
21a0548f5d8fd5e8dd719bca95e13b3aa1debbd1
|
11be2b1a587bc942bdfbace1124e05a3c60fb445
|
refs/heads/main
| 2023-09-06T10:10:58.614870
| 2021-11-12T05:18:00
| 2021-11-12T05:18:00
| 427,230,444
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/mensagem")
def mensagem():
return render_template("mensagem.html")
@app.route("/obrigado")
def obrigado():
return render_template("obrigado.html")
if __name__ == "__main__":
app.run(debug=True)
|
[
"noreply@github.com"
] |
SanUni2020.noreply@github.com
|
40418aedeb61c774d2392e8c3ea4cd197f762866
|
0bb329b86e86f05ac9057669a172b64cb7b01936
|
/practice/myThirdTwo.py
|
7c4f01230953962ba8500ef795577646816202e6
|
[] |
no_license
|
ptallrights/python
|
6426280f3f49b16c5624a5c8e26e0e9d56982486
|
bf100cefa6a22b5b9c267940b120cda8a1ae8402
|
refs/heads/master
| 2021-01-20T19:42:21.884791
| 2016-07-28T14:00:11
| 2016-07-28T14:00:11
| 61,192,755
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,477
|
py
|
#!/usr/bin/python
'myThirdTwo.py -- create text file'
import os
ls = os.linesep
all = []
fname = raw_input("please input a filename:")
#get filename
#while True:
if os.path.exists(fname):
read = raw_input("Are you xiugai the file(r(read) or w(write)):")
if read == "r":
try:
fobj = open(fname,'r')
except IOError,e:
print "*** file open error:",e
else:
#display contents to the screen
for eachLine in fobj:
print eachLine,
fobj.close()
elif read == "w":
print "\nEnter lines ('.' by itself to quit).\n"
while True:
entry = raw_input('>')
if entry == ".":
break
else:
all.append(entry)
else:
print "thank you."
else:
print "\nEnter lines ('.' by itself to quit).\n"
while True:
entry = raw_input('>')
if entry == ".":
break
else:
all.append(entry)
fobj = open(fname,"w")
fobj.writelines(['%s%s' % (x,ls) for x in all])
fobj.close()
print "DONE!"
#print "ERROR: '%s' already exists" % fname
#get file content (txt) lines
#all = []
#print "\nEnter lines ('.' by itself to quit).\n"
#loop until user terminates input
#while True:
# entry = raw_input('>')
# if entry == ".":
# break
# else:
# all.append(entry)
#write lines to file with proper line-ending
#attempt to open file for reading
#try:
# fobj = open(fname,'r')
#except IOError,e:
# print "*** file open error:",e
#else:
# #display contents to the screen
# for eachLine in fobj:
# print eachLine,
# fobj.close()
#
|
[
"ptallrights@163.com"
] |
ptallrights@163.com
|
09d70ca1cd460017689f474fdcbd45bf0bcc7163
|
9cca8467e3aff199470b4eb7d83b2f495994e70d
|
/Week 3/using_tuples.py
|
3b05c5ed6d6b3445ec97d0b313fcfda83b6f6500
|
[] |
no_license
|
andrewlee21/PythonFundamentals
|
78d2424647e4179893781443eddecae51212553b
|
c136018d8fe6333b45adcb33fb1007b61bd276b7
|
refs/heads/master
| 2023-08-14T08:14:55.024623
| 2021-09-16T04:04:24
| 2021-09-16T04:04:24
| 397,338,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
my_tuple = (1, 2, 3, 4, 5)
print(my_tuple[0] + my_tuple[1])
print(my_tuple[-2] + my_tuple[-1])
my_tuple = (5, 4, 3, 2, 1)
print(my_tuple)
|
[
"andrewlee21@sbcglobal.net"
] |
andrewlee21@sbcglobal.net
|
1e17cd4603703f78fef3307911e3585ea18568ef
|
fa5713863cada0177d15e56f5327b79d907a119f
|
/test/plot_jmrs_trkeff.py
|
c1a348b41ca20f15dabf50e782c2d4a5aaeef348
|
[] |
no_license
|
rappoccio/EXOVV
|
1500c126d8053b47fbc425d1c2f9e76f14cb75c5
|
db96edf661398b5bab131bbeba36d331b180d12d
|
refs/heads/master
| 2020-04-03T20:12:57.959191
| 2018-08-24T01:30:03
| 2018-08-24T01:30:03
| 39,910,319
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,419
|
py
|
#! /usr/bin/env python
##################
# Finding the mistag rate plots
##################
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--postfix', type='string', action='store',
dest='postfix',
default = '',
help='Postfix for plots')
(options, args) = parser.parse_args()
argv = []
import math
import ROOT
import sys
import array
ROOT.gROOT.Macro("rootlogon.C")
canvs = []
rgs = []
effs = [1.00, 0.99, 0.98, 0.97, 0.96, 0.95]
effstrs = [ '100', '099', '098', '097', '096', '095' ]
for effstr in effstrs :
f = ROOT.TFile("jmr_ungroomed_trkeff" + effstr + ".root")
c = f.Get("totresc2_0")
c.Draw()
canvs.append(c)
rg = c.GetPrimitive("rg_0").Clone( 'eff_' + effstr )
rgs.append( rg )
rg0 = rgs[0].Clone("rg0")
gs0 = rg0.GetListOfGraphs()
ptBinA = array.array('d', [ 200., 260., 350., 460., 550., 650., 760., 900, 1000, 1100, 1200, 1300, 13000.])
r = 0.8 / math.sqrt(2.)
xmaxes = [ x * r for x in ptBinA ]
xmins = [ x / 20. for x in ptBinA ]
canvs = []
rgsdiv = []
for irg,rg in enumerate(rgs):
ci = ROOT.TCanvas("c" + rg.GetName(), "c" + rg.GetName() )
gs = rg.GetListOfGraphs()
rgdiv = ROOT.TMultiGraph( rg.GetName() + "_div", "Track Efficiency = " + str(effs[irg]) + rg.GetTitle() + " Uncertainty")
for ig,g in enumerate(gs):
xdiv = array.array('d', [])
ydiv = array.array('d', [])
for i in xrange( g.GetN() ):
x = ROOT.Double(0.0)
y = ROOT.Double(0.0)
y0 = ROOT.Double(0.0)
dy = g.GetErrorY(i)
g.GetPoint(i,x,y)
gs0[ig].GetPoint(i,x,y0)
if y0 > 0.0 and y > 0.0 and dy / y < 0.75 and x > xmins[ig] and x < xmaxes[ig] :
xdiv.append( x )
ydiv.append( (y-y0)/y0)
gdiv = ROOT.TGraph( len(xdiv), xdiv, ydiv )
gdiv.SetName(g.GetName() + "_div")
gdiv.SetLineStyle(g.GetLineStyle())
gdiv.SetLineColor(g.GetLineColor())
rgdiv.Add( gdiv )
rgsdiv.append( rgdiv )
ci.cd()
rgdiv.Draw("AL")
rgdiv.GetHistogram().SetTitleOffset(1.0, "Y")
rgdiv.SetMinimum(0.0)
rgdiv.SetMaximum(0.5)
ci.Update()
canvs.append(ci)
ci.Print("jmr_unc_trkeff" + effstr[irg] + ".png", "png" )
ci.Print("jmr_unc_trkeff" + effstr[irg] + ".pdf", "pdf" )
|
[
"rappoccio@gmail.com"
] |
rappoccio@gmail.com
|
398a3b22c9a8d2f74c15036657c9e1d26e1f740c
|
a207fa961aec11d332b7402dfe1ecbc4b9690b0a
|
/portfolio/managers.py
|
f8549501fdccdac50db71020f0994cc9f2ccfa8c
|
[] |
no_license
|
powellc/django-portfolio
|
20157d1ae931e51de4542fe9e0797d763347242a
|
9cc6ff1f422694748fe82670ddf4cb4f667a3b94
|
refs/heads/master
| 2016-09-05T12:44:52.186559
| 2010-04-24T13:02:25
| 2010-04-24T13:02:25
| 280,024
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
from django.db.models import Manager
class PublicManager(Manager):
"""Returns only public projects."""
def public(self):
return self.get_query_set().filter(is_public=True)
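# Usage sketch (illustrative only, not part of the original file). It assumes
# a hypothetical Project model that attaches this manager and defines an
# `is_public` boolean field:
#
#     class Project(models.Model):
#         is_public = models.BooleanField(default=False)
#         objects = PublicManager()
#
#     Project.objects.public()  # queryset containing only public projects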
|
[
"colin@rubyvroom.com"
] |
colin@rubyvroom.com
|
7594dd6e8da2e44e17b6c1fed236f6cf84c16ad0
|
4746567b5d576fd67063390fed9f834637ee5282
|
/Assignment3/Assignment3.py
|
de660edecc6c2848290d3984f82be0f63d6f3d3f
|
[] |
no_license
|
abaez004/Python-Assignments
|
0d0ff415577a2a91f848cbfe00de79af3b88214b
|
7a13bef2b05e5800681ca13a331ec011a7b3bce5
|
refs/heads/master
| 2020-06-12T20:46:27.579627
| 2019-06-29T15:34:29
| 2019-06-29T15:34:29
| 194,420,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,230
|
py
|
#Angel Baez 9:30 AM Lab Assignment #3
#gets input from the user and validates using exception handling
def collatz_input():
try:
n = int(input("Enter a positive integer for n: "))
if n > 0:
return n
else:
print("Your value must be a positive integer")
except ValueError:
print("You must enter a valid positive integer")
#prints collatz conjecture of n
def collatz_conjecture(n):
if n == 1:
print(n)
return
elif n % 2 == 0:
print(n)
collatz_conjecture(n//2)
else:
print(n)
collatz_conjecture(3*n+1)
#used to get and verify input for prime_factor and prime_less_than
def prime_factor_input():
try:
n = int(input("Enter an integer: "))
if n > 1:
return n
else:
print("You must enter an integer greater than 0")
except ValueError:
print("You must enter a valid positive integer")
#returns true if the n1 is prime, false otherwise
def is_prime(n1,n2):
if n2 == 1:
return True
elif n1 % n2 == 0:
return False
else :
return is_prime(n1, n2 - 1)
def prime_factor(n1, n2):
if n2 == 1:
return
elif n1 % n2 == 0 and is_prime(n2, n2//2):
print(n2, " ")
prime_factor(n1, n2-1)
else:
prime_factor(n1, n2-1)
#prints all prime numbers less than or equal to n1
def prime_less_than(n1):
if n1 < 2:
return
elif is_prime(n1,n1//2):
print(n1)
prime_less_than(n1 - 1)
else:
prime_less_than(n1 - 1)
n = collatz_input()
collatz_conjecture(n)
try:
a = int(input("Enter a number:"))
b = int(input("Enter a number:"))
print(a/b)
except ValueError:
print("Please enter a valid value for a and b")
except ZeroDivisionError:
print("Error, divide by zero")
except:
print("Something unexpected happened")
n1 = prime_factor_input()
print("The prime factors of", "n1", "are: ")
prime_factor(n1, n1)
n2 = prime_factor_input()
print("The prime numbers less than or equal to", "n2", "are: ")
prime_less_than(n2)
|
[
"noreply@github.com"
] |
abaez004.noreply@github.com
|
ae321959d0b8067b5c41602c292802fbc60f6630
|
a7493491dcba9f7d8b9df81b962d05ac1a4bddec
|
/RGB2LAB.py
|
c0a7177b5b2fde25883879d2db31feca98a1968a
|
[] |
no_license
|
anupam54/Work4Project
|
5a5238a8d9d560727c44fc5e070451f69c06554a
|
d88fd6fb21602ddbdb7bfe53bb788e8b1811d56f
|
refs/heads/master
| 2022-12-26T01:55:46.171667
| 2020-09-13T15:44:08
| 2020-09-13T15:44:08
| 289,264,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 13 16:14:32 2020
@author: hp
"""
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 19:44:07 2020
@author: Anupam kumar
"""
import cv2
import glob
path = "Images/*"
for file in glob.glob(path):
print(file)
a = cv2.imread(file)
print(a)
    # cv2.imread loads images in BGR order, so convert from BGR (not RGB) to LAB
    c = cv2.cvtColor(a, cv2.COLOR_BGR2LAB)
cv2.imshow("Color Image",c)
cv2.waitKey()
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
anupam54.noreply@github.com
|
d4973cc9c7c20802539e2bc13e2c21912770668e
|
7b5a2a0c8ba4943002a06445689024fc619d4ab1
|
/start-server.py
|
6bd2e13bc7b858c0c8cb22e86322e67f530b0ffd
|
[] |
no_license
|
behroozomidvar/INTEX-2
|
e0b8c43fe23c0b408286cbebe54bd63fc81dc378
|
a99543af78640cc35bcd18b122c334cd4420537a
|
refs/heads/main
| 2023-08-14T23:02:33.140357
| 2021-09-24T13:23:49
| 2021-09-24T13:23:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86
|
py
|
import uvicorn
from server.main import *
uvicorn.run(app, host="0.0.0.0", port=8080)
|
[
"behroozomidvar@users.noreply.github.com"
] |
behroozomidvar@users.noreply.github.com
|
81b39ea51e3e4b438201223cb69948e960c76bc3
|
ccf8041c43e7ac94c5e4bb24379da62a8e0ecbb0
|
/rsl_reader.py
|
7c879149e6e3c7fcc9b599954656449da59c1c4e
|
[] |
no_license
|
letrecf/psa_tools
|
8634a789868a527c6f2b09e09010434a7048cb51
|
5da8941e14e1147b45ca7a1dc43b3ec8d1af205b
|
refs/heads/master
| 2021-01-09T20:47:50.893648
| 2016-09-24T10:13:32
| 2016-09-24T10:13:32
| 63,040,993
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,423
|
py
|
import codecs
import logging
# assuming these helper modules expose classes of the same name
from hanging_indent_data import hanging_indent_data
from unstructured_text import unstructured_text
class rsl_reader(object):
def __init__(self,filepath,logger):
self.filepath = filepath
self.logger = logger
def read_logic(self):
try:
input = unstructured_text(self.filepath,self.logger).get_section(anchor=["FAULT TREE LOGIC"],skip=3)
columns = [
("id", True, 0,13,True),
("name",True,13,34,True),
("type",True,34,39,True),
("item",False,39,61,False),
("item",False,61,83,False),
("item",False,83,200,False)
]
dat = hanging_indent_data(input,columns,self.logger)
for item in dat.parse():
yield item
except:
self.logger.exception("Error parsing logic in RSL file")
def read_events(self):
equalline = "="*105
try:
input = unstructured_text(self.filepath,self.logger).get_section(anchor=[equalline,"EVENTS"],skip=3,terminator=equalline)
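			# each column tuple below is (field name, start column, end column)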
columns = [
("name", 0,23),
("value",23,36),
("model",49,66),
("ccf_model",66,200)
]
for line in input:
o = {}
				for c in columns:
t = (line[c[1]:c[2]]).strip()
if t:
o[c[0]]=float(t) if c[0]=="value" else t
yield o
except:
self.logger.exception("Error parsing events in RSL file")
|
[
"letrecf.git@gmail.com"
] |
letrecf.git@gmail.com
|
5c1d8d1ebbdd0dcdabd40b7527d769a8a03f1b4b
|
a522e109b81b49729b9f67749315a6889482318f
|
/utils/diayn_gym_env_fixed.py
|
1e3730429a48b8125e3c728735fdcba67e372158
|
[
"Apache-2.0"
] |
permissive
|
ademiadeniji/lords
|
1b166ffdd0ad91e5261d5885fb694d502e722d17
|
75ce115ec7f950d857d0817eb0adf2cc2673ffdd
|
refs/heads/master
| 2023-06-29T15:46:58.396324
| 2021-08-04T19:21:32
| 2021-08-04T19:21:32
| 377,057,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,740
|
py
|
"""Gym wrapper for training the DIAYN agent.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pdb
import gym
class DiaynGymEnvFixed(gym.Env):
"""Gym wrapper for training the DIAYN agent.
Augment the observation by adding a randomly sampled z.
"""
def __init__(self, environment, dim_z, categorical):
"""Initializes the environment.
"""
self._env = environment
if categorical:
self._z_space = gym.spaces.Discrete(n=dim_z)
else:
self._z_space = gym.spaces.Box(low=-1, high=1., shape=[dim_z])
self._z = 0
self._categorical = categorical
@property
def observation_space(self):
return gym.spaces.Dict({
'observation': self._env.observation_space,
'z': self._z_space,
})
@property
def action_space(self):
return self._env.action_space
@property
def z_space(self):
return self._z_space
@property
def reward_range(self):
return self._env.reward_range
@property
def meta_data(self):
return self._env.meta_data
def reset(self):
observation = self._env.reset()
return self._augment_ob(observation)
def set_z(self, z):
self._z = z
return
def step(self, action):
observation, reward, done, info = self._env.step(action)
return self._augment_ob(observation), reward, done, info
def _augment_ob(self, ob, seed=None):
return {
'observation': ob,
'z': self._z,
}
def render(self, mode='rgb_array'):
return self._env.render(mode)
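# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original
# module). 'CartPole-v1' is only an example environment id; any gym env works.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    env = DiaynGymEnvFixed(gym.make('CartPole-v1'), dim_z=8, categorical=True)
    env.set_z(env.z_space.sample())           # fix the skill z for this episode
    ob = env.reset()                           # dict with 'observation' and 'z'
    ob, reward, done, info = env.step(env.action_space.sample())
    print('z =', ob['z'], 'reward =', reward)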
|
[
"ademiadeniji7@gmail.com"
] |
ademiadeniji7@gmail.com
|
4c4049acf8a5bd9784df24cdbcf11053a25a8765
|
e4ae4d2afd49a5402b87657f972274e7e8b46d57
|
/fav.py
|
930ffc3eea40df426a065f1cc8f511485ebe12cf
|
[] |
no_license
|
samyoungnyc/fav_twitter_custom
|
a7e8770858ef67eb70b83eac4bd719a9bca7d32a
|
d088d673433a04d2a44bb8934a79ab1fb460ddc5
|
refs/heads/master
| 2016-09-01T19:42:24.162151
| 2013-08-15T19:43:46
| 2013-08-15T19:43:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,687
|
py
|
import twitter #import the twitter module
api = twitter.Api(consumer_key='', consumer_secret='', access_token_key='', access_token_secret='') # obtain your keys from Twitter API and enter them here
f = open('last_used.conf', 'r') # set var f to open last_used.conf and (r)ead
id = f.readline() # reads a line, adds \n and returns identity of an object, sets it to id
m = open('new_favs.txt', 'w') # sets var m to open new_favs.txt and (w)rite
f.close() # closes f (file)
for page in xrange(1, 10): # a for loop to run through Twitter pages
statuses = api.GetSearch("#wordintweet", count=500) # sets var statuses, result of api.Getsearch and custom words and counts to favorite
count = 0 # sets var count to 0
for s in statuses:
		friend = api.GetUser(TWITTER_USER_ID) # placeholder: set TWITTER_USER_ID to your own numeric Twitter user id
if friend.followers_count == 0:
follow_ratio = 1
else:
follow_ratio = friend.friends_count / friend.followers_count
if follow_ratio > 0:
#api.CreateFriendship(s.user.screen_name)
status = api.GetStatus(s.id)
if friend.id not in api.GetFriendIDs('customTwittername'):
if status.favorited == False:
print str(follow_ratio) + " Ratio"
print str(status.user.screen_name) + " favorited"
api.CreateFavorite(status) # use api to CreateFavorite
count += 1
m.write(str(status.id) + "\n")
id = s.id
print "Favorited: " + str(count) + " tweets"
f = open('last_used.conf', 'w')
print id
f.write(str(id))
f.close()
m.close()
|
[
"sambyoung@gmail.com"
] |
sambyoung@gmail.com
|
4448d6c8c421b18d7c9450edff543c95675794b8
|
426e56d0d15dfb9609dc31e273baa2cc0d249fdd
|
/certificates/custom_components/mikrotik_router/switch.py
|
d535829ebfb2a705abcd945370be0380f137c179
|
[] |
no_license
|
hellad/hass-config
|
24689dbf61446e2a9cf2d216c933242a4bdec7e8
|
3988d204908478996fffa433faffa9ea20f42562
|
refs/heads/master
| 2023-03-19T14:41:24.300034
| 2023-03-05T20:33:31
| 2023-03-05T20:33:31
| 234,546,187
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,639
|
py
|
"""Support for the Mikrotik Router switches."""
import logging
from typing import Any, Optional
from collections.abc import Mapping
from homeassistant.components.switch import SwitchEntity
from homeassistant.const import CONF_NAME, CONF_HOST, ATTR_ATTRIBUTION
from homeassistant.core import callback
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.restore_state import RestoreEntity
from .helper import format_attribute
from .const import DOMAIN, ATTRIBUTION
from .switch_types import (
MikrotikSwitchEntityDescription,
SWITCH_TYPES,
DEVICE_ATTRIBUTES_IFACE_ETHER,
DEVICE_ATTRIBUTES_IFACE_SFP,
)
_LOGGER = logging.getLogger(__name__)
# ---------------------------
# async_setup_entry
# ---------------------------
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up switches for Mikrotik Router component."""
inst = config_entry.data[CONF_NAME]
mikrotik_controller = hass.data[DOMAIN][config_entry.entry_id]
switches = {}
@callback
def update_controller():
"""Update the values of the controller."""
update_items(inst, mikrotik_controller, async_add_entities, switches)
mikrotik_controller.listeners.append(
async_dispatcher_connect(
hass, mikrotik_controller.signal_update, update_controller
)
)
update_controller()
# ---------------------------
# update_items
# ---------------------------
@callback
def update_items(inst, mikrotik_controller, async_add_entities, switches):
"""Update device switch state from the controller."""
new_switches = []
# Add switches
for switch, sid_func in zip(
# Switch type name
[
"interface",
"nat",
"mangle",
"filter",
"ppp_secret",
"queue",
"kidcontrol_enable",
"kidcontrol_pause",
],
# Entity function
[
MikrotikControllerPortSwitch,
MikrotikControllerNATSwitch,
MikrotikControllerMangleSwitch,
MikrotikControllerFilterSwitch,
MikrotikControllerSwitch,
MikrotikControllerQueueSwitch,
MikrotikControllerSwitch,
MikrotikControllerKidcontrolPauseSwitch,
],
):
uid_switch = SWITCH_TYPES[switch]
for uid in mikrotik_controller.data[SWITCH_TYPES[switch].data_path]:
uid_data = mikrotik_controller.data[SWITCH_TYPES[switch].data_path]
item_id = f"{inst}-{switch}-{uid_data[uid][uid_switch.data_reference]}"
_LOGGER.debug("Updating sensor %s", item_id)
if item_id in switches:
if switches[item_id].enabled:
switches[item_id].async_schedule_update_ha_state()
continue
switches[item_id] = sid_func(
inst=inst,
uid=uid,
mikrotik_controller=mikrotik_controller,
entity_description=uid_switch,
)
new_switches.append(switches[item_id])
if new_switches:
async_add_entities(new_switches)
# ---------------------------
# MikrotikControllerSwitch
# ---------------------------
class MikrotikControllerSwitch(SwitchEntity, RestoreEntity):
"""Representation of a switch."""
def __init__(
self,
inst,
uid,
mikrotik_controller,
entity_description: MikrotikSwitchEntityDescription,
):
self.entity_description = entity_description
self._inst = inst
self._ctrl = mikrotik_controller
self._attr_extra_state_attributes = {ATTR_ATTRIBUTION: ATTRIBUTION}
self._data = mikrotik_controller.data[self.entity_description.data_path][uid]
@property
def available(self) -> bool:
"""Return if controller is available."""
return self._ctrl.connected()
@property
def name(self) -> str:
"""Return the name."""
if self.entity_description.data_name_comment and self._data["comment"]:
return (
f"{self._inst} {self.entity_description.name} {self._data['comment']}"
)
return f"{self._inst} {self.entity_description.name} {self._data[self.entity_description.data_name]}"
@property
def unique_id(self) -> str:
"""Return a unique id for this entity."""
return f"{self._inst.lower()}-{self.entity_description.key}-{self._data[self.entity_description.data_reference].lower()}"
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return self._data[self.entity_description.data_is_on]
@property
def icon(self) -> str:
"""Return the icon."""
if self._data[self.entity_description.data_is_on]:
return self.entity_description.icon_enabled
else:
return self.entity_description.icon_disabled
@property
def extra_state_attributes(self) -> Mapping[str, Any]:
"""Return the state attributes."""
attributes = super().extra_state_attributes
for variable in self.entity_description.data_attributes_list:
if variable in self._data:
attributes[format_attribute(variable)] = self._data[variable]
return attributes
def turn_on(self, **kwargs: Any) -> None:
"""Required abstract method."""
pass
def turn_off(self, **kwargs: Any) -> None:
"""Required abstract method."""
pass
async def async_turn_on(self) -> None:
"""Turn on the switch."""
path = self.entity_description.data_switch_path
param = self.entity_description.data_reference
value = self._data[self.entity_description.data_reference]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, False)
await self._ctrl.force_update()
async def async_turn_off(self) -> None:
"""Turn off the switch."""
path = self.entity_description.data_switch_path
param = self.entity_description.data_reference
value = self._data[self.entity_description.data_reference]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, True)
await self._ctrl.async_update()
@property
def device_info(self) -> DeviceInfo:
"""Return a description for device registry."""
dev_connection = DOMAIN
dev_connection_value = self.entity_description.data_reference
dev_group = self.entity_description.ha_group
if self.entity_description.ha_group.startswith("data__"):
dev_group = self.entity_description.ha_group[6:]
if dev_group in self._data:
dev_group = self._data[dev_group]
dev_connection_value = dev_group
if self.entity_description.ha_connection:
dev_connection = self.entity_description.ha_connection
if self.entity_description.ha_connection_value:
dev_connection_value = self.entity_description.ha_connection_value
if dev_connection_value.startswith("data__"):
dev_connection_value = dev_connection_value[6:]
dev_connection_value = self._data[dev_connection_value]
info = DeviceInfo(
connections={(dev_connection, f"{dev_connection_value}")},
identifiers={(dev_connection, f"{dev_connection_value}")},
default_name=f"{self._inst} {dev_group}",
model=f"{self._ctrl.data['resource']['board-name']}",
manufacturer=f"{self._ctrl.data['resource']['platform']}",
sw_version=f"{self._ctrl.data['resource']['version']}",
configuration_url=f"http://{self._ctrl.config_entry.data[CONF_HOST]}",
via_device=(DOMAIN, f"{self._ctrl.data['routerboard']['serial-number']}"),
)
return info
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
_LOGGER.debug("New switch %s (%s)", self._inst, self.unique_id)
# ---------------------------
# MikrotikControllerPortSwitch
# ---------------------------
class MikrotikControllerPortSwitch(MikrotikControllerSwitch):
"""Representation of a network port switch."""
@property
def extra_state_attributes(self) -> Mapping[str, Any]:
"""Return the state attributes."""
attributes = super().extra_state_attributes
if self._data["type"] == "ether":
for variable in DEVICE_ATTRIBUTES_IFACE_ETHER:
if variable in self._data:
attributes[format_attribute(variable)] = self._data[variable]
if "sfp-shutdown-temperature" in self._data:
for variable in DEVICE_ATTRIBUTES_IFACE_SFP:
if variable in self._data:
attributes[format_attribute(variable)] = self._data[variable]
return attributes
@property
def icon(self) -> str:
"""Return the icon."""
if self._data["running"]:
icon = self.entity_description.icon_enabled
else:
icon = self.entity_description.icon_disabled
if not self._data["enabled"]:
icon = "mdi:lan-disconnect"
return icon
async def async_turn_on(self) -> Optional[str]:
"""Turn on the switch."""
path = self.entity_description.data_switch_path
param = self.entity_description.data_reference
if self._data["about"] == "managed by CAPsMAN":
_LOGGER.error("Unable to enable %s, managed by CAPsMAN", self._data[param])
return "managed by CAPsMAN"
if "-" in self._data["port-mac-address"]:
param = "name"
value = self._data[self.entity_description.data_reference]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, False)
if "poe-out" in self._data and self._data["poe-out"] == "off":
path = "/interface/ethernet"
self._ctrl.set_value(path, param, value, "poe-out", "auto-on")
await self._ctrl.force_update()
async def async_turn_off(self) -> Optional[str]:
"""Turn off the switch."""
path = self.entity_description.data_switch_path
param = self.entity_description.data_reference
if self._data["about"] == "managed by CAPsMAN":
_LOGGER.error("Unable to disable %s, managed by CAPsMAN", self._data[param])
return "managed by CAPsMAN"
if "-" in self._data["port-mac-address"]:
param = "name"
value = self._data[self.entity_description.data_reference]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, True)
if "poe-out" in self._data and self._data["poe-out"] == "auto-on":
path = "/interface/ethernet"
self._ctrl.set_value(path, param, value, "poe-out", "off")
await self._ctrl.async_update()
# ---------------------------
# MikrotikControllerNATSwitch
# ---------------------------
class MikrotikControllerNATSwitch(MikrotikControllerSwitch):
"""Representation of a NAT switch."""
@property
def name(self) -> str:
"""Return the name."""
if self._data["comment"]:
return f"{self._inst} NAT {self._data['comment']}"
return f"{self._inst} NAT {self._data['name']}"
async def async_turn_on(self) -> None:
"""Turn on the switch."""
path = self.entity_description.data_switch_path
param = ".id"
value = None
for uid in self._ctrl.data["nat"]:
if self._ctrl.data["nat"][uid]["uniq-id"] == (
f"{self._data['chain']},{self._data['action']},{self._data['protocol']},"
f"{self._data['in-interface']}:{self._data['dst-port']}-"
f"{self._data['out-interface']}:{self._data['to-addresses']}:{self._data['to-ports']}"
):
value = self._ctrl.data["nat"][uid][".id"]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, False)
await self._ctrl.force_update()
async def async_turn_off(self) -> None:
"""Turn off the switch."""
path = self.entity_description.data_switch_path
param = ".id"
value = None
for uid in self._ctrl.data["nat"]:
if self._ctrl.data["nat"][uid]["uniq-id"] == (
f"{self._data['chain']},{self._data['action']},{self._data['protocol']},"
f"{self._data['in-interface']}:{self._data['dst-port']}-"
f"{self._data['out-interface']}:{self._data['to-addresses']}:{self._data['to-ports']}"
):
value = self._ctrl.data["nat"][uid][".id"]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, True)
await self._ctrl.async_update()
# ---------------------------
# MikrotikControllerMangleSwitch
# ---------------------------
class MikrotikControllerMangleSwitch(MikrotikControllerSwitch):
"""Representation of a Mangle switch."""
async def async_turn_on(self) -> None:
"""Turn on the switch."""
path = self.entity_description.data_switch_path
param = ".id"
value = None
for uid in self._ctrl.data["mangle"]:
if self._ctrl.data["mangle"][uid]["uniq-id"] == (
f"{self._data['chain']},{self._data['action']},{self._data['protocol']},"
f"{self._data['src-address']}:{self._data['src-port']}-"
f"{self._data['dst-address']}:{self._data['dst-port']},"
f"{self._data['src-address-list']}-{self._data['dst-address-list']}"
):
value = self._ctrl.data["mangle"][uid][".id"]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, False)
await self._ctrl.force_update()
async def async_turn_off(self) -> None:
"""Turn off the switch."""
path = self.entity_description.data_switch_path
param = ".id"
value = None
for uid in self._ctrl.data["mangle"]:
if self._ctrl.data["mangle"][uid]["uniq-id"] == (
f"{self._data['chain']},{self._data['action']},{self._data['protocol']},"
f"{self._data['src-address']}:{self._data['src-port']}-"
f"{self._data['dst-address']}:{self._data['dst-port']},"
f"{self._data['src-address-list']}-{self._data['dst-address-list']}"
):
value = self._ctrl.data["mangle"][uid][".id"]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, True)
await self._ctrl.async_update()
# ---------------------------
# MikrotikControllerFilterSwitch
# ---------------------------
class MikrotikControllerFilterSwitch(MikrotikControllerSwitch):
"""Representation of a Filter switch."""
async def async_turn_on(self) -> None:
"""Turn on the switch."""
path = self.entity_description.data_switch_path
param = ".id"
value = None
for uid in self._ctrl.data["filter"]:
if self._ctrl.data["filter"][uid]["uniq-id"] == (
f"{self._data['chain']},{self._data['action']},{self._data['protocol']},{self._data['layer7-protocol']},"
f"{self._data['in-interface']},{self._data['in-interface-list']}:{self._data['src-address']},{self._data['src-address-list']}:{self._data['src-port']}-"
f"{self._data['out-interface']},{self._data['out-interface-list']}:{self._data['dst-address']},{self._data['dst-address-list']}:{self._data['dst-port']}"
):
value = self._ctrl.data["filter"][uid][".id"]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, False)
await self._ctrl.force_update()
async def async_turn_off(self) -> None:
"""Turn off the switch."""
path = self.entity_description.data_switch_path
param = ".id"
value = None
for uid in self._ctrl.data["filter"]:
if self._ctrl.data["filter"][uid]["uniq-id"] == (
f"{self._data['chain']},{self._data['action']},{self._data['protocol']},{self._data['layer7-protocol']},"
f"{self._data['in-interface']},{self._data['in-interface-list']}:{self._data['src-address']},{self._data['src-address-list']}:{self._data['src-port']}-"
f"{self._data['out-interface']},{self._data['out-interface-list']}:{self._data['dst-address']},{self._data['dst-address-list']}:{self._data['dst-port']}"
):
value = self._ctrl.data["filter"][uid][".id"]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, True)
await self._ctrl.async_update()
# ---------------------------
# MikrotikControllerQueueSwitch
# ---------------------------
class MikrotikControllerQueueSwitch(MikrotikControllerSwitch):
"""Representation of a queue switch."""
async def async_turn_on(self) -> None:
"""Turn on the switch."""
path = self.entity_description.data_switch_path
param = ".id"
value = None
for uid in self._ctrl.data["queue"]:
if self._ctrl.data["queue"][uid]["name"] == f"{self._data['name']}":
value = self._ctrl.data["queue"][uid][".id"]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, False)
await self._ctrl.force_update()
async def async_turn_off(self) -> None:
"""Turn off the switch."""
path = self.entity_description.data_switch_path
param = ".id"
value = None
for uid in self._ctrl.data["queue"]:
if self._ctrl.data["queue"][uid]["name"] == f"{self._data['name']}":
value = self._ctrl.data["queue"][uid][".id"]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, True)
await self._ctrl.async_update()
# ---------------------------
# MikrotikControllerKidcontrolPauseSwitch
# ---------------------------
class MikrotikControllerKidcontrolPauseSwitch(MikrotikControllerSwitch):
"""Representation of a queue switch."""
async def async_turn_on(self) -> None:
"""Turn on the switch."""
path = self.entity_description.data_switch_path
param = self.entity_description.data_reference
value = self._data[self.entity_description.data_reference]
command = "resume"
self._ctrl.execute(path, command, param, value)
await self._ctrl.force_update()
async def async_turn_off(self) -> None:
"""Turn off the switch."""
path = self.entity_description.data_switch_path
param = self.entity_description.data_reference
value = self._data[self.entity_description.data_reference]
command = "pause"
self._ctrl.execute(path, command, param, value)
await self._ctrl.async_update()
|
[
"hellad@mail.ru"
] |
hellad@mail.ru
|
b9976fcc8a187ef1316be16450346f233bf79a7e
|
738a072f2cd6969c30a89fc9c72a8af3995df076
|
/allRunn.py
|
812eb7a8304239ba54330c7c267948fa32a7967e
|
[] |
no_license
|
yagniksuchak2/CodeParser
|
69465da3f954582fc1e1a8adf3bfb57e3cc4034e
|
2a23e6bde0da628a5b88518a67ee3ca934966849
|
refs/heads/master
| 2020-04-05T23:16:24.985001
| 2015-08-19T21:11:36
| 2015-08-19T21:11:36
| 40,984,321
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,546
|
py
|
#get the path of directory in which project directories are there. Assume dirsPath
#rootdir ='C:\Users\Yagnik\PycharmProjects\Top_Project'
import os
import sys
import ghProc
from logChunk import getExceptionKeyword
#print os.listdir(rootdir)
# for subdir, dirs, files in os.walk(rootdir):
# print dirs
def main():
print "Utility to BULK process github logs"
if len(sys.argv) < 2:
print "!!! Usage: python allRun.py top_project directory"
sys.exit()
if not os.path.isdir("../Results"):
os.mkdir("../Results")
fPtrChangeSummary=open("../Results/"+"ChangeSummary.csv",'w')
fPtrChangeSummary.write("project,sha,author,commit_date,is_bug\n")
fPtrPatchSummary=open("../Results/"+"PatchSummary.csv",'w')
fPtrMisMatchSummary=open("../Results/"+"MisMatchSummary.csv",'w')
fPtrMisMatchSummary.write("project,Total,Match,MisMatch,Exception,matchException,misMatchException\n")
lst=[]
listToDict={}
getExceptionKeyword(lst)
for keyword in lst:
listToDict[str(keyword)+" Adds"]=0
listToDict[str(keyword)+" Dels"]=0
fPtrPatchSummary.write("project, sha, language, file_name, is_test,bracket_diff,isExceptionPatch, method_name,total_add,total_del,uniqueExcepAdd,uniqueExcepDel,%s\n"%",".join(listToDict.keys()))
fPtrChangeSummary.close()
fPtrPatchSummary.close()
fPtrMisMatchSummary.close()
rootdir = sys.argv[1]
for dir in os.listdir(rootdir):
path= os.path.join(rootdir,dir)
print path
os.system('python ghProc.py %s'%path)
if __name__ == '__main__':
main()
|
[
"yvsuchak@ucdavis.edu"
] |
yvsuchak@ucdavis.edu
|
07ac073b25aff0899c09b1e6f460eb15191384c5
|
7c4c82ccbcfff21ea5dd40644ccd3ca674ec2be8
|
/bin/protoc-gen-lua
|
982b7e3ec091867143443327f95c4e20b3392ed7
|
[] |
no_license
|
konser/go-pack
|
22d517e15e9e1d288a3c395cd14b8dc15f178ffe
|
a40a5c4809a7c3e861b2dc4910b13cf41a2281f2
|
refs/heads/master
| 2023-03-12T06:23:53.175212
| 2021-03-01T02:53:20
| 2021-03-01T02:53:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,935
|
#!/usr/bin/env python
# -*- encoding:utf8 -*-
# protoc-gen-erl
# Google's Protocol Buffers project, ported to lua.
# https://code.google.com/p/protoc-gen-lua/
#
# Copyright (c) 2010 , 林卓毅 (Zhuoyi Lin) netsnail@gmail.com
# All rights reserved.
#
# Use, modification and distribution are subject to the "New BSD License"
# as listed at <url: http://www.opensource.org/licenses/bsd-license.php >.
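#
# Editor's note (hedged, not part of the upstream header): as a standard protoc
# code-generator plugin, this script is normally not run by hand. Assuming the
# executable is named protoc-gen-lua and the paths below are purely illustrative,
# a typical invocation looks like:
#
#     protoc --plugin=protoc-gen-lua=./bin/protoc-gen-lua --lua_out=./out foo.proto
#
# protoc streams a serialized CodeGeneratorRequest to this script's stdin and
# expects a serialized CodeGeneratorResponse on stdout (see main() below).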
import sys
import os.path as path
from cStringIO import StringIO
import plugin_pb2
import google.protobuf.descriptor_pb2 as descriptor_pb2
_packages = {}
_files = {}
_message = {}
FDP = plugin_pb2.descriptor_pb2.FieldDescriptorProto
if sys.platform == "win32":
import msvcrt, os
msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
class CppType:
CPPTYPE_INT32 = 1
CPPTYPE_INT64 = 2
CPPTYPE_UINT32 = 3
CPPTYPE_UINT64 = 4
CPPTYPE_DOUBLE = 5
CPPTYPE_FLOAT = 6
CPPTYPE_BOOL = 7
CPPTYPE_ENUM = 8
CPPTYPE_STRING = 9
CPPTYPE_MESSAGE = 10
CPP_TYPE ={
FDP.TYPE_DOUBLE : CppType.CPPTYPE_DOUBLE,
FDP.TYPE_FLOAT : CppType.CPPTYPE_FLOAT,
FDP.TYPE_INT64 : CppType.CPPTYPE_INT64,
FDP.TYPE_UINT64 : CppType.CPPTYPE_UINT64,
FDP.TYPE_INT32 : CppType.CPPTYPE_INT32,
FDP.TYPE_FIXED64 : CppType.CPPTYPE_UINT64,
FDP.TYPE_FIXED32 : CppType.CPPTYPE_UINT32,
FDP.TYPE_BOOL : CppType.CPPTYPE_BOOL,
FDP.TYPE_STRING : CppType.CPPTYPE_STRING,
FDP.TYPE_MESSAGE : CppType.CPPTYPE_MESSAGE,
FDP.TYPE_BYTES : CppType.CPPTYPE_STRING,
FDP.TYPE_UINT32 : CppType.CPPTYPE_UINT32,
FDP.TYPE_ENUM : CppType.CPPTYPE_ENUM,
FDP.TYPE_SFIXED32 : CppType.CPPTYPE_INT32,
FDP.TYPE_SFIXED64 : CppType.CPPTYPE_INT64,
FDP.TYPE_SINT32 : CppType.CPPTYPE_INT32,
FDP.TYPE_SINT64 : CppType.CPPTYPE_INT64
}
def printerr(*args):
sys.stderr.write(" ".join(args))
sys.stderr.write("\n")
sys.stderr.flush()
class TreeNode(object):
def __init__(self, name, parent=None, filename=None, package=None):
super(TreeNode, self).__init__()
self.child = []
self.parent = parent
self.filename = filename
self.package = package
if parent:
self.parent.add_child(self)
self.name = name
def add_child(self, child):
self.child.append(child)
def find_child(self, child_names):
if child_names:
for i in self.child:
if i.name == child_names[0]:
return i.find_child(child_names[1:])
raise StandardError
else:
return self
def get_child(self, child_name):
for i in self.child:
if i.name == child_name:
return i
return None
def get_path(self, end = None):
pos = self
out = []
while pos and pos != end:
out.append(pos.name)
pos = pos.parent
out.reverse()
return '.'.join(out)
def get_global_name(self):
return self.get_path()
def get_local_name(self):
pos = self
while pos.parent:
pos = pos.parent
if self.package and pos.name == self.package[-1]:
break
return self.get_path(pos)
def __str__(self):
return self.to_string(0)
def __repr__(self):
return str(self)
def to_string(self, indent = 0):
return ' '*indent + '<TreeNode ' + self.name + '(\n' + \
','.join([i.to_string(indent + 4) for i in self.child]) + \
' '*indent +')>\n'
class Env(object):
filename = None
package = None
extend = None
descriptor = None
message = None
context = None
register = None
def __init__(self):
self.message_tree = TreeNode('')
self.scope = self.message_tree
def get_global_name(self):
return self.scope.get_global_name()
def get_local_name(self):
return self.scope.get_local_name()
def get_ref_name(self, type_name):
try:
node = self.lookup_name(type_name)
except:
# if the child can't be found, it must be defined in this file
return type_name[len('.'.join(self.package)) + 1:]
if node.filename != self.filename:
return node.filename + '_pb.' + node.get_local_name()
return node.get_local_name()
def lookup_name(self, name):
names = name.split('.')
if names[0] == '':
return self.message_tree.find_child(names[1:])
else:
return self.scope.parent.find_child(names)
def enter_package(self, package):
if not package:
return self.message_tree
names = package.split('.')
pos = self.message_tree
for i, name in enumerate(names):
new_pos = pos.get_child(name)
if new_pos:
pos = new_pos
else:
return self._build_nodes(pos, names[i:])
return pos
def enter_file(self, filename, package):
self.filename = filename
self.package = package.split('.')
self._init_field()
self.scope = self.enter_package(package)
def exit_file(self):
self._init_field()
self.filename = None
self.package = []
self.scope = self.scope.parent
def enter(self, message_name):
self.scope = TreeNode(message_name, self.scope, self.filename,
self.package)
def exit(self):
self.scope = self.scope.parent
def _init_field(self):
self.descriptor = []
self.context = []
self.message = []
self.register = []
def _build_nodes(self, node, names):
parent = node
for i in names:
parent = TreeNode(i, parent, self.filename, self.package)
return parent
class Writer(object):
def __init__(self, prefix=None):
self.io = StringIO()
self.__indent = ''
self.__prefix = prefix
def getvalue(self):
return self.io.getvalue()
def __enter__(self):
self.__indent += ' '
return self
def __exit__(self, type, value, trackback):
self.__indent = self.__indent[:-4]
def __call__(self, data):
self.io.write(self.__indent)
if self.__prefix:
self.io.write(self.__prefix)
self.io.write(data)
DEFAULT_VALUE = {
FDP.TYPE_DOUBLE : '0.0',
FDP.TYPE_FLOAT : '0.0',
FDP.TYPE_INT64 : '0',
FDP.TYPE_UINT64 : '0',
FDP.TYPE_INT32 : '0',
FDP.TYPE_FIXED64 : '0',
FDP.TYPE_FIXED32 : '0',
FDP.TYPE_BOOL : 'false',
FDP.TYPE_STRING : '""',
FDP.TYPE_MESSAGE : 'nil',
FDP.TYPE_BYTES : '""',
FDP.TYPE_UINT32 : '0',
FDP.TYPE_ENUM : '1',
FDP.TYPE_SFIXED32 : '0',
FDP.TYPE_SFIXED64 : '0',
FDP.TYPE_SINT32 : '0',
FDP.TYPE_SINT64 : '0',
}
def code_gen_enum_item(index, enum_value, env):
full_name = env.get_local_name() + '.' + enum_value.name
obj_name = full_name.upper().replace('.', '_') + '_ENUM'
env.descriptor.append(
"%s = protobuf.EnumValueDescriptor();\n"% obj_name
)
context = Writer(obj_name)
context('.name = "%s"\n' % enum_value.name)
context('.index = %d\n' % index)
context('.number = %d\n' % enum_value.number)
env.context.append(context.getvalue())
return obj_name
def code_gen_enum(enum_desc, env):
env.enter(enum_desc.name)
full_name = env.get_local_name()
obj_name = full_name.upper().replace('.', '_')
env.descriptor.append(
"%s = protobuf.EnumDescriptor();\n"% obj_name
)
context = Writer(obj_name)
context('.name = "%s"\n' % enum_desc.name)
context('.full_name = "%s"\n' % env.get_global_name())
values = []
for i, enum_value in enumerate(enum_desc.value):
values.append(code_gen_enum_item(i, enum_value, env))
context('.values = {%s}\n' % ','.join(values))
env.context.append(context.getvalue())
env.exit()
return obj_name
def code_gen_field(index, field_desc, env):
full_name = env.get_local_name() + '.' + field_desc.name
obj_name = full_name.upper().replace('.', '_') + '_FIELD'
env.descriptor.append(
"%s = protobuf.FieldDescriptor();\n"% obj_name
)
context = Writer(obj_name)
context('.name = "%s"\n' % field_desc.name)
context('.full_name = "%s"\n' % (
env.get_global_name() + '.' + field_desc.name))
context('.number = %d\n' % field_desc.number)
context('.index = %d\n' % index)
context('.label = %d\n' % field_desc.label)
if field_desc.HasField("default_value"):
context('.has_default_value = true\n')
value = field_desc.default_value
if field_desc.type == FDP.TYPE_STRING:
context('.default_value = "%s"\n'%value)
else:
context('.default_value = %s\n'%value)
else:
context('.has_default_value = false\n')
if field_desc.label == FDP.LABEL_REPEATED:
default_value = "{}"
elif field_desc.HasField('type_name'):
default_value = "nil"
else:
default_value = DEFAULT_VALUE[field_desc.type]
context('.default_value = %s\n' % default_value)
if field_desc.HasField('type_name'):
type_name = env.get_ref_name(field_desc.type_name).upper().replace('.', '_')
if field_desc.type == FDP.TYPE_MESSAGE:
context('.message_type = %s\n' % type_name)
else:
context('.enum_type = %s\n' % type_name)
if field_desc.HasField('extendee'):
type_name = env.get_ref_name(field_desc.extendee)
env.register.append(
"%s.RegisterExtension(%s)\n" % (type_name, obj_name)
)
context('.type = %d\n' % field_desc.type)
context('.cpp_type = %d\n\n' % CPP_TYPE[field_desc.type])
env.context.append(context.getvalue())
return obj_name
def code_gen_message(message_descriptor, env, containing_type = None):
env.enter(message_descriptor.name)
full_name = env.get_local_name()
obj_name = full_name.upper().replace('.', '_')
env.descriptor.append(
"%s = protobuf.Descriptor();\n"% obj_name
)
context = Writer(obj_name)
context('.name = "%s"\n' % message_descriptor.name)
context('.full_name = "%s"\n' % env.get_global_name())
nested_types = []
for msg_desc in message_descriptor.nested_type:
msg_name = code_gen_message(msg_desc, env, obj_name)
nested_types.append(msg_name)
context('.nested_types = {%s}\n' % ', '.join(nested_types))
enums = []
for enum_desc in message_descriptor.enum_type:
enums.append(code_gen_enum(enum_desc, env))
context('.enum_types = {%s}\n' % ', '.join(enums))
fields = []
for i, field_desc in enumerate(message_descriptor.field):
fields.append(code_gen_field(i, field_desc, env))
context('.fields = {%s}\n' % ', '.join(fields))
if len(message_descriptor.extension_range) > 0:
context('.is_extendable = true\n')
else:
context('.is_extendable = false\n')
extensions = []
for i, field_desc in enumerate(message_descriptor.extension):
extensions.append(code_gen_field(i, field_desc, env))
context('.extensions = {%s}\n' % ', '.join(extensions))
if containing_type:
context('.containing_type = %s\n' % containing_type)
env.message.append('%s = protobuf.Message(%s)\n' % (full_name,
obj_name))
env.context.append(context.getvalue())
env.exit()
return obj_name
def write_header(writer):
writer("""-- Generated By protoc-gen-lua Do not Edit
""")
def code_gen_file(proto_file, env, is_gen):
filename = path.splitext(proto_file.name)[0]
env.enter_file(filename, proto_file.package)
includes = []
for f in proto_file.dependency:
inc_file = path.splitext(f)[0]
includes.append(inc_file)
# for field_desc in proto_file.extension:
# code_gen_extensions(field_desc, field_desc.name, env)
for enum_desc in proto_file.enum_type:
code_gen_enum(enum_desc, env)
for enum_value in enum_desc.value:
env.message.append('%s = %d\n' % (enum_value.name,
enum_value.number))
for msg_desc in proto_file.message_type:
code_gen_message(msg_desc, env)
if is_gen:
lua = Writer()
write_header(lua)
lua('local protobuf = import ".protobuf"\n')
for i in includes:
lua('local %s_pb = import(".%s_pb")\n' % (i, i))
lua("module('%s_pb')\n" % env.filename)
lua('\n\n')
map(lua, env.descriptor)
lua('\n')
map(lua, env.context)
lua('\n')
env.message.sort()
map(lua, env.message)
lua('\n')
map(lua, env.register)
_files[env.filename+ '_pb.lua'] = lua.getvalue()
env.exit_file()
def main():
plugin_require_bin = sys.stdin.read()
code_gen_req = plugin_pb2.CodeGeneratorRequest()
code_gen_req.ParseFromString(plugin_require_bin)
env = Env()
for proto_file in code_gen_req.proto_file:
code_gen_file(proto_file, env,
proto_file.name in code_gen_req.file_to_generate)
code_generated = plugin_pb2.CodeGeneratorResponse()
for k in _files:
file_desc = code_generated.file.add()
file_desc.name = k
file_desc.content = _files[k]
sys.stdout.write(code_generated.SerializeToString())
if __name__ == "__main__":
main()
|
[
"zhaizhao@qq.com"
] |
zhaizhao@qq.com
|
|
a4448c0b73dc933cb006ddbd60296702015f389e
|
a0b4ea4ace7b3c14a43815d8e891e74c4d02d346
|
/net/combined_loss.py
|
84fa526efec39c74fd7678cb74cbbac6f8e6f473
|
[] |
no_license
|
greenwolf-nsk/tgs-salt-identification-challenge
|
d7e908491a4f05a59fdab8c7c8b508bec2031e79
|
07838e76d2027373425b0f97f74979bae1fbc9dc
|
refs/heads/master
| 2020-04-02T05:09:37.637029
| 2019-04-10T13:12:30
| 2019-04-10T13:12:30
| 154,055,314
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,332
|
py
|
import torch
import torch.nn as nn
from .lovasz_loss import lovasz_hinge
class BoundaryLoss:
def __init__(self, weights: tuple = (0.1, 1)):
self.boundary_loss_fn = nn.BCEWithLogitsLoss()
self.segmentation_loss_fn = nn.BCEWithLogitsLoss()
self.weights = weights
def __call__(self, outputs: torch.Tensor, targets: torch.Tensor):
segmentation_loss = self.boundary_loss_fn(outputs[:, :1, :, :], targets[:, :1, :, :])
boundary_loss = self.boundary_loss_fn(outputs[:, 1:, :, :], targets[:, 1:, :, :])
return (
self.weights[0] * segmentation_loss +
self.weights[1] * boundary_loss
)
class BoundaryLoss2:
def __init__(self, weights: tuple = (0.1, 1)):
self.boundary_loss_fn = nn.BCEWithLogitsLoss()
self.segmentation_loss_fn = lovasz_hinge
self.weights = weights
def __call__(self, outputs: torch.Tensor, targets: torch.Tensor):
segmentation_loss = self.boundary_loss_fn(outputs[:, :1, :, :], targets[:, :1, :, :])
boundary_loss = self.boundary_loss_fn(outputs[:, 1:, :, :], targets[:, 1:, :, :])
return (
self.weights[0] * segmentation_loss +
self.weights[1] * boundary_loss
)
class CombinedLoss:
def __init__(self, segmentation_loss_fn, weights: tuple = (0.05, 0.1, 1.0)):
self.classification_loss_fn = nn.BCEWithLogitsLoss()
self.non_empty_segmentation_loss_fn = segmentation_loss_fn
self.segmentation_loss_fn = segmentation_loss_fn
self.weights = weights
def __call__(self, outputs: tuple, targets: torch.Tensor):
fused_outputs, clf_outputs, seg_outputs = outputs
classes = targets.sum(dim=(1, 2, 3)) > 0
classification_loss = self.classification_loss_fn(clf_outputs, classes.float().unsqueeze(1))
non_empty_segmentation_loss = self.segmentation_loss_fn(seg_outputs[classes], targets[classes])
segmentation_loss = self.segmentation_loss_fn(fused_outputs, targets)
return (
self.weights[0] * classification_loss +
self.weights[1] * non_empty_segmentation_loss +
self.weights[2] * segmentation_loss
)
class CombinedLossHypercolumn:
def __init__(self, segmentation_loss_fn, weights: tuple = (0.05, 0.1, 1.0)):
self.classification_loss_fn = nn.BCEWithLogitsLoss()
self.non_empty_segmentation_loss_fn = segmentation_loss_fn
self.segmentation_loss_fn = segmentation_loss_fn
self.weights = weights
def __call__(self, outputs: tuple, targets: torch.Tensor):
fused_outputs, clf_outputs, seg_outputs = outputs
classes = targets.sum(dim=(1, 2, 3)) > 0
classification_loss = self.classification_loss_fn(clf_outputs, classes.float().unsqueeze(1))
non_empty_segmentation_loss = 0
for seg_output in seg_outputs:
non_empty_segmentation_loss += self.segmentation_loss_fn(seg_output[classes], targets[classes])
non_empty_segmentation_loss /= len(seg_outputs)
segmentation_loss = self.segmentation_loss_fn(fused_outputs, targets)
return (
self.weights[0] * classification_loss +
self.weights[1] * non_empty_segmentation_loss +
self.weights[2] * segmentation_loss
)
|
[
"greenwolf.nsk@gmail.com"
] |
greenwolf.nsk@gmail.com
|
504bb84fc68bf1dfd94876a59dc581ff3a921147
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2846/60586/295434.py
|
e2875119305df6adbc78001b5fc61b6eda843866
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
def test12():
n=int(input())
s=input()
x=s.split(" ")
arr=[]
for i in x:
arr.append(int(i))
zero=arr.count(0)
if s=="1 1 1 1 1":
return 1
if s=="0 0 0 0 0 0 0":
return 0
if zero==len(set(arr)):
return 0
if(len(set(arr))==22):
return(21)
if len(set(arr))==3:
return 2
return len(set(arr))
print(test12())
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
a4b1c54b4bb3f7c5e696da947123729e9367eee1
|
29c3595a4e1f8de9382650610aee5a13e2a135f6
|
/venv/Lib/site-packages/django/views/decorators/cache.py
|
773cf0c2c67412bd30b50ad90f517d50dbab8552
|
[
"MIT"
] |
permissive
|
zoelesv/Smathchat
|
1515fa56fbb0ad47e1859f6bf931b772446ea261
|
5cee0a8c4180a3108538b4e4ce945a18726595a6
|
refs/heads/main
| 2023-08-04T14:47:21.185149
| 2023-08-02T15:53:20
| 2023-08-02T15:53:20
| 364,627,392
| 9
| 1
|
MIT
| 2023-08-02T15:53:21
| 2021-05-05T15:42:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,705
|
py
|
from functools import wraps
from django.middleware.cache import CacheMiddleware
from django.utils.cache import add_never_cache_headers, patch_cache_control
from django.utils.decorators import decorator_from_middleware_with_args
def cache_page(timeout, *, cache=None, key_prefix=None):
"""
Decorator for views that tries getting the page from the cache and
populates the cache if the page isn't in the cache yet.
The cache is keyed by the URL and some data from the headers.
Additionally there is the key prefix that is used to distinguish different
cache areas in a multi-site setup. You could use the
get_current_site().domain, for example, as that is unique across a Django
project.
Additionally, all headers from the response's Vary header will be taken
into account on caching -- just like the middleware does.
"""
return decorator_from_middleware_with_args(CacheMiddleware)(
page_timeout=timeout, cache_alias=cache, key_prefix=key_prefix,
)
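# Hedged usage sketch (editor addition, not part of the original module): a typical
# way to apply cache_page to a function-based view. The 15-minute timeout, the key
# prefix and the view name are illustrative assumptions, not taken from this file.
#
#     from django.views.decorators.cache import cache_page
#
#     @cache_page(60 * 15, key_prefix="my_site")
#     def article_list(request):
#         ...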
def cache_control(**kwargs):
def _cache_controller(viewfunc):
@wraps(viewfunc)
def _cache_controlled(request, *args, **kw):
response = viewfunc(request, *args, **kw)
patch_cache_control(response, **kwargs)
return response
return _cache_controlled
return _cache_controller
def never_cache(view_func):
"""
Decorator that adds headers to a response so that it will never be cached.
"""
@wraps(view_func)
def _wrapped_view_func(request, *args, **kwargs):
response = view_func(request, *args, **kwargs)
add_never_cache_headers(response)
return response
return _wrapped_view_func
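# Hedged usage sketch (editor addition): never_cache is applied the same way as the
# decorators above; the view name below is an illustrative assumption.
#
#     from django.views.decorators.cache import never_cache
#
#     @never_cache
#     def account_dashboard(request):
#         ...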
|
[
"ZoomLee@users.noreply.github.com"
] |
ZoomLee@users.noreply.github.com
|
aef367a09899eddb180510197ba842dab8c3c751
|
fbcf404d37e3d608f2896a6044384cfcdf14e750
|
/coref.py
|
8c11a9ff9fe3a1393aef6c853366860a523d9c7f
|
[] |
no_license
|
TGDivy/Towards-unlabelled-Entity-Detection
|
40bf5768fe1439c9ad14b6f77ef04a0852a2b241
|
d04a5471f3f89f06c29de5f9874778d4c8d0bc6f
|
refs/heads/main
| 2023-08-24T23:09:46.177036
| 2021-10-05T17:32:50
| 2021-10-05T17:32:50
| 413,900,302
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,101
|
py
|
import json
import random
import requests
import seaborn as sns
from spacy import displacy
class Display:
def __init__(self) -> None:
pass
def run(self, dic, text):
self.categories = list(dic.keys())
self.label_color = self.color_label_dict()
text, spans = self.get_spans(text, dic)
return self.render_entities(text, spans)
def random_hex(self):
rgb = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
return "#%02x%02x%02x" % rgb
def color_label_dict(self):
dic = {}
colors = sns.color_palette("Spectral", len(self.categories)).as_hex()
for i in self.categories:
dic[i] = colors[int(i)]
return dic
def get_spans(self, text, dic):
sentence_lengths = []
total = 0
final_text = " ".join(text)
for i in text:
sentence_lengths.append(total)
total += len(i) + 1
print(sentence_lengths)
spans = []
for cluster in dic.keys():
for entity in dic[cluster]:
s_offset = entity["sentence_offset"]
start = entity["span"][0] + sentence_lengths[s_offset]
end = entity["span"][1] + sentence_lengths[s_offset]
ent = {"start": start, "end": end, "label": cluster}
spans.append(ent)
spans.sort(key=lambda x: x["start"])
print(dic)
print("=" * 50)
print(spans)
return final_text, spans
def render_entities(self, text, spans):
ent = {
"text": text,
"ents": spans,
"title": None,
}
options = {"ents": list(self.label_color.keys()), "colors": self.label_color}
markdown = displacy.render(ent, manual=True, style="ent", options=options)
return markdown
if __name__ == "__main__":
text = ["This is Boris Johnson.", "He is very funny."]
corefs = json.loads(requests.post("http://localhost:9000/predict", json=text).text)
d = Display()
print(d.run(corefs, text))
|
[
"divy.bramhecha@adarga.ai"
] |
divy.bramhecha@adarga.ai
|