blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4884f2033320248dfcdd6d2ebf5eecd26249c309
|
7a9995d177e63c20134705f1c123ac918b422c57
|
/actions.py
|
833284342cd8ce2396181b591a4d8d1315723aaf
|
[] |
no_license
|
FirojNeosoft/VoiceIntegrationWithAlexa
|
6e053dc3cd8c82cf27a45361246ac97fcfc6e99b
|
440dda2c975b5f139ccabcfc2a5a854cacf1c572
|
refs/heads/master
| 2020-04-19T14:28:13.162142
| 2019-01-29T23:24:03
| 2019-01-29T23:24:03
| 168,245,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,244
|
py
|
"""
Actions For Skills Of Alexa
"""
import datetime, psycopg2
class Action():
    """
    Actions backing the Alexa skills: a time-of-day greeting and a
    customer balance lookup against a local PostgreSQL database.
    """

    def __init__(self):
        # Database connection; credentials/host are hard-coded for the
        # local development setup.
        self.conn = psycopg2.connect(database="AlexaDB", user="postgres",
                                     password="postgres", host="127.0.0.1", port="5432")
        self.cur = self.conn.cursor()
        self.balance = 0

    def greetings(self):
        """
        Greeting action: return a salutation matching the current
        local hour.
        """
        hour = datetime.datetime.now().hour
        if 0 <= hour < 12:
            return "Hi, Good morning."
        elif 12 <= hour < 16:
            return "Hi, Good Afternoon."
        elif 16 <= hour < 24:
            return "Hi, Good Evening."
        # Unreachable for a valid clock (hour is always 0-23); kept as a
        # safe fallback.
        return "Hi, nice to meet you."

    def get_customer_balance(self, customer_id):
        """
        Action to retrieve the balance of a customer.

        :param customer_id: id of the customer row to look up
        :return: balance as a string, or None when the lookup fails
        """
        try:
            # Parameterized query: the original concatenated the raw
            # customer_id into the SQL string, which is an SQL
            # injection vector.
            self.cur.execute("select balance from customer where customer_id=%s",
                             (str(customer_id),))
            result = self.cur.fetchone()
            self.balance = str(result[0])
            return self.balance
        except Exception as e:
            print("Failed due to ", e)
            # Explicit None (the original fell off the end implicitly).
            return None
|
[
"firoj.nalband@neosofttech.com"
] |
firoj.nalband@neosofttech.com
|
ec5441321e76af1f9586545d430293f3a4014a7d
|
e31bd208438af3d71e19a3be07dd9d4e7e10831b
|
/secretaria/migrations/0003_profissao.py
|
2f1a8984091cbb3d7b41d9cfec9a6673b41c83b9
|
[] |
no_license
|
ismaely/SOFIL_RH
|
f9ef0cff349d54e2f61b0740c62eb37ab5bfde11
|
65e636a0d89ea415897ef08e8e7c9bd7425c5394
|
refs/heads/master
| 2022-12-09T14:13:27.215869
| 2020-04-16T18:49:07
| 2020-04-16T18:49:07
| 193,426,643
| 2
| 1
| null | 2022-12-07T23:54:14
| 2019-06-24T03:20:16
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,132
|
py
|
# Generated by Django 2.2.1 on 2019-06-26 03:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the Profissao model, which
    # records a student's professional background. Generated files
    # should not be hand-restructured.

    dependencies = [
        ('secretaria', '0002_estudante'),
    ]

    operations = [
        migrations.CreateModel(
            name='Profissao',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # One Profissao row per Estudante; deleting the student
                # cascades to this record.
                ('estudante', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='secretaria.Estudante')),
                ('instituicao', models.CharField(blank=True, default='--', max_length=190, null=True)),
                ('funcao', models.CharField(blank=True, default='--', max_length=190, null=True)),
                ('area_profissional', models.CharField(blank=True, default='--', max_length=120, null=True)),
                ('ano_experiencia', models.CharField(blank=True, default='--', max_length=9, null=True)),
                ('localizacao', models.CharField(blank=True, max_length=50, null=True)),
            ],
        ),
    ]
|
[
"7ilipe@gmail.com"
] |
7ilipe@gmail.com
|
4f27b2f80d0c42c3f4b9d365e081ee521395bbfc
|
0cb1fedea7d2ab7c13c525cc17e83f4e8bc59173
|
/mutiencrypter/rsa.py
|
c27ec42c2632d1c36f8cbb6217d909b10aeeb8f3
|
[] |
no_license
|
cyfaaa/Security-Tool-in-Python
|
be04dbcbff015d8e63a34cd88d35ee6c5e849c69
|
bbcb940390027181cb892490cf9abcb06f741dac
|
refs/heads/master
| 2020-07-04T02:05:37.181011
| 2019-08-13T10:23:01
| 2019-08-13T10:23:01
| 202,117,175
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,835
|
py
|
import random
def gcd(a, b):
    """Greatest common divisor of a and b (iterative Euclid)."""
    while b != 0:
        a, b = b, a % b
    return a
def xgcd(a, b):
    """
    Extended Euclidean algorithm.

    Returns (g, x, y) with g = gcd(a, b) and a*x + b*y == g for the
    original inputs.
    """
    prev_x, cur_x = 1, 0
    prev_y, cur_y = 0, 1
    while b != 0:
        quotient, remainder = divmod(a, b)
        a, b = b, remainder
        prev_x, cur_x = cur_x, prev_x - quotient * cur_x
        prev_y, cur_y = cur_y, prev_y - quotient * cur_y
    return a, prev_x, prev_y
def chooseE(totient):
    """
    Pick a random public exponent e with 1 < e < totient that is
    coprime to the totient.
    """
    while True:
        candidate = random.randrange(2, totient)
        if gcd(candidate, totient) == 1:
            return candidate
def chooseKeys():
    """
    Generate an RSA key pair from two random primes read from
    'primes-to-100k.txt' and write it to disk: 'public_keys.txt' gets
    n then e, 'private_keys.txt' gets n then d (one value per line).
    """
    rand1 = random.randint(100, 300)
    rand2 = random.randint(100, 300)

    # with-statement guarantees the prime list is closed even if
    # reading fails (the original left the handle open on error).
    with open('primes-to-100k.txt', 'r') as fo:
        lines = fo.read().splitlines()

    prime1 = int(lines[rand1])
    prime2 = int(lines[rand2])

    n = prime1 * prime2
    totient = (prime1 - 1) * (prime2 - 1)

    e = chooseE(totient)

    # Named 'g' so the gcd() helper function is not shadowed.
    g, x, y = xgcd(e, totient)

    # The Bezout coefficient x may be negative; shift it into
    # [0, totient) so the private exponent d is positive.
    if x < 0:
        d = x + totient
    else:
        d = x

    with open('public_keys.txt', 'w') as f_public:
        f_public.write(str(n) + '\n')
        f_public.write(str(e) + '\n')

    with open('private_keys.txt', 'w') as f_private:
        f_private.write(str(n) + '\n')
        f_private.write(str(d) + '\n')
def encrypt(message, file_name = 'public_keys.txt', block_size = 2):
    """
    Encrypt *message* with the public key stored in *file_name*.

    The message is packed into integer blocks of *block_size*
    characters (3 decimal digits per character), each block is raised
    to the power e mod n, and the blocks are returned joined by
    spaces.

    :param message: plaintext string
    :param file_name: public key file (n on line 1, e on line 2)
    :param block_size: characters packed per block
    :return: encrypted message string, '' for an empty message, or
             None when the key file cannot be found.
    """
    try:
        with open(file_name, 'r') as fo:
            n = int(fo.readline())
            e = int(fo.readline())
    except FileNotFoundError:
        print('That file is not found.')
        # Explicit None (the original fell through implicitly).
        return None

    # The original encrypted the sentinel -1 when the message was
    # empty, producing one garbage block; return '' instead.
    if not message:
        return ''

    encrypted_blocks = []
    ciphertext = ord(message[0])
    for i in range(1, len(message)):
        # Start a new block every block_size characters.
        if i % block_size == 0:
            encrypted_blocks.append(ciphertext)
            ciphertext = 0
        # Shift previous codes left by 3 decimal digits, append next.
        ciphertext = ciphertext * 1000 + ord(message[i])
    encrypted_blocks.append(ciphertext)

    # pow(b, e, n) is modular exponentiation in C -- same result as
    # (b**e) % n without materializing the enormous intermediate.
    for i in range(len(encrypted_blocks)):
        encrypted_blocks[i] = str(pow(encrypted_blocks[i], e, n))

    return " ".join(encrypted_blocks)
def decrypt(blocks, block_size = 2, file_name = 'private_keys.txt'):
    """
    Decrypt a space-separated string of encrypted integer blocks with
    the private key stored in *file_name*.

    Each decrypted block is unpacked into *block_size* characters
    (3 decimal digits per character).

    :param blocks: space-separated encrypted blocks from encrypt()
    :param block_size: characters packed per block
    :param file_name: private key file (n on line 1, d on line 2);
        new optional parameter, defaults to the previously hard-coded
        path so existing callers are unaffected.
    :return: the decrypted message
    """
    with open(file_name, 'r') as fo:
        n = int(fo.readline())
        d = int(fo.readline())

    # turns the string into a list of ints
    int_blocks = [int(s) for s in blocks.split(' ')]

    message = ""
    # converts each int in the list to block_size number of characters
    # by default, each int represents two characters
    for block in int_blocks:
        # pow(b, d, n): modular exponentiation, same result as
        # (b**d) % n without the huge intermediate value.
        value = pow(block, d, n)
        tmp = ""
        for _ in range(block_size):
            tmp = chr(value % 1000) + tmp
            value //= 1000
        message += tmp
    return message
def main():
    """Interactive driver: optionally regenerate the key pair, then
    encrypt or decrypt a message entered by the user."""
    # we select our primes and generate our public and private keys,
    # usually done once
    regen = input('Do you want to generate new public and private keys? (y or n) ')
    if regen == 'y':
        chooseKeys()

    mode = input('Would you like to encrypt or decrypt? (Enter e or d): ')

    if mode == 'e':
        text = input('What would you like to encrypt?\n')
        use_own_key = input('Do you want to encrypt using your own public key? (y or n) ')
        if use_own_key == 'y':
            print('Encrypting...')
            print(encrypt(text))
        else:
            key_file = input('Enter the file name that stores the public key: ')
            print('Encrypting...')
            print(encrypt(text, key_file))
    elif mode == 'd':
        text = input('What would you like to decrypt?\n')
        print('Decryption...')
        print(decrypt(text))
    else:
        print('That is not a proper instruction.')
|
[
"cyf@cyf-macbookair.local"
] |
cyf@cyf-macbookair.local
|
2fffbb85134f34d952aaae32c116c8a6990bef42
|
8809f5cb66207e225415bc87f371988adae797e8
|
/lxx/tmp.create-lemma.py
|
995c48d5a8051ab5522bdd82e60fcc6844aef82a
|
[] |
no_license
|
ReneNyffenegger/Bible-Text-Sources
|
29de458d74817ad1134edcb0ff4eebbc22bb5163
|
8d1009cf380e68b926e6d96ffe0ddba6e873156c
|
refs/heads/master
| 2023-07-09T09:16:12.592415
| 2023-06-27T10:25:38
| 2023-06-27T10:25:38
| 44,729,428
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 698
|
py
|
#!/usr/bin/python
import json
import re
# Extract per-word lemma and Strong's-number files from the LXX
# Rahlfs 1935 word list (tab-separated: number, text, JSON attributes).
# with-statement closes all three files even on a malformed line (the
# original relied on explicit close() calls that error paths skipped).
with open('../github.eliranwong/LXX-Rahlfs-1935/template/ossp_wordlist_lxx_only.csv') as lxx_f, \
     open('lemma', 'w') as lemma_f, \
     open('strongs', 'w') as strongs_f:

    lxx_f.read(2) # skip BOM

    for lxx_l in lxx_f:
        # Drop the trailing comma (before the newline) on each row.
        lxx_l = re.sub(',$', '', lxx_l)
        no, txt, json_txt = lxx_l.split('\t')

        # Zero-padded word label, e.g. 'L00042:'.
        noL = 'L' + no.zfill(5) + ':'

        json_obj = json.loads(json_txt)

        # Strong's number is optional in the JSON attributes.
        if 'strong' in json_obj:
            strongsG = 'G' + json_obj['strong'].zfill(4)
        else:
            strongsG = ''

        lemma_f.write(noL + json_obj['lemma'] + '\n')
        strongs_f.write(noL + strongsG + '\n')
|
[
"rene.nyffenegger@adp-gmbh.c"
] |
rene.nyffenegger@adp-gmbh.c
|
ff755e8a281abc1e5afea579bbce884e577e92fb
|
873862a28fd31d2fe1807baaf18a0788b112dd57
|
/adsrefpipe/refparsers/ADSocr.py
|
88433da5e18a78761f5df84d5dfcab01a063e7d6
|
[
"MIT"
] |
permissive
|
golnazads/ADSReferencePipeline
|
c92b90e3712dc08a5b2e44497bc96c8e07ec4eff
|
d41ed17b3b2fd7f5ae2deb48243f530cf7f494ee
|
refs/heads/master
| 2023-08-09T15:18:04.114158
| 2023-07-21T14:16:17
| 2023-07-21T14:16:17
| 242,004,478
| 1
| 0
|
MIT
| 2020-02-20T22:32:59
| 2020-02-20T22:32:59
| null |
UTF-8
|
Python
| false
| false
| 7,382
|
py
|
import sys, os
import argparse
import re
from adsrefpipe.refparsers.toREFs import OCRtoREFs
from adsrefpipe.refparsers.reference import unicode_handler
from adsputils import setup_logging, load_config
# Module-level logger and configuration shared by all parsers below.
logger = setup_logging('refparsers')
config = {}
config.update(load_config())
class ADSocrToREFs(OCRtoREFs):
    """Reference extractor for ADS OCR reference files: cleans each raw
    reference string and packages it for the resolver."""

    def __init__(self, filename, buffer, parsername=None):
        """
        :param filename: path of the OCR reference file to parse
        :param buffer: raw reference text, used when no filename given
        :param parsername: parser recorded with the references;
            defaults to this class (NOTE(review): the class object
            itself, not a string, is passed -- matches the subclass
            below; confirm OCRtoREFs expects this)
        """
        if not parsername:
            parsername = ADSocrToREFs
        OCRtoREFs.__init__(self, filename, buffer, parsername)

    def process_and_dispatch(self):
        """
        this function does reference cleaning and then calls the parser

        :return: list of dicts {'bibcode': ..., 'references': [...]},
            one per raw reference block
        """
        references = []
        for raw_block_references in self.raw_references:
            bibcode = raw_block_references['bibcode']
            block_references = raw_block_references['block_references']
            item_nums = raw_block_references.get('item_nums', [])

            parsed_references = []
            for i, reference in enumerate(block_references):
                # Normalize HTML/XML entities to ASCII before parsing.
                reference = unicode_handler.ent2asc(reference)
                logger.debug("ADSocr: parsing %s" % reference)
                # Keep both the cleaned string and the raw original,
                # merged with this item's enumeration number (if any).
                parsed_references.append(self.merge({'refstr': reference, 'refraw': reference}, self.any_item_num(item_nums, i)))

            references.append({'bibcode': bibcode, 'references': parsed_references})
            logger.debug("%s: parsed %d references" % (bibcode, len(references)))

        return references
class ObsOCRtoREFs(ADSocrToREFs):
    """OCR reference extractor for The Observatory format: references
    are enumerated and may span multiple lines, so lines are
    re-assembled into complete references before parsing."""

    # Punctuation characters that may surround an item number.
    punctuations = r'!\"#\$%&\'\(\)\*\+,-\./:;<=>\?@\[\]\^_`{\|}~\\'
    # Leading enumeration token: 1-3 digits (or characters OCR commonly
    # confuses with digits), optionally a trailing letter, wrapped in
    # punctuation/whitespace.
    enumeration = r'^\s*[%s]*[\dOoIiSsta]{1,3}[a-z]{0,1}[%s\s]+' % (punctuations, punctuations)
    # Lookaheads requiring an author initial and (optionally) a year so
    # arbitrary leading digits are not mistaken for an item number.
    enumeration_lookahead = r'(?=.*[A-Z]{1}[\.\s]+)(?=.*[12]\d\d\d[a-z]*)?'
    # Detects the start of a new reference line.
    re_reference_start = re.compile(r'(%s)%s' % (enumeration, enumeration_lookahead))
    # Strips the enumeration token off a reference-start line.
    re_remove_enumeration = re.compile(r'%s%s' % (enumeration, enumeration_lookahead))

    def __init__(self, filename, buffer):
        """
        :param filename: path of the OCR reference file to parse
        :param buffer: raw reference text, used when no filename given
        """
        ADSocrToREFs.__init__(self , filename, buffer, parsername=ObsOCRtoREFs)

    def get_references(self, filename, encoding="ISO-8859-1"):
        """
        read reference file for this text format

        :param filename: reference file; the bibcode is taken from its
            basename via self.re_bibcode
        :param encoding: file encoding (Observatory scans are Latin-1)
        :return: list of [bibcode, block_references] pairs, or [] on
            any error
        """
        try:
            references = []

            with open(filename, 'r', encoding=encoding, errors='ignore') as f:
                reader = f.readlines()

            # Bibcode comes from the file name, not the file content.
            bibcode = None
            match = self.re_bibcode.match(os.path.basename(filename))
            if match:
                bibcode = match.group(1)

            block_references = []
            prev_reference = ''
            reference = ''
            for i, line in enumerate(reader):
                if not line.strip():
                    continue
                if self.re_reference_start.search(line):
                    # add previous reference if any, since current line is the start of reference
                    if reference:
                        block_references, reference, prev_reference = self.verify_accept(block_references, reference, prev_reference)
                    # remove the enumeration first
                    line = list(filter(None, self.re_remove_enumeration.split(line)))[0]
                    # now start the new multi-line reference
                    reference = line.replace('\n', ' ').replace('\r', ' ').strip()
                else:
                    # now continue with the multi-line reference
                    reference += (' ' + line.replace('\n', ' ').replace('\r', ' ').strip()).strip()
            # add the last multi-line reference here
            if reference:
                block_references, _, _ = self.verify_accept(block_references, reference, prev_reference)

            if bibcode and block_references:
                references.append([bibcode, block_references])
            else:
                logger.error("Error in getting the bibcode from the reference file name %s. Skipping!" % (filename))

            if len(references) > 0:
                logger.debug("Read source file %s, and got %d references to resolve for bibcode %s." % (filename, len(references), bibcode))
            elif len(references) == 0:
                logger.error('No references found in reference file %s.' % (filename))

            return references
        except Exception as e:
            logger.error('Exception: %s' % (str(e)))
            return []
def toREFs(filename, buffer): # pragma: no cover
    """
    this is a local function, called from main, for testing purposes.

    :param filename: path of the reference file to parse
    :param buffer: raw reference text
    :return: None; parsed references are printed to stdout
    """
    # The third-from-last path component identifies the journal type.
    source_type = filename.split('/')[-3]
    parser_cls = ObsOCRtoREFs if source_type == 'Obs' else ADSocrToREFs
    results = parser_cls(filename=filename, buffer=buffer).process_and_dispatch()
    for result in results:
        print(result['bibcode'])
        for num, ref in enumerate(result['references'], start=1):
            print(num, ref['refstr'])
if __name__ == '__main__':      # pragma: no cover
    # Command-line entry point: parse one file (-f), one buffer (-b),
    # or -- with neither -- run the bundled stub-data regression tests.
    parser = argparse.ArgumentParser(description='Parse latex references')
    parser.add_argument('-f', '--filename', help='the path to source file')
    parser.add_argument('-b', '--buffer', help='latex reference(s)')
    args = parser.parse_args()
    if args.filename:
        toREFs(args.filename, buffer=None)
    elif args.buffer:
        toREFs(filename=None, buffer=args.buffer)
    # if no reference source is provided, just run the test files
    elif not args.filename and not args.buffer:
        # testing the ocr references
        ocr_testing = [
            (ADSocrToREFs, '/../tests/unittests/stubdata/ocr/ADS/0/0000ADSTEST.0.....Z.ref.ocr.txt'),
            (ADSocrToREFs, '/../tests/unittests/stubdata/ocr/ADS/0/0001ADSTEST.0.....Z.ref.ocr.txt'),
            (ADSocrToREFs, '/../tests/unittests/stubdata/ocr/ADS/0/0002ADSTEST.0.....Z.ref.ocr.txt'),
            (ADSocrToREFs, '/../tests/unittests/stubdata/ocr/ADS/0/0003ADSTEST.0.....Z.ref.ocr.txt'),
            (ADSocrToREFs, '/../tests/unittests/stubdata/ocr/ADS/0/0004ADSTEST.0.....Z.ref.ocr.txt'),
            (ObsOCRtoREFs, '/../tests/unittests/stubdata/ocr/Obs/0/0000ObsTEST.0.....Z.ref.ocr.txt'),
        ]
        for (parser, filename) in ocr_testing:
            # Stub paths are relative to this module's directory.
            filename = os.path.abspath(os.path.dirname(__file__) + filename)
            # Render the parsed output in the same format as the
            # recorded '.result' file, then compare verbatim.
            compare = ''
            for i,one in enumerate(parser(filename=filename, buffer=None).process_and_dispatch()):
                compare += '---<%s>---\n'%one['bibcode']
                for ref in one['references']:
                    compare += '%s\n'%ref['refstr'].strip()
            with open(os.path.abspath(filename + '.result'), 'r', encoding='utf-8', errors='ignore') as f:
                from_file = f.read()
            if from_file == compare.strip():
                print('Test `%s` passed!'%filename)
            else:
                print('Test `%s` failed!'%filename)
    sys.exit(0)
|
[
"28757512+golnazads@users.noreply.github.com"
] |
28757512+golnazads@users.noreply.github.com
|
f20d508b5a52f7b16b2bafc55271c61fa174d3e6
|
2694de88d78edf0bea981f8d23ec61d4d30d5882
|
/texture.py
|
ba5ec822e7b2b8b59dfc12e8a98558387d049c6c
|
[] |
no_license
|
tabufellin/shader-hand-made
|
9da819a3d96f61d08ae61e6552370bb262ebec5d
|
8f1a70c9c0bebcec6bb672ea79ced8aeadff26ca
|
refs/heads/main
| 2023-01-22T21:38:46.015962
| 2020-11-28T02:22:33
| 2020-11-28T02:22:33
| 311,198,221
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,285
|
py
|
import struct
import os
def color(r, g, b):
    """Pack an RGB triple into the 3-byte BGR order used by BMP data."""
    return bytes((b, g, r))
class Texture(object):
    """A BMP texture: loads the pixel grid from a 24-bit uncompressed
    BMP file and samples colors at normalized coordinates."""

    def __init__(self, path):
        """
        :param path: path of the BMP file to load
        """
        self.path = path
        self.read()

    def read(self):
        """Parse the BMP header and fill self.pixels with one 3-byte
        BGR color per pixel (row-major, as stored in the file)."""
        # with-statement closes the file even on a malformed BMP; the
        # original leaked the handle if any unpack raised.
        with open(self.path, "rb") as image:
            image.seek(2 + 4 + 4)  # skip BM, skip bmp size, skip zeros
            header_size = struct.unpack("=l", image.read(4))[0]  # read header size
            image.seek(2 + 4 + 4 + 4 + 4)
            self.width = struct.unpack("=l", image.read(4))[0]   # read width
            self.height = struct.unpack("=l", image.read(4))[0]  # read height (original comment said width)
            self.pixels = []
            image.seek(header_size)
            for y in range(self.height):
                self.pixels.append([])
                for x in range(self.width):
                    # BMP stores pixels as BGR byte triples.
                    b = ord(image.read(1))
                    g = ord(image.read(1))
                    r = ord(image.read(1))
                    self.pixels[y].append(color(r, g, b))

    def get_color(self, tx, ty, intensity=1):
        """
        Sample the texture at normalized coordinates.

        :param tx: horizontal coordinate in [0, 1)
        :param ty: vertical coordinate in [0, 1)
        :param intensity: brightness multiplier applied per channel
        :return: a 3-byte BGR color, or None when the computed pixel
                 index falls outside the loaded image
        """
        x = int(tx * self.width)
        y = int(ty * self.height)
        try:
            # Scale each channel, clamping non-positive results to 0.
            return bytes(map(lambda b: round(b * intensity) if b * intensity > 0 else 0, self.pixels[y][x]))
        except IndexError:
            # Out-of-range sample. The original's bare 'except: pass'
            # also swallowed real bugs; keep the implicit-None contract
            # but only for the expected failure mode.
            return None
|
[
"noreply@github.com"
] |
noreply@github.com
|
0ae067c5eba7b1ffc6636c0e620c1c15be4f60e6
|
985d4aa1be92f7ef85148a6af258384f846fbc6c
|
/dataInput.py
|
2abaafce4c7a030f36b9d66074efbf8979f10994
|
[] |
no_license
|
seororo358/Tensorflow_CNN_exp
|
a9bca4e3b4c7141ababeae3ab669df65d2d8f157
|
c9d1d69d25105f965bac98af68ff4709aa270e4a
|
refs/heads/master
| 2023-02-03T12:43:42.278378
| 2020-12-23T02:02:25
| 2020-12-23T02:02:25
| 323,776,645
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,785
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 30 10:38:00 2018
@author: minsooyeo119112
"""
from matplotlib import pyplot as plt
import numpy as np
class dataInput:
    """CIFAR-10 loader: reads the pickled batches from
    ./data_set/cifar_10/ and provides simple visualization helpers."""

    def __init__(self):
        # trainData: list of the 5 training batch dicts;
        # testData: the single test batch dict.
        self.trainData, self.testData = self.dataLoad()
        # Human-readable class names indexed by CIFAR-10 label (0-9).
        self.labelName = ['airplane',
                          'automobile',
                          'bird',
                          'cat',
                          'deer',
                          'dog',
                          'frog',
                          'horse',
                          'ship',
                          'truck']

    def dataLoad(self):
        """Load the five training batches and the test batch.

        :return: (list of training batch dicts, test batch dict)
        """
        data = []
        for i in range(5):
            dataFrame = self.unpickle('./data_set/cifar_10/data_batch_' + str(i+1))
            data.append(dataFrame)
        testData = self.unpickle('./data_set/cifar_10/test_batch' )
        return data, testData

    def unpickle(self,file):
        """Load one CIFAR-10 batch file: a pickled dict whose keys are
        bytes (b'data', b'labels', ...)."""
        import pickle
        with open(file, 'rb') as fo:
            dict = pickle.load(fo, encoding='bytes')
        return dict

    def dataVisuallization(self, label):
        """Show a random test image of the given class with imshow.

        :param label: CIFAR-10 class id (0-9)
        :return: the selected image as a (3, 32, 32) array
        """
        # Indices of all test images carrying this label.
        tmpDataIdx = [idx for idx, val in enumerate(self.testData[b'labels']) if val ==label]
        # Pick one at random; each row is a flat 3x32x32 image.
        tmpData = self.testData[b'data'][tmpDataIdx[np.random.choice(len(tmpDataIdx))],:].reshape([3,32,32])
        # Transpose channel-first to HWC for imshow.
        plt.imshow(tmpData.transpose(1,2,0))
        return tmpData

    def dataVisuallizationSubplot(self):
        """Draw a 10x10 grid: ten random samples per class, with class
        names as column titles on the first row."""
        for i in range(10):
            for q in range(10):
                plt.subplot(10,10,((i)*10) + (q+1) )
                self.dataVisuallization(q)
                if(i==0):
                    plt.title(self.labelName[q])
if __name__ == '__main__':
    # Smoke test: load the dataset from disk, then report.
    loader = dataInput()
    print('main')
|
[
"ssr3588@nate.com"
] |
ssr3588@nate.com
|
f987c95714b3b19c0d798755b64d27ae114cc266
|
6268a19db5d7806b3a91d6350ec2777b3e13cee6
|
/old_stuff/code/hpe-feb2019/qi2016/huawei/preprocess/load_data.py
|
76844a4293e4bbe7d395115563869a694f4be590
|
[] |
no_license
|
aaronlws95/phd_2019
|
3ae48b4936f039f369be3a40404292182768cf3f
|
22ab0f5029b7d67d32421d06caaf3e8097a57772
|
refs/heads/master
| 2023-03-22T14:38:18.275184
| 2021-03-21T11:39:29
| 2021-03-21T11:39:29
| 186,387,381
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,463
|
py
|
from __future__ import print_function
from sklearn.utils import shuffle
from PIL import Image
import numpy
import matplotlib.pyplot as plt
from ..utils import xyz_uvd
from skimage.transform import resize
def get_filenames_labels(dataset_dir):
    """
    Read Training_Annotation.txt and return per-image joint labels.

    :param dataset_dir: directory containing Training_Annotation.txt
        (tab-separated: file name then 21*3 xyz joint coordinates)
    :return: (uvd_jnt_gt, xyz_jnt_gt, file_names) where the joint
        arrays have shape (N, 21, 3) and file_names is an (N,) array
    """
    xyz_jnt_gt=[]
    file_name = []
    # Reorder the 21 joints from the annotation's order into this
    # project's joint order.
    our_index = [0,1,6,7,8,2,9,10,11,3,12,13,14,4,15,16,17,5,18,19,20]
    # The with-statement closes the file; the original also called
    # f.close() redundantly inside the with block (removed).
    with open('%s/Training_Annotation.txt'%(dataset_dir), mode='r',encoding='utf-8',newline='') as f:
        for line in f:
            part = line.split('\t')
            file_name.append(part[0])
            # 21 joints x 3 coordinates per row.
            xyz_jnt_gt.append(part[1:64])
    xyz_jnt_gt=numpy.array(xyz_jnt_gt,dtype='float64')
    xyz_jnt_gt.shape=(xyz_jnt_gt.shape[0],21,3)
    xyz_jnt_gt=xyz_jnt_gt[:,our_index,:]
    # Project 3D xyz into image-space uvd for the 'mega' camera setup.
    uvd_jnt_gt =xyz_uvd.xyz2uvd(xyz=xyz_jnt_gt,setname='mega')
    return uvd_jnt_gt,xyz_jnt_gt,numpy.array(file_name)
def generate_fullimg_mask_from_file_unet(path,img_file_name,uvd,batch_size):
    """
    Infinite generator of full-resolution (depth, hand-mask) batches
    for U-Net training.

    :param path: dataset root; images are read from <path>/images/
    :param img_file_name: array of depth image file names
    :param uvd: per-image (21, 3) joint annotations in uvd space
    :param batch_size: images per yielded batch
    :yield: (x0, y): x0 float32 (batch, 480, 640, 1) = depth/2000,
        y uint8 (batch, 480, 640, 1) binary hand mask
    """
    centerU=315.944855
    phyMargin=50.0
    padWidth=200
    img_rows=480
    img_cols=640
    num_imgs=len(img_file_name)
    idx = numpy.arange(len(img_file_name))
    # Pad the index list so its length is a multiple of batch_size.
    num = (batch_size - num_imgs%batch_size)%batch_size
    idx = numpy.concatenate([idx,idx[0:num]],axis=0)
    n_batches = int(idx.shape[0]/batch_size)
    # Batch buffers are allocated once and overwritten each batch.
    x0 = numpy.zeros((batch_size,img_rows,img_cols,1),dtype='float32')
    y = numpy.zeros((batch_size,img_rows,img_cols,1),dtype='uint8')
    while 1:
        idx= shuffle(idx,random_state=0)
        for minibatch_index in range(n_batches):
            # print('minibatch_index',minibatch_index)
            slice_idx = range(minibatch_index * batch_size, (minibatch_index + 1) * batch_size,1)
            for mi, cur_idx in enumerate(list(idx[slice_idx])):
                cur_file_name = img_file_name[cur_idx]
                cur_uvd = uvd[cur_idx]
                # Convert the physical margin (mm) at the palm depth
                # (joint 9) to a pixel margin via the camera projection.
                bb = numpy.array([(phyMargin,phyMargin,cur_uvd[9,2])])
                bbox_uvd = xyz_uvd.xyz2uvd(setname='mega',xyz=bb)
                margin = int(numpy.ceil(bbox_uvd[0,0] - centerU))
                roiDepth = Image.open('%s/images/%s' %(path,cur_file_name))
                depth = numpy.asarray(roiDepth, dtype='uint16')
                x0[mi,:,:,0]=depth/2000.0
                # Tight uvd bounding box of the 21 joints.
                axis_bounds = numpy.array([numpy.min(cur_uvd[:, 0]), numpy.max(cur_uvd[:, 0]),
                                           numpy.min(cur_uvd[:, 1]), numpy.max(cur_uvd[:, 1]),
                                           numpy.min(cur_uvd[:, 2]), numpy.max(cur_uvd[:, 2])],dtype='int32')
                # Zero-pad the depth map so the margin-expanded crop
                # cannot run off the image edges.
                tmpDepth = numpy.zeros((depth.shape[0]+padWidth*2,depth.shape[1]+padWidth*2))
                tmpDepth[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]=depth
                crop = tmpDepth[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
                                axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth]
                # Pixels whose depth lies within the hand's depth range
                # (+/- the physical margin) are labeled hand.
                loc = numpy.where(numpy.logical_and(crop>axis_bounds[4]-phyMargin,crop<axis_bounds[5]+phyMargin))
                cropmask=numpy.zeros_like(crop)
                cropmask[loc]=1
                # Paste the crop mask back into full-image coordinates.
                orimask = numpy.zeros_like(tmpDepth,dtype='uint8')
                orimask[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
                        axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth] =cropmask
                y[mi,:,:,0] = orimask[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]
            yield (x0,y)
def generate_train(path,img_file_name,uvd,batch_size):
    """
    Infinite generator of downsampled (depth, hand-mask) batches for
    training: images are resized to 160x120 and padded to 160x128.

    :param path: dataset root; images are read from <path>/images/
    :param img_file_name: array of depth image file names
    :param uvd: per-image (21, 3) joint annotations in uvd space
    :param batch_size: images per yielded batch
    :yield: (all_input, all_mask), both (batch, 128, 160, 1); input is
        depth/2000 (float32), mask is binary (uint8)
    """
    num_imgs=uvd.shape[0]
    idx = numpy.arange(num_imgs)
    print('train.num',num_imgs)
    n_batches=int(idx.shape[0]/batch_size)
    phyMargin=50.0
    padWidth=200
    centerU=315.944855
    setname='mega'
    while True:
        idx= shuffle(idx,random_state=0)
        for minibatch_index in range(n_batches):
            # print('minibatch_index',minibatch_index)
            slice_idx = range(minibatch_index * batch_size, (minibatch_index + 1) * batch_size,1)
            all_input = numpy.zeros((batch_size,128,160,1),dtype='float32')
            all_mask=numpy.zeros((batch_size,128,160,1),dtype='uint8')
            for mi, cur_idx in enumerate(list(idx[slice_idx])):
                cur_file_name = img_file_name[cur_idx]
                cur_uvd = uvd[cur_idx]
                # Physical margin (mm) at palm depth -> pixel margin.
                bb = numpy.array([(phyMargin,phyMargin,cur_uvd[9,2])])
                bbox_uvd = xyz_uvd.xyz2uvd(setname=setname,xyz=bb)
                margin = int(numpy.ceil(bbox_uvd[0,0] - centerU))
                roiDepth = Image.open('%s/images/%s' %(path,cur_file_name))
                depth = numpy.asarray(roiDepth, dtype='uint16')
                # Tight uvd bounding box of the 21 joints.
                axis_bounds = numpy.array([numpy.min(cur_uvd[:, 0]), numpy.max(cur_uvd[:, 0]),
                                           numpy.min(cur_uvd[:, 1]), numpy.max(cur_uvd[:, 1]),
                                           numpy.min(cur_uvd[:, 2]), numpy.max(cur_uvd[:, 2])],dtype='int32')
                # Zero-pad so the margin-expanded crop stays in bounds.
                tmpDepth = numpy.zeros((depth.shape[0]+padWidth*2,depth.shape[1]+padWidth*2))
                tmpDepth[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]=depth
                crop = tmpDepth[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
                                axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth]
                # Hand = pixels inside the joints' depth range +/- margin.
                loc = numpy.where(numpy.logical_and(crop>axis_bounds[4]-phyMargin,crop<axis_bounds[5]+phyMargin))
                cropmask=numpy.zeros_like(crop)
                cropmask[loc]=1
                orimask = numpy.zeros_like(tmpDepth,dtype='uint8')
                orimask[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
                        axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth] =cropmask
                orimask = orimask[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]
                # Downsample the mask, then re-binarize (resize blurs it).
                orimask = resize(orimask,(120,160), order=3,preserve_range=True)
                orimask[numpy.where(orimask>0)]=1
                # Rows 4:124 center the 120-row image in the 128-row tensor.
                all_mask[mi,4:124,:,0]=orimask
                all_input[mi,4:124,:,0]=resize(depth,(120,160), order=3,preserve_range=True)/2000.0
            yield (all_input,all_mask)
def generate_downsample_img_mask_from_file_unet_aug(path,img_file_name,uvd,batch_size):
    """
    Like generate_train, but with crop-jitter augmentation: each batch
    yields 2*batch_size samples -- the original image plus a randomly
    scaled/cropped copy.

    :param path: dataset root; images are read from <path>/images/
    :param img_file_name: array of depth image file names
    :param uvd: per-image (21, 3) joint annotations in uvd space
    :param batch_size: source images per batch (output is doubled)
    :yield: (all_input, all_mask), both (2*batch_size, 128, 160, 1)
    """
    num_imgs=uvd.shape[0]
    idx = numpy.arange(num_imgs)
    print('train.num',num_imgs)
    n_batches=int(idx.shape[0]/batch_size)
    phyMargin=50.0
    padWidth=200
    centerU=315.944855
    setname='mega'
    while True:
        idx= shuffle(idx,random_state=0)
        for minibatch_index in range(n_batches):
            # print('minibatch_index',minibatch_index)
            slice_idx = range(minibatch_index * batch_size, (minibatch_index + 1) * batch_size,1)
            # First half holds the originals, second half the jittered copies.
            all_input = numpy.zeros((batch_size*2,128,160,1),dtype='float32')
            all_mask=numpy.zeros((batch_size*2,128,160,1),dtype='uint8')
            for mi, cur_idx in enumerate(list(idx[slice_idx])):
                cur_file_name = img_file_name[cur_idx]
                cur_uvd = uvd[cur_idx]
                # Physical margin (mm) at palm depth -> pixel margin.
                bb = numpy.array([(phyMargin,phyMargin,cur_uvd[9,2])])
                bbox_uvd = xyz_uvd.xyz2uvd(setname=setname,xyz=bb)
                margin = int(numpy.ceil(bbox_uvd[0,0] - centerU))
                roiDepth = Image.open('%s/images/%s' %(path,cur_file_name))
                depth = numpy.asarray(roiDepth, dtype='uint16')
                # Tight uvd bounding box of the 21 joints.
                axis_bounds = numpy.array([numpy.min(cur_uvd[:, 0]), numpy.max(cur_uvd[:, 0]),
                                           numpy.min(cur_uvd[:, 1]), numpy.max(cur_uvd[:, 1]),
                                           numpy.min(cur_uvd[:, 2]), numpy.max(cur_uvd[:, 2])],dtype='int32')
                # Zero-pad so the margin-expanded crop stays in bounds.
                tmpDepth = numpy.zeros((depth.shape[0]+padWidth*2,depth.shape[1]+padWidth*2))
                tmpDepth[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]=depth
                crop = tmpDepth[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
                                axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth]
                # Hand = pixels inside the joints' depth range +/- margin.
                loc = numpy.where(numpy.logical_and(crop>axis_bounds[4]-phyMargin,crop<axis_bounds[5]+phyMargin))
                cropmask=numpy.zeros_like(crop)
                cropmask[loc]=1
                tmpMask = numpy.zeros_like(tmpDepth,dtype='uint8')
                tmpMask[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
                        axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth] =cropmask
                orimask = tmpMask[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]
                # Downsample + re-binarize for the unjittered sample.
                orimask = resize(orimask,(120,160), order=3,preserve_range=True)
                orimask[numpy.where(orimask>0)]=1
                all_mask[mi,4:124,:,0]=orimask
                all_input[mi,4:124,:,0]=resize(depth,(120,160), order=3,preserve_range=True)/2000.0
                # Augmentation: crop the padded image/mask symmetrically
                # by a random offset, effectively re-scaling the view.
                jiter_width = numpy.random.randint(low=-padWidth,high=padWidth,size=1)[0]
                # jiter_width = numpy.random.randint(low=-int(padWidth/2),high=int(padWidth/2),size=1)[0]
                # print(jiter_width)
                jiter_mask = tmpMask[jiter_width+padWidth:padWidth+depth.shape[0]-jiter_width,jiter_width+padWidth:padWidth+depth.shape[1]-jiter_width]
                jiter_depth = tmpDepth[jiter_width+padWidth:padWidth+depth.shape[0]-jiter_width,jiter_width+padWidth:padWidth+depth.shape[1]-jiter_width]
                orimask = resize(jiter_mask,(120,160), order=3,preserve_range=True)
                orimask[numpy.where(orimask>0)]=1
                all_mask[mi+batch_size,4:124,:,0]=orimask
                all_input[mi+batch_size,4:124,:,0]=resize(jiter_depth,(120,160), order=3,preserve_range=True)/2000.0
                #
                # fig = plt.figure()
                # ax=fig.add_subplot(221)
                # ax.imshow(all_input[mi,4:124,:,0])
                # ax=fig.add_subplot(222)
                # ax.imshow(all_mask[mi,4:124,:,0])
                #
                # ax=fig.add_subplot(223)
                # ax.imshow(all_input[mi+batch_size,4:124,:,0])
                # ax=fig.add_subplot(224)
                # ax.imshow(all_mask[mi+batch_size,4:124,:,0])
                # plt.show()
            yield (all_input,all_mask)
def generate_fullimg_mask_from_file_unet_for_test(path,img_file_name,uvd,batch_size,n_batches):
    """
    Build a fixed evaluation set of batch_size*n_batches full-resolution
    (depth, hand-mask) pairs (no shuffling; returns arrays, not a
    generator).

    :param path: dataset root; images are read from <path>/images/
    :param img_file_name: depth image file names; must contain at
        least batch_size*n_batches entries
    :param uvd: per-image (21, 3) joint annotations in uvd space
    :param batch_size: samples per batch
    :param n_batches: number of batches to materialize
    :return: (x0, y), both (batch_size*n_batches, 480, 640, 1)
    """
    centerU=315.944855
    phyMargin=50.0
    padWidth=200
    img_rows=480
    img_cols=640
    # Take the first batch_size*n_batches images. The original did
    # numpy.arange(len(batch_size*n_batches)), which raises TypeError
    # (len() of an int); its index-padding step is dropped too, since
    # x0/y are allocated for exactly this many rows and extra indices
    # would overrun them.
    idx = numpy.arange(batch_size*n_batches)
    x0 = numpy.zeros((batch_size*n_batches,img_rows,img_cols,1),dtype='float32')
    y = numpy.zeros((batch_size*n_batches,img_rows,img_cols,1),dtype='uint8')
    for mi, cur_idx in enumerate(list(idx)):
        cur_file_name = img_file_name[cur_idx]
        cur_uvd = uvd[cur_idx]
        # Physical margin (mm) at palm depth -> pixel margin.
        bb = numpy.array([(phyMargin,phyMargin,cur_uvd[9,2])])
        bbox_uvd = xyz_uvd.xyz2uvd(setname='mega',xyz=bb)
        margin = int(numpy.ceil(bbox_uvd[0,0] - centerU))
        roiDepth = Image.open('%s/images/%s' %(path,cur_file_name))
        depth = numpy.asarray(roiDepth, dtype='uint16')
        x0[mi,:,:,0]=depth/2000.0
        # Tight uvd bounding box of the 21 joints.
        axis_bounds = numpy.array([numpy.min(cur_uvd[:, 0]), numpy.max(cur_uvd[:, 0]),
                                   numpy.min(cur_uvd[:, 1]), numpy.max(cur_uvd[:, 1]),
                                   numpy.min(cur_uvd[:, 2]), numpy.max(cur_uvd[:, 2])],dtype='int32')
        # Zero-pad so the margin-expanded crop stays in bounds.
        tmpDepth = numpy.zeros((depth.shape[0]+padWidth*2,depth.shape[1]+padWidth*2))
        tmpDepth[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]=depth
        crop = tmpDepth[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
                        axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth]
        # Hand = pixels inside the joints' depth range +/- margin.
        loc = numpy.where(numpy.logical_and(crop>axis_bounds[4]-phyMargin,crop<axis_bounds[5]+phyMargin))
        cropmask=numpy.zeros_like(crop)
        cropmask[loc]=1
        orimask = numpy.zeros_like(tmpDepth,dtype='uint8')
        orimask[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
                axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth] =cropmask
        y[mi,:,:,0] = orimask[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]
    return x0,y
def generate_arrays_from_file_unet(path,img_file_name,uvd,batch_size):
    """
    Infinite generator of (depth image, palm-center heatmap) batches
    for the detection net: input is 160x128 (120 rows padded to 128),
    the target is downsampled by 8 with a single 1 at the palm-center
    cell.

    :param path: dataset root; images are read from <path>/images/
    :param img_file_name: array of depth image file names
    :param uvd: per-image (21, 3) joint annotations in uvd space
    :param batch_size: images per yielded batch
    :yield: (x0, y): x0 float32 (batch, 128, 160, 1), y float32
        (batch, 16, 20, 1) one-hot palm-center grid
    """
    output_down_ratio = 8.0
    img_rows=120
    img_cols=160
    num_imgs=len(img_file_name)
    idx = numpy.arange(len(img_file_name))
    # Pad the index list so its length is a multiple of batch_size.
    num = (batch_size - num_imgs%batch_size)%batch_size
    idx = numpy.concatenate([idx,idx[0:num]],axis=0)
    n_batches = int(idx.shape[0]/batch_size)
    x0 = numpy.zeros((batch_size,img_rows+8,img_cols,1),dtype='float32')
    y = numpy.zeros((batch_size,int((img_rows+8)/output_down_ratio),int(img_cols/output_down_ratio),1),dtype='float32')
    # print('$'*20, 'validataion n_batches', n_batches)
    # Target grid size. The original read target_cols from y[1], which
    # indexes the *second sample* and raises IndexError when
    # batch_size == 1; y[0] has the same (rows, cols, 1) shape for any
    # batch size.
    target_rows = y[0].shape[0]
    target_cols = y[0].shape[1]
    while 1:
        idx= shuffle(idx,random_state=0)
        for minibatch_index in range(n_batches):
            # print('minibatch_index',minibatch_index)
            slice_idx = range(minibatch_index * batch_size, (minibatch_index + 1) * batch_size,1)
            for mi, cur_idx in enumerate(list(idx[slice_idx])):
                cur_file_name = img_file_name[cur_idx]
                cur_uvd = uvd[cur_idx]
                roiDepth = Image.open('%s/images/%s' %(path,cur_file_name))
                roiDepth = numpy.asarray(roiDepth, dtype='uint16')/2000.0
                depth = resize(roiDepth,(img_rows,img_cols), order=3,preserve_range=True)
                # Palm center (joint 9) mapped into the target grid:
                # /4 matches the 640->160 downscale, +4 the row padding.
                u_norm = int(cur_uvd[9,0]/4/output_down_ratio)
                v_norm = int((cur_uvd[9,1]/4.0+4)/output_down_ratio)
                # Only mark cells that actually fall inside the grid.
                if v_norm>0 and u_norm >0 and v_norm<target_rows and u_norm < target_cols:
                    y[mi,v_norm,u_norm,0]=1
                x0[mi,4:(4+img_rows),:,0]=depth
            yield (x0,y)
def tmp(path,img_file_name,uvd,batch_size):
    """
    Debug helper: for each sample, shows the padded depth input next
    to its downsampled version with the palm-center target cell
    scattered on top. Blocks on plt.show(); not used in training.
    """
    output_down_ratio = 4.0
    img_rows=120
    img_cols=160
    num_imgs=uvd.shape[0]
    idx = numpy.arange(num_imgs)
    # Pad the index list so its length is a multiple of batch_size.
    num = (batch_size - num_imgs%batch_size)%batch_size
    idx = numpy.concatenate([idx,idx[0:num]],axis=0)
    n_batches = int(idx.shape[0]/batch_size)
    x0 = numpy.zeros((batch_size,img_rows+8,img_cols,1),dtype='float32')
    y = numpy.zeros((batch_size,int((img_rows+8)/output_down_ratio),int(img_cols/output_down_ratio),1),dtype='float32')
    # print('$'*20, 'validataion n_batches', n_batches)
    idx= shuffle(idx,random_state=0)
    for minibatch_index in range(n_batches):
        # print minibatch_index
        slice_idx = range(minibatch_index * batch_size, (minibatch_index + 1) * batch_size,1)
        for mi, cur_idx in enumerate(list(idx[slice_idx])):
            cur_file_name = img_file_name[cur_idx]
            cur_uvd = uvd[cur_idx]
            roiDepth = Image.open('%s/images/%s' %(path,cur_file_name))
            roiDepth = numpy.asarray(roiDepth, dtype='uint16')/2000.0
            print(numpy.max(roiDepth))
            depth = resize(roiDepth,(img_rows,img_cols), order=3,preserve_range=True)
            # Palm center (joint 9) mapped into the downsampled grid.
            u_norm = int(cur_uvd[9,0]/16)
            v_norm = int((cur_uvd[9,1]/4.0+4)/4.0)
            y[mi,v_norm,u_norm,0]=1
            x0[mi,4:(4+img_rows),:,0]=depth
            # Side-by-side visual check of input and target location.
            plt.imshow(x0[mi,:,:,0],'gray')
            plt.figure()
            tmp = resize(x0[mi,:,:,0],(y[0].shape[0],y[0].shape[1]), order=3,preserve_range=True)
            plt.imshow(tmp,'gray')
            plt.scatter(u_norm,v_norm)
            plt.show()
    # print('yield validataion minibatch_index ',minibatch_index)
def generate_fullimg_mask_from_file_unet_show(path,img_file_name,uvd,batch_size):
    """Visualize the full-resolution hand mask derived from joint locations.

    For each sample: compute a pixel margin from a 50 mm physical margin at
    the hand's depth, crop the joint bounding box (with margin) out of a
    zero-padded depth image, keep only pixels inside the depth range of the
    joints, and display the original depth next to the resulting binary mask.

    Args:
        path: dataset root; images are read from '<path>/images/<name>'.
        img_file_name: sequence of image file names.
        uvd: per-image joint coordinates in (u, v, depth); joint 9's depth
             anchors the physical-to-pixel margin conversion.
        batch_size: number of samples grouped per (shuffled) minibatch.
    """
    centerU=315.944855
    phyMargin=50.0
    padWidth=200
    img_rows=480
    img_cols=640
    num_imgs=len(img_file_name)
    idx = numpy.arange(len(img_file_name))
    # Pad the index list so its length is an exact multiple of batch_size.
    num = (batch_size - num_imgs%batch_size)%batch_size
    idx = numpy.concatenate([idx,idx[0:num]],axis=0)
    n_batches = int(idx.shape[0]/batch_size)
    idx= shuffle(idx,random_state=0)
    for minibatch_index in range(n_batches):
        # print('minibatch_index',minibatch_index)
        slice_idx = range(minibatch_index * batch_size, (minibatch_index + 1) * batch_size,1)
        for mi, cur_idx in enumerate(list(idx[slice_idx])):
            cur_file_name = img_file_name[cur_idx]
            cur_uvd = uvd[cur_idx]
            # Convert the 50 mm physical margin (at the hand's depth) into a
            # pixel margin via the camera projection helper.
            bb = numpy.array([(phyMargin,phyMargin,cur_uvd[9,2])])
            bbox_uvd = xyz_uvd.xyz2uvd(setname='mega',xyz=bb)
            margin = int(numpy.ceil(bbox_uvd[0,0] - centerU))
            roiDepth = Image.open('%s/images/%s' %(path,cur_file_name))
            depth = numpy.asarray(roiDepth, dtype='uint16')
            # [u_min, u_max, v_min, v_max, d_min, d_max] over all joints.
            axis_bounds = numpy.array([numpy.min(cur_uvd[:, 0]), numpy.max(cur_uvd[:, 0]),
                                       numpy.min(cur_uvd[:, 1]), numpy.max(cur_uvd[:, 1]),
                                       numpy.min(cur_uvd[:, 2]), numpy.max(cur_uvd[:, 2])],dtype='int32')
            # Zero-pad by padWidth on each side so margin-extended crops can
            # never index outside the image.
            tmpDepth = numpy.zeros((depth.shape[0]+padWidth*2,depth.shape[1]+padWidth*2))
            tmpDepth[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]=depth
            crop = tmpDepth[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
                            axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth]
            # Keep pixels whose depth lies within the joints' depth range
            # (extended by the physical margin on both sides).
            loc = numpy.where(numpy.logical_and(crop>axis_bounds[4]-phyMargin,crop<axis_bounds[5]+phyMargin))
            cropmask=numpy.zeros_like(crop)
            cropmask[loc]=1
            # Paste the crop mask back into full (padded) image coordinates,
            # then strip the padding again.
            orimask = numpy.zeros_like(tmpDepth,dtype='uint8')
            orimask[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
                    axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth] =cropmask
            mask= orimask[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]
            plt.figure()
            plt.imshow(depth,'gray')
            plt.figure()
            plt.imshow(mask,'gray')
            plt.show()
def generate_train_tmp(path,img_file_name,uvd,batch_size):
    """Debug walk-through of the training input/mask pipeline with plots.

    Builds, per sample, the 160x128 normalized depth input and the matching
    binary hand mask (same margin/crop logic as the mask generator), showing
    intermediate images along the way, then shows the whole batch.

    Args:
        path: dataset root; images are read from '<path>/images/<name>'.
        img_file_name: sequence of image file names.
        uvd: per-image joint coordinates in (u, v, depth).
        batch_size: number of samples per minibatch.
    """
    num_imgs=uvd.shape[0]
    idx = numpy.arange(num_imgs)
    print(idx.shape)
    # NOTE(review): unlike the other generators, the index list is NOT padded
    # to a multiple of batch_size here; the last partial batch is dropped.
    n_batches=int(idx.shape[0]/batch_size)
    phyMargin=50.0
    padWidth=200
    centerU=315.944855
    setname='mega'
    idx= shuffle(idx,random_state=0)
    for minibatch_index in range(n_batches):
        # print('minibatch_index',minibatch_index)
        slice_idx = range(minibatch_index * batch_size, (minibatch_index + 1) * batch_size,1)
        # 160x120 content padded by 4 rows top/bottom -> 128 rows.
        all_input = numpy.zeros((batch_size,128,160,1),dtype='float32')
        all_mask=numpy.zeros((batch_size,128,160,1),dtype='uint8')
        for mi, cur_idx in enumerate(list(idx[slice_idx])):
            cur_file_name = img_file_name[cur_idx]
            cur_uvd = uvd[cur_idx]
            # 50 mm physical margin converted to pixels at the hand's depth.
            bb = numpy.array([(phyMargin,phyMargin,cur_uvd[9,2])])
            bbox_uvd = xyz_uvd.xyz2uvd(setname=setname,xyz=bb)
            margin = int(numpy.ceil(bbox_uvd[0,0] - centerU))
            roiDepth = Image.open('%s/images/%s' %(path,cur_file_name))
            depth = numpy.asarray(roiDepth, dtype='uint16')
            # [u_min, u_max, v_min, v_max, d_min, d_max] over all joints.
            axis_bounds = numpy.array([numpy.min(cur_uvd[:, 0]), numpy.max(cur_uvd[:, 0]),
                                       numpy.min(cur_uvd[:, 1]), numpy.max(cur_uvd[:, 1]),
                                       numpy.min(cur_uvd[:, 2]), numpy.max(cur_uvd[:, 2])],dtype='int32')
            # Zero-pad so margin-extended crops never fall off the image.
            tmpDepth = numpy.zeros((depth.shape[0]+padWidth*2,depth.shape[1]+padWidth*2))
            tmpDepth[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]=depth
            crop = tmpDepth[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
                            axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth]
            # Hand pixels = those inside the joints' (margin-extended) depth range.
            loc = numpy.where(numpy.logical_and(crop>axis_bounds[4]-phyMargin,crop<axis_bounds[5]+phyMargin))
            cropmask=numpy.zeros_like(crop)
            cropmask[loc]=1
            orimask = numpy.zeros_like(tmpDepth,dtype='uint8')
            orimask[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
                    axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth] =cropmask
            orimask = orimask[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]
            # Downscale mask to network resolution; re-binarize after the
            # cubic interpolation blurs edge values.
            orimask = resize(orimask,(120,160), order=3,preserve_range=True)
            orimask[numpy.where(orimask>0)]=1
            all_mask[mi,4:124,:,0]=orimask
            # Depth normalized by 2000.0 (assumes depths < 2 m — TODO confirm).
            all_input[mi,4:124,:,0]=resize(depth,(120,160), order=3,preserve_range=True)/2000.0
            plt.figure()
            plt.imshow(depth,'gray')
            plt.scatter(cur_uvd[:,0],cur_uvd[:,1])
            plt.figure()
            plt.imshow(orimask,'gray')
            plt.figure()
            plt.imshow(crop,'gray')
            plt.show()
        # Second pass: review the assembled batch one sample at a time.
        for i in range(batch_size):
            plt.figure()
            plt.imshow(all_input[i,:,:,0],'gray')
            plt.figure()
            plt.imshow(all_mask[i,:,:,0],'gray')
            plt.show()
import h5py
if __name__ == '__main__':
    # Local Windows paths for the BigHand/mega dataset (machine-specific).
    source_dir = 'F:/BigHand_Challenge/Training/'
    save_dir = 'F:/HuaweiProj/data/mega'
    # Load the precomputed file-name list and joint annotations.
    f = h5py.File('%s/source/test_mask.h5'%save_dir, 'r')
    filename = f['filename'][...]
    uvd = f['uvd'][...]
    f.close()
    # Visual sanity-check of the training input/mask pipeline.
    generate_train_tmp(path=source_dir,img_file_name=filename,uvd=uvd,batch_size=32)
    # base_dir = 'D:/Project/3DHandPose/Data_3DHandPoseDataset/NYU_dataset/NYU_dataset/'
    # generate_arrays_from_file_unet(path=base_dir,
    #                                dataset='test',num_imgs=8252,num_classes=17,batch_size=1)
    # # create_data(dataset='train',num_imgs = 72757)
    # # create_data(dataset='test',num_imgs = 8252)
    # uvd_jnt_gt,_,file_name=get_filenames_labels(dataset_dir=source_dir)
    # num_img=len(file_name)
    # idx = shuffle(numpy.arange(num_img),random_state=0)
    # img_idx_train = idx[:int(num_img*0.9)]
    # img_idx_test = idx[int(num_img*0.9):]
    # generate_train_tmp(path=source_dir,img_file_name=file_name[img_idx_test],uvd=uvd_jnt_gt[img_idx_test],batch_size=32)
    #
    # generate_fullimg_mask_from_file_unet_show(path=source_dir,img_file_name=file_name[img_idx_test],uvd=uvd_jnt_gt[img_idx_test],batch_size=32)
|
[
"aaronlws95@gmail.com"
] |
aaronlws95@gmail.com
|
f7f7eda4a188511ca65e1cb0a7660387d2ce5312
|
d042b8895dc8347356fa4d5984d07bff41eecc73
|
/obtainfo/views/views.py
|
33c81e13d9f89ceb67d0dc937a7b184612156a68
|
[
"Apache-2.0"
] |
permissive
|
jzx1230/obtainfo
|
257b075c32c3448096391f258f42dd7f0c081350
|
883c29ab0a462d11682b60b9b52b2fc93031b816
|
refs/heads/master
| 2021-05-08T04:19:33.810848
| 2015-10-13T10:10:10
| 2015-10-13T10:10:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,997
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db.models import Q
from django.conf import settings
from django.shortcuts import render
from django.core import serializers
from django.forms.models import model_to_dict
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.cache import cache_page
from django.utils.encoding import force_unicode
from detail import used_time_tag
from obtainfo.models import SearchKey, MovieInfo, BigPoster, Series
from obtainfo.templatetags.obtainfo_tags import pic as render_pic
from pcnile.search import FullTextSearchClient
from pcnile.http import json, JsonResponse
from pcnile.paginator import get_page_part
from pcnile.helper import md5sum, group_list
from bson.objectid import ObjectId
import re
import os
import base64
import random
import datetime
import pymongo
import time
import logging
# Patterns for recognising external ids typed into the search box.
re_imdb = re.compile(r"tt\d+")    # IMDb title ids, e.g. tt0111161
re_douban = re.compile(r"dd\d+")  # douban ids entered with a 'dd' prefix
logger = logging.getLogger(__name__)

# A MongoDB ObjectId is exactly 24 hexadecimal characters.
_re_oid = re.compile(r'^[0-9a-fA-F]{24}$')


def verify_oid(oid):
    """Return True if *oid* looks like a valid 24-hex-char MongoDB ObjectId."""
    # def instead of an assigned lambda (PEP 8 E731); pattern precompiled.
    return bool(_re_oid.match(oid))

# Mongo projection for list pages: only the fields the templates render.
index_field = {'type': 1, 'bigpic': 1, 'title': 1, 'language': 1, 'year': 1, 'douban': 1, 'genre': 1, 'resource': 1,
               'area': 1, 'director': 1, 'actor': 1, 'plot': 1, 'finish': 1}
def get_request_page(request_dict, default=1):
    """Return the 'page' entry of *request_dict* as an int.

    Falls back to *default* when the key is missing or the value is not an
    integer (e.g. 'page=abc' in a crafted query string).
    """
    try:
        return int(request_dict['page'])
    except (KeyError, ValueError, TypeError):
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # and genuine programming errors are no longer swallowed.
        return default
def search(request):
    """Site search view (GET only).

    Dispatches on the query shape: IMDb ids ('tt…') and douban ids ('dd…')
    hit Mongo directly; anything else goes through full-text search. Unknown
    full-text keys are recorded as SearchKey rows for later curation.
    """
    if request.method == "POST":
        raise Http404
    try:
        key = request.GET['search'].strip()
    except:
        # Missing 'search' parameter: back to the front page.
        return HttpResponseRedirect("/")
    page = get_request_page(request.GET)
    if key == '':
        errors = u"请输入搜索的关键词哦!"
        m = {'results': [], 'errors': errors, 'search': key}
    elif len(key) > 25:
        errors = u"不好意思!您的关键词太长了......"
        m = {'results': [], 'errors': errors, 'search': key}
    elif re_imdb.match(key):
        # IMDb id lookup, e.g. 'tt0111161'.
        key = re_imdb.match(key).group()
        collection = settings.MONGOINFO
        results = collection.find({'imdb': key}, index_field)
        contacts = get_page_part(results, request.GET.get('page'))
        m = {'results': contacts, 'pages': contacts, 'search': key}
    elif re_douban.match(key):
        # douban id lookup; strip the 'dd' prefix before querying.
        key = re_douban.match(key).group()
        collection = settings.MONGOINFO
        results = collection.find({'douban.id': key[2:]}, index_field)
        contacts = get_page_part(results, request.GET.get('page'))
        m = {'results': contacts, 'pages': contacts, 'search': key}
    else:
        client = FullTextSearchClient(settings.MONGOINFO)
        (contacts, results) = client.query_page(key, page)
        if results.count() == 0:
            # Remember misses so missing titles can be added later; the
            # encode fallback guards against non-unicode-safe keys.
            try:
                sk = SearchKey(key=force_unicode(key).encode('utf-8'))
            except:
                sk = SearchKey(key=key)
            sk.save()
            results = []
            errors = u"对不起!我们目前还没有您要的资源......"
        else:
            errors = ''
        m = {'results': results, 'pages': contacts, 'errors': errors, 'search': key}
    return render(request, 'search.html', {"m": m}, )
@cache_page(60 * 60)
@used_time_tag
def index(request, query):
    """Genre landing page: a path like 'genre-<value>_…' lists that genre.

    Any malformed or non-genre query segment redirects to the front page.
    """
    # generate query string
    try:
        # Expected shape: '<key>-<value>' before the first '_'.
        k, v = query.split('_')[0].split('-')
        if k == 'genre':
            query_obj = {k: v}
        else:
            # Only genre filtering is supported on this route.
            return HttpResponseRedirect("/")
    except:
        # split() failed -> malformed path segment.
        return HttpResponseRedirect("/")
    collection = settings.MONGOINFO
    results = collection.find(query_obj).sort("year", -1)
    contacts = get_page_part(results, get_request_page(request.GET))
    return render(request, 'search.html', {"m": {'results': contacts, 'pages': contacts, 'search': ''}}, )
@cache_page(60 * 60)
def sindex(request): # selection index
    """List all curated selections, newest first, each with a cover image."""
    db = settings.MONGODB
    results = db.selection.find().sort("addtime", -1)
    contacts = get_page_part(results, get_request_page(request.GET))
    results = list()
    collection = settings.MONGOINFO
    # Use the first movie of each selection as its cover picture.
    for c in contacts:
        c['pic'] = collection.find_one({'_id': ObjectId(c['list'][0])}, {'stdpic': 1})['stdpic']
        results.append(c)
    m = {'results': results, 'pages': contacts, 'search': ''}
    return render(request, 'sindex.html', {"m": m}, )
"""
build selection block
"""
@cache_page(60 * 60)
def selection(request, sid):
    """Render one curated selection page.

    Looks the selection document up by its ObjectId, raises 404 for a
    malformed or unknown id, then pages through the referenced movies.
    """
    try:
        db = settings.MONGODB
        s = db.selection.find_one({'_id': ObjectId(sid)})
        # `is None` instead of `== None` (PEP 8). The Http404 raised here is
        # not caught by the InvalidId handler below and propagates as intended.
        if s is None:
            raise Http404
    except pymongo.errors.InvalidId:
        logger.error('get an unused selection id %s' % sid)
        raise Http404
    contacts = get_page_part(s['list'], get_request_page(request.GET))
    collection = settings.MONGOINFO
    results = collection.find({'_id': {'$in': [ObjectId(oid) for oid in contacts]}})
    m = {'results': results, 'pages': contacts}
    return render(request, 'selection.html', {"m": m, 'title': s['title']}, )
@csrf_exempt
def retrieve(request):
    """Faceted movie browser.

    GET renders the filter UI; POST receives a JSON body of facet indices
    ({'genre': i, 'area': i, 'year': i, 'resource': i, 'type': i,
    'order': i, 'page': n}) and returns one JSON page of matches.
    Each facet table maps display labels 'k' to query values 'v' by index.
    """
    genre = {
        'title': u'类型:',
        'name': 'genre',
        'k': [u'全部', u'剧情', u'喜剧', u'爱情', u'科幻', u'动作', u'惊悚', u'恐怖', u'冒险', u'奇幻', u'家庭', u'记录片', u'古装', u'战争', u'历史',
              u'西部', u'悬疑', u'奇幻'],
        'v': [u'', u'剧情', u'喜剧', u'爱情', u'科幻', u'动作', u'惊悚', u'恐怖', u'冒险', u'奇幻', u'家庭', u'记录片', u'古装', u'战争', u'历史',
              u'西部', u'悬疑', u'奇幻']
    }
    area = {
        'title': u'地区:',
        'name': 'area',
        'k': [u'全部', u'内地', u'美国', u'英国', u'韩国', u'日本', u'香港', u'台湾', u'印度', u'英国', u'法国', u'意大利', u'德国', u'泰国', u'西班牙',
              u'瑞典', u'俄罗斯'],
        'v': [u'', u'中国', u'美国', u'英国', u'韩国', u'日本', u'香港', u'台湾', u'印度', u'英国', u'法国', u'意大利', u'德国', u'泰国', u'西班牙',
              u'瑞典', u'俄罗斯']
    }
    year = {
        'title': u'年代:',
        'name': 'year',
        'k': [u'全部', u'2014', u'2013', u'2012', u'2011', u'2010', u'2009', u'2008', u'2007', u'2006', u'2005', u'2004',
              u'2003', u'2002', u'2001', u'2000', u'1999', u'1998'],
        'v': [u'', u'2014', u'2013', u'2012', u'2011', u'2010', u'2009', u'2008', u'2007', u'2006', u'2005', u'2004',
              u'2003', u'2002', u'2001', u'2000', u'1999', u'1998']
    }
    # Resource facet values are complete Mongo sub-queries, not field values;
    # the POST handler below special-cases them.
    resource = {
        'title': u'资源:',
        'name': 'resource',
        'k': [u'不限', u'在线', u'网盘', u'3D高清', u'高清', u'普清', u'尝鲜'],
        'v': [u'', {'resource.online': {'$gt': 0}}, {'resource.netdisk': {'$gt': 0}}, {'resource.stereo': {'$gt': 0}},
              {'resource.hd': {'$gt': 0}}, {'resource.dvd': {'$gt': 0}}, {'resource.cam': {'$gt': 0}}]
    }
    sub_type = {
        'title': u'主题:',
        'name': 'type',
        'k': [u'不限', u'电影', u'电视剧'],
        'v': [u'', 'movie', 'tv']
    }
    order = {
        'title': u'排序:',
        'name': 'order',
        'k': [u'默认', u'热门', u'经典', u'最新上映', u'添加时间'],
        'v': [
            [('year', pymongo.ASCENDING), ('addtime', pymongo.ASCENDING)],
            {'year': -1, 'douban.ranking.count': -1, 'douban.ranking.score': -1},
            [("douban.ranking.count", -1), ("douban.ranking.score", -1)],
            [("showtime", pymongo.DESCENDING)],
            [("addtime", pymongo.DESCENDING)]
        ]
    }
    table = {'genre': genre, 'area': area, 'year': year, 'resource': resource, 'type': sub_type}
    if request.method == 'POST':
        try:
            js = json.loads(request.body)
        except:
            # Unparsable body -> soft failure the frontend understands.
            return JsonResponse({'status': 'fail'})
        qs = list()
        # NOTE(review): sort_key is computed but never applied to the query
        # below — the chosen ordering currently has no effect.
        sort_key = order['v'][js['order']]
        # Translate each selected facet index into a Mongo clause; index 0
        # (the empty value) means "no filter" and is skipped.
        for k, v in table.items():
            v = v['v'][js[k]]
            if v:
                if k == 'resource':
                    qs.append(v)
                else:
                    qs.append({k: v})
        collection = settings.MONGOINFO
        if len(qs):
            results = collection.find({'$and': qs}, {'title': 1, 'stdpic': 1, 'actor': 1}).limit(3500)
        else:
            results = collection.find({}, {'title': 1, 'stdpic': 1, 'actor': 1}).limit(3500)
        contacts = get_page_part(results, js['page'], 20)
        for c in contacts.object_list:
            # Resolve the picture URL and keep only the lead actor.
            c['stdpic'] = render_pic(c['stdpic'])
            try:
                c['actor'] = c['actor'][0]
            except IndexError:
                pass
        page = {
            'has_previous': contacts.has_previous(), 'has_next': contacts.has_next(),
            'current': str(contacts.number), 'range': contacts.paginator.page_range_ext,
        }
        if page['has_previous']:
            page['previous_page_number'] = contacts.previous_page_number()
        if page['has_next']:
            page['next_page_number'] = contacts.next_page_number()
        return JsonResponse({'status': 'success', 'results': contacts.object_list, 'page': page})
    return render(request, 'retrieve.html', {'table': [genre, area, year, resource, sub_type, order]}, )
@cache_page(10 * 60)
def lazy(request):
    """AJAX sidebar data: a ranking list plus an optional series tree.

    Query params:
        s -- 'hot' for the most-visited ranking, anything else (default
             'recommend') for the editor-recommended ranking.
        n -- number of ranking entries, clamped to [10, 60); default 30.
        o -- optional 24-hex-char Series object id; when valid, the whole
             series tree (root + descendants) is returned for navigation.
    """
    sidebar = request.GET.get('s', 'recommend')
    # Sanitize the requested count; anything unusable falls back to 30.
    # (Narrowed from a bare `except:`; the py2-only xrange membership test
    # is replaced by an equivalent chained comparison.)
    try:
        number = abs(int(request.GET.get('n', 30)))
    except (TypeError, ValueError):
        number = 30
    if not 10 <= number < 60:
        number = 30
    oid = request.GET.get('o', '')
    if verify_oid(oid):
        try:
            series = [{'id': s.id, 'no': s.sequence, 'title': s.title} for s in
                      Series.objects.get(id=oid).get_root().get_descendants(include_self=True)]
        except Exception:
            # Unknown id or broken tree: degrade to an empty list.
            series = []
    else:
        series = []
    # The first entry must have a poster image; the rest are text-only.
    if sidebar == 'hot':
        title = u"大家都在看"
        recommends = MovieInfo.objects
        first = recommends.filter(Q(image__isnull=False) & ~Q(image='')).order_by("-visitor")[0]
        second = recommends.filter(Q(image__isnull=True) | Q(image='')).order_by("-visitor")[:number - 1]
    else:
        title = u"编辑墙裂推荐"
        recommends = MovieInfo.objects.filter(recommend=True)
        first = recommends.filter(Q(image__isnull=False) & ~Q(image='')).order_by("-timestamp")[0]
        second = recommends.filter(Q(image__isnull=True) | Q(image='')).order_by("-timestamp")[:number - 1]
    ranking = dict()
    ranking['title'] = title
    ranking['first'] = {'id': first.id, 'title': first.title, 'image': first.image.url}
    # Positions start at 2 because 'first' occupies slot 1.
    ranking['second'] = [{'id': s.id, 'title': s.title, 'no': c + 2} for c, s in enumerate(second)]
    return JsonResponse({'ranking': ranking, 'series': series, 'status': 'success'})
@cache_page(10 * 60)
@used_time_tag
def main(request):
    """Front page: paged movie stream; page 1 also gets pinned posts,
    the big poster carousel, and the latest curated selections."""
    page = get_request_page(request.GET)
    # Canonicalize '/?page=1' back to '/' for caching/SEO.
    if page == 1 and 'page' in request.GET:
        return HttpResponseRedirect('/')
    collection = settings.MONGOINFO
    # Pinned ('top') movies are excluded from the normal stream and shown
    # separately on page 1 below.
    oids = [ObjectId(o.id) for o in MovieInfo.objects.filter(top=True)]
    results = collection.find({"_id": {"$nin": oids}}, index_field).sort('updatetime', -1).limit(3500)
    contacts = get_page_part(results, page)
    m = {'results': contacts, 'pages': contacts, 'index': False}
    if page == 1:
        # fill big poster content
        db = settings.MONGODB
        m['ontop'] = collection.find({"_id": {"$in": oids}}, index_field)
        m['big_poster'] = True
        # Latest 21 selections, grouped 7 per row for the template.
        m['selection'] = group_list([d for d in db.selection.find().sort('addtime', -1).limit(21)], 7)
        m['index'] = True
    return render(request, 'index.html', {"m": m}, )
|
[
"pczhaoyun@gmail.com"
] |
pczhaoyun@gmail.com
|
3655ea9552cbabdc6684ddb498d2ad34a35c0af9
|
a62ed8ef64a4e97d008a66115aeee759e9f5de0a
|
/_5_token/scripts/attack.py
|
fc7c74c28c431cf6e7cfe618c0ce9a3f29522173
|
[] |
no_license
|
replikeit/ethernaut_solutions
|
595ac5f27a547a1a45fba02f0a37ed7929ca1d93
|
6958228ba9be47272359a9b2837e6cf990b7f322
|
refs/heads/master
| 2023-07-21T05:45:16.806940
| 2021-08-29T15:54:06
| 2021-08-29T15:54:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,007
|
py
|
from brownie import *
from .reports import *
# Ethernaut 'Token' level exploit script (brownie).
a0 = accounts[0] # target deployer
a1 = accounts[1] # attacker account
BIGNUMBER = (10**18)  # one whole token/ether in base units (18 decimals)
REPORT = Report()
REPORT.add_account(a0, 'Target deployer')
REPORT.add_account(a1, 'Attacker')
def main():
    """Run the full exploit: build the pre-attack state, then attack it."""
    attack(prepare())
def prepare():
    """Deploy the vulnerable Token contract and stage the attacker's state.

    Leaves the attacker (a1) with roughly 1 ETH and exactly 20 tokens so the
    underflow in Token.transfer can be demonstrated. Returns the deployed
    Token contract instance.
    """
    # deploy the target contract
    target = Token.deploy(100*BIGNUMBER,{'from': a0})
    REPORT.add_token(target)
    # attacker should start with little ETH balance and small token balance
    balance_to_burn = a1.balance() - 1*BIGNUMBER
    burn_address = accounts[3]  # throwaway account used as an ETH sink
    a1.transfer(burn_address,balance_to_burn)
    target.transfer(a1,20,{'from': a0})
    REPORT.print()
    REPORT.txt_print('PRE-ATTACK STATE IS READY')
    return target
def attack(target):
    """Exploit the unchecked subtraction in Token.transfer.

    Transferring 21 tokens while holding only 20 underflows the attacker's
    unsigned balance, leaving a huge token balance instead of reverting.
    """
    # send 21 tokens while holding 20: balance underflows past zero
    target.transfer(a0,21,{'from':a1})
    if target.balanceOf(a1) > 20:
        REPORT.txt_print('ATTACK SUCCESSFUL')
    else:
        REPORT.txt_print('ATTACK IS NOT SUCCESSFUL')
|
[
"dan.ogurtsov@gmail.com"
] |
dan.ogurtsov@gmail.com
|
36d5daf08e54fc7a9f05e3301277467301632065
|
7b344848258ad46aeb30477f2767ecc804c8510e
|
/my_app/mysite/forms.py
|
d180f4c9c8344332d24d16540d6e31b07bcd0016
|
[] |
no_license
|
smok2288/Bboard_Dronov
|
7a34c1a6dfe52260252fb6d976fb49a8aabeeb29
|
2601458239fdaa63b496ac41c9d2413c516e07ab
|
refs/heads/master
| 2023-07-12T01:18:46.956571
| 2021-08-15T19:20:53
| 2021-08-15T19:20:53
| 396,465,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
from django.forms import ModelForm
from .models import Bd
class BdForm(ModelForm):
    """ModelForm for creating/editing a Bd bulletin-board entry."""

    class Meta:
        # Expose only the user-editable columns of the Bd model.
        model = Bd
        fields = ('title', 'content', 'price', 'rubric')
|
[
"https://github.com/smok2288"
] |
https://github.com/smok2288
|
87b45d6a7a8aa7945dc7a4015923d2d0f1b3f823
|
d22c7dc57fe448c60e82b31725115c5d8c69f995
|
/celery_config.py
|
73fa01fd03478bbfd4571a59c93bd0fb502c6cf1
|
[
"MIT"
] |
permissive
|
noanflaherty/self-replicating-repo
|
3b63be8f0849e7b88556a9dc40113f961f86735b
|
4977f24e0554cd160944f1449f3928e9f156606c
|
refs/heads/master
| 2023-01-24T15:18:23.729665
| 2018-11-19T16:23:59
| 2018-11-19T16:23:59
| 155,890,338
| 0
| 0
|
MIT
| 2023-01-13T22:54:17
| 2018-11-02T16:06:48
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 394
|
py
|
import os
from dotenv import load_dotenv
load_dotenv()
# From env vars
# Production (e.g. Heroku) uses the CloudAMQP broker; local development uses
# CELERY_BROKER_URL. Task results are always stored in Redis.
if os.environ.get('LOCATION') == 'PROD':
    broker_url = os.environ.get('CLOUDAMQP_URL')
else:
    broker_url = os.environ.get('CELERY_BROKER_URL')
result_backend = os.environ.get('REDIS_URL')
# Hard-coded
# Celery lowercase settings: JSON-only (de)serialization and UTC timestamps.
task_serializer='json'
accept_content=['json'] # Ignore other content
result_serializer='json'
enable_utc=True
|
[
"noaflaherty@gmail.com"
] |
noaflaherty@gmail.com
|
98782bae440c055b4d0065c2eec59b80824ffaee
|
28483b16e58f04219b9e25640ffbc36360641a0a
|
/charissa_johnson/username_validation/main/urls.py
|
62fc3207054e96f776c5feefe1f08491db402884
|
[] |
no_license
|
charissayj/python_july_2017
|
c69755a4d068440c2799b2b4a37ad15a4fb94a80
|
3939f823646b90b51f5c2d6f64699357728c3ab4
|
refs/heads/master
| 2020-12-02T06:18:14.106345
| 2017-07-27T20:20:47
| 2017-07-27T20:20:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 789
|
py
|
"""main URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# Delegate every incoming URL to the username_validation app's URLconf.
urlpatterns = [
    url(r'^', include('apps.username_validation.urls')),
]
|
[
"charissa.y.johnson@gmail.com"
] |
charissa.y.johnson@gmail.com
|
c8ce36e7f047b623defb9b3a946c5a7cb799aa02
|
be61a9f30274514857ea34297719157f1e5b8447
|
/fhir/resources/DSTU2/age.py
|
9975cdbeda716d349901880fad136791d72da6f6
|
[
"BSD-3-Clause"
] |
permissive
|
jwygoda/fhir.resources
|
ceff3a620100d2e875136b86d3e82816c0e60a33
|
5053565570d1ca992d9971d20db813c53fd350b9
|
refs/heads/master
| 2021-02-05T02:59:17.436485
| 2019-07-18T10:57:33
| 2019-07-18T10:57:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 951
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/Age) on 2019-05-14.
# 2019, SMART Health IT.
from . import quantity
class Age(quantity.Quantity):
    """ A duration (length of time) with a UCUM code.

    There SHALL be a code if there is a value and it SHALL be an expression of
    time.  If system is present, it SHALL be UCUM.  If value is present, it
    SHALL be positive.
    """

    # FHIR resource type name used during (de)serialization.
    resource_name = "Age"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Age adds no fields of its own; all behavior comes from Quantity.
        super(Age, self).__init__(jsondict=jsondict, strict=strict)
|
[
"connect2nazrul@gmail.com"
] |
connect2nazrul@gmail.com
|
2abc66004d8005e3de74c1faa0faa5d8838067b4
|
d2c29025d9a83b061ac515564290464b93b63a91
|
/main.py
|
8ea112c2789eec68d29b5119c37fc9ec0b469b27
|
[] |
no_license
|
smrussel/blog-site-flask
|
d50c666324d0c02e3dd86eaab4e097f3a8081d1d
|
0a416aef88cf90ffd37f2161863a3dd89e13ea32
|
refs/heads/master
| 2023-07-14T21:13:59.523806
| 2021-08-19T12:57:03
| 2021-08-19T12:57:03
| 397,868,403
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,468
|
py
|
from flask import Flask, render_template, redirect, url_for, flash, abort, request
from flask_bootstrap import Bootstrap
from flask_ckeditor import CKEditor
from datetime import date
from werkzeug.security import generate_password_hash, check_password_hash
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import relationship
from flask_login import UserMixin, login_user, LoginManager, login_required, current_user, logout_user
from forms import CreatePostForm, RegistrationForm, LoginForm, CommentForm
from flask_gravatar import Gravatar
from functools import wraps
import smtplib
import os
# SMTP credentials for the contact form, taken from the environment.
OWN_EMAIL = os.environ.get("OWN_EMAIL")
OWN_PASSWORD = os.environ.get("OWN_PASSWORD")
app = Flask(__name__)
app.config['SECRET_KEY'] = os.environ.get("SECRET_KEY")
ckeditor = CKEditor(app)
Bootstrap(app)
# Gravatar avatars for comment authors.
gravatar = Gravatar(app, size=100, rating='g', default='retro', force_default=False, force_lower=False,
                    use_ssl=False, base_url=None)
# #CONNECT TO DB
# Heroku exposes DATABASE_URL as 'postgres://…' but SQLAlchemy requires the
# 'postgresql://' scheme, hence the '://' -> 'ql://' rewrite.
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL').replace("://", "ql://", 1)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: resolve the session's stored id to a User row."""
    uid = int(user_id)
    return User.query.get(uid)
# Create admin-only decorator
def admin_only(f):
    """View decorator that permits only the admin account (user id 1).

    Any other visitor — anonymous or authenticated — receives HTTP 403.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        is_admin = current_user.is_authenticated and current_user.id == 1
        if is_admin:
            return f(*args, **kwargs)
        return abort(403)
    return wrapper
# #CONFIGURE TABLES
class User(UserMixin, db.Model):
    """Registered account; UserMixin supplies the Flask-Login interface."""
    __tablename__ = "users"
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(250), nullable=False)
    # NOTE(review): email has no unique constraint; uniqueness is only
    # enforced application-side in the register view.
    email = db.Column(db.String(250), nullable=False)
    # Stores the pbkdf2 hash, never the plain password.
    password = db.Column(db.String(250), nullable=False)
    # This will act like a List of BlogPost objects attached to each User.
    # The "author" refers to the author property in the BlogPost class.
    posts = relationship("BlogPost", back_populates="author")
    comments = relationship('Comment', back_populates='comment_author')
class BlogPost(db.Model):
    """A blog entry written by one User, with attached Comments."""
    __tablename__ = "blog_posts"
    id = db.Column(db.Integer, primary_key=True)
    # Create Foreign Key, "users.id" the users refers to the tablename of User.
    author_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    # Create reference to the User object, the "posts" refers to the posts protperty in the User class.
    author = relationship("User", back_populates="posts")
    title = db.Column(db.String(250), unique=True, nullable=False)
    subtitle = db.Column(db.String(250), nullable=False)
    # Display date stored as formatted text (e.g. "August 19, 2021"),
    # not as a DATE column — sorting by it would be lexicographic.
    date = db.Column(db.String(250), nullable=False)
    body = db.Column(db.Text, nullable=False)
    img_url = db.Column(db.String(250), nullable=False)
    # ***************Parent Relationship*************#
    comments = relationship("Comment", back_populates="parent_post")
class Comment(db.Model):
    """A comment: child of both a User (author) and a BlogPost (parent)."""
    __tablename__ = "comments"
    id = db.Column(db.Integer, primary_key=True)
    text = db.Column(db.Text, nullable=False)
    author_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    comment_author = relationship("User", back_populates="comments")
    # ***************Child Relationship*************#
    post_id = db.Column(db.Integer, db.ForeignKey("blog_posts.id"))
    parent_post = relationship("BlogPost", back_populates="comments")
# Create any missing tables at import time (no-op for existing tables).
db.create_all()
@app.route('/')
def get_all_posts():
    """Home page: render every blog post, newest-first ordering left to the template."""
    return render_template("index.html", all_posts=BlogPost.query.all())
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account, store a salted password hash, and log the user in.

    Redirects to the login page when the email is already registered.
    """
    register_form = RegistrationForm()
    if register_form.validate_on_submit():
        if User.query.filter_by(email=register_form.email.data).first():
            # Fixed grammar of the user-facing message ("already sign up").
            flash("You've already signed up with that email, log in instead.", category='danger')
            return redirect(url_for('login'))
        else:
            # pbkdf2:sha256 with a 16-char salt; never store the raw password.
            hash_password = generate_password_hash(register_form.password.data, method='pbkdf2:sha256', salt_length=16)
            new_user = User(name=register_form.name.data, email=register_form.email.data, password=hash_password)
            db.session.add(new_user)
            db.session.commit()
            login_user(new_user)
            return redirect(url_for('get_all_posts'))
    return render_template("register.html", form=register_form)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate by email + password; flash a specific error on failure."""
    login_form = LoginForm()
    if login_form.validate_on_submit():
        user = User.query.filter_by(email=login_form.email.data).first()
        if user is not None:
            if check_password_hash(user.password, login_form.password.data):
                login_user(user)
                return redirect(url_for('get_all_posts'))
            else:
                # Fixed missing space after the comma in the flash message.
                flash('Password incorrect, try again.', category='danger')
        else:
            # Fixed missing space after the comma in the flash message.
            flash('That email does not exist, please try again.', category='danger')
    return render_template("login.html", form=login_form)
@app.route("/logout")
@login_required
def logout():
    """End the current session and return to the post list."""
    logout_user()
    return redirect(url_for('get_all_posts'))
@app.route("/post/<int:post_id>", methods=['GET', 'POST'])
def show_post(post_id):
    """Display one post and handle comment submissions from logged-in users."""
    comment_form = CommentForm()
    # NOTE(review): .get() returns None for an unknown id; the template render
    # below would then fail — consider query.get_or_404(post_id).
    requested_post = BlogPost.query.get(post_id)
    if comment_form.validate_on_submit():
        if current_user.is_authenticated:
            new_comment = Comment(text=comment_form.text.data, comment_author=current_user, parent_post=requested_post)
            db.session.add(new_comment)
            db.session.commit()
            # NOTE(review): no redirect after a successful POST, so a browser
            # refresh re-submits the comment (Post/Redirect/Get not applied).
        else:
            flash('You need to login or register to comment.', category='danger')
            return redirect(url_for('login'))
    return render_template("post.html", post=requested_post, form=comment_form)
@app.route("/about")
def about():
    """Static about page."""
    return render_template("about.html")
@app.route("/new-post", methods=['GET', 'POST'])
@admin_only
def add_new_post():
    """Admin-only: create a blog post from the submitted CreatePostForm."""
    post_form = CreatePostForm()
    if post_form.validate_on_submit():
        # Display date is stored pre-formatted, e.g. "August 19, 2021".
        today = date.today().strftime("%B %d, %Y")
        post = BlogPost(
            title=post_form.title.data,
            subtitle=post_form.subtitle.data,
            body=post_form.body.data,
            img_url=post_form.img_url.data,
            author=current_user,
            date=today,
        )
        db.session.add(post)
        db.session.commit()
        return redirect(url_for("get_all_posts"))
    return render_template("make-post.html", form=post_form)
@app.route("/edit-post/<int:post_id>", methods=['GET', 'POST'])
@admin_only
def edit_post(post_id):
    """Admin-only: pre-fill the post form with the existing post and save edits."""
    # NOTE(review): .get() returns None for an unknown id — consider get_or_404.
    post = BlogPost.query.get(post_id)
    # Pre-populate the form from the stored post.
    # NOTE(review): author=current_user is passed to the form but no 'author'
    # change is applied below; presumably inert — verify against forms.py.
    edit_form = CreatePostForm(
        title=post.title,
        subtitle=post.subtitle,
        img_url=post.img_url,
        author=current_user,
        body=post.body
    )
    if edit_form.validate_on_submit():
        # The original 'date' is intentionally kept; only content fields change.
        post.title = edit_form.title.data
        post.subtitle = edit_form.subtitle.data
        post.img_url = edit_form.img_url.data
        post.body = edit_form.body.data
        db.session.commit()
        return redirect(url_for("show_post", post_id=post.id))
    return render_template("make-post.html", form=edit_form)
@app.route("/delete/<int:post_id>")
@admin_only
def delete_post(post_id):
    """Admin-only: remove the given post, then return to the index."""
    doomed = BlogPost.query.get(post_id)
    db.session.delete(doomed)
    db.session.commit()
    return redirect(url_for('get_all_posts'))
@app.route('/contact', methods=['POST', 'GET'])
def contact():
    """Contact page; on POST, forward the submitted message by email."""
    if request.method != "POST":
        return render_template('contact.html', msg_sent=False)
    form = request.form
    send_email(form['name'], form['email'], form['phone'], form['message'])
    return render_template('contact.html', msg_sent=True)
def send_email(name, email, phone, message):
    """Relay a contact-form submission to OWN_EMAIL via Yahoo SMTP.

    Uses STARTTLS on port 587; credentials come from the environment
    (OWN_EMAIL / OWN_PASSWORD). Both sender and recipient are OWN_EMAIL.
    """
    email_message = f"Subject:New Message\n\nName: {name}\nEmail: {email}\nPhone: {phone}\nMessage:{message}"
    with smtplib.SMTP(host="smtp.mail.yahoo.com", port=587) as connection:
        connection.starttls()
        connection.login(user=OWN_EMAIL, password=OWN_PASSWORD)
        connection.sendmail(from_addr=OWN_EMAIL, to_addrs=OWN_EMAIL, msg=email_message)
if __name__ == "__main__":
    # Bind on all interfaces so the app is reachable inside a container/VM.
    app.run(host='0.0.0.0', port=5000)
|
[
"pythoncoderhub54@yahoo.com"
] |
pythoncoderhub54@yahoo.com
|
8eeb58371d87b121f5bb1772c07014e7b3a2f4bc
|
cc09fb4e4272177a4c10d3385c921cf45b61849d
|
/Gráficos2.py
|
5f821c246e0c553b6aaf435765829032fe0198b9
|
[] |
no_license
|
felipelimapy/Graficos_com_Matplotlib
|
a7995fbddde66d9907d40995f1f3ac59ed9fec31
|
3ff3802cdef398ecd9d81e22ed95adf903228b7f
|
refs/heads/master
| 2020-07-04T07:52:13.894710
| 2019-08-14T19:08:32
| 2019-08-14T19:08:32
| 202,211,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
import matplotlib.pyplot as plt
# Soil-pH sample values per municipality (Amazonas, Brazil).
municipios=['Manaus', 'Tefé','Maués','Coari','PF', 'Iranduba']
media_pH=[7.5, 8, 5.6 , 6, 9.8, 1.2]
# NOTE(review): label='pontos' is never shown because plt.legend() is not called.
plt.plot(municipios, media_pH, label='pontos', color="green", marker='o')
plt.xticks(municipios)
# NOTE(review): the axis labels look swapped — the x axis carries the
# municipalities and the y axis the mean pH; confirm intent before fixing.
plt.ylabel("pH no solo")
plt.xlabel("média do pH do solo")
plt.title("pH no solo de Presidente Figueiredo\n")
plt.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|
a1c16962e511343f6654c076de283096891c70f9
|
8e24e8bba2dd476f9fe612226d24891ef81429b7
|
/geeksforgeeks/python/hard/3_1.py
|
5e5a923d3f652d3bb692c335928a84af29e9c3c5
|
[] |
no_license
|
qmnguyenw/python_py4e
|
fb56c6dc91c49149031a11ca52c9037dc80d5dcf
|
84f37412bd43a3b357a17df9ff8811eba16bba6e
|
refs/heads/master
| 2023-06-01T07:58:13.996965
| 2021-06-15T08:39:26
| 2021-06-15T08:39:26
| 349,059,725
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,994
|
py
|
GUI to Shutdown, Restart and Logout from the PC using Python
In this article, we are going to write a Python script to shut down, restart,
or log out of your system, and bind it to a GUI application.
The **OS module** in Python provides functions for interacting with the
operating system. OS is an inbuilt library python.
**Syntax :**
> **To shut down your system:** os.system(“shutdown /s /t 1”)
>
> **To restart your system:** os.system(“shutdown /r /t 1”)
>
>
>
>
>
>
>
> **To log out of your system:** os.system(“shutdown -l”)
**Implementation GUI Application using Tkinter:**
## Python3
__
__
__
__
__
__
__
# import modules
from tkinter import *
import os
# user-defined functions
def shutdown():
return os.system("shutdown /s /t 1")
def restart():
return os.system("shutdown /r /t 1")
def logout():
return os.system("shutdown -l")
# tkinter object
master = Tk()
# background set to grey
master.configure(bg='light grey')
# creating a button using the widget
# Buttons that will call the submit function
Button(master, text="Shutdown",
command=shutdown).grid(row=0)
Button(master, text="Restart", command=restart).grid(row=1)
Button(master, text="Log out", command=logout).grid(row=2)
mainloop()
---
__
__
**Output:**

**Note:** _Please ensure that you save and close all programs before
running this code in the IDLE, as this program will immediately shut down or
restart your computer._
Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
|
[
"qmnguyenw@gmail.com"
] |
qmnguyenw@gmail.com
|
c8bf965bce8cda9657261c0c2d513430c8ba4848
|
bda4b66c6b8ef0fb8cde7c2fab144970fe597d31
|
/asset_report/report/__init__.py
|
040892f9ddefb9f69bf7644fb607065a5293cd27
|
[] |
no_license
|
drishti-developer/Afras
|
0d9798a1982c77c49c4658d2bd43803f3caaa0c2
|
69f187b11357e05ef34b0d81eed606c61c6ad2d7
|
refs/heads/master
| 2019-01-02T08:36:05.133261
| 2014-02-24T11:51:35
| 2014-02-24T11:51:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23
|
py
|
import asset_report_wiz
|
[
"vipin.tripathi@drishtitech.com"
] |
vipin.tripathi@drishtitech.com
|
28fb7ba8851425b473d645e4ded39fd630653ec6
|
1797576f7ebc6eea049fea3ff91831cb140afa35
|
/Assignments/Assignment-2/string/format.py
|
af66f156a97503df9f587cacc99211eda27b24e3
|
[] |
no_license
|
Ayushd70/OCF-Python
|
8dd44f9ec08509d9610a6d8310622354e88097c2
|
bea41d997056235051db9f54f66790f66d7d8a2a
|
refs/heads/master
| 2023-06-03T20:38:52.701002
| 2021-06-23T15:38:56
| 2021-06-23T15:39:46
| 367,636,185
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 47
|
py
|
# Demonstrates str.format with a named placeholder.
S = 'hello {name}'
greeting = S.format(name="World")
print(greeting)
|
[
"ayushdubey70@gmail.com"
] |
ayushdubey70@gmail.com
|
2b8017d7766c5fd2ca521eb26c6729617083b685
|
a878325fd8fb8ef476cc894b1145d5cad9bebd3c
|
/prog04b.py
|
73cc0ca391c2106aabfb5eafe4a6039ae071131e
|
[] |
no_license
|
RestlessMystic/TamingthePython
|
5ffd5ba84c30a1a202aba47cf264f9edb81232b8
|
8e05d88eb979b4948adac75fd793ee3822a9c65e
|
refs/heads/master
| 2016-09-05T20:33:35.926453
| 2013-08-25T09:49:06
| 2013-08-25T09:49:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
#
# Taming the Python
# Govind Maheswaran
# Example Programs
# Prog 04b
# Program to demonstrate for loop
# NOTE: Python 2 syntax — "print i" is a statement, not a function call.
# Prints 1, 2, 3, 4 (range's end bound is exclusive).
for i in range(1,5):
    print i;
# Prints 1, 3, 5, 7, 9 (step of 2).
for i in range(1,10,2):
    print i;
|
[
"govindmaheswaran@gmail.com"
] |
govindmaheswaran@gmail.com
|
b3ae4dd8d3b6d3f3f5f2d0f12474ab0ea469bd94
|
7ad19e854135977ee5b789d7c9bdd39d67ec9ea4
|
/members/amit/clf/audio_processing.py
|
571343202183ddc05c2774531c7e5fd1d3a26acd
|
[
"MIT"
] |
permissive
|
Leofltt/rg_sound_generation
|
1b4d522507bf06247247f3ef929c8d0b93015e61
|
8e79b4d9dce028def43284f80521a2ec61d0066c
|
refs/heads/main
| 2023-05-02T19:53:23.645982
| 2021-05-22T16:09:54
| 2021-05-22T16:09:54
| 369,842,561
| 0
| 0
|
MIT
| 2021-05-22T15:27:28
| 2021-05-22T15:27:27
| null |
UTF-8
|
Python
| false
| false
| 1,248
|
py
|
import librosa
import numpy as np
from typing import Dict
def get_mel_spectrogram(audio: np.ndarray, params: Dict) -> np.ndarray:
    """Compute a log-scaled (dB) mel spectrogram for ``audio``.

    Args:
        audio: 1-D waveform samples.
        params: settings dict providing "sample_rate", "n_fft",
            "hop_len" and "n_mels".

    Returns:
        The mel spectrogram converted to decibels.
    """
    spectrogram = librosa.feature.melspectrogram(
        audio,
        sr=params.get("sample_rate"),
        n_fft=params.get("n_fft"),
        hop_length=params.get("hop_len"),
        n_mels=params.get("n_mels")
    )
    # Convert power values to a dB scale for a perceptually useful range.
    return librosa.power_to_db(spectrogram)
def get_hpr(audio: np.ndarray, params: Dict) -> (np.ndarray, np.ndarray, np.ndarray):
    """Harmonic/percussive/residual decomposition in the STFT domain.

    Returns:
        Tuple (harmonic, percussive, residual) of complex STFT matrices;
        the residual is whatever the HPSS split did not assign.
    """
    stft = librosa.stft(
        audio,
        n_fft=params.get("n_fft"),
        hop_length=params.get("hop_len")
    )
    harmonic, percussive = librosa.decompose.hpss(stft)
    residual = stft - (harmonic + percussive)
    return harmonic, percussive, residual
def get_features(file_path: str, params: Dict):
    """Load an audio file and compute its model input features.

    Returns:
        Tuple (spec, hpss) where ``spec`` is the clipped mel spectrogram
        and ``hpss`` is a (3 * n_bins, 1) column of the time-averaged
        magnitudes of the harmonic, percussive and residual components.
    """
    audio, _ = librosa.load(file_path, sr=params.get("sample_rate"), mono=True)
    # Truncate to at most "clip_audio_at" seconds of samples.
    audio = np.squeeze(audio)[:params.get("sample_rate") * params.get("clip_audio_at")]
    harmonic, percussive, residual = get_hpr(audio, params)
    # Average magnitude over time for each frequency bin.
    harmonic = np.abs(harmonic).mean(axis=-1)
    percussive = np.abs(percussive).mean(axis=-1)
    residual = np.abs(residual).mean(axis=-1)
    bins = harmonic.shape[0]
    hpss = np.concatenate([harmonic, percussive, residual], axis=-1)
    hpss = np.reshape(hpss, (bins * 3, 1))
    spec = get_mel_spectrogram(audio, params)
    # Floor the spectrogram at "clip_at" dB to suppress very quiet bins.
    spec = np.clip(spec, params.get("clip_at"), np.max(spec))
    return spec, hpss
|
[
"amit.yadav.iitr@gmail.com"
] |
amit.yadav.iitr@gmail.com
|
5bdcae03801bc9263730f63678c10f2052be98f5
|
5c8139f1e57e06c7eaf603bd8fe74d9f22620513
|
/PartB/py全排列4.py
|
b1e46e018cad4170fe7d76313c34805ed586b0ef
|
[] |
no_license
|
madeibao/PythonAlgorithm
|
c8a11d298617d1abb12a72461665583c6a44f9d2
|
b4c8a75e724a674812b8a38c0202485776445d89
|
refs/heads/master
| 2023-04-03T07:18:49.842063
| 2021-04-11T12:02:40
| 2021-04-11T12:02:40
| 325,269,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,133
|
py
|
# Implementation of the full permutation of a list.
# Permutation algorithm.
class Solution(object):
    def permutations(self, nums):
        """Return every permutation of ``nums`` using in-place swaps.

        Returns [] when ``nums`` is None. ``nums`` is restored to its
        original order before returning.
        """
        if nums is None:
            return []
        results = []
        def backtrack(pos):
            # All positions fixed: snapshot the current arrangement.
            if pos == len(nums):
                results.append(nums[:])
            for swap_idx in range(pos, len(nums)):
                # Place each remaining element at index ``pos`` in turn.
                nums[pos], nums[swap_idx] = nums[swap_idx], nums[pos]
                backtrack(pos + 1)
                # Undo the swap so sibling branches see the same order.
                nums[pos], nums[swap_idx] = nums[swap_idx], nums[pos]
        backtrack(0)
        return results
# Demo: print all permutations of [1, 2, 3].
if __name__ == "__main__":
    s = Solution()
    list2 = [1, 2, 3]
    print(s.permutations(list2))
# Implementation of the subset (power-set) algorithm.
from typing import List
class Solution:
    def subsets(self, nums: List[int]) -> List[List[int]]:
        """Return every subset (the power set) of ``nums``.

        Returns [] for an empty or falsy input.
        """
        if not nums:
            return []
        collected = []
        total = len(nums)
        def explore(start, chosen):
            # Every partial selection is itself a valid subset.
            collected.append(chosen)
            for pos in range(start, total):
                # Extend with each later element; a fresh list is built
                # by ``+`` so no shared state leaks between branches.
                explore(pos + 1, chosen + [nums[pos]])
        explore(0, [])
        return collected
# Demo: print the power set of [1, 2, 3].
if __name__ == "__main__":
    s = Solution()
    list2 = [1, 2, 3,]
    print(s.subsets(list2))
|
[
"2901429479@qq.com"
] |
2901429479@qq.com
|
54cf93179518b8b172fbb7bd732db4e3b0d2fd34
|
5094728779e2ac3dcfc9748d759987e9bc91ed53
|
/t_06_exception.py
|
35a24391e4f6b4ca2cbd12b881196e9f0a7c470f
|
[] |
no_license
|
cegraris/pythontutorial
|
cebe6f427d759fae2899f37dadaa38c6771a5455
|
0a7220537af6d3af93c105144ba4bdfe35013aa2
|
refs/heads/main
| 2023-05-12T11:55:38.867108
| 2021-06-02T17:16:02
| 2021-06-02T17:16:02
| 365,843,134
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
# try ==================================
# Tutorial skeleton showing the full try/except/else/finally shape;
# the "..." (Ellipsis) placeholders stand in for real statements.
import logging
try:
    ...
except ZeroDivisionError as e:
    # Narrow handler: logs the message plus the traceback.
    logging.exception(e)
except ValueError as e:
    logging.exception(e)
else:
    # Runs only when the try body raised nothing.
    ...
finally:
    # Always runs — the place for cleanup.
    ...
|
[
"wusthink@gmail.com"
] |
wusthink@gmail.com
|
08320cf0e22cd96627531f4a974ef96a4213a7f1
|
086220d056fe15e8a97ed4b2392c41a5e1582857
|
/codes/models/modules/thops.py
|
777d40ab500302ffc10fa4e0459c88f21585e010
|
[
"Apache-2.0"
] |
permissive
|
jprost76/HCFlow
|
2048931acf7698cb2fff20000160dbc404bb7f2b
|
10deed6f8f719e72cf1c3cad486198b8a506c805
|
refs/heads/main
| 2023-08-07T12:18:37.681269
| 2021-09-29T10:33:05
| 2021-09-29T10:33:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,283
|
py
|
import torch
def sum(tensor, dim=None, keepdim=False):
    """Sum ``tensor`` over one or more dimensions.

    NOTE: intentionally shadows the builtin ``sum`` inside this module.

    Args:
        tensor: input torch tensor.
        dim: None (reduce everything), an int, or a list of ints.
        keepdim: keep reduced axes as size-1 dimensions when True.
    """
    if dim is None:
        # No dim given: collapse the whole tensor to a scalar.
        return torch.sum(tensor)
    dims = sorted([dim] if isinstance(dim, int) else dim)
    # Reduce one axis at a time, keeping it so later indices stay valid.
    for axis in dims:
        tensor = tensor.sum(dim=axis, keepdim=True)
    if not keepdim:
        # Drop the kept size-1 axes; offset by how many were already removed.
        for removed, axis in enumerate(dims):
            tensor.squeeze_(axis - removed)
    return tensor
def mean(tensor, dim=None, keepdim=False):
    """Average ``tensor`` over one or more dimensions.

    Args:
        tensor: input torch tensor.
        dim: None (reduce everything), an int, or a list of ints.
        keepdim: keep reduced axes as size-1 dimensions when True.
    """
    if dim is None:
        # No dim given: mean over every element.
        return torch.mean(tensor)
    dims = sorted([dim] if isinstance(dim, int) else dim)
    # Average one axis at a time, keeping it so later indices stay valid.
    for axis in dims:
        tensor = tensor.mean(dim=axis, keepdim=True)
    if not keepdim:
        # Drop the kept size-1 axes; offset by how many were already removed.
        for removed, axis in enumerate(dims):
            tensor.squeeze_(axis - removed)
    return tensor
def split_feature(tensor, type="split"):
    """Split ``tensor`` into two halves along the channel axis (dim 1).

    type = ["split", "cross"]
      "split": contiguous halves — first C//2 channels vs the rest.
      "cross": interleaved halves — even channels vs odd channels.
    """
    channels = tensor.size(1)
    if type == "split":
        half = channels // 2
        return tensor[:, :half, ...], tensor[:, half:, ...]
    elif type == "cross":
        return tensor[:, 0::2, ...], tensor[:, 1::2, ...]
def cat_feature(tensor_a, tensor_b):
    """Concatenate two tensors along the channel axis (dim 1)."""
    combined = torch.cat((tensor_a, tensor_b), dim=1)
    return combined
def pixels(tensor):
    """Return H * W — the number of spatial positions of an NCHW tensor."""
    height, width = tensor.size(2), tensor.size(3)
    return int(height * width)
|
[
"noreply@github.com"
] |
noreply@github.com
|
a0e61f5c74df9f18379f59f87bef20d5c12f7081
|
63271db556e4de0b2b8c3782ca6e083290e9c01d
|
/SHgold/SHgold/mian.py
|
64b3a857e77336cba39527fedfaf785720e05f51
|
[] |
no_license
|
nuoyi-618/scrapy
|
09068b37b77a3528f229c8ba368d3c652f651401
|
e40f486a6fddbebd27ee1de97c7ff5a4c550f8ce
|
refs/heads/master
| 2020-05-03T14:59:32.492113
| 2019-03-31T13:49:32
| 2019-03-31T13:49:32
| 178,693,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 83
|
py
|
from scrapy import cmdline

# Convenience launcher: equivalent to running
# "scrapy crawl SHgold_spider" from the command line.
command = 'scrapy crawl SHgold_spider'
cmdline.execute(command.split())
|
[
"noreply@github.com"
] |
noreply@github.com
|
f3c9e9f41e6da7340eaa39fa9d686a0b0fa3de25
|
012f882a6b354f58ce16d2a58f727eadcfe6e47e
|
/MainProgram.py
|
9b81f8da6ce8a0f96e5551bbdb7858a8ffd7792a
|
[] |
no_license
|
MuhammadAhmedKhalid/Projects
|
93e0b1281067f6f6cee613479f9f5d22f8efa00a
|
dd355e67e3e774008fcd8f698d04c4ead62265b1
|
refs/heads/master
| 2021-02-09T01:34:32.634869
| 2020-03-01T21:09:23
| 2020-03-01T21:09:23
| 244,223,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,802
|
py
|
import sqlite3
from sqlite3 import Error
def login():
    """Prompt for admin credentials on the console, allowing 3 attempts.

    On success the main menu (selectOptions) runs; after the third
    failure the user is told to contact an admin.
    """
    # NOTE(review): credentials are hard-coded in plain text — a real
    # deployment should store a salted hash outside the source.
    uname = 'admin123'
    pword = '123456'
    for i in range(0,3):
        input1 = input("Enter Username: ")
        input2 = input("Enter Password: ")
        # Bitwise & works here only because both operands are bools.
        if (input1 == uname) & (input2 == pword):
            selectOptions()
            break;
        else:
            if (i == 0) | (i == 1):
                print("Please Try again..")
            else:
                # Third and final failed attempt.
                print("Sorry please contact your admin.")
def selectOptions():
    """Show the main menu in a loop and dispatch the chosen operation.

    Any input other than "1"-"7" (including the advertised "8") leaves
    the loop and prints a goodbye message.
    """
    exit = False;  # NOTE: shadows the builtin exit(); kept as-is.
    while(exit == False):
        print("Please select option: ")
        print("1. Create new account")
        print("2. Update account")
        print("3. Delete account")
        print("4. View account")
        print("5. View accounts")
        print("6. Deposit account")
        print("7. Withdraw money")
        print("8. Exit")
        select1 = input("Enter your choice number: ")
        if (select1 == "1"):
            createNewAcc()
        elif (select1 == "2"):
            updateAcc()
        elif (select1 == "3"):
            delAcc()
        elif (select1 == "4"):
            viewAcc()
        elif (select1 == "5"):
            viewAccs()
        elif (select1 == "6"):
            depositAcc()
        elif (select1 == "7"):
            withdrawAcc()
        else:
            # "8" and any unrecognised input both end the session.
            exit=True;
            print("Thankyou for using our system.")
def createNewAcc():
    """Collect a new customer's details from the console and store them.

    Ensures the database table exists, then inserts the answers in the
    same order as the Customer_Info columns.
    """
    prompts = [
        "Enter your First Name: ",
        "Enter your Last Name: ",
        "Enter your Address: ",
        "Enter your City: ",
        "Enter Phone number: ",
        "Your Account Balance: ",
        "Enter PIN: ",
    ]
    # Ask each question in order; answers line up with the table columns.
    customer = [input(prompt) for prompt in prompts]
    createDB()
    insertCustomer(customer)
def updateAcc():
    """Interactively update one field of a customer record.

    Flow: verify the ID exists, verify the PIN, let the user pick a
    column, then write the new value. Aborts silently on a wrong PIN or
    an unrecognised menu choice.
    """
    uid = int(input("Please enter your ID: "))
    exist = checkCustomer(uid)
    if (exist == True):
        correct = checkPIN(uid)
        if (correct == True):
            print("What do you want to update?")
            print("1. Your Address")
            print("2. Your City")
            print("3. Your Phone Number")
            print("4. Your PIN")
            select2 = input("Enter your choice number: ")
            # Map the menu choice to a fixed column name; this whitelist
            # is what keeps updateCustomer's column interpolation safe.
            if (select2 == "1"):
                col = "Address"
            elif (select2 == "2"):
                col = "City"
            elif (select2 == "3"):
                col = "PhoneNumber"
            elif (select2 == "4"):
                col = "PIN"
            else:
                return
            temptext = "Enter " + col + ": "
            value = input(temptext)
            updateCustomer(col, value, uid)
            print("Your account updated successfully.")
        else:
            return
    else:
        print("ID not found")
        return
def delAcc():
    """Interactively delete a customer account after ID + PIN checks."""
    uid1 = int(input("Please enter your ID: "))
    # Guard clauses replace the original nested if/else pyramid.
    if not checkCustomer(uid1):
        print("ID not found")
        return
    if not checkPIN(uid1):
        return
    delCustomer(uid1)
    print("Your account deleted successfully.")
def viewAcc():
    """Print one customer's record after ID + PIN verification."""
    uid4 = int(input("Please enter your ID: "))
    exist = checkCustomer(uid4)
    if (exist == True):
        correct = checkPIN(uid4)
        if (correct == True):
            con = sqlite3.connect('BankCustomerPortal.db')
            cursorObj = con.cursor()
            # The interpolated id was coerced by int() above, so this
            # concatenation cannot inject SQL, though parameter binding
            # would still be the cleaner form.
            cursorObj.execute("SELECT * FROM Customer_Info where ID = " + str(uid4))
            acc = cursorObj.fetchone()
            # Columns: 0=ID, 1=FirstName, 2=LastName, 3=Address, 4=City,
            # 5=PhoneNumber, 6=AccountBalance (ID and PIN are not shown).
            print("First Name\tLast Name\tAddress\tCity\tPhoneNumber\tAccountBalance")
            print(acc[1] + "\t" + acc[2] + "\t" + acc[3] + "\t" + acc[4] + "\t" + str(acc[5]) + "\t" + str(acc[6]))
        else:
            return
    else:
        print("ID not found")
        return
def viewAccs():
    """Print every customer record (name, address, city, phone, balance)."""
    connection = sqlite3.connect('BankCustomerPortal.db')
    cursor = connection.cursor()
    cursor.execute("SELECT * FROM Customer_Info")
    for record in cursor.fetchall():
        # The header line is repeated before each row, matching viewAcc.
        print("First Name\tLast Name\tAddress\tCity\tPhoneNumber\tAccountBalance")
        print(record[1] + "\t" + record[2] + "\t" + record[3] + "\t" + record[4] + "\t" + str(record[5]) + "\t" + str(record[6]))
def withdrawAcc():
    """Interactively withdraw money after ID + PIN verification.

    NOTE(review): there is no check that the balance covers the
    withdrawal, so the stored balance can go negative.
    """
    uid3 = int(input("Please enter your ID: "))
    exist = checkCustomer(uid3)
    if (exist == True):
        correct = checkPIN(uid3)
        if (correct == True):
            subtracting = int(input("How much money do you want to withdraw? "))
            withdrawAmount(uid3, subtracting)
            print("You successfully withdraw your amount Thank you.")
        else:
            return
    else:
        print("ID not found")
        return
def depositAcc():
    """Interactively deposit money after ID + PIN verification."""
    uid2 = int(input("Please enter your ID: "))
    exist = checkCustomer(uid2)
    if (exist == True):
        correct = checkPIN(uid2)
        if (correct == True):
            adding = int(input("How much money do you want to deposit? "))
            depositAmount(uid2, adding)
            print("Your amount successfully deposited Thank you.")
        else:
            return
    else:
        print("ID not found")
        return
def createDB():
    """Create the BankCustomerPortal database and its Customer_Info table.

    If the table already exists a notice is printed and nothing changes.
    """
    con = sqlite3.connect("BankCustomerPortal.db")
    cursorObj = con.cursor()
    cursorObj.execute(''' SELECT count(name) FROM sqlite_master WHERE type='table' AND name='Customer_Info' ''')
    if (cursorObj.fetchone()[0] == 1):
        print("Table already exist.")
    else:
        try:
            cursorObj.execute(
                "CREATE TABLE Customer_Info(ID integer PRIMARY KEY AUTOINCREMENT, FirstName text, LastName text, Address text, City text, PhoneNumber text, AccountBalance text, PIN text)")
            con.commit()
        except Error as e:
            # Bug fix: the original did "except Error: print(Error)",
            # which printed the exception *class* instead of the actual
            # failure message.
            print(e)
def insertCustomer(customer):
    """Insert a 7-field customer record and print the generated ID.

    ``customer`` is a sequence of (FirstName, LastName, Address, City,
    PhoneNumber, AccountBalance, PIN), matching the table's columns.
    """
    connection = sqlite3.connect('BankCustomerPortal.db')
    cursor = connection.cursor()
    cursor.execute("INSERT INTO Customer_Info (FirstName, LastName, Address, City, PhoneNumber, AccountBalance, PIN) VALUES(?, ?, ?, ?, ?, ?, ?)", customer)
    # Fetch the auto-incremented ID of the row just inserted.
    cursor.execute('SELECT ID FROM Customer_Info WHERE id=(SELECT max(id) FROM Customer_Info)')
    cid = cursor.fetchone()[0]
    print("Your ID is ", cid, " . Remember your ID next time.")
    connection.commit()
def updateCustomer(columnName, Value, uid):
    """Set ``columnName`` to ``Value`` for the customer with id ``uid``.

    The value and id are bound as SQL parameters — the original built
    the whole statement by string concatenation, allowing SQL injection
    through the user-supplied value. Column names cannot be bound, so
    ``columnName`` is still interpolated; it only ever comes from the
    fixed whitelist in updateAcc.
    """
    con = sqlite3.connect('BankCustomerPortal.db')
    query = "UPDATE Customer_Info SET " + columnName + "=? where id = ?"
    cursorObj = con.cursor()
    cursorObj.execute(query, (Value, uid))
    con.commit()
def delCustomer(uid1):
    """Delete the customer row with id ``uid1``.

    The id is bound as a SQL parameter instead of being concatenated
    into the statement (the original string-built query was an
    injection hazard for non-int callers).
    """
    con = sqlite3.connect('BankCustomerPortal.db')
    cursorObj = con.cursor()
    cursorObj.execute("DELETE FROM Customer_Info where id = ?", (uid1,))
    con.commit()
def checkCustomer(id):
    """Return True when a customer row with the given ``id`` exists.

    The id is bound as a SQL parameter (the original concatenated it
    into the query string). The boolean is returned directly instead of
    via an if/else.
    """
    con = sqlite3.connect('BankCustomerPortal.db')
    cursorObj = con.cursor()
    cursorObj.execute("SELECT count(ID) FROM Customer_Info where ID = ?", (id,))
    return cursorObj.fetchone()[0] == 1
def depositAmount(uid2, adding):
    """Add ``adding`` (an int) to the stored balance of customer ``uid2``.

    Both the id and the new balance are bound as SQL parameters; the
    original built the UPDATE by string concatenation. The balance is
    stored as text, matching the table schema used elsewhere.
    """
    con = sqlite3.connect('BankCustomerPortal.db')
    cursorObj = con.cursor()
    cursorObj.execute("SELECT AccountBalance FROM Customer_Info where id = ?", (uid2,))
    accBalance = int(cursorObj.fetchone()[0]) + adding
    cursorObj.execute("UPDATE Customer_Info SET AccountBalance=? where id = ?", (str(accBalance), uid2))
    con.commit()
def withdrawAmount(uid3, subtracting):
    """Subtract ``subtracting`` (an int) from the balance of ``uid3``.

    Both the id and the new balance are bound as SQL parameters; the
    original built the UPDATE by string concatenation.
    NOTE(review): no overdraft check — callers can drive the stored
    balance negative; preserved to keep caller-visible behavior.
    """
    con = sqlite3.connect('BankCustomerPortal.db')
    cursorObj = con.cursor()
    cursorObj.execute("SELECT AccountBalance FROM Customer_Info where id = ?", (uid3,))
    accBalance = int(cursorObj.fetchone()[0]) - subtracting
    cursorObj.execute("UPDATE Customer_Info SET AccountBalance=? where id = ?", (str(accBalance), uid3))
    con.commit()
def checkPIN(uID):
    """Prompt for the customer's PIN, allowing up to three attempts.

    Returns True on a correct PIN, False otherwise.
    NOTE(review): the placement of the final "return False" relative to
    the loop should be confirmed against the original source — as read
    here it allows three attempts, matching login()'s retry count.
    """
    for i in range(0, 3):
        pWord = input("Please enter your PIN code: ")
        con = sqlite3.connect('BankCustomerPortal.db')
        cursorObj = con.cursor()
        # uID is interpolated, but every call site coerces it with
        # int() first, so injection is not possible here.
        cursorObj.execute("SELECT PIN FROM Customer_Info where id = " + str(uID))
        check = cursorObj.fetchone()[0]
        if (pWord == check):
            return True
        else:
            print("Incorrect PIN.")
    return False
login()
|
[
"noreply@github.com"
] |
noreply@github.com
|
b4b694bd0706b269f741b4e7b90bca506b194cc9
|
885c89c56923565117b6244afa6c16664e25094f
|
/vspk/v5_0/nugateway.py
|
3158028e3193886ae359bc573db55e0e6968fa7a
|
[
"BSD-3-Clause"
] |
permissive
|
ikbalcam/vspk-python
|
0017f5c7c4f9aaca604fb4da42884eddc497ee00
|
1c6d28c8f8f7bcadbd1722cdc3046b01dbf1d9e8
|
refs/heads/master
| 2021-10-01T16:14:00.380613
| 2017-11-02T21:43:41
| 2017-11-02T21:43:41
| 115,129,333
| 0
| 0
| null | 2017-12-22T15:52:11
| 2017-12-22T15:52:11
| null |
UTF-8
|
Python
| false
| false
| 19,056
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPATNATPoolsFetcher
from .fetchers import NUPermissionsFetcher
from .fetchers import NUWANServicesFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUAlarmsFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUEnterprisePermissionsFetcher
from .fetchers import NUJobsFetcher
from .fetchers import NUPortsFetcher
from .fetchers import NUEventLogsFetcher
from bambou import NURESTObject
class NUGateway(NURESTObject):
    """ Represents a Gateway in the VSD

        Notes:
            Represents Gateway object.
    """

    # Auto-generated vspk binding: REST resource names used by bambou
    # to build URLs for this entity.
    __rest_name__ = "gateway"
    __resource_name__ = "gateways"

    ## Constants
    # Allowed values for the `personality` and `permitted_action`
    # attributes, and for `entity_scope` (mirrored in the choices=
    # lists passed to expose_attribute below).
    CONST_PERSONALITY_HARDWARE_VTEP = "HARDWARE_VTEP"
    CONST_PERSONALITY_VSA = "VSA"
    CONST_PERMITTED_ACTION_USE = "USE"
    CONST_PERSONALITY_VSG = "VSG"
    CONST_PERMITTED_ACTION_READ = "READ"
    CONST_PERSONALITY_OTHER = "OTHER"
    CONST_PERSONALITY_NSG = "NSG"
    CONST_PERSONALITY_VRSB = "VRSB"
    CONST_PERMITTED_ACTION_ALL = "ALL"
    CONST_PERMITTED_ACTION_DEPLOY = "DEPLOY"
    CONST_PERMITTED_ACTION_EXTEND = "EXTEND"
    CONST_PERSONALITY_NUAGE_210_WBX_48_S = "NUAGE_210_WBX_48_S"
    CONST_PERSONALITY_NUAGE_210_WBX_32_Q = "NUAGE_210_WBX_32_Q"
    CONST_PERMITTED_ACTION_INSTANTIATE = "INSTANTIATE"
    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
    CONST_PERSONALITY_DC7X50 = "DC7X50"
    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
    CONST_PERSONALITY_VRSG = "VRSG"

    def __init__(self, **kwargs):
        """ Initializes a Gateway instance

            Notes:
                You can specify all parameters while calling this methods.
                A special argument named `data` will enable you to load the
                object from a Python dictionary

            Examples:
                >>> gateway = NUGateway(id=u'xxxx-xxx-xxx-xxx', name=u'Gateway')
                >>> gateway = NUGateway(data=my_dict)
        """

        super(NUGateway, self).__init__()

        # Read/Write Attributes
        # Backing fields for the properties declared below; the
        # expose_attribute calls map them to their VSD API names.
        self._name = None
        self._last_updated_by = None
        self._redundancy_group_id = None
        self._peer = None
        self._template_id = None
        self._pending = None
        self._permitted_action = None
        self._personality = None
        self._description = None
        self._enterprise_id = None
        self._entity_scope = None
        self._use_gateway_vlanvnid = None
        self._vtep = None
        self._auto_disc_gateway_id = None
        self._external_id = None
        self._system_id = None

        self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="redundancy_group_id", remote_name="redundancyGroupID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="peer", remote_name="peer", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="template_id", remote_name="templateID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="pending", remote_name="pending", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="permitted_action", remote_name="permittedAction", attribute_type=str, is_required=False, is_unique=False, choices=[u'ALL', u'DEPLOY', u'EXTEND', u'INSTANTIATE', u'READ', u'USE'])
        self.expose_attribute(local_name="personality", remote_name="personality", attribute_type=str, is_required=False, is_unique=False, choices=[u'DC7X50', u'HARDWARE_VTEP', u'NSG', u'NUAGE_210_WBX_32_Q', u'NUAGE_210_WBX_48_S', u'OTHER', u'VRSB', u'VRSG', u'VSA', u'VSG'])
        self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="enterprise_id", remote_name="enterpriseID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="use_gateway_vlanvnid", remote_name="useGatewayVLANVNID", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="vtep", remote_name="vtep", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="auto_disc_gateway_id", remote_name="autoDiscGatewayID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
        self.expose_attribute(local_name="system_id", remote_name="systemID", attribute_type=str, is_required=False, is_unique=False)

        # Fetchers
        # Child-collection accessors for the entities nested under a
        # gateway in the VSD API.
        self.patnat_pools = NUPATNATPoolsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.wan_services = NUWANServicesFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.alarms = NUAlarmsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.enterprise_permissions = NUEnterprisePermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.jobs = NUJobsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.ports = NUPortsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship="child")

        # Apply any keyword arguments (including `data=`) on top of the
        # defaults set above.
        self._compute_args(**kwargs)

    # Properties

    @property
    def name(self):
        """ Get name value.

            Notes:
                Name of the Gateway

        """
        return self._name

    @name.setter
    def name(self, value):
        """ Set name value.

            Notes:
                Name of the Gateway

        """
        self._name = value

    @property
    def last_updated_by(self):
        """ Get last_updated_by value.

            Notes:
                ID of the user who last updated the object.

                This attribute is named `lastUpdatedBy` in VSD API.

        """
        return self._last_updated_by

    @last_updated_by.setter
    def last_updated_by(self, value):
        """ Set last_updated_by value.

            Notes:
                ID of the user who last updated the object.

                This attribute is named `lastUpdatedBy` in VSD API.

        """
        self._last_updated_by = value

    @property
    def redundancy_group_id(self):
        """ Get redundancy_group_id value.

            Notes:
                The Redundancy Gateway Group associated with this Gateway Instance. This is a read only attribute

                This attribute is named `redundancyGroupID` in VSD API.

        """
        return self._redundancy_group_id

    @redundancy_group_id.setter
    def redundancy_group_id(self, value):
        """ Set redundancy_group_id value.

            Notes:
                The Redundancy Gateway Group associated with this Gateway Instance. This is a read only attribute

                This attribute is named `redundancyGroupID` in VSD API.

        """
        self._redundancy_group_id = value

    @property
    def peer(self):
        """ Get peer value.

            Notes:
                The System ID of the peer gateway associated with this Gateway instance when it is discovered by the network manager (VSD) as being redundant.

        """
        return self._peer

    @peer.setter
    def peer(self, value):
        """ Set peer value.

            Notes:
                The System ID of the peer gateway associated with this Gateway instance when it is discovered by the network manager (VSD) as being redundant.

        """
        self._peer = value

    @property
    def template_id(self):
        """ Get template_id value.

            Notes:
                The ID of the template that this Gateway was created from. This should be set when instantiating a Gateway

                This attribute is named `templateID` in VSD API.

        """
        return self._template_id

    @template_id.setter
    def template_id(self, value):
        """ Set template_id value.

            Notes:
                The ID of the template that this Gateway was created from. This should be set when instantiating a Gateway

                This attribute is named `templateID` in VSD API.

        """
        self._template_id = value

    @property
    def pending(self):
        """ Get pending value.

            Notes:
                Indicates that this gateway is pending state or state. When in pending state it cannot be modified from REST.

        """
        return self._pending

    @pending.setter
    def pending(self, value):
        """ Set pending value.

            Notes:
                Indicates that this gateway is pending state or state. When in pending state it cannot be modified from REST.

        """
        self._pending = value

    @property
    def permitted_action(self):
        """ Get permitted_action value.

            Notes:
                The permitted  action to USE/EXTEND  this Gateway.

                This attribute is named `permittedAction` in VSD API.

        """
        return self._permitted_action

    @permitted_action.setter
    def permitted_action(self, value):
        """ Set permitted_action value.

            Notes:
                The permitted  action to USE/EXTEND  this Gateway.

                This attribute is named `permittedAction` in VSD API.

        """
        self._permitted_action = value

    @property
    def personality(self):
        """ Get personality value.

            Notes:
                Personality of the Gateway, cannot be changed after creation.

        """
        return self._personality

    @personality.setter
    def personality(self, value):
        """ Set personality value.

            Notes:
                Personality of the Gateway, cannot be changed after creation.

        """
        self._personality = value

    @property
    def description(self):
        """ Get description value.

            Notes:
                A description of the Gateway

        """
        return self._description

    @description.setter
    def description(self, value):
        """ Set description value.

            Notes:
                A description of the Gateway

        """
        self._description = value

    @property
    def enterprise_id(self):
        """ Get enterprise_id value.

            Notes:
                The enterprise associated with this Gateway. This is a read only attribute

                This attribute is named `enterpriseID` in VSD API.

        """
        return self._enterprise_id

    @enterprise_id.setter
    def enterprise_id(self, value):
        """ Set enterprise_id value.

            Notes:
                The enterprise associated with this Gateway. This is a read only attribute

                This attribute is named `enterpriseID` in VSD API.

        """
        self._enterprise_id = value

    @property
    def entity_scope(self):
        """ Get entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level

                This attribute is named `entityScope` in VSD API.

        """
        return self._entity_scope

    @entity_scope.setter
    def entity_scope(self, value):
        """ Set entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level

                This attribute is named `entityScope` in VSD API.

        """
        self._entity_scope = value

    @property
    def use_gateway_vlanvnid(self):
        """ Get use_gateway_vlanvnid value.

            Notes:
                When set, VLAN-VNID mapping must be unique for all the vports of the gateway

                This attribute is named `useGatewayVLANVNID` in VSD API.

        """
        return self._use_gateway_vlanvnid

    @use_gateway_vlanvnid.setter
    def use_gateway_vlanvnid(self, value):
        """ Set use_gateway_vlanvnid value.

            Notes:
                When set, VLAN-VNID mapping must be unique for all the vports of the gateway

                This attribute is named `useGatewayVLANVNID` in VSD API.

        """
        self._use_gateway_vlanvnid = value

    @property
    def vtep(self):
        """ Get vtep value.

            Notes:
                Represent the system ID or the Virtual IP of a service used by a Gateway (VSG for now) to establish a tunnel with a remote VSG or hypervisor.  The format of this field is consistent with an IP address.

        """
        return self._vtep

    @vtep.setter
    def vtep(self, value):
        """ Set vtep value.

            Notes:
                Represent the system ID or the Virtual IP of a service used by a Gateway (VSG for now) to establish a tunnel with a remote VSG or hypervisor.  The format of this field is consistent with an IP address.

        """
        self._vtep = value

    @property
    def auto_disc_gateway_id(self):
        """ Get auto_disc_gateway_id value.

            Notes:
                The Auto Discovered Gateway associated with this Gateway Instance

                This attribute is named `autoDiscGatewayID` in VSD API.

        """
        return self._auto_disc_gateway_id

    @auto_disc_gateway_id.setter
    def auto_disc_gateway_id(self, value):
        """ Set auto_disc_gateway_id value.

            Notes:
                The Auto Discovered Gateway associated with this Gateway Instance

                This attribute is named `autoDiscGatewayID` in VSD API.

        """
        self._auto_disc_gateway_id = value

    @property
    def external_id(self):
        """ Get external_id value.

            Notes:
                External object ID. Used for integration with third party systems

                This attribute is named `externalID` in VSD API.

        """
        return self._external_id

    @external_id.setter
    def external_id(self, value):
        """ Set external_id value.

            Notes:
                External object ID. Used for integration with third party systems

                This attribute is named `externalID` in VSD API.

        """
        self._external_id = value

    @property
    def system_id(self):
        """ Get system_id value.

            Notes:
                Identifier of the Gateway, cannot be modified after creation

                This attribute is named `systemID` in VSD API.

        """
        return self._system_id

    @system_id.setter
    def system_id(self, value):
        """ Set system_id value.

            Notes:
                Identifier of the Gateway, cannot be modified after creation

                This attribute is named `systemID` in VSD API.

        """
        self._system_id = value

    ## Custom methods
    def is_template(self):
        """ Verify that the object is a template

            Returns:
                (bool): True if the object is a template
        """
        return False

    def is_from_template(self):
        """ Verify if the object has been instantiated from a template

            Note:
                The object has to be fetched. Otherwise, it does not
                have information from its parent

            Returns:
                (bool): True if the object is a template
        """
        # Truthy template_id means the object came from a template.
        return self.template_id
|
[
"corentin.henry@nokia.com"
] |
corentin.henry@nokia.com
|
3f1bc7c247ce6f9db5bd435471a06ee8d3eda970
|
4bf6cd0269cde7827219bfad51d6ce65074bb7c1
|
/meta/hooks/pre-commit.py
|
32e3e23e4d1e7419b0e9500624cde92873f7a780
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
RichysHub/Squimoji
|
b62e3314e2a0365395c7f0b203d72989f6a04ca6
|
73e231b147aebd4fbd63604ba212b87e726b54e8
|
refs/heads/master
| 2021-11-28T05:34:08.957689
| 2021-11-26T21:05:54
| 2021-11-26T21:05:54
| 168,065,136
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,480
|
py
|
import json
import re
import subprocess
import sys
inkscape_executable = "C:/Program Files/Inkscape/inkscape.exe"
print("Checking staged files for color compliance")
try:
# Get files in staging area:
commit_text = subprocess.check_output(["git", "status", "--porcelain", "-uno"],
stderr=subprocess.STDOUT).decode("utf-8")
except subprocess.CalledProcessError:
print("Error calling git status in pre-commit hook")
sys.exit(12)
# making note of the commit changes, for processing in the post-commit
with open(".commit", 'w') as commit_changes:
commit_changes.write(commit_text)
with open("./meta/palette.json", "r") as palette:
valid_colors = set(json.load(palette))
with open("./meta/files_by_color.json", "r") as files_by_color_input:
files_by_color = json.load(files_by_color_input)
color_regex = "#[\da-fA-f]+"
scarcity_cuttoff = 10
def validate_colors(svg_filename):
acceptable_colors = True
with open(svg_filename, "r") as svg:
data = svg.read()
colors = re.findall(color_regex, data)
for color in colors:
if color not in valid_colors:
print("Invalid color", color, "present in", svg_filename)
acceptable_colors = False
else:
number_occurances = len(files_by_color[color])
if number_occurances <= scarcity_cuttoff:
print("Color", color, "present in", svg_filename, "appears in only", number_occurances, "emoji.")
acceptable_colors = False
return acceptable_colors
file_list = commit_text.splitlines()
validations = []
# Check all files:
for file_entry in file_list:
# format of lines is 'XY filename'
# X is status of the index
index_status = file_entry[0]
filename = file_entry[3:]
# only interested in svgs
if filename.endswith(".svg") and filename.startswith("svg/"):
if index_status not in ['R', 'D', 'C']: # Renames, copies and deletes don't need checking
validations.append(validate_colors(filename))
elif filename.endswith(".png") and filename.startswith("72x72/"):
print('Changed rendered image "{}". This change subject to be overridden by post-commit.'.format(filename))
if all(validations):
# Everything seams to be okay:
print("No unexpected colors found.")
sys.exit(0)
else:
print("Commit aborted, fix colors and recommit.")
sys.exit(1)
|
[
"richardjspencer93@gmail.com"
] |
richardjspencer93@gmail.com
|
73baf965eeb3006306eb297aabdd0451da7cb884
|
7dc9a092325f4ca03d2186944105214735de41ea
|
/craftaws/__init__.py
|
be560699661a0b5367c9c29de91c6490f841e5d8
|
[] |
no_license
|
ThinkinGim/stepfunction-demo
|
9b0abde8f06938137d11a8695264acd7d4297826
|
f706faeb479d2b829c128a39c4a4cc5b4ac18cbb
|
refs/heads/main
| 2023-05-01T15:36:12.216764
| 2021-05-11T13:11:05
| 2021-05-11T21:47:43
| 366,378,841
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,809
|
py
|
from aws_cdk import (
core,
aws_ec2,
aws_iam as iam,
aws_lambda as lambda_,
aws_stepfunctions as sfn,
aws_stepfunctions_tasks as tasks,
)
class network(core.Stack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
self.vpc = aws_ec2.Vpc(self, "demo-stepfunctions",
cidr="10.100.0.0/16",
max_azs=2,
nat_gateways=0,
subnet_configuration=[
aws_ec2.SubnetConfiguration(
name='demo-stepfunctions',
subnet_type=aws_ec2.SubnetType.ISOLATED,
cidr_mask=24
)
]
)
lambda_role = iam.Role(self, 'demo-lambda-role',
assumed_by=iam.ServicePrincipal("lambda.amazonaws.com")
)
lambda_role.add_managed_policy(
iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSLambdaENIManagementAccess')
)
lambda_role.add_managed_policy(
iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSLambdaBasicExecutionRole')
)
fn_submit = lambda_.Function(self, 'demo-sfn-submit',
function_name='demo-sfn-submit',
handler='handler.do',
runtime=lambda_.Runtime.PYTHON_3_8,
code=lambda_.Code.asset('./craftaws/func_submit'),
role=lambda_role,
timeout=core.Duration.seconds(900),
allow_public_subnet=False,
vpc=self.vpc,
vpc_subnets=aws_ec2.SubnetSelection(subnet_type=aws_ec2.SubnetType.ISOLATED),
environment={}
)
fn_job_1 = lambda_.Function(self, 'demo-sfn-job1',
function_name='demo-sfn-job1',
handler='handler.do',
runtime=lambda_.Runtime.PYTHON_3_8,
code=lambda_.Code.asset('./craftaws/func_job_1'),
role=lambda_role,
timeout=core.Duration.seconds(900),
allow_public_subnet=False,
vpc=self.vpc,
vpc_subnets=aws_ec2.SubnetSelection(subnet_type=aws_ec2.SubnetType.ISOLATED),
environment={}
)
fn_job_2 = lambda_.Function(self, 'demo-sfn-job2',
function_name='demo-sfn-job2',
handler='handler.do',
runtime=lambda_.Runtime.PYTHON_3_8,
code=lambda_.Code.asset('./craftaws/func_job_2'),
role=lambda_role,
timeout=core.Duration.seconds(900),
allow_public_subnet=False,
vpc=self.vpc,
vpc_subnets=aws_ec2.SubnetSelection(subnet_type=aws_ec2.SubnetType.ISOLATED),
environment={}
)
submit_job = tasks.LambdaInvoke(self, "Submit Job",
lambda_function=fn_submit,
# Lambda's result is in the attribute `Payload`
output_path="$.Payload"
)
step_1_job = tasks.LambdaInvoke(self, "Job_1",
lambda_function=fn_job_1,
# Lambda's result is in the attribute `Payload`
output_path="$.Payload"
)
wait_x = sfn.Wait(self, "Wait X Seconds",
time=sfn.WaitTime.duration(core.Duration.seconds(60))
)
step_2_job = tasks.LambdaInvoke(self, "Job_2",
lambda_function=fn_job_1,
# Lambda's result is in the attribute `Payload`
output_path="$.Payload"
)
job_succeed = sfn.Succeed(self, "Job Succeed",
comment="AWS Batch Job Succeed"
)
definition = submit_job.next(step_1_job).next(wait_x).next(step_2_job).next(job_succeed)
sfn.StateMachine(self, "StateMachine",
definition=definition,
timeout=core.Duration.minutes(5)
)
|
[
"noreply@github.com"
] |
noreply@github.com
|
25752d54216e1107ab2fd434159883fd69ca251e
|
a9eb12ef11b671d9a3e3cb9562fd26928d3e1815
|
/stock_trade/GetLongData.py
|
fc1a45f0ae4e0b745ca4c72a4521eeab4ecf425f
|
[] |
no_license
|
longfeiw07/stock
|
f7c92550805896b776254cd9be671428b455f749
|
5e1a55bd6f360054cb3d589935f106dffb74b929
|
refs/heads/master
| 2020-09-21T21:37:22.054449
| 2019-11-07T15:26:46
| 2019-11-07T15:26:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,922
|
py
|
import os
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
import time
import re
import pandas as pd
from selenium.common.exceptions import WebDriverException
def wheel_element(element, deltaY = 120, offsetX = 0, offsetY = 0):
error = element._parent.execute_script("""
var element = arguments[0];
var deltaY = arguments[1];
var box = element.getBoundingClientRect();
var clientX = box.left + (arguments[2] || box.width / 2);
var clientY = box.top + (arguments[3] || box.height / 2);
var target = element.ownerDocument.elementFromPoint(clientX, clientY);
for (var e = target; e; e = e.parentElement) {
if (e === element) {
target.dispatchEvent(new MouseEvent('mouseover', {view: window, bubbles: true, cancelable: true, clientX: clientX, clientY: clientY}));
target.dispatchEvent(new MouseEvent('mousemove', {view: window, bubbles: true, cancelable: true, clientX: clientX, clientY: clientY}));
target.dispatchEvent(new WheelEvent('wheel', {view: window, bubbles: true, cancelable: true, clientX: clientX, clientY: clientY, deltaY: deltaY}));
return;
}
}
return "Element is not interactable";
""", element, deltaY, offsetX, offsetY)
if error:
raise WebDriverException(error)
def getDayOrWeekData(driver):
stockData={}
#定位k线
kLine=driver.find_element_by_xpath('//*[@id="testcanvas"]/div[4]')
#print(kLine.text)
rr=re.compile(r'(.*) 开:(.*) 高:(.*) 低:(.*) 收:(.*) 涨跌:(.*) 涨幅:(.*)')
a=rr.match(kLine.text).groups()
b=zip(['年月日','开','高','低','收','涨跌','涨幅'],a)
c=dict(b)
stockData=c
#定位MA
maLine=driver.find_element_by_css_selector('#testcanvas > div.hxc3-hxc3KlinePricePane-hover-ma')
#print(maLine.text)
rr=re.compile(r'MA5: (.*) MA10: (.*) MA30: (.*)')
try:
a=rr.match(maLine.text).groups()
except:
a=[0,0,0]
b=zip(['MA5','MA10','MA30'],a)
c=dict(b)
stockData=dict(stockData,**c)
#定位成交量
deal=driver.find_element_by_xpath('//*[@id="testcanvas"]/div[6]')
#deal.text
#print(deal.text)
rr=re.compile(r'成交量 量(.*)')
a=rr.match(deal.text).groups()[0]
#有万或者亿两个单位
if '万' in a:
c=float(a.replace('万',''))*10000
elif '亿' in a:
c=float(a.replace('亿',''))*100000000
else:
c=a
#b=zip(['成交量'],c)
c=dict({"成交量":c})
stockData=dict(stockData,**c)
#定位MACD
macd=driver.find_element_by_xpath('//*[@id="testcanvas"]/div[7]')
#print(macd.text)
rr=re.compile(r'MACD: (.*) DIFF: (.*) DEA: (.*)')
a=rr.match(macd.text).groups()
b=zip(['MACD','DIFF','DEA'],a)
c=dict(b)
stockData=dict(stockData,**c)
return stockData
#配置chromeDriver
options=webdriver.ChromeOptions()
options.add_argument('--disable-gpu')
driver=webdriver.Chrome(chrome_options=options)
urlPattern=r'http://stockpage.10jqka.com.cn/%s/'
def Start(stock):
#获取股票页面
#stock='600077'
driver.get(urlPattern % stock)
#切入到iframe
driver.switch_to.frame(driver.find_element_by_xpath("//*[@id='hqzs']/div/iframe"))
#点击按日线
dayButton=driver.find_element_by_xpath("/html/body/ul/li[2]/a")
dayButton.send_keys('\n')#click超出显示范围则无法点击,click完全模拟鼠标操作。
#点击MACD
MACDButton=driver.find_element_by_xpath('//*[@id="testcanvas"]/div[2]/ul/li[1]/a')
MACDButton.send_keys('\n')
time.sleep(2)
#移动到界面地最左端
actions = ActionChains(driver)
panel=driver.find_element_by_xpath("//*[@id='hxc3_cross_testcanvas']")
#进行缩放
for x in range(10):
wheel_element(panel,120)
#time.sleep(50)
#必不可少,因为perform并不会清空历史动作列表
actions.reset_actions()
actions.move_to_element_with_offset(panel,0,250)
actions.perform()
#添加每次移动1像素的动作,然后不停调用即可
#这里为何是1呢,因为图像宽618,但是步进是不同的每只股票.
actions.reset_actions()
actions.move_by_offset(1, 0)
dayDataList=[]
lastDay=""
for i in range(618):
actions.perform()
#获取当前日的数值
#time.sleep(0.01)
data=getDayOrWeekData(driver)
#去重
if(data['年月日']!=lastDay):
data['stock']=stock
dayDataList.append(data)
lastDay=data['年月日']
dataFrame_day=pd.DataFrame(dayDataList)
#dataFrame.to_csv("%s_dayresult.csv" % stock)
###########周数据
#点击按周线
#切入到iframe
#driver.refresh()
#driver.switch_to.frame(driver.find_element_by_xpath("//*[@id='hqzs']/div/iframe"))
dayButton=driver.find_element_by_xpath("/html/body/ul/li[3]/a")
dayButton.send_keys('\n')
#点击KDJ
MACDButton=driver.find_element_by_xpath("//*[@id='testcanvas']/div[2]/ul/li[1]/a")
MACDButton.send_keys('\n')
#进行缩放
time.sleep(2)
#移动到界面地最左端
actions = ActionChains(driver)
panel=driver.find_element_by_xpath("//*[@id='hxc3_cross_testcanvas']")
for x in range(5):
wheel_element(panel,120)
#必不可少,因为perform并不会清空历史动作列表
actions.reset_actions()
actions.move_to_element_with_offset(panel,0,250)
actions.perform()
#添加每次移动1像素的动作,然后不停调用即可
#这里为何是1呢,因为图像宽618,但是步进是不同的每只股票.
actions.reset_actions()
actions.move_by_offset(1, 0)
weekDataList=[]
lastWeek=""
for i in range(618):
actions.perform()
#获取当前月份的数值
#time.sleep(0.01)
data=getDayOrWeekData(driver)
if(data['年月日']!=lastWeek):
data['stock']=stock
weekDataList.append(data)
lastWeek=data['年月日']
dataFrame_week=pd.DataFrame(weekDataList)
#dataFrame.to_csv("%s_weekresult.csv" % stock)
#########获取月数据
#切入到iframe
#driver.refresh()
#driver.switch_to.frame(driver.find_element_by_xpath("//*[@id='hqzs']/div/iframe"))
dayButton=driver.find_element_by_xpath("/html/body/ul/li[4]/a")
dayButton.send_keys('\n')
#点击MACD
MACDButton=driver.find_element_by_xpath("//*[@id='testcanvas']/div[2]/ul/li[1]/a")
MACDButton.send_keys('\n')
#进行缩放
time.sleep(2)
#移动到界面地最左端
actions = ActionChains(driver)
panel=driver.find_element_by_xpath("//*[@id='hxc3_cross_testcanvas']")
for x in range(5):
wheel_element(panel,120)
#必不可少,因为perform并不会清空历史动作列表
actions.reset_actions()
actions.move_to_element_with_offset(panel,0,250)
actions.perform()
#添加每次移动1像素的动作,然后不停调用即可
#这里为何是1呢,因为图像宽618,但是步进是不同的每只股票.
actions.reset_actions()
actions.move_by_offset(1, 0)
monthDataList=[]
lastMonth=""
for i in range(618):
actions.perform()
#获取当前月份的数值
#time.sleep(0.01)
data=getDayOrWeekData(driver)
if(data['年月日']!=lastMonth):
data['stock']=stock
monthDataList.append(data)
lastMonth=data['年月日']
dataFrame_month=pd.DataFrame(monthDataList)
#dataFrame.to_csv("%s_monthresult.csv" % stock)
return dataFrame_day,dataFrame_week,dataFrame_month
def End():
driver.quit() # 游览器关闭
if __name__=="__main__":
stock='600516'
dataFrame_day,dataFrame_week,dataFrame_month = Start(stock)
dataFrame_day.to_csv("result.csv")
End()
|
[
"38155482+herb711@users.noreply.github.com"
] |
38155482+herb711@users.noreply.github.com
|
ba922c5445274bb9aa6a8922f040363fbcc0e779
|
5ad4d4ff6060f67e262e42f0d6a24496efa87235
|
/10_9_silent_cats_and_dogs.py
|
74d432d17c5575b6e2dfbbe49c2d012c2325a024
|
[] |
no_license
|
silasfelinus/PythonProjects
|
779bba4b508e2110510a1607e44c3edbf8a232ea
|
4474e03c9e21e35c100bfb524a86a35d1b59820d
|
refs/heads/master
| 2022-12-08T12:34:42.987932
| 2020-08-27T21:10:30
| 2020-08-27T21:10:30
| 290,848,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
def file_error(filename):
msg = "Sorry, the file " + filename + " can't be found."
print(msg)
cat_file = "cats.txt"
dog_file = "dog.txt"
try:
with open(cat_file) as f_obj:
cat_names = f_obj.read()
cats = cat_names.split()
print("Cat names are: ")
print(cats)
except FileNotFoundError:
pass
try:
with open(dog_file) as f_obj:
dog_names = f_obj.read()
dogs = dog_names.split()
print("Dog names are: ")
print(dogs)
except FileNotFoundError:
pass
|
[
"silasfelinus@gmail.com"
] |
silasfelinus@gmail.com
|
c467a8989e83b33c6f9211a8e00bb6257f7c8175
|
35876012de636c0bfa6107118e8177c4aa0ff880
|
/chaojiying.py
|
bc7baa587ab887c92911e85622f9b1e391f06cb4
|
[] |
no_license
|
Qithird/spider-test
|
5dfe4fa715b31378955c219931e3063e0ab50c43
|
3cee1f93fe7743df732c20a11a87a3d0fb4b8163
|
refs/heads/master
| 2020-03-23T07:07:41.153896
| 2018-07-17T08:10:57
| 2018-07-17T08:10:57
| 141,250,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,616
|
py
|
#!/usr/bin/env python
# coding:utf-8
import requests
from hashlib import md5
class Chaojiying_Client(object):
def __init__(self, username, password, soft_id):
self.username = username
self.password = md5(password.encode('utf-8')).hexdigest()
self.soft_id = soft_id
self.base_params = {
'user': self.username,
'pass2': self.password,
'softid': self.soft_id,
}
self.headers = {
'Connection': 'Keep-Alive',
'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0)',
}
def PostPic(self, im, codetype):
"""
im: 图片字节
codetype: 题目类型 参考 http://www.chaojiying.com/price.html
"""
params = {
'codetype': codetype,
}
params.update(self.base_params)
files = {'userfile': ('ccc.jpg', im)}
r = requests.post('http://upload.chaojiying.net/Upload/Processing.php', data=params, files=files, headers=self.headers)
return r.json()
def ReportError(self, im_id):
"""
im_id:报错题目的图片ID
"""
params = {
'id': im_id,
}
params.update(self.base_params)
r = requests.post('http://upload.chaojiying.net/Upload/ReportError.php', data=params, headers=self.headers)
return r.json()
# if __name__ == '__main__':
# chaojiying = Chaojiying_Client('超级鹰用户名', '超级鹰用户名的密码', '96001')
# im = open('a.jpg', 'rb').read()
# print(chaojiying.PostPic(im, 1902))
|
[
"920664709@163.com"
] |
920664709@163.com
|
8fb1e8bf2d268cc774d14e8ccd13c2898c3813ea
|
6b5f840398183581bae04242453bd833b2e1409b
|
/ansys/mapdl/core/mapdl_corba.py
|
fa9604ef7191be0c68be773e08fccef61eb2c0b5
|
[
"MIT"
] |
permissive
|
natter1/pyansys
|
1b109d889c516b9aa17d510f9671201bdaddf4e5
|
33392706e60688268c95001d5754e834957dcd01
|
refs/heads/master
| 2021-06-17T03:12:51.487326
| 2021-05-08T01:19:32
| 2021-05-08T01:19:32
| 202,570,893
| 0
| 1
|
NOASSERTION
| 2021-05-08T00:00:46
| 2019-08-15T15:57:11
|
Python
|
UTF-8
|
Python
| false
| false
| 11,568
|
py
|
"""CORBA implementation of the MAPDL interface"""
import atexit
import subprocess
import time
import re
import os
import weakref
from ansys.mapdl.core.mapdl import _MapdlCore
from ansys.mapdl.core.misc import threaded, random_string
from ansys.mapdl.core.errors import MapdlRuntimeError, MapdlExitedError
from ansys_corba import CORBA
INSTANCES = []
@atexit.register
def _cleanup():
for instance in INSTANCES:
try:
if instance() is not None:
instance().exit()
except:
pass
def tail(filename, nlines):
"""Read the last nlines of a text file """
with open(filename) as qfile:
qfile.seek(0, os.SEEK_END)
endf = position = qfile.tell()
linecnt = 0
while position >= 0:
qfile.seek(position)
next_char = qfile.read(1)
if next_char == "\n" and position != endf-1:
linecnt += 1
if linecnt == nlines:
break
position -= 1
if position < 0:
qfile.seek(0)
return qfile.read()
def launch_corba(exec_file=None, run_location=None, jobname=None, nproc=None,
verbose=False, additional_switches='', start_timeout=60):
"""Start MAPDL in AAS mode
Notes
-----
The CORBA interface is likely to fail on computers with multiple
network adapters. The ANSYS RPC isn't smart enough to determine
the right adapter and will likely try to communicate on the wrong
IP.
"""
# Using stored parameters so launch command can be run from a
# cached state (when launching the GUI)
# can't run /BATCH in windows, so we trick it using "-b" and
# provide a dummy input file
if os.name == 'nt':
# must be a random filename to avoid conflicts with other
# potential instances
tmp_file = '%s.inp' % random_string(10)
with open(os.path.join(run_location, tmp_file), 'w') as f:
f.write('FINISH')
additional_switches += ' -b -i %s -o out.txt' % tmp_file
# command must include "aas" flag to start MAPDL server
command = '"%s" -aas -j %s -np %d %s' % (exec_file,
jobname,
nproc,
additional_switches)
# remove any broadcast files
broadcast_file = os.path.join(run_location, 'mapdl_broadcasts.txt')
if os.path.isfile(broadcast_file):
os.remove(broadcast_file)
if verbose:
subprocess.Popen(command, shell=True,
cwd=run_location)
else:
subprocess.Popen(command, shell=True,
cwd=run_location,
stdin=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
# listen for broadcast file
telapsed = 0
tstart = time.time()
started_rpc = False
while telapsed < start_timeout and not started_rpc:
try:
if os.path.isfile(broadcast_file):
broadcast = open(broadcast_file).read()
# see if connection to RPC has been made
rpc_txt = 'visited:collaborativecosolverunitior-set:'
started_rpc = rpc_txt in broadcast
time.sleep(0.1)
telapsed = time.time() - tstart
except KeyboardInterrupt:
raise KeyboardInterrupt
# exit if timed out
if not started_rpc:
err_str = 'Unable to start ANSYS within %.1f seconds' % start_timeout
if os.path.isfile(broadcast_file):
broadcast = open(broadcast_file).read()
err_str += '\n\nLast broadcast:\n%s' % broadcast
raise TimeoutError(err_str)
# return CORBA key
keyfile = os.path.join(run_location, 'aaS_MapdlId.txt')
return open(keyfile).read()
class MapdlCorba(_MapdlCore):
"""CORBA implementation of the MAPDL interface
Parameters
----------
corba_key : str
CORBA key used to start the corba interface
start : dict
Additional start parameters to be passed to launcher when
launching the gui interactively.
exec_file, run_location, jobname='file', nproc=2,
additional_switches='', timeout
verbose : bool
Print all output from MAPDL to Python. Useful for debugging
"""
def __init__(self, loglevel='INFO', log_apdl='w', use_vtk=True,
log_broadcast=False, verbose=False, **start_parm):
"""Open a connection to MAPDL via a CORBA interface"""
super().__init__(loglevel=loglevel, use_vtk=use_vtk, log_apdl=log_apdl,
log_broadcast=False, **start_parm)
self._broadcast_logger = None
self._server = None
self._outfile = None
self._log_broadcast = log_broadcast
self._launch(start_parm, verbose)
super().__init__(loglevel=loglevel, use_vtk=use_vtk, log_apdl=log_apdl,
**start_parm)
# critical for collection
INSTANCES.append(weakref.ref(self))
def _launch(self, start_parm, verbose):
corba_key = launch_corba(verbose=verbose, **start_parm)
orb = CORBA.ORB_init()
self._server = orb.string_to_object(corba_key)
# verify you can connect to MAPDL
try:
self._server.getComponentName()
except:
raise MapdlRuntimeError('Unable to connect to APDL server')
# must set to non-interactive in linux
if os.name == 'posix':
self.batch()
self._log.debug('Connected to ANSYS using CORBA interface with key %s',
corba_key)
# separate logger for broadcast file
if self._log_broadcast:
self._broadcast_logger = self._start_broadcast_logger()
@property
def _broadcast_file(self):
return os.path.join(self.directory, 'mapdl_broadcasts.txt')
@threaded
def _start_broadcast_logger(self, update_rate=1.0):
"""Separate logger using broadcast_file """
# listen to broadcast file
loadstep = 0
overall_progress = 0
try:
old_tail = ''
old_size = 0
while not self._exited:
new_size = os.path.getsize(self._broadcast_file)
if new_size != old_size:
old_size = new_size
new_tail = tail(self._broadcast_file, 4)
if new_tail != old_tail:
lines = new_tail.split('>>')
for line in lines:
line = line.strip().replace('<<broadcast::', '')
if "current-load-step" in line:
n = int(re.search(r'\d+', line).group())
if n > loadstep:
loadstep = n
overall_progress = 0
self._log.info(line)
elif "overall-progress" in line:
n = int(re.search(r'\d+', line).group())
if n > overall_progress:
overall_progress = n
self._log.info(line)
old_tail = new_tail
time.sleep(update_rate)
except Exception as e:
pass
def exit(self, close_log=True, timeout=3):
"""Exit MAPDL process"""
if self._exited:
return
# self._log.debug('Exiting ANSYS')
if self._server is not None:
# attempt to cache final path and lockfile before exiting
try:
path = self.directory
lockfile = self._lockfile
except:
pass
try:
self.run('/EXIT')
except:
pass
try:
self._server.terminate()
except:
pass
self._server = None
if close_log:
self._close_apdl_log()
# wait for lockfile to be removed
if timeout:
tstart = time.time()
if lockfile is not None:
while os.path.isfile(lockfile):
time.sleep(0.05)
telap = tstart - time.time()
if telap > timeout:
return 1
try:
self._remove_lockfile()
except:
pass
self._exited = True
def _remove_lockfile(self):
"""Removes lockfile"""
if os.path.isfile(self._lockfile):
try:
os.remove(self._lockfile)
except:
pass
def _run(self, command, **kwargs):
"""Sends a command to the mapdl server via the CORBA interface"""
self._reset_cache()
if self._server is None:
raise MapdlExitedError('ANSYS exited')
# cleanup command
command = command.strip()
if not command:
raise ValueError('Cannot run empty command')
if command[:4].lower() == 'cdre':
with self.non_interactive:
self.run(command)
return self._response
if command[:4].lower() == '/com':
split_command = command.split(',')
if len(split_command) < 2:
return ''
elif not split_command[1]:
return ''
elif split_command[1]:
if not split_command[1].strip():
return ''
# /OUTPUT not redirected properly in corba
if command[:4].lower() == '/out':
items = command.split(',')
if len(items) > 1: # redirect to file
if len(items) > 2:
if items[2].strip():
filename = '.'.join(items[1:3]).strip()
else:
filename = '.'.join(items[1:2]).strip()
else:
filename = items[1]
if filename:
if os.path.basename(filename) == filename:
filename = os.path.join(self.directory, filename)
self._output = filename
if len(items) == 5:
if items[4].lower().strip() == 'append':
self._outfile = open(filename, 'a')
else:
self._outfile = open(filename, 'w')
else:
self._close_output()
else:
self._close_output()
return ''
# include error checking
text = ''
additional_text = ''
self._log.debug('Running command %s', command)
text = self._server.executeCommandToString(command)
# print suppressed output
additional_text = self._server.executeCommandToString('/GO')
# return text, additional_text
if text == additional_text:
additional_text = ''
response = '%s\n%s' % (text, additional_text)
if self._outfile is not None:
self._outfile.write(response)
return response
def _close_output(self):
"""closes the output file"""
self._output = ''
if self._outfile:
self._outfile.close()
self._outfile = None
|
[
"noreply@github.com"
] |
noreply@github.com
|
1d00b242cf132072efcf3453842c9a8c54e066b8
|
a0367d26bf357725ab97c36c930ecb2ae3521355
|
/algorithm-problem-solving-strategies/brute_force/picnic.py
|
a7fb2516d80d6607c9b9f8c948aad4f33afcd824
|
[] |
no_license
|
fidemin/algorithm-study
|
c511a59cb8f3bd227922d87465b2d8a529a1f7a8
|
41277da10c1ec966a20664b961f8e75434ac517f
|
refs/heads/main
| 2022-06-13T00:43:54.223348
| 2022-06-02T16:37:40
| 2022-06-02T16:37:40
| 163,795,704
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,147
|
py
|
def make_is_friend(friends):
is_friend = [[False]* 10 for _ in range(10)]
is_friend[0][0] = True
for i in range(0, len(friends), 2):
is_friend[friends[i]][friends[i+1]] = True
is_friend[friends[i+1]][friends[i]] = True
return is_friend
def count_pairings_recursive(n, is_friend, taken):
first_free = None
for i in range(n):
if not taken[i]:
first_free = i
break;
if first_free is None:
return 1
result = 0
for i in range(first_free+1, n):
if not taken[i] and is_friend[first_free][i]:
taken[first_free] = taken[i] = True
result += count_pairings_recursive(n, is_friend, taken)
taken[first_free] = taken[i] = False
return result
def count_pairings(n, friends):
is_friend = make_is_friend(friends)
taken = [False] * 10
result = count_pairings_recursive(n, is_friend, taken, 0)
return result
if __name__ == "__main__":
print(count_pairings(4, (0, 1, 1, 2, 2, 3, 3, 0, 0, 2, 1, 3)))
print(count_pairings(6, (0, 1, 0, 2, 1, 2, 1, 3, 1, 4, 2, 3, 2, 4, 3, 4, 3, 5, 4, 5)))
|
[
"yhmin84@gmail.com"
] |
yhmin84@gmail.com
|
13a4905ae7077bf34c1cfcef8d53ed482623a436
|
2ff4a38b86dfee4115c0a9280e95ff042d36f8bd
|
/programmers/lv2/emergency_boat_lv2.py
|
4e7c867c41c62802da3a4d88574770e17a8f9e71
|
[] |
no_license
|
ohtaehyun/algo_study
|
5e0adc6d18a186d959f0ad191af0d916f5c99793
|
87ac40b89b5ddbba09e8b3dd86ed0a3defc0590b
|
refs/heads/master
| 2023-02-24T13:52:37.323111
| 2021-01-25T09:32:55
| 2021-01-25T09:32:55
| 284,712,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
from collections import deque
def solution(people, limit):
answer = 0
people.sort()
p =deque(list(people))
while p :
weight = p.popleft()
remain_weight = limit - weight
while p :
w = p.pop()
if w <= remain_weight:
break
else :
answer += 1
answer += 1
return answer
|
[
"othdev95@gmail.com"
] |
othdev95@gmail.com
|
640cbd8b45b1907c93b397fbf8e94106dd7f97d1
|
6ee2254e0a0e5073927b9bcf8bd1f76cf78b87a1
|
/4字符串处理技巧训练/4-1如何拆分含有多种分隔符的字符串.py
|
eb15a68fdd2c6c49a052b9bb5a74977ef85bdb84
|
[] |
no_license
|
skytotwo/Py3AdvancedTechnique
|
8093b9de7f78954497e80fa2531f55a3a1d69204
|
deac90787b32dea40d7b96b9f40202f93f20a654
|
refs/heads/master
| 2020-03-07T03:34:40.108462
| 2018-04-10T15:51:57
| 2018-04-10T15:51:57
| 125,972,090
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,291
|
py
|
#案例引入
#我们要把某个字符串依据分隔符拆分不同的字段,该字段包含多种不同的分隔符,例如:
#
# s = 'ab;cd|efg|hi,jkl|mn\topq;rst,uvw\txyz'
#其中<,>,<;>,<|>,<\t>都是分隔符号,如何处理?
#解决方案
#方法一:连续使用str.split()方法,每次处理一种分隔符号
#方法二:使用正则表达式的re.split()方法,一次性拆分字符串
#解决方案一
s = "ab;cd|efg|hi,jkl|mn\topq;rst,uvw\txyz"
s.split(';')
res = s.split(';')
print(res)
#map(lambda x: x.split('|'), res)
#为使得map得出的序列是一个一维的序列,这里将x.split结果添加到一个新的列表里:
t=[]
c = list(map(lambda x: t.extend(x.split('|')), res))
#此时t就是一个新的分隔后的一维列表,此时再对t进行分割
print(t)
#以此类推~得到最终的结果
#也可使用下列函数解决:
def mySplit(s, ds):
res=[s]
for d in ds:
t=[]
list(map(lambda x: t.extend(x.split('|')), res))
res = t
return [x for x in res if x] #对结果去除空值
s1 = "ab;cd|efg|hi,jkl|mn\topq;rst,uvw\txyz"
print(mySplit(s1, ';,|\t'))
#解决方案二:(推荐)
import re
s2 = "ab;cd|efg|hi,jkl|mn\topq;rst,uvw\txyz"
c = re.split('[,;\t|]+', s2)
print(c)
|
[
"381944069@qq.com"
] |
381944069@qq.com
|
c97d5c8534d89a3098f1408d6927557520a716a0
|
09e5cfe06e437989a2ccf2aeecb9c73eb998a36c
|
/modules/cctbx_project/xfel/command_line/xtc_dump.py
|
cf748f55c06c41577045989d90fdb4ed5880b085
|
[
"BSD-3-Clause-LBNL",
"BSD-3-Clause"
] |
permissive
|
jorgediazjr/dials-dev20191018
|
b81b19653624cee39207b7cefb8dfcb2e99b79eb
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
refs/heads/master
| 2020-08-21T02:48:54.719532
| 2020-01-25T01:41:37
| 2020-01-25T01:41:37
| 216,089,955
| 0
| 1
|
BSD-3-Clause
| 2020-01-25T01:41:39
| 2019-10-18T19:03:17
|
Python
|
UTF-8
|
Python
| false
| false
| 10,862
|
py
|
from __future__ import absolute_import, division, print_function
from six.moves import range
# -*- Mode: Python; c-basic-offset: 2; indent-tabs-mode: nil; tab-width: 8 -*-
#
# LIBTBX_SET_DISPATCHER_NAME cctbx.xfel.xtc_dump
#
import psana
from xfel.cftbx.detector import cspad_cbf_tbx
from xfel.cxi.cspad_ana import cspad_tbx, rayonix_tbx
import os, sys
import libtbx.load_env
from libtbx.utils import Sorry, Usage
from dials.util.options import OptionParser
from libtbx.phil import parse
from libtbx import easy_pickle
phil_scope = parse('''
dispatch {
max_events = None
.type = int
.help = If not specified, process all events. Otherwise, only process this many
selected_events = False
.type = bool
.help = If True, only dump events specified in input.event scopes
}
input {
cfg = None
.type = str
.help = Path to psana config file. Genearlly not needed for CBFs. For image pickles, \
the psana config file should have a mod_image_dict module.
experiment = None
.type = str
.help = Experiment identifier, e.g. cxi84914
run_num = None
.type = int
.help = Run number or run range to process
address = None
.type = str
.help = Detector address, e.g. CxiDs2.0:Cspad.0 or detector alias, e.g. Ds1CsPad
calib_dir = None
.type = str
.help = Non-standard calib directory location
xtc_dir = None
.type = str
.help = Non-standard xtc directory location
timestamp = None
.type = str
.multiple = True
.help = Event timestamp(s) of event(s) in human-readable format of images to
.help = dump (must also specify dispatch.selected_events=True.)
}
format {
file_format = *cbf pickle
.type = choice
.help = Output file format, 64 tile segmented CBF or image pickle
pickle {
out_key = cctbx.xfel.image_dict
.type = str
.help = Key name that mod_image_dict uses to put image data in each psana event
}
cbf {
detz_offset = None
.type = float
.help = Distance from back of detector rail to sample interaction region (CXI) \
or actual detector distance (XPP/MFX)
override_energy = None
.type = float
.help = If not None, use the input energy for every event instead of the energy \
from the XTC stream
mode = *cspad rayonix
.type = choice
cspad {
gain_mask_value = None
.type = float
.help = If not None, use the gain mask for the run to multiply the low-gain pixels by this number
}
rayonix {
bin_size = 2
.type = int
.help = Detector binning mode
override_beam_x = None
.type = float
.help = If set, override the beam X position
override_beam_y = None
.type = float
.help = If set, override the beam Y position
}
}
}
output {
output_dir = .
.type = str
.help = Directory output files will be placed
tmp_output_dir = None
.type = str
.help = Directory for CBFlib tmp output files
}
''', process_includes=True)
class Script(object):
  """ Script to process dump XFEL data at LCLS.

  Reads events from a psana XTC data source, distributes them across MPI
  ranks, and dumps each shot either as a pickle (pre-processed data pulled
  out of the event) or as a CBF image (cspad or rayonix detector).
  """
  def __init__(self):
    """ Set up the option parser. Arguments come from the command line or a phil file """
    # Usage text shown whenever required parameters are missing.
    self.usage = """
%s input.experiment=experimentname input.run_num=N input.address=address
format.file_format=cbf format.cbf.detz_offset=N
%s input.experiment=experimentname input.run_num=N input.address=address
format.file_format=pickle format.pickle.cfg=path
"""%(libtbx.env.dispatcher_name, libtbx.env.dispatcher_name)
    self.parser = OptionParser(
      usage = self.usage,
      phil = phil_scope)

  def run(self):
    """ Process all images assigned to this thread """
    params, options = self.parser.parse_args(
      show_diff_phil=True)

    # Validate the minimal set of required parameters before touching psana.
    if params.input.experiment is None or \
       params.input.run_num is None or \
       params.input.address is None:
      raise Usage(self.usage)

    # Each output format has one additional mandatory parameter.
    if params.format.file_format == "cbf":
      if params.format.cbf.detz_offset is None:
        raise Usage(self.usage)
    elif params.format.file_format == "pickle":
      if params.input.cfg is None:
        raise Usage(self.usage)
    else:
      raise Usage(self.usage)

    if not os.path.exists(params.output.output_dir):
      raise Sorry("Output path not found:" + params.output.output_dir)

    #Environment variable redirect for CBFLib temporary CBF_TMP_XYZ file output
    if params.format.file_format == "cbf":
      if params.output.tmp_output_dir is None:
        tmp_dir = os.path.join(params.output.output_dir, '.tmp')
      else:
        tmp_dir = os.path.join(params.output.tmp_output_dir, '.tmp')
      if not os.path.exists(tmp_dir):
        try:
          os.makedirs(tmp_dir)
        except Exception as e:
          # Another MPI rank may have created the directory concurrently;
          # only re-raise if it is still missing.
          if not os.path.exists(tmp_dir):
            halraiser(e)
      os.environ['CBF_TMP_DIR'] = tmp_dir

    # Save the paramters
    self.params = params
    self.options = options

    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank() # each process in MPI has a unique id, 0-indexed
    size = comm.Get_size() # size: number of processes running in this job

    # set up psana
    if params.input.cfg is not None:
      psana.setConfigFile(params.input.cfg)
    if params.input.calib_dir is not None:
      psana.setOption('psana.calib-dir',params.input.calib_dir)
    dataset_name = "exp=%s:run=%s:idx"%(params.input.experiment,params.input.run_num)
    if params.input.xtc_dir is not None:
      dataset_name = "exp=%s:run=%s:idx:dir=%s"%(params.input.experiment,params.input.run_num,params.input.xtc_dir)
    ds = psana.DataSource(dataset_name)
    if params.format.file_format == "cbf":
      src = psana.Source('DetInfo(%s)'%params.input.address)
      psana_det = psana.Detector(params.input.address, ds.env())

    # set this to sys.maxint to analyze all events
    if params.dispatch.max_events is None:
      max_events = sys.maxsize
    else:
      max_events = params.dispatch.max_events

    for run in ds.runs():
      if params.format.file_format == "cbf":
        if params.format.cbf.mode == "cspad":
          # load a header only cspad cbf from the slac metrology
          base_dxtbx = cspad_cbf_tbx.env_dxtbx_from_slac_metrology(run, params.input.address)
          if base_dxtbx is None:
            raise Sorry("Couldn't load calibration file for run %d"%run.run())
        elif params.format.cbf.mode == "rayonix":
          # load a header only rayonix cbf from the input parameters
          detector_size = rayonix_tbx.get_rayonix_detector_dimensions(ds.env())
          base_dxtbx = rayonix_tbx.get_dxtbx_from_params(params.format.cbf.rayonix, detector_size)

      # list of all events
      times = run.times()
      if params.dispatch.selected_events:
        # Keep only events whose human-readable timestamp was listed in
        # input.timestamp.
        times = [t for t in times if cspad_tbx.evt_timestamp((t.seconds(),t.nanoseconds()/1e6)) in params.input.timestamp]
      nevents = min(len(times),max_events)
      # chop the list into pieces, depending on rank. This assigns each process
      # events such that the get every Nth event where N is the number of processes
      mytimes = [times[i] for i in range(nevents) if (i+rank)%size == 0]
      for i in range(len(mytimes)):
        evt = run.event(mytimes[i])
        id = evt.get(psana.EventId)
        print("Event #",i," has id:",id)

        timestamp = cspad_tbx.evt_timestamp(cspad_tbx.evt_time(evt)) # human readable format
        if timestamp is None:
          print("No timestamp, skipping shot")
          continue
        if evt.get("skip_event") or "skip_event" in [key.key() for key in evt.keys()]:
          print("Skipping event",timestamp)
          continue
        # Compact timestamp YYYYMMDDhhmmssfff used to name output files.
        t = timestamp
        s = t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[17:19] + t[20:23]
        print("Processing shot", s)

        if params.format.file_format == "pickle":
          if evt.get("skip_event"):
            print("Skipping event",id)
            continue
          # the data needs to have already been processed and put into the event by psana
          data = evt.get(params.format.pickle.out_key)
          if data is None:
            print("No data")
            continue

          # set output paths according to the templates
          path = os.path.join(params.output.output_dir, "shot-" + s + ".pickle")
          print("Saving", path)
          easy_pickle.dump(path, data)
        elif params.format.file_format == "cbf":
          if params.format.cbf.mode == "cspad":
            # get numpy array, 32x185x388
            data = cspad_cbf_tbx.get_psana_corrected_data(psana_det, evt, use_default=False, dark=True,
                                                          common_mode=None,
                                                          apply_gain_mask=params.format.cbf.cspad.gain_mask_value is not None,
                                                          gain_mask_value=params.format.cbf.cspad.gain_mask_value,
                                                          per_pixel_gain=False)
            distance = cspad_tbx.env_distance(params.input.address, run.env(), params.format.cbf.detz_offset)
          elif params.format.cbf.mode == "rayonix":
            data = rayonix_tbx.get_data_from_psana_event(evt, params.input.address)
            distance = params.format.cbf.detz_offset

          if distance is None:
            print("No distance, skipping shot")
            continue

          if self.params.format.cbf.override_energy is None:
            wavelength = cspad_tbx.evt_wavelength(evt)
            if wavelength is None:
              print("No wavelength, skipping shot")
              continue
          else:
            # 12398.4187 looks like hc in eV*Angstrom, converting an energy
            # in eV to a wavelength in Angstrom — TODO confirm units.
            wavelength = 12398.4187/self.params.format.cbf.override_energy

          # stitch together the header, data and metadata into the final dxtbx format object
          if params.format.cbf.mode == "cspad":
            image = cspad_cbf_tbx.format_object_from_data(base_dxtbx, data, distance, wavelength, timestamp, params.input.address, round_to_int=False)
          elif params.format.cbf.mode == "rayonix":
            image = rayonix_tbx.format_object_from_data(base_dxtbx, data, distance, wavelength, timestamp, params.input.address)
          path = os.path.join(params.output.output_dir, "shot-" + s + ".cbf")
          print("Saving", path)

          # write the file
          import pycbf
          image._cbf_handle.write_widefile(path, pycbf.CBF,\
            pycbf.MIME_HEADERS|pycbf.MSG_DIGEST|pycbf.PAD_4K, 0)

      run.end()
    ds.end()
if __name__ == "__main__":
  from dials.util import halraiser
  try:
    script = Script()
    script.run()
  except Exception as e:
    # Route any failure through the dials error handler for a clean report.
    halraiser(e)
|
[
"jorge7soccer@gmail.com"
] |
jorge7soccer@gmail.com
|
031082afed911262fc629700895e628f09e802ef
|
57e1cb070fed466e03cb3cda81a289f447ee13b0
|
/02_readline.py
|
cb6816aacc59543181538163460cd1c66d42c094
|
[] |
no_license
|
diksha1012/Python_Programs
|
3af8423a56879e2fa5d6ec383982ae161d1298e2
|
2f82d740ae4f8891a7ae86c79eb174d9a3dbce4d
|
refs/heads/master
| 2023-06-18T16:00:10.773661
| 2021-07-12T19:10:10
| 2021-07-12T19:10:10
| 384,858,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 188
|
py
|
# Print the first three lines of sample.txt.
# Fix: the original used a bare open()/close() pair, so the file handle
# leaked if any readline() raised; a with-statement guarantees closure.
with open('sample.txt') as f:
    #read first line
    data = f.readline()
    print(data)
    #read second line
    data = f.readline()
    print(data)
    #read third line
    data = f.readline()
    print(data)
|
[
"noreply@github.com"
] |
noreply@github.com
|
1137de8b9a552f8d8213d9e2bfda19d48553a4ba
|
ea12a254d0da6f01c75e481fa2a450e2aa8216e5
|
/.history/client1_20211117162244.py
|
6b0a249d98eca32e5d072b7af5bb062cbab9a780
|
[] |
no_license
|
RiteshK555/Python-socket-chat-server
|
6d1b6e51762a7e10f765af706b6093f1d601cfc4
|
a17f05656d041b5491160fc9f1a84e436ef02a86
|
refs/heads/master
| 2023-08-30T11:16:58.934067
| 2021-11-17T14:22:03
| 2021-11-17T14:22:03
| 429,032,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 759
|
py
|
import socket
import threading

HEADER=64    # fixed-width length prefix, in bytes
PORT=5051
SERVER=socket.gethostbyname(socket.gethostname())   # server runs on this host

# NOTE(review): the connection is made at import time; importing this module
# with no server listening on SERVER:PORT raises ConnectionRefusedError.
client1=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
client1.connect((SERVER,PORT))

def msg_from_client1(msg):
    # Send *msg* with a length-prefixed protocol: a HEADER-byte, space-padded
    # ASCII length field followed by the UTF-8 payload.
    message=msg.encode('utf-8')
    msg_len=len(message)
    send_len=str(msg_len).encode('utf-8')
    send_len+=b' '*(HEADER-len(send_len))
    client1.send(send_len)
    client1.send(message)

def handle_input():
    # Read one line from stdin, forward it to the server, and return it.
    a=input()
    msg_from_client1(a)
    return a

print("Welcome to chat server!")

def handle_recieve():
    # Receive one message (up to 2048 bytes), decode and print it.
    msg_rcv=client1.recv(2048).decode('utf-8')
    print(msg_rcv)
    if len(msg_rcv)>0:
        print("hello")   # presumably leftover debug output — verify
        print(msg_rcv)

# NOTE(review): this loop creates Thread objects but never start()s them and
# never blocks — this looks like an unfinished editor-history snapshot.
while True:
    thread=threading.Thread(target=handle_recieve)
|
[
""
] | |
4a156d8ef09c4d69a6dde0bc892c43557e66c29b
|
0bcbdf19be4749d3babc9731f2c57172655c4285
|
/basic programs/find_last_digit_factorial_divides_factorial.py
|
c9e31574c947ee5e1656e9d74c70c55b1f098fa1
|
[] |
no_license
|
pratikbarjatya/Python-OOPS
|
d0dec073a22790cca8a7883642ff38743822c3ea
|
2f03e21cb1352e462d92d381951d1b00bcd40109
|
refs/heads/master
| 2023-01-27T14:32:50.645555
| 2020-12-09T14:19:20
| 2020-12-09T14:19:20
| 254,899,273
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,734
|
py
|
# Find the last digit when factorial of A divides factorial of B
"""We are given two numbers A and B such that B >= A. We need to compute the last digit of this resulting F such that
F = B!/A! where 1 <= A <= B <= 10^18 (A and B are very large).
Examples:
Input : A = 2, B = 4
Output : 2
Explanation : A! = 2 and B! = 24.
F = 24/2 = 12 --> last digit = 2
Input : 107 109
Output : 2
Approach
Factorial function grows on an exponential rate. Even the largest data type
cannot hold factorial of numbers like 100
Here the given constraints are very large. Thus, calculating the two factorials and later
dividing them and computing the last digit is practically an impossible task.
Thus we have to find an alternate approach to break down our problem.
It is known that the last digit of factorial always belongs to the set {0, 1, 2, 4, 6}
The approach is as follows: –
1) We evaluate the difference between B and A
2) If the (B – A) >= 5, then the answer is always 0
3) If the difference (B – A) < 5, then we iterate from (A+1) to B, multiply and store them.
multiplication_answer % 10 shall be our answer.
"""
# Function which computes the last digit of the quotient B!/A!
def compute_last_digit(a, b):
    """Return the last decimal digit of b!/a!, assuming b >= a."""
    if a == b:
        # b!/a! == 1, so the last digit is 1.
        return 1
    if (b - a) >= 5:
        # Any 5+ consecutive integers contain factors 2 and 5, so the
        # product ends in 0.
        return 0
    # Otherwise multiply the few factors a+1 .. b, tracking only the last
    # digit at each step.
    digit = 1
    for factor in range(a + 1, b + 1):
        digit = (digit * (factor % 10)) % 10
    return digit % 10

# driver function
print(compute_last_digit(2632, 2634))
|
[
"pratikbarjatya@gmail.com"
] |
pratikbarjatya@gmail.com
|
dd1ed5bd20b5a60fd53bd43317230eb05bda02ff
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_267/ch80_2020_06_17_20_39_13_688401.py
|
d534bc9a3759e6fe0eb67eb6874f60c857066930
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
def interseccao_chaves(dicio1, dicio2):
    """Return a list of the keys present in both dictionaries.

    Fix: the original iterated over ``dicio1.keys() and l in dicio2.keys()``,
    which references the unbound name ``l`` and raises NameError on every
    call. The intent — collecting the common keys — is implemented here with
    one membership test per key; order follows dicio1's insertion order.
    """
    lista_chaves = []
    for chave in dicio1:
        # dict membership is O(1), so the whole scan is O(len(dicio1))
        if chave in dicio2:
            lista_chaves.append(chave)
    return lista_chaves
|
[
"you@example.com"
] |
you@example.com
|
cc627c73d3145b43e230fd3f9cfd05c2f883fec3
|
3be7d27c8af88cb3e38d3abe6fb356d0aae0234e
|
/mattasks/views.py
|
8b92b7c9da676e81fdcd4e963e4a9fe3cfec1002
|
[] |
no_license
|
peppuo/dg_gp
|
1e38809a0bb0ddb2f747e1b008cefff11e3ec7b6
|
4b17d3835e7de365efeb1bbb82442b8b618da36f
|
refs/heads/master
| 2023-04-26T09:06:39.716374
| 2020-01-22T00:36:09
| 2020-01-22T00:36:09
| 220,835,984
| 0
| 1
| null | 2023-04-21T20:40:37
| 2019-11-10T19:01:05
|
Python
|
UTF-8
|
Python
| false
| false
| 199
|
py
|
from django.shortcuts import render

def render_mattasks(requests):
    """Render the main tasks page from the mattasks.html template."""
    return render(requests, 'mattasks.html')

def render_add_task(requests):
    """Placeholder view for adding a task — not implemented yet."""
    pass

def render_edit_task(requests):
    """Placeholder view for editing a task — not implemented yet."""
    pass
|
[
"peppujool@hotmail.com"
] |
peppujool@hotmail.com
|
79b5ef3bd637ff8bbc5bb560093fa19e02d11270
|
8e7f87f4c876b2ef8a1d2bba1a146dbad417d1ac
|
/webcars/webcars/settings.py
|
be04b6a18c0666570abf6182e38ac073e28c4069
|
[] |
no_license
|
ATMAROZAK/carsau
|
48ff72ecd61d401bf3367e9d327eaa5cfb750dbd
|
d5d22d5829664c20e19769c31c3375336c8df895
|
refs/heads/master
| 2020-03-14T06:56:49.329360
| 2018-05-15T13:40:53
| 2018-05-15T13:40:53
| 131,493,644
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,146
|
py
|
"""
Django settings for webcars project.
Generated by 'django-admin startproject' using Django 2.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os, django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control — rotate it and load
# it from an environment variable before deploying.
SECRET_KEY = '5izn7qa%0zd^h)!9osp@6&6&&lp7wd9#_(xi(aw2hw)d&j=am0'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []
SITE_ID = 1
# Application definition
INSTALLED_APPS = [
'registration',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_filters',
'widget_tweaks',
'mainapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'webcars.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
],
},
},
]
WSGI_APPLICATION = 'webcars.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# NOTE(review): live Postgres credentials are hard-coded and committed here —
# they should be read from the environment (e.g. DATABASE_URL) and the exposed
# password rotated.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql_psycopg2",
        "NAME": "d9o6mmggl2b4i1",
        "USER": "vnrqhulaazepei",
        "PASSWORD": "3308bf0c890b7f722017961c92972ca7f0ea42472512867b66af5386e33191ae",
        "HOST": "ec2-54-83-1-94.compute-1.amazonaws.com",
        "PORT": "5432",
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
django_heroku.settings(locals())
ACCOUNT_ACTIVATION_DAYS = 7
REGISTRATION_AUTO_LOGIN = True
SEND_ACTIVATION_EMAIL = True
LOGIN_REDIRECT_URL = '/'
REGISTRATION_EMAIL_SUBJECT_PREFIX = '[CARSAU REGISTRATION CONFIRMATION]'
# Outgoing mail (registration confirmation emails) via Gmail SMTP.
EMAIL_BACKEND='django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST ='smtp.gmail.com'
EMAIL_PORT = 587
# NOTE(review): a real account password is committed here — move it to an
# environment variable and revoke the exposed credential.
EMAIL_HOST_USER = 'djangomailtest01@gmail.com'
EMAIL_HOST_PASSWORD = 'System2048'
EMAIL_USE_TLS = True
REGISTRATION_FORM = 'mainapp.forms.MyRegForm'
|
[
"ib_vadim@mail.ru"
] |
ib_vadim@mail.ru
|
cae919b0190786a36a947426686bd056b5d69560
|
c9af19f63f356dde5e6be34379b4741fcf3ca918
|
/setup.py
|
10318167a36ec96614515b40eec8c40a0a74ca8b
|
[
"MIT"
] |
permissive
|
borgess28/coronagraph
|
469fe13e13caa55c266a17d3682d041741326c10
|
b321693512422343b08ada7e246413e1f4bae4cc
|
refs/heads/master
| 2023-05-30T08:00:17.501907
| 2020-05-07T05:32:23
| 2020-05-07T05:32:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,398
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""setuptools build script for the coronagraph package."""
from __future__ import division, print_function, absolute_import

from setuptools import setup

# Hackishly inject a constant into builtins to enable importing of the
# module in "setup" mode. Stolen from `kplr`
import sys
if sys.version_info[0] < 3:
    import __builtin__ as builtins
else:
    import builtins
builtins.__CORONAGRAPH_SETUP__ = True
# Imported only to read __version__; the flag above lets the package skip
# heavy imports while being loaded during setup.
import coronagraph

long_description = \
"""Coronagraph noise model for directly imaging exoplanets."""

# Setup!
setup(name='coronagraph',
      version=coronagraph.__version__,
      description='Coronagraph noise model for directly imaging exoplanets.',
      long_description=long_description,
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Programming Language :: Python',
          'Programming Language :: Python :: 3',
          'License :: OSI Approved :: MIT License',
          'Topic :: Scientific/Engineering :: Astronomy',
      ],
      url='http://github.com/jlustigy/coronagraph',
      author='Jacob Lustig-Yaeger',
      author_email='jlustigy@uw.edu',
      license='MIT',
      packages=['coronagraph'],
      install_requires=[
          'numpy',
          'scipy',
          'matplotlib',
          'numba',
          'astropy'
      ],
      dependency_links=[],
      scripts=[],
      include_package_data=True,
      zip_safe=False,
      # Bundled planet spectra / radiance tables shipped with the package.
      data_files=["coronagraph/planets/ArcheanEarth_geo_albedo.txt",
                  "coronagraph/planets/EarlyMars_geo_albedo.txt",
                  "coronagraph/planets/EarlyVenus_geo_albedo.txt",
                  "coronagraph/planets/earth_avg_hitran2012_300_100000cm.trnst",
                  "coronagraph/planets/earth_avg_hitran2012_300_100000cm_toa.rad",
                  "coronagraph/planets/Hazy_ArcheanEarth_geo_albedo.txt",
                  "coronagraph/planets/Jupiter_geo_albedo.txt",
                  "coronagraph/planets/Mars_geo_albedo.txt",
                  "coronagraph/planets/Neptune_geo_albedo.txt",
                  "coronagraph/planets/Saturn_geo_albedo.txt",
                  "coronagraph/planets/Uranus_geo_albedo.txt",
                  "coronagraph/planets/Venus_geo_albedo.txt",
                  "coronagraph/planets/earth_quadrature_radiance_refl.dat"
                  ]
      )
|
[
"jlustigy@uw.edu"
] |
jlustigy@uw.edu
|
cbd1963eda88e7b0aac52ea0c5cc7e5a2c19a5f0
|
ea6a186bd1123090660495e3d9145cf72673ae07
|
/game_engine.py
|
56fb171cad3aa04064b96183cbdd549037cd17cd
|
[] |
no_license
|
Spacerat/SapphireClone
|
538f99674b4856e6f76502ad46aaeee67fc4de45
|
6feaa855d76c957e86e1eb42146966f628cab688
|
refs/heads/master
| 2021-01-19T14:09:42.548833
| 2015-08-04T08:30:06
| 2015-08-04T08:30:06
| 40,172,768
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,052
|
py
|
import pyglet
import logging
from collections import namedtuple
logging.basicConfig(level=logging.DEBUG)
class Pos(namedtuple('Pos', ['row', 'column'])):
    """ A position object encodes a row and a column """

    @property
    def left(self):
        """The position one column to the left."""
        return self._replace(column=self.column - 1)

    @property
    def right(self):
        """The position one column to the right."""
        return self._replace(column=self.column + 1)

    @property
    def up(self):
        """The position one row up (row index increases upwards)."""
        return self._replace(row=self.row + 1)

    @property
    def down(self):
        """The position one row down."""
        return self._replace(row=self.row - 1)
class Direction:
    """ A direction object is a function which modifies a position """

    def __init__(self, name):
        # Name of the Pos property this direction applies
        # ('left', 'right', 'up' or 'down').
        self.name = name

    def __call__(self, pos : Pos):
        """ :rtype : Pos"""
        # The Pos properties compute the shifted position, so fetching the
        # attribute named after this direction performs the move.
        shifted = getattr(pos, self.name)
        return shifted
class Directions:
    """ The Directions class stores each cardinal direction as a global object"""
    # Shared, stateless Direction instances; callers use e.g.
    # Directions.Left(pos) to obtain the neighbouring position.
    Left = Direction('left')
    Right = Direction('right')
    Up = Direction('up')
    Down = Direction('down')
class BaseObject():
    """ The BaseObject is the parent of all game objects and behaviours.
    It keeps a reference to the game that it is in. """

    def __init__(self):
        self.position = None   # current Pos on the grid (set by Game.add_object)
        self.game = None       # owning Game instance
        self.prev_pos = None   # position at the start of the current tick

    def tick(self):
        """One step of game logic; subclasses override this."""
        pass

    def send_message(self, event:str, *args, **kwargs):
        """Dispatch *event* to this object's ``on_<event>`` handler, if any.

        Returns the handler's result, or None when no handler exists."""
        handler_name = 'on_' + event
        if hasattr(self, handler_name):
            return getattr(self, handler_name)(*args, **kwargs)

    def try_move(self, direction):
        """Attempt to step one cell in *direction*.

        Any occupant of the target cell receives a ``moved_to`` message and
        may vacate the cell; its (truthy/falsy) reply decides whether this
        object actually moves."""
        target = direction(self.position)
        blocker = self.game.grid.get(target, None)
        allowed = blocker.send_message('moved_to', self, direction) if blocker else True
        if allowed:
            self.game.move(self, target)
        return allowed

    def delete(self):
        """Remove this object from its game's grid."""
        self.game.delete(self.position)
class Game(object):
    """ The Game handles a grid of BaseObjects, and also tells a graphics engine what to do. """

    def __init__(self):
        self.graphics_engine = None   # optional renderer, attached externally
        self.grid = {}                # maps Pos -> BaseObject

    def move(self, object : BaseObject, position :Pos):
        """Relocate *object* to *position*, which must currently be free."""
        # The destination cell must be empty.
        assert position not in self.grid
        # Re-key the object under its new position.
        del self.grid[object.position]
        self.grid[position.row, position.column] = object
        object.position = position

    def delete(self, position : Pos):
        """Remove the object at *position* from the grid and the renderer."""
        victim = self.grid.pop(position)
        # Let the graphics engine know to no longer draw this object.
        if self.graphics_engine: self.graphics_engine.delete(victim)

    def add_object(self, pos:Pos, obj:BaseObject):
        """Place *obj* at *pos* unless the cell is already occupied."""
        if not self.grid.get(pos, None):
            self.grid[pos] = obj
            obj.game = self
            obj.position = Pos(*pos)
            obj.prev_pos = obj.position
            if self.graphics_engine: self.graphics_engine.add_object(obj)

    def tick(self):
        """Run one step of the game logic over every object on the grid."""
        ordered = sorted(self.grid.keys())
        # Snapshot positions first, so ticks see a consistent prev_pos,
        # and track which objects have already been ticked (an object may
        # move itself during another object's tick).
        pending = {}
        for cell in ordered:
            occupant = self.grid[cell]
            occupant.position = Pos(*cell)
            occupant.prev_pos = occupant.position
            pending[occupant] = False
        for occupant in [self.grid[cell] for cell in ordered]:
            if not pending[occupant]:
                pending[occupant] = True
                occupant.tick()
|
[
"jat1g11@soton.ac.uk"
] |
jat1g11@soton.ac.uk
|
c57d23449c718bd2b649ef10212f9605ccca3218
|
487490ff85cfc959890a76410d12bd3e0fe1b789
|
/utils_QSO.py
|
62d5b265e12cd53c56ab0f23a48b49abaf33762f
|
[] |
no_license
|
esavary/eBOSSLens
|
0810e80961c1828613592b3d34e740f43c551d2e
|
c2761c7c045f36a6eb72e8958dd0f06dd1ce0c1d
|
refs/heads/master
| 2021-01-23T04:40:12.129893
| 2017-03-17T15:33:40
| 2017-03-17T15:33:40
| 86,235,505
| 0
| 0
| null | 2017-03-26T13:42:21
| 2017-03-26T13:42:20
| null |
UTF-8
|
Python
| false
| false
| 17,932
|
py
|
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib import gridspec
from matplotlib.font_manager import FontProperties
from utils import *
def DR12Q_extractor(path = './Superset_DR12Q.fits'):
    """Read the DR12Q superset FITS catalog and return one row per object.

    Returns an (N, 5) array with columns
    (PLATE, MJD, FIBERID, Z_PIPE, Z_VI).
    NOTE(review): relies on ``pf`` (pyfits / astropy.io.fits) coming from the
    star-import of ``utils`` — confirm; the HDU list is never closed.
    """
    hdulist = pf.open(path)
    z_vi_DR12Q = hdulist[1].data.field('Z_VI')
    z_PCA_DR12Q = hdulist[1].data.field('Z_PIPE')
    plate_DR12Q = hdulist[1].data.field('PLATE')
    mjd_DR12Q = hdulist[1].data.field('MJD')
    fiber_DR12Q = hdulist[1].data.field('FIBERID')
    return np.transpose(np.vstack((plate_DR12Q,mjd_DR12Q,fiber_DR12Q,z_PCA_DR12Q,z_vi_DR12Q)))
def mask_QSO(ivar,z):
    """Zero the inverse variance over known QSO emission lines and sky features.

    Each table entry is (rest-frame wavelength [A], blue half-width multiplier,
    red half-width multiplier); the pixel window
    [(1+z)*(l - lo*l_width), (1+z)*(l + hi*l_width)] is masked in place.
    ``l_LyA``, ``l_width``, ``c0``, ``c1``, ``Nmax`` and ``wave2bin`` come
    from the star-import of ``utils``.

    Refactor: the original repeated the identical masking statement ~35
    times; the line list is now data-driven, with the same centers and
    multipliers preserved verbatim.

    Returns the (mutated) ivar array.
    """
    masked_windows = [
        # (line center, blue multiplier, red multiplier)
        (l_LyA, 2.5, 2.5),      # Lya
        (1240, 0.5, 0.5),       # NV
        (1400.0, 1.5, 1.5),     # SiIV
        (1549.0, 2, 2),         # CIV
        (1640.0, 0.5, 1.5),     # HeII
        (1909.0, 1, 1),         # CIII
        (2326.0, 0.5, 0.5),     # CII
        (2382.765, 1, 1),       # FeII
        (2600.173, 1, 1),       # FeII
        (2798.0, 2.5, 2.5),     # MgII
        (3426.0, 0.5, 0.5),     # NeV
        (3727, 0.5, 1.5),       # OII
        (3869, 0.5, 0.5),       # NeIII
        (4101, 0.5, 0.5),       # Hdelta
        (4340, 1.5, 1.5),       # Hgamma
        (4861, 1, 1),           # Hbeta
        (4959, 2, 2),           # OIII
        (5007, 2, 2),           # OIII
        (6562.81, 2, 3),        # Halpha
        # additional lines added after 1st results
        (2427, 1, 1),           # Ne IV
        (3350, 0.5, 0.5),       # Ne V
        (5200, 1, 1),           # NI
        (5721, 0.5, 0.5),       # [Fe VII]
        (6087, 0.5, 0.5),       # [Fe VII]
        (6376, 1, 1),           # night sky
        (6307, 1, 1),           # night sky
        (6734, 1, 1),           # SII
        (6716, 1, 1),           # SII
        (5317, 1, 1),           # Fe ?
        (5691, 1, 1),           # Fe ?
        (6504, 1, 1),
        (4490, 1, 1),
        (5080, 1, 1),
    ]
    for center, lo, hi in masked_windows:
        start = wave2bin((1+z)*(center - lo*l_width), c0, c1, Nmax)
        stop = wave2bin((1+z)*(center + hi*l_width), c0, c1, Nmax)
        ivar[start:stop] = 0
    return ivar
def QSO_compute_FWHM(ivar,flux,wave,c0,c1,Nmax,z):
    """Estimate a broad-line FWHM and a line-luminosity proxy for a QSO.

    For z < 1 a Lorentzian is fit to Hbeta (after masking [OIII]); for
    1.5 < z < 6.2 it is fit to CIV. Outside those ranges nothing is
    measurable and zeros are returned.

    Returns (FWHM [km/s], lambda*L_lambda, HB_wave), where HB_wave is the
    wavelength window used for the Hbeta fit (None otherwise).

    Fix: the original never bound ``HB_wave`` in the fall-through branch, so
    the final ``return`` raised NameError whenever 1 <= z <= 1.5 or z >= 6.2.
    """
    ### Constants:
    H0 = 72e3 #m s-1 Mpc-1
    c = 299792458 #m s-1
    if z<1:
        #H_beta, need to mask OIII
        l_OIII_a = 4959
        l_OIII_b = 5007
        l_Hb = 4861
        ivar[wave2bin((1+z)*(l_OIII_a -2*l_width),c0,c1,Nmax):wave2bin((1+z)*(l_OIII_a +2*l_width),c0,c1,Nmax)] = 0
        ivar[wave2bin((1+z)*(l_OIII_b -2*l_width),c0,c1,Nmax):wave2bin((1+z)*(l_OIII_b +2*l_width),c0,c1,Nmax)] = 0
        HB_flux = flux[wave2bin(4612*(1+z),c0,c1,Nmax):wave2bin(5112*(1+z),c0,c1,Nmax)]
        HB_wave = wave[wave2bin(4612*(1+z),c0,c1,Nmax):wave2bin(5112*(1+z),c0,c1,Nmax)]
        HB_weight = np.sqrt(ivar[wave2bin(4612*(1+z),c0,c1,Nmax):wave2bin(5112*(1+z),c0,c1,Nmax)])
        ### fit a line to continuum on unmasked points
        line_coeff = np.polyfit(x = HB_wave, y = HB_flux, deg = 1, w=HB_weight)
        HB_flux_r = flux[wave2bin(4812*(1+z),c0,c1,Nmax):wave2bin(4912*(1+z),c0,c1,Nmax)]
        HB_wave_r = wave[wave2bin(4812*(1+z),c0,c1,Nmax):wave2bin(4912*(1+z),c0,c1,Nmax)]
        HB_weight_r = np.sqrt(ivar[wave2bin(4812*(1+z),c0,c1,Nmax):wave2bin(4912*(1+z),c0,c1,Nmax)])
        # Lorentzian fit of the continuum-subtracted Hbeta profile.
        res = minimize(chi2Lorenz,[4862*(1+z),10,30],args=(HB_wave_r, HB_flux_r-line_coeff[0]*HB_wave_r -line_coeff[1],HB_weight_r), method='SLSQP', bounds = [(4862*(1+z)-5,4862*(1+z)+5),(1,100),(1,10000)])
        params_beta = res.x
        FWHM = (c/1000)*2*params_beta[1]/((1+z)*l_Hb) # km s-1
        average_flux = np.mean(flux[wave2bin(5100-40,c0,c1,Nmax):wave2bin(5100+40,c0,c1,Nmax)])
        l_times_luminosity = 5100*(1e-17)*average_flux*4*np.pi*(100*parsec*1e6*(c/H0)*quad(x12,0.0,z)[0]*(1+z))**2
    elif 6.2>z>1.5:
        HB_wave = None
        #CIV
        l_CIV = 1549.0
        CIV_flux = flux[wave2bin(1300*(1+z),c0,c1,Nmax):wave2bin(1800*(1+z),c0,c1,Nmax)]
        CIV_wave = wave[wave2bin(1300*(1+z),c0,c1,Nmax):wave2bin(1800*(1+z),c0,c1,Nmax)]
        CIV_weight = np.sqrt(ivar[wave2bin(1300*(1+z),c0,c1,Nmax):wave2bin(1800*(1+z),c0,c1,Nmax)])
        ### fit a line to continuum on unmasked points
        line_coeff = np.polyfit(x = CIV_wave, y = CIV_flux, deg = 1, w=CIV_weight)
        CIV_flux_r = flux[wave2bin(1500*(1+z),c0,c1,Nmax):wave2bin(1600*(1+z),c0,c1,Nmax)]
        CIV_wave_r = wave[wave2bin(1500*(1+z),c0,c1,Nmax):wave2bin(1600*(1+z),c0,c1,Nmax)]
        CIV_weight_r = np.sqrt(ivar[wave2bin(1500*(1+z),c0,c1,Nmax):wave2bin(1600*(1+z),c0,c1,Nmax)])
        res = minimize(chi2Lorenz,[1549*(1+z),10,10],args=(CIV_wave_r, CIV_flux_r-line_coeff[0]*CIV_wave_r -line_coeff[1],CIV_weight_r), method='SLSQP', bounds = [(1549*(1+z)-5,1549*(1+z)+5),(1,100),(1,10000)])
        params_CIV = res.x
        # NOTE(review): average_flux already includes a 1350 factor and the
        # luminosity line multiplies by 1350 again — confirm this is intended.
        average_flux = 1350*np.mean(flux[wave2bin(1350-40,c0,c1,Nmax):wave2bin(1350+40,c0,c1,Nmax)])
        FWHM = (c/1000)*2*params_CIV[1]/((1+z)*l_CIV) #km s-1
        l_times_luminosity = 1350*(1e-17)*average_flux*4*np.pi*(100*parsec*1e6*(c/H0)*quad(x12,0.0,z)[0]*(1+z))**2
    else:
        # Fix: bind HB_wave here too so the return below cannot raise.
        HB_wave = None
        FWHM = 0.0
        l_times_luminosity = 0.0
    return FWHM, l_times_luminosity, HB_wave
def plot_QSOLAE(RA,DEC,z,flux,wave,synflux,x0,ivar, reduced_flux,window,peak,params,params_skew, topdir, savedir, show = False, paper=True, QSOlens = True):
    """Plot a QSO spectrum with a candidate background Lyman-alpha emission line.

    Draws the observed (e)BOSS flux against the PCA fit plus a zoom around the
    candidate line at observed wavelength ``x0``; in non-paper mode also
    overlays the Gaussian / skew line-profile fits. The figure is saved as an
    EPS file under ``topdir + savedir + '/plots/'``.

    Parameters (as used below):
        RA, DEC       -- sky coordinates, used to build the SDSS-style name.
        z             -- QSO redshift (scalar), shown in the title.
        flux, wave, synflux, ivar -- spectrum, wavelength grid, PCA model and
                         inverse variance arrays.
        x0            -- observed wavelength of the candidate line [Angstrom].
        reduced_flux  -- continuum-subtracted flux, fit in the QSO-lens panel.
        window        -- index array selecting pixels around the line.
        peak          -- per-peak statistics; indices 15-18 appear to hold the
                         chi^2 of the Gaussian / double-Gaussian / skew /
                         double-skew fits (inferred from the labels -- TODO confirm).
        params, params_skew -- best-fit parameters of those profiles.
        topdir, savedir -- output path components.
        show          -- True: display interactively; False: save headless.
        paper         -- True: compact 1x3 publication layout.
        QSOlens       -- toggles QSO-lens panels in the non-paper layout.

    NOTE(review): the savefig call references ``plate``, ``mjd``, ``fiberid[i]``
    and ``n_peak`` which are not parameters -- they must exist as globals at
    call time. ``gauss``, ``gauss2``, ``skew``, ``skew2``, ``wave2bin``,
    ``c0``, ``c1``, ``Nmax``, ``SDSSname`` and ``make_sure_path_exists`` are
    module-level helpers as well.
    """
    # Headless backend when the figure is only saved, never displayed.
    if show ==False:
        mpl.use('Agg')
    # Create and save graph
    fontP = FontProperties()
    fontP.set_size('medium')
    plt.figure(figsize=(12,3))
    plt.suptitle(SDSSname(RA,DEC)+'\n'+'RA='+str(RA)+', Dec='+str(DEC) +', $z_{QSO}='+'{:03.3}'.format(z)+ '$')
    plt.ylabel('$f_{\lambda}\, (10^{-17} erg\, s^{-1} cm^{-2} \AA^{-1}$')
    if paper:
        # Publication layout: full spectrum (2/3 width) + zoom on the line (1/3).
        gs = gridspec.GridSpec(1,3)
        # 5-pixel boxcar smoothing, dropping 4 pixels at each edge.
        # NOTE(review): the length is taken from flux[0,:] (2-D indexing) while
        # the slices use flux[ii-2:ii+3] (1-D) -- looks inconsistent; confirm
        # the expected shape of `flux` at the call site.
        smoothed_flux = np.array([np.mean(flux[ii-2:ii+3]) for ii in range(len(flux[0,:])) if (ii>4 and ii<len(flux[0,:])-4)])
        p1 = plt.subplot(gs[0,:2])
        #p1.plot(wave, flux[:], 'k', label = 'BOSS Flux', drawstyle='steps-mid')
        p1.plot(wave[5:-4], smoothed_flux, 'k', label = 'eBOSS Flux', drawstyle='steps-mid')
        p1.plot(wave, synflux[:], 'r', label = 'PCA fit', drawstyle='steps-mid')
        #p1.fill_between(wave,np.min(synflux[:])-10,np.max(synflux[:])+10,where=(ivar[:]<0.001),facecolor='k', alpha=0.2)
        p1.set_ylim(np.min(synflux[:])-3, np.max(synflux[:])+3)
        # Mark the candidate line position.
        p1.vlines(x = x0,ymin= -100,ymax= 100,colors= 'g',linestyles='dashed')
        box = p1.get_position()
        p1.set_position([box.x0,box.y0+0.06,box.width,box.height*0.85])
        plt.ylabel('Flux [$10^{-17} erg\, s^{-1} cm^{-2} \AA^{-1}$]')
        plt.xlabel('Observed wavelength [$\AA$]')
        # Zoom panel centered on the candidate line.
        p2 = plt.subplot(gs[0,2:3])
        p2.plot(wave, flux[:], 'k', label = 'eBOSS Flux', drawstyle='steps-mid')
        p2.plot(wave, synflux[:], 'r', label = 'PCA fit', drawstyle='steps-mid')
        p2.set_ylim(np.min(flux[window]), np.max(flux[window])+0.5)
        p2.legend(loc='upper right', bbox_to_anchor = (1.3,1.1), ncol = 1, prop=fontP)
        box = p2.get_position()
        p2.set_position([box.x0,box.y0+0.06,box.width*0.9,box.height*0.85])
        # Round tick positions to multiples of 10 A around x0.
        x1 = int(x0/10.)*10
        plt.xticks([x1-10,x1,x1+10,x1+20])
        p2.set_xlim(x0-15,x0+25)
        plt.xlabel('Observed wavelength [$\AA$]')
    else:
        # Diagnostic layout: spectrum on top, zoom / continuum-fit panels below.
        gs = gridspec.GridSpec(2,2)
        p1 = plt.subplot(gs[0,:2])
        p1.plot(wave, flux[:], 'k', label = 'BOSS Flux', drawstyle='steps-mid')
        p1.plot(wave, synflux[:], 'r', label = 'PCA fit', drawstyle='steps-mid')
        #p1.plot(wave,ivar[:]-15, 'g', label = 'var$^{-1}-15$')
        # Shade pixels with essentially no information (tiny inverse variance).
        p1.fill_between(wave,np.min(synflux[:])-10,np.max(synflux[:])+10,where=(ivar[:]<0.000001),facecolor='k', alpha=0.2)
        p1.legend(loc='upper right', bbox_to_anchor = (1.2,1), ncol = 1, prop=fontP)
        box = p1.get_position()
        p1.set_position([box.x0,box.y0,box.width*0.9,box.height])
        p1.set_ylim(np.min(synflux[:])-3, np.max(synflux[:])+3)
        plt.ylabel('$f_{\lambda}\, [10^{-17} erg\, s^{-1} cm^{-2} \AA^{-1}]$')
        if QSOlens == False:
            p1.set_xlim(3600,6000)
        # Rebuild an 81-pixel window around x0 on the log-lambda grid.
        window = np.linspace(wave2bin(x0,c0,c1,Nmax)-40,wave2bin(x0,c0,c1,Nmax)+40,81,dtype = np.int16)
        if QSOlens:
            p3 = plt.subplot(gs[1,:1])
            p3.plot(wave, flux[:], 'k', label = 'BOSS Flux', drawstyle='steps-mid')
            p3.plot(wave, synflux[:], 'r', label = 'PCA fit', drawstyle='steps-mid')
            p3.set_xlim(np.min(wave[window]),np.max(wave[window]))
            p3.set_ylim(np.min(synflux[window])-1, np.max(flux[window])+1)
            box = p3.get_position()
            p3.set_position([box.x0,box.y0,box.width*0.8,box.height])
            plt.ylabel('$f_{\lambda}\, [10^{-17} erg\, s^{-1} cm^{-2} \AA^{-1}]$')
            p3.legend(loc='upper right', bbox_to_anchor = (1.4,1), ncol = 1,prop=fontP)
            p3.locator_params(axis='x',nbins=6)
            # Cubic fit of the local QSO continuum, down-weighting outliers
            # (>5 flux units from the local median get zero weight).
            median_local = np.median(reduced_flux[window])
            fit_QSO = np.poly1d(np.polyfit(x=wave[window],y=reduced_flux[window],deg=3,w=(np.abs(reduced_flux[window]-median_local)<5)*np.sqrt(ivar[window])) )
            p4 = plt.subplot(gs[1,1:2])
            p4.plot(wave[window], fit_QSO(wave[window]), '-m',label = 'Order 3 fit')
            box = p4.get_position()
            p4.set_xlim(np.min(wave[window]),np.max(wave[window]))
            p4.set_position([box.x0,box.y0,box.width*0.8,box.height])
            p4.plot(wave[window], reduced_flux[window],'k', label = 'Reduced flux', drawstyle='steps-mid')
            p4.legend(loc='upper right', bbox_to_anchor = (1.4,1), ncol = 1,prop=fontP)
            p4.locator_params(axis='x',nbins=6)
        else:
            p3 = plt.subplot(gs[1,:2])
            p3.plot(wave, flux[:], 'k', label = 'BOSS Flux', drawstyle='steps-mid')
            p3.plot(wave, synflux[:], 'r', label = 'PCA fit', drawstyle='steps-mid')
            p3.legend(prop=fontP)
            p3.set_xlim(peak[0]-50,peak[0]+60)
            # NOTE(review): `bounds` is not a parameter or local -- it must be a
            # module-level name, otherwise this branch raises NameError.
            p3.set_ylim(np.min(synflux[bounds])-2, np.max(flux[bounds])+3)
            plt.ylabel('$f_{\lambda}\, [10^{-17} erg\, s^{-1} cm^{-2} \AA^{-1}]$', fontsize=18)
        # Bottom panel: reduced flux with the best line-profile fit overlaid.
        # NOTE(review): gs was created as GridSpec(2,2) above, so row index 2
        # looks out of range -- possibly should be GridSpec(3,2); confirm.
        p2 = plt.subplot(gs[2,:2])
        if QSOlens:
            p2.plot(wave[window], reduced_flux[window]-fit_QSO(wave[window]),'k', label = 'Reduced flux', drawstyle='steps-mid')
        else:
            p2.plot(wave[window], reduced_flux[window],'k', label = 'Reduced flux')
        # Plot whichever of single/double Gaussian fits has the lower chi^2.
        if 0.0<peak[16]<peak[15]:
            p2.plot(wave,gauss2(x=wave,x1=params[0],x2=params[1],A1=params[2],A2=params[3],var=params[4]),'g', label = r'$\chi_D^2 = $' + '{:.4}'.format(peak[16]))
        else:
            p2.plot(wave,gauss(x=wave, x_0=params[0], A=params[1], var=params[2]),'r', label = r'$\chi_G^2 = $' + '{:.4}'.format(peak[15]) )
        # Same for single/double skew profiles.
        if 0.0<peak[17]<peak[18]:
            p2.plot(wave,skew(x=wave,A = params_skew[0], w=params_skew[1], a=params_skew[2], eps=params_skew[3]), 'b', label =r'$\chi_S^2 = $' + '{:.4}'.format(peak[17]))
        else:
            p2.plot(wave,skew2(x=wave,A1 = params_skew[0], w1=params_skew[1], a1=params_skew[2], eps1 = params_skew[3], A2 = params_skew[4], w2=params_skew[5], a2=params_skew[6], eps2=params_skew[7]), 'c',label= r'$\chi_{S2}^2 = $' + '{:.4}'.format(peak[18]))
        box = p2.get_position()
        p2.set_position([box.x0,box.y0,box.width*0.9,box.height])
        p2.legend(loc='upper right', bbox_to_anchor = (1.2,1), ncol = 1,prop=fontP)
        plt.xlabel('$Wavelength\, [\AA]$',fontsize = 18)
        p2.set_xlim(np.min(wave[window]),np.max(wave[window]))
        if QSOlens:
            p2.set_ylim(-1, np.max(reduced_flux[window]-fit_QSO(wave[window])+1))
    make_sure_path_exists(topdir + savedir +'/plots/')
    # NOTE(review): plate/mjd/fiberid/i/n_peak come from module globals.
    plt.savefig(topdir + savedir +'/plots/'+SDSSname(RA,DEC)+ '-' + str(plate) + '-' + str(mjd) + '-' + str(fiberid[i]) + '-' + str(n_peak)+ '.eps', format = 'eps', dpi = 2000)
    if show:
        plt.show()
    else:
        plt.close()
def plot_QSOGal(RA,DEC,z, z_backgal,flux,wave,synflux,ivar, reduced_flux,show = False, HB_wave = 0.0, params_beta=[0,0,0], line_coeff = [0,0]):
    """Plot a QSO spectrum with emission lines of a background galaxy marked.

    Top panel: smoothed spectrum + PCA fit with the background galaxy's
    emission lines (``em_lines`` global) marked at redshift ``z_backgal``.
    Bottom panels: zooms on [OII] 3727, Hbeta/[OIII], and (if visible) Halpha.
    The figure is saved as PNG under ``topdir + savedir + '/plots/'``.

    NOTE(review):
      - This function uses the alias ``n.`` for numpy (vs ``np`` elsewhere in
        the file) -- both aliases must be imported at module level.
      - ``z`` is indexed as ``z[i]`` in the title/Hbeta branch but compared as
        a scalar (``z>0.44``) below -- one of the two usages looks wrong;
        confirm whether a scalar or an array is passed.
      - ``params_beta`` and ``line_coeff`` are mutable default arguments; they
        are only read here, so this is benign, but `None` sentinels would be
        safer.
      - The savefig call relies on globals ``topdir``, ``savedir``, ``plate``,
        ``mjd``, ``fiberid``, ``i`` and ``k``; ``em_lines``, ``c0``, ``c1``,
        ``Nmax``, ``wave2bin``, ``lorentz``, ``SDSSname`` and
        ``make_sure_path_exists`` are module-level helpers.
      - ``loc_flux != []`` compares an array to a list; with numpy arrays this
        is deprecated elementwise comparison territory -- confirm intent.
    """
    # Headless backend when only saving.
    if show ==False:
        mpl.use('Agg')
    fontP = FontProperties()
    fontP.set_size('medium')
    plt.suptitle(SDSSname(RA,DEC)+'\n'+'RA='+str(RA)+', Dec='+str(DEC) +', $z_{QSO}='+'{:03.3}'.format(z[i])+ '$')
    gs = gridspec.GridSpec(2,4)
    p1 = plt.subplot(gs[0,:4])
    # 5-pixel boxcar smoothing, dropping 4 pixels at each edge.
    smoothed_flux = n.array([n.mean(flux[ii-2:ii+3]) for ii in range(len(flux[:])) if (ii>4 and ii<len(flux[:])-4)])
    p1.plot(wave[5:-4], smoothed_flux, 'k', label = 'BOSS Flux', drawstyle='steps-mid')
    #p1.plot(wave, flux[:], 'k', label = 'BOSS Flux')
    p1.plot(wave, synflux[:], 'r', label = 'PCA fit')
    if z[i]<1:
        # Overlay the Lorentzian Hbeta fit on top of its linear baseline.
        p1.plot(HB_wave, lorentz(HB_wave, params_beta[0],params_beta[1],params_beta[2]) + HB_wave*line_coeff[0] + line_coeff[1], '--g')
    box = p1.get_position()
    p1.set_position([box.x0,box.y0+0.02,box.width*0.9,box.height])
    p1.set_ylim(n.min(synflux[:])-3, n.max(synflux[:])+3)
    # Mark the background galaxy's emission lines at its redshift.
    p1.vlines(x = em_lines*(1+z_backgal),ymin= -100,ymax= 100,colors= 'g',linestyles='dashed')
    p1.legend(loc='upper right', bbox_to_anchor = (1.2,1), ncol = 1, prop=fontP)
    p1.set_xlim(3500,10500)
    plt.ylabel('Flux [$10^{-17} erg\, s^{-1} cm^{-2} \AA^{-1}]$')
    # Zoom: [OII] 3727 doublet.
    p2 = plt.subplot(gs[1,:1])
    p2.vlines(x = em_lines*(1+z_backgal),ymin= -100,ymax= 100,colors= 'g',linestyles='dashed')
    loc_flux = flux[wave2bin((1+z_backgal)*(3727-10),c0,c1,Nmax) :wave2bin((1+z_backgal)*(3727+10),c0,c1,Nmax)]
    p2.plot(wave[wave2bin((1+z_backgal)*(3727-10),c0,c1,Nmax) :wave2bin((1+z_backgal)*(3727+10),c0,c1,Nmax)],loc_flux,'k', label = 'OII', drawstyle='steps-mid')
    p2.plot(wave[wave2bin((1+z_backgal)*(3727-10),c0,c1,Nmax) :wave2bin((1+z_backgal)*(3727+10),c0,c1,Nmax)],synflux[wave2bin((1+z_backgal)*(3727-10),c0,c1,Nmax) :wave2bin((1+z_backgal)*(3727+10),c0,c1,Nmax)],'r', label = 'OII', drawstyle='steps-mid')
    if loc_flux != []:
        p2.set_ylim(n.min(loc_flux)-1,n.max(loc_flux)+1)
    plt.title('[OII] 3727')
    p2.set_xlim((1+z_backgal)*(3727-10),(1+z_backgal)*(3727+10))
    x1 = int((1+z_backgal)*3727)
    plt.xticks([x1-15,x1,x1+15])
    plt.ylabel('Flux [$10^{-17} erg\, s^{-1} cm^{-2} \AA^{-1}]$')
    #If Ha is below 9500 A, show it
    if z>0.44:
        # Halpha not visible: let the Hbeta/[OIII] panel take the extra column.
        p3 = plt.subplot(gs[1,1:4])
    else:
        p3 = plt.subplot(gs[1,1:3])
    # Zoom: Hbeta + [OIII] 4959/5007.
    p3.vlines(x = em_lines*(1+z_backgal),ymin= -100,ymax= 100,colors= 'g',linestyles='dashed')
    loc_flux = flux[wave2bin((1+z_backgal)*(4861-10),c0,c1,Nmax) :wave2bin((1+z_backgal)*(5007+10),c0,c1,Nmax)]
    p3.plot(wave[wave2bin((1+z_backgal)*(4861-10),c0,c1,Nmax) :wave2bin((1+z_backgal)*(5007+10),c0,c1,Nmax)],loc_flux,'k', label = 'OIII, Hb', drawstyle='steps-mid')
    p3.plot(wave[wave2bin((1+z_backgal)*(4861-10),c0,c1,Nmax) :wave2bin((1+z_backgal)*(5007+10),c0,c1,Nmax)],synflux[wave2bin((1+z_backgal)*(4861-10),c0,c1,Nmax) :wave2bin((1+z_backgal)*(5007+10),c0,c1,Nmax)],'r', label = 'OIII, Hb', drawstyle='steps-mid')
    if loc_flux != []:
        p3.set_ylim(n.min(loc_flux)-1,n.max(loc_flux)+1)
    plt.title(r'H$\beta$,[OIII] 4959, [OIII] 5007')
    plt.xlabel(r'Observed wavelength [$\AA$]')
    p3.set_xlim((1+z_backgal)*(4861-10),(1+z_backgal)*(5007+10))
    x1 = int((1+z_backgal)*4862/10.)*10
    plt.xticks([x1,x1+40,x1+80,x1+120, x1+160,x1+200])
    box = p3.get_position()
    p3.set_position([box.x0+0.02,box.y0,box.width*0.9,box.height])
    if z<0.44:
        # Zoom: Halpha (only when it falls on the detector, z < 0.44).
        p4 = plt.subplot(gs[1,3:4])
        p4.vlines(x = em_lines*(1+z_backgal),ymin= -100,ymax= 100,colors= 'g',linestyles='dashed')
        loc_flux = flux[wave2bin((1+z_backgal)*(6562-10),c0,c1,Nmax) :wave2bin((1+z_backgal)*(6562+10),c0,c1,Nmax)]
        p4.plot(wave[wave2bin((1+z_backgal)*(6562-10),c0,c1,Nmax) :wave2bin((1+z_backgal)*(6562+10),c0,c1,Nmax)],loc_flux,'k', label = 'Ha', drawstyle='steps-mid')
        p4.plot(wave[wave2bin((1+z_backgal)*(6562-10),c0,c1,Nmax) :wave2bin((1+z_backgal)*(6562+10),c0,c1,Nmax)],synflux[wave2bin((1+z_backgal)*(6562-10),c0,c1,Nmax) :wave2bin((1+z_backgal)*(6562+10),c0,c1,Nmax)],'r', label = 'Ha', drawstyle='steps-mid')
        if loc_flux != []:
            p4.set_ylim(n.min(loc_flux)-1,n.max(loc_flux)+1)
        plt.title(r'H$\alpha$')
        p4.set_xlim((1+z_backgal)*(6562-10),(1+z_backgal)*(6562+10))
        x1 = int((1+z_backgal)*6562)
        plt.xticks([x1-10,x1,x1+10])
    make_sure_path_exists(topdir + savedir +'/plots/')
    # NOTE(review): topdir/savedir/plate/mjd/fiberid/i/k come from globals.
    plt.savefig(topdir + savedir +'/plots/'+SDSSname(RA,DEC)+ '-' + str(plate) + '-' + str(mjd) + '-' + str(fiberid[i]) + '-'+str(k+1) +'.png')
    if show:
        plt.show()
    else:
        plt.close()
|
[
"romainalexis.meyer@epfl.ch"
] |
romainalexis.meyer@epfl.ch
|
db9c9ef77bf6aaf5f803cc25c7e9bd1b81ae8c15
|
0dcf78e319956f2cb2327c5cb47bd6d65e59a51b
|
/Python3/Array/FindTheTownJudge/Naive2_997.py
|
7f1a842199b42bd833b49f6df7d29d0c089ee772
|
[] |
no_license
|
daviddwlee84/LeetCode
|
70edd09a64a6f61492aa06d927e1ec3ab6a8fbc6
|
da1774fd07b7326e66d9478b3d2619e0499ac2b7
|
refs/heads/master
| 2023-05-11T03:16:32.568625
| 2023-05-08T05:11:57
| 2023-05-09T05:11:57
| 134,676,851
| 14
| 4
| null | 2018-05-29T14:50:22
| 2018-05-24T07:18:31
|
Python
|
UTF-8
|
Python
| false
| false
| 495
|
py
|
from typing import List
class Solution:
    def findJudge(self, n: int, trust: List[List[int]]) -> int:
        """Locate the town judge: trusted by all n-1 others, trusts no one.

        Each trust pair [a, b] means person a trusts person b. Returns the
        judge's 1-based label, or -1 when no such person exists.
        """
        incoming = [0] * n
        outgoing = [0] * n
        for truster, trusted in trust:
            incoming[trusted - 1] += 1
            outgoing[truster - 1] += 1
        # The judge is the unique person with n-1 incoming and 0 outgoing edges.
        for person in range(1, n + 1):
            if incoming[person - 1] == n - 1 and outgoing[person - 1] == 0:
                return person
        return -1
|
[
"daweilee@microsoft.com"
] |
daweilee@microsoft.com
|
3c2aab3ecac9f232f388ff9061c30d4daeb22e65
|
c8b3882a09347076e78324dd106b40579b380f32
|
/medical_web/manage.py
|
d91730869c305c0dd87957564ee1888837fde57b
|
[] |
no_license
|
medical-weetech-co/medical.github.io
|
ec63323937a61ca08b04ea04070ba7c180a2cab1
|
ed0324e104195a4b100aedc5da03d70a9982bac1
|
refs/heads/master
| 2021-07-05T12:51:37.400915
| 2017-01-25T08:54:06
| 2017-01-25T08:54:06
| 59,278,395
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 809
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings before importing the management
    # tooling, then dispatch the command-line arguments (runserver, migrate, ...).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "medical_web.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        # Django is importable, so the original failure was something else:
        # re-raise it unchanged.
        raise
    execute_from_command_line(sys.argv)
|
[
"peichieh@gmail.com"
] |
peichieh@gmail.com
|
df6570463bf345354f02796f9fdc4ae2c2dd95c9
|
7f70571bc34cdb93a3b973b458915bbc8b3a4e24
|
/pythoncode/search_rotated_list.py
|
6d4c78b548d7b15de39c7e5183004e033f7ca477
|
[] |
no_license
|
Moxxi-ziFe/hogwarts
|
13682453b9bad9391cf7efe32ef1b85e207f175c
|
297bd0636d37344e0624ff99d8452bcc9d08f894
|
refs/heads/master
| 2023-03-12T08:06:00.290969
| 2021-02-24T10:08:53
| 2021-02-24T10:08:53
| 325,500,010
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,397
|
py
|
class Solution:
    def rotate_list_search_minimum(self, nums: list):
        """Return the smallest element of a rotated ascending list.

        Binary search in O(log n): whenever nums[left] <= nums[right] the
        remaining window is already sorted and nums[left] is the minimum;
        otherwise the rotation point lies inside and we keep the half that
        contains it. Assumes distinct values and a non-empty list (an empty
        list raises IndexError, matching the original contract).
        """
        if len(nums) <= 1:
            return nums[0]
        left = 0
        right = len(nums) - 1
        while left + 1 < right:
            if nums[left] <= nums[right]:
                # Window is sorted: its leftmost value is the minimum.
                return nums[left]
            else:
                mid = (left + right) // 2
                if nums[mid] <= nums[left]:
                    # Rotation point is in [left+1, mid]; keep the left half.
                    right = mid
                else:
                    left = mid + 1
        return min(nums[left], nums[right])

    def rotate_list_search_target(self, nums: list, target: int):
        """Return an index of ``target`` in a rotated ascending list, or -1.

        Binary search in O(log n), assuming distinct values.
        Fixed: the original raised IndexError on an empty list instead of
        reporting "not found".
        """
        if not nums:
            return -1
        left = 0
        right = len(nums) - 1
        while left + 1 < right:
            mid = (left + right) // 2
            if nums[mid] == target:
                return mid
            if nums[left] < nums[mid]:
                # Left half [left, mid] is sorted.
                if nums[left] <= target < nums[mid]:
                    # search in left
                    right = mid - 1
                else:
                    # search in right
                    left = mid + 1
            else:
                # Right half [mid, right] is sorted.
                if nums[mid] < target <= nums[right]:
                    # search in right
                    left = mid + 1
                else:
                    # search in left
                    right = mid - 1
        # Window narrowed to at most two candidates; check both.
        if nums[left] == target:
            return left
        if nums[right] == target:
            return right
        return -1
|
[
"dengfenghou@outlook.com"
] |
dengfenghou@outlook.com
|
1873e8e86f97a1f555d5127cb16f2f1044ceb05e
|
121389e2f36d2e0672af767411cfb775352cee38
|
/python_stuff/dot-pythonstartup
|
870d3e0feb9682b3f2318e6dcdf75f3158dcfee7
|
[
"MIT"
] |
permissive
|
kaushalmodi/dotfiles
|
4b03c870b52cc359c071c2c07119024fdcecbb9b
|
f43bfab16e8e32047d1d25c90705092bab0f30c3
|
refs/heads/master
| 2023-06-23T12:33:21.303780
| 2023-06-16T21:48:56
| 2023-06-16T21:48:56
| 19,359,447
| 47
| 19
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,411
|
#!/usr/bin/env python3
"""Custom functions for Ipython"""
import math
import convert # in $SCRIPTS/python
def log10(num):
    "Return log10 of NUM."
    value = math.log10(num)
    return value
def b2d(binary_str):
    "Convert BINARY_STR from Binary to Decimal."
    # Thin wrapper over the local `convert` helper module (in $SCRIPTS/python).
    return convert.b2d(binary_str)
def h2d(hex_str):
    "Convert HEX_STR from Hex to Decimal."
    # Thin wrapper over the local `convert` helper module.
    return convert.h2d(hex_str)
def d2h(decimal):
    "Convert DECIMAL from Decimal to Hex."
    # Thin wrapper over the local `convert` helper module.
    return convert.d2h(decimal)
def d2b(decimal):
    "Convert DECIMAL from Decimal to Binary."
    # Thin wrapper over the local `convert` helper module.
    return convert.d2b(decimal)
def h2b(hex_str):
    "Convert HEX_STR from Hex to Binary."
    # Thin wrapper over the local `convert` helper module.
    return convert.h2b(hex_str)
def b2h(binary_str):
    "Convert BINARY_STR from Binary to Hex."
    # Thin wrapper over the local `convert` helper module.
    return convert.b2h(binary_str)
def dbp(num):
    "Power dB conversions. Return 10*log10(NUM). Returns value in dB."
    decibels = math.log10(num) * 10
    return decibels
def dbv(num):
    "Voltage dB conversions. Return 20*log10(NUM). Returns value in dB."
    decibels = math.log10(num) * 20
    return decibels
def dbinvp(num):
    "Power dB conversions. Return 10^(NUM/10). Converts from dB to real number."
    exponent = num * 1.0 / 10
    return 10 ** exponent
def dbinv(num):
    "Voltage dB conversions. Return 10^(NUM/20). Converts from dB to real number."
    exponent = num * 1.0 / 20
    return 10 ** exponent
# ipython config and profiles are stored in ~/.ipython
# ~/.ipython/profile_default/startup/00_init_modules.py contains the code to run
# at ipython startup
|
[
"kaushal.modi@gmail.com"
] |
kaushal.modi@gmail.com
|
|
2271f16f4242a4787a8a6a2d8da4b91c64bb82f4
|
5d344134f84f90df8443de9743e6d05b4d33ecca
|
/MachineLearning/homeworks/hw7/converter.py
|
21f91f418025feb081d42ade7f97cea6542bf0bb
|
[] |
no_license
|
aulasau/BSU.Magistracy
|
e8deaba2c78e4ccb4f77f2956fc286c94573ae87
|
cb9c547987030d004e33048e7c38f057e393413c
|
refs/heads/master
| 2023-01-01T23:41:58.973005
| 2020-10-31T17:51:38
| 2020-10-31T17:51:38
| 308,931,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
import sys
# Convert a comma-separated feature file into a sparse (LIBSVM-style) format.
# Usage: converter.py <input_file> <output_file>
# NOTE(review): argv[-2]/argv[-1] take the LAST two CLI arguments, so any
# extra trailing argument silently shifts which files are used -- confirm.
input_name = sys.argv[-2]
output_name = sys.argv[-1]
with open(input_name, "r") as inp, open(output_name, "w") as out:
    for i in inp:
        params = i.split(",")
        # The label is the last CSV column; the sign is inverted on output
        # (positive source label -> "-1") -- presumably the training tool
        # uses the opposite convention. TODO confirm with the caller.
        if int(params[-1]) > 0:
            out.write("-1 ")
        else:
            out.write("+1 ")
        # Dummy zero-index feature written for every row.
        # NOTE(review): source indentation was ambiguous here; this line is
        # taken to run for every row, not only in the "+1" branch -- confirm.
        out.write("0:0 ")
        # Sparse encoding: emit only non-zero features, with 1-based indices.
        for ind in range(len(params) - 1):
            if float(params[ind]) != 0:
                out.write(f"{ind+1}:{params[ind]} ")
        out.write("\n")
|
[
"barabasha999@mail.ru"
] |
barabasha999@mail.ru
|
84c8c45bf56852074cb397e2014c21d669452304
|
667c25f7f97fb7201fbec3c9a63d9dfb0dbf8f3a
|
/src/Principal.py
|
6420781e6c42a4a46e5f0b22ec634f456996eec8
|
[] |
no_license
|
rickjmzmnz/JugarVsEngine
|
6ef358b38da4e38bba6db33eb061b996c651e031
|
c2cceb8323cdd552fe3691ceeb825cf066eccbaf
|
refs/heads/master
| 2021-08-19T04:48:06.152235
| 2017-11-24T20:54:28
| 2017-11-24T20:54:28
| 111,821,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,641
|
py
|
# -*- coding: utf-8 -*-
from Tkinter import *
from PIL import ImageTk, Image
from Jugadas import *
import tkFileDialog
import tkMessageBox
import os
import io
import types
# Tkinter frame letting a human play chess against a loaded engine; all board
# rendering and move/rule logic is delegated to the Jugadas module
# (inicializaTablero, juegaEngine, jugadaValida, obtenSvg, ... are its helpers).
class Interfaz(Frame):
    """
    Constructor de la clase
    Se crea el canvas donde se pondra el tablero
    Se crean los botones y etiquetas de la interfaz
    """
    def __init__(self,parent):
        Frame.__init__(self,parent)
        self.pack(fill=BOTH,expand=True)
        self.tablero = None   # current board state (set by creaCanvas)
        self.engine = None    # chess engine handle, None until the user loads one
        self.bandera = None   # True while the human's move widgets are shown
        self.creaCanvas()
        self.creaBotones()
    """
    Dibuja el canvas donde se colocara el tablero
    """
    # Create the board canvas and draw the initial position.
    def creaCanvas(self):
        self.canvas = Canvas(self, bg="white",width=400,height=400)
        self.canvas.place(x=10,y=10)
        self.tablero = inicializaTablero()
        self.colocaTab()
    """
    Se crean los botones para las diferentes acciones de la interfaz
    """
    # Create the "load engine" / "quit" buttons and the status label.
    def creaBotones(self):
        botonAbrir = Button(self,text="Cargar engine",command=self.buscaEngine)
        botonAbrir.place(x=10,y=460)
        botonSalir = Button(self,text="Salir",command=self.salir)
        botonSalir.place(x=170,y=460)
        self.et = Label(self,text="Carga un engine para mostrar más opciones")
        self.et.place(x=100,y=430)
    """
    Coloca los botones para decidir quien empieza la partida
    """
    # Show the two buttons that choose which color the engine plays.
    def colocaBotonesPartida(self):
        self.et.config(text="Elige con cuales piezas jugará el engine")
        self.botonNegras = Button(self,text="Engine juega negras",command=self.juegaNegras)
        self.botonNegras.place(x=10,y=510)
        self.botonBlancas = Button(self,text="Engine juega blancas",command=self.juegaBlancas)
        self.botonBlancas.place(x=170,y=510)
    """
    Busca el engine que será el rival a vencer en la partida
    """
    # Pick an engine binary via a file dialog; reloading restarts the game.
    def buscaEngine(self):
        if(self.engine == None):
            ruta = tkFileDialog.askopenfilename()
            self.engine = cargaEngine(ruta)
            self.colocaBotonesPartida()
        else:
            ruta = tkFileDialog.askopenfilename()
            self.engine = cargaEngine(ruta)
            self.reiniciaPartida()
    """
    Inicia la partida el engine
    """
    # Engine plays white: let it make the first move, then hand over to the human.
    def juegaBlancas(self):
        self.botonBlancas.place_forget()
        self.botonNegras.place_forget()
        self.et.config(text="Empieza la partida")
        mov = juegaEngine(self.tablero,self.engine)
        siguienteJugadaEng(self.tablero,mov)
        self.colocaTab()
        self.bandera = True
        self.juegaPersona()
    """
    Inicia la partida el jugador
    """
    # Engine plays black: the human moves first.
    def juegaNegras(self):
        self.botonBlancas.place_forget()
        self.botonNegras.place_forget()
        self.et.config(text="Empieza la partida")
        self.juegaPersona()
        self.colocaTab()
    """
    Coloca la caja de texto donde el jugador va a indicar su jugada
    """
    # Show the entry box + button the human uses to type a move.
    def juegaPersona(self):
        self.etPer = Label(self,text="Escoge tu jugada")
        self.etPer.place(x=10,y=510)
        self.entrytext = StringVar()
        self.entry = Entry(self,textvariable=self.entrytext)
        self.entry.place(x=120,y=510)
        self.botonPer = Button(self,text="Mover",command= lambda: self.aplicaJugadaPer(self.entrytext))
        self.botonPer.place(x=280,y=510)
    """
    Función para aplicar la jugada que el jugador colocó en la caja de texto
    """
    # Validate and apply the human's typed move; re-prompt when invalid.
    def aplicaJugadaPer(self,mov):
        mov = mov.get()
        self.etPer.place_forget()
        self.entry.place_forget()
        self.botonPer.place_forget()
        if(jugadaValida(self.tablero,mov)):
            siguienteJugadaPer(self.tablero,mov)
            self.colocaTab()
            self.tocaEng()
            self.et.config(text = "La jugada realizada es " + mov)
            self.bandera = False
        else:
            self.mensajeMovInvalido()
            self.juegaPersona()
    """
    Botón para indicarle al engine que le toca mover
    """
    # Show the button that triggers the engine's reply.
    def tocaEng(self):
        self.botonMueveEng = Button(self,text="Te toca engine",command=self.mueveEng)
        self.botonMueveEng.place(x=10,y=510)
    """
    Función para que el engine realice una jugada
    """
    # Ask the engine for its move, apply it, then check for check/mate/draw.
    def mueveEng(self):
        mov = juegaEngine(self.tablero,self.engine)
        movS = toStringMove(mov)
        siguienteJugadaEng(self.tablero,mov)
        self.et.config(text = "La jugada realizada es " + movS)
        self.colocaTab()
        self.botonMueveEng.place_forget()
        self.bandera = True
        self.juegaPersona()
        jaque = verificaJaque(self.tablero)
        jaquemate = verificaJaqueMate(self.tablero)
        tablas = verificaTablas(self.tablero)
        if(jaquemate == True):
            self.mensajeJaqueMate()
            self.reiniciaPartida()
            return
        if(jaque == True):
            self.mensajeJaque()
        if(tablas == True):
            self.mensajeTablas()
    """
    Dibuja el tablero en el canvas con el estado del juego actual
    """
    # Render the board: SVG -> tablero.png -> Tk image on the canvas.
    def colocaTab(self):
        svg = obtenSvg(self.tablero)
        svgToImage(svg,"tablero")
        imagen = Image.open("tablero.png")
        imagenTk = ImageTk.PhotoImage(imagen)
        # Keep a reference on the canvas so the PhotoImage is not GC'd.
        self.canvas.image = imagenTk
        self.canvas.create_image(imagenTk.width()/2,imagenTk.height()/2,anchor=CENTER,image=imagenTk,tags="tab")
    """
    Ventana emergente para informar que hay jaque
    """
    # Popup: check.
    def mensajeJaque(self):
        top = Toplevel()
        top.geometry("%dx%d%+d%+d" % (170, 80, 600, 300))
        label = Label(top,text="¡JAQUE!, Mueve el rey")
        label.place(x=20,y=20)
    """
    Ventana emergente para informar que hay jaquemate
    """
    # Popup: checkmate.
    def mensajeJaqueMate(self):
        top = Toplevel()
        top.geometry("%dx%d%+d%+d" % (180, 80, 600, 300))
        label = Label(top,text="¡JAQUEMATE!, Fin del juego")
        label.place(x=20,y=20)
    """
    Ventana emergente para informar que hay tablas
    """
    # Popup: draw.
    def mensajeTablas(self):
        top = Toplevel()
        top.geometry("%dx%d%+d%+d" % (170, 80, 600, 300))
        label = Label(top,text="¡TABLAS!, Es un empate")
        label.place(x=20,y=20)
    """
    Ventana emergente para informar que se hizo un movimiento inválido
    """
    # Popup: invalid move.
    def mensajeMovInvalido(self):
        top = Toplevel()
        top.geometry("%dx%d%+d%+d" % (170, 80, 600, 300))
        label = Label(top,text="Movimiento inválido")
        label.place(x=20,y=20)
    """
    Reinicia la partida
    Coloca el tablero con las piezas en su posición inicial
    Y los botones para decidir quien mueve primero
    """
    # Reset the game: hide whichever turn widgets are up, restore the board.
    def reiniciaPartida(self):
        if(self.bandera == True):
            self.etPer.place_forget()
            self.entry.place_forget()
            self.botonPer.place_forget()
        else:
            self.botonMueveEng.place_forget()
        self.bandera = None
        self.colocaBotonesPartida()
        self.tablero = inicializaTablero()
        self.colocaTab()
    """
    Borra el archivo png del tablero generado por el programa
    Y se sale del programa
    """
    # Delete the temporary board image and terminate.
    # NOTE(review): os._exit skips atexit handlers and buffers; inside the try
    # it also makes the except branch unreachable unless os.remove itself fails.
    def salir(self):
        try:
            os.remove("tablero.png")
            os._exit(0)
        except OSError:
            os._exit(0)
"""
Main del programa
Crea una ventana y manda a llamar al constructor de la clase
Para poder interactuar con las acciones que se puedan realizar
"""
if __name__=="__main__":
    # Fixed-size window hosting the Interfaz frame; blocks in mainloop().
    root = Tk()
    root.geometry("420x560")
    root.title("Jugar contra un Engine")
    root.wm_state("normal")
    app = Interfaz(root)
    root.mainloop()
|
[
"rickjmzmnz@ciencias.unam.mx"
] |
rickjmzmnz@ciencias.unam.mx
|
71270a82c8899f57b40f543da8d5a9da416ec63f
|
fb85e81180cfcc061055d475d3d90cbf6b471d8a
|
/venv/bin/easy_install-3.5
|
909969bf46a06793f56477c19b8691863c3929d4
|
[] |
no_license
|
DarkKnight1317/python-pi-example
|
a309bcf3ef17a2114cfd1dc72a41dddb050ac87c
|
d74cf74984634b8389e25d8d806a5c4da5c2e8a7
|
refs/heads/master
| 2021-01-07T21:31:20.039143
| 2020-02-20T08:26:41
| 2020-02-20T08:26:41
| 241,825,521
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
5
|
#!/home/65bijitrevathy71/PycharmProjects/pi/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.5'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
# Setuptools-generated console-script wrapper: dispatch to the
# 'easy_install-3.5' entry point declared by setuptools 40.8.0.
if __name__ == '__main__':
    # Strip a "-script.py(w)"/".exe" suffix so argv[0] is the bare command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.5')()
    )
|
[
"bishal1banerjee@gmail.com"
] |
bishal1banerjee@gmail.com
|
73caa9ce9f9121dc8f031a69d2a264d2f4fb475e
|
5bd58323e622f58a4de979d36fde04276477b8ea
|
/code/py/heap/lc_692_Top_K_Frequent_Words.py
|
89b6bedef5de15a060c07f00807c7ec9103d8c8e
|
[] |
no_license
|
HJSang/leetcode
|
e3c98e239b693c742fa3fbec056dce0821cb725c
|
9399de82326f90c7f4805d649e15fa6a4ad86a16
|
refs/heads/master
| 2021-07-07T14:04:03.401727
| 2020-11-23T04:01:35
| 2020-11-23T04:01:35
| 205,892,155
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,276
|
py
|
# 692. Top K Frequent Words
# Medium
#
# Given a non-empty list of words, return the k most frequent elements.
#
# Your answer should be sorted by frequency from highest to lowest. If two words have the same frequency, then the word with the lower alphabetical order comes first.
#
# Example 1:
# Input: ["i", "love", "leetcode", "i", "love", "coding"], k = 2
# Output: ["i", "love"]
# Explanation: "i" and "love" are the two most frequent words.
# Note that "i" comes before "love" due to a lower alphabetical order.
# Example 2:
# Input: ["the", "day", "is", "sunny", "the", "the", "the", "sunny", "is", "is"], k = 4
# Output: ["the", "is", "sunny", "day"]
# Explanation: "the", "is", "sunny" and "day" are the four most frequent words,
# with the number of occurrence being 4, 3, 2 and 1 respectively.
# Note:
# You may assume k is always valid, 1 ≤ k ≤ number of unique elements.
# Input words contain only lowercase letters.
# Follow up:
# Try to solve it in O(n log k) time and O(n) extra space.
#
#
import collections, heapq
class Solution:
    def topKFrequent(self, words: 'List[str]', k: int) -> 'List[str]':
        """Return the k most frequent words, most frequent first.

        Ties are broken by ascending alphabetical order. Runs in O(n log k)
        time and O(n) extra space via a bounded heap over the frequency table.
        """
        # NOTE(review): the file imports only `collections, heapq`, so the
        # original bare `List[str]` annotations raised NameError at import
        # time (typing.List was never imported). String annotations keep the
        # documented types without the hard dependency.
        freqs = collections.Counter(words)
        # Key (-count, word): highest count first, then lexicographic order.
        return heapq.nsmallest(k, freqs, key=lambda word: (-freqs[word], word))
|
[
"sanghj0923@gmail.com"
] |
sanghj0923@gmail.com
|
356499b6b6f83e5894d44ea76a5a24777bac2d13
|
5de63845a290d6f43ca195665e99c31f38cae43d
|
/Mundo2/003EstruturasDeRepeticao/057AceitarApenasMouF.py
|
95befcd06a8a805fad62118eaa1fecb0a0ae1269
|
[] |
no_license
|
mucheniski/curso-em-video-python
|
4fef73a9aebe2d815b60eeab119f8af91e3929ce
|
242b4f719c1149273805762c9e40aa680464043b
|
refs/heads/master
| 2023-07-05T05:05:56.580283
| 2021-09-02T17:10:57
| 2021-09-02T17:10:57
| 342,046,890
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
# Exercício Python 57: Faça um programa que leia o sexo de uma pessoa, mas só aceite os valores ‘M’ ou ‘F’.
# Caso esteja errado, peça a digitação novamente até ter um valor correto.
# Read the user's sex and keep re-prompting until it is exactly 'M' or 'F'.
sexo = str(input('Informe o sexo M ou F ')).upper().strip()
# Fixed: the original `sexo not in 'MF'` was a SUBSTRING test, so both the
# empty string ('' in 'MF' is True) and 'MF' itself were wrongly accepted.
# Testing membership in a tuple only accepts the two exact answers.
while sexo not in ('M', 'F'):
    sexo = str(input('Valor inválido, informe o sexo M ou F: ')).strip().upper()
print('Sexo {} registrado com sucesso'.format(sexo))
|
[
"mucheniski@gmail.com"
] |
mucheniski@gmail.com
|
3eb8f4986634cf2ae84349c3e3527ede3532d582
|
6d0859eaf8fe458378e7f5688975edd993ce1e5a
|
/bwagtail/bootstrap_grayscale/migrations/0041_auto_20181215_2026.py
|
78eb576c55bf794bae628f66604866f5f1d09ee0
|
[] |
no_license
|
mjlabe/bwagtail-cms
|
8e52c2a9627d4540bd30c3bca871f1f841451061
|
a214209a9bc61a5b7c6db3ff2f4741d1535ed3ac
|
refs/heads/master
| 2022-12-04T23:12:39.546791
| 2019-09-10T01:25:38
| 2019-09-10T01:25:38
| 207,430,003
| 1
| 1
| null | 2022-12-04T11:05:20
| 2019-09-10T00:29:15
|
Python
|
UTF-8
|
Python
| false
| false
| 9,580
|
py
|
# Generated by Django 2.1.4 on 2018-12-15 20:26
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('bootstrap_grayscale', '0040_auto_20181215_2022'),
]
operations = [
migrations.AlterField(
model_name='bootstrapgrayscalepage',
name='body',
field=wagtail.core.fields.StreamField([('masthead', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(required=False)), ('subheading', wagtail.core.blocks.RichTextBlock(required=False)), ('background_image', wagtail.images.blocks.ImageChooserBlock()), ('button_text', wagtail.core.blocks.CharBlock(required=False)), ('button_link', wagtail.core.blocks.PageChooserBlock(required=False)), ('background_theme', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Light'), ('dark', 'Dark'), ('theme', 'Theme')], required=False))])), ('carousel', wagtail.core.blocks.StructBlock([('masthead', wagtail.core.blocks.BooleanBlock(required=False)), ('item', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(required=False)), ('paragraph', wagtail.core.blocks.CharBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('button_text', wagtail.core.blocks.CharBlock(required=False)), ('button_link', wagtail.core.blocks.PageChooserBlock(required=False))])))])), ('about', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock()), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image_with_transparent_background', wagtail.images.blocks.ImageChooserBlock(required=False))])), ('featured', wagtail.core.blocks.StructBlock([('featured', wagtail.core.blocks.StreamBlock([('featured_row_large', wagtail.core.blocks.StructBlock([('item', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(required=False)), ('paragraph', wagtail.core.blocks.RichTextBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('image_position', wagtail.core.blocks.ChoiceBlock(choices=[('left', 'Left'), ('right', 'Right')]))])))])), ('featured_row', wagtail.core.blocks.StructBlock([('item', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(required=False)), ('paragraph', 
wagtail.core.blocks.RichTextBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('image_position', wagtail.core.blocks.ChoiceBlock(choices=[('left', 'Left'), ('right', 'Right')]))])))]))]))])), ('section', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(required=False)), ('paragraph', wagtail.core.blocks.RichTextBlock(required=False)), ('button_text', wagtail.core.blocks.CharBlock(required=False)), ('button_link', wagtail.core.blocks.PageChooserBlock(required=False)), ('background_image', wagtail.images.blocks.ImageChooserBlock(required=False)), ('background_color', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Light'), ('bg-dark text-light', 'Dark'), ('bg-primary text-light', 'Theme')], required=False)), ('iframe', wagtail.core.blocks.RawHTMLBlock(required=False))])), ('contact', wagtail.core.blocks.StructBlock([('address', wagtail.core.blocks.CharBlock()), ('email', wagtail.core.blocks.CharBlock()), ('phone', wagtail.core.blocks.CharBlock()), ('social_media', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('social_link', wagtail.core.blocks.StructBlock([('social_media', wagtail.core.blocks.ChoiceBlock(choices=[('fab fa-facebook-f', 'Facebook'), ('fab fa-github', 'GitHub'), ('fab fa-gitlab', 'GitLab'), ('fa-instagram', 'Instagram'), ('fab fa-linkedin', 'LinkedIn'), ('fab fa-twitter', 'Twitter')])), ('link', wagtail.core.blocks.CharBlock())], required=False))], required=False)))])), ('grid', wagtail.core.blocks.StructBlock([('row', wagtail.core.blocks.StreamBlock([('bootstrap_common_2_column', wagtail.core.blocks.StructBlock([('item', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('heading1', wagtail.core.blocks.CharBlock(required=False)), ('paragraph1', wagtail.core.blocks.RichTextBlock(required=False)), ('image1', wagtail.images.blocks.ImageChooserBlock(required=False)), ('link1', wagtail.core.blocks.PageChooserBlock(required=False)), ('heading2', 
wagtail.core.blocks.CharBlock(required=False)), ('paragraph2', wagtail.core.blocks.RichTextBlock(required=False)), ('image2', wagtail.images.blocks.ImageChooserBlock(required=False)), ('link2', wagtail.core.blocks.PageChooserBlock(required=False))])))])), ('bootstrap_common_3_column', wagtail.core.blocks.StructBlock([('item', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('heading1', wagtail.core.blocks.CharBlock(required=False)), ('paragraph1', wagtail.core.blocks.RichTextBlock(required=False)), ('image1', wagtail.images.blocks.ImageChooserBlock(required=False)), ('link1', wagtail.core.blocks.PageChooserBlock(required=False)), ('heading2', wagtail.core.blocks.CharBlock(required=False)), ('paragraph2', wagtail.core.blocks.RichTextBlock(required=False)), ('image2', wagtail.images.blocks.ImageChooserBlock(required=False)), ('link2', wagtail.core.blocks.PageChooserBlock(required=False)), ('heading3', wagtail.core.blocks.CharBlock(required=False)), ('paragraph3', wagtail.core.blocks.RichTextBlock(required=False)), ('image3', wagtail.images.blocks.ImageChooserBlock(required=False)), ('link3', wagtail.core.blocks.PageChooserBlock(required=False))])))])), ('bootstrap_common_4_column', wagtail.core.blocks.StructBlock([('item', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('heading1', wagtail.core.blocks.CharBlock(required=False)), ('paragraph1', wagtail.core.blocks.RichTextBlock(required=False)), ('image1', wagtail.images.blocks.ImageChooserBlock(required=False)), ('link1', wagtail.core.blocks.PageChooserBlock(required=False)), ('heading2', wagtail.core.blocks.CharBlock(required=False)), ('paragraph2', wagtail.core.blocks.RichTextBlock(required=False)), ('image2', wagtail.images.blocks.ImageChooserBlock(required=False)), ('link2', wagtail.core.blocks.PageChooserBlock(required=False)), ('heading3', wagtail.core.blocks.CharBlock(required=False)), ('paragraph3', wagtail.core.blocks.RichTextBlock(required=False)), ('image3', 
wagtail.images.blocks.ImageChooserBlock(required=False)), ('link3', wagtail.core.blocks.PageChooserBlock(required=False)), ('heading4', wagtail.core.blocks.CharBlock(required=False)), ('paragraph4', wagtail.core.blocks.RichTextBlock(required=False)), ('image4', wagtail.images.blocks.ImageChooserBlock(required=False)), ('link4', wagtail.core.blocks.PageChooserBlock(required=False))])))])), ('bootstrap_common_8_column', wagtail.core.blocks.StructBlock([('item', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('heading1', wagtail.core.blocks.CharBlock(required=False)), ('paragraph1', wagtail.core.blocks.RichTextBlock(required=False)), ('image1', wagtail.images.blocks.ImageChooserBlock(required=False)), ('link1', wagtail.core.blocks.PageChooserBlock(required=False)), ('heading2', wagtail.core.blocks.CharBlock(required=False)), ('paragraph2', wagtail.core.blocks.RichTextBlock(required=False)), ('image2', wagtail.images.blocks.ImageChooserBlock(required=False)), ('link2', wagtail.core.blocks.PageChooserBlock(required=False)), ('heading3', wagtail.core.blocks.CharBlock(required=False)), ('paragraph3', wagtail.core.blocks.RichTextBlock(required=False)), ('image3', wagtail.images.blocks.ImageChooserBlock(required=False)), ('link3', wagtail.core.blocks.PageChooserBlock(required=False)), ('heading4', wagtail.core.blocks.CharBlock(required=False)), ('paragraph4', wagtail.core.blocks.RichTextBlock(required=False)), ('image4', wagtail.images.blocks.ImageChooserBlock(required=False)), ('link4', wagtail.core.blocks.PageChooserBlock(required=False)), ('heading5', wagtail.core.blocks.CharBlock(required=False)), ('paragraph5', wagtail.core.blocks.RichTextBlock(required=False)), ('image5', wagtail.images.blocks.ImageChooserBlock(required=False)), ('link5', wagtail.core.blocks.PageChooserBlock(required=False)), ('heading6', wagtail.core.blocks.CharBlock(required=False)), ('paragraph7', wagtail.core.blocks.RichTextBlock(required=False)), ('image6', 
wagtail.images.blocks.ImageChooserBlock(required=False)), ('link6', wagtail.core.blocks.PageChooserBlock(required=False)), ('heading7', wagtail.core.blocks.CharBlock(required=False)), ('image7', wagtail.images.blocks.ImageChooserBlock(required=False)), ('link7', wagtail.core.blocks.PageChooserBlock(required=False)), ('heading8', wagtail.core.blocks.CharBlock(required=False)), ('paragraph8', wagtail.core.blocks.RichTextBlock(required=False)), ('image8', wagtail.images.blocks.ImageChooserBlock(required=False)), ('link8', wagtail.core.blocks.PageChooserBlock(required=False))])))]))]))])), ('pricing', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock()), ('paragraph', wagtail.core.blocks.RichTextBlock(required=False)), ('row', wagtail.core.blocks.StreamBlock([('bootstrap_common_price_block', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock()), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('price', wagtail.core.blocks.DecimalBlock()), ('rate', wagtail.core.blocks.CharBlock()), ('button_text', wagtail.core.blocks.CharBlock())]))]))])), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock())]),
),
]
|
[
"mjlabe@gmail.com"
] |
mjlabe@gmail.com
|
34c6cc2a14f9ef2bbae48b217162a6f1bef60751
|
b96d3489ce0e160124dd2a0a9197c80b3c1d756f
|
/land_retail/land_retail/doctype/land_details/land_details.py
|
1a8a65279a050bef4cb4048a91812ca0c0456a68
|
[
"MIT"
] |
permissive
|
Prosper-Apps/Land-Retail
|
ada6284d52d8b1979c702aaeb3445777c41f8c8a
|
1e6c3f751b162d091544e028ad3f70629c1c5233
|
refs/heads/master
| 2023-07-15T22:51:31.217489
| 2021-08-22T19:36:36
| 2021-08-22T19:36:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Bantoo Accounting and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class LandDetails(Document):
pass
|
[
"duncannkhata@gmail.com"
] |
duncannkhata@gmail.com
|
0e910e7259b8f2d1972d01000a727267c17b26e3
|
36f58995cc656696c513480e98f1962c0947f519
|
/lib/pymat.py
|
ccef34ba43fb1a66d0a670cd57f462a84d58dba5
|
[] |
no_license
|
rj42/dataviewer
|
d7d9a1b8046298f3c4ba8ac52d83650118bf2678
|
63068955e541df694d365a869aac45c0fe4ef742
|
refs/heads/master
| 2021-01-16T18:17:12.503294
| 2015-02-24T07:08:24
| 2015-02-24T23:49:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
import scipy.io
def save(filename,dic):
'''
saves dictionary to matlab binary format
'''
scipy.io.savemat(filename, dic)
def load(filename):
'''
loads .mat file as a dictionary
'''
d = scipy.io.loadmat(filename)
del d['__globals__']
del d['__header__']
del d['__version__']
for i in d.keys():
d[i] = d[i][0]
return d
|
[
"is6645@PGE-C9RTM02-NE.austin.utexas.edu"
] |
is6645@PGE-C9RTM02-NE.austin.utexas.edu
|
f305274bc2629f4a0c09c256c3b77c6f285abb8a
|
8b8d1b6c325a0084805b26e0da63641928d76a08
|
/backend/corelib/hardwarelib/baInstr.py
|
15df44d948938890b58a003a3e900ed4053456a0
|
[] |
no_license
|
superfk/digiWeb
|
ea1215df477dbd1d2044815028b9f5cddf0874f0
|
2386a4da4e362e3f0c7bbfc6195f566f7630d582
|
refs/heads/main
| 2023-01-04T06:04:58.636065
| 2020-10-20T01:31:05
| 2020-10-20T01:31:05
| 304,383,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,344
|
py
|
import serial
from PyCRC.CRCCCITT import CRCCCITT
import re
import time
class BaInstr(object):
def __init__(self):
self.device = None
self.debug = True
self.wait_cmd = True
self.duration = 0
self.connected = False
def config(self, debug=False, wait_cmd = True):
self.debug = debug
self.wait_cmd = wait_cmd
def make_cmd(self, cmd, para, input_value=None):
if input_value:
if type(input_value) == str:
main_cmd = '{}({}={}),'.format(cmd, para, input_value)
elif type(input_value) == int or type(input_value) == float or type(input_value) == bool:
main_cmd = '{}({}={}),'.format(cmd, para, input_value)
else:
main_cmd = 'Undefined value type'
else:
main_cmd = "{}({}),".format(cmd, para)
return bytes("{}{}".format(main_cmd, self.get_cksum(main_cmd)).encode('utf-8'))
def get_cksum(self, input):
cks = hex(CRCCCITT().calculate(input)).upper()[2:]
cks = cks.rjust(4, '0')
return cks
def parse_resp(self, resp):
resp_str = resp.decode('utf-8')
# resp_reg = '''([a-zA-Z_\d.]*),?\(([a-zA-Z_]*)=?(["'a-zA-Z_\d. \/]*)\),?([a-zA-Z_\d.]*)'''
resp_reg = '''([a-zA-Z_\d.]*),?\(?([a-zA-Z_]*)=?(["'a-zA-Z_\d. \/]*)\)?,?([a-zA-Z_\d.]*)'''
match = re.search(resp_reg, resp_str)
ret = {}
if match:
ret['error_code'] = match.groups()[0]
ret['para'] = match.groups()[1]
ret['value'] = match.groups()[2]
ret['cks'] = match.groups()[3]
else:
ret = None
return ret
def readline_only(self):
resp = self.device.readline()
if self.debug:
print("original resp in readline only: {}".format(resp))
resp = self.parse_resp(resp)
if self.debug:
print("parsing resp in readline only: {}".format(resp))
if resp:
return resp['value']
else:
return None
def write_and_read(self, cmd, para, value=None, timeout=5):
pooling_time = 0.25 #second
max_wait_count = int(timeout // pooling_time)
counter = 0
while True:
combined_cmd = self.make_cmd(cmd, para, value)
if self.debug:
print("Original cmd: {}".format(combined_cmd))
self.device.write(combined_cmd)
resp = self.device.readline()
if self.debug:
print("original resp: {}".format(resp))
resp = self.parse_resp(resp)
if self.debug:
print("parsing resp: {}".format(resp))
if resp:
if resp['value'] == '"DEVICE BUSY"':
time.sleep(pooling_time)
counter += 1
if not self.wait_cmd:
return None
elif resp:
return resp['value']
else:
return None
else:
return None
def open(self, port=None, baudrate=9600, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, timeout=10):
self.device = serial.Serial(port=port, baudrate=baudrate, bytesize=bytesize, parity=parity, stopbits=stopbits, timeout=timeout, write_timeout=3)
if self.device:
self.connected = True
else:
self.connected = False
return self.connected
def close(self):
self.connected = False
self.device.close()
def get_dev_name(self):
return self.write_and_read('GET', 'DEV_NAME')
def get_dev_software_version(self):
return self.write_and_read('GET', 'DEV_SV')
def get_ms_duration(self):
self.duration = float(self.write_and_read('GET', 'MS_DURATION'))
return self.duration
def set_ms_duration(self, value):
self.write_and_read('SET', 'MS_DURATION', value)
self.duration = float(value)
return self.duration
def get_ms_method(self):
return self.write_and_read('GET', 'MS_METHOD')
def isReady(self):
ret = self.write_and_read('GET', 'DEV_NAME')
if ret:
if ret[2] == 'ACTIVE':
return False
else:
return True
else:
return False
def check_connection(self, addr):
try:
self.open(addr)
ret = self.isReady()
self.close()
return ret
except:
return False
if __name__ == '__main__':
# ba = BaInstr()
# input = b'GET(MS_MODE),' # exp_crc = '05F9'
# result = ba.get_cksum(input)
# print(result)
# resp = 'E0000,MS_DURATION=5,30AA\r\n'
# par_result = ba.parse_resp(resp)
# print(par_result)
# ba.open("COM3",timeout=1)
# ret = ba.get_dev_name()
# print(ret)
# ret = ba.get_dev_software_version()
# print(ret)
# ret = ba.get_ms_duration()
# print(ret)
# ret = ba.get_ms_method()
# print(ret)
# ret = ba.isReady()
# print(ret)
# ret = ba.set_ms_duration(3)
# print(ret)
# ba.close()
ser = serial.Serial('COM3',9600)
ser.close()
|
[
"jungenhsiao@gmail.com"
] |
jungenhsiao@gmail.com
|
ac6b247b14c0a2a6f1af882e5eb8a00c16731c1e
|
885b8e1c53903865d1fc314ef2dceca42db5b516
|
/vctools/__init__.py
|
4b2c99e86eb606453d6db8874d6d422ebe504464
|
[
"MIT"
] |
permissive
|
mdechiaro/vctools
|
f1bc201ac3ff731f33efbea9ca4800f5a08f7991
|
02ab2763b788aa15d77b656fc9c059d14bfe1a64
|
refs/heads/master
| 2021-04-22T06:36:08.703111
| 2021-04-13T18:49:09
| 2021-04-13T18:49:09
| 23,294,967
| 4
| 2
|
MIT
| 2018-05-31T15:51:40
| 2014-08-24T22:39:54
|
Python
|
UTF-8
|
Python
| false
| false
| 619
|
py
|
#!/usr/bin/env python
# vim: ts=4 sw=4 et
""" Logging metaclass."""
import logging
class Log(type):
""" Metaclass that will load all plugins. """
def __init__(cls, name, args, kwargs):
"""
Args:
name (str): Becomes __name__ attribute
args (tuple): Becomes __bases__ attribute
kwargs (dict): Becomes __dict__ attribute
"""
super(Log, cls).__init__(name, args, kwargs)
cls.logger = logging.getLogger(__name__)
# pylint: disable=too-few-public-methods
class Logger(metaclass=Log):
""" Allows any class to easily have logging. """
|
[
"mdechiaro@users.noreply.github.com"
] |
mdechiaro@users.noreply.github.com
|
9b41d87dcf52cc0c7c937881ecb578353f1ec28d
|
a0ba425c9dc32b4e2face4228b7252d19b303f6f
|
/deploy-remote/localization/arucodetect-keeper.py
|
5328717c04d3cabaec664108d1fe4479e02fd6f5
|
[
"MIT"
] |
permissive
|
pabsan-0/sub-t
|
a1f05c57cc39a9bd4fc92631027f161da927e6a3
|
7217fdbd3ba73a4d807939f3a2646ac9f4f00fe0
|
refs/heads/master
| 2023-08-14T20:37:22.654859
| 2021-10-05T15:44:22
| 2021-10-05T15:44:22
| 339,476,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,744
|
py
|
import numpy as np
import cv2
import cv2.aruco as aruco
import math
# import matplotlib.pyplot as plt
import os
import time
import pprint
'''
Use this script to receive live feed and find aruco patterns on it.
Requires a callibrated camera matrix and distortion vector.
DEPS: RedmiNote9Pro.nz: a file from which to load camera distortion matrix
'''
# initialize the video recorder from IP camera
cap = cv2.VideoCapture('https://192.168.43.1:8085/video')
# cap = cv2.VideoCapture(0)
def get_position_ID1_ID0():
''' Gets the pose of the markerID1 with respect of the markerID2
'''
if [0] in ids and [1] in ids:
# check where 0 and 1 are in the 'ids' list
zero = np.where(ids == [0])
one = np.where(ids == [1])
# Get traslation and rotation vectors, plus rotation matrix for ID0
tvec_0 = tvecs[zero][0]
rvec_0 = rvecs[zero][0]
rot_0 = cv2.Rodrigues(rvec_0)[0]
# get traslation and rotation vectors for ID1
tvec_1 = tvecs[one][0]
rvec_1 = rvecs[one][0]
# Assemble translation matrix and use it to compute R1|R0
T = np.eye(4)
T[0:3, 0:3] = rot_0
T[0:3, 3] = tvec_0
tvec_1 = np.append(tvec_1, [1])
vector = np.matmul(np.linalg.inv(T), tvec_1)
# print and return answer, croppting the bureaucratic '1' from the vector
print('\n Coordinates of markerID1 | markerID0: \n', vector[:-1])
return vector[:-1]
else:
return None
def get_camera_pose_old(markerID=0):
''' Gets the camera pose with respect to a specific marker ID
'''
if [markerID] in ids:
# find the marker id in the id list and get its tvec and rvec
marker = np.where(ids == [markerID])
tvec = tvecs[marker][0]
rvec = rvecs[marker][0]
# Get rotation matrix from object coordinates to camera coordinates
rot = cv2.Rodrigues(rvec)[0]
# Assemble translation matrix and use it to compute camera|markerID
T = np.eye(4)
T[0:3, 0:3] = rot
T[0:3, 3] = tvec
tvec_camera_from_camera = np.array([0,0,0,1])
vector = np.matmul(np.linalg.inv(T), tvec_camera_from_camera)
eul = -cv2.decomposeProjectionMatrix(T[:3,:])[6]
yaw = eul[1,0]
pitch = (eul[0,0]+(90))*math.cos(eul[1,0])
roll = (-(90)-eul[0,0])*math.sin(eul[1,0]) +eul[2,0]
return vector, yaw, roll, pitch
def get_camera_pose(markerID=0):
''' Gets the camera pose with respect to any set of detected IDs
'''
# Define placeholder for detection data
detections = {}
for idx, markerID in enumerate(ids):
# get the tvexs and rvecs of this particular marker
tvec = tvecs[idx][0]
rvec = rvecs[idx][0]
# Get rotation matrix from object coordinates to camera coordinates
rot = cv2.Rodrigues(rvec)[0]
# Assemble translation matrix and use it to compute camera|markerID
T = np.eye(4)
T[0:3, 0:3] = rot
T[0:3, 3] = tvec
tvec_camera_from_camera = np.array([0,0,0,1])
x, y, z, _ = np.matmul(np.linalg.inv(T), tvec_camera_from_camera)
# the euler angle is the Projection matrix Aruco/camera, not camera/aruco !!!!!!!
eul = -cv2.decomposeProjectionMatrix(T[:3,:])[6]
yaw = eul[1,0]
pitch = (eul[0,0]+(90))*math.cos(eul[1,0])
roll = (-(90)-eul[0,0])*math.sin(eul[1,0]) + eul[2,0]
# Add each marker's pose to the detection dictionary
detections[idx] = {#'raw': [int(markerID), x, y, z, yaw, roll, pitch],
'id': int(markerID),
'pose': [x, z, yaw]}
return detections
# Load camera calibration matrices
with np.load('RedmiNote9Pro.npz') as X:
mtx, dist = [X[i] for i in('cameraMatrix', 'distCoeffs')]
# Record image until stopped with Q
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
# reset measurements to avoid issues (empyrical, no formal justification)
x = float("nan")
z = float("nan")
vector = [float("nan"), float("nan"), float("nan") ]
yaw = float("nan")
# Work out the aruco markers from the picture
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)
parameters = aruco.DetectorParameters_create()
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
# If some markers are found in the picture
if corners != []:
# define size of marker for scale [currently in mm]
size_of_marker = 150
# get the rotation and traslation vectors CAMERA -> ARUCO
rvecs,tvecs,trash = aruco.estimatePoseSingleMarkers(corners, size_of_marker , mtx, dist)
## Getting the pose of 1 w.r to 0
# get_position_ID1_ID0()
# Get camera pose with respect to all markers
detections = get_camera_pose()
# Terminal display
os.system('cls' if os.name == 'nt' else 'clear')
print('\n Poses (x, z, yaw)')
pprint.pprint(detections, width=1)
# draw aruco markers on pic
frame = aruco.drawDetectedMarkers(frame, corners, ids)
# draw aruco 3D axes on picture
for i, j in zip(rvecs, tvecs):
frame = aruco.drawAxis(frame, mtx, dist, i, j, 50)
# Display the resulting frame, pyrDown so it fits the screen
cv2.imshow('frame',cv2.pyrDown(frame))
# cv2.imshow('frame', frame)
# stop looping on Q press
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Hold the capture until a second keypress is made, then close
cap.release()
cv2.waitKey()
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
noreply@github.com
|
96cf6e15b3185e6b21c750ca7f0f29651668092c
|
c0075e8a666d2ec994846510e1caf593c9149319
|
/solver/adam.py
|
2d9e38493b83d2ca4fd84b8a654e45dac274d026
|
[] |
no_license
|
omiderfanmanesh/Neural-Music-Classification
|
0d5b524af6998cf4e93094a64aaf39d5064587ed
|
6d3a01c164b1d9bef419a8c7d6e8abe66a9aecbb
|
refs/heads/main
| 2023-06-20T11:23:49.497916
| 2021-06-28T13:11:53
| 2021-07-13T09:46:34
| 362,450,587
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 579
|
py
|
import torch.optim as optim
class Adam:
def __init__(self, cfg, model_params):
self.cfg = cfg
self.model_params = model_params
def get_params(self):
return {
'params': self.model_params,
'lr': self.cfg.OPT.ADAM.LR,
'betas': self.cfg.OPT.ADAM.BETAS,
'eps': self.cfg.OPT.ADAM.EPS,
'weight_decay': self.cfg.OPT.ADAM.WEIGHT_DECAY,
'amsgrad': self.cfg.OPT.ADAM.AMS_GRAD
}
def optimizer(self):
params = self.get_params()
return optim.Adam(**params)
|
[
"omiderfanmanesh.dev@gmail.com"
] |
omiderfanmanesh.dev@gmail.com
|
a918d1be3e1fa9e5f4382813050f4da668f2077a
|
5a42636bb3f793304595600cdd1534c8433f3339
|
/ErrorAnalysis_Code/ErrorAnalysis.py
|
dac144c9007a26e20df1188e8a607ec8b72128b9
|
[
"MIT"
] |
permissive
|
mmstoll/Ocean569_Code
|
21902a5be2cb9a17ab32755413dc8b708533ddce
|
228cb719f3e82f187f704f343d3b3590a38236d7
|
refs/heads/master
| 2022-11-16T14:43:04.579602
| 2020-07-15T21:03:55
| 2020-07-15T21:03:55
| 259,759,598
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,718
|
py
|
#
# estimate and plot a chi-squared distribution with error bounds
#
from scipy import stats
import matplotlib.pyplot as plt
import numpy as np
#
# file_out='/Users/riser/Desktop/ocean.569A/chi-sq.nu=10.confidence.limits.jpg'
#
# define the number of degrees of freedom
#
nu=10
#
# define the arrays
#
nn=1000
xx=np.zeros(nn)
yy=np.zeros(nn)
xx=np.linspace(-1,30,nn)
yy=stats.chi2.pdf(xx,nu,loc=0, scale=1)
#
# define the confidence limits
#
plt.plot(xx,yy)
plt.show()
#
conf_lim=0.95
conf_above=(1.-conf_lim)/2
conf_below=1.-(1.-conf_lim)/2.
mark_rt = stats.chi2.ppf(conf_below,nu)
mark_lf = stats.chi2.ppf(conf_above,nu)
print (mark_lf,mark_rt)
#
# set up the plot object
#
fig=plt.figure(figsize=(12,5))
plt.xlim([-0.75,30])
plt.ylim([0,0.16])
plt.plot(xx,yy,'k')
plt.plot([nu,nu],[0,stats.chi2.pdf(nu,nu,loc=0,scale=1)],'--r')
plt.xlabel('$\chi^2$', fontsize=17)
plt.ylabel(r'Probability', fontsize=17)
plt.title(r'$\chi^2\ \mathrm{distribution}, \nu$ = %d' % nu, fontsize=13)
plt.fill_between(xx,0,yy,where=(np.array(xx)>min(xx))&(np.array(xx)<=mark_lf),facecolor='magenta')
plt.fill_between(xx,0,yy,where=(np.array(xx)>mark_lf)&(np.array(xx)< mark_rt),facecolor='lemonchiffon')
plt.fill_between(xx,0,yy,where=(np.array(xx)>mark_rt)&(np.array(xx)<= max(xx)),facecolor='magenta')
#
# annotate the plot
#
fs=10
plt.text(3.5,0.004,'2.5% outlier',fontsize=fs,color='magenta')
plt.text(17.5,0.004,'2.5% outlier',fontsize=fs,color='magenta')
plt.text(0.75,0.04,'$\chi^2_{0.975} = %.2f$'%mark_lf,fontsize=fs)
plt.text(20.7,0.015,'$\chi^2_{0.025} = %.2f$'%mark_rt,fontsize=fs)
plt.text(15.5,0.125,'$\chi^2_{0.975} \leq \chi^2 \leq \chi^2_{0.025}$',fontsize=fs)
plt.text(15.5,0.105,'$3.25 \leq \chi^2 \leq 20.48$',fontsize=fs)
plt.grid(True)
#
# save the figure and close
#
#plt.savefig(file_out)
plt.show()
# estimate chi-squared upper and lower error bars
# for a given set of confidence limits
#
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
#
# path_out='/Users/riser/Desktop/ocean.569A/chi-sq.log-log.error.bars.jpg'
#
# set up the plot parameters
#
nn=500
xx=np.zeros(nn)
yy_lower0=np.zeros(nn)
yy_upper0=np.zeros(nn)
frac=0.1
#
fig=plt.figure(figsize=(8,5))
plt.xlim([2,50])
plt.ylim([-2,3])
fs=15
#
# define the set of confidence limits and their colors
#
conf_set=[0.95,0.80,0.70]
color_set=['red','darkblue','goldenrod']
#
# loop through the set of confidence limits
# fine the upper and lower bound for each
#
for j in range(0,3):
for i in range(0,nn-2):
ii=frac*(i+2)
nu=ii
d_mean=nu
conf_lim=conf_set[j]
xx[i]=ii
conf_above=(1.-conf_lim)/2
conf_below=1.-(1.-conf_lim)/2.
mark_rt = stats.chi2.ppf(conf_below,nu)
mark_lf = stats.chi2.ppf(conf_above,nu)
yy_upper_nolog=mark_rt
yy_lower_nolog=mark_lf
yy_upper0[i]=(yy_upper_nolog-d_mean)/d_mean
yy_lower0[i]=(d_mean-yy_lower_nolog)/d_mean
ind_set=nn-2
xx=xx[0:ind_set]
yy_lower1=yy_lower0[0:ind_set]
yy_upper1=yy_upper0[0:ind_set]
#
# plot each curve
#
plt.plot(xx,yy_upper1,color=color_set[j])
plt.plot(xx,-yy_lower1,color=color_set[j],linestyle=(0,(1,1)))
#
# finish the plot with appropriate labels
#
plt.plot([2,50],[0,0],'k')
plt.text(32,0.7,'Upper error bound',fontsize=fs)
plt.text(32,-0.9,'Lower error bound',fontsize=fs)
plt.xlabel(r'$\nu$',fontsize=fs)
plt.ylabel('Fraction of the mean value',fontsize=fs)
plt.grid()
#
fs=13
plt.plot([21,23],[2.8,2.8],color=color_set[0])
plt.text(23.8,2.75,r'$\alpha$ = 0.95',fontsize=fs)
plt.plot([21,23],[2.5,2.5],color=color_set[1])
plt.text(23.8,2.45,r'$\alpha$ = 0.80',fontsize=fs)
plt.plot([21,23],[2.2,2.2],color=color_set[2])
plt.text(23.8,2.15,r'$\alpha$ = 0.70',fontsize=fs)
#
#plt.savefig(path_out)
plt.show()
|
[
"64496465+mmstoll@users.noreply.github.com"
] |
64496465+mmstoll@users.noreply.github.com
|
df2687be95865187cd182c14c35da780e63fbbda
|
abc1a497c41ddd8669c8c41da18af65d08ca54e4
|
/AnalysisF/recon_wf/1ns/make_H1ns.py
|
106466fe5adefdd90b89f9c759c167aade3faeb5
|
[] |
no_license
|
gerakolt/direxeno_privet
|
fcef5e3b654720e277c48935acc168472dfd8ecc
|
75e88fb1ed44fce32fce02677f64106121259f6d
|
refs/heads/master
| 2022-12-20T22:01:30.825891
| 2020-10-04T06:01:07
| 2020-10-04T06:01:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,645
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import time
import os
import sys
from scipy.optimize import minimize
from scipy.stats import poisson, binom
from scipy.special import erf as erf
pmts=[0,1,4,7,8,14]
path='/home/gerak/Desktop/DireXeno/190803/Cs137B/EventRecon/'
rec=np.load(path+'recon1ns98999.npz')['rec']
blw_cut=15
init_cut=20
chi2_cut=5000
left=700
right=1000
rec=rec[np.all(rec['init_wf']>20, axis=1)]
rec=rec[np.sqrt(np.sum(rec['blw']**2, axis=1))<blw_cut]
rec=rec[np.sqrt(np.sum(rec['chi2']**2, axis=1))<chi2_cut]
init=np.sum(np.sum(rec['h'][:,:10,:], axis=2), axis=1)
full=np.sum(np.sum(rec['h'], axis=2), axis=1)
rec=rec[init/full<0.5]
up=np.sum(rec['h'][:,:100,0], axis=1)+np.sum(rec['h'][:,:100,0], axis=1)
dn=np.sum(rec['h'][:,:100,-1], axis=1)+np.sum(rec['h'][:,:100,-2], axis=1)+np.sum(rec['h'][:,:100,-3], axis=1)
rec=rec[dn<3*up+18]
spectrum=np.histogram(np.sum(np.sum(rec['h'], axis=1), axis=1), bins=np.arange(1000)-0.5)[0]
rec=rec[np.sum(np.sum(rec['h'], axis=1), axis=1)>left]
rec=rec[np.sum(np.sum(rec['h'], axis=1), axis=1)<right]
H=np.zeros((50, 200, len(pmts)))
G=np.zeros((300, 200))
for j in range(200):
G[:,j]=np.histogram(np.sum(rec['h'][:,j,:], axis=1), bins=np.arange(np.shape(G)[0]+1)-0.5)[0]
spectra=np.zeros((350, len(pmts)))
for i, pmt in enumerate(pmts):
h=rec['h'][:,:,i]
spectra[:,i]=np.histogram(np.sum(h[:,:100], axis=1), bins=np.arange(351)-0.5)[0]
for j in range(200):
H[:,j,i]=np.histogram(h[:,j], bins=np.arange(np.shape(H)[0]+1)-0.5)[0]
np.savez(path+'H', H=H, G=G, left=left, right=right, spectra=spectra, spectrum=spectrum, up_dn_cut='dn<3*up+18')
|
[
"gerakolt@gmail.com"
] |
gerakolt@gmail.com
|
32a028c6ecd88e1712dd5af1b5ceee421ca23a80
|
9ea9794919da44b4cf77cb4c92ad641d396f26a8
|
/rcnn_attention/wrn/feature_extractor/extract_features.py
|
912291fee44accbe69235ede99fb900391046a52
|
[] |
no_license
|
AmirooR/Pairwise-Similarity-knowledge-Transfer-WSOL
|
3d8bbb4a342fb606cc972da9d157388050f87878
|
01985a4b24e839752df8b192dbda8c274923c4f8
|
refs/heads/master
| 2023-01-28T08:33:28.451844
| 2020-12-08T06:38:14
| 2020-12-08T06:38:14
| 280,340,805
| 7
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,529
|
py
|
import functools
import tensorflow as tf
import numpy as np
import os, sys
from functools import partial
from rcnn_attention.wrn.model.wide_resnet import wide_resnet_model
from dl_papers.datasets.miniimagenet import load_miniimagenet_data
trained_model_path = '../logs/mini_base/train/model.ckpt-200000'  # WRN checkpoint to restore


def extract(split='train', wrn_depth=28, wrn_width=10, is_training=False, wrn_dropout_rate=0.0):
    """Extract and save pooled WRN features for every image of a miniImagenet split.

    Restores a Wide ResNet (configured by *wrn_depth*/*wrn_width*) from
    ``trained_model_path``, feeds the split's images through it in fixed-size
    batches, global-average-pools the 'post_relu' activation map and writes
    one ``.npy`` file per image under a parallel 'features/' directory.

    NOTE(review): images beyond the last full batch (count % 10) are silently
    skipped — confirm this is intentional.
    """
    with tf.Graph().as_default():
        model = partial(
            wide_resnet_model,
            depth=wrn_depth,
            width_factor=wrn_width,
            training=is_training,
            dropout_rate=wrn_dropout_rate,
            data_format='channels_last'
        )
        batch = 10
        # miniImagenet images are 84x84 RGB.
        input_images = tf.placeholder(tf.float32, shape=(batch, 84, 84, 3))
        with tf.variable_scope('FeatureExtractor'):
            features, activations = model(input_images)
        # Global average pool over height/width -> one feature vector per image.
        pooled_features = tf.reduce_mean(activations['post_relu'], [1, 2], keep_dims=True, name='AvgPool')

        configproto = tf.ConfigProto()
        configproto.gpu_options.allow_growth = True

        # Restore every global variable from the trained checkpoint.
        variables_to_restore = tf.global_variables()
        var_map = {var.op.name: var for var in variables_to_restore}
        saver = tf.train.Saver(var_map)

        def init_fn(sess):
            saver.restore(sess, trained_model_path)

        with tf.Session(config=configproto) as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            init_fn(sess)

            data = load_miniimagenet_data(split=split, train_num=600)
            x_train, x_test, y_train, y_test, x_trean_mean, xtrain_std, files_train, files_test = data
            num_iter = x_train.shape[0] // batch
            for i in range(num_iter):
                # BUG FIX: the original condition read 'i+1 % 100 == 0', which
                # parses as 'i + (1 % 100) == 0' and never fires; the progress
                # message also divided the batch index by 100.
                if (i + 1) % 100 == 0:
                    print('processed batch {0}/{1}'.format(i + 1, num_iter))
                np_pooled_features = sess.run(pooled_features,
                                              feed_dict={input_images: x_train[i * batch:(i + 1) * batch]})
                names = files_train[i * batch:(i + 1) * batch]
                fea_names = [name[:-4] for name in names]  # strip the image file extension
                for fname, fea in zip(fea_names, np_pooled_features):
                    # Mirror the dataset layout under a 'features/' directory.
                    idx = fname.rfind(split)
                    save_name = fname[:idx] + 'features/' + fname[idx:]
                    root = save_name[:save_name.rfind('/')]
                    if not os.path.exists(root):
                        os.makedirs(root)
                    np.save(save_name, fea)


if __name__ == '__main__':
    print('Extracting features of val')
    extract(split='val')
    print('Extracting features of test')
    extract(split='test')
    print('Extracting features of train')
    extract(split='train')
|
[
"amir.rahimi@anu.edu.au"
] |
amir.rahimi@anu.edu.au
|
004329b3ddea39cfcdec79380491743f3b906eb9
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/leap/5b53afc49b5f418cb7d6bbf495c8fdd9.py
|
38933221b5f04fff5d26cc532c350159342a7cc9
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
'''This module will take in a year (positive integer) and return True if the
year is a leap year, False if it is not.
Lesson: Refactor.
'''
def is_leap_year(year):
    """Return True when *year* is a Gregorian leap year, else False."""
    if year % 400 == 0:
        return True
    return year % 4 == 0 and year % 100 != 0
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
4aa34652c3f1418ac0c14a95848b43c577266aee
|
86c097ac18dbbf46c27545bc1f429ddfd0b5d2be
|
/src/model/gan_multimodel.py
|
3c8824e03a580d78e0a759fa6a94472b563b3688
|
[
"MIT"
] |
permissive
|
nipdep/STGAN
|
c46b056a7fae4a719d3c2ec05fedfd7bb876dab0
|
c72ba6cb9d23d33accc0cfa1958a2005db3ed490
|
refs/heads/main
| 2023-08-14T15:50:15.459763
| 2021-10-07T12:03:53
| 2021-10-07T12:03:53
| 375,255,784
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,581
|
py
|
#%%
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Dense, Flatten, Reshape, Concatenate, Conv2D, UpSampling2D, BatchNormalization
tfd = tfp.distributions
tfb = tfp.bijectors
tfpl = tfp.layers
#%%
def get_prior(num_modes, latent_dim):
    """Build a trainable mixture-of-Gaussians prior over the latent space.

    Returns a MixtureSameFamily distribution with *num_modes* equally-weighted
    diagonal-Gaussian components in *latent_dim* dimensions; component means
    and (softplus-constrained) scales are trainable variables.
    """
    uniform_weights = [1 / num_modes, ] * num_modes
    component_means = tf.Variable(tf.random.normal([num_modes, latent_dim]))
    component_scales = tfp.util.TransformedVariable(
        tf.Variable(tf.ones([num_modes, latent_dim])),
        bijector=tfb.Softplus(),
    )
    return tfd.MixtureSameFamily(
        mixture_distribution=tfd.Categorical(probs=uniform_weights),
        components_distribution=tfd.MultivariateNormalDiag(
            loc=component_means,
            scale_diag=component_scales,
        ),
    )


prior = get_prior(num_modes=2, latent_dim=50)
#%%
def get_kl_regularizer(prior_distribution):
    """Create a KL(q || prior) activity regularizer for the encoder head.

    Uses a sampled (non-exact) KL estimate with 3 test points per input,
    reduced over axes (0, 1), with unit weight.
    """
    return tfpl.KLDivergenceRegularizer(
        prior_distribution,
        use_exact_kl=False,
        weight=1.0,
        test_points_fn=lambda q: q.sample(3),
        test_points_reduce_axis=(0, 1),
    )


kl_regularizer = get_kl_regularizer(prior)
#%%
def get_encoder(latent_dim, kl_regularizer):
    """Build the CNN encoder.

    Four strided conv + batch-norm stages reduce a 64x64x3 image to a
    feature map, which is flattened and projected onto the parameters of a
    full-covariance Gaussian posterior (MultivariateNormalTriL) over the
    *latent_dim*-dimensional latent space, regularized by *kl_regularizer*.
    """
    encoder = Sequential()
    encoder.add(Conv2D(filters=32, kernel_size=4, activation='relu',
                       strides=2, padding='SAME', input_shape=(64, 64, 3)))
    encoder.add(BatchNormalization())
    for n_filters in (64, 128, 256):
        encoder.add(Conv2D(filters=n_filters, kernel_size=4, activation='relu',
                           strides=2, padding='SAME'))
        encoder.add(BatchNormalization())
    encoder.add(Flatten())
    encoder.add(Dense(tfpl.MultivariateNormalTriL.params_size(latent_dim)))
    encoder.add(tfpl.MultivariateNormalTriL(latent_dim, activity_regularizer=kl_regularizer))
    return encoder


encoder = get_encoder(latent_dim=50, kl_regularizer=kl_regularizer)
def get_decoder(latent_dim):
    """Build the CNN decoder.

    A dense projection reshapes the latent code to 4x4x256; four 2x
    upsampling/conv stages grow it to 64x64, and the final conv + Flatten
    feed an IndependentBernoulli pixel distribution over 64x64x3 images.
    """
    decoder = Sequential()
    decoder.add(Dense(4096, activation='relu', input_shape=(latent_dim,)))
    decoder.add(Reshape((4, 4, 256)))
    for n_filters in (128, 64, 32):
        decoder.add(UpSampling2D(size=(2, 2)))
        decoder.add(Conv2D(filters=n_filters, kernel_size=3, activation='relu', padding='SAME'))
    decoder.add(UpSampling2D(size=(2, 2)))
    decoder.add(Conv2D(filters=128, kernel_size=3, activation='relu', padding='SAME'))
    decoder.add(Conv2D(filters=3, kernel_size=3, padding='SAME'))
    decoder.add(Flatten())
    decoder.add(tfpl.IndependentBernoulli(event_shape=(64, 64, 3)))
    return decoder


decoder = get_decoder(latent_dim=50)
#%%
def reconstruction_loss(batch_of_images, decoding_dist):
    """Average expected reconstruction loss for a batch.

    Computes -log p(x | z) under *decoding_dist* for every image in
    *batch_of_images* and averages over the batch axis (axis 0), returning
    the scalar expected reconstruction loss.
    """
    per_example_log_likelihood = decoding_dist.log_prob(batch_of_images)
    return -tf.reduce_mean(per_example_log_likelihood, axis=0)
#%%
|
[
"46063855+nipdep@users.noreply.github.com"
] |
46063855+nipdep@users.noreply.github.com
|
35f673f2499226f1528d14ea822392bfa93ec288
|
b0531384ca9944b8f7ed03ff5c6bd0f309348513
|
/Code/Braille_Dictionary_Grade_2.py
|
c15392ac3812fca175d25726ec248c849edffedf
|
[] |
no_license
|
Rajk0520/Portable_Braille
|
9198f6509aee2e583210d34e6c15245b18c86615
|
b27a274a649b910e19da579c7b6e13cedcb6b3d1
|
refs/heads/master
| 2020-05-14T20:05:31.122653
| 2019-03-21T19:50:25
| 2019-03-21T19:50:25
| 160,282,211
| 1
| 0
| null | 2018-12-04T02:09:47
| 2018-12-04T02:09:47
| null |
UTF-8
|
Python
| false
| false
| 27,241
|
py
|
# -*- coding: utf-8 -*-
"""
**************************************************************************************
* Braille Dictionary (GRADE 2)
* ================================
* This software is intended to convert english text into braille characters
* MODULE: Braille_Dictionary_Grade_2
* Filename: Braille_Dictionary_Grade_2.py
* Version: 2.1.2
* Date: March 4, 2019
*
* Authors: Aditya Kumar Singh
* Team Name: Victorious Visionaries
* Team Members: Aditya Kumar Singh, Raj Kumar Bhagat,
* Ruphan S, Yash Patel
***************************************************************************************
"""
"""
---------------------------------------------------------------------------------------
INSTRUCTIONS
=================
1. Braille is represented in form of 6 raised dots.
Ex:-
1-> o o <-4
2-> o o <-5
3-> o o <-6
The dots are represented here with '0' whereas those numbers are dots
refrence number.
2. So 'a' is represented as 0 o where '0' is raised dot and 'o' is normal dot.
o o
o o
3. Now this can be numrically reffered as 1 since only raised dot is at position 1
In the same way for 'b' we can say [1,2] since only these two dots are raised.
4. Now here each braille character set is represented in a form of array[] with
6 elements in it.
5. And raised dot is represented by "1". and normal dots are represented with "0".
6. Each element maps to braille character like this :
[ 1, 2, 3, 4, 5, 6]
So for 'a' we have [1, 0, 0, 0, 0, 0]
GRADE 2 BRAILLE
==========================
1. It is the the second grade in braille.
2. Theis includes all the character sets which were there in grade 1.
3. In addition to those, this has abbreviation, contraction which shortens the
overall braille characters.
4. And it has some short notations for most commonly used words as well to represent
them as a single character.
5. For more visit "http://www.acb.org/tennessee/braille.html"
----------------------------------------------------------------------------------------
"""
class Grade2:
    """Grade 2 (contracted) Braille translator.

    Holds the Grade 2 lookup tables (partial/whole-word contractions,
    abbreviations, standalone word signs and the main 6-dot cell dictionary)
    plus the conversion logic defined in the methods below.  Every cell in
    ``Dict`` is a list of six 0/1 flags for raised dots [1, 2, 3, 4, 5, 6].
    """

    # Kind of the previously emitted character ("char"/"number") so a number
    # sign is inserted only when switching into digits (see get_partials).
    last_type_modifier=""
    # Converted braille cells.  NOTE(review): appears unused by the methods below.
    braille_code=[]
    # List of common prefixes.
    prefix=["anti"]

    # Partial (word-final) contractions introduced by the dots-5,6 sign.
    cpartial_56 = {"ence" : "e", "ong" : "g", "ful" : "l", "tion" : "n",
                   "ness" : "s", "ment" : "t", "ity" : "y"}
    # Partial contractions introduced by the dot-6 sign.
    cpartial_6={"ation" : "n", "ally" : "y"}
    # Partial contractions introduced by the dots-4,6 sign.
    cpartial_46={"ound" : "d", "ance" : "e", "sion" : "n", "less" : "s", "ount" : "t"}

    # Whole-word contractions introduced by the dot-5 sign.
    cwhole_5 = {"character" : "(ch)", "day" : "d", "ever" : "e", "father" : "f",
                "here" : "h", "know" : "k", "lord" : "l", "mother" : "m",
                "name" : "n", "one" : "o", "ought" : "(ou)", "part" : "p",
                "question" : "q", "right" : "r", "some" : "s", "there" : "(the)",
                "through" : "(th)", "time" : "t", "under" : "u", "where" : "(wh)",
                "work" : "w", "young" : "y"}
    # Whole-word contractions introduced by the dots-4,5 sign.
    cwhole_45= {"these" : "(the)", "those" : "(th)", "upon" : "u",
                "whose" : "(wh)", "word" : "w"}
    # Whole-word contractions introduced by the dots-4,5,6 sign.
    cwhole_456= {"cannot" : "c", "had" : "h", "many" : "m", "spirit" : "s",
                 "their" : "(the)", "world" : "w"}

    # Abbreviations: a word maps to its shortened letter/sign sequence, where
    # "(xx)" denotes a single multi-letter braille sign from Dict.
    abbr= {"about" : "ab", "above" : "abv", "according" : "ac", "across" : "acr",
           "after" : "af", "afternoon" : "afn", "afterward" : "afw", "again" : "ag",
           "against" : "ag(st)", "almost" : "alm", "already" : "alr", "also" : "al",
           "although" : "al(th)", "altogether" : "alt", "always" : "alw",
           "because" : "(be)c", "before" : "(be)f", "behind" : "(be)h",
           "below" : "(be)l", "beneath" : "(be)n", "beside" : "(be)s",
           "between" : "(be)t", "beyond" : "(be)y", "blind" : "bl",
           "braille" : "brl", "children" : "(ch)n", "conceive" : "(con)cv",
           "conceiving" : "(con)cvg", "could" : "cd", "deceive" : "dcv",
           "deceiving" : "dcvg", "declare" : "dcl", "declaring" : "dclg",
           "either" : "ei", "first" : "f(st)", "friend" : "fr", "good" : "gd",
           "great" : "grt", "herself" : "h(er)f", "him" : "hm", "himself" : "hmf",
           "immediate" : "imm", "letter" : "lr", "little" : "ll", "much" : "m(ch)",
           "must" : "m(st)", "myself" : "myf", "necessary" : "nec",
           "neither" : "nei", "o'clock" : "o'c", "oneself" : "(one)f",
           "ourselves" : "(ou)rvs", "paid" : "pd", "perceive" : "p(er)cv",
           "perceiving" : "p(er)cvg", "perhaps" : "p(er)h", "quick" : "qk",
           "receive" : "rcv", "receiving" : "rcvg", "rejoice" : "rjc",
           "rejoicing" : "rjcg", "said" : "sd", "should" : "(sh)d",
           "such" : "s(ch)", "themselves" : "(the)mvs", "thyself" : "(th)yf",
           "today" : "td", "together" : "tgr", "tomorrow" : "tm",
           "tonight" : "tn", "would" : "wd", "its" : "xs", "itself" : "xf",
           "your" : "yr", "yourself" : "yrf", "yourselves" : "yrvs"}

    # Standalone words: represented by a single sign when the whole word matches.
    # NOTE(review): "was" and "by" intentionally share the same sign '"'.
    std_aln = {"but" : "b", "can" : "c", "do" : "d", "every" : "e", "from" : "f",
               "go" : "g", "have" : "h", "just" : "j", "knowledge" : "k",
               "like" : "l", "more" : "m", "not" : "n", "people" : "p",
               "quite" : "q", "rather" : "r", "so" : "s", "that" : "t",
               "us" : "u", "very" : "v", "it" : "x", "you" : "y", "as" : "z",
               "child" : "(ch)", "shall" : "(sh)", "this" : "(th)",
               "which" : "(wh)", "out" : "(ou)", "will" : "w", "be" : ";",
               "enough" : "(en)", "to" : "!", "were" : "(gg)", "his" : "(\"?)",
               "in" : "*", "was" : "\"", "by" : "\"", "still" : "(st)"}

    # Main Grade 2 cell dictionary: key -> [dot1, dot2, dot3, dot4, dot5, dot6].
    # Keys starting with '_' are control signs (number/letter/capital prefixes,
    # partial/whole-word contraction markers, space).  Several print symbols
    # legitimately share one cell (context distinguishes them in real braille).
    Dict = {
        "_number": [0, 0, 1, 1, 1, 1], "ble": [0, 0, 1, 1, 1, 1],
        "_letter": [0, 0, 0, 0, 1, 1], "_p56": [0, 0, 0, 0, 1, 1],
        "_caps": [0, 0, 0, 0, 0, 1], "_p6": [0, 0, 0, 0, 0, 1],
        "_decimal": [0, 0, 0, 1, 0, 1], "_p46": [0, 0, 0, 1, 0, 1],
        "_w5": [0, 0, 0, 0, 1, 0], "_w45": [0, 0, 0, 1, 1, 0],
        "_w456": [0, 0, 0, 1, 1, 1], "_space": [0, 0, 0, 0, 0, 0],
        "a": [1, 0, 0, 0, 0, 0], "b": [1, 1, 0, 0, 0, 0],
        "c": [1, 0, 0, 1, 0, 0], "d": [1, 0, 0, 1, 1, 0],
        "e": [1, 0, 0, 0, 1, 0], "f": [1, 1, 0, 1, 0, 0],
        "g": [1, 1, 0, 1, 1, 0], "h": [1, 1, 0, 0, 1, 0],
        "i": [0, 1, 0, 1, 0, 0], "j": [0, 1, 0, 1, 1, 0],
        "k": [1, 0, 1, 0, 0, 0], "l": [1, 1, 1, 0, 0, 0],
        "m": [1, 0, 1, 1, 0, 0], "n": [1, 0, 1, 1, 1, 0],
        "o": [1, 0, 1, 0, 1, 0], "p": [1, 1, 1, 1, 0, 0],
        "q": [1, 1, 1, 1, 1, 0], "r": [1, 1, 1, 0, 1, 0],
        "s": [0, 1, 1, 1, 0, 0], "t": [0, 1, 1, 1, 1, 0],
        "u": [1, 0, 1, 0, 0, 1], "v": [1, 1, 1, 0, 0, 1],
        "x": [1, 0, 1, 1, 0, 1], "y": [1, 0, 1, 1, 1, 1],
        "z": [1, 0, 1, 0, 1, 1],
        "and": [1, 1, 1, 1, 0, 1], "for": [1, 1, 1, 1, 1, 1],
        "of": [1, 1, 1, 0, 1, 1], "the": [0, 1, 1, 1, 0, 1],
        "with": [0, 1, 1, 1, 1, 1], "ch": [1, 0, 0, 0, 0, 1],
        "gh": [1, 1, 0, 0, 0, 1], "sh": [1, 0, 0, 1, 0, 1],
        "th": [1, 0, 0, 1, 1, 1], "wh": [1, 0, 0, 0, 1, 1],
        "ed": [1, 1, 0, 1, 0, 1], "er": [1, 1, 0, 1, 1, 1],
        "ou": [1, 1, 0, 0, 1, 1], "ow": [0, 1, 0, 1, 0, 1],
        "w": [0, 1, 0, 1, 1, 1],
        "1": [1, 0, 0, 0, 0, 0], "2": [1, 1, 0, 0, 0, 0],
        "3": [1, 0, 0, 1, 0, 0], "4": [1, 0, 0, 1, 1, 0],
        "5": [1, 0, 0, 0, 1, 0], "6": [1, 1, 0, 1, 0, 0],
        "7": [1, 1, 0, 1, 1, 0], "8": [1, 1, 0, 0, 1, 0],
        "9": [0, 1, 0, 1, 0, 0], "0": [0, 1, 0, 1, 1, 0],
        ",": [0, 1, 0, 0, 0, 0], "ea": [0, 1, 0, 0, 0, 0],
        ";": [0, 1, 1, 0, 0, 0], "bb": [0, 1, 1, 0, 0, 0],
        "be": [0, 1, 1, 0, 0, 0], ":": [0, 1, 0, 0, 1, 0],
        "con": [0, 1, 0, 0, 1, 0], "cc": [0, 1, 0, 0, 1, 0],
        ".": [0, 1, 0, 0, 1, 1], "$": [0, 1, 0, 0, 1, 1],
        "dis": [0, 1, 0, 0, 1, 1], "dd": [0, 1, 0, 0, 1, 1],
        "en": [0, 1, 0, 0, 0, 1], "!": [0, 1, 1, 0, 1, 0],
        "ff": [0, 1, 1, 0, 1, 0], "()": [0, 1, 1, 0, 1, 1],
        "gg": [0, 1, 1, 0, 1, 1], "\"?": [0, 1, 1, 0, 0, 1],
        "*": [0, 0, 1, 0, 1, 0], "\"": [0, 0, 1, 0, 1, 1],
        "'": [0, 0, 1, 0, 0, 0], "com": [0, 0, 1, 0, 0, 1],
        "-": [0, 0, 1, 0, 0, 1], "ing": [0, 0, 1, 1, 0, 1],
        "st": [0, 0, 1, 1, 0, 0], "/": [0, 0, 1, 1, 0, 0],
        "ar": [0, 0, 1, 1, 1, 0],
    }
"""
Constructor Function
"""
def __init__(self):
self.last_type_modifier=False
braille_code=[]
"""
This Function prints all the characters defined in the main dictionary
present in its parent class
"""
def show_all(self):
print("Grade 2.... \n")
for i,j in self.Dict.items():
print(i+"->")
for n,k in enumerate(self.Dict[i][:3]):
print(k,self.Dict[i][n+3])
print("\n")
"""
This Function prints a particular character in its braille form.
NOTE: The character should belong to Grade 2.
"""
def print_braille1(self,char):
if(char.isupper()):
char=char.lower()
if char in self.Dict:
for n,i in enumerate(self.Dict[char][:3]):
print(i,self.Dict[char][n+3])
print("..")
"""
This Function returns all the substrings present in the passed string
"""
def get_substrings(self,input_string):
input_string = input_string.lower()
length = len(input_string)
return [input_string[i:j+1] for i in range(length) for j in range(i,length)]
"""
This Function checks whether the given word can be shortened or not.
If it can then it will return the shortened word or else it will simply return the
given word.
NOTE: This function doesnt convert the the given string into braille,
this only sees if it can be be shortened or not.
If yes then it returns the shortened string.
"""
def process(self,text):
m="" #String to store afer shortening the given text
if (text.isupper() or text.islower() or (text[0].isupper() and text[1:len(text)].islower())): #Checking if it fits in Grade 2 standards
char=text.lower()
if char in self.std_aln: #Checking in standalone dictionary
#print("Standalone word found")
"""
This checks for the type of capatallisation and add the capatallsing
letter accordingly
"""
if(text.isupper()):
m+="(_caps)(_caps)"
elif(text[0].isupper()):
m+="(_caps)"
m+=self.std_aln[char]
elif char in self.abbr: #Checking in abbreviation dictionary
#print("Abbreviation found")
if(text.isupper()):
m+="(_caps)(_caps)"
elif(text[0].isupper()):
m+="(_caps)"
m+=self.abbr[char]
elif char in self.cwhole_5 or char in self.cwhole_45 or char in self.cwhole_456: #Checking in word contraction dictionary
#print("Whole word contraction found")
"""
Now checking which under which type of word contraction,
the given word really falls in.
"""
if char in self.cwhole_5: #Dot 5 raised word contraction
if(text.isupper()):
m+="(_caps)(_caps)"
elif(text[0].isupper()):
m+="(_caps)"
m+="(_w5)"+self.cwhole_5[char]
elif char in self.cwhole_45: #Dot 4,5 raised word contraction
if(text.isupper()):
m+="(_caps)(_caps)"
elif(text[0].isupper()):
m+="(_caps)"
m+="(_w45)"+self.cwhole_45[char]
elif char in self.cwhole_456: #Dot 4,5,6 raised word contraction
if(text.isupper()):
m+="(_caps)(_caps)"
elif(text[0].isupper()):
m+="(_caps)"
m+="(_w456)"+self.cwhole_456[char]
elif char in self.Dict: #Checking if its directly there in main dictionary
m="("+char+")"
else:
#Getting all possible sub strings of the given word
sub_str=self.get_substrings(text)
flag=0
#checking whether any partial contraction is possible or not
for i in sub_str:
if i in self.cpartial_56 or i in self.cpartial_6 or i in self.cpartial_46 or i in self.cwhole_5 or i in self.cwhole_45 or i in self.cwhole_456 or i in self.std_aln or i in self.Dict:
flag=1
#print("Partial word contraction found")
break
if(flag):
#Getting the partial contractions
partial_words=self.check_partial(text,sub_str)
#print(partial_words)
m=self.get_partials(partial_words,text)
else:
m=text
else:
m=text
#print(m)
return m
"""
This fucntion will convert the partial contractions into proper format so that finally it
converted to braille
"""
def get_partials(self,partial_words,text):
m=""
index=0
if(text.isupper()):
m+="(_caps)(_caps)"
elif(text[0].isupper()):
m+="(_caps)"
text=text.lower()
for i in partial_words:
pos=text.find(i)
while(index<pos):
if(not text[index].isnumeric()):
self.last_type_modifier="char"
if(text[index].isnumeric() and not self.last_type_modifier=="number"):
self.last_type_modifier="number"
m+="(_number)"
m+=text[index]
index+=1
if(index):
index-=1
if i in self.cpartial_56:
m+="(_p56)"+self.cpartial_56[i]
elif i in self.cpartial_6:
m+="(_p6)"+self.cpartial_6[i]
elif i in self.cpartial_46:
m+="(_p46)"+self.cpartial_46[i]
elif i in self.cwhole_5:
m+="(_w5)"+self.cwhole_5[i]
elif i in self.cwhole_45:
m+="(_w45)"+self.cwhole_45[i]
elif i in self.cwhole_456:
m+="(_w456)"+self.cwhole_456[i]
elif i in self.cwhole_456:
m+="(_w456)"+self.cwhole_456[i]
elif i in self.std_aln:
m+=self.std_aln[i]
elif i in self.Dict:
m+="("+i+")"
if not index:
index+=len(i)
continue
else:
index+=len(i)+1
while(index<=len(text)-1):
if(not text[index].isnumeric()):
self.last_type_modifier="char"
if(text[index].isnumeric() and not self.last_type_modifier=="number"):
self.last_type_modifier="number"
m+="(_number)"
m+=text[index]
index+=1
return m
"""
This functions find the suitable partial contraction according to rules.
"""
def check_partial(self,text,sub_str):
possible_replacement=[]
#First finding all the possible replacements
for i in sub_str:
if i in self.cpartial_56 or i in self.cpartial_6 or i in self.cpartial_46 or i in self.cwhole_5 or i in self.cwhole_45 or i in self.cwhole_456 or i in self.std_aln or i in self.Dict:
if len(i)>=2:
possible_replacement.append(i)
#Now applying rules of Grade 2 contractions
index=0
text_lower=text.lower()
for i in possible_replacement:
pos=text_lower.find(i)
#RULE: contractions should not affect pronounciation and are generally not used if they overlap syllables or prefix/base
#RULE: preferences should be given to the contractions which save the greatest amount of space
""" Will be appplied at last """
#RULE: 'a','and','for','of','the' and 'with' should follow one another without a space between them
#RULE: 'to,'into','by' cannot be used as partial words; as whole words they should not have a space after them
#RULE: whole words 'be','enough','were,'his','in' and 'was' can be capatlized but can't touch other words or punct
#RULE: partial words 'be','con' and 'dis' should only be used as beginning syllables('com' only has to begin a word)
if(i=="be" or i=="con" or i=="dis" or i=="com") and (pos is not 0):
possible_replacement.remove(i)
if((i=="be" or i=="con" or i=="dis" or i=="com") and (pos is 0)) and (len(possible_replacement)>1):
possible_replacement.remove(i)
#RULE: partial words 'en','in','ch,'gh','sh','th','wh','ed','er','ou','ow','st' and 'ar' can be anywhere in the word
"""
Nothing to check, since these words can be anywhere
"""
#RULE: 'ble' and 'ing' should not begin a word
if (i=="ble" or i=="ing") and pos==0:
possible_replacement.remove(i)
#RULE: 'bb','cc','dd','ff','gg' and 'ea' should not begin or end a word
if (i=="bb" or i=="cc" or i=="dd" or i=="ff" or i=="gg" or i=="ea") and (pos==0 or pos==len(text)-len(i)):
possible_replacement.remove(i)
#RULE: 'and','for','of','the' and 'with' should be used in preference to other contractions
pref=["and", "for", "of", "the", "with"]
for j in pref:
if(j==i):
prev_word=possible_replacement[index-1]
pos_prev_word=text_lower.find(prev_word)
len1=pos+len(j)-1
if(len1>=pos_prev_word):
possible_replacement.remove(prev_word)
"""
END OF RULES
"""
index=index+1
#Now Checking whether two contractions are overlapping or not, if so then clearing the other one
if(len(possible_replacement)>1):
j=0
while(j<len(possible_replacement)-1):
i=possible_replacement[j]
pos=text_lower.find(i)
index=possible_replacement.index(i)
if(index==len(possible_replacement)-1):
continue
next_word=possible_replacement[index+1]
pos_next_word=text_lower.find(next_word)
len1=pos+len(i)-1
if(len1>=pos_next_word):
if(len(i)>len(next_word)):
possible_replacement.remove(next_word)
j=-1
elif(len(i)<len(next_word)):
possible_replacement.remove(i)
j=-1
j=j+1
return possible_replacement
"""
This function checks for the index of a balanced parenthesis in the passed string
and returns it in a form of list
"""
def match_parenthesis(self,string):
a=[]
for j in range(0,len(string)):
i=string[j]
if i == "(":
a.append(j)
elif i == ")":
a.append(j)
return a
"""
This function actually takes the converted string and maps it to its braille
equivallent.
"""
def get_braille(self,text):
code=[] #List to store the braille array character by character
#Check for balanced parenthesis
flag=self.match_parenthesis(text) #Checking if parenthesis is there or not
opt=0
if(len(flag)!=0):
opt=1
else:
opt=0
if(opt==0): #Simple character by character conversion is there, since parenthesis was not detected
for i in text:
if i in self.Dict:
code.append(self.Dict[i])
else:
"""
Since parethesis was detected. Now the complete string inside the parenthesis should be treated as
one character and not s individual character.
"""
substr=[] #List to store the string within parenthesis
m=0
#This loop will append all substring inside all the parenthesis into the substr list
while(m<len(flag)-1):
substr.append(text[flag[m]+1:flag[m+1]])
m+=2
#now traversing the main converted text character by character
index=0
count=0
while(index<len(text)):
i=text[index].lower()
if(i=="("): # if found the take the upcoming character as one character until
# parenthesis closed is detected
if substr[count] in self.Dict: #Checking whether the sub-string inside parenthesis is in Main dicitonary or not
code.append(self.Dict[substr[count]])
count+=1
else: #if not then again send this word to get it shortened.
char=self.process(substr[count])
temp=0
m=self.get_braille(char) #Call itself to process the new form.
for arr in m:
code.append(arr)
for j in range(index,len(text)): #Update the index to ')''s index
if(text[j]==")"):
index=j
break
else: #or simply take the braille from main dictionary
code.append(self.Dict[i])
index+=1
return code
"""
This function takes the word first and then process it as per Grade 2.
If doesnt suit with grade 2 then simply with grade 1.
"""
def process_word(self,text):
char=""
conv_str=self.process(text)
if(conv_str==text):
print("The given word cannot be shortened as per GRADE 2 standards.")
print("Hence, it will be converted as per GRADE 1 standards")
#Function to call grade 1 conversion
else:
#print(conv_str)
char=self.get_braille(conv_str)
return char
"""
This function prints the converted braille on the screen.
"""
def print_braille(braille):
#print(braille)
_len=len(braille)
for i in range(0,3):
m=""
for x in braille:
m+=str(x[i])+str(x[i+3])
m+=" "
print(m)
# Module-level translator instance used by the (commented-out) demo below.
braille=Grade2()
"""
ch='y'
while(ch=='y'):
i=input("Enter string: ")
if i=='n':
ch='n'
break
print("Given text:" + i)
print("Text after conversion: ")
code=Braille.process_word(i)
print_braille(code)
pre_defined_words=["and", "for", "of", "the", "with", "child", "shall", "this", "which", "out", "ow", "will", "be", "enough", "to", "were", "his", "in", "was", "by", "still", "a", "but", "can", "do", "every", "from", "go", "have", "I", "just", "knowledge", "like", "more", "not", "o", "people", "quite", "rather", "so", "that", "us", "very", "it", "you", "as", "character", "day", "ever", "father", "here", "know", "lord", "mother", "name", "one", "ought", "part", "question", "right", "some", "there", "through", "time", "under", "where", "work", "young", "these", "those", "upon", "whose", "word", "cannot", "had", "many", "spirit", "their", "world", "about", "above", "according", "across", "after", "afternoon", "afterward", "again", "against", "almost", "already", "also", "although", "altogether", "always", "because", "before", "behind", "below", "beneath", "beside", "between", "beyond", "blind", "braille", "children", "conceive", "conceiving", "could", "deceive", "deceiving", "declare", "declaring", "either", "first", "friend", "good", "great", "herself", "him", "himself", "immediate", "letter", "little", "much", "must", "myself", "necessary", "neither", "o'clock", "oneself", "ourselves", "paid", "perceive", "perceiving", "perhaps", "quick", "receive", "receiving", "rejoice", "rejoicing", "said", "should", "such", "themselves", "thyself", "today", "together", "tonight", "would", "its", "itself", "your", "yourself", "yourselves"]
for i in pre_defined_words:
print("Given text:" + i)
print("Text after conversion: ")
code=Braille.process_word(i)
print_braille(code)
#print(len(pre_defined_words))
"""
|
[
"adityasingh3007@gmail.com"
] |
adityasingh3007@gmail.com
|
712d10d431739dab7979cbd50064908f8a699d66
|
3302b12ed21214ea0f2c4fc22c49caa359c675fd
|
/pages/basket_page.py
|
baff0b1286fd684c084cd2c9a7e9acd073726b24
|
[] |
no_license
|
furbeast/stepik_selenium_total
|
bede338303113b8cc3bb6fa8687646145aee2f6d
|
2d0e86df0d64029c37ddf5b3f19a500b985ba071
|
refs/heads/master
| 2023-05-30T23:29:42.611293
| 2020-06-22T16:45:09
| 2020-06-22T16:45:09
| 273,192,112
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
from .base_page import BasePage
from .locators import BasketPageLocators
class BasketPage(BasePage):
    """Page object for the basket page with assertion helpers for emptiness."""

    def basket_empty(self):
        """Assert that the basket contains no item elements."""
        assert self.is_not_element_present(*BasketPageLocators.BASKET_ITEMS), "Basket not empty"

    def basket_empty_text(self):
        """Assert that the 'basket is empty' message is displayed.

        BUG FIX: the original used ``.text.find('empty')``, which returns an
        index — -1 (truthy) when the word is absent and 0 (falsy) when it is
        at the start — so the assertion was effectively inverted.  Use a
        substring membership test instead.
        """
        assert 'empty' in self.browser.find_element(*BasketPageLocators.BASKET_EMPTY_TEXT).text, "Basket text empty not found"
|
[
"slonn91@mail.ru"
] |
slonn91@mail.ru
|
e1d364e8012b8e88a5aa8ea7ea24b49307bae086
|
5064d0a44fb1e1af0205ae0bfa711bdbf2a33cc6
|
/test/main_json.py
|
495de07bbef5c2a94bb17969b852bb609d084a3b
|
[] |
no_license
|
lmxwade/DSCI_551
|
4e157ae87f370a5e0195ea64c1afb2cf385c2418
|
eecdc9222ae0e3441c167525609dfd54ed4134a8
|
refs/heads/master
| 2023-02-10T15:48:38.755414
| 2020-07-04T16:24:35
| 2020-07-04T16:24:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,092
|
py
|
#
__author__ = 'Aaron Yang'
__email__ = 'byang971@usc.edu'
__date__ = '9/6/2019 3:45 PM'

import json

if __name__ == "__main__":
    # Demo of a dict <-> JSON round trip.
    # FIX: the original comment claimed <class 'list'>; this is a dict.
    data = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}
    print(type(data))  # <class 'dict'>

    # json.dumps encodes a Python object into a JSON string.
    json_str = json.dumps(data)
    print(json_str)

    json_data = '{"a":1,"b":2,"c":3,"d":4,"e":{"1":23}}'
    dict_obj = json.loads(json_data)
    print(dict_obj)
    print(type(dict_obj))

    # json.dumps: keys must be str, int, float, bool or None — not tuple;
    # json.dumps({(1, 2): 3}) raises TypeError.
    # json.loads requires property names in double quotes; each of
    # json.loads("{'1': 3}"), json.loads('{(1): 3}') and json.loads('{1: 3}')
    # raises "Expecting property name enclosed in double quotes".
    print(json.loads('{"1": 3}'))

import urllib.parse

# FIX: the original bound these to 'str' and 'list', shadowing the builtins.
# raw_name = 'l.a.a.c.-8th'
raw_name = "HOUSE OF CURRY"
# Collapse runs of whitespace, lower-case, and split into word tokens.
tokens = ' '.join(raw_name.lower().split()).split(' ')
print(tokens)
|
[
"aaron19940628@gmail.com"
] |
aaron19940628@gmail.com
|
4d0af196400f9ebc7e28981d5b4b342f0ea75310
|
5cf4c49043d66fd37a1e9a183cf126a915b19668
|
/src/topic_store/__init__.py
|
b232dfe3306892d5c42e43d6b8c973c37300b9a8
|
[
"MIT"
] |
permissive
|
pet1330/topic_store
|
f14859e746839bec8fe197933ea23155936644af
|
3f3f7fbcc3289ef096b6e2e96fb5d80b869a11d4
|
refs/heads/master
| 2023-04-16T08:06:33.650998
| 2021-04-12T09:10:28
| 2021-04-12T09:10:28
| 264,911,292
| 0
| 0
|
MIT
| 2020-05-18T10:55:11
| 2020-05-18T10:55:11
| null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
# Raymond Kirk (Tunstill) Copyright (c) 2020
# Email: ray.tunstill@gmail.com
from .api import load
from .data import TopicStore
def get_package_root(use_rospkg=True):
    """Locate the root directory of the 'topic_store' package.

    When *use_rospkg* is True, tries rospkg first so the result matches the
    ROS package index; otherwise (or on any rospkg failure) falls back to
    resolving three levels up from this file.

    Returns:
        pathlib.Path: absolute path to the package root.
    """
    import pathlib
    if use_rospkg:
        try:
            import rospkg
            return pathlib.Path(rospkg.RosPack().get_path("topic_store"))
        except Exception:
            # BUG FIX: was a bare 'except:', which also swallowed SystemExit
            # and KeyboardInterrupt; Exception still covers ImportError and
            # rospkg lookup failures without masking interpreter exits.
            pass  # use default way of finding package
    return (pathlib.Path(__file__) / "../../..").resolve()
|
[
"ray.tunstill@live.co.uk"
] |
ray.tunstill@live.co.uk
|
a151e9843eb215cac92e2cf8ba46206c8de27cd0
|
db76cfd222d7208c4dd15ef9dd3a8331b384f44a
|
/Baike_Python/spider_main.py
|
3e953f70129515f2612e734ccaecbb4a0f2e2dc3
|
[] |
no_license
|
DanielQ8843/Robot_0.1
|
c94624600b9a02c594ea7ffbaf39a0c40dcd49b9
|
8d23135c97f78ba7a0cdd6ee48569800afdcabc8
|
refs/heads/master
| 2016-08-13T01:17:24.053074
| 2016-01-12T07:47:41
| 2016-01-12T07:47:41
| 49,482,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,238
|
py
|
# coding: utf8
# 名称:爬虫总调度程序
# 功能:会以入口 URL 作为餐参数,爬取所有的相关页面
# 首先:编写 main 函数
import url_manager, html_downloader, html_parser, html_outputer
class Crawler(object):
    """Crawler scheduler: wires together the URL manager, downloader, parser
    and outputer to crawl pages reachable from a root URL."""

    def __init__(self):
        # Initialise the four collaborating components.
        self.urls = url_manager.UrlManager()                # URL manager
        self.downloader = html_downloader.HtmlDownloader()  # HTML downloader
        self.parser = html_parser.HtmlParser()              # HTML parser
        self.outputer = html_outputer.HtmlOutputer()        # HTML outputer

    def crawl(self, root_url, crawl_count):
        """Crawl up to *crawl_count* pages starting from *root_url*.

        Each iteration pops a pending URL, downloads and parses it, queues
        the newly discovered URLs and collects the parsed data; finally the
        collected data is written out as an HTML report.
        """
        crawled_count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                # print() form works identically on Python 2 and 3 here.
                print('craw %d:%s' % (crawled_count, new_url))
                html_cont = self.downloader.download(new_url)
                # BUG FIX: the parse result was bound to 'new_date' but
                # consumed below as 'new_data', raising NameError on every
                # page (hidden by the broad except).
                new_urls, new_data = self.parser.parser(new_url, html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                if crawled_count == crawl_count:
                    break
                crawled_count = crawled_count + 1
            except Exception:
                # Deliberately broad: target pages may lack a summary or be
                # dead links; log and continue with the next URL.
                print('craw failed')
        self.outputer.output_html()
if __name__ == '__main__':
root_url = "http://baike.baidu.com/view/21087.htm" # S1 复制我们要抓取的 URL 作为入口 —— 设定入口 URL
crawler = Crawler() # S2 创建一个 spider,叫做 SpiderMain —— 爬虫总调度程序
crawler.crawl(root_url, 100) # S3 调用 Spider的craw方法来启动爬虫;Main 函数就写好了;—— 启动爬虫;下面进行爬虫总调度程序 SpiderMain 的编写
|
[
"jsevencqu@gmail.com"
] |
jsevencqu@gmail.com
|
e5187be6c2339cacd981c880a7bcc4f600452526
|
e1112bb6d54acb76e6e991fc4c3fc0d3a1f7b0d6
|
/02 - Sets and tuples/Exercise/02-Sets_of_elements.py
|
7bbb5ad6a390172f3d0bbbf55241d1a067f2744d
|
[] |
no_license
|
MiroslavPK/Python-Advanced
|
0326209d98254d4578a63dcd4c32b49be183baf2
|
0c696a220aa587edb2505e8d986b041cc90a46f3
|
refs/heads/master
| 2023-01-12T10:46:06.590096
| 2020-11-18T19:08:55
| 2020-11-18T19:08:55
| 295,449,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
n, m = map(int, input().split())
n_set = set()
m_set = set()
for i in range(n+m):
if i < n:
n_set.add(input())
else:
m_set.add(input())
intersection = n_set & m_set
print('\n'.join(intersection))
|
[
"miroslavkarakostov@gmail.com"
] |
miroslavkarakostov@gmail.com
|
6f1bb4ff7967bfd1652c3e845f0f639580fcd308
|
a45b8075f3c3b247a3cac43cb12bf4d80103f608
|
/glamazer/urls.py
|
2df7d0d6d8a24ce521febc8454892b0cfa167c9e
|
[] |
no_license
|
kahihia/glamfame
|
c890a8772aa92b8ed9e3c0bb664c5dae187d1c09
|
af91d4d16d0c8847c42eb97be839bf08015274b6
|
refs/heads/master
| 2021-01-21T09:59:52.700945
| 2016-02-15T17:16:13
| 2016-02-15T17:16:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,906
|
py
|
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'glamazer.core.views.home', name='home'),
url(r'^hairstyle/', 'glamazer.core.views.base', name='hair'),
url(r'^nails-design/', 'glamazer.core.views.base', name='nails'),
url(r'^make-up/', 'glamazer.core.views.base', name='make_up'),
url(r'^style/', 'glamazer.core.views.base', name='style'),
url(r'^contest/', 'glamazer.core.views.base', name='contest'),
url(r'^leaderboards/', 'glamazer.core.views.base', name='leaderboards'),
url(r'^result/', 'glamazer.core.views.search', name='result'),
url(r'^get_notifications/', 'glamazer.notifications.views.get_notifications', name='short_notifications'),
url(r'^get_notifications_count/', 'glamazer.notifications.views.get_notification_count', name='notification_count'),
url(r'^autocomplete_tags/', 'glamazer.core.views.autocomplete_tags', name='autocomplete_tags'),
url(r'^sign_up/', TemplateView.as_view(template_name="sign_up.html"), name='signup'),
url(r'^terms/', TemplateView.as_view(template_name="core/terms.html"), name='terms'),
url(r'^imprint/', TemplateView.as_view(template_name="core/imprint.html"), name='imprint'),
url(r'^privacy/', TemplateView.as_view(template_name="core/privacy.html"), name='privacy'),
url(r'^faq/', TemplateView.as_view(template_name="core/faq.html"), name='faq'),
url(r'^about_us/', TemplateView.as_view(template_name="core/about_us.html"), name='about_us'),
url(r'^contacts/', TemplateView.as_view(template_name="core/contact_us.html"), name='contacts'),
url(r'^admin/', include(admin.site.urls)),
url(r'^users/', include('glamazer.users.urls')),
url(r'^artists/', include('glamazer.artists.urls')),
url(r'^salons/', include('glamazer.salons.urls')),
url(r'^listings/', include('glamazer.listings.urls')),
url(r'^favorites/', include('glamazer.favorites.urls')),
url(r'^reviews/', include('glamazer.reviews.urls')),
url(r'^widget/', include('glamazer.widget.urls')),
url(r'^success/', 'glamazer.payments.views.success_payment', name='success'),
url(r'^error/', 'glamazer.payments.views.error_payment', name='error'),
url(r'^cancel/', 'glamazer.payments.views.cancel_payment', name='cancel'),
url(r'^get_hint/', 'glamazer.core.views.get_hint', name='get_hint'),
url(r'^start_payment/', 'glamazer.payments.views.start_payment', name='paypal_payment'),
url(r'^send_feedback/$', 'glamazer.users.views.send_feedback', name='send_feedback'),
)
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"zhuhov@gmail.com"
] |
zhuhov@gmail.com
|
2d7752b5248ca30de42447503f8cb51b06fd5d1f
|
21e64f9410323a11d4550b889fd0bb0d68543fab
|
/config/munin/mongodb_conn
|
93f39733e6ea84ca3aa106275b63a88e87de9375
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
quanganhdo/NewsBlur
|
a7eaa3c5bdb2e57998651d736db861f88fcd1e75
|
cef29f01658c845564a5044b48b4cf19efcaa4d6
|
refs/heads/master
| 2021-03-05T23:56:27.976498
| 2020-02-27T15:23:23
| 2020-02-27T15:23:23
| 246,164,347
| 1
| 0
|
MIT
| 2020-03-09T23:34:18
| 2020-03-09T23:34:17
| null |
UTF-8
|
Python
| false
| false
| 791
|
#!/srv/newsblur/venv/newsblur/bin/python
# -*- coding: utf-8 -*-
from vendor.munin.mongodb import MuninMongoDBPlugin
class MongoDBConnectionsPlugin(MuninMongoDBPlugin):
args = "-l 0 --base 1000"
vlabel = "count"
title = "MongoDB current connections"
info = "Current connections"
fields = (
('connections', dict(
label = "# of Connections",
info = "connections",
type = "GAUGE",
min = "0",
)),
)
def execute(self):
status = self.connection.admin.command('serverStatus')
try:
value = status['connections']['current']
except KeyError:
value = "U"
return dict(connections=value)
if __name__ == "__main__":
MongoDBConnectionsPlugin().run()
|
[
"samuel@ofbrooklyn.com"
] |
samuel@ofbrooklyn.com
|
|
2fb3622d8520b0df9fdbf0783f3a2333622c2c5b
|
46ad22b772f0bb115e1192ca24c86b1593d51870
|
/eclipsed/src/cursor.py
|
f67a0516f50351fa94c3799522d029fba269422b
|
[
"CC0-1.0",
"WTFPL",
"CC-BY-4.0"
] |
permissive
|
cosmologicon/unifac
|
fb533abfbba7ebb33561a330f7be5d22dbc2a373
|
e7668c6736cd4db66f8d56e945afb69ec03f2160
|
refs/heads/master
| 2022-06-15T10:46:28.448477
| 2022-05-30T20:26:55
| 2022-05-30T20:26:55
| 37,033,765
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
tobuild = None
pointingto = None
# These don't really need to go here since they're redundant with hud.mode
# but this module was looking a little lonely so here they are.
disable = False
unbuild = False
|
[
"cosmologicon@gmail.com"
] |
cosmologicon@gmail.com
|
d8b2df2337b5e5e71fcd5b02fe1fc120aa5d240b
|
fa93e53a9eee6cb476b8998d62067fce2fbcea13
|
/devel/lib/python2.7/dist-packages/pal_device_msgs/msg/_DoTimedLedEffectGoal.py
|
73f6163b853c14a328a4f720228d430539ed29a0
|
[] |
no_license
|
oyetripathi/ROS_conclusion_project
|
2947ee2f575ddf05480dabc69cf8af3c2df53f73
|
01e71350437d57d8112b6cec298f89fc8291fb5f
|
refs/heads/master
| 2023-06-30T00:38:29.711137
| 2021-08-05T09:17:54
| 2021-08-05T09:17:54
| 392,716,311
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
/home/sandeepan/tiago_public_ws/devel/.private/pal_device_msgs/lib/python2.7/dist-packages/pal_device_msgs/msg/_DoTimedLedEffectGoal.py
|
[
"sandeepan.ghosh.ece20@itbhu.ac.in"
] |
sandeepan.ghosh.ece20@itbhu.ac.in
|
a820c92be19bb6eb23ea6e9a14ad2babd43e4947
|
841192bf32a8b0f9ac53f130a732ddbaf4e0231c
|
/homework_4/code/q1.py
|
ff096c9c868025ba84c24da6673cb5d1145344f8
|
[] |
no_license
|
nolanstr/CS6190_PML
|
21d6c247f7c8ffe5b733692acd2c63affe78e781
|
0ccf02a21560aab74b9f3a52c93831a195ba102a
|
refs/heads/master
| 2022-04-15T18:34:18.746203
| 2019-12-09T20:43:25
| 2019-12-09T20:43:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,384
|
py
|
import numpy as np
import os, sys
from matplotlib import pyplot as plt
import matplotlib
sys.path.append(os.getcwd())
sys.path.insert(0, 'data/example-code')
from gmq_example import gass_hermite_quad
#from data.example_code.gmq_example import gass_hermite_quad
m = 10
c = 3
dpi = 200
fs = 20
lw = 1.75
matplotlib.rcParams.update({'font.size': fs})
def savefig(plt, path, show_message= True, tight_flag= True, newline= True):
if show_message:
print("Saving to {}".format(path))
if tight_flag:
plt.savefig(path, bbox_inches='tight', pad_inches=0)
else:
plt.savefig(path)
if newline:
print("")
def get_affine(x, m, c):
x = m*x + c
return x
def affine_sigmoid(xin, m=10, c=3):
if type(xin) != np.ndarray:
x = np.array([xin])
else:
x = xin
x = get_affine(x, m, c)
output = np.zeros(x.shape)
ind1 = (x >= 0)
ind2 = (x < 0)
output[ind1] = 1 / (1 + np.exp(-x[ind1]))
output[ind2] = np.divide(np.exp(x[ind2]), (1 + np.exp(x[ind2])))
if type(xin) != np.ndarray:
return output[0]
else:
return output
def get_lambda(xi):
output = -1/(2*get_affine(xi, m, c)) * (affine_sigmoid(xi,m ,c) - 0.5)
return output
N = gass_hermite_quad(affine_sigmoid, degree= 100)
print("Normalization = {:.2f}".format(N))
delta= 0.01
z = np.arange(-5, 5+delta, delta)
pz = np.multiply(np.exp(-np.multiply(z, z)), affine_sigmoid(z))/N
fig = plt.figure(figsize= (9.6,6), dpi= dpi)
plt.plot(z, pz, label='Ground', lw= lw)
plt.legend(loc= 'upper right')
plt.grid(True)
plt.xlabel('z')
plt.ylabel('p(z)')
plt.ylim((-0.05,1.0))
savefig(plt, "images/q1_ground.png")
#===============================================================================
# Laplace Approximation
#===============================================================================
print("Running Laplace Approximation...")
# Get the maximum of out
ind = pz.argmax()
# get the max value
theta_0 = z[ind]
# get the double derivative inverse
double_derivative_inv = -(-2 - affine_sigmoid(theta_0) * (1- affine_sigmoid(theta_0)) * m * m)
laplace_z = pz[ind] * np.exp(- 0.5 * np.power(z-theta_0, 2) * double_derivative_inv)
print("Mode/Mean = {:.2f}, Variance= {:.2f}".format(theta_0, 1.0/double_derivative_inv))
# Draw the curve now
plt.plot(z, laplace_z, label= 'Laplace', lw= lw)
plt.legend(loc= 'upper right')
savefig(plt, "images/q1_laplace.png")
#===============================================================================
# Get the local variational approximation
#===============================================================================
print("Running Local Variational Approximation...")
# Run EM algorithm
lva = np.zeros((z.shape))
# Initial value of xi
xi = 0
for i in range(10000):
# Expectation step
first = np.exp(-np.multiply(z, z)) * affine_sigmoid(xi)
second= np.exp(5*(z-xi) + get_lambda(xi) *np.multiply(10*(z-xi), 10*(z + xi) + 6))
lva = np.multiply(first, second)/N
# Maximise step
xi_new = z[lva.argmax()]
diff = np.abs(xi_new - xi)
print("Iter {}, xi= {:.4f}, Diff= {:.4f}".format(i, xi_new, diff))
if diff < 1e-4:
break
else:
xi = xi_new
# Draw the curve now
plt.plot(z, lva, label= 'LVA', lw= lw)
plt.legend(loc= 'upper right')
savefig(plt, "images/q1_laplace_lva.png")
|
[
"abhinav3663@gmail.com"
] |
abhinav3663@gmail.com
|
2a054dda5938df39f5dcdbd92bb0b4ea2a6b8503
|
abce7e5e587e69342c3ef19bd0d6ccee270a85cc
|
/turtle/catandmouse/catandmouse.py
|
23438dee6a416a7f8d4f78b46c0143a9cfe211d9
|
[] |
no_license
|
ruazjy/raspberry_pi
|
184ff234d13e5b287bbfd21d2936b12982ac737f
|
f60718d73232b169db58501920a0aafa40878eb6
|
refs/heads/master
| 2020-06-10T08:53:21.306582
| 2019-09-05T09:01:29
| 2019-09-05T09:01:29
| 193,626,340
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,967
|
py
|
import turtle
import time
# Python程序由一系列命令组成并从上到下执行
# 可以通过循环和if语句控制程序
# 不必事必躬亲,通过导入模块
# 函数可以帮助重用代码,也可以使程序变得易于理解和维护
# 变量可以存储信息以便后面使用
boxsize = 200
caught = False
score = 0
# functions that are called on keypresses
def up():
mouse.forward(10)
checkbound()
def left():
mouse.left(45)
def right():
mouse.right(45)
def back():
mouse.backward(10)
checkbound()
def quitTurtles():
window.bye()
# stop the mouse from leaving the square set by box size
def checkbound():
global boxsize
if mouse.xcor() > boxsize:
mouse.goto((boxsize, mouse.ycor()))
if mouse.xcor() < -boxsize:
mouse.goto((-boxsize, mouse.ycor()))
if mouse.ycor() > boxsize:
mouse.goto((mouse.xcor(), boxsize))
if mouse.ycor() < -boxsize:
mouse.goto((mouse.xcor(), -boxsize))
# set up screen
window = turtle.Screen()
mouse = turtle.Turtle()
cat = turtle.Turtle()
mouse.penup()
mouse.goto(100, 100)
# add key listeners
# 当键盘上产生某些输入时,执行对应的函数
window.onkeypress(up, "Up")
window.onkeypress(left, "Left")
window.onkeypress(right, "Right")
window.onkeypress(back, "Down")
window.onkeypress(quitTurtles, "Escape")
difficulty = window.numinput("Difficulty",
"Enter a difficulty from easy (1), for hard (5) ",
minval=1, maxval=5)
window.listen()
# main loop
# note how it changes with difficulty
# 当老鼠没有被猫抓住的时候,一直执行循环
while not caught:
cat.setheading(cat.towards(mouse))
cat.forward(8 + difficulty)
score = score + 1
if cat.distance(mouse) < 5:
caught = True
time.sleep(0.2 - (0.01 * difficulty))
window.textinput("Game Over", "Well done.You scored:" + str(score * difficulty))
window.bye()
|
[
"331182615@qq.com"
] |
331182615@qq.com
|
610395c1c0a55d4f8b7099bea152e4feb27dec23
|
5637cca6c5684c90586613d891506ea4c1d6cbfb
|
/Python/6kyu/one_plus_array.py
|
bf9f2effc254a665d5a02a9b921ccccd43d561b8
|
[] |
no_license
|
Patryk9201/CodeWars
|
4a4e6a91dfcf8ec62c2fa553427b106769a68f8c
|
f86da56e071832af71145cc5033f4c3115726204
|
refs/heads/master
| 2023-05-22T17:17:51.061609
| 2021-06-14T11:10:08
| 2021-06-14T11:10:08
| 375,696,728
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
"""
Given an array of integers of any length, return an array that has 1 added to the value represented by the array.
the array can't be empty
only non-negative, single digit integers are allowed
Return nil (or your language's equivalent) for invalid inputs.
Examples
For example the array [2, 3, 9] equals 239, adding one would return the array [2, 4, 0].
[4, 3, 2, 5] would return [4, 3, 2, 6]
"""
import unittest
def up_array(arr):
if not arr or min(arr) < 0 or max(arr) > 9:
return None
else:
return [int(y) for y in str(int("".join([str(x) for x in arr])) + 1)]
if __name__ == '__main__':
x = up_array([2, 3, 9])
print(x)
unittest.main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
778190838724b4739e06fe9fb67369b4a3b57141
|
4a506391d992d3f24401c242c271da8b4308eb8f
|
/001_Print_Arithmetic/10869.py
|
a5b350bb2f82c9066b18e4a32968de1441cab5c6
|
[] |
no_license
|
shdev95lee/baekjoon
|
a7c036750642717792c0faec607c23608952d789
|
0ed04c46ee0d811c744689072fd4fe4fdca3c267
|
refs/heads/master
| 2023-06-08T01:11:14.160480
| 2021-06-25T14:48:39
| 2021-06-25T14:48:39
| 379,516,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 132
|
py
|
a, b = input().split()
print(int(a)+int(b))
print(int(a)-int(b))
print(int(a)*int(b))
print(int(int(a)/int(b)))
print(int(a)%int(b))
|
[
"shdev95lee@gmail.com"
] |
shdev95lee@gmail.com
|
78549acd2b3ba1702eb99871e727416adbdfd697
|
d8f469a2b9794d864e172dd70a324a05eb54a53e
|
/P3Python/section13/Lecture 146 - Placeholders and Parameter Substitution - Source code/checkdb.py
|
9ffde63fd37cccc87d02560efbe0d8959c9dfdd7
|
[] |
no_license
|
abikg71/python37
|
78a7b7027ea6671b24c6d50cde0a30c0d0c21fd0
|
99d4ade9b21afc3588954025607c08dec6f8ecb6
|
refs/heads/master
| 2020-08-21T22:48:01.222424
| 2020-08-14T18:17:53
| 2020-08-14T18:17:53
| 216,265,189
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
import sqlite3
conn = sqlite3.connect("contacts.sqlite")
name = input("Please enter a name to search for ")
# for row in conn.execute("SELECT * FROM contacts WHERE name = ?", (name,)):
for row in conn.execute("SELECT * FROM contacts WHERE name LIKE ?", (name,)):
print(row)
conn.close()
|
[
"abinetkenore14@yahoo.com"
] |
abinetkenore14@yahoo.com
|
a6e1c8952ffea6bc71e3c9a632fbce60a039c2e9
|
4549687c81b6e97c9b1b955354cffea93bb968d1
|
/Python/API/get-ap-json_2.py
|
e48453b4882954c31760c38e8ccce06e01663f68
|
[] |
no_license
|
dkorzhevin/code
|
f1962d7beb448a2f38f304a9dcc6e3e6d7abcb13
|
42a37bad47187f33422bb72cc738fa350a0754e6
|
refs/heads/master
| 2020-09-11T05:15:57.369392
| 2020-03-10T10:48:54
| 2020-03-10T10:48:54
| 221,949,036
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
from urllib.request import Request, urlopen
import json
req = Request('https://cmxlocationsandbox.cisco.com/api/config/v1/maps/info/DevNetCampus/DevNetBuilding/DevNetZone')
req.add_header('Authorization', 'Basic bGVhcm5pbmc6bGVhcm5pbmc=')
response = urlopen(req)
response_string = response.read().decode('utf-8')
#print(response_string)
json_object = json.loads(response_string)
print(json.dumps(json_object, sort_keys=True, indent=4))
response.close()
|
[
"dkorzhevin@gmail.com"
] |
dkorzhevin@gmail.com
|
46c75cfc66b41a8d03c8a63219aa1d6fb596b2ba
|
c7c001c9011f559b8b1c85d1c3e0387a86a99628
|
/y2018/day18/lumber_collection.py
|
ec0c9f79bbc2fe5449c89bab26476d8ddca92c8e
|
[] |
no_license
|
ericbgarnick/AOC
|
5ddfd18850b96f198e125f5d1f0978e852195ccf
|
a935faad3fcbbe3ac601e2583ed27b38bc60ef69
|
refs/heads/main
| 2023-04-12T18:50:09.926169
| 2023-04-09T12:47:35
| 2023-04-09T12:47:35
| 224,573,310
| 2
| 1
| null | 2021-12-15T14:25:33
| 2019-11-28T05:00:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,890
|
py
|
from typing import List
from acre import Acre
class LumberCollection:
def __init__(self, area_map: List[str]):
row_len = len(area_map[0])
num_rows = len(area_map)
self.collection_area = [] # type: List[Acre]
self.populate_area(area_map)
self.link_acres(row_len, num_rows)
def populate_area(self, area_map: List[str]):
for row in area_map:
for acre in row:
acre_type = Acre.TYPE_FOR_SYMBOL[acre]
self.collection_area.append(Acre(acre_type))
def link_acres(self, row_len: int, num_rows: int):
for i, acre in enumerate(self.collection_area):
if i % row_len:
# W
acre.neighbors.add(self.collection_area[i - 1])
if i >= row_len:
# NW
acre.neighbors.add(self.collection_area[i - row_len - 1])
# N
acre.neighbors.add(self.collection_area[i - row_len])
if i < row_len * (num_rows - 1):
# SW
acre.neighbors.add(self.collection_area[i + row_len - 1])
# S
acre.neighbors.add(self.collection_area[i + row_len])
if i % row_len != row_len - 1:
# E
acre.neighbors.add(self.collection_area[i + 1])
if i >= row_len:
# NE
acre.neighbors.add(self.collection_area[i - row_len + 1])
# N
acre.neighbors.add(self.collection_area[i - row_len])
if i < row_len * (num_rows - 1):
# SE
acre.neighbors.add(self.collection_area[i + row_len + 1])
# S
acre.neighbors.add(self.collection_area[i + row_len])
|
[
"eric.b.garnick@gmail.com"
] |
eric.b.garnick@gmail.com
|
91459a8ac7fcc46ebcba799bda3ebe1f208692f6
|
a9443f14227a697dd92106e2dc6863944fa73fbf
|
/meng-TaggingTool/tkInter.py
|
84df25898213873b209dcfac4b5708253c678e18
|
[] |
no_license
|
KevinYe1014/pyqtExamples
|
95f35efa46872370d2f71312a461c335ad44f0df
|
6fa14be14a0ee67c3cdbc9f25b7ce413c506a459
|
refs/heads/master
| 2020-06-23T22:53:44.401692
| 2019-07-25T07:10:12
| 2019-07-25T07:10:12
| 198,777,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,395
|
py
|
# coding: utf-8
from tkinter import *
from tkinter import ttk, font
import os
import sqlite3 as sql
from io import BytesIO
from PIL import Image, ImageTk
import configparser
import random
conn = sql.connect('./hard_labels.db')
cursor = conn.cursor()
cex = cursor.execute
cex('''PRAGMA TABLE_INFO (front)''')
if cursor.fetchone() is None:
raise FileNotFoundError("input db not found")
cex('''PRAGMA TABLE_INFO (status)''')
if cursor.fetchone() is None:
with conn:
cex('''CREATE TABLE status (id TEXT PRIMARY KEY, info TEXT)''')
cex('''INSERT OR IGNORE INTO status VALUES (?,?)''', (0, '0'))
if getattr(sys, 'frozen', False):
curdir = os.path.dirname(sys.executable)
else:
curdir = os.path.dirname(os.path.abspath(__file__))
TITLE = 'ultimate super very nice idcard label tools'
FONT='./simhei.ttf'
PERSON = {'any': ''}
def get_spec_files(path, ext='.jpg', fullpath=True, maxN=0):
''' get files in path with specific extension '''
files = []
for f in os.listdir(path):
if os.path.splitext(f)[1] == ext:
if fullpath:
files.append(os.path.join(path, f))
else:
files.append(f)
if maxN > 0 and len(files) >= maxN:
break
return files
def parse_front(s):
content = s[:-4]
front, orderno, content = content.split('_')
content = content.split('-')
name, birth, nation = content[0], content[1], content[2]
idno = content[-1]
address = '-'.join(content[3:-1])
return name, birth, nation, address, idno
def parse_back(s):
content = s[:-4]
back, orderno, content = content.split('_')
police, start, end = content.split('-')
return police, start + '-' + end
##yl 2018/12/11
import cv2
import os
def save_img(img,img_name):
root_path=r'c:/users/yelei/desktop/'
path="{0}{1}{2}".format(str(root_path),str(img_name),".jpg")
cv2.imwrite(path,img)
def delete_img(img_name):
root_path = r'c:/users/yelei/desktop/'
path = "{0}{1}{2}".format(str(root_path), str(img_name), ".png")
if os.path.exists(path):
os.remove(path)
class Cleaner:
def __init__(self, root_widget, is_front, whom='any'):
self.root_widget = root_widget
if is_front:
self.images = get_spec_files(r'/zqzn/chenxin/data/ocr/images/test_data/hard_id_card_image/front')
else:
# self.images = get_spec_files(os.path.join(curdir,'images/back'))
self.images = get_spec_files(r'/zqzn/chenxin/data/ocr/images/test_data/hard_id_card_image/back')
self.images.sort()
self.is_front = is_front
cex('''SELECT info FROM status''')
self.prog = int(cursor.fetchone()[0])
if self.prog >= len(self.images):
self.prog = 0
with conn:
cex('''UPDATE status SET info=? WHERE id=0''', (str(self.prog),))
self.whom = whom
cfg = configparser.ConfigParser()
cfg.read(os.path.join(curdir, 'config.ini'))
self.fontsize = cfg['Gui']['fontsize']
self.build_gui()
self.set_task()
if self.is_front:
self.name_ety.focus()
else:
self.police_ety.focus()
# for i in range(len(eggs)):
# eggs[i] = ImageTk.PhotoImage(eggs[i])
def build_gui(self):
self.root_widget.title('{}: {}/{}'.format(TITLE, self.prog + 1, len(self.images)))
self.root_widget.geometry('800x600')
self.root_widget.columnconfigure(0, weight=1)
self.root_widget.rowconfigure(0, weight=1)
self.frame = ttk.Frame(self.root_widget)
self.frame.rowconfigure(0, weight=1)
if self.is_front:
self.frame.columnconfigure(0, weight=1)
self.frame.columnconfigure(1, weight=100)
else:
self.frame.columnconfigure(0, weight=3)
self.frame.columnconfigure(1, weight=4)
self.canvas = Canvas(self.frame)
self.frame.grid(row=0, column=0, sticky='nsew')
self.canvas.grid(row=0, column=0, rowspan=2, columnspan=2, sticky='nsew')
self.canvas.bind('<Configure>', self.event_resize)
pad = 3
if self.is_front:
self.name_lbl = ttk.Label(self.frame, text='name', width=10, font='{} {}'.format(FONT, self.fontsize))
self.name_var = StringVar()
self.name_ety = ttk.Entry(self.frame, textvariable=self.name_var,
font='{} {}'.format(FONT, self.fontsize))
self.nation_lbl = ttk.Label(self.frame, text='nation', font='{} {}'.format(FONT, self.fontsize))
self.nation_var = StringVar()
self.nation_ety = ttk.Entry(self.frame, textvariable=self.nation_var,
font='{} {}'.format(FONT, self.fontsize))
self.address_lbl = ttk.Label(self.frame, text='address', font='{} {}'.format(FONT, self.fontsize))
self.address_var = StringVar()
self.address_ety = ttk.Entry(self.frame, textvariable=self.address_var,
font='{} {}'.format(FONT, self.fontsize))
self.idno_lbl = ttk.Label(self.frame, text='idno', font='{} {}'.format(FONT, self.fontsize))
self.idno_var = StringVar()
self.idno_ety = ttk.Entry(self.frame, textvariable=self.idno_var,
font='{} {}'.format(FONT, self.fontsize))
self.name_lbl.grid(row=10, column=0, sticky='sw', pady=pad, padx=pad)
self.name_ety.grid(row=10, column=1, sticky='nsew', pady=pad, padx=pad)
self.nation_lbl.grid(row=11, column=0, sticky='sw', pady=pad, padx=pad)
self.nation_ety.grid(row=11, column=1, sticky='nsew', pady=pad, padx=pad)
self.address_lbl.grid(row=12, column=0, sticky='sw', pady=pad, padx=pad)
self.address_ety.grid(row=12, column=1, sticky='nsew', pady=pad, padx=pad)
self.idno_lbl.grid(row=13, column=0, sticky='sw', pady=pad, padx=pad)
self.idno_ety.grid(row=13, column=1, sticky='nsew', pady=pad, padx=pad)
else:
self.police_lbl = ttk.Label(self.frame, text='police', font='{} {}'.format(FONT, self.fontsize))
self.police_var = StringVar()
self.police_ety = ttk.Entry(self.frame, textvariable=self.police_var,
font='{} {}'.format(FONT, self.fontsize))
self.expiry_lbl = ttk.Label(self.frame, text='expiry', font='{} {}'.format(FONT, self.fontsize))
self.expiry_var = StringVar()
self.expiry_ety = ttk.Entry(self.frame, textvariable=self.expiry_var,
font='{} {}'.format(FONT, self.fontsize))
self.police_lbl.grid(row=10, column=0, sticky='sw', pady=pad, padx=pad)
self.police_ety.grid(row=10, column=1, sticky='nsew', pady=pad, padx=pad)
self.expiry_lbl.grid(row=11, column=0, sticky='sw', pady=pad, padx=pad)
self.expiry_ety.grid(row=11, column=1, sticky='nsew', pady=pad, padx=pad)
self.root_widget.bind('<Return>', self.event_enter)
self.root_widget.bind('<Shift-Return>', self.event_shift_enter)
self.root_widget.bind('<Control-Left>', self.event_rotate)
self.root_widget.bind('<Control-Right>', self.event_rotate_anti)
def _resize(self, width, height):
h, w = self.img_ori.height, self.img_ori.width
scale = min(width / w, height / h)
width, height = int(w * scale + 0.5), int(h * scale + 0.5)
resized = self.img_ori.resize((width, height), Image.ANTIALIAS)
self.img_show = ImageTk.PhotoImage(resized)
self.canvas.delete('IMG')
self.canvas.create_image(0, 0, image=self.img_show, anchor='nw', tags='IMG')
def event_resize(self, event):
self._resize(event.width, event.height)
def event_rotate(self, event):
self.img_ori = self.img_ori.transpose(Image.ROTATE_90)
self._resize(self.canvas.winfo_width(), self.canvas.winfo_height())
def event_rotate_anti(self, event):
self.img_ori = self.img_ori.transpose(Image.ROTATE_90)
self.img_ori = self.img_ori.transpose(Image.ROTATE_90)
self.img_ori = self.img_ori.transpose(Image.ROTATE_90)
self._resize(self.canvas.winfo_width(), self.canvas.winfo_height())
def msgbox(self, title, msg):
top = Toplevel(self.root_widget)
top.title(title)
lbl = ttk.Label(top, text=msg)
lbl.grid(row=0, column=0)
btn = ttk.Button(top, text="^-^", command=top.destroy)
btn.grid(row=1, column=0)
screenwidth = top.winfo_screenwidth()
screenheight = top.winfo_screenheight()
top.geometry('+%s+%s' % (screenwidth // 2, screenheight // 2))
top.focus()
def save_current(self):
if self.is_front:
name, nation, address, idno = self.name_var.get(), self.nation_var.get(), \
self.address_var.get(), self.idno_var.get()
with conn:
cex('''UPDATE front SET name=?,nation=?,address=?,idno=? WHERE fn=?''',
(name, nation, address, idno, self.fn))
else:
police, expiry = self.police_var.get(), self.expiry_var.get()
with conn:
cex('''UPDATE back SET police=?, expiry=? WHERE fn=?''',
(police, expiry, self.fn))
# if self.prog % 500 >= 490 and random.uniform(0,1) < 0.2:
# self.imgbox('辛苦啦,休息一下吧^-^'+PERSON[self.whom])
def event_enter(self, event):
self.save_current()
self.prog += 1
if self.prog >= len(self.images):
self.prog -= 1
self.msgbox('Yes ^-^ Yes', '完成了,欧耶')
else:
self.set_task()
self._resize(self.canvas.winfo_width(), self.canvas.winfo_height())
self.root_widget.title('{}: {}/{}'.format(TITLE, self.prog + 1, len(self.images)))
with conn:
cex('''UPDATE status SET info=? WHERE id=0''', (str(self.prog),))
def event_shift_enter(self, event):
self.save_current()
self.prog -= 1
if self.prog < 0:
self.prog += 1
self.msgbox('@-@', '天哪,竟然是第一张')
else:
self.set_task()
self._resize(self.canvas.winfo_width(), self.canvas.winfo_height())
self.root_widget.title('{}: {}/{}'.format(TITLE, self.prog + 1, len(self.images)))
with conn:
cex('''UPDATE status SET info=? WHERE id=0''', (str(self.prog),))
def set_task(self):
img = self.images[self.prog]
self.img_ori = Image.open(img)
self.img_show = ImageTk.PhotoImage(self.img_ori)
self.canvas.create_image(0, 0, image=self.img_show, anchor='nw', tags='IMG')
if self.is_front:
print(os.path.split(img)[1])
cex('''SELECT * FROM front WHERE fn=?''', (os.path.split(img)[1],))
self.fn, name, sex, nation, birth, address, idno = cursor.fetchone()
self.name_var.set(name)
self.nation_var.set(nation)
self.address_var.set(address)
self.idno_var.set(idno)
# name_, _, nation_, address_, idno_ = parse_front(self.fn)
# self.name_ety['state'] = 'disable' if name == name_ else 'normal'
# self.nation_ety['state'] = 'disable' if nation == nation_ else 'normal'
# self.address_ety['state'] = 'disable' if address == address_ else 'normal'
# self.idno_ety['state'] = 'disable' if idno == idno_ else 'normal'
else:
cex('''SELECT * FROM back WHERE fn=?''', (os.path.split(img)[1],))
self.fn, police, expiry = cursor.fetchone()
self.police_var.set(police)
self.expiry_var.set(expiry)
# police_, expiry_ = parse_back(self.fn)
# self.police_ety['state'] = 'disable' if police == police_ else 'normal'
# self.expiry_ety['state'] = 'disable' if expiry == expiry_ else 'normal'
if self.img_ori.height > self.img_ori.width:
self.event_rotate_anti(None)
if __name__ == '__main__':
root_widget = Tk()
cleaner = Cleaner(root_widget,is_front=False)
root_widget.mainloop()
|
[
"2439745857@qq.com"
] |
2439745857@qq.com
|
e665a77be9efb83ad3be40619cdfaa76985e9508
|
d8c6c2433f32c35b0039e876a837ba3a64d8a14d
|
/euler/3/a.py
|
029ea73f8a2f0efa778cbe8faedb8df32836ee44
|
[] |
no_license
|
jmerizia/programming-team-solutions
|
863158105b54ba1fd78be2b429f2bcfff92499b3
|
29f052f7f34a00a88fce0bcb701027b8d1477ee3
|
refs/heads/master
| 2021-11-29T07:57:43.291653
| 2021-11-13T07:42:59
| 2021-11-13T07:42:59
| 119,102,254
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 527
|
py
|
import math
n = 600851475143
m = math.ceil(math.sqrt(n)) + 10
is_prime = [True for _ in range(m)]
is_prime[0] = False
is_prime[1] = False
k = 2
while k * k <= m: # O(m log m log log m)
if is_prime[k]:
for i in range(k*2, m, k):
is_prime[i] = False
k += 1
primes = [idx for idx, x in enumerate(is_prime) if x]
prime_factors = []
while n > 1:
for prime in primes: # O(m log n log k)
if n % prime == 0:
prime_factors.append(prime)
n /= prime
print(prime_factors)
|
[
"jmerizia@vt.edu"
] |
jmerizia@vt.edu
|
d0d4dc3ccd41bd22334c993a04c194b2496ba982
|
42a833f190b3352eaa89604381d3db500c80f538
|
/pentestui/uiapp/views.py
|
918b2e56df39b60fe12bd295a639303bf2b49276
|
[
"Apache-2.0"
] |
permissive
|
mustgundogdu/PentestUI
|
95c037022c5aad25cf2e1a4b7ad58eedd6df6ed8
|
92263ea73bd2eaa2081fb277c76aa229103a1d54
|
refs/heads/main
| 2023-08-29T23:12:28.027452
| 2021-11-18T17:53:03
| 2021-11-18T17:53:03
| 389,436,912
| 31
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,281
|
py
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404, redirect
from django.template import loader
from django.http import HttpResponse
from django import template
from pentestui.pentest_api.models import ProcessUI
@login_required(login_url="/login/")
def index(request):
    """Dashboard view: render index.html with the logged-in user's history.

    Only ProcessUI rows whose ``userinf`` matches the current username are
    shown, so users never see each other's scan history.
    """
    user = request.user.username
    # (Removed: unused ``context`` dict and dead commented-out template code.)
    model_list = list(ProcessUI.objects.all().filter(userinf=user))
    return render(
        request,'index.html',
        {
            'History': model_list
        }
    )
@login_required(login_url="/login/")
def pages(request):
    """Generic page router: render the template named by the last URL
    segment, falling back to the 404/500 error pages on failure."""
    context = {}
    # Render to pages and check on Pentestui
    try:
        # e.g. "/app/settings.html" -> "settings.html"
        loadTemplate = request.path.split('/')[-1]
        context['segment'] = loadTemplate
        html_template = loader.get_template( loadTemplate )
        return HttpResponse(html_template.render(context, request))
    except template.TemplateDoesNotExist:
        html_template = loader.get_template( 'page-404.html' )
        return HttpResponse(html_template.render(context, request))
    except:
        # Any other rendering failure is presented as a 500 page.
        # NOTE(review): the bare except hides the original error — consider
        # logging it before rendering the error page.
        html_template = loader.get_template( 'page-500.html' )
        return HttpResponse(html_template.render(context, request))
@login_required(login_url="/login/")
def resultdetail(request, date):
    """Show the scan results recorded at *date* for the logged-in user."""
    user = request.user.username
    # Query Just Result to result page
    # ``&`` intersects the two querysets: rows matching both the timestamp
    # and the current user.
    ResultHistory = list(ProcessUI.objects.all().filter(time=date) & ProcessUI.objects.all().filter(userinf=user))
    return render(
        request,'resultlog.html',
        {
            'User_History': ResultHistory
        }
    )
@login_required(login_url="/login/")
def DeleteQuery(request, date):
    """Delete the current user's history entries recorded at *date*, then
    re-render the dashboard with whatever history remains."""
    user = request.user.username
    # Query Just Result to result page
    # ``&`` intersects the two querysets (same timestamp AND same user).
    DeleteHistory = ProcessUI.objects.all().filter(time=date) & ProcessUI.objects.all().filter(userinf=user)
    DeleteHistory.delete()
    # NOTE(review): this performs a destructive action on what is presumably
    # a GET request, which is CSRF-prone — confirm the URLconf/method.
    model_list = list(ProcessUI.objects.all().filter(userinf=user))
    return render(
        request,'index.html',
        {
            'History': model_list
        }
    )
|
[
"noreply@github.com"
] |
noreply@github.com
|
4c2339ae7981418077a33d7acdc0e31910af011f
|
e6f5e2f1238cd38e6a524a38111dd36de10d911e
|
/main.py
|
085943c7193c8c940aedb4e9713d6c89711ed39b
|
[] |
no_license
|
prantikpariksha/PythonHomework
|
650787778192b3a8a8b809e6a6b115b17e39a4ec
|
86b942526cdd97053d1b1c45bb7986919cd7f8fa
|
refs/heads/master
| 2020-04-19T09:08:14.699533
| 2019-02-04T10:57:33
| 2019-02-04T10:57:33
| 168,100,864
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,719
|
py
|
#This file contains information about few songs those are my favourite
# Each tuple: (artist, title, album, track length, genre, track number,
#              bit depth, sampling rate in kHz, size in MB)
_tracks = [
    ("Bob Marley", "No Woman No Cry", "Raga Music", 7.58, "Raggae", 7, "16 Bit", 44, 6.8),
    ("The Doors", "The End", "The best of the Doors", 11.46, "Rock", 4, "32 Bit", 48, 11.5),
    ("Pink Floyd", "Another brick in the wall", "The Wall", 7.8, "Rock", 7, "32 Bit", 48, 8.5),
]
# Print every track's metadata, one field per line, separated by a rule.
# The loop variables deliberately reuse the original names so the module's
# final state (last track's fields) is unchanged.
for _i, (Artist, Title, Album, TrackLength, Genre, TrackNumber,
         BitDepth, SamplingRateInKHz, SizeInMB) in enumerate(_tracks):
    print(Artist)
    print(Title)
    print(Album)
    print(TrackLength)
    print(Genre)
    print(TrackNumber)
    print(BitDepth)
    print(SamplingRateInKHz)
    print(SizeInMB)
    print("******************************")
    if _i < len(_tracks) - 1:
        # the original prints a blank separator between tracks, not after the last
        print("\n")
|
[
"noreply@github.com"
] |
noreply@github.com
|
d16b1d9e1c492143309416fd7527999d32681384
|
0bd8a230151dcc5708022c900575606b881563b2
|
/idea/blog/migrations/0008_auto_20180109_2344.py
|
44248a1f30de3421839d1be081c20e6eea184e8d
|
[
"MIT"
] |
permissive
|
mcmaxwell/idea_digital_agency
|
1ad4f36b814acd81d1defc4813cf02a000cda81f
|
366e27e14e80b3fbb5980c64d3d38fdbcbc8f000
|
refs/heads/master
| 2018-10-17T03:41:04.432920
| 2018-09-24T07:55:55
| 2018-09-24T07:55:55
| 108,956,925
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 766
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2018-01-09 23:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.10: adds three optional SEO fields
    # (seo_description, seo_keywords, seo_title) to the Blog model.
    dependencies = [
        ('blog', '0007_blog_subtitle_tag'),
    ]
    operations = [
        migrations.AddField(
            model_name='blog',
            name='seo_description',
            field=models.TextField(blank=True),
        ),
        migrations.AddField(
            model_name='blog',
            name='seo_keywords',
            field=models.TextField(blank=True),
        ),
        migrations.AddField(
            model_name='blog',
            name='seo_title',
            field=models.CharField(blank=True, max_length=255),
        ),
    ]
|
[
"alexrians@gmail.com"
] |
alexrians@gmail.com
|
e4054e3a7097e2df819383ffeb0765121f09cd18
|
33e78c2f65c7f6f0b0d448787ad7df2557dec774
|
/mysite/views.py
|
c87b994bed0c80da315730170b53aeb2103103f6
|
[] |
no_license
|
hjf12402/mvote
|
086c8a5a50bb82fd0d1d35fdd726c730e719cef4
|
615c03344cbed562d81dc8215686b922d2fb729a
|
refs/heads/master
| 2022-12-11T07:47:00.708907
| 2019-07-01T13:52:22
| 2019-07-01T13:52:22
| 194,530,367
| 0
| 0
| null | 2022-12-08T05:49:55
| 2019-06-30T15:15:06
|
Python
|
UTF-8
|
Python
| false
| false
| 3,930
|
py
|
#TODO: unresolved issue -- the vote handler stops working after an AJAX partial page refresh!!!!
from django.shortcuts import render, redirect
import datetime
from .models import PollItem, Poll, Profile, VoteCheck
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import Http404, HttpResponse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .form import AddPoll, AddItem
import json
# Create your views here.
def index(request):
    """Front page: list enabled polls, newest first, 5 per page.

    Rendered with ``locals()``, so every local name below (``polls``,
    ``all_polls``, ``time``, ...) is exposed to the template.
    """
    time = datetime.datetime.now()
    all_polls = Poll.objects.filter(enabled=True).order_by('-created_at')
    if all_polls.count() == 0:
        # Message text: "no poll projects yet"
        messages.add_message(request, messages.INFO, '暂无投票项目')
    else:
        paginator = Paginator(all_polls, 5)
        p = request.GET.get('p')  # requested page number; may be absent or invalid
        try:
            polls = paginator.page(p)
        except PageNotAnInteger:
            # Non-numeric page -> first page
            polls = paginator.page(1)
        except EmptyPage:
            # Out-of-range page -> last page
            polls = paginator.page(paginator.num_pages)
    return render(request, 'index.html', locals())
@login_required
def poll(request, pollid):
    """Detail page for one poll and its items (rendered via ``locals()``)."""
    try:
        poll = Poll.objects.get(id=pollid)
        pollitems = PollItem.objects.filter(poll=poll)
        if pollitems.count() == 0:
            # Message text: "no poll items yet"
            messages.add_message(request, messages.INFO, '暂无投票项')
    except:
        # Any lookup failure (e.g. the poll does not exist) becomes a 404.
        raise Http404
    return render(request, 'poll.html', locals())
@login_required
def vote(request, pollid, pollitemid):
    """Add one vote to a poll item and redirect back to its poll page.

    NOTE: unlike ``govote`` below, this endpoint performs no
    once-per-day ``VoteCheck`` limit.
    """
    item = PollItem.objects.get(id=pollitemid)
    item.vote = item.vote + 1
    item.save()
    # Message text: "vote successful"
    messages.add_message(request, messages.INFO, '投票成功')
    return redirect('/poll/{}'.format(pollid))
@login_required
def govote(request):
    """AJAX vote endpoint with a one-vote-per-user-per-poll-per-day limit.

    Returns JSON with ``votes`` (current count, 0 on failure) and
    ``bypass`` (1 when the user already voted on this poll today, so no
    new vote was recorded).
    """
    # Fix: both outputs are initialised up front. Previously ``bypass`` was
    # only assigned inside the AJAX branch, so a plain (non-AJAX or non-GET)
    # request crashed with UnboundLocalError at the json.dumps below.
    votes = 0
    bypass = 0
    if request.method == 'GET' and request.is_ajax():
        # NOTE(review): request.is_ajax() is deprecated in newer Django
        # versions — confirm the installed version still provides it.
        pollitemid = request.GET.get('pollitemid')
        pollid = request.GET.get('pollid')
        if VoteCheck.objects.filter(userid=request.user.id, pollid=pollid, vote_date=datetime.date.today()):
            # Already voted today: report the count without changing it.
            bypass = 1
        else:
            # First vote today: record it so later requests are bypassed.
            vote_rec = VoteCheck(userid=request.user.id, pollid=pollid, vote_date=datetime.date.today())
            vote_rec.save()
        try:
            pollitem = PollItem.objects.get(id=pollitemid)
            if not bypass:
                pollitem.vote += 1
                pollitem.save()
            votes = pollitem.vote
        except:
            # Bad or missing item id: fall through with votes = 0.
            votes = 0
    return HttpResponse(json.dumps({'votes':votes, 'bypass':bypass}))
@login_required
def delete_item(request):
    """Delete one poll item (id given in ?pollitemid=...) and return the
    URL of its parent poll as a plain-text response."""
    if request.method == 'GET':
        pollitemid = request.GET.get('pollitemid')
        pollitem = PollItem.objects.get(id=pollitemid)
        # Remember the parent poll before the item disappears.
        poll = pollitem.poll
        pollitem.delete()
        url = '/poll/{} #pollitems'.format(poll.id)
    # NOTE(review): for a non-GET request ``url`` is unbound and this raises
    # UnboundLocalError; also, deleting via GET is CSRF-prone.
    return HttpResponse(url)
@login_required
def addpoll(request):
    """Create a new poll owned by the current user (renders via ``locals()``)."""
    if request.method == 'POST':
        newpoll = Poll(user=request.user, enabled=True)
        form = AddPoll(request.POST, instance=newpoll)
        if form.is_valid():
            form.save()
            # Message text: "project added"
            messages.add_message(request, messages.INFO, '已增加项目')
            return redirect('/')
        else:
            # Message text: "every field must be filled in"
            messages.add_message(request, messages.INFO, '每个字段都要填写')
    else:
        form = AddPoll()
    return render(request, 'addpoll.html', locals())
def additem(request, pollid):
    """Add a new item to an existing poll (renders via ``locals()``).

    NOTE(review): unlike the sibling views this one has no
    ``@login_required`` decorator — confirm whether that is intentional.
    """
    poll = Poll.objects.get(id=pollid)
    if request.method == 'POST':
        newitem = PollItem(poll=poll)
        form = AddItem(request.POST, instance=newitem)
        if form.is_valid():
            form.save()
            # Message text: "project added"
            messages.add_message(request, messages.INFO, '已增加项目')
            url = '/poll/{}'.format(poll.id)
            return redirect(url)
        else:
            # Message text: "every field must be filled in"
            messages.add_message(request, messages.INFO, '每个字段都要填写')
    else:
        form = AddItem()
    return render(request, 'additem.html', locals())
|
[
"492710346@qq.com"
] |
492710346@qq.com
|
c0dfa6271b2327073a0a168b47640c937cbeee81
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/287/71629/submittedfiles/testes.py
|
967c3e410a7b976e11bd707dcfdc03122824963f
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
# -*- coding: utf-8 -*-
#COMECE AQUI
a=int(input('Que horas são? [0-23]'))
if a > 3 and a < 12:
print('Bom diiiia!')
elif a >= 12 and a < 18:
print('Boa tarde')
else:
print('Boa noite, ate amanha!')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
4dabbe95f411c3f366cd132317485e112c08b660
|
d1d154e30e9f3e8c68bf785909a5f8ca9ff86e03
|
/Python/note/图像处理/test/bitwise.py
|
48cf57a81fed6935abb01569d19752607bc7294c
|
[] |
no_license
|
Mecoly/note
|
aae522123acdba8ee785ed7baaee7a2b7df2da26
|
e82f933a7a20bedc30cc0e56006405e4a33f9c90
|
refs/heads/master
| 2023-04-23T13:53:35.399635
| 2021-05-10T09:46:31
| 2021-05-10T09:46:31
| 286,954,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 670
|
py
|
import cv2 as cv
# Composite a logo image (img2) onto the top-left corner of a base image
# (img1) using a binary mask, so only the logo's bright pixels are copied.
# Read the two images
img1 = cv.imread('Trackbar_Color.png')
img2 = cv.imread('blending.jpg')
# Region of interest in the base image where the logo will be placed
rows,cols,channels = img2.shape
roi = img1[0:rows, 0:cols ]
# Build the logo mask (bright pixels -> 255) and its inverse
img2gray = cv.cvtColor(img2,cv.COLOR_BGR2GRAY)
ret, mask = cv.threshold(img2gray, 10, 255, cv.THRESH_BINARY)
mask_inv = cv.bitwise_not(mask)
# Black out the logo's area inside the base image's ROI
img1_bg = cv.bitwise_and(roi,roi,mask = mask_inv)
# Extract only the logo pixels from the logo image
img2_fg = cv.bitwise_and(img2,img2,mask = mask)
# Combine background and logo foreground
dst = cv.add(img1_bg,img2_fg)
# Write the composited region back into the base image
img1[0:rows, 0:cols ] = dst
cv.imshow('res',img1)
cv.waitKey(0)
cv.destroyAllWindows()
|
[
"48834713+Mecoly@users.noreply.github.com"
] |
48834713+Mecoly@users.noreply.github.com
|
1c06d723254657701479f4b0179290148c45af44
|
0d76ba0da5446f20e500b7e31f53821b14cb49d8
|
/Codility/python/abs_distinct.py
|
a14e108198517a0a7f73819039d873dfe2b9a69b
|
[] |
no_license
|
filwaitman/playground
|
948aa687be06d456c86b65ee3ab5fb9792149459
|
dfdfab9002bff3a04f37e0c161363a864cd30f3e
|
refs/heads/master
| 2021-01-12T12:59:49.057832
| 2020-01-26T18:51:02
| 2020-01-26T18:51:02
| 68,865,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
# -*- coding: utf-8 -*-
# https://codility.com/programmers/task/abs_distinct
# 100/100
def solution(A):
    """Count the distinct absolute values in A.

    Two elements collide after abs() exactly when they share a magnitude,
    so the answer is the size of the set of magnitudes.
    """
    return len({abs(value) for value in A})
assert solution([-5, -3, -1, 0, 3, 6]) == 5
assert solution([42]) == 1
# list() around range() keeps this working on Python 3, where range objects
# do not support '+' concatenation (the original line was Python 2 only).
assert solution(list(range(-1000, 0)) + list(range(1, 1001))) == 1000
|
[
"filwaitman@gmail.com"
] |
filwaitman@gmail.com
|
c1228f134d14c27af8d505faefe1a09d888286aa
|
42f5eaf16bfd7076cb5a598cf2f239faa575f28b
|
/05-grpc-google-cloud-speech/python/google/ads/googleads/v1/enums/attribution_model_pb2.py
|
3977838048d1c0fe49d068f669eb36d56bfbe38f
|
[] |
no_license
|
jiriklepl/IMW-2019
|
ab0e1c791a794ccf8a6a8d8d4e732c29acee134c
|
921c85d3c8132114ad90db8deb52eb5ddc06c720
|
refs/heads/master
| 2020-08-28T13:29:15.087785
| 2019-12-15T17:12:24
| 2019-12-15T17:12:24
| 217,711,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 4,827
|
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v1/enums/attribution_model.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v1/enums/attribution_model.proto',
package='google.ads.googleads.v1.enums',
syntax='proto3',
serialized_options=b'\n!com.google.ads.googleads.v1.enumsB\025AttributionModelProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v1/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V1.Enums\312\002\035Google\\Ads\\GoogleAds\\V1\\Enums\352\002!Google::Ads::GoogleAds::V1::Enums',
serialized_pb=b'\n5google/ads/googleads/v1/enums/attribution_model.proto\x12\x1dgoogle.ads.googleads.v1.enums\x1a\x1cgoogle/api/annotations.proto\"\xc6\x02\n\x14\x41ttributionModelEnum\"\xad\x02\n\x10\x41ttributionModel\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x0c\n\x08\x45XTERNAL\x10\x64\x12\x19\n\x15GOOGLE_ADS_LAST_CLICK\x10\x65\x12)\n%GOOGLE_SEARCH_ATTRIBUTION_FIRST_CLICK\x10\x66\x12$\n GOOGLE_SEARCH_ATTRIBUTION_LINEAR\x10g\x12(\n$GOOGLE_SEARCH_ATTRIBUTION_TIME_DECAY\x10h\x12,\n(GOOGLE_SEARCH_ATTRIBUTION_POSITION_BASED\x10i\x12)\n%GOOGLE_SEARCH_ATTRIBUTION_DATA_DRIVEN\x10jB\xea\x01\n!com.google.ads.googleads.v1.enumsB\x15\x41ttributionModelProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v1/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V1.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V1\\Enums\xea\x02!Google::Ads::GoogleAds::V1::Enumsb\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_ATTRIBUTIONMODELENUM_ATTRIBUTIONMODEL = _descriptor.EnumDescriptor(
name='AttributionModel',
full_name='google.ads.googleads.v1.enums.AttributionModelEnum.AttributionModel',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EXTERNAL', index=2, number=100,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GOOGLE_ADS_LAST_CLICK', index=3, number=101,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GOOGLE_SEARCH_ATTRIBUTION_FIRST_CLICK', index=4, number=102,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GOOGLE_SEARCH_ATTRIBUTION_LINEAR', index=5, number=103,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GOOGLE_SEARCH_ATTRIBUTION_TIME_DECAY', index=6, number=104,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GOOGLE_SEARCH_ATTRIBUTION_POSITION_BASED', index=7, number=105,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GOOGLE_SEARCH_ATTRIBUTION_DATA_DRIVEN', index=8, number=106,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=144,
serialized_end=445,
)
_sym_db.RegisterEnumDescriptor(_ATTRIBUTIONMODELENUM_ATTRIBUTIONMODEL)
_ATTRIBUTIONMODELENUM = _descriptor.Descriptor(
name='AttributionModelEnum',
full_name='google.ads.googleads.v1.enums.AttributionModelEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_ATTRIBUTIONMODELENUM_ATTRIBUTIONMODEL,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=119,
serialized_end=445,
)
_ATTRIBUTIONMODELENUM_ATTRIBUTIONMODEL.containing_type = _ATTRIBUTIONMODELENUM
DESCRIPTOR.message_types_by_name['AttributionModelEnum'] = _ATTRIBUTIONMODELENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AttributionModelEnum = _reflection.GeneratedProtocolMessageType('AttributionModelEnum', (_message.Message,), {
'DESCRIPTOR' : _ATTRIBUTIONMODELENUM,
'__module__' : 'google.ads.googleads.v1.enums.attribution_model_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.enums.AttributionModelEnum)
})
_sym_db.RegisterMessage(AttributionModelEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"jiriklepl@seznam.cz"
] |
jiriklepl@seznam.cz
|
2ee64697c9d8f5517741bf4016974f37a4f9464d
|
5e5ce8c5f2a2be7b707cf430198a7262b766c329
|
/trunk/ufsi/src/ufsi/__init__.py
|
7d0055a8034247d3f164fb59b60e2ef32ebddf6b
|
[
"MIT"
] |
permissive
|
BackupTheBerlios/ufsi-svn
|
53cdb8e6eb44dbb506dc2c9e3108912dfcb32123
|
5b9c8a0a05045cb5704d8f5f079a17176831e479
|
refs/heads/master
| 2020-03-26T20:34:39.596486
| 2007-04-24T06:40:00
| 2007-04-24T06:40:00
| 40,804,312
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,357
|
py
|
"""
The ufsi module and its various classes.
"""
# import required standard modules
import re
# import UFSI interfaces
from PathInterface import PathInterface
from FileInterface import FileInterface
from DirInterface import DirInterface
from AuthenticationInterface import AuthenticationInterface
# import UFSI errors
from Errors \
import Error, \
UnsupportedOperationError, \
AuthenticationError, \
AuthenticationRequiredError, \
AuthenticationInvalidError, \
UnsupportedAuthenticationError, \
AuthorisationError, \
AccessDeniedError, \
InvalidArgumentError, \
InvalidPathError, \
FSError, \
PathNotFoundError, \
NotASymlinkError, \
IOError, \
EOFError, \
TimeoutError
# import UFSI abstractions
from AbstractNativePath import AbstractNativePath
from AbstractUrlPath import AbstractUrlPath
# import UFSI implementations
from NativePath import NativePath
from NativeUnixPath import NativeUnixPath
from NativeWindowsPath import NativeWindowsPath
from NativeFile import NativeFile
from NativeDir import NativeDir
from HttpPath import HttpPath
from HttpFile import HttpFile
from FtpPath import FtpPath
from FtpDir import FtpDir
from FtpFile import FtpFile
from TarPath import TarPath
from TarDir import TarDir
from TarFile import TarFile
# authentication implementations
from UserPasswordAuthentication import UserPasswordAuthentication
def Path(path):
    """
    Create the appropriate Path object for *path* by inspecting its
    protocol prefix (e.g. "http://", "ftp://"). A path without a
    "scheme://" prefix is treated as a native filesystem path; an
    unsupported scheme yields None.
    """
    protocolMatch = re.match('(?P<protocol>[^:/]+)://', path.lower())
    if protocolMatch is None:
        return NativePath(path)
    protocol = protocolMatch.group('protocol')
    if protocol == 'http':
        return HttpPath(path)
    elif protocol == 'ftp':
        return FtpPath(path)
    # Unsupported protocol.
    return None
def Dir(path):
    """
    Return a Dir object for the given path string.
    """
    # The original left this TODO as a no-op bare string; preserved here as
    # a real comment:
    # TODO: Fix error reporting for getDir, with isDir support... perhaps:
    #     if not p.isDir():
    #         return None
    p = Path(path)
    return p.getDir()
def File(path):
    """
    Return a File object for the given path string.
    """
    return Path(path).getFile()
|
[
"thekooldude@2d384c08-25fb-0310-ab61-b5b2a23e79ab"
] |
thekooldude@2d384c08-25fb-0310-ab61-b5b2a23e79ab
|
70e7af3e0751be27c0879cdd30eb63c48c35d1d0
|
a38670ee08ea64af33477899a68ee22936f70ce7
|
/luffy/第三模块/第6章网络编程/第6章每小节/2 加上循环/04 模拟ssh远程执行命令/客户端.py
|
a62f5c2f2197ac5dd07315df58754ce788d23051
|
[] |
no_license
|
foremostxiao/d
|
40ed37215f411e8b081a4cb92c8ecbd335cd9d76
|
fe80672adc6b2406365b05d5cedd02c6abf66c11
|
refs/heads/master
| 2020-03-29T13:51:19.589004
| 2018-09-23T09:29:56
| 2018-09-23T09:29:56
| 149,985,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
py
|
import socket
# Minimal "remote shell" client: sends commands typed by the user to a
# server over TCP and prints whatever the server replies.
phone = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
phone.connect(('127.0.0.1',8080))
while True:
    # 1. read a command from the user and send it
    cmd = input('>>>').strip()# e.g. a shell command to run remotely
    if not cmd:continue
    phone.send(cmd.encode('utf-8')) # hands the bytes to the OS send buffer
    # 2. receive the command's output and print it
    data = phone.recv(1024) # NOTE: 1024 is a known trap -- replies longer
                            # than 1024 bytes get truncated/split; to be fixed
    print(data.decode('gbk'))
phone.close()  # NOTE(review): unreachable -- the loop above never breaks
|
[
"foremostxiao@163.com"
] |
foremostxiao@163.com
|
74d8d9d2cc5d152126537573a927ca64f8afb791
|
5a7abc4537039860c49e9a80219efa759aad1b6f
|
/tests/providers/aws/services/ec2/ec2_securitygroup_from_launch_wizard/ec2_securitygroup_from_launch_wizard_test.py
|
a3c14ce6680594654bebaca336460ebb88319e50
|
[
"Apache-2.0"
] |
permissive
|
sec-js/prowler
|
d5a06c72f5d7e490bade1167966f83f7a5d7ed15
|
f72be9a1e492ad593c9ac267d3ca07f626263ccd
|
refs/heads/master
| 2023-08-31T22:48:33.983360
| 2022-12-22T16:02:28
| 2022-12-22T16:02:28
| 243,866,744
| 0
| 0
|
Apache-2.0
| 2022-12-23T12:23:20
| 2020-02-28T22:37:02
|
Python
|
UTF-8
|
Python
| false
| false
| 4,834
|
py
|
from re import search
from unittest import mock
from boto3 import client, resource
from moto import mock_ec2
AWS_REGION = "us-east-1"  # region used for all mocked AWS clients below
EXAMPLE_AMI_ID = "ami-12c6146b"  # canned AMI id accepted by moto
class Test_ec2_securitygroup_from_launch_wizard:
    """Tests for the ec2_securitygroup_from_launch_wizard check.

    Each test builds mocked EC2 state with moto, patches the check module's
    ``ec2_client`` with a fresh ``EC2`` service object, then runs the check
    and inspects its findings.
    """
    @mock_ec2
    def test_ec2_default_sgs(self):
        """With only the per-region default SGs present, every finding passes."""
        # Create EC2 Mocked Resources
        ec2_client = client("ec2", region_name=AWS_REGION)
        ec2_client.create_vpc(CidrBlock="10.0.0.0/16")
        from prowler.providers.aws.lib.audit_info.audit_info import current_audit_info
        from prowler.providers.aws.services.ec2.ec2_service import EC2
        current_audit_info.audited_partition = "aws"
        current_audit_info.audited_regions = ["eu-west-1", "us-east-1"]
        with mock.patch(
            "prowler.providers.aws.services.ec2.ec2_securitygroup_from_launch_wizard.ec2_securitygroup_from_launch_wizard.ec2_client",
            new=EC2(current_audit_info),
        ):
            # Test Check
            from prowler.providers.aws.services.ec2.ec2_securitygroup_from_launch_wizard.ec2_securitygroup_from_launch_wizard import (
                ec2_securitygroup_from_launch_wizard,
            )
            check = ec2_securitygroup_from_launch_wizard()
            result = check.execute()
            # One default sg per region
            assert len(result) == 3
            # All are compliant by default
            assert result[0].status == "PASS"
    @mock_ec2
    def test_ec2_launch_wizard_sg(self):
        """An SG named 'launch-wizard-*' must be flagged as FAIL."""
        # Create EC2 Mocked Resources
        ec2_client = client("ec2", region_name=AWS_REGION)
        ec2_client.create_vpc(CidrBlock="10.0.0.0/16")
        sg_id = ec2_client.create_security_group(
            GroupName="launch-wizard-1", Description="launch wizard sg"
        )["GroupId"]
        from prowler.providers.aws.lib.audit_info.audit_info import current_audit_info
        from prowler.providers.aws.services.ec2.ec2_service import EC2
        current_audit_info.audited_partition = "aws"
        current_audit_info.audited_regions = ["eu-west-1", "us-east-1"]
        with mock.patch(
            "prowler.providers.aws.services.ec2.ec2_securitygroup_from_launch_wizard.ec2_securitygroup_from_launch_wizard.ec2_client",
            new=EC2(current_audit_info),
        ):
            # Test Check
            from prowler.providers.aws.services.ec2.ec2_securitygroup_from_launch_wizard.ec2_securitygroup_from_launch_wizard import (
                ec2_securitygroup_from_launch_wizard,
            )
            check = ec2_securitygroup_from_launch_wizard()
            result = check.execute()
            # One default sg per region + created one
            assert len(result) == 4
            # Search changed sg
            for sg in result:
                if sg.resource_id == sg_id:
                    assert sg.status == "FAIL"
                    assert search(
                        "was created using the EC2 Launch Wizard",
                        sg.status_extended,
                    )
    @mock_ec2
    def test_ec2_compliant_default_sg(self):
        """A default SG attached to an instance still passes the check."""
        # Create EC2 Mocked Resources
        ec2_client = client("ec2", region_name=AWS_REGION)
        ec2_client.create_vpc(CidrBlock="10.0.0.0/16")
        default_sg_id = ec2_client.describe_security_groups(GroupNames=["default"])[
            "SecurityGroups"
        ][0]["GroupId"]
        ec2 = resource("ec2", region_name=AWS_REGION)
        ec2.create_instances(
            ImageId=EXAMPLE_AMI_ID,
            MinCount=1,
            MaxCount=1,
            SecurityGroupIds=[
                default_sg_id,
            ],
        )
        from prowler.providers.aws.lib.audit_info.audit_info import current_audit_info
        from prowler.providers.aws.services.ec2.ec2_service import EC2
        current_audit_info.audited_partition = "aws"
        current_audit_info.audited_regions = ["eu-west-1", "us-east-1"]
        with mock.patch(
            "prowler.providers.aws.services.ec2.ec2_securitygroup_from_launch_wizard.ec2_securitygroup_from_launch_wizard.ec2_client",
            new=EC2(current_audit_info),
        ):
            # Test Check
            from prowler.providers.aws.services.ec2.ec2_securitygroup_from_launch_wizard.ec2_securitygroup_from_launch_wizard import (
                ec2_securitygroup_from_launch_wizard,
            )
            check = ec2_securitygroup_from_launch_wizard()
            result = check.execute()
            # One default sg per region
            assert len(result) == 3
            # Search changed sg
            for sg in result:
                if sg.resource_id == default_sg_id:
                    assert sg.status == "PASS"
                    assert search(
                        "was not created using the EC2 Launch Wizard",
                        sg.status_extended,
                    )
|
[
"noreply@github.com"
] |
noreply@github.com
|
e00038dbf01aa55c77357fc630db17f81da0127f
|
c1125e2835829c6ed0f06d641142fb7b4e179132
|
/project/visualization.py
|
f6ccd049de7124b92023b4a14e589f24992f3dcc
|
[] |
no_license
|
chien-lung/DataScience_2018
|
b3b41e9cd9a5deaadd17e8507fcb0b80dd576022
|
dbb2a6ac9b695aafe6e2b9ba5a892387574b2514
|
refs/heads/master
| 2020-04-16T19:55:20.242523
| 2019-01-17T10:08:44
| 2019-01-17T10:08:44
| 165,879,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,104
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 8 21:35:09 2019

Builds a correlation heatmap between selected gameplay factors across the
2016-2018 match datasets and saves the figure to disk.

@author: Lung
"""
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder, Imputer
from sklearn.model_selection import train_test_split
# Load the three seasons and stack them into one frame.
data2016 = pd.read_csv('2016matchdata.csv')
data2017 = pd.read_csv('2017matchdata.csv')
data2018 = pd.read_csv('2018matchdata.csv')
data = pd.concat([data2016, data2017, data2018] ,sort=False)
# Drop every column absent from at least one yearly file, keeping only the
# columns common to all three seasons.
diffset = set()
diffset = diffset.union(set(data.columns).difference(set(data2016)))
diffset = diffset.union(set(data.columns).difference(set(data2017)))
diffset = diffset.union(set(data.columns).difference(set(data2018)))
data = data.drop(diffset,axis=1)
# Fill missing values: bans default to the string 'NaN', herald to 0.
data['ban1']=pd.Series(np.array(data['ban1'].fillna('NaN')), index=data.index)
data['ban2']=pd.Series(np.array(data['ban2'].fillna('NaN')), index=data.index)
data['ban3']=pd.Series(np.array(data['ban3'].fillna('NaN')), index=data.index)
data['herald']=pd.Series(np.array(data['herald'].fillna(0)), index=data.index)
# Factors to correlate, grouped by previously observed correlation strength.
# (A large block of commented-out alternative column lists was removed.)
focus = ['teamtowerkills','earnedgpm','goldspent','gspd', #>0.7
         'fbaron','k','a','kpm','teambaronkills', #>0.6
         'teamdragkills','firsttothreetowers', #>0.5
         'gdat15','goldat15','dmgtochampsperminute','totalgold','xpdat10','ft',#>0.3
        ]
# Rows with playerid 100/200 are the team-level rows, not individual players.
focus_data = data.loc[data['playerid'].isin([100,200])][focus]
df_corr = focus_data._get_numeric_data()
# Upper-triangle mask so each pairwise correlation appears only once.
# Fix: dtype=bool -- np.bool is deprecated and removed in recent NumPy.
mask = np.zeros_like(df_corr.corr(), dtype=bool)
mask[np.triu_indices_from(mask)] = True
cmap = sns.diverging_palette(10, 150, as_cmap=True)
plt.figure(figsize = (15,10))
sns.heatmap(df_corr.corr(), cmap = cmap, annot = True, fmt = '.2f', mask = mask, square=True, linewidths=.5, center = 0)
plt.title('Correlations - win vs factors (all games)')
plt.savefig('all_focus_1.jpg')
|
[
"e125313530@gmail.com"
] |
e125313530@gmail.com
|
e80a8d81e6392f1d7934470081943b1bf032f8fd
|
d53479a3a5efab85a065b4a7c08cb38b6246f0eb
|
/python-division.py
|
66b9ee6a6492eb7b5fa6987137dcbe09a4d4af61
|
[] |
no_license
|
Snehal6697/hackerrank
|
0242f927f630e652d6dcca901af8d8bd737b671f
|
c418fb24e08e5c57a1bd0d91f95ab2af32f01c64
|
refs/heads/master
| 2022-12-26T12:35:47.586007
| 2020-07-07T22:14:39
| 2020-07-07T22:14:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
#!/usr/bin/env python2.7
from __future__ import division
def main():
    # Read two integers and print their floor division followed by their
    # true division (true division of ints yields a float here thanks to
    # the module-level ``from __future__ import division``; this file
    # targets Python 2.7, hence raw_input and print statements).
    a = int(raw_input())
    b = int(raw_input())
    print a // b
    print a / b
if __name__ == '__main__':
    main()
|
[
"charles.wangkai@gmail.com"
] |
charles.wangkai@gmail.com
|
645502244d8b5bd04e4b89886304985eb4e75872
|
93f122d1cc40eaf65d5be2257103b0d216b3601e
|
/skripte_klassifikation/tfidf_embedding_vectorizer.py
|
329673725d5646a5964d615d8c53e660e8eaf9f5
|
[
"MIT"
] |
permissive
|
TimSchmittmann/ML2-Project
|
b1e4390e0f6d9ba3f86ad1f1dc58d3a2e2137255
|
7ad01366f2322018065efcae542ae75786b3c0ec
|
refs/heads/master
| 2022-08-17T11:50:38.013327
| 2019-01-31T19:09:44
| 2019-01-31T19:09:44
| 154,804,149
| 0
| 0
|
MIT
| 2022-06-21T21:40:18
| 2018-10-26T08:43:20
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,055
|
py
|
from sklearn.feature_extraction.text import TfidfVectorizer
from collections import defaultdict
import numpy as np
class TfidfEmbeddingVectorizer(object):
    """Document embedder: averages word vectors weighted by tf-idf.

    Words never seen during ``fit`` fall back to the maximum known idf,
    since an unseen word is at least as infrequent as any known one.
    """

    def __init__(self, word2vec):
        self.word2vec = word2vec
        self.word2weight = None
        # Embedding dimensionality, taken from an arbitrary vector in the map.
        self.dim = len(next(iter(word2vec.values())))

    def fit(self, X, y):
        """Learn per-word idf weights from the pre-tokenised documents X."""
        tfidf = TfidfVectorizer(analyzer=lambda doc: doc)
        tfidf.fit(X)
        # Unknown words default to the largest idf observed during fitting.
        fallback = max(tfidf.idf_)
        self.word2weight = defaultdict(
            lambda: fallback,
            ((word, tfidf.idf_[idx]) for word, idx in tfidf.vocabulary_.items()))
        return self

    def transform(self, X):
        """Return one mean tf-idf-weighted embedding per document in X."""
        rows = []
        for words in X:
            weighted = [self.word2vec[w] * self.word2weight[w]
                        for w in words if w in self.word2vec]
            if not weighted:
                # No known words: the document maps to the zero vector.
                weighted = [np.zeros(self.dim)]
            rows.append(np.mean(weighted, axis=0))
        return np.array(rows)
|
[
"tim.schmittmann@gmx.de"
] |
tim.schmittmann@gmx.de
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.