hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d39bd950032d2e2a2529a4b93df171309d5e27e8 | 31 | py | Python | test03.py | akim0919/Feedback | 9d511e11931c82dd767ff1c80104268062d3338f | [
"MIT"
] | null | null | null | test03.py | akim0919/Feedback | 9d511e11931c82dd767ff1c80104268062d3338f | [
"MIT"
] | null | null | null | test03.py | akim0919/Feedback | 9d511e11931c82dd767ff1c80104268062d3338f | [
"MIT"
] | null | null | null | b = 10; c = 20;
a=b+c;
print(a) | 10.333333 | 15 | 0.483871 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d39cc89da2fe3f755861ff009c6692cb7c53603a | 9,539 | py | Python | nessus_download_merge_and_upload.py | jfbethlehem/Scripts | 043a1713d6666950f9cd2bf9b67aa3408865cdb5 | [
"BSD-3-Clause"
] | null | null | null | nessus_download_merge_and_upload.py | jfbethlehem/Scripts | 043a1713d6666950f9cd2bf9b67aa3408865cdb5 | [
"BSD-3-Clause"
] | null | null | null | nessus_download_merge_and_upload.py | jfbethlehem/Scripts | 043a1713d6666950f9cd2bf9b67aa3408865cdb5 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# requires python3
#ensure destpath exists and url, merged_folder_id, username and password are correct
#redhat: scl enable rh-python34 -- python3 /root/nessus_download_merge_and_upload.py
#debian: python3 /root/nessus_download_merge_and_upload.py
import requests, json, sys, os, getpass, time, shutil, ssl
import xml.etree.ElementTree as etree
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from socket import error as SocketError
import errno
#=========DEBUG=========
#import logging
#logging.basicConfig(level=logging.DEBUG)
#
#import http.client
#
#http.client.HTTPConnection.debuglevel = 1
#
#logging.basicConfig()
#logging.getLogger().setLevel(logging.DEBUG)
#requests_log = logging.getLogger("requests.packages.urllib3")
#requests_log.setLevel(logging.DEBUG)
#requests_log.propagate = True
#=========END DEBUG===========
url = 'https://host:8834'
verify = False
token = ''
username = 'admin'
password = 'xxxxxx'
destpath = '/var/log/nessusscans/'
merged_folder_id = 682
def build_url(resource):
return '{0}{1}'.format(url, resource)
def connect(method, resource, data=None):
headers = {'X-Cookie': 'token={0}'.format(token), 'content-type': 'application/json'}
data = json.dumps(data)
if method == 'POST':
r = requests.post(build_url(resource), data=data, headers=headers, verify=verify)
elif method == 'PUT':
r = requests.put(build_url(resource), data=data, headers=headers, verify=verify)
elif method == 'DELETE':
r = requests.delete(build_url(resource), data=data, headers=headers, verify=verify)
return
else:
r = requests.get(build_url(resource), params=data, headers=headers, verify=verify)
if r.status_code != 200:
e = r.json()
print('Connect: Error: {0}'.format(e['error']))
sys.exit()
if 'download' in resource:
return r.content
else:
return r.json()
def login(usr, pwd):
login = {'username': usr, 'password': pwd}
data = connect('POST', '/session', data=login)
return data['token']
def logout():
connect('DELETE', '/session')
def list_scan():
data = connect('GET', '/scans')
return data
def count_scan(scans, folder_id):
count = 0
for scan in scans:
if scan['folder_id']==folder_id: count=count+1
return count
def print_scans(data):
for folder in data['folders']:
print("\\{0} - ({1})\\".format(folder['name'], count_scan(data['scans'], folder['id'])))
for scan in data['scans']:
if scan['folder_id']==folder['id']:
print("\t\"{0}\" - uuid: {1}".format(scan['name'].encode('utf-8'), scan['uuid']))
def export_status(scan_id, file_id):
data = connect('GET', '/scans/{0}/export/{1}/status'.format(scan_id, file_id))
return data['status'] == 'ready'
def get_folder_id(serch_folder_name, data):
folder_id = 0;
for folder in data['folders']:
if folder['name']==serch_folder_name:
folder_id = folder['id']
break
return folder_id
def export_folder(folder_name, data):
if folder_name == 'All' or folder_name == 'all':
for scan in data['scans']:
file_id = export(scan['id'])
download(scan['name'], scan['id'], file_id,os.path.join(os.getcwd(),destpath))
else:
folder_id = get_folder_id(folder_name,data)
if count_scan(data['scans'], folder_id)==0:
print("This folder does not contain reports")
return
if folder_id!=0:
for scan in data['scans']:
if scan['folder_id'] == folder_id:
file_id = export(scan['id'])
download(scan['name'], scan['id'], file_id, os.path.join(os.getcwd(),destpath))
else:
print("No such folder...")
def export(scan_id):
data = {'format': 'nessus'}
data = connect('POST', '/scans/{0}/export'.format(scan_id), data=data)
file_id = data['file']
while export_status(scan_id, file_id) is False:
time.sleep(5)
return file_id
def download(report_name, scan_id, file_id, save_path):
if not(os.path.exists(save_path)): os.mkdir(save_path)
data = connect('GET', '/scans/{0}/export/{1}/download'.format(scan_id, file_id))
file_name = 'nessus_{0}_{1}.nessus'.format(report_name.encode('utf-8'), file_id)
file_name = file_name.replace(' ', '_')
file_name = file_name.replace("\'", "")
print('Saving scan results to {0}'.format(file_name))
with open(os.path.join(save_path,file_name), 'wb') as f:
f.write(data)
donefile = '{0}_done'.format(os.path.join(save_path,file_name))
print('Data saved to {0}, writing {1}'.format(file_name, donefile))
with open(donefile, 'wb') as fd:
fd.write(bytes('', 'UTF-8'))
print('Done-file written')
def merge():
print('waiting for 60 seconds before merging and uploading.\n');
for i in range(0,60):
time.sleep(1)
print('.', end='',flush=True)
print('\nDone waiting.')
first = 1
for fileName in os.listdir(destpath):
if ".nessus_processed" in fileName:
print(":: Parsing", fileName)
if first:
mainTree = etree.parse('{0}/{1}'.format(destpath,fileName))
report = mainTree.find('Report')
report.attrib['name'] = 'Merged Report'
first = 0
else:
tree = etree.parse('{0}/{1}'.format(destpath,fileName))
for element in tree.findall('.//ReportHost'):
report.append(element)
print(":: => done.")
if "nss_report" in os.listdir(destpath):
shutil.rmtree('{0}/nss_report'.format(destpath))
os.mkdir('{0}/nss_report'.format(destpath))
mainTree.write('{0}/nss_report/report.nessus_merged'.format(destpath), encoding="utf-8", xml_declaration=True)
def upload(upload_file, count=0):
"""
File uploads don't fit easily into the connect method so build the request
here instead.
"""
try:
params = {'no_enc': 0}
headers = {'X-Cookie': 'token={0}'.format(token)}
filename = os.path.basename(upload_file)
files = {'Filename': (filename, filename),
'Filedata': (filename, open(upload_file, 'r'))}
print('Uploading file now.')
r = requests.post(build_url('/file/upload'), params=params, files=files,
headers=headers, verify=verify)
print('done')
resp = r.json()
print('{0} {1} {2}'.format(count, resp['fileuploaded'], r.status_code))
if r.status_code != 200:
print('Upload: Error: {0}'.format(resp['error']))
if count < 5:
count = count + 1
print('ErrNot200: Retrying upload ({0}/5)'.format(count))
time.sleep(5)
return upload(upload_file, count)
else:
print('Upload failed too often. Aborting.')
sys.exit
return resp['fileuploaded']
except SocketError as e:
if count < 5:
count = count + 1
print('SocketErr: Retrying upload ({0}/5) {1}'.format(count, e))
time.sleep(5)
return upload(upload_file, count)
else:
print('Upload failed too often. Aborting.')
sys.exit
def import_scan(filename):
im_file = {'file': filename, 'folder_id': merged_folder_id}
print('Importing uploaded report {0} into Nessus'.format(filename))
data = connect('POST', '/scans/import', data=im_file)
print('Done')
scan_name = data['scan']['name']
print('Successfully imported the scan {0}.'.format(scan_name))
for the_file in os.listdir(destpath):
file_path = os.path.join(destpath, the_file)
if os.path.isfile(file_path):
print("Deleting {0}".format(file_path))
os.unlink(file_path)
print("Logging in...")
token = login(username, password)
print("List of reports...")
rep_list = list_scan()
print_scans(rep_list)
print("Exporting reports...")
export_folder('scans', rep_list)
merge()
#fn = upload('{0}/nss_report/report.nessus_merged'.format(destpath))
fn = upload(os.path.join(destpath, 'nss_report/report.nessus_merged'))
if fn != None:
import_scan(fn)
logout()
| 38.46371 | 119 | 0.542824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,447 | 0.256526 |
d39cdaa5b37326dbdc6348bf90bcf9d2d3630188 | 3,162 | py | Python | examples/generate_index.py | WilliamIX/BioCRNPyler | 737e87fc99510071bb4b1b6141b2043243c25673 | [
"BSD-3-Clause"
] | 2 | 2022-01-12T19:56:25.000Z | 2022-03-07T17:03:46.000Z | examples/generate_index.py | WilliamIX/BioCRNPyler | 737e87fc99510071bb4b1b6141b2043243c25673 | [
"BSD-3-Clause"
] | 41 | 2018-10-13T20:31:49.000Z | 2019-08-14T14:59:55.000Z | examples/generate_index.py | BuildACell/bioCRNpyler | 737e87fc99510071bb4b1b6141b2043243c25673 | [
"BSD-3-Clause"
] | 2 | 2018-10-19T23:46:56.000Z | 2018-11-19T19:31:08.000Z | import sys
import inspect
import biocrnpyler
from os import listdir
from os.path import isfile
from os.path import join
# Get lists of bioCRNpyler objects of different types
species = [
(n, o) for (n, o) in inspect.getmembers(sys.modules["biocrnpyler"])
if inspect.isclass(o) and issubclass(o, biocrnpyler.Species)
]
propensities = [
(n, o) for (n, o) in inspect.getmembers(sys.modules["biocrnpyler"])
if inspect.isclass(o) and issubclass(o, biocrnpyler.Propensity)
]
components = [
(n, o) for (n, o) in inspect.getmembers(sys.modules["biocrnpyler"])
if inspect.isclass(o) and issubclass(o, biocrnpyler.Component)
]
mechanisms = [
(n, o) for (n, o) in inspect.getmembers(sys.modules["biocrnpyler"])
if inspect.isclass(o) and issubclass(o, biocrnpyler.Mechanism)
]
mixtures = [
(n, o) for (n, o) in inspect.getmembers(sys.modules["biocrnpyler"])
if inspect.isclass(o) and issubclass(o, biocrnpyler.Mixture)
]
core_objs = species+propensities+components+mechanisms+mixtures
# Find miscellanious objects
other_objs = []
for (n, o) in inspect.getmembers(sys.modules["biocrnpyler"]):
if inspect.isclass(o) and (n, o) not in core_objs and "biocrn" in str(o):
other_objs.append((n, o))
all_objs = core_objs + other_objs
# dictionary stores the first .ipynb the object appears in
first_used = {c[0]: None for c in all_objs}
# paths to search through
paths = [".", "Specialized Tutorials"]
for path in paths:
# find .ipynb files
ipynb_files = [
f for f in listdir(path)
if isfile(join(path, f)) and f.split(".")[-1] == "ipynb"
]
# iterate through files in path
for fname in ipynb_files:
f = open(join(path, fname))
for line in f:
# cross references with biocrnpyler classes
for c in first_used:
if c in line and first_used[c] is None:
if path == ".":
first_used[c] = fname
else:
first_used[c] = path+"/"+fname
f.close()
# Write text
txt = "BioCRNpyler Class\tObject Type\tExample Notebook\n"
# Keep track of which object names have been added to the text
written = {}
# Iterate through different object types
for n, o in species:
if first_used[n] is not None:
txt += f"{n}\tSpecies\t{first_used[n]}\n"
written[n] = True
for n, o in propensities:
if first_used[n] is not None:
txt += f"{n}\tPropensity\t{first_used[n]}\n"
written[n] = True
for n, o in components:
if first_used[n] is not None:
txt += f"{n}\tComponent\t{first_used[n]}\n"
written[n] = True
for n, o in mechanisms:
if first_used[n] is not None:
txt += f"{n}\tMechanism\t{first_used[n]}\n"
written[n] = True
for n, o in mixtures:
if first_used[n] is not None:
txt += f"{n}\tMixture\t{first_used[n]}\n"
written[n] = True
for n in first_used:
if n not in written and first_used[n] is not None:
txt += f"{n}\tOther\t{first_used[n]}\n"
written[n] = True
# Write the file
f = open("0. Tutorial Index.txt", 'w')
f.write(txt)
f.close()
| 29.009174 | 77 | 0.63346 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 802 | 0.253637 |
d39d6f40bb4f840201871e9deed72b467b9e58e6 | 3,998 | py | Python | src/datamgr/datamanager/collection/common/process_nodes/datamodel.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 84 | 2021-06-30T06:20:23.000Z | 2022-03-22T03:05:49.000Z | src/datamgr/datamanager/collection/common/process_nodes/datamodel.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 7 | 2021-06-30T06:21:16.000Z | 2022-03-29T07:36:13.000Z | src/datamgr/datamanager/collection/common/process_nodes/datamodel.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 40 | 2021-06-30T06:21:26.000Z | 2022-03-29T12:42:26.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
from api import dataflow_api, datamanage_api, meta_api
from .base import ProcessNode
logger = logging.getLogger()
class DataModelNode(ProcessNode):
"""
数据模型构建节点
"""
def build(self):
model_config = self.params_template.content
model_name = model_config["model_name"]
project_id = model_config["project_id"]
model_info = self.get_model_info(project_id, model_name)
if model_info is not None:
logger.info(
f"[DataModelNode] DataModel({project_id}:{model_name}) has exist ...."
)
model_id = model_info["model_id"]
if model_info["latest_version"] is None:
logger.info("[DataModelNode] No latest_version, re-publish datamodel.")
datamanage_api.datamodels.release(
{"model_id": model_id, "version_log": "init"}, raise_exception=True
)
return {"model_id": model_id}
data = datamanage_api.datamodels.import_model(
model_config, raise_exception=True
).data
model_id = data["model_id"]
return {"model_id": model_id}
@staticmethod
def get_model_info(project_id, model_name):
data = datamanage_api.datamodels.list(
{"project_id": project_id, "model_name": model_name}, raise_exception=True
).data
if len(data) > 0:
return data[0]
return None
class DataModelInstNode(ProcessNode):
"""
数据模型应用节点
"""
def build(self):
inst_config = self.params_template.content
bk_biz_id = inst_config["bk_biz_id"]
table_name = inst_config["main_table"]["table_name"]
result_table_id = f"{bk_biz_id}_{table_name}"
result_table_info = self.get_result_table_info(result_table_id)
if result_table_info is not None:
logger.info(
f"[DataModelInstNode] Instance({result_table_id}) has exist ...."
)
return {"result_table_id": result_table_id}
data = datamanage_api.generate_datamodel_instance(
inst_config, raise_exception=True
).data
flow_id = data["flow_id"]
dataflow_api.flows.start({"flow_id": flow_id}, raise_exception=True)
return {"result_table_id": result_table_id}
@staticmethod
def get_result_table_info(result_table_id):
data = meta_api.result_tables.retrieve(
{"result_table_id": result_table_id}, raise_exception=True
).data
if "result_table_id" in data:
return data
return None
| 37.018519 | 111 | 0.669085 | 2,523 | 0.620512 | 0 | 0 | 562 | 0.138219 | 0 | 0 | 1,957 | 0.481308 |
d3a01df5042d3b5b926bb9ab84e5cc8b7a1afdf6 | 1,304 | py | Python | tilescraper.py | azaroth42/iiif-harvester | 42202bb2edfbaceab594755b26ee75a81baa7212 | [
"Apache-2.0"
] | 2 | 2015-08-14T07:36:33.000Z | 2019-03-18T00:10:02.000Z | tilescraper.py | azaroth42/iiif-harvester | 42202bb2edfbaceab594755b26ee75a81baa7212 | [
"Apache-2.0"
] | null | null | null | tilescraper.py | azaroth42/iiif-harvester | 42202bb2edfbaceab594755b26ee75a81baa7212 | [
"Apache-2.0"
] | null | null | null | from PIL import Image
import json, StringIO, requests
import time
import robotparser
import re
import sys
host = "http://dlss-dev-azaroth.stanford.edu/"
service = host + "services/iiif/f1rc/"
resp = requests.get(service + "info.json")
js = json.loads(resp.text)
h = js['height']
w = js['width']
img = Image.new("RGB", (w,h), "white")
## Respect tile dimensions of server
tilesize = 1024
if js.has_key('tiles'):
tilesize = js['tiles'][0]['width']
## Introduce baseline crawl delay
delay = 1
## Parse robots.txt
resp = requests.get(host + "/robots.txt")
if resp.status_code == 200:
parser = robotparser.RobotFileParser()
parser.parse(resp.text)
okay = parser.can_fetch("*", service)
if not okay:
print "Blocked by robots.txt"
sys.exit()
# No support for Crawl-delay extension ... just search
cd = re.compile("Crawl-delay: ([0-9]+)")
m = cd.search(resp.text)
if m:
delay = int(m.groups()[0])
for x in range(w/tilesize+1):
for y in range(h/tilesize+1):
region = "%s,%s,%s,%s" % (x*tilesize, y*tilesize, tilesize, tilesize)
tileresp = requests.get(service + ("/%s/full/0/default.jpg" % region))
tile = Image.open(StringIO.StringIO(tileresp.content))
img.paste(tile, (x*tilesize,y*tilesize))
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(delay)
img.save("full.jpg")
| 25.076923 | 72 | 0.680982 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 373 | 0.286043 |
d3a16d8c155d791b21ae38f094534bf735e04c1d | 3,353 | py | Python | lab09/fusion_gui.py | chinesefirewall/Robotics | 4afec3e12f88b0a92cf870ec62787f8a0682a38a | [
"MIT"
] | 1 | 2020-09-11T12:46:22.000Z | 2020-09-11T12:46:22.000Z | lab09/fusion_gui.py | chinesefirewall/Robotics | 4afec3e12f88b0a92cf870ec62787f8a0682a38a | [
"MIT"
] | null | null | null | lab09/fusion_gui.py | chinesefirewall/Robotics | 4afec3e12f88b0a92cf870ec62787f8a0682a38a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This file imports your code from localization_logic.py and uses class Visualisation from visualisation.py.
Additionally, it imports code from sensor_fusion.py file.
You can change the code at your own risk!
"""
import easygopigo3 as go
import signal
import pyqtgraph as pg
import cv2
import threading
import visualisation
import read_sensors as sensors
import localization_logic as loc
import sys
import sensor_fusion as fusion
# Dictionary for holding positions
positions = fusion.positions # See sensor_fusion.py file for the positions dictionary
def slow_worker():
"""
Slower code
Low update rate is suitable for slow processes, such as image processing, displaying data to graph, etc;
"""
global positions
ret, frame = cap.read()
# Get the blob size and convert it to distance from the wall
keypoints = loc.detect_blobs(frame)
blob_size = loc.get_blob_size(keypoints)
# Save this distance to the positions dictionary
positions['current_cam'] = loc.get_distance_with_cam(blob_size)
# Call the callback function on new camera measurement from sensor_fusion module
fusion.on_camera_measurement(positions['current_cam'])
# Plot the positions and velocities
visual.draw(positions['current_us'], positions['current_enc'], positions['current_cam'],
positions['current_moving_avg_us'], positions['current_complementary'], positions['current_kalman'],
fusion.velocities.velocities['us'], fusion.velocities.velocities['enc'], fusion.velocities.velocities['cam'],
fusion.velocities.velocities['moving_avg_us'], fusion.velocities.velocities['complementary'], fusion.velocities.velocities['kalman'],
fusion.camera_gaussian, fusion.encoder_diff_gaussian, fusion.kalman_filter.filtered_result)
def signal_handler(sig, frame):
"""
This function will be called when CTRL+C is pressed
"""
close('\nYou pressed Ctrl+C! Closing the program nicely :)')
def close(message=""):
"""
Fusion visualisation specific cleanup function
"""
global running, ser, robot, timer
print(message)
running = False
robot.stop()
if ser.is_open:
ser.close()
timer.stop()
if fast_thread.is_alive:
try:
fast_thread.join()
except:
pass
sys.exit(0)
if __name__ == "__main__":
# Register a callback for CTRL+C
signal.signal(signal.SIGINT, signal_handler)
running, ser = sensors.initialize_serial('/dev/ttyUSB0')
robot = go.EasyGoPiGo3()
robot.set_speed(60)
# Open the camera
cap = cv2.VideoCapture(0)
# Create timer
timer = pg.QtCore.QTimer()
# Initialize visualization logic
visual = visualisation.initialize_visualisation(fusion.TASK, close)
# Create fast_worker in a separate thread
fast_thread = threading.Thread(
target=loc.fast_worker,
args=(running,
robot,
positions,
ser,
close)
)
fast_thread.daemon = True
fast_thread.start()
# Connecting slow_worker to timer, it will be executed with certain interval
timer.timeout.connect(slow_worker)
# Start timer with interval 100 msec
timer.start(100)
# Start the visualisation app
visual.run()
close()
| 29.672566 | 149 | 0.698479 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,371 | 0.408888 |
d3a1a2d7710ed6f28bb8bbf86045de75345d5240 | 348 | py | Python | api/mira/api_config.py | tnc-ca-geo/animl-ml | 95aeb1e99fddf7199692144ef3425340d6b8dc3c | [
"MIT"
] | 1 | 2020-03-28T02:10:25.000Z | 2020-03-28T02:10:25.000Z | api/mira/api_config.py | tnc-ca-geo/animl-ml | 95aeb1e99fddf7199692144ef3425340d6b8dc3c | [
"MIT"
] | 46 | 2020-03-18T22:44:30.000Z | 2022-03-12T00:51:44.000Z | api/mira/api_config.py | tnc-ca-geo/animl-ml | 95aeb1e99fddf7199692144ef3425340d6b8dc3c | [
"MIT"
] | null | null | null | """
MIRA API config
"""
MODELS = [
{
"endpoint_name": "mira-large",
"classes": ["fox", "skunk", "empty"]
},
{
"endpoint_name": "mira-small",
"classes": ["rodent", "empty"]
}
]
HEADERS = {
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Headers": "Content-Type",
"Access-Control-Allow-Methods": "POST"
} | 17.4 | 51 | 0.566092 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 241 | 0.692529 |
d3a1d8fc62c8a6f30f970e9bb73197489ee49062 | 702 | py | Python | examples/gpio.py | nzsmith1/CapableRobot_USBHub_Driver | 1ec323c485be25f122ace7be20fccba39cdec517 | [
"MIT"
] | 16 | 2019-07-01T23:47:22.000Z | 2022-02-14T21:16:33.000Z | examples/gpio.py | d-c-d/CapableRobot_USBHub_Driver | 27579ac028bc2e71ce94983c7183d18fc82422a4 | [
"MIT"
] | 2 | 2020-01-08T08:30:39.000Z | 2022-02-23T00:49:09.000Z | examples/gpio.py | d-c-d/CapableRobot_USBHub_Driver | 27579ac028bc2e71ce94983c7183d18fc82422a4 | [
"MIT"
] | 6 | 2020-01-07T15:37:23.000Z | 2022-02-07T08:25:36.000Z | import os, sys, inspect, logging, time
lib_folder = os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0], '..')
lib_load = os.path.realpath(os.path.abspath(lib_folder))
if lib_load not in sys.path:
sys.path.insert(0, lib_load)
import capablerobot_usbhub
hub = capablerobot_usbhub.USBHub()
## Input enabled here on the output so that reading the output's current state works
hub.gpio.configure(ios=[0], output=True, input=True)
hub.gpio.configure(ios=[1], input=True, pull_down=True)
while True:
hub.gpio.io0 = True
print("IO {} {}".format(*hub.gpio.io))
time.sleep(1)
hub.gpio.io0 = False
print("IO {} {}".format(*hub.gpio.io))
time.sleep(1) | 27 | 92 | 0.692308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.153846 |
d3a22369e4aa53efec521f3dd343a0ada49809f4 | 3,545 | py | Python | AdventOfCode2019/day05.py | Matematik411/Advent-of-Code-Practice | f556ae8b84526368184f72a811949ec1fd4b686e | [
"MIT"
] | null | null | null | AdventOfCode2019/day05.py | Matematik411/Advent-of-Code-Practice | f556ae8b84526368184f72a811949ec1fd4b686e | [
"MIT"
] | null | null | null | AdventOfCode2019/day05.py | Matematik411/Advent-of-Code-Practice | f556ae8b84526368184f72a811949ec1fd4b686e | [
"MIT"
] | null | null | null | class Int_code:
def __init__(self, s, inputs):
memory = {}
nrs = map(int, s.split(","))
for i, x in enumerate(nrs):
memory[i] = x
self.memory = memory
self.inputs = inputs
def set(self, i, x):
self.memory[i] = x
def one(self, a, b, c, modes):
if modes % 10 == 0:
a = self.memory[a]
modes //= 10
if modes % 10 == 0:
b = self.memory[b]
self.memory[c] = a + b
def two(self, a, b, c, modes):
if modes % 10 == 0:
a = self.memory[a]
modes //= 10
if modes % 10 == 0:
b = self.memory[b]
self.memory[c] = a * b
def three(self, a, modes):
x = self.inputs.pop(0)
self.memory[a] = x
def four(self, a, modes):
if modes % 10 == 0:
a = self.memory[a]
print(a)
def five(self, a, b, modes):
if modes % 10 == 0:
a = self.memory[a]
modes //= 10
if modes % 10 == 0:
b = self.memory[b]
if a != 0:
return (True, b)
else:
return (False, 0)
def six(self, a, b, modes):
if modes % 10 == 0:
a = self.memory[a]
modes //= 10
if modes % 10 == 0:
b = self.memory[b]
if a == 0:
return (True, b)
else:
return (False, 0)
def seven(self, a, b, c, modes):
if modes % 10 == 0:
a = self.memory[a]
modes //= 10
if modes % 10 == 0:
b = self.memory[b]
self.memory[c] = 1 if (a < b) else 0
def eight(self, a, b, c, modes):
if modes % 10 == 0:
a = self.memory[a]
modes //= 10
if modes % 10 == 0:
b = self.memory[b]
self.memory[c] = 1 if (a == b) else 0
def run(self, start):
i = start
while True:
c = self.memory[i]
modes = c // 100
c %= 100
# print(i, self.memory[i])
if c == 99:
break
elif c == 1:
self.one(self.memory[i+1], self.memory[i+2], self.memory[i+3], modes)
i += 4
elif c == 2:
self.two(self.memory[i+1], self.memory[i+2], self.memory[i+3], modes)
i += 4
elif c == 3:
self.three(self.memory[i+1], modes)
i += 2
elif c == 4:
self.four(self.memory[i+1], modes)
i += 2
elif c == 5:
sol = self.five(self.memory[i+1], self.memory[i+2], modes)
if sol[0]:
i = sol[1]
else:
i += 3
elif c == 6:
sol = self.six(self.memory[i+1], self.memory[i+2], modes)
if sol[0]:
i = sol[1]
else:
i += 3
elif c == 7:
self.seven(self.memory[i+1], self.memory[i+2], self.memory[i+3], modes)
i += 4
elif c == 8:
self.eight(self.memory[i+1], self.memory[i+2], self.memory[i+3], modes)
i += 4
return self.memory[0]
start = input()
# part one
inputs_1 = [1]
computer = Int_code(start, inputs_1)
computer.run(0)
# part two
inputs_2 = [5]
computer = Int_code(start, inputs_2)
computer.run(0)
# # test
# inputs = [3]
# computer = Int_code(start, inputs)
# computer.run(0)
| 27.695313 | 87 | 0.414386 | 3,287 | 0.927221 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.034979 |
d3a2ec6e173d0fb72a68a3b63d9761d99c77b5bc | 175 | py | Python | src/misc/exceptions.py | KirtusJ/BirdBot | 4440364caefa6ec9acf1bc7cf38605b1d90de20e | [
"MIT"
] | null | null | null | src/misc/exceptions.py | KirtusJ/BirdBot | 4440364caefa6ec9acf1bc7cf38605b1d90de20e | [
"MIT"
] | null | null | null | src/misc/exceptions.py | KirtusJ/BirdBot | 4440364caefa6ec9acf1bc7cf38605b1d90de20e | [
"MIT"
] | null | null | null | from discord.ext import commands
class NotOwner(commands.CheckFailure):
pass
class NotModerator(commands.CheckFailure):
pass
class Blacklisted(commands.CheckFailure):
pass | 21.875 | 42 | 0.834286 | 139 | 0.794286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d3a3634cf150b07141931e314ae22846c615fab5 | 1,012 | py | Python | setup.py | HurricaneLabs/python-pycinga | 52b04367c3948cac11ee77d466df6222d0d30034 | [
"MIT"
] | 4 | 2018-02-08T21:19:04.000Z | 2020-12-17T23:31:57.000Z | setup.py | HurricaneLabs/python-pycinga | 52b04367c3948cac11ee77d466df6222d0d30034 | [
"MIT"
] | 3 | 2018-03-05T17:15:24.000Z | 2019-06-28T14:17:55.000Z | setup.py | HurricaneLabs/python-pycinga | 52b04367c3948cac11ee77d466df6222d0d30034 | [
"MIT"
] | 2 | 2019-08-25T17:14:27.000Z | 2020-05-31T09:47:27.000Z | from setuptools import setup
# Get the long description by reading the README
try:
readme_content = open("README.rst").read()
except Exception as e:
readme_content = ""
# Create the actual setup method
setup(
name="pycinga",
version="1.0.0",
description="Python library to write Icinga plugins.",
long_description=readme_content,
author="Steve McMaster",
author_email="mcmaster@hurricanelabs.com",
maintainer="Steve McMaster",
maintainer_email="mcmaster@hurricanelabs.com",
url="https://github.com/hurricanelabs/python-pycinga",
license="MIT License",
keywords=["nagios", "pynagios", "icinga", "pycinga", "monitoring"],
packages=["pycinga"],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: System :: Systems Administration"
]
)
| 31.625 | 71 | 0.667984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 584 | 0.577075 |
d3a3af9d53824b3acab405765cd51062c38ff21a | 4,177 | py | Python | fluorelax/fluorelax.py | darianyang/fluorelax | 4ca816aa157f23c84eb4cc6085200668723d1426 | [
"BSD-3-Clause"
] | null | null | null | fluorelax/fluorelax.py | darianyang/fluorelax | 4ca816aa157f23c84eb4cc6085200668723d1426 | [
"BSD-3-Clause"
] | null | null | null | fluorelax/fluorelax.py | darianyang/fluorelax | 4ca816aa157f23c84eb4cc6085200668723d1426 | [
"BSD-3-Clause"
] | 1 | 2022-03-27T18:24:49.000Z | 2022-03-27T18:24:49.000Z | """
Main call.
TODO:
- parallize the mda processing portion? (dask)
"""
import numpy as np
import matplotlib.pyplot as plt
import MDAnalysis as mda
from command_line import create_cmd_arguments, handle_command_line
from calc_relax import Calc_19F_Relaxation
from calc_fh_dists import Calc_FH_Dists
from plot_relax import Plot_Relaxation
# if python file is being used
if __name__ == '__main__':
    # Hard-coded experiment constants (TODO: expose as command-line args).
    magnet = 14.1 # Tesla (600 MHz of 1H+)
    tc = 8.2e-9 # 8.2ns for CypA, tc in sec

    """
    Command line
    """
    # Create command line arguments with argparse
    argument_parser = create_cmd_arguments()
    # Retrieve list of args
    args = handle_command_line(argument_parser)

    # TODO: hack for now, later put as seperate args?
    # CSA tensor principal components for each fluorinated Trp system.
    if args.system == "w4f":
        sgm11 = 11.2
        sgm22 = -48.3
        sgm33 = -112.8
    elif args.system == "w5f":
        sgm11 = 4.8
        sgm22 = -60.5
        sgm33 = -86.1
    elif args.system == "w6f":
        sgm11 = 12.9
        sgm22 = -51.2
        sgm33 = -91.6
    elif args.system == "w7f":
        sgm11 = 4.6
        sgm22 = -48.3
        sgm33 = -123.3
    else:
        # Fail fast: without this branch an unrecognized system name would
        # surface later as a confusing NameError when sgm11 is first used.
        raise ValueError(
            "Unknown system '{}'; expected one of: w4f, w5f, w6f, w7f".format(args.system)
        )

    """
    Load trajectory or pdb data and calc all F-H distances.
    # TODO: do for each frame, also test with water
    """
    # TODO: for big trajectories, can't load in_memory, must stream it but this can be slow
    traj = mda.Universe(args.parm, args.crd, in_memory=True, in_memory_step=args.step_size)
    fh_dist_base = Calc_FH_Dists(traj, dist=3).run()

    """
    For each distance value, calculate the R1 and R2 value.
    """
    # TODO: update to ndarrays, maybe make into function, seperate script?
    # test speed and optimize
    # TODO: make this able to take multiple files and find stdev, maybe a seperate proc function
    # array of size frames x 3 columns (frame, avg R1, avg R2) # TODO: add stdev?
    r1_r2 = np.zeros(shape=(len(fh_dist_base.results[:,1:]), 3))
    r1_r2[:, 0] = fh_dist_base.results[:,0]
    # The CSA contribution does not depend on the F-H distance, so it is
    # computed once per frame; only the dipole-dipole terms are summed over
    # the individual F-H distances.
    # note this new implementation is alot slower... (compared to having just one calc_relax and averaging later)
    # but not sure, didn't test the difference
    for num, dists in enumerate(fh_dist_base.results[:,1:]):
        calc_relax = Calc_19F_Relaxation(tc, magnet, sgm11, sgm22, sgm33)
        r1_csa = calc_relax.calc_csa_r1()
        r2_csa = calc_relax.calc_csa_r2()
        # TODO: these are relatively small lists, may not need to change to ndarray
        # but if I do, then I need to cut out the NaN or zero values before the np.mean step
        r1_dd = 0
        r2_dd = 0
        for fh_dist in dists:
            # Zero entries are padding for frames with fewer contacts; skip.
            if fh_dist == 0:
                continue # TODO: is there a better way to do this?
            # instantiate the calc_relax class and then call individual class methods
            calc_relax = Calc_19F_Relaxation(tc, magnet, sgm11, sgm22, sgm33, fh_dist)
            # sum each dd contribution
            r1_dd += calc_relax.calc_dd_r1()
            r2_dd += calc_relax.calc_dd_r2()
        # fill in col 1 (R1), col 2 (R2)
        r1_r2[num, 1] = r1_dd + r1_csa
        r1_r2[num, 2] = r2_dd + r2_csa
        # test seperate values
        print(r1_dd, r1_csa)
        print(r2_dd, r2_csa)

    """
    Save the frame, avg and stdev R1 and R2 data as a tsv?
    """
    if args.output_file is not None:
        np.savetxt(args.output_file, r1_r2, delimiter="\t")

    """
    Plot the R1 and R2 data.
    """
    # plt.plot(fh_dist_base.results[:,0], r1_r2[:,0])
    # plt.plot(fh_dist_base.results[:,0], r1_r2[:,1])
    plt.plot(r1_r2[:, 0], r1_r2[:, 1])
    plt.plot(r1_r2[:, 0], r1_r2[:, 2])
    print(f"R1-AVG={np.mean(r1_r2[:,1])}\nR2-AVG={np.mean(r1_r2[:,2])}")
    #plt.hlines(1.99, xmin=0, xmax=fh_dist_base.results[-1,0]) # R1
    #plt.hlines(109.1, xmin=0, xmax=fh_dist_base.results[-1,0]) # R2
    plt.show()
    # plotter class
    # plotter = Plot_Relaxation(r1_r2, "dist")
    # plotter.plot_r2()
    # plt.show()
| 33.95935 | 113 | 0.620541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,036 | 0.487431 |
d3a43004f9dd4f6906f4f5c48fff5899dc1d9936 | 6,404 | py | Python | neural/neural.py | pqn/neural | 505d8fb1c58868a7292c40caab4a22b577615886 | [
"BSD-3-Clause"
] | 2 | 2016-12-21T04:53:31.000Z | 2016-12-21T04:53:48.000Z | neural/neural.py | pqn/neural | 505d8fb1c58868a7292c40caab4a22b577615886 | [
"BSD-3-Clause"
] | null | null | null | neural/neural.py | pqn/neural | 505d8fb1c58868a7292c40caab4a22b577615886 | [
"BSD-3-Clause"
] | 1 | 2020-05-24T13:07:48.000Z | 2020-05-24T13:07:48.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
from scipy.optimize import fmin_bfgs
class NeuralNetwork:
    """A simple feed-forward neural network.

    The network has an input layer, an arbitrary number of hidden layers and
    an output layer, and is trained by minimizing a (optionally regularized)
    cross-entropy cost with BFGS via ``scipy.optimize.fmin_bfgs``.
    """
    def __init__(self, hidden_layers=(25,), reg_lambda=0, num_labels=2):
        """Instantiates the class.

        hidden_layers -- sizes of the hidden layers, one entry per layer.
        reg_lambda    -- L2 regularization strength; 0 disables it.
        num_labels    -- number of output classes; binary problems collapse
                         to a single sigmoid output unit.
        """
        self.__hidden_layers = tuple(hidden_layers)
        self.__lambda = reg_lambda
        # Binary classification uses one output unit, multiclass uses one
        # unit per label (one-hot targets built in train()).
        if num_labels > 2:
            self.__num_labels = num_labels
        else:
            self.__num_labels = 1
    def train(self, training_set, iterations=500):
        """Trains itself using the sequence data.

        ``training_set`` is either a sequence of (features, label) examples,
        or a two-item (X, y) pair -- presumably labels are integer class
        indices when there is more than one output unit (TODO confirm with
        callers). ``iterations`` caps the BFGS iteration count.
        """
        # Build the design matrix X and target matrix y from either format.
        if len(training_set) > 2:
            self.__X = np.matrix([example[0] for example in training_set])
            if self.__num_labels == 1:
                self.__y = np.matrix([example[1] for example in training_set]).reshape((-1, 1))
            else:
                # One-hot encode the integer labels.
                eye = np.eye(self.__num_labels)
                self.__y = np.matrix([eye[example[1]] for example in training_set])
        else:
            self.__X = np.matrix(training_set[0])
            if self.__num_labels == 1:
                self.__y = np.matrix(training_set[1]).reshape((-1, 1))
            else:
                eye = np.eye(self.__num_labels)
                self.__y = np.matrix([eye[index] for sublist in training_set[1] for index in sublist])
        self.__m = self.__X.shape[0]
        self.__input_layer_size = self.__X.shape[1]
        # Layer sizes: input, hidden..., output.
        self.__sizes = [self.__input_layer_size]
        self.__sizes.extend(self.__hidden_layers)
        self.__sizes.append(self.__num_labels)
        # Random symmetric-interval initialization of each theta matrix,
        # scaled by sqrt(6)/sqrt(fan_in + fan_out).
        initial_theta = []
        for count in range(len(self.__sizes) - 1):
            epsilon = np.sqrt(6) / np.sqrt(self.__sizes[count]+self.__sizes[count+1])
            initial_theta.append(np.random.rand(self.__sizes[count+1],self.__sizes[count]+1)*2*epsilon-epsilon)
        initial_theta = self.__unroll(initial_theta)
        # Minimize the cost; fmin_bfgs works on the flat parameter vector.
        self.__thetas = self.__roll(fmin_bfgs(self.__cost_function, initial_theta, fprime=self.__cost_grad_function, maxiter=iterations))
    def predict(self, X):
        """Returns predictions of input test cases.

        For multiclass networks this is the argmax class index per row; for
        binary networks it is the rounded sigmoid output (0 or 1).
        """
        return self.__cost(self.__unroll(self.__thetas), 0, np.matrix(X))
    def __cost_function(self, params):
        """Cost function used by fmin_bfgs."""
        return self.__cost(params, 1, self.__X)
    def __cost_grad_function(self, params):
        """Cost gradient used by fmin_bfgs."""
        return self.__cost(params, 2, self.__X)
    def __cost(self, params, phase, X):
        """Computes activation, cost function, and derivative.

        phase 0 -- forward pass only, return predictions for X;
        phase 1 -- return the scalar cost J;
        phase 2 -- return the unrolled gradient (backpropagation).
        """
        params = self.__roll(params)
        # --- Forward propagation ---
        a = np.concatenate((np.ones((X.shape[0], 1)), X), axis=1) # This is a1
        calculated_a = [a] # a1 is at index 0, a_n is at index n-1
        calculated_z = [0] # There is no z1, z_n is at index n-1
        for i, theta in enumerate(params): # calculated_a now contains a1, a2, a3 if there was only one hidden layer (two theta matrices)
            z = calculated_a[-1] * theta.transpose() # z_n = a_n-1 * Theta_n-1'
            calculated_z.append(z) # Save the new z_n
            a = self.sigmoid(z) # a_n = sigmoid(z_n)
            if i != len(params) - 1: # Don't append extra ones for the output layer
                a = np.concatenate((np.ones((a.shape[0], 1)), a), axis=1) # Append the extra column of ones for all other layers
            calculated_a.append(a) # Save the new a
        if phase == 0:
            if self.__num_labels > 1:
                return np.argmax(calculated_a[-1], axis=1)
            return np.round(calculated_a[-1])
        # --- Cross-entropy cost ---
        J = np.sum(-np.multiply(self.__y, np.log(calculated_a[-1]))-np.multiply(1-self.__y, np.log(1-calculated_a[-1])))/self.__m; # Calculate cost
        if self.__lambda != 0: # If we're using regularization...
            J += np.sum([np.sum(np.power(theta[:,1:], 2)) for theta in params])*self.__lambda/(2.0*self.__m) # ...add it from all theta matrices (bias column excluded)
        if phase == 1:
            return J
        # --- Backpropagation (phase 2) ---
        reversed_d = []
        reversed_theta_grad = []
        for i in range(len(params)): # For once per theta matrix...
            if i == 0: # ...if it's the first one...
                d = calculated_a[-1] - self.__y # ...initialize the error...
            else: # ...otherwise d_n-1 = d_n * Theta_n-1[missing ones] .* sigmoid(z_n-1)
                d = np.multiply(reversed_d[-1]*params[-i][:,1:], self.sigmoid_grad(calculated_z[-1-i])) # With i=1/1 hidden layer we're getting Theta2 at index -1, and z2 at index -2
            reversed_d.append(d)
            theta_grad = reversed_d[-1].transpose() * calculated_a[-i-2] / self.__m
            if self.__lambda != 0:
                theta_grad += np.concatenate((np.zeros((params[-1-i].shape[0], 1)), params[-1-i][:,1:]), axis=1) * self.__lambda / self.__m  # regularization
            reversed_theta_grad.append(theta_grad)
        theta_grad = self.__unroll(reversed(reversed_theta_grad))
        return theta_grad
    def __roll(self, unrolled):
        """Converts parameter array back into matrices.

        The flat vector is sliced according to self.__sizes; each slice is
        reshaped into the (out_size, in_size+1) theta matrix for that layer.
        """
        rolled = []
        index = 0
        for count in range(len(self.__sizes) - 1):
            in_size = self.__sizes[count]
            out_size = self.__sizes[count+1]
            theta_unrolled = np.matrix(unrolled[index:index+(in_size+1)*out_size])
            theta_rolled = theta_unrolled.reshape((out_size, in_size+1))
            rolled.append(theta_rolled)
            index += (in_size + 1) * out_size
        return rolled
    def __unroll(self, rolled):
        """Converts parameter matrices into an array (flat 1-D vector)."""
        return np.array(np.concatenate([matrix.flatten() for matrix in rolled], axis=1)).reshape(-1)
    def sigmoid(self, z):
        """Sigmoid function to emulate neuron activation."""
        return 1.0 / (1.0 + np.exp(-z))
    def sigmoid_grad(self, z):
        """Gradient of sigmoid function."""
        return np.multiply(self.sigmoid(z), 1-self.sigmoid(z))
    def grad(self, params, epsilon=0.0001):
        """Used to check gradient estimation through slope approximation.

        Central finite differences: perturb each parameter by +/- epsilon
        and measure the cost slope. Intended as a debugging check against
        the analytic gradient from backpropagation.
        """
        grad = []
        for x in range(len(params)):
            temp = np.copy(params)
            temp[x] += epsilon
            temp2 = np.copy(params)
            temp2[x] -= epsilon
            grad.append((self.__cost_function(temp)-self.__cost_function(temp2))/(2*epsilon))
        return np.array(grad)
d3a54274149fad5dcc05653ba4572f66ebc4a288 | 4,734 | py | Python | tests/test_beacon_client.py | HikaruG/spectroscope | fa132044b95ca7e3a93d56f51ae80af743b9e8a3 | [
"Apache-2.0"
] | null | null | null | tests/test_beacon_client.py | HikaruG/spectroscope | fa132044b95ca7e3a93d56f51ae80af743b9e8a3 | [
"Apache-2.0"
] | 3 | 2021-07-26T10:03:12.000Z | 2021-07-26T11:48:47.000Z | tests/test_beacon_client.py | HikaruG/spectroscope | fa132044b95ca7e3a93d56f51ae80af743b9e8a3 | [
"Apache-2.0"
] | 1 | 2021-07-19T07:44:08.000Z | 2021-07-19T07:44:08.000Z | import unittest
from unittest.mock import Mock, call
from ethereumapis.v1alpha1 import beacon_chain_pb2, validator_pb2
from spectroscope.clients.beacon_client import BeaconChainStreamer
from spectroscope.model import ChainTimestamp, Event, ValidatorIdentity
from spectroscope.model.update import (
ValidatorBalanceUpdate,
ValidatorStatusUpdate,
UpdateBatch,
)
from spectroscope.module import ConfigOption, Module, Plugin, Subscriber
from typing import List, Type, Union
def TestModuleFactory(
    module_type: Union[Type[Plugin], Type[Subscriber]],
    init: Mock = None,
    consume: Mock = None,
    consumed_types: List[Event] = None,
):
    """Build a throwaway Module subclass whose hooks delegate to mocks.

    Args:
        module_type: the Plugin or Subscriber base class to derive from.
        init: mock invoked from ``__init__`` (fresh Mock when omitted).
        consume: mock invoked from ``consume`` (fresh Mock when omitted).
        consumed_types: event types the module declares as consumed.
    """
    # The previous defaults (``Mock()`` and ``[]``) were evaluated once at
    # definition time and therefore shared between every factory call; build
    # fresh defaults per invocation instead.
    if init is None:
        init = Mock()
    if consume is None:
        consume = Mock()
    if consumed_types is None:
        consumed_types = []

    class TestModule(module_type):
        config_options: List[ConfigOption] = []
        _consumed_types: List[Event] = consumed_types

        def __init__(self, **kwargs):
            init(**kwargs)

        @classmethod
        def register(cls, **kwargs):
            return cls(**kwargs)

        def consume(self, updates: List[Event]):
            return consume(updates)

    return TestModule
def TestPluginFactory(*a, **kw):
    """Shorthand for ``TestModuleFactory(Plugin, ...)``."""
    return TestModuleFactory(Plugin, *a, **kw)


def TestSubscriberFactory(*a, **kw):
    """Shorthand for ``TestModuleFactory(Subscriber, ...)``."""
    return TestModuleFactory(Subscriber, *a, **kw)
class BeaconChainStreamerTest(unittest.TestCase):
    """Unit tests for BeaconChainStreamer using a mocked gRPC stub."""
    def setUp(self):
        # Stub whose StreamValidatorsInfo yields no responses by default.
        self.stub_mock = Mock()
        self.stub_mock.StreamValidatorsInfo.return_value = []
    def tearDown(self):
        pass
    def _assert_validator_change_set(self, generator, validator_set):
        # Helper: the request generator must emit exactly one
        # SET_VALIDATOR_KEYS message containing validator_set, then stop.
        self.assertEqual(
            next(generator),
            beacon_chain_pb2.ValidatorChangeSet(
                action=beacon_chain_pb2.SET_VALIDATOR_KEYS,
                public_keys=validator_set,
            ),
        )
        with self.assertRaises(StopIteration):
            next(generator)
    def test_generate_messages(self):
        validator_set = [bytes.fromhex("a" * 96)]
        bcs = BeaconChainStreamer(self.stub_mock, [])
        bcs.add_validators(validator_set)
        bcs.stream()
        self.stub_mock.StreamValidatorsInfo.assert_called_once()
        vcs_generator = self.stub_mock.StreamValidatorsInfo.call_args[0][0]
        self._assert_validator_change_set(vcs_generator, validator_set)
    def test_stream_messages_e2e(self):
        # End-to-end: a streamed ValidatorInfo should reach the subscriber
        # as one UpdateBatch with status and balance updates.
        consume_mock = Mock()
        consume_mock.return_value = []
        subscriber = TestSubscriberFactory(
            consume=consume_mock,
            consumed_types=[ValidatorStatusUpdate, ValidatorBalanceUpdate],
        )
        plugin = TestPluginFactory()
        self.stub_mock.StreamValidatorsInfo.return_value = [
            validator_pb2.ValidatorInfo(
                public_key=bytes.fromhex("a" * 96),
                index=60,
                balance=300,
                epoch=123,
                status=2,
            )
        ]
        bcs = BeaconChainStreamer(self.stub_mock, [(subscriber, {}), (plugin, {})])
        bcs.stream()
        consume_mock.assert_called_once_with(
            UpdateBatch(
                validator=ValidatorIdentity(idx=60, pubkey=bytes.fromhex("a" * 96)),
                timestamp=ChainTimestamp(epoch=123, slot=0, timestamp=None),
                updates=[
                    ValidatorStatusUpdate(status=2),
                    ValidatorBalanceUpdate(balance=300),
                ],
            )
        )
    def test_subscriber_activation(self):
        # Registering a subscriber module must instantiate it.
        init_mock = Mock()
        module_set = [(TestSubscriberFactory(init=init_mock), {})]
        bcs = BeaconChainStreamer(self.stub_mock, module_set)
        init_mock.assert_called_once()
    def test_plugin_activation(self):
        # Registering a plugin module must instantiate it.
        init_mock = Mock()
        module_set = [(TestPluginFactory(init=init_mock), {})]
        bcs = BeaconChainStreamer(self.stub_mock, module_set)
        init_mock.assert_called_once()
    def test_module_rejection(self):
        # The bare Module base class is neither Plugin nor Subscriber and
        # must be rejected.
        module_set = [(Module, {})]
        with self.assertRaises(TypeError):
            bcs = BeaconChainStreamer(self.stub_mock, module_set)
    def test_add_remove_validators(self):
        # validator_set tracks additions/removals; removing an absent key
        # raises KeyError.
        validator_one = bytes.fromhex("a" * 96)
        validator_two = bytes.fromhex("b" * 96)
        bcs = BeaconChainStreamer(self.stub_mock, [])
        bcs.add_validators([validator_one])
        self.assertSetEqual(bcs.validator_set, set([validator_one]))
        bcs.add_validators([validator_two])
        self.assertSetEqual(bcs.validator_set, set([validator_one, validator_two]))
        bcs.remove_validators([validator_one])
        self.assertSetEqual(bcs.validator_set, set([validator_two]))
        bcs.remove_validators([validator_two])
        self.assertSetEqual(bcs.validator_set, set())
        with self.assertRaises(KeyError):
            bcs.remove_validators([validator_one])
| 34.554745 | 84 | 0.649556 | 3,889 | 0.821504 | 0 | 0 | 82 | 0.017322 | 0 | 0 | 15 | 0.003169 |
d3a5d6c9ee0959d8b6a152abcf1fe990458d7e86 | 1,519 | py | Python | src/box.py | ertugrullKara/GarbageDetector | 68e267e4036ef05c603c1193462b5230b6751857 | [
"MIT"
] | 1 | 2019-05-21T01:06:42.000Z | 2019-05-21T01:06:42.000Z | src/box.py | ertugrullKara/GarbageDetector | 68e267e4036ef05c603c1193462b5230b6751857 | [
"MIT"
] | 8 | 2020-01-28T21:41:58.000Z | 2022-03-11T23:17:50.000Z | src/box.py | ertugrullKara/GarbageDetector | 68e267e4036ef05c603c1193462b5230b6751857 | [
"MIT"
] | null | null | null | import numpy as np
class BoundBox:
    """Single detection bounding box with per-class probabilities.

    Adopted from https://github.com/thtrieu/darkflow/blob/master/darkflow/utils/box.py
    """
    def __init__(self, obj_prob, probs=None, box_coord=(0.0, 0.0, 0.0, 0.0)):
        """Store objectness, optional class probabilities and coordinates.

        ``box_coord`` is an (x, y, w, h) sequence. The default is an
        immutable tuple: the original mutable-list default was shared
        between all calls (classic mutable-default pitfall).
        """
        self.x, self.y = float(box_coord[0]), float(box_coord[1])
        self.w, self.h = float(box_coord[2]), float(box_coord[3])
        self.c = 0.
        self.obj_prob = obj_prob
        self.class_probs = None if probs is None else np.array(probs)
    def get_score(self):
        """Return the highest class probability."""
        return max(self.class_probs)
    def get_classindex(self):
        """Return the index of the most probable class."""
        return np.argmax(self.class_probs)  # class_index = np.argmax(box.classes)
    def get_coordinates(self):
        """Return the (x, y, w, h) tuple."""
        return self.x, self.y, self.w, self.h
def overlap(x1, w1, x2, w2):
    """Length of the 1-D overlap between two center/width intervals.

    Each interval is described by its center coordinate and width. The
    result is negative when the intervals are disjoint.
    """
    left_edge = max(x1 - w1 / 2., x2 - w2 / 2.)
    right_edge = min(x1 + w1 / 2., x2 + w2 / 2.)
    return right_edge - left_edge
def box_intersection(a, b):
    """Area of the intersection of boxes ``a`` and ``b`` (0 if disjoint)."""
    width = overlap(a.x, a.w, b.x, b.w)
    height = overlap(a.y, a.h, b.y, b.h)
    if width < 0 or height < 0:
        return 0
    return width * height
def box_union(a, b):
    """Area of the union of boxes ``a`` and ``b``."""
    shared_area = box_intersection(a, b)
    return a.w * a.h + b.w * b.h - shared_area
def box_iou(a, b):
    """Intersection-over-union ratio of boxes ``a`` and ``b``."""
    shared = box_intersection(a, b)
    total = box_union(a, b)
    return shared / total
def prob_compare(box):
    """Sort key: the box's probability for its assigned class."""
    class_idx = box.class_num
    return box.probs[class_idx]
def prob_compare2(boxa, boxb):
    """Three-way cmp on ``pi`` yielding a descending sort order.

    Returns -1 when ``boxa`` outranks ``boxb``, 0 on a tie, 1 otherwise.
    """
    if boxa.pi > boxb.pi:
        return -1
    if boxa.pi == boxb.pi:
        return 0
    return 1
| 23.015152 | 90 | 0.581962 | 718 | 0.472679 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.109941 |
d3a7986a5b2016d4c1b0a5d77ceeb0476e0f3297 | 904 | py | Python | cplusplus/level1_single_api/4_op_dev/1_custom_op/cmake/util/insert_op_info.py | Dedederek/samples | 31d99de20af2f7046556e0f48c4b789b99e422f8 | [
"Apache-2.0"
] | 5 | 2021-02-26T17:58:12.000Z | 2022-03-15T06:21:28.000Z | cplusplus/level1_single_api/4_op_dev/1_custom_op/cmake/util/insert_op_info.py | Dedederek/samples | 31d99de20af2f7046556e0f48c4b789b99e422f8 | [
"Apache-2.0"
] | null | null | null | cplusplus/level1_single_api/4_op_dev/1_custom_op/cmake/util/insert_op_info.py | Dedederek/samples | 31d99de20af2f7046556e0f48c4b789b99e422f8 | [
"Apache-2.0"
] | 5 | 2021-03-22T21:13:11.000Z | 2021-09-24T06:52:33.000Z | # -*- coding: utf-8 -*-
"""
Created on Feb 28 20:56:45 2020
Copyright (c) Huawei Technologies Co., Ltd. 2019. All rights reserved.
"""
import json
import os
import sys
if __name__ == '__main__':
    # Expect exactly two arguments: the op-info file to insert and the
    # aggregate library op-info file to update in place.
    if len(sys.argv) != 3:
        print(sys.argv)
        # Fixed typo in the usage message ("inert" -> "insert") and use
        # sys.exit rather than the site-provided exit() builtin.
        print('argv error, insert_op_info.py your_op_file lib_op_file')
        sys.exit(2)

    # Load the op definitions that should be merged in.
    with open(sys.argv[1], 'r') as load_f:
        insert_op = json.load(load_f)

    # Start from the existing library content only when the file exists and
    # is non-empty; json.load would fail on an empty file.
    all_op = {}
    if os.path.exists(sys.argv[2]) and os.path.getsize(sys.argv[2]) != 0:
        with open(sys.argv[2], 'r') as load_f:
            all_op = json.load(load_f)

    # Merge: new keys are inserted, existing keys are overwritten.
    for k in insert_op:
        if k in all_op:
            print('replace op:[', k, '] success')
        else:
            print('insert op:[', k, '] success')
        all_op[k] = insert_op[k]

    # Write the merged result back to the library file.
    with open(sys.argv[2], 'w') as f:
        f.write(json.dumps(all_op, indent=4))
| 26.588235 | 70 | 0.559735 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 257 | 0.284292 |
d3a81b924210e6b5f2ecf0bea93f3ef9cc255120 | 16,982 | py | Python | tests/contour_extractors/test_utils.py | yamathcy/motif | 3f43568e59f0879fbab5ef278e9e687b7cac3dd6 | [
"MIT"
] | 21 | 2016-08-22T22:00:49.000Z | 2020-03-29T04:15:19.000Z | tests/contour_extractors/test_utils.py | yamathcy/motif | 3f43568e59f0879fbab5ef278e9e687b7cac3dd6 | [
"MIT"
] | 22 | 2016-08-28T01:07:08.000Z | 2018-02-07T14:38:26.000Z | tests/contour_extractors/test_utils.py | yamathcy/motif | 3f43568e59f0879fbab5ef278e9e687b7cac3dd6 | [
"MIT"
] | 3 | 2017-01-12T10:04:27.000Z | 2022-01-06T13:25:48.000Z | """Tests for motif/contour_extractors/utils.py
"""
import unittest
import numpy as np
from motif.contour_extractors import utils
class TestPeakStreamHelper(unittest.TestCase):
    """Tests for utils.PeakStreamHelper on a small salience matrix.

    setUp builds a 5-bin x 3-frame salience matrix with a handful of peaks;
    the property tests below assert the helper's derived attributes, and
    the later tests exercise peak selection, candidate lookup and contour
    streaming on slightly larger fixtures.
    """
    def setUp(self):
        self.S = np.array([
            [0, 0, 0],
            [1, 0, 5],
            [0, 0.002, 1],
            [0.1, 0, 0],
            [0, 0, 0]
        ])
        self.times = np.array([0.0, 0.5, 1.0])
        self.freqs = np.array([10., 100., 150., 200., 300.])
        self.amp_thresh = 0.9
        self.dev_thresh = 0.9
        self.n_gap = 3.234
        self.pitch_cont = 80
        self.psh = utils.PeakStreamHelper(
            self.S, self.times, self.freqs, self.amp_thresh, self.dev_thresh,
            self.n_gap, self.pitch_cont
        )
    def test_S(self):
        expected = np.array([
            [0, 0, 0],
            [1, 0, 5],
            [0, 0.002, 1],
            [0.1, 0, 0],
            [0, 0, 0]
        ])
        actual = self.psh.S
        self.assertTrue(np.allclose(expected, actual))
    def test_S_norm(self):
        expected = np.array([
            [0, 0, 0],
            [1, 0, 1],
            [0, 1.0, 0.2],
            [0.1, 0, 0],
            [0, 0, 0]
        ])
        actual = self.psh.S_norm
        self.assertTrue(np.allclose(expected, actual))
    def test_times(self):
        expected = np.array([0.0, 0.5, 1.0])
        actual = self.psh.times
        self.assertTrue(np.allclose(expected, actual))
    def test_freqs(self):
        # Frequencies are converted to a cents-like scale by the helper.
        expected = np.array([
            0., 3986.31371386, 4688.26871473, 5186.31371386, 5888.26871473
        ])
        actual = self.psh.freqs
        self.assertTrue(np.allclose(expected, actual))
    def test_amp_thresh(self):
        expected = 0.9
        actual = self.psh.amp_thresh
        self.assertEqual(expected, actual)
    def test_dev_thresh(self):
        expected = 0.9
        actual = self.psh.dev_thresh
        self.assertEqual(expected, actual)
    def test_n_gap(self):
        expected = 3.234
        actual = self.psh.n_gap
        self.assertEqual(expected, actual)
    def test_pitch_cont(self):
        expected = 80
        actual = self.psh.pitch_cont
        self.assertEqual(expected, actual)
    def test_n_peaks(self):
        expected = 4
        actual = self.psh.n_peaks
        self.assertEqual(expected, actual)
    def test_peak_index(self):
        expected = np.array([0, 1, 2, 3])
        actual = self.psh.peak_index
        self.assertTrue(np.allclose(expected, actual))
    def test_peak_time_index(self):
        expected = np.array([0, 2, 1, 0])
        actual = self.psh.peak_time_idx
        self.assertTrue(np.allclose(expected, actual))
    def test_first_peak_time_idx(self):
        expected = 0
        actual = self.psh.first_peak_time_idx
        self.assertEqual(expected, actual)
    def test_last_peak_time_idx(self):
        expected = 2
        actual = self.psh.last_peak_time_idx
        self.assertEqual(expected, actual)
    def test_frame_dict(self):
        # Maps frame index -> list of peak indices present in that frame.
        expected = {
            0: [0, 3],
            1: [2],
            2: [1]
        }
        actual = self.psh.frame_dict
        self.assertEqual(expected.keys(), actual.keys())
        for k in actual.keys():
            self.assertTrue(np.allclose(expected[k], actual[k]))
    def test_peak_freqs(self):
        expected = np.array([
            3986.31371386, 3986.31371386, 4688.26871473, 5186.31371386
        ])
        actual = self.psh.peak_freqs
        self.assertTrue(np.allclose(expected, actual))
    def test_peak_amps(self):
        expected = np.array([1., 5., 0.002, 0.1])
        actual = self.psh.peak_amps
        self.assertTrue(np.allclose(expected, actual))
    def test_peak_amps_norm(self):
        expected = np.array([1., 1., 1., 0.1])
        actual = self.psh.peak_amps_norm
        self.assertTrue(np.allclose(expected, actual))
    def test_good_peaks(self):
        expected = set([0, 1])
        actual = self.psh.good_peaks
        self.assertEqual(expected, actual)
    def test_bad_peaks(self):
        expected = set([2, 3])
        actual = self.psh.bad_peaks
        self.assertEqual(expected, actual)
    def test_good_peaks_sorted(self):
        expected = np.array([1, 0])
        actual = self.psh.good_peaks_sorted
        self.assertTrue(np.allclose(expected, actual))
    def test_good_peaks_sorted_index(self):
        expected = {0: 1, 1: 0}
        actual = self.psh.good_peaks_sorted_index
        self.assertEqual(expected, actual)
    def test_good_peaks_sorted_avail(self):
        expected = np.array([True, True])
        actual = self.psh.good_peaks_sorted_avail
        self.assertTrue(np.allclose(expected, actual))
    def test_n_good_peaks(self):
        expected = 2
        actual = self.psh.n_good_peaks
        self.assertTrue(np.allclose(expected, actual))
    def test_smallest_good_peak_idx(self):
        expected = 0
        actual = self.psh.smallest_good_peak_idx
        self.assertEqual(expected, actual)
    def test_get_largest_peak(self):
        S = np.array([
            [0, 0, 0, 0],
            [0, 0.002, 0, 0],
            [1, 0, 5, 0],
            [0, 0.3, 0.1, 0],
            [0.1, 0, 0.2, 0],
            [0, 0.5, 0, 0.2],
            [0, 0, 0, 0]
        ])
        times = np.array([0.05, 0.1, 0.15, 0.2])
        freqs = np.array([97.0, 100.0, 103.0, 105.0, 107.0, 109.0, 112.0])
        psh = utils.PeakStreamHelper(S, times, freqs, 0.9, 0.9, 3.456, 80)
        actual = psh.get_largest_peak()
        expected = 2
        self.assertEqual(expected, actual)
    def test_update_largest_peak_list(self):
        # Consuming peaks should mark them unavailable and advance the
        # pointer to the smallest still-available good peak.
        S = np.array([
            [0, 0, 0, 0],
            [0, 0.002, 0, 0],
            [1, 0, 5, 0],
            [0, 0.3, 0.1, 0],
            [0.1, 0, 0.2, 0],
            [0, 0.5, 0, 0.2],
            [0, 0, 0, 0]
        ])
        times = np.array([0.05, 0.1, 0.15, 0.2])
        freqs = np.array([97.0, 100.0, 103.0, 105.0, 107.0, 109.0, 112.0])
        psh = utils.PeakStreamHelper(S, times, freqs, 0.9, 0.9, 3.456, 80)
        expected_avail = np.array([True, True, True, True])
        actual_avail = psh.good_peaks_sorted_avail
        self.assertTrue(np.allclose(expected_avail, actual_avail))
        expected_smallest_idx = 0
        actual_smallest_idx = psh.smallest_good_peak_idx
        self.assertEqual(expected_smallest_idx, actual_smallest_idx)
        psh.update_largest_peak_list(1)
        expected_avail = np.array([True, False, True, True])
        actual_avail = psh.good_peaks_sorted_avail
        self.assertTrue(np.allclose(expected_avail, actual_avail))
        expected_smallest_idx = 0
        actual_smallest_idx = psh.smallest_good_peak_idx
        self.assertEqual(expected_smallest_idx, actual_smallest_idx)
        psh.update_largest_peak_list(2)
        expected_avail = np.array([False, False, True, True])
        actual_avail = psh.good_peaks_sorted_avail
        self.assertTrue(np.allclose(expected_avail, actual_avail))
        expected_smallest_idx = 2
        actual_smallest_idx = psh.smallest_good_peak_idx
        self.assertEqual(expected_smallest_idx, actual_smallest_idx)
    def test_get_closest_peak(self):
        S = np.array([
            [0, 0, 0, 0],
            [0, 0.002, 0, 0],
            [1, 0, 5, 0],
            [0, 0.3, 0.1, 0],
            [0.1, 0, 0.2, 0],
            [0, 0.5, 0, 0.2],
            [0, 0, 0, 0]
        ])
        times = np.array([0.05, 0.1, 0.15, 0.2])
        freqs = np.array([97.0, 100.0, 103.0, 105.0, 107.0, 109.0, 112.0])
        psh = utils.PeakStreamHelper(S, times, freqs, 0.9, 0.9, 3.456, 80)
        actual = psh.get_closest_peak(237.2, [2, 4, 5])
        expected = 2
        self.assertEqual(expected, actual)
    def test_get_peak_candidates(self):
        S = np.array([
            [0, 0, 0, 0],
            [0, 0.002, 0, 0],
            [1, 0, 5, 0],
            [0, 0.3, 0.1, 0],
            [0.1, 0, 0.2, 0],
            [0, 0.5, 0, 0.2],
            [0, 0, 0, 0]
        ])
        times = np.array([0.05, 0.1, 0.15, 0.2])
        freqs = np.array([97.0, 100.0, 103.0, 105.0, 107.0, 109.0, 112.0])
        psh = utils.PeakStreamHelper(S, times, freqs, 0.9, 0.9, 3.456, 80)
        frame_idx = 0
        f0_val = 4000.0
        expected_cands = [1]
        expected_from_good = True
        actual_cands, actual_from_good = psh.get_peak_candidates(
            frame_idx, f0_val
        )
        self.assertEqual(expected_cands, actual_cands)
        self.assertEqual(expected_from_good, actual_from_good)
    def test_get_peak_candidates2(self):
        # Candidates may come from the "bad" (low-amplitude) peak pool.
        S = np.array([
            [0, 0, 0, 0],
            [0, 0.002, 0, 0],
            [1, 0, 5, 0],
            [0, 0.3, 0.1, 0],
            [0.1, 0, 0.2, 0],
            [0, 0.5, 0, 0.002],
            [0, 0, 0, 0]
        ])
        times = np.array([0.05, 0.1, 0.15, 0.2])
        freqs = np.array([97.0, 100.0, 103.0, 105.0, 107.0, 109.0, 112.0])
        psh = utils.PeakStreamHelper(S, times, freqs, 0.9, 0.9, 3.456, 80)
        frame_idx = 3
        f0_val = 4125.5
        expected_cands = [7]
        expected_from_good = False
        actual_cands, actual_from_good = psh.get_peak_candidates(
            frame_idx, f0_val
        )
        self.assertEqual(expected_cands, actual_cands)
        self.assertEqual(expected_from_good, actual_from_good)
    def test_get_peak_candidates3(self):
        # No candidates when the reference f0 is 0.
        S = np.array([
            [0, 0, 0, 0],
            [0, 0.002, 0, 0],
            [1, 0, 5, 0],
            [0, 0.3, 0.1, 0],
            [0.1, 0, 0.2, 0],
            [0, 0.5, 0, 0.002],
            [0, 0, 0, 0]
        ])
        times = np.array([0.05, 0.1, 0.15, 0.2])
        freqs = np.array([97.0, 100.0, 103.0, 105.0, 107.0, 109.0, 112.0])
        psh = utils.PeakStreamHelper(S, times, freqs, 0.9, 0.9, 3.456, 80)
        frame_idx = 3
        f0_val = 0
        expected_cands = None
        expected_from_good = None
        actual_cands, actual_from_good = psh.get_peak_candidates(
            frame_idx, f0_val
        )
        self.assertEqual(expected_cands, actual_cands)
        self.assertEqual(expected_from_good, actual_from_good)
    def test_get_contour(self):
        # Each get_contour() call appends one contour's peak indices and its
        # length to contour_idx / c_len.
        S = np.array([
            [0, 0, 0, 0],
            [0, 0.002, 0, 0],
            [1, 0, 5, 0],
            [0, 0.3, 0.1, 0],
            [0.1, 0, 0.2, 0],
            [0, 0.5, 0, 0.002],
            [0, 0, 0, 0]
        ])
        times = np.array([0.05, 0.1, 0.15, 0.2])
        freqs = np.array([97.0, 100.0, 103.0, 105.0, 107.0, 109.0, 112.0])
        psh = utils.PeakStreamHelper(S, times, freqs, 0.9, 0.9, 3.456, 80)
        psh.get_contour()
        actual_contour_idx = psh.contour_idx
        expected_contour_idx = [2, 3, 1]
        self.assertEqual(expected_contour_idx, actual_contour_idx)
        actual_c_len = psh.c_len
        expected_c_len = [3]
        self.assertEqual(expected_c_len, actual_c_len)
        psh.get_contour()
        actual_contour_idx = psh.contour_idx
        expected_contour_idx = [2, 3, 1, 6, 5, 7, 4]
        self.assertEqual(expected_contour_idx, actual_contour_idx)
        actual_c_len = psh.c_len
        expected_c_len = [3, 4]
        self.assertEqual(expected_c_len, actual_c_len)
    def test_peak_streaming(self):
        # Full streaming pass: two contours with their times, frequencies
        # and salience values.
        S = np.array([
            [0, 0, 0, 0],
            [0, 0.002, 0, 0],
            [1, 0, 5, 0],
            [0, 0.3, 0.1, 0],
            [0.1, 0, 0.2, 0],
            [0, 0.5, 0, 0.2],
            [0, 0, 0, 0]
        ])
        times = np.array([0.05, 0.1, 0.15, 0.2])
        freqs = np.array([97.0, 100.0, 103.0, 105.0, 107.0, 109.0, 112.0])
        psh = utils.PeakStreamHelper(S, times, freqs, 0.9, 0.9, 3.456, 80)
        expected_c_numbers = np.array([0, 0, 0, 1, 1, 1, 1])
        expected_c_times = np.array([0.15, 0.1, 0.05, 0.1, 0.15, 0.2, 0.05])
        expected_c_freqs = np.array([103., 105., 103., 109., 107., 109., 107.])
        expected_c_sal = np.array([5, 0.3, 1.0, 0.5, 0.2, 0.2, 0.1])
        (actual_c_numbers,
         actual_c_times,
         actual_c_freqs,
         actual_c_sal) = psh.peak_streaming()
        self.assertTrue(np.allclose(expected_c_numbers, actual_c_numbers))
        self.assertTrue(np.allclose(expected_c_times, actual_c_times))
        self.assertTrue(np.allclose(expected_c_freqs, actual_c_freqs))
        self.assertTrue(np.allclose(expected_c_sal, actual_c_sal))
class TestPeakStreamHelperNoPeaks(unittest.TestCase):
    """Degenerate-input tests: a salience matrix with no local peaks.

    Every derived attribute of utils.PeakStreamHelper should be empty (or
    None/zero) when the input contains no peaks at all.
    """
    def setUp(self):
        # Column 1 is all zeros and columns 0/2 are monotone, so no bin is
        # a local maximum anywhere.
        self.S = np.array([
            [0., 0., 0.],
            [1., 0., 1.],
            [2., 0., 1.],
            [3., 0., 1.],
            [4., 0., 1.]
        ])
        self.times = np.array([0.0, 0.5, 1.0])
        self.freqs = np.array([10., 100., 150., 200., 300.])
        self.amp_thresh = 0.9
        self.dev_thresh = 0.9
        self.n_gap = 3.234
        self.pitch_cont = 80
        self.psh = utils.PeakStreamHelper(
            self.S, self.times, self.freqs, self.amp_thresh, self.dev_thresh,
            self.n_gap, self.pitch_cont
        )
    def test_S(self):
        expected = np.array([
            [0., 0., 0.],
            [1., 0., 1.],
            [2., 0., 1.],
            [3., 0., 1.],
            [4., 0., 1.]
        ])
        actual = self.psh.S
        self.assertTrue(np.allclose(expected, actual))
    def test_S_norm(self):
        expected = np.array([
            [0, 0, 0],
            [0.25, 0, 1],
            [0.5, 0, 1],
            [0.75, 0, 1],
            [1, 0, 1]
        ])
        actual = self.psh.S_norm
        self.assertTrue(np.allclose(expected, actual))
    def test_times(self):
        expected = np.array([0.0, 0.5, 1.0])
        actual = self.psh.times
        self.assertTrue(np.allclose(expected, actual))
    def test_freqs(self):
        expected = np.array([
            0., 3986.31371386, 4688.26871473, 5186.31371386, 5888.26871473
        ])
        actual = self.psh.freqs
        self.assertTrue(np.allclose(expected, actual))
    def test_amp_thresh(self):
        expected = 0.9
        actual = self.psh.amp_thresh
        self.assertEqual(expected, actual)
    def test_dev_thresh(self):
        expected = 0.9
        actual = self.psh.dev_thresh
        self.assertEqual(expected, actual)
    def test_n_gap(self):
        expected = 3.234
        actual = self.psh.n_gap
        self.assertEqual(expected, actual)
    def test_pitch_cont(self):
        expected = 80
        actual = self.psh.pitch_cont
        self.assertEqual(expected, actual)
    def test_n_peaks(self):
        expected = 0
        actual = self.psh.n_peaks
        self.assertEqual(expected, actual)
    def test_peak_index(self):
        expected = np.array([])
        actual = self.psh.peak_index
        self.assertTrue(np.allclose(expected, actual))
    def test_peak_time_index(self):
        expected = np.array([])
        actual = self.psh.peak_time_idx
        self.assertTrue(np.allclose(expected, actual))
    def test_first_peak_time_idx(self):
        expected = None
        actual = self.psh.first_peak_time_idx
        self.assertEqual(expected, actual)
    def test_last_peak_time_idx(self):
        expected = None
        actual = self.psh.last_peak_time_idx
        self.assertEqual(expected, actual)
    def test_frame_dict(self):
        expected = {}
        actual = self.psh.frame_dict
        self.assertEqual(expected, actual)
    def test_peak_freqs(self):
        expected = np.array([])
        actual = self.psh.peak_freqs
        self.assertTrue(np.allclose(expected, actual))
    def test_peak_amps(self):
        expected = np.array([])
        actual = self.psh.peak_amps
        self.assertTrue(np.allclose(expected, actual))
    def test_peak_amps_norm(self):
        expected = np.array([])
        actual = self.psh.peak_amps_norm
        self.assertTrue(np.allclose(expected, actual))
    def test_good_peaks(self):
        expected = set()
        actual = self.psh.good_peaks
        self.assertEqual(expected, actual)
    def test_bad_peaks(self):
        expected = set()
        actual = self.psh.bad_peaks
        self.assertEqual(expected, actual)
    def test_good_peaks_sorted(self):
        expected = np.array([])
        actual = self.psh.good_peaks_sorted
        self.assertTrue(np.allclose(expected, actual))
    def test_good_peaks_sorted_index(self):
        expected = {}
        actual = self.psh.good_peaks_sorted_index
        self.assertEqual(expected, actual)
    def test_good_peaks_sorted_avail(self):
        expected = np.array([])
        actual = self.psh.good_peaks_sorted_avail
        self.assertTrue(np.allclose(expected, actual))
    def test_n_good_peaks(self):
        expected = 0
        actual = self.psh.n_good_peaks
        self.assertTrue(np.allclose(expected, actual))
    def test_smallest_good_peak_idx(self):
        expected = 0
        actual = self.psh.smallest_good_peak_idx
        self.assertEqual(expected, actual)
| 31.565056 | 79 | 0.554469 | 16,847 | 0.99205 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.002944 |
d3a9236251a872a4b147f5fa34a60d6f71d1589b | 667 | py | Python | unit_workaround.py | ltfish/nose2 | e47363dad10056cf906daf387613c21d74f37e56 | [
"BSD-2-Clause"
] | null | null | null | unit_workaround.py | ltfish/nose2 | e47363dad10056cf906daf387613c21d74f37e56 | [
"BSD-2-Clause"
] | null | null | null | unit_workaround.py | ltfish/nose2 | e47363dad10056cf906daf387613c21d74f37e56 | [
"BSD-2-Clause"
] | null | null | null | """
Python 2.7 Multiprocessing Unittest workaround.
=====================================================================
Due the manner in which multiprocessing is handled on windows
and the fact that __main__.py are actually called __main__
This workaround bypasses the fact that the calling unittest
script is called __main__
http://bugs.python.org/issue10845
This should be fine for python 3.2+, however, 2.7 and before will
not likely see a fix. This only affects the unittests called by tox.
"""
import unittest
from unittest.main import main, TestProgram, USAGE_AS_MAIN
# Use the __main__-style usage text so help output matches how the tests
# are actually invoked (see the module docstring and bpo-10845).
TestProgram.USAGE = USAGE_AS_MAIN
if __name__ == "__main__":
    # module=None makes unittest discover tests instead of trying to load a
    # module literally named "__main__".
    main(module=None)
| 29 | 69 | 0.704648 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 515 | 0.772114 |
d3a9d2cef0c7d22cdcf1bc095a7461abc0d258d2 | 363 | py | Python | test/test_application.py | ChreSyr/baopig | 6264ab9a851b1ed0a031292abe7f159a53b3fc5e | [
"MIT"
] | null | null | null | test/test_application.py | ChreSyr/baopig | 6264ab9a851b1ed0a031292abe7f159a53b3fc5e | [
"MIT"
] | null | null | null | test/test_application.py | ChreSyr/baopig | 6264ab9a851b1ed0a031292abe7f159a53b3fc5e | [
"MIT"
] | null | null | null | import unittest
import docs as bp
class ApplicationClassTest(unittest.TestCase):
    """Exercises the fps accessor of bp.Application."""

    def setUp(self):
        self.app = bp.Application()

    def test_set_fps(self):
        """Each set_fps() call must be reflected by the fps attribute."""
        for fps in (30, 60):
            self.app.set_fps(fps)
            self.assertEqual(self.app.fps, fps)
if __name__ == '__main__':
    # Allow running this test module directly: python test_application.py
    unittest.main()
| 19.105263 | 46 | 0.650138 | 277 | 0.763085 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.027548 |
d3aa1468d93ea00d98bf717e852d3d27cc0997a2 | 7,969 | py | Python | lib/googlecloudsdk/gcloud/sdktools/root/info.py | IsaacHuang/google-cloud-sdk | 52afa5d1a75dff08f4f5380c5cccc015bf796ca5 | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/gcloud/sdktools/root/info.py | IsaacHuang/google-cloud-sdk | 52afa5d1a75dff08f4f5380c5cccc015bf796ca5 | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/gcloud/sdktools/root/info.py | IsaacHuang/google-cloud-sdk | 52afa5d1a75dff08f4f5380c5cccc015bf796ca5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Google Inc. All Rights Reserved.
"""A command that prints out information about your gcloud environment."""
import os
import StringIO
import sys
import textwrap
from googlecloudsdk.calliope import base
from googlecloudsdk.core import config
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.updater import update_manager
from googlecloudsdk.core.util import platforms
class Info(base.Command):
  """Display information about the current gcloud environment.

  This command displays information about the current gcloud environment.
  """

  @staticmethod
  def Args(parser):
    parser.add_argument('--show-log', action='store_true',
                        help='Print the contents of the last log file.')

  def Run(self, args):
    return InfoHolder()

  def Display(self, args, info):
    log.Print(info)
    # Only dump the log contents when requested and a previous log exists.
    if not (args.show_log and info.logs.last_log):
      return
    log.Print('\nContents of log file: [{0}]\n'
              '==========================================================\n'
              '{1}\n\n'
              .format(info.logs.last_log, info.logs.LastLogContents()))
class InfoHolder(object):
  """Base object to hold all the configuration info."""

  def __init__(self):
    self.basic = BasicInfo()
    self.installation = InstallationInfo()
    self.config = ConfigInfo()
    self.logs = LogsInfo()

  def __str__(self):
    # One newline-terminated section per info object, in a fixed order.
    sections = (self.basic, self.installation, self.config, self.logs)
    return ''.join(str(section) + '\n' for section in sections)
class BasicInfo(object):
  """Holds basic information about your system setup."""

  def __init__(self):
    current_platform = platforms.Platform.Current()
    self.version = config.CLOUD_SDK_VERSION
    self.operating_system = current_platform.operating_system
    self.architecture = current_platform.architecture
    self.python_version = sys.version
    self.site_packages = 'site' in sys.modules

  def __str__(self):
    site_packages_state = 'Enabled' if self.site_packages else 'Disabled'
    one_line_version = self.python_version.replace('\n', ' ')
    return textwrap.dedent("""\
        Google Cloud SDK [{version}]
        Platform: [{os}, {arch}]
        Python Version: [{python_version}]
        Site Packages: [{site_packages}]
        """.format(version=self.version,
                   os=self.operating_system.name,
                   arch=self.architecture.name,
                   python_version=one_line_version,
                   site_packages=site_packages_state))
class InstallationInfo(object):
  """Holds information about your Cloud SDK installation."""
  def __init__(self):
    self.sdk_root = config.Paths().sdk_root
    self.release_channel = config.INSTALLATION_CONFIG.release_channel
    self.repo_url = config.INSTALLATION_CONFIG.snapshot_url
    # Comma-separated property; normalized to a (possibly empty) list.
    repos = properties.VALUES.component_manager.additional_repositories.Get()
    self.additional_repos = repos.split(',') if repos else []
    self.path = os.environ.get('PATH', '')
    if self.sdk_root:
      manager = update_manager.UpdateManager()
      self.components = manager.GetCurrentVersionsInformation()
      self.old_tool_paths = manager.FindAllOldToolsOnPath()
      # Compare realpaths so symlinked PATH entries still match the SDK's
      # bin directory.
      paths = [os.path.realpath(p) for p in self.path.split(os.pathsep)]
      this_path = os.path.realpath(
          os.path.join(self.sdk_root,
                       update_manager.UpdateManager.BIN_DIR_NAME))
      # TODO(user): Validate symlinks in /usr/local/bin when we start
      # creating them.
      self.on_path = this_path in paths
    else:
      # No SDK root found: fall back to empty installation data.
      self.components = {}
      self.old_tool_paths = []
      self.on_path = False
  def __str__(self):
    out = StringIO.StringIO()
    out.write('Installation Root: [{0}]\n'.format(
        self.sdk_root if self.sdk_root else 'Unknown'))
    # Channel/repo details are only interesting off the default channel.
    if config.INSTALLATION_CONFIG.IsAlternateReleaseChannel():
      out.write('Release Channel: [{0}]\n'.format(self.release_channel))
      out.write('Repository URL: [{0}]\n'.format(self.repo_url))
    if self.additional_repos:
      out.write('Additional Repositories:\n  {0}\n'.format(
          '\n  '.join(self.additional_repos)))
    if self.components:
      components = ['{0}: [{1}]'.format(name, value) for name, value in
                    self.components.iteritems()]
      out.write('Installed Components:\n  {0}\n'.format(
          '\n  '.join(components)))
    out.write('System PATH: [{0}]\n'.format(self.path))
    out.write('Cloud SDK on PATH: [{0}]\n'.format(self.on_path))
    # Old gcloud tools shadowing the current install is a common user issue;
    # surface it prominently.
    if self.old_tool_paths:
      out.write('\nWARNING: There are old versions of the Google Cloud '
                'Platform tools on your system PATH.\n  {0}\n'
                .format('\n  '.join(self.old_tool_paths)))
    return out.getvalue()
class ConfigInfo(object):
  """Holds information about where config is stored and what values are set."""
  def __init__(self):
    self.paths = config.Paths()
    self.account = properties.VALUES.core.account.Get()
    self.project = properties.VALUES.core.project.Get()
    # Mapping of section name -> {property name: value}.
    self.properties = properties.VALUES.AllValues()
  def __str__(self):
    out = StringIO.StringIO()
    out.write(textwrap.dedent("""\
        Installation Properties: [{installation_properties}]
        User Config Directory: [{global_config}]
        User Properties: [{user_properties}]
        Current Workspace: [{workspace}]
        Workspace Config Directory: [{workspace_config}]
        Workspace Properties: [{workspace_properties}]
        Account: [{account}]
        Project: [{project}]
        """.format(
            installation_properties=self.paths.installation_properties_path,
            global_config=self.paths.global_config_dir,
            user_properties=self.paths.user_properties_path,
            workspace=self.paths.workspace_dir,
            workspace_config=self.paths.workspace_config_dir,
            workspace_properties=self.paths.workspace_properties_path,
            account=self.account,
            project=self.project)))
    # Dump every set property, grouped by section.
    out.write('Current Properties:\n')
    for section, props in self.properties.iteritems():
      out.write('  [{section}]\n'.format(section=section))
      for name, value in props.iteritems():
        out.write('    {name}: [{value}]\n'.format(
            name=name, value=value))
    return out.getvalue()
class LogsInfo(object):
  """Holds information about where logs are located."""

  def __init__(self):
    paths = config.Paths()
    self.logs_dir = paths.logs_dir
    self.last_log = self.LastLogFile(self.logs_dir)

  def __str__(self):
    return textwrap.dedent("""\
        Logs Directory: [{logs_dir}]
        Last Log File: [{log_file}]
        """.format(logs_dir=self.logs_dir, log_file=self.last_log))

  def LastLogContents(self):
    """Returns the contents of the last log file, or '' when there is none."""
    if not self.last_log:
      return ''
    with open(self.last_log) as log_fp:
      return log_fp.read()

  def LastLogFile(self, logs_dir):
    """Finds the last (not current) gcloud log file.

    Args:
      logs_dir: str, The path to the logs directory being used.

    Returns:
      str, The full path to the last (but not the currently in use) log file
      if it exists, or None.
    """
    date_dirs = self.FilesSortedByName(logs_dir)
    if not date_dirs:
      return None
    skipped_current = False
    # Walk date directories newest-first; the newest file overall belongs to
    # the currently running invocation and must be skipped.
    for date_dir in reversed(date_dirs):
      log_files = self.FilesSortedByName(date_dir) or []
      if log_files and not skipped_current:
        log_files = log_files[:-1]
        skipped_current = True
      if log_files:
        return log_files[-1]
    return None

  def FilesSortedByName(self, directory):
    """Gets the list of files in the given directory, sorted by name.

    Args:
      directory: str, The path to the directory to list.

    Returns:
      [str], The full paths of the files, sorted by file name, or None.
    """
    if not os.path.isdir(directory):
      return None
    entries = os.listdir(directory)
    if not entries:
      return None
    return [os.path.join(directory, entry) for entry in sorted(entries)]
| 32.659836 | 79 | 0.654913 | 7,509 | 0.942276 | 0 | 0 | 166 | 0.020831 | 0 | 0 | 2,396 | 0.300665 |
d3aa5533cd819bec8e09e75cda19441c06cdc1f9 | 269 | py | Python | api/admin_urls.py | chenxiaoli/auth21 | a2b15ecb883416e011da03d6ec066459fa28f693 | [
"MIT"
] | null | null | null | api/admin_urls.py | chenxiaoli/auth21 | a2b15ecb883416e011da03d6ec066459fa28f693 | [
"MIT"
] | null | null | null | api/admin_urls.py | chenxiaoli/auth21 | a2b15ecb883416e011da03d6ec066459fa28f693 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.conf.urls import include, url
from .user_admin_views import UserAdminListViewSet
# Route GET requests to the viewset's `get` action for the admin user list.
user_admin_list=UserAdminListViewSet.as_view({
    "get":"get"
})
# URL patterns exposed by this admin API module.
urlpatterns = (
    url(r'^user$', user_admin_list, name='user-admin-list'),
)
| 15.823529 | 60 | 0.702602 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.219331 |
d3aa7a46e2eb468c36ac6cd5574de318dee4cff0 | 283 | py | Python | kubernetes_typed/client/models/v1_empty_dir_volume_source.py | nikhiljha/kubernetes-typed | 4f4b969aa400c88306f92560e56bda6d19b2a895 | [
"Apache-2.0"
] | 22 | 2020-12-10T13:06:02.000Z | 2022-02-13T21:58:15.000Z | kubernetes_typed/client/models/v1_empty_dir_volume_source.py | nikhiljha/kubernetes-typed | 4f4b969aa400c88306f92560e56bda6d19b2a895 | [
"Apache-2.0"
] | 4 | 2021-03-08T07:06:12.000Z | 2022-03-29T23:41:45.000Z | kubernetes_typed/client/models/v1_empty_dir_volume_source.py | nikhiljha/kubernetes-typed | 4f4b969aa400c88306f92560e56bda6d19b2a895 | [
"Apache-2.0"
] | 2 | 2021-09-05T19:18:28.000Z | 2022-03-14T02:56:17.000Z | # Code generated by `typeddictgen`. DO NOT EDIT.
"""V1EmptyDirVolumeSourceDict generated type."""
from typing import TypedDict
# Typed mapping for the generated V1EmptyDirVolumeSource dict shape.
# total=False: both keys are optional and may be omitted.
# NOTE(review): field semantics (medium / sizeLimit) come from the upstream
# Kubernetes API this file was generated from -- confirm against the
# generator's source schema before relying on them.
V1EmptyDirVolumeSourceDict = TypedDict(
    "V1EmptyDirVolumeSourceDict",
    {
        "medium": str,
        "sizeLimit": str,
    },
    total=False,
)
| 21.769231 | 48 | 0.678445 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 143 | 0.5053 |
d3aafbadefc12b6d1ab958ddf0138fdc7ba0663d | 5,537 | py | Python | omstools/RNAseq/lncRNA/classify/get_gene_cat_v3.py | bioShaun/OM004script | d604b565ae220f2ec7bfba7a8ef44018e7a33c12 | [
"MIT"
] | null | null | null | omstools/RNAseq/lncRNA/classify/get_gene_cat_v3.py | bioShaun/OM004script | d604b565ae220f2ec7bfba7a8ef44018e7a33c12 | [
"MIT"
] | null | null | null | omstools/RNAseq/lncRNA/classify/get_gene_cat_v3.py | bioShaun/OM004script | d604b565ae220f2ec7bfba7a8ef44018e7a33c12 | [
"MIT"
] | 1 | 2018-05-06T03:14:39.000Z | 2018-05-06T03:14:39.000Z | import pandas as pd
import numpy as np
import click
import os
# Categories ordered from highest to lowest priority; when a gene has
# transcripts of several categories, the first match in this tuple wins
# (see get_type inside main).
PRIORITY = ('Read-through', 'Protein coding',
            'Pseudogene', 'TUCP', 'lncrna', 'lncRNA', 'other', 'ncRNA,other')
# Maps raw annotation labels to the display names used in the output tables.
type_map = {
    'other': 'lncRNA',
    'ncRNA,other': 'lncRNA',
    'lncrna': 'lncRNA',
    'protein_coding': 'Protein coding',
    'pseudogene': 'Pseudogene',
    'read_through': 'Read-through'
}
@click.command()
@click.option(
    '-m',
    '--meta_table',
    type=click.Path(exists=True, dir_okay=False),
    help='taco compare metadata',
    required=True,
)
@click.option(
    '-t',
    '--tucp',
    type=click.Path(exists=True, dir_okay=False),
    help='tucp transcripts.',
    required=True,
)
@click.option(
    '-o',
    '--out_dir',
    type=click.Path(file_okay=False),
    help='gene classify/summary directory based on \
taco compare result and feelnc classify.',
    required=True
)
@click.option(
    '-n',
    '--name',
    type=click.STRING,
    help='Summary table name',
    default=None
)
def main(meta_table, tucp, out_dir, name):
    """Classify transcripts/genes and write summary tables to out_dir.

    Reads the taco-compare metadata table and a TUCP transcript list, assigns
    each transcript/gene a category (TUCP, read-through, lncRNA, ...), and
    writes four tab-separated outputs: assembly.number.summary.txt,
    gene.classify.txt, tr.classify.txt and gene.name.txt.
    """
    meta_table_df = pd.read_table(meta_table, index_col=0)
    tucp_df = pd.read_table(tucp, header=None, index_col=0)
    tucp_series = tucp_df.index.intersection(meta_table_df.index)
    # label TUCP
    meta_table_df.loc[tucp_series, 'category'] = 'TUCP'
    # label read_through
    mask = meta_table_df.category_relative_detail == 'read_through'
    meta_table_df.loc[mask, 'category'] = 'read_through'
    # filter out intronic transcripts
    meta_table_df = meta_table_df[meta_table_df.category_relative_detail !=
                                  'intronic_same_strand']
    # rename gene type to analysis name
    meta_table_df.loc[:, 'category'].replace(type_map, inplace=True)

    # function to summarize transcript/gene type
    def type_summary(type_df, col_name):
        type_df.columns = ['category', 'novel_status']
        type_summary = type_df.groupby(
            ['category', 'novel_status']).size()
        type_summary.name = col_name
        type_summary = pd.DataFrame(type_summary)
        f_sum = type_summary.unstack('novel_status', fill_value=0)
        f_sum.loc[:, (col_name, 'Total')] = (
            f_sum.loc[:, (col_name, 'Annotated')] +
            f_sum.loc[:, (col_name, 'Unannotated')])
        return f_sum
    # annotation status according to exonic_overlap
    meta_table_df.loc[:, 'novel_status'] = np.where(
        meta_table_df.category_relative == 'exonic_overlap',
        'Annotated', 'Unannotated')
    meta_table_df = meta_table_df.reset_index()
    tr_sum = type_summary(meta_table_df.loc[:, ['category', 'novel_status']],
                          'Transcript')
    meta_table_df.loc[:, 'new_gene_id'] = meta_table_df.novel_status + \
        '.' + meta_table_df.gene_id
    tr_type_df = meta_table_df.loc[:, ['transcript_id', 'new_gene_id', 'category']]
    meta_table_type_df = meta_table_df.loc[:, [
        'new_gene_id', 'category', 'novel_status']]
    meta_table_type_df.columns = ['gene_id', 'category', 'novel_status']
    gene_type_map = meta_table_type_df.groupby(
        ['gene_id', 'novel_status'])['category'].unique()
    meta_table_df = meta_table_df.reset_index()
    gene_name_df = meta_table_df.loc[:, ['new_gene_id',
                                         'category_relative',
                                         'ref_gene_id',
                                         'ref_gene_name']]
    gene_name_df.columns = [
        'gene_id', 'category_relative', 'ref_gene_id', 'ref_gene_name']
    gene_name_df = gene_name_df[gene_name_df.category_relative ==
                                'exonic_overlap']
    gene_name_df = gene_name_df.loc[:, [
        'gene_id', 'ref_gene_id', 'ref_gene_name']].drop_duplicates()

    def get_type(type_list):
        # Returns the highest-priority category present in type_list.
        for each_type in PRIORITY:
            if each_type in type_list:
                return type_map.get(each_type, each_type)
    # Materialize the map() result: on Python 3 map() returns a lazy
    # iterator; the original code relied on Python 2 returning a list.
    # list() is a no-op change on Python 2 and makes the DataFrame
    # construction below unambiguous on Python 3.
    gene_type_list = list(map(get_type, gene_type_map))
    gene_type_df = pd.DataFrame(
        gene_type_list, index=gene_type_map.index, columns=['type'])
    gene_type_df = gene_type_df.reset_index().set_index('gene_id')
    read_through_genes = gene_type_df[gene_type_df.type ==
                                      "Read-through"].index
    gene_name_df = gene_name_df[~gene_name_df.gene_id.isin(read_through_genes)]
    gene_name_df = gene_name_df.set_index('gene_id')
    # Genes matching more than one reference gene are treated as read-through.
    read_through_sup = gene_name_df[
        gene_name_df.index.value_counts() > 1].index.unique()
    gene_type_df.loc[read_through_sup, 'type'] = 'Read-through'
    g_sum = type_summary(gene_type_df.loc[:, ['type', 'novel_status']],
                         'Gene')
    type_stats = pd.concat([tr_sum, g_sum], axis=1)
    type_stats.index.name = 'Category'
    summary_file = os.path.join(out_dir, 'assembly.number.summary.txt')
    classify_file = os.path.join(out_dir, 'gene.classify.txt')
    tr_classify_file = os.path.join(out_dir, 'tr.classify.txt')
    name_file = os.path.join(out_dir, 'gene.name.txt')
    if name is not None:
        type_stats.loc[:, ('', 'Name')] = name
        output_header = False
    else:
        output_header = True
    gene_type_df = gene_type_df.drop('novel_status', axis=1)
    type_stats.to_csv(summary_file, sep='\t', header=output_header)
    gene_type_df.to_csv(classify_file, sep='\t')
    tr_type_df.to_csv(tr_classify_file, sep='\t', index=False)
    # Only keep genes that map to exactly one reference gene name.
    gene_name_df = gene_name_df[gene_name_df.index.value_counts() == 1]
    gene_name_df.to_csv(name_file, sep='\t')
if __name__ == '__main__':
    # click parses the command-line options and invokes main.
    main()
| 38.451389 | 83 | 0.642767 | 0 | 0 | 0 | 0 | 5,107 | 0.922341 | 0 | 0 | 1,436 | 0.259346 |
d3ab32f767c6d6e9a4d044cf91516005f20c48e6 | 1,236 | py | Python | awsf_cwl_v1/split_num.py | pkerpedjiev/tibanna | 8d8333bc7757076914c2bafbd68ee24c4ad611f6 | [
"MIT"
] | null | null | null | awsf_cwl_v1/split_num.py | pkerpedjiev/tibanna | 8d8333bc7757076914c2bafbd68ee24c4ad611f6 | [
"MIT"
] | null | null | null | awsf_cwl_v1/split_num.py | pkerpedjiev/tibanna | 8d8333bc7757076914c2bafbd68ee24c4ad611f6 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys
import csv
def split_num(n, M):
    """Split the range 1..n into consecutive "start-end" chunks of size <= M.

    Args:
        n: int, total length of the range (positions 1..n, inclusive).
        M: int, maximum allowed chunk size; must be positive.

    Returns:
        list of str, each formatted "start-end" with 1-based inclusive
        bounds, covering 1..n in order. Empty list when n <= 0.
    """
    if n <= 0:
        # Previously n <= 0 crashed with ZeroDivisionError (nsplit == 0);
        # an empty range simply has no chunks.
        return []
    nsplit = n//M
    if nsplit*M < n:
        nsplit += 1  # round the number of chunks up
    ninterval = n//nsplit  # nominal chunk size (>= 1 for positive n)
    ncum = 1
    end = 0
    res = []
    while end < n:
        start = ncum
        ncum += ninterval
        end = ncum-1
        if end > n:
            end = n  # clamp the final chunk to the range end
        res.append("{0}-{1}".format(start, end))
    return res
def split_num_given_args():
    """CLI helper: reads n and M from sys.argv and prints the chunk list."""
    n = int(sys.argv[1])  # original number
    M = int(sys.argv[2])  # max size for split range
    # print as a function call: the old statement form was a SyntaxError
    # under Python 3; the call form works on both Python 2 and 3.
    print(split_num(n, M))
def split_chrom(chromsize_file, M):
    """Prints "chrom:start-end" chunks for every row of a chrom.size file."""
    with open(chromsize_file, 'r') as size_fp:
        for fields in csv.reader(size_fp, delimiter='\t'):
            chrom, length = fields[0], int(fields[1])
            for interval in split_num(length, int(M)):
                print ("{chr}:{interval}".format(chr=chrom, interval=interval))
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Arguments")
    # chrom.size file: one "<chrom><TAB><length>" row per chromosome
    parser.add_argument("-c", "--chrom", help="Chrom.size file, tab-delimited")
    parser.add_argument("-M", "--max_split_size", help="Maximum split size")
    args = parser.parse_args()
    split_chrom(args.chrom, args.max_split_size)
| 24.72 | 80 | 0.590615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 253 | 0.204693 |
d3ac9b1c75059ea7b2c543d6cbf7a13cdd193523 | 94 | py | Python | bcbio/picard/metrics.py | a113n/bcbio-nextgen | 1d4afef27ad2e84a4ecb6145ccc5058f2abb4616 | [
"MIT"
] | 418 | 2015-01-01T18:21:17.000Z | 2018-03-02T07:26:28.000Z | bcbio/picard/metrics.py | ahmedelhosseiny/bcbio-nextgen | b5618f3c100a1a5c04bd5c8acad8f96d0587e41c | [
"MIT"
] | 1,634 | 2015-01-04T11:43:43.000Z | 2018-03-05T18:06:39.000Z | bcbio/picard/metrics.py | ahmedelhosseiny/bcbio-nextgen | b5618f3c100a1a5c04bd5c8acad8f96d0587e41c | [
"MIT"
] | 218 | 2015-01-26T05:58:18.000Z | 2018-03-03T05:50:05.000Z | # Back compatibility -- use broad subdirectory for new code
from bcbio.broad.metrics import *
| 31.333333 | 59 | 0.787234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.62766 |
d3adf0ba5a755617d8f7bd91e07d748b2daa5d68 | 1,019 | py | Python | databases/loja.py | danielicapui/programa-o-avancada | d0e5b876b951ae04a46ffcda0dc0143e3f7114d9 | [
"MIT"
] | null | null | null | databases/loja.py | danielicapui/programa-o-avancada | d0e5b876b951ae04a46ffcda0dc0143e3f7114d9 | [
"MIT"
] | null | null | null | databases/loja.py | danielicapui/programa-o-avancada | d0e5b876b951ae04a46ffcda0dc0143e3f7114d9 | [
"MIT"
] | null | null | null | from utills import *
# Open (or create) the "loja" database via the utills helpers.
conn,cur=start("loja")
# Schema for the products table, with a foreign key into categorias.
produtos_p="produto_id integer primary key autoincrement,nome text,valor real,categoria_id integer,constraint fk_categorias foreign key (categoria_id) references categorias(categoria_id)"
criarTabela("categorias","categoria_id integer primary key, categoria text")
criarTabela("produtos",produtos_p)
# Seed data: (id, category name) and (name, price, category id) rows.
categorias=[(1,"mercearia"),
(2,"frutas"),
(3,"açougue"),
(4,"higiene"),
(5,"limpeza")]
produtos=[("feijão",5.10,1),
("cuscuz",0.93,1),
("maça",7.48,2),
("carne de sol",22.98,3),
("sabonete xerin",1.10,4),
("creme dental dente azul 90g",8.78,4),
("sabão em pó xô 'nhaca pôdji 400g",9.89,5)]
insertInto("categorias","categoria_id,categoria",categorias)
insertInto("produtos","nome,valor,categoria_id",produtos)
#cur.executemany("insert into produtos (nome,valor,categoria_id) values (?,?,?)",produtos)
# Print both tables, then persist and close the connection.
buscaTabela("categorias")
buscaTabela("produtos")
conn.commit()
conn.close()
| 39.192308 | 187 | 0.673209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 605 | 0.589669 |
d3ae6c9a673c6e6d9173068f735788f9baf4cdf3 | 140 | py | Python | bbb/templatetags/is_bbb_mod.py | cmd-ev/voctoconf | 5d2c1ca4f1da01b66983f5a562eb8eb6babe0452 | [
"MIT"
] | 21 | 2020-08-24T13:27:03.000Z | 2021-10-15T09:17:46.000Z | bbb/templatetags/is_bbb_mod.py | cmd-ev/voctoconf | 5d2c1ca4f1da01b66983f5a562eb8eb6babe0452 | [
"MIT"
] | null | null | null | bbb/templatetags/is_bbb_mod.py | cmd-ev/voctoconf | 5d2c1ca4f1da01b66983f5a562eb8eb6babe0452 | [
"MIT"
] | 5 | 2020-08-25T16:34:51.000Z | 2021-02-19T04:48:10.000Z | from django import template
# Template tag registry for this module's filters.
register = template.Library()
@register.filter
def is_bbb_mod(room, user):
    """Template filter: whether `user` is a moderator of `room`.

    Usage in templates: {{ room|is_bbb_mod:user }}; delegates to the
    room object's is_moderator() method.
    """
    return room.is_moderator(user)
| 17.5 | 34 | 0.771429 | 0 | 0 | 0 | 0 | 79 | 0.564286 | 0 | 0 | 0 | 0 |
d3ae9079796610b93f0f40736e8da6576b598166 | 1,538 | py | Python | main/qtdashboard/obsolete/grab_handle.py | Hijtec/Breach_rides_elevator | 8cdb9fec16ff4458d1505f8da08fb67e86ef0c1a | [
"MIT"
] | 1 | 2020-03-18T08:58:32.000Z | 2020-03-18T08:58:32.000Z | main/qtdashboard/obsolete/grab_handle.py | Hijtec/Breach_rides_elevator | 8cdb9fec16ff4458d1505f8da08fb67e86ef0c1a | [
"MIT"
] | 2 | 2020-04-17T00:25:39.000Z | 2020-04-18T11:51:37.000Z | main/qtdashboard/obsolete/grab_handle.py | Hijtec/Breach_rides_elevator | 8cdb9fec16ff4458d1505f8da08fb67e86ef0c1a | [
"MIT"
] | 1 | 2020-04-17T00:09:25.000Z | 2020-04-17T00:09:25.000Z | # This Python file uses the following encoding: utf-8
import cv2
from multiprocessing import shared_memory
import numpy as np
import time
class Video:
    """Thin wrapper around an OpenCV camera capture device."""

    def __init__(self, id, camera_number):
        """Initializes the wrapper and opens the capture device.

        Args:
            id: identifier for this video source (stored as-is).
            camera_number: device index passed to cv2.VideoCapture.
        """
        self.id = id
        self.camera_number = camera_number
        self.create_video_object()
        self.frame = None  # most recently retrieved frame (None until a read)

    def create_video_object(self):
        """Creates and stores the cv2.VideoCapture object."""
        self.VideoFeed = cv2.VideoCapture(self.camera_number)
        return self.VideoFeed

    def read_next_video_frame(self):
        """Reads the next frame from the camera.

        Returns:
            The frame on success, or None when no frame could be retrieved
            (previously None was returned only implicitly).
        """
        self.VideoFeed.grab()
        success, self.frame = self.VideoFeed.retrieve()
        if success:
            return self.frame
        print("Problem occured getting next frame")
        return None  # explicit: callers must handle a failed read
videoObject = Video(id = 1, camera_number = 0)
frame = videoObject.read_next_video_frame()
time.sleep(5)
#frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
print(frame)
"""
print(a.nbytes)
print(a.dtype)
print(a.shape)
"""
# Create the shared-memory segment ONCE, sized from the first frame. The
# original code called SharedMemory(name='dataPass', create=True) on every
# loop iteration, which raises FileExistsError on the second pass, and the
# segment was never unlinked.
a = np.array(frame)
shm = shared_memory.SharedMemory(name='dataPass', create=True, size=a.nbytes)
try:
    while True:
        frame = videoObject.read_next_video_frame()
        if frame is None:
            # Skip failed reads instead of crashing on np.array(None).
            continue
        a = np.array(frame)
        # NOTE(review): assumes every frame has the same shape/dtype/nbytes
        # as the first one -- confirm for the camera in use.
        b = np.ndarray(a.shape, dtype=a.dtype, buffer=shm.buf)
        print(shm.buf)
        b[:] = a[:]
finally:
    shm.close()
    shm.unlink()  # release the named segment when the loop exits
| 27.963636 | 85 | 0.583875 | 847 | 0.550715 | 0 | 0 | 0 | 0 | 0 | 0 | 328 | 0.213264 |
d3af3caa6c9db054915893aae3c8cc506266ac99 | 8,437 | py | Python | analysis/config/config_UltraLegacy18.py | cms-btv-pog/jet-tagging-sf | c418e13aa4eac5522818d5f5ad3db2a0c81ec52e | [
"Apache-2.0"
] | 3 | 2020-01-22T08:30:14.000Z | 2021-12-27T18:47:43.000Z | analysis/config/config_UltraLegacy18.py | cms-btv-pog/jet-tagging-sf | c418e13aa4eac5522818d5f5ad3db2a0c81ec52e | [
"Apache-2.0"
] | null | null | null | analysis/config/config_UltraLegacy18.py | cms-btv-pog/jet-tagging-sf | c418e13aa4eac5522818d5f5ad3db2a0c81ec52e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import scinum as sn
import numpy as np
def create_config(base_cfg):
    """Builds the 2018 UltraLegacy analysis config from a base config.

    Args:
        base_cfg: the base analysis config object; it is copied and bound to
            the UltraLegacy18 campaign.

    Returns:
        The populated config: datasets, b-tag working points, luminosities,
        run ranges, triggers, MET filters, JER/JES settings, pileup profile,
        file-merging counts and task versions for 2018 data.
    """
    # setup the config for 2018 data
    from analysis.config.campaign_UltraLegacy18 import campaign as campaign_UltraLegacy18
    from analysis.config.jet_tagging_sf import ch_ee, ch_emu, ch_mumu, ch_e, ch_mu
    cfg = base_cfg.copy(campaign=campaign_UltraLegacy18)
    # add datasets
    dataset_names = [
        "data_A_ee", "data_B_ee", "data_C_ee", "data_D_ee",
        "data_A_emu", "data_B_emu", "data_C_emu", "data_D_emu",
        "data_A_mumu", "data_B_mumu", "data_C_mumu", "data_D_mumu",
        "data_A_e", "data_B_e", "data_C_e", "data_D_e",
        "data_A_mu", "data_B_mu", "data_C_mu", "data_D_mu",
        "tt_dl", "tt_sl",
        "dy_lep_10To50",
        #"dy_lep_50ToInf",
        "dy_lep_LO_50ToInf",
        #"dy_lep_0Jets", "dy_lep_1Jets", "dy_lep_2Jets",
        "st_s_lep",
        "st_t_t", "st_t_tbar",
        "st_tW_t", "st_tW_tbar",
        "WW", "WZ", "ZZ",
        "W_lep",
        #"ttH",
        #"ttWJets_lep", "ttWJets_had", "ttZJets_lep", "ttZJets_had",
    ]
    for dataset_name in dataset_names:
        dataset = campaign_UltraLegacy18.get_dataset(dataset_name)
        cfg.add_dataset(dataset)
    # store channels per real dataset
    cfg.set_aux("dataset_channels", {
        dataset: cfg.get_channel(dataset.name.split("_")[-1])
        for dataset in cfg.datasets.values()
        if dataset.is_data
    })
    # store b-tagger working points
    cfg.set_aux("working_points", {
        "deepcsv": {
            "loose": 0.1208,
            "medium": 0.4168,
            "tight": 0.7665,
        },
        "deepjet": {
            "loose": 0.0490,
            "medium": 0.2783,
            "tight": 0.7100,
        }
    })
    # luminosities per channel in /pb
    cfg.set_aux("lumi", {
        ch_ee: 59830.,
        ch_emu: 59830.,
        ch_mumu: 59830.,
        ch_e: 59830.,
        ch_mu: 59830.,
    })
    # run ranges (set_aux returns the stored mapping; rr is reused for the
    # per-run JES versions below)
    rr = cfg.set_aux("run_ranges", {
        "A": (315252, 316995),
        "B": (316998, 319312),
        "C": (319313, 320393),
        "D": (320394, 325273),
    })
    # global tags
    cfg.set_aux("global_tag", {
        "data": "106X_dataRun2_v28",
        "mc": "106X_upgrade2018_realistic_v11_L1v1",
    })
    # lumi, normtag and pileup file
    cfg.set_aux("lumi_file", "/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions18/13TeV/"
        "Legacy_2018/Cert_314472-325175_13TeV_Legacy2018_Collisions18_JSON.txt")
    # https://twiki.cern.ch/twiki/bin/view/CMS/TWikiLUM
    cfg.set_aux("normtag_file", "/cvmfs/cms-bril.cern.ch/cms-lumi-pog/Normtags/normtag_PHYSICS.json")
    cfg.set_aux("pileup_file", "/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions18/13TeV/"
        "PileUp/pileup_latest.txt")
    # triggers
    # https://twiki.cern.ch/twiki/bin/view/CMS/TopTriggerYear2018
    cfg.set_aux("triggers", {
        ch_ee: [
            "HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_v*",
            "HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_DZ_v*",
        ],
        ch_emu: [
            "HLT_Mu23_TrkIsoVVL_Ele12_CaloIdL_TrackIdL_IsoVL_v*",
            "HLT_Mu23_TrkIsoVVL_Ele12_CaloIdL_TrackIdL_IsoVL_DZ_v*",
            "HLT_Mu12_TrkIsoVVL_Ele23_CaloIdL_TrackIdL_IsoVL_DZ_v*",
            "HLT_Mu8_TrkIsoVVL_Ele23_CaloIdL_TrackIdL_IsoVL_DZ_v*",
        ],
        ch_mumu: [
            "HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_Mass3p8_v*",
            "HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_Mass8_v*",
        ],
        ch_e: [
            "HLT_Ele35_WPTight_Gsf_v*",
            "HLT_Ele28_eta2p1_WPTight_Gsf_HT150_v*",
        ],
        ch_mu: [
            "HLT_IsoMu24_v*",
        ],
    })
    # special triggers per real dataset
    cfg.set_aux("data_triggers", {})
    # MET filters
    # https://twiki.cern.ch/twiki/bin/view/CMS/MissingETOptionalFiltersRun2
    cfg.set_aux("metFilters", {
        "data": [
            "Flag_goodVertices", "Flag_globalSuperTightHalo2016Filter", "Flag_HBHENoiseFilter",
            "Flag_HBHENoiseIsoFilter", "Flag_EcalDeadCellTriggerPrimitiveFilter",
            "Flag_BadPFMuonFilter", #"Flag_BadChargedCandidateFilter",
            "Flag_eeBadScFilter", #"Flag_ecalBadCalibReducedMINIAODFilter",
        ],
        "mc": [
            "Flag_goodVertices", "Flag_globalSuperTightHalo2016Filter", "Flag_HBHENoiseFilter",
            "Flag_HBHENoiseIsoFilter", "Flag_EcalDeadCellTriggerPrimitiveFilter",
            "Flag_BadPFMuonFilter", #"Flag_BadChargedCandidateFilter",
            #"Flag_ecalBadCalibReducedMINIAODFilter",
        ],
    })
    # JER
    cfg.set_aux("jer_version", "Summer19UL18_JRV2")
    # JES (per-run versions for data via the rr run ranges above)
    cfg.set_aux("jes_version", {
        "data": [
            rr["A"] + ("Summer19UL18_RunA_V5_DATA",),
            rr["B"] + ("Summer19UL18_RunB_V5_DATA",),
            rr["C"] + ("Summer19UL18_RunC_V5_DATA",),
            rr["D"] + ("Summer19UL18_RunD_V5_DATA",),
        ],
        "mc": [
            (1, int(1e9), "Summer19UL18_V5_MC"),
        ],
    })
    # JES veto maps
    cfg.set_aux("jes_veto_map", {
        "file": "Summer19UL18_V1/hotjets-UL18.root",
        "hist_name": "h2hot_ul18_plus_hem1516_plus_hbp2m1",
    })
    cfg.set_aux("jes_uncertainty_file", {
        "factorized": None, # take file from jes github
        "reduced": "",
    })
    # https://github.com/cms-sw/cmssw/blob/master/SimGeneral/MixingModule/python/mix_2018_25ns_UltraLegacy_PoissonOOTPU_cfi.py
    cfg.set_aux("pileup_mc", [
        8.89374611122e-07, 1.1777062868e-05, 3.99725585118e-05, 0.000129888015252, 0.000265224848687,
        0.000313088635109, 0.000353781668514, 0.000508787237162, 0.000873670065767, 0.00147166880932,
        0.00228230649018, 0.00330375581273, 0.00466047608406, 0.00624959203029, 0.00810375867901,
        0.010306521821, 0.0129512453978, 0.0160303925502, 0.0192913204592, 0.0223108613632,
        0.0249798930986, 0.0273973789867, 0.0294402350483, 0.031029854302, 0.0324583524255,
        0.0338264469857, 0.0351267479019, 0.0360320204259, 0.0367489568401, 0.0374133183052,
        0.0380352633799, 0.0386200967002, 0.039124376968, 0.0394201612616, 0.0394673457109,
        0.0391705388069, 0.0384758587461, 0.0372984548399, 0.0356497876549, 0.0334655175178,
        0.030823567063, 0.0278340752408, 0.0246009685048, 0.0212676009273, 0.0180250593982,
        0.0149129830776, 0.0120582333486, 0.00953400069415, 0.00738546929512, 0.00563442079939,
        0.00422052915668, 0.00312446316347, 0.00228717533955, 0.00164064894334, 0.00118425084792,
        0.000847785826565, 0.000603466454784, 0.000419347268964, 0.000291768785963, 0.000199761337863,
        0.000136624574661, 9.46855200945e-05, 6.80243180179e-05, 4.94806013765e-05, 3.53122628249e-05,
        2.556765786e-05, 1.75845711623e-05, 1.23828210848e-05, 9.31669724108e-06, 6.0713272037e-06,
        3.95387384933e-06, 2.02760874107e-06, 1.22535149516e-06, 9.79612472109e-07, 7.61730246474e-07,
        4.2748847738e-07, 2.41170461205e-07, 1.38701083552e-07, 3.37678010922e-08, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0
    ])
    # https://twiki.cern.ch/twiki/bin/viewauth/CMS/PileupJSONFileforData#Pileup_JSON_Files_For_Run_II
    cfg.set_aux("min_bias_xs", sn.Number(69.2, (sn.Number.REL, 0.046))) # mb
    # file merging information (stage -> dataset -> files after merging)
    cfg.set_aux("file_merging", {
        "trees": {
            "data_D_e": 2,
            "data_A_mu": 2,
            "data_D_mu": 3,
            "tt_dl": 456,
            "tt_sl": 491,
            "dy_lep_LO_50ToInf": 30,
            "st_s_lep": 14,
            "st_t_t": 14,
            "st_t_tbar": 7,
            "st_tW_t": 34,
            "st_tW_tbar": 31,
            "WW": 3,
            "WZ": 2,
            "W_lep": 3
        }
    })
    # versions
    cfg.set_aux("versions", {
        "WriteTrees": "prod2", # including SL events
        "MergeTrees": "prod2",
        "MergeMetaData": "prod2",
        "WriteHistograms": "prod2",
        "MergeHistograms": "prod2",
        "MeasureCScaleFactors": "prod1",
        "MeasureScaleFactors": "prod1",
        "FitScaleFactors": "prod1",
        "BundleScaleFactors": "prod1",
        "GetScaleFactorWeights": "prod1",
        "MergeScaleFactorWeights": "prod1",
        "OptimizeBinning": "prod1",
        "CreateScaleFactorResults": "prod1",
    })
    return cfg
| 37.331858 | 126 | 0.620244 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,763 | 0.446012 |
d3b01feaa36c37cf9d815ac17249c1a9664b696f | 8,557 | py | Python | TD3_based_DRL/TD3IARL.py | wujingda/Human-in-the-loop-Deep-Reinforcement-Learning-Hug-DRL- | d00667017d586fbfc6487bd6ac8dd5396acae0d1 | [
"MIT"
] | 8 | 2021-07-13T10:05:42.000Z | 2022-03-06T03:06:21.000Z | TD3_based_DRL/TD3IARL.py | wujingda/Human-in-the-loop-Deep-Reinforcement-Learning | d00667017d586fbfc6487bd6ac8dd5396acae0d1 | [
"MIT"
] | null | null | null | TD3_based_DRL/TD3IARL.py | wujingda/Human-in-the-loop-Deep-Reinforcement-Learning | d00667017d586fbfc6487bd6ac8dd5396acae0d1 | [
"MIT"
] | null | null | null | '''
This algorithm is an IA-RL implementation built on the off-policy TD3 algorithm; for the original IA-RL algorithm
you can refer to https://arxiv.org/abs/1811.06187.
Since this is a baseline algorithm, most descriptions are omitted; please see HUGTD3.py for implementation details.
'''
import pickle
import numpy as np
import torch
import torch.nn as nn
from TD3_based_DRL.priority_replay import Memory
from TD3_based_DRL.network_model import Actor,Critic
from TD3_based_DRL.util import hard_update, soft_update
# Seed all RNG sources for reproducibility.
seed = 2
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
# NOTE(review): torch.manual_seed was already called above; this repeat is
# redundant but harmless.
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Hyperparameters for the TD3-based IA-RL agent.
MEMORY_CAPACITY = 38400  # replay buffer capacity
BATCH_SIZE = 128  # minibatch size per update
GAMMA = 0.95  # discount factor
LR_C = 0.0005  # critic learning rate
LR_A = 0.0002  # actor learning rate
LR_I = 0.01  # not used in the visible code -- presumably an intervention/imitation rate
TAU = 0.001  # soft target-update coefficient
POLICY_NOSIE = 0.2  # target-policy smoothing noise std (name typo kept: used elsewhere)
POLICY_FREQ = 1  # delayed policy-update frequency
NOISE_CLIP = 0.5  # clip range for smoothing noise
class DRL:
    def __init__(self, action_dim, state_dim, LR_C = LR_C, LR_A = LR_A):
        """Builds the TD3 actor/critic networks and the replay buffer.

        Args:
            action_dim: int, dimensionality of the action space.
            state_dim: (width, height) of the 2-D state input; the networks
                consume the flattened width * height vector.
            LR_C: critic learning rate (defaults to module-level LR_C).
            LR_A: actor learning rate (defaults to module-level LR_A).
        """
        self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        self.state_dim = state_dim[0] * state_dim[1]  # flattened state size
        self.state_dim_width = state_dim[0]
        self.state_dim_height = state_dim[1]
        self.action_dim = action_dim
        self.batch_size = BATCH_SIZE
        self.gamma = GAMMA
        self.tau = TAU
        self.policy_noise = POLICY_NOSIE
        self.noise_clip = NOISE_CLIP
        self.policy_freq = POLICY_FREQ
        # Counts learn() updates; drives the delayed policy update in learn().
        self.itera = 0
        self.pointer = 0
        # Replay buffer from TD3_based_DRL.priority_replay.
        self.memory = Memory(MEMORY_CAPACITY)
        # Actor/critic pairs; targets start as exact clones via hard_update.
        self.actor = Actor(self.state_dim,self.action_dim).to(self.device)
        self.actor_target = Actor(self.state_dim,self.action_dim).to(self.device)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(),LR_A)
        self.critic = Critic(self.state_dim,self.action_dim).to(self.device)
        self.critic_target = Critic(self.state_dim,self.action_dim).to(self.device)
        self.critic_optimizers = torch.optim.Adam(self.critic.parameters(),LR_C)
        hard_update(self.actor_target,self.actor)
        hard_update(self.critic_target,self.critic)
def learn(self, batch_size = BATCH_SIZE, epoch=0):
## batched state, batched action, batched action from expert, batched intervention signal, batched reward, batched next state
bs, ba, ba_e, bi, br, bs_, tree_idx, ISweight = self.retrive(batch_size)
bs = torch.tensor(bs, dtype=torch.float).reshape(batch_size, self.state_dim_height, self.state_dim_width).to(self.device)
ba = torch.tensor(ba, dtype=torch.float).to(self.device).to(self.device)
ba_e = torch.tensor(ba_e, dtype=torch.float).to(self.device).to(self.device)
br = torch.tensor(br, dtype=torch.float).to(self.device).to(self.device)
bs_ = torch.tensor(bs_, dtype=torch.float).reshape(batch_size, self.state_dim_height, self.state_dim_width).to(self.device)
# initialize the loss variables
loss_c, loss_a = 0, 0
## calculate the predicted values of the critic
with torch.no_grad():
noise1 = (torch.randn_like(ba) * self.policy_noise).clamp(0, 1)
a_ = (self.actor_target(bs_).detach() + noise1).clamp(0, 1)
target_q1, target_q2 = self.critic_target([bs_,a_])
target_q1 = target_q1.detach()
target_q2 = target_q2.detach()
target_q = torch.min(target_q1,target_q2)
y_expected = br + self.gamma * target_q
y_predicted1, y_predicted2 = self.critic.forward([bs,ba])
errors = y_expected - y_predicted1
## update the critic
critic_loss = nn.MSELoss()
loss_critic = critic_loss(y_predicted1,y_expected)+critic_loss(y_predicted2,y_expected)
self.critic_optimizers.zero_grad()
loss_critic.backward()
self.critic_optimizers.step()
## update the actor
if self.itera % self.policy_freq == 0:
index1,_ = np.where(bi==0)
index2,_ = np.where(bi==1)
bs1,_,_,_=bs[index1],ba[index1],br[index1],bs_[index1]
bs2,ba2,_,_=bs[index2],ba[index2],br[index2],bs_[index2]
if bs2.size(0) != 0:
if bs1.size(0) != 0:
bs1 = torch.reshape(bs1,(len(bs1), self.state_dim_height, self.state_dim_width))
bs2 = torch.reshape(bs2,(len(bs2), self.state_dim_height, self.state_dim_width))
pred_a1 = self.actor.forward(bs1)
pred_a2 = self.actor.forward(bs2)
loss_actor1 = (-self.critic.forward([bs1,pred_a1])[0])
## fixed weight for human guidance actions
loss_actor2 = 3 * ((pred_a2 - ba2)**2)
loss_actor = torch.cat((loss_actor1,loss_actor2),0).mean()
else:
pred_a = self.actor.forward(bs)
loss_actor = 3*((pred_a - ba)**2)
loss_actor = loss_actor.mean()
else:
pred_a = self.actor.forward(bs)
loss_actor = (-self.critic.forward([bs,pred_a])[0]).mean()
self.actor_optimizer.zero_grad()
loss_actor.backward()
self.actor_optimizer.step()
soft_update(self.actor_target,self.actor,self.tau)
soft_update(self.critic_target,self.critic,self.tau)
loss_a = loss_actor.mean().item()
loss_c = loss_critic.mean().item()
self.itera += 1
self.memory.batch_update(tree_idx, abs(errors.detach().cpu().numpy()) )
return loss_c, loss_a
def choose_action(self,state):
state = torch.tensor(state,dtype=torch.float).reshape(self.state_dim_height, self.state_dim_width).to(self.device)
state = state.unsqueeze(0)
action = self.actor.forward(state).detach()
action = action.squeeze(0).cpu().numpy()
action = np.clip(action,-1, 1)
return action
def store_transition(self, s, a, a_e, i, r, s_):
transition = np.hstack((s, a, a_e, i, r, s_))
self.memory.store(transition)
self.pointer += 1
def retrive(self, batch_size):
tree_index, bt, ISWeight = self.memory.sample(batch_size)
bs = bt[:, :self.state_dim]
ba = bt[:, self.state_dim: self.state_dim + self.action_dim]
ba_e = bt[:, self.state_dim + self.action_dim: self.state_dim + self.action_dim + self.action_dim]
bi = bt[:, -self.state_dim - 2: -self.state_dim - 1]
br = bt[:, -self.state_dim - 1: -self.state_dim]
bs_ = bt[:, -self.state_dim:]
return bs, ba, ba_e, bi, br, bs_, tree_index, ISWeight
def memory_save(self):
per = open("memory_IARL.pkl", 'wb')
str = pickle.dumps(self.memory)
per.write(str)
per.close()
def memory_load(self):
with open("memory_IARL.pkl",'rb') as file:
self.memory = pickle.loads(file.read())
def load_model(self, output):
if output is None: return
self.actor.load_state_dict(torch.load('{}/actor.pkl'.format(output)))
self.critic.load_state_dict(torch.load('{}/critic.pkl'.format(output)))
def save_model(self, output):
torch.save(self.actor.state_dict(), '{}/actor.pkl'.format(output))
torch.save(self.critic.state_dict(), '{}/critic.pkl'.format(output))
def save(self, log_dir, epoch):
state = {'actor':self.actor.state_dict(), 'actor_target':self.actor_target.state_dict(),
'actor_optimizer':self.actor_optimizer.state_dict(),
'critic':self.critic.state_dict(), 'critic_target':self.critic_target.state_dict(),
'critic_optimizers':self.critic_optimizers.state_dict(),
'epoch':epoch}
torch.save(state, log_dir)
def load(self, log_dir):
checkpoint = torch.load(log_dir)
self.actor.load_state_dict(checkpoint['actor'])
self.actor_target.load_state_dict(checkpoint['actor_target'])
self.actor_optimizer.load_state_dict(checkpoint['actor_optimizer'])
self.critic.load_state_dict(checkpoint['critic'])
self.critic_target.load_state_dict(checkpoint['critic_target'])
self.critic_optimizers.load_state_dict(checkpoint['critic_optimizers'])
| 38.545045 | 133 | 0.621479 | 7,618 | 0.890265 | 0 | 0 | 0 | 0 | 0 | 0 | 858 | 0.100269 |
6c8f9d33f676329666350cb8e333eb9373e829db | 4,477 | py | Python | qihu.py | Yumore/getapks | b79744152629bed58f01ea39bb409e55b8b6d9ee | [
"MIT"
] | 2 | 2021-07-03T22:59:02.000Z | 2021-07-05T12:15:01.000Z | qihu.py | Yumore/getapks | b79744152629bed58f01ea39bb409e55b8b6d9ee | [
"MIT"
] | null | null | null | qihu.py | Yumore/getapks | b79744152629bed58f01ea39bb409e55b8b6d9ee | [
"MIT"
] | null | null | null | #!/usr/bin/evn python
# jojo_xia
import os
import re
import socket
from urllib import request
from urllib.error import ContentTooShortError
import apk_info
import data_utils
import file_utils
socket.setdefaulttimeout(30)
class qihu:
    """Crawler for the 360 (Qihu) mobile app store: scrapes listing pages and
    downloads the APKs they link to, with retry, MD5 bookkeeping, and metadata
    extraction via apk_info."""

    def __init__(self):
        self.url_list = []   # listing-page URLs to scrape
        self.apk_list = []   # reset per page; not otherwise used here
        self.index = 0       # position in the current page's app-name list
        self.download_path = data_utils.parse_cfg('download', 'path', '../apks')
        self.baseurl = 'http://zhushou.360.cn/list/index/cid/1?page='

    def get_url(self, page):
        """Queue listing pages 1..page for scraping."""
        for i in range(1, page + 1):
            self.url_list.append(self.baseurl + str(i))

    def get_app(self):
        """Scrape each queued listing page and download every APK it links."""
        print('download root dir is : %s' % self.download_path)
        if not os.path.exists(self.download_path):
            os.makedirs(self.download_path)
        for index in range(len(self.url_list)):
            self.index = 0
            self.apk_list = []
            response = request.urlopen(self.url_list[index], timeout=15)
            html = response.read()
            html = html.decode('utf-8')
            # print('url list is : ', re.findall(r"(?<=&url=).*?apk", html))
            # Download links are embedded as &url=... query parameters.
            link_list = re.findall(r"(?<=&url=).*?%26v%3D%26f%3Dz.apk", html)
            patten = re.compile(r'thirdlink&name=(.*?)&icon=')
            app_name_list = patten.findall(html)
            print("当前分类: %d, 本页共计%d个app,将依次进行下载,详情如下:" % (index, len(app_name_list)), app_name_list)
            for url in link_list:
                try:
                    # app_name_list and link_list are assumed parallel; self.index
                    # pairs the nth link with the nth name.
                    app_name = '{0}.apk'.format(app_name_list[self.index])
                    if " " in app_name:
                        print("app name constrains ")
                        app_name = app_name.replace(" ", '')
                    # Example link shape:
                    # http://s.shouji.qihucdn.com/210615/88e3d6ad97f17836fc2be9c7f10f8ee8/com.doumi.jianzhi_134.apk
                    # ?en=curpage%3D%26exp%3D1626254620%26from%3DAppList_json%26m2%3D%26ts%3D1625649820%26tok%3Dbb89589c22a22c76bc917767b8083660%26v%3D%26f%3Dz.apk
                    file_path = os.path.join(self.download_path, app_name)
                    if not os.path.isfile(file_path):
                        # Retry up to 5 times on socket timeouts.
                        count = 1
                        while count <= 5:
                            try:
                                print('\rtry to download %s with %d times' % (file_path, count))
                                self.real_down(url=url, file_path=file_path)
                                break
                            except socket.timeout:
                                error_info = 'Reloading for %d time' % count if count == 1 else 'Reloading for %d times' % count
                                print("\rerror info : %s" % error_info)
                                count += 1
                            except ContentTooShortError:
                                # Truncated download: retry once immediately.
                                print('Network conditions is not good. Reloading...')
                                self.real_down(url=url, file_path=file_path)
                        if count > 5:
                            print('\ndownload failed!')
                    else:
                        # File exists: keep it only if its recorded MD5 still matches;
                        # otherwise delete so a later run re-downloads it.
                        print('\nfile already exists! file path is : %s' % file_path)
                        md5_file = '{0}.md5'.format(file_path)
                        if not os.path.isfile(md5_file):
                            os.remove(file_path)
                        else:
                            file_out = open(md5_file, "r")
                            if file_out.read() == file_utils.gen_file_md5(file_path):
                                apk_info.get_apk_info(file_path)
                            else:
                                os.remove(file_path)
                                os.remove(md5_file)
                    self.index = self.index + 1
                except Exception as e:
                    print('\rexception >> %s --> %s' % (url, str(e)))

    def real_down(self, url, file_path):
        """Fetch one APK with progress output, record its metadata and MD5."""
        def reporthook(block_num, block_size, block_total):
            # urlretrieve progress callback.
            print('\rdownload progress: %.2f%%' % (block_num * block_size * 100.0 / block_total), end="")
        request.urlretrieve(url, file_path, reporthook=reporthook)
        apk_info.get_apk_info(file_path)
        request.urlcleanup()
        file_size = os.path.getsize(file_path)
        print('\rdownload finished, file size : %.2f MB' % (file_size / 1024 / 1024))
        file_utils.gen_file_md5(file_path)
        # time.sleep(3)

    def start(self):
        """Entry point: scrape the first 50 listing pages and download their apps."""
        self.get_url(50)
        self.get_app()
| 44.326733 | 163 | 0.51463 | 4,298 | 0.950044 | 0 | 0 | 0 | 0 | 0 | 0 | 961 | 0.212423 |
6c90866da3558fa85354b9fb39f92f13564b2e73 | 469 | py | Python | src/py_call.py | Lucien-MG/htools | c7cce486a101b182b03f0ac69e168b767a5c8d16 | [
"MIT"
] | null | null | null | src/py_call.py | Lucien-MG/htools | c7cce486a101b182b03f0ac69e168b767a5c8d16 | [
"MIT"
] | null | null | null | src/py_call.py | Lucien-MG/htools | c7cce486a101b182b03f0ac69e168b767a5c8d16 | [
"MIT"
] | null | null | null | #/bin/python3
import os
import subprocess
# Const:
# Const: the virtualenv root is read from a plain-text file named "config"
# in the current working directory.
# NOTE(review): conf.read() keeps any trailing newline from the file — confirm
# the config file contains only the bare path.
with open("config",'r') as conf:
    VENV_A = conf.read()
PYTHON="python"  # system interpreter used by python_call()
PYTHON3_VENV_A = os.path.join(VENV_A, "bin", "python3")  # venv interpreter
PIP=""  # NOTE(review): unused placeholder
PIP_VENV_A= os.path.join(VENV_A, "bin", "pip3")  # venv pip
# Functions:
def python_call(argv):
    """Run *argv* as the single argument of the system `python` interpreter."""
    cmd = [PYTHON, argv]
    subprocess.call(cmd)
def python_vcall(argv):
    """Run *argv* under the virtualenv's python3 interpreter.

    Returns the captured stdout bytes (previously discarded), so callers can
    inspect the output. Raises subprocess.CalledProcessError on a non-zero
    exit status, as check_output does.
    """
    return subprocess.check_output([PYTHON3_VENV_A, argv])
def pip_vinstall(argv):
    """Run *argv* as the single argument of the virtualenv's pip3.

    Returns the captured stdout bytes (previously discarded). Raises
    subprocess.CalledProcessError on a non-zero exit status.
    """
    return subprocess.check_output([PIP_VENV_A, argv])
| 17.37037 | 55 | 0.697228 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.168443 |
6c90ae6d334508870ec397a68e356d8b100c2c9c | 2,900 | py | Python | dramkit/_tmp/utils_lottery.py | Genlovy-Hoo/dramkit | fa3d2f35ebe9effea88a19e49d876b43d3c5c4c7 | [
"MIT"
] | null | null | null | dramkit/_tmp/utils_lottery.py | Genlovy-Hoo/dramkit | fa3d2f35ebe9effea88a19e49d876b43d3c5c4c7 | [
"MIT"
] | null | null | null | dramkit/_tmp/utils_lottery.py | Genlovy-Hoo/dramkit | fa3d2f35ebe9effea88a19e49d876b43d3c5c4c7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import time
import numpy as np
import pandas as pd
def get_1bet_nums(n, Pmax, Pmin=1, **kwargs):
    """Randomly draw one bet: `n` distinct numbers from [Pmin, Pmax].

    Parameters
    ----------
    n: how many numbers make up one bet
    Pmax: largest candidate number (inclusive)
    Pmin: smallest candidate number (inclusive)

    Extra keyword arguments are accepted and ignored for call-site
    compatibility.

    Returns
    -------
    numpy array of the `n` numbers drawn (sampling without replacement)
    """
    pool = np.arange(Pmin, Pmax + 1)
    return np.random.choice(pool, size=n, replace=False)
def play_Nrounds_6mark_half2(n_bet=5, N=10000, multer=1):
    """Monte-Carlo simulation of N rounds of the Mark-Six tail game.

    Each round stakes `n_bet` numbers (at `multer` per number) out of 1..49;
    if the single winning number is among them, the payout is 46 * multer.
    Returns the average gain rate, i.e. total gain divided by total stake.
    """
    total_gain = 0
    for _ in range(N):
        stake = n_bet * multer
        chosen = get_1bet_nums(n_bet, 49)   # the bet
        winning = get_1bet_nums(1, 49)[0]   # the draw
        payout = 46 * multer if winning in chosen else 0
        total_gain += payout - stake
    return total_gain / (n_bet * multer * N)
if __name__ == '__main__':
    # Sweep bet sizes 1..49, simulate each, and plot the resulting gain rates
    # with the top-3 bet sizes highlighted. Results are cached to gains<N>.csv.
    from dramkit import plot_series
    strt_tm = time.time()
    N = 2000000  # simulated rounds per bet size
    multer = 1
    if not os.path.exists('gains{}.csv'.format(N)):
        # No cache yet: run the full simulation sweep.
        gains = []
        for n_bet in range(1, 50):
            print(n_bet, end=' ')
            gain_rate = play_Nrounds_6mark_half2(n_bet, N, multer)
            gains.append([n_bet, gain_rate])
        max_gain = max(gains, key=lambda x: x[1])
        print('\n')
        print('best n_bet: {}'.format(max_gain[0]))
        print('best gain_rate: {}'.format(max_gain[1]))
        gains_pd = pd.DataFrame(gains, columns=['n_bet', 'gain_rate'])
        gains_pd['rank'] = gains_pd['gain_rate'].rank(ascending=False)
    else:
        # Reuse cached results from a previous run.
        gains_pd = pd.read_csv('gains{}.csv'.format(N))
        gains_pd['rank'] = gains_pd['gain_rate'].rank(ascending=False)
        gains = gains_pd[['n_bet', 'gain_rate']].values.tolist()
        max_gain = max(gains, key=lambda x: x[1])
        print('\n')
        print('best n_bet: {}'.format(max_gain[0]))
        print('best gain_rate: {}'.format(max_gain[1]))
    gains_pd.set_index('n_bet', inplace=True)
    gains_pd.to_csv('gains{}.csv'.format(N))
    # Plot gain rate vs. bet size; vertical lines mark the 3 best bet sizes,
    # a horizontal line marks the gain rate at n_bet=49.
    plot_series(gains_pd, {'gain_rate': ('.-b', False)},
                cols_to_label_info={'gain_rate':
                    [['rank', (1, 2, 3), ('r*', 'm*', 'y*'),
                      ('最大赢率', '次大赢率', '第三大赢率')]]},
                yparls_info_up=[(gains_pd[gains_pd['rank'] == 1].index[0], 'r', '-', 1.0),
                                (gains_pd[gains_pd['rank'] == 2].index[0], 'm', '-', 1.0),
                                (gains_pd[gains_pd['rank'] == 3].index[0], 'y', '-', 1.0)],
                xparls_info={'gain_rate': [(gains[-1][1], 'k', '-', 1.0)]},
                xlabels=['n_bet'], ylabels=['gain_rate'],
                n_xticks=49, markersize=15, grids=False, figsize=(11, 7))
    print('used time: {}s.'.format(round(time.time()-strt_tm, 6)))
| 29.292929 | 91 | 0.53069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 834 | 0.269728 |
6c921bf76ccad8c233e5e7ae6b187fed9a3a6457 | 1,500 | py | Python | src/negotiating_agent/venv/lib/python3.8/site-packages/geniusweb/bidspace/AllBidsList.py | HahaBill/CollaborativeAI | f771cd2f34774c74c58e49a7e983d6244ea35eff | [
"MIT"
] | 1 | 2022-02-17T19:14:46.000Z | 2022-02-17T19:14:46.000Z | src/negotiating_agent/venv/lib/python3.8/site-packages/geniusweb/bidspace/AllBidsList.py | HahaBill/CollaborativeAI | f771cd2f34774c74c58e49a7e983d6244ea35eff | [
"MIT"
] | null | null | null | src/negotiating_agent/venv/lib/python3.8/site-packages/geniusweb/bidspace/AllBidsList.py | HahaBill/CollaborativeAI | f771cd2f34774c74c58e49a7e983d6244ea35eff | [
"MIT"
] | null | null | null | from typing import Dict, List
from tudelft.utilities.immutablelist.AbstractImmutableList import AbstractImmutableList
from tudelft.utilities.immutablelist.ImmutableList import ImmutableList
from tudelft.utilities.immutablelist.Outer import Outer
from geniusweb.issuevalue.Bid import Bid
from geniusweb.issuevalue.Domain import Domain
from geniusweb.issuevalue.Value import Value
class AllBidsList (AbstractImmutableList[Bid]):
    '''
    A list containing all complete bids in the space. This is an
    {@link ImmutableList} so it can contain all bids without pre-computing them.
    '''

    def __init__(self, domain: Domain):
        '''
        @param domain the {@link Domain} whose complete bids are enumerated.
            Must not be None.
        @raise ValueError if domain is None.
        '''
        # Fixed: compare to None with `is`, not `==` (avoids invoking
        # a custom __eq__ and follows PEP 8); dropped stray semicolons.
        if domain is None:
            raise ValueError("domain=null")
        self._issues = list(domain.getIssues())
        values: List[ImmutableList[Value]] = [domain.getValues(issue) for issue in self._issues]
        # Cartesian product over the per-issue value lists, computed lazily.
        self._allValuePermutations = Outer[Value](values)

    def get(self, index: int) -> Bid:
        '''
        @param index flat index into the cartesian product of all issue values.
        @return the complete Bid at that index.
        '''
        nextValues = self._allValuePermutations.get(index)
        # Pair each issue with its value from the index-th permutation.
        issueValues: Dict[str, Value] = {
            issue: nextValues.get(n) for n, issue in enumerate(self._issues)
        }
        return Bid(issueValues)

    def size(self) -> int:
        '''
        @return total number of complete bids: the product of the issues'
            value counts.
        '''
        return self._allValuePermutations.size()
| 34.090909 | 95 | 0.696 | 1,115 | 0.743333 | 0 | 0 | 0 | 0 | 0 | 0 | 393 | 0.262 |
6c9355764a56246685f8f050d7a2762e9242d568 | 18,061 | py | Python | broker/jobs/tests/handover.py | yiu31802/ave | e46fc357f9464d5beaf42568a74bb95e6b1b8037 | [
"BSD-3-Clause"
] | 17 | 2016-11-16T08:09:49.000Z | 2021-08-12T06:38:09.000Z | broker/jobs/tests/handover.py | yiu31802/ave | e46fc357f9464d5beaf42568a74bb95e6b1b8037 | [
"BSD-3-Clause"
] | null | null | null | broker/jobs/tests/handover.py | yiu31802/ave | e46fc357f9464d5beaf42568a74bb95e6b1b8037 | [
"BSD-3-Clause"
] | 12 | 2016-11-20T15:34:03.000Z | 2020-08-04T00:26:11.000Z | # Copyright (C) 2013 Sony Mobile Communications AB.
# All rights, including trade secret rights, reserved.
import json
import time
import traceback
from ave.network.process import Process
from ave.network.exceptions import *
from ave.broker._broker import validate_serialized, RemoteBroker, Broker
from ave.broker.session import RemoteSession
from ave.broker.exceptions import *
import setup
# check that a broker with trivial allocations can have its state serialized
@setup.brokers([],'master',[],False,False)
def t1(HOME, master):
    pretty = '%s t1' % __file__
    print(pretty)

    try:
        s = master.serialize()
    except Exception, e:
        print('FAIL %s: trivial serialization failed: %s' % (pretty, str(e)))
        return False

    try:
        validate_serialized(s)
    except Exception, e:
        print('FAIL %s: could not validate adoption: %s' % (pretty, str(e)))
        return False

    return True

# like t1 but with some allocations
@setup.brokers([],'master',[],False,False)
def t2(HOME, master):
    pretty = '%s t2' % __file__
    print(pretty)

    # hold two sessions with allocations while serializing
    c1 = RemoteBroker(master.address, authkey=master.authkey, home=HOME.path)
    c1.get_resources({'type':'handset'}, {'type':'workspace'})
    c2 = RemoteBroker(master.address, authkey=master.authkey, home=HOME.path)
    c2.get_resources({'type':'handset'}, {'type':'relay'})

    try:
        s = master.serialize()
    except Exception, e:
        print('FAIL %s: trivial serialization failed: %s' % (pretty, str(e)))
        return False

    try:
        validate_serialized(s)
    except Exception, e:
        print('FAIL %s: could not validate adoption: %s' % (pretty, str(e)))
        return False

    return True

# trivial handover between two brokers: no allocations. more or less just check
# that the takeover can be started on the same port as the handover and that
# configuration data is the same
@setup.factory()
def t3(factory):
    pretty = '%s t3' % __file__
    print(pretty)

    handover = factory.make_master('master')
    adoption,config,fdtx_path = handover.begin_handover() # stops listening

    try:
        takeover = factory.make_takeover('master', adoption, config, fdtx_path)
    except Exception, e:
        print('FAIL %s: could not start takeover: %s' % (pretty, str(e)))
        return False

    try:
        handover.end_handover(1)
    except ConnectionClosed:
        pass  # expected: the handover stops serving once it hands off
    except Exception, e:
        print('FAIL %s: unexpected error: %s' % (pretty, str(e)))
        return False

    # compare the config and serialization of the two
    c = takeover.get_config()
    if c != config:
        print('FAIL %s: configuration mismatch: %s != %s' % (pretty, c, config))
        return False

    return True
# make a few allocations, then handover. check that both brokers show the same
# availability of equipment
@setup.factory()
def t4(factory):
    pretty = '%s t4' % __file__
    print(pretty)

    handover = factory.make_master('master')
    avail_1 = handover.list_available()

    # make some allocations
    c1 = RemoteBroker(handover.address, home=factory.HOME.path)
    h1,w1 = c1.get_resources({'type':'handset'}, {'type':'workspace'})
    avail_2 = handover.list_available()
    c2 = RemoteBroker(handover.address, home=factory.HOME.path)
    h2,r2 = c2.get_resources({'type':'handset'}, {'type':'relay'})
    avail_3 = handover.list_available()

    # hand over
    adoption,config,fdtx_path = handover.begin_handover()
    takeover = factory.make_takeover('master', adoption, config, fdtx_path)
    handover.end_handover(1)

    # check that availability is correct. stop the sessions started against the
    # handover and check that the resources become availabe in the takeover
    result = takeover.list_available()
    if len(result) != len(avail_3):
        print('FAIL %s: wrong avail 3: %s != %s' % (pretty, result, avail_3))
        return False

    ok = False
    del(c2)  # dropping the client kills its session, releasing h2/r2
    for i in range(10): # allow some time for brokers to detect session death
        result = takeover.list_available()
        if len(result) == len(avail_2):
            ok = True
            break
        time.sleep(0.3)
    if not ok:
        print('FAIL %s: wrong avail 2: %s != %s' % (pretty, result, avail_2))
        return False

    ok = False
    del(c1)  # same for the first session's resources
    for i in range(10): # allow some time for brokers to detect session death
        result = takeover.list_available()
        if len(result) == len(avail_1):
            ok = True
            break
        time.sleep(0.3)
    if not ok:
        # NOTE(review): message prints avail_2 though the check compares
        # against avail_1 — looks like a copy-paste slip in the diagnostics
        print('FAIL %s: wrong avail 1: %s != %s' % (pretty, result, avail_2))
        return False

    return True

# kill off one of the original sessions during the handover and check that the
# associated resources become available in the takeover
@setup.factory()
def t5(factory):
    pretty = '%s t5' % __file__
    print(pretty)

    handover = factory.make_master('master')
    avail_1 = handover.list_available()

    # make some allocations
    c1 = RemoteBroker(handover.address, home=factory.HOME.path)
    h1,w1 = c1.get_resources({'type':'handset'}, {'type':'workspace'})
    avail_2 = handover.list_available()
    c2 = RemoteBroker(handover.address, home=factory.HOME.path)
    h2,r2 = c2.get_resources({'type':'handset'}, {'type':'relay'})
    avail_3 = handover.list_available()

    adoption,config,fdtx_path = handover.begin_handover()

    session = RemoteSession(h2.address, h2.authkey)
    try:
        session.crash() # kill the second session during the handover
    except ConnectionClosed:
        pass

    takeover = factory.make_takeover('master', adoption, config, fdtx_path)
    handover.end_handover(1)

    # the crashed session's resources should be back, so availability should
    # match the point where only the first session held allocations
    result = takeover.list_available()
    if len(result) != len(avail_2):
        print('FAIL %s: wrong avail: %s != %s' % (pretty, result, avail_2))
        return False

    return True
# make sure one of the sessions is super busy during the handover so that it
# cannot engage in communication with the takeover during session adoption
@setup.factory()
def t6(factory):
    pretty = '%s t6' % __file__
    print(pretty)

    handover = factory.make_master('master')
    avail = handover.list_available()

    # out-of-band client process: allocates, then blocks its session thread
    def oob_client(address):
        r = RemoteBroker(address, home=factory.HOME.path)
        h,w = r.get_resources({'type':'handset'}, {'type':'workspace'})
        w.run('sleep 3') # right, extremely busy, but it prevents other action
        while True:
            time.sleep(1) # don't let client die and loose all resources

    p = Process(target=oob_client, args=(handover.address,))
    p.start()

    # make sure the oob client has gotten its resources
    ok = False
    for i in range(10):
        if len(handover.list_available()) != len(avail):
            ok = True
            break
        time.sleep(0.3)
    if not ok:
        print('FAIL %s: catastrophic' % pretty)

    adoption,config,fdtx_path = handover.begin_handover()
    takeover = factory.make_takeover('master', adoption, config, fdtx_path)
    handover.end_handover(1)

    # the busy session's allocations must still be tracked by the takeover,
    # i.e. availability must differ from the initial full availability
    result = True
    if len(takeover.list_available()) == len(avail):
        print('FAIL %s: wrong avail: %s' % (pretty, avail))
        result = False

    p.terminate()
    p.join()
    return result

# check that resources of super busy sessions are reclaimed when the session
# finally dies
@setup.factory()
def t7(factory):
    pretty = '%s t7' % __file__
    print(pretty)

    handover = factory.make_master('master')
    avail = handover.list_available()

    # out-of-band client: allocates, stays busy briefly, then exits
    def oob_client(address):
        r = RemoteBroker(address, home=factory.HOME.path)
        h,w = r.get_resources({'type':'handset'}, {'type':'workspace'})
        w.run('sleep 2') # right, extremely busy, but it prevents other action

    p = Process(target=oob_client, args=(handover.address,))
    p.start()

    # make sure the oob client has gotten its resources
    ok = False
    for i in range(10):
        if len(handover.list_available()) != len(avail):
            ok = True
            break
        time.sleep(0.1)
    if not ok:
        print('FAIL %s: catastrophic' % pretty)
        p.terminate()
        p.join()
        return False

    adoption,config,fdtx_path = handover.begin_handover()
    takeover = factory.make_takeover('master', adoption, config, fdtx_path)
    handover.end_handover(1)

    # now wait for the client to die, so that its session dies, so that
    # the takeover detects this, so that the associated resouces can be reclaimed,
    # so that the takeover's availability is the same as when we started
    ok = False
    for i in range(10):
        if len(takeover.list_available()) == len(avail):
            ok = True
            break
        time.sleep(0.3)
    if not ok:
        print('FAIL %s: super busy session not tracked correctly' % pretty)

    p.terminate()
    p.join()
    return ok
# check that sessions survive multiple broker restarts
@setup.factory()
def t8(factory):
    pretty = '%s t8' % __file__
    print(pretty)

    original = factory.make_master('master')
    avail = original.list_available()

    # out-of-band client that holds its allocations until terminated
    def oob_client(address):
        r = RemoteBroker(address, home=factory.HOME.path)
        h,w = r.get_resources({'type':'handset'}, {'type':'workspace'})
        while True:
            time.sleep(1)

    p = Process(target=oob_client, args=(original.address,))
    p.start()

    # make sure the oob client has gotten its resources
    ok = False
    for i in range(10):
        if len(original.list_available()) != len(avail):
            ok = True
            break
        time.sleep(0.1)
    if not ok:
        print('FAIL %s: catastrophic' % pretty)
        p.terminate()
        p.join()
        return False

    # do two handovers in a row
    adoption,config,fdtx_path = original.begin_handover()
    interim = factory.make_takeover('master', adoption, config, fdtx_path)
    original.end_handover(1)
    adoption,config,fdtx_path = interim.begin_handover()
    final = factory.make_takeover('master', adoption, config, fdtx_path)
    interim.end_handover(1)

    # check that all brokers have the same availability
    a1 = original.list_available()
    a2 = interim.list_available()
    a3 = final.list_available()
    if len(a1) != len(a2) != len(a3):
        print(
            'FAIL %s: a handover failed somewhere: %s != %s != %s'
            % (pretty, a1, a2, a3)
        )
        p.terminate()
        p.join()
        return False

    # kill the client so that the brokers reclaim the equipment
    p.terminate()
    p.join()
    ok = False
    for i in range(10):
        a3 = final.list_available()
        if len(a3) == len(avail):
            ok = True
            break
    if not ok:
        print(
            'FAIL %s: wrong availability: %d %d %d %d'
            % (pretty, len(a1), len(a2), len(a3), len(avail))
        )
        return False

    # check that the original and interim brokers have terminated now that they
    # don't have any sessions with allocations
    try:
        original.ping() # ping
    except Exit, e:
        pass # good
    except Exception, e:
        print('FAIL %s: wrong exception: %s' % (pretty, e))
        return False
    try:
        interim.ping() # ping
    except Exit, e:
        pass # good
    except Exception, e:
        print('FAIL %s: wrong exception: %s' % (pretty, e))
        return False

    return True

# check that clients still attached to the handover get Restarting exceptions
# when they try to allocate after the handover has been done. this *can* be
# fixed so that clients migrate automatically, but it is difficult and I would
# prefer to not implement it unless there a strong case can be made for it
@setup.factory()
def t9(factory):
    pretty = '%s t9' % __file__
    print(pretty)

    handover = factory.make_master('master')
    client = RemoteBroker(handover.address, home=factory.HOME.path)

    # make first allocation
    h,w = client.get_resources({'type':'handset'}, {'type':'workspace'})

    # hand over
    adoption,config,fdtx_path = handover.begin_handover()
    takeover = factory.make_takeover('master', adoption, config, fdtx_path)
    handover.end_handover(1)

    # make second allocation: must be rejected with Restarting
    try:
        client.get({'type':'handset'})
        print('FAIL %s: second allocation did not fail' % pretty)
        return False
    except Restarting:
        pass # good
    except Exception, e:
        print('FAIL %s: wrong exception: %s' % (pretty, e))
        return False

    return True
# check that a restarted share shows up again in its master
@setup.factory()
def t10(factory):
    pretty = '%s t10' % __file__
    print(pretty)

    master = factory.make_master('master')
    share = factory.make_share(master, 'share')
    share.start_sharing()
    time.sleep(1)

    # allocate one shared handset so it is busy during the restart
    client = RemoteBroker(address=master.address, home=factory.HOME.path)
    h = client.get_resources({'type':'handset', 'serial':'share-1'})
    a1 = master.list_available()

    # restart the share
    adoption,config,fdtx_path = share.begin_handover()
    takeover = factory.make_takeover('share', adoption, config, fdtx_path)
    # mid-handover the share's equipment should be gone from the master
    a2 = master.list_available()
    if len(a1) == len(a2):
        print('FAIL %s: shared resources still visible: %s' % (pretty, a2))
        return False

    # finish the handover so that takeover can start accepting RPC's. then
    # check that the master sees all equipment except the one allocated
    share.end_handover(1)
    ok = False
    for i in range(10):
        a3 = master.list_available()
        if len(a3) == len(a1):
            ok = True
            break
        time.sleep(0.3)
    if not ok:
        print('FAIL %s: wrong availability: %s' % (pretty, a3))
        return False
    for profile in a3:
        if 'serial' in profile and profile['serial'] == 'share-1':
            print('FAIL %s: busy equipment shared' % pretty)
            return False

    # finally check that the resource can still be manipulated
    try:
        p = h.get_profile()
        if p['serial'] != 'share-1':
            print('FAIL %s: wrong profile: %s' % (pretty, p))
            return False
    except Exception, e:
        print('FAIL %s: unexpected error: %s' % (pretty, e))
        return False

    return True

# check that shares reconnect to a restarted master
@setup.factory()
def t11(factory):
    pretty = '%s t11' % __file__
    print(pretty)

    master = factory.make_master('master')
    share = factory.make_share(master, 'share')
    share.start_sharing()
    time.sleep(1)

    # allocate one handset from the share and one local to the master
    client = RemoteBroker(address=master.address, home=factory.HOME.path)
    h1 = client.get_resources({'type':'handset', 'serial':'share-1'})
    h2 = client.get_resources({'type':'handset', 'serial':'master-1'})
    a1 = master.list_available()

    # restart the master
    adoption,config,fdtx_path = master.begin_handover()
    takeover = factory.make_takeover('master', adoption, config, fdtx_path)
    master.end_handover(1)

    # connect to the new master and check the availability again
    master = RemoteBroker(address=master.address, home=factory.HOME.path)
    ok = False
    for i in range(10):
        a2 = master.list_available()
        if len(a2) == len(a1):
            ok = True
            break
        time.sleep(0.3)
    if not ok:
        print('FAIL %s: wrong availability: %s' % (pretty, a2))
        return False
    for profile in a2:
        if 'serial' in profile and profile['serial'] == 'share-1':
            print('FAIL %s: busy equipment shared' % pretty)
            return False

    return True
# check that .end_handover() doesn't time out even if the takeover did not get
# any sessions to adopt. regression test
@setup.factory()
def t12(factory):
    pretty = '%s t12' % __file__
    print(pretty)

    master = factory.make_master('master')

    adoption,config,fdtx_path = master.begin_handover()
    takeover = factory.make_takeover('master', adoption, config, fdtx_path)
    try:
        master.end_handover(1)
    except ConnectionClosed:
        pass  # acceptable: the handover may close the connection on completion
    except Exception, e:
        print('FAIL %s: unexpected error: %s' % (pretty, e))
        return False

    return True

# check that the handover exits when the last session terminates
@setup.factory()
def t13(factory):
    pretty = '%s t13' % __file__
    print(pretty)

    handover = factory.make_master('master')

    # make some sessions
    c1 = RemoteBroker(handover.address, home=factory.HOME.path)
    h1,w1 = c1.get_resources({'type':'handset'}, {'type':'workspace'})
    avail_2 = handover.list_available()
    c2 = RemoteBroker(handover.address, home=factory.HOME.path)
    h2,r2 = c2.get_resources({'type':'handset'}, {'type':'relay'})
    avail_3 = handover.list_available()

    adoption,config,fdtx_path = handover.begin_handover()
    takeover = factory.make_takeover('master', adoption, config, fdtx_path)
    handover.end_handover(1)

    # crash the sessions
    session = RemoteSession(h1.address, h1.authkey)
    try:
        session.crash()
    except ConnectionClosed:
        pass
    session = RemoteSession(h2.address, h2.authkey)
    try:
        session.crash()
    except ConnectionClosed:
        pass

    for i in range(10): # wait until only one session remains, then close it
        authkeys = handover.get_session_authkeys()
        if len(authkeys) == 1:
            break
        time.sleep(0.3)

    # check that the handover sends its exit message when the last session is
    # closed
    try:
        handover.close_session(authkeys[0])
    except Exit, e:
        if str(e) != 'broker restarted. please reconnect':
            print('FAIL %s: wrong exit message: %s' % (pretty, str(e)))
            return False
    except Exception, e:
        print('FAIL %s: wrong exception: %s' % (pretty, e))
        return False

    # the handover process should be gone now; pinging it must fail
    try:
        handover.ping() # ping
    except ConnectionClosed:
        pass # good
    except Exception, e:
        print('FAIL %s: wrong exception: %s' % (pretty, e))
        return False

    return True
| 31.410435 | 82 | 0.634406 | 0 | 0 | 0 | 0 | 16,179 | 0.895798 | 0 | 0 | 5,347 | 0.296052 |
6c961f209eec16468ab70082c8a348fbaef551da | 3,250 | py | Python | app/v1/core/auth/__init__.py | speduardo/flask-boilerplate | d50d8d0f15a08c4905a2029b0ae9637489624c9a | [
"MIT"
] | 1 | 2020-05-26T01:53:58.000Z | 2020-05-26T01:53:58.000Z | app/v1/core/auth/__init__.py | speduardo/flask-boilerplate | d50d8d0f15a08c4905a2029b0ae9637489624c9a | [
"MIT"
] | null | null | null | app/v1/core/auth/__init__.py | speduardo/flask-boilerplate | d50d8d0f15a08c4905a2029b0ae9637489624c9a | [
"MIT"
] | null | null | null | from flask import jsonify
from flask_jwt_extended import JWTManager
from app.v1.messages import MSG_INVALID_CREDENTIALS, MSG_TOKEN_EXPIRED
from app.v1.modules.users.daos import UserDAO
jwt = JWTManager()
def init_app(app, **kwargs):
# pylint: disable=unused-argument
"""
Auth extension initialization point.
"""
# Add jwt handler
return JWTManager(app)
@jwt.user_claims_loader
def add_claims_to_access_token(identity):
user = UserDAO.objects.get(email=identity)
# Podemos extender as informações do usuaŕio adicionando
# novos campos: active, roles, full_name e etc...
if user:
return {
'active': user.active
}
# The callbacks below all follow the same pattern: build a JSON response
# with HTTP status 401 and a distinct ``sub_status`` code so API clients
# can distinguish the failure cause.
@jwt.expired_token_loader
def my_expired_token_callback():
    # Token past its expiry time (sub_status 42).
    resp = jsonify({
        'status': 401,
        'sub_status': 42,
        'message': MSG_TOKEN_EXPIRED
    })
    resp.status_code = 401
    return resp


@jwt.unauthorized_loader
def my_unauthorized_callback(e):
    # No token supplied at all (sub_status 1); *e* is the library's reason string.
    resp = jsonify({
        'status': 401,
        'sub_status': 1,
        'description': e,
        'message': MSG_INVALID_CREDENTIALS
    })
    resp.status_code = 401
    return resp


@jwt.claims_verification_loader
def my_claims_verification_callback(e):
    # sub_status 2.
    # NOTE(review): flask_jwt_extended expects this loader to RETURN A BOOL
    # (whether the claims are valid), not a response — confirm this works
    # with the installed library version.
    resp = jsonify({
        'status': 401,
        'sub_status': 2,
        'description': e,
        'message': MSG_INVALID_CREDENTIALS
    })
    resp.status_code = 401
    return resp


@jwt.invalid_token_loader
def my_invalid_token_loader_callback(e):
    # Token is malformed / fails signature verification (sub_status 3).
    resp = jsonify({
        'status': 401,
        'sub_status': 3,
        'description': e,
        'message': MSG_INVALID_CREDENTIALS
    })
    resp.status_code = 401
    return resp


@jwt.needs_fresh_token_loader
def my_needs_fresh_token_callback(e):
    # A fresh token was required but a non-fresh one was given (sub_status 4).
    resp = jsonify({
        'status': 401,
        'sub_status': 4,
        'description': e,
        'message': MSG_INVALID_CREDENTIALS
    })
    resp.status_code = 401
    return resp


@jwt.revoked_token_loader
def my_revoked_token_callback(e):
    # Token has been revoked (sub_status 5).
    resp = jsonify({
        'status': 401,
        'sub_status': 5,
        'description': e,
        'message': MSG_INVALID_CREDENTIALS
    })
    resp.status_code = 401
    return resp


@jwt.user_loader_callback_loader
def my_user_loader_callback(e):
    # sub_status 6.
    # NOTE(review): this loader is normally expected to return the user
    # object (or None) for the given identity, not a response — verify.
    resp = jsonify({
        'status': 401,
        'sub_status': 6,
        'description': e,
        'message': MSG_INVALID_CREDENTIALS
    })
    resp.status_code = 401
    return resp


@jwt.user_loader_error_loader
def my_user_loader_error_callback(e):
    # User lookup failed (sub_status 7).
    resp = jsonify({
        'status': 401,
        'sub_status': 7,
        'description': e,
        'message': MSG_INVALID_CREDENTIALS
    })
    resp.status_code = 401
    return resp


@jwt.token_in_blacklist_loader
def my_token_in_blacklist_callback(e):
    # sub_status 8.
    # NOTE(review): the blacklist loader should return a bool (token
    # revoked or not), not a response — verify against the library docs.
    resp = jsonify({
        'status': 401,
        'sub_status': 8,
        'description': e,
        'message': MSG_INVALID_CREDENTIALS
    })
    resp.status_code = 401
    return resp


@jwt.claims_verification_failed_loader
def my_claims_verification_failed_callback(e):
    # Claims verification (callback above) returned False (sub_status 9).
    resp = jsonify({
        'status': 401,
        'sub_status': 9,
        'description': e,
        'message': MSG_INVALID_CREDENTIALS
    })
    resp.status_code = 401
    return resp
| 19.117647 | 70 | 0.638154 | 0 | 0 | 0 | 0 | 2,838 | 0.872425 | 0 | 0 | 625 | 0.19213 |
6c97c8909880bfcfc2e504aa9472d6979242b5cd | 3,845 | py | Python | plogical/CyberCPLogFileWriter.py | uzairAK/serverom-panel | 3dcde05ad618e6bef280db7d3180f926fe2ab1db | [
"MIT"
] | null | null | null | plogical/CyberCPLogFileWriter.py | uzairAK/serverom-panel | 3dcde05ad618e6bef280db7d3180f926fe2ab1db | [
"MIT"
] | null | null | null | plogical/CyberCPLogFileWriter.py | uzairAK/serverom-panel | 3dcde05ad618e6bef280db7d3180f926fe2ab1db | [
"MIT"
] | null | null | null | import subprocess
import time
import socket
import os
import smtplib
class CyberCPLogFileWriter:
    """Append-only logger for CyberPanel with optional email notification.

    All methods are static; the class acts as a namespace. Logging methods
    append a timestamped line to ``fileName`` and swallow their own errors
    so that logging can never crash the caller.
    """

    # Central error log appended to by every method.
    fileName = "/home/cyberpanel/error-logs.txt"

    @staticmethod
    def SendEmail(sender, receivers, message, subject=None, type=None):
        """Send *message* to *receivers*.

        If ``/home/cyberpanel/smtpDetails`` exists it is parsed as JSON
        (smtpHost/smtpPort/smtpUserName/smtpPassword) and used as an
        authenticated relay; otherwise a plain SMTP server on localhost
        is used. *type* is kept for backward compatibility and unused.
        Failures are logged via writeToFile, never raised.
        """
        try:
            smtpPath = '/home/cyberpanel/smtpDetails'
            if os.path.exists(smtpPath):
                import json
                with open(smtpPath, 'r') as f:
                    mailSettings = json.loads(f.read())
                smtpHost = mailSettings['smtpHost']
                smtpPort = mailSettings['smtpPort']
                smtpUserName = mailSettings['smtpUserName']
                smtpPassword = mailSettings['smtpPassword']
                smtpServer = smtplib.SMTP(str(smtpHost), int(smtpPort))
                smtpServer.login(smtpUserName, smtpPassword)
                # NOTE: the subject is only prepended on this branch, as in
                # the original implementation.
                if subject != None:
                    message = 'Subject: {}\n\n{}'.format(subject, message)
                smtpServer.sendmail(smtpUserName, receivers, message)
                # Close the connection (previously leaked).
                smtpServer.quit()
            else:
                smtpObj = smtplib.SMTP('localhost')
                smtpObj.sendmail(sender, receivers, message)
                smtpObj.quit()
        except BaseException as msg:
            CyberCPLogFileWriter.writeToFile(str(msg))

    @staticmethod
    def writeToFile(message, email=None):
        """Append *message* to the error log; optionally email it.

        When ``/usr/local/CyberCP/emailDebug`` exists and *email* is
        None or 1, the entry is also mailed to the address stored in
        ``/home/cyberpanel/adminEmail``. Returns an error string if the
        log file itself cannot be written.
        """
        try:
            with open(CyberCPLogFileWriter.fileName, 'a') as file:
                file.writelines("[" + time.strftime(
                    "%m.%d.%Y_%H-%M-%S") + "] " + message + "\n")
            ## Send Email
            emailPath = '/usr/local/CyberCP/emailDebug'
            try:
                if os.path.exists(emailPath):
                    SUBJECT = "CyberPanel log reporting"
                    adminEmailPath = '/home/cyberpanel/adminEmail'
                    with open(adminEmailPath, 'r') as f:
                        adminEmail = f.read().rstrip('\n')
                    sender = 'root@%s' % (socket.gethostname())
                    TO = [adminEmail]
                    message = """\
From: %s
To: %s
Subject: %s
%s
""" % (
                        sender, ", ".join(TO), SUBJECT, '[%s] %s. \n' % (time.strftime("%m.%d.%Y_%H-%M-%S"), message))
                    if email == None or email == 1:
                        CyberCPLogFileWriter.SendEmail(sender, TO, message)
            except BaseException as msg:
                # Email is best-effort: record the failure in the log.
                with open(CyberCPLogFileWriter.fileName, 'a') as file:
                    file.writelines("[" + time.strftime(
                        "%m.%d.%Y_%H-%M-%S") + "] " + str(msg) + "\n")
        except BaseException as msg:
            return "Can not write to error file."

    @staticmethod
    def writeforCLI(message, level, method):
        """Append a CLI-style log line tagged with *level* and *method*.

        The original closed the file twice; the ``with`` block closes it
        exactly once even on error.
        """
        try:
            with open(CyberCPLogFileWriter.fileName, 'a') as file:
                file.writelines("[" + time.strftime(
                    "%m.%d.%Y_%H-%M-%S") + "] [" + level + ":" + method + "] " + message + "\n")
        except BaseException:
            return "Can not write to error file!"

    @staticmethod
    def readLastNFiles(numberOfLines, fileName):
        """Return the last *numberOfLines* lines of *fileName* via tail(1)."""
        try:
            lastFewLines = str(subprocess.check_output(["tail", "-n", str(numberOfLines), fileName]).decode("utf-8"))
            return lastFewLines
        except subprocess.CalledProcessError as msg:
            return "File was empty"

    @staticmethod
    def statusWriter(tempStatusPath, mesg, append=None):
        """Write (or append, when *append* is set) *mesg* to a status file
        and echo it to stdout; errors are logged, never raised."""
        try:
            mode = 'w' if append == None else 'a'
            with open(tempStatusPath, mode) as statusFile:
                statusFile.writelines(mesg + '\n')
            print((mesg + '\n'))
        except BaseException as msg:
            CyberCPLogFileWriter.writeToFile(str(msg) + ' [statusWriter]')
6c98706e61936bfb19561cc8e9e0f4a5b6b8ad20 | 38 | py | Python | tensorflow-extensions/dataset/__init__.py | king-michael/tensorflow-extensions | c563d022e95d063f221a1b030db112039b9c407e | [
"MIT"
] | null | null | null | tensorflow-extensions/dataset/__init__.py | king-michael/tensorflow-extensions | c563d022e95d063f221a1b030db112039b9c407e | [
"MIT"
] | null | null | null | tensorflow-extensions/dataset/__init__.py | king-michael/tensorflow-extensions | c563d022e95d063f221a1b030db112039b9c407e | [
"MIT"
] | null | null | null | from .NumpyDataset import NumpyDataset | 38 | 38 | 0.894737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6c9944dc5b1ae363873ef1748ed4ad89a74178d6 | 261 | py | Python | register_classes.py | Iherrbenza/SMB100A_Python | 80af83faa306a31f323869c2d20f121bb941d85b | [
"BSD-3-Clause"
] | null | null | null | register_classes.py | Iherrbenza/SMB100A_Python | 80af83faa306a31f323869c2d20f121bb941d85b | [
"BSD-3-Clause"
] | null | null | null | register_classes.py | Iherrbenza/SMB100A_Python | 80af83faa306a31f323869c2d20f121bb941d85b | [
"BSD-3-Clause"
] | null | null | null | # @Date: 2020-04-05T14:08:33+10:00
# @Last modified time: 2020-04-08T18:40:22+10:00
from labscript_devices import register_classes

# Register the SMB100A signal generator with labscript, wiring up its
# BLACS tab class; no runviewer parser is provided.
register_classes(
    'SMB100A',
    BLACS_tab='labscript_devices.SMB100A.blacs_tabs.SMB100ATab',
    runviewer_parser=None
)
| 23.727273 | 64 | 0.754789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 142 | 0.544061 |
6c9aaddf3d7ed095cb1f382721895fe2de325e0d | 6,060 | py | Python | src/clophfit/old/fit_titration_global.py | darosio/ClopHfit | 00c7f228c3d443dfc776efe50a0f09473c05aa63 | [
"BSD-3-Clause"
] | null | null | null | src/clophfit/old/fit_titration_global.py | darosio/ClopHfit | 00c7f228c3d443dfc776efe50a0f09473c05aa63 | [
"BSD-3-Clause"
] | null | null | null | src/clophfit/old/fit_titration_global.py | darosio/ClopHfit | 00c7f228c3d443dfc776efe50a0f09473c05aa63 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""Module for global fitting titrations (pH and cl) on 2 datasets
"""
import os
import sys
import argparse
import numpy as np
from lmfit import Parameters, Minimizer, minimize, conf_interval, report_fit
import pandas as pd
import matplotlib.pyplot as plt
# from scipy import optimize
def ci_report(ci):
    """Render a confidence-interval mapping as an aligned text table.

    *ci* maps parameter names to sequences of ``(probability, value)``
    pairs. The first row encountered supplies the percentage header; each
    value is formatted with ``%.6G`` and right-justified in a 10-char
    column.
    """
    width = max(len(name) for name in ci)
    rows = []
    header_written = False
    for name, row in ci.items():
        if not header_written:
            header = ''.rjust(width) + ''.join(
                (('%.2f' % (prob * 100)) + '%').rjust(10) for prob, _ in row)
            rows.append(header)
            header_written = True
        rows.append(name.rjust(width) + ''.join(
            ('%.6G' % val).rjust(10) for _, val in row))
    return '\n'.join(rows)
def residual(pars, x, data=None, titration_type=None):
    """Global residual for a two-dataset pH or chloride titration fit.

    Parameters
    ----------
    pars : lmfit.Parameters
        Must contain SA1, SB1, SA2, SB2 and the shared K.
    x : list of two x vectors (one per dataset).
    data : list of two y vectors, or None.
    titration_type : 'pH' or 'cl' (anything else aborts the program).

    Returns
    -------
    Concatenated model values when *data* is None, otherwise the
    concatenated residuals (model - data).
    """
    p = pars.valuesdict()
    sa1, sb1 = p['SA1'], p['SB1']
    sa2, sb2 = p['SA2'], p['SB2']
    k = p['K']
    if titration_type == 'pH':
        ratio1 = 10 ** (k - x[0])
        ratio2 = 10 ** (k - x[1])
        fit1 = (sb1 + sa1 * ratio1) / (1 + ratio1)
        fit2 = (sb2 + sa2 * ratio2) / (1 + ratio2)
    elif titration_type == 'cl':
        fit1 = (sa1 + sb1 * x[0] / k) / (1 + x[0] / k)
        fit2 = (sa2 + sb2 * x[1] / k) / (1 + x[1] / k)
    else:
        print('Error: residual call must indicate a titration type')
        sys.exit()
    if data is None:
        return np.r_[fit1, fit2]
    return np.r_[fit1 - data[0], fit2 - data[1]]
def main():
    """CLI entry point: parse arguments, fit the two-dataset titration,
    print the fit report and confidence intervals, and save a plot."""
    description = "Fit a pH or Cl titration file: x y1 y2"
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('file',
                        help='the file <x y1 y2> without heads')
    parser.add_argument('out_folder',
                        help='The folder to output the .txt and .png files')
    parser.add_argument('-t', '--titration-of', dest='titration_type',
                        action="store", default="pH", choices=["pH", "cl"],
                        help='Type of titration, pH or cl')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Printout runtime information.increase verbosity')
    parser.add_argument('--boot', dest='nboot', type=int,
                        help='bootstraping using <n> iterations')
    args = parser.parse_args()
    ttype = args.titration_type
    # NOTE(review): the input is read with default separators; the
    # space-separated headerless variant is kept for reference.
    #df = pd.read_csv(args.file, sep=' ', names=['x', 'y1', 'y2'])
    df = pd.read_csv(args.file)
    if not os.path.isdir(args.out_folder):
        os.makedirs(args.out_folder)
    # Initial guesses: plateau signals at the extreme x values of each set.
    fit_params = Parameters()
    fit_params.add('SA1', value=df.y1[df.x == min(df.x)].values[0], min=0)
    fit_params.add('SB1', value=df.y1[df.x == max(df.x)].values[0], min=0)
    fit_params.add('SA2', value=df.y2[df.x == min(df.x)].values[0], min=0)
    fit_params.add('SB2', value=df.y2[df.x == max(df.x)].values[0], min=0)
    if args.titration_type == "pH":
        fit_params.add('K', value=7, min=4, max=10)
    elif args.titration_type == "cl":
        fit_params.add('K', value=20, min=0, max=1000)
    # Shared K across both datasets (global fit).
    mini = Minimizer(residual, fit_params, fcn_args=([df.x, df.x],),
                     fcn_kws={'data': [df.y1, df.y2], 'titration_type': ttype})
    res = mini.minimize()
    report_fit(fit_params)
    ci = conf_interval(mini, res, sigmas=[.674, .95])
    print(ci_report(ci))
    # plotting: data points plus fitted curves for both datasets
    xfit = np.linspace(df.x.min(), df.x.max(), 100)
    yfit = residual(fit_params, [xfit, xfit], titration_type=ttype)  # kws={}
    yfit = yfit.reshape(2, len(yfit) // 2)
    plt.plot(df.x, df.y1, 'o', df.x, df.y2, 's', xfit, yfit[0], '-',
             xfit, yfit[1], '-')
    plt.grid(True)
    f_out = os.path.join(args.out_folder, os.path.split(args.file)[1])
    plt.savefig(f_out + ".png")
    if args.nboot:
        bootstrap(df, args.nboot, fit_params, f_out, ttype)
def bootstrap(df, nboot, fit_params, f_out, ttype):
    """Estimate parameter variability by bootstrap resampling.

    Parameters
    ----------
    df : pandas.DataFrame
        Titration data with columns x, y1, y2.
    nboot : int
        Number of bootstrap iterations.
    fit_params : lmfit.Parameters
        Starting parameters for each refit.
    f_out : str
        Output path prefix for the pair-plot PNG.
    ttype : str
        Titration type forwarded to residual() ('pH' or 'cl').

    Side effects
    ------------
    Prints bootstrap quantiles of K and saves <f_out>-bs.png.
    """
    import seaborn as sns
    n_points = len(df)
    kds = []
    sa1 = []
    sb1 = []
    sa2 = []
    sb2 = []
    for i in range(nboot):
        # Resample the two datasets independently, with replacement.
        # np.random.randint's upper bound is exclusive, so the correct
        # bound is n_points (the original's n_points-1 could never pick
        # the last row).
        boot_idxs = np.random.randint(0, n_points, n_points)
        df2 = df.loc[boot_idxs]
        df2.reset_index(drop=True, inplace=True)
        boot_idxs = np.random.randint(0, n_points, n_points)
        df3 = df.loc[boot_idxs]
        df3.reset_index(drop=True, inplace=True)
        try:
            res = minimize(residual, fit_params, args=([df2.x, df3.x],),
                           kws={'data': [df2.y1, df3.y2], 'titration_type': ttype})
            kds.append(res.params['K'].value)
            sa1.append(res.params['SA1'].value)
            sb1.append(res.params['SB1'].value)
            sa2.append(res.params['SA2'].value)
            sb2.append(res.params['SB2'].value)
        except Exception as exc:
            # The original bare ``except:`` swallowed even SystemExit and
            # KeyboardInterrupt; only fit failures should be reported here.
            print(exc)
            print(df2)
            print(df3)
    dff = pd.DataFrame({'K': kds, 'SA1': sa1, 'SB1': sb1, 'SA2': sa2,
                        'SB2': sb2})
    print("bootstrap: ",
          round(dff.K.quantile(.025), 3),
          round(dff.K.quantile(.163), 3),
          round(dff.K.median(), 3),
          round(dff.K.quantile(.837), 3),
          round(dff.K.quantile(.975), 3))
    sns.set_style('darkgrid')
    g = sns.PairGrid(dff)
    # g.map_diag(sns.kdeplot, lw=3)
    g.map_diag(plt.hist, alpha=0.4)
    g.map_upper(plt.scatter, s=9, alpha=0.6)
    g.map_lower(sns.kdeplot, cmap="Blues_d")
    plt.savefig(f_out + "-bs" + ".png")
# Script entry point.
if __name__ == '__main__':
    main()
| 34.827586 | 79 | 0.562211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,425 | 0.235149 |
6c9b0dde5a35f728bc75ac22cfb8000971c95236 | 487 | py | Python | gehomesdk/erd/converters/advantium/erd_advantium_cook_time_remaining_converter.py | warrenrees/gehome | 6a40f15cfd0738aa7e2ac74b2d8d87780fc07ec8 | [
"MIT"
] | null | null | null | gehomesdk/erd/converters/advantium/erd_advantium_cook_time_remaining_converter.py | warrenrees/gehome | 6a40f15cfd0738aa7e2ac74b2d8d87780fc07ec8 | [
"MIT"
] | null | null | null | gehomesdk/erd/converters/advantium/erd_advantium_cook_time_remaining_converter.py | warrenrees/gehome | 6a40f15cfd0738aa7e2ac74b2d8d87780fc07ec8 | [
"MIT"
] | null | null | null | from datetime import timedelta
from typing import Optional
from ..abstract import ErdReadOnlyConverter
from ..primitives import *
from gehomesdk.erd.values.advantium import ErdAdvantiumCookTimeMinMax
class ErdAdvantiumCookTimeRemainingConverter(ErdReadOnlyConverter[Optional[timedelta]]):
def erd_decode(self, value: str) -> Optional[timedelta]:
""" Decodes the cook time as a time span, 65535 is treated as None. """
return erd_decode_timespan_seconds(value[2:6])
| 40.583333 | 88 | 0.786448 | 284 | 0.583162 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.145791 |
6c9b2090dbcb8653a5e155d2427829cb5e811f8a | 3,842 | py | Python | examples/5_cells_iclamp/verification/pure_nrn.py | AllenInstitute/project7 | 901350cdf5c57a56b7efece2a309c72bdf5f2608 | [
"BSD-3-Clause"
] | 35 | 2018-04-10T13:54:27.000Z | 2022-03-12T09:22:31.000Z | examples/5_cells_iclamp/verification/pure_nrn.py | AllenInstitute/project7 | 901350cdf5c57a56b7efece2a309c72bdf5f2608 | [
"BSD-3-Clause"
] | 92 | 2018-03-19T10:14:18.000Z | 2022-01-29T15:21:47.000Z | examples/5_cells_iclamp/verification/pure_nrn.py | AllenInstitute/project7 | 901350cdf5c57a56b7efece2a309c72bdf5f2608 | [
"BSD-3-Clause"
] | 25 | 2018-03-16T23:14:42.000Z | 2022-02-09T19:37:05.000Z | import os
import sys
import neuron
import json
from pprint import pprint
from neuron import h
import matplotlib.pyplot as plt
import numpy as np
import h5py
## Runs the 5 cell iclamp simulation but in NEURON for each individual cell
# $ python pure_nrn.py <gid>
# Load the compiled membrane mechanisms and the hoc helpers needed for
# SWC morphology import before any cell is instantiated.
neuron.load_mechanisms('../components/mechanisms')
h.load_file('stdgui.hoc')
h.load_file('import3d.hoc')

# gid -> [model id, cre line, morphology file]
cells_table = {
    0: [472363762, 'Scnn1a', 'Scnn1a_473845048_m.swc'],
    1: [473863510, 'Rorb', 'Rorb_325404214_m.swc'],
    2: [473863035, 'Nr5a1', 'Nr5a1_471087815_m.swc'],
    3: [472912177, 'PV1', 'Pvalb_470522102_m.swc'],
    4: [473862421, 'PV2', 'Pvalb_469628681_m.swc']
}
def run_simulation(gid, morphologies_dir='../components/morphologies', plot_results=True):
    """Run the three-step current-clamp protocol on one cell in pure NEURON.

    gid : int key into cells_table (selects morphology + model JSON).
    morphologies_dir : directory holding the .swc files.
    plot_results : show the voltage trace with matplotlib when True.

    Writes the somatic voltage to cellvar_gid<gid>_<model>_<cre>.h5.
    """
    swc_file = os.path.join(morphologies_dir, cells_table[gid][2])
    model_file = 'model_gid{}_{}_{}.json'.format(gid, cells_table[gid][0], cells_table[gid][1])
    params_dict = json.load(open(model_file, 'r'))
    # pprint(params_dict)

    # load the cell morphology from SWC
    nrn_swc = h.Import3d_SWC_read()
    nrn_swc.input(str(swc_file))
    imprt = h.Import3d_GUI(nrn_swc, 0)
    h("objref this")
    imprt.instantiate(h.this)

    # Cut the axon and replace it with a two-section stub (30 um, 1 um diam).
    h("soma[0] area(0.5)")
    for sec in h.allsec():
        # d_lambda-style discretization: odd nseg, ~40 um per segment.
        sec.nseg = 1 + 2 * int(sec.L / 40.0)
        if sec.name()[:4] == "axon":
            h.delete_section(sec=sec)
    h('create axon[2]')
    for sec in h.axon:
        sec.L = 30
        sec.diam = 1
        sec.nseg = 1 + 2 * int(sec.L / 40.0)
    h.axon[0].connect(h.soma[0], 0.5, 0.0)
    h.axon[1].connect(h.axon[0], 1.0, 0.0)
    h.define_shape()

    # set model params from the JSON (per section-type)
    h("access soma")
    for sec in h.allsec():
        sec_name = sec.name().split('[')[0]
        # special case for passive channels rev. potential
        sec.insert('pas')
        for seg in sec:
            if sec_name not in params_dict['e_pas']:
                continue
            seg.pas.e = params_dict['e_pas'][sec_name]
        # insert mechanisms (if req.) and set density
        for prop in params_dict[sec_name]:
            if 'mechanism' in prop:
                sec.insert(prop['mechanism'])
            setattr(sec, prop['name'], prop['value'])

    # simulation properties
    h.stdinit()
    h.tstop = 4000.0
    h.dt = 0.1
    h.steps_per_ms = 1/h.dt
    h.celsius = 34.0
    h.v_init = -80.0

    # stimuli is an increasing series of 3 step currents (0.15/0.175/0.2 nA)
    cclamp1 = h.IClamp(h.soma[0](0.5))
    cclamp1.delay = 500.0
    cclamp1.dur = 500.0
    cclamp1.amp = 0.1500

    cclamp2 = h.IClamp(h.soma[0](0.5))
    cclamp2.delay = 1500.0
    cclamp2.dur = 500.0
    cclamp2.amp = 0.1750

    cclamp3 = h.IClamp(h.soma[0](0.5))
    cclamp3.delay = 2500.0
    cclamp3.dur = 500.0
    cclamp3.amp = 0.2000

    # run simulation, recording somatic membrane potential
    v_vec = h.Vector()
    v_vec.record(h.soma[0](0.5)._ref_v)
    h.startsw()
    h.run(h.tstop)
    voltages = [v for v in v_vec]

    cell_var_name = 'cellvar_gid{}_{}_{}.h5'.format(gid, cells_table[gid][0], cells_table[gid][1])
    with h5py.File(cell_var_name, 'w') as h5:
        # fake a mapping table just for convenience (single soma recording)
        h5.create_dataset('/mapping/gids', data=[gid], dtype=np.uint16)
        h5.create_dataset('/mapping/element_pos', data=[0.5], dtype=np.float)
        h5.create_dataset('/mapping/element_id', data=[0], dtype=np.uint16)
        h5.create_dataset('/mapping/index_pointer', data=[0], dtype=np.uint16)
        h5.create_dataset('/v/data', data=voltages, dtype=np.float64)

    if plot_results:
        times = np.linspace(0.0, h.tstop, len(voltages))
        plt.plot(times, voltages)
        plt.show()
if __name__ == '__main__':
    # With a trailing CLI argument, run just that cell; otherwise sweep all 5.
    if __file__ != sys.argv[-1]:
        # argv entries are strings but cells_table is keyed by int gids;
        # the original passed the raw string, which raised KeyError inside
        # run_simulation.
        run_simulation(int(sys.argv[-1]))
    else:
        for gid in range(5):
            run_simulation(gid, plot_results=False)
| 29.782946 | 98 | 0.615565 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 943 | 0.245445 |
6c9bda41584725dcd1301c29489ba97f22a43be2 | 973 | py | Python | tools/telemetry/telemetry/value/trace_unittest.py | sunjc53yy/chromium | 049b380040949089c2a6e447b0cd0ac3c4ece38e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | tools/telemetry/telemetry/value/trace_unittest.py | sunjc53yy/chromium | 049b380040949089c2a6e447b0cd0ac3c4ece38e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | tools/telemetry/telemetry/value/trace_unittest.py | sunjc53yy/chromium | 049b380040949089c2a6e447b0cd0ac3c4ece38e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry.page import page_set
from telemetry.timeline import tracing_timeline_data
from telemetry.value import trace
class TestBase(unittest.TestCase):
    """Shared fixture: a PageSet with three dummy pages."""

    def setUp(self):
        self.page_set = page_set.PageSet(file_path=os.path.dirname(__file__))
        self.page_set.AddPageWithDefaultRunNavigate("http://www.bar.com/")
        self.page_set.AddPageWithDefaultRunNavigate("http://www.baz.com/")
        self.page_set.AddPageWithDefaultRunNavigate("http://www.foo.com/")

    @property
    def pages(self):
        # Convenience accessor for the fixture's pages.
        return self.page_set.pages
class ValueTest(TestBase):
    def testAsDict(self):
        # A TraceValue serialized with AsDict must reference the id of its
        # associated file handle.
        v = trace.TraceValue(
            None, tracing_timeline_data.TracingTimelineData({'test' : 1}))
        fh_id = v.GetAssociatedFileHandle().id
        d = v.AsDict()
        self.assertEqual(d['file_id'], fh_id)
| 30.40625 | 73 | 0.749229 | 656 | 0.674203 | 0 | 0 | 59 | 0.060637 | 0 | 0 | 237 | 0.243577 |
6c9c91b60ebe4ebca7c9c3568600bab235877552 | 4,383 | py | Python | utils.py | ruchikachavhan/instagan | 3ce71f788d1ef266c06e124ce45adea99270c783 | [
"MIT"
] | 5 | 2019-12-03T16:28:18.000Z | 2021-12-10T14:22:42.000Z | utils.py | bayesianGirl/instagan | 3ce71f788d1ef266c06e124ce45adea99270c783 | [
"MIT"
] | null | null | null | utils.py | bayesianGirl/instagan | 3ce71f788d1ef266c06e124ce45adea99270c783 | [
"MIT"
] | 1 | 2019-10-20T01:23:49.000Z | 2019-10-20T01:23:49.000Z | import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
import torchvision.datasets
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
import matplotlib.pyplot as plt
from PIL import Image
import os
from itertools import zip_longest as zip
from itertools import chain
import numpy as np
def weights_init(m):
    """DCGAN-style initializer, meant for model.apply(weights_init).

    Conv* layers get N(0, 0.02) weights; BatchNorm* layers get N(0, 0.02)
    weights and zero bias; every other module is left untouched.
    """
    name = m.__class__.__name__
    if 'Conv' in name:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in name:
        m.weight.data.normal_(0.0, 0.02)
        m.bias.data.fill_(0)
def return_cuda_tensor(t):
    # Cast to float32 and move to the module-level ``device``.
    # NOTE(review): ``device`` is not defined anywhere in this file —
    # presumably set at module import elsewhere; confirm before reuse.
    t= t.float()
    t = t.to(device)
    return t
def Sample_images(list):
    """Return a uniformly random element from the newest 50 buffer entries.

    Implements the usual GAN image-pool trick: when the history grows
    beyond 50 items, only the 50 most recent are eligible.
    """
    pool = list[-50:] if len(list) > 50 else list
    pick = np.random.randint(0, len(pool))
    return pool[pick]
def get_weights(x, y):
    """Context-preserving weight map: 1 - elementwise min of two masks.

    Matches the original behavior exactly: only batch 0 / channel 0 of the
    returned tensor is filled; all other entries stay zero. The original
    Python double loop over every pixel is replaced by one vectorized
    torch.min, which is O(H*W) in C instead of Python.
    """
    weight = torch.zeros(x.size())
    weight[0][0] = 1.00 - torch.min(x[0][0], y[0][0])
    return weight
def calc_gen_loss(images_x, images_orig_x,images_y, images_orig_y):
    """Generator-side LSGAN loss for the mask-aware CycleGAN pair.

    NOTE(review): relies on module-level globals not defined in this file
    (G_xy, G_yx, D_x, D_y, MSELoss, L1_loss, lambda_, device) —
    presumably bound by the training script; confirm before reuse.
    Each image is a (mask, original) pair; generators take and return both.
    Returns detached fake concatenations (for the image pool) plus the
    total generator loss.
    """
    #LSGAN loss
    G_xy.zero_grad()
    G_yx.zero_grad()
    # Identity mappings: feeding a domain's own images through its generator.
    x_id, x_id_orig = G_yx.forward(images_x, images_orig_x)
    y_id, y_id_orig = G_xy.forward(images_y, images_orig_y)
    # Cross-domain translations and their cycle reconstructions.
    y_fake, y_fake_orig = G_xy.forward(images_x, images_orig_x)
    x_fake, x_fake_orig = G_yx.forward(images_y, images_orig_y)
    re_x, re_x_orig = G_yx.forward(y_fake, y_fake_orig)
    re_y, re_y_orig = G_xy.forward(x_fake, x_fake_orig)
    # Concatenate image+mask along channels for the discriminators.
    x_concat = torch.cat(( x_fake_orig, x_fake), dim = 1)
    y_concat = torch.cat(( y_fake_orig, y_fake), dim = 1)
    x_concat_ = x_concat.detach()
    y_concat_ = y_concat.detach()
    x_fake_dis = Discriminator_loss(D_x, x_concat)
    y_fake_dis = Discriminator_loss(D_y, y_concat)
    label_real = Variable(torch.ones(y_fake_dis.size())).to(device)
    adv_loss = MSELoss(x_fake_dis, label_real) + MSELoss(y_fake_dis, label_real)
    adv_loss = adv_loss.to(device)
    #Cycle loss
    Cycle_loss = (L1_loss(re_x, images_x) + L1_loss(re_y, images_y)+L1_loss(re_x_orig, images_orig_x) + L1_loss(re_y_orig, images_orig_y))*lambda_
    #Identity loss
    ID_loss = (L1_loss(x_id, images_x) + L1_loss(x_id_orig, images_orig_x)+ L1_loss(y_id, images_y) + L1_loss(y_id_orig, images_orig_y))*lambda_
    #Context loss: down-weight pixels covered by either instance mask.
    weight_xy = return_cuda_tensor(get_weights(x_fake,images_y))
    weight_yx = return_cuda_tensor(get_weights(y_fake,images_x))
    Ctx_loss = (L1_loss(weight_yx * images_orig_x, weight_yx* y_fake_orig) + L1_loss(weight_xy*images_orig_y, weight_xy*x_fake_orig))*lambda_
    Total_loss = adv_loss + Cycle_loss + ID_loss + Ctx_loss
    return x_concat_,y_concat_, Total_loss
def calc_dis_loss(images_x, sampled_x, images_y, sampled_y):
    """Discriminator-side LSGAN loss: real images toward 1, pooled fakes
    toward 0, for each domain.

    NOTE(review): depends on module-level globals D_x, D_y, MSELoss,
    device — presumably bound by the training script; confirm.
    Returns (loss_D_x, loss_D_y).
    """
    x_real_loss = Discriminator_loss(D_x, images_x)
    x_fake_loss = Discriminator_loss(D_x, sampled_x)
    y_real_loss = Discriminator_loss(D_y, images_y)
    y_fake_loss = Discriminator_loss(D_y, sampled_y)
    label_real = Variable(torch.ones(y_fake_loss.size())).to(device)
    label_fake = Variable(torch.zeros(y_fake_loss.size())).to(device)
    err_dx = MSELoss(x_real_loss, label_real) + MSELoss(x_fake_loss,label_fake)
    err_dy = MSELoss(y_real_loss, label_real) + MSELoss(y_fake_loss,label_fake)
    return err_dx, err_dy
def array_of_images(images):
    """Slice each (presumably 4-channel: RGB+mask — confirm) image into
    70x70 crops for the patch discriminator.

    NOTE(review): ``batchSize`` is a module-level global not defined in
    this file. The output is allocated for 9 crops but the loop bounds
    (i in {0,40,80} with i<=80, j only 0 since j<=30) fill just 3 of
    them — the remaining 6 slots stay all-ones; confirm this is intended.
    """
    out= np.ones((batchSize, 9, 4, 70, 70))
    out= torch.tensor(out)
    for index in range(0, images.shape[0]):
        # print(images.shape)
        count = 0
        x=np.ones((3,70,70))  # placeholder, overwritten before use
        x = torch.tensor(x)
        for i in range(0, 150,40):
            if(i<=80):
                for j in range(0, 100,70):
                    if(j<=30):
                        x = images[index, :, i:i+70, j:j+70]
                        out[index][count] = x
                        count+=1
    # print("count", count)
    return return_cuda_tensor(out)
def Discriminator_loss(D, images):
    """Average the discriminator's response over the 70x70 crops produced
    by array_of_images (second tensor dimension indexes the crops)."""
    crops = array_of_images(images)
    n_crops = crops.shape[1]
    total = 0
    for idx in range(n_crops):
        total += D.forward(crops[:, idx, :, :, :])
    return total / n_crops
def remove_transparency(im, bg_colour=(255, 255, 255)):
    """Composite an image that carries transparency onto an opaque
    background of *bg_colour*; images without alpha pass through as-is."""
    has_alpha = im.mode in ('RGBA', 'LA') or (
        im.mode == 'P' and 'transparency' in im.info)
    if not has_alpha:
        return im
    alpha = im.convert('RGBA').split()[-1]
    background = Image.new("RGBA", im.size, bg_colour + (255,))
    background.paste(im, mask=alpha)
    return background
6c9d70099a722ba2fbced930795b4aaa3c0d43d3 | 3,044 | py | Python | Super_TF/Dataset_IO/Classification/Dataset_reader_classification.py | Dhruv-Mohan/Super_TF | c693663adc59947cb7d15bd42ff260b7d3de6bdc | [
"MIT"
] | 8 | 2017-10-29T18:50:49.000Z | 2020-09-23T10:55:27.000Z | Super_TF/Dataset_IO/Classification/Dataset_reader_classification.py | Dhruv-Mohan/Tensorflow_Playground | c693663adc59947cb7d15bd42ff260b7d3de6bdc | [
"MIT"
] | null | null | null | Super_TF/Dataset_IO/Classification/Dataset_reader_classification.py | Dhruv-Mohan/Tensorflow_Playground | c693663adc59947cb7d15bd42ff260b7d3de6bdc | [
"MIT"
] | 1 | 2021-01-27T09:32:53.000Z | 2021-01-27T09:32:53.000Z | from utils.Dataset_reader import Dataset_reader
from Dataset_IO.Classification.Dataset_conifg_classification import Dataset_conifg_classification
import Dataset_IO.Classification.Dataset_classification_pb2 as proto
import tensorflow as tf
import os
#TODO: ADD TFRECORDS AND MEANPROTO READING CHECKS
class Dataset_reader_classification(Dataset_reader,Dataset_conifg_classification):
    """Implementation of Dataset reader for classification.

    Reads a TFRecords dataset plus its companion ``<name>_mean.proto``
    (mean image and image geometry), subtracts the mean from each decoded
    image, and exposes shuffled (images, one_hot_labels) batches.
    Built on the TF1 graph/queue API (placeholders + shuffle_batch).
    """
    def __init__(self, filename=None, epochs=100, num_classes=18):
        # filename: path to the tfrecords file; the mean proto is expected
        # next to it as <common_name>_mean.proto.
        super().__init__()
        with tf.name_scope('Dataset_Classification_Reader') as scope:
            # Batch size is fed at session-run time (see next_batch).
            self.batch_size = tf.placeholder(tf.int32, name='Dataset_batch_size')
            self.num_classes = num_classes
            self.open_dataset(filename=filename, epochs=epochs)
            self.mean_header_proto = proto.Image_set()
            dataset_path, dataset_name = os.path.split(filename)
            common_name, _ = os.path.splitext(dataset_name)
            mean_file_path = os.path.join(dataset_path,common_name +'_mean.proto')
            with open(mean_file_path,"rb") as mean_header_file:
                self.mean_header_proto.ParseFromString(mean_header_file.read())
            # [height, width, depth] taken from the mean proto headers.
            self.image_shape = [self.mean_header_proto.Image_headers.image_height, self.mean_header_proto.Image_headers.image_width, self.mean_header_proto.Image_headers.image_depth]
            mean_image_data = self.mean_header_proto.mean_data
            self.mean_image = tf.image.convert_image_dtype(tf.image.decode_image(mean_image_data), tf.float32)
            self.mean_image.set_shape(self.image_shape)
            self.images , self.one_hot_labels = self.batch_inputs()

    def single_read(self):
        # Parse one serialized example -> (mean-subtracted float image, sparse label).
        features = tf.parse_single_example(self.serialized_example, features=self._Feature_dict)
        image = tf.image.decode_image(features[self._Image_handle])
        image.set_shape(self.image_shape)
        image = tf.image.convert_image_dtype(image, tf.float32)
        image = image - self.mean_image
        return image , features[self._Label_handle]

    def pre_process_image(self,pre_process_op):
        # Apply a caller-supplied op to the already-batched image tensor.
        with tf.name_scope('Pre_Processing_op') as scope:
            self.images = pre_process_op(self.images)

    def batch_inputs(self):
        image , label = self.single_read()
        # NOTE(review): queue capacity / min_after_dequeue are hard-coded.
        images , sparse_labels = tf.train.shuffle_batch([image , label], batch_size=self.batch_size, num_threads=8, capacity=5000+128, min_after_dequeue=5000)
        one_hot_labels = tf.one_hot(sparse_labels,self.num_classes)
        return images, one_hot_labels

    #TODO: CONFIGURABLE PARAMS
    def next_batch(self, batch_size=1, sess=None):
        # Pull one batch; uses the default session unless one is supplied.
        with tf.name_scope('Batch_getter') as scope:
            if sess is None :
                self.sess = tf.get_default_session()
            else:
                self.sess = sess
            images , labels = self.sess.run([self.images , self.one_hot_labels], feed_dict={self.batch_size : batch_size})
        return images , labels
| 42.873239 | 182 | 0.704993 | 2,740 | 0.900131 | 0 | 0 | 0 | 0 | 0 | 0 | 233 | 0.076544 |
6c9e0efa2a651b725cc3fc394a15a455183ea7b1 | 13,902 | py | Python | yocto/poky/scripts/lib/recipetool/create_buildsys.py | jxtxinbing/ops-build | 9008de2d8e100f3f868c66765742bca9fa98f3f9 | [
"Apache-2.0"
] | 2 | 2019-03-27T08:11:14.000Z | 2020-02-22T20:40:24.000Z | yocto/poky/scripts/lib/recipetool/create_buildsys.py | jxtxinbing/ops-build | 9008de2d8e100f3f868c66765742bca9fa98f3f9 | [
"Apache-2.0"
] | 39 | 2016-08-23T11:23:28.000Z | 2017-04-07T08:00:52.000Z | yocto/poky/scripts/lib/recipetool/create_buildsys.py | jxtxinbing/ops-build | 9008de2d8e100f3f868c66765742bca9fa98f3f9 | [
"Apache-2.0"
] | 1 | 2021-09-10T08:10:12.000Z | 2021-09-10T08:10:12.000Z | # Recipe creation tool - create command build system handlers
#
# Copyright (C) 2014 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import re
import logging
from recipetool.create import RecipeHandler, read_pkgconfig_provides
logger = logging.getLogger('recipetool')
tinfoil = None
def tinfoil_init(instance):
    """Store the bitbake tinfoil instance for use by the handlers below."""
    global tinfoil
    tinfoil = instance
class CmakeRecipeHandler(RecipeHandler):
    """Recognise CMake source trees and configure the recipe accordingly."""

    def process(self, srctree, classes, lines_before, lines_after, handled):
        # Another handler already claimed the build system.
        if 'buildsystem' in handled:
            return False
        if not RecipeHandler.checkfiles(srctree, ['CMakeLists.txt']):
            return False
        classes.append('cmake')
        lines_after.extend([
            '# Specify any options you want to pass to cmake using EXTRA_OECMAKE:',
            'EXTRA_OECMAKE = ""',
            '',
        ])
        handled.append('buildsystem')
        return True
class SconsRecipeHandler(RecipeHandler):
    """Recognise SCons source trees and configure the recipe accordingly."""

    def process(self, srctree, classes, lines_before, lines_after, handled):
        # Another handler already claimed the build system.
        if 'buildsystem' in handled:
            return False
        if not RecipeHandler.checkfiles(srctree, ['SConstruct', 'Sconstruct', 'sconstruct']):
            return False
        classes.append('scons')
        lines_after.extend([
            '# Specify any options you want to pass to scons using EXTRA_OESCONS:',
            'EXTRA_OESCONS = ""',
            '',
        ])
        handled.append('buildsystem')
        return True
class QmakeRecipeHandler(RecipeHandler):
    """Recognise qmake projects (any *.pro file in the source tree)."""

    def process(self, srctree, classes, lines_before, lines_after, handled):
        # Another handler already claimed the build system.
        if 'buildsystem' in handled:
            return False
        if not RecipeHandler.checkfiles(srctree, ['*.pro']):
            return False
        classes.append('qmake2')
        handled.append('buildsystem')
        return True
class AutotoolsRecipeHandler(RecipeHandler):
    def process(self, srctree, classes, lines_before, lines_after, handled):
        """Detect an autotools build system (configure.ac/configure.in, or
        a pre-generated GNU Autoconf configure script) and set up the
        recipe: inherit autotools, scrape dependencies, add EXTRA_OECONF."""
        if 'buildsystem' in handled:
            return False
        autoconf = False
        if RecipeHandler.checkfiles(srctree, ['configure.ac', 'configure.in']):
            autoconf = True
            # Scrape DEPENDS / inherit hints out of configure.ac and emit
            # them as variable assignments ahead of the inherit line.
            values = AutotoolsRecipeHandler.extract_autotools_deps(lines_before, srctree)
            classes.extend(values.pop('inherit', '').split())
            for var, value in values.iteritems():  # NOTE: Python 2 dict API (this tree predates py3)
                lines_before.append('%s = "%s"' % (var, value))
        else:
            conffile = RecipeHandler.checkfiles(srctree, ['configure'])
            if conffile:
                # Check if this is just a pre-generated autoconf configure script
                with open(conffile[0], 'r') as f:
                    for i in range(1, 10):
                        if 'Generated by GNU Autoconf' in f.readline():
                            autoconf = True
                            break
        if autoconf:
            lines_before.append('# NOTE: if this software is not capable of being built in a separate build directory')
            lines_before.append('# from the source, you should replace autotools with autotools-brokensep in the')
            lines_before.append('# inherit line')
            classes.append('autotools')
            lines_after.append('# Specify any options you want to pass to the configure script using EXTRA_OECONF:')
            lines_after.append('EXTRA_OECONF = ""')
            lines_after.append('')
            handled.append('buildsystem')
            return True
        return False
@staticmethod
def extract_autotools_deps(outlines, srctree, acfile=None):
import shlex
import oe.package
values = {}
inherits = []
# FIXME this mapping is very thin
progmap = {'flex': 'flex-native',
'bison': 'bison-native',
'm4': 'm4-native'}
progclassmap = {'gconftool-2': 'gconf',
'pkg-config': 'pkgconfig'}
ignoredeps = ['gcc-runtime', 'glibc', 'uclibc']
pkg_re = re.compile('PKG_CHECK_MODULES\(\[?[a-zA-Z0-9]*\]?, \[?([^,\]]*)[),].*')
lib_re = re.compile('AC_CHECK_LIB\(\[?([a-zA-Z0-9]*)\]?, .*')
progs_re = re.compile('_PROGS?\(\[?[a-zA-Z0-9]*\]?, \[?([^,\]]*)\]?[),].*')
dep_re = re.compile('([^ ><=]+)( [<>=]+ [^ ><=]+)?')
# Build up lib library->package mapping
shlib_providers = oe.package.read_shlib_providers(tinfoil.config_data)
libdir = tinfoil.config_data.getVar('libdir', True)
base_libdir = tinfoil.config_data.getVar('base_libdir', True)
libpaths = list(set([base_libdir, libdir]))
libname_re = re.compile('^lib(.+)\.so.*$')
pkglibmap = {}
for lib, item in shlib_providers.iteritems():
for path, pkg in item.iteritems():
if path in libpaths:
res = libname_re.match(lib)
if res:
libname = res.group(1)
if not libname in pkglibmap:
pkglibmap[libname] = pkg[0]
else:
logger.debug('unable to extract library name from %s' % lib)
# Now turn it into a library->recipe mapping
recipelibmap = {}
pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR', True)
for libname, pkg in pkglibmap.iteritems():
try:
with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f:
for line in f:
if line.startswith('PN:'):
recipelibmap[libname] = line.split(':', 1)[-1].strip()
break
except IOError as ioe:
if ioe.errno == 2:
logger.warn('unable to find a pkgdata file for package %s' % pkg)
else:
raise
# Since a configure.ac file is essentially a program, this is only ever going to be
# a hack unfortunately; but it ought to be enough of an approximation
if acfile:
srcfiles = [acfile]
else:
srcfiles = RecipeHandler.checkfiles(srctree, ['configure.ac', 'configure.in'])
pcdeps = []
deps = []
unmapped = []
unmappedlibs = []
with open(srcfiles[0], 'r') as f:
for line in f:
if 'PKG_CHECK_MODULES' in line:
res = pkg_re.search(line)
if res:
res = dep_re.findall(res.group(1))
if res:
pcdeps.extend([x[0] for x in res])
inherits.append('pkgconfig')
if line.lstrip().startswith('AM_GNU_GETTEXT'):
inherits.append('gettext')
elif 'AC_CHECK_PROG' in line or 'AC_PATH_PROG' in line:
res = progs_re.search(line)
if res:
for prog in shlex.split(res.group(1)):
prog = prog.split()[0]
progclass = progclassmap.get(prog, None)
if progclass:
inherits.append(progclass)
else:
progdep = progmap.get(prog, None)
if progdep:
deps.append(progdep)
else:
if not prog.startswith('$'):
unmapped.append(prog)
elif 'AC_CHECK_LIB' in line:
res = lib_re.search(line)
if res:
lib = res.group(1)
libdep = recipelibmap.get(lib, None)
if libdep:
deps.append(libdep)
else:
if libdep is None:
if not lib.startswith('$'):
unmappedlibs.append(lib)
elif 'AC_PATH_X' in line:
deps.append('libx11')
if unmapped:
outlines.append('# NOTE: the following prog dependencies are unknown, ignoring: %s' % ' '.join(unmapped))
if unmappedlibs:
outlines.append('# NOTE: the following library dependencies are unknown, ignoring: %s' % ' '.join(unmappedlibs))
outlines.append('# (this is based on recipes that have previously been built and packaged)')
recipemap = read_pkgconfig_provides(tinfoil.config_data)
unmapped = []
for pcdep in pcdeps:
recipe = recipemap.get(pcdep, None)
if recipe:
deps.append(recipe)
else:
if not pcdep.startswith('$'):
unmapped.append(pcdep)
deps = set(deps).difference(set(ignoredeps))
if unmapped:
outlines.append('# NOTE: unable to map the following pkg-config dependencies: %s' % ' '.join(unmapped))
outlines.append('# (this is based on recipes that have previously been built and packaged)')
if deps:
values['DEPENDS'] = ' '.join(deps)
if inherits:
values['inherit'] = ' '.join(list(set(inherits)))
return values
class MakefileRecipeHandler(RecipeHandler):
    """Fallback handler for plain-Makefile software; also emits skeleton
    do_configure/do_compile/do_install tasks when no Makefile is found.
    Note this handler always "handles" the tree (implicitly returns None)."""
    def process(self, srctree, classes, lines_before, lines_after, handled):
        """Generate best-effort recipe boilerplate for a Makefile project."""
        if 'buildsystem' in handled:
            return False
        makefile = RecipeHandler.checkfiles(srctree, ['Makefile'])
        if makefile:
            lines_after.append('# NOTE: this is a Makefile-only piece of software, so we cannot generate much of the')
            lines_after.append('# recipe automatically - you will need to examine the Makefile yourself and ensure')
            lines_after.append('# that the appropriate arguments are passed in.')
            lines_after.append('')
            # Run autoscan to get a synthetic configure.scan we can mine for deps
            scanfile = os.path.join(srctree, 'configure.scan')
            skipscan = False  # NOTE(review): set on failure but never read afterwards
            try:
                stdout, stderr = bb.process.run('autoscan', cwd=srctree, shell=True)
            except bb.process.ExecutionError as e:
                skipscan = True
            if scanfile and os.path.exists(scanfile):
                values = AutotoolsRecipeHandler.extract_autotools_deps(lines_before, srctree, acfile=scanfile)
                classes.extend(values.pop('inherit', '').split())
                for var, value in values.iteritems():
                    if var == 'DEPENDS':
                        lines_before.append('# NOTE: some of these dependencies may be optional, check the Makefile and/or upstream documentation')
                    lines_before.append('%s = "%s"' % (var, value))
                lines_before.append('')
                # Clean up autoscan droppings
                for f in ['configure.scan', 'autoscan.log']:
                    fp = os.path.join(srctree, f)
                    if os.path.exists(fp):
                        os.remove(fp)
            self.genfunction(lines_after, 'do_configure', ['# Specify any needed configure commands here'])
            func = []
            func.append('# You will almost certainly need to add additional arguments here')
            func.append('oe_runmake')
            self.genfunction(lines_after, 'do_compile', func)
            # 'make -qn install' exits 1 when the target exists but is out of
            # date; any other failure means there is probably no install target
            installtarget = True
            try:
                stdout, stderr = bb.process.run('make -qn install', cwd=srctree, shell=True)
            except bb.process.ExecutionError as e:
                if e.exitcode != 1:
                    installtarget = False
            func = []
            if installtarget:
                func.append('# This is a guess; additional arguments may be required')
                makeargs = ''
                # Only pass DESTDIR if the Makefile appears to honour it
                with open(makefile[0], 'r') as f:
                    for i in range(1, 100):
                        if 'DESTDIR' in f.readline():
                            makeargs += " 'DESTDIR=${D}'"
                            break
                func.append('oe_runmake install%s' % makeargs)
            else:
                func.append('# NOTE: unable to determine what to put here - there is a Makefile but no')
                func.append('# target named "install", so you will need to define this yourself')
            self.genfunction(lines_after, 'do_install', func)
            handled.append('buildsystem')
        else:
            lines_after.append('# NOTE: no Makefile found, unable to determine what needs to be done')
            lines_after.append('')
            self.genfunction(lines_after, 'do_configure', ['# Specify any needed configure commands here'])
            self.genfunction(lines_after, 'do_compile', ['# Specify compilation commands here'])
            self.genfunction(lines_after, 'do_install', ['# Specify install commands here'])
def plugin_init(pluginlist):
    """Plugin entry point; this plugin requires no initialisation."""
def register_recipe_handlers(handlers):
    """Append one instance of each build-system handler to *handlers*.

    Order matters: the first handler whose probe succeeds claims the tree,
    so the most specific detectors come first and the plain-Makefile
    fallback comes last.
    """
    for handler_cls in (CmakeRecipeHandler,
                        AutotoolsRecipeHandler,
                        SconsRecipeHandler,
                        QmakeRecipeHandler,
                        MakefileRecipeHandler):
        handlers.append(handler_cls())
| 43.44375 | 147 | 0.557186 | 12,548 | 0.902604 | 0 | 0 | 5,857 | 0.421306 | 0 | 0 | 4,028 | 0.289742 |
6c9ec667c5b09f3ef883263c1e32204f673cefdd | 431 | py | Python | models/classification/_base_/ote_mobilenet_v3.py | openvinotoolkit/model_preparation_algorithm | 8d36bf5944837b7a3d22fc2c3a4cb93423619fc2 | [
"Apache-2.0"
] | null | null | null | models/classification/_base_/ote_mobilenet_v3.py | openvinotoolkit/model_preparation_algorithm | 8d36bf5944837b7a3d22fc2c3a4cb93423619fc2 | [
"Apache-2.0"
] | null | null | null | models/classification/_base_/ote_mobilenet_v3.py | openvinotoolkit/model_preparation_algorithm | 8d36bf5944837b7a3d22fc2c3a4cb93423619fc2 | [
"Apache-2.0"
] | null | null | null | # model settings
# Image-classifier configuration (mmclassification-style config dict).
model = dict(
    type='ImageClassifier',
    backbone=dict(
        type='OTEMobileNetV3',  # MobileNetV3 trunk variant
        mode='small',
        width_mult=1.0),  # no channel-width scaling
    neck=dict(type='GlobalAveragePooling'),
    head=dict(
        type='NonLinearClsHead',
        num_classes=1000,  # presumably ImageNet-1k — confirm against dataset config
        in_channels=576,   # must match the backbone's output channels
        hid_channels=1024,
        act_cfg=dict(type='HSwish'),
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
    ))
| 25.352941 | 60 | 0.600928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.283063 |
6ca00e46178f8bac44744669d606c5898da9f6e3 | 2,254 | py | Python | tools/rewrite_includes.py | Shachlan/skia | 633db4db7672fd55b48ba1073256853e00f18d8c | [
"BSD-3-Clause"
] | 6 | 2018-10-20T10:53:55.000Z | 2021-12-25T07:58:57.000Z | tools/rewrite_includes.py | Shachlan/skia | 633db4db7672fd55b48ba1073256853e00f18d8c | [
"BSD-3-Clause"
] | null | null | null | tools/rewrite_includes.py | Shachlan/skia | 633db4db7672fd55b48ba1073256853e00f18d8c | [
"BSD-3-Clause"
] | 9 | 2018-10-31T03:07:11.000Z | 2021-08-06T08:53:21.000Z | #!/usr/bin/python2
#
# Copyright 2019 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
# Directories scanned for headers and for files whose #includes get rewritten.
roots = [
  'bench',
  'dm',
  'docs',
  'example',
  'experimental',
  'fuzz',
  'gm',
  'include',
  'modules',
  'platform_tools/android/apps',
  'samplecode',
  'src',
  'tests',
  'third_party/etc1',
  'third_party/gif',
  'tools'
]
# Map short name -> absolute path for all Skia headers.
# (Python 2 script: note the statement-form prints below.)
headers = {}
for root in roots:
  for path, _, files in os.walk(root):
    for file_name in files:
      if file_name.endswith('.h'):
        # Header basenames must be unique across the tree; dump the clash
        # before the assert fires so it can be diagnosed.
        if file_name in headers:
          print path, file_name, headers[file_name]
        assert file_name not in headers
        headers[file_name] = os.path.abspath(os.path.join(path, file_name))
# Rewrite any #includes relative to Skia's top-level directory.
for root in roots:
  for path, _, files in os.walk(root):
    if 'generated' in path:
      continue
    for file_name in files:
      if (file_name.endswith('.h') or
          file_name.endswith('.c') or
          file_name.endswith('.m') or
          file_name.endswith('.mm') or
          file_name.endswith('.inc') or
          file_name.endswith('.fp') or
          file_name.endswith('.cc') or
          file_name.endswith('.cpp')):
        # Read the whole file into memory.
        file_path = os.path.join(path, file_name)
        lines = open(file_path).readlines()
        # Write it back out again line by line with substitutions for #includes.
        with open(file_path, 'w') as output:
          # Consecutive rewritten #include lines are buffered and emitted
          # sorted just before the next non-include line.
          includes = []
          for line in lines:
            parts = line.split('"')
            if (len(parts) == 3
                and '#' in parts[0]
                and 'include' in parts[0]
                and os.path.basename(parts[1]) in headers):
              header = headers[os.path.basename(parts[1])]
              includes.append(parts[0] +
                              '"%s"' % os.path.relpath(header, '.') +
                              parts[2])
            else:
              for inc in sorted(includes):
                print >>output, inc.strip('\n')
              includes = []
              print >>output, line.strip('\n')
| 28.531646 | 80 | 0.550577 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 624 | 0.276841 |
6ca2384cbcb96f1ed16b80307991583b5132ac71 | 598 | py | Python | unit_tests/test_class_query.py | usc-isi-i2/gaia-question-sparql | fb40ea6259686997ad9d805729fcca80516ddf92 | [
"MIT"
] | null | null | null | unit_tests/test_class_query.py | usc-isi-i2/gaia-question-sparql | fb40ea6259686997ad9d805729fcca80516ddf92 | [
"MIT"
] | null | null | null | unit_tests/test_class_query.py | usc-isi-i2/gaia-question-sparql | fb40ea6259686997ad9d805729fcca80516ddf92 | [
"MIT"
] | null | null | null | import unittest
import sys
import os
sys.path.append('../')
from src.class_query import ClassQuery
from src.query_tool import QueryTool, Mode
# Shared fixtures: the class-query set is loaded once for all tests.
base_path = os.path.dirname(__file__)
cq = ClassQuery(base_path + '/sample_queries/class_queries.xml')
class TestClassQuery(unittest.TestCase):
    """Unit tests for ClassQuery against the bundled sample TTL data."""
    def test_class_cluster(self):
        """Cluster-mode query over doc1.ttl yields [2, 1] justification counts."""
        qt = QueryTool(base_path + '/sample_ttls/doc1.ttl', Mode.CLUSTER)
        responses, stat, errors = cq.ask_all(qt)
        res = [len(x.find('justifications')) for x in responses.getchildren()]
        self.assertFalse(errors)
        self.assertEqual(res, [2, 1])
| 28.47619 | 78 | 0.712375 | 347 | 0.580268 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.132107 |
6ca2d2810d7b2ee27e8d82d4f639f5656c914f18 | 358 | py | Python | garbage/reddit_Prog_Chal.py | wolfdale/Spaghetti-code | 9e395345e1420b9db021b21131601191a869db1d | [
"MIT"
] | 1 | 2018-05-18T16:07:11.000Z | 2018-05-18T16:07:11.000Z | garbage/reddit_Prog_Chal.py | wolfdale/Spaghetti-code | 9e395345e1420b9db021b21131601191a869db1d | [
"MIT"
] | 5 | 2015-12-03T16:12:38.000Z | 2020-05-05T14:07:00.000Z | garbage/reddit_Prog_Chal.py | wolfdale/Spaghetti-code | 9e395345e1420b9db021b21131601191a869db1d | [
"MIT"
] | null | null | null | #[2015-10-26] Challenge #238 [Easy] Consonants and Vowels
# Python 2 script (raw_input / print statement): reads a pattern of 'v'/'c'
# characters and prints a random word with vowels/consonants in those spots.
v=['a','e','i','o','u']
c=['b','c','d','f','g','h','j','k','l','m','n','p','q','r','s','t','v','w','x','y','z']
import random
string = raw_input()
word=[]
for i in range (len(string)):
    if string[i] == 'v':
        word.append(random.choice(v))
    else:
        # any pattern character other than 'v' yields a consonant
        word.append(random.choice(c))
print ''.join(word)
| 23.866667 | 87 | 0.544693 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 140 | 0.391061 |
6ca310acd08c527da6b679f5c06c920523479f83 | 899 | py | Python | workflow_lib/fert.py | VUB-HYDR/2018_Chawanda_etal | 46af26916806e2f61fd48d777f88b04da7fffbbe | [
"MIT"
] | 14 | 2018-09-27T16:03:10.000Z | 2021-04-15T06:09:21.000Z | workflow_lib/fert.py | VUB-HYDR/2018_Chawanda_etal | 46af26916806e2f61fd48d777f88b04da7fffbbe | [
"MIT"
] | 2 | 2019-10-24T14:03:41.000Z | 2019-10-31T22:10:19.000Z | workflow_lib/fert.py | VUB-HYDR/2018_Chawanda_etal | 46af26916806e2f61fd48d777f88b04da7fffbbe | [
"MIT"
] | 7 | 2018-11-14T19:42:59.000Z | 2021-08-16T07:09:50.000Z | import init_file as variables
import cj_function_lib as cj
from datetime import datetime
# Export the 'fert' table from the QSWAT Access database into SWAT's
# fixed-width fert.dat format (column widths per the SWAT I/O spec).
fert_table = cj.extract_table_from_mdb(variables.QSWAT_MDB, "fert", variables.path + "\\fert.tmp~")
fert = ""
for fert_line in fert_table:
    # Each CSV field is padded to its fixed column width; fields 8 and 9 get an 'E+00' exponent suffix.
    fert += cj.trailing_spaces(4, fert_line.split(",")[1], 0) + cj.string_trailing_spaces(9, fert_line.split(",")[2]) + cj.trailing_spaces(8, fert_line.split(",")[3], 3) + cj.trailing_spaces(8, fert_line.split(",")[4], 3) + cj.trailing_spaces(8, fert_line.split(",")[5], 3) + cj.trailing_spaces(8, fert_line.split(",")[6], 3) + cj.trailing_spaces(8, fert_line.split(",")[7], 3) + cj.trailing_spaces(4, fert_line.split(",")[8], 2) + "E+00" + cj.trailing_spaces(4, fert_line.split(",")[9], 2)+ "E+00" + cj.trailing_spaces(8, fert_line.split(",")[10], 3) + "\n"
fileName = "fert.dat"
cj.write_to(variables.DefaultSimDir + "TxtInOut\\" + fileName, fert)
#print fileName
| 56.1875 | 558 | 0.688543 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.115684 |
6ca3a630c0fe54fea886a90464a43a242366158b | 700 | py | Python | transposition-decrypt.py | IamLucif3r/Cryptography | 46f33f6a327e4a70f71043f011b768fa05c5ae1d | [
"MIT"
] | 2 | 2021-07-20T05:14:30.000Z | 2021-07-27T14:04:56.000Z | transposition-decrypt.py | IamLucif3r/Cryptography | 46f33f6a327e4a70f71043f011b768fa05c5ae1d | [
"MIT"
] | null | null | null | transposition-decrypt.py | IamLucif3r/Cryptography | 46f33f6a327e4a70f71043f011b768fa05c5ae1d | [
"MIT"
] | null | null | null | import math,pyperclip
def main():
    """Decrypt the demo ciphertext, print it, and copy it to the clipboard."""
    myMessage = 'Cenoonommstmme oo snnio. s s c'
    myKey = 8
    plainText = decryptMessage(myKey,myMessage)
    # trailing '|' makes any trailing whitespace in the plaintext visible
    print(plainText+'|')
    pyperclip.copy(plainText)
def decryptMessage(key,message):
    """Decrypt a columnar transposition cipher.

    The ciphertext is laid out into a grid of ``key`` rows; reading the grid
    row by row while skipping the "shaded" (unused) boxes in the last column
    reconstructs the plaintext columns, which are then joined.

    BUG FIX: the wrap branch previously did ``row = 1`` instead of
    ``row += 1``, freezing the row counter so the shaded-box condition never
    triggered and decryption produced garbled output.
    """
    numOfColumns = int(math.ceil(len(message)/key))
    numOfRows = key
    numOfShadedboxes = (numOfColumns*numOfRows)-len(message)
    plainText = ['']*numOfColumns
    col = 0
    row = 0
    for symbol in message:
        plainText[col] += symbol
        col += 1
        # Wrap to the next row when we run off the right edge, or when we
        # reach a shaded box in the last column of the bottom rows.
        if (col == numOfColumns) or (col == numOfColumns-1 and row >= numOfRows-numOfShadedboxes):
            col = 0
            row += 1
    return ''.join(plainText)
# Run the demo decryption only when executed as a script.
if __name__ == '__main__':
    main()
| 25.925926 | 91 | 0.63 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.07 |
6ca4810cddee9dd6efdb1f08786256b345cb7b79 | 10,163 | py | Python | r80_apis.py | 0dadj1an/r80python | d59f2c1b07c75e028d13859b17cea5929ee3562d | [
"Apache-2.0"
] | null | null | null | r80_apis.py | 0dadj1an/r80python | d59f2c1b07c75e028d13859b17cea5929ee3562d | [
"Apache-2.0"
] | null | null | null | r80_apis.py | 0dadj1an/r80python | d59f2c1b07c75e028d13859b17cea5929ee3562d | [
"Apache-2.0"
] | null | null | null | #!/bin/python
import requests
import json
import pprint
# Suppress urllib3 certificate warnings: every API call below uses
# verify=False against the (self-signed) management server.
requests.packages.urllib3.disable_warnings()
# Example connection settings, kept for reference; callers supply url/user/pw.
#url = "https://192.168.248.150/web_api/"
#user = "api_user"
#pw = "demo123"
def login(url,user,pw):
    """POST /login to the Check Point R80 management API.

    Returns the raw requests.Response; the session id ('sid') for the
    other calls must be extracted from its JSON body by the caller.
    """
    payload_list={}
    payload_list['user']=user
    payload_list['password']=pw
    headers = {
        'content-type': "application/json",
        'Accept': "*/*",
    }
    response = requests.post(url+"login", json=payload_list, headers=headers, verify=False)
    return response
def add_host(sid,url,name,ip_address,groups="",comments="",nat_settings=""):
    """POST /add-host: create a host object with optional groups/comments/NAT.

    Optional fields are only included in the payload when non-empty.
    Returns the decoded JSON response body.

    Fix: removed a leftover Python 2 debug statement (``print payload_list``)
    that dumped the payload to stdout and broke the module under Python 3.
    """
    payload_list={}
    payload_list['name']=name
    payload_list['ipv4-address']= ip_address
    if nat_settings != "":
        payload_list['nat-settings']= nat_settings
    if groups != "" :
        payload_list['groups']= groups
    if comments != "":
        payload_list['comments']= comments
    headers = {
        'content-type': "application/json",
        'Accept': "*/*",
        'x-chkp-sid': sid,
    }
    response = requests.post(url+"add-host", json=payload_list, headers=headers, verify=False)
    return response.json()
def delete_host(sid,url,name):
    """POST /delete-host for *name* (warnings ignored); returns the Response."""
    payload_list={}
    payload_list['name']=name
    payload_list['ignore-warnings']="true"
    headers = {
        'content-type': "application/json",
        'Accept': "*/*",
        'x-chkp-sid': sid,
    }
    response = requests.post(url+"delete-host", json=payload_list, headers=headers, verify=False)
    return response
def add_network(sid,url,name,subnet,mask_length,nat_settings,groups):
    """POST /add-network; returns the decoded JSON response body."""
    payload_list={}
    payload_list['name']=name
    payload_list['subnet4']= subnet
    payload_list['mask-length']= mask_length
    payload_list['nat-settings']= nat_settings
    payload_list['groups']= groups
    headers = {
        'content-type': "application/json",
        'Accept': "*/*",
        'x-chkp-sid': sid,
    }
    response = requests.post(url+"add-network", json=payload_list, headers=headers, verify=False)
    return response.json()
def delete_network(sid,url,name):
    """POST /delete-network for *name*; returns the Response."""
    payload_list={}
    payload_list['name']=name
    headers = {
        'content-type': "application/json",
        'Accept': "*/*",
        'x-chkp-sid': sid,
    }
    response = requests.post(url+"delete-network", json=payload_list, headers=headers, verify=False)
    return response
def show_network_groups(sid,url):
    """POST /show-groups (standard detail level); returns the parsed JSON dict."""
    payload_list={}
    payload_list['details-level']="standard"
    headers = {
        'content-type': "application/json",
        'Accept': "*/*",
        'x-chkp-sid': sid,
    }
    response = requests.post(url+"show-groups", json=payload_list, headers=headers, verify=False)
    groups=json.loads(response.text)
    return groups
def add_network_group(sid,url,name):
    """POST /add-group to create an empty network group; returns the Response."""
    payload_list={}
    payload_list['name']=name
    headers = {
        'content-type': "application/json",
        'Accept': "*/*",
        'x-chkp-sid': sid,
    }
    response = requests.post(url+"add-group", json=payload_list, headers=headers, verify=False)
    return response
def add_members_to_network_group(sid,url,members,name=""):
    """POST /set-group to set *members* on the group called *name*.

    Fix: the original body referenced an undefined global ``name`` and
    therefore always raised NameError. The group name is now an explicit
    parameter, appended with a default so existing positional calls still
    bind (they were broken before in any case).
    """
    payload_list={}
    payload_list['name']=name
    payload_list['members']=members
    headers = {
        'content-type': "application/json",
        'Accept': "*/*",
        'x-chkp-sid': sid,
    }
    response = requests.post(url+"set-group", json=payload_list, headers=headers, verify=False)
    return response
def add_access_layer(sid,url,name):
    """POST /add-access-layer; returns the Response."""
    payload_list={}
    payload_list['name']=name
    headers = {
        'content-type': "application/json",
        'Accept': "*/*",
        'x-chkp-sid': sid,
    }
    response = requests.post(url+"add-access-layer", json=payload_list, headers=headers, verify=False)
    return response
def add_policy_package(sid,url,name,access_layer,threat_layer,comments):
    """POST /add-package; access_layer/threat_layer are 'true'/'false' strings."""
    payload_list={}
    payload_list['name']=name
    payload_list['access']=access_layer
    payload_list['threat-prevention']=threat_layer
    payload_list['comments']=comments
    headers = {
        'content-type': "application/json",
        'Accept': "*/*",
        'x-chkp-sid': sid,
    }
    response = requests.post(url+"add-package", json=payload_list, headers=headers, verify=False)
    return response
def add_access_section(sid,url,layer,position,name):
    """POST /add-access-section at *position* within *layer*; returns the Response."""
    payload_list={}
    payload_list['layer']=layer
    payload_list['position']=position
    payload_list['name']=name
    headers = {
        'content-type': "application/json",
        'Accept': "*/*",
        'x-chkp-sid': sid,
    }
    response = requests.post(url+"add-access-section", json=payload_list, headers=headers, verify=False)
    return response
def delete_access_section_by_name(sid,url,layer,name):
    """POST /delete-access-section identified by name within *layer*."""
    payload_list={}
    payload_list['name']=name
    payload_list['layer']=layer
    headers = {
        'content-type': "application/json",
        'Accept': "*/*",
        'x-chkp-sid': sid,
    }
    response = requests.post(url+"delete-access-section", json=payload_list, headers=headers, verify=False)
    return response
def show_access_section(sid,url,layer,name):
    """POST /show-access-section; returns the Response (non-200 means absent)."""
    payload_list={}
    payload_list['layer']=layer
    payload_list['name']=name
    headers = {
        'content-type': "application/json",
        'Accept': "*/*",
        'x-chkp-sid': sid,
    }
    response = requests.post(url+"show-access-section", json=payload_list, headers=headers, verify=False)
    return response
def add_access_rule(sid,url,layer,position,rule):
    """POST /add-access-rule; *rule* is a dict with name/source/destination/service/track/action keys."""
    payload_list={}
    payload_list['layer']=layer
    payload_list['position']=position
    payload_list['name']=rule['name']
    payload_list['source']=rule['source']
    payload_list['destination']=rule['destination']
    payload_list['service']=rule['service']
    payload_list['track']=rule['track']
    payload_list['action']=rule['action']
    headers = {
        'content-type': "application/json",
        'Accept': "*/*",
        'x-chkp-sid': sid,
    }
    response = requests.post(url+"add-access-rule", json=payload_list, headers=headers, verify=False)
    return response
def delete_access_rule_by_rule_number(sid,url,layer,number):
    """POST /delete-access-rule addressed by rule number within *layer*."""
    payload_list={}
    payload_list['layer']=layer
    payload_list['rule-number']=number
    headers = {
        'content-type': "application/json",
        'Accept': "*/*",
        'x-chkp-sid': sid,
    }
    response = requests.post(url+"delete-access-rule", json=payload_list, headers=headers, verify=False)
    return response
def delete_access_rule_by_rule_name(sid,url,layer,name):
    """POST /delete-access-rule addressed by rule name within *layer*."""
    payload_list={}
    payload_list['layer']=layer
    payload_list['name']=name
    headers = {
        'content-type': "application/json",
        'Accept': "*/*",
        'x-chkp-sid': sid,
    }
    response = requests.post(url+"delete-access-rule", json=payload_list, headers=headers, verify=False)
    return response
def publish(sid,url):
    """POST /publish to commit all pending changes for this session."""
    payload_list={}
    headers = {
        'content-type': "application/json",
        'Accept': "*/*",
        'x-chkp-sid': sid,
    }
    response = requests.post(url+"publish", json=payload_list, headers=headers, verify=False)
    return response
def add_range():
    """NOTE(review): unfinished stub — ``sid`` and ``url`` are undefined here
    (no parameters), so calling this raises NameError; it also still posts to
    the 'publish' endpoint. Looks like an incomplete copy of publish();
    confirm the intended endpoint (add-address-range?) before using."""
    payload_list={}
    headers = {
        'content-type': "application/json",
        'Accept': "*/*",
        'x-chkp-sid': sid,
    }
    response = requests.post(url+"publish", json=payload_list, headers=headers, verify=False)
    return response
def show_task(sid,url,task):
    """POST /show-task to poll the status of an asynchronous task id."""
    payload_list={}
    payload_list['task-id']=task
    headers = {
        'content-type': "application/json",
        'Accept': "*/*",
        'x-chkp-sid': sid,
    }
    response = requests.post(url+"show-task", json=payload_list, headers=headers, verify=False)
    return response
def logout(sid,url):
    """POST /logout to end the management API session."""
    payload_list={}
    headers = {
        'content-type': "application/json",
        'Accept': "*/*",
        'x-chkp-sid': sid,
    }
    response = requests.post(url+"logout", json=payload_list, headers=headers, verify=False)
    return response
#main program
#login and get the session id
#sid=login(url,user,pw)
#get all groups
#add policy package
#name="my_cpx_policy2"
#comments="created by automation script"
#access_layer="true"
#threat_layer="true"
#package_return=add_policy_package(sid,url,name,access_layer,threat_layer,comments)
#print package_return
#add access rule section
#layer="my_cpx_policy2 network"
#position="top"
#position={"above":"Cleanup rule"}
#name="section1 - created by automation2"
#show_section_return=show_access_section(sid,url,layer,name)
#show_section_return=show_access_section(sid,url,layer,name)
#if show_section_return.status_code == "200":
# print "section already exists skipping"
#else:
# add_access_section(sid,url,layer,position,name)
#add access rule
#layer="my_cpx_policy2 network"
#position="top"
#rule={}
#rule['source']="any"
#rule['destination']="any"
#rule['service']="http"
#rule['action']="accept"
#rule['track']="Network Log"
#rule['name']="my rule 1"
#rule_response=add_access_rule(sid,url,layer,position,rule)
#print json.loads(rule_response.text)
#print rule_response
#add access rule to section
#layer="my_cpx_policy2 network"
#position={"top":"section1 - created by automation"}
#rule={}
#rule['source']="any"
#rule['destination']="any"
#rule['service']=["https","http"]
#rule['action']="accept"
#rule['track']="Network Log"
#rule['name']="my rule 2"
#rule_response=add_access_rule(sid,url,layer,position,rule)
#print rule_response
#print json.loads(rule_response.text)
#publish
#publish(sid,url)
| 30.428144 | 111 | 0.612024 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,450 | 0.339467 |
6ca483bbe2141f7ed86e065c780223eb91383897 | 1,753 | py | Python | pait/app/tornado/_pait.py | so1n/pait | dff33526bfaa5f801cbeff9907b89dfc9a091c71 | [
"Apache-2.0"
] | 19 | 2020-08-26T13:46:33.000Z | 2022-02-22T07:48:29.000Z | pait/app/tornado/_pait.py | so1n/pait | dff33526bfaa5f801cbeff9907b89dfc9a091c71 | [
"Apache-2.0"
] | 1 | 2021-06-06T17:45:54.000Z | 2021-06-06T17:45:54.000Z | pait/app/tornado/_pait.py | so1n/pait | dff33526bfaa5f801cbeff9907b89dfc9a091c71 | [
"Apache-2.0"
] | 1 | 2022-01-21T20:25:33.000Z | 2022-01-21T20:25:33.000Z | from typing import Any, Type
import aiofiles # type: ignore
from tornado.web import RequestHandler
from pait.app.base import BaseAppHelper
from pait.core import Pait as _Pait
from pait.g import config
from pait.model import response
from ._app_helper import AppHelper
__all__ = ["pait", "Pait"]
async def make_mock_response(pait_response: Type[response.PaitBaseResponseModel]) -> Any:
tornado_handle: RequestHandler = getattr(pait_response, "handle", None)
if not tornado_handle:
raise RuntimeError("Can not load Tornado handle")
tornado_handle.set_status(pait_response.status_code[0])
for key, value in pait_response.header.items():
tornado_handle.set_header(key, value)
tornado_handle.set_header("Content-Type", pait_response.media_type)
if issubclass(pait_response, response.PaitJsonResponseModel):
tornado_handle.write(pait_response.get_example_value(json_encoder_cls=config.json_encoder))
elif issubclass(pait_response, response.PaitTextResponseModel) or issubclass(
pait_response, response.PaitHtmlResponseModel
):
tornado_handle.write(pait_response.get_example_value())
elif issubclass(pait_response, response.PaitFileResponseModel):
async with aiofiles.tempfile.NamedTemporaryFile() as f: # type: ignore
await f.write(pait_response.get_example_value())
await f.seek(0)
async for line in f:
tornado_handle.write(line)
else:
raise NotImplementedError()
class Pait(_Pait):
app_helper_class: "Type[BaseAppHelper]" = AppHelper
# If you assign a value directly, it will become a bound function
make_mock_response_fn: staticmethod = staticmethod(make_mock_response)
pait = Pait()
| 37.297872 | 99 | 0.74672 | 219 | 0.124929 | 0 | 0 | 0 | 0 | 1,212 | 0.691386 | 177 | 0.10097 |
6ca602af912019bb59f184594f1a127acfb73000 | 222 | py | Python | fund/items.py | suddi/fundscraper | 8cd1962c643abe3a2bd3a5540550003550ed7201 | [
"Apache-2.0"
] | 1 | 2021-10-30T16:09:34.000Z | 2021-10-30T16:09:34.000Z | fund/items.py | suddi/fundscraper | 8cd1962c643abe3a2bd3a5540550003550ed7201 | [
"Apache-2.0"
] | 1 | 2022-03-02T14:55:09.000Z | 2022-03-02T14:55:09.000Z | fund/items.py | suddi/fundscraper | 8cd1962c643abe3a2bd3a5540550003550ed7201 | [
"Apache-2.0"
] | 2 | 2019-02-25T02:47:07.000Z | 2019-09-17T19:20:07.000Z | from scrapy.item import Field, Item
# pylint: disable-msg=too-many-ancestors
class FundItem(Item):
code = Field()
name = Field()
tier = Field()
start_date = Field()
date = Field()
price = Field()
| 18.5 | 40 | 0.626126 | 143 | 0.644144 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.18018 |
6ca6b8e0eb85324d9b371cf183d1159c92b39bc0 | 609 | py | Python | src/brightness_options.py | imesut/alfred-yeelight-pbc | 2678a2544d200c209afaa7d6dc383466f179966d | [
"Unlicense"
] | 1 | 2021-08-09T09:47:00.000Z | 2021-08-09T09:47:00.000Z | src/brightness_options.py | imesut/alfred-yeelight-pbc | 2678a2544d200c209afaa7d6dc383466f179966d | [
"Unlicense"
] | 1 | 2022-03-22T10:42:39.000Z | 2022-03-30T19:46:53.000Z | src/brightness_options.py | imesut/alfred-yeelight-pbc | 2678a2544d200c209afaa7d6dc383466f179966d | [
"Unlicense"
] | null | null | null | import json, yeelight, sys
from utils import get_alfred_object, bulb, bulb_properties, current_brightness
# Build the Alfred JSON item list of brightness presets, rotated so the
# option closest to the bulb's current brightness comes first.
# (NOTE: 'brigthness' is an existing typo in the variable name, kept as-is.)
current_brigthness_index = round(int(current_brightness)/10)
brightness_list = [0,10,20,30,40,50,60,70,80,90,100]
brightness_list = brightness_list[current_brigthness_index:] + brightness_list[:current_brigthness_index]
response_array = []
for option in brightness_list:
    # item fields: title, arg ("%<value>"), subtitle, match, icon, uid (per utils.get_alfred_object)
    item = [option, "%%%s" % (option), "", option, "", ""]
    response_array.append(get_alfred_object(item))
response_array[0]["subtitle"] = "Current Brightness"
response = {"items": response_array}
print(json.dumps(response))
| 35.823529 | 105 | 0.763547 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.08046 |
6ca7fb675928141bb07d0f78ee1eb39e58fe4eda | 296 | py | Python | NetCatKS/DProtocol/api/interfaces/storage/__init__.py | dimddev/NetCatKS-CP | 2d9e72b2422e344569fd4eb154866b98e9707561 | [
"BSD-2-Clause"
] | null | null | null | NetCatKS/DProtocol/api/interfaces/storage/__init__.py | dimddev/NetCatKS-CP | 2d9e72b2422e344569fd4eb154866b98e9707561 | [
"BSD-2-Clause"
] | null | null | null | NetCatKS/DProtocol/api/interfaces/storage/__init__.py | dimddev/NetCatKS-CP | 2d9e72b2422e344569fd4eb154866b98e9707561 | [
"BSD-2-Clause"
] | null | null | null | __author__ = 'dimd'
from zope.interface import Interface, Attribute
class IProtocolStogareInterface(Interface):
"""
This interface define our session storage
Every custom storage have to implement this Interface
"""
session = Attribute(""" Container for our session """) | 21.142857 | 58 | 0.722973 | 224 | 0.756757 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.52027 |
6ca82b9f021001decf15eedc9de1f6ea177534be | 3,616 | py | Python | examples/rope_register_transfer.py | def670/lfd | 9fef15f556cba49dd4b42c0c29505a4137f95fc5 | [
"BSD-2-Clause"
] | 36 | 2015-05-22T14:47:18.000Z | 2021-07-27T15:30:36.000Z | examples/rope_register_transfer.py | jeffmahler/lfd | ecc6b934db098c0b1af9946454917b6dc911cb74 | [
"BSD-2-Clause"
] | null | null | null | examples/rope_register_transfer.py | jeffmahler/lfd | ecc6b934db098c0b1af9946454917b6dc911cb74 | [
"BSD-2-Clause"
] | 13 | 2015-05-22T15:38:07.000Z | 2021-07-28T03:20:35.000Z | #!/usr/bin/env python
from __future__ import division
import numpy as np
from lfd.environment.simulation import DynamicSimulationRobotWorld
from lfd.environment.simulation_object import XmlSimulationObject, BoxSimulationObject
from lfd.environment import environment
from lfd.environment import sim_util
from lfd.demonstration.demonstration import Demonstration
from lfd.registration.registration import TpsRpmRegistrationFactory
from lfd.registration.plotting_openrave import registration_plot_cb
from lfd.transfer.transfer import FingerTrajectoryTransferer
from lfd.transfer.registration_transfer import TwoStepRegistrationAndTrajectoryTransferer
from move_rope import create_augmented_traj, create_rope
def create_rope_demo(env, rope_poss):
    """Build a Demonstration from a rope layout.

    Temporarily adds a rope at *rope_poss* to observe the scene, then removes
    it and synthesizes a pick-and-drop trajectory over the rope ends.
    rope_poss: presumably an array of 3D control points — confirm with create_rope.
    """
    rope_sim_obj = create_rope(rope_poss)
    env.sim.add_objects([rope_sim_obj])
    env.sim.settle()
    scene_state = env.observe_scene()
    env.sim.remove_objects([rope_sim_obj])
    # Grasp slightly inward from the first end; release slightly inward from
    # the last end, offset in y and above the table.
    pick_pos = rope_poss[0] + .1 * (rope_poss[1] - rope_poss[0])
    drop_pos = rope_poss[3] + .1 * (rope_poss[2] - rope_poss[3]) + np.r_[0, .2, 0]
    # Fixed gripper orientations for pick and drop
    pick_R = np.array([[0, 0, 1], [0, 1, 0], [-1, 0, 0]])
    drop_R = np.array([[0, 1, 0], [0, 0, -1], [-1, 0, 0]])
    move_height = .2
    aug_traj = create_augmented_traj(env.sim.robot, pick_pos, drop_pos, pick_R, drop_R, move_height)
    demo = Demonstration("rope_demo", scene_state, aug_traj)
    return demo
def main():
    """Demonstrate TPS-RPM registration and trajectory transfer on a rope task.

    Builds a PR2 simulation with a table, records a rope demonstration, then
    registers a differently-shaped test rope against it and executes the
    transferred trajectory.  Opens a viewer window (side effect).
    """
    # define simulation objects
    table_height = 0.77
    sim_objs = []
    sim_objs.append(XmlSimulationObject("robots/pr2-beta-static.zae", dynamic=False))
    sim_objs.append(BoxSimulationObject("table", [1, 0, table_height-.1], [.85, .85, .1], dynamic=False))
    # initialize simulation world and environment
    sim = DynamicSimulationRobotWorld()
    sim.add_objects(sim_objs)
    sim.create_viewer()
    sim.robot.SetDOFValues([0.25], [sim.robot.GetJoint('torso_lift_joint').GetJointIndex()])
    sim.robot.SetDOFValues([1.25], [sim.robot.GetJoint('head_tilt_joint').GetJointIndex()]) # move head down so it can see the rope
    sim_util.reset_arms_to_side(sim)
    env = environment.LfdEnvironment(sim, sim, downsample_size=0.025)
    # Demonstration rope: a simple rectangle of 4 nodes just above the table.
    demo_rope_poss = np.array([[.2, -.2, table_height+0.006],
                               [.8, -.2, table_height+0.006],
                               [.8, .2, table_height+0.006],
                               [.2, .2, table_height+0.006]])
    demo = create_rope_demo(env, demo_rope_poss)
    # Test rope: a more contorted 7-node shape for the transfer to adapt to.
    test_rope_poss = np.array([[.2, -.2, table_height+0.006],
                               [.5, -.4, table_height+0.006],
                               [.8, .0, table_height+0.006],
                               [.8, .2, table_height+0.006],
                               [.6, .0, table_height+0.006],
                               [.4, .2, table_height+0.006],
                               [.2, .2, table_height+0.006]])
    test_rope_sim_obj = create_rope(test_rope_poss)
    sim.add_objects([test_rope_sim_obj])
    sim.settle()
    test_scene_state = env.observe_scene()
    # Register demo scene onto the test scene, transfer and run the trajectory.
    reg_factory = TpsRpmRegistrationFactory()
    traj_transferer = FingerTrajectoryTransferer(sim)
    plot_cb = lambda i, i_em, x_nd, y_md, xtarg_nd, wt_n, f, corr_nm, rad: registration_plot_cb(sim, x_nd, y_md, f)
    reg_and_traj_transferer = TwoStepRegistrationAndTrajectoryTransferer(reg_factory, traj_transferer)
    test_aug_traj = reg_and_traj_transferer.transfer(demo, test_scene_state, callback=plot_cb, plotting=True)
    env.execute_augmented_trajectory(test_aug_traj)
# Run the demo when this module is executed as a script.
if __name__ == '__main__':
    main()
| 44.097561 | 131 | 0.667035 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 223 | 0.06167 |
6ca8ee4d2b9df9e74c5f808a6dbd90df33af6ec4 | 6,350 | py | Python | data/IQAFolders.py | YuLvS/L2PIPS | 6a165b1e5b3ecc1b3bee362a2834cadcad01ad96 | [
"Apache-2.0"
] | 3 | 2021-05-21T06:27:08.000Z | 2021-08-28T14:52:03.000Z | data/IQAFolders.py | YuLvS/L2PIPS | 6a165b1e5b3ecc1b3bee362a2834cadcad01ad96 | [
"Apache-2.0"
] | 1 | 2021-06-09T03:24:00.000Z | 2021-06-09T13:40:17.000Z | data/IQAFolders.py | YuLvS/L2PIPS | 6a165b1e5b3ecc1b3bee362a2834cadcad01ad96 | [
"Apache-2.0"
] | 1 | 2022-02-08T00:41:31.000Z | 2022-02-08T00:41:31.000Z | import csv
import os.path
import random
import numpy as np
import scipy.io
import torch
import torchvision
from torch.utils.data import Dataset
# from .util import *
from data.util import default_loader, read_img, augment, get_image_paths
class PIPALFolder(Dataset):
    """Training dataset for the PIPAL full-reference IQA benchmark.

    Each sample is a (distorted image path, reference image path) pair plus
    its label divided by 1000.  Every labelled pair is repeated ``patch_num``
    times so that an independent random crop is drawn per repetition in
    ``__getitem__``.
    """

    def __init__(self, root=None, index=None, transform=None, opt=None):
        """
        :param root: dataset root containing Train_Ref/Train_Dis/Train_Label;
            ignored when ``opt`` is given (``opt`` then supplies the root)
        :param index: indices of the reference images to use (default 0..199)
        :param transform: unused; normalization is fixed to ImageNet statistics
        :param opt: option mapping with keys ``datasets.pipal``, ``patch_num``,
            ``patch_size`` and ``use_flip``
        """
        if index is None:
            index = list(range(0, 200))
        # Always remember the options: the original only stored them when not
        # None, which made __getitem__ crash on the opt=None path.
        self.opt = opt
        if opt is not None:
            root = opt['datasets']['pipal']
            patch_num = opt['patch_num']
        else:
            patch_num = 32

        refpath = os.path.join(root, 'Train_Ref')
        refname = self.getFileName(refpath, '.bmp')
        dispath = os.path.join(root, 'Train_Dis')
        txtpath = os.path.join(root, 'Train_Label')

        sample = []
        for i, item in enumerate(index):
            ref = refname[item]
            txtname = ref.split('.')[0] + '.txt'
            # 'with' guarantees the label file is closed; the original leaked
            # the file handle.
            with open(os.path.join(txtpath, txtname), 'r') as fh:
                for line in fh:
                    words = line.split('\n')[0].split(',')
                    # Repeat each pair so patch_num random crops are sampled
                    # from it over one epoch.
                    for _ in range(patch_num):
                        sample.append((
                            (os.path.join(dispath, words[0]), os.path.join(refpath, ref)),
                            np.array(words[1]).astype(np.float32) / 1000.0
                        ))
        self.samples = sorted(sample)
        # ImageNet normalization, applied after the HWC->CHW conversion below.
        self.transform = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                          std=[0.229, 0.224, 0.225])
        # With no options, default to a size that skips cropping entirely; the
        # original read opt['patch_size'] unconditionally and crashed on None.
        self.patch_size = opt['patch_size'] if opt is not None else 288

    def __getitem__(self, index):
        """Return a dict with normalized CHW float tensors 'Dis', 'Ref' and 'Label'."""
        path, target = self.samples[index]
        img_dis = read_img(env=None, path=path[0])
        img_ref = read_img(env=None, path=path[1])
        if self.patch_size < 288:
            H, W, _ = img_ref.shape
            crop_size = self.patch_size
            # Same random crop window for both images so they stay aligned.
            rnd_h = random.randint(0, max(0, (H - crop_size)))
            rnd_w = random.randint(0, max(0, (W - crop_size)))
            img_dis = img_dis[rnd_h:rnd_h + crop_size, rnd_w:rnd_w + crop_size, :]
            img_ref = img_ref[rnd_h:rnd_h + crop_size, rnd_w:rnd_w + crop_size, :]
            # augmentation - flip (applied identically to both images)
            img_dis, img_ref = augment([img_dis, img_ref], self.opt['use_flip'], rot=False)
        if img_ref.shape[2] == 3:
            # BGR -> RGB channel reorder.
            img_ref = img_ref[:, :, [2, 1, 0]]
            img_dis = img_dis[:, :, [2, 1, 0]]
        img_ref = torch.from_numpy(np.ascontiguousarray(np.transpose(img_ref, (2, 0, 1)))).float()
        img_dis = torch.from_numpy(np.ascontiguousarray(np.transpose(img_dis, (2, 0, 1)))).float()
        img_dis = self.transform(img_dis)
        img_ref = self.transform(img_ref)
        return {'Dis': img_dis, 'Ref': img_ref, 'Label': target}

    def __len__(self):
        return len(self.samples)

    @staticmethod
    def getFileName(path, suffix):
        """Return the sorted list of file names under *path* ending in *suffix*."""
        filename = [name for name in os.listdir(path)
                    if os.path.splitext(name)[1] == suffix]
        filename.sort()
        return filename
# TODO
class IQATestDataset(Dataset):
    """Evaluation dataset returning full-size distorted/reference pairs.

    Unlike PIPALFolder, samples are not repeated per patch, labels keep their
    original scale, and images are returned un-cropped.
    """

    def __init__(self, opt):
        """
        :param opt: option mapping; must provide the dataset root under
            ``opt['datasets']['pipal']`` and may provide ``opt['index']``
            (reference-image indices, default 0..199).
        """
        super(IQATestDataset, self).__init__()
        self.opt = opt
        self.paths_Dis = None
        self.paths_Ref = None
        # BUG FIX: the original referenced undefined names `root` and `index`
        # (NameError on construction); derive them from `opt` the same way the
        # training dataset PIPALFolder does.
        root = opt['datasets']['pipal']
        try:
            index = opt['index']
        except KeyError:
            index = list(range(0, 200))

        refpath = os.path.join(root, 'Train_Ref')
        refname = self.getFileName(refpath, '.bmp')
        dispath = os.path.join(root, 'Train_Dis')
        txtpath = os.path.join(root, 'Train_Label')

        sample = []
        for i, item in enumerate(index):
            ref = refname[item]
            txtname = ref.split('.')[0] + '.txt'
            # Close the label file deterministically (the original leaked it).
            with open(os.path.join(txtpath, txtname), 'r') as fh:
                for line in fh:
                    words = line.split('\n')[0].split(',')
                    sample.append((
                        (os.path.join(dispath, words[0]), os.path.join(refpath, ref)),
                        np.array(words[1]).astype(np.float32)
                    ))
        self.samples = sample
        # ImageNet normalization, applied after the HWC->CHW conversion below.
        self.transform = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                          std=[0.229, 0.224, 0.225])

    def __getitem__(self, index):
        """Return a dict with 'Dis', 'Ref', 'Label' and the distorted file path."""
        path, target = self.samples[index]
        img_dis = read_img(env=None, path=path[0])
        img_ref = read_img(env=None, path=path[1])
        if img_ref.shape[2] == 3:
            # BGR -> RGB channel reorder.
            img_ref = img_ref[:, :, [2, 1, 0]]
            img_dis = img_dis[:, :, [2, 1, 0]]
        img_ref = torch.from_numpy(np.ascontiguousarray(np.transpose(img_ref, (2, 0, 1)))).float()
        img_dis = torch.from_numpy(np.ascontiguousarray(np.transpose(img_dis, (2, 0, 1)))).float()
        img_dis = self.transform(img_dis)
        img_ref = self.transform(img_ref)
        return {'Dis': img_dis, 'Ref': img_ref, 'Label': target, 'Dis_path': path[0]}

    def __len__(self):
        return len(self.samples)

    @staticmethod
    def getFileName(path, suffix):
        """Return the sorted list of file names under *path* ending in *suffix*."""
        filename = [name for name in os.listdir(path)
                    if os.path.splitext(name)[1] == suffix]
        filename.sort()
        return filename
| 35.47486 | 99 | 0.529134 | 6,075 | 0.956693 | 0 | 0 | 586 | 0.092283 | 0 | 0 | 1,082 | 0.170394 |
6caacd92f800cd39592b48cdfb06a9aeead365f0 | 10,543 | py | Python | sdk/python/pulumi_aws_native/elasticloadbalancingv2/listener.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 29 | 2021-09-30T19:32:07.000Z | 2022-03-22T21:06:08.000Z | sdk/python/pulumi_aws_native/elasticloadbalancingv2/listener.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 232 | 2021-09-30T19:26:26.000Z | 2022-03-31T23:22:06.000Z | sdk/python/pulumi_aws_native/elasticloadbalancingv2/listener.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 4 | 2021-11-10T19:42:01.000Z | 2022-02-05T10:15:49.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ListenerArgs', 'Listener']
@pulumi.input_type
class ListenerArgs:
    def __init__(__self__, *,
                 default_actions: pulumi.Input[Sequence[pulumi.Input['ListenerActionArgs']]],
                 load_balancer_arn: pulumi.Input[str],
                 alpn_policy: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 certificates: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 ssl_policy: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Listener resource.
        ``default_actions`` and ``load_balancer_arn`` are required; every other
        input is optional and only recorded when a non-None value is supplied.
        """
        # Required inputs are always stored.
        pulumi.set(__self__, "default_actions", default_actions)
        pulumi.set(__self__, "load_balancer_arn", load_balancer_arn)
        # Optional inputs are stored only when provided.
        if alpn_policy is not None:
            pulumi.set(__self__, "alpn_policy", alpn_policy)
        if certificates is not None:
            pulumi.set(__self__, "certificates", certificates)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if protocol is not None:
            pulumi.set(__self__, "protocol", protocol)
        if ssl_policy is not None:
            pulumi.set(__self__, "ssl_policy", ssl_policy)
    @property
    @pulumi.getter(name="defaultActions")
    def default_actions(self) -> pulumi.Input[Sequence[pulumi.Input['ListenerActionArgs']]]:
        """Required input; serialized under the wire name "defaultActions"."""
        return pulumi.get(self, "default_actions")
    @default_actions.setter
    def default_actions(self, value: pulumi.Input[Sequence[pulumi.Input['ListenerActionArgs']]]):
        pulumi.set(self, "default_actions", value)
    @property
    @pulumi.getter(name="loadBalancerArn")
    def load_balancer_arn(self) -> pulumi.Input[str]:
        """Required input; serialized under the wire name "loadBalancerArn"."""
        return pulumi.get(self, "load_balancer_arn")
    @load_balancer_arn.setter
    def load_balancer_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "load_balancer_arn", value)
    @property
    @pulumi.getter(name="alpnPolicy")
    def alpn_policy(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """Optional input; serialized under the wire name "alpnPolicy"."""
        return pulumi.get(self, "alpn_policy")
    @alpn_policy.setter
    def alpn_policy(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "alpn_policy", value)
    @property
    @pulumi.getter
    def certificates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]]]:
        """Optional input."""
        return pulumi.get(self, "certificates")
    @certificates.setter
    def certificates(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]]]):
        pulumi.set(self, "certificates", value)
    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """Optional input."""
        return pulumi.get(self, "port")
    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)
    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        """Optional input."""
        return pulumi.get(self, "protocol")
    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol", value)
    @property
    @pulumi.getter(name="sslPolicy")
    def ssl_policy(self) -> Optional[pulumi.Input[str]]:
        """Optional input; serialized under the wire name "sslPolicy"."""
        return pulumi.get(self, "ssl_policy")
    @ssl_policy.setter
    def ssl_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ssl_policy", value)
class Listener(pulumi.CustomResource):
    """Resource Type definition for AWS::ElasticLoadBalancingV2::Listener.

    Generated SDK wrapper: do not edit by hand (see the file header).
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 alpn_policy: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerCertificateArgs']]]]] = None,
                 default_actions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerActionArgs']]]]] = None,
                 load_balancer_arn: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 ssl_policy: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Resource Type definition for AWS::ElasticLoadBalancingV2::Listener
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ListenerArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Resource Type definition for AWS::ElasticLoadBalancingV2::Listener
        :param str resource_name: The name of the resource.
        :param ListenerArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatcher: accepts either a ListenerArgs object or keyword inputs
        # (matching the two @overload signatures) and forwards to _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(ListenerArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       alpn_policy: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerCertificateArgs']]]]] = None,
                       default_actions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerActionArgs']]]]] = None,
                       load_balancer_arn: Optional[pulumi.Input[str]] = None,
                       port: Optional[pulumi.Input[int]] = None,
                       protocol: Optional[pulumi.Input[str]] = None,
                       ssl_policy: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Shared constructor body behind both overloads.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (as opposed to looking one up by id):
            # validate required inputs and build the property bag.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ListenerArgs.__new__(ListenerArgs)
            __props__.__dict__["alpn_policy"] = alpn_policy
            __props__.__dict__["certificates"] = certificates
            if default_actions is None and not opts.urn:
                raise TypeError("Missing required property 'default_actions'")
            __props__.__dict__["default_actions"] = default_actions
            if load_balancer_arn is None and not opts.urn:
                raise TypeError("Missing required property 'load_balancer_arn'")
            __props__.__dict__["load_balancer_arn"] = load_balancer_arn
            __props__.__dict__["port"] = port
            __props__.__dict__["protocol"] = protocol
            __props__.__dict__["ssl_policy"] = ssl_policy
            # Output-only property; the provider populates it.
            __props__.__dict__["listener_arn"] = None
        super(Listener, __self__).__init__(
            'aws-native:elasticloadbalancingv2:Listener',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Listener':
        """
        Get an existing Listener resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start as None; the engine fills them from saved state.
        __props__ = ListenerArgs.__new__(ListenerArgs)
        __props__.__dict__["alpn_policy"] = None
        __props__.__dict__["certificates"] = None
        __props__.__dict__["default_actions"] = None
        __props__.__dict__["listener_arn"] = None
        __props__.__dict__["load_balancer_arn"] = None
        __props__.__dict__["port"] = None
        __props__.__dict__["protocol"] = None
        __props__.__dict__["ssl_policy"] = None
        return Listener(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="alpnPolicy")
    def alpn_policy(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """Resource output; wire name "alpnPolicy"."""
        return pulumi.get(self, "alpn_policy")
    @property
    @pulumi.getter
    def certificates(self) -> pulumi.Output[Optional[Sequence['outputs.ListenerCertificate']]]:
        """Resource output."""
        return pulumi.get(self, "certificates")
    @property
    @pulumi.getter(name="defaultActions")
    def default_actions(self) -> pulumi.Output[Sequence['outputs.ListenerAction']]:
        """Resource output; wire name "defaultActions"."""
        return pulumi.get(self, "default_actions")
    @property
    @pulumi.getter(name="listenerArn")
    def listener_arn(self) -> pulumi.Output[str]:
        """Output-only: set to None on creation and populated by the provider."""
        return pulumi.get(self, "listener_arn")
    @property
    @pulumi.getter(name="loadBalancerArn")
    def load_balancer_arn(self) -> pulumi.Output[str]:
        """Resource output; wire name "loadBalancerArn"."""
        return pulumi.get(self, "load_balancer_arn")
    @property
    @pulumi.getter
    def port(self) -> pulumi.Output[Optional[int]]:
        """Resource output."""
        return pulumi.get(self, "port")
    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Output[Optional[str]]:
        """Resource output."""
        return pulumi.get(self, "protocol")
    @property
    @pulumi.getter(name="sslPolicy")
    def ssl_policy(self) -> pulumi.Output[Optional[str]]:
        """Resource output; wire name "sslPolicy"."""
        return pulumi.get(self, "ssl_policy")
| 42.003984 | 134 | 0.650858 | 10,117 | 0.959594 | 0 | 0 | 7,387 | 0.700654 | 0 | 0 | 2,497 | 0.23684 |
6cac424950d170e8ad4322e8527abbb434d47d28 | 5,606 | py | Python | src/gluonts/nursery/few_shot_prediction/src/meta/datasets/datasets.py | RingoIngo/gluon-ts | 62fb20c36025fc969653accaffaa783671709564 | [
"Apache-2.0"
] | null | null | null | src/gluonts/nursery/few_shot_prediction/src/meta/datasets/datasets.py | RingoIngo/gluon-ts | 62fb20c36025fc969653accaffaa783671709564 | [
"Apache-2.0"
] | null | null | null | src/gluonts/nursery/few_shot_prediction/src/meta/datasets/datasets.py | RingoIngo/gluon-ts | 62fb20c36025fc969653accaffaa783671709564 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import itertools
from sklearn.model_selection import train_test_split
from math import isclose
import numpy as np
# Sampling frequencies present in the M1/M3 and M4 competitions.
FREQ_M1_M3 = ["monthly", "quarterly", "yearly"]
FREQ_M4 = ["daily", "weekly", "monthly", "quarterly", "yearly"]
# Domain categories used by each competition.
CATEGORIES_M1 = [
    "micro1",
    "micro2",
    "micro3",
    "indust",
    "macro1",
    "macro2",
    "demogr",
]
CATEGORIES_M3_M4 = [
    "micro",
    "industry",
    "macro",
    "finance",
    "demographic",
]
# Fully expanded "<competition>_<frequency>_<category>" dataset identifiers.
M1 = ["m1_" + freq + "_" + cat for freq, cat in itertools.product(FREQ_M1_M3, CATEGORIES_M1)]
M3 = ["m3_" + freq + "_" + cat for freq, cat in itertools.product(FREQ_M1_M3, CATEGORIES_M3_M4)]
M4 = ["m4_" + freq + "_" + cat for freq, cat in itertools.product(FREQ_M4, CATEGORIES_M3_M4)]
# Stand-alone datasets; commented-out entries were deliberately excluded (see
# the per-entry notes for the reason).
DATASETS_SINGLE = [
    # "exchange_rate",
    "exchange_rate_nips",
    # "solar-energy",
    "solar_nips",
    # "electricity",
    "electricity_nips",
    # "traffic",
    "traffic_nips",
    "wiki-rolling_nips", # is a very challenging dataset
    "taxi_30min",
    "kaggle_web_traffic_without_missing",
    "kaggle_web_traffic_weekly",
    "nn5_daily_without_missing", # http://www.neural-forecasting-competition.com/NN5/ (ATM cash withdrawals)
    "nn5_weekly",
    "tourism_monthly", # no need to split, https://robjhyndman.com/papers/forecompijf.pdf
    "tourism_quarterly",
    "tourism_yearly",
    "cif_2016", # https://irafm.osu.cz/cif/main.php?c=Static&page=download ,
    # https://zenodo.org/record/3904073#.YeE6ZMYo8UE , 72 time series, 24 real, 48 artificial, all monthly, banking domain
    "london_smart_meters_without_missing",
    "wind_farms_without_missing",
    # "car_parts_without_missing", # intermittent time series
    # "dominick", # This dataset contains 115704 weekly time series representing the profit of individual stock keeping units from a retailer.
    # https://www.chicagobooth.edu/research/kilts/datasets/dominicks
    # dominick has intermittency and other problems
    # TODO: Could this be clustered? (as step 2, it is already from one category only)
    "fred_md", # https://s3.amazonaws.com/real.stlouisfed.org/wp/2015/2015-012.pdf , too mixed up?
    "pedestrian_counts", # hourly pedestrian counts captured from 66 sensors in Melbourne city starting from May 2009
    "hospital", # https://zenodo.org/record/4656014#.YeFBT8Yo8UE
    # This dataset contains 767 monthly time series that represent the patient counts related to medical products from January 2000 to December 2006. It was extracted from R expsmooth package.
    "covid_deaths", # https://zenodo.org/record/4656009#.YeFBq8Yo8UE
    # This dataset contains 266 daily time series that represent the COVID-19 deaths in a set of countries and states from 22/01/2020 to 20/08/2020. It was extracted from the Johns Hopkins repository.
    "kdd_cup_2018_without_missing", # https://zenodo.org/record/4656756#.YeFB7cYo8UE
    # This dataset was used in the KDD Cup 2018 forecasting competition.
    # It contains long hourly time series representing the air quality levels
    # in 59 stations in 2 cities: Beijing (35 stations) and London (24 stations) from 01/01/2017 to 31/03/2018.
    # TODO: could be clustered by cities
    "weather", # https://zenodo.org/record/4654822#.YeFCXcYo8UE
    # 3010 daily time series representing the variations of four weather variables: rain, mintemp, maxtemp and solar radiation, measured at the weather stations in Australia
    # TODO: could be clustered by variable (as step 2, it is already from one category only)
    # "m5", # intermittent time series, see https://mofc.unic.ac.cy/m5-competition/, Walmart product data, could be splitted by product or store
]
DATASETS_FULL = DATASETS_SINGLE + M1 + M3 + M4
# The excluded datasets contain only time series that are shorter than 3 predictions lengths.
# In the current setting we filter these time series.
DATASETS_FILTERED = [
    ds
    for ds in DATASETS_FULL
    if ds not in ["m1_yearly_macro2", "m3_yearly_micro", "m3_yearly_macro"]
]
def sample_datasets(
    n_folds: int, train_size: float = 0.7, val_size: float = 0.2, test_size: float = 0.1, seed=42
):
    """Draw ``n_folds`` independent random train/val/test partitions.

    Each fold partitions ``DATASETS_FILTERED`` into three disjoint name lists
    whose relative sizes are ``train_size``/``val_size``/``test_size`` (these
    must sum to 1). Sampling is reproducible via ``seed``.
    """
    assert isclose(train_size + val_size + test_size, 1.0), "sizes need to add up to 1"
    rng = np.random.RandomState(seed)
    folds = []
    for _ in range(n_folds):
        # First split off the training portion; the remainder is split again
        # into validation and test below.
        train_names, holdout = train_test_split(
            DATASETS_FILTERED,
            train_size=train_size,
            random_state=rng.randint(low=0, high=10000),
        )
        remaining = 1 - train_size
        # Rescale test_size so it is a fraction of the holdout, not the whole.
        val_names, test_names = train_test_split(
            holdout,
            test_size=test_size / remaining,
            random_state=rng.randint(low=0, high=10000),
        )
        folds.append((train_names, val_names, test_names))
        # Sanity check: the three splits must be pairwise disjoint.
        overlapping = (
            set(train_names) & set(val_names)
            or set(train_names) & set(test_names)
            or set(val_names) & set(test_names)
        )
        assert not overlapping, "Splits should not intersect!"
    return folds
| 43.796875 | 200 | 0.698537 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,667 | 0.654121 |
6cad74d49e4e9a08a93a53a5673e980c74cb3858 | 3,074 | py | Python | tests/test_individual.py | viswanathareddya/Geektrust_in_family | 197d4d0eadf191e1ef9d7d21d6d81d62645169c2 | [
"MIT"
] | null | null | null | tests/test_individual.py | viswanathareddya/Geektrust_in_family | 197d4d0eadf191e1ef9d7d21d6d81d62645169c2 | [
"MIT"
] | null | null | null | tests/test_individual.py | viswanathareddya/Geektrust_in_family | 197d4d0eadf191e1ef9d7d21d6d81d62645169c2 | [
"MIT"
] | null | null | null | import unittest
from Familytree.individual import Person
from Familytree import variables
class Testperson(unittest.TestCase):
    """Unit tests for the Person family-tree model."""

    def setUp(self):
        # A fresh subject is created before every test.
        self.person = Person(1, "Jane", "Female")

    def test_initialization(self):
        """A new Person starts with no relatives and the given identity."""
        # check instance
        self.assertEqual(isinstance(self.person, Person), True)
        # check properties
        self.assertEqual(self.person.id, 1)
        self.assertEqual(self.person.name, "Jane")
        self.assertEqual(self.person.gender, "Female")
        self.assertEqual(self.person.mother, None)
        self.assertEqual(self.person.father, None)
        self.assertEqual(self.person.spouse, None)
        self.assertEqual(self.person.children, [])

    def test_assign_mother(self):
        """assign_mother rejects non-Person and male values, accepts a female Person."""
        mother_error_case = "error_value"
        mother_error_male_case = Person(2, "male_person", "Male")
        mother_success_case = Person(3, "Mother", "Female")
        # error case
        self.assertRaises(ValueError, self.person.assign_mother, mother_error_case)
        self.assertRaises(ValueError, self.person.assign_mother, mother_error_male_case)
        # success case
        self.person.assign_mother(mother_success_case)
        self.assertEqual(self.person.mother.name, "Mother")
        # BUG FIX: the original used assertTrue(gender, "Female"), whose second
        # argument is only a failure *message*, so the check always passed.
        self.assertEqual(self.person.mother.gender, "Female")

    def test_assign_father(self):
        """assign_father rejects non-Person and female values, accepts a male Person."""
        father_error_case = "error_value"
        father_error_female_case = Person(2, "female_father", "Female")
        father_success_case = Person(3, "Father", "Male")
        # error cases
        self.assertRaises(ValueError, self.person.assign_father, father_error_case)
        self.assertRaises(ValueError, self.person.assign_father, father_error_female_case)
        # success case
        self.person.assign_father(father_success_case)
        self.assertEqual(self.person.father.name, "Father")
        # BUG FIX: assertTrue(gender, "Male") never actually compared; use
        # assertEqual so the gender is genuinely verified.
        self.assertEqual(self.person.father.gender, "Male")

    def test_assign_spouse(self):
        """assign_spouse rejects non-Person and same-gender values."""
        spouse_error_case = "error_value"
        spouse_error_same_gender = Person(2, "same_gender_spouse", "Female")
        spouse_success_case = Person(3, "Husband", "Male")
        # error cases
        self.assertRaises(ValueError, self.person.assign_spouse, spouse_error_case)
        self.assertRaises(ValueError, self.person.assign_spouse, spouse_error_same_gender)
        # success case
        self.person.assign_spouse(spouse_success_case)
        self.assertEqual(self.person.spouse.name, "Husband")
        self.assertEqual(self.person.spouse.gender, "Male")

    def test_add_children(self):
        """add_children rejects non-Person values and appends valid children."""
        child_error_case = "error_Case"
        child_success_case = Person(4, "Daughter", "Female")
        # error case
        self.assertRaises(ValueError, self.person.add_children, child_error_case)
        # success case
        self.person.add_children(child_success_case)
        self.assertEqual(len(self.person.children), 1)
        self.assertEqual(self.person.children[0].name, "Daughter")
        self.assertEqual(self.person.children[0].gender, "Female")
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 37.487805 | 90 | 0.691282 | 2,932 | 0.953806 | 0 | 0 | 0 | 0 | 0 | 0 | 425 | 0.138256 |
6cad9926ea7dfc0d595597a70d792e186fd1df46 | 2,972 | py | Python | rdparser/nodes.py | dougabugg/python-recursive-descent-parser | e32f6f253cc8d19c3baa78e358648894095bcca1 | [
"MIT"
] | 3 | 2019-12-17T11:37:52.000Z | 2022-03-15T03:39:17.000Z | rdparser/nodes.py | dougabugg/python-recursive-descent-parser | e32f6f253cc8d19c3baa78e358648894095bcca1 | [
"MIT"
] | null | null | null | rdparser/nodes.py | dougabugg/python-recursive-descent-parser | e32f6f253cc8d19c3baa78e358648894095bcca1 | [
"MIT"
] | null | null | null | class BaseNode:
pass
class Node(BaseNode):
    """Named interior node of the parse tree.

    Holds the input offset where it starts, an optional rule name, its child
    nodes/tokens, and arbitrary per-node options (e.g. ``flatten``).
    """

    def __init__(self, offset, name=None, **opts):
        self.offset = offset      # start offset in the input
        self.end_offset = None    # filled in later when the node is closed
        self.name = name
        self.nodes = []           # child Node/Token instances
        self.opts = opts          # arbitrary per-node options

    def __as_dict__(self):
        # Serialize recursively into plain dicts for debugging/inspection.
        children = [child.__as_dict__() for child in self.nodes]
        return {"name": self.name, "nodes": children}
class Token(BaseNode):
    """Leaf of the parse tree: a matched value at a given input offset."""

    def __init__(self, offset, value):
        self.offset = offset   # position in the input where the value matched
        self.value = value

    def __as_dict__(self):
        # Serialize into a plain dict for debugging/inspection.
        return {"offset": self.offset, "value": self.value}
class NodeInspector:
    """Index a Node's children and expose them through a mask object.

    Child ``Node``s are grouped into ``self.names`` (name -> list of nodes);
    non-Node children (tokens/values) are collected into ``self.values``.
    ``self.mask`` is the user-facing view; the node's ``flatten``/``as_list``
    options collapse single-child wrappers instead of producing a NodeMask.
    """
    def __init__(self, target):
        if not isinstance(target, Node):
            # BUG FIX: the original concatenated a str with the class object
            # itself, which raised an unrelated TypeError and hid the intended
            # error message.
            raise TypeError(
                "target should be an instance of Node, not " + type(target).__name__)
        self.target = target
        self.names = {}
        self.values = []
        for node in target.nodes:
            if isinstance(node, Node):
                # Group named children; unnamed Nodes land under the key None.
                if node.name in self.names:
                    self.names[node.name] += [node]
                else:
                    self.names[node.name] = [node]
            else:
                self.values.append(node)
        if target.opts.get("flatten"):
            if target.opts.get("as_list"):
                # Flatten to a list of masks over the first name group.
                if len(self.names) >= 1:
                    nodes = list(self.names.values())[0]
                else:
                    nodes = []
                self.mask = [NodeInspector(node).mask for node in nodes]
            elif len(self.names) >= 1:
                # Flatten to the mask of the first node of the first group.
                nodes = list(self.names.values())[0]
                self.mask = NodeInspector(nodes[0]).mask
            else:
                self.mask = None
        else:
            self.mask = NodeMask(self)
class NodeMask:
    """Read-only attribute/sequence view over a NodeInspector.

    Attribute access returns the mask of the first child node with that name
    (or None when absent); indexing/iteration exposes the collected token
    values. Instances are immutable: any attribute assignment raises
    AttributeError.
    """
    def __init__(self, inspector):
        # Use object.__setattr__ because our own __setattr__ always raises.
        super().__setattr__("_inspector", inspector)
        super().__setattr__("_offset", inspector.target.offset)
        super().__setattr__("_end_offset", inspector.target.end_offset)
        super().__setattr__("_name", inspector.target.name)
    def __str__(self):
        target = self._inspector.target
        n = target.name
        v = len(self._inspector.values)
        # BUG FIX: iterate .items() -- the original iterated the dict itself,
        # so unpacking each key string into (k, v) raised ValueError whenever
        # __str__ was called on a mask with named children.
        s = ", ".join("{}[{}]".format(name, len(nodes))
                      for name, nodes in self._inspector.names.items())
        return "<NodeMask name={}; values=[{}], nodes=[{}]>".format(n, v, s)
    def __getattr__(self, name):
        # Only called for names not found normally, i.e. child-node lookups.
        nodes = self._inspector.names.get(name)
        if nodes:
            node = NodeInspector(nodes[0]).mask
        else:
            node = None
        return node
    def __setattr__(self, name, value):
        # NodeMask is deliberately immutable.
        raise AttributeError
    def __getitem__(self, i):
        return self._inspector.values[i]
    def __len__(self):
        return len(self._inspector.values)
    def __iter__(self):
        return iter(self._inspector.values)
    def __as_dict__(self):
        return self._inspector.target.__as_dict__()
6cae071fb0b7de70166dc34fefb63b3a8b59aa07 | 795 | py | Python | examples/first_class_functions.py | HansVdb/pythonavd-d01 | 8434a7e211709f59994280501f0361951db5a05c | [
"BSD-2-Clause"
] | null | null | null | examples/first_class_functions.py | HansVdb/pythonavd-d01 | 8434a7e211709f59994280501f0361951db5a05c | [
"BSD-2-Clause"
] | null | null | null | examples/first_class_functions.py | HansVdb/pythonavd-d01 | 8434a7e211709f59994280501f0361951db5a05c | [
"BSD-2-Clause"
] | null | null | null | def foo():
print("I'm a lovely foo()-function")
# Functions are objects: they can be printed, inspected and bound to new names.
print(foo)
# <function foo at 0x7f9b75de3f28>
print(foo.__class__)
# <class 'function'>
# 'bar' is just another reference to the very same function object.
bar = foo
bar()
# I'm a lovely foo()-function
print(bar.__name__)
# foo
def do_something(what):
    """Invoke *what* with no arguments.

    :param what: a zero-argument callable to execute (the function object
        itself, not its name)
    """
    what()
# Functions can be passed as arguments to other functions.
do_something(foo)
# I'm a lovely foo()-function
def try_me(self):
    """Print an introduction for *self*, then call it.

    *self* must carry ``name`` and ``creator`` attributes and be callable.
    """
    print("I am " + self.name)
    print("I was created by " + self.creator)
    print("This is wat I do")  # sic: typo kept, it is part of the documented output below
    self()
# a function is an object with attributes and methods
setattr(foo, 'name', 'foo')
foo.creator = "Hans"
foo.print = try_me
# foo.print is a plain function attribute, not a bound method, so the
# "self" argument must be passed explicitly.
foo.print(foo)
"""
I am foo
I was created by Hans
This is wat I do
I'm a lovely foo()-function
"""
print(foo)
# <function foo at 0x7f9b75de3f28>
6caf177be60f963a1d158636a23493e44c9f3170 | 74,516 | py | Python | sdk/python/pulumi_rancher2/auth_config_open_ldap.py | pulumi/pulumi-rancher2 | 7a98af8cf598b711084a7f46c0fe71b43ed7a8ac | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2020-03-23T15:59:11.000Z | 2021-01-29T00:37:32.000Z | sdk/python/pulumi_rancher2/auth_config_open_ldap.py | pulumi/pulumi-rancher2 | 7a98af8cf598b711084a7f46c0fe71b43ed7a8ac | [
"ECL-2.0",
"Apache-2.0"
] | 76 | 2020-01-16T20:00:25.000Z | 2022-03-31T20:30:08.000Z | sdk/python/pulumi_rancher2/auth_config_open_ldap.py | pulumi/pulumi-rancher2 | 7a98af8cf598b711084a7f46c0fe71b43ed7a8ac | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-03-27T17:39:59.000Z | 2020-11-24T23:09:24.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['AuthConfigOpenLdapArgs', 'AuthConfigOpenLdap']
@pulumi.input_type
class AuthConfigOpenLdapArgs:
    """Input arguments for constructing an ``AuthConfigOpenLdap`` resource.

    Machine-generated by the Pulumi Terraform Bridge -- do not edit by hand.
    All attribute access is proxied through ``pulumi.get``/``pulumi.set`` so
    that each value may be a plain Python value or a Pulumi ``Output``.
    """
    def __init__(__self__, *,
                 servers: pulumi.Input[Sequence[pulumi.Input[str]]],
                 service_account_distinguished_name: pulumi.Input[str],
                 service_account_password: pulumi.Input[str],
                 test_password: pulumi.Input[str],
                 test_username: pulumi.Input[str],
                 user_search_base: pulumi.Input[str],
                 access_mode: Optional[pulumi.Input[str]] = None,
                 allowed_principal_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 certificate: Optional[pulumi.Input[str]] = None,
                 connection_timeout: Optional[pulumi.Input[int]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 group_dn_attribute: Optional[pulumi.Input[str]] = None,
                 group_member_mapping_attribute: Optional[pulumi.Input[str]] = None,
                 group_member_user_attribute: Optional[pulumi.Input[str]] = None,
                 group_name_attribute: Optional[pulumi.Input[str]] = None,
                 group_object_class: Optional[pulumi.Input[str]] = None,
                 group_search_attribute: Optional[pulumi.Input[str]] = None,
                 group_search_base: Optional[pulumi.Input[str]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 nested_group_membership_enabled: Optional[pulumi.Input[bool]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 tls: Optional[pulumi.Input[bool]] = None,
                 user_disabled_bit_mask: Optional[pulumi.Input[int]] = None,
                 user_enabled_attribute: Optional[pulumi.Input[str]] = None,
                 user_login_attribute: Optional[pulumi.Input[str]] = None,
                 user_member_attribute: Optional[pulumi.Input[str]] = None,
                 user_name_attribute: Optional[pulumi.Input[str]] = None,
                 user_object_class: Optional[pulumi.Input[str]] = None,
                 user_search_attribute: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a AuthConfigOpenLdap resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] servers: OpenLdap servers list (list)
        :param pulumi.Input[str] service_account_distinguished_name: Service account DN for access OpenLdap service (string)
        :param pulumi.Input[str] service_account_password: Service account password for access OpenLdap service (string)
        :param pulumi.Input[str] test_password: Password for test access to OpenLdap service (string)
        :param pulumi.Input[str] test_username: Username for test access to OpenLdap service (string)
        :param pulumi.Input[str] user_search_base: User search base DN (string)
        :param pulumi.Input[str] access_mode: Access mode for auth. `required`, `restricted`, `unrestricted` are supported. Default `unrestricted` (string)
        :param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_principal_ids: Allowed principal ids for auth. Required if `access_mode` is `required` or `restricted`. Ex: `openldap_user://<DN>`  `openldap_group://<DN>` (list)
        :param pulumi.Input[Mapping[str, Any]] annotations: Annotations of the resource (map)
        :param pulumi.Input[str] certificate: Base64 encoded CA certificate for TLS if self-signed. Use filebase64(<FILE>) for encoding file (string)
        :param pulumi.Input[int] connection_timeout: OpenLdap connection timeout. Default `5000` (int)
        :param pulumi.Input[bool] enabled: Enable auth config provider. Default `true` (bool)
        :param pulumi.Input[str] group_dn_attribute: Group DN attribute. Default `entryDN` (string)
        :param pulumi.Input[str] group_member_mapping_attribute: Group member mapping attribute. Default `member` (string)
        :param pulumi.Input[str] group_member_user_attribute: Group member user attribute. Default `entryDN` (string)
        :param pulumi.Input[str] group_name_attribute: Group name attribute. Default `cn` (string)
        :param pulumi.Input[str] group_object_class: Group object class. Default `groupOfNames` (string)
        :param pulumi.Input[str] group_search_attribute: Group search attribute. Default `cn` (string)
        :param pulumi.Input[str] group_search_base: Group search base (string)
        :param pulumi.Input[Mapping[str, Any]] labels: Labels of the resource (map)
        :param pulumi.Input[bool] nested_group_membership_enabled: Nested group membership enable. Default `false` (bool)
        :param pulumi.Input[int] port: OpenLdap port. Default `389` (int)
        :param pulumi.Input[bool] tls: Enable TLS connection (bool)
        :param pulumi.Input[int] user_disabled_bit_mask: User disabled bit mask (int)
        :param pulumi.Input[str] user_enabled_attribute: User enable attribute (string)
        :param pulumi.Input[str] user_login_attribute: User login attribute. Default `uid` (string)
        :param pulumi.Input[str] user_member_attribute: User member attribute. Default `memberOf` (string)
        :param pulumi.Input[str] user_name_attribute: User name attribute. Default `givenName` (string)
        :param pulumi.Input[str] user_object_class: User object class. Default `inetorgperson` (string)
        :param pulumi.Input[str] user_search_attribute: User search attribute. Default `uid|sn|givenName` (string)
        """
        # Required arguments are stored unconditionally.
        pulumi.set(__self__, "servers", servers)
        pulumi.set(__self__, "service_account_distinguished_name", service_account_distinguished_name)
        pulumi.set(__self__, "service_account_password", service_account_password)
        pulumi.set(__self__, "test_password", test_password)
        pulumi.set(__self__, "test_username", test_username)
        pulumi.set(__self__, "user_search_base", user_search_base)
        # Optional arguments are stored only when given, so that the Rancher
        # provider can apply its own defaults for anything left unset.
        if access_mode is not None:
            pulumi.set(__self__, "access_mode", access_mode)
        if allowed_principal_ids is not None:
            pulumi.set(__self__, "allowed_principal_ids", allowed_principal_ids)
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)
        if certificate is not None:
            pulumi.set(__self__, "certificate", certificate)
        if connection_timeout is not None:
            pulumi.set(__self__, "connection_timeout", connection_timeout)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if group_dn_attribute is not None:
            pulumi.set(__self__, "group_dn_attribute", group_dn_attribute)
        if group_member_mapping_attribute is not None:
            pulumi.set(__self__, "group_member_mapping_attribute", group_member_mapping_attribute)
        if group_member_user_attribute is not None:
            pulumi.set(__self__, "group_member_user_attribute", group_member_user_attribute)
        if group_name_attribute is not None:
            pulumi.set(__self__, "group_name_attribute", group_name_attribute)
        if group_object_class is not None:
            pulumi.set(__self__, "group_object_class", group_object_class)
        if group_search_attribute is not None:
            pulumi.set(__self__, "group_search_attribute", group_search_attribute)
        if group_search_base is not None:
            pulumi.set(__self__, "group_search_base", group_search_base)
        if labels is not None:
            pulumi.set(__self__, "labels", labels)
        if nested_group_membership_enabled is not None:
            pulumi.set(__self__, "nested_group_membership_enabled", nested_group_membership_enabled)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if tls is not None:
            pulumi.set(__self__, "tls", tls)
        if user_disabled_bit_mask is not None:
            pulumi.set(__self__, "user_disabled_bit_mask", user_disabled_bit_mask)
        if user_enabled_attribute is not None:
            pulumi.set(__self__, "user_enabled_attribute", user_enabled_attribute)
        if user_login_attribute is not None:
            pulumi.set(__self__, "user_login_attribute", user_login_attribute)
        if user_member_attribute is not None:
            pulumi.set(__self__, "user_member_attribute", user_member_attribute)
        if user_name_attribute is not None:
            pulumi.set(__self__, "user_name_attribute", user_name_attribute)
        if user_object_class is not None:
            pulumi.set(__self__, "user_object_class", user_object_class)
        if user_search_attribute is not None:
            pulumi.set(__self__, "user_search_attribute", user_search_attribute)

    # Each property below proxies one input through pulumi.get/pulumi.set;
    # @pulumi.getter(name=...) maps snake_case to the provider's camelCase key.
    @property
    @pulumi.getter
    def servers(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        OpenLdap servers list (list)
        """
        return pulumi.get(self, "servers")

    @servers.setter
    def servers(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "servers", value)

    @property
    @pulumi.getter(name="serviceAccountDistinguishedName")
    def service_account_distinguished_name(self) -> pulumi.Input[str]:
        """
        Service account DN for access OpenLdap service (string)
        """
        return pulumi.get(self, "service_account_distinguished_name")

    @service_account_distinguished_name.setter
    def service_account_distinguished_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "service_account_distinguished_name", value)

    @property
    @pulumi.getter(name="serviceAccountPassword")
    def service_account_password(self) -> pulumi.Input[str]:
        """
        Service account password for access OpenLdap service (string)
        """
        return pulumi.get(self, "service_account_password")

    @service_account_password.setter
    def service_account_password(self, value: pulumi.Input[str]):
        pulumi.set(self, "service_account_password", value)

    @property
    @pulumi.getter(name="testPassword")
    def test_password(self) -> pulumi.Input[str]:
        """
        Password for test access to OpenLdap service (string)
        """
        return pulumi.get(self, "test_password")

    @test_password.setter
    def test_password(self, value: pulumi.Input[str]):
        pulumi.set(self, "test_password", value)

    @property
    @pulumi.getter(name="testUsername")
    def test_username(self) -> pulumi.Input[str]:
        """
        Username for test access to OpenLdap service (string)
        """
        return pulumi.get(self, "test_username")

    @test_username.setter
    def test_username(self, value: pulumi.Input[str]):
        pulumi.set(self, "test_username", value)

    @property
    @pulumi.getter(name="userSearchBase")
    def user_search_base(self) -> pulumi.Input[str]:
        """
        User search base DN (string)
        """
        return pulumi.get(self, "user_search_base")

    @user_search_base.setter
    def user_search_base(self, value: pulumi.Input[str]):
        pulumi.set(self, "user_search_base", value)

    @property
    @pulumi.getter(name="accessMode")
    def access_mode(self) -> Optional[pulumi.Input[str]]:
        """
        Access mode for auth. `required`, `restricted`, `unrestricted` are supported. Default `unrestricted` (string)
        """
        return pulumi.get(self, "access_mode")

    @access_mode.setter
    def access_mode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "access_mode", value)

    @property
    @pulumi.getter(name="allowedPrincipalIds")
    def allowed_principal_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Allowed principal ids for auth. Required if `access_mode` is `required` or `restricted`. Ex: `openldap_user://<DN>`  `openldap_group://<DN>` (list)
        """
        return pulumi.get(self, "allowed_principal_ids")

    @allowed_principal_ids.setter
    def allowed_principal_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "allowed_principal_ids", value)

    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Annotations of the resource (map)
        """
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "annotations", value)

    @property
    @pulumi.getter
    def certificate(self) -> Optional[pulumi.Input[str]]:
        """
        Base64 encoded CA certificate for TLS if self-signed. Use filebase64(<FILE>) for encoding file (string)
        """
        return pulumi.get(self, "certificate")

    @certificate.setter
    def certificate(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "certificate", value)

    @property
    @pulumi.getter(name="connectionTimeout")
    def connection_timeout(self) -> Optional[pulumi.Input[int]]:
        """
        OpenLdap connection timeout. Default `5000` (int)
        """
        return pulumi.get(self, "connection_timeout")

    @connection_timeout.setter
    def connection_timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "connection_timeout", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable auth config provider. Default `true` (bool)
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter(name="groupDnAttribute")
    def group_dn_attribute(self) -> Optional[pulumi.Input[str]]:
        """
        Group DN attribute. Default `entryDN` (string)
        """
        return pulumi.get(self, "group_dn_attribute")

    @group_dn_attribute.setter
    def group_dn_attribute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group_dn_attribute", value)

    @property
    @pulumi.getter(name="groupMemberMappingAttribute")
    def group_member_mapping_attribute(self) -> Optional[pulumi.Input[str]]:
        """
        Group member mapping attribute. Default `member` (string)
        """
        return pulumi.get(self, "group_member_mapping_attribute")

    @group_member_mapping_attribute.setter
    def group_member_mapping_attribute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group_member_mapping_attribute", value)

    @property
    @pulumi.getter(name="groupMemberUserAttribute")
    def group_member_user_attribute(self) -> Optional[pulumi.Input[str]]:
        """
        Group member user attribute. Default `entryDN` (string)
        """
        return pulumi.get(self, "group_member_user_attribute")

    @group_member_user_attribute.setter
    def group_member_user_attribute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group_member_user_attribute", value)

    @property
    @pulumi.getter(name="groupNameAttribute")
    def group_name_attribute(self) -> Optional[pulumi.Input[str]]:
        """
        Group name attribute. Default `cn` (string)
        """
        return pulumi.get(self, "group_name_attribute")

    @group_name_attribute.setter
    def group_name_attribute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group_name_attribute", value)

    @property
    @pulumi.getter(name="groupObjectClass")
    def group_object_class(self) -> Optional[pulumi.Input[str]]:
        """
        Group object class. Default `groupOfNames` (string)
        """
        return pulumi.get(self, "group_object_class")

    @group_object_class.setter
    def group_object_class(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group_object_class", value)

    @property
    @pulumi.getter(name="groupSearchAttribute")
    def group_search_attribute(self) -> Optional[pulumi.Input[str]]:
        """
        Group search attribute. Default `cn` (string)
        """
        return pulumi.get(self, "group_search_attribute")

    @group_search_attribute.setter
    def group_search_attribute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group_search_attribute", value)

    @property
    @pulumi.getter(name="groupSearchBase")
    def group_search_base(self) -> Optional[pulumi.Input[str]]:
        """
        Group search base (string)
        """
        return pulumi.get(self, "group_search_base")

    @group_search_base.setter
    def group_search_base(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group_search_base", value)

    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Labels of the resource (map)
        """
        return pulumi.get(self, "labels")

    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "labels", value)

    @property
    @pulumi.getter(name="nestedGroupMembershipEnabled")
    def nested_group_membership_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Nested group membership enable. Default `false` (bool)
        """
        return pulumi.get(self, "nested_group_membership_enabled")

    @nested_group_membership_enabled.setter
    def nested_group_membership_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "nested_group_membership_enabled", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        OpenLdap port. Default `389` (int)
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter
    def tls(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable TLS connection (bool)
        """
        return pulumi.get(self, "tls")

    @tls.setter
    def tls(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "tls", value)

    @property
    @pulumi.getter(name="userDisabledBitMask")
    def user_disabled_bit_mask(self) -> Optional[pulumi.Input[int]]:
        """
        User disabled bit mask (int)
        """
        return pulumi.get(self, "user_disabled_bit_mask")

    @user_disabled_bit_mask.setter
    def user_disabled_bit_mask(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "user_disabled_bit_mask", value)

    @property
    @pulumi.getter(name="userEnabledAttribute")
    def user_enabled_attribute(self) -> Optional[pulumi.Input[str]]:
        """
        User enable attribute (string)
        """
        return pulumi.get(self, "user_enabled_attribute")

    @user_enabled_attribute.setter
    def user_enabled_attribute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_enabled_attribute", value)

    @property
    @pulumi.getter(name="userLoginAttribute")
    def user_login_attribute(self) -> Optional[pulumi.Input[str]]:
        """
        User login attribute. Default `uid` (string)
        """
        return pulumi.get(self, "user_login_attribute")

    @user_login_attribute.setter
    def user_login_attribute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_login_attribute", value)

    @property
    @pulumi.getter(name="userMemberAttribute")
    def user_member_attribute(self) -> Optional[pulumi.Input[str]]:
        """
        User member attribute. Default `memberOf` (string)
        """
        return pulumi.get(self, "user_member_attribute")

    @user_member_attribute.setter
    def user_member_attribute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_member_attribute", value)

    @property
    @pulumi.getter(name="userNameAttribute")
    def user_name_attribute(self) -> Optional[pulumi.Input[str]]:
        """
        User name attribute. Default `givenName` (string)
        """
        return pulumi.get(self, "user_name_attribute")

    @user_name_attribute.setter
    def user_name_attribute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_name_attribute", value)

    @property
    @pulumi.getter(name="userObjectClass")
    def user_object_class(self) -> Optional[pulumi.Input[str]]:
        """
        User object class. Default `inetorgperson` (string)
        """
        return pulumi.get(self, "user_object_class")

    @user_object_class.setter
    def user_object_class(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_object_class", value)

    @property
    @pulumi.getter(name="userSearchAttribute")
    def user_search_attribute(self) -> Optional[pulumi.Input[str]]:
        """
        User search attribute. Default `uid|sn|givenName` (string)
        """
        return pulumi.get(self, "user_search_attribute")

    @user_search_attribute.setter
    def user_search_attribute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_search_attribute", value)
@pulumi.input_type
class _AuthConfigOpenLdapState:
def __init__(__self__, *,
access_mode: Optional[pulumi.Input[str]] = None,
allowed_principal_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
certificate: Optional[pulumi.Input[str]] = None,
connection_timeout: Optional[pulumi.Input[int]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
group_dn_attribute: Optional[pulumi.Input[str]] = None,
group_member_mapping_attribute: Optional[pulumi.Input[str]] = None,
group_member_user_attribute: Optional[pulumi.Input[str]] = None,
group_name_attribute: Optional[pulumi.Input[str]] = None,
group_object_class: Optional[pulumi.Input[str]] = None,
group_search_attribute: Optional[pulumi.Input[str]] = None,
group_search_base: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
nested_group_membership_enabled: Optional[pulumi.Input[bool]] = None,
port: Optional[pulumi.Input[int]] = None,
servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
service_account_distinguished_name: Optional[pulumi.Input[str]] = None,
service_account_password: Optional[pulumi.Input[str]] = None,
test_password: Optional[pulumi.Input[str]] = None,
test_username: Optional[pulumi.Input[str]] = None,
tls: Optional[pulumi.Input[bool]] = None,
type: Optional[pulumi.Input[str]] = None,
user_disabled_bit_mask: Optional[pulumi.Input[int]] = None,
user_enabled_attribute: Optional[pulumi.Input[str]] = None,
user_login_attribute: Optional[pulumi.Input[str]] = None,
user_member_attribute: Optional[pulumi.Input[str]] = None,
user_name_attribute: Optional[pulumi.Input[str]] = None,
user_object_class: Optional[pulumi.Input[str]] = None,
user_search_attribute: Optional[pulumi.Input[str]] = None,
user_search_base: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering AuthConfigOpenLdap resources.
:param pulumi.Input[str] access_mode: Access mode for auth. `required`, `restricted`, `unrestricted` are supported. Default `unrestricted` (string)
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_principal_ids: Allowed principal ids for auth. Required if `access_mode` is `required` or `restricted`. Ex: `openldap_user://<DN>` `openldap_group://<DN>` (list)
:param pulumi.Input[Mapping[str, Any]] annotations: Annotations of the resource (map)
:param pulumi.Input[str] certificate: Base64 encoded CA certificate for TLS if self-signed. Use filebase64(<FILE>) for encoding file (string)
:param pulumi.Input[int] connection_timeout: OpenLdap connection timeout. Default `5000` (int)
:param pulumi.Input[bool] enabled: Enable auth config provider. Default `true` (bool)
:param pulumi.Input[str] group_dn_attribute: Group DN attribute. Default `entryDN` (string)
:param pulumi.Input[str] group_member_mapping_attribute: Group member mapping attribute. Default `member` (string)
:param pulumi.Input[str] group_member_user_attribute: Group member user attribute. Default `entryDN` (string)
:param pulumi.Input[str] group_name_attribute: Group name attribute. Default `cn` (string)
:param pulumi.Input[str] group_object_class: Group object class. Default `groupOfNames` (string)
:param pulumi.Input[str] group_search_attribute: Group search attribute. Default `cn` (string)
:param pulumi.Input[str] group_search_base: Group search base (string)
:param pulumi.Input[Mapping[str, Any]] labels: Labels of the resource (map)
:param pulumi.Input[str] name: (Computed) The name of the resource (string)
:param pulumi.Input[bool] nested_group_membership_enabled: Nested group membership enable. Default `false` (bool)
:param pulumi.Input[int] port: OpenLdap port. Default `389` (int)
:param pulumi.Input[Sequence[pulumi.Input[str]]] servers: OpenLdap servers list (list)
:param pulumi.Input[str] service_account_distinguished_name: Service account DN for access OpenLdap service (string)
:param pulumi.Input[str] service_account_password: Service account password for access OpenLdap service (string)
:param pulumi.Input[str] test_password: Password for test access to OpenLdap service (string)
:param pulumi.Input[str] test_username: Username for test access to OpenLdap service (string)
:param pulumi.Input[bool] tls: Enable TLS connection (bool)
:param pulumi.Input[str] type: (Computed) The type of the resource (string)
:param pulumi.Input[int] user_disabled_bit_mask: User disabled bit mask (int)
:param pulumi.Input[str] user_enabled_attribute: User enable attribute (string)
:param pulumi.Input[str] user_login_attribute: User login attribute. Default `uid` (string)
:param pulumi.Input[str] user_member_attribute: User member attribute. Default `memberOf` (string)
:param pulumi.Input[str] user_name_attribute: User name attribute. Default `givenName` (string)
:param pulumi.Input[str] user_object_class: User object class. Default `inetorgperson` (string)
:param pulumi.Input[str] user_search_attribute: User search attribute. Default `uid|sn|givenName` (string)
:param pulumi.Input[str] user_search_base: User search base DN (string)
"""
if access_mode is not None:
pulumi.set(__self__, "access_mode", access_mode)
if allowed_principal_ids is not None:
pulumi.set(__self__, "allowed_principal_ids", allowed_principal_ids)
if annotations is not None:
pulumi.set(__self__, "annotations", annotations)
if certificate is not None:
pulumi.set(__self__, "certificate", certificate)
if connection_timeout is not None:
pulumi.set(__self__, "connection_timeout", connection_timeout)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if group_dn_attribute is not None:
pulumi.set(__self__, "group_dn_attribute", group_dn_attribute)
if group_member_mapping_attribute is not None:
pulumi.set(__self__, "group_member_mapping_attribute", group_member_mapping_attribute)
if group_member_user_attribute is not None:
pulumi.set(__self__, "group_member_user_attribute", group_member_user_attribute)
if group_name_attribute is not None:
pulumi.set(__self__, "group_name_attribute", group_name_attribute)
if group_object_class is not None:
pulumi.set(__self__, "group_object_class", group_object_class)
if group_search_attribute is not None:
pulumi.set(__self__, "group_search_attribute", group_search_attribute)
if group_search_base is not None:
pulumi.set(__self__, "group_search_base", group_search_base)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if name is not None:
pulumi.set(__self__, "name", name)
if nested_group_membership_enabled is not None:
pulumi.set(__self__, "nested_group_membership_enabled", nested_group_membership_enabled)
if port is not None:
pulumi.set(__self__, "port", port)
if servers is not None:
pulumi.set(__self__, "servers", servers)
if service_account_distinguished_name is not None:
pulumi.set(__self__, "service_account_distinguished_name", service_account_distinguished_name)
if service_account_password is not None:
pulumi.set(__self__, "service_account_password", service_account_password)
if test_password is not None:
pulumi.set(__self__, "test_password", test_password)
if test_username is not None:
pulumi.set(__self__, "test_username", test_username)
if tls is not None:
pulumi.set(__self__, "tls", tls)
if type is not None:
pulumi.set(__self__, "type", type)
if user_disabled_bit_mask is not None:
pulumi.set(__self__, "user_disabled_bit_mask", user_disabled_bit_mask)
if user_enabled_attribute is not None:
pulumi.set(__self__, "user_enabled_attribute", user_enabled_attribute)
if user_login_attribute is not None:
pulumi.set(__self__, "user_login_attribute", user_login_attribute)
if user_member_attribute is not None:
pulumi.set(__self__, "user_member_attribute", user_member_attribute)
if user_name_attribute is not None:
pulumi.set(__self__, "user_name_attribute", user_name_attribute)
if user_object_class is not None:
pulumi.set(__self__, "user_object_class", user_object_class)
if user_search_attribute is not None:
pulumi.set(__self__, "user_search_attribute", user_search_attribute)
if user_search_base is not None:
pulumi.set(__self__, "user_search_base", user_search_base)
@property
@pulumi.getter(name="accessMode")
def access_mode(self) -> Optional[pulumi.Input[str]]:
"""
Access mode for auth. `required`, `restricted`, `unrestricted` are supported. Default `unrestricted` (string)
"""
return pulumi.get(self, "access_mode")
@access_mode.setter
def access_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "access_mode", value)
@property
@pulumi.getter(name="allowedPrincipalIds")
def allowed_principal_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Allowed principal ids for auth. Required if `access_mode` is `required` or `restricted`. Ex: `openldap_user://<DN>` `openldap_group://<DN>` (list)
"""
return pulumi.get(self, "allowed_principal_ids")
@allowed_principal_ids.setter
def allowed_principal_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_principal_ids", value)
@property
@pulumi.getter
def annotations(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Annotations of the resource (map)
"""
return pulumi.get(self, "annotations")
@annotations.setter
def annotations(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "annotations", value)
@property
@pulumi.getter
def certificate(self) -> Optional[pulumi.Input[str]]:
"""
Base64 encoded CA certificate for TLS if self-signed. Use filebase64(<FILE>) for encoding file (string)
"""
return pulumi.get(self, "certificate")
@certificate.setter
def certificate(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "certificate", value)
@property
@pulumi.getter(name="connectionTimeout")
def connection_timeout(self) -> Optional[pulumi.Input[int]]:
"""
OpenLdap connection timeout. Default `5000` (int)
"""
return pulumi.get(self, "connection_timeout")
@connection_timeout.setter
def connection_timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "connection_timeout", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Enable auth config provider. Default `true` (bool)
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="groupDnAttribute")
def group_dn_attribute(self) -> Optional[pulumi.Input[str]]:
"""
Group DN attribute. Default `entryDN` (string)
"""
return pulumi.get(self, "group_dn_attribute")
@group_dn_attribute.setter
def group_dn_attribute(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_dn_attribute", value)
@property
@pulumi.getter(name="groupMemberMappingAttribute")
def group_member_mapping_attribute(self) -> Optional[pulumi.Input[str]]:
"""
Group member mapping attribute. Default `member` (string)
"""
return pulumi.get(self, "group_member_mapping_attribute")
@group_member_mapping_attribute.setter
def group_member_mapping_attribute(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_member_mapping_attribute", value)
@property
@pulumi.getter(name="groupMemberUserAttribute")
def group_member_user_attribute(self) -> Optional[pulumi.Input[str]]:
"""
Group member user attribute. Default `entryDN` (string)
"""
return pulumi.get(self, "group_member_user_attribute")
@group_member_user_attribute.setter
def group_member_user_attribute(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_member_user_attribute", value)
@property
@pulumi.getter(name="groupNameAttribute")
def group_name_attribute(self) -> Optional[pulumi.Input[str]]:
"""
Group name attribute. Default `cn` (string)
"""
return pulumi.get(self, "group_name_attribute")
@group_name_attribute.setter
def group_name_attribute(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_name_attribute", value)
@property
@pulumi.getter(name="groupObjectClass")
def group_object_class(self) -> Optional[pulumi.Input[str]]:
"""
Group object class. Default `groupOfNames` (string)
"""
return pulumi.get(self, "group_object_class")
    # ------------------------------------------------------------------
    # Generated property accessors (this chunk begins mid-class: the getter
    # for "group_object_class" is defined above it).  Each getter reads its
    # value from Pulumi's internal property table via pulumi.get(), and each
    # setter writes it back via pulumi.set(); no state is stored on the
    # instance itself.  NOTE(review): this appears to be the generated
    # state/args class consumed by AuthConfigOpenLdap.get() below — confirm
    # against the class header above this chunk.
    # ------------------------------------------------------------------

    @group_object_class.setter
    def group_object_class(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group_object_class", value)

    @property
    @pulumi.getter(name="groupSearchAttribute")
    def group_search_attribute(self) -> Optional[pulumi.Input[str]]:
        """
        Group search attribute. Default `cn` (string)
        """
        return pulumi.get(self, "group_search_attribute")

    @group_search_attribute.setter
    def group_search_attribute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group_search_attribute", value)

    @property
    @pulumi.getter(name="groupSearchBase")
    def group_search_base(self) -> Optional[pulumi.Input[str]]:
        """
        Group search base (string)
        """
        return pulumi.get(self, "group_search_base")

    @group_search_base.setter
    def group_search_base(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group_search_base", value)

    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Labels of the resource (map)
        """
        return pulumi.get(self, "labels")

    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "labels", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        (Computed) The name of the resource (string)
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="nestedGroupMembershipEnabled")
    def nested_group_membership_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Nested group membership enable. Default `false` (bool)
        """
        return pulumi.get(self, "nested_group_membership_enabled")

    @nested_group_membership_enabled.setter
    def nested_group_membership_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "nested_group_membership_enabled", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        OpenLdap port. Default `389` (int)
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter
    def servers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        OpenLdap servers list (list)
        """
        return pulumi.get(self, "servers")

    @servers.setter
    def servers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "servers", value)

    @property
    @pulumi.getter(name="serviceAccountDistinguishedName")
    def service_account_distinguished_name(self) -> Optional[pulumi.Input[str]]:
        """
        Service account DN for access OpenLdap service (string)
        """
        return pulumi.get(self, "service_account_distinguished_name")

    @service_account_distinguished_name.setter
    def service_account_distinguished_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_account_distinguished_name", value)

    @property
    @pulumi.getter(name="serviceAccountPassword")
    def service_account_password(self) -> Optional[pulumi.Input[str]]:
        """
        Service account password for access OpenLdap service (string)
        """
        return pulumi.get(self, "service_account_password")

    @service_account_password.setter
    def service_account_password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_account_password", value)

    @property
    @pulumi.getter(name="testPassword")
    def test_password(self) -> Optional[pulumi.Input[str]]:
        """
        Password for test access to OpenLdap service (string)
        """
        return pulumi.get(self, "test_password")

    @test_password.setter
    def test_password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "test_password", value)

    @property
    @pulumi.getter(name="testUsername")
    def test_username(self) -> Optional[pulumi.Input[str]]:
        """
        Username for test access to OpenLdap service (string)
        """
        return pulumi.get(self, "test_username")

    @test_username.setter
    def test_username(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "test_username", value)

    @property
    @pulumi.getter
    def tls(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable TLS connection (bool)
        """
        return pulumi.get(self, "tls")

    @tls.setter
    def tls(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "tls", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        (Computed) The type of the resource (string)
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter(name="userDisabledBitMask")
    def user_disabled_bit_mask(self) -> Optional[pulumi.Input[int]]:
        """
        User disabled bit mask (int)
        """
        return pulumi.get(self, "user_disabled_bit_mask")

    @user_disabled_bit_mask.setter
    def user_disabled_bit_mask(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "user_disabled_bit_mask", value)

    @property
    @pulumi.getter(name="userEnabledAttribute")
    def user_enabled_attribute(self) -> Optional[pulumi.Input[str]]:
        """
        User enable attribute (string)
        """
        return pulumi.get(self, "user_enabled_attribute")

    @user_enabled_attribute.setter
    def user_enabled_attribute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_enabled_attribute", value)

    @property
    @pulumi.getter(name="userLoginAttribute")
    def user_login_attribute(self) -> Optional[pulumi.Input[str]]:
        """
        User login attribute. Default `uid` (string)
        """
        return pulumi.get(self, "user_login_attribute")

    @user_login_attribute.setter
    def user_login_attribute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_login_attribute", value)

    @property
    @pulumi.getter(name="userMemberAttribute")
    def user_member_attribute(self) -> Optional[pulumi.Input[str]]:
        """
        User member attribute. Default `memberOf` (string)
        """
        return pulumi.get(self, "user_member_attribute")

    @user_member_attribute.setter
    def user_member_attribute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_member_attribute", value)

    @property
    @pulumi.getter(name="userNameAttribute")
    def user_name_attribute(self) -> Optional[pulumi.Input[str]]:
        """
        User name attribute. Default `givenName` (string)
        """
        return pulumi.get(self, "user_name_attribute")

    @user_name_attribute.setter
    def user_name_attribute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_name_attribute", value)

    @property
    @pulumi.getter(name="userObjectClass")
    def user_object_class(self) -> Optional[pulumi.Input[str]]:
        """
        User object class. Default `inetorgperson` (string)
        """
        return pulumi.get(self, "user_object_class")

    @user_object_class.setter
    def user_object_class(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_object_class", value)

    @property
    @pulumi.getter(name="userSearchAttribute")
    def user_search_attribute(self) -> Optional[pulumi.Input[str]]:
        """
        User search attribute. Default `uid|sn|givenName` (string)
        """
        return pulumi.get(self, "user_search_attribute")

    @user_search_attribute.setter
    def user_search_attribute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_search_attribute", value)

    @property
    @pulumi.getter(name="userSearchBase")
    def user_search_base(self) -> Optional[pulumi.Input[str]]:
        """
        User search base DN (string)
        """
        return pulumi.get(self, "user_search_base")

    @user_search_base.setter
    def user_search_base(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_search_base", value)
class AuthConfigOpenLdap(pulumi.CustomResource):
    """
    Provides a Rancher v2 Auth Config OpenLdap resource.  This can be used to
    configure and enable Auth Config OpenLdap for Rancher v2 RKE clusters and
    retrieve their information.  In addition to the built-in local auth, only
    one external auth config provider can be enabled at a time.
    """

    # First typing overload: construct the resource from individual keyword
    # arguments.  The body is intentionally `...`; the real work happens in
    # _internal_init, dispatched from the concrete __init__ below.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 access_mode: Optional[pulumi.Input[str]] = None,
                 allowed_principal_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 certificate: Optional[pulumi.Input[str]] = None,
                 connection_timeout: Optional[pulumi.Input[int]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 group_dn_attribute: Optional[pulumi.Input[str]] = None,
                 group_member_mapping_attribute: Optional[pulumi.Input[str]] = None,
                 group_member_user_attribute: Optional[pulumi.Input[str]] = None,
                 group_name_attribute: Optional[pulumi.Input[str]] = None,
                 group_object_class: Optional[pulumi.Input[str]] = None,
                 group_search_attribute: Optional[pulumi.Input[str]] = None,
                 group_search_base: Optional[pulumi.Input[str]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 nested_group_membership_enabled: Optional[pulumi.Input[bool]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 service_account_distinguished_name: Optional[pulumi.Input[str]] = None,
                 service_account_password: Optional[pulumi.Input[str]] = None,
                 test_password: Optional[pulumi.Input[str]] = None,
                 test_username: Optional[pulumi.Input[str]] = None,
                 tls: Optional[pulumi.Input[bool]] = None,
                 user_disabled_bit_mask: Optional[pulumi.Input[int]] = None,
                 user_enabled_attribute: Optional[pulumi.Input[str]] = None,
                 user_login_attribute: Optional[pulumi.Input[str]] = None,
                 user_member_attribute: Optional[pulumi.Input[str]] = None,
                 user_name_attribute: Optional[pulumi.Input[str]] = None,
                 user_object_class: Optional[pulumi.Input[str]] = None,
                 user_search_attribute: Optional[pulumi.Input[str]] = None,
                 user_search_base: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides a Rancher v2 Auth Config OpenLdap resource. This can be used to configure and enable Auth Config OpenLdap for Rancher v2 RKE clusters and retrieve their information.

        In addition to the built-in local auth, only one external auth config provider can be enabled at a time.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] access_mode: Access mode for auth. `required`, `restricted`, `unrestricted` are supported. Default `unrestricted` (string)
        :param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_principal_ids: Allowed principal ids for auth. Required if `access_mode` is `required` or `restricted`. Ex: `openldap_user://<DN>`  `openldap_group://<DN>` (list)
        :param pulumi.Input[Mapping[str, Any]] annotations: Annotations of the resource (map)
        :param pulumi.Input[str] certificate: Base64 encoded CA certificate for TLS if self-signed. Use filebase64(<FILE>) for encoding file (string)
        :param pulumi.Input[int] connection_timeout: OpenLdap connection timeout. Default `5000` (int)
        :param pulumi.Input[bool] enabled: Enable auth config provider. Default `true` (bool)
        :param pulumi.Input[str] group_dn_attribute: Group DN attribute. Default `entryDN` (string)
        :param pulumi.Input[str] group_member_mapping_attribute: Group member mapping attribute. Default `member` (string)
        :param pulumi.Input[str] group_member_user_attribute: Group member user attribute. Default `entryDN` (string)
        :param pulumi.Input[str] group_name_attribute: Group name attribute. Default `cn` (string)
        :param pulumi.Input[str] group_object_class: Group object class. Default `groupOfNames` (string)
        :param pulumi.Input[str] group_search_attribute: Group search attribute. Default `cn` (string)
        :param pulumi.Input[str] group_search_base: Group search base (string)
        :param pulumi.Input[Mapping[str, Any]] labels: Labels of the resource (map)
        :param pulumi.Input[bool] nested_group_membership_enabled: Nested group membership enable. Default `false` (bool)
        :param pulumi.Input[int] port: OpenLdap port. Default `389` (int)
        :param pulumi.Input[Sequence[pulumi.Input[str]]] servers: OpenLdap servers list (list)
        :param pulumi.Input[str] service_account_distinguished_name: Service account DN for access OpenLdap service (string)
        :param pulumi.Input[str] service_account_password: Service account password for access OpenLdap service (string)
        :param pulumi.Input[str] test_password: Password for test access to OpenLdap service (string)
        :param pulumi.Input[str] test_username: Username for test access to OpenLdap service (string)
        :param pulumi.Input[bool] tls: Enable TLS connection (bool)
        :param pulumi.Input[int] user_disabled_bit_mask: User disabled bit mask (int)
        :param pulumi.Input[str] user_enabled_attribute: User enable attribute (string)
        :param pulumi.Input[str] user_login_attribute: User login attribute. Default `uid` (string)
        :param pulumi.Input[str] user_member_attribute: User member attribute. Default `memberOf` (string)
        :param pulumi.Input[str] user_name_attribute: User name attribute. Default `givenName` (string)
        :param pulumi.Input[str] user_object_class: User object class. Default `inetorgperson` (string)
        :param pulumi.Input[str] user_search_attribute: User search attribute. Default `uid|sn|givenName` (string)
        :param pulumi.Input[str] user_search_base: User search base DN (string)
        """
        ...
    # Second typing overload: construct the resource from a pre-built
    # AuthConfigOpenLdapArgs bag instead of individual keyword arguments.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: AuthConfigOpenLdapArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a Rancher v2 Auth Config OpenLdap resource. This can be used to configure and enable Auth Config OpenLdap for Rancher v2 RKE clusters and retrieve their information.

        In addition to the built-in local auth, only one external auth config provider can be enabled at a time.

        :param str resource_name: The name of the resource.
        :param AuthConfigOpenLdapArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Concrete constructor backing the two @overload signatures above.
        # get_resource_args_opts() inspects *args/**kwargs and returns the
        # args object (or None) plus the resource options; either way the
        # actual initialization is delegated to _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(AuthConfigOpenLdapArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            # Args-bag form: expand its fields into keyword arguments.
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            # Keyword form: forward the original arguments unchanged.
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       access_mode: Optional[pulumi.Input[str]] = None,
                       allowed_principal_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                       certificate: Optional[pulumi.Input[str]] = None,
                       connection_timeout: Optional[pulumi.Input[int]] = None,
                       enabled: Optional[pulumi.Input[bool]] = None,
                       group_dn_attribute: Optional[pulumi.Input[str]] = None,
                       group_member_mapping_attribute: Optional[pulumi.Input[str]] = None,
                       group_member_user_attribute: Optional[pulumi.Input[str]] = None,
                       group_name_attribute: Optional[pulumi.Input[str]] = None,
                       group_object_class: Optional[pulumi.Input[str]] = None,
                       group_search_attribute: Optional[pulumi.Input[str]] = None,
                       group_search_base: Optional[pulumi.Input[str]] = None,
                       labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                       nested_group_membership_enabled: Optional[pulumi.Input[bool]] = None,
                       port: Optional[pulumi.Input[int]] = None,
                       servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       service_account_distinguished_name: Optional[pulumi.Input[str]] = None,
                       service_account_password: Optional[pulumi.Input[str]] = None,
                       test_password: Optional[pulumi.Input[str]] = None,
                       test_username: Optional[pulumi.Input[str]] = None,
                       tls: Optional[pulumi.Input[bool]] = None,
                       user_disabled_bit_mask: Optional[pulumi.Input[int]] = None,
                       user_enabled_attribute: Optional[pulumi.Input[str]] = None,
                       user_login_attribute: Optional[pulumi.Input[str]] = None,
                       user_member_attribute: Optional[pulumi.Input[str]] = None,
                       user_name_attribute: Optional[pulumi.Input[str]] = None,
                       user_object_class: Optional[pulumi.Input[str]] = None,
                       user_search_attribute: Optional[pulumi.Input[str]] = None,
                       user_search_base: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Shared initializer used by both __init__ overloads: validates the
        # resource options, builds the property bag and registers the
        # resource with the Pulumi engine via CustomResource.__init__.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            # Default the provider plugin version to this SDK's own version.
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (no id to look up): populate the args
            # bag from the keyword arguments.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = AuthConfigOpenLdapArgs.__new__(AuthConfigOpenLdapArgs)

            __props__.__dict__["access_mode"] = access_mode
            __props__.__dict__["allowed_principal_ids"] = allowed_principal_ids
            __props__.__dict__["annotations"] = annotations
            __props__.__dict__["certificate"] = certificate
            __props__.__dict__["connection_timeout"] = connection_timeout
            __props__.__dict__["enabled"] = enabled
            __props__.__dict__["group_dn_attribute"] = group_dn_attribute
            __props__.__dict__["group_member_mapping_attribute"] = group_member_mapping_attribute
            __props__.__dict__["group_member_user_attribute"] = group_member_user_attribute
            __props__.__dict__["group_name_attribute"] = group_name_attribute
            __props__.__dict__["group_object_class"] = group_object_class
            __props__.__dict__["group_search_attribute"] = group_search_attribute
            __props__.__dict__["group_search_base"] = group_search_base
            __props__.__dict__["labels"] = labels
            __props__.__dict__["nested_group_membership_enabled"] = nested_group_membership_enabled
            __props__.__dict__["port"] = port
            # The properties below are required when creating the resource;
            # the check is skipped when opts.urn is set (engine-side lookup
            # of an already-registered resource).
            if servers is None and not opts.urn:
                raise TypeError("Missing required property 'servers'")
            __props__.__dict__["servers"] = servers
            if service_account_distinguished_name is None and not opts.urn:
                raise TypeError("Missing required property 'service_account_distinguished_name'")
            __props__.__dict__["service_account_distinguished_name"] = service_account_distinguished_name
            if service_account_password is None and not opts.urn:
                raise TypeError("Missing required property 'service_account_password'")
            __props__.__dict__["service_account_password"] = service_account_password
            if test_password is None and not opts.urn:
                raise TypeError("Missing required property 'test_password'")
            __props__.__dict__["test_password"] = test_password
            if test_username is None and not opts.urn:
                raise TypeError("Missing required property 'test_username'")
            __props__.__dict__["test_username"] = test_username
            __props__.__dict__["tls"] = tls
            __props__.__dict__["user_disabled_bit_mask"] = user_disabled_bit_mask
            __props__.__dict__["user_enabled_attribute"] = user_enabled_attribute
            __props__.__dict__["user_login_attribute"] = user_login_attribute
            __props__.__dict__["user_member_attribute"] = user_member_attribute
            __props__.__dict__["user_name_attribute"] = user_name_attribute
            __props__.__dict__["user_object_class"] = user_object_class
            __props__.__dict__["user_search_attribute"] = user_search_attribute
            if user_search_base is None and not opts.urn:
                raise TypeError("Missing required property 'user_search_base'")
            __props__.__dict__["user_search_base"] = user_search_base
            # name and type are computed by the provider, never set by the caller.
            __props__.__dict__["name"] = None
            __props__.__dict__["type"] = None
        super(AuthConfigOpenLdap, __self__).__init__(
            'rancher2:index/authConfigOpenLdap:AuthConfigOpenLdap',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            access_mode: Optional[pulumi.Input[str]] = None,
            allowed_principal_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
            certificate: Optional[pulumi.Input[str]] = None,
            connection_timeout: Optional[pulumi.Input[int]] = None,
            enabled: Optional[pulumi.Input[bool]] = None,
            group_dn_attribute: Optional[pulumi.Input[str]] = None,
            group_member_mapping_attribute: Optional[pulumi.Input[str]] = None,
            group_member_user_attribute: Optional[pulumi.Input[str]] = None,
            group_name_attribute: Optional[pulumi.Input[str]] = None,
            group_object_class: Optional[pulumi.Input[str]] = None,
            group_search_attribute: Optional[pulumi.Input[str]] = None,
            group_search_base: Optional[pulumi.Input[str]] = None,
            labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
            name: Optional[pulumi.Input[str]] = None,
            nested_group_membership_enabled: Optional[pulumi.Input[bool]] = None,
            port: Optional[pulumi.Input[int]] = None,
            servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            service_account_distinguished_name: Optional[pulumi.Input[str]] = None,
            service_account_password: Optional[pulumi.Input[str]] = None,
            test_password: Optional[pulumi.Input[str]] = None,
            test_username: Optional[pulumi.Input[str]] = None,
            tls: Optional[pulumi.Input[bool]] = None,
            type: Optional[pulumi.Input[str]] = None,
            user_disabled_bit_mask: Optional[pulumi.Input[int]] = None,
            user_enabled_attribute: Optional[pulumi.Input[str]] = None,
            user_login_attribute: Optional[pulumi.Input[str]] = None,
            user_member_attribute: Optional[pulumi.Input[str]] = None,
            user_name_attribute: Optional[pulumi.Input[str]] = None,
            user_object_class: Optional[pulumi.Input[str]] = None,
            user_search_attribute: Optional[pulumi.Input[str]] = None,
            user_search_base: Optional[pulumi.Input[str]] = None) -> 'AuthConfigOpenLdap':
        """
        Get an existing AuthConfigOpenLdap resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] access_mode: Access mode for auth. `required`, `restricted`, `unrestricted` are supported. Default `unrestricted` (string)
        :param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_principal_ids: Allowed principal ids for auth. Required if `access_mode` is `required` or `restricted`. Ex: `openldap_user://<DN>`  `openldap_group://<DN>` (list)
        :param pulumi.Input[Mapping[str, Any]] annotations: Annotations of the resource (map)
        :param pulumi.Input[str] certificate: Base64 encoded CA certificate for TLS if self-signed. Use filebase64(<FILE>) for encoding file (string)
        :param pulumi.Input[int] connection_timeout: OpenLdap connection timeout. Default `5000` (int)
        :param pulumi.Input[bool] enabled: Enable auth config provider. Default `true` (bool)
        :param pulumi.Input[str] group_dn_attribute: Group DN attribute. Default `entryDN` (string)
        :param pulumi.Input[str] group_member_mapping_attribute: Group member mapping attribute. Default `member` (string)
        :param pulumi.Input[str] group_member_user_attribute: Group member user attribute. Default `entryDN` (string)
        :param pulumi.Input[str] group_name_attribute: Group name attribute. Default `cn` (string)
        :param pulumi.Input[str] group_object_class: Group object class. Default `groupOfNames` (string)
        :param pulumi.Input[str] group_search_attribute: Group search attribute. Default `cn` (string)
        :param pulumi.Input[str] group_search_base: Group search base (string)
        :param pulumi.Input[Mapping[str, Any]] labels: Labels of the resource (map)
        :param pulumi.Input[str] name: (Computed) The name of the resource (string)
        :param pulumi.Input[bool] nested_group_membership_enabled: Nested group membership enable. Default `false` (bool)
        :param pulumi.Input[int] port: OpenLdap port. Default `389` (int)
        :param pulumi.Input[Sequence[pulumi.Input[str]]] servers: OpenLdap servers list (list)
        :param pulumi.Input[str] service_account_distinguished_name: Service account DN for access OpenLdap service (string)
        :param pulumi.Input[str] service_account_password: Service account password for access OpenLdap service (string)
        :param pulumi.Input[str] test_password: Password for test access to OpenLdap service (string)
        :param pulumi.Input[str] test_username: Username for test access to OpenLdap service (string)
        :param pulumi.Input[bool] tls: Enable TLS connection (bool)
        :param pulumi.Input[str] type: (Computed) The type of the resource (string)
        :param pulumi.Input[int] user_disabled_bit_mask: User disabled bit mask (int)
        :param pulumi.Input[str] user_enabled_attribute: User enable attribute (string)
        :param pulumi.Input[str] user_login_attribute: User login attribute. Default `uid` (string)
        :param pulumi.Input[str] user_member_attribute: User member attribute. Default `memberOf` (string)
        :param pulumi.Input[str] user_name_attribute: User name attribute. Default `givenName` (string)
        :param pulumi.Input[str] user_object_class: User object class. Default `inetorgperson` (string)
        :param pulumi.Input[str] user_search_attribute: User search attribute. Default `uid|sn|givenName` (string)
        :param pulumi.Input[str] user_search_base: User search base DN (string)
        """
        # Attach the provider id to the options, then hand the engine a state
        # bag carrying any explicitly supplied property overrides; properties
        # left as None are filled in from the provider's recorded state.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _AuthConfigOpenLdapState.__new__(_AuthConfigOpenLdapState)

        __props__.__dict__["access_mode"] = access_mode
        __props__.__dict__["allowed_principal_ids"] = allowed_principal_ids
        __props__.__dict__["annotations"] = annotations
        __props__.__dict__["certificate"] = certificate
        __props__.__dict__["connection_timeout"] = connection_timeout
        __props__.__dict__["enabled"] = enabled
        __props__.__dict__["group_dn_attribute"] = group_dn_attribute
        __props__.__dict__["group_member_mapping_attribute"] = group_member_mapping_attribute
        __props__.__dict__["group_member_user_attribute"] = group_member_user_attribute
        __props__.__dict__["group_name_attribute"] = group_name_attribute
        __props__.__dict__["group_object_class"] = group_object_class
        __props__.__dict__["group_search_attribute"] = group_search_attribute
        __props__.__dict__["group_search_base"] = group_search_base
        __props__.__dict__["labels"] = labels
        __props__.__dict__["name"] = name
        __props__.__dict__["nested_group_membership_enabled"] = nested_group_membership_enabled
        __props__.__dict__["port"] = port
        __props__.__dict__["servers"] = servers
        __props__.__dict__["service_account_distinguished_name"] = service_account_distinguished_name
        __props__.__dict__["service_account_password"] = service_account_password
        __props__.__dict__["test_password"] = test_password
        __props__.__dict__["test_username"] = test_username
        __props__.__dict__["tls"] = tls
        __props__.__dict__["type"] = type
        __props__.__dict__["user_disabled_bit_mask"] = user_disabled_bit_mask
        __props__.__dict__["user_enabled_attribute"] = user_enabled_attribute
        __props__.__dict__["user_login_attribute"] = user_login_attribute
        __props__.__dict__["user_member_attribute"] = user_member_attribute
        __props__.__dict__["user_name_attribute"] = user_name_attribute
        __props__.__dict__["user_object_class"] = user_object_class
        __props__.__dict__["user_search_attribute"] = user_search_attribute
        __props__.__dict__["user_search_base"] = user_search_base
        return AuthConfigOpenLdap(resource_name, opts=opts, __props__=__props__)
    # ------------------------------------------------------------------
    # Read-only output properties.  Each resolves the live value through
    # pulumi.get(); there are no setters because outputs of a
    # CustomResource are controlled by the provider, not the program.
    # ------------------------------------------------------------------

    @property
    @pulumi.getter(name="accessMode")
    def access_mode(self) -> pulumi.Output[Optional[str]]:
        """
        Access mode for auth. `required`, `restricted`, `unrestricted` are supported. Default `unrestricted` (string)
        """
        return pulumi.get(self, "access_mode")

    @property
    @pulumi.getter(name="allowedPrincipalIds")
    def allowed_principal_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        Allowed principal ids for auth. Required if `access_mode` is `required` or `restricted`. Ex: `openldap_user://<DN>`  `openldap_group://<DN>` (list)
        """
        return pulumi.get(self, "allowed_principal_ids")

    @property
    @pulumi.getter
    def annotations(self) -> pulumi.Output[Mapping[str, Any]]:
        """
        Annotations of the resource (map)
        """
        return pulumi.get(self, "annotations")

    @property
    @pulumi.getter
    def certificate(self) -> pulumi.Output[Optional[str]]:
        """
        Base64 encoded CA certificate for TLS if self-signed. Use filebase64(<FILE>) for encoding file (string)
        """
        return pulumi.get(self, "certificate")

    @property
    @pulumi.getter(name="connectionTimeout")
    def connection_timeout(self) -> pulumi.Output[Optional[int]]:
        """
        OpenLdap connection timeout. Default `5000` (int)
        """
        return pulumi.get(self, "connection_timeout")

    @property
    @pulumi.getter
    def enabled(self) -> pulumi.Output[Optional[bool]]:
        """
        Enable auth config provider. Default `true` (bool)
        """
        return pulumi.get(self, "enabled")

    @property
    @pulumi.getter(name="groupDnAttribute")
    def group_dn_attribute(self) -> pulumi.Output[str]:
        """
        Group DN attribute. Default `entryDN` (string)
        """
        return pulumi.get(self, "group_dn_attribute")

    @property
    @pulumi.getter(name="groupMemberMappingAttribute")
    def group_member_mapping_attribute(self) -> pulumi.Output[str]:
        """
        Group member mapping attribute. Default `member` (string)
        """
        return pulumi.get(self, "group_member_mapping_attribute")

    @property
    @pulumi.getter(name="groupMemberUserAttribute")
    def group_member_user_attribute(self) -> pulumi.Output[str]:
        """
        Group member user attribute. Default `entryDN` (string)
        """
        return pulumi.get(self, "group_member_user_attribute")

    @property
    @pulumi.getter(name="groupNameAttribute")
    def group_name_attribute(self) -> pulumi.Output[str]:
        """
        Group name attribute. Default `cn` (string)
        """
        return pulumi.get(self, "group_name_attribute")

    @property
    @pulumi.getter(name="groupObjectClass")
    def group_object_class(self) -> pulumi.Output[str]:
        """
        Group object class. Default `groupOfNames` (string)
        """
        return pulumi.get(self, "group_object_class")

    @property
    @pulumi.getter(name="groupSearchAttribute")
    def group_search_attribute(self) -> pulumi.Output[str]:
        """
        Group search attribute. Default `cn` (string)
        """
        return pulumi.get(self, "group_search_attribute")

    @property
    @pulumi.getter(name="groupSearchBase")
    def group_search_base(self) -> pulumi.Output[str]:
        """
        Group search base (string)
        """
        return pulumi.get(self, "group_search_base")

    @property
    @pulumi.getter
    def labels(self) -> pulumi.Output[Mapping[str, Any]]:
        """
        Labels of the resource (map)
        """
        return pulumi.get(self, "labels")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        (Computed) The name of the resource (string)
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="nestedGroupMembershipEnabled")
    def nested_group_membership_enabled(self) -> pulumi.Output[bool]:
        """
        Nested group membership enable. Default `false` (bool)
        """
        return pulumi.get(self, "nested_group_membership_enabled")

    @property
    @pulumi.getter
    def port(self) -> pulumi.Output[Optional[int]]:
        """
        OpenLdap port. Default `389` (int)
        """
        return pulumi.get(self, "port")

    @property
    @pulumi.getter
    def servers(self) -> pulumi.Output[Sequence[str]]:
        """
        OpenLdap servers list (list)
        """
        return pulumi.get(self, "servers")

    @property
    @pulumi.getter(name="serviceAccountDistinguishedName")
    def service_account_distinguished_name(self) -> pulumi.Output[str]:
        """
        Service account DN for access OpenLdap service (string)
        """
        return pulumi.get(self, "service_account_distinguished_name")

    @property
    @pulumi.getter(name="serviceAccountPassword")
    def service_account_password(self) -> pulumi.Output[str]:
        """
        Service account password for access OpenLdap service (string)
        """
        return pulumi.get(self, "service_account_password")

    @property
    @pulumi.getter(name="testPassword")
    def test_password(self) -> pulumi.Output[str]:
        """
        Password for test access to OpenLdap service (string)
        """
        return pulumi.get(self, "test_password")

    @property
    @pulumi.getter(name="testUsername")
    def test_username(self) -> pulumi.Output[str]:
        """
        Username for test access to OpenLdap service (string)
        """
        return pulumi.get(self, "test_username")

    @property
    @pulumi.getter
    def tls(self) -> pulumi.Output[bool]:
        """
        Enable TLS connection (bool)
        """
        return pulumi.get(self, "tls")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        (Computed) The type of the resource (string)
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="userDisabledBitMask")
    def user_disabled_bit_mask(self) -> pulumi.Output[int]:
        """
        User disabled bit mask (int)
        """
        return pulumi.get(self, "user_disabled_bit_mask")

    @property
    @pulumi.getter(name="userEnabledAttribute")
    def user_enabled_attribute(self) -> pulumi.Output[str]:
        """
        User enable attribute (string)
        """
        return pulumi.get(self, "user_enabled_attribute")

    @property
    @pulumi.getter(name="userLoginAttribute")
    def user_login_attribute(self) -> pulumi.Output[str]:
        """
        User login attribute. Default `uid` (string)
        """
        return pulumi.get(self, "user_login_attribute")

    @property
    @pulumi.getter(name="userMemberAttribute")
    def user_member_attribute(self) -> pulumi.Output[str]:
        """
        User member attribute. Default `memberOf` (string)
        """
        return pulumi.get(self, "user_member_attribute")

    @property
    @pulumi.getter(name="userNameAttribute")
    def user_name_attribute(self) -> pulumi.Output[str]:
        """
        User name attribute. Default `givenName` (string)
        """
        return pulumi.get(self, "user_name_attribute")

    @property
    @pulumi.getter(name="userObjectClass")
    def user_object_class(self) -> pulumi.Output[str]:
        """
        User object class. Default `inetorgperson` (string)
        """
        return pulumi.get(self, "user_object_class")

    @property
    @pulumi.getter(name="userSearchAttribute")
    def user_search_attribute(self) -> pulumi.Output[str]:
        """
        User search attribute. Default `uid|sn|givenName` (string)
        """
        return pulumi.get(self, "user_search_attribute")

    @property
    @pulumi.getter(name="userSearchBase")
    def user_search_base(self) -> pulumi.Output[str]:
        """
        User search base DN (string)
        """
        return pulumi.get(self, "user_search_base")
| 47.251744 | 227 | 0.668138 | 74,078 | 0.994122 | 0 | 0 | 67,139 | 0.901001 | 0 | 0 | 29,407 | 0.39464 |
6cafccf60a626ceb56ed9cc31e56029cc75a3274 | 2,029 | bzl | Python | apple/internal/testing/tvos_rules.bzl | LaudateCorpus1/rules_apple | f8b1da53a4b53af9c655a17a4e0cb86959c932d8 | [
"Apache-2.0"
] | 2 | 2020-07-01T20:21:48.000Z | 2021-04-28T21:28:49.000Z | apple/internal/testing/tvos_rules.bzl | LaudateCorpus1/rules_apple | f8b1da53a4b53af9c655a17a4e0cb86959c932d8 | [
"Apache-2.0"
] | null | null | null | apple/internal/testing/tvos_rules.bzl | LaudateCorpus1/rules_apple | f8b1da53a4b53af9c655a17a4e0cb86959c932d8 | [
"Apache-2.0"
] | 2 | 2021-06-03T10:06:19.000Z | 2022-02-02T14:23:53.000Z | # Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of tvOS test rules."""
load(
"@build_bazel_rules_apple//apple/internal/testing:apple_test_rule_support.bzl",
"apple_test_rule_support",
)
load(
"@build_bazel_rules_apple//apple/internal:apple_product_type.bzl",
"apple_product_type",
)
load(
"@build_bazel_rules_apple//apple/internal:rule_factory.bzl",
"rule_factory",
)
load(
"@build_bazel_rules_apple//apple:providers.bzl",
"TvosXcTestBundleInfo",
)
def _tvos_ui_test_impl(ctx):
    """Implementation of tvos_ui_test: delegates to the shared Apple test
    support, marking the output as an XCUITest bundle."""
    bundle_providers = [TvosXcTestBundleInfo()]
    return apple_test_rule_support.apple_test_impl(
        ctx,
        "xcuitest",
        extra_providers = bundle_providers,
    )
def _tvos_unit_test_impl(ctx):
    """Implementation of tvos_unit_test: delegates to the shared Apple test
    support, marking the output as an XCTest bundle."""
    bundle_providers = [TvosXcTestBundleInfo()]
    return apple_test_rule_support.apple_test_impl(
        ctx,
        "xctest",
        extra_providers = bundle_providers,
    )
# Public rule: builds and bundles a tvOS UI (XCUITest) test bundle.
tvos_ui_test = rule_factory.create_apple_bundling_rule(
    implementation = _tvos_ui_test_impl,
    platform_type = str(apple_common.platform_type.tvos),
    product_type = apple_product_type.ui_test_bundle,
    doc = "Builds and bundles a tvOS UI Test Bundle.",
)
# Public rule: builds and bundles a tvOS unit (XCTest) test bundle.
tvos_unit_test = rule_factory.create_apple_bundling_rule(
    implementation = _tvos_unit_test_impl,
    platform_type = str(apple_common.platform_type.tvos),
    product_type = apple_product_type.unit_test_bundle,
    doc = "Builds and bundles a tvOS Unit Test Bundle.",
)
| 32.206349 | 83 | 0.741252 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,141 | 0.562346 |
6cb08150d3a19f5a4a7edf7874374e44e9a831c7 | 2,438 | py | Python | cogs/calc.py | cmstevens02/Knight-Assistant | 6e67b1ca38bc278e72196384a7fb7816e96db665 | [
"MIT"
] | 3 | 2021-02-23T23:35:08.000Z | 2021-02-28T03:21:52.000Z | cogs/calc.py | nategaulke/Discord_Personal_Assistant | 6e67b1ca38bc278e72196384a7fb7816e96db665 | [
"MIT"
] | null | null | null | cogs/calc.py | nategaulke/Discord_Personal_Assistant | 6e67b1ca38bc278e72196384a7fb7816e96db665 | [
"MIT"
] | 1 | 2021-04-27T16:06:21.000Z | 2021-04-27T16:06:21.000Z | # This script was written by Conroy
# import discord
from discord.ext import commands
# inherit from commands.Cog
class Calculations(commands.Cog):
    """Cog providing simple calculator commands (basic math and tips)."""
    def __init__(self, bot):
        # reference the bot object from Main.py
        self.bot = bot
    @commands.command(help=("Does simple math. Type a simple math expression "
                            "with only 1 operator after \"!calc\" "
                            "and the bot will solve it for you!"))
    async def calc(self, ctx, x, operator, y):
        """Evaluate `x <operator> y` and reply with the result.

        Supported operators: + - / * % ^ and their word forms
        (plus/minus/divide/multiply/mod/modulo/modulos/power).
        """
        channel = ctx.message.channel
        tempx = float(x)
        tempy = float(y)
        response = ""
        # check for all basic math calculations
        if operator == "+" or operator.lower() == "plus":
            response = f"The answer is {(tempx + tempy)}."
        elif operator == "-" or operator.lower() == "minus":
            response = f"The answer is {(tempx - tempy)}."
        elif operator == "/" or operator.lower() == "divide":
            # NOTE(review): y == 0 raises ZeroDivisionError and the command
            # errors out without replying -- confirm whether a friendly
            # message is wanted here.
            response = f"The answer is {(tempx / tempy)}."
        elif operator == "*" or operator.lower() == "multiply":
            response = f"The answer is {(tempx * tempy)}."
        elif (operator == "%" or operator.lower() == "mod"
              or operator.lower() == "modulo" or operator.lower() == "modulos"):
            response = f"The answer is {(tempx % tempy)}."
        elif operator == "^" or operator.lower() == "power":
            response = f"The answer is {(tempx ** tempy)}."
        else:
            # FIX: corrected the "oeprator" typo in the user-facing message.
            response = "Sorry, I do not recognize that operator yet."
        await channel.send(response)
    @commands.command(help=("Calculates tips. Specify an amount "
                            "followed by an optional tip percentage (20% by default)."))
    async def calc_tip(self, ctx, amount, tip_percentage=20):
        """Reply with a tip suggestion for `amount` at `tip_percentage` percent."""
        channel = ctx.message.channel
        amount = float(amount)
        tip_percentage = float(tip_percentage)
        tip = round(amount * (tip_percentage / 100), 2)
        response = ""
        if tip.is_integer():
            response = f"You should give a ${int(tip)} tip."
        else:
            # FIX: the second fragment was a plain string literal, so
            # "${int(round(tip, 0))}" was sent to the user verbatim; it must
            # be an f-string for the rounded value to be interpolated.
            response = (f"You should give a ${tip} tip. "
                        f"Or round it to ${int(round(tip, 0))} if that is easier.")
        await channel.send(response)
# this function connects this cog (via the Calculations class) to the bot object
def setup(bot):
    """discord.py extension entry point: register the Calculations cog."""
    cog = Calculations(bot)
    bot.add_cog(cog)
| 30.475 | 88 | 0.562756 | 2,194 | 0.899918 | 0 | 0 | 2,047 | 0.839623 | 1,678 | 0.688269 | 914 | 0.374897 |
6cb14ca24710d5a32a25624affaf5fd44f64eda5 | 1,011 | py | Python | monitoria-ilp/prova5/M7.py | gustavo-mendel/my-college-projects | ccc1285e1a6863312e275f973e728de231a9458a | [
"MIT"
] | 3 | 2021-08-18T01:59:50.000Z | 2021-08-28T00:19:07.000Z | monitoria-ilp/prova5/M7.py | gustavo-mendel/my-college-projects | ccc1285e1a6863312e275f973e728de231a9458a | [
"MIT"
] | 4 | 2021-03-09T18:39:47.000Z | 2021-03-26T00:01:56.000Z | monitoria-ilp/prova5/M7.py | gustavo-mendel/my-college-projects | ccc1285e1a6863312e275f973e728de231a9458a | [
"MIT"
] | 1 | 2022-03-20T14:54:09.000Z | 2022-03-20T14:54:09.000Z | n, m = [int(e) for e in input().split()]
mat = []
for i in range(n):
j = [int(e) for e in input().split()]
mat.append(j)
for i in range(n):
for j in range(m):
if mat[i][j] == 0:
if i == 0:
if mat[i][j+1] == 1 and mat[i][j-1] == 1 and mat[i+1][j] == 1:
print(i, j)
exit()
if j == 0:
if mat[i+1][j] == 1 and mat[i-1][j] == 1 and mat[i][j+1] == 1:
print(i, j)
exit()
if i == n-1:
if mat[i][j+1] == 1 and mat[i][j-1] == 1 and mat[i-1][j] == 1:
print(i, j)
exit()
if j == m-1:
if mat[i+1][j] == 1 and mat[i-1][j] == 1 and mat[i][j-1] == 1:
print(i, j)
exit()
if mat[i+1][j] == 1 and mat[i-1][j] == 1 and mat[i][j+1] == 1 and mat[i][j-1] == 1:
print(i, j)
exit()
print(0, 0)
| 29.735294 | 95 | 0.331355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6cb2edb7e1e29ba70850bedeb3eee19d43933ca6 | 72 | py | Python | utils/__init__.py | DNL-inc/bit | b6f35e95b2b40a3eec308a2c7179a73eadad3556 | [
"MIT"
] | 1 | 2020-11-04T16:15:52.000Z | 2020-11-04T16:15:52.000Z | utils/__init__.py | DNL-inc/bit | b6f35e95b2b40a3eec308a2c7179a73eadad3556 | [
"MIT"
] | null | null | null | utils/__init__.py | DNL-inc/bit | b6f35e95b2b40a3eec308a2c7179a73eadad3556 | [
"MIT"
] | null | null | null | from . import db_api
from . import misc
# from . import postpone_message | 24 | 32 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.444444 |
6cb32f44a52f1c0281429f935c70345217dd11a0 | 5,105 | py | Python | xero_python/appstore/models/plan.py | gavinwhyte/xero-python | 53a028c3b7c51da1db203b616bf7b7a028a4a1d2 | [
"MIT"
] | null | null | null | xero_python/appstore/models/plan.py | gavinwhyte/xero-python | 53a028c3b7c51da1db203b616bf7b7a028a4a1d2 | [
"MIT"
] | null | null | null | xero_python/appstore/models/plan.py | gavinwhyte/xero-python | 53a028c3b7c51da1db203b616bf7b7a028a4a1d2 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Xero AppStore API
These endpoints are for Xero Partners to interact with the App Store Billing platform # noqa: E501
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class Plan(BaseModel):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared attribute name -> OpenAPI type string; consumed by BaseModel
    # for (de)serialization.
    openapi_types = {
        "id": "str",
        "name": "str",
        "status": "str",
        "subscription_items": "list[SubscriptionItem]",
    }
    # Python attribute name -> JSON field name used on the wire.
    attribute_map = {
        "id": "id",
        "name": "name",
        "status": "status",
        "subscription_items": "subscriptionItems",
    }
    def __init__(
        self, id=None, name=None, status=None, subscription_items=None
    ):  # noqa: E501
        """Plan - a model defined in OpenAPI"""  # noqa: E501
        self._id = None
        self._name = None
        self._status = None
        self._subscription_items = None
        self.discriminator = None
        # Assign through the properties so each setter's validation runs.
        self.id = id
        self.name = name
        self.status = status
        self.subscription_items = subscription_items
    @property
    def id(self):
        """Gets the id of this Plan.  # noqa: E501
        The unique identifier of the plan  # noqa: E501
        :return: The id of this Plan.  # noqa: E501
        :rtype: str
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this Plan.
        The unique identifier of the plan  # noqa: E501
        :param id: The id of this Plan.  # noqa: E501
        :type: str
        """
        if id is None:
            raise ValueError("Invalid value for `id`, must not be `None`")  # noqa: E501
        self._id = id
    @property
    def name(self):
        """Gets the name of this Plan.  # noqa: E501
        The name of the plan. It is used in the invoice line item description.  # noqa: E501
        :return: The name of this Plan.  # noqa: E501
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this Plan.
        The name of the plan. It is used in the invoice line item description.  # noqa: E501
        :param name: The name of this Plan.  # noqa: E501
        :type: str
        """
        if name is None:
            raise ValueError(
                "Invalid value for `name`, must not be `None`"
            )  # noqa: E501
        self._name = name
    @property
    def status(self):
        """Gets the status of this Plan.  # noqa: E501
        Status of the plan. Available statuses are ACTIVE, CANCELED, and PENDING_ACTIVATION.  # noqa: E501
        :return: The status of this Plan.  # noqa: E501
        :rtype: str
        """
        return self._status
    @status.setter
    def status(self, status):
        """Sets the status of this Plan.
        Status of the plan. Available statuses are ACTIVE, CANCELED, and PENDING_ACTIVATION.  # noqa: E501
        :param status: The status of this Plan.  # noqa: E501
        :type: str
        """
        if status is None:
            raise ValueError(
                "Invalid value for `status`, must not be `None`"
            )  # noqa: E501
        # Closed set of statuses; the literal string "None" is emitted by the
        # generator -- presumably a sentinel for unset values. TODO confirm.
        allowed_values = [
            "ACTIVE",
            "CANCELED",
            "PENDING_ACTIVATION",
            "None",
        ]  # noqa: E501
        if status:
            if status not in allowed_values:
                raise ValueError(
                    "Invalid value for `status` ({0}), must be one of {1}".format(  # noqa: E501
                        status, allowed_values
                    )
                )
        self._status = status
    @property
    def subscription_items(self):
        """Gets the subscription_items of this Plan.  # noqa: E501
        List of the subscription items belonging to the plan. It does not include cancelled subscription items.  # noqa: E501
        :return: The subscription_items of this Plan.  # noqa: E501
        :rtype: list[SubscriptionItem]
        """
        return self._subscription_items
    @subscription_items.setter
    def subscription_items(self, subscription_items):
        """Sets the subscription_items of this Plan.
        List of the subscription items belonging to the plan. It does not include cancelled subscription items.  # noqa: E501
        :param subscription_items: The subscription_items of this Plan.  # noqa: E501
        :type: list[SubscriptionItem]
        """
        if subscription_items is None:
            raise ValueError(
                "Invalid value for `subscription_items`, must not be `None`"
            )  # noqa: E501
        self._subscription_items = subscription_items
| 28.20442 | 126 | 0.56905 | 4,806 | 0.94143 | 0 | 0 | 3,556 | 0.696572 | 0 | 0 | 3,119 | 0.61097 |
6cb33395406f43fe2ee837077f069c801b9bcf8e | 2,167 | py | Python | design_patterns/behavioral/command.py | Minkov/python-oop-2021-02 | bd387dde165f4338eed66c4bc0b4b516ee085340 | [
"MIT"
] | 2 | 2021-02-22T22:55:31.000Z | 2021-04-05T18:25:10.000Z | design_patterns/behavioral/command.py | Minkov/python-oop-2021-02 | bd387dde165f4338eed66c4bc0b4b516ee085340 | [
"MIT"
] | null | null | null | design_patterns/behavioral/command.py | Minkov/python-oop-2021-02 | bd387dde165f4338eed66c4bc0b4b516ee085340 | [
"MIT"
] | 2 | 2021-04-05T18:35:11.000Z | 2021-04-08T12:18:19.000Z | from abc import ABC, abstractmethod
class Command(ABC):
    """Abstract command: pairs an action (execute) with its inverse (un_execute)."""
    @abstractmethod
    def execute(self):
        # Perform the command's action; may return a result (e.g. a sum).
        pass
    @abstractmethod
    def un_execute(self):
        # Revert whatever execute() did.
        pass
class AddCommand(Command):
    """Appends a value to the shared list; undo removes it again."""
    def __init__(self, values, new_value):
        self.values = values
        self.new_value = new_value
    def execute(self):
        # Extend the shared list in place with the recorded value.
        self.values += [self.new_value]
    def un_execute(self):
        # The appended value is still last, so dropping the tail undoes it.
        del self.values[-1]
class SumCommand(Command):
    """Reports the total of the shared list; read-only, so undo re-reports it."""
    def __init__(self, values):
        self.values = values
    def execute(self):
        return sum(self.values)
    def un_execute(self):
        # Summing mutates nothing, so "undoing" is the same computation.
        return self.execute()
class RemoveLastCommand(Command):
    """Drops the final element; undo re-appends the element it removed."""
    def __init__(self, values):
        self.values = values
        self.removed_value = None
    def execute(self):
        # Remember the element so un_execute can restore it later.
        self.removed_value = self.values.pop(-1)
    def un_execute(self):
        self.values += [self.removed_value]
        self.removed_value = None
class RemoveFirstCommand(Command):
    """Drops the first element; undo puts the removed element back in front."""
    def __init__(self, values):
        self.values = values
        self.removed_value = None
    def execute(self):
        # Remember the element so un_execute can restore it later.
        self.removed_value = self.values.pop(0)
    def un_execute(self):
        # Splice the saved element back at the head of the list.
        self.values[:0] = [self.removed_value]
        self.removed_value = None
class CommandsMemento:
    """Snapshot of the values list taken at construction time."""
    def __init__(self, values):
        # Copy, so later mutations of the live list don't alter the snapshot.
        self.state = [*values]
# Read commands from stdin until the END sentinel, queueing them up.
commands = []
values = []
while True:
    command_text = input()
    if command_text == 'END':
        break
    if command_text == 'REMOVE_LAST':
        command = RemoveLastCommand(values)
    elif command_text == 'REMOVE_FIRST':
        command = RemoveFirstCommand(values)
    elif command_text == 'SUM':
        command = SumCommand(values)
    else:
        # Anything else is treated as "ADD <int>".
        _, value = command_text.split(' ')
        command = AddCommand(values, int(value))
    commands.append(command)
mementos = []
# Run the queued commands in order (mutators print None, SUM prints the total).
for command in commands:
    print(command.execute())
    # NOTE: mementos is never populated, so this inner loop is dead code.
    for memento in mementos:
        print(memento.state)
        print('----')
print(values)
# Undo the commands in reverse order, restoring the original (empty) list.
for command in commands[::-1]:
    print(command.un_execute())
print(values)
"""
ADD 5
ADD 6
SUM
REMOVE_FIRST
ADD 3
ADD 7
SUM
REMOVE_LAST
SUM
REMOVE_LAST
SUM
REMOVE_LAST
SUM
END
"""
| 18.210084 | 49 | 0.646054 | 1,297 | 0.598523 | 0 | 0 | 105 | 0.048454 | 0 | 0 | 150 | 0.06922 |
6cb3a9c787df62417130bfb457f73cad0e338334 | 2,537 | py | Python | test/test_cnn/test_datum.py | DwangoMediaVillage/marltas_core | 91a5caf75c2350a31d47d1b0408c817644a0d41e | [
"MIT"
] | 9 | 2021-02-15T08:20:31.000Z | 2022-01-04T09:29:35.000Z | test/test_cnn/test_datum.py | DwangoMediaVillage/marltas_core | 91a5caf75c2350a31d47d1b0408c817644a0d41e | [
"MIT"
] | null | null | null | test/test_cnn/test_datum.py | DwangoMediaVillage/marltas_core | 91a5caf75c2350a31d47d1b0408c817644a0d41e | [
"MIT"
] | 1 | 2021-09-21T16:11:17.000Z | 2021-09-21T16:11:17.000Z | import numpy as np
from dqn.cnn.config import CNNConfigBase
from dqn.cnn.datum import (Batch, Loss, SampleFromActor, SampleFromBuffer,
split_sample_from_actor)
def test_sample_from_actor(config=CNNConfigBase()):
    """SampleFromActor: byte round-trip preserves equality, and splitting
    yields per-index views of every defined field."""
    defs = config.sample_from_actor_def
    size = 4
    sample = SampleFromActor.as_random(size=size, np_defs=defs)
    assert sample.size == size
    restored = SampleFromActor.from_bytes(
        bytes_data=sample.to_bytes(np_defs=defs), np_defs=defs)
    assert sample.eq(restored, np_defs=defs)
    parts = split_sample_from_actor(sample=sample, sample_from_actor_def=defs)
    for i in range(size):
        for name, np_def in defs.items():
            if np_def is None:
                continue
            assert np.array_equal(getattr(parts[i], name), getattr(sample, name)[i])
def test_sample_from_buffer(config=CNNConfigBase()):
    """SampleFromBuffer: byte round-trip preserves equality, and conversion
    from actor samples keeps each field's shape, dtype and contents."""
    buffer_defs = config.sample_from_buffer_def
    actor_defs = config.sample_from_actor_def
    size = 4
    sample = SampleFromBuffer.as_random(size=size, np_defs=buffer_defs)
    restored = SampleFromBuffer.from_bytes(
        bytes_data=sample.to_bytes(np_defs=buffer_defs), np_defs=buffer_defs)
    assert sample.eq(other=restored, np_defs=buffer_defs)
    actor_sample = SampleFromActor.as_random(size=size, np_defs=actor_defs)
    buffer_sample = SampleFromBuffer.from_buffer_samples(
        samples=split_sample_from_actor(
            sample=actor_sample, sample_from_actor_def=actor_defs),
        sample_from_actor_def=actor_defs)
    # check shape and type
    for name, np_def in buffer_defs.items():
        if np_def is None:
            continue
        field = getattr(buffer_sample, name)
        if name == 'weight':
            continue
        assert tuple(field.shape) == tuple([size] + list(np_def.shape))
        assert field.dtype == np_def.dtype
        assert np.array_equal(field, getattr(actor_sample, name))
def test_batch(config=CNNConfigBase()):
    """Smoke test: a Batch can be built from a random buffer sample."""
    buffer_sample = SampleFromBuffer.as_random(
        size=10, np_defs=config.sample_from_buffer_def)
    # No assertion: construction completing without raising is the test.
    batch = Batch.from_buffer_sample(buffer_sample)
def test_loss(config=CNNConfigBase()):
    """Loss serializes to bytes and back without change."""
    defs = config.loss_def
    loss = Loss.as_random(size=4, np_defs=defs)
    restored = Loss.from_bytes(bytes_data=loss.to_bytes(np_defs=defs), np_defs=defs)
    assert loss.eq(other=restored, np_defs=defs)
| 46.127273 | 117 | 0.69255 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.011825 |
6cb3b777db4ea68cdf0e53c08d49eba00b76fc6c | 6,943 | py | Python | lib/python/zzvm/parser.py | m80126colin/zzvm | 09440c1ae11dfdb433cef4b0133e403b1ae1b74b | [
"MIT"
] | 19 | 2016-07-06T16:38:27.000Z | 2021-01-30T02:20:26.000Z | lib/python/zzvm/parser.py | m80126colin/zzvm | 09440c1ae11dfdb433cef4b0133e403b1ae1b74b | [
"MIT"
] | 1 | 2016-07-08T13:37:12.000Z | 2016-07-08T13:37:12.000Z | lib/python/zzvm/parser.py | m80126colin/zzvm | 09440c1ae11dfdb433cef4b0133e403b1ae1b74b | [
"MIT"
] | 3 | 2016-07-07T07:03:11.000Z | 2018-01-01T00:55:59.000Z | import codecs
import collections
import io
import os
import re
import struct
from .instruction import Instruction
from .opcode import Opcodes
from .registers import Registers
from .section import Section
from .symbol import Symbol
def p32(v):
    """Serialize *v* as a little-endian unsigned 32-bit value (4 bytes)."""
    return struct.Struct('<I').pack(v)
def unescape_str_to_bytes(x):
    """Expand backslash escapes (\\n, \\x41, ...) in *x* and return raw bytes.

    NOTE: codecs.escape_decode is a CPython-internal API; it returns a
    (decoded_bytes, consumed_length) pair, of which only the bytes are needed.
    """
    decoded, _consumed = codecs.escape_decode(x.encode('utf8'))
    return decoded
class QueueReader(object):
    """Reads lines from a queue of file-like objects, draining them in order."""
    def __init__(self, *files):
        self.fq = list(files)
    def add_file(self, f):
        # Queue *f* to be read after all currently queued files.
        self.fq.append(f)
    def insert_file(self, f, idx=0):
        # By default the file jumps the queue (used for `.include`-style nesting).
        self.fq.insert(idx, f)
    def readline(self):
        """Return the next available line, or '' once every file is exhausted."""
        while self.fq:
            line = self.fq[0].readline()
            if line:
                return line
            # Head file is exhausted; drop it and try the next one.
            del self.fq[0]
        return ''
class Parser(object):
    """Assembler front end: parses zz assembly text into Sections and
    serializes them into the Zz executable image format."""
    def __init__(self, fin):
        """Parse assembly from *fin*, a file-like object or a source string."""
        self.sections = None
        self.section_bodies = {}
        self.entry = None
        if type(fin) is str:
            fin = io.StringIO(fin)
        self.reader = QueueReader(fin)
        self.parse()
    def parse(self):
        """Consume every line from the reader, handling directives, labels and
        instructions, and populate self.sections and self.entry."""
        sections = collections.OrderedDict()
        current_section = None
        while True:
            raw = self.reader.readline()
            if not raw:
                break
            # Strip comments (';' to end of line) and surrounding whitespace.
            line = raw.split(';')[0].strip()
            if not line:
                continue
            elif line.startswith('.sect'):
                args = line.split(maxsplit=1)[1].split(' ')
                name = args[0].upper()
                if len(args) > 1:
                    addr = int(args[1], 16)
                else:
                    # Default load addresses: code (TEXT) at 0x4000, data at 0x6000.
                    if name == 'TEXT':
                        addr = 0x4000
                    else:
                        addr = 0x6000
                new_sect = Section(addr)
                sections[name] = new_sect
                current_section = new_sect
            elif line.startswith('.include'):
                filename = line.split(maxsplit=1)[1].strip()
                if filename.startswith('zstdlib/'):
                    # Bundled standard-library includes resolve relative to the repo root.
                    filename = os.path.join(os.path.dirname(__file__), '../../..', filename)
                self.reader.insert_file(open(filename))
            elif line.startswith('.entry'):
                entry = line.split()[1]
                # BUG FIX: this used to `return self.try_parse_imm(entry)`,
                # which aborted parsing mid-file (leaving self.sections = None)
                # and never stored the entry point that get_entry() reads.
                self.entry = self.try_parse_imm(entry)
            elif line.startswith('.align'):
                current_section.align(self._parse_int(line.split()[1]))
            elif line.startswith('.db'):
                data = line[3:].split(',')
                bytes_data = bytes(int(i.strip(), 16) for i in data)
                current_section.write(bytes_data)
            elif line.startswith('.zero'):
                data = line[5:].strip()
                if data.startswith('0x'):
                    n = int(data, 16)
                else:
                    n = int(data)
                current_section.write(b'\0' * n)
            elif line.startswith('.str'):
                data = line[4:].strip()
                # Drop surrounding quotes, expand escapes, NUL-terminate (2 bytes).
                bytes_data = unescape_str_to_bytes(data[1:-1])
                current_section.write(bytes_data + b'\0\0')
            elif line[-1] == ':':
                label_name = line[:-1]
                current_section.label(label_name)
            else:
                for ins in self.parse_instruction(line):
                    current_section.write(ins)
        self.sections = sections
    def resolve_label(self, name):
        """Return the address of label *name*, searching every section;
        None if the label is unknown."""
        for section in self.sections.values():
            addr = section.labels.get(name, None)
            if addr:
                # NOTE(review): a label at address 0 is treated as missing by
                # this truthiness check -- confirm that is intended.
                return addr
    def get_entry(self):
        """Resolve the program entry point: explicit `.entry` first, then a
        `start` label, then the default text base 0x4000."""
        if type(self.entry) is Symbol:
            return self.entry.resolve(self.resolve_label)
        elif self.entry is not None:
            return self.entry
        elif self.resolve_label('start'):
            return self.resolve_label('start')
        else:
            return 0x4000
    def build(self):
        """Serialize all parsed sections into the final Zz binary image:
        header, section table, then the concatenated section bodies."""
        sections = []
        bodies = []
        for name, section in self.sections.items():
            buff = io.BytesIO()
            ip = section.addr
            for data in section.container:
                if type(data) is Instruction:
                    ins = data
                    if type(ins.imm) is Symbol:
                        # Late-bind symbolic immediates now that labels are known.
                        sym = ins.imm
                        buff.write(ins.compose(sym.resolve(self.resolve_label, ip)))
                    else:
                        buff.write(ins.compose())
                    ip += 4
                elif type(data) is Symbol:
                    val = data.resolve(self.resolve_label, ip)
                    buff.write(p32(val))
                    ip += 4
                elif type(data) is bytes:
                    buff.write(data)
                    ip += len(data)
            body = buff.getvalue()
            self.section_bodies[name] = body
            bodies.append(body)
            sections.append(struct.pack('<HH',
                section.addr,  # section_addr
                len(body),     # section_size
            ))
        header = struct.pack('<ccHHH',
            b'Z', b'z',        # magic
            0,                 # file_ver
            self.get_entry(),  # entry
            len(bodies),       # section_count
        )
        return header + b''.join(sections) + b''.join(bodies)
    def parse_instruction(self, line):
        """Yield Instruction objects for one source line.

        JMP is a pseudo-instruction assembled as `ADDI IP, IP, offset`.
        """
        try:
            ins_name, args = line.split(maxsplit=1)
            args = [i.strip() for i in args.split(',')]
        except ValueError:  # no operands on the line
            ins_name = line
            args = []
        if ins_name.upper() == 'JMP':
            is_jmp = True
            ins_name = 'ADDI'
            args = ['IP', 'IP', args[0]]
        else:
            is_jmp = False
        if len(args) > 0:
            # Branch/call targets are encoded relative to the instruction pointer.
            if ins_name[0].upper() == 'J' or ins_name.upper() == 'CALL' or is_jmp:
                rel = True
            else:
                rel = False
            imm = self.try_parse_imm(args[-1], rel=rel)
            if imm is None:
                if rel:
                    raise ValueError('jump instruction must have target\nline: %r' % line)
                regs = args
            else:
                regs = args[:-1]
            yield Instruction(ins_name, *regs, imm=imm)
        else:
            yield Instruction(ins_name, *args)
    def try_parse_imm(self, val, rel=False):
        """Parse *val* as an immediate: `$label[+offset]` becomes a Symbol,
        a numeric literal becomes an int; returns None if it is neither."""
        if val[0] == '$':
            if '+' in val:
                name, offset = val[1:].split('+')
                offset = self._parse_int(offset)
                return Symbol(name, offset, is_relative=rel)
            else:
                return Symbol(val[1:], is_relative=rel)
        try:
            return self._parse_int(val)
        except (ValueError, IndexError):  # not a number -> treat as register name
            pass
    def _parse_int(self, s):
        """Parse `0x..` as hex, `#..` as explicit decimal, else plain int()."""
        s = s.strip()
        if s[:2] == '0x':
            return int(s, 16)
        elif s[0] == '#':
            return int(s[1:], 10)
        else:
            return int(s)
| 30.451754 | 92 | 0.474867 | 6,578 | 0.947429 | 979 | 0.141005 | 0 | 0 | 0 | 0 | 317 | 0.045657 |
6cb4cc84649164fc13d24146a17efcfc7d6676a4 | 2,120 | py | Python | Ensemble_Movie.py | nchaparr/Sam_Output_Anls | c6736f7863b36d09738ac95b7cbde19ba69526cf | [
"MIT"
] | null | null | null | Ensemble_Movie.py | nchaparr/Sam_Output_Anls | c6736f7863b36d09738ac95b7cbde19ba69526cf | [
"MIT"
] | 1 | 2015-04-18T14:47:49.000Z | 2015-05-01T21:51:44.000Z | Ensemble_Movie.py | nchaparr/Sam_Output_Anls | c6736f7863b36d09738ac95b7cbde19ba69526cf | [
"MIT"
] | null | null | null | from netCDF4 import Dataset
import glob,os.path
import numpy as np
from scipy.interpolate import UnivariateSpline
from matplotlib import cm
import matplotlib.pyplot as plt
#import site
#site.addsitedir('/tera/phil/nchaparr/SAM2/sam_main/python')
#from Percentiles import *
from matplotlib.patches import Patch
import sys
sys.path.insert(0, '/tera/phil/nchaparr/python')
#import nchap_fun as nc
import matplotlib.animation as animation
from Ens_Profs import Get_Var_Arrays
from Make_Timelist import *
"""
Profiles/2d ims at a point
for a movie
may be pointless now -- data at longer delta ts
"""
# set up plot: build the dump-time list (1 min steps over 8 hrs) and an
# empty frame list for the animation
dump_time_list, Times_hrs = Make_Timelists(1, 60, 28800)
dump_time = dump_time_list[59]
i=1
ims = []
ims1 = []  # NOTE(review): never appended to -- appears to be leftover
theFig = plt.figure()
#theFig1.clf()
#theAx = theFig.add_subplot(111)
#theAx1 = theFig.add_subplot(111)
#theAx.set_title('')
#theAx.set_xlabel('')
#theAx.set_ylabel('')
# Build one pcolor frame per dump time from a fixed y-slice of the tracer field.
for dump_time in dump_time_list:
    #getting horizontally averaged, ensemble averaged tracer
    [tracer, theta, height] = Get_Var_Arrays(dump_time)
    #[grad_tracer, tracer_peaks] = nc.Domain_Grad(tracer, height)
    # Fixed sample point (y, x) indices used by the commented-out point plots.
    [yindex, xindex] = [13, 44]
    #print yindex, xindex, tracer_peaks[yindex, xindex]
    i=i+1
    # Horizontal axis: 0..1600 in 25-unit steps; vertical: lowest 64 levels.
    x = np.arange(0, 1600, 25)
    y = height[0:64]
    X,Y = np.meshgrid(x, y)
    # Vertical cross-sections at y-index 13 (same as yindex above).
    tslice = tracer[0:64, 13, :]
    thetaslice = theta[0:64, 13, :]
    # ArtistAnimation expects each frame as a tuple of artists.
    ims.append((plt.pcolor(X, Y, tslice, norm=plt.Normalize(0, 30)),))
    #ims.append((plt.pcolor(X, Y, thetaslice, norm=plt.Normalize(0, 30)),))
    #ims.append(plt.plot(tracer[:, yindex, xindex], height, 'ko'))
    #ims.append(plt.plot(theta[:, yindex, xindex], height, 'ko'))
    #plt.savefig('/tera/phil/nchaparr/python/Plotting/July92013/pngs/for_point_movie/Point_Tracer_'+ str(i)+'.png', bbox_inches=0)
# Assemble the collected frames into an animation and display it.
im_ani = animation.ArtistAnimation(theFig, ims, interval=500, repeat_delay=30000, blit=True)
#im_ani = animation.ArtistAnimation(theFig, ims, interval=1000, repeat_delay=30000, blit=True)
#im_ani.save('/tera/phil/nchaparr/python/Plotting/July92013/pngs/for_point_movie/im.mp4')
plt.show()
| 28.266667 | 130 | 0.706132 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,084 | 0.511321 |
6cb51a7de1609a5a0779573c2a06a1856d668cd7 | 5,925 | py | Python | examples/nlp/entity_linking/query_index.py | hamjam/NeMo | b3484d32e1317666151f931bfa39867d88ed8658 | [
"Apache-2.0"
] | 4,145 | 2019-09-13T08:29:43.000Z | 2022-03-31T18:31:44.000Z | examples/nlp/entity_linking/query_index.py | hamjam/NeMo | b3484d32e1317666151f931bfa39867d88ed8658 | [
"Apache-2.0"
] | 2,031 | 2019-09-17T16:51:39.000Z | 2022-03-31T23:52:41.000Z | examples/nlp/entity_linking/query_index.py | hamjam/NeMo | b3484d32e1317666151f931bfa39867d88ed8658 | [
"Apache-2.0"
] | 1,041 | 2019-09-13T10:08:21.000Z | 2022-03-30T06:37:38.000Z | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle as pkl
from argparse import ArgumentParser
from collections import OrderedDict
from typing import Dict
import numpy as np
import torch
from build_index import load_model
from omegaconf import DictConfig, OmegaConf
from nemo.utils import logging
try:
import faiss
except ModuleNotFoundError:
logging.warning("Faiss is required for building the index. Please install faiss-gpu")
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def get_query_embedding(query, model):
    """Encode *query* with the entity linking encoder and return its embedding."""
    encoded = model.tokenizer(
        query,
        add_special_tokens=True,
        padding=True,
        truncation=True,
        max_length=512,
        return_token_type_ids=True,
        return_attention_mask=True,
    )

    def as_batch(key):
        # Wrap the single example in a batch dimension and move it to `device`.
        return torch.LongTensor([encoded[key]]).to(device)

    return model.forward(
        input_ids=as_batch("input_ids"),
        token_type_ids=as_batch("token_type_ids"),
        attention_mask=as_batch("attention_mask"),
    )
def query_index(
    query: str, cfg: DictConfig, model: object, index: object, pca: object, idx2id: dict, id2string: dict,
) -> Dict:
    """
    Look *query* up in the nearest-neighbor index of entity embeddings and
    return its most similar concepts.

    Args:
        query (str): entity to look up in the index
        cfg (DictConfig): config object specifying query parameters
        model (EntityLinkingModel): entity linking encoder model
        index (object): faiss index
        pca (object): sklearn PCA transformation applied to queries
        idx2id (dict): maps unique concept dataset index to its CUI
        id2string (dict): maps each unique CUI to a representative English
            description of the concept

    Returns:
        Dict keyed by concept id, each value a tuple of (concept string,
        cosine similarity to the query).
    """
    emb = get_query_embedding(query, model).detach().cpu().numpy()
    if cfg.apply_pca:
        emb = pca.transform(emb)

    # Over-fetch neighbors: many can collapse onto the same concept id.
    distances, neighbor_idxs = index.search(emb.astype(np.float32), cfg.query_num_factor * cfg.top_n)
    distances, neighbor_idxs = distances[0], neighbor_idxs[0]

    results = OrderedDict()
    for pos, concept_idx in enumerate(neighbor_idxs):
        if len(results) >= cfg.top_n:
            break
        concept_id = idx2id[concept_idx]
        # Keep only the first (closest) occurrence of each unique concept.
        if concept_id in results:
            continue
        results[concept_id] = (id2string[concept_id], 1 - distances[pos])
    return dict(results)
def main(cfg: DictConfig, restore: bool):
    """
    Loads faiss index and allows commandline queries
    to the index. Builds new index if one hasn't been built yet.

    Args:
        cfg: Config file specifying index parameters
        restore: Whether to restore model weights trained
                 by the user. Otherwise will load weights
                 used before self alignment pretraining.
    """
    # NOTE(review): this guard reads cfg.apply_pca while the load below reads
    # cfg.index.apply_pca -- confirm which level of the config owns the flag.
    if not os.path.isfile(cfg.index.index_save_name) or (
        cfg.apply_pca and not os.path.isfile(cfg.index.pca.pca_save_name) or not os.path.isfile(cfg.index.idx_to_id)
    ):
        logging.warning("Either no index and/or no mapping from entity idx to ids exists. Please run `build_index.py`")
        return

    logging.info("Loading entity linking encoder model")
    model = load_model(cfg.model, restore)

    logging.info("Loading index and associated files")
    index = faiss.read_index(cfg.index.index_save_name)
    # FIX: open the pickle files in `with` blocks so the handles are closed.
    with open(cfg.index.idx_to_id, "rb") as f:
        idx2id = pkl.load(f)
    with open(cfg.index.id_to_string, "rb") as f:
        id2string = pkl.load(f)  # Should be created during dataset prep

    # FIX: pca was previously unbound when apply_pca was off, yet always
    # passed to query_index, raising NameError on the first query.
    pca = None
    if cfg.index.apply_pca:
        with open(cfg.index.pca.pca_save_name, "rb") as f:
            pca = pkl.load(f)

    while True:
        query = input("enter index query: ")
        # Check the sentinel before querying (the old code queried first and
        # discarded the result).
        if query == "exit":
            break
        # FIX: the original call passed both cfg.top_n and cfg.index -- eight
        # positional args for a seven-parameter signature (TypeError).
        # query_index reads top_n / query_num_factor / apply_pca from the cfg
        # it receives; assumes those keys live under cfg.index -- TODO confirm.
        output = query_index(query, cfg.index, model, index, pca, idx2id, id2string)
        for concept_id in output:
            concept_details = output[concept_id]
            concept_id = "C" + str(concept_id).zfill(7)
            print(concept_id, concept_details)
        print("----------------\n")
if __name__ == '__main__':
    # Command-line entry point: parse flags, load the config, run the query loop.
    arg_parser = ArgumentParser()
    arg_parser.add_argument(
        "--restore", action="store_true", help="Whether to restore encoder model weights from nemo path"
    )
    arg_parser.add_argument("--project_dir", required=False, type=str, default=".")
    arg_parser.add_argument("--cfg", required=False, type=str, default="./conf/umls_medical_entity_linking_config.yaml")
    cli_args = arg_parser.parse_args()

    config = OmegaConf.load(cli_args.cfg)
    config.project_dir = cli_args.project_dir
    main(config, cli_args.restore)
| 35.479042 | 119 | 0.683038 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,710 | 0.457384 |
6cb6082d21894fda980ae156d9b789007e2949db | 2,172 | py | Python | packages/w3af/w3af/plugins/attack/payloads/tests/test_base_payload.py | ZooAtmosphereGroup/HelloPackages | 0ccffd33bf927b13d28c8f715ed35004c33465d9 | [
"Apache-2.0"
] | 3 | 2019-04-09T22:59:33.000Z | 2019-06-14T09:23:24.000Z | tools/w3af/w3af/plugins/attack/payloads/tests/test_base_payload.py | sravani-m/Web-Application-Security-Framework | d9f71538f5cba6fe1d8eabcb26c557565472f6a6 | [
"MIT"
] | null | null | null | tools/w3af/w3af/plugins/attack/payloads/tests/test_base_payload.py | sravani-m/Web-Application-Security-Framework | d9f71538f5cba6fe1d8eabcb26c557565472f6a6 | [
"MIT"
] | null | null | null | """
test_Payload.py
Copyright 2012 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import unittest
from mock import MagicMock
from w3af.plugins.attack.payloads.base_payload import Payload
from w3af.plugins.attack.payloads.payloads.tests.test_payload_handler import (FakeReadShell,
FakeExecShell)
class TestBasePayload(unittest.TestCase):
def setUp(self):
self.bp = Payload(FakeReadShell())
def test_can_run(self):
self.assertEqual(self.bp.can_run(), set())
def test_run_only_read(self):
bp = Payload(FakeReadShell())
self.assertRaises(AttributeError, bp.run, 'filename')
def test_run_execute(self):
class Executable(Payload):
called_run_execute = False
called_api_execute = False
def run_execute(self, cmd):
self.called_run_execute = True
self.shell.execute(cmd)
def api_execute(self, cmd):
self.called_api_execute = True
shell = FakeExecShell()
shell.execute = MagicMock(return_value='')
executable = Executable(shell)
self.assertEqual(self.bp.can_run(), set())
executable.run('command')
self.assertTrue(executable.called_run_execute)
self.assertEqual(executable.shell.execute.call_count, 1)
executable.run_api('command')
self.assertTrue(executable.called_api_execute)
| 32.41791 | 92 | 0.662983 | 1,164 | 0.535912 | 0 | 0 | 0 | 0 | 0 | 0 | 738 | 0.339779 |
6cb7a3213f4aa75ec344787bff947c5e65c0af9d | 1,700 | py | Python | cvat/apps/iam/schema.py | ACHultman/cvat | 01eaf362aa7e03f5623e80cb12ad0b9a429ae588 | [
"Intel",
"MIT"
] | 4,197 | 2018-06-30T05:47:50.000Z | 2020-09-08T07:34:22.000Z | cvat/apps/iam/schema.py | ACHultman/cvat | 01eaf362aa7e03f5623e80cb12ad0b9a429ae588 | [
"Intel",
"MIT"
] | 1,653 | 2018-07-04T00:10:44.000Z | 2020-09-08T09:01:58.000Z | cvat/apps/iam/schema.py | ACHultman/cvat | 01eaf362aa7e03f5623e80cb12ad0b9a429ae588 | [
"Intel",
"MIT"
] | 1,253 | 2018-06-30T05:47:58.000Z | 2020-09-08T02:19:52.000Z | # Copyright (C) 2022 Intel Corporation
#
# SPDX-License-Identifier: MIT
from drf_spectacular.extensions import OpenApiFilterExtension, OpenApiAuthenticationExtension
from drf_spectacular.plumbing import build_parameter_type
from drf_spectacular.utils import OpenApiParameter
# https://drf-spectacular.readthedocs.io/en/latest/customization.html?highlight=OpenApiFilterExtension#step-5-extensions
class OrganizationFilterExtension(OpenApiFilterExtension):
    """Describe OrganizationFilterBackend filter"""
    target_class = 'cvat.apps.iam.filters.OrganizationFilterBackend'
    priority = 1

    def get_schema_operation_parameters(self, auto_schema, *args, **kwargs):
        """Describe query parameters"""
        # Both organization filters are optional string query parameters;
        # only the name/description pair differs, so describe them in one pass.
        filter_fields = (
            (self.target.organization_slug,
             self.target.organization_slug_description),
            (self.target.organization_id,
             self.target.organization_id_description),
        )
        return [
            build_parameter_type(
                name=field_name,
                required=False,
                location=OpenApiParameter.QUERY,
                description=field_description,
                schema={'type': 'string'},
            )
            for field_name, field_description in filter_fields
        ]
class SignatureAuthenticationScheme(OpenApiAuthenticationExtension):
    """Register CVAT's signature authentication in the generated OpenAPI schema."""
    target_class = 'cvat.apps.iam.authentication.SignatureAuthentication'
    name = 'SignatureAuthentication'  # name used in the schema

    def get_security_definition(self, auto_schema):
        # The scheme is an API key carried in the 'sign' query parameter.
        definition = {}
        definition['type'] = 'apiKey'
        definition['in'] = 'query'
        definition['name'] = 'sign'
        return definition
6cbf415d6c7f56e53145504a706d0df172e83094 | 14,720 | py | Python | clique_main_1.py | mccrimmonmd/clique | d6539a4530acf5c5cf85dac2eb520fa69f3a310a | [
"MIT"
] | null | null | null | clique_main_1.py | mccrimmonmd/clique | d6539a4530acf5c5cf85dac2eb520fa69f3a310a | [
"MIT"
] | null | null | null | clique_main_1.py | mccrimmonmd/clique | d6539a4530acf5c5cf85dac2eb520fa69f3a310a | [
"MIT"
] | null | null | null | """
Version 1:
- It begins
- For some reason, this version of the decision code (in Shape.move) just makes
every shape move up and to the left
"""
from __future__ import division
import pygame, pygame.locals, math, random
# Module-wide random source; seeded once from system entropy.
RAND = random.Random()
RAND.seed()
# Direction codes: these double as indices into the `votes` list in Shape.move().
UP = 0
DOWN = 1
RIGHT = 2
LEFT = 3
STAY = 4
# Pixels the camera scrolls per key repeat.
PLAYER_MOVEMENT = 2
# World-to-screen translation; mutated in place by Shape.move_player().
OFFSET = [0, 0]
SHAPE_TYPES = ['triangle', 'square', 'pentagon', 'hexagon', 'circle']
# Sides per shape type (circles use 1 as a placeholder).
SHAPE_SIDES = {
    'circle': 1, 'hexagon': 6, 'pentagon': 5, 'square': 4, 'triangle': 3}
# Mean / std-dev of side length (radius for circles) used by generate_shape().
SHAPE_MEAN = {
    'circle': 35, 'hexagon': 40, 'pentagon': 45, 'square': 70, 'triangle': 80}
SHAPE_DEV = {
    'circle': 5, 'hexagon': 6, 'pentagon': 7, 'square': 9, 'triangle': 10}
MAGIC_CONSTANT = 2 / len(SHAPE_TYPES)
# if the dividend is 1, all shape types will be generated equally
# if the dividend is > 1, the distribution of shapes will be skewed in favor
# of fewer sides.
# if the dividend is >= the divisor, only triangles will be generated.
LINE_OF_SIGHT = 500   # pixels within which a shape reacts to neighbours
STROKE_WIDTH = 1      # outline width for drawn shapes
NUM_SHAPES = 50       # initial population size
MAX_AGE = 10000       # ticks before a shape is recycled
BLACK = pygame.color.Color(0,0,0)
WHITE = pygame.color.Color(255,255,255)
def main(player, shapes, size, period):
    """Run the pygame event loop until the window is closed.

    player -- the Shape steered by the arrow keys (camera follows it)
    shapes -- every Shape in the world, player included
    size   -- (width, height) of the window in pixels
    period -- milliseconds between simulation ticks
    """
    pygame.init()
    pygame.key.set_repeat(25, 25)  # held keys repeat every 25 ms
    screen = pygame.display.set_mode(size)
    # Custom timer event driving the simulation at a fixed rate.
    TICK = pygame.locals.USEREVENT + 1
    pygame.time.set_timer(TICK, period)
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
                pygame.quit()
            elif event.type == pygame.KEYDOWN:
                # Arrow keys scroll the world around the stationary player.
                if event.key == pygame.K_UP:
                    player.move_player(UP)
                elif event.key == pygame.K_DOWN:
                    player.move_player(DOWN)
                elif event.key == pygame.K_RIGHT:
                    player.move_player(RIGHT)
                elif event.key == pygame.K_LEFT:
                    player.move_player(LEFT)
                #elif event.key == pygame.K_SPACE:
                    #pass
            elif event.type == TICK:
                screen.fill(WHITE)
                # these loops must run consecutively because shapes calculate
                # new positions based on the old positions of other shapes;
                # discrete timesteps are maintained with the Shape.pos and
                # Shape.nextpos variables. A shape's actual position is only
                # updated in its draw method.
                for shape in shapes:
                    if shape != player: shape.move()
                for shape in shapes:
                    if shape != player: shape.draw(screen)
                # the player should always be on top, so it gets rendered last
                player.draw(screen)
                pygame.display.flip()
# end main()
class Shape(object):
    """One creature in the world: a coloured polygon/circle with simple AI.

    Positions are kept in world coordinates; OFFSET translates them to
    screen coordinates at draw time.  pos is the committed position for the
    current tick, nextpos the position being computed for the next tick.
    """
    def __init__(self, position, shape_type, side_length, color, persona, age=0):
        self.pos = position
        self.nextpos = list(position)
        self.direction = STAY
        self.shape_type = shape_type
        self.side_length = side_length  # for circles, side_length = radius
        self.color = color
        self.persona = persona  # Personality instance (None for the player)
        self.age = age
        # Pre-computed vertex list (None for circles).
        self.points = makepoints(self.pos, self.shape_type, self.side_length)

    def move(self):
        """Decide a direction by letting nearby shapes 'vote', then step 1px."""
        # KEEP TRACK OF CURRENT DIRECTION; VOTE TO CHANGE IT OR NOT
        # (shapes should have a certain amount of inertia)
        # the shape should lose its inertia if its personal space is invaded #
        if RAND.random() < .25:  # shape is changing direction
            # 0 for UP, 1 for DOWN, 2 for RIGHT, 3 for LEFT, 4 for STAY
            votes = [0,0,0,0,0]
            # Hoist frequently-used attributes into locals for the hot loop.
            self_type = self.shape_type
            self_r = self.color.r
            self_g = self.color.g
            self_b = self.color.b
            self_size = self.side_length
            rgb_tolerance = self.persona.rgb_tolerance
            #size_tolerance = self.persona.size_tolerance
            space_tolerance = self.persona.personal_space
            xpos = self.pos[0]
            ypos = self.pos[1]
            # Debug tallies of how often each direction was suggested.
            ups = 0
            downs = 0
            rights = 0
            lefts = 0
            stays = 0
            # NOTE: reads the module-level `shapes` list (includes self and
            # the player) -- distance 0 to self yields approach=STAY.
            for shape in shapes:
                xdist = xpos - shape.pos[0]
                ydist = ypos - shape.pos[1]
                totaldist = math.sqrt(xdist**2 + ydist**2)
                if totaldist < LINE_OF_SIGHT:
                    # One candidate direction toward, one away from `shape`.
                    approach = closer(xdist, ydist)
                    avoid = further(xdist, ydist)
                    if approach == 0 or avoid == 0: ups += 1
                    if approach == 1 or avoid == 1: downs += 1
                    if approach == 2 or avoid == 2: rights += 1
                    if approach == 3 or avoid == 3: lefts += 1
                    if approach == 4 or avoid == 4: stays += 1
                    assert approach != avoid
                    #print approach, avoid
                    # Personal space invaded: push away, harder when closer.
                    if totaldist < space_tolerance:
                        votes[avoid] += space_tolerance - int(totaldist)
                    # Different shape type: strong preference to avoid.
                    if self_type != shape.shape_type:
                        #votes[approach] += 3
                        #votes[STAY] += 1
                        #else:
                        votes[avoid] += 3
                    # Each similar colour channel earns one vote to approach.
                    if (self_r - rgb_tolerance <=
                        shape.color.r <=
                        self_r + rgb_tolerance):
                        votes[approach] += 1
                    #else:
                    #votes[avoid] += 1
                    if (self_g - rgb_tolerance <=
                        shape.color.g <=
                        self_g + rgb_tolerance):
                        votes[approach] += 1
                    #else:
                    #votes[avoid] += 1
                    if (self_b - rgb_tolerance <=
                        shape.color.b <=
                        self_b + rgb_tolerance):
                        votes[approach] += 1
                    #else:
                    #votes[avoid] += 1
                    #if (self_size - size_tolerance <=
                    #shape.side_length <=
                    #self_size + size_tolerance):
                    #votes[approach] += 1
                    #else:
                    #votes[avoid] += 1
            direction = bestvote(votes)
            # Debug output (Python 2 print statement).
            print votes, ups, downs, rights, lefts, stays
        else:  # shape is not changing direction
            direction = self.direction
        # Step one pixel in the chosen direction (committed in draw()).
        if direction == UP: self.nextpos[1] -= 1
        elif direction == DOWN: self.nextpos[1] += 1
        elif direction == RIGHT: self.nextpos[0] += 1
        elif direction == LEFT: self.nextpos[0] -= 1
        # if direction == STAY: do nothing
        self.direction = direction

    def move_player(self, direction):
        """Scroll the camera: the player stays centred, the world moves."""
        # modify offset in *opposite* direction
        # (to keep "camera" centered on player)
        if direction == UP:
            OFFSET[1] += PLAYER_MOVEMENT
        elif direction == DOWN:
            OFFSET[1] -= PLAYER_MOVEMENT
        elif direction == RIGHT:
            OFFSET[0] -= PLAYER_MOVEMENT
        elif direction == LEFT:
            OFFSET[0] += PLAYER_MOVEMENT

    def draw(self, surface):
        """Render the shape, commit nextpos, age it, recycle if too old."""
        if self == player:
            # Player is drawn at its raw position (always screen centre).
            pygame.draw.circle(surface, self.color, self.pos, self.side_length)
            pygame.draw.circle(surface, BLACK, self.pos,
                               self.side_length, STROKE_WIDTH)
        else:
            xpos = self.pos[0] + OFFSET[0]
            ypos = self.pos[1] + OFFSET[1]
            # if the shape isn't visible, don't bother drawing it
            offscreen = (xpos > size[0] + self.side_length or
                         xpos < -self.side_length or
                         ypos > size[1] + self.side_length or
                         ypos < -self.side_length)
            if not offscreen:
                if self.shape_type == 'circle':
                    pygame.draw.circle(surface, self.color, (xpos, ypos),
                                       self.side_length)
                    pygame.draw.circle(surface, BLACK, (xpos, ypos),
                                       self.side_length, STROKE_WIDTH)
                else:  # draw a polygon centered at self.pos
                    pygame.draw.polygon(surface, self.color, self.offset_points())
                    pygame.draw.polygon(surface, BLACK, self.offset_points(),
                                        STROKE_WIDTH)
            #else: print("This shape (of type ", self.shape_type, ") is offscreen")
        self.update_position()
        # NOTE(review): this ages the player too, so the player is recycled
        # once it exceeds MAX_AGE -- confirm whether that is intended.
        self.age += 1
        if self.age > MAX_AGE:
            shapes.remove(self)
            shapes.append(generate_shape())

    def update_position(self):
        """Commit nextpos as the new pos and shift cached polygon vertices."""
        if self.shape_type != 'circle':
            xdiff = self.nextpos[0] - self.pos[0]
            ydiff = self.nextpos[1] - self.pos[1]
            for point in self.points:
                point[0] += xdiff
                point[1] += ydiff
        self.pos = (self.nextpos[0], self.nextpos[1])

    def offset_points(self):
        """Return the vertex list translated into screen coordinates."""
        return [[point[0]+OFFSET[0], point[1]+OFFSET[1]] for point in self.points]
# end class Shape
def bestvote(votes):
    """Return the index holding the largest tally; ties go to the last index."""
    winner = 0
    for index, tally in enumerate(votes):
        # '>=' deliberately lets later entries steal ties (STAY wins a
        # completely tied ballot, matching the original behaviour).
        if tally >= votes[winner]:
            winner = index
    return winner
#xdist = xpos - shape.pos[0]
#ydist = ypos - shape.pos[1]
"""
If xdist is positive, they are to the left of me.
If xdist is negative, they are to the right of me.
If ydist is positive, they are above me.
If ydist is negative, they are below me.
I will reduce the axis of greatest distance if I want to get closer.
I will increase the axis of least distance if I want to get further.
OR
I will randomly choose an axis to travel along.
"""
def closer(xdist, ydist):
    """Pick a direction code that moves toward the other shape.

    xdist/ydist are my-position minus their-position, so a positive xdist
    means they are to my left.  The axis is chosen at random; STAY is
    returned when the two shapes coincide (no RAND draw in that case).
    """
    if xdist == 0 and ydist == 0:
        return STAY
    if RAND.random() < 0.5:
        # travel along the x axis
        return LEFT if xdist > 0 else RIGHT
    # travel along the y axis
    return UP if ydist > 0 else DOWN
def further(xdist, ydist):
    """Pick a direction code that moves away from the other shape.

    Mirror image of closer(): same distance convention, random axis.
    Unlike closer() there is no STAY case, so overlapping shapes still
    scatter (LEFT or UP when both distances are zero).
    """
    if RAND.random() < 0.5:
        # travel along the x axis
        return RIGHT if xdist > 0 else LEFT
    # travel along the y axis
    return DOWN if ydist > 0 else UP
class Personality(object):
    """Per-shape behavioural parameters, randomised so that individuals of
    the same shape type still behave differently in Shape.move()."""
    def __init__(self, shape_type):
        # shape_type: key into SHAPE_MEAN/SHAPE_DEV (e.g. 'circle').
        # Max per-channel colour difference still considered "similar".
        self.rgb_tolerance = int(RAND.gauss(50, 10))
        #self.size_tolerance = int(RAND.gauss(SHAPE_MEAN[shape_type] / 2,
        #SHAPE_DEV[shape_type] / 2))
        # Radius (pixels) inside which other shapes are actively avoided.
        self.personal_space = int(RAND.gauss(SHAPE_MEAN[shape_type] * 2,
                                             SHAPE_DEV[shape_type] / 2))
        # Debug output (Python 2 print statement).
        print self.rgb_tolerance, self.personal_space
# end class Personality
def makepoints(position, shape_type, side_length):
halfside = side_length / 2
if shape_type == 'circle':
return None
elif shape_type == 'triangle':
h = math.sqrt(side_length**2 - (side_length/2)**2)
apothem = h / 2
top = [position[0], position[1] - apothem]
botleft = [position[0] - halfside, position[1] + apothem]
botright = [position[0] + halfside, position[1] + apothem]
return (top, botleft, botright)
elif shape_type == 'square':
topleft = [position[0] - halfside, position[1] - halfside]
topright = [topleft[0] + side_length, topleft[1]]
botleft = [topleft[0], topleft[1] + side_length]
botright = [topright[0], topright[1] + side_length]
return (topleft, topright, botright, botleft)
else:
numsides = SHAPE_SIDES[shape_type]
apothem = side_length / (2 * math.tan(math.pi / numsides))
angle = ((numsides - 2) * math.pi) / (numsides * 2)
xoffset = side_length * math.sin(angle)
yoffset = side_length * math.cos(angle)
radius = math.sqrt(halfside**2 + apothem**2)
if shape_type == 'pentagon':
top = [position[0], position[1] - radius]
second = [position[0] + xoffset, top[1] + yoffset]
third = [position[0] + halfside, position[1] + apothem]
fourth = [position[0] - halfside, position[1] + apothem]
fifth = [position[0] - xoffset, top[1] + yoffset]
return (top, second, third, fourth, fifth)
elif shape_type == 'hexagon':
topleft = [position[0] - halfside, position[1] - apothem]
topright = [position[0] + halfside, position[1] - apothem]
right = [position[0] + radius, position[1]]
botright = [position[0] + halfside, position[1] + apothem]
botleft = [position[0] - halfside, position[1] + apothem]
left = [position[0] - radius, position[1]]
return (topleft, topright, right, botright, botleft, left)
else:
print('unkown shape, type:', shape_type)
assert False
# end makepoints()
def choose_shape():
    """Randomly select a shape type, biased toward fewer sides.

    Walks SHAPE_TYPES in order, giving each non-circle entry a
    MAGIC_CONSTANT chance of being picked; 'circle' (the final entry)
    is the guaranteed fallback, so this always returns a value.
    Prints each candidate considered (debug output, preserved).
    """
    for candidate in SHAPE_TYPES:
        print(candidate)
        # 'circle' short-circuits, so no RAND draw is consumed for it.
        if candidate == 'circle' or RAND.random() < MAGIC_CONSTANT:
            return candidate
def generate_shape(random_age=False):
    """Create one randomly coloured, sized and placed Shape.

    random_age=True starts the shape partway through its lifetime so the
    initial population does not all expire at once.
    """
    age = RAND.randint(0, MAX_AGE-1) if random_age else 0
    # Pure white is reserved for the player: redraw until something else.
    while True:
        r = RAND.randint(0, 255)
        g = RAND.randint(0, 255)
        b = RAND.randint(0, 255)
        if not (r == 255 and g == 255 and b == 255):
            break
    # New shapes always appear on-screen (world coords = screen - OFFSET).
    x = RAND.randint(0, size[0]) - OFFSET[0]
    y = RAND.randint(0, size[1]) - OFFSET[1]
    shape_type = choose_shape()
    # Redraw from the gaussian until we get a positive size.
    shape_size = 0
    while shape_size <= 0:
        shape_size = RAND.gauss(SHAPE_MEAN[shape_type], SHAPE_DEV[shape_type])
    return Shape((x, y),
                 shape_type,
                 int(shape_size),
                 pygame.color.Color(r, g, b),
                 Personality(shape_type),
                 age)
def generate_shapes():
    """Build the initial population of NUM_SHAPES randomly-aged shapes."""
    return [generate_shape(True) for _ in range(NUM_SHAPES)]
# --- module-level setup: build the world and start the game loop ---
size = (1200, 900)  # window dimensions in pixels
period = 25         # ms between TICK events (~40 updates/second)
# The player is a white circle fixed at the screen centre; the world scrolls
# around it via OFFSET.  persona is None because the player never runs the
# AI in Shape.move().
# BUG FIX: age was previously None, but Shape.draw() unconditionally does
# "self.age += 1", which raised TypeError on the very first TICK.  Start at 0.
# NOTE(review): with a numeric age the player is still recycled by
# Shape.draw() once it exceeds MAX_AGE -- confirm whether the player should
# be exempt from ageing altogether.
player = Shape( (int(size[0]/2), int(size[1]/2)),
                'circle',
                25,
                WHITE,
                None,
                0 )
shapes = generate_shapes()
shapes.append(player)
main(player, shapes, size, period)
| 32.494481 | 83 | 0.531793 | 6,890 | 0.468071 | 0 | 0 | 0 | 0 | 0 | 0 | 2,750 | 0.186821 |
6cbfc8dcd6d97cc1be61ed0f0e63d726c3f60f59 | 111 | py | Python | exercicios/ex110/teste.py | Matheus1199/python | c87859d4bf63ba0edea43d864fcbce4915da7e6a | [
"MIT"
] | null | null | null | exercicios/ex110/teste.py | Matheus1199/python | c87859d4bf63ba0edea43d864fcbce4915da7e6a | [
"MIT"
] | null | null | null | exercicios/ex110/teste.py | Matheus1199/python | c87859d4bf63ba0edea43d864fcbce4915da7e6a | [
"MIT"
] | null | null | null | import moeda
# Read a price and a surcharge percentage from the user, then delegate the
# formatted summary to the helper module.
p = float(input('Digite o preço: '))  # product price
t = int(input('Qual o valor da taxa? '))  # surcharge percentage
# moeda.resumo presumably prints a formatted price summary -- its
# implementation is not visible from this file.
moeda.resumo(p, t)
| 18.5 | 40 | 0.648649 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.383929 |
6cc3d5d1de7a0cf11e4107bd74fe44f4807903f5 | 11,683 | py | Python | DTSGUI/DetailLevels.py | pchan126/Blender_DTS_30 | 730dabe3d620b088811b86e34583e92ed30dd184 | [
"MIT"
] | null | null | null | DTSGUI/DetailLevels.py | pchan126/Blender_DTS_30 | 730dabe3d620b088811b86e34583e92ed30dd184 | [
"MIT"
] | null | null | null | DTSGUI/DetailLevels.py | pchan126/Blender_DTS_30 | 730dabe3d620b088811b86e34583e92ed30dd184 | [
"MIT"
] | null | null | null | '''
DetailLevels.py
Copyright (c) 2008 Joseph Greenawalt(jsgreenawalt@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import Common_Gui
import DtsGlobals
from DtsPrefs import *
'''
***************************************************************************************************
*
* Class that creates and owns the GUI controls on the Detail Levels control page
*
***************************************************************************************************
'''
class DetailLevelControlsClass:
    """Builds and manages the GUI controls on the Detail Levels tab.

    Each detail level in the preferences gets one list row consisting of a
    name label, a size picker and 20 layer toggle buttons.  GUI event ids
    are allocated in runs of 21 per row starting at 40 (see curListEvent).
    """
    #######################################
    # init and cleanup methods
    #######################################
    def __init__(self, guiDetailLevelsSubtab):
        global globalEvents
        # panel state: next free GUI event id for list-row controls
        self.curListEvent = 40
        # initialize GUI controls
        self.guiDetailLevelsListTitle = Common_Gui.SimpleText("guiDetailLevelsListTitle", "Detail Levels:", None, self.guiDetailLevelsListTitleResize)
        self.guiDetailLevelsList = Common_Gui.ListContainer("guiDetailLevelsList", "dl.list", self.handleEvent, self.guiDetailLevelsListResize)
        self.guiDetailLevelsAddButton = Common_Gui.BasicButton("guiDetailLevelsAddButton", "Add:", "Add a new detail level of the indicated type", 5, self.handleAddEvent, self.guiDetailLevelsAddButtonResize)
        self.guiDetailLevelsTypeMenu = Common_Gui.ComboBox("guiDetailLevelsTypeMenu", "Type", "Select the type of detail level to add", 6, self.handleEvent, self.guiDetailLevelsTypeMenuResize)
        # NOTE(review): the tooltip below looks copy-pasted from the
        # materials panel -- confirm and correct the string if so.
        self.guiDetailLevelsDelButton = Common_Gui.BasicButton("guiDetailLevelsDelButton", "Delete Selected Detail Level", "Import Blender materials and settings", 7, self.handleDelEvent, self.guiDetailLevelsDelButtonResize)
        # set default values for controls
        self.guiDetailLevelsList.childHeight = 30
        # add controls to containers
        guiDetailLevelsSubtab.addControl(self.guiDetailLevelsListTitle)
        guiDetailLevelsSubtab.addControl(self.guiDetailLevelsList)
        guiDetailLevelsSubtab.addControl(self.guiDetailLevelsAddButton)
        guiDetailLevelsSubtab.addControl(self.guiDetailLevelsTypeMenu)
        guiDetailLevelsSubtab.addControl(self.guiDetailLevelsDelButton)
        self.populateDLList()
        self.populateTypePulldown()

    def cleanup(self):
        # Must destroy any GUI objects that are referenced in a non-global scope
        # explicitly before interpreter shutdown to avoid the dreaded
        # "error totblock" message when exiting Blender.
        # Note: __del__ is not guaranteed to be called for objects that still
        # exist when the interpreter exits.
        del self.guiDetailLevelsListTitle
        del self.guiDetailLevelsList
        del self.guiDetailLevelsAddButton
        del self.guiDetailLevelsTypeMenu
        del self.guiDetailLevelsDelButton

    #######################################
    # Event handler methods
    #######################################
    def handleEvent(self, control):
        """No-op handler for controls that need no reaction."""
        pass

    def handleAddEvent(self, control):
        """Add a detail level of the type selected in the pulldown."""
        Prefs = DtsGlobals.Prefs
        DLType = self.guiDetailLevelsTypeMenu.getSelectedItemString()
        dlName = None
        size = None
        if DLType == "Visible Detail Level":
            dlName = "Detail"
        elif DLType == "Collision Detail Level":
            dlName = "Collision"
            size = -1
        elif DLType == "LOS Col Detail Level":
            dlName = "LOSCollision"
            size = -1
        Prefs.addDetailLevel(dlName, size)
        self.populateDLList()

    def handleDelEvent(self, control):
        """Delete the currently selected detail level and refresh the list."""
        Prefs = DtsGlobals.Prefs
        dlName = self.getDLListSelectedItem()
        # todo - are you sure dialog?
        Prefs.delDetailLevel(dlName)
        self.populateDLList()

    def handleListItemEvent(self, control):
        """Dispatch events from list-row controls (size picker + 20 layer buttons).

        Event ids are allocated 21 per row starting at 40: id 40+21*row is
        the size picker, the following 20 ids are layer buttons 1..20.
        """
        Prefs = DtsGlobals.Prefs
        evtOffset = 21
        # Determine DL name
        if control.evt == 40:
            calcIdx = 0
        else:
            # integer row index (Python 2 '/' is floor division for ints)
            calcIdx = (control.evt - 40) / evtOffset
            pass
        dlName = self.guiDetailLevelsList.controls[calcIdx].controls[0].label
        # position of the control within its row: 0 = size, 1..20 = layers
        realItem = control.evt - 40 - (calcIdx*evtOffset)
        # get the shift state
        shiftState = Common_Gui.shiftState
        # a layer button was clicked
        if realItem > 0 and realItem < 21:
            # if shifted, click does not affect other layer buttons in the DL
            if shiftState:
                # button pressed
                if control.state:
                    # assign the layer to the detail level
                    Prefs.setLayerAssignment(dlName, realItem)
                # button un-pressed
                else:
                    # Remove layer from this dl
                    Prefs.removeLayerAssignment(realItem)
            # if not shifted, click turns all other layer buttons off
            else:
                # clear other layers assigned to this dl
                Prefs['DetailLevels'][dlName] = []
                Prefs.setLayerAssignment(dlName, realItem)
                # clear button states
                for i in range(1,21):
                    if i == realItem:
                        control.state = True
                    else:
                        self.guiDetailLevelsList.controls[calcIdx].controls[i].state = False
                control.state = True
        # size was changed
        elif realItem == 0:
            # rename detail level: keep the text portion, replace the number
            newName = Prefs.getTextPortion(dlName)
            newName += str(control.value)
            if newName != dlName:
                if Prefs.renameDetailLevel(dlName, newName):
                    self.guiDetailLevelsList.controls[calcIdx].controls[0].label = newName
                else:
                    # rename rejected: revert the picker to the old number
                    control.value = int(Prefs.getTrailingNumber(dlName))

    #######################################
    # Refresh and Clear methods
    #######################################
    def refreshAll(self):
        """Rebuild the whole detail-level list from preferences."""
        self.populateDLList()

    #########################
    # Resize callback methods
    #########################
    # resize events
    def guiDetailLevelsListTitleResize(self, control, newwidth, newheight):
        control.x, control.y, control.height, control.width = 10,270, 20,150

    def guiDetailLevelsListResize(self, control, newwidth, newheight):
        control.x, control.y, control.height, control.width = 10,45, newheight - 120, newwidth - 20

    def guiDetailLevelsAddButtonResize(self, control, newwidth, newheight):
        control.x, control.y, control.height, control.width = 10,295, 20,50

    def guiDetailLevelsTypeMenuResize(self, control, newwidth, newheight):
        control.x, control.y, control.height, control.width = 62,295, 20,150

    def guiDetailLevelsDelButtonResize(self, control, newwidth, newheight):
        control.x, control.y, control.height, control.width = 10,15, 20,180

    #########################
    # Misc / utility methods
    #########################
    ## @brief Creates a detail level list item and its associated GUI controls.
    #  @note Called by populateDLList
    #  @param dlName The name of the sequence for which we're creating the list item.
    def createDLListItem(self, dlName):
        Prefs = DtsGlobals.Prefs
        DLPrefs = Prefs['DetailLevels'][dlName]
        DLType = prefsClass.getTextPortion(dlName)
        startEvent = self.curListEvent
        listWidth = self.guiDetailLevelsList.width - self.guiDetailLevelsList.barWidth
        guiContainer = Common_Gui.BasicContainer("", None, None)
        guiName = Common_Gui.SimpleText("", dlName, None, None)
        guiLayersLabel = Common_Gui.SimpleText("", "Use Layers:", None, None)
        guiSize = Common_Gui.NumberPicker("guiSize", "Min Pixel Size:", "Height in pixels at which detail level begins to display", startEvent, self.handleListItemEvent, None)
        guiSize.value = Prefs.getTrailingNumber(dlName)
        if DLType == 'Detail':
            guiSize.min = 0
            guiSize.max = 1024
        elif DLType == 'Collision' or DLType == 'LOSCollision':
            # collision detail levels always use size -1 and cannot be edited
            guiSize.min = -1
            guiSize.max = -1
            guiSize.enabled = False
        startEvent += 1
        # create layer buttons
        guiLayerButton = []
        for i in range(1,21):
            # create the button
            guiLayerButton.append(Common_Gui.ToggleButton("guiLayer"+str(i), "", "Use Layer "+str(i) + " in Detail Level", startEvent + i - 1, self.handleListItemEvent, None))
            if i in DLPrefs:
                # turn on the button
                guiLayerButton[len(guiLayerButton)-1].state = True
            else:
                # turn the button off
                guiLayerButton[len(guiLayerButton)-1].state = False
        guiContainer.fade_mode = 0 # flat color
        guiName.x, guiName.y = 5, 8
        guiSize.x, guiSize.y = 100, 5
        guiLayersLabel.x, guiLayersLabel.y = 270, 8
        # todo - clean this up :-)
        buttonWidth = 10
        # position buttons in groups of 5
        buttonsStartX = 340
        buttonsStartY = 15
        buttonPos = buttonsStartX
        for i in range(0,5):
            guiLayerButton[i].x, guiLayerButton[i].y = buttonPos, buttonsStartY
            guiLayerButton[i].width = buttonWidth
            guiLayerButton[i].height = buttonWidth
            buttonPos += buttonWidth
        buttonsStartX = 395
        buttonsStartY = 15
        buttonPos = buttonsStartX
        for i in range(5,10):
            guiLayerButton[i].x, guiLayerButton[i].y = buttonPos, buttonsStartY
            guiLayerButton[i].width = buttonWidth
            guiLayerButton[i].height = buttonWidth
            buttonPos += buttonWidth
        buttonsStartX = 340
        buttonsStartY = 5
        buttonPos = buttonsStartX
        for i in range(10,15):
            guiLayerButton[i].x, guiLayerButton[i].y = buttonPos, buttonsStartY
            guiLayerButton[i].width = buttonWidth
            guiLayerButton[i].height = buttonWidth
            buttonPos += buttonWidth
        buttonsStartX = 395
        buttonsStartY = 5
        buttonPos = buttonsStartX
        for i in range(15,20):
            guiLayerButton[i].x, guiLayerButton[i].y = buttonPos, buttonsStartY
            guiLayerButton[i].width = buttonWidth
            guiLayerButton[i].height = buttonWidth
            buttonPos += buttonWidth
        # Add everything
        guiContainer.addControl(guiName)
        for i in range(0,20):
            guiContainer.addControl(guiLayerButton[i])
        guiContainer.addControl(guiSize)
        guiContainer.addControl(guiLayersLabel)
        # increment the current event counter
        self.curListEvent += 21
        return guiContainer

    ## @brief Populates the sequence list using current pref values.
    def populateDLList(self):
        Prefs = DtsGlobals.Prefs
        self.clearDLList()
        Prefs = DtsGlobals.Prefs
        keys = Prefs.getSortedDLNames()
        # loop through all detail levels in the preferences
        for dlName in keys:
            self.guiDetailLevelsList.addControl(self.createDLListItem(dlName))

    def clearDLList(self):
        """Remove all rows from the list and reset selection/event state."""
        for i in range(0, len(self.guiDetailLevelsList.controls)):
            del self.guiDetailLevelsList.controls[i].controls[:]
        del self.guiDetailLevelsList.controls[:]
        self.curListEvent = 40
        self.guiDetailLevelsList.itemIndex = -1
        self.guiDetailLevelsList.scrollPosition = 0
        if self.guiDetailLevelsList.callback: self.guiDetailLevelsList.callback(self.guiDetailLevelsList) # Bit of a hack, but works

    def populateTypePulldown(self):
        """Fill the 'Add' type pulldown with the three detail-level kinds."""
        self.guiDetailLevelsTypeMenu.items.append("Visible Detail Level")
        self.guiDetailLevelsTypeMenu.items.append("Collision Detail Level")
        self.guiDetailLevelsTypeMenu.items.append("LOS Col Detail Level")
        self.guiDetailLevelsTypeMenu.selectStringItem("Visible Detail Level")

    ## @brief Returns a string corresponding to the currently selected vis track list item.
    def getDLListSelectedItem(self):
        if self.guiDetailLevelsList.itemIndex != -1:
            return self.guiDetailLevelsList.controls[self.guiDetailLevelsList.itemIndex].controls[0].label
        else: return ""
| 34.770833 | 218 | 0.714714 | 10,218 | 0.874604 | 0 | 0 | 0 | 0 | 0 | 0 | 4,022 | 0.344261 |
6cc4b53806510ef9be869b2f5f6842afec22d977 | 1,142 | py | Python | python/loader/constants.py | msolonskyi/tennisAnalyzer | 19e40fe27db498f094bcacd06cceb28af535d499 | [
"MIT"
] | null | null | null | python/loader/constants.py | msolonskyi/tennisAnalyzer | 19e40fe27db498f094bcacd06cceb28af535d499 | [
"MIT"
] | null | null | null | python/loader/constants.py | msolonskyi/tennisAnalyzer | 19e40fe27db498f094bcacd06cceb28af535d499 | [
"MIT"
] | null | null | null | CONNECTION_STRING = '/@'
CHUNK_SIZE = 100
BORDER_QTY = 5 # minimun matches per year per player for reload player
ATP_URL_PREFIX = 'http://www.atpworldtour.com'
DC_URL_PREFIX = 'https://www.daviscup.com'
ATP_TOURNAMENT_SERIES = ('gs', '1000', 'atp', 'ch')
DC_TOURNAMENT_SERIES = ('dc',)
DURATION_IN_DAYS = 18
ATP_CSV_PATH = ''
DC_CSV_PATH = ''
SLEEP_DURATION = 10
COUNTRY_CODE_MAP = {
'LIB': 'LBN',
'SIN': 'SGP',
'bra': 'BRA',
'ROM': 'ROU'}
COUNTRY_NAME_MAP = {
'Slovak Republic': 'Slovakia',
'Bosnia-Herzegovina': 'Bosnia and Herzegovina'}
INDOOR_OUTDOOR_MAP = {
'I': 'Indoor',
'O': 'Outdoor'}
SURFACE_MAP = {
'H': 'Hard',
'C': 'Clay',
'A': 'Carpet',
'G': 'Grass'}
STADIE_CODES_MAP = {
'Finals': 'F',
'Final': 'F',
'Semi-Finals': 'SF',
'Semifinals': 'SF',
'Quarter-Finals': 'QF',
'Quarterfinals': 'QF',
'Round of 16': 'R16',
'Round of 32': 'R32',
'Round of 64': 'R64',
'Round of 128': 'R128',
'Round Robin': 'RR',
'Olympic Bronze': 'BR',
'3rd Round Qualifying': 'Q3',
'2nd Round Qualifying': 'Q2',
'1st Round Qualifying': 'Q1'}
| 23.306122 | 71 | 0.584063 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 596 | 0.521891 |
6cc792bc4ec1495e2621de0b8e4ddb3c30f40b27 | 3,383 | py | Python | hackerrank/graph/medium/journey-to-the-moon.py | johnklee/algprac | 51a7c872806aec261c5e3db6cbac47ec34516b4d | [
"Apache-2.0"
] | null | null | null | hackerrank/graph/medium/journey-to-the-moon.py | johnklee/algprac | 51a7c872806aec261c5e3db6cbac47ec34516b4d | [
"Apache-2.0"
] | null | null | null | hackerrank/graph/medium/journey-to-the-moon.py | johnklee/algprac | 51a7c872806aec261c5e3db6cbac47ec34516b4d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
r'''
https://www.hackerrank.com/challenges/journey-to-the-moon/problem
'''
import math
import os
import random
import re
import sys
class Node:
    """A vertex in the undirected astronaut graph.

    v         -- integer id of the astronaut
    neighbors -- adjacent Node objects (kept symmetric by addN)
    visit     -- traversal flag consumed/set by dfs()
    """

    def __init__(self, v):
        self.v = v
        self.neighbors = set()
        self.visit = False

    def addN(self, n):
        # Register the edge on both endpoints; the membership guard stops
        # the mutual recursion after a single bounce.
        if n not in self.neighbors:
            self.neighbors.add(n)
            n.addN(self)

    def __hash__(self):
        return hash(self.v)

    def __eq__(self, other):
        return self.__class__ == other.__class__ and self.v == other.v

    def n(self):
        """Yield each neighbouring node."""
        for neighbor in self.neighbors:
            yield neighbor

    def dfs(self):
        """Return the ids of every node reachable from this one.

        Despite the name this is a breadth-first traversal (FIFO deque).
        Marks .visit on every node touched.
        """
        from collections import deque
        self.visit = True
        frontier = deque([self])
        reachable = []
        while frontier:
            current = frontier.popleft()
            reachable.append(current.v)
            for neighbor in current.n():
                if not neighbor.visit:
                    neighbor.visit = True
                    frontier.append(neighbor)
        return reachable
# Complete the journeyToMoon function below.
def journeyToMoon(n, astronaut):
    """Count pairs of astronauts from different countries.

    n         -- total number of astronauts, ids 0..n-1
    astronaut -- list of (a, b) pairs known to share a country

    Builds an undirected graph of the given pairs, collects its connected
    components (one per country, plus singletons for unmentioned ids) and
    returns the number of unordered pairs whose members lie in different
    components.
    """
    ndict = {}
    cty_list = []
    # Create graph
    for a, b in astronaut:
        if a not in ndict:
            ndict[a] = Node(a)
        if b not in ndict:
            ndict[b] = Node(b)
        ndict[a].addN(ndict[b])
    # Collect each connected component (Node.dfs marks visited nodes)
    for node in ndict.values():
        if not node.visit:
            cty_list.append(node.dfs())
            print('Group-{}: {}'.format(node.v, cty_list[-1]))
    # Astronauts never mentioned in any pair form singleton countries
    for i in range(n):
        if i not in ndict:
            cty_list.append(set([i]))
    print('Total {} unique countries...{}'.format(len(cty_list), cty_list))
    # Count cross-country pairs
    if len(cty_list) == 1:
        return 0
    elif len(cty_list) == 2:
        return len(cty_list[0]) * len(cty_list[1])
    else:
        # BUG FIX: under Python 3, map() returns a one-shot iterator that
        # cannot be indexed, which crashed this branch; list() is also a
        # no-op semantically under Python 2.
        cty_len_list = list(map(len, cty_list))
        # Running pair count: psum accumulates pairs between each new
        # component and all previously seen members (nsum).
        psum = cty_len_list[0] * cty_len_list[1]
        nsum = cty_len_list[0] + cty_len_list[1]
        for i in range(2, len(cty_len_list)):
            psum += nsum * cty_len_list[i]
            nsum += cty_len_list[i]
        return psum
#print("{}".format(journeyToMoon(5, [(0, 1), (2, 3), (0, 4)])))
#print("{}".format(journeyToMoon(4, [(0, 2)])))
import unittest
class FAT(unittest.TestCase):
    """Functional acceptance tests for journeyToMoon."""
    def setUp(self):
        pass

    def test_01(self):
        # Inline cases: (astronaut count, pair list, expected answer).
        tdatas = [
            (5, [(0, 1), (2, 3), (0, 4)], 6),
            (4, [(0, 2)], 5)
        ]
        for n, astronaut, a in tdatas:
            r = journeyToMoon(n, astronaut)
            self.assertEqual(a, r, 'Expect={}; Real={}'.format(a, r))

    def test_02(self):
        # Fixture-file cases: input 'journey-to-the-moon.t<N>' (first line
        # "<astronauts> <pairs>", then one pair per line) against expected
        # answer in 'journey-to-the-moon.a<N>'.
        tid = [1]
        tdatas = []
        # NOTE: `id` shadows the builtin; left unchanged here.
        for id in tid:
            with open('journey-to-the-moon.t{}'.format(id), 'r') as fh:
                na, pn = fh.readline().strip().split(' ')
                astronaut = []
                for i in range(int(pn)):
                    # NOTE(review): under Python 2 map() yields a list; under
                    # Python 3 this appends an iterator -- confirm the
                    # intended interpreter before porting.
                    astronaut.append(map(int, fh.readline().split(' ')))
            with open('journey-to-the-moon.a{}'.format(id), 'r') as fh2:
                tdatas.append((int(na), astronaut, int(fh2.readline())))
        for n, astronaut, a in tdatas:
            r = journeyToMoon(n, astronaut)
            self.assertEqual(a, r, 'Expect={}; Real={}\n{}'.format(a, r, astronaut))
| 26.224806 | 84 | 0.514041 | 1,940 | 0.573456 | 65 | 0.019214 | 0 | 0 | 0 | 0 | 484 | 0.143068 |
6cc8bee2c61fbd3d46a87dd9bd2c0c32252ff673 | 3,669 | py | Python | utils/check-glossary.py | feddelegrand7/glosario | 1bfe81174fe0ad5e375aaa7fea00fe8002958e97 | [
"CC-BY-4.0"
] | null | null | null | utils/check-glossary.py | feddelegrand7/glosario | 1bfe81174fe0ad5e375aaa7fea00fe8002958e97 | [
"CC-BY-4.0"
] | null | null | null | utils/check-glossary.py | feddelegrand7/glosario | 1bfe81174fe0ad5e375aaa7fea00fe8002958e97 | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python
'''
Check YAML file. Each _entry_ contains one or more _definitions_.
'''
import sys
import re
import yaml
from collections import Counter
# Keys for entries and definitions.
ENTRY_REQUIRED_KEYS = {'slug'}
ENTRY_OPTIONAL_KEYS = {'ref'}
ENTRY_LANGUAGE_KEYS = {'en', 'es', 'fr'}
ENTRY_KEYS = ENTRY_REQUIRED_KEYS | \
ENTRY_OPTIONAL_KEYS | \
ENTRY_LANGUAGE_KEYS
DEFINITION_REQUIRED_KEYS = {'term', 'def'}
DEFINITION_OPTIONAL_KEYS = {'acronym'}
DEFINITION_KEYS = DEFINITION_REQUIRED_KEYS | \
DEFINITION_OPTIONAL_KEYS
# Match internal Markdown links.
LINK_PAT = re.compile(r'\[.+?\]\(#(.+?)\)')
def main():
    '''Load the glossary named on the command line and run every check.'''
    with open(sys.argv[1], 'r') as handle:
        entries = yaml.load(handle, Loader=yaml.FullLoader)
    for entry in entries:
        checkEntry(entry)
    checkSlugs(entries)
    checkDuplicates(entries)
    # buildBackward validates that every referenced term exists.
    buildBackward(buildForward(entries))
def checkEntry(entry):
    '''Validate one glossary entry and recurse into its translations.'''
    present = set(entry)
    absent = [key for key in ENTRY_REQUIRED_KEYS if key not in present]
    if absent:
        print(f'Missing required keys for entry {entry}: {absent}')
    # KeyError here if 'slug' is absent (same as the original behaviour).
    slug = entry['slug']
    extra = present - ENTRY_KEYS
    if extra:
        print(f'Unknown keys in {slug}: {extra}')
    for lang in ENTRY_LANGUAGE_KEYS:
        if lang in entry:
            checkLanguage(slug, lang, entry[lang])
def checkLanguage(slug, lang, definition):
    '''Validate one per-language definition block of an entry.'''
    present = set(definition)
    absent = [key for key in DEFINITION_REQUIRED_KEYS if key not in present]
    if absent:
        print(f'Missing required keys for definition {slug}/{lang}: {absent}')
    extra = present - DEFINITION_KEYS
    if extra:
        print(f'Unknown keys in {slug}/{lang}: {extra}')
def checkSlugs(data):
    '''Check that entries carry unique slugs and appear in sorted order.'''
    slugs = [item['slug'] for item in data if 'slug' in item]
    for previous, current in zip(slugs, slugs[1:]):
        if current < previous:
            print(f'slug {current} out of order')
    tally = Counter(slugs)
    dups = [slug for slug, count in tally.items() if count > 1]
    if dups:
        print(f'duplicate keys: {dups}')
def checkDuplicates(data):
    '''Report terms defined more than once within any single language.'''
    for lang in ENTRY_LANGUAGE_KEYS:
        terms = [item[lang]['term'] for item in data
                 if ((lang in item) and 'term' in item[lang])]
        tally = Counter(terms)
        dups = [term for term, count in tally.items() if count > 1]
        if dups:
            print(f'duplicate definitions for {lang}: {dups}')
def buildForward(data):
    '''Build the graph of forward references.

    Maps each entry's slug to the set of slugs it links to, combining any
    explicit 'see' list with Markdown links found in the English definition.
    '''
    result = {}
    for entry in data:
        record = set()
        if 'see' in entry:
            record.update(entry['see'])
        # BUG FIX: language keys are optional (see ENTRY_LANGUAGE_KEYS), but
        # entry['en']['def'] raised KeyError for entries without an English
        # definition; fall back to an empty string.
        definition = entry.get('en', {}).get('def', '')
        for link in LINK_PAT.findall(definition):
            record.add(link)
        result[entry['slug']] = record
    return result
def buildBackward(forward):
    '''Build graph of backward references, checking for missing terms.'''
    result = {source: set() for source in forward}
    failed = set()
    for source, targets in forward.items():
        for dest in targets:
            if dest in result:
                result[dest].add(source)
            else:
                failed.add(dest)
    if failed:
        failed = '\n  '.join(sorted(failed))
        print('Missing terms:\n ', failed, file=sys.stderr)
        sys.exit(1)
    return result
# Entry point: check-glossary.py <glossary.yml>
if __name__ == '__main__':
    main()
| 29.829268 | 79 | 0.621423 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 964 | 0.262742 |
6cca5ceb0f53d242c817b044a0b125d5dee308d5 | 6,430 | py | Python | src/SPARTACUS10/tests/test_spatial_silhouette.py | totie10/SPARTACUS10 | ea708b159d30ddd1cb59a372595e76fb52bd7516 | [
"MIT"
] | 1 | 2021-05-23T10:38:16.000Z | 2021-05-23T10:38:16.000Z | src/SPARTACUS10/tests/test_spatial_silhouette.py | totie10/SPARTACUS10 | ea708b159d30ddd1cb59a372595e76fb52bd7516 | [
"MIT"
] | null | null | null | src/SPARTACUS10/tests/test_spatial_silhouette.py | totie10/SPARTACUS10 | ea708b159d30ddd1cb59a372595e76fb52bd7516 | [
"MIT"
] | 1 | 2021-07-19T05:07:30.000Z | 2021-07-19T05:07:30.000Z |
import pytest
import numpy as np
import pandas as pd
from SPARTACUS10 import spatial_silhouette as spasi
import sklearn.metrics as metrics
import os
def find_path(name, path = None):
    """Return the full path of the first file called *name* under *path*.

    Searches the current working directory when *path* is None; returns
    None when no matching file exists.
    """
    base = os.getcwd() if path is None else path
    for dirpath, _dirnames, filenames in os.walk(base):
        if name in filenames:
            return os.path.join(dirpath, name)
def test_silhouette():
    """
    Does silhouette_coefficient() function produce the same results as
    silhouette_score() function from sklearn.metrics using Euclidean metric?
    """
    # Test on matrixA
    # Fixture matrix: rows are samples, columns 1..20 are the variables.
    X = np.genfromtxt(find_path("matrixA.csv"), delimiter=",", skip_header=1, usecols = range(1,21))
    V = X.shape[1]
    for i in range(3, 11):
        # Random labelling with i clusters; both implementations score the
        # SAME labels, so the comparison is valid without seeding.
        labels = np.random.randint(1, i+1, V)
        sil_score1 = spasi.silhouette_coefficient(X, labels, metric = "euclidean", iter_max = 10)
        # sklearn expects samples in rows, hence the transpose.
        sil_score2 = metrics.silhouette_score(X.T, labels, metric = "euclidean")
        assert np.round(sil_score1,10) == np.round(sil_score2, 10), "Silhouette function (Euclidean) produces different results than that implemented in scikit-learn"
    # Test on random data comparison with existing function
    V = 100
    X = np.random.normal(size = (10, V))
    for i in range(3, 11):
        labels = np.random.randint(1, i+1, V)
        sil_score1 = spasi.silhouette_coefficient(X, labels, metric = "euclidean", iter_max = 10)
        sil_score2 = metrics.silhouette_score(X.T, labels, metric = "euclidean")
        assert np.round(sil_score1,10) == np.round(sil_score2, 10), "Silhouette function (Euclidean) produces different results than that implemented in scikit-learn"
    # Test on random data
    # Fixed fixture files -> regression-check exact scores for both metrics.
    random_data = np.genfromtxt(find_path("random_data.csv"), delimiter=",")
    random_labels = np.genfromtxt(find_path("random_labels.csv"), delimiter=",")
    silhouette_score_Eucl = spasi.silhouette_coefficient(random_data, random_labels, metric = "euclidean")
    assert np.isclose(silhouette_score_Eucl, -0.018137954346288798), "Error in Euclidean silhouette_coefficient function"
    silhouette_score_corr = spasi.silhouette_coefficient(random_data, random_labels, metric = "correlation")
    assert np.isclose(silhouette_score_corr, -0.01710701512585803), "Error in correlation silhouette_coefficient function"
def test_ensemble_silhouette():
    """Jaccard-metric (ensemble) silhouette on small hand-built label matrices."""
    clusterings = np.array([[1,1,2,2,3,3,4,4],
                            [1,1,2,2,3,3,4,4],
                            [1,1,2,2,3,3,4,4],
                            [1,1,2,2,5,5,6,6],
                            [1,1,1,2,3,3,3,4],
                            [1,1,1,2,3,3,3,4]])
    reference = [1,1,2,2,3,3,4,4]
    # The first four clusterings agree perfectly with the reference labels.
    assert spasi.silhouette_coefficient(clusterings[0:4,], reference, metric = "jaccard", iter_max = 4) == 1, "Ensemble silhouette produces wrong results"
    full_score = spasi.silhouette_coefficient(clusterings, reference, metric = "jaccard", iter_max = 4)
    assert np.round(full_score, 8) == 0.79166667, "Ensemble silhouette produces wrong results"
    small = np.array([[1,1,2,2], [1,2,2,2], [1,1,1,2]])
    small_labels = [1,1,2,2]
    small_score = spasi.silhouette_coefficient(small, small_labels, metric = "jaccard", iter_max = 4)
    assert np.round(small_score, 8) == 0.46666667, "Ensemble silhouette produces wrong results"
def test_simplified_silhouette():
    """Regression-check simplified silhouette scores on the random fixture."""
    # Test on random data
    data = np.genfromtxt(find_path("random_data.csv"), delimiter=",")
    labels = np.genfromtxt(find_path("random_labels.csv"), delimiter=",")
    eucl = spasi.simplified_silhouette_coefficient(data, labels, metric = "euclidean")
    assert np.isclose(eucl, 0.01761300723620632), "Error in Euclidean simplified_silhouette_coefficient function"
    corr = spasi.simplified_silhouette_coefficient(data, labels, metric = "correlation")
    assert np.isclose(corr, 0.07464102055366918), "Error in correlation simplified_silhouette_coefficient function"
def test_spatial_silhouette():
    """Regression-check spatially-constrained silhouette on the 8x3x2 grid."""
    # Test on random data
    data = np.genfromtxt(find_path("random_data_spatial.csv"), delimiter=",")
    coords = np.argwhere(np.zeros((8, 3, 2)) == 0)
    cluster_ids = np.repeat(np.array([1,2,3,4]), 2*3*2)
    neighbors = spasi.get_list_neighbors(coords)
    eucl = spasi.silhouette_coefficient_spatial(data, cluster_ids, neighbors, metric = "euclidean")
    assert np.isclose(eucl, -0.0019062813008068388), "Error in Euclidean silhouette_coefficient_spatial function"
    corr = spasi.silhouette_coefficient_spatial(data, cluster_ids, neighbors, metric = "correlation")
    assert np.isclose(corr, -0.0013034499248535598), "Error in correlation silhouette_coefficient_spatial function"
def test_spatial_simplified_silhouette():
    """Regression-check spatial simplified silhouette on the 8x3x2 grid."""
    # Test on random data
    data = np.genfromtxt(find_path("random_data_spatial.csv"), delimiter=",")
    coords = np.argwhere(np.zeros((8, 3, 2)) == 0)
    cluster_ids = np.repeat(np.array([1,2,3,4]), 2*3*2)
    neighbors = spasi.get_list_neighbors(coords)
    eucl = spasi.simplified_silhouette_coefficient_spatial(data, cluster_ids, neighbors, metric = "euclidean")
    assert np.isclose(eucl, 0.06783823739924444), "Error in Euclidean simplified_silhouette_coefficient_spatial function"
    corr = spasi.simplified_silhouette_coefficient_spatial(data, cluster_ids, neighbors, metric = "correlation")
    assert np.isclose(corr, 0.22422765231602626), "Error in correlation simplified_silhouette_coefficient_spatial function"
def test_list_neighbors():
    """get_list_neighbors must reproduce the stored neighbor table for a 4x3x2 grid."""
    expected = pd.read_csv(find_path("list_neighbors.csv"))
    expected.columns = pd.RangeIndex(start=0, stop=5, step=1)
    coords = np.argwhere(np.zeros((4, 3, 2)) == 0)
    computed = pd.DataFrame(spasi.get_list_neighbors(coords))
    computed.columns = pd.RangeIndex(start=0, stop=5, step=1)
    assert pd.DataFrame.equals(expected, computed), "list_neighbors does not work"
# def test_main():
# assert main([]) == 0
| 55.431034 | 166 | 0.719285 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,677 | 0.260809 |
6ccc3a21c576fe1a0e44fce64e5401c4ac8e7df1 | 5,014 | py | Python | main_app.py | christopher-chandler/image2pdf | ac2b8f7c359448a36ca81633e890c88d2b94a0be | [
"MIT"
] | 1 | 2022-03-11T14:13:48.000Z | 2022-03-11T14:13:48.000Z | main_app.py | christopher-chandler/i2pdf | ac2b8f7c359448a36ca81633e890c88d2b94a0be | [
"MIT"
] | null | null | null | main_app.py | christopher-chandler/i2pdf | ac2b8f7c359448a36ca81633e890c88d2b94a0be | [
"MIT"
] | null | null | null | # Standard
import os
import platform
# Pip
import typer
import yaml
from PIL import Image
from PyPDF2 import PdfFileReader, PdfFileWriter
from yaml.scanner import ScannerError
from yaml.loader import SafeLoader
# Custom
from auxiliary.message_keys import MessageKeys as mk
from auxiliary.file_explorer import FileExplorer
# Typer app
app = typer.Typer()
# Files
# FileExplorer rooted at the launch directory; resolves the config/images/
# pdfs/results folders and the files inside them.
current_dir = os.getcwd()
files = FileExplorer(home_dir=current_dir)
# Message keys
# Short aliases for the per-command bundles of user-facing message strings.
generate = mk.GeneratePdf
add_meta = mk.AddMetadata
gen_dir = mk.GenerateDir
# Mac and Windows use different slashes.
# BUG FIX: on platforms other than Darwin/Windows (e.g. Linux) `slash` was
# never assigned, causing a NameError the first time a path was built;
# default to "/" for every non-Windows platform.
system: str = platform.system()
if system == "Windows":
    slash = "\\"
else:
    slash = "/"
@app.command(name=gen_dir.generate_dir, help=gen_dir.generate_dir_help)
def generate_directories() -> None:
    """
    Create the config, images, pdfs and results working directories
    in which the files to be combined reside.

    example:
        python main_app.py gen-dir
    :return:
        None
    """
    try:
        typer.echo(gen_dir.generating_dir)
        # Plain loop instead of a throwaway list comprehension that was
        # used only for its side effects.
        for folder in ("config", "images", "pdfs", "results"):
            os.makedirs(folder)
        typer.echo(gen_dir.directory_generated)
    except FileExistsError:
        typer.echo(gen_dir.folders_exists)
@app.command(name=generate.generate_pdf_name,
             help=generate.generate_pdf_command)
def generate_pdf(save_name: str = typer.Argument("generated",
                                                 help=generate.generate_pdf_help
                                                 )) -> None:
    """
    Combine every supported image found in the images directory into a
    single .pdf placed in the pdfs directory.

    Supported extensions (via PIL): .jpg, .jpeg, .gif, .png, .tga.

    example:
        python main_app.py gen-pdf
    :arg:
        save_name: str the name of the .pdf file being saved.
    :returns
        None
    """
    image_dir: str = files.get_folders().get("images", "")
    if not os.path.exists(image_dir):
        raise SystemExit(typer.echo(generate.missing_directory))
    supported = [".jpg", ".jpeg", ".gif", ".png", ".tga"]
    pages: list = []
    for entry in sorted(os.listdir(image_dir)):
        if os.path.splitext(entry)[1].lower() not in supported:
            continue
        pages.append(Image.open(os.path.join(image_dir, entry)))
    if not pages:
        typer.echo(generate.no_images)
        return
    folders = files.get_folders()
    target: str = fr"{folders.get('pdfs')}{slash}{save_name}.pdf"
    # .pdf generation
    typer.echo(generate.images_generate)
    pages[0].save(target, save_all=True, append_images=pages[1:])
    typer.echo(generate.file_created)
@app.command(name=add_meta.add_metadata_name, help=add_meta.add_metadata_help)
def add_metadata(pdf_name: str = typer.Argument("", help=add_meta.meta_pdf),
                 config_name: str = typer.Argument("", help=add_meta.yaml_config),
                 save_name: str = typer.Argument("results", help=add_meta.save_name)
                 ) -> None:
    """
    Attach the metadata found in a .yaml config file to a .pdf, writing the
    result to the results directory as {save_name}_{pdf_name}.

    example:
        python main_app.py add-metadata gen.pdf test.yaml
    :arg:
        pdf_name: str name of the .pdf that should receive metadata
        config_name: str name of the .yaml file containing the metadata
        save_name: str prefix for the output file in results/
    :returns
        None
    """
    # Loading .pdf file
    try:
        pdf: str = files.get_files("pdfs").get(pdf_name)
        pdf_in = open(pdf, "rb")
    except TypeError:
        raise SystemExit((typer.echo(add_meta.pdf_not_exists)))
    try:
        # Loading .yaml file
        # BUG FIX: the original never closed the yaml handle; a context
        # manager now guarantees it.
        try:
            config_file: str = files.get_files("config").get(config_name)
            with open(config_file, mode="r") as yfile:
                yaml_meta = yaml.load(yfile, Loader=SafeLoader)
        except (TypeError, ScannerError, AttributeError) as error:
            if "yaml" in str(error):
                raise SystemExit(typer.echo(add_meta.yaml_error))
            else:
                raise SystemExit(typer.echo(add_meta.yaml_not_exist))
        try:
            # Copy the pages, carry over the source document's own info,
            # then overlay the metadata from the config file.
            reader = PdfFileReader(pdf_in)
            writer = PdfFileWriter()
            writer.appendPagesFromReader(reader)
            writer.addMetadata(reader.getDocumentInfo())
            writer.addMetadata(yaml_meta)
            save_path: str = files.get_folders().get("results")
            # BUG FIX: the output handle is now closed even if writing fails.
            with open(rf"{save_path}{slash}{save_name}_{pdf_name}", "wb") as pdf_out:
                writer.write(pdf_out)
            # Added metadata
            typer.echo(add_meta.metadata_added)
        except OSError:
            raise SystemExit((typer.echo(add_meta.pdf_corrupt)))
    finally:
        # BUG FIX: the input .pdf is now closed on every exit path, not
        # only on success.
        pdf_in.close()
if __name__ == "__main__":
app() | 28.011173 | 79 | 0.640207 | 0 | 0 | 0 | 0 | 4,283 | 0.854208 | 0 | 0 | 1,480 | 0.295174 |
6ccc8df451a2936e28cf2b4cd839f862b2e3add8 | 888 | py | Python | predictions/views.py | Mustapha-Belkacim/English-Premier-League-predictor | 2950d70e9aa80cc8c39f102029fb460b992f1e36 | [
"MIT"
] | 5 | 2018-02-27T18:03:45.000Z | 2018-07-23T11:40:55.000Z | predictions/views.py | Mustapha-Belkacim/Russia-2018-World-Cup-Predictor | 2950d70e9aa80cc8c39f102029fb460b992f1e36 | [
"MIT"
] | 1 | 2018-12-10T04:33:24.000Z | 2018-12-10T04:33:24.000Z | predictions/views.py | Mustapha-Belkacim/English-Premier-League-predictor | 2950d70e9aa80cc8c39f102029fb460b992f1e36 | [
"MIT"
] | 1 | 2018-02-26T14:23:40.000Z | 2018-02-26T14:23:40.000Z | from django.shortcuts import render
from django.views import View, generic
from .services.predictor import get_results
class Index(View):
    """Landing page: renders season predictions for a chosen model.

    GET renders predictions for the default model/season; POST re-renders
    with the model and season selected in the submitted form.
    """
    template_name = 'predictions/index.html'
    model = 'xgboost'
    season = '16/17'
    results = ''
    leadboard = ''

    def get(self, request):
        self.results = get_results(self.season)
        #self.results = predict_season(self.season, self.model)
        return self._render(request)

    def post(self, request):
        self.model = request.POST['model']
        self.season = request.POST['season']
        self.results = get_results(self.season)
        return self._render(request)

    def _render(self, request):
        # Shared by get/post: both previously duplicated this render call.
        return render(request, self.template_name, {'results': self.results,
                                                    'leadboard': self.leadboard})
| 37 | 80 | 0.600225 | 767 | 0.863739 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.173423 |
6ccd0655a845c86a3f1d3906dda9691881f8e438 | 100 | py | Python | typeit/sums/__init__.py | avanov/type | dbf2a94de13b592987695b7346f10cbf53acf3af | [
"MIT"
] | 8 | 2018-06-17T16:01:12.000Z | 2021-11-05T23:34:55.000Z | typeit/sums/__init__.py | avanov/type | dbf2a94de13b592987695b7346f10cbf53acf3af | [
"MIT"
] | 71 | 2018-06-23T15:31:56.000Z | 2021-03-09T16:56:50.000Z | typeit/sums/__init__.py | avanov/type | dbf2a94de13b592987695b7346f10cbf53acf3af | [
"MIT"
] | 1 | 2021-11-05T23:34:57.000Z | 2021-11-05T23:34:57.000Z | from .impl import SumType
from .types import Either, Maybe
__all__ = ('SumType', 'Either', 'Maybe') | 25 | 40 | 0.72 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.24 |
6ccd2235be6f078c26206338550424d6462f8d27 | 2,599 | py | Python | CICFlowMeter-4.0/bin/utility.py | codingbaobao/React-TorDetection | 20a2b7cbc1b2119f35188be799f8d8e1c8a6fed0 | [
"MIT"
] | null | null | null | CICFlowMeter-4.0/bin/utility.py | codingbaobao/React-TorDetection | 20a2b7cbc1b2119f35188be799f8d8e1c8a6fed0 | [
"MIT"
] | null | null | null | CICFlowMeter-4.0/bin/utility.py | codingbaobao/React-TorDetection | 20a2b7cbc1b2119f35188be799f8d8e1c8a6fed0 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import pickle as pk
import random
from sklearn.metrics import accuracy_score
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import Imputer
def read_csv(csv_path):
    """Load the CSV file at ``csv_path`` into a pandas DataFrame."""
    return pd.read_csv(csv_path)
def encode_label(Y):
    """Encode class labels as consecutive integer codes.

    :param Y: iterable of labels (strings or numbers)
    :return: numpy array of integer codes, one per element of Y
    """
    le = preprocessing.LabelEncoder()
    # CLEANUP: the original assigned le.fit(Y) (the encoder object itself)
    # to `cls` and immediately overwrote it with le.transform(Y);
    # fit_transform does both steps in one call.
    return le.fit_transform(Y)
def split_test(num_data, percent):
    """Randomly pick ``percent`` of the indices 0..num_data-1 without repeats."""
    sample_size = int(num_data * percent)
    return random.sample(range(num_data), sample_size)
def save_pk(data, pk_path):
    """Pickle ``data`` to the file at ``pk_path``."""
    with open(pk_path, 'wb') as handle:
        pk.dump(data, handle)
def read_pk(pk_path):
    """Load and return the pickled object stored at ``pk_path``."""
    with open(pk_path, 'rb') as handle:
        return pk.load(handle)
def random_split_test_save(num_data, pk_path, ratio=0.1):
    """Sample a random test split and pickle the chosen indices to disk."""
    chosen = split_test(num_data, ratio)
    save_pk(chosen, pk_path)
def list_to_float(data):
    """Pack a list of digit strings (e.g. the octets of an IP address split
    on '.') into a single integer by positional concatenation."""
    total = 0
    shift = 0
    for part in reversed(data):
        total += int(part) * 10 ** shift
        shift += len(part)
    return total
def X_preprocessing(X, scenario):
    """Sanitize a feature matrix extracted from the flow CSV.

    For scenario 'A' the IP-address strings in columns 0 and 2 are packed
    into integers via list_to_float; scenario 'B' leaves them untouched.
    NaN and 'Infinity' cells are then zeroed element-wise.

    :param X: 2-D numpy array (object dtype when it mixes strings/floats)
    :param scenario: 'A' (IP columns present) or 'B'
    :return: the cleaned array (modified in place and returned)
    """
    rows = X.shape[0]
    # convert ip to float
    for i in range(rows):
        for j in [0, 2]:
            if scenario == 'A':
                X[i, j] = list_to_float(X[i, j].split('.'))
            elif scenario == 'B':
                pass
    # BUG FIX: `np.where(X == np.nan)` never matched anything because
    # NaN != NaN, and indexing with np.where(...)[0] zeroed entire ROWS
    # instead of the offending cells.  Use element-wise boolean masks.
    cells = np.asarray(X, dtype=object)
    nan_mask = pd.isnull(cells)
    inf_mask = cells == 'Infinity'
    print ('nan cells = {}'.format(int(nan_mask.sum())))
    print ('inf cells = {}'.format(int(np.asarray(inf_mask).sum())))
    X[nan_mask] = 0
    X[inf_mask] = 0
    return X
if __name__ == '__main__':
    # Train and evaluate a random forest on the Scenario-A Tor traffic CSV.
    csv_path = '../../TorCSV/CSV/Scenario-A/merged_5s.csv'
    df = read_csv(csv_path)
    print ('read CSV !!!')
    df_mat = df.as_matrix()
    # get input X and label Y #
    X = df_mat[:, :-1]
    Y = df_mat[:, -1]
    # BUG FIX: X_preprocessing requires a scenario argument; this CSV is
    # Scenario A, so pass 'A' (the original bare call raised TypeError).
    X = X_preprocessing(X, 'A')
    # read the list idx to test #
    pk_path = 'selected_id.pkl'
    test_idx = read_pk(pk_path)
    # encode label #
    le = preprocessing.LabelEncoder()
    cls = le.fit(Y)
    Y = le.transform(Y)
    X_test = X[test_idx, :]
    Y_test = Y[test_idx]
    X_train = np.delete(X, test_idx, axis=0)
    Y_train = np.delete(Y, test_idx, axis=0)
    clf = RandomForestClassifier(max_depth=2, random_state=0)
    clf.fit(X_train, Y_train)
    Y_pred = clf.predict(X_test)
    print ('accuracy = {}'.format(accuracy_score(Y_test, Y_pred)))
    # BUG FIX: the model file handle was never closed; use a context manager.
    filename = 'randomForest.sav'
    with open(filename, 'wb') as model_file:
        pk.dump(clf, model_file)
| 22.798246 | 69 | 0.613313 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 344 | 0.132359 |
6ccd22609ab3e08378fdd219fad406bb1e80df1a | 7,546 | py | Python | forex_python/bitcoin.py | Otisey/forex-python | a34d074b8ee7411cd2868ea3945793ef43bf7965 | [
"MIT"
] | 505 | 2016-05-21T04:50:19.000Z | 2022-03-29T04:40:36.000Z | forex_python/bitcoin.py | Otisey/forex-python | a34d074b8ee7411cd2868ea3945793ef43bf7965 | [
"MIT"
] | 92 | 2016-05-22T09:26:23.000Z | 2022-02-18T11:26:56.000Z | forex_python/bitcoin.py | Otisey/forex-python | a34d074b8ee7411cd2868ea3945793ef43bf7965 | [
"MIT"
] | 166 | 2016-05-21T04:52:49.000Z | 2022-03-25T03:57:24.000Z | from decimal import Decimal
import simplejson as json
import requests
from .converter import RatesNotAvailableError, DecimalFloatMismatchError
class BtcConverter(object):
    """
    Bitcoin price lookup and conversion backed by the CoinDesk BPI API.

    When force_decimal is True, every price/amount is handled as
    decimal.Decimal instead of float.
    """

    def __init__(self, force_decimal=False):
        self._force_decimal = force_decimal

    def _decode_rates(self, response, use_decimal=False):
        """Decode a JSON response body, optionally parsing numbers as Decimal."""
        if self._force_decimal or use_decimal:
            decoded_data = json.loads(response.text, use_decimal=True)
        else:
            decoded_data = response.json()
        return decoded_data

    @staticmethod
    def _historical_close_url(start, end, currency):
        """Build the CoinDesk historical-close endpoint URL for a date range.

        BUG FIX: the query string had been corrupted to '¤cy=' (an HTML
        '&curren;' entity artifact); the API expects '&currency='.
        """
        return (
            'https://api.coindesk.com/v1/bpi/historical/close.json'
            '?start={}&end={}&currency={}'.format(
                start, end, currency
            )
        )

    def get_latest_price(self, currency):
        """
        Latest price of one bitcoin in the given currency (1 BTC => X CUR).
        Returns None when the rate source is unavailable.
        """
        url = 'https://api.coindesk.com/v1/bpi/currentprice/{}.json'.format(currency)
        response = requests.get(url)
        if response.status_code == 200:
            data = response.json()
            price = data.get('bpi').get(currency, {}).get('rate_float', None)
            if self._force_decimal:
                return Decimal(price)
            return price
        return None

    def get_previous_price(self, currency, date_obj):
        """
        Price of one bitcoin in `currency` on the given date.
        Raises RatesNotAvailableError when the source is unavailable.
        """
        day = date_obj.strftime('%Y-%m-%d')
        url = self._historical_close_url(day, day, currency)
        response = requests.get(url)
        if response.status_code == 200:
            data = response.json()
            price = data.get('bpi', {}).get(day, None)
            if self._force_decimal:
                return Decimal(price)
            return price
        raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given date")

    def get_previous_price_list(self, currency, start_date, end_date):
        """
        Dict mapping ISO date string -> price for the inclusive date range.
        Returns an empty dict when the source is unavailable.
        """
        start = start_date.strftime('%Y-%m-%d')
        end = end_date.strftime('%Y-%m-%d')
        url = self._historical_close_url(start, end, currency)
        response = requests.get(url)
        if response.status_code == 200:
            data = self._decode_rates(response)
            price_dict = data.get('bpi', {})
            return price_dict
        return {}

    def convert_to_btc(self, amount, currency):
        """
        Convert `amount` of `currency` into bitcoins at the latest rate.
        """
        # A Decimal amount forces Decimal arithmetic even without force_decimal.
        if isinstance(amount, Decimal):
            use_decimal = True
        else:
            use_decimal = self._force_decimal
        url = 'https://api.coindesk.com/v1/bpi/currentprice/{}.json'.format(currency)
        response = requests.get(url)
        if response.status_code == 200:
            data = response.json()
            price = data.get('bpi').get(currency, {}).get('rate_float', None)
            if price:
                if use_decimal:
                    price = Decimal(price)
                try:
                    converted_btc = amount/price
                    return converted_btc
                except TypeError:
                    raise DecimalFloatMismatchError("convert_to_btc requires amount parameter is of type Decimal when force_decimal=True")
        raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given date")

    def convert_btc_to_cur(self, coins, currency):
        """
        Convert `coins` bitcoins into `currency` at the latest rate.
        """
        if isinstance(coins, Decimal):
            use_decimal = True
        else:
            use_decimal = self._force_decimal
        url = 'https://api.coindesk.com/v1/bpi/currentprice/{}.json'.format(currency)
        response = requests.get(url)
        if response.status_code == 200:
            data = response.json()
            price = data.get('bpi').get(currency, {}).get('rate_float', None)
            if price:
                if use_decimal:
                    price = Decimal(price)
                try:
                    converted_amount = coins * price
                    return converted_amount
                except TypeError:
                    raise DecimalFloatMismatchError("convert_btc_to_cur requires coins parameter is of type Decimal when force_decimal=True")
        raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given date")

    def convert_to_btc_on(self, amount, currency, date_obj):
        """
        Convert `amount` of `currency` into bitcoins at the given date's rate.
        """
        if isinstance(amount, Decimal):
            use_decimal = True
        else:
            use_decimal = self._force_decimal
        day = date_obj.strftime('%Y-%m-%d')
        url = self._historical_close_url(day, day, currency)
        response = requests.get(url)
        if response.status_code == 200:
            data = response.json()
            price = data.get('bpi', {}).get(day, None)
            if price:
                if use_decimal:
                    price = Decimal(price)
                try:
                    converted_btc = amount/price
                    return converted_btc
                except TypeError:
                    raise DecimalFloatMismatchError("convert_to_btc_on requires amount parameter is of type Decimal when force_decimal=True")
        raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given Date")

    def convert_btc_to_cur_on(self, coins, currency, date_obj):
        """
        Convert `coins` bitcoins into `currency` at the given date's rate.
        """
        if isinstance(coins, Decimal):
            use_decimal = True
        else:
            use_decimal = self._force_decimal
        day = date_obj.strftime('%Y-%m-%d')
        url = self._historical_close_url(day, day, currency)
        response = requests.get(url)
        if response.status_code == 200:
            data = response.json()
            price = data.get('bpi', {}).get(day, None)
            if price:
                if use_decimal:
                    price = Decimal(price)
                try:
                    converted_btc = coins*price
                    return converted_btc
                except TypeError:
                    raise DecimalFloatMismatchError("convert_btc_to_cur_on requires amount parameter is of type Decimal when force_decimal=True")
        raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given Date")

    def get_symbol(self):
        """
        Unicode symbol for bitcoin (U+0E3F).
        """
        return "\u0E3F"
# Module-level singleton plus bare-function aliases so callers can use e.g.
# bitcoin.get_latest_price(...) without building their own BtcConverter.
_Btc_Converter = BtcConverter()
get_btc_symbol = _Btc_Converter.get_symbol
convert_btc_to_cur_on = _Btc_Converter.convert_btc_to_cur_on
convert_to_btc_on = _Btc_Converter.convert_to_btc_on
convert_btc_to_cur = _Btc_Converter.convert_btc_to_cur
convert_to_btc = _Btc_Converter.convert_to_btc
get_latest_price = _Btc_Converter.get_latest_price
get_previous_price = _Btc_Converter.get_previous_price
get_previous_price_list = _Btc_Converter.get_previous_price_list
| 37.542289 | 145 | 0.579777 | 6,936 | 0.919162 | 0 | 0 | 0 | 0 | 0 | 0 | 1,841 | 0.24397 |
6ccd772b6551e6f64f8a47c7fc86c3a1b0ee9be4 | 10,805 | py | Python | iphone_manager.py | karenleewaddell/iPhone_Manager | ce683c349cbb2ed03cb96b9cf21adc17903bb861 | [
"MIT"
] | 17 | 2019-11-03T22:57:08.000Z | 2021-09-13T14:23:41.000Z | iphone_manager.py | karenleewaddell/iPhone_Manager | ce683c349cbb2ed03cb96b9cf21adc17903bb861 | [
"MIT"
] | null | null | null | iphone_manager.py | karenleewaddell/iPhone_Manager | ce683c349cbb2ed03cb96b9cf21adc17903bb861 | [
"MIT"
] | 5 | 2019-11-03T23:02:00.000Z | 2020-11-10T19:43:12.000Z | # iPhone Manager bot by Oldmole
# No support will be provided; this code is provided "as is" without warranty of any kind, either express or implied. Use at your own risk.
# The use of the software and scripts is done at your own discretion and risk, and with the agreement that you will be solely responsible for any damage
# to your computer system or loss of data that results from such activities.
#
# If you like the bot and would like to buy me a pint, DM @oldmole#3895 and ask for my Paypal info.
# If you update the bot, please send me a copy! :-)
# GITHUB : https://github.com/sonofmole/iPhone_Manager
import sys
import yaml
import sqlite3
import hashlib
import discord
import psutil
from discord.ext import commands
import subprocess
import asyncio
from subprocess import Popen, PIPE
from sqlite3 import OperationalError
import time
import math
class IPhone:
    """One managed handset: its UDID plus a friendly name and short ID."""

    def __init__(self, device_uuid, iphone_name, iphone_id):
        # iphone_id is a 4-character SHA1-derived short identifier
        self.iphone_id = iphone_id
        self.iphone_name = iphone_name
        self.device_uuid = device_uuid
# A list of the commands
# Alternating "command usage" / "description" strings, joined and sent to
# the channel by help_command.
command_list = [
    "!sc {name of iphone} or !sc {iphone ID}",
    "Screenshots an iphone and uploads that screenshot to discord\n",
    "!list iphones", "Lists the name and ID of all the available iphones\n",
    "!kill usb","Finds the proccess ID for usbmuxd and kill's it\n",
    "!mac grab",
    "Takes a screengrab of your Mac and uploads that screengrab to discord\n",
    "!reboot {name of iphone} or !reboot {iphone ID}",
    "Reboot's an iPhone\n",
    "!reload {name of iphone} or !reload {iphone ID}",
    "Find's and kill's the PID for an iPhone's Xcode. Pogo will start again\n",
    "!log {name of iphone} or !log {iphone ID}",
    "The bot will print out x lines from the device log file (x is set the config file)\n",
    "!uplog {name of iphone} or !uplog {iphone ID}",
    "The bot upload the device's log file with the last x lines (x is set the config file)\n",
    "!help",
    "Displays this list"
]
print("The iPhone Manager by Oldmole ready!")
# Load the bot configuration; the bot cannot run without it.
try:
    with open(r'config.yaml') as file:
        documents = yaml.safe_load(file)
except FileNotFoundError:
    print ("**** FULL STOP! ***** config.yaml NOT FOUND! ****")
    sys.exit()
#logpath = documents.get("logpath")
# Config values: line counts for !log / !uplog, the Discord bot token,
# the authorized role, and the single channel the bot listens on.
loglines = documents.get("loglines")
uploglines = documents.get("uploglines")
token = documents.get("token")
role = documents.get("role")
channel = documents.get("channel")
iphone_list = []
log_list = documents.get("logpath")
database_list = documents.get("paths")
db_count = len(database_list)
db_error = 0
# Populate iphone_list from each configured SQLite database; the short
# iphone_id is the first 4 hex chars of sha1(uuid + name).
for dpath in database_list:
    try:
        connection = sqlite3.connect(dpath)
        cursor = connection.cursor()
        cursor.execute('SELECT * FROM device LIMIT 1,100')
        rows = cursor.fetchall()
        for row in rows:
            uuid, name = row[0], row[1]
            digest = hashlib.sha1((uuid + name).encode()).hexdigest()
            iphone_list.append(IPhone(uuid, name, digest[:4]))
        connection.commit()
    except sqlite3.OperationalError:
        db_error += 1
        print ("*** Error reading from %s database" % dpath)
        print ("*** Wrong database name or path? ***")
        print ("\n")
    # NOTE(review): if sqlite3.connect itself fails, `connection` is unbound
    # here and this finally would raise NameError — confirm before relying
    # on this path.
    finally:
        connection.close()
# Abort if every configured database failed to load.
if db_error == db_count:
    print ("**** FULL STOP! ***** Can't read from any database ****")
    sys.exit()
async def reboot_command(params, message):
    """Handle "!reboot <name|id>": reboot every matching iPhone via
    idevicediagnostics and report the outcome to the channel."""
    target = ''.join(params)
    for phone in iphone_list:
        if target != phone.iphone_name and target != phone.iphone_id:
            continue
        result = subprocess.run(
            ["idevicediagnostics", "-u", phone.device_uuid, "restart"],
            universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if result.returncode == 0:
            await message.channel.send("%s is rebooting" % phone.iphone_name)
        else:
            await message.channel.send("Sorry, something has gone wrong... is the device connected?")
async def reload_command(params, message):
    """Handle "!reload <name|id>": kill the xcodebuild process that drives the
    matching iPhone so its app restarts.

    Replies "Yes, Done" on success, otherwise "Something has gone wrong".
    """
    target = ''.join(params)
    await message.channel.send("Can I get a reload")
    for phone in iphone_list:
        if target != phone.iphone_name and target != phone.iphone_id:
            continue
        for proc in psutil.process_iter():
            try:
                pinfo = proc.as_dict(attrs=['pid', 'name', 'cmdline'])
            except psutil.NoSuchProcess:
                continue
            if pinfo["name"] != "xcodebuild":
                continue
            # BUG FIX: cmdline can be None (e.g. zombie processes), which
            # made " ".join(...) raise TypeError; also dropped the unused
            # local that held the phone name.
            cmdline = " ".join(pinfo["cmdline"] or [])
            if phone.device_uuid in cmdline:
                psutil.Process(pinfo["pid"]).kill()
                await message.channel.send("Yes, Done")
                return
    await message.channel.send("Something has gone wrong")
async def kill_command(params, message):
    """Handle "!kill usb": find the usbmuxd daemon and kill it.

    Replies with the search result; does nothing for any argument
    other than "usb".
    """
    # CLEANUP: removed the unused local `name = ""`.
    if ''.join(params) != "usb":
        return
    await message.channel.send("Trying to finding and Kill usbmuxd. If I find it I will let you know")
    for proc in psutil.process_iter():
        try:
            pinfo = proc.as_dict(attrs=['pid', 'name'])
        except psutil.NoSuchProcess:
            continue
        if pinfo["name"] == "usbmuxd":
            psutil.Process(pinfo["pid"]).kill()
            await message.channel.send("Found and Killed it")
            return
    # Loop finished without finding usbmuxd (the original used for-else).
    await message.channel.send("Sorry, something has gone wrong")
    return
async def help_command(params, message):
    """Handle "!help": post the command reference, but only when !help was
    given no arguments."""
    if ''.join(params):
        return
    await message.channel.send("You have these commands available: \n")
    await message.channel.send("\n".join(command_list))
async def mac_command(params, message):
    """Handle "!mac grab": capture the Mac desktop with screencapture and
    post the resulting image to the channel."""
    if ''.join(params) != "grab":
        return
    capture = subprocess.run(["screencapture", "mac.jpg"],
                             universal_newlines=True,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if capture.returncode != 0:
        return
    await message.channel.send("Taken a Mac Screengrab")
    await asyncio.sleep(1)
    await message.channel.send(file=discord.File('mac.jpg'))
async def list_iphones_command(params, message):
    """Handle "!list iphones": reply with the name and short ID of every
    known device."""
    if ''.join(params) != "iphones":
        return
    await message.channel.send("You have these iphones in your list:")
    entries = ["%s with an ID of %s" % (p.iphone_name, p.iphone_id)
               for p in iphone_list]
    await message.channel.send("\n".join(entries))
async def screengrab_command(params, message):
    """Handle "!sc <name|id>": screenshot the matching iPhone and post it.

    Replies with an error if the capture fails or if no device matches.
    """
    # CLEANUP: removed the unused local that held the phone name.
    target = ''.join(params)
    for phone in iphone_list:
        if target != phone.iphone_name and target != phone.iphone_id:
            continue
        # idevicescreenshot writes the capture to phone.jpg in the cwd
        cp = subprocess.run(["idevicescreenshot", "-u", phone.device_uuid, "phone.jpg"],
                            universal_newlines=True, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
        if cp.returncode == 0:
            await message.channel.send("Taken a screenshot")
            await asyncio.sleep(1)
            await message.channel.send(file=discord.File('phone.jpg'))
        else:
            await message.channel.send("Sorry, something has gone wrong... is the device connected?")
        return
    await message.channel.send("Sorry, something has gone wrong... can't find this device")
async def get_log(params, message):
    """Handle "!log <name|id>": locate the device's freshest "full" log under
    the configured log paths, take its last `loglines` lines, and post them
    to the channel in 10-line code-block chunks.

    NOTE(review): every subprocess call here uses shell=True with the device
    name (taken from chat input via the config-matched iphone_list entry)
    interpolated into the command string — a shell-injection risk if device
    names are not trusted; confirm and consider shell=False with arg lists.
    """
    params = ''.join(params)
    for x in iphone_list:
        if params == x.iphone_name or params == x.iphone_id:
            dname = x.iphone_name
            dname_formatted = '"*'+dname+'*"'
            # Newest (head -1) log file matching *<name>* and *full*,
            # modified within the last 30 minutes.
            cmd = "find . -name %s -name \"*full*\" -mtime -30m | head -1" %dname_formatted
            # Try each configured log directory until one yields a match.
            for xpath in log_list:
                logpath = xpath
                try:
                    getlogfilename = subprocess.run(cmd,shell=True, check=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT, universal_newlines=True,cwd=logpath)
                    glfn_output = getlogfilename.stdout.strip('\n')
                    if glfn_output != (""):
                        break
                except subprocess.CalledProcessError:
                    return
            glfn_output = '"'+glfn_output+'"'
            # Tail the last `loglines` lines into a scratch file.
            cmd2 = "tail -%d %s > tempfile" % (loglines ,glfn_output)
            try:
                readfile = subprocess.run(cmd2,shell=True, check=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT, universal_newlines=True,cwd=logpath)
                rf_output2 = readfile.stdout.strip('\n')
            except subprocess.CalledProcessError:
                await message.channel.send("Sorry command failed, log not found")
                return
            # Post the scratch file in 10-line slices (sed range, then head),
            # pausing between sends to respect Discord rate limits.
            i , line_loop , line_loop1, = 0 , 1 , 10
            log_loop = loglines / 10
            log_loop = (math.ceil(log_loop))
            while i < log_loop:
                cmd3 = "sed -n %d,%dp tempfile > tempout" %(line_loop,line_loop1)
                cmd3p = subprocess.run(cmd3,shell=True, check=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT, universal_newlines=True,cwd=logpath)
                cmd4 = "head -10 tempout"
                readline1 = subprocess.run(cmd4,shell=True, check=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT, universal_newlines=True,cwd=logpath)
                rf_output3 = readline1.stdout.strip('\n')
                time.sleep(.900)
                await message.channel.send("```%s```" % rf_output3)
                i += 1
                line_loop += 10
                line_loop1 += 10
            return
async def up_log(params, message):
    """Find the newest "full" log file for the requested device, trim it to
    the last ``uploglines`` lines, and upload it as a Discord attachment.

    params  -- command arguments; joined into a device name or device id.
    message -- the triggering discord.py Message, used for replies.
    """
    params = ''.join(params)
    for x in iphone_list:
        if params == x.iphone_name or params == x.iphone_id:
            dname = x.iphone_name
            # Double-quoted glob pattern for the shell command below.
            dname_formatted = '"*'+dname+'*"'
            # Newest (head -1) file whose name contains the device name and
            # "full" — assumes find accepts '-mtime -30m' (BSD-style suffix);
            # TODO confirm on this host.
            cmd = "find . -name %s -name \"*full*\" -mtime -30m | head -1" %dname_formatted
            # Probe each configured log directory until a match is found.
            for xpath in log_list:
                logpath = xpath
                try:
                    getlogfilename = subprocess.run(cmd,shell=True, check=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT, universal_newlines=True,cwd=logpath)
                    glfn_output = getlogfilename.stdout.strip('\n')
                    if glfn_output != (""):
                        break
                except subprocess.CalledProcessError:
                    # NOTE(review): returns on the first failing path instead
                    # of trying the remaining entries in log_list — confirm.
                    return
            # Re-quote for the shell; the trimmed copy is named "<device>.log".
            glfn_output = '"'+glfn_output+'"'
            logname = '"'+dname+'.log"'
            cmd2 = "tail -%d %s > %s" % (uploglines ,glfn_output, logname)
            try:
                readfile = subprocess.run(cmd2,shell=True, check=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT, universal_newlines=True,cwd=logpath)
                rf_output2 = readfile.stdout.strip('\n')
            except subprocess.CalledProcessError:
                await message.channel.send("Sorry command failed, log not found")
                return
            await message.channel.send("Uploading your last %d logfile line" %uploglines)
            # Unquoted variant for the local filesystem path passed to discord.File.
            logname = ''+dname+'.log'
            await message.channel.send(file=discord.File('%s/%s' % (logpath,logname)))
            return
# Chat-command dispatch table: maps the leading "!word" of a message to the
# async handler coroutine that services it (see check_command below).
command_dict = {
    "!sc": screengrab_command,
    "!help": help_command,
    "!reboot": reboot_command,
    "!reload": reload_command,
    "!mac": mac_command,
    "!list" : list_iphones_command,
    "!kill" : kill_command,
    "!log" : get_log,
    "!uplog" : up_log
}
async def check_command(message_text, message):
    """Dispatch one chat line: the first word selects the handler from
    command_dict; the remainder (if any) is passed along as its params."""
    cmd, *params = message_text.split(" ", 1)
    handler = command_dict.get(cmd)
    if handler is not None:
        await handler(params, message)
# Discord client instance; the @client.event handlers below register on it.
client = discord.Client()
@client.event
async def on_ready():
    # Once connected, advertise a "Playing Taking Selfies" presence.
    activity = discord.Game(name="Taking Selfies")
    await client.change_presence(status=discord.Status.online, activity=activity)
async def send_message(message):
    # NOTE(review): this sends the Message object itself as the content
    # (discord.py will stringify it) — probably meant to send message.content
    # or take a separate text argument; confirm before relying on it.
    await message.channel.send(message)
@client.event
async def on_message(message):
    """Filter incoming messages and hand valid ones to the command dispatcher.

    Drops the bot's own messages, anything outside the configured channel,
    and authors lacking the configured role.
    """
    if message.author == client.user:
        return
    if str(message.channel) != channel:
        return
    if role not in (r.name for r in message.author.roles):
        return
    await check_command(message.content, message)
# Blocking call: log in with the bot token and run the event loop until shutdown.
client.run(token)
| 32.841945 | 150 | 0.69838 | 166 | 0.015363 | 0 | 0 | 473 | 0.043776 | 7,256 | 0.671541 | 3,044 | 0.281721 |
6cceaf277e8db7db23e8454da2cdbade4678a488 | 1,892 | py | Python | backend/core/migrations/0001_initial.py | EMUNES/hust-mdb | 578a9113bc48e19559bb2e62340d38e40bd6ccd8 | [
"Apache-2.0"
] | 2 | 2021-04-14T02:43:48.000Z | 2021-05-17T06:37:10.000Z | backend/core/migrations/0001_initial.py | EMUNES/hust-mdb | 578a9113bc48e19559bb2e62340d38e40bd6ccd8 | [
"Apache-2.0"
] | null | null | null | backend/core/migrations/0001_initial.py | EMUNES/hust-mdb | 578a9113bc48e19559bb2e62340d38e40bd6ccd8 | [
"Apache-2.0"
] | 1 | 2021-04-14T02:57:15.000Z | 2021-04-14T02:57:15.000Z | # Generated by Django 3.1.7 on 2021-03-09 16:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Material table.

    db_column/verbose_name values are Chinese column labels and are part of
    the database schema — do not edit them here.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Material',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('series', models.CharField(db_column='系列', max_length=100, verbose_name='系列')),
                ('mark', models.CharField(db_column='牌号', max_length=100, verbose_name='牌号')),
                ('manufacturer', models.CharField(db_column='制造商', max_length=50, verbose_name='制造商')),
                ('link', models.CharField(db_column='链接', max_length=100, verbose_name='链接')),
                ('acronym', models.CharField(db_column='材料名称缩写', max_length=20, verbose_name='材料名称缩写')),
                ('material_type', models.CharField(db_column='材料类型', max_length=100, verbose_name='材料类型')),
                ('data_source', models.CharField(db_column='数据来源', max_length=100, verbose_name='数据来源')),
                ('last_modified_date', models.CharField(db_column='上次修改日期', max_length=50, verbose_name='上次修改日期')),
                ('test_date', models.CharField(db_column='测试日期', max_length=50, verbose_name='测试日期')),
                ('data_status', models.CharField(db_column='数据状态', max_length=50, verbose_name='数据状态')),
                ('material_id', models.CharField(db_column='材料ID', max_length=20, verbose_name='材料ID')),
                ('level_code', models.CharField(db_column='等级代码', max_length=50, verbose_name='等级代码')),
                ('vendor_code', models.CharField(db_column='供应商代码', max_length=50, verbose_name='供应商代码')),
                ('fibre_or_infill', models.CharField(db_column='纤维/填充物', max_length=100, verbose_name='纤维/填充物')),
            ],
        ),
    ]
| 54.057143 | 115 | 0.62315 | 2,011 | 0.955798 | 0 | 0 | 0 | 0 | 0 | 0 | 615 | 0.2923 |
6cd0a9860da602a73497ad759994eead19d6e3b7 | 3,514 | py | Python | announcements/migrations/0001_initial.py | GeoNode/geonode-announcements | 2e2d2b36c7c113f3d67bcb57d9fa8b458fef0d8e | [
"MIT"
] | null | null | null | announcements/migrations/0001_initial.py | GeoNode/geonode-announcements | 2e2d2b36c7c113f3d67bcb57d9fa8b458fef0d8e | [
"MIT"
] | 5 | 2016-02-23T16:51:10.000Z | 2020-02-24T10:18:10.000Z | announcements/migrations/0001_initial.py | GeoNode/geonode-announcements | 2e2d2b36c7c113f3d67bcb57d9fa8b458fef0d8e | [
"MIT"
] | 7 | 2015-02-12T07:06:16.000Z | 2020-02-20T00:46:54.000Z | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.db import migrations, models
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
    """Auto-generated initial migration for the announcements app.

    Creates Announcement (the notice itself) and Dismissal (a per-user
    record of a dismissed announcement).
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Announcement',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=50, verbose_name='title')),
                ('level', models.IntegerField(default=1, choices=[(1, 'General'), (2, 'Warning'), (3, 'Critical')])),
                ('content', models.TextField(verbose_name='content')),
                ('creation_date',
                 models.DateTimeField(default=django.utils.timezone.now, verbose_name='creation_date')),
                ('site_wide', models.BooleanField(default=False, verbose_name='site wide')),
                ('members_only', models.BooleanField(default=False, verbose_name='members only')),
                ('dismissal_type',
                 models.IntegerField(
                     default=2,
                     choices=[
                         (1, 'No Dismissals Allowed'),
                         (2, 'Session Only Dismissal'),
                         (3, 'Permanent Dismissal Allowed')])),
                ('publish_start',
                 models.DateTimeField(default=django.utils.timezone.now, verbose_name='publish_start')),
                ('publish_end', models.DateTimeField(null=True, verbose_name='publish_end', blank=True)),
                ('creator', models.ForeignKey(verbose_name='creator',
                                              to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
            ],
            options={
                'verbose_name': 'announcement',
                'verbose_name_plural': 'announcements',
            },
        ),
        migrations.CreateModel(
            name='Dismissal',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('dismissed_at', models.DateTimeField(default=django.utils.timezone.now)),
                ('announcement', models.ForeignKey(related_name='dismissals',
                                                   to='announcements.Announcement', on_delete=models.CASCADE)),
                ('user', models.ForeignKey(related_name='announcement_dismissals',
                                           to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
            ],
        ),
    ]
| 47.486486 | 117 | 0.566591 | 2,570 | 0.73136 | 0 | 0 | 0 | 0 | 0 | 0 | 1,335 | 0.379909 |
6cd189a584f06e5a67d45c96e8f7182834beb52a | 1,086 | py | Python | 2021/5/solution2.py | frenzymadness/aoc | c9018e757bae61a696e675a827aef873995abdd3 | [
"WTFPL"
] | 2 | 2020-12-04T09:45:38.000Z | 2020-12-07T14:06:12.000Z | 2021/5/solution2.py | frenzymadness/aoc | c9018e757bae61a696e675a827aef873995abdd3 | [
"WTFPL"
] | null | null | null | 2021/5/solution2.py | frenzymadness/aoc | c9018e757bae61a696e675a827aef873995abdd3 | [
"WTFPL"
] | null | null | null | import re
from dataclasses import dataclass
from collections import defaultdict
from itertools import cycle
@dataclass
class Line:
x1: int
y1: int
x2: int
y2: int
def all_points(self):
stepx = 1 if self.x1 < self.x2 else -1
stepy = 1 if self.y1 < self.y2 else -1
if self.x1 == self.x2:
x = cycle((self.x1,))
else:
x = range(self.x1, self.x2+stepx, stepx)
if self.y1 == self.y2:
y = cycle((self.y1,))
else:
y = range(self.y1, self.y2+stepy, stepy)
yield from zip(x, y)
def __repr__(self):
return f"{self.x1},{self.y1} -> {self.x2},{self.y2}"
# Parse "x1,y1 -> x2,y2" records into Line segments.
segments = []
with open("input.txt") as source:
    for raw in source:
        found = re.match(r"(\d+),(\d+) -> (\d+),(\d+)", raw)
        segments.append(Line(*(int(v) for v in found.groups())))

# Tally how many segments cover each grid point.
coverage = defaultdict(int)
for segment in segments:
    for point in segment.all_points():
        coverage[point] += 1

# Answer: number of points covered by at least two lines.
print(sum(1 for hits in coverage.values() if hits > 1))
| 24.133333 | 61 | 0.55709 | 569 | 0.523941 | 418 | 0.384899 | 580 | 0.53407 | 0 | 0 | 85 | 0.078269 |
6cd18b31f1d34f290e9b9c7833de3af5f36e1b66 | 1,513 | py | Python | academica/migrations/0003_auto_20201220_1016.py | Chaoslecion123/tesis-final-aradiel-2020 | bf6b4b6f08ab11da24bf965433fc36bde2f9606b | [
"MIT"
] | null | null | null | academica/migrations/0003_auto_20201220_1016.py | Chaoslecion123/tesis-final-aradiel-2020 | bf6b4b6f08ab11da24bf965433fc36bde2f9606b | [
"MIT"
] | null | null | null | academica/migrations/0003_auto_20201220_1016.py | Chaoslecion123/tesis-final-aradiel-2020 | bf6b4b6f08ab11da24bf965433fc36bde2f9606b | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-12-20 15:16
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration that tears down the academica schema.

    Foreign-key fields are removed first so the dependent models can then
    be deleted without constraint violations.
    """
    dependencies = [
        ('academica', '0002_auto_20201220_0117'),
    ]
    operations = [
        # Drop relational fields before deleting the models they point at.
        migrations.RemoveField(
            model_name='clase',
            name='nivel',
        ),
        migrations.RemoveField(
            model_name='clase_profesor',
            name='clase',
        ),
        migrations.RemoveField(
            model_name='clase_profesor',
            name='profesor',
        ),
        migrations.RemoveField(
            model_name='nota',
            name='alumno',
        ),
        migrations.RemoveField(
            model_name='nota',
            name='clase',
        ),
        migrations.RemoveField(
            model_name='nota',
            name='periodo',
        ),
        migrations.RemoveField(
            model_name='perfil_profesor',
            name='nivel',
        ),
        migrations.RemoveField(
            model_name='perfil_profesor',
            name='usuario',
        ),
        # Now the tables themselves can go.
        migrations.DeleteModel(
            name='Alumno',
        ),
        migrations.DeleteModel(
            name='Clase',
        ),
        migrations.DeleteModel(
            name='Clase_Profesor',
        ),
        migrations.DeleteModel(
            name='Nota',
        ),
        migrations.DeleteModel(
            name='Perfil_Profesor',
        ),
        migrations.DeleteModel(
            name='Periodo',
        ),
    ]
| 23.640625 | 49 | 0.497687 | 1,428 | 0.94382 | 0 | 0 | 0 | 0 | 0 | 0 | 301 | 0.198942 |
6cd190831e8896cc6ffcbd5d6642cbfc033e5873 | 721 | py | Python | devday/talk/context_processors.py | jenslauterbach/devday_website | a827c9237e656842542eff07ec9fa7b39716a0ee | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 6 | 2018-09-30T20:18:01.000Z | 2020-03-12T09:03:38.000Z | devday/talk/context_processors.py | jenslauterbach/devday_website | a827c9237e656842542eff07ec9fa7b39716a0ee | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 260 | 2018-09-30T14:17:57.000Z | 2022-03-04T13:48:34.000Z | devday/talk/context_processors.py | jenslauterbach/devday_website | a827c9237e656842542eff07ec9fa7b39716a0ee | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 9 | 2018-09-30T13:17:21.000Z | 2020-10-03T12:55:05.000Z | from event.models import Event
from talk.models import Talk
def committee_member_context_processor(request):
if request.user.is_authenticated:
return {
"is_committee_member": request.user.has_perms(
("talk.add_vote", "talk.add_talkcomment")
)
}
else:
return {"is_committee_member": False}
def reservation_context_processor(request):
event = Event.objects.current_event()
if event.sessions_published and not event.is_started():
return {
"reservable_sessions": Talk.objects.filter(
event=event, track__isnull=False, spots__gt=0
).exists()
}
return {"reservable_sessions": False}
| 28.84 | 61 | 0.643551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 121 | 0.167822 |
6cd3055994e4751ed452fa234135b8dfc679dd87 | 948 | py | Python | analysis/divide.py | chris-wahl/SDSS_QSO | 35807ecbc819d89fd4141748b31ce2a51a1c2d34 | [
"MIT"
] | null | null | null | analysis/divide.py | chris-wahl/SDSS_QSO | 35807ecbc819d89fd4141748b31ce2a51a1c2d34 | [
"MIT"
] | null | null | null | analysis/divide.py | chris-wahl/SDSS_QSO | 35807ecbc819d89fd4141748b31ce2a51a1c2d34 | [
"MIT"
] | null | null | null | from spectrum import Spectrum, align_wavelengths
def divide( numerator: Spectrum, denominator: Spectrum, wl_low: float = None, wl_high: float = None ) -> Spectrum:
"""
Performs a point-by-point division of the numerator spectrum by the denominator spectrum. If wavelength ranges
are not specificed, will use the entirely of the overlapping spectra.
:param numerator:
:type numerator: Spectrum
:param denominator:
:type denominator: Spectrum
:param wl_low:
:type wl_low: float
:param wl_high:
:type wl_high: float
:return:
:rtype: Spectrum
"""
wls = align_wavelengths( denominator, numerator, wl_low, wl_high )
divided = Spectrum()
for wl in wls:
n, n_e = numerator[ wl ]
d, d_e = denominator[ wl ]
flux = n / d
err = (pow( n_e / d, 2 ) + pow( n / (d ** 2) * d_e, 2 )) ** (0.5)
divided[ wl ] = (flux, err)
return divided
| 30.580645 | 115 | 0.624473 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 442 | 0.466245 |
6cd3dd00805335ac69d9b5556c1f245451bbe9aa | 198 | py | Python | Ekeopara_Praise/Phase 1/Python Basic 1/Day14 Tasks/Task4.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 6 | 2020-05-23T19:53:25.000Z | 2021-05-08T20:21:30.000Z | Ekeopara_Praise/Phase 1/Python Basic 1/Day14 Tasks/Task4.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 8 | 2020-05-14T18:53:12.000Z | 2020-07-03T00:06:20.000Z | Ekeopara_Praise/Phase 1/Python Basic 1/Day14 Tasks/Task4.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 39 | 2020-05-10T20:55:02.000Z | 2020-09-12T17:40:59.000Z | '''4. Write a Python program to check whether multiple variables have the same value.'''
var1, var2, var3 = 20, 20, 20
if var1 == var2== var3:
print("var1, var2, and var3 have the same value !") | 49.5 | 88 | 0.681818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.666667 |
6cd509e695712b4d2634df653709f63565e266c1 | 1,895 | py | Python | models.py | Nelestya/blog | 4e99ba3789f5214be5fd290801d0fde751e2d99f | [
"MIT"
] | null | null | null | models.py | Nelestya/blog | 4e99ba3789f5214be5fd290801d0fde751e2d99f | [
"MIT"
] | null | null | null | models.py | Nelestya/blog | 4e99ba3789f5214be5fd290801d0fde751e2d99f | [
"MIT"
] | null | null | null | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from baseapp.models import Recently
# Create your models here.
class PublishedManager(models.Manager):
def get_queryset(self):
return super(PublishedManager, self).get_queryset().filter(status='published')
class Post(Recently):
STATUS_CHOICE = (
('draft', 'Draft'),
('published', 'Published'),
)
title = models.CharField(max_length=150)
slug = models.SlugField(max_length=150, unique_for_date='publish')
author = models.ForeignKey(User, related_name='blog_posts')
body = models.TextField()
publish = models.DateTimeField(default=timezone.now)
status = models.CharField(max_length=10, choices=STATUS_CHOICE, default='draft')
image = models.ImageField(upload_to='post/%Y/%m/%d', blank=True)
image_description = models.CharField(max_length=60)
objects = models.Manager() # The default manager.
published = PublishedManager() # The Dahl-specific manager.
class Meta:
ordering = ('-publish',)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('blog:post_detail', args=[self.publish.year,
self.publish.strftime('%m'),
self.publish.strftime('%d'),
self.slug,
])
class Comment(Recently):
    """A reader comment attached to a Post; timestamps come from Recently."""
    mail = models.EmailField()
    pseudo = models.CharField(max_length=30)
    body = models.TextField()
    # Deleting the post removes its comments (CASCADE).
    post = models.ForeignKey('Post',
                             on_delete=models.CASCADE,
                             blank=False,
                             related_name='comments')
    def __str__(self):
        return 'Commented by {} in {}'.format(self.pseudo, self.post)
| 35.092593 | 86 | 0.626913 | 1,674 | 0.883377 | 0 | 0 | 0 | 0 | 0 | 0 | 241 | 0.127177 |
6cd5c125fa085b5e000b2cc8f849565bd1a2bf18 | 156 | py | Python | avython/console/__init__.py | avara1986/avython | a9372865545e55e2e130881b7d743f37d4f415ef | [
"Apache-2.0"
] | null | null | null | avython/console/__init__.py | avara1986/avython | a9372865545e55e2e130881b7d743f37d4f415ef | [
"Apache-2.0"
] | null | null | null | avython/console/__init__.py | avara1986/avython | a9372865545e55e2e130881b7d743f37d4f415ef | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
from __future__ import absolute_import
from avython.console.main import warning_color, show_error, show_warning, check_continue, bcolors
| 31.2 | 97 | 0.839744 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.108974 |
6cd9738ccb60421cba8abd71ab88a5dfd1c7df96 | 303 | py | Python | django_country/middleware.py | ColorGenomics/django-country | 1d272a196d998e21bb8d407e2657b88211f35232 | [
"Apache-2.0"
] | 4 | 2015-08-23T16:52:13.000Z | 2016-07-16T21:39:59.000Z | django_country/middleware.py | ColorGenomics/django-country | 1d272a196d998e21bb8d407e2657b88211f35232 | [
"Apache-2.0"
] | 2 | 2016-02-04T15:58:06.000Z | 2016-02-04T16:29:18.000Z | django_country/middleware.py | color/django-country | 1d272a196d998e21bb8d407e2657b88211f35232 | [
"Apache-2.0"
] | 2 | 2018-05-15T09:10:03.000Z | 2019-03-08T14:49:44.000Z | # -*- coding: utf-8 -*-
from . import geo
class CountryMiddleware(object):
"""
This is a middleware that parses a request
and decides which country the request came from.
"""
def process_request(self, request):
request.COUNTRY_CODE = geo.get_country_from_request(request)
| 23.307692 | 68 | 0.686469 | 258 | 0.851485 | 0 | 0 | 0 | 0 | 0 | 0 | 134 | 0.442244 |