hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ad3f016195bbebe749ea93e6dd6bbfeb76dd0048 | 1,376 | py | Python | client/examples/cycle-cards.py | spoore1/smart-card-removinator | dfc42e0ab5cea45c2ba299c10e7bc3b5857ddba2 | [
"Apache-2.0"
] | 26 | 2016-10-14T02:33:42.000Z | 2022-02-22T01:44:28.000Z | client/examples/cycle-cards.py | spoore1/smart-card-removinator | dfc42e0ab5cea45c2ba299c10e7bc3b5857ddba2 | [
"Apache-2.0"
] | 2 | 2018-03-18T03:06:52.000Z | 2021-03-21T10:14:17.000Z | client/examples/cycle-cards.py | spoore1/smart-card-removinator | dfc42e0ab5cea45c2ba299c10e7bc3b5857ddba2 | [
"Apache-2.0"
] | 8 | 2017-04-26T01:54:07.000Z | 2021-09-21T14:14:49.000Z | #!/usr/bin/env python
from removinator import removinator
import subprocess
# This example cycles through each card slot in the Removinator. Any
# slots that have a card present will then have the certificates on the
# card printed out using the pkcs15-tool utility, which is provided by
# the OpenSC project.
#
# Examples of parsing the Removinator status output and enabling debug
# output from the firmware are also provided.
# Connect to the Removinator card switcher.
print('--- Connecting to Removinator ---')
ctl = removinator.Removinator()
print('--- Cycling through cards ---')
# Card slots are numbered 1-8.
for card in range(1, 9):
    try:
        ctl.insert_card(card)
        print('Inserted card {0}'.format(card))
        # List the certificates on the inserted card; pkcs15-tool is
        # provided by the OpenSC project and must be on PATH.
        print('{0}'.format(subprocess.check_output(['pkcs15-tool',
                                                    '--list-certificates'])
                           .rstrip()))
    except removinator.SlotError:
        # insert_card raises SlotError when the requested slot has no card.
        print('Card {0} is not inserted'.format(card))
print('--- Checking Removinator status ---')
status = ctl.get_status()
print('Current card: {0}'.format(status['current']))
for card in status['present']:
    print('Card {0} is present'.format(card))
print('--- Debug output for re-insertion of current card ---')
ctl.set_debug(True)
ctl.insert_card(status['current'])
# last_response holds the raw firmware output of the previous command.
print('{0}'.format(ctl.last_response.rstrip()))
ctl.set_debug(False)
print('--- Remove current card ---')
ctl.remove_card()
| 32.761905 | 74 | 0.667151 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 711 | 0.516715 |
ad3f3ba0c8f945d938221059aed3b9fa065e26c7 | 716 | py | Python | PDFParser/Client.py | NekuHarp/TPScrum1 | bf5d4cd4353066517077bd4116b523b3ce1f99ea | [
"Apache-2.0"
] | 2 | 2018-12-14T10:57:02.000Z | 2019-11-23T14:20:55.000Z | PDFParser/Client.py | NekuHarp/TPScrum1 | bf5d4cd4353066517077bd4116b523b3ce1f99ea | [
"Apache-2.0"
] | null | null | null | PDFParser/Client.py | NekuHarp/TPScrum1 | bf5d4cd4353066517077bd4116b523b3ce1f99ea | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from . import Defs as _D
from .Parser import Parser as _P
class Client:
    """Thin client wrapper around the PDF Parser (_P) helper."""
    def __init__(self):
        # Fixed client id; only used to build the argument passed in run().
        self.id = 4
        self.p = _P()
        pass
    def doXML(self, folder):
        """Announce an XML export for *folder* (currently only prints)."""
        _EOUT = 'xml'
        print(_EOUT, folder)
    def doTXT(self, folder):
        """Announce a TXT export for *folder* (currently only prints)."""
        _EOUT = 'txt'
        print(_EOUT, folder)
    def setOut(self, wd):
        """Forward the output working directory to the parser."""
        self.p.setWD(wd)
    def ls(self, wd):
        """List *wd* via the parser, normalizing backslashes to forward slashes."""
        gl = self.p.listDir(wd)
        gl = [g.replace('\\','/') for g in gl]
        return gl
    def parser(self, Fname, xml):
        """Delegate parsing of *Fname* to the underlying Parser instance."""
        return self.p.parser(Fname, xml)
    def run(self):
        # NOTE(review): self.cli is never assigned in this class, so the
        # call below raises AttributeError -- confirm the intended wiring.
        print("Client {} : {} ".format(self.p.parse("-{}".format(self.id)), _D.VAR))
        self.cli.main()
| 21.058824 | 84 | 0.52095 | 631 | 0.881285 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.086592 |
ad3f53ef596ae070ce4e4844884650cb4e2cce56 | 1,425 | py | Python | tree/binary/02.py | zlikun-lang/python-data-structure-and-algorithm | f6bd97e1cfb142baa804113c654ab32144c34175 | [
"Apache-2.0"
] | null | null | null | tree/binary/02.py | zlikun-lang/python-data-structure-and-algorithm | f6bd97e1cfb142baa804113c654ab32144c34175 | [
"Apache-2.0"
] | null | null | null | tree/binary/02.py | zlikun-lang/python-data-structure-and-algorithm | f6bd97e1cfb142baa804113c654ab32144c34175 | [
"Apache-2.0"
] | null | null | null | class BinaryTree:
    def __init__(self, data, left=None, right=None):
        """Create a node holding *data* with optional left/right subtrees."""
        self.data = data
        self.left = left
        self.right = right
    def insert_left(self, data):
        """Insert *data* as the left child; an existing left subtree is
        pushed down to become the left child of the new node."""
        if self.left is None:
            self.left = BinaryTree(data)
        else:
            self.left = BinaryTree(data, left=self.left)
    def insert_right(self, data):
        """Insert *data* as the right child; an existing right subtree is
        pushed down to become the right child of the new node."""
        if self.right is None:
            self.right = BinaryTree(data)
        else:
            self.right = BinaryTree(data, right=self.right)
    def get_left_child(self):
        """Return the left subtree (a BinaryTree or None)."""
        return self.left
    def get_right_child(self):
        """Return the right subtree (a BinaryTree or None)."""
        return self.right
    def set_root_value(self, new_value):
        """Replace the value stored at this node."""
        self.data = new_value
    def get_root_value(self):
        """Return the value stored at this node."""
        return self.data
    def __repr__(self):
        """Render only this node's value; children are not shown."""
        return str(self.data)
# Demo: build a small expression-like tree and inspect it.
bt = BinaryTree('*')
bt.insert_left('+')
bt.insert_right('-')
# * + -
print(bt.get_root_value(), bt.get_left_child(), bt.get_right_child())
bt.set_root_value('/')
# / + -
print(bt.get_root_value(), bt.get_left_child(), bt.get_right_child())
bt.insert_left(3)
bt.insert_right(4)
# Could not build (3 + 4) / (7 - 2): insert_* only operates on the root
# node, so the new children push the operators one level down instead.
# / 3 + None 4 None -
print(bt.get_root_value(),
      bt.get_left_child(),
      bt.get_left_child().get_left_child(),
      bt.get_left_child().get_right_child(),
      bt.get_right_child(),
      bt.get_right_child().get_left_child(),
      bt.get_right_child().get_right_child(),
      )
| 23.75 | 69 | 0.619649 | 803 | 0.544407 | 0 | 0 | 0 | 0 | 0 | 0 | 141 | 0.095593 |
ad407d34582125c1bd24f6cfc57ab71b5fbb80c8 | 1,705 | py | Python | tmp/keyword_get.py | mingyuexc/huluxia_woman_meitui | f6706947dbeb6cdd9e39b08ad1fcf2fc459ecb0d | [
"Apache-2.0"
] | 3 | 2021-01-19T02:41:54.000Z | 2021-05-04T08:23:18.000Z | tmp/keyword_get.py | mingyuexc/huluxia_woman_meitui | f6706947dbeb6cdd9e39b08ad1fcf2fc459ecb0d | [
"Apache-2.0"
] | null | null | null | tmp/keyword_get.py | mingyuexc/huluxia_woman_meitui | f6706947dbeb6cdd9e39b08ad1fcf2fc459ecb0d | [
"Apache-2.0"
] | 1 | 2021-04-14T10:05:32.000Z | 2021-04-14T10:05:32.000Z | #!/usr/bin/python3
# coding = utf-8
"""
@author:m1n9yu3
@file:keyword_get.py
@time:2021/01/13
"""
from get_data import *
import threading
from urllib import parse
def multi_thread(idlist, path):
    """Spawn one download thread per post id and block until all finish.

    (The original docstring said "thread control, run 1000 threads at a
    time"; in practice one thread is created per entry of *idlist*.)
    """
    workers = [threading.Thread(target=get_images_url, args=(post_id, path))
               for post_id in idlist]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def ask_url(url, path, number=10):
    """Page through the forum search API and download images for each post.

    :param url: request URL, passed through str.format with a start offset.
        NOTE(review): the URL built by search_key() contains no '{}'
        placeholder, so url.format(...) is a no-op there -- verify that
        pagination actually advances.
    :param path: output directory handed to the download workers
    :param number: maximum number of result pages to fetch (default 10)
    """
    i = 0
    post_ids = []
    js = get_json(url.format(i))
    while True:
        # Exit when the response contains no posts.
        if not js['posts']:
            break
        for post_id_i in js['posts']:
            post_ids.append(post_id_i['postID'])
        i += 1
        # 'number' caps how many pages are crawled.
        # print(post_ids)
        number -= 1
        if number % 10 == 0:
            # Download the batch of ids collected over the last 10 pages.
            # NOTE(review): ids gathered since the last batch are dropped
            # when the loop exits via the empty-'posts' break -- confirm.
            multi_thread(idlist=post_ids, path=path)
            if number == 0:
                break
            post_ids = []
        js = get_json(url.format(js['start']))
    print("爬取完成, 共{} 个帖子".format(i))
def search_key(keyword):
    """Search the forum for *keyword* and download images from the hits."""
    # Sample _key (session token). Originally it could also be read
    # interactively via input().
    _key = "074A517999865CB0A3DC24034F244DEB1E23E1512BA28A8D07315737041A1E393A13114A41B9FCE24CBD95E0AF7E0C72DC99A8E24218CC70"
    url = ("http://floor.huluxia.com/post/search/ANDROID/2.1"
           "?platform=2&market_id=tool_baidu&_key={}&start=1&count=20"
           "&cat_id=56&keyword={}&flag=0").format(_key, parse.quote(keyword))
    ask_url(url, 'search_result/')
if __name__ == '__main__':
    # Placeholder entry point: nothing runs when executed directly.
    pass
ad43350b2da704f794c52e0d66b5d6a868f93d05 | 388 | py | Python | Proctor_Brad/Assignments/bubble sort.py | webguru001/Python-Django-Web | 6264bc4c90ef1432ba0902c76b567cf3caaae221 | [
"MIT"
] | 5 | 2019-05-17T01:30:02.000Z | 2021-06-17T21:02:58.000Z | Proctor_Brad/Assignments/bubble sort.py | curest0x1021/Python-Django-Web | 6264bc4c90ef1432ba0902c76b567cf3caaae221 | [
"MIT"
] | null | null | null | Proctor_Brad/Assignments/bubble sort.py | curest0x1021/Python-Django-Web | 6264bc4c90ef1432ba0902c76b567cf3caaae221 | [
"MIT"
] | null | null | null | import random
import time
import random

# Build a list of 100 pseudo-random integers in [0, 10000).
b = [int(random.random() * 10000) for _ in range(100)]

maximum = len(b) - 1
start_time = time.time()
# Bubble sort: each outer pass bubbles the largest remaining element to the
# end, so the inner scan can stop one element earlier each time (the inner
# range re-reads 'maximum', which is decremented after every pass).
for i in range(0, maximum):
    for j in range(0, maximum):
        if b[j] > b[j + 1]:
            # Tuple assignment replaces the temp-variable swap.
            b[j], b[j + 1] = b[j + 1], b[j]
    maximum -= 1
# BUGFIX: the original used the Python-2-only statement 'print b', which is
# a SyntaxError on Python 3; print(b) works on both interpreters.
print(b)
print("--- %s seconds ---" % (time.time() - start_time))
| 20.421053 | 56 | 0.523196 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 20 | 0.051546 |
ad437b9b77502ea14c72a607a4c29fcf984906ad | 1,863 | py | Python | app/morocco/authentication.py | troydai/Morocco | b975d4f6813734a4c7b8e6c61669976389a27560 | [
"MIT"
] | null | null | null | app/morocco/authentication.py | troydai/Morocco | b975d4f6813734a4c7b8e6c61669976389a27560 | [
"MIT"
] | 9 | 2017-07-11T08:17:27.000Z | 2017-08-01T21:48:00.000Z | app/morocco/authentication.py | troydai/Morocco | b975d4f6813734a4c7b8e6c61669976389a27560 | [
"MIT"
] | null | null | null | import flask_login
from .application import app
from .models import DbUser
# Shared Flask-Login manager for the application.
login_manager = flask_login.LoginManager()  # pylint: disable=invalid-name
login_manager.init_app(app)
# Resolve a session user id back to its DbUser row (None when not found).
login_manager.user_loader(lambda user_id: DbUser.query.filter_by(id=user_id).first())
# Module-level alias so views can use login_required without importing flask_login.
login_required = flask_login.login_required
@login_manager.unauthorized_handler
def unauthorized_handler():
    """Bounce anonymous users to the login page, keeping the requested URI."""
    from flask import redirect, request, url_for
    destination = url_for('login', request_uri=request.path)
    return redirect(destination)
@app.before_request
def redirect_https():
    """Redirect plain-HTTP requests to HTTPS.

    Requests that arrived over TLS carry the 'X-Arr-Ssl' header (presumably
    set by the Azure/ARR front end -- confirm), and local development
    servers are exempt via the 'is_local_server' config flag. Returns None
    (continue processing) for requests that are already secure.
    """
    from flask import redirect, request
    if 'X-Arr-Ssl' not in request.headers and not app.config['is_local_server']:
        # BUGFIX: replace only the first 'http' (the scheme). The original
        # replaced every occurrence, which would also mangle 'http'
        # appearing in the path or query string.
        redirect_url = request.url.replace('http', 'https', 1)
        return redirect(redirect_url)
@app.route('/', methods=['GET'])
def index():
    """Render the landing page."""
    from flask import render_template
    return render_template(
        'index.html',
        byline='Morocco - An automation service runs on Azure Batch.\n',
        title='Azure CLI')
@app.route('/login', methods=['GET'])
def login():
    """Redirect the user agent to the Azure AD sign-in page."""
    import morocco.auth
    response = morocco.auth.openid_login()
    return response
@app.route('/signin-callback', methods=['POST'])
def signin_callback():
    """Handle the redirect back from the AAD sign-in page."""
    def get_or_add_user(user_id: str):
        # Fetch the user row, lazily creating it on first sign-in.
        from .application import db
        from .models import DbUser
        existing = DbUser.query.filter_by(id=user_id).first()
        if existing:
            return existing
        created = DbUser(user_id)
        db.session.add(created)
        db.session.commit()
        return created

    import morocco.auth
    return morocco.auth.openid_callback(get_or_add_user)
@app.route('/logout', methods=['POST'])
def logout():
    """Log out of this application as well as the Azure OpenID session."""
    import morocco.auth
    result = morocco.auth.openid_logout()
    return result
| 27.397059 | 85 | 0.703167 | 0 | 0 | 0 | 0 | 1,535 | 0.82394 | 0 | 0 | 375 | 0.201288 |
ad45281e97f21d2403bb3011a6ec4b34ad957b3a | 2,401 | py | Python | src/pynwb/ndx_icephys_meta/io/icephys.py | oruebel/ndx-icephys-meta | c97ea4f0ff60ad05e173cca30b0c46b809727f89 | [
"BSD-3-Clause-LBNL"
] | 6 | 2020-04-15T14:28:29.000Z | 2022-03-31T20:33:25.000Z | src/pynwb/ndx_icephys_meta/io/icephys.py | oruebel/ndx-icephys-meta | c97ea4f0ff60ad05e173cca30b0c46b809727f89 | [
"BSD-3-Clause-LBNL"
] | 55 | 2019-10-10T19:21:08.000Z | 2021-07-21T03:02:29.000Z | src/pynwb/ndx_icephys_meta/io/icephys.py | oruebel/ndx-icephys-meta | c97ea4f0ff60ad05e173cca30b0c46b809727f89 | [
"BSD-3-Clause-LBNL"
] | null | null | null | """
Module with ObjectMapper classes for the icephys-meta Container classes/neurodata_types
"""
from pynwb import register_map
from pynwb.io.file import NWBFileMap
from hdmf.common.io.table import DynamicTableMap
from ndx_icephys_meta.icephys import ICEphysFile, AlignedDynamicTable
@register_map(ICEphysFile)
class ICEphysFileMap(NWBFileMap):
    """
    Customize object mapping for ICEphysFile, wiring our custom icephys
    tables (IntracellularRecordings, SimultaneousRecordingsTable,
    SequentialRecordingsTable, RepetitionsTable, and
    ExperimentalConditionsTable) to their container attributes.
    """
    def __init__(self, spec):
        super().__init__(spec)
        icephys_spec = self.spec.get_group('general').get_group('intracellular_ephys')
        # (attribute name, neurodata type) pairs for the icephys tables.
        table_mappings = (
            ('intracellular_recordings', 'IntracellularRecordingsTable'),
            ('icephys_simultaneous_recordings', 'SimultaneousRecordingsTable'),
            ('icephys_sequential_recordings', 'SequentialRecordingsTable'),
            ('icephys_repetitions', 'RepetitionsTable'),
            ('icephys_experimental_conditions', 'ExperimentalConditionsTable'),
        )
        for attr_name, type_name in table_mappings:
            self.map_spec(attr_name, icephys_spec.get_neurodata_type(type_name))
        self.map_spec('ic_filtering', icephys_spec.get_dataset('filtering'))
@register_map(AlignedDynamicTable)
class AlignedDynamicTableMap(DynamicTableMap):
    """
    Customize the mapping for AlignedDynamicTable.
    """
    def __init__(self, spec):
        super().__init__(spec)
        # The contained sub-category DynamicTables would by default be
        # mapped to the 'dynamic_tables' attribute; expose them as
        # 'category_tables' instead.
        self.map_spec('category_tables', spec.get_neurodata_type('DynamicTable'))

    @DynamicTableMap.object_attr('electrodes')
    def electrodes(self, container, manager):
        return container.category_tables.get('electrodes')

    @DynamicTableMap.object_attr('stimuli')
    def stimuli(self, container, manager):
        return container.category_tables.get('stimuli')

    @DynamicTableMap.object_attr('responses')
    def responses(self, container, manager):
        return container.category_tables.get('responses')
| 47.078431 | 120 | 0.768013 | 2,050 | 0.853811 | 0 | 0 | 2,112 | 0.879633 | 0 | 0 | 1,024 | 0.426489 |
ad45360a92c7f02994ad544e2eb3f4433e8d7fb6 | 7,265 | py | Python | plugin/AssemblerSPAdes/bin/RunAssembler.py | konradotto/TS | bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e | [
"Apache-2.0"
] | 125 | 2015-01-22T05:43:23.000Z | 2022-03-22T17:15:59.000Z | plugin/AssemblerSPAdes/bin/RunAssembler.py | konradotto/TS | bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e | [
"Apache-2.0"
] | 59 | 2015-02-10T09:13:06.000Z | 2021-11-11T02:32:38.000Z | plugin/AssemblerSPAdes/bin/RunAssembler.py | konradotto/TS | bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e | [
"Apache-2.0"
] | 98 | 2015-01-17T01:25:10.000Z | 2022-03-18T17:29:42.000Z | #!/usr/bin/env python
import json
import os
import subprocess
import sys
def fileExistsAndNonEmpty(filename):
    """Return True when *filename* exists and is more than zero bytes long.

    Uses a single os.stat call inside try/except, so there is no window
    between the existence check and the size check (the original
    exists-then-stat pair could race with concurrent file deletion).
    """
    try:
        return os.stat(filename).st_size > 0
    except OSError:
        return False
class AssemblerRunner(object):
    """Runs de-novo assembly (SPAdes) and assembly QC (QUAST) on one BAM file.

    Configuration is read from 'startplugin.json' in the current working
    directory; results are written below the plugin results directory.
    The 'dict.has_key' calls of the original were replaced with the 'in'
    operator so the class works on both Python 2 and Python 3.
    """

    def __init__(self, sample_id, sample_seq, bam_file):
        with open("startplugin.json", "r") as fh:
            self.config = json.load(fh)
        self.params = self.config['pluginconfig']

        # launch.sh creates a symlink to the input BAM file in this directory
        self.output_dir = self.config['runinfo']['results_dir']
        self.sample_id = sample_id
        self.sample_seq = sample_seq
        self.sample_name = sample_id + "." + sample_seq
        self.sample_output_dir = os.path.join(self.output_dir, self.sample_name)
        self.bam_file = bam_file
        self.bam_rel_path = os.path.join(self.sample_name, self.bam_file)
        # relative path to the input bam file
        self.bam_to_assemble = os.path.join(self.output_dir, self.bam_rel_path)

        # How much to downsample; the step is skipped when it equals 1.
        # BUGFIX: always define the attribute -- execute() reads it
        # unconditionally, which raised AttributeError whenever the plugin
        # configuration had no 'fraction_of_reads' entry.
        if 'fraction_of_reads' in self.params:
            self.fraction_of_reads = float(self.params['fraction_of_reads'])
        else:
            self.fraction_of_reads = 1.0

        # all executables are located in the bin/ subdirectory
        self.assembler_path = os.path.join(os.environ['DIRNAME'], 'bin')
        # where to output HTML with results
        self.url_root = self.config['runinfo']['url_root']
        # skip assembly (and run only QUAST) if contigs exist
        self.quast_only = 'quastOnly' in self.params
        # information will be printed to "info.json"
        self.info = {'params': self.params, 'executedCommands': []}
        if sample_id != '' and sample_seq != '':
            self.info['sampleId'] = sample_id
            self.info['sampleSeq'] = sample_seq
            self.info['sampleName'] = self.sample_name

    def printAssemblyParameters(self):
        """Print the 'pluginconfig' section of 'startplugin.json'."""
        print("AssemblerSPAdes run parameters:")
        print(self.params)

    def writeInfo(self, json_filename):
        """Dump the accumulated run information to *json_filename*."""
        with open(json_filename, 'w+') as f:
            json.dump(self.info, f, indent=4)

    def runCommand(self, command, description=None):
        """Run *command* through the shell and record it in self.info."""
        if description:
            print(description)
        else:
            print(command)
        sys.stdout.flush()
        os.system(command)
        self.info['executedCommands'].append(command)

    def runDownsampling(self):
        """Downsample the BAM in place using Picard DownsampleSam."""
        print("\nSubsampling using Picard")
        # downsampler = os.path.join(self.assembler_path, 'DownsampleSam.jar')
        downsampler = "/opt/picard/picard-tools-current/picard.jar"
        out = os.path.join(self.sample_output_dir, self.bam_file + "_scaled")
        cmd = ("java -Xmx2g -jar {downsampler} "
               "DownsampleSam "
               "INPUT={self.bam_to_assemble} OUTPUT={out} "
               "PROBABILITY={self.fraction_of_reads}").format(**locals())
        self.runCommand(cmd)
        # Overwrite the original BAM with the downsampled one.
        cmd = ("mv {out} {self.bam_to_assemble}").format(**locals())
        self.runCommand(cmd)

    def execute(self):
        """Top-level driver: count reads, optionally downsample, assemble."""
        self.printAssemblyParameters()
        read_count_cmd = "samtools view -c " + self.bam_rel_path
        read_count_process = subprocess.Popen(read_count_cmd, shell=True,
                                              stdout=subprocess.PIPE)
        num_reads = int(read_count_process.communicate()[0])

        def tooFewReads():
            # True when a 'min_reads' threshold is configured and not exceeded.
            if 'min_reads' not in self.params:
                return False
            self.min_reads = int(self.params['min_reads'])
            return num_reads <= self.min_reads

        print("%d reads in %s" % (num_reads, self.bam_file))
        if tooFewReads():
            print(("\tDoes not have more than %d reads. "
                   "Skipping this file") % (self.min_reads,))
            return

        if self.fraction_of_reads < 1:
            self.runDownsampling()
        self.runSPAdes()

    def runSPAdes(self):
        """Assemble the BAM with SPAdes, then run QUAST on the contigs."""
        if 'spadesversion' in self.params:
            version = self.params['spadesversion']
        else:
            version = "3.1.0"
        assert(version >= "3.0.0")

        rel_path = os.path.join("SPAdes-%s-Linux" % version, "bin", "spades.py")
        spades_path = os.path.join(self.assembler_path, rel_path)
        output_dir = os.path.join(self.sample_name, "spades")
        contigs_fn = os.path.join(output_dir, "contigs.fasta")
        scaffolds_fn = os.path.join(output_dir, "scaffolds.fasta")
        log_fn = os.path.join(output_dir, "spades.log")
        # Reuse existing contigs when the user asked for QUAST only.
        skip_assembly = self.quast_only and fileExistsAndNonEmpty(contigs_fn)

        if 'spadesOptions' in self.params:
            user_options = self.params['spadesOptions']
        else:
            user_options = "-k 21,33,55,77,99"

        spades_info = {'contigs': contigs_fn,
                       'scaffolds': scaffolds_fn,
                       'log': log_fn,
                       'userOptions': user_options,
                       'version': version}
        pid = os.getpid()
        if not skip_assembly:
            cmd = ("{spades_path} --iontorrent --tmp-dir /tmp/{pid} "
                   "-s {self.bam_to_assemble} -o {output_dir} "
                   "{user_options} > /dev/null").format(**locals())
            print("Running AssemblerSPAdes - SPAdes %s" % version)
            self.runCommand(cmd)

        report_dir = self.createQuastReport(contigs_fn, output_dir)
        spades_info['quastReportDir'] = report_dir
        self.info['spades'] = spades_info

    def createQuastReport(self, contigs_fn, output_dir):
        """Run QUAST on *contigs_fn*; return the report dir, or None on failure."""
        version = "2.3"
        rel_path = os.path.join("quast-%s" % version, "quast.py")
        quast_path = os.path.join(self.assembler_path, rel_path)
        # quast_reference = self.params['bgenome']
        quast_reference = "None"
        quast_results_dir = os.path.join(output_dir, "quast_results")
        print("Running QUAST %s" % version)
        reference_param = ("-R " + quast_reference) if quast_reference != "None" else " "
        cmd = ("{quast_path} -o {quast_results_dir} "
               "{reference_param} {contigs_fn}").format(**locals())
        self.runCommand(cmd)
        try:
            if os.path.isfile(os.path.join(quast_results_dir, "report.html")):
                return os.path.abspath(quast_results_dir)
            else:
                return None
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            return None
import sys  # NOTE(review): redundant -- sys is already imported at the top of the file.

if __name__ == "__main__":
    # Barcode run: RunAssembler.py <sample_id> <sample_seq> <bam> <out_dir>
    if len(sys.argv) == 5:
        sample_id = sys.argv[1]
        sample_seq = sys.argv[2]
        bam_file = sys.argv[3]
        out_dir = sys.argv[4]
        runner = AssemblerRunner(sample_id, sample_seq, bam_file)
        runner.execute()
        runner.writeInfo("%s/info_%s.%s.json" % (out_dir,sample_id, sample_seq))
    else:
        # Non-barcode run: RunAssembler.py <bam> <out_dir>
        assert(len(sys.argv) == 3)  # not a barcode run
        bam_file = sys.argv[1]
        out_dir = sys.argv[2]
        # HACK: sample_name = '.' => essentially vanishes from all paths
        runner = AssemblerRunner('', '', bam_file)
        runner.execute()
        runner.writeInfo("%s/info.json" % (out_dir))
| 38.439153 | 87 | 0.599725 | 6,359 | 0.875292 | 0 | 0 | 0 | 0 | 0 | 0 | 1,910 | 0.262904 |
ad4606ad266b7b3db3e78f36d7d519b541e707cd | 1,242 | py | Python | log_utils.py | zheng-yanan/hierarchical-deep-generative-models | 3a92d2ce69a51f4da55a18b09ca4c246f6f6ed43 | [
"MIT"
] | 1 | 2019-06-06T02:55:45.000Z | 2019-06-06T02:55:45.000Z | log_utils.py | zheng-yanan/hierarchical-deep-generative-model | 3a92d2ce69a51f4da55a18b09ca4c246f6f6ed43 | [
"MIT"
] | null | null | null | log_utils.py | zheng-yanan/hierarchical-deep-generative-model | 3a92d2ce69a51f4da55a18b09ca4c246f6f6ed43 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import logging
def logger_fn(name, filepath, level = logging.DEBUG):
    """ Function for creating log manager
    Args:
        name: name for log manager
        filepath: file path for log file (opened in 'w' mode, truncating it)
        level: log level (CRITICAL > ERROR > WARNING > INFO > DEBUG)
    Return:
        log manager
    """
    logger = logging.getLogger(name)
    logger.setLevel(level)
    # BUGFIX: logging.getLogger returns the same object for the same name,
    # so re-attaching handlers on every call duplicated every log record.
    # Configure the handlers only once per logger.
    if logger.handlers:
        return logger
    sh = logging.StreamHandler(sys.stdout)
    fh = logging.FileHandler(filepath, mode = 'w')
    # More verbose alternatives, kept for reference:
    # formatter = logging.Formatter('[%(asctime)s][%(levelname)s][%(filename)s][line:%(lineno)d] %(message)s')
    # formatter = logging.Formatter('[%(asctime)s][%(filename)s][line:%(lineno)d] %(message)s')
    formatter = logging.Formatter('[%(asctime)s] %(message)s')
    # Common format fields (translated from the original Chinese notes):
    #   %(levelno)s    numeric log level
    #   %(levelname)s  log level name
    #   %(pathname)s   path of the running program (sys.argv[0])
    #   %(filename)s   file name of the running program
    #   %(funcName)s   function emitting the record
    #   %(lineno)d     line number of the record
    #   %(asctime)s    timestamp of the record
    #   %(thread)d     thread id
    #   %(threadName)s thread name
    #   %(process)d    process id
    #   %(message)s    log message
    sh.setFormatter(formatter)
    fh.setFormatter(formatter)
    logger.addHandler(sh)
    logger.addHandler(fh)
    return logger
ad46fd5c399e0415b0358814ed40f6bdf8278661 | 336 | py | Python | api/types.py | ElPapi42/test-api | d7a68e8fadb6cbb6bf48e993e1df4898bedc6372 | [
"MIT"
] | null | null | null | api/types.py | ElPapi42/test-api | d7a68e8fadb6cbb6bf48e993e1df4898bedc6372 | [
"MIT"
] | null | null | null | api/types.py | ElPapi42/test-api | d7a68e8fadb6cbb6bf48e993e1df4898bedc6372 | [
"MIT"
] | null | null | null | from bson.objectid import ObjectId, InvalidId
class PydanticObjectId(str):
    """A str subclass that pydantic validates as a BSON ObjectId string."""

    @classmethod
    def __get_validators__(cls):
        # pydantic v1 hook: yield each validator callable in order.
        yield cls.validate

    @classmethod
    def validate(cls, v):
        """Return str(v) if it parses as an ObjectId, else raise TypeError."""
        value = str(v)
        try:
            ObjectId(value)
        except InvalidId:
            raise TypeError('invalid ObjectId')
        return value
| 21 | 47 | 0.622024 | 287 | 0.854167 | 55 | 0.16369 | 248 | 0.738095 | 0 | 0 | 18 | 0.053571 |
ad47d50e27f1ff53557e090e02e00ff688fd1b95 | 1,818 | py | Python | phr/insteducativa/api/serializers.py | richardqa/django-ex | e5b8585f28a97477150ac5daf5e55c74b70d87da | [
"CC0-1.0"
] | null | null | null | phr/insteducativa/api/serializers.py | richardqa/django-ex | e5b8585f28a97477150ac5daf5e55c74b70d87da | [
"CC0-1.0"
] | null | null | null | phr/insteducativa/api/serializers.py | richardqa/django-ex | e5b8585f28a97477150ac5daf5e55c74b70d87da | [
"CC0-1.0"
] | null | null | null | from drf_extra_fields.geo_fields import PointField
from rest_framework import serializers
from phr.insteducativa.models import InstitucionEducativa
from phr.ubigeo.models import UbigeoDepartamento, UbigeoDistrito, UbigeoProvincia
class InstEducativaSerializer(serializers.ModelSerializer):
    """Serializer for InstitucionEducativa with human-readable ubigeo names.

    The three name lookups shared the same try/get/except shape; that logic
    now lives in the private _ubigeo_name helper. Behavior is unchanged:
    each get_* method returns the ubigeo name, '' when the ubigeo record
    does not exist, or None when the instance has no ubigeo code at all.
    """
    ubicacion = PointField(required=False)
    departamento_nombre = serializers.SerializerMethodField()
    provincia_nombre = serializers.SerializerMethodField()
    distrito_nombre = serializers.SerializerMethodField()

    class Meta:
        model = InstitucionEducativa
        fields = ('codigo_colegio', 'codigo_modular', 'nombre', 'ubigeo', 'direccion', 'nivel', 'nivel_descripcion',
                  'tipo', 'tipo_descripcion', 'nombre_ugel', 'establecimiento_renaes', 'establecimiento_nombre',
                  'ubicacion', 'departamento_nombre', 'provincia_nombre', 'distrito_nombre',)

    @staticmethod
    def _ubigeo_name(model, filter_field, code, name_attr):
        """Look up one ubigeo record by code; return its name or '' if absent."""
        try:
            record = model.objects.get(**{filter_field: code})
            return getattr(record, name_attr)
        except model.DoesNotExist:
            return ''

    def get_departamento_nombre(self, obj):
        # First two digits of the ubigeo code identify the department.
        if obj.ubigeo:
            return self._ubigeo_name(UbigeoDepartamento, 'cod_ubigeo_inei_departamento',
                                     obj.ubigeo[:2], 'ubigeo_departamento')

    def get_provincia_nombre(self, obj):
        # First four digits of the ubigeo code identify the province.
        if obj.ubigeo:
            return self._ubigeo_name(UbigeoProvincia, 'cod_ubigeo_inei_provincia',
                                     obj.ubigeo[:4], 'ubigeo_provincia')

    def get_distrito_nombre(self, obj):
        # The full ubigeo code identifies the district.
        if obj.ubigeo:
            return self._ubigeo_name(UbigeoDistrito, 'cod_ubigeo_inei_distrito',
                                     obj.ubigeo, 'ubigeo_distrito')
| 42.27907 | 116 | 0.684268 | 1,584 | 0.871287 | 0 | 0 | 0 | 0 | 0 | 0 | 243 | 0.133663 |
ad48ea0d8d6cb42aaecd71882b43cea8143d3ab2 | 885 | py | Python | python/review_02_list.py | dayoungMM/TIL | b844ef5621657908d4c256cdfe233462dd075e8b | [
"MIT"
] | null | null | null | python/review_02_list.py | dayoungMM/TIL | b844ef5621657908d4c256cdfe233462dd075e8b | [
"MIT"
] | null | null | null | python/review_02_list.py | dayoungMM/TIL | b844ef5621657908d4c256cdfe233462dd075e8b | [
"MIT"
] | null | null | null | ## list
# List basics: slicing keeps the first three elements.
array = [1,2,3,"four","five","six",True]
print(array[:3])
# Dictionary literal (keys are Seoul district names).
dust = {
    '영등포구': 50,
    '강남구' : 40
}
## Dictionary
print(dust['영등포구'])
# dict() constructor with a keyword argument.
dust2 = dict(abc=50)
print(dust2)
## Randomly pick 3 coffee menu items
import random
coffee = ['아아','뜨아','라떼','믹스','핫초코']
coffee_fav=coffee[1:4] # print a slice of my favorite menu items
print(coffee_fav)
# Keep drawing random items until we have 3 distinct ones.
ls = []
while True:
    a = random.choice(coffee)
    if a not in ls:
        ls.append(a)
    if len(ls) ==3:
        break
print(ls)
### range
b= list(range(1,10))
print(b)
### Randomly print today's lunch restaurant and its phone number
import random
manu = ['20층','양자강','김밥카페','순남시래기','바나프레소']
phone_book = {
    '20층' : '02-1233-4444',
    '양자강' : '02-4444-5555',
    '김밥카페' : '02-6666-7777',
    '순남시래기' : '02-8888-9999',
    '바나프레소' : '02-1000-2222'
}
today_manu = random.choice(manu)
today_num = phone_book[today_manu]
print("오늘의 메뉴:{}, 전화번호는:{}".format(today_manu, today_num))
# print(dir(random))
| 15.526316 | 58 | 0.59887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 548 | 0.495032 |
ad49616aedf0c8496a6ad656e69c031e7078edfd | 1,661 | py | Python | DetectAESECB.py | styojm/CryptoPal-Challenges | e7b1759d01de7d388c1632b827c51f506e419db7 | [
"MIT"
] | null | null | null | DetectAESECB.py | styojm/CryptoPal-Challenges | e7b1759d01de7d388c1632b827c51f506e419db7 | [
"MIT"
] | null | null | null | DetectAESECB.py | styojm/CryptoPal-Challenges | e7b1759d01de7d388c1632b827c51f506e419db7 | [
"MIT"
] | null | null | null | '''
Detect AES in ECB mode
In this file are a bunch of hex-encoded ciphertexts.
One of them has been encrypted with ECB.
Detect it.
Remember that the problem with ECB is that it is stateless and deterministic; the same 16 byte plaintext block will always produce the same 16 byte ciphertext.
Strategy is to separate in 16-byte block, and detect repetition
'''
from AES_ECB import DecryptAESECB
from Single_byte_XOR_cipher import SimpleTextScore
import math
# Input file for Cryptopals Set 1 Challenge 8: one hex-encoded ciphertext per line.
filepath = r'C:\Users\styojm\PycharmProjects\crypto\S1C8.txt'
def RepeatCount(text, blocksize=16):
    '''
    Count duplicate fixed-size blocks in *text*.

    ECB is stateless and deterministic, so identical plaintext blocks
    produce identical ciphertext blocks; a high repeat count flags ECB.

    :param text: ciphertext, e.g. a hex string (any sliceable sequence works)
    :param blocksize: block size/length in characters
    :return: the repeat count normalized to the block #

    BUGFIX: the original built a never-used byte_message via
    bytes.fromhex(text), which raised ValueError for any non-hex string;
    that dead code has been removed.
    '''
    blockNum = math.ceil(len(text) / blocksize)
    blocks = [text[i * blocksize:(i + 1) * blocksize] for i in range(blockNum)]
    # Sorting brings equal blocks next to each other, so a single pass over
    # adjacent pairs counts every duplicate occurrence.
    blocks.sort()
    repeatcount = sum(1 for prev, cur in zip(blocks, blocks[1:]) if prev == cur)
    return repeatcount / blockNum
def main():
    """Print the input line with the highest block-repetition score.

    The ciphertext whose 16-byte blocks repeat most often is the most
    likely ECB-encrypted candidate.
    """
    with open(filepath) as file:
        lines = file.readlines()  # hex strings
    maxcount = -1
    message = ''
    for line in lines:
        count = RepeatCount(line)
        if count>maxcount:
            maxcount = count
            message = line
    print('Repeatcount {}, for message {}'.format(maxcount,message))
if __name__ == '__main__':
    main()
ad4b8493a7a78d74d29fd8adec60eaee8fb97b35 | 14,527 | py | Python | uam_simulator/orca.py | colineRamee/UAM_simulator_scitech2021 | 0583f5ce195cf1ec4f6919d6523fa39851c419fc | [
"MIT"
] | 1 | 2021-02-04T15:57:03.000Z | 2021-02-04T15:57:03.000Z | uam_simulator/orca.py | colineRamee/UAM_simulator_scitech2021 | 0583f5ce195cf1ec4f6919d6523fa39851c419fc | [
"MIT"
] | null | null | null | uam_simulator/orca.py | colineRamee/UAM_simulator_scitech2021 | 0583f5ce195cf1ec4f6919d6523fa39851c419fc | [
"MIT"
] | 2 | 2021-02-04T04:41:08.000Z | 2022-03-01T16:18:14.000Z | import numpy as np
import math
""" Implementation of the ORCA algorithm
Resources: van den Berg, Reciprocal n-body Collision Avoidance,
RVO2 library (C++) https://github.com/snape/RVO2/blob/master/src/Agent.cpp
Pyorca library to see another python implementation https://github.com/Muon/pyorca
Other resources included in the code"""
class Line:
    """A point/direction pair describing an ORCA half-plane constraint line.

    Attributes:
        point: a 2-D numpy array, a point on the line
        direction: a 2-D numpy array, the line's direction
    """
    def __init__(self, point=None, direction=None):
        # BUGFIX: the original used np.array([0, 0]) default arguments.
        # Default values are evaluated once, so every Line built with
        # defaults shared the same arrays -- mutating one line's point
        # silently mutated them all. None sentinels give each instance
        # fresh zero vectors while keeping the same call signature.
        self.point = np.array([0, 0]) if point is None else point
        self.direction = np.array([0, 0]) if direction is None else direction
class ORCA:
    """Optimal Reciprocal Collision Avoidance solver.

    Builds one half-plane constraint (ORCA line) in velocity space per
    neighbor, then solves a small linear program for the velocity closest
    to the agent's preferred velocity that satisfies every constraint.

    References: van den Berg et al., "Reciprocal n-Body Collision
    Avoidance"; the RVO2 C++ library (Agent.cpp).
    """

    def __init__(self):
        # Sensing range for neighbor queries (m).
        self.max_distance_to_neighbors = 5000
        # Look-ahead window (s): sensing range / max speed (5000 m / 20 m/s).
        self.time_horizon = 250
        self.inv_time_horizon = 1 / self.time_horizon
        # Tolerance used to detect nearly-parallel constraint lines.
        self.epsilon = 0.00001
        # Number of nearest neighbors considered per update.
        self.k_neighbors = 10

    def compute_new_velocity(self, agent, dt):
        """Return a collision-avoiding velocity for ``agent`` over ``dt``.

        Args:
            agent: object exposing position, velocity, goal, radius,
                maxSpeed and get_nearest_neighbors(k, max_distance).
            dt: time step in seconds.

        Returns:
            np.ndarray: the new velocity for the agent.
        """
        neighbors = agent.get_nearest_neighbors(self.k_neighbors, self.max_distance_to_neighbors)
        orca_lines = []
        R = agent.radius * 1.1  # Slightly inflated to absorb floating-point error
        # Preferred velocity: straight toward the goal, capped at max speed.
        direction_to_goal = agent.goal - agent.position
        distance_to_goal = np.linalg.norm(direction_to_goal)
        pref_vel = min(agent.maxSpeed, distance_to_goal / dt) * direction_to_goal / distance_to_goal
        for neighbor in neighbors:
            # Construct the ORCA half-plane induced by this neighbor.
            rel_position = neighbor.position - agent.position
            d = np.linalg.norm(rel_position)
            # Relative velocity computed from the current velocities.
            rel_velocity = agent.velocity - neighbor.velocity
            if d > R:
                # No current collision: the velocity obstacle is a truncated
                # cone. w is the vector from the cutoff-circle center to the
                # relative velocity.
                w = rel_velocity - self.inv_time_horizon * rel_position
                dot_product1 = np.dot(w, rel_position)
                w_norm = np.linalg.norm(w)
                unit_w = w / w_norm
                # dot_product1 < 0 is necessary but not sufficient to project
                # on the cutoff circle; the second test compares the angle of
                # w against -rel_position with the cone half-angle
                # (cos^2 alpha = R^2 / |rel_position|^2).
                if dot_product1 < 0 and dot_product1**2 > R**2 * np.dot(w, w):
                    # Project on the cutoff circle: u points along w for the
                    # remaining distance to the circle boundary.
                    u = (R * self.inv_time_horizon - w_norm) * unit_w
                    direction = np.array([unit_w[1], -unit_w[0]])
                else:
                    # Project on one of the cone legs.
                    leg = math.sqrt(d**2 - R**2)
                    if np.linalg.det([rel_position, w]) > 0:
                        # Left leg: rotate rel_position by the cone half-angle
                        # (sin theta = R/d, cos theta = leg/d); unit vector.
                        direction = np.array([rel_position[0]*leg - rel_position[1] * R,
                                              rel_position[0]*R + rel_position[1] * leg]) / d**2
                    else:
                        # Right leg.
                        direction = - np.array([rel_position[0] * leg + rel_position[1] * R,
                                                - rel_position[0] * R + rel_position[1] * leg]) / d**2
                    # Scalar projection of the relative velocity on the leg.
                    # Bug fix: the original used numpy's element-wise '*'
                    # where the RVO2 reference performs a dot product.
                    dot_product2 = np.dot(rel_velocity, direction)
                    # u points from rel_velocity to the obstacle boundary.
                    u = dot_product2 * direction - rel_velocity
            else:
                # Already colliding: pick the velocity that resolves the
                # overlap within one time step.
                w = rel_velocity - rel_position / dt
                w_norm = np.linalg.norm(w)
                unit_w = w / w_norm
                u = (R / dt - w_norm) * unit_w
                direction = np.array([unit_w[1], -unit_w[0]])
            # Reciprocity: each agent takes half of the avoidance effort.
            line = Line(agent.velocity + u / 2, direction)
            orca_lines.append(line)
        # Solve the LP, optimizing for the agent's preferred velocity.
        line_fail, new_vel = self.linear_program2(orca_lines, agent.maxSpeed, pref_vel, False)
        if line_fail < len(orca_lines):
            # The feasible region is empty: relax the constraints.
            new_vel = self.linear_program3(orca_lines, line_fail, agent.maxSpeed, pref_vel)
        return new_vel

    # de Berg, Cheong, van Kreveld and Overmars, Computational Geometry:
    # Algorithms and Applications, 3rd ed., chapter 4.3 (incremental LP).
    def linear_program1(self, lines, line_no, max_speed, opt_v, opt_dir):
        """Solve the 1-D linear program on constraint ``line_no``.

        Called when the current result violates constraint ``line_no``, so
        the optimum must lie on that constraint line, clipped by the
        max-speed circle and all previous constraints.

        Returns:
            (feasible, velocity): ``(False, opt_v)`` when infeasible,
            otherwise ``(True, v)`` with ``v`` on constraint ``line_no``.
        """
        # Intersect the line with the max-speed circle: |P + t*d| = R with
        # |d| = 1 yields a quadratic in t, so the solution stays bounded.
        dot_product = np.dot(lines[line_no].point, lines[line_no].direction)
        discriminant = dot_product ** 2 - np.dot(lines[line_no].point, lines[line_no].point) + max_speed**2
        if discriminant < 0:
            # The constraint line lies entirely outside the max-speed circle.
            return False, opt_v
        sqrt_discriminant = math.sqrt(discriminant)
        t_left = - dot_product - sqrt_discriminant
        t_right = - dot_product + sqrt_discriminant
        for i in range(0, line_no):
            # Clip [t_left, t_right] by constraint i.
            # https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection
            denom = np.linalg.det([lines[line_no].direction, lines[i].direction])
            numer = np.linalg.det([lines[i].direction, lines[line_no].point - lines[i].point])
            if abs(denom) <= self.epsilon:
                # Lines are almost parallel.
                if numer < 0:
                    # line_no lies inside the forbidden half-plane of i.
                    return False, opt_v
                else:
                    continue
            t = numer / denom
            if denom >= 0:
                # Constraint i bounds line_no on the right.
                t_right = min(t_right, t)
            else:
                # Constraint i bounds line_no on the left.
                t_left = max(t_left, t)
            if t_left > t_right:
                # No feasible point remains on the constraint.
                return False, opt_v
        if opt_dir:
            # Optimize along direction opt_v (used during constraint
            # relaxation): take the extreme feasible point.
            if np.dot(opt_v, lines[line_no].direction) > 0:
                new_vel = lines[line_no].point + t_right * lines[line_no].direction
            else:
                new_vel = lines[line_no].point + t_left * lines[line_no].direction
        else:
            # Optimize closeness to opt_v: project it on the constraint and
            # clamp to the feasible interval [t_left, t_right].
            t = np.dot(lines[line_no].direction, opt_v - lines[line_no].point)
            if t < t_left:
                new_vel = lines[line_no].point + t_left * lines[line_no].direction
            elif t > t_right:
                new_vel = lines[line_no].point + t_right * lines[line_no].direction
            else:
                new_vel = lines[line_no].point + t * lines[line_no].direction
        return True, new_vel

    def linear_program2(self, lines, max_speed, opt_v, opt_dir):
        """Solve the 2-D linear program defined by ``lines``.

        A velocity v violates a constraint when
        det([line.direction, line.point - v]) > 0, i.e. the feasible
        half-plane lies to the left of the line direction.
        If opt_dir is True then opt_v must be unit length.

        Returns:
            (index, velocity): ``index == len(lines)`` on success, otherwise
            the index of the constraint that emptied the solution space.
        """
        if opt_dir:
            # opt_v is a unit direction: start at the max-speed boundary.
            result = max_speed * opt_v
        elif np.linalg.norm(opt_v) > max_speed:
            # Clamp the preferred velocity onto the max-speed circle.
            result = max_speed * opt_v / np.linalg.norm(opt_v)
        else:
            result = opt_v
        # Incrementally satisfy each constraint.
        for i in range(0, len(lines)):
            line = lines[i]
            if np.linalg.det([line.direction, line.point - result]) > 0:
                # The current result violates constraint i: the new optimum
                # lies on that constraint unless the program is infeasible.
                feasible, new_result = self.linear_program1(lines, i, max_speed, opt_v, opt_dir)
                if not feasible:
                    # NOTE(review): RVO2 returns the partial result here,
                    # while this port discards it and returns opt_v — verify
                    # against the reference implementation.
                    return i, opt_v
                else:
                    result = new_result
        return len(lines), result

    def linear_program3(self, lines, line_fail, max_speed, opt_v, num_obstacles=0):
        """Relax the constraints until the program becomes feasible.

        Used when the 2-D program is infeasible (ORCA paper, section 5.3):
        the participating-agent constraints are moved outward by the minimal
        common distance that reopens the solution space (a 3-D LP in theory,
        solved here as a sequence of projected 2-D LPs).

        Args:
            lines: all ORCA constraint lines.
            line_fail: index of the first constraint that emptied the space.
            max_speed: speed limit (radius of the admissible velocity disc).
            opt_v: preferred velocity.
            num_obstacles: leading constraints from non-participating
                obstacles that must not be relaxed (currently always 0).

        Returns:
            np.ndarray: the least-penetrating feasible velocity.
        """
        # distance tracks how far the constraints have been relaxed so far.
        distance = 0
        result = opt_v
        # Start at the first constraint that failed.
        for i in range(line_fail, len(lines)):
            if np.linalg.det([lines[i].direction, lines[i].point - result]) > distance:
                # result violates constraint i by more than the current
                # relaxation, so the relaxed solution space is still empty.
                if num_obstacles != 0:
                    # TODO Copy lines created by non-participating obstacles
                    projected_lines = []
                    pass
                else:
                    projected_lines = []
                # Project every earlier participating constraint onto i.
                for j in range(num_obstacles, i):
                    new_line = Line()
                    denom = np.linalg.det([lines[i].direction, lines[j].direction])
                    if abs(denom) <= self.epsilon:
                        # Constraints i and j are (nearly) parallel.
                        if np.dot(lines[i].direction, lines[j].direction) > 0:
                            # Same direction: j adds nothing beyond i.
                            continue
                        else:
                            # Opposite directions: anchor the projected line
                            # midway so relaxation opens the space between.
                            new_line.point = 0.5*(lines[i].point+lines[j].point)
                    else:
                        numer = np.linalg.det([lines[j].direction, lines[i].point - lines[j].point])
                        new_line.point = lines[i].point + (numer / denom) * lines[i].direction
                    new_line.direction = (lines[j].direction-lines[i].direction) / np.linalg.norm(lines[j].direction-lines[i].direction)
                    projected_lines.append(new_line)
                # Optimize along the perpendicular of constraint i — the
                # direction the intersection moves while relaxing.
                projected_vel = np.array([-lines[i].direction[1],
                                          lines[i].direction[0]])
                line_fail, solution = self.linear_program2(projected_lines, max_speed, projected_vel, True)
                if line_fail < len(projected_lines):
                    # Should not happen except due to floating-point error.
                    pass
                result = solution
                # Signed distance from result to line i (direction is unit).
                distance = np.linalg.det([lines[i].direction, lines[i].point - result])
        return result
| 56.968627 | 191 | 0.597921 | 14,143 | 0.973566 | 0 | 0 | 0 | 0 | 0 | 0 | 6,277 | 0.432092 |
ad4bc314e783e86d0936529813182a506c16c465 | 3,030 | py | Python | lib/heuristic_methods/greedy_packing/largest_heat_match_greedy.py | cog-imperial/min_matches_heuristics | 669fd082c747f886c949aacc427f00e80d0c5291 | [
"Apache-2.0"
] | 4 | 2019-04-14T14:11:57.000Z | 2020-07-02T10:42:12.000Z | lib/heuristic_methods/greedy_packing/largest_heat_match_greedy.py | cog-imperial/min_matches_heuristics | 669fd082c747f886c949aacc427f00e80d0c5291 | [
"Apache-2.0"
] | null | null | null | lib/heuristic_methods/greedy_packing/largest_heat_match_greedy.py | cog-imperial/min_matches_heuristics | 669fd082c747f886c949aacc427f00e80d0c5291 | [
"Apache-2.0"
] | 2 | 2018-03-27T15:05:40.000Z | 2020-07-03T08:00:37.000Z | from time import time
from ...problem_classes.heat_exchange import Heat_Exchange
def largest_heat_match_greedy(inst):
    """Greedy heuristic for minimizing the number of heat-exchange matches.

    Repeatedly selects the unmatched (hot, cold) stream pair able to
    exchange the most heat, commits that exchange, and loops until all hot
    heat has been assigned (up to a numerical tolerance).

    Note: the rows of inst.QH / inst.QC are shared (shallow copy) and are
    mutated in place as heat is assigned, matching the original behavior.

    Args:
        inst: problem instance exposing n, m, k, QH, QC and R.

    Returns:
        Tuple of (Heat_Exchange solution, elapsed wall-clock seconds).
    """
    n = inst.n
    m = inst.m
    k = inst.k
    QH = list(inst.QH)
    QC = list(inst.QC)
    R = list(inst.R)
    # Solution containers: y[i][j] marks a match, q[i][s][j][t] the heat
    # exchanged between hot (i, s) and cold (j, t).
    y = [[0] * m for _ in range(n)]
    q = [[[[0] * k for _ in range(m)] for _ in range(k)] for _ in range(n)]
    M = []
    # Tolerance guarding against floating-point residue.
    epsilon = 10**(-7)
    remaining_heat = sum(sum(QH[i]) for i in range(n))
    start_time = time()
    while remaining_heat > epsilon:
        best_i = -1
        best_j = -1
        best_q = [[0] * k for _ in range(k)]
        best_heat = 0
        best_R = list(R)
        # Evaluate every still-unmatched (hot, cold) pair.
        for i in range(n):
            for j in range(m):
                if (i, j) in M:
                    continue
                heat, q_ij, R_ij = max_heat(i, j, k, QH[i], QC[j], R)
                if heat > best_heat:
                    best_i, best_j = i, j
                    best_q = q_ij
                    best_R = R_ij
                    best_heat = heat
        # Commit the best match and update the remaining heat loads.
        M.append((best_i, best_j))
        y[best_i][best_j] = 1
        for s in range(k):
            for t in range(k):
                q[best_i][s][best_j][t] = best_q[s][t]
                QH[best_i][s] -= best_q[s][t]
                QC[best_j][t] -= best_q[s][t]
        R = best_R
        remaining_heat = sum(sum(QH[i]) for i in range(n))
    elapsed_time = time() - start_time
    sol = Heat_Exchange('greedy_packing', n, m, k, len(M), y, q)
    return (sol, elapsed_time)
def max_heat(i, j, k, QH, QC, R):
    """Maximum heat exchangeable between hot stream i and cold stream j.

    The arguments QH, QC and R are copied, so the caller's lists are not
    mutated. Heat first flows within each temperature interval, then from a
    hotter interval s to a colder interval t > s, limited by the residual
    capacity of every interval in between.

    Returns:
        Tuple of (total heat, k x k exchange matrix, updated residuals).
    """
    hot = list(QH)
    cold = list(QC)
    residuals = list(R)
    exchanged = [[0] * k for _ in range(k)]
    total = 0
    # Same-interval exchanges first.
    for t in range(k):
        amount = min(hot[t], cold[t])
        exchanged[t][t] = amount
        hot[t] -= amount
        cold[t] -= amount
        total += amount
    # Cross-interval exchanges: s feeds colder t, bounded by residuals.
    for s in range(k):
        for t in range(s + 1, k):
            amount = min(hot[s], cold[t], min(residuals[s + 1:t + 1]))
            exchanged[s][t] = amount
            hot[s] -= amount
            cold[t] -= amount
            for u in range(s + 1, t + 1):
                residuals[u] -= amount
            total += amount
    return (total, exchanged, residuals)
ad4bd3da35c74d546fcb8432bf4ca348b5a1195d | 3,125 | py | Python | cosifer/utils/stats.py | C-nit/cosifer | 550b3ee1055bf1ceb8883ee8736c8d538ceb6ee4 | [
"MIT"
] | 7 | 2020-01-17T17:29:37.000Z | 2022-02-18T09:53:50.000Z | cosifer/utils/stats.py | C-nit/cosifer | 550b3ee1055bf1ceb8883ee8736c8d538ceb6ee4 | [
"MIT"
] | 2 | 2020-10-19T14:28:49.000Z | 2021-01-14T18:20:46.000Z | cosifer/utils/stats.py | C-nit/cosifer | 550b3ee1055bf1ceb8883ee8736c8d538ceb6ee4 | [
"MIT"
] | 3 | 2020-11-02T15:42:34.000Z | 2021-02-24T12:37:34.000Z | """Statistics utils."""
import numpy as np
import pandas as pd
from statsmodels.stats import multitest as mt
from .data import scale_graph
def bonferroni_correction(p_values, q_star):
    """Indices of hypotheses rejected at level ``q_star`` after a
    Bonferroni correction (nan-robust, via statsmodels).

    Args:
        p_values (iterable): p-values to be corrected.
        q_star (float): false discovery rate.

    Returns:
        list: indices of significant p-values.
    """
    reject, *_ = mt.multipletests(p_values, alpha=q_star, method='b')
    return [index for index, flag in enumerate(reject) if flag]
def benjamini_hochberg_correction(p_values, q_star):
    """Indices of hypotheses rejected at level ``q_star`` after a
    Benjamini-Hochberg correction (nan-robust, via statsmodels).

    Args:
        p_values (iterable): p-values to be corrected.
        q_star (float): false discovery rate.

    Returns:
        list: indices of significant p-values.
    """
    reject, *_ = mt.multipletests(p_values, alpha=q_star, method='fdr_bh')
    return [index for index, flag in enumerate(reject) if flag]
def benjamini_yekutieli_correction(p_values, q_star):
    """Indices of hypotheses rejected at level ``q_star`` after a
    Benjamini-Yekutieli correction (nan-robust, via statsmodels).

    Args:
        p_values (iterable): p-values to be corrected.
        q_star (float): false discovery rate.

    Returns:
        list: indices of significant p-values.
    """
    reject, *_ = mt.multipletests(p_values, alpha=q_star, method='fdr_by')
    return [index for index, flag in enumerate(reject) if flag]
# Dispatch tables mapping a correction-method name to a callable taking
# (p_values, threshold). CORRECTIONS returns significant indices;
# CORRECTIONS_SIGNIFICANCE returns the boolean rejection mask.
CORRECTIONS = {
    'bonferroni': bonferroni_correction,
    'b-h': benjamini_hochberg_correction,
    'b-y': benjamini_yekutieli_correction
}
CORRECTIONS_SIGNIFICANCE = {
    'bonferroni': lambda pvals, alpha: mt.multipletests(pvals, alpha=alpha, method='b')[0],
    'b-h': lambda pvals, alpha: mt.multipletests(pvals, alpha=alpha, method='fdr_bh')[0],
    'b-y': lambda pvals, alpha: mt.multipletests(pvals, alpha=alpha, method='fdr_by')[0]
}
def from_precision_matrix_partial_correlations(precision, scaled=False):
    """Compute partial correlations from a precision matrix.

    Args:
        precision (np.ndarray): a square precision (inverse covariance)
            matrix.
        scaled (bool, optional): min-max scale the correlations.
            Defaults to False.

    Returns:
        np.ndarray: the partial correlation matrix.
    """
    diagonal = np.diag(precision)
    normalizer = np.sqrt(np.outer(diagonal, diagonal))
    partial = -precision / normalizer
    np.fill_diagonal(partial, 1.)
    if scaled:
        return scale_graph(pd.DataFrame(partial)).values
    return partial
| 30.940594 | 75 | 0.68096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,581 | 0.50592 |
ad4d7bab059d0ea9bad8d2d28f8ce727a3796264 | 14,523 | py | Python | src/dmglib.py | rickmark/dmglib | abcb16b4eeaec8e34f13248874c0e5b39dcfd96d | [
"MIT"
] | null | null | null | src/dmglib.py | rickmark/dmglib | abcb16b4eeaec8e34f13248874c0e5b39dcfd96d | [
"MIT"
] | null | null | null | src/dmglib.py | rickmark/dmglib | abcb16b4eeaec8e34f13248874c0e5b39dcfd96d | [
"MIT"
] | null | null | null | """
dmglib is a basic ``hdiutil`` wrapper that simplifies working with dmg images from Python.
The module can be used to attach and detach disk images, to check a disk image's
validity and to query whether disk images are password protected or have a license
agreement included.
"""
import plistlib
import subprocess
import os
import enum
import sys
import typing
from contextlib import contextmanager
# Package name.
NAME = 'dmglib'
# Absolute path of the hdiutil binary invoked by the wrapper functions below.
HDIUTIL_PATH = '/usr/bin/hdiutil'
class InvalidDiskImage(Exception):
    """Raised when a disk image fails validation and cannot be attached."""
    pass
class InvalidOperation(Exception):
    """Raised when the user performs an invalid operation.
    Examples include trying to detach a dmg that was never attached or
    trying to attach a disk image twice.
    """
    pass
class AttachingFailed(Exception):
    """Raised when attaching a disk image fails for unknown reasons."""
    pass
class AlreadyAttached(AttachingFailed):
    """Raised when the disk image has already been attached previously."""
    pass
class PasswordRequired(AttachingFailed):
    """Raised when no password was supplied even though one is required."""
    pass
class PasswordIncorrect(AttachingFailed):
    """Raised when an incorrect password was supplied for the disk image."""
    pass
class LicenseAgreementNeedsAccepting(AttachingFailed):
    """Raised when a license agreement must be accepted before attaching."""
    pass
class DetachingFailed(Exception):
    """Raised when a volume could not be detached successfully."""
    pass
class ConversionFailed(Exception):
    """Raised when converting a disk image to another format fails."""
    pass
def _raw_hdituil(args, input: typing.Optional[bytes] = None) -> typing.Tuple[int, bytes]:
    """Invoke hdiutil with the supplied arguments.

    Args:
        args: command-line arguments passed to hdiutil.
        input: optional bytes fed to hdiutil's stdin (e.g. a keyphrase).

    Returns:
        Tuple of (return code, raw stdout bytes).

    Raises:
        FileNotFoundError: the hdiutil binary is not present on this system.
    """
    if not os.path.exists(HDIUTIL_PATH):
        # Fix: the original error message misspelled 'hdiutil'.
        raise FileNotFoundError('Unable to find hdiutil.')
    completed = subprocess.run([HDIUTIL_PATH] + args,
                               input=input, capture_output=True)
    return (completed.returncode, completed.stdout)
def _hdiutil(args, plist=True, keyphrase=None) -> (bool, dict):
    """Run the hdiutil binary with the supplied parameters.

    Args:
        args: arguments for the hdiutil command; mutated in place when the
            '-plist' or '-stdinpass' flags need to be appended.
        plist: whether to ask hdiutil for plist (dictionary) output.
        keyphrase: optional keyphrase for encrypted disk images, passed via
            stdin.

    Returns:
        Tuple of (success flag, decoded plist dict — empty on failure or
        when plist output was not requested).
    """
    # Certain operations do not support plist output...
    if plist and '-plist' not in args:
        args.append('-plist')
    if keyphrase is not None:
        args.append('-stdinpass')
    stdin_payload = keyphrase.encode('utf8') if keyphrase else None
    returncode, stdout = _raw_hdituil(args, input=stdin_payload)
    if returncode != 0:
        return False, dict()
    return (True, plistlib.loads(stdout)) if plist else (True, dict())
def _hdiutil_isencrypted(path) -> bool:
    """Return True when the disk image at ``path`` is encrypted."""
    success, info = _hdiutil(['isencrypted', path])
    if not success:
        return False
    return info.get('encrypted', False)
def _hdiutil_imageinfo(path, keyphrase=None) -> (bool, dict):
    """Obtain image information for a disk image.

    Args:
        path: the disk image to inspect.
        keyphrase: optional keyphrase for encrypted images.

    Returns:
        Tuple of (success flag, image info dictionary from hdiutil).
    """
    args = ['imageinfo', path]
    return _hdiutil(args, keyphrase=keyphrase)
def _hdiutil_convert(input_path: str, output_path: str, disk_format: str) -> (bool, typing.Sequence[str]):
    """Convert a disk image to a different format.

    Args:
        input_path: the source disk image.
        output_path: destination path for the converted image.
        disk_format: an hdiutil-supported disk image format identifier.

    Returns:
        Tuple of (success flag, hdiutil output describing the result).
    """
    args = ['convert', '-format', disk_format, '-o', output_path, input_path]
    return _hdiutil(args)
def _hdiutil_attach(path, keyphrase=None) -> (bool, dict):
    """Attach a disk image.

    Args:
        path: the disk image to attach.
        keyphrase: optional keyphrase for encrypted images.

    Returns:
        Tuple of (success flag, information on the mounted volumes).
    """
    # '-nobrowse' keeps the mounted volumes out of Finder.app.
    args = ['attach', path, '-nobrowse']
    return _hdiutil(args, keyphrase=keyphrase)
def _hdiutil_detach(dev_node, force=False) -> bool:
    """Detach an attached volume.

    Args:
        dev_node: filesystem path to the attached volume, e.g. '/dev/disk1s1'.
        force: whether to ignore open files on the attached volume.

    Returns:
        True when detaching succeeded.
    """
    args = ['detach', dev_node]
    if force:
        args.append('-force')
    success, _ = _hdiutil(args, plist=False)
    return success
def _hdiutil_info() -> (bool, dict):
    """Obtains state information about volumes attached on the system.

    Returns:
        Tuple of (success flag, hdiutil 'info' output dictionary).
    """
    return _hdiutil(['info'])
def attached_images() -> list:
    """Return the paths of all disk images currently attached."""
    _, infos = _hdiutil_info()
    paths = []
    for image in infos.get('images', []):
        if 'image-path' in image:
            paths.append(image['image-path'])
    return paths
def dmg_already_attached(path: str) -> bool:
    """Check whether the disk image at ``path`` is already attached.

    Querying the system for further information about an already attached
    image fails with a resource exhaustion error, so callers test this
    up front.
    """
    canonical = os.path.realpath(path)
    return canonical in attached_images()
def dmg_is_encrypted(path: str) -> bool:
    """Checks whether the DMG at the supplied path is password protected."""
    return _hdiutil_isencrypted(path)
def dmg_check_keyphrase(path: str, keyphrase: str) -> bool:
    """Check whether ``keyphrase`` unlocks the disk image at ``path``.

    Note:
        The disk image is assumed to be encrypted; an exception is raised
        otherwise.

    Args:
        path: disk image whose keyphrase should be checked.
        keyphrase: candidate keyphrase.

    Returns:
        True when the keyphrase is correct.

    Raises:
        InvalidOperation: the disk image is not encrypted.
    """
    if not dmg_is_encrypted(path):
        raise InvalidOperation('DiskImage is not encrypted')
    succeeded, _ = _hdiutil_imageinfo(path, keyphrase=keyphrase)
    return succeeded
def dmg_is_valid(path: str) -> bool:
    """Check the validity of the disk image at ``path``.

    An image that hdiutil reports as encrypted is treated as valid
    outright; otherwise the image is valid when hdiutil can read its
    image info.
    """
    if dmg_is_encrypted(path):
        return True
    valid, _ = _hdiutil_imageinfo(path)
    return valid
class MountedVolume:
    """Value object describing one mounted volume of an attached image."""
    def __init__(self, mount_point, volume_kind):
        # Filesystem path the volume is mounted at.
        self.mount_point = mount_point
        # Filesystem kind reported by hdiutil (e.g. 'apfs').
        self.volume_kind = volume_kind
class DMGState(enum.Enum):
    """Attachment state tracked by DMGStatus."""
    DETACHED = 1
    ATTACHED = 2
class DiskFormat(enum.Enum):
    """Disk image format codes passed to 'hdiutil convert -format'
    (see DiskImage.convert)."""
    READ_ONLY = 'UDRO'
    COMPRESSED_ADC = 'UDCO'
    COMPRESSED = 'UDZO'
    COMPRESSED_BZIP2 = 'UDBZ'
    COMPRESSED_LZFSE = 'UDFO'
    COMPRESSED_LZMA = 'ULMO'
    ENTIRE_DEVICE = 'UFBI'
    IPOD_IMAGE = 'IPOD'
    UDIF_STUB = 'UDxx'
    SPARSE_BUNDLE = 'UDSB'
    SPARSE = 'UDSP'
    READ_WRITE = 'UDRW'
    OPTICAL_MASTER = 'UDTO'
    DISK_COPY = 'DC42'
    NDIF_READ_WRITE = 'RdWr'
    NDIF_READ_ONLY = 'Rdxx'
    NDIF_COMPRESSED = 'ROCo'
    NDIF_KEN_CODE = 'Rken'
class DMGStatus:
    """Tracks whether a DiskImage is attached and where it is mounted."""
    def __init__(self):
        self.status = DMGState.DETACHED
        # List of MountedVolume objects while attached, empty otherwise.
        self.mount_points = []
        # Root '/dev/disk...' entry of the attached image, or None.
        self.root_dev_entry = None
    def is_attached(self) -> bool:
        """Return True while the image is recorded as attached."""
        return self.status == DMGState.ATTACHED
    def record_attached(self, paths, root_dev_entry):
        """Remember the mounted volumes and the root /dev entry."""
        self.status = DMGState.ATTACHED
        self.mount_points = paths
        self.root_dev_entry = root_dev_entry
    def record_detached(self):
        """Reset to the detached state."""
        self.status = DMGState.DETACHED
        self.mount_points = []
class DiskImage:
    """Class representing macOS Disk Images (.dmg) files.
    """
    def __init__(self, path, keyphrase=None):
        """Initialize a disk image object. Note: Simply constructing the object
        does not attach the DMG. Use the :py:meth:`DiskImage.attach` method for that.
        Args:
            path: The path to the disk image
            keyphrase: Optional argument for password protected images
        Raises:
            AlreadyAttached: The disk image is already attached on the system.
            InvalidDiskImage: The disk image is not a valid disk image.
            PasswordRequired: A password is required but none was provided.
            PasswordIncorrect: An incorrect password was supplied.
        """
        # The hdiutil fails when the target path has already been mounted / attached.
        if dmg_already_attached(path):
            raise AlreadyAttached()
        if not dmg_is_valid(path):
            raise InvalidDiskImage()
        if dmg_is_encrypted(path) and keyphrase is None:
            raise PasswordRequired()
        if dmg_is_encrypted(path) and not dmg_check_keyphrase(path, keyphrase):
            raise PasswordIncorrect()
        self.path = path
        self.keyphrase = keyphrase
        # Cached 'hdiutil imageinfo' output, queried by _lookup_property.
        _, self.imginfo = _hdiutil_imageinfo(path, keyphrase=keyphrase)
        self.status = DMGStatus()
    def _lookup_property(self, property_name, default_value):
        """Look up a key in the cached image info 'Properties' dict."""
        return self.imginfo \
            .get('Properties', dict()) \
            .get(property_name, default_value)
    def has_license_agreement(self) -> bool:
        """Checks whether the disk image has an attached license agreement.
        DMGs with license agreements cannot be attached using this package.
        """
        return self._lookup_property('Software License Agreement', False)
    def attach(self):
        """Attaches a disk image.
        Returns:
            List of mount points.
        Raises:
            InvalidOperation: This disk image has already been attached.
            LicenseAgreementNeedsAccepting: The image cannot be automatically
                mounted due to a license agreement.
            AttachingFailed: Could not attach the disk image or no volumes on
                mounted disk.
        """
        if self.status.is_attached():
            raise InvalidOperation()
        if self.has_license_agreement():
            raise LicenseAgreementNeedsAccepting()
        success, result = _hdiutil_attach(self.path, keyphrase=self.keyphrase)
        if not success:
            raise AttachingFailed('Attaching failed for unknown reasons.')
        # Keep only entities that were actually mounted as volumes.
        mounted_volumes = [MountedVolume(mount_point=entity['mount-point'],
                                         volume_kind=entity['volume-kind'])
                           for entity in result.get('system-entities', [])
                           if 'mount-point' in entity and 'volume-kind' in entity]
        if len(mounted_volumes) == 0:
            raise AttachingFailed('Attaching the disk image mounted no volumes.')
        # The root dev entry is the smallest '/dev/disk...' entry when sorted
        # lexicographically. (/dev/disk2 < /dev/disk3 < /dev/disk3s1)
        # NOTE(review): lexicographic order misorders double-digit disks
        # ('/dev/disk10' < '/dev/disk2') — confirm this cannot occur here.
        # In the case of disk images containing APFS volumes, we need to detach this disk _after_
        # detaching the main volumes. This is a bug in Apple's code -- for all other types of volumes,
        # detaching the volume automatically detaches the entire disk image.
        root_dev_entry = sorted(entity['dev-entry']
                                for entity in result.get('system-entities', [])
                                if 'dev-entry' in entity)[0]
        self.status.record_attached(mounted_volumes, root_dev_entry)
        return [volume.mount_point for volume in self.status.mount_points]
    def detach(self, force=True):
        """Detaches a disk image.
        Args:
            force: ignore open files on mounted volumes. See `man 1 hdiutil`.
        Raises:
            InvalidOperation: The disk image was not attached on the system.
            DetachingFailed: Detaching failed for unknown reasons.
        """
        if not self.status.is_attached():
            raise InvalidOperation()
        # Detaching any mount point of an attached image automatically unmounts
        # all associated volumes.
        # ... unless one of these volumes is an APFS volume. In that case,
        # it needs to be detached separately. Additionally, the root dev entry
        # also needs to be detached explicitly.
        # First detach all APFS volumes, otherwise detaching other volumes appears to
        # succeeds but really fails with an error code (!)
        for volume in self.status.mount_points:
            if volume.volume_kind == 'apfs':
                success = _hdiutil_detach(volume.mount_point, force=force)
                if not success:
                    raise DetachingFailed()
        # Finally, detach the root dev entry.
        success = _hdiutil_detach(self.status.root_dev_entry, force=force)
        if not success:
            raise DetachingFailed()
        self.status.record_detached()
    def convert(self, path: str, disk_format: DiskFormat) -> str:
        """Converts the disk image to ``disk_format``, writing it to ``path``.
        Returns:
            The path of the converted image as reported by hdiutil.
        Raises:
            ConversionFailed: hdiutil reported a conversion error.
        """
        # NOTE(review): assumes hdiutil's plist output for 'convert' is an
        # array of created paths (indexable by [0]) — verify on target macOS.
        success, mount_point_array = _hdiutil_convert(self.path, path, disk_format.value)
        if success:
            return mount_point_array[0]
        raise ConversionFailed()
@contextmanager
def attachedDiskImage(path: str, keyphrase=None):
    """Context manager that attaches a disk image and detaches it on exit.

    Yields the list of mount points of the attached volumes; attaching
    guarantees at least one mount point. Exceptions raised by DiskImage
    (see that class) propagate to the caller, who may pre-check with
    :meth:`dmg_is_encrypted` and friends.

    Example::

        with dmg.attachedDiskImage('path/to/disk_image.dmg',
                                   keyphrase='sample') as mount_points:
            print(mount_points)
    """
    image = DiskImage(path, keyphrase=keyphrase)
    try:
        yield image.attach()
    finally:
        if image.status.is_attached():
            image.detach()
| 31.988987 | 106 | 0.658955 | 7,747 | 0.53343 | 791 | 0.054465 | 807 | 0.055567 | 0 | 0 | 7,447 | 0.512773 |
ad500e55895d6938ba4ad576c120964bb8775e2f | 786 | py | Python | example_app/sqlalchemy/models.py | aalamdev/py-angular-testapp | 34fbeae36f8890dc254fb181d2d4fe986ada6d00 | [
"MIT"
] | null | null | null | example_app/sqlalchemy/models.py | aalamdev/py-angular-testapp | 34fbeae36f8890dc254fb181d2d4fe986ada6d00 | [
"MIT"
] | null | null | null | example_app/sqlalchemy/models.py | aalamdev/py-angular-testapp | 34fbeae36f8890dc254fb181d2d4fe986ada6d00 | [
"MIT"
] | null | null | null | import sqlalchemy as sqa
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
db_name = "aalam_pyangtestapp"
class Owners(Base):
__tablename__ = "owners"
__table_args__ = {'schema': db_name}
id = sqa.Column(sqa.Integer, primary_key=True)
email = sqa.Column(sqa.VARCHAR(32), nullable=False, unique=True)
def __init__(self, email):
self.email = email
class Items(Base):
__tablename__ = "items"
__table_args__ = {'schema': db_name}
name = sqa.Column(sqa.VARCHAR(16), primary_key=True)
type_ = sqa.Column(sqa.VARCHAR(16))
owner = sqa.Column(sqa.Integer, sqa.ForeignKey(Owners.id))
def __init__(self, name, type_, owner):
self.name = name
self.owner = owner
self.type_ = type_
| 25.354839 | 68 | 0.680662 | 641 | 0.815522 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.064885 |
ad50231d4a1b87b0ce0cfc2a3007141905a769f4 | 1,195 | py | Python | UMSLHackRestAPI/api/serializeres.py | trujivan/climate-impact-changes | 609b8197b0ede1c1fdac3aa82b34e73e6f4526e3 | [
"MIT"
] | 1 | 2020-03-29T17:52:26.000Z | 2020-03-29T17:52:26.000Z | UMSLHackRestAPI/api/serializeres.py | trujivan/climate-impact-changes | 609b8197b0ede1c1fdac3aa82b34e73e6f4526e3 | [
"MIT"
] | 6 | 2021-03-19T00:01:21.000Z | 2021-09-22T18:37:17.000Z | UMSLHackRestAPI/api/serializeres.py | trujivan/climate-impact-changes | 609b8197b0ede1c1fdac3aa82b34e73e6f4526e3 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from .utils import get_ml_predictions
from .models import MLRequest, Prediction
class PredictionSerializer(serializers.ModelSerializer):
    """Serializes one Prediction row: a (year, pollution) pair."""
    class Meta:
        model = Prediction
        fields = ['year', 'pollution',]
class MLRequestSerializer(serializers.ModelSerializer):
    """Serializes an MLRequest and, on create, runs the ML model and stores
    one Prediction row per predicted year."""
    # nested read-only list of the predictions attached to this request
    predictions = PredictionSerializer(many=True, read_only=True)
    factor = serializers.ChoiceField(choices=['NO2 AQI', 'SO2 AQI', 'CO AQI','O3 AQI'])
    class Meta:
        model = MLRequest
        fields = ['start_year', 'end_year', 'state', 'factor', 'predictions']
    def create(self, validated_data, *args, **kwargs):
        # get_ml_predictions returns one value per year in
        # [start_year, end_year] — presumably in ascending year order; the
        # loop below relies on that (TODO confirm against its implementation).
        predicted_data = get_ml_predictions(validated_data['state'], validated_data['factor'],validated_data['start_year'],
                                            validated_data['end_year'])
        ml_request = MLRequest.objects.create(**validated_data)
        year = int(validated_data['start_year'])
        for prediction in predicted_data:
            # persist each predicted pollution value under consecutive years
            Prediction.objects.create(request=ml_request, year=year, pollution=prediction)
            year += 1
        return ml_request
| 37.34375 | 123 | 0.669456 | 1,070 | 0.895397 | 0 | 0 | 0 | 0 | 0 | 0 | 186 | 0.155649 |
ad51ca380d95e2a4ab5344077a584650b02823ba | 160 | py | Python | info/modules/passport/__init__.py | xnzgt/git_flask_news | 2511927efd2ecd05f2e4312a896cbdfaf69da790 | [
"MIT"
] | null | null | null | info/modules/passport/__init__.py | xnzgt/git_flask_news | 2511927efd2ecd05f2e4312a896cbdfaf69da790 | [
"MIT"
] | null | null | null | info/modules/passport/__init__.py | xnzgt/git_flask_news | 2511927efd2ecd05f2e4312a896cbdfaf69da790 | [
"MIT"
] | null | null | null | # 创建蓝图接收前端发送数据
from flask import Blueprint
# url_prefix distinguishes this blueprint's routes from those of other blueprints
passport_blu = Blueprint("passport",__name__,url_prefix="/passport")
# imported after the blueprint exists — presumably so the view modules can
# register their routes on passport_blu (avoids a circular import); confirm
from .views import *
| 22.857143 | 68 | 0.80625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.52381 |
ad53cf2904294aa05e0f2f946c5cc98c0ecb42f6 | 1,276 | py | Python | sequential_bake_main.py | Mateusz-Grzelinski/cycles-bake-workaround | 9bf68e4e646561c6b2de1303fe0f131dd95e4a9f | [
"MIT"
] | 1 | 2021-06-04T11:39:22.000Z | 2021-06-04T11:39:22.000Z | sequential_bake_main.py | Mateusz-Grzelinski/cycles-bake-workaround | 9bf68e4e646561c6b2de1303fe0f131dd95e4a9f | [
"MIT"
] | null | null | null | sequential_bake_main.py | Mateusz-Grzelinski/cycles-bake-workaround | 9bf68e4e646561c6b2de1303fe0f131dd95e4a9f | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys
import argparse
import os
import tempfile
def parse():
    """Parse the command line: one positional argument, the .blend file path."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "file",
        help="Path to blend file. File should be previously prepared for baking",
    )
    return arg_parser.parse_args()
def main():
    """ Calls instances of blender with script that will bake.

    Repeatedly launches blender with the bake helper script; the helper
    writes "index\\ntotal" into a shared temp file, and we loop until
    every bake slot has been processed.
    """
    import errno  # local import for the OSError code check below

    args = parse()
    counter_file = tempfile.NamedTemporaryFile(mode='r')
    blender_script = "./bpy_bake.py"
    while True:
        try:
            os.system("blender " + args.file +
                      " --background --factory-startup --python " +
                      blender_script + " -- " +
                      counter_file.name + " ")
        except OSError as e:
            # BUG FIX: this previously compared against os.errno.ENOENT, but
            # the os.errno alias was removed from Python 3; use the errno
            # module instead. (NOTE(review): os.system reports failure via
            # its return code rather than raising OSError in most cases, so
            # this handler rarely fires — consider subprocess.run instead.)
            if e.errno == errno.ENOENT:
                print("Is blender installed?", file=sys.stderr)
            else:
                print("Something went terribly wrong...", file=sys.stderr)
        # The helper script rewrites the counter file each run: first line is
        # the current index, second line the total number of bakes.
        counter_file.seek(0)
        index = int(counter_file.readline())
        total = int(counter_file.readline())
        if index == total:
            break
    counter_file.close()
    print("SUCCES!! Check if bake is correct.")
    print("Baked from file: ", args.file)
if __name__ == '__main__':
main()
| 25.019608 | 97 | 0.568182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 355 | 0.278213 |
ad56a104d06bf367829a2691a45c3980f07d5ff2 | 664 | py | Python | serve.py | uwmisl/purpledrop-driver | 7744141c40801d367b6dd54e42eec1ca69320100 | [
"MIT"
] | null | null | null | serve.py | uwmisl/purpledrop-driver | 7744141c40801d367b6dd54e42eec1ca69320100 | [
"MIT"
] | null | null | null | serve.py | uwmisl/purpledrop-driver | 7744141c40801d367b6dd54e42eec1ca69320100 | [
"MIT"
] | null | null | null |
from gevent import monkey
# monkey.patch_all() must run before the other imports so stdlib blocking I/O
# is replaced by gevent's cooperative versions.
monkey.patch_all()
import sys
import purpledrop.server as server
from purpledrop.purpledrop import list_purpledrop_devices, PurpleDropDevice, PurpleDropController
# Require exactly one attached PurpleDrop USB device.
devices = list_purpledrop_devices()
if(len(devices) == 0):
    print("No PurpleDrop USB device found")
    sys.exit(1)
elif len(devices) > 1:
    # Multiple devices is unsupported; list them so the user can unplug extras.
    print("Multiple PurpleDrop devices found. Please ammend software to allow selection by serial number")
    for d in devices:
        print(f"{d.device}: Serial {d.serial_number}")
    sys.exit(1)
# Wrap the single device in a controller and serve it on localhost:5000.
dev = PurpleDropDevice(devices[0].device)
controller = PurpleDropController(dev)
server.run_server(controller, "localhost:5000")
| 31.619048 | 106 | 0.762048 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 182 | 0.274096 |
ad57972501ce548d43bea55cbadb56125e31eb1f | 1,720 | py | Python | freiner/storage/redis_cluster.py | djmattyg007/freiner | 4acff72c55c37495862ea642a70b443da1278894 | [
"MIT"
] | null | null | null | freiner/storage/redis_cluster.py | djmattyg007/freiner | 4acff72c55c37495862ea642a70b443da1278894 | [
"MIT"
] | null | null | null | freiner/storage/redis_cluster.py | djmattyg007/freiner | 4acff72c55c37495862ea642a70b443da1278894 | [
"MIT"
] | null | null | null | from typing import Any
from urllib.parse import urlparse
from rediscluster import RedisCluster
from .redis import RedisStorage
class RedisClusterStorage(RedisStorage):
    """
    Rate limit storage backed by a redis cluster.

    Depends on the `redis-py-cluster` library.
    """

    @classmethod
    def from_uri(cls, uri: str, **options: Any) -> "RedisClusterStorage":
        """
        :param uri: URI of the form `redis+cluster://[:password]@host:port,host:port`
        :param options: All remaining keyword arguments are passed directly to the constructor
         of :class:`rediscluster.RedisCluster`.
        """
        netloc = urlparse(uri).netloc
        startup_nodes = []
        for location in netloc.split(","):
            host, port = location.split(":")
            startup_nodes.append({"host": host, "port": int(port)})

        options.setdefault("max_connections", 1000)
        options["startup_nodes"] = startup_nodes
        return cls(RedisCluster(**options))

    def reset(self) -> None:
        """
        Delete every rate-limiter key, one key at a time.

        Redis Clusters are sharded, so a cross-shard delete cannot be done
        atomically; instead every key prefixed with 'LIMITER' is deleted
        individually.

        .. warning::
            This operation was not tested with extremely large data sets.
            On a large production based system, care should be taken with its
            usage as it could be slow on very large data sets.
        """
        for raw_key in self._client.keys("LIMITER*"):
            self._client.delete(raw_key.decode("utf-8"))
__all__ = [
"RedisClusterStorage",
]
| 30.175439 | 94 | 0.62907 | 1,545 | 0.898256 | 0 | 0 | 745 | 0.43314 | 0 | 0 | 954 | 0.554651 |
ad5b0dcb1fddf5cb54d8253c41cd8dbc19845262 | 2,986 | py | Python | test4/alien_dict_coderpad.py | MrCsabaToth/IK | 713f91c28af7b4a964ba854ede9fec73bf0c4682 | [
"Apache-2.0"
] | null | null | null | test4/alien_dict_coderpad.py | MrCsabaToth/IK | 713f91c28af7b4a964ba854ede9fec73bf0c4682 | [
"Apache-2.0"
] | null | null | null | test4/alien_dict_coderpad.py | MrCsabaToth/IK | 713f91c28af7b4a964ba854ede9fec73bf0c4682 | [
"Apache-2.0"
def alien_order(words):
    """Derive the character ordering ("alphabet") implied by a list of words
    that is sorted under an unknown alphabet.

    Returns the alphabet as a string, or "" if the ordering constraints are
    contradictory (a cycle) or the word list is invalid (a word followed by
    its own proper prefix). Ties are broken lexicographically so the output
    is deterministic. (Kept from the original interface: an empty input
    returns [] rather than "".)
    """
    if not words:
        return []
    # Single word: no ordering information, just its unique characters sorted.
    if len(words) == 1:
        return "".join(sorted(set(words[0])))

    # Every character that appears anywhere is a graph node.
    adj = {ch: set() for word in words for ch in word}
    indegree = {ch: 0 for ch in adj}

    # BUG FIX: only *consecutive* word pairs carry ordering information, and
    # a word must not be followed by its own proper prefix. The original also
    # compared each word with itself and discarded the result of chars.union().
    for w1, w2 in zip(words, words[1:]):
        for c1, c2 in zip(w1, w2):
            if c1 != c2:
                # First differing character gives the edge c1 -> c2.
                if c2 not in adj[c1]:
                    adj[c1].add(c2)
                    indegree[c2] += 1
                break
        else:
            # No differing character: invalid if w1 is longer (e.g. "abc","ab").
            if len(w1) > len(w2):
                return ""

    # Kahn's topological sort, always emitting the smallest available letter.
    available = {ch for ch, deg in indegree.items() if deg == 0}
    order = []
    while available:
        ch = min(available)
        available.remove(ch)
        order.append(ch)
        for nxt in adj[ch]:
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                available.add(nxt)

    # If some node was never emitted, the constraints contain a cycle.
    if len(order) < len(adj):
        return ""
    return "".join(order)
import pytest
@pytest.mark.parametrize("words,expected", [
    (["zy", "zx"], "yxz"),
    (["wrt", "wrf", "er", "ett", "rftt"], "wertf"),
    (["ac", "ab", "b"], "acb"),
])
def test_alien_dict(words, expected):
    # Each case: a dictionary-ordered word list and the alphabet it implies.
    assert(alien_order(words) == expected)
# Running this file directly executes the test suite via pytest.
pytest.main()
| 27.145455 | 68 | 0.490288 | 0 | 0 | 0 | 0 | 239 | 0.08004 | 0 | 0 | 556 | 0.186202 |
ad5c12d313589ae7fb0ab9514bf6274dd4fef970 | 64 | py | Python | KAMA1ShortOnly/custom_indicators/__init__.py | ysdede/jesse_strategies | ade9f4ba42cec11207c766d267b9d8feb8bce648 | [
"CC0-1.0"
] | 38 | 2021-09-18T15:33:28.000Z | 2022-02-21T17:29:08.000Z | ott2butKAMA1/custom_indicators/__init__.py | ysdede/jesse_strategies | ade9f4ba42cec11207c766d267b9d8feb8bce648 | [
"CC0-1.0"
] | 4 | 2022-01-02T14:46:12.000Z | 2022-02-16T18:39:41.000Z | KAMA1ShortOnly/custom_indicators/__init__.py | ysdede/jesse_strategies | ade9f4ba42cec11207c766d267b9d8feb8bce648 | [
"CC0-1.0"
] | 11 | 2021-10-19T06:21:43.000Z | 2022-02-21T17:29:10.000Z | from .ott import ott
from .var import var
from .rma import rma
| 12.8 | 20 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ad5c22c2a30ebb3dd262b8552db1d5d150acb5ab | 500 | py | Python | tests/test_invoke.py | avara1986/ardy | 1942413f12e117b991278cada69f478474b9b94b | [
"Apache-2.0"
] | 3 | 2017-07-07T06:39:36.000Z | 2017-11-29T23:09:37.000Z | tests/test_invoke.py | avara1986/ardy | 1942413f12e117b991278cada69f478474b9b94b | [
"Apache-2.0"
] | 3 | 2017-07-06T20:23:30.000Z | 2018-11-05T21:15:48.000Z | tests/test_invoke.py | avara1986/ardy | 1942413f12e117b991278cada69f478474b9b94b | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# python imports
from __future__ import unicode_literals, print_function, absolute_import
import os
import unittest
from ardy.core.invoke import Invoke
TESTS_PATH = os.path.dirname(os.path.abspath(__file__))
class InvokeTest(unittest.TestCase):
    """Smoke test: construct an Invoke rooted at the tests directory and run
    the "LambdaExample1" lambda (passes if no exception is raised)."""
    # NOTE(review): EXAMPLE_PROJECT is not referenced inside this class —
    # confirm whether it is still needed.
    EXAMPLE_PROJECT = "myexamplelambdaproject"
    def setUp(self):
        pass
    def test_init(self):
        invoke = Invoke(path=TESTS_PATH)
        invoke.run("LambdaExample1")
if __name__ == '__main__':
unittest.main()
| 19.230769 | 72 | 0.726 | 223 | 0.446 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.16 |
ad5c5f31d45895838d2f7af9abde703612dccc2f | 4,415 | py | Python | pysparkbasics/L02_DataFrame/S01_DataStructures/01_RowClassExp.py | pengfei99/PySparkCommonFunc | 8238949f52a8e0d2c30c42d9f4002941f43db466 | [
"MIT"
] | null | null | null | pysparkbasics/L02_DataFrame/S01_DataStructures/01_RowClassExp.py | pengfei99/PySparkCommonFunc | 8238949f52a8e0d2c30c42d9f4002941f43db466 | [
"MIT"
] | null | null | null | pysparkbasics/L02_DataFrame/S01_DataStructures/01_RowClassExp.py | pengfei99/PySparkCommonFunc | 8238949f52a8e0d2c30c42d9f4002941f43db466 | [
"MIT"
] | null | null | null | from pyspark import Row
from pyspark.sql import SparkSession
"""
Row class introduction:
Row class extends the tuple hence it takes variable number of arguments, Row() is used to create the row object.
Once the row object created, we can retrieve the data from Row using index similar to tuple.
Key Points of Row Class:
- Earlier to Spark 3.0, when used Row class with named arguments, the fields are sorted by name.
- Since 3.0, Rows created from named arguments are not sorted alphabetically instead they will be ordered in
the position entered.
- To enable sorting by names, set the environment variable PYSPARK_ROW_FIELD_SORTING_ENABLED to true.
- Row class provides a way to create a struct-type column as well.
Note that, here we called the elements in a row "field", not column. Because column only make sense in a dataframe.
"""
"""Exp1 Row Object creation
Row object can have primitive field, array field, map field and struct field
"""
def exp1():
    """Demonstrate Row creation and field access: positional index, field
    name, and nested struct/array/map fields."""
    # We can create row object without giving a field name, and accessing field by using index
    print("Exp1 output simple row object without name: ")
    row1 = Row("Alice", 18)
    print("name:{},age:{}".format(row1[0], str(row1[1])))
    # We can also specify field name when create a row object, then we can access it by using its name
    print("Exp1 output row object with field name: ")
    row2 = Row(name="Bob", age=38)
    print("name:{},age:{}".format(row2.name, str(row2.age)))
    # Row object can have primitive field, array field, map field and struct field
    # To access struct field, use ".", to access array field use [index], to access map field use .get(key)
    row3 = Row(name=Row(fname="Alice", lname="Liu"), score=[10, 20, 40], properties={"hair": "black", "eye": "bleu"})
    print("first_name:{}, last_name:{}, 1st_score:{}, eye:{}".format(row3.name.fname, row3.name.lname, row3.score[0],
                                                                     row3.properties.get("eye")))
""" Exp2 : Create custom class from Row
We can create a custom class by using Row(*fieldName)
"""
def exp2():
    """Demonstrate building a reusable Row 'class' (a record factory)."""
    # Row(*field_names) returns a factory that builds rows with those fields,
    # so the field names are not repeated for every instance.
    Student = Row("name", "age")
    s1 = Student("alice", 18)
    s2 = Student("Bob", 38)
    print("Student1: name={},age={}".format(s1.name, str(s1.age)))
    print("Student2: name={},age={}".format(s2.name, str(s2.age)))
""" Exp3: Create RDD by using row
We can create an RDD by using a list of Rows. rdd.collect() will return a list of row.
"""
def exp3(spark):
    """Create an RDD from a list of Rows and collect it back to a list."""
    # data is a list of rows
    data = [Row(name="James,,Smith", lang=["Java", "Scala", "C++"], state="CA"),
            Row(name="Michael,Rose,", lang=["Spark", "Java", "C++"], state="NJ"),
            Row(name="Robert,,Williams", lang=["CSharp", "VB"], state="NV")]
    # parallelize turns the list into an RDD
    rdd = spark.sparkContext.parallelize(data)
    print("Exp3 rdd has type:{}".format(str(type(rdd))))
    # collect turns the RDD back into a list of Row objects
    rowList = rdd.collect()
    print("Exp3 row has type:{}".format(str(type(rowList))))
    print("Exp3 row has value:")
    for row in rowList:
        print("name: {}, lang: {}, state: {}".format(row.name, str(row.lang), row.state))
""" Exp4 : Create a dataframe by using row
"""
def exp4(spark):
    """Build a DataFrame from custom-class Rows with struct/array/map columns
    and select nested fields out of them."""
    # A custom Row class avoids repeating the field names in every row.
    Student = Row("name", "score", "properties")
    data = [Student(Row(fname="James", lname="Smith"), [10, 20, 30], {'hair': 'black', 'eye': 'brown'}),
            Student(Row(fname="Michael", lname="Rose"), [20, 30, 40], {'hair': 'brown', 'eye': 'black'}),
            Student(Row(fname="Robert", lname="Williams"), [30, 20, 50], {'hair': 'black', 'eye': 'blue'})]
    df = spark.createDataFrame(data)
    df.printSchema()
    df.show(truncate=False)
    # Access nested struct fields with ".", array elements and map values
    # with getItem().
    df.select(df.name.fname.alias("first_name"), df.name.lname.alias("last_name"), df.score.getItem(0).alias("score_0"),
              df.properties.getItem("hair").alias("hair")).show()
def main():
    """Create a local SparkSession and run the selected example
    (uncomment the exp* calls to run the others)."""
    spark = SparkSession.builder \
        .master("local[2]") \
        .appName("Row class example") \
        .config("spark.executor.memory", "4g") \
        .getOrCreate()
    # run exp1
    # exp1()
    # run exp2
    # exp2()
    # run exp3
    # exp3(spark)
    # run exp4
    exp4(spark)
if __name__ == "__main__":
main()
| 36.791667 | 120 | 0.63624 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,617 | 0.592752 |
ad5daf01c044c5fde437a87219ebf1f9aed1ff36 | 551 | py | Python | icon_prometheus_exporter/config.py | ghalwash/icon-prometheus-exporter | 57201f8ad2c0f30aab5b24c99a94e55f68cffb2f | [
"MIT"
] | null | null | null | icon_prometheus_exporter/config.py | ghalwash/icon-prometheus-exporter | 57201f8ad2c0f30aab5b24c99a94e55f68cffb2f | [
"MIT"
] | 2 | 2020-07-06T17:34:09.000Z | 2020-07-06T17:34:10.000Z | icon_prometheus_exporter/config.py | ghalwash/icon-prometheus-exporter | 57201f8ad2c0f30aab5b24c99a94e55f68cffb2f | [
"MIT"
] | 2 | 2020-06-28T19:53:35.000Z | 2020-09-17T21:25:43.000Z |
# JSON-RPC endpoint used to discover the P-Rep list.
discovery_node_rpc_url='https://ctz.solidwallet.io/api/v3'
# JSON-RPC payload: an icx_call of the "getPReps" method on the all-zero
# contract address, requesting rankings 0x1 through 0xaaa.
request_data = {
    "jsonrpc": "2.0",
    "id": 1234,
    "method": "icx_call",
    "params": {
        "to": "cx0000000000000000000000000000000000000000",
        "dataType": "call",
        "data": {
            "method": "getPReps",
            "params": {
                "startRanking": "0x1",
                "endRanking": "0xaaa"
            }
        }
    }
}
| 27.55 | 67 | 0.372051 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 213 | 0.38657 |
ad5db874c90f5842c8a6e82a7b558b48f5b79bd1 | 4,187 | py | Python | web/models.py | rkhozinov/dicease-area | 9ca2159705c778a73f45ca83e83f881d47c355c4 | [
"MIT"
] | null | null | null | web/models.py | rkhozinov/dicease-area | 9ca2159705c778a73f45ca83e83f881d47c355c4 | [
"MIT"
] | null | null | null | web/models.py | rkhozinov/dicease-area | 9ca2159705c778a73f45ca83e83f881d47c355c4 | [
"MIT"
] | null | null | null | # models.py
from sys import path
from os.path import dirname as dir
path.append(dir(path[0]))
from app import db
class District(db.Model):
    """A geographic district; referenced by hospitals and population records."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(120), unique=True)
    coordinates = db.Column(db.String(120), nullable=True)
    def __init__(self, name, coordinates=None):
        self.name = name
        self.coordinates = coordinates
    def __repr__(self):
        return self.name
class Hospital(db.Model):
    """A hospital located in a district; exposes the reverse relation
    `district.hospitals` via the backref."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(120), unique=True)
    address = db.Column(db.String(120), nullable=True)
    phone = db.Column(db.String(120), nullable=True)
    coordinates = db.Column(db.String(120), nullable=True)
    district_id = db.Column(db.Integer, db.ForeignKey('district.id'))
    district = db.relationship('District',
                               backref=db.backref('hospitals', lazy='dynamic', uselist=True))
    def __init__(self, name, district, address=None, phone=None, coordinates=None):
        self.name = name
        self.district = district
        self.address = address
        self.phone = phone
        self.coordinates = coordinates
    def __repr__(self):
        return self.name
class Disease(db.Model):
    """A disease identified by its unique name."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, unique=True)
    def __init__(self, name, description=None):
        # NOTE(review): `description` has no mapped column, so it is only a
        # transient attribute and is never persisted — confirm whether a
        # column is missing.
        self.name = name
        self.description = description
class DiseasePopulation(db.Model):
    """Yearly disease counts (children/adults, total and under observation)
    reported by one hospital for one disease."""
    id = db.Column(db.Integer, primary_key=True)
    year = db.Column(db.Integer, nullable=True)
    children = db.Column(db.Integer, nullable=True)
    children_observed = db.Column(db.Integer, nullable=True)
    adults = db.Column(db.Integer, nullable=True)
    adults_observed = db.Column(db.Integer, nullable=True)
    hospital_id = db.Column(db.Integer, db.ForeignKey('hospital.id'))
    hospital = db.relationship('Hospital',
                               backref=db.backref('population', lazy='dynamic',
                                                  uselist=True))
    disease_id = db.Column(db.Integer, db.ForeignKey('disease.id'))
    disease = db.relationship('Disease',
                              backref=db.backref('population', lazy='dynamic', uselist=True))
    def __init__(self, disease, hospital, year, adults=0, adults_observed=0,
                 children=0, children_observed=0):
        self.disease = disease
        self.hospital = hospital
        self.year = int(year) if year else 0
        self.children = int(children)
        self.children_observed = int(children_observed)
        self.adults = int(adults)
        self.adults_observed = int(adults_observed)
        # NOTE(review): `all`/`all_observed` have no mapped columns, so these
        # totals exist only on freshly constructed objects, not on rows
        # loaded from the database.
        self.all = self.children + self.adults
        self.all_observed = self.children_observed + self.adults_observed
    def __repr__(self):
        # BUG FIX: the previous implementation formatted `self.name`, which
        # this model does not define, so repr() always raised AttributeError.
        # Use attributes that exist both on new and on DB-loaded instances.
        return '{0}:{1}'.format(self.disease_id, self.year)
class Population(db.Model):
    """Yearly population breakdown of a district (by sex, age group and
    employability); derived totals are computed in __init__."""
    id = db.Column(db.Integer, primary_key=True)
    year = db.Column(db.Integer)
    all = db.Column(db.Integer)
    men = db.Column(db.Integer)
    women = db.Column(db.Integer)
    children = db.Column(db.Integer)
    adults = db.Column(db.Integer)
    employable = db.Column(db.Integer)
    employable_men = db.Column(db.Integer)
    employable_women = db.Column(db.Integer)
    district_id = db.Column(db.Integer, db.ForeignKey('district.id'))
    district = db.relationship('District',
                               backref=db.backref('population', lazy='dynamic', uselist=True))
    # NOTE(review): the `district_id` parameter is accepted but never used —
    # the relationship assignment below populates the FK instead; confirm.
    def __init__(self, year, district,
                 men=0, women=0, children=0, employable_men=0, employable_women=0, district_id=0):
        self.district = district
        self.year = int(year)
        self.men = int(men)
        self.women = int(women)
        self.children = int(children)
        self.employable_men = int(employable_men)
        self.employable_women = int(employable_women)
        # derived totals: all = men + women, adults = all - children,
        # employable = employable_men + employable_women
        self.all = self.men + self.women
        self.adults = self.all - self.children
        self.employable = self.employable_men + self.employable_women
    def __repr__(self):
        return '{}:{}'.format(self.year, self.all)
| 34.04065 | 98 | 0.642942 | 4,056 | 0.968713 | 0 | 0 | 0 | 0 | 0 | 0 | 199 | 0.047528 |
ad5ee300c9e8ae6ec4833340d2ed6551f1709676 | 12,755 | py | Python | PJ-X-ACT/train.py | Seth-Park/MultimodalExplanations | b58c09ac38a5e5d08541a94599410e59ec5cdec6 | [
"BSD-2-Clause"
] | 39 | 2018-07-18T16:31:46.000Z | 2022-01-25T16:38:51.000Z | PJ-X-ACT/train.py | Seth-Park/MultimodalExplanations | b58c09ac38a5e5d08541a94599410e59ec5cdec6 | [
"BSD-2-Clause"
] | 13 | 2018-10-15T19:10:34.000Z | 2021-11-03T02:21:53.000Z | PJ-X-ACT/train.py | Seth-Park/MultimodalExplanations | b58c09ac38a5e5d08541a94599410e59ec5cdec6 | [
"BSD-2-Clause"
] | 6 | 2018-11-09T15:41:31.000Z | 2021-05-26T11:42:30.000Z | import matplotlib
matplotlib.use('Agg')
import os
import sys
import numpy as np
import json
import matplotlib.pyplot as plt
import caffe
from caffe import layers as L
from caffe import params as P
from activity_data_provider_layer import ActivityDataProvider
from build_val_model import act_proto, exp_proto
import config
def learning_params(param_list):
    """Convert [[lr_mult, decay_mult], ...] pairs into Caffe `param` dicts.

    Each entry may be a 1-element list (lr_mult only) or a 2-element list
    (lr_mult and decay_mult).
    """
    param_dicts = []
    for pl in param_list:
        param_dict = {}
        param_dict['lr_mult'] = pl[0]
        if len(pl) > 1:
            # BUG FIX: decay_mult previously copied pl[0] (the lr multiplier)
            # instead of the decay multiplier at pl[1]. The existing callers
            # (fixed_weights*) pass equal pairs, so their output is unchanged.
            param_dict['decay_mult'] = pl[1]
        param_dicts.append(param_dict)
    return param_dicts
# Zero lr/decay multipliers freeze a layer's parameters during training
# (two params for ordinary layers, three for LSTM layers).
fixed_weights = learning_params([[0, 0], [0, 0]])
fixed_weights_lstm = learning_params([[0, 0], [0, 0], [0, 0]])
def pj_x(mode, batchsize, exp_T, exp_vocab_size):
    """Build the PJ-X training net as a Caffe NetSpec and return its proto.

    The net reads (image features, labels, explanation sequences) from the
    Python data layer, predicts the activity class through a soft attention
    head, then conditions a 2-layer LSTM on the ground-truth answer embedding
    and a second attention map to generate the textual explanation.

    :param mode: data split passed to the data provider ('train'/'val'/...).
    :param batchsize: number of samples per batch (also sizes the dummy data).
    :param exp_T: max explanation length (time steps the LSTM is unrolled).
    :param exp_vocab_size: explanation vocabulary size (embed/output dims).
    """
    n = caffe.NetSpec()
    mode_str = json.dumps({'mode':mode, 'batchsize':batchsize})
    n.img_feature, n.label, n.exp, n.exp_out, n.exp_cont_1, n.exp_cont_2 = \
        L.Python(module='activity_data_provider_layer',
                 layer='ActivityDataProviderLayer',
                 param_str=mode_str, ntop=6)
    # Attention: 1x1 convs produce a 14x14 softmax map over image locations.
    n.att_conv1 = L.Convolution(n.img_feature, kernel_size=1, stride=1, num_output=512, pad=0, weight_filler=dict(type='xavier'))
    n.att_conv1_relu = L.ReLU(n.att_conv1)
    n.att_conv2 = L.Convolution(n.att_conv1_relu, kernel_size=1, stride=1, num_output=1, pad=0, weight_filler=dict(type='xavier'))
    n.att_reshaped = L.Reshape(n.att_conv2,reshape_param=dict(shape=dict(dim=[-1,1,14*14])))
    n.att_softmax = L.Softmax(n.att_reshaped, axis=2)
    n.att_map = L.Reshape(n.att_softmax,reshape_param=dict(shape=dict(dim=[-1,1,14,14])))
    dummy = L.DummyData(shape=dict(dim=[batchsize, 1]), data_filler=dict(type='constant', value=1), ntop=1)
    n.att_feature = L.SoftAttention(n.img_feature, n.att_map, dummy)
    n.att_feature_resh = L.Reshape(n.att_feature, reshape_param=dict(shape=dict(dim=[-1,2048])))
    # Prediction: classify the attended feature into the answer space.
    n.prediction = L.InnerProduct(n.att_feature_resh, num_output=config.NUM_OUTPUT_UNITS, weight_filler=dict(type='xavier'))
    n.loss = L.SoftmaxWithLoss(n.prediction, n.label)
    n.accuracy = L.Accuracy(n.prediction, n.label)
    # Embed Activity GT answer during training
    n.exp_emb_ans = L.Embed(n.label, input_dim=config.NUM_OUTPUT_UNITS, num_output=300, \
        weight_filler=dict(type='uniform', min=-0.08, max=0.08))
    n.exp_emb_ans_tanh = L.TanH(n.exp_emb_ans)
    n.exp_emb_ans2 = L.InnerProduct(n.exp_emb_ans_tanh, num_output=2048, weight_filler=dict(type='xavier'))
    # merge activity answer and visual feature (elementwise product after
    # tiling the answer embedding over the 14x14 spatial grid)
    n.exp_emb_resh = L.Reshape(n.exp_emb_ans2, reshape_param=dict(shape=dict(dim=[-1,2048,1,1])))
    n.exp_emb_tiled_1 = L.Tile(n.exp_emb_resh, axis=2, tiles=14)
    n.exp_emb_tiled = L.Tile(n.exp_emb_tiled_1, axis=3, tiles=14)
    n.img_embed = L.Convolution(n.img_feature, kernel_size=1, stride=1, num_output=2048, pad=0, weight_filler=dict(type='xavier'))
    n.exp_eltwise = L.Eltwise(n.img_embed, n.exp_emb_tiled, eltwise_param={'operation': P.Eltwise.PROD})
    n.exp_eltwise_sqrt = L.SignedSqrt(n.exp_eltwise)
    n.exp_eltwise_l2 = L.L2Normalize(n.exp_eltwise_sqrt)
    n.exp_eltwise_drop = L.Dropout(n.exp_eltwise_l2, dropout_param={'dropout_ratio': 0.3})
    # Attention for Explanation (second attention map, answer-conditioned)
    n.exp_att_conv1 = L.Convolution(n.exp_eltwise_drop, kernel_size=1,
        stride=1, num_output=512, pad=0, weight_filler=dict(type='xavier'))
    n.exp_att_conv1_relu = L.ReLU(n.exp_att_conv1)
    n.exp_att_conv2 = L.Convolution(n.exp_att_conv1_relu, kernel_size=1,
        stride=1, num_output=1, pad=0, weight_filler=dict(type='xavier'))
    n.exp_att_reshaped = L.Reshape(n.exp_att_conv2,reshape_param=dict(shape=dict(dim=[-1,1,14*14])))
    n.exp_att_softmax = L.Softmax(n.exp_att_reshaped, axis=2)
    n.exp_att_map = L.Reshape(n.exp_att_softmax,reshape_param=dict(shape=dict(dim=[-1,1,14,14])))
    exp_dummy = L.DummyData(shape=dict(dim=[batchsize, 1]), data_filler=dict(type='constant', value=1), ntop=1)
    n.exp_att_feature_prev = L.SoftAttention(n.img_feature, n.exp_att_map, exp_dummy)
    n.exp_att_feature_resh = L.Reshape(n.exp_att_feature_prev, reshape_param=dict(shape=dict(dim=[-1, 2048])))
    n.exp_att_feature_embed = L.InnerProduct(n.exp_att_feature_resh, num_output=2048, weight_filler=dict(type='xavier'))
    n.exp_att_feature = L.Eltwise(n.exp_emb_ans2, n.exp_att_feature_embed, eltwise_param={'operation': P.Eltwise.PROD})
    # Embed explanation
    n.exp_embed_ba = L.Embed(n.exp, input_dim=exp_vocab_size, num_output=300, \
        weight_filler=dict(type='uniform', min=-0.08, max=0.08))
    n.exp_embed = L.TanH(n.exp_embed_ba)
    # LSTM1 for Explanation
    n.exp_lstm1 = L.LSTM(\
        n.exp_embed, n.exp_cont_1,\
        recurrent_param=dict(\
            num_output=2048,\
            weight_filler=dict(type='uniform',min=-0.08,max=0.08),\
            bias_filler=dict(type='constant',value=0)))
    n.exp_lstm1_dropped = L.Dropout(n.exp_lstm1,dropout_param={'dropout_ratio':0.3})
    # merge with LSTM1 for explanation (tile the attended feature over exp_T
    # time steps and fuse it with the LSTM output)
    n.exp_att_resh = L.Reshape(n.exp_att_feature, reshape_param=dict(shape=dict(dim=[1, -1, 2048])))
    n.exp_att_tiled = L.Tile(n.exp_att_resh, axis=0, tiles=exp_T)
    n.exp_eltwise_all = L.Eltwise(n.exp_lstm1_dropped, n.exp_att_tiled, eltwise_param={'operation': P.Eltwise.PROD})
    n.exp_eltwise_all_l2 = L.L2Normalize(n.exp_eltwise_all)
    n.exp_eltwise_all_drop = L.Dropout(n.exp_eltwise_all_l2, dropout_param={'dropout_ratio': 0.3})
    # LSTM2 for Explanation
    n.exp_lstm2 = L.LSTM(\
        n.exp_eltwise_all_drop, n.exp_cont_2,\
        recurrent_param=dict(\
            num_output=1024,\
            weight_filler=dict(type='uniform',min=-0.08,max=0.08),\
            bias_filler=dict(type='constant',value=0)))
    n.exp_lstm2_dropped = L.Dropout(n.exp_lstm2,dropout_param={'dropout_ratio':0.3})
    n.exp_prediction = L.InnerProduct(n.exp_lstm2_dropped, num_output=exp_vocab_size, weight_filler=dict(type='xavier'), axis=2)
    # ignore_label=-1 masks padding positions in the explanation targets
    n.exp_loss = L.SoftmaxWithLoss(n.exp_prediction, n.exp_out,
                    loss_param=dict(ignore_label=-1),
                    softmax_param=dict(axis=2))
    n.exp_accuracy = L.Accuracy(n.exp_prediction, n.exp_out, axis=2, ignore_label=-1)
    return n.to_proto()
def make_answer_vocab(adic, vocab_size):
    """
    Returns a dictionary that maps answer words to indices, assigned in
    first-seen order. (`vocab_size` is accepted for interface compatibility
    but not used.)
    """
    adict = {}
    for answer in adic.values():
        if answer not in adict:
            adict[answer] = len(adict)
    return adict
def make_exp_vocab(exp_dic):
    """
    Returns a dictionary that maps explanation words to indices.
    Index 0 is reserved for <EOS> and index 1 for the empty token; remaining
    words are numbered in first-seen order.
    """
    exp_vdict = {'<EOS>': 0}
    exp_vdict[''] = 1
    exp_id = 2
    for qid in exp_dic.keys():
        exp_strings = exp_dic[qid]
        for exp_str in exp_strings:
            # tokenize the explanation string with the provider's tokenizer
            exp_list = ActivityDataProvider.seq_to_list(exp_str)
            for w in exp_list:
                if w not in exp_vdict:
                    exp_vdict[w] = exp_id
                    exp_id += 1
    return exp_vdict
def make_vocab_files():
    """
    Produce the answer and explanation vocabularies (as dicts; the caller
    is responsible for writing them to disk).
    """
    print('making answer vocab...', config.ANSWER_VOCAB_SPACE)
    # load_data returns a 3-tuple; position 1 holds the answer dict and
    # position 2 the explanation dict.
    _, adic, _ = ActivityDataProvider.load_data(config.ANSWER_VOCAB_SPACE)
    answer_vocab = make_answer_vocab(adic, config.NUM_OUTPUT_UNITS)
    print('making explanation vocab...', config.EXP_VOCAB_SPACE)
    _, _, expdic = ActivityDataProvider.load_data(config.EXP_VOCAB_SPACE)
    explanation_vocab = make_exp_vocab(expdic)
    return answer_vocab, explanation_vocab
def reverse(dict):
    """Return a new mapping with keys and values swapped.

    (The parameter name shadows the builtin `dict`; kept for compatibility
    with existing keyword callers.)
    """
    return {value: key for key, value in dict.items()}
def to_str(type, idxs, cont, r_adict, r_exp_vdict):
    """Decode indices to text.

    type 'a': `idxs` is a single answer index -> its answer string.
    type 'exp': `idxs` is a sequence of word indices -> space-joined words,
    truncated at the first 0 (<EOS>). `cont` is unused.
    """
    if type == 'a':
        return r_adict[idxs]
    elif type == 'exp':
        pieces = []
        for idx in idxs:
            if idx == 0:  # <EOS> terminates the explanation
                break
            pieces.append(r_exp_vdict[idx])
        return ' '.join(pieces)
def batch_to_str(type, batch_idx, batch_cont, r_adict, r_exp_vdict):
    """Apply to_str over a batch of index/continuation pairs."""
    return [to_str(type, idxs, cont, r_adict, r_exp_vdict)
            for idxs, cont in zip(batch_idx, batch_cont)]
def main():
    """Train PJ-X: write the train/val prototxts, run the Caffe solver, and
    periodically print losses plus sample predictions/explanations."""
    if not os.path.exists('./model'):
        os.makedirs('./model')
    # NOTE(review): `explanation_vocab` is initialized here but the code
    # below only ever assigns/uses `exp_vocab` — confirm this is intended.
    answer_vocab, explanation_vocab = {}, {}
    # Reuse previously saved vocabularies when both files exist.
    if os.path.exists('./model/adict.json') and os.path.exists('./model/exp_vdict.json'):
        print('restoring vocab')
        with open('./model/adict.json','r') as f:
            answer_vocab = json.load(f)
        with open('./model/exp_vdict.json','r') as f:
            exp_vocab = json.load(f)
    else:
        answer_vocab, exp_vocab = make_vocab_files()
        with open('./model/adict.json','w') as f:
            json.dump(answer_vocab, f)
        with open('./model/exp_vdict.json','w') as f:
            json.dump(exp_vocab, f)
    # index -> word maps, used for pretty-printing samples below
    r_adict = reverse(answer_vocab)
    r_exp_vdict = reverse(exp_vocab)
    print('answer vocab size:', len(answer_vocab))
    print('exp vocab size:', len(exp_vocab))
    # Emit the network definitions used by the solver and the validators.
    with open('./model/proto_train.prototxt', 'w') as f:
        f.write(str(pj_x(config.TRAIN_DATA_SPLITS, config.BATCH_SIZE, \
            config.MAX_WORDS_IN_EXP, len(exp_vocab))))
    with open('./model/act_proto_test_gt.prototxt', 'w') as f:
        f.write(str(act_proto('val', config.VAL_BATCH_SIZE, len(exp_vocab), use_gt=True)))
    with open('./model/act_proto_test_pred.prototxt', 'w') as f:
        f.write(str(act_proto('val', config.VAL_BATCH_SIZE, len(exp_vocab), use_gt=False)))
    with open('./model/exp_proto_test.prototxt', 'w') as f:
        f.write(str(exp_proto('val', config.VAL_BATCH_SIZE, 1, len(exp_vocab))))
    caffe.set_device(config.GPU_ID)
    caffe.set_mode_gpu()
    solver = caffe.get_solver('./pj_x_solver.prototxt')
    # per-iteration histories used for the windowed means printed below
    train_loss = np.zeros(config.MAX_ITERATIONS)
    train_acc = np.zeros(config.MAX_ITERATIONS)
    train_loss_exp = np.zeros(config.MAX_ITERATIONS)
    train_acc_exp = np.zeros(config.MAX_ITERATIONS)
    results = []
    for it in range(config.MAX_ITERATIONS):
        solver.step(1)
        # store the train loss
        train_loss[it] = solver.net.blobs['loss'].data
        train_acc[it] = solver.net.blobs['accuracy'].data
        train_loss_exp[it] = solver.net.blobs['exp_loss'].data
        train_acc_exp[it] = solver.net.blobs['exp_accuracy'].data
        if it != 0 and it % config.PRINT_INTERVAL == 0:
            print('Iteration:', it)
            # mean metrics over the last PRINT_INTERVAL iterations
            c_mean_loss = train_loss[it-config.PRINT_INTERVAL:it].mean()
            c_mean_acc = train_acc[it-config.PRINT_INTERVAL:it].mean()
            c_mean_loss_exp = train_loss_exp[it-config.PRINT_INTERVAL:it].mean()
            c_mean_acc_exp = train_acc_exp[it-config.PRINT_INTERVAL:it].mean()
            print('Train loss for classification:', c_mean_loss)
            print('Train accuracy for classification:', c_mean_acc)
            print('Train loss for exp:', c_mean_loss_exp)
            print('Train accuracy for exp:', c_mean_acc_exp)
            # Decode a few samples from the current batch for inspection.
            predicted_ans = solver.net.blobs['prediction'].data
            predicted_ans = predicted_ans.argmax(axis=1)
            answers = solver.net.blobs['label'].data
            generated_exp = solver.net.blobs['exp_prediction'].data
            generated_exp = generated_exp.argmax(axis=2).transpose()
            target_exp = solver.net.blobs['exp_out'].data.transpose()
            exp_out_cont = solver.net.blobs['exp_cont_2'].data.transpose()
            predict_str = batch_to_str('a', predicted_ans, np.ones_like(predicted_ans),
                                       r_adict, r_exp_vdict)
            answers_str = batch_to_str('a', answers, np.ones_like(answers),
                                       r_adict, r_exp_vdict)
            generated_str = batch_to_str('exp', generated_exp, exp_out_cont,
                                         r_adict, r_exp_vdict)
            target_str = batch_to_str('exp', target_exp, exp_out_cont,
                                      r_adict, r_exp_vdict)
            # print at most 10 (prediction, answer, explanation) samples
            count = 0
            for pred, ans, exp, target in zip(predict_str, answers_str, generated_str, target_str):
                if count == 10:
                    break
                print('Pred:', pred)
                print('A:', ans)
                print('Because...')
                print('\tgenerated:', exp)
                print('\ttarget:', target)
                count += 1
if __name__ == '__main__':
main()
| 42.235099 | 130 | 0.64414 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,565 | 0.122697 |
ad6107c5bfaee0726903a00a606341356cab3655 | 423 | py | Python | disBatch.py | flatironinstitute/disBatch | 67cf2090617e6fc2f2fb91fdf54c28bcfacaf59c | [
"Apache-2.0"
] | 21 | 2017-11-08T15:20:05.000Z | 2022-03-25T01:06:07.000Z | disBatch.py | flatironinstitute/disBatch | 67cf2090617e6fc2f2fb91fdf54c28bcfacaf59c | [
"Apache-2.0"
] | 11 | 2017-11-27T15:25:06.000Z | 2021-12-16T20:59:05.000Z | disBatch.py | flatironinstitute/disBatch | 67cf2090617e6fc2f2fb91fdf54c28bcfacaf59c | [
"Apache-2.0"
] | 6 | 2019-01-31T22:23:08.000Z | 2021-11-06T05:03:15.000Z | #!/usr/bin/env python3
import os, sys

# Bootstrap launcher: locate the real disBatch entry point inside the
# installed `disbatch` package and exec it, replacing this process.

# Allow an explicit install location (DISBATCH_ROOT) to take precedence.
dr = os.getenv('DISBATCH_ROOT')
if dr and dr not in sys.path:
    sys.path.append(dr)
try:
    import disbatch
# Narrowed from a bare `except:`: only a missing/broken package means a bad
# environment; KeyboardInterrupt/SystemExit must not be swallowed here.
except ImportError:
    print(f'disBatch environment is incomplete. Check:\n\tDISBATCH_ROOT {dr!r}.', file=sys.stderr)
    sys.exit(1)
dbExec = os.path.join(os.path.dirname(disbatch.__file__), 'disBatch.py')
# Replace the current process image; argv[1:] is forwarded unchanged.
os.execv(sys.executable, [sys.executable, dbExec] + sys.argv[1:])
| 24.882353 | 98 | 0.690307 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.283688 |
ad611aec8e39567a4f46f53f19066964bc6e7636 | 425 | py | Python | cloudflare_exporter/handlers.py | cpaillet/cloudflare-exporter | 194a0ce0f316aadc2802fbf180d06f5aab7849be | [
"Apache-2.0"
] | null | null | null | cloudflare_exporter/handlers.py | cpaillet/cloudflare-exporter | 194a0ce0f316aadc2802fbf180d06f5aab7849be | [
"Apache-2.0"
] | 7 | 2019-11-28T11:43:56.000Z | 2020-06-09T08:21:19.000Z | cloudflare_exporter/handlers.py | cpaillet/cloudflare-exporter | 194a0ce0f316aadc2802fbf180d06f5aab7849be | [
"Apache-2.0"
] | 3 | 2019-11-28T08:36:23.000Z | 2022-02-21T11:34:41.000Z | from aiohttp import web
from prometheus_client import generate_latest
from prometheus_client.core import REGISTRY
def metric_to_text():
    """Render the default Prometheus registry in text exposition format."""
    raw = generate_latest(REGISTRY)
    return raw.decode('utf-8')
async def handle_metrics(_request):
    """Serve the /metrics endpoint for Prometheus scraping."""
    body = metric_to_text()
    return web.Response(text=body)
async def handle_health(_request):
    """Liveness probe: always responds 200 'ok'."""
    return web.Response(status=200, text='ok')
| 22.368421 | 63 | 0.778824 | 0 | 0 | 0 | 0 | 0 | 0 | 227 | 0.534118 | 11 | 0.025882 |
ad61efd89b487352162b06da16ad5ffe5da41461 | 20,737 | py | Python | Step4/04_reversing_bis.py | Aterwyn/SSTIC2019 | d7fcddd5b223663910ec35aa1d419f0bc636e701 | [
"MIT"
] | null | null | null | Step4/04_reversing_bis.py | Aterwyn/SSTIC2019 | d7fcddd5b223663910ec35aa1d419f0bc636e701 | [
"MIT"
] | null | null | null | Step4/04_reversing_bis.py | Aterwyn/SSTIC2019 | d7fcddd5b223663910ec35aa1d419f0bc636e701 | [
"MIT"
] | null | null | null | from SM4 import SM4
# Candidate 32-byte key for the challenge, as hex.  The second assignment
# supersedes the first (both kept for reference with the analysis notes).
input_data = "a1a2a3a4a5a6a7a8a9aaabacadaeafa0b1b2b3b4b5b6b7b8b9babbbcbdbebfb0"
input_data = "acadaa8b5b55306fb3c6dfc3b2d1c80770084644225febd71a9189aa26ec740e"
#input_data = "0000000000000000000000000000000000000000000000000000000000000000"
# NOTE(review): `global` at module scope is a no-op; kept for fidelity.
global input_list
input_list = bytearray.fromhex(input_data)
# Sixteen emulated 32-bit registers of the VM under analysis.
global data
data = [0]*16
#plain data, written in little-endian
#const0 = bytearray.fromhex("08251587e988e8de")[::-1] + bytearray.fromhex("5fa89078ee10390f")[::-1]
#const1 = bytearray.fromhex("d73f7a649d78f7f4")[::-1] + bytearray.fromhex("f556dc27813a05a1")[::-1]
const0 = bytearray.fromhex("6766722e612e7270")[::-1] + bytearray.fromhex("2e76662e666e632e")[::-1]
const1 = bytearray.fromhex("6640727976706e73")[::-1] + bytearray.fromhex("7465622e70766766")[::-1]
const2 = input_list[:0x10]
const3 = input_list[0x10:]
# Scratch memory backing virtual addresses 0x100000..0x100043: two 16-byte
# constants, the 32-byte input, and a 4-byte zero tail.
global sm4_data
sm4_data = const0 + const1 + const2 + const3 + bytearray.fromhex("00000000")
#0 encrypted
def print_sm4_data():
    """Dump the scratch buffer as four rows of two little-endian 8-byte words,
    matching the debugger-style memory dumps in the surrounding comments."""
    global sm4_data
    print("")
    for row in range(4):
        base = row * 0x10
        lo = sm4_data[base:base + 8][::-1].hex()
        hi = sm4_data[base + 8:base + 0x10][::-1].hex()
        print("0x" + lo + " 0x" + hi)
def print_data():
    """Print the 16 emulated registers as four rows of four 32-bit hex words."""
    global data
    print("")
    for base in range(0, 16, 4):
        print(" ".join("%08x" % data[base + j] for j in range(4)))
#0x100000: 08251587e988e8de 5fa89078ee10390f
#0x100010: d73f7a649d78f7f4 f556dc27813a05a1
#0x100020: a8a7a6a5a4a3a2a1 a0afaeadacabaaa9
#0x100030: b8b7b6b5b4b3b2b1 b0bfbebdbcbbbab9
# Dump the initial scratch buffer for comparison with the debugger trace.
print_sm4_data()
# Load the SM4-encrypted payload: a 0x101010-byte virtual-memory image
# embedded in the target binary at `payload_offset`.  The context manager
# guarantees the handle is closed even if the read fails (the original
# open/read/close left it dangling on error).
with open("decrypted_file", 'rb') as f:
    read = f.read()
payload_offset = 0x4dbd8
read = read[payload_offset: payload_offset+0x101010]
def get_value_from_adr(adr):
    """Read a 32-bit little-endian word from the emulated address space.

    Addresses >= 0x100000 map into the in-memory scratch buffer `sm4_data`;
    lower addresses map into the encrypted payload `read`, decrypted on the
    fly 16 bytes at a time with SM4 keyed on the block's base address.
    """
    global sm4_data, read
    if adr >= 0x100000:
        offset = adr-0x100000
        return int(sm4_data[offset:offset+4][::-1].hex(),16)
    else:
        sm4 = SM4()
        # Align down to a 16-byte block; low nibble is the in-block offset.
        base_adr = adr&0xFFFFF0
        #debug
        #mod_adr = base_adr+0x1000
        mod_adr = base_adr
        base_offset = adr&0xF
        # Decrypt two consecutive blocks so a word that straddles the
        # 16-byte boundary can still be sliced out below.
        data = read[mod_adr:mod_adr+0x10]
        data2 = read[mod_adr+0x10: mod_adr+0x20]
        dec_data1 = sm4.decrypt(base_adr, data)
        sm4 = SM4()
        dec_data2 = sm4.decrypt(base_adr+0x10, data2)
        dec_data = dec_data1 + dec_data2
        return int(dec_data[base_offset:base_offset+4][::-1].hex(), 16)
def insert_value_at_adr(val_int, adr):
    """Write *val_int* as a little-endian 32-bit word into the scratch
    buffer backing emulated addresses 0x100000..0x100040."""
    global sm4_data
    assert 0x100000<=adr and adr <=0x100040
    off = adr-0x100000
    word = (val_int).to_bytes(4, byteorder="little")
    # bytearray slicing rebuilds the buffer with the word spliced in.
    sm4_data = sm4_data[:off] + word + sm4_data[off+4:]
def loop():
    """Emulate the checker's obfuscated transformation routine.

    Four outer rounds each pull 8 bytes of the candidate key from
    0x100020+, run 32 Feistel-like mixing rounds driven by byte-table
    lookups in the decrypted payload (tables at virtual 0x1000+, selected
    by the decrementing counter data[0x07]), and write the 8-byte result
    back in place.  Finally the transformed 32 bytes at 0x100020 are
    compared byte-by-byte against the reference at 0x100000; data[0x00]
    ends up 0 on a full match.
    """
    data[0x04] = 0x0010
    data[0x04] = data[0x04] << 0x0010
    data[0x04] += 0x0020 #data[0x04] = 0x100020 #input
    data[0x0d] = 0x0010
    data[0x0d] = data[0x0d] << 0x0010
    data[0x0d] += 0x0020 #data[0x0d] = 0x100020 #input
    data[0x0c] = 0x0004
    #Z1Z2Z3Z4Z5Z6Z7Z8Z9ZAZBZCZDZEZFZ0
    #Y1Y2Y3Y4Y5Y6Y7Y8Y9YAYBYCYDYEYFY0
    while (data[0xc] != 0): #counter C on 4 outer rounds
        # Load 8 input bytes as four byte-swapped 16-bit halves.
        data[0x00] = get_value_from_adr(data[0x04])
        data[0x00] = data[0x00] << 0x0010 & 0xFFFFFFFF
        data[0x00] = data[0x00] >> 0x0010 #data[0x00] = 0 0 Z2 Z1
        data[0x00] = (data[0x00]>>8) + ((data[0x00] & 0xff)<<8) #data[0x00] = 0 0 Z1 Z2
        data[0x04] += 0x0002 #data[0x04] = 0x100022
        data[0x01] = get_value_from_adr(data[0x04])
        data[0x01] = data[0x01] << 0x0010 & 0xFFFFFFFF
        data[0x01] = data[0x01] >> 0x0010
        data[0x01] = (data[0x01]>>8) + ((data[0x01] & 0xff)<<8) #data[0x01] = 0 0 Z3 Z4
        data[0x04] += 0x0002 #data[0x04] = 0x100024
        data[0x02] = get_value_from_adr(data[0x04])
        data[0x02] = data[0x02] << 0x0010 & 0xFFFFFFFF
        data[0x02] = data[0x02] >> 0x0010
        data[0x02] = (data[0x02]>>8) + ((data[0x02] & 0xff)<<8) #data[0x02] = 0 0 Z5 Z6
        data[0x04] += 0x0002 #data[0x04] = 0x100026
        data[0x03] = get_value_from_adr(data[0x04])
        data[0x03] = data[0x03] << 0x0010 & 0xFFFFFFFF
        data[0x03] = data[0x03] >> 0x0010
        data[0x03] = (data[0x03]>>8) + ((data[0x03] & 0xff)<<8) #data[0x03] = 0 0 Z7 Z8
        data[0x0e] = 0x0020
        data[0x07] = 0x0007 #data[0x07] = 7
        """
        #first time
        data[0x00] = 0 0 Z1 Z2
        data[0x01] = 0 0 Z3 Z4
        data[0x02] = 0 0 Z5 Z6
        data[0x03] = 0 0 Z7 Z8
        data[0x07] = 7
        #second time
        data[0x00] = 0 0 X4 X3                           0 0 Y1 Y2
        data[0x01] = 0 0 0 20 ^ 0 0 X4 X3 ^ 0 0 Z5 Z6    0 0 Y3 Y4
        data[0x02] = 0 0 Z7 Z8                           0 0 Y5 Y6
        data[0x03] = 0 0 Z1 Z2                           0 0 Y7 Y8
        data[0x07] = 3
        #third time
        """
        print("%08x %08x %08x %08x" % (data[0], data[1], data[2], data[3]))
        print("\n\n")
        print("0")
        #print_data()
        #print_sm4_data()
        security = 0
        while(data[0x0e] != 0): #counter E on 32 inner mixing rounds
            data[0x0e] -= 1
            # Split data[0x01] into its two bytes (Y3 in 04, Y4 in 05).
            data[0x04] = data[0x01]
            data[0x05] = data[0x04]
            data[0x04] = data[0x04] >> 0x0008
            data[0x04] &= 0x00ff
            data[0x05] &= 0x00ff
            # Table lookup #1: index = (table data[0x07]) : Y4 : Y3, +0x1000.
            data[0x0b] = data[0x05]
            data[0x0b] = data[0x0b] << 0x0008
            data[0x0a] = data[0x07]
            data[0x0a] = data[0x0a] << 0x0010 & 0xFFFFFFFF
            data[0x0a] += data[0x0b]
            data[0x0a] += data[0x04]
            data[0x0a] += 0x1000
            data[0x06] = get_value_from_adr(data[0x0a]) #-> X1
            data[0x06] &= 0x00ff
            #print("")
            #print("adr: %06x" % data[0x0a])
            #print("data06: %02x" % data[0x06])
            data[0x07] -= 1
            # Table lookup #2: index = table : Y3 : X1 -> X2.
            data[0x0b] = data[0x04]
            data[0x0b] = data[0x0b] << 0x0008
            data[0x0a] = data[0x07]
            data[0x0a] = data[0x0a] << 0x0010 & 0xFFFFFFFF
            data[0x0a] += data[0x0b]
            data[0x0a] += data[0x06]
            data[0x0a] += 0x1000
            data[0x05] = get_value_from_adr(data[0x0a]) #-> X2
            data[0x05] &= 0x00ff
            #print("")
            #print("adr: %06x" % data[0x0a])
            #print("data05: %02x" % data[0x05])
            # Table selector wraps 0 -> 0xa (ten tables total).
            if data[0x07] == 0:
                data[0x07] = 0xa
            data[0x07] -= 1
            # Table lookup #3: index = table : X1 : X2 -> X3.
            data[0x0b] = data[0x06]
            data[0x0b] = data[0x0b] << 0x0008
            data[0x0a] = data[0x07]
            data[0x0a] = data[0x0a] << 0x0010 & 0xFFFFFFFF
            data[0x0a] += data[0x0b]
            data[0x0a] += data[0x05]
            data[0x0a] += 0x1000
            data[0x04] = get_value_from_adr(data[0x0a]) #-> X3
            data[0x04] &= 0x00ff
            #print("")
            #print("adr: %06x" % data[0x0a])
            #print("data04: %02x" % data[0x04])
            data[0x07] -= 1
            # Table lookup #4: index = table : X2 : X3 -> X4.
            data[0x0b] = data[0x05]
            data[0x0b] = data[0x0b] << 0x0008
            data[0x0a] = data[0x07]
            data[0x0a] = data[0x0a] << 0x0010 & 0xFFFFFFFF
            data[0x0a] += data[0x0b]
            data[0x0a] += data[0x04]
            data[0x0a] += 0x1000
            data[0x06] = get_value_from_adr(data[0x0a]) #-> X4
            data[0x06] &= 0x00ff
            #print("")
            #print("adr: %06x" % data[0x0a])
            #print("data06: %02x" % data[0x06])
            if data[0x07] == 0:
                data[0x07] = 0xa
            data[0x07] -= 1
            # Combine the round output: data[0x09] = 0 0 X4 X3.
            data[0x09] = data[0x06]
            data[0x09] = data[0x09] << 0x0008
            data[0x09] += data[0x04]
            # Parity of bit 3 of the round counter selects the shuffle.
            data[0x08] = data[0x0e]
            data[0x08] = data[0x08] >> 0x0003
            data[0x08] &= 0x0001
            #print("debug: " + str(data[0x08]))
            #010b: insert 0x7465622e at adr 0x100010, offset 0x0c #4
            #010b: insert 0x70766766 at adr 0x100010, offset 0x08 #4
            #010b: insert 0x66407279 at adr 0x100010, offset 0x04 #4
            #010b: insert 0x76706e73 at adr 0x100010, offset 0x00 #4
            #010b: insert 0x2e76662e at adr 0x100000, offset 0x0c #4
            #010b: insert 0x666e632e at adr 0x100000, offset 0x08 #4
            #010b: insert 0x6766722e at adr 0x100000, offset 0x04 #4
            #010b: insert 0x612e7270 at adr 0x100000, offset 0x00 #1
            if data[0x08] == 0:
                #print("even")
                data[0x08] = data[0x03]
                data[0x03] = data[0x0e]
                data[0x03] += 0x0001
                data[0x03] ^= data[0x00]
                data[0x03] ^= data[0x01]
                data[0x00] = data[0x09]
                data[0x01] = data[0x02]
                data[0x02] = data[0x08]
            else: #data[x08] == 1
                data[0x08] = data[0x00]
                data[0x00] = data[0x09]
                data[0x01] = data[0x0e]
                data[0x01] += 0x0001
                data[0x01] ^= data[0x00]
                data[0x01] ^= data[0x02]
                data[0x02] = data[0x03]
                data[0x03] = data[0x08]
            #print("\n\n")
            #print(str(32-data[0xe]) + " " + str(data[0xe]))
            #print_data()
            #print("DEBUG %02x %02x" % (data[0xe], data[0x7]))
            security += 1
            #if security == 33:
            #    raise Exception
        # Write the mixed halves back over the input, byte-swapped again.
        data[0x00] = (data[0x00]>>8) + ((data[0x00] & 0xff)<<8)
        data[0x01] = (data[0x01]>>8) + ((data[0x01] & 0xff)<<8)
        data[0x01] = data[0x01] << 0x0010 & 0xFFFFFFFF
        data[0x00] += data[0x01]
        insert_value_at_adr(data[0x00], data[0xd])
        #print("\n\n")
        #print_data()
        #print_sm4_data()
        #raise Exception
        #0159: insert 0x22926dbf (data[0x00]) at adr 0x100020 (adr pointed by data[0x0d]) <<<<
        #0159: insert 0x6ffeed4d (data[0x00]) at adr 0x100028 (adr pointed by data[0x0d])
        #0159: insert 0x10874ea1 (data[0x00]) at adr 0x100030 (adr pointed by data[0x0d])
        #0159: insert 0x60e53499 (data[0x00]) at adr 0x100038 (adr pointed by data[0x0d])
        data[0x0d] += 0x0004 #0x100024
        data[0x02] = (data[0x02]>>8) + ((data[0x02] & 0xff)<<8)
        data[0x03] = (data[0x03]>>8) + ((data[0x03] & 0xff)<<8)
        data[0x03] = data[0x03] << 0x0010
        data[0x02] += data[0x03]
        insert_value_at_adr(data[0x02], data[0xd])
        #print("\n\n")
        #print_data()
        #print_sm4_data()
        #raise Exception
        #016b: insert 0x4a7caf04 (data[0x02]) at adr 0x100024 (adr pointed by data[0xd]) <<<<
        #016b: insert 0xd5ea9bc1 (data[0x02]) at adr 0x10002c (adr pointed by data[0xd])
        #016b: insert 0x2e8e57d8 (data[0x02]) at adr 0x100034 (adr pointed by data[0xd])
        #016b: insert 0xdc0bfbf0 (data[0x02]) at adr 0x10003c (adr pointed by data[0xd])
        data[0x0d] += 0x0004 #0x100028
        data[0x04] = data[0x0d]
        data[0x0c] -= 1
    # Final check: compare transformed input (0x100020+) against the
    # reference key (0x100000+) byte by byte.
    data[0x0c] = 0x0010
    data[0x0c] = data[0x0c] << 0x0010 & 0xFFFFFFFF #initialize data[0x0c] to 0x100000
    data[0x0b] = 0x0020
    data[0x0d] -= 0x0020 #set data[0x0d] to 0x100020
    data[0x04] = 0x0000 #default result is set to correct
    while (data[0x0b] != 0): #comparison over 32 bytes
        data[0x00] = get_value_from_adr(data[0x0d]) #0x100020
        data[0x00] &= 0x00ff
        data[0x01] = get_value_from_adr(data[0x0c]) #0x100000 #reference key
        data[0x01] &= 0x00ff
        data[0x00] = abs(data[0x00] - data[0x01])
        #advance offset to 01a1 since data[0x00] is not null
        #comparison byte per byte
        if data[0x00] != 0:
            data[0x04] = 0x0001 #wrong result
        else:
            data[0x04] = data[0x04]
        data[0x0d] += 0x0001
        data[0x0c] += 0x0001
        data[0x0b] -= 1
    data[0x0] = data[0x04]
    #deactivate for now
    #if data[0x0] == 0:
    #    print("WIN !")
    #else:
    #    print("LOOSE !")
    #print_data()
    #print_sm4_data()
# Run the emulation and dump the final state: after loop() the last 32
# bytes of sm4_data hold the transformed input that was compared against
# the reference at 0x100000.
print("")
loop()
print_data()
print_sm4_data()
ad62b39dc3d459b038919fec68d5b17bad7d8e64 | 6,454 | py | Python | util/list_store.py | natduca/ndbg | f8da43be62dac18072e9b0e6e5ecd0d1818aea4d | [
"Apache-2.0"
] | 5 | 2016-05-12T08:48:41.000Z | 2018-07-17T00:48:32.000Z | util/list_store.py | natduca/ndbg | f8da43be62dac18072e9b0e6e5ecd0d1818aea4d | [
"Apache-2.0"
] | 1 | 2022-01-16T12:18:50.000Z | 2022-01-16T12:18:50.000Z | util/list_store.py | natduca/ndbg | f8da43be62dac18072e9b0e6e5ecd0d1818aea4d | [
"Apache-2.0"
] | null | null | null | # Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import gtk
except:
gtk = None
import re
if gtk:
def liststore_get_children(ls):
res = []
for i in range(0,ls.iter_n_children(None)):
iter = ls.iter_nth_child(None,i)
res.append(iter)
return res
class _PListIter(object):
def __init__(self,pls,iter):
self._pls = pls
self._iter = iter
self._initialized = True
def __getattr__(self,k):
if self.__dict__.has_key(k):
return self.__dict__[k]
else:
i = self._pls._name_to_index[k]
return self._pls.get_value(self._iter,i)
def __setattr__(self,k,v):
if self.__dict__.has_key("_initialized"):
i = self._pls._name_to_index[k]
self._pls.set(self._iter,i,v)
return v
else:
return object.__setattr__(self,k,v)
class PListStore(gtk.ListStore):
def __init__(self, **kwargs):
keys = list(range(len(kwargs)))
types = list(range(len(kwargs)))
has_pos = False
has_nonpos = False
i = 0
for k in kwargs.keys():
m = re.match("(.+)_(\d+)",k)
if m:
if has_nonpos:
raise Exception("Cant mix _n arguments with implicitly positioned arguments")
pos = int(m.group(2))
types[pos] = kwargs[k]
keys[pos] = m.group(1)
has_pos = True
else:
if has_pos:
raise Exception("Cant mix _n arguments with implicit positioned")
pos = i
i += 1
keys[pos] = k
types[pos] = kwargs[k]
has_nonpos = True
self._is_nonpos = has_nonpos
gtk.ListStore.__init__(self, *types)
self._types = types
self._num_columns = len(keys)
self._column_names = keys
self._name_to_index = {}
self._is_nonpos = has_nonpos
for i in range(0,self._num_columns):
self._name_to_index[self._column_names[i]] = i
self._initialized = True
def append(self,*args,**kwargs):
if len(args) == 0 and len(kwargs) == 0:
iter = gtk.ListStore.append(self)
return _PListIter(self,iter)
elif len(args) == self._num_columns:
if self._is_nonpos:
raise Exception("Must use kwargs for append.")
iter = gtk.ListStore.append(self)
for i in range(0,self._num_columns):
self.set(iter,i,args[i])
return _PListIter(self,iter)
elif len(kwargs) == self._num_columns:
iter = gtk.ListStore.append(self)
for k in kwargs:
i = self._name_to_index[k]
self.set(iter,i,kwargs[k])
return _PListIter(self,iter)
def __len__(self):
return self.iter_n_children(None)
def __getitem__(self,idx):
if type(idx) == int:
return _PListIter(self,self.iter_nth_child(None,idx))
elif type(idx) == gtk.TreeIter:
return _PListIter(self,idx)
else:
raise Exception("Must be int or iter, got %s" % type(idx))
def __iter__(self):
for i in range(len(self)):
yield self[i]
def __getattr__(self,k):
if self.__dict__.has_key(k):
return self.__dict__[k]
else:
i = self._name_to_index[k]
return i
def find(self,pred):
for i in range(len(self)):
d = self[i]
if pred(d):
return d
def remove(self,iter):
if type(iter) != _PListIter:
raise Exception("Expected plistiter")
gtk.ListStore.remove(self,iter._iter)
class PListView(gtk.TreeView):
def __init__(self, pls, **kwargs):
self._pls = pls
gtk.TreeView.__init__(self, pls)
poslogic = False
neglogic = False
for k in kwargs:
if kwargs[k] == True:
poslogic = True
elif kwargs[k] == False:
neglogic = True
else:
raise Exception("Values must be true or false")
if poslogic and neglogic:
raise Exception("Make the args true or false but dont mix them")
if not poslogic and not neglogic:
raise Exception("Must pass one column to enable or disable")
cols = []
if poslogic:
cols=kwargs.keys
else: # neglogic
cols=list(pls._name_to_index.keys())
for k in kwargs.keys():
cols.remove(k)
# create views
txtCell = gtk.CellRendererText()
pixCell = gtk.CellRendererPixbuf()
for c in cols:
i = pls._name_to_index[c]
t = pls._types[i]
if t == str:
col = gtk.TreeViewColumn(c, txtCell, text=i)
elif t == gtk.gdk.Pixbuf:
col = gtk.TreeViewColumn(c, pixCell, pixbuf=i)
else:
raise Exception("Dont understand waht to do with %s" % t)
self.append_column(col)
def get_selected(self):
sel = self.get_selection()
m,iter = sel.get_selected()
if iter:
return _PListIter(self._pls,iter)
else:
return None
def set_selected(self,iter):
if iter == None:
self.get_selection.set_selected(None)
return
if type(iter) != _PListIter:
raise Exception("Expected plistiter")
sel = self.get_selection()
sel.set_selected(iter._iter)
if __name__ == "__main__":
w = gtk.Window()
ls = PListStore(Name_0 = str, Description_1 = str, Key_2 = object)
print "expect 0 got %s" % ls.Name
print "expect 1 got %s" % ls.Description
print "expect 2 got %s" % ls.Key
ls.append("1", "2", "3")
ls.append("4", "5", "6")
r = ls.append()
r.Name = "7"
r.Description = "8"
r.Key = "9"
print "expect 3 got %s" % len(ls)
print "expect 1 got %s" % ls[0].Name
print "expect 2 got %s" % ls[0].Description
print "expect 3 got %s" % ls[0].Key
print "expect 5 got %s" % ls[1].Description
ls[0].Key = "**3**"
print "expect **3** got %s" % ls[0].Key
tv = PListView(ls, Key = False)
w.add(tv)
w.show_all()
gtk.main()
| 28.431718 | 89 | 0.597614 | 4,924 | 0.762938 | 74 | 0.011466 | 0 | 0 | 0 | 0 | 1,176 | 0.182213 |
ad63cef726f7367efe5345d0f36197ebd8c709bc | 1,582 | py | Python | blogs/views/feed.py | daaawx/bearblog | 5e01e4443c632ff53b918cf8a0d3b1c648b352fe | [
"MIT"
] | 657 | 2020-05-26T16:16:07.000Z | 2022-03-26T22:35:01.000Z | blogs/views/feed.py | daaawx/bearblog | 5e01e4443c632ff53b918cf8a0d3b1c648b352fe | [
"MIT"
] | 107 | 2020-05-26T17:45:04.000Z | 2022-03-17T08:24:00.000Z | blogs/views/feed.py | daaawx/bearblog | 5e01e4443c632ff53b918cf8a0d3b1c648b352fe | [
"MIT"
] | 42 | 2020-05-26T23:57:58.000Z | 2022-03-15T04:20:26.000Z | from django.http.response import Http404
from django.http import HttpResponse
from blogs.helpers import unmark, clean_text
from blogs.views.blog import resolve_address
from feedgen.feed import FeedGenerator
import mistune
def feed(request):
blog = resolve_address(request)
if not blog:
raise Http404("Blog does not exist")
all_posts = blog.post_set.filter(publish=True, is_page=False).order_by('-published_date')
fg = FeedGenerator()
fg.id(blog.useful_domain())
fg.author({'name': blog.subdomain, 'email': 'hidden'})
fg.title(blog.title)
fg.subtitle(blog.meta_description or clean_text(unmark(blog.content)[:160]) or blog.title)
fg.link(href=f"{blog.useful_domain()}/", rel='alternate')
for post in all_posts:
fe = fg.add_entry()
fe.id(f"{blog.useful_domain()}/{post.slug}/")
fe.title(post.title)
fe.author({'name': blog.subdomain, 'email': 'hidden'})
fe.link(href=f"{blog.useful_domain()}/{post.slug}/")
fe.content(clean_text(mistune.html(post.content)), type="html")
fe.published(post.published_date)
fe.updated(post.published_date)
if request.GET.get('type') == 'rss':
fg.link(href=f"{blog.useful_domain()}/feed/?type=rss", rel='self')
rssfeed = fg.rss_str(pretty=True)
return HttpResponse(rssfeed, content_type='application/rss+xml')
else:
fg.link(href=f"{blog.useful_domain()}/feed/", rel='self')
atomfeed = fg.atom_str(pretty=True)
return HttpResponse(atomfeed, content_type='application/atom+xml')
| 35.954545 | 94 | 0.676359 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 336 | 0.212389 |
ad651f61bd321ebea386f33585e8cf5eacb6acdb | 894 | py | Python | src/data_hub/lcd/migrations/0053_alter_collectionfootprint_the_geom.py | TNRIS/api.tnris.org | 46620a4edf0682c158907f110158110801e9c398 | [
"MIT"
] | 6 | 2019-05-22T20:01:45.000Z | 2020-08-18T12:05:12.000Z | src/data_hub/lcd/migrations/0053_alter_collectionfootprint_the_geom.py | TNRIS/api.tnris.org | 46620a4edf0682c158907f110158110801e9c398 | [
"MIT"
] | 73 | 2019-05-22T19:57:30.000Z | 2022-03-12T00:59:33.000Z | src/data_hub/lcd/migrations/0053_alter_collectionfootprint_the_geom.py | TNRIS/api.tnris.org | 46620a4edf0682c158907f110158110801e9c398 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-08-10 20:12
import django.contrib.gis.db.models.fields
import django.contrib.gis.geos.collections
import django.contrib.gis.geos.polygon
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('lcd', '0052_alter_collectionfootprint_the_geom'),
]
operations = [
migrations.AlterField(
model_name='collectionfootprint',
name='the_geom',
field=django.contrib.gis.db.models.fields.MultiPolygonField(default=django.contrib.gis.geos.collections.MultiPolygon(django.contrib.gis.geos.polygon.Polygon(((-107.05078125, 25.60190226111573), (-93.07617187499999, 25.60190226111573), (-93.07617187499999, 36.66841891894786), (-107.05078125, 36.66841891894786), (-107.05078125, 25.60190226111573)))), null=True, srid=4326, verbose_name='The Geometry'),
),
]
| 40.636364 | 414 | 0.722595 | 684 | 0.765101 | 0 | 0 | 0 | 0 | 0 | 0 | 138 | 0.154362 |
ad67c8aef56c0e2dd3ee6398991df1cf46f8a5ba | 4,456 | py | Python | utility/timer.py | xlnwel/g2rl | e1261fdd2ce70724a99ddd174616cf013917b241 | [
"Apache-2.0"
] | 1 | 2022-03-27T08:25:57.000Z | 2022-03-27T08:25:57.000Z | utility/timer.py | xlnwel/g2rl | e1261fdd2ce70724a99ddd174616cf013917b241 | [
"Apache-2.0"
] | null | null | null | utility/timer.py | xlnwel/g2rl | e1261fdd2ce70724a99ddd174616cf013917b241 | [
"Apache-2.0"
] | 1 | 2021-11-09T08:33:35.000Z | 2021-11-09T08:33:35.000Z | from time import strftime, gmtime, time
from collections import defaultdict
import tensorflow as tf
from utility.aggregator import Aggregator
from utility.display import pwc
def timeit(func, *args, name=None, to_print=True,
return_duration=False, **kwargs):
start_time = gmtime()
start = time()
result = func(*args, **kwargs)
end = time()
end_time = gmtime()
if to_print:
pwc(f'{name if name else func.__name__}: '
f'Start "{strftime("%d %b %H:%M:%S", start_time)}"',
f'End "{strftime("%d %b %H:%M:%S", end_time)}" '
f'Duration "{end - start:.3g}s"', color='blue')
if return_duration:
return end - start, result
else:
return result
class Timer:
aggregators = defaultdict(Aggregator)
def __init__(self, summary_name, period=None, mode='average', to_record=True):
self._to_log = to_record
if self._to_log:
self._summary_name = summary_name
self._period = period
assert mode in ['average', 'sum']
self._mode = mode
def __enter__(self):
if self._to_log:
self._start = time()
return self
def __exit__(self, exc_type, exc_value, traceback):
if self._to_log:
duration = time() - self._start
aggregator = self.aggregators[self._summary_name]
aggregator.add(duration)
if self._period is not None and aggregator.count >= self._period:
if self._mode == 'average':
duration = aggregator.average()
duration = (f'{duration*1000:.3g}ms' if duration < 1e-1
else f'{duration:.3g}s')
pwc(f'{self._summary_name} duration: "{duration}" averaged over {self._period} times', color='blue')
aggregator.reset()
else:
duration = aggregator.sum
pwc(f'{self._summary_name} duration: "{duration}" for {aggregator.count} times', color='blue')
def reset(self):
aggregator = self.aggregators[self._summary_name]
aggregator.reset()
def average(self):
return self.aggregators[self._summary_name].average()
def last(self):
return self.aggregators[self._summary_name].last
def total(self):
return self.aggregators[self._summary_name].total
class TBTimer:
aggregators = defaultdict(Aggregator)
def __init__(self, summary_name, period=1, to_record=True, print_terminal_info=False):
self._to_log = to_record
if self._to_log:
self._summary_name = summary_name
self._period = period
self._print_terminal_info = print_terminal_info
def __enter__(self):
if self._to_log:
self._start = time()
return self
def __exit__(self, exc_type, exc_value, traceback):
if self._to_log:
duration = time() - self._start
aggregator = self.aggregators[self._summary_name]
aggregator.add(duration)
if aggregator.count >= self._period:
duration = aggregator.average()
step = tf.summary.experimental.get_step()
tf.summary.scalar(f'timer/{self._summary_name}', duration, step=step)
aggregator.reset()
if self._print_terminal_info:
pwc(f'{self._summary_name} duration: "{duration}" averaged over {self._period} times', color='blue')
class LoggerTimer:
def __init__(self, logger, summary_name, to_record=True):
self._to_log = to_record
if self._to_log:
self._logger = logger
self._summary_name = summary_name
def __enter__(self):
if self._to_log:
self._start = time()
return self
def __exit__(self, exc_type, exc_value, traceback):
if self._to_log:
duration = time() - self._start
self._logger.store(**{self._summary_name: duration})
class Every:
def __init__(self, period, start=0):
self._period = period
self._next = start
def __call__(self, step):
if step >= self._next:
while step >= self._next:
self._next += self._period
return True
return False
def step(self):
return self._next - self._period
| 33.503759 | 120 | 0.58842 | 3,706 | 0.831688 | 0 | 0 | 0 | 0 | 0 | 0 | 533 | 0.119614 |
ad67d66388f0984d00adcc7945f036f683b5cc41 | 1,497 | py | Python | tests/functional/test_tagged_unions_unknown.py | karim7262/botocore | 070204a0afb94c23dcfe040f4933c74ab1d8e089 | [
"Apache-2.0"
] | 1,063 | 2015-01-13T13:35:09.000Z | 2022-03-31T09:29:32.000Z | tests/functional/test_tagged_unions_unknown.py | karim7262/botocore | 070204a0afb94c23dcfe040f4933c74ab1d8e089 | [
"Apache-2.0"
] | 2,064 | 2015-01-03T15:53:33.000Z | 2022-03-31T23:12:08.000Z | tests/functional/test_tagged_unions_unknown.py | karim7262/botocore | 070204a0afb94c23dcfe040f4933c74ab1d8e089 | [
"Apache-2.0"
] | 1,065 | 2015-01-16T15:58:42.000Z | 2022-03-31T22:18:56.000Z | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.session import Session
from tests import unittest
class TestTaggedUnionsUnknown(unittest.TestCase):
def test_tagged_union_member_name_does_not_coincide_with_unknown_key(self):
# This test ensures that operation models do not use SDK_UNKNOWN_MEMBER
# as a member name. Thereby reserving SDK_UNKNOWN_MEMBER for the parser to
# set as a key on the reponse object. This is necessary when the client
# encounters a member that it is unaware of or not modeled.
session = Session()
for service_name in session.get_available_services():
service_model = session.get_service_model(service_name)
for shape_name in service_model.shape_names:
shape = service_model.shape_for(shape_name)
if hasattr(shape, 'is_tagged_union') and shape.is_tagged_union:
self.assertNotIn('SDK_UNKNOWN_MEMBER', shape.members)
| 49.9 | 82 | 0.734135 | 869 | 0.580494 | 0 | 0 | 0 | 0 | 0 | 0 | 861 | 0.57515 |
ad6939b73d6eb13be57d04a632bb4bb399ec7cb8 | 2,177 | py | Python | pytest_profiler/pytest_profiler.py | emilberwald/pytest_profiler | fc873d1b2247ec9355ab3521d67d815f0914e5ec | [
"MIT"
] | null | null | null | pytest_profiler/pytest_profiler.py | emilberwald/pytest_profiler | fc873d1b2247ec9355ab3521d67d815f0914e5ec | [
"MIT"
] | null | null | null | pytest_profiler/pytest_profiler.py | emilberwald/pytest_profiler | fc873d1b2247ec9355ab3521d67d815f0914e5ec | [
"MIT"
] | null | null | null | import io
import multiprocessing
import pathlib
from urllib.parse import quote_plus
import pytest
import yappi
semaphore = multiprocessing.Semaphore(1)
class PytestProfiler:
def __init__(self, outdir):
self.func_stats_summary = io.StringIO()
self.outdir = pathlib.Path(outdir)
def pytest_sessionstart(self, session):
pass
def pytest_sessionfinish(self):
yappi.clear_stats()
pass
def pytest_terminal_summary(self, terminalreporter):
terminalreporter.write("\n" + "=" * 80 + "\n")
terminalreporter.write(str(self.func_stats_summary.getvalue()))
terminalreporter.write("\n" + "=" * 80 + "\n")
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(self, item):
yappi.start(builtins=False, profile_threads=True)
yield
yappi.stop()
func_stats = yappi.get_func_stats()
self.outdir.mkdir(parents=True, exist_ok=True)
path = (
self.outdir / pathlib.Path(quote_plus(item.name, safe="/[],._") + ".prof")
).absolute()
func_stats.save(str(path), type="PSTAT")
self.func_stats_summary.write("\n" + "-" * 80 + "\n")
self.func_stats_summary.write(
f"function statistics\n{item.name}\n{str(path)}\n"
)
func_stats.sort("ttot").print_all(self.func_stats_summary)
self.func_stats_summary.write("\n" + "-" * 80 + "\n")
func_stats.clear()
def pytest_addoption(parser):
group = parser.getgroup("Profiling")
group.addoption(
"--profile",
action="store_true",
default=False,
help="generate profiling information",
)
group.addoption(
"--profile-outdir",
default="prof",
help="output directory (places pstat .prof files here)",
)
def pytest_configure(config):
    """Install the profiler plugin once when ``--profile`` was requested.

    The module-level semaphore serializes registration so the plugin is
    never registered twice.
    """
    with semaphore:
        if not bool(config.getoption("--profile")):
            return
        if config.pluginmanager.is_registered("pytest_profiler"):
            return
        outdir = config.getoption("--profile-outdir")
        config.pluginmanager.register(PytestProfiler(outdir))
| 29.821918 | 86 | 0.625172 | 1,298 | 0.596233 | 733 | 0.336702 | 772 | 0.354616 | 0 | 0 | 308 | 0.141479 |
ad696db21a9d22c4ded15d5a9823102815ab634c | 18,027 | py | Python | codewars/difficulty_level_6kyu/football_yellow_and_red_cards/test_solution_football_yellow_and_red_cards.py | aleattene/python-codewars-challenges | 86cfed8179193780763a0e36ef2f1ea4729a992f | [
"MIT"
] | 1 | 2021-12-16T16:31:11.000Z | 2021-12-16T16:31:11.000Z | codewars/difficulty_level_6kyu/football_yellow_and_red_cards/test_solution_football_yellow_and_red_cards.py | aleattene/python-codewars-challenges | 86cfed8179193780763a0e36ef2f1ea4729a992f | [
"MIT"
] | 2 | 2022-01-09T22:12:53.000Z | 2022-01-13T10:34:52.000Z | codewars/difficulty_level_6kyu/football_yellow_and_red_cards/test_solution_football_yellow_and_red_cards.py | aleattene/python-codewars-challenges | 86cfed8179193780763a0e36ef2f1ea4729a992f | [
"MIT"
] | 1 | 2022-03-10T05:17:10.000Z | 2022-03-10T05:17:10.000Z |
""" To start the tests, type from CLI: python test_solution_sum_of_missing_numbers.py """
import unittest
from solution_football_yellow_and_red_cards import men_still_standing
class TestSolution(unittest.TestCase):
    """Tests for ``men_still_standing``.

    Each event string is ``<team><player><card>`` (e.g. ``"A4Y"``); the
    function returns the (team A, team B) player counts after all cards.
    """

    def test_simple_cases(self):
        # Small hand-constructed inputs covering the basic card rules.
        self.assertEqual(men_still_standing([]), (11, 11))
        self.assertEqual(men_still_standing(["A4Y", "A4Y"]), (10, 11))
        self.assertEqual(men_still_standing(["A4Y", "A4R"]), (10, 11))
        self.assertEqual(men_still_standing(["A4Y", "A5R", "B5R", "A4Y", "B6Y"]), (9, 10))
        self.assertEqual(men_still_standing(["A4R", "A4R", "A4R"]), (10, 11))
        self.assertEqual(men_still_standing(["A4R", "A6R", "A8R", "A10R", "A11R"]), (6, 11))
        pass

    def test_advanced_cases(self):
        # Larger randomized fixtures; expected values are (team A, team B).
        self.assertEqual(men_still_standing(['A11R', 'A8Y', 'B1R', 'A10Y', 'A9Y']), (10, 10))
        self.assertEqual(men_still_standing(['A8Y', 'B5Y', 'A8Y', 'B9R', 'A6R', 'B6Y', 'B7R', 'B7R', 'A6Y', 'A11R',
                                             'B9R', 'A4R', 'A2Y', 'B10Y']), (7, 9))
        self.assertEqual(men_still_standing(['A5Y', 'A2Y', 'A10Y', 'A1R', 'A6Y', 'B2R', 'B6Y']), (10, 10))
        self.assertEqual(men_still_standing(['B7Y', 'A5Y', 'B1Y', 'B6Y', 'A1Y', 'A9R', 'A10R', 'B7Y', 'B1Y', 'A10R',
                                             'B7Y', 'A1Y', 'B2Y', 'A11R', 'A3Y', 'B3Y', 'A4Y', 'B10R', 'B1R',
                                             'B2Y']), (7, 7))
        self.assertEqual(men_still_standing(['A3Y', 'B3Y', 'B11Y', 'A2Y', 'A2Y', 'A1R', 'A7Y', 'A2R', 'B2Y', 'A9Y',
                                             'A10Y', 'A6R', 'A6R', 'A1R', 'B3R', 'A8Y', 'A11Y', 'B4Y', 'B6Y']), (8, 10))
        self.assertEqual(men_still_standing(['A4R', 'A8Y', 'A5Y', 'B5Y', 'A5Y', 'B1R', 'B2R', 'B8R', 'A9Y']), (9, 8))
        self.assertEqual(men_still_standing(['A1R', 'A11R', 'A5R', 'A2Y', 'A10Y', 'B8R', 'B4R', 'B10R', 'A6R', 'A7Y',
                                             'A1R', 'A8Y', 'A9R']), (6, 8))
        self.assertEqual(men_still_standing(['A11R', 'B1Y', 'A9Y', 'B7R', 'A8Y', 'A3Y', 'B1Y', 'B5Y', 'B8Y', 'B4R',
                                             'A2R', 'A11Y']), (9, 8))
        self.assertEqual(men_still_standing(['B3R', 'B2R', 'A3Y', 'B1Y', 'B1Y', 'B7R', 'B3Y', 'A11R']), (10, 7))
        self.assertEqual(men_still_standing(['A4Y', 'B11Y', 'A2R', 'B4R', 'A9Y', 'A4R', 'A9R', 'A11Y', 'B1Y', 'B4Y',
                                             'B8Y', 'A1Y', 'B11R', 'A4Y', 'A10Y', 'B8R', 'B8R']), (8, 8))
        self.assertEqual(men_still_standing(['A4Y', 'B11Y', 'A8Y', 'A10R', 'B7Y', 'A1Y', 'A11R', 'A2Y', 'A1R', 'A4Y',
                                             'A5R', 'A4Y', 'B3Y', 'B6R', 'A7Y']), (6, 11))
        self.assertEqual(men_still_standing(['B3Y', 'B7R', 'B1Y', 'B6Y', 'A11R', 'B6Y', 'B8Y', 'A4Y', 'B9Y', 'B11Y',
                                             'B8Y']), (10, 8))
        self.assertEqual(men_still_standing(['A1R', 'B5Y', 'B6Y', 'B9R', 'B1R', 'A11Y', 'B6Y', 'A4Y', 'B2Y', 'B9Y',
                                             'A10Y', 'A11R', 'B5Y', 'B8R', 'A11R']), (9, 6))
        self.assertEqual(men_still_standing(['A5Y', 'A1R', 'A3R', 'A10R', 'B6Y', 'B5Y', 'B8Y', 'B2Y', 'B11R', 'B7Y',
                                             'A10Y']), (8, 10))
        self.assertEqual(men_still_standing(['B2Y', 'A8Y', 'A6Y', 'B11Y', 'A1Y', 'A5Y', 'A6Y', 'A9R', 'A11Y', 'A1Y',
                                             'A1R', 'A10R', 'B3Y', 'B8Y', 'A8Y']), (6, 11))
        self.assertEqual(men_still_standing(['B5Y', 'A9Y', 'A5R', 'B8Y', 'A11Y', 'B9Y', 'A6Y', 'B8Y']), (10, 10))
        self.assertEqual(men_still_standing(['A9R', 'B9R', 'B7Y', 'B9Y', 'A3R', 'B1Y', 'A3Y', 'A9R', 'A4Y', 'B7Y',
                                             'B4Y', 'B8Y', 'A9Y', 'A8R', 'B7Y', 'B6Y', 'B10Y', 'A10Y', 'A5Y', 'A10R',
                                             'B2Y', 'B10Y', 'A8Y']), (7, 8))
        self.assertEqual(men_still_standing(['A9Y', 'B10R', 'A5Y', 'A1Y', 'B1R', 'A4R']), (10, 9))
        self.assertEqual(men_still_standing(['B11R', 'B3R', 'A10Y', 'B1Y', 'A9Y', 'B11Y', 'B4Y', 'A6Y', 'A11Y', 'B1R',
                                             'A9Y', 'B3R', 'A3Y', 'A2Y', 'B2Y', 'B4R']), (10, 7))
        self.assertEqual(men_still_standing(['B4Y', 'B4Y', 'B4Y', 'A1Y', 'A9Y', 'B3Y', 'B1Y', 'A9Y', 'A11R', 'A1R',
                                             'B7Y', 'B5R', 'B7Y', 'B1Y']), (8, 7))
        self.assertEqual(men_still_standing(['A2R', 'A2Y', 'B9Y', 'B6Y']), (10, 11))
        self.assertEqual(men_still_standing(['B6R', 'B6R', 'A9Y', 'B4R', 'A5Y', 'A9R', 'A10R', 'A9Y', 'A11Y', 'A2R',
                                             'B2Y', 'A3R', 'B11Y', 'B4Y', 'A7Y', 'A3Y']), (7, 9))
        self.assertEqual(men_still_standing(['B2Y', 'B2Y', 'A10R', 'A8R', 'A8Y', 'B11R', 'A11Y', 'A5R', 'A8R', 'A10R',
                                             'B8R']), (8, 8))
        self.assertEqual(men_still_standing(['B7R', 'B10Y', 'B6Y', 'A3Y']), (11, 10))
        self.assertEqual(men_still_standing(['A10Y', 'A9R', 'A2R', 'B5Y', 'A8Y', 'B8R', 'A11R', 'B3Y', 'B5Y', 'A6R',
                                             'B3Y', 'B7Y', 'B1Y', 'A6R', 'A6Y', 'A9R', 'B3Y']), (7, 8))
        self.assertEqual(men_still_standing(['B6R', 'B7Y', 'A2Y', 'B2Y', 'A11Y', 'B11R', 'B10Y', 'B7R', 'A1Y', 'B7Y',
                                             'B7Y', 'A3Y', 'A4Y', 'B5Y', 'B3R', 'B4R', 'A2Y', 'A2R', 'A2R', 'A3Y',
                                             'A4R', 'A3Y', 'A5Y']), (11, 6))
        self.assertEqual(men_still_standing(['B9R', 'B9Y', 'B7Y', 'B11Y', 'B6R', 'A1Y', 'B6R', 'B6Y', 'A8Y', 'B8Y',
                                             'B11Y', 'B10R', 'A9Y', 'B10R', 'A2Y', 'A9R', 'B10Y']), (10, 7))
        self.assertEqual(men_still_standing(['B5Y', 'B6Y', 'B4R', 'A3Y', 'A3Y', 'A3Y', 'A11R', 'B6Y', 'B9Y',
                                             'B8Y']), (9, 9))
        self.assertEqual(men_still_standing(['A5R', 'A4R', 'B8Y', 'A6Y', 'A8R', 'B7R', 'B9Y', 'B6Y', 'B4Y', 'A5Y',
                                             'A1Y', 'A10Y', 'B6Y', 'B1R', 'B8R', 'B8R', 'B7R', 'A2Y', 'B6R', 'A2Y',
                                             'B11Y', 'B10Y', 'B8R']), (7, 7))
        self.assertEqual(men_still_standing(['A9Y', 'A11Y', 'A10Y', 'A3Y']), (11, 11))
        self.assertEqual(men_still_standing(['B5R', 'A6Y', 'B2Y', 'A2Y', 'A5R', 'A6Y']), (9, 10))
        self.assertEqual(men_still_standing(['A9R', 'B5Y', 'A11R', 'B11Y', 'A11R', 'A6Y', 'B5R', 'A10Y', 'B6Y', 'A9Y',
                                             'B7R', 'A5R', 'B10R', 'B7Y', 'B11Y', 'B4Y', 'B7Y', 'A6Y', 'A10Y', 'B7Y',
                                             'A11Y', 'A11Y', 'B10R', 'A2Y']), (6, 7))
        self.assertEqual(men_still_standing(['B2Y', 'A4Y', 'A10Y', 'B10Y', 'A3Y', 'B3Y', 'A7R', 'A10Y', 'A5Y', 'A2R',
                                             'A11Y', 'B5Y', 'A7Y', 'B1R', 'B1Y', 'B6Y', 'B9Y', 'A8R', 'B1Y', 'B1Y',
                                             'A5Y']), (6, 10))
        self.assertEqual(men_still_standing(['B4R']), (11, 10))
        self.assertEqual(men_still_standing(['B10R']), (11, 10))
        self.assertEqual(men_still_standing(['A5Y', 'A7Y']), (11, 11))
        self.assertEqual(men_still_standing(['B10Y', 'B9Y', 'A8Y', 'A8Y', 'A3R', 'A8Y', 'A6Y', 'B11Y', 'A6Y', 'A3Y',
                                             'A8R', 'B5Y', 'B5Y', 'B1Y', 'B10R', 'A2Y', 'A8R', 'A1Y', 'B7Y',
                                             'B11R']), (8, 8))
        self.assertEqual(men_still_standing(['B2Y', 'A3R', 'A2R', 'B5R', 'B2Y', 'B5Y', 'A8Y', 'B5Y', 'B8Y', 'B3Y',
                                             'B11R', 'B11Y']), (9, 8))
        self.assertEqual(men_still_standing(['B9Y', 'A9R', 'A9Y', 'B8R', 'A8R', 'A9R', 'A8R', 'B10R', 'B9R', 'A3R',
                                             'B7Y', 'A7R', 'B9Y', 'B2R', 'A5Y', 'A7Y', 'B3R', 'A10Y', 'B1R', 'A10Y',
                                             'B4Y', 'A11Y', 'A10R']), (7, 6))
        self.assertEqual(men_still_standing(['A10R']), (10, 11))
        self.assertEqual(men_still_standing(['A6Y', 'B2R', 'B8Y', 'A3R', 'A5Y', 'B6Y', 'B3Y', 'B6Y', 'A4Y', 'B9Y',
                                             'A9Y', 'A6Y']), (9, 9))
        self.assertEqual(men_still_standing(['A5Y', 'B4R', 'A7Y', 'A4Y', 'B11Y', 'B8Y', 'A11Y', 'A5R', 'A3Y', 'A8Y',
                                             'B6Y', 'B5Y', 'A10R', 'A1R', 'B6R', 'A2R', 'B2R', 'B8R', 'B2R', 'A5Y',
                                             'A8Y', 'A9R', 'B8Y', 'B5Y']), (6, 7))
        self.assertEqual(men_still_standing(['B7Y', 'B8Y', 'B11R', 'B4R', 'B8Y', 'A11Y', 'B10Y', 'B2Y', 'B1Y', 'B3Y',
                                             'B6R', 'A8R', 'B5Y', 'A6Y', 'B11Y', 'B2Y', 'A3R', 'A5Y', 'A10Y']), (10, 6))
        self.assertEqual(men_still_standing(['B10Y', 'A7Y', 'A8Y', 'B5R', 'A2R', 'A2R', 'B1Y', 'A8R', 'A5Y', 'A9Y',
                                             'B4Y', 'B5Y', 'A4Y']), (9, 10))
        self.assertEqual(men_still_standing(['A5Y', 'B7Y', 'B8Y', 'B3R', 'A7R', 'B8Y', 'B7Y', 'B6Y', 'B1R', 'A5R',
                                             'B4Y', 'B3Y', 'B1Y', 'B11R']), (9, 6))
        self.assertEqual(men_still_standing(['A3Y', 'A3Y', 'A6Y', 'B11R', 'B3Y', 'A4Y', 'B3Y', 'B8Y', 'B9R', 'A10Y',
                                             'A6Y', 'A8R', 'B4Y', 'A11Y']), (8, 8))
        self.assertEqual(men_still_standing(['B5R', 'B6Y', 'A2R', 'B7Y', 'A7Y', 'A4Y', 'A4R', 'B9Y', 'A6Y', 'A5Y',
                                             'B4Y', 'A10R', 'B9Y', 'B9Y', 'B7R', 'A9R', 'A10Y']), (7, 8))
        self.assertEqual(men_still_standing(['A2Y', 'B6Y', 'B5R', 'B9R', 'B6Y', 'B9Y', 'B2Y', 'B2R', 'A7R', 'B5R',
                                             'A2Y', 'A3R', 'A6Y', 'B4Y', 'A4Y', 'B5Y', 'A4R', 'B2Y', 'B3R']), (7, 6))
        self.assertEqual(men_still_standing(['B7R', 'A8R', 'A3Y', 'B8Y', 'A3R', 'A8R', 'B6Y', 'B11R', 'B3Y', 'A2Y',
                                             'A9R']), (8, 9))
        self.assertEqual(men_still_standing(['A3Y', 'B6Y', 'A5Y', 'A3R', 'A11Y', 'B10Y', 'B6Y', 'A9Y', 'B1R', 'A7Y',
                                             'A11Y', 'A8Y', 'B6Y', 'A7Y', 'B10Y', 'A4Y', 'B9R', 'B4Y', 'A4Y', 'B10Y',
                                             'B11R', 'A3Y']), (7, 6))
        self.assertEqual(men_still_standing(['A5Y', 'B2Y', 'B5R', 'B5R', 'A4R', 'B3Y', 'B4Y', 'A3Y', 'B4Y', 'B5R',
                                             'B2Y', 'B2R', 'B1Y', 'B9Y', 'A8R', 'A4Y', 'A2R']), (8, 8))
        self.assertEqual(men_still_standing(['B1Y', 'B1Y']), (11, 10))
        self.assertEqual(men_still_standing(['A1R', 'B11Y', 'A8Y', 'B6Y', 'B8Y', 'A11R', 'A2Y', 'B3R', 'B2Y', 'A9Y',
                                             'B2R', 'A9Y']), (8, 9))
        self.assertEqual(men_still_standing(['A9Y', 'A3Y', 'A6Y', 'B6Y', 'A8Y', 'B4Y', 'A7Y', 'A2Y', 'A4R', 'B9Y',
                                             'B6Y', 'B6R', 'A10R']), (9, 10))
        self.assertEqual(men_still_standing(['B9Y', 'B4R', 'B3Y', 'A8Y', 'B6Y']), (11, 10))
        self.assertEqual(men_still_standing(['B6R', 'B2Y', 'A5Y', 'B11Y', 'B7Y', 'A5Y', 'A3R', 'B10Y', 'B2Y',
                                             'A4R']), (8, 9))
        self.assertEqual(men_still_standing(['A7Y', 'B2Y', 'A6R', 'B5R', 'B5Y', 'B3R', 'B4Y', 'B11Y', 'A6Y', 'A9Y',
                                             'B5R', 'A10Y', 'B1Y', 'A3Y', 'A11Y', 'A6Y', 'A9R']), (9, 9))
        self.assertEqual(men_still_standing(['A10Y', 'A1Y', 'B8Y', 'A9Y', 'A4Y', 'B4Y', 'B2Y', 'A2Y',
                                             'B7R']), (11, 10))
        self.assertEqual(men_still_standing(['A7Y', 'B6Y', 'A1R', 'B8Y', 'B7R', 'B11Y', 'B2Y', 'A7R', 'A11Y', 'B3Y',
                                             'B9Y', 'A5Y', 'B11Y', 'B3Y', 'B8Y', 'A2Y', 'A4Y', 'A6R', 'A4Y', 'A7Y',
                                             'A2Y', 'A11Y', 'B3R', 'B1Y', 'A11Y']), (6, 7))
        self.assertEqual(men_still_standing(['B5Y', 'A6Y', 'B5R', 'B9R', 'A7R', 'A7Y', 'B6Y', 'A1R', 'B9Y', 'A8Y',
                                             'A5Y', 'B9Y', 'B6R', 'A11Y', 'A8Y', 'B2R', 'B6Y', 'A5Y', 'A10R', 'A11R',
                                             'B4Y', 'B4Y', 'A4Y']), (6, 7))
        self.assertEqual(men_still_standing(['B7R', 'A5Y', 'B10R', 'A2Y', 'B3R', 'A2Y', 'B6Y', 'B5R', 'B4R', 'B7Y',
                                             'B10R', 'A2R', 'A4R', 'B8Y', 'B8Y', 'B10Y', 'A10R']), (10, 6))
        self.assertEqual(men_still_standing(['B2Y', 'A8Y', 'B7Y', 'B8Y', 'A11Y', 'B10R', 'B2Y', 'B11Y', 'A4R', 'B3Y',
                                             'B1Y', 'B5R', 'B5Y', 'B3Y', 'B1Y']), (10, 6))
        self.assertEqual(men_still_standing(['B11R', 'A6R', 'A10Y', 'A3Y', 'A5R', 'A2Y', 'A10Y', 'B6Y', 'A11R', 'A9Y',
                                             'A7Y', 'A2R', 'A3Y', 'B10Y']), (6, 10))
        self.assertEqual(men_still_standing(['A1R', 'A7Y', 'A9Y', 'A2Y', 'B9Y', 'B1Y', 'A3R', 'A8Y']), (9, 11))
        self.assertEqual(men_still_standing(['A5Y', 'A3Y', 'A5R', 'B3Y', 'A1Y', 'B9Y', 'A1R', 'B5Y']), (9, 11))
        self.assertEqual(men_still_standing(['A4Y', 'B6R', 'A5R', 'A7Y', 'B7Y', 'B8Y', 'A9Y', 'B9Y', 'B1Y', 'B6Y',
                                             'B2Y']), (10, 10))
        self.assertEqual(men_still_standing(['A2Y', 'A10Y', 'A5Y', 'A2Y', 'B1Y', 'B4Y', 'B2Y', 'A10Y']), (9, 11))
        self.assertEqual(men_still_standing(['B2Y', 'A4Y', 'A2R', 'A6Y', 'A2Y', 'A10R', 'A8Y', 'A6Y', 'A10R', 'A10R',
                                             'B2Y', 'B2R', 'B10R', 'A3Y', 'A5Y', 'A1R', 'B5Y', 'B8R', 'A7Y', 'A2R',
                                             'B1Y']), (7, 8))
        self.assertEqual(men_still_standing(['A2R', 'B11Y', 'A9R', 'A9Y', 'A6Y', 'B4R', 'B3R', 'A7Y', 'B8Y', 'A4Y',
                                             'A6R', 'B3Y']), (8, 9))
        self.assertEqual(men_still_standing(['B10Y', 'B1R', 'A1Y', 'A10R', 'B10Y', 'A6R', 'A4Y', 'A2R', 'B9Y', 'A1Y',
                                             'B5Y', 'A7R', 'A1R', 'A2Y', 'B7R', 'B4R', 'B6R', 'A7Y', 'A4R', 'A2Y',
                                             'B2Y', 'A7R', 'B5Y', 'B7Y']), (6, 9))
        self.assertEqual(men_still_standing(['B3R', 'A10Y', 'A3R', 'B7Y', 'B11Y', 'B1Y', 'B3Y', 'B10R', 'A1Y', 'B9Y',
                                             'A4Y', 'A2Y', 'B2R']), (10, 8))
        self.assertEqual(men_still_standing(['A9Y', 'A8R', 'A3Y', 'B4R', 'A9Y', 'A7Y', 'A2R', 'A2R', 'B9Y', 'B7Y',
                                             'A10Y', 'B2Y', 'B9Y', 'A4R', 'A4Y', 'A1Y', 'A10R', 'A11R', 'B3Y', 'B3Y',
                                             'A4Y', 'B6Y']), (6, 9))
        self.assertEqual(men_still_standing(['B8Y', 'B1Y', 'A9R', 'A6Y', 'B2Y']), (10, 11))
        self.assertEqual(men_still_standing(['A6R', 'A4R', 'B11Y', 'A10R', 'B6Y', 'B6Y', 'A5Y', 'B10R', 'A1Y', 'A4Y',
                                             'A5Y', 'B2Y', 'B5Y', 'B4Y', 'B11Y', 'B11R', 'B6R', 'A6R', 'A9R', 'B11Y',
                                             'A9Y', 'A10Y', 'B8Y', 'A6Y']), (6, 8))
        self.assertEqual(men_still_standing(['A4R', 'A9Y', 'B3R', 'B5Y', 'A10R', 'B10Y', 'B6Y', 'A11Y', 'A7Y', 'B9R',
                                             'B3Y']), (9, 9))
        self.assertEqual(men_still_standing(['B9Y', 'B7Y', 'A4Y', 'A1Y', 'B8Y', 'A2R', 'B11Y', 'A1R', 'B11Y', 'A7Y',
                                             'A6R']), (8, 10))
        self.assertEqual(men_still_standing(['A11Y', 'B10Y', 'B7Y', 'A8R', 'B8R', 'A2Y', 'B7R', 'A9Y', 'B3R', 'A8Y',
                                             'B9R', 'B8Y', 'A6Y', 'A9Y', 'B9Y', 'A2Y', 'B6Y', 'A1Y', 'A8Y', 'B11R',
                                             'A5R', 'A11Y', 'A11Y', 'B8Y']), (8, 6))
        self.assertEqual(men_still_standing(['A2Y', 'A2Y', 'B5Y', 'A11Y', 'B9Y', 'A6Y', 'B8R', 'B10R', 'B9R', 'A2Y',
                                             'A10Y', 'A4Y', 'B10Y', 'B1Y', 'B3R']), (10, 7))
        self.assertEqual(men_still_standing(['A10R', 'B10Y', 'A3R', 'A9R', 'A2Y', 'B10Y', 'B8Y', 'B2R', 'A3R',
                                             'B7Y']), (8, 9))
        self.assertEqual(men_still_standing(['B9Y', 'B5Y', 'A8Y']), (11, 11))
        self.assertEqual(men_still_standing(['B10R', 'A10R', 'B7Y', 'B11Y', 'B11Y', 'B1R', 'A7Y', 'A6R']), (9, 8))
        self.assertEqual(men_still_standing(['B11Y', 'A6R', 'B11Y', 'A9R', 'A2Y', 'B11R', 'B11Y', 'B8R', 'B9Y',
                                             'B10Y', 'A6Y']), (9, 9))
        self.assertEqual(men_still_standing(['B10Y', 'A3R', 'B8R', 'B10Y', 'A6Y', 'A2R', 'A11R', 'B7R', 'B3Y', 'A7R',
                                             'B4Y', 'A5R', 'B8Y', 'A9Y', 'A11Y', 'A10Y', 'A6Y', 'A4R', 'A9R', 'B10R',
                                             'B3Y']), (6, 8))
        self.assertEqual(men_still_standing(['A1R', 'B7Y', 'A5Y', 'B10Y', 'A1Y', 'A7Y', 'B11Y', 'A3Y', 'B11Y', 'B1R',
                                             'A11R', 'B11Y', 'A10Y', 'A10Y', 'B4Y', 'B4R', 'A9R']), (7, 8))
        self.assertEqual(men_still_standing(['A9R', 'A11R', 'B5Y', 'A5Y']), (9, 11))
        self.assertEqual(men_still_standing(['A5Y', 'A3Y', 'B5Y', 'B5Y', 'A7Y', 'B7Y', 'A2R', 'A1Y', 'B2Y', 'B11Y',
                                             'A5R']), (9, 10))
        self.assertEqual(men_still_standing(['B7Y', 'A10Y', 'A4R', 'A7R', 'B1R', 'A5R', 'B5Y', 'A11Y', 'A10R', 'A11Y',
                                             'B3Y']), (6, 10))
        self.assertEqual(men_still_standing(['B8Y', 'B11Y', 'A9R', 'A5Y', 'B7R', 'A5Y', 'A10Y']), (9, 10))
        self.assertEqual(men_still_standing(['A9R', 'B9Y', 'A10Y', 'B8Y', 'A10Y', 'A10Y', 'A6Y', 'B2Y']), (9, 11))
if __name__ == '__main__':
    """ The following instruction executes the tests
    by discovering all classes present in this file
    that inherit from unittest.TestCase.
    """
    # NOTE: the bare string above is a no-op expression, not a docstring;
    # it only documents the call below.
    unittest.main()
| 90.58794 | 120 | 0.419315 | 17,643 | 0.978699 | 0 | 0 | 0 | 0 | 0 | 0 | 6,408 | 0.355467 |
ad6c85ce56ed9f4842b89a8439f0c9803a6f3462 | 3,801 | py | Python | iCount/tests/test_externals.py | zhouyu/iCount | c203a5b2c8fbcc2934bb2100be04d3290497cf7d | [
"MIT"
] | null | null | null | iCount/tests/test_externals.py | zhouyu/iCount | c203a5b2c8fbcc2934bb2100be04d3290497cf7d | [
"MIT"
] | null | null | null | iCount/tests/test_externals.py | zhouyu/iCount | c203a5b2c8fbcc2934bb2100be04d3290497cf7d | [
"MIT"
] | 1 | 2020-06-18T21:01:41.000Z | 2020-06-18T21:01:41.000Z | # pylint: disable=missing-docstring, protected-access
import warnings
import unittest
import iCount.externals.cutadapt as cutadapt
import iCount.externals.star as star
from iCount.tests.utils import make_fasta_file, make_fastq_file, get_temp_dir, \
get_temp_file_name, make_file_from_list
class TestCutadapt(unittest.TestCase):
    """Smoke tests for the cutadapt external wrapper."""

    def setUp(self):
        """Create a synthetic FASTQ file carrying a known adapter sequence."""
        self.adapter = 'AAAATTTTCCCCGGGG'
        reads_file = get_temp_file_name(extension='fastq')
        self.reads = make_fastq_file(
            adapter=self.adapter, num_sequences=100, out_file=reads_file)
        self.tmp = get_temp_file_name(extension='fastq')
        # Temporary files are cleaned up lazily; silence the resulting warnings.
        warnings.simplefilter("ignore", ResourceWarning)

    def test_get_version_ok(self):
        """The reported version looks like a dotted version number."""
        self.assertRegex(cutadapt.get_version(), r'\d\.\d+')

    def test_run(self):
        """A full trimming run exits with status 0."""
        status = cutadapt.run(
            self.reads, self.tmp, self.adapter,
            qual_base=64, qual_trim=30, minimum_length=70)
        self.assertEqual(status, 0)
class TestStar(unittest.TestCase):
    """Smoke tests for the STAR aligner wrapper."""

    def setUp(self):
        """Build a tiny genome, matching reads and a one-exon GTF annotation."""
        self.dir = get_temp_dir()
        self.index_dir = get_temp_dir()
        self.genome = make_fasta_file(num_sequences=2, seq_len=1000)
        self.reads = make_fastq_file(genome=self.genome)
        self.annotation = make_file_from_list([
            ['1', '.', 'gene', '10', '20', '.', '+', '.',
             'gene_id "A";'],
            ['1', '.', 'transcript', '10', '20', '.', '+', '.',
             'gene_id "A"; transcript_id "AA";'],
            ['1', '.', 'exon', '10', '20', '.', '+', '.',
             'gene_id "A"; transcript_id "AA"; exon_number "1";'],
        ])
        # Temporary files are cleaned up lazily; silence the resulting warnings.
        warnings.simplefilter("ignore", ResourceWarning)

    def test_get_version_ok(self):
        """The reported version matches the ``STAR_2.5.0a`` pattern."""
        self.assertRegex(star.get_version(), r'STAR_\d\.[\d\w]+')

    def test_build_index_bad_outdir(self):
        """Indexing into a missing output directory raises FileNotFoundError."""
        expected = r'Output directory does not exist. Make sure it does.'
        with self.assertRaisesRegex(FileNotFoundError, expected):
            star.build_index(self.genome, '/unexisting/outdir')

    def test_build_index(self):
        """Index building succeeds both without and with an annotation."""
        plain = star.build_index(
            self.genome, self.index_dir, overhang=100, overhang_min=8, threads=1)
        annotated = star.build_index(
            self.genome, self.index_dir, annotation=self.annotation,
            overhang=100, overhang_min=8, threads=1)
        self.assertEqual(plain, 0)
        self.assertEqual(annotated, 0)

    def test_map_reads_bad_genomedir(self):
        """Mapping against a missing index directory raises FileNotFoundError."""
        expected = r'Directory with genome index does not exist. Make sure it does.'
        with self.assertRaisesRegex(FileNotFoundError, expected):
            star.map_reads(self.reads, '/unexisting/genomedir', self.dir)

    def test_map_reads_bad_outdir(self):
        """Mapping into a missing output directory raises FileNotFoundError."""
        expected = r'Output directory does not exist. Make sure it does.'
        with self.assertRaisesRegex(FileNotFoundError, expected):
            star.map_reads(self.reads, self.dir, '/unexisting/outdir')

    def test_map_reads(self):
        """Mapping succeeds both without and with an annotation."""
        # An index must exist first; build it into the throwaway index dir.
        star.build_index(self.genome, self.index_dir)
        plain = star.map_reads(self.reads, self.index_dir, self.dir)
        annotated = star.map_reads(
            self.reads, self.index_dir, self.dir, annotation=self.annotation,
            multimax=10, mismatches=2, threads=1)
        self.assertEqual(plain, 0)
        self.assertEqual(annotated, 0)
if __name__ == '__main__':
    # Allow running this module directly; discovers all TestCase classes.
    unittest.main()
| 36.902913 | 98 | 0.631413 | 3,450 | 0.907656 | 0 | 0 | 0 | 0 | 0 | 0 | 756 | 0.198895 |
ad6caaf895bc1263240d8f9376ba437ced2dd6f3 | 216 | py | Python | source/auxiliary/other_utilities.py | JoZimmer/ParOptBeam | 50d15d8d822a2718f2932807e06c4a7e02f866a3 | [
"BSD-3-Clause"
] | 1 | 2021-04-09T14:08:20.000Z | 2021-04-09T14:08:20.000Z | source/auxiliary/other_utilities.py | JoZimmer/ParOptBeam | 50d15d8d822a2718f2932807e06c4a7e02f866a3 | [
"BSD-3-Clause"
] | 2 | 2021-04-28T15:05:01.000Z | 2021-11-10T15:12:56.000Z | source/auxiliary/other_utilities.py | JoZimmer/ParOptBeam | 50d15d8d822a2718f2932807e06c4a7e02f866a3 | [
"BSD-3-Clause"
] | 2 | 2021-02-01T08:49:45.000Z | 2021-08-10T02:07:36.000Z | from os.path import sep as os_sep
def get_adjusted_path_string(path_string):
    """Normalize every path-separator variant in *path_string* to the
    platform separator (``os.sep``).

    Doubled backslashes are collapsed first so an escaped Windows separator
    becomes a single platform separator.
    """
    adjusted = path_string
    adjusted = adjusted.replace('\\\\', os_sep)
    adjusted = adjusted.replace('\\', os_sep)
    adjusted = adjusted.replace('/', os_sep)
    adjusted = adjusted.replace('//', os_sep)
    return adjusted[:]
| 21.6 | 60 | 0.648148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.078704 |
ad6dc8f5ec7abbc123e658c8a53c68b664ad7c1a | 271 | py | Python | modules/04/examples/dollar.py | edsu/inst126 | a14f2c6901759f87b1f199f79ed1b8a5c03c688d | [
"CC-BY-4.0"
] | 2 | 2019-08-07T07:49:09.000Z | 2019-08-24T02:07:39.000Z | modules/04/examples/dollar.py | edsu/inst126 | a14f2c6901759f87b1f199f79ed1b8a5c03c688d | [
"CC-BY-4.0"
] | 2 | 2020-07-18T02:43:50.000Z | 2022-02-10T19:04:57.000Z | modules/04/examples/dollar.py | edsu/inst126 | a14f2c6901759f87b1f199f79ed1b8a5c03c688d | [
"CC-BY-4.0"
] | null | null | null | # jaylin
# Prompt for hours worked and the hourly rate, then print the pay —
# but only when the rate meets the $15 minimum wage.
hours = float(input("Enter hours worked: "))
rate = float(input("Enter hourly rate: "))
if rate < 15:
    print("I'm sorry " + str(rate) + " is lower than the minimum wage!")
else:
    print("Pay: $", hours * rate)
| 20.846154 | 76 | 0.535055 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.391144 |
ad7017adc96fad3d0362ec1d61ae2f9905d89711 | 297 | py | Python | Week 1/id_545/LeetCode_26_545.py | theshaodi/algorithm004-05 | cac0cd3bb1211d50936234c08f6ece38677e55cf | [
"Apache-2.0"
] | 1 | 2019-10-12T06:48:45.000Z | 2019-10-12T06:48:45.000Z | Week 1/id_545/LeetCode_26_545.py | theshaodi/algorithm004-05 | cac0cd3bb1211d50936234c08f6ece38677e55cf | [
"Apache-2.0"
] | 1 | 2019-12-01T10:02:03.000Z | 2019-12-01T10:02:03.000Z | Week 1/id_545/LeetCode_26_545.py | theshaodi/algorithm004-05 | cac0cd3bb1211d50936234c08f6ece38677e55cf | [
"Apache-2.0"
] | null | null | null | ## 删除排序数组中的重复项
# Approach: fast/slow two-pointer scan — time O(n), extra space O(1)
class Solution:
    def removeDuplicates(self, nums: List[int]) -> int:
        """Remove duplicates in-place from the sorted list *nums*.

        Returns the number of unique elements; the first that many slots of
        *nums* hold the unique values in their original order.
        """
        # Fix: the original returned 1 for an empty list; the correct new
        # length of an empty array is 0.
        if not nums:
            return 0
        slow = 0  # index of the last unique element written so far
        for fast in range(1, len(nums)):
            if nums[fast] != nums[slow]:
                slow += 1
                nums[slow] = nums[fast]
        return slow + 1
ad7112c3738411a5a1f2089e56459f416d494862 | 17,160 | py | Python | tests/diag/test_ccsd.py | fevangelista/pyWicked | 9bc0e13f6e45c86222ea95fdadf1cb66eb59862f | [
"MIT"
] | null | null | null | tests/diag/test_ccsd.py | fevangelista/pyWicked | 9bc0e13f6e45c86222ea95fdadf1cb66eb59862f | [
"MIT"
] | null | null | null | tests/diag/test_ccsd.py | fevangelista/pyWicked | 9bc0e13f6e45c86222ea95fdadf1cb66eb59862f | [
"MIT"
] | null | null | null | import wicked as w
def print_comparison(val, val2):
    """Print the computed expression next to the expected expression."""
    for label, expr in (("Result", val), ("Test", val2)):
        print(f"{label}: {expr}")
def compare_expressions(test, ref):
    """Assert that the equations in *test* sum to the expression given by
    the reference strings in *ref*."""
    expected = w.Expression()
    for term in ref:
        expected += w.string_to_expr(term)
    actual = w.Expression()
    for equation in test:
        actual += equation.rhs_expression()
    print_comparison(actual, expected)
    assert actual == expected
def initialize():
    """Reset Wicked and declare a closed-shell occupied/virtual partition."""
    w.reset_space()
    occupied_indices = ["i", "j", "k", "l", "m", "n"]
    virtual_indices = ["a", "b", "c", "d", "e", "f"]
    w.add_space("o", "fermion", "occupied", occupied_indices)
    w.add_space("v", "fermion", "unoccupied", virtual_indices)
def test_energy1():
    """CCSD Energy <F T1> (1)"""
    initialize()
    amplitudes = w.op("t", ["v+ o"])
    fock = w.op("f", ["o+ v"])
    expected = w.expression("f^{v_0}_{o_0} t^{o_0}_{v_0}")
    result = w.WickTheorem().contract(w.rational(1), fock @ amplitudes, 0, 0)
    print_comparison(result, expected)
    assert result == expected
def test_energy2():
    """CCSD Energy <V T2> (2)"""
    initialize()
    amplitudes = w.op("t", ["v+ v+ o o"])
    integrals = w.op("v", ["o+ o+ v v"])
    expected = w.expression("1/4 t^{o_0,o_1}_{v_0,v_1} v^{v_0,v_1}_{o_0,o_1}")
    result = w.WickTheorem().contract(w.rational(1), integrals @ amplitudes, 0, 0)
    print_comparison(result, expected)
    assert result == expected
def test_energy3():
    """CCSD Energy 1/2 <V T1 T1> (3)"""
    initialize()
    amplitudes = w.op("t", ["v+ o"])
    integrals = w.op("v", ["o+ o+ v v"])
    expected = w.expression("1/2 t^{o0}_{v0} t^{o1}_{v1} v^{v0,v1}_{o0,o1}")
    result = w.WickTheorem().contract(
        w.rational(1, 2), integrals @ amplitudes @ amplitudes, 0, 0)
    print_comparison(result, expected)
    assert result == expected
def test_r1_1():
    """CCSD T1 Residual Fov (1)"""
    initialize()
    Fvo = w.op("f", ["v+ o"])
    wt = w.WickTheorem()
    # `contraction` (was `sum`) avoids shadowing the builtin of that name.
    contraction = wt.contract(w.rational(1), Fvo, 2, 2)
    val = contraction.to_manybody_equation("r")["o|v"][0].rhs_expression()
    val2 = w.expression("f^{o0}_{v0}").canonicalize()
    print_comparison(val, val2)
    assert val == val2
def test_r1_2():
    """CCSD T1 Residual [Fvv,T1] (2)"""
    initialize()
    T1 = w.op("t", ["v+ o"])
    Fvv = w.op("f", ["v+ v"])
    wt = w.WickTheorem()
    contraction = wt.contract(w.rational(1), Fvv @ T1, 2, 2)
    val = contraction.to_manybody_equation("r")["o|v"][0].rhs_expression()
    val2 = w.expression("f^{v1}_{v0} t^{o0}_{v1}")
    print_comparison(val, val2)
    assert val == val2
def test_r1_3():
    """CCSD T1 Residual [Foo,T1] (3)"""
    initialize()
    T1 = w.op("t", ["v+ o"])
    Foo = w.op("f", ["o+ o"])
    wt = w.WickTheorem()
    contraction = wt.contract(w.rational(1), Foo @ T1, 2, 2)
    val = contraction.to_manybody_equation("r")["o|v"][0].rhs_expression()
    val2 = w.expression("-1 f^{o0}_{o1} t^{o1}_{v0}")
    print_comparison(val, val2)
    assert val == val2
def test_r1_4():
    """CCSD T1 Residual [Vovov,T1] (4)"""
    initialize()
    T1 = w.op("t", ["v+ o"])
    Vovov = w.op("v", ["o+ v+ v o"])
    wt = w.WickTheorem()
    contraction = wt.contract(w.rational(1), Vovov @ T1, 2, 2)
    val = contraction.to_manybody_equation("r")["o|v"][0].rhs_expression()
    val2 = w.expression("-1 t^{o1}_{v1} v^{o0,v1}_{o1,v0}")
    print_comparison(val, val2)
    assert val == val2
def test_r1_5():
    """CCSD T1 Residual [Fvo,T2] (5)"""
    initialize()
    T2 = w.op("t", ["v+ v+ o o"])
    Fov = w.op("f", ["o+ v"])
    wt = w.WickTheorem()
    contraction = wt.contract(w.rational(1), Fov @ T2, 2, 2)
    val = contraction.to_manybody_equation("r")["o|v"][0].rhs_expression()
    val2 = w.expression("1 f^{v1}_{o1} t^{o0,o1}_{v0,v1}")
    print_comparison(val, val2)
    assert val == val2
def test_r1_6():
    """CCSD T1 Residual [Vovvv,T2] (6)"""
    initialize()
    T2 = w.op("t", ["v+ v+ o o"])
    Vovvv = w.op("v", ["o+ v+ v v"])
    wt = w.WickTheorem()
    contraction = wt.contract(w.rational(1), Vovvv @ T2, 2, 2)
    val = contraction.to_manybody_equation("r")["o|v"][0].rhs_expression()
    val2 = w.expression("-1/2 t^{o0,o1}_{v1,v2} v^{v1,v2}_{o1,v0}")
    print_comparison(val, val2)
    assert val == val2
def test_r1_7():
    """CCSD T1 Residual [Vooov,T2] (7)"""
    initialize()
    T2 = w.op("t", ["v+ v+ o o"])
    Vooov = w.op("v", ["o+ o+ v o"])
    wt = w.WickTheorem()
    contraction = wt.contract(w.rational(1), Vooov @ T2, 2, 2)
    val = contraction.to_manybody_equation("r")["o|v"][0].rhs_expression()
    val2 = w.expression("-1/2 t^{o1,o2}_{v0,v1} v^{o0,v1}_{o1,o2}")
    print_comparison(val, val2)
    assert val == val2
def test_r1_8():
    """CCSD T1 Residual 1/2 [[Fov,T1],T1] (8)"""
    initialize()
    T1 = w.op("t", ["v+ o"])
    Fov = w.op("f", ["o+ v"])
    wt = w.WickTheorem()
    contraction = wt.contract(w.rational(1, 2), w.commutator(w.commutator(Fov, T1), T1), 2, 2)
    val = contraction.to_manybody_equation("r")["o|v"][0].rhs_expression()
    val2 = w.expression("-1 f^{v1}_{o1} t^{o1}_{v0} t^{o0}_{v1}")
    print_comparison(val, val2)
    assert val == val2
def test_r1_9():
    """CCSD T1 Residual 1/2 [[Vooov,T1],T1] (9)"""
    initialize()
    T1 = w.op("t", ["v+ o"])
    Vooov = w.op("v", ["o+ o+ v o"])
    wt = w.WickTheorem()
    contraction = wt.contract(w.rational(1, 2), w.commutator(w.commutator(Vooov, T1), T1), 2, 2)
    val = contraction.to_manybody_equation("r")["o|v"][0].rhs_expression()
    val2 = w.expression("-1 t^{o1}_{v0} t^{o2}_{v1} v^{o0,v1}_{o1,o2}")
    print_comparison(val, val2)
    assert val == val2
def test_r1_10():
    """CCSD T1 Residual 1/2 [[Vovvv,T1],T1] (10)"""
    initialize()
    T1 = w.op("t", ["v+ o"])
    Vovvv = w.op("v", ["o+ v+ v v"])
    wt = w.WickTheorem()
    contraction = wt.contract(w.rational(1, 2), w.commutator(w.commutator(Vovvv, T1), T1), 2, 2)
    val = contraction.to_manybody_equation("r")["o|v"][0].rhs_expression()
    val2 = w.expression("-1 t^{o0}_{v1} t^{o1}_{v2} v^{v1,v2}_{o1,v0}")
    print_comparison(val, val2)
    assert val == val2
def test_r1_11():
    """CCSD T1 Residual 1/6 [[[Voovv,T1],T1],T1] (11)"""
    initialize()
    T1 = w.op("t", ["v+ o"])
    Voovv = w.op("v", ["o+ o+ v v"])
    wt = w.WickTheorem()
    contraction = wt.contract(
        w.rational(1, 6),
        w.commutator(w.commutator(w.commutator(Voovv, T1), T1), T1),
        2,
        2,
    )
    val = contraction.to_manybody_equation("r")["o|v"][0].rhs_expression()
    val2 = w.expression("-1 t^{o1}_{v0} t^{o0}_{v1} t^{o2}_{v2} v^{v1,v2}_{o1,o2}")
    print_comparison(val, val2)
    assert val == val2
def test_r1_12_14():
    """CCSD T1 Residual [[Voovv,T1],T2] (12-14)"""
    initialize()
    T1 = w.op("t", ["v+ o"])
    T2 = w.op("t", ["v+ v+ o o"])
    Voovv = w.op("v", ["o+ o+ v v"])
    wt = w.WickTheorem()
    contraction = wt.contract(
        w.rational(1),
        w.commutator(w.commutator(Voovv, T1), T2),
        2,
        2,
    )
    # Cache the equation list instead of recomputing it for every term.
    equations = contraction.to_manybody_equation("r")["o|v"]
    val = equations[0].rhs_expression()
    val += equations[1].rhs_expression()
    val += equations[2].rhs_expression()
    val2 = (
        w.expression("1 t^{o1}_{v1} t^{o0,o2}_{v0,v2} v^{v1,v2}_{o1,o2}")
        + w.expression("-1/2 t^{o0}_{v1} t^{o1,o2}_{v0,v2} v^{v1,v2}_{o1,o2}")
        + w.expression("-1/2 t^{o1}_{v0} t^{o0,o2}_{v1,v2} v^{v1,v2}_{o1,o2}")
    )
    print_comparison(val, val2)
    assert val == val2
def test_r2_1():
    """CCSD T2 Residual Vvvoo (1)"""
    # Fix: make the test self-contained instead of relying on the orbital
    # spaces registered by whichever test happened to run earlier.
    initialize()
    Vvvoo = w.op("v", ["v+ v+ o o"])
    wt = w.WickTheorem()
    contraction = wt.contract(w.rational(1), Vvvoo, 4, 4)
    val = contraction.to_manybody_equation("r")["oo|vv"][0].rhs_expression()
    val2 = w.expression("1/4 v^{o0,o1}_{v0,v1}")
    print_comparison(val, val2)
    assert val == val2
def test_r2_2():
    """CCSD T2 Residual [Fvv,T2] (2)"""
    initialize()
    T2 = w.op("t", ["v+ v+ o o"])
    Fvv = w.op("f", ["v+ v"])
    wt = w.WickTheorem()
    contraction = wt.contract(w.rational(1), w.commutator(Fvv, T2), 4, 4)
    val = contraction.to_manybody_equation("r")["oo|vv"][0].rhs_expression()
    val2 = w.expression("-1/2 f^{v2}_{v0} t^{o0,o1}_{v1,v2}")
    print_comparison(val, val2)
    assert val == val2
def test_r2_3():
    """CCSD T2 Residual [Foo,T2] (3)"""
    initialize()
    T2 = w.op("t", ["v+ v+ o o"])
    Foo = w.op("f", ["o+ o"])
    wt = w.WickTheorem()
    contraction = wt.contract(w.rational(1), w.commutator(Foo, T2), 4, 4)
    val = contraction.to_manybody_equation("r")["oo|vv"][0].rhs_expression()
    val2 = w.expression("1/2 f^{o0}_{o2} t^{o1,o2}_{v0,v1}")
    print_comparison(val, val2)
    assert val == val2
def test_r2_4():
    """CCSD T2 Residual [Voooo,T2] (4)"""
    initialize()
    T2 = w.op("t", ["v+ v+ o o"])
    Voooo = w.op("v", ["o+ o+ o o"])
    wt = w.WickTheorem()
    contraction = wt.contract(w.rational(1), w.commutator(Voooo, T2), 4, 4)
    val = contraction.to_manybody_equation("r")["oo|vv"][0].rhs_expression()
    val2 = w.expression("1/8 t^{o2,o3}_{v0,v1} v^{o0,o1}_{o2,o3}")
    print_comparison(val, val2)
    assert val == val2
def test_r2_5():
    """CCSD T2 Residual [Vvvvv,T2] (5)"""
    initialize()
    T2 = w.op("t", ["v+ v+ o o"])
    Vvvvv = w.op("v", ["v+ v+ v v"])
    wt = w.WickTheorem()
    contraction = wt.contract(w.rational(1), w.commutator(Vvvvv, T2), 4, 4)
    val = contraction.to_manybody_equation("r")["oo|vv"][0].rhs_expression()
    val2 = w.expression("1/8 t^{o0,o1}_{v2,v3} v^{v2,v3}_{v0,v1}")
    print_comparison(val, val2)
    assert val == val2
def test_r2_6():
    """CCSD T2 Residual [Vovov,T2] (6)"""
    initialize()
    T2 = w.op("t", ["v+ v+ o o"])
    Vovov = w.op("v", ["o+ v+ v o"])
    wt = w.WickTheorem()
    contraction = wt.contract(w.rational(1), w.commutator(Vovov, T2), 4, 4)
    val = contraction.to_manybody_equation("r")["oo|vv"][0].rhs_expression()
    val2 = w.expression("- t^{o0,o2}_{v0,v2} v^{o1,v2}_{o2,v1}")
    print_comparison(val, val2)
    assert val == val2
def test_r2_7():
    """CCSD T2 Residual [Vvvov,T1] (7)"""
    initialize()
    T1 = w.op("t", ["v+ o"])
    Vvvov = w.op("v", ["v+ v+ v o"])
    wt = w.WickTheorem()
    contraction = wt.contract(w.rational(1), w.commutator(Vvvov, T1), 4, 4)
    val = contraction.to_manybody_equation("r")["oo|vv"][0].rhs_expression()
    val2 = w.expression("-1/2 t^{o0}_{v2} v^{o1,v2}_{v0,v1}")
    print_comparison(val, val2)
    assert val == val2
def test_r2_8():
    """CCSD T2 Residual [Vovoo,T1] (8)"""
    initialize()
    T1 = w.op("t", ["v+ o"])
    Vovoo = w.op("v", ["o+ v+ o o"])
    wt = w.WickTheorem()
    contraction = wt.contract(w.rational(1), w.commutator(Vovoo, T1), 4, 4)
    val = contraction.to_manybody_equation("r")["oo|vv"][0].rhs_expression()
    val2 = w.expression("-1/2 t^{o2}_{v0} v^{o0,o1}_{o2,v1}")
    print_comparison(val, val2)
    assert val == val2
def test_r2_9_12():
    """CCSD T2 Residual 1/2 [[Voovv,T2],T2] (9-12)"""
    initialize()
    T2 = w.op("t", ["v+ v+ o o"])
    Voovv = w.op("v", ["o+ o+ v v"])
    wt = w.WickTheorem()
    contraction = wt.contract(w.rational(1, 2), w.commutator(w.commutator(Voovv, T2), T2), 4, 4)
    compare_expressions(
        contraction.to_manybody_equation("r")["oo|vv"],
        [
            "1/2 t^{o0,o2}_{v0,v2} t^{o1,o3}_{v1,v3} v^{v2,v3}_{o2,o3}",
            "-1/4 t^{o0,o1}_{v0,v2} t^{o2,o3}_{v1,v3} v^{v2,v3}_{o2,o3}",
            "1/16 t^{o2,o3}_{v0,v1} t^{o0,o1}_{v2,v3} v^{v2,v3}_{o2,o3}",
            "-1/4 t^{o0,o2}_{v0,v1} t^{o1,o3}_{v2,v3} v^{v2,v3}_{o2,o3}",
        ],
    )
def test_r2_13():
    """CCSD T2 Residual 1/2 [[Voooo,T1],T1] (13)"""
    initialize()
    T1 = w.op("t", ["v+ o"])
    Voooo = w.op("v", ["o+ o+ o o"])
    wt = w.WickTheorem()
    contraction = wt.contract(w.rational(1, 2), w.commutator(w.commutator(Voooo, T1), T1), 4, 4)
    val = contraction.to_manybody_equation("r")["oo|vv"][0].rhs_expression()
    val2 = w.expression("1/4 t^{o2}_{v0} t^{o3}_{v1} v^{o0,o1}_{o2,o3}")
    print_comparison(val, val2)
    assert val == val2
def test_r2_14():
    """CCSD T2 Residual 1/2 [[Vvvvv,T1],T1] (14)"""
    initialize()
    T1 = w.op("t", ["v+ o"])
    Vvvvv = w.op("v", ["v+ v+ v v"])
    wt = w.WickTheorem()
    contraction = wt.contract(w.rational(1, 2), w.commutator(w.commutator(Vvvvv, T1), T1), 4, 4)
    val = contraction.to_manybody_equation("r")["oo|vv"][0].rhs_expression()
    val2 = w.expression("1/4 t^{o0}_{v2} t^{o1}_{v3} v^{v2,v3}_{v0,v1}")
    print_comparison(val, val2)
    assert val == val2
def test_r2_15():
    """CCSD T2 Residual 1/2 [[Vovov,T1],T1] (15)"""
    T1 = w.op("t", ["v+ o"])
    Vovov = w.op("v", ["o+ v+ v o"])
    wt = w.WickTheorem()
    # 'expr' rather than 'sum': avoid shadowing the builtin sum()
    expr = wt.contract(w.rational(1, 2), w.commutator(w.commutator(Vovov, T1), T1), 4, 4)
    val = expr.to_manybody_equation("r")["oo|vv"][0].rhs_expression()
    val2 = w.expression("t^{o2}_{v0} t^{o0}_{v2} v^{o1,v2}_{o2,v1}")
    print_comparison(val, val2)
    assert val == val2
def test_r2_16_17():
    """CCSD T2 Residual [[Fov,T1],T2] (16-17)"""
    T1 = w.op("t", ["v+ o"])
    T2 = w.op("t", ["v+ v+ o o"])
    Fov = w.op("f", ["o+ v"])
    wt = w.WickTheorem()
    # 'expr' rather than 'sum': avoid shadowing the builtin sum()
    expr = wt.contract(w.rational(1, 1), w.commutator(w.commutator(Fov, T1), T2), 4, 4)
    compare_expressions(
        expr.to_manybody_equation("r")["oo|vv"],
        [
            "1/2 f^{v2}_{o2} t^{o0}_{v2} t^{o1,o2}_{v0,v1}",
            "1/2 f^{v2}_{o2} t^{o2}_{v0} t^{o0,o1}_{v1,v2}",
        ],
    )
def test_r2_18_21_22():
    """CCSD T2 Residual [[Vooov,T1],T2] (18,21,22)"""
    T1 = w.op("t", ["v+ o"])
    T2 = w.op("t", ["v+ v+ o o"])
    Vooov = w.op("v", ["o+ o+ v o"])
    wt = w.WickTheorem()
    # 'expr' rather than 'sum': avoid shadowing the builtin sum()
    expr = wt.contract(w.rational(1, 1), w.commutator(w.commutator(Vooov, T1), T2), 4, 4)
    compare_expressions(
        expr.to_manybody_equation("r")["oo|vv"],
        [
            "1/2 t^{o2}_{v2} t^{o0,o3}_{v0,v1} v^{o1,v2}_{o2,o3}",
            "-1/4 t^{o0}_{v2} t^{o2,o3}_{v0,v1} v^{o1,v2}_{o2,o3}",
            "t^{o2}_{v0} t^{o0,o3}_{v1,v2} v^{o1,v2}_{o2,o3}",
        ],
    )
def test_r2_19_20_23():
    """CCSD T2 Residual [[Vovvv,T1],T2] (19,20,23)"""
    T1 = w.op("t", ["v+ o"])
    T2 = w.op("t", ["v+ v+ o o"])
    Vovvv = w.op("v", ["o+ v+ v v"])
    wt = w.WickTheorem()
    # 'expr' rather than 'sum': avoid shadowing the builtin sum()
    expr = wt.contract(w.rational(1, 1), w.commutator(w.commutator(Vovvv, T1), T2), 4, 4)
    compare_expressions(
        expr.to_manybody_equation("r")["oo|vv"],
        [
            "1/2 t^{o2}_{v2} t^{o0,o1}_{v0,v3} v^{v2,v3}_{o2,v1}",
            "t^{o0}_{v2} t^{o1,o2}_{v0,v3} v^{v2,v3}_{o2,v1}",
            "-1/4 t^{o2}_{v0} t^{o0,o1}_{v2,v3} v^{v2,v3}_{o2,v1}",
        ],
    )
def test_r2_24():
    """CCSD T2 Residual 1/6 [[[Vovvv,T1],T1],T1] (24)"""
    T1 = w.op("t", ["v+ o"])
    Vovvv = w.op("v", ["o+ v+ v v"])
    wt = w.WickTheorem()
    # 'expr' rather than 'sum': avoid shadowing the builtin sum()
    expr = wt.contract(
        w.rational(1, 6),
        w.commutator(w.commutator(w.commutator(Vovvv, T1), T1), T1),
        4,
        4,
    )
    val = expr.to_manybody_equation("r")["oo|vv"][0].rhs_expression()
    val2 = w.expression("-1/2 t^{o2}_{v0} t^{o0}_{v2} t^{o1}_{v3} v^{v2,v3}_{o2,v1}")
    print_comparison(val, val2)
    assert val == val2
def test_r2_25():
    """CCSD T2 Residual 1/6 [[[Vooov,T1],T1],T1] (25)"""
    T1 = w.op("t", ["v+ o"])
    Vooov = w.op("v", ["o+ o+ v o"])
    wt = w.WickTheorem()
    # 'expr' rather than 'sum': avoid shadowing the builtin sum()
    expr = wt.contract(
        w.rational(1, 6),
        w.commutator(w.commutator(w.commutator(Vooov, T1), T1), T1),
        4,
        4,
    )
    val = expr.to_manybody_equation("r")["oo|vv"][0].rhs_expression()
    val2 = w.expression("-1/2 t^{o2}_{v0} t^{o3}_{v1} t^{o0}_{v2} v^{o1,v2}_{o2,o3}")
    print_comparison(val, val2)
    assert val == val2
def test_r2_26_30():
    """CCSD T2 Residual [[[Voovv,T1],T1],T2] (26-30)"""
    T1 = w.op("t", ["v+ o"])
    T2 = w.op("t", ["v+ v+ o o"])
    Voovv = w.op("v", ["o+ o+ v v"])
    wt = w.WickTheorem()
    # 'expr' rather than 'sum': avoid shadowing the builtin sum()
    expr = wt.contract(
        w.rational(1, 2),
        w.commutator(w.commutator(w.commutator(Voovv, T1), T1), T2),
        4,
        4,
    )
    compare_expressions(
        expr.to_manybody_equation("r")["oo|vv"],
        [
            "-1/2 t^{o0}_{v2} t^{o2}_{v3} t^{o1,o3}_{v0,v1} v^{v2,v3}_{o2,o3}",
            "1/8 t^{o0}_{v2} t^{o1}_{v3} t^{o2,o3}_{v0,v1} v^{v2,v3}_{o2,o3}",
            "-1/2 t^{o2}_{v0} t^{o3}_{v2} t^{o0,o1}_{v1,v3} v^{v2,v3}_{o2,o3}",
            "-1 t^{o2}_{v0} t^{o0}_{v2} t^{o1,o3}_{v1,v3} v^{v2,v3}_{o2,o3}",
            "1/8 t^{o2}_{v0} t^{o3}_{v1} t^{o0,o1}_{v2,v3} v^{v2,v3}_{o2,o3}",
        ],
    )
def test_r2_31():
    """CCSD T2 Residual 1/24 [[[[Voovv,T1],T1],T1],T1] (31)"""
    T1 = w.op("t", ["v+ o"])
    Voovv = w.op("v", ["o+ o+ v v"])
    wt = w.WickTheorem()
    # 'expr' rather than 'sum': avoid shadowing the builtin sum()
    expr = wt.contract(
        w.rational(1, 24),
        w.commutator(w.commutator(w.commutator(w.commutator(Voovv, T1), T1), T1), T1),
        4,
        4,
    )
    val = expr.to_manybody_equation("r")["oo|vv"][0].rhs_expression()
    val2 = w.expression(
        "1/4 t^{o2}_{v0} t^{o3}_{v1} t^{o0}_{v2} t^{o1}_{v3} v^{v2,v3}_{o2,o3}"
    )
    print_comparison(val, val2)
    assert val == val2
if __name__ == "__main__":
    # Run every CCSD test in definition order when executed as a script.
    for test in (
        test_energy1, test_energy2, test_energy3,
        test_r1_1, test_r1_2, test_r1_3, test_r1_4, test_r1_5,
        test_r1_6, test_r1_7, test_r1_8, test_r1_9, test_r1_10,
        test_r1_11, test_r1_12_14,
        test_r2_1, test_r2_2, test_r2_3, test_r2_4, test_r2_5,
        test_r2_6, test_r2_7, test_r2_8, test_r2_9_12, test_r2_13,
        test_r2_14, test_r2_15, test_r2_16_17, test_r2_18_21_22,
        test_r2_19_20_23, test_r2_24, test_r2_25, test_r2_26_30,
        test_r2_31,
    ):
        test()
| 31.030741 | 88 | 0.534324 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,973 | 0.289802 |
ad714fa4a31029ee0185a2f2a26418add93804bc | 3,301 | py | Python | notes/publish.py | simonrus/about | 4413401308f0a95e5c42c7eb65879e35dda9db29 | [
"MIT"
] | null | null | null | notes/publish.py | simonrus/about | 4413401308f0a95e5c42c7eb65879e35dda9db29 | [
"MIT"
] | null | null | null | notes/publish.py | simonrus/about | 4413401308f0a95e5c42c7eb65879e35dda9db29 | [
"MIT"
] | null | null | null | #/bin/python3
## Step1 scan recursively over all files
import os
import re
import pdb
import datetime
path = "./notes"  # root directory scanned for note files
dest = "_posts"  # Jekyll-style output directory for published posts
magic_prefix = "Active-"  # only files whose name starts with this are published
def extractModifiedDate(string):
    """Return the ISO-8601 UTC timestamp embedded in *string*, or None.

    The string must contain exactly one timestamp of the form
    YYYY-MM-DDTHH:MM:SS.fffZ; zero or multiple matches yield None.
    """
    # Bug fix: the '.' before the fractional seconds was unescaped and
    # matched any character, letting malformed stamps through to
    # strptime (which would then raise ValueError).
    regexp = r"\d+-\d+-\d+T\d+:\d+:\d+\.\d+Z"
    date_strings_all = re.findall(regexp, string)
    date = None
    if len(date_strings_all) == 1:
        date = datetime.datetime.strptime(date_strings_all[0], "%Y-%m-%dT%H:%M:%S.%fZ")
    return date
def insert_str(string, str_to_insert, index):
    """Return *string* with *str_to_insert* spliced in before position *index*."""
    head, tail = string[:index], string[index:]
    return head + str_to_insert + tail
def processFile(src, dest):
    """Publish one note: strip its front matter, double single '$' signs
    (inline LaTeX -> display LaTeX), and append a 'Last update' footer
    taken from the front matter's 'modified' timestamp.

    NOTE(review): the header scan only stops on the *third* '---' line
    (state_post_start); with a conventional two-line front matter the
    post content is only emitted if a third '---' exists. Confirm the
    note format actually contains three '---' markers.
    """
    # Parser states for the front-matter scan.
    state_none = 0
    state_hdr_start = 1
    state_hdr_stop = 2
    state_post_start = 3
    modified_date = None
    print("Process file ", src, " -> ", dest)
    state = state_none
    skiplines = 0
    with open(src, "r") as f_in, open(dest, "w+") as f_out:
        src_lines = f_in.readlines()
        # Pass 1: count header lines to skip and pull out the
        # 'modified' timestamp while inside the front matter.
        for line in src_lines:
            #pdb.set_trace()
            if ("---" in line):
                state = state + 1
                if (state == state_post_start):
                    break
            skiplines = skiplines + 1
            if state == state_hdr_start:
                if ("modified" in line):
                    modified_date = extractModifiedDate(line)
        dest_lines = src_lines[skiplines:]
        # Pass 2: turn every lone '$' into '$$', leaving existing '$$'
        # runs untouched.
        for i in range(0, len(dest_lines)):
            if state == state_post_start:
                line = dest_lines[i]
                # find lines with single $
                start_pos = 0
                pos = line.find('$', start_pos)
                while (pos != -1):
                    if pos + 1 < len(line):
                        if (line[pos + 1] != '$'):
                            line = insert_str(line, '$' ,pos)
                            pos = pos + 1
                        else:
                            # Skip over an existing run of '$'.
                            # NOTE(review): if a '$' run ends exactly at
                            # end-of-line this can index past the end —
                            # confirm inputs never end in '$$'.
                            while(line[pos + 1] == '$'):
                                pos = pos + 1
                    else:
                        line = insert_str(line, '$' ,pos)
                        pos = pos + 1
                    start_pos = pos + 1
                    pos = line.find('$', start_pos)
                dest_lines[i] = line
        for i in dest_lines:
            f_out.write(i)
        if (modified_date is not None):
            f_out.write(os.linesep)
            f_out.write("*Last update:" + modified_date.strftime("%d %B %Y") + "*" + os.linesep)
    # Redundant: the 'with' statement above already closed both files.
    f_in.close()
    f_out.close()
# Walk the notes tree and publish every 'Active-*' file (excluding
# backups) into the destination directory without the prefix.
for root,d_names,f_names in os.walk(path):
    if ("notes" in root):
        # NOTE(review): 'category' (grand-parent directory name) is only
        # printed, never written into the output — confirm intent.
        category = os.path.split(os.path.split(root)[0])[1]
        for post_fn in f_names:
            ## Find all with name Active...dd
            #print(root, post_fn, f_names)
            if ((post_fn.startswith(magic_prefix)) and (".bak" not in post_fn)):
                #print (root, post_fn)
                new_filename = post_fn[len(magic_prefix):]
                src = os.path.join(root, post_fn)
                dest_filename = os.path.join(dest, new_filename)
                print (root, category, src, "->", dest_filename)
                processFile(src, dest_filename)
## Copy file with new name without Active prefix
## extract tag, remove first line
| 28.213675 | 96 | 0.49076 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 447 | 0.135414 |
ad72b53c026f5b811aebd943c7bb216b2e4dff3e | 726 | py | Python | unittest/test_unittest_runner.py | asisudai/practical_pipeline | 09b106dc70d0d9abf7bca117346e796ad542d534 | [
"MIT"
] | 3 | 2019-05-28T22:29:38.000Z | 2020-04-26T19:03:01.000Z | unittest/test_unittest_runner.py | asisudai/practical_pipeline | 09b106dc70d0d9abf7bca117346e796ad542d534 | [
"MIT"
] | null | null | null | unittest/test_unittest_runner.py | asisudai/practical_pipeline | 09b106dc70d0d9abf7bca117346e796ad542d534 | [
"MIT"
] | 1 | 2019-09-01T15:53:36.000Z | 2019-09-01T15:53:36.000Z | #!/usr/bin/env python
import unittest
# import your test modules
import test_unittest_01
import test_unittest_02
import test_unittest_03
import test_unittest_04
if __name__ == '__main__':
    # Collect all tests from the four test modules into one suite and
    # run it with a verbose text runner.
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for module in (test_unittest_01, test_unittest_02,
                   test_unittest_03, test_unittest_04):
        suite.addTests(loader.loadTestsFromModule(module))
    runner = unittest.TextTestRunner(verbosity=3)
    result = runner.run(suite)
| 29.04 | 64 | 0.77135 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 165 | 0.227273 |
ad756753130794b002c07aafb6f259c23435b543 | 1,535 | py | Python | secret_msg(tk).py | weijun-github/some-python-codes | db3d4b4ceb8b7c8ce0bd4b61da6227cd9e994718 | [
"MIT"
] | null | null | null | secret_msg(tk).py | weijun-github/some-python-codes | db3d4b4ceb8b7c8ce0bd4b61da6227cd9e994718 | [
"MIT"
] | null | null | null | secret_msg(tk).py | weijun-github/some-python-codes | db3d4b4ceb8b7c8ce0bd4b61da6227cd9e994718 | [
"MIT"
] | null | null | null | from tkinter import messagebox, simpledialog, Tk
def is_even(number):
    """True when *number* is divisible by two."""
    return not number % 2
def get_even_letters(message):
    """Return the characters of *message* at even indices (0, 2, 4, ...)."""
    # Slicing replaces the manual index loop; the result is unchanged.
    return list(message[::2])
def get_odd_letters(message):
    """Return the characters of *message* at odd indices (1, 3, 5, ...)."""
    # Slicing replaces the manual index loop; the result is unchanged.
    return list(message[1::2])
def swap_letters(message):
    """Swap each pair of adjacent characters ('ab' -> 'ba').

    Odd-length input is padded with a trailing 'x' first, so the
    transformation is its own inverse for even-length messages.
    """
    # Idiomatic rewrite: slicing + zip/join instead of an index loop.
    if len(message) % 2 != 0:
        message += 'x'
    evens = message[::2]
    odds = message[1::2]
    return ''.join(odd + even for odd, even in zip(odds, evens))
def get_task():
    """Ask the user (via a dialog) whether to encrypt or decrypt; may return None on cancel."""
    task = simpledialog.askstring('task','encrypt or decrypt?')
    return task
def get_message():
    """Prompt the user for the message to transform; may return None on cancel."""
    message = simpledialog.askstring('message', 'Enter your message:')
    return message
root = Tk()
# Keep prompting until the user enters anything other than 'encrypt' or
# 'decrypt' (including cancelling the dialog, which returns None).
while True:
    task = get_task()
    if task == 'encrypt':
        message = get_message()
        encrypted = swap_letters(message)
        messagebox.showinfo('encrypted message:', encrypted)
    elif task == 'decrypt':
        message = get_message()
        decrypted = swap_letters(message)
        messagebox.showinfo('decrypted message:', decrypted)
    else:
        break
root.mainloop() | 26.929825 | 70 | 0.661889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.078176 |
ad7954a824bd880b95d100c19467ce1850e0e399 | 497 | py | Python | class1/backpropagation.py | janewen134/tensorflow_self_improment | 7872b3571f822a513c532d166cf2058b21fe7a6b | [
"MIT"
] | null | null | null | class1/backpropagation.py | janewen134/tensorflow_self_improment | 7872b3571f822a513c532d166cf2058b21fe7a6b | [
"MIT"
] | null | null | null | class1/backpropagation.py | janewen134/tensorflow_self_improment | 7872b3571f822a513c532d166cf2058b21fe7a6b | [
"MIT"
] | null | null | null | import tensorflow as tf
w = tf.Variable(tf.constant(5, dtype=tf.float32)) # set random initial value 5, and make it trainable
lr = 0.2 # learning rate
epoch = 40
# Minimise loss = (w + 1)^2 by plain gradient descent; the optimum is w = -1.
# NOTE(review): the loop variable 'epoch' shadows the iteration-count
# constant above; range(epoch) is evaluated once, so behaviour is fine,
# but a distinct name would be clearer.
for epoch in range(epoch):
    with tf.GradientTape() as tape: # "with expression as variable"
        loss = tf.square(w + 1)
    grads = tape.gradient(loss, w) # gradient function
    w.assign_sub(lr * grads) # .assign_sub, self-decrement
print("After %s epoch, w is %f, loss is %f" % (epoch+1, w.numpy(), loss)) | 35.5 | 102 | 0.649899 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 182 | 0.366197 |
ad7a1bbb5678c63627d8c2c4ee4f69245d892027 | 89 | py | Python | rorow/feusers/apps.py | derhelge/rorow | deac733dd8632773970b27325c9417a51c3491f3 | [
"MIT"
] | null | null | null | rorow/feusers/apps.py | derhelge/rorow | deac733dd8632773970b27325c9417a51c3491f3 | [
"MIT"
] | null | null | null | rorow/feusers/apps.py | derhelge/rorow | deac733dd8632773970b27325c9417a51c3491f3 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class FeusersConfig(AppConfig):
    """Django application configuration for the 'feusers' app."""
    name = 'feusers'  # app label Django uses to register this application
| 14.833333 | 33 | 0.752809 | 52 | 0.58427 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.101124 |
ad7b47e123d628383cdc106058dd002388aefb9d | 238 | py | Python | buck/__init__.py | bukzor/buck.pprint | 3b3b2620838512cf8e39d3070964cda1f1b57025 | [
"MIT"
] | 4 | 2015-11-24T18:34:39.000Z | 2019-09-04T13:53:12.000Z | buck/__init__.py | bukzor/buck.pprint | 3b3b2620838512cf8e39d3070964cda1f1b57025 | [
"MIT"
] | 2 | 2017-02-01T01:29:13.000Z | 2020-11-10T03:55:45.000Z | buck/__init__.py | bukzor/buck.pprint | 3b3b2620838512cf8e39d3070964cda1f1b57025 | [
"MIT"
] | 1 | 2017-03-05T03:36:57.000Z | 2017-03-05T03:36:57.000Z | # This is a namespace package. See also:
# http://pythonhosted.org/distribute/setuptools.html#namespace-packages
# http://osdir.com/ml/python.distutils.devel/2006-08/msg00029.html
# Registers this package as a pkg_resources-style namespace so sibling
# distributions can contribute subpackages under the same top-level name.
__import__('pkg_resources').declare_namespace(__name__)
| 47.6 | 73 | 0.798319 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 194 | 0.815126 |
ad7bd9466582c413f0454448c47639038f5336ef | 469 | py | Python | Python/Difference of times/main.py | drtierney/hyperskill-problems | b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0 | [
"MIT"
] | 5 | 2020-08-29T15:15:31.000Z | 2022-03-01T18:22:34.000Z | Python/Difference of times/main.py | drtierney/hyperskill-problems | b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0 | [
"MIT"
] | null | null | null | Python/Difference of times/main.py | drtierney/hyperskill-problems | b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0 | [
"MIT"
] | 1 | 2020-12-02T11:13:14.000Z | 2020-12-02T11:13:14.000Z | # put your python code here
def event_time(hours, minutes, seconds):
    """Convert a clock time to the total number of seconds since midnight."""
    return seconds + 60 * (minutes + 60 * hours)
def time_difference(a, b):
    """Absolute number of seconds between two event times."""
    return a - b if a >= b else b - a
# Read two H/M/S timestamps from standard input (six integer lines) and
# print the absolute difference between them in seconds.
hours_1 = int(input())
minutes_1 = int(input())
seconds_1 = int(input())
hours_2 = int(input())
minutes_2 = int(input())
seconds_2 = int(input())
event_1 = event_time(hours_1, minutes_1, seconds_1)
event_2 = event_time(hours_2, minutes_2, seconds_2)
print(time_difference(event_1, event_2))
| 21.318182 | 52 | 0.705757 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.057569 |
ad7dc5722d5464126b3e82b9529a1e48de237341 | 2,831 | bzl | Python | tools/build_defs/detect_root.bzl | slsyy/rules_foreign_cc | 34ab7f86a3ab1b2381cb4820d08a1c892f55bf54 | [
"Apache-2.0"
] | 2 | 2021-03-18T04:14:56.000Z | 2021-03-18T05:11:09.000Z | tools/build_defs/detect_root.bzl | slsyy/rules_foreign_cc | 34ab7f86a3ab1b2381cb4820d08a1c892f55bf54 | [
"Apache-2.0"
] | null | null | null | tools/build_defs/detect_root.bzl | slsyy/rules_foreign_cc | 34ab7f86a3ab1b2381cb4820d08a1c892f55bf54 | [
"Apache-2.0"
] | 1 | 2021-03-01T17:51:22.000Z | 2021-03-01T17:51:22.000Z | # buildifier: disable=module-docstring
# buildifier: disable=function-docstring-header
def detect_root(source):
    """Detects the path to the topmost directory of the 'source' outputs.
    To be used with external build systems to point to the source code/tools directories.
    Args:
        source (Target): A filegroup of source files
    Returns:
        string: The relative path to the root source directory
    """
    sources = source.files.to_list()
    if len(sources) == 0:
        return ""
    root = None
    level = -1
    # find topmost directory (ties keep the first file encountered)
    for file in sources:
        file_level = _get_level(file.path)
        # If there is no level set or the current file's level
        # is greater than what we have logged, update the root
        if level == -1 or level > file_level:
            root = file
            level = file_level
    if not root:
        fail("No root source or directory was found")
    if root.is_source:
        return root.dirname
    # Note this code path will never be hit due to a bug upstream Bazel
    # https://github.com/bazelbuild/bazel/issues/12954
    # If the root is not a source file, it must be a directory.
    # Thus the path is returned
    return root.path
def _get_level(path):
    """Determine the number of sub directories `path` is contained in
    Args:
        path (string): The target path
    Returns:
        int: The directory depth of `path`
    """
    normalized = path
    # This for loop ensures there are no double `//` substrings.
    # A for loop is used because there's not currently a `while`
    # or a better mechanism for guaranteeing all `//` have been
    # cleaned up.
    for i in range(len(path)):
        new_normalized = normalized.replace("//", "/")
        if len(new_normalized) == len(normalized):
            break
        normalized = new_normalized
    # Depth == number of separators once duplicate slashes are collapsed.
    return normalized.count("/")
# buildifier: disable=function-docstring-header
# buildifier: disable=function-docstring-args
# buildifier: disable=function-docstring-return
def filter_containing_dirs_from_inputs(input_files_list):
    """When the directories are also passed in the filegroup with the sources,
    we get into a situation when we have containing in the sources list,
    which is not allowed by Bazel (execroot creation code fails).
    The parent directories will be created for us in the execroot anyway,
    so we filter them out."""
    # This puts directories in front of their children in list
    sorted_list = sorted(input_files_list)
    # NOTE(review): 'sorted_list' is computed but never used below — the
    # loop iterates the unsorted input, so a parent that appears *after*
    # one of its children is not removed. Confirm whether the loop was
    # meant to iterate sorted_list.
    contains_map = {}
    for input in input_files_list:
        # If the immediate parent directory is already in the list, remove it
        if contains_map.get(input.dirname):
            contains_map.pop(input.dirname)
        contains_map[input.path] = input
    return contains_map.values()
| 32.918605 | 89 | 0.673967 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,735 | 0.612858 |
ad7e4c246dd520ff153b0ca296f046cc27e64648 | 5,660 | py | Python | getcurrentexplorerfile.py | CailleauThierry/MyPython | 2301b012fc36d04788ea4640e81a1829b5f6598d | [
"MIT"
] | null | null | null | getcurrentexplorerfile.py | CailleauThierry/MyPython | 2301b012fc36d04788ea4640e81a1829b5f6598d | [
"MIT"
] | null | null | null | getcurrentexplorerfile.py | CailleauThierry/MyPython | 2301b012fc36d04788ea4640e81a1829b5f6598d | [
"MIT"
] | null | null | null | #!python3
# from https://stackoverflow.com/questions/21241708/python-get-a-list-of-selected-files-in-explorer-windows-7/52959617#52959617
import win32gui, time
from win32con import PAGE_READWRITE, MEM_COMMIT, MEM_RESERVE, MEM_RELEASE, PROCESS_ALL_ACCESS, WM_GETTEXTLENGTH, WM_GETTEXT
from commctrl import LVS_OWNERDATA, LVM_GETITEMCOUNT, LVM_GETNEXTITEM, LVNI_SELECTED
import os
import struct
import ctypes
import win32api
import datetime
import win32com.client as win32
import win32ui
import psutil
import subprocess
import time
import urllib.parse
clsid = '{9BA05972-F6A8-11CF-A442-00A0C90A8F39}' #Valid for IE as well! (CLSID of the ShellWindows COM collection)
def getEditText(hwnd):
    """Return the window text of *hwnd* fetched via WM_GETTEXT (UTF-16)."""
    # api returns 16 bit characters so buffer needs 1 more char for null and twice the num of chars
    buf_size = (win32gui.SendMessage(hwnd, WM_GETTEXTLENGTH, 0, 0) +1 ) * 2
    target_buff = ctypes.create_string_buffer(buf_size)
    win32gui.SendMessage(hwnd, WM_GETTEXT, buf_size, ctypes.addressof(target_buff))
    return target_buff.raw.decode('utf16')[:-1]# remove the null char on the end
def _normaliseText(controlText):
'''Remove '&' characters, and lower case.
Useful for matching control text.'''
return controlText.lower().replace('&', '')
def _windowEnumerationHandler(hwnd, resultList):
    '''Pass to win32gui.EnumWindows() to generate list of window handle,
    window text, window class tuples.'''
    # Appends one (hwnd, window title, window class name) tuple per call.
    resultList.append((hwnd, win32gui.GetWindowText(hwnd), win32gui.GetClassName(hwnd)))
def searchChildWindows(currentHwnd,
                       wantedText=None,
                       wantedClass=None,
                       selectionFunction=None):
    """Recursively collect handles of all descendants of *currentHwnd*,
    optionally filtered by (substring) window text, exact class name,
    and/or an arbitrary predicate. Returns None when *currentHwnd*
    cannot have children (EnumChildWindows fails)."""
    results = []
    childWindows = []
    try:
        win32gui.EnumChildWindows(currentHwnd,
                              _windowEnumerationHandler,
                              childWindows)
    except win32gui.error:
        # This seems to mean that the control *cannot* have child windows,
        # i.e. not a container.
        return
    for childHwnd, windowText, windowClass in childWindows:
        # Descend first so the result covers the whole subtree; the
        # recursive call uses no filters, matching the original intent
        # of gathering every descendant handle.
        descendentMatchingHwnds = searchChildWindows(childHwnd)
        if descendentMatchingHwnds:
            results += descendentMatchingHwnds
        if wantedText and \
           not _normaliseText(wantedText) in _normaliseText(windowText):
            continue
        if wantedClass and \
           not windowClass == wantedClass:
            continue
        if selectionFunction and \
           not selectionFunction(childHwnd):
            continue
        results.append(childHwnd)
    return results
def explorer_fileselection():
    """Print the current directory and selected files of the foreground
    Windows Explorer window.

    Works by (1) reading the address toolbar text of the foreground
    'CabinetWClass' window and (2) matching it against the ShellWindows
    COM collection to reach the Document.SelectedItems() of that window.
    """
    global clsid
    address_1=""
    files = []
    shellwindows = win32.Dispatch(clsid)
    w=win32gui
    window = w.GetForegroundWindow()
    #print("window: %s" % window)
    if (window != 0):
        if (w.GetClassName(window) == 'CabinetWClass'): # the main explorer window
            #print("class: %s" % w.GetClassName(window))
            #print("text: %s " %w.GetWindowText(window))
            children = list(set(searchChildWindows(window)))
            addr_edit = None
            file_view = None
            # Probe several known address-bar child classes in turn;
            # only the final ToolbarWindow32 text is actually used.
            for child in children:
                if (w.GetClassName(child) == 'WorkerW'): # the address bar
                    addr_children = list(set(searchChildWindows(child)))
                    for addr_child in addr_children:
                        if (w.GetClassName(addr_child) == 'ReBarWindow32'):
                            addr_edit = addr_child
                    addr_children = list(set(searchChildWindows(child)))
                    for addr_child in addr_children:
                        if (w.GetClassName(addr_child) == 'Address Band Root'):
                            addr_edit = addr_child
                    addr_children = list(set(searchChildWindows(child)))
                    for addr_child in addr_children:
                        if (w.GetClassName(addr_child) == 'msctls_progress32'):
                            addr_edit = addr_child
                    addr_children = list(set(searchChildWindows(child)))
                    for addr_child in addr_children:
                        if (w.GetClassName(addr_child) == 'Breadcrumb Parent'):
                            addr_edit = addr_child
                    addr_children = list(set(searchChildWindows(child)))
                    for addr_child in addr_children:
                        if (w.GetClassName(addr_child) == 'ToolbarWindow32'):
                            text=getEditText(addr_child)
                            if "\\" in text:
                                # Toolbar text is "Address: C:\..."; drop
                                # the label before the first space.
                                address_1=getEditText(addr_child)[text.index(" ")+1:]
                                print("Address --> "+address_1)
            # Match the address against every open shell window's URL.
            for window in range(shellwindows.Count):
                window_URL = urllib.parse.unquote(shellwindows[window].LocationURL,encoding='ISO 8859-1')
                window_dir = window_URL.split("///")[1].replace("/", "\\")
                print("Directory --> "+window_dir)
                if window_dir==address_1:
                    selected_files = shellwindows[window].Document.SelectedItems()
                    for file in range(selected_files.Count):
                        files.append(selected_files.Item(file).Path)
                    print("Files --> "+str(files))
# Poll the foreground Explorer window forever, printing its directory
# and selection (the 1-second delay follows below).
while True:
    explorer_fileselection()
time.sleep(1) | 46.393443 | 127 | 0.573852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 966 | 0.170671 |
ad808e43a2af907aea81485551df9f8197837f25 | 1,472 | py | Python | cookiecutter-project/pages/views.py | goldhand/cookiecutter-project | b7c9189ca0ccda43d34ec1573b14138e979d6e78 | [
"BSD-3-Clause"
] | 1 | 2017-03-20T05:54:30.000Z | 2017-03-20T05:54:30.000Z | cookiecutter-project/pages/views.py | goldhand/cookiecutter-project | b7c9189ca0ccda43d34ec1573b14138e979d6e78 | [
"BSD-3-Clause"
] | null | null | null | cookiecutter-project/pages/views.py | goldhand/cookiecutter-project | b7c9189ca0ccda43d34ec1573b14138e979d6e78 | [
"BSD-3-Clause"
] | null | null | null | from django.shortcuts import render, render_to_response
from django.core.mail import mail_admins
from django.contrib import messages
from django.template import RequestContext
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.views.generic.base import TemplateView
from .forms import ContactForm
class PageView(TemplateView):
    """Render a static template with an empty ContactForm in the context.
    NOTE(review): template_name is '404.html' — confirm this view is
    really meant to render the not-found template by default.
    """
    template_name = "404.html"
    def get_context_data(self, **kwargs):
        # Expose a blank contact form to the template as 'contact_form'.
        context = super(PageView, self).get_context_data(**kwargs)
        context['contact_form'] = ContactForm()
        return context
def contact(request):
    """Feedback form view: mail admins on valid POST, then redirect to
    'next' (if given) or re-render the contact page.

    NOTE(review): 'unicode' below is Python 2-only; under Python 3 that
    line raises NameError. Confirm the target runtime or switch to str.
    """
    form = ContactForm()
    if request.POST:
        form = ContactForm(request.POST)
        if form.is_valid():
            subject = form.cleaned_data['subject']
            email = form.cleaned_data['email']
            message = '{} from {}'.format(form.cleaned_data['feedback'], email)
            subject = unicode('Feedback: {}').format(subject)
            mail_admins(subject, message)
            _next = request.POST.get('next')
            messages.success(request, 'Thanks for the feedback!')
            if _next:
                return HttpResponseRedirect(_next)
    # GET (or invalid POST): carry an optional 'next' through to the form.
    _next = ""
    if request.GET.get('next'):
        _next = request.GET.get('next')
    context = {'form': form, 'next': _next}
    return render_to_response('pages/contact.html',
                              context,
context_instance=RequestContext(request)) | 34.232558 | 79 | 0.644701 | 242 | 0.164402 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.10462 |
ad80c626fb9482243ada858e1c516fe0842d6cb2 | 541 | py | Python | ratelimitbackend/middleware.py | Edraak/django-ratelimit-backend | b325b62fcaff2d02eb677efe6c22a337df2c4c24 | [
"BSD-3-Clause"
] | 95 | 2015-01-05T02:05:43.000Z | 2022-02-08T11:22:18.000Z | ratelimitbackend/middleware.py | Edraak/django-ratelimit-backend | b325b62fcaff2d02eb677efe6c22a337df2c4c24 | [
"BSD-3-Clause"
] | 28 | 2015-03-27T16:40:42.000Z | 2021-02-22T09:59:09.000Z | ratelimitbackend/middleware.py | edx/django-ratelimit-backend | cf80e324820c48daad89c644e6bd809044ad26f4 | [
"BSD-3-Clause"
] | 28 | 2015-03-27T15:52:44.000Z | 2022-01-25T07:25:30.000Z | from django.http import HttpResponseForbidden
from django.utils.deprecation import MiddlewareMixin
from .exceptions import RateLimitException
class RateLimitMiddleware(MiddlewareMixin):
    """
    Handles exceptions thrown by rate-limited login attempts.
    """
    def process_exception(self, request, exception):
        # Translate a RateLimitException into a plain-text 403; any other
        # exception falls through (implicit None) to Django's normal
        # exception handling.
        if isinstance(exception, RateLimitException):
            return HttpResponseForbidden(
                'Too many failed login attempts. Try again later.',
                content_type='text/plain',
            )
| 31.823529 | 67 | 0.702403 | 395 | 0.730129 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.249538 |
ad813efc778d79e46d171b532dba5b2c0927f1e3 | 5,219 | py | Python | RNN/alternative_configurations.py | oncebasun/seq2seq-theano | 9d905ed2fb392193e28d67272d3e3f1b5da613ac | [
"MIT"
] | null | null | null | RNN/alternative_configurations.py | oncebasun/seq2seq-theano | 9d905ed2fb392193e28d67272d3e3f1b5da613ac | [
"MIT"
] | null | null | null | RNN/alternative_configurations.py | oncebasun/seq2seq-theano | 9d905ed2fb392193e28d67272d3e3f1b5da613ac | [
"MIT"
] | null | null | null | def get_config_cs2en():
    # NOTE(review): despite the 'cs2en' function name, every path below
    # points at SIGMORPHON (morphological reinflection) data — confirm.
    config = {}
    # Settings which should be given at start time, but are not, for convenience
    config['the_task'] = 0
    # Settings ----------------------------------------------------------------
    config['allTagsSplit'] = 'allTagsSplit/' # can be 'allTagsSplit/', 'POSextra/' or ''
    config['identity_init'] = True
    config['early_stopping'] = False # this has no use for now
    config['use_attention'] = True # if we want attention output at test time; still no effect for training
    # Model related -----------------------------------------------------------
    # Definition of the error function; right now only included in baseline_ets
    config['error_fct'] = 'categorical_cross_entropy'
    # Sequences longer than this will be discarded
    config['seq_len'] = 50
    # Number of hidden units in encoder/decoder GRU
    config['enc_nhids'] = 100 # orig: 100
    config['dec_nhids'] = 100 # orig: 100
    # For the initialization of the parameters.
    config['rng_value'] = 11
    # Dimension of the word embedding matrix in encoder/decoder
    config['enc_embed'] = 100 # orig: 300
    config['dec_embed'] = 100 # orig: 300
    # Where to save model, this corresponds to 'prefix' in groundhog
    config['saveto'] = 'model'
    # Optimization related ----------------------------------------------------
    # Batch size
    config['batch_size'] = 20
    # This many batches will be read ahead and sorted
    config['sort_k_batches'] = 12
    # Optimization step rule
    config['step_rule'] = 'AdaDelta'
    # Gradient clipping threshold
    config['step_clipping'] = 1.
    # Std of weight initialization
    config['weight_scale'] = 0.01
    # Regularization related --------------------------------------------------
    # Weight noise flag for feed forward layers
    config['weight_noise_ff'] = False
    # Weight noise flag for recurrent layers
    config['weight_noise_rec'] = False
    # Dropout ratio, applied only after readout maxout
    config['dropout'] = 0.5
    # Vocabulary/dataset/embeddings related ----------------------------------------------
    # Corpus vocabulary pickle file
    config['corpus_data'] = '/mounts/Users/cisintern/huiming/SIGMORPHON/Code/data/Corpora/corpus_voc_'
    # Root directory for dataset
    # NOTE(review): 'datadir' is assigned but never used in this function.
    datadir = '/mounts/Users/cisintern/huiming/SIGMORPHON/Code/src/baseline/'
    # Module name of the stream that will be used
    config['stream'] = 'stream'
    # Source and target vocabularies
    if config['the_task'] > 1:
        config['src_vocab'] = ['/mounts/Users/cisintern/huiming/SIGMORPHON/Code/data/forRnn/', '_src_voc_task' + str(config['the_task']) + '.pkl']
        config['trg_vocab'] = ['/mounts/Users/cisintern/huiming/SIGMORPHON/Code/data/forRnn/', '_trg_voc_task' + str(config['the_task']) + '.pkl'] # introduce "german" or so here
    else:
        config['src_vocab'] = ['/mounts/Users/cisintern/huiming/SIGMORPHON/Code/data/forRnn/', '_src_voc.pkl']
        config['trg_vocab'] = ['/mounts/Users/cisintern/huiming/SIGMORPHON/Code/data/forRnn/', '_trg_voc.pkl'] # introduce "german" or so here
    # Source and target datasets
    config['src_data'] = ['/mounts/Users/cisintern/huiming/SIGMORPHON/Code/data/forRnn/', '-task' + str(config['the_task']) + '-train_src']
    config['trg_data'] = ['/mounts/Users/cisintern/huiming/SIGMORPHON/Code/data/forRnn/', '-task' + str(config['the_task']) + '-train_trg']
    # Source and target vocabulary sizes, should include bos, eos, unk tokens
    # This will be read at runtime from a file.
    config['src_vocab_size'] = 159
    config['trg_vocab_size'] = 61
    # Special tokens and indexes
    config['unk_id'] = 1
    config['bow_token'] = '<S>'
    config['eow_token'] = '</S>'
    config['unk_token'] = '<UNK>'
    # Validation set source file; this is the test file, because there is only a test set for two languages
    config['val_set'] = ['/mounts/Users/cisintern/huiming/SIGMORPHON/Code/data/forRnn/', '-task' + str(config['the_task']) + '-test_src']
    # Validation set gold file
    config['val_set_grndtruth'] = ['/mounts/Users/cisintern/huiming/SIGMORPHON/Code/data/forRnn/', '-task' + str(config['the_task']) + '-test_trg']
    # Print validation output to file
    config['output_val_set'] = False
    # Validation output file
    config['val_set_out'] = config['saveto'] + '/validation_out.txt'
    # Beam-size
    config['beam_size'] = 12
    # Path to pretrained embeddings
    config['embeddings'] = ['/mounts/Users/cisintern/huiming/universal-mri/Code/_FINAL_ST_MODELS/t1_high/task2/Ens_1_model_', '_100_100']
    # Timing/monitoring related -----------------------------------------------
    # Maximum number of epochs
    config['finish_after'] = 100
    # Reload model from files if exist
    config['reload'] = True
    # Save model after this many updates
    config['save_freq'] = 500
    # Show samples from model after this many updates
    config['sampling_freq'] = 50
    # Show this many samples at each sampling
    config['hook_samples'] = 2
    # Start bleu validation after this many updates
    config['val_burn_in'] = 80000
    config['lang'] = None
    return config
| 37.818841 | 176 | 0.633646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,814 | 0.730791 |
ad82be8113236ba6ef1a019056b5a21f96562145 | 589 | py | Python | setup.py | Shadofer/dogey | 1d9f1b82aa7ecfe6d9776feb03364ef9eb00bd63 | [
"MIT"
] | 3 | 2021-05-18T09:46:30.000Z | 2022-03-26T14:23:24.000Z | setup.py | Shadofer/dogey | 1d9f1b82aa7ecfe6d9776feb03364ef9eb00bd63 | [
"MIT"
] | null | null | null | setup.py | Shadofer/dogey | 1d9f1b82aa7ecfe6d9776feb03364ef9eb00bd63 | [
"MIT"
] | null | null | null | from setuptools import setup
# Read the long description from the README; pin the encoding so the
# build does not depend on the platform's locale default (fails on
# Windows cp1252 when the README contains non-ASCII characters).
with open('README.md', 'r', encoding='utf-8') as f:
    long_description = f.read()

setup(
    name = 'dogey',
    version = '0.1',
    description = 'A pythonic dogehouse API.',
    long_description = long_description,
    long_description_content_type = 'text/markdown',
    author = 'Shadofer#7312',
    author_email = 'shadowrlrs@gmail.com',
    python_requires = '>=3.8.0',
    url = 'https://github.com/Shadofer/dogey',
    packages = ['dogey'],
    install_requires = ['websockets'],
    extras_require = {
        'sound': ['pymediasoup']
    },
    license = 'MIT'
)
| 25.608696 | 52 | 0.626486 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 193 | 0.327674 |
ad8464a5e90866608323322de1a9bc098cc0a1d3 | 476 | py | Python | settings/testing.py | skylifewww/artdelo | 55d235a59d8a3abdf0f904336c1c75a2be903699 | [
"MIT"
] | null | null | null | settings/testing.py | skylifewww/artdelo | 55d235a59d8a3abdf0f904336c1c75a2be903699 | [
"MIT"
] | null | null | null | settings/testing.py | skylifewww/artdelo | 55d235a59d8a3abdf0f904336c1c75a2be903699 | [
"MIT"
] | null | null | null | ALLOWED_HOSTS = ['testserver']
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'  # print mail to stdout during tests
# In-memory SQLite so the test database is fast and throwaway.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:'
    }
}
# Local-memory cache backends; both aliases share the same configuration.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'iosDevCourse'
    },
    'local': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'iosDevCourse'
    }
}
| 22.666667 | 67 | 0.596639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 297 | 0.62395 |
ad8619a24bcb752efa61539552ec1f87e1e97167 | 8,140 | py | Python | h1/models/billing.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | [
"MIT"
] | null | null | null | h1/models/billing.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | [
"MIT"
] | null | null | null | h1/models/billing.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from h1.configuration import Configuration
class Billing(object):
    """Billing line-item model (auto generated by OpenAPI Generator).

    Ref: https://openapi-generator.tech — regenerate instead of editing
    by hand.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'id': 'str',
        'period': 'str',
        'price': 'float',
        'quantity': 'float',
        'project': 'str',
        'one_time': 'bool',
        'service': 'BillingService',
        'resource': 'BillingResource',
        'charges': 'list[BillingCharges]'
    }

    attribute_map = {
        'id': 'id',
        'period': 'period',
        'price': 'price',
        'quantity': 'quantity',
        'project': 'project',
        'one_time': 'oneTime',
        'service': 'service',
        'resource': 'resource',
        'charges': 'charges'
    }

    def __init__(self, id=None, period=None, price=None, quantity=None, project=None, one_time=None, service=None, resource=None, charges=None, local_vars_configuration=None):  # noqa: E501
        """Billing - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        provided = {
            'id': id,
            'period': period,
            'price': price,
            'quantity': quantity,
            'project': project,
            'one_time': one_time,
            'service': service,
            'resource': resource,
            'charges': charges,
        }
        # Initialise every backing field to None, then route any supplied
        # value through the matching property setter.
        for attr, value in provided.items():
            setattr(self, '_' + attr, None)
            if value is not None:
                setattr(self, attr, value)
        self.discriminator = None

    @property
    def id(self):
        """str: The id of this Billing."""
        return self._id

    @id.setter
    def id(self, id):
        self._id = id

    @property
    def period(self):
        """str: The period of this Billing."""
        return self._period

    @period.setter
    def period(self, period):
        self._period = period

    @property
    def price(self):
        """float: The price of this Billing."""
        return self._price

    @price.setter
    def price(self, price):
        self._price = price

    @property
    def quantity(self):
        """float: The quantity of this Billing."""
        return self._quantity

    @quantity.setter
    def quantity(self, quantity):
        self._quantity = quantity

    @property
    def project(self):
        """str: The project of this Billing."""
        return self._project

    @project.setter
    def project(self, project):
        self._project = project

    @property
    def one_time(self):
        """bool: The one_time flag of this Billing."""
        return self._one_time

    @one_time.setter
    def one_time(self, one_time):
        self._one_time = one_time

    @property
    def service(self):
        """BillingService: The service of this Billing."""
        return self._service

    @service.setter
    def service(self, service):
        self._service = service

    @property
    def resource(self):
        """BillingResource: The resource of this Billing."""
        return self._resource

    @resource.setter
    def resource(self, resource):
        self._resource = resource

    @property
    def charges(self):
        """list[BillingCharges]: The charges of this Billing."""
        return self._charges

    @charges.setter
    def charges(self, charges):
        self._charges = charges

    def to_dict(self):
        """Return the model properties as a dict.

        Nested model objects (anything exposing ``to_dict``) are converted
        one level deep, inside lists and dict values as well.
        """
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two Billing instances are equal when all properties match."""
        return isinstance(other, Billing) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self.__eq__(other)
| 24.741641 | 189 | 0.552703 | 7,874 | 0.967322 | 0 | 0 | 3,981 | 0.489066 | 0 | 0 | 3,798 | 0.466585 |
ad86259a30d53b22181afbe5c6707aa6fcfa5c27 | 1,810 | py | Python | docs/names/examples/gethostbyname.py | ndg63276/twisted | f672a20395e8beece6350631a70514f06c391bae | [
"Unlicense",
"MIT"
] | 1 | 2020-12-18T06:32:58.000Z | 2020-12-18T06:32:58.000Z | docs/names/examples/gethostbyname.py | ndg63276/twisted | f672a20395e8beece6350631a70514f06c391bae | [
"Unlicense",
"MIT"
] | null | null | null | docs/names/examples/gethostbyname.py | ndg63276/twisted | f672a20395e8beece6350631a70514f06c391bae | [
"Unlicense",
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- test-case-name: twisted.names.test.test_examples -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Print the IP address for a given hostname. eg
python gethostbyname.py www.google.com
This script does a host lookup using the default Twisted Names
resolver, a chained resolver, which attempts to lookup a name from:
* local hosts file
* memory cache of previous lookup results
* system recursive DNS servers
"""
import sys
from twisted.names import client, error
from twisted.internet.task import react
from twisted.python import usage
class Options(usage.Options):
    """Command-line options: exactly one positional HOSTNAME argument."""
    synopsis = "Usage: gethostbyname.py HOSTNAME"
    def parseArgs(self, hostname):
        # The single positional argument is the name to resolve.
        self["hostname"] = hostname
def printResult(address, hostname):
    """
    Write the resolved IP address to stdout, or an error message to
    stderr when the lookup produced no address.
    """
    if not address:
        sys.stderr.write(
            "ERROR: No IP addresses found for name {!r}\n".format(hostname)
        )
    else:
        sys.stdout.write(address + "\n")
def printError(failure, hostname):
    """
    Report a friendly message when *hostname* could not be resolved.

    Anything other than a DNSNameError is re-raised by trap().
    """
    failure.trap(error.DNSNameError)
    message = "ERROR: hostname not found {!r}\n".format(hostname)
    sys.stderr.write(message)
def main(reactor, *argv):
    """
    Parse *argv*, start a lookup for the requested hostname, and attach
    the result/error printers to the returned Deferred.
    """
    options = Options()
    try:
        options.parseOptions(argv)
    except usage.UsageError as errortext:
        sys.stderr.write(str(options) + "\n")
        sys.stderr.write("ERROR: {}\n".format(errortext))
        raise SystemExit(1)
    hostname = options["hostname"]
    deferred = client.getHostByName(hostname)
    deferred.addCallback(printResult, hostname)
    deferred.addErrback(printError, hostname)
    return deferred
if __name__ == "__main__":
    # task.react runs the reactor, calls main(reactor, *argv), and stops
    # once the Deferred returned by main has fired.
    react(main, sys.argv[1:])
| 24.794521 | 75 | 0.68011 | 151 | 0.083425 | 0 | 0 | 0 | 0 | 0 | 0 | 816 | 0.450829 |
ad8655b4f82e7a25c6632660cfa325c2cad9ee23 | 645 | py | Python | consumers/venv/lib/python3.7/site-packages/faust/cli/faust.py | spencerpomme/Public-Transit-Status-with-Apache-Kafka | 2c85d7daadf4614fe7ce2eabcd13ff87236b1c7e | [
"MIT"
] | null | null | null | consumers/venv/lib/python3.7/site-packages/faust/cli/faust.py | spencerpomme/Public-Transit-Status-with-Apache-Kafka | 2c85d7daadf4614fe7ce2eabcd13ff87236b1c7e | [
"MIT"
] | null | null | null | consumers/venv/lib/python3.7/site-packages/faust/cli/faust.py | spencerpomme/Public-Transit-Status-with-Apache-Kafka | 2c85d7daadf4614fe7ce2eabcd13ff87236b1c7e | [
"MIT"
] | null | null | null | """Program ``faust`` (umbrella command)."""
# Note: The command options above are defined in .cli.base.builtin_options
from .agents import agents
from .base import call_command, cli
from .clean_versions import clean_versions
from .completion import completion
from .livecheck import livecheck
from .model import model
from .models import models
from .reset import reset
from .send import send
from .tables import tables
from .worker import worker
# Names re-exported by ``from faust.cli.faust import *``: the sub-command
# entry points plus the ``cli``/``call_command`` helpers.
__all__ = [
    'agents',
    'call_command',
    'clean_versions',
    'cli',
    'completion',
    'livecheck',
    'model',
    'models',
    'reset',
    'send',
    'tables',
    'worker',
]
| 21.5 | 74 | 0.699225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 227 | 0.351938 |
ad86d9d40c3dcc454a710b3a5148587ea08bb4f9 | 1,364 | py | Python | generator.py | cenarturkmen/watercolor-CycleGAN | 94673e5f723904faab3114a9b63ae5d9e1de3de3 | [
"MIT"
] | 9 | 2021-04-23T21:57:04.000Z | 2021-09-01T08:06:48.000Z | generator.py | cenarturkmen/watercolor-CycleGAN | 94673e5f723904faab3114a9b63ae5d9e1de3de3 | [
"MIT"
] | null | null | null | generator.py | cenarturkmen/watercolor-CycleGAN | 94673e5f723904faab3114a9b63ae5d9e1de3de3 | [
"MIT"
] | null | null | null | from model_utils import Upsample, Downsample
from torch import nn
class CycleGAN_Unet_Generator(nn.Module):
    """U-Net style generator: a five-stage encoder feeding a four-stage
    decoder with skip connections between mirrored stages, followed by a
    transposed-conv + Tanh head producing a 3-channel output.
    """

    def __init__(self, filter=64):
        super(CycleGAN_Unet_Generator, self).__init__()
        # Encoder stack; shape comments assume a 256x256 input
        # (carried over from the original implementation).
        self.downsamples = nn.ModuleList([
            Downsample(3, filter, kernel_size=4, apply_instancenorm=False),  # (b, filter, 128, 128)
            Downsample(filter, filter * 2),      # (b, filter * 2, 64, 64)
            Downsample(filter * 2, filter * 4),  # (b, filter * 4, 32, 32)
            Downsample(filter * 4, filter * 8),  # (b, filter * 8, 16, 16)
            Downsample(filter * 8, filter * 8),  # (b, filter * 8, 8, 8)
        ])
        # Decoder stack; input widths double after the first stage,
        # presumably because Upsample concatenates the skip tensor
        # internally — confirm in model_utils.
        self.upsamples = nn.ModuleList([
            Upsample(filter * 8, filter * 8),
            Upsample(filter * 16, filter * 4, dropout=False),
            Upsample(filter * 8, filter * 2, dropout=False),
            Upsample(filter * 4, filter, dropout=False),
        ])
        self.last = nn.Sequential(
            nn.ConvTranspose2d(filter * 2, 3, kernel_size=4, stride=2, padding=1),
            nn.Tanh(),
        )

    def forward(self, x):
        skip_connections = []
        for encoder_stage in self.downsamples:
            x = encoder_stage(x)
            skip_connections.append(x)
        # Pair decoder stages with encoder activations deepest-first,
        # excluding the bottleneck output itself.
        for decoder_stage, skip in zip(self.upsamples,
                                       reversed(skip_connections[:-1])):
            x = decoder_stage(x, skip)
        return self.last(x)
ad88dd61bbd09019864be52cbe4cf8c91fba88d8 | 337 | py | Python | accelerator/models/ethno_racial_identity.py | masschallenge/django-accelerator | 8af898b574be3b8335edc8961924d1c6fa8b5fd5 | [
"MIT"
] | 6 | 2017-06-14T19:34:01.000Z | 2020-03-08T07:16:59.000Z | accelerator/models/ethno_racial_identity.py | masschallenge/django-accelerator | 8af898b574be3b8335edc8961924d1c6fa8b5fd5 | [
"MIT"
] | 160 | 2017-06-20T17:12:13.000Z | 2022-03-30T13:53:12.000Z | accelerator/models/ethno_racial_identity.py | masschallenge/django-accelerator | 8af898b574be3b8335edc8961924d1c6fa8b5fd5 | [
"MIT"
] | null | null | null | import swapper
from accelerator_abstract.models.base_ethno_racial_identity import (
BaseEthnoRacialIdentity,
)
class EthnoRacialIdentity(BaseEthnoRacialIdentity):
    """Concrete ethno-racial identity model, swappable via django-swapper."""
    class Meta(BaseEthnoRacialIdentity.Meta):
        # Lets downstream projects substitute their own implementation via
        # the swapper setting derived from the base model's app label.
        swappable = swapper.swappable_setting(
            BaseEthnoRacialIdentity.Meta.app_label, 'EthnoRacialIdentity')
| 30.636364 | 74 | 0.79822 | 219 | 0.649852 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.062315 |
ad8b2d0ea0221f96e6e009e09186a0d67f3d1e7e | 2,303 | py | Python | setup.py | HaaLeo/vague-requirements-scripts | e08b66aa6c0d17718bec1deb8c694d6b8237259b | [
"BSD-3-Clause"
] | null | null | null | setup.py | HaaLeo/vague-requirements-scripts | e08b66aa6c0d17718bec1deb8c694d6b8237259b | [
"BSD-3-Clause"
] | null | null | null | setup.py | HaaLeo/vague-requirements-scripts | e08b66aa6c0d17718bec1deb8c694d6b8237259b | [
"BSD-3-Clause"
] | null | null | null | # ------------------------------------------------------------------------------------------------------
# Copyright (c) Leo Hanisch. All rights reserved.
# Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for license information.
# ------------------------------------------------------------------------------------------------------
from os import path
from setuptools import find_packages, setup
# pylint: disable=exec-used,undefined-variable
# The README doubles as the PyPI long description.
with open(path.join(path.abspath(path.dirname(__file__)), './README.md'), 'r', encoding='utf8') as rf:
    LONG_DESCRIPTION = rf.read()

# Version is hard-coded in setup() below; exec-based loading of
# _version.py is left disabled.
# with open(path.join(path.abspath(path.dirname(__file__)), 'vague-requirements-scripts/_version.py'), 'r', encoding='utf8') as f:
#     exec(f.read())

setup(
    name='vaguerequirementslib',  # PEP8: Packages should also have short, all-lowercase names, the use of underscores is discouraged
    version='0.0.1',
    packages=find_packages('scripts'),
    package_dir={"": "scripts"},
    # Include files specified in MANIFEST.in
    # include_package_data=True,
    description='Some helper for the vague requirements thesis.',
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    url='https://github.com/HaaLeo/vague-requirements-scripts',
    author='Leo Hanisch',
    license='BSD 3-Clause License',
    install_requires=[
        'pandas',
        'numpy'
    ],
    project_urls={
        'Issue Tracker': 'https://github.com/HaaLeo/vague-requirements-scripts/issues',
        # 'Changelog': 'https://github.com/HaaLeo/vague-requirements-scripts/blob/master/CHANGELOG.md#changelog'
    },
    python_requires='>=3.6',
    keywords=[
        'vague',
        'requirements'
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
        'Topic :: Education',
        'Topic :: Scientific/Engineering :: Artificial Intelligence'
    ]
)
| 40.403509 | 133 | 0.605297 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,593 | 0.691706 |
ad8bbb939788d04c4a798ed4657ccace4dec673e | 43 | py | Python | src/python/WMCore/WMRuntime/Scripts/__init__.py | khurtado/WMCore | f74e252412e49189a92962945a94f93bec81cd1e | [
"Apache-2.0"
] | 21 | 2015-11-19T16:18:45.000Z | 2021-12-02T18:20:39.000Z | src/python/WMCore/WMRuntime/Scripts/__init__.py | khurtado/WMCore | f74e252412e49189a92962945a94f93bec81cd1e | [
"Apache-2.0"
] | 5,671 | 2015-01-06T14:38:52.000Z | 2022-03-31T22:11:14.000Z | src/python/WMCore/WMRuntime/Scripts/__init__.py | khurtado/WMCore | f74e252412e49189a92962945a94f93bec81cd1e | [
"Apache-2.0"
] | 67 | 2015-01-21T15:55:38.000Z | 2022-02-03T19:53:13.000Z | #!/usr/bin/env python
"""
_Scripts_
"""
| 5.375 | 21 | 0.55814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.953488 |
ad8c13ec67ec87fbb7e52ba3ef0d5416c7d7a8bc | 6,691 | py | Python | combine-json.py | efficient/catbench | 4f66541efd8318109c4ac150898d60f023e7aba5 | [
"Apache-2.0"
] | 10 | 2017-12-12T17:20:41.000Z | 2021-05-03T14:40:35.000Z | combine-json.py | efficient/catbench | 4f66541efd8318109c4ac150898d60f023e7aba5 | [
"Apache-2.0"
] | null | null | null | combine-json.py | efficient/catbench | 4f66541efd8318109c4ac150898d60f023e7aba5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import argparse;
import os;
import sys;
import json;
def setup_optparse():
    """Define and parse the command line.

    Returns a tuple: (input file, list of append files, suffix, output
    file, norm series, norm suffix, norm x-field, series name set,
    median field or None).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', '-i', dest='file1',
                        help='json to append to')
    parser.add_argument('--append', '-a', nargs='+', dest='files2',
                        help='json(s) to be appended to --input.')
    parser.add_argument('--suffix', '-s', dest='suffix', default="",
                        help='Suffix to attach to series from the second file')
    parser.add_argument('--outfile', '-o', dest='outfile',
                        help='Output json. Note that if -i and -o are the same, -i will be overwritten.')
    parser.add_argument('--norm', '-n', dest='norm', default="",
                        help='Norm to normalize all (other) series against')
    parser.add_argument('--norm-suffix', dest='norm_suffix', default="",
                        help='Suffix to add to normalized series')
    parser.add_argument('--norm-x', dest='norm_x', default="",
                        help='Do not normalize these values')
    parser.add_argument('--series', '-d', nargs='+', dest='series', default=[],
                        help='Only copy specified data series (still applies suffix). Note that if suffix is empty, a replacement will be done.')
    parser.add_argument('--baseline-contention', '-b', dest='baselinecontention', action='store_true', default=False,
                        help='Only copy baseline and contention (leave suffix blank for best results). Overrides -d switch!')
    parser.add_argument('--median', '-m', dest='median', default=None,
                        help='Select each point in the specified --series from a group of --append files based on the median of the specified field. ' +
                             'Using this with suffix is untested, and probably not a good idea, and your data files should probably all have the same domain...' +
                             'Normalization is right out.')
    args = parser.parse_args()

    # --median needs multiple inputs; otherwise exactly one append file.
    if args.median:
        if not isinstance(args.files2, list):
            sys.stderr.write('ERROR: Use of --median requires more than one file to --append')
            sys.exit(1)
    elif not isinstance(args.files2, list):
        args.files2 = [args.files2]
    elif len(args.files2) != 1:
        sys.stderr.write('ERROR: I don\'t know what to do with more than one --append file')
        sys.exit(1)

    if args.baselinecontention:
        args.series = ["baseline", "contention"]

    return (args.file1, args.files2, args.suffix, args.outfile, args.norm,
            args.norm_suffix, args.norm_x, set(args.series), args.median)
# Experiment parameters that must agree between a baseline file and an
# experiment file for a comparison to be meaningful.
constant_keys = ("cache_ways", "mite_tput_limit", "zipf_alpha")

def verify(file1, file2):
    """Warn when experiment constants differ between two result files.

    Compares every sample of every series in *file1* against the
    'baseline' series of *file2*, for each key in ``constant_keys``, and
    prints one warning per mismatching key.

    Fix over the original: file handles are now closed via ``with``
    (they previously leaked).
    """
    with open(file1, 'r') as fd1:
        json1 = json.load(fd1)
    with open(file2, 'r') as fd2:
        json2 = json.load(fd2)
    data1 = json1.get("data")
    data2 = json2.get("data")
    found_violation = {}
    for key in data1.keys():
        for entry in data1[key]["samples"]:
            for const_key in constant_keys:
                if const_key not in entry:
                    continue
                for entry2 in data2["baseline"]["samples"]:
                    if const_key not in entry2:
                        continue
                    print(entry2[const_key] + " = " + entry[const_key])
                    if entry2[const_key] != entry[const_key]:
                        found_violation[const_key] = True
    for key in found_violation.keys():
        if found_violation[key]:
            print("Warning, variable " + key + " mismatch between baseline file and experiment file")
def combine(file1, files2, suffix, outfile, norm, norm_suffix, norm_x, series, median):
    """Merge the series of *files2* into *file1* and write *outfile*.

    With *median* set, each sample of each series in *series* (or all of
    file1's series) is replaced by the sample whose *median* field is the
    median across all input files. Otherwise the single append file's
    series are copied in (with *suffix*), optionally adding copies
    normalized against the *norm* series.

    Fixes over the original:
    - ``if filter(...)`` was always truthy on Python 3 (a filter object),
      making the length check fire unconditionally; replaced with any().
    - ``order[len(order) / 2]`` used float division, a TypeError as a list
      index on Python 3; now integer division.
    - bare ``exit(1)`` replaced with ``sys.exit(1)``.
    - all file handles (including the output) are closed via ``with``;
      the output was previously never flushed/closed explicitly.
    """
    with open(file1, 'r') as fd1:
        json1 = json.load(fd1)
    jsons2 = []
    for path in files2:
        with open(path, 'r') as fd2:
            jsons2.append(json.load(fd2))
    data1 = json1.get("data")
    datas2 = [each.get("data") for each in jsons2]
    if median:
        alldat = [data1] + datas2
        if not len(series):
            series = data1.keys()
        for group in series:
            samps = [each[group]['samples'] for each in alldat]
            res = samps[0]
            if len(samps) != len(alldat):
                sys.stderr.write('ERROR: Couldn\'t find series \'series\' in all files')
                sys.exit(1)
            nsamps = len(res)
            if any(len(elm) != nsamps for elm in samps):
                sys.stderr.write('ERROR: Not all input files have the same number of elements in \'series\'')
                sys.exit(1)
            for idx in range(nsamps):
                order = sorted([each[idx] for each in samps], key=lambda elm: elm[median])
                res[idx] = order[len(order) // 2]
                print('Chose ' + group + '.samples[' + str(idx) + '].' + median + ' as \'' + str(res[idx][median]) + '\' out of: ' + str([each[median] for each in order]))
    else:
        data2 = datas2[0]
        for key in data2.keys():
            if len(series) and key not in series:
                continue
            new_key = key + suffix
            if new_key in data1:
                print("Warning, overwriting " + new_key + " in " + file1)
            data1[new_key] = data2[key]
            data1[new_key]["description"] = data1[new_key]["description"] + suffix
        if norm != "":
            for key in data2.keys():
                if key == norm:
                    continue
                new_key = key + suffix + norm_suffix
                index = 0
                while index < len(data2[key]["samples"]):
                    sample = data2[key]["samples"][index]
                    base_sample = data2[norm]["samples"][index]
                    for ylabel in sample:
                        if base_sample[ylabel] != 0 and ylabel != norm_x:
                            data2[key]["samples"][index][ylabel] = sample[ylabel] / base_sample[ylabel]
                    index += 1
                data1[new_key] = data2[key]
                data1[new_key]["description"] = data1[new_key]["description"] + suffix + " normalized to " + norm
    with open(outfile, 'w') as outfd:
        json.dump(json1, outfd, indent=4, sort_keys=True)
def main():
    """Parse arguments and run the merge.

    The three normalization flags (--norm, --norm-suffix, --norm-x) must
    be supplied either all together or not at all.
    """
    (file1, files2, suffix, outfile, norm,
     norm_suffix, norm_x, series, median) = setup_optparse()
    norm_flags = (norm, norm_suffix, norm_x)
    if all(flag == "" for flag in norm_flags) or all(flag != "" for flag in norm_flags):
        combine(file1, files2, suffix, outfile, norm, norm_suffix, norm_x, series, median)
    else:
        print("Missing one of: --norm, --norm-suffix, --norm-x\n")

main()
| 46.144828 | 166 | 0.567329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,846 | 0.275893 |
ad8c7679c4fcaabd0666e9de8206c186f2f5bf7b | 773 | py | Python | following.py | yoshualukash/insta-crawler | c2b9b150e4fff70cb03ea49c08fb46ffc8f23dd0 | [
"MIT"
] | null | null | null | following.py | yoshualukash/insta-crawler | c2b9b150e4fff70cb03ea49c08fb46ffc8f23dd0 | [
"MIT"
] | null | null | null | following.py | yoshualukash/insta-crawler | c2b9b150e4fff70cb03ea49c08fb46ffc8f23dd0 | [
"MIT"
] | null | null | null | # Get instance
import instaloader
import json
L = instaloader.Instaloader(max_connection_attempts=0)
# Login or load session
username = ''
password = ''
L.login(username, password) # (login)
# Obtain profile metadata
instagram_target = ''
profile = instaloader.Profile.from_username(L.context, instagram_target)
following_list = []
count=1
for followee in profile.get_followees():
username = followee.username
following_list.append(username)
print(str(count) + ". " + username)
count = count + 1
following_list_json = json.dumps(following_list)
open("list_following_" + instagram_target +".json","w").write(following_list_json)
print("selesai")
print("cek file json di file : list_following_" + instagram_target +".json") | 29.730769 | 83 | 0.720569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 169 | 0.218629 |
ad8d60c4379e74e8ce58021ef988f95871703eca | 11,966 | py | Python | ihs/collector/tasks.py | la-mar/ihs-deo | 250d3edeb9e3ae4a407285e136b7e911f1d75e82 | [
"Apache-2.0"
] | null | null | null | ihs/collector/tasks.py | la-mar/ihs-deo | 250d3edeb9e3ae4a407285e136b7e911f1d75e82 | [
"Apache-2.0"
] | null | null | null | ihs/collector/tasks.py | la-mar/ihs-deo | 250d3edeb9e3ae4a407285e136b7e911f1d75e82 | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
import logging
from datetime import date, datetime, timedelta
from typing import Dict, Generator, List, Optional, Union, Tuple
import pandas as pd
import metrics
from api.models import ( # noqa
ChangeDeleteLog,
County,
ProductionHorizontal,
ProductionMasterHorizontal,
ProductionMasterVertical,
ProductionVertical,
WellHorizontal,
WellMasterHorizontal,
WellMasterVertical,
WellVertical,
)
from collector import ExportJob # noqa
from collector import (
CDExporter,
Collector,
Endpoint,
ExportBuilder,
ExportParameter,
ExportRetriever,
ProductionList,
ProductionTransformer,
WellboreTransformer,
WellList,
XMLParser,
)
from collector.identity_list import IdentityList
from collector.task import Task
from config import ExportDataTypes, IdentityTemplates, get_active_config
from exc import CollectorError, NoIdsError
from ihs import create_app
logger = logging.getLogger(__name__)
conf = get_active_config()
endpoints = Endpoint.load_from_config(conf)
def run_endpoint_task(
endpoint_name: str, task_name: str
) -> Generator[dict, None, None]:
""" Unpack task options and assemble metadata for job configuration """
endpoint = endpoints[endpoint_name]
task = endpoint.tasks[task_name]
metrics.post(
"task.execution", 1, tags={"endpoint": endpoint_name, "task": task_name}
)
for config in task.configs:
yield config
def submit_job(job_options: dict, metadata: dict) -> Optional[ExportJob]:
endpoint_name = metadata.get("endpoint")
endpoint = endpoints[endpoint_name]
# name = metadata.get("name", None)
target_model = metadata.get("target_model", None)
task_name = metadata.get("task", None)
source_name = metadata.get("source_name", None)
try:
ep = ExportParameter(**job_options)
requestor = ExportBuilder(endpoint)
job = requestor.submit(ep, metadata=metadata or {})
return job
except CollectorError as e:
logger.warning(
f"({target_model}) Skipping job {task_name} -> {source_name}: {e}"
)
return None
def collect(job: Union[dict, ExportJob]):
if isinstance(job, dict):
job = ExportJob(**job)
is_identity_export = IdentityTemplates.has_member(job.template)
data = get_job_results(job)
if is_identity_export:
collect_identities(job, data)
else:
collect_data(job, data)
def get_job_results(job: Union[ExportJob, dict]) -> bytes:
if not isinstance(job, ExportJob):
job = ExportJob(**job)
retr = ExportRetriever(job, base_url=job.url, endpoint=endpoints[job.endpoint])
data = retr.get(auto_delete=True)
return data
def collect_data(job: ExportJob, xml: bytes):
if xml:
parser = XMLParser.load_from_config(conf.PARSER_CONFIG)
document = parser.parse(xml)
model = endpoints[job.endpoint].model
collector = Collector(model)
data: List[Dict] = []
if job.data_type == ExportDataTypes.WELL.value:
data = WellboreTransformer.extract_from_collection(document, model=model)
elif job.data_type == ExportDataTypes.PRODUCTION.value:
data = ProductionTransformer.extract_from_collection(document, model=model)
metrics.post("job.collection.success", len(data), tags=job.limited_dict())
collector.save(data, replace=True)
def collect_identities(job: ExportJob, data: bytes) -> IdentityList:
interface = None
if job.data_type == ExportDataTypes.WELL.value:
interface = WellList(job.name, job.hole_direction)
interface.ids = data
elif job.data_type == ExportDataTypes.PRODUCTION.value:
interface = ProductionList(job.name, job.hole_direction)
interface.ids = data
return interface
# def delete_job(job: ExportJob) -> bool:
# endpoint = endpoints[job.endpoint]
# requestor = ExportBuilder(endpoint)
# result = False
# if requestor.job_exists(job):
# result = requestor.delete_job(job)
# return result
def purge_remote_exports() -> bool:
eb = ExportBuilder(None)
eb.delete_all_jobs()
return True
def calc_remote_export_capacity() -> Dict[str, Union[float, int]]:
"""Calculate the amount of storage space currently consumed by job exports on IHS' servers.
Returns:
dict -- {
capacity_used: space used in KB,
njobs: number of existing completed jobs
"""
mean_doc_size_bytes = (
18000 * conf.TASK_BATCH_SIZE
) # average single entity document size
inflation_pct: float = 0.1 # over estimate the used capacity by this percentage
doc_size_bytes = mean_doc_size_bytes + (inflation_pct * mean_doc_size_bytes)
remote_capacity_bytes: int = 1000000000 # 1 GB
eb = ExportBuilder(None)
try:
njobs = len(eb.list_completed_jobs())
except CollectorError as e:
logger.exception(f"Unable to calculate export capacity -- {e}", stack_info=True)
return {}
return {
"remote.capacity.used": njobs * doc_size_bytes,
"remote.capacity.available": remote_capacity_bytes - (njobs * doc_size_bytes),
"remote.capacity.total": remote_capacity_bytes,
"remote.jobs": njobs,
}
def download_changes_and_deletes() -> int:
max_date = ChangeDeleteLog.max_date()
max_sequence = ChangeDeleteLog.max_sequence() or 0
today = datetime.now()
if max_date:
last_date = max_date - timedelta(days=1)
else:
last_date = date.today() - timedelta(days=30)
cde = CDExporter(from_date=last_date, to_date=today)
results = cde.get_all()
logger.info(f"Downloaded {len(results)} changes and deletes")
records: List[Dict] = []
for r in results:
new = {}
for k, v in r.items():
if v is not None:
if "uwi" in k:
v = str(v)
if k == "reasoncode":
k = "reason_code"
elif k == "activecode":
k = "active_code"
elif k == "referenceuwi":
k = "reference_uwi"
elif k == "newuwi":
k = "new_uwi"
new[k] = v
if new.get("sequence", 0) > max_sequence:
new["processed"] = False
records.append(new)
logger.info(
f"Found {len(records)} changes and deletes (filtered {len(results) - len(records)})"
)
collector = Collector(ChangeDeleteLog)
return collector.save(records)
# def process_changes_and_deletes():
# # reason_action_map = {
# # "no_action": [0, 6],
# # "update_to_new_uwi": [1, 5, 7, 8, 9],
# # "update_to_ref_uwi": [2],
# # "delete": [3, 4],
# # }
# reason_action_map = {
# 0: "no_action",
# 1: "update_to_new_uwi",
# 2: "update_to_ref_uwi",
# 3: "delete",
# 4: "delete",
# 5: "update_to_new_uwi",
# 6: "no_action",
# 7: "update_to_new_uwi",
# 8: "update_to_new_uwi",
# 9: "update_to_new_uwi",
# }
# objs = ChangeDeleteLog.objects(processed=False)
# obj = objs[len(objs) - 80]
# obj._data
# #! unfinished
# # for obj in objs:
# # if obj.processed is False:
# # action = reason_action_map[obj.reason_code]
# # if action == "delete":
# # document = WellHorizontal.objects(api14=obj.uwi).first()
# # document = WellVertical.objects(api14=obj.uwi).first()
def synchronize_master_lists():
county_model_name = County.__name__.split(".")[-1]
master_counties = County.as_df().index.tolist()
for model in [
WellMasterHorizontal,
WellMasterVertical,
ProductionMasterHorizontal,
ProductionMasterVertical,
]:
target_model_name = model.__name__.split(".")[-1]
model_counties = model.as_df().index.tolist()
missing_from_model = [x for x in master_counties if x not in model_counties]
# add missing counties to model
added = []
for county in missing_from_model:
i = model(name=county)
i.save()
added.append(county)
if added:
logger.info(
f"({target_model_name}) Added {len(added)} entries from {county_model_name} master: {added}" # noqa
)
missing_from_master = [x for x in model_counties if x not in master_counties]
if missing_from_master:
logger.info(
f"({target_model_name}) has {len(missing_from_master)} entries missing from {county_model_name} master" # noqa
)
logger.info(f"({target_model_name}) synchronized to {county_model_name} master")
def refresh_master_lists() -> List[Tuple[List[Dict], str, str]]:
endpoints = Endpoint.from_yaml(conf.COLLECTOR_CONFIG_PATH)
endpoints = {
k: v for k, v in endpoints.items() if "master" in v.model.__name__.lower()
}
all_endpoint_configs: List[Tuple[List[Dict], str, str]] = []
for endpoint_name, endpoint in endpoints.items():
# endpoint_name, endpoint = list(endpoints.items())[0]
target_model_name = endpoint.model.__name__.split(".")[-1]
county_record_dict = (
County.as_df().loc[:, ["county_code", "state_code"]].to_dict(orient="index")
)
task = endpoint.tasks["sync"]
task.options.matrix = county_record_dict # override the yaml defined matrix
configs = task.configs
logger.warning(f"({target_model_name}) refreshing {len(configs)} counties")
all_endpoint_configs.append((configs, endpoint_name, task.task_name))
return all_endpoint_configs
# job_options, metadata = task.configs[0].values()
# ep = ExportParameter(**job_options)
# print(ep.params["Query"])
if __name__ == "__main__":
    # Ad-hoc entry point for manual runs: configure logging, quiet noisy
    # third-party loggers, and push a Flask app context so database-backed
    # models can be used interactively.
    import loggers

    loggers.config(10)  # 10 == logging.DEBUG
    logging.getLogger("collector.parser").setLevel(30)  # 30 == logging.WARNING
    logging.getLogger("zeep").setLevel(30)
    from time import sleep

    # from uuid import UUID
    from ihs import create_app

    logging.basicConfig(level=10)
    app = create_app()
    app.app_context().push()
# endpoint_name = "well_master_vertical"
# endpoint_name = "well_master_vertical"
# task_name = "sync"
# endpoint = endpoints[endpoint_name]
# task = endpoint.tasks[task_name]
# # configs =
# job_options, metadata = task.configs[0].values()
# for configs, endpoint_name, task_name in refresh_master_lists():
# for job
# ep = ExportParameter(**job_options)
# print(ep.params["Query"])
# requestor = ExportBuilder(endpoint)
# job = submit_job(job_options=job_options, metadata=metadata)
# # job.to_dict()
# sleep(5)
# if job:
# collect(job)
# xml = get_job_results(job)
# parser = XMLParser.load_from_config(conf.PARSER_CONFIG)
# document = parser.parse(xml)
# model = endpoint.model
# data = WellboreTransformer.extract_from_collection(document, model=model)
# len(data)
# [x["api14"] for x in data]
# collector = Collector(model)
# collector.save(data, replace=True)
# from api.models import County, WellMasterHorizontal
# import pandas as pd
# df = pd.DataFrame([x._data for x in County.objects.all()]).set_index("name")
# df.columns
# df = df.drop(columns=["state_code", "county_code"]).sort_values("well_h_last_run")
# df.shape
# hz_ids = (
# pd.DataFrame([x._data for x in WellMasterHorizontal.objects.all()])
# .set_index("name")
# .sort_index()
# )
# hz_ids.loc[~hz_ids.index.str.contains("County")].shape
# joined = df.join(hz_ids.ids)
# joined[joined.ids.isna()]
# # data[7]
# self = task.options
| 31.161458 | 127 | 0.636637 | 0 | 0 | 406 | 0.033929 | 0 | 0 | 0 | 0 | 4,309 | 0.360104 |
ad8e0f494686da07f4c61d50f681147ad0112a38 | 5,271 | py | Python | hivprotmut/structures/pdbcuration.py | victor-gil-sepulveda/PhD-HIVProteaseMutation | 164e723605ceaaef246d2b8916fd5aca980e7734 | [
"MIT"
] | null | null | null | hivprotmut/structures/pdbcuration.py | victor-gil-sepulveda/PhD-HIVProteaseMutation | 164e723605ceaaef246d2b8916fd5aca980e7734 | [
"MIT"
] | null | null | null | hivprotmut/structures/pdbcuration.py | victor-gil-sepulveda/PhD-HIVProteaseMutation | 164e723605ceaaef246d2b8916fd5aca980e7734 | [
"MIT"
] | null | null | null | """
Created on 25/8/2014
@author: victor
"""
import prody
import numpy
class CurationSelections():
    # ProDy selection strings used throughout the curation pipeline.
    # Ligand: any hetero group that is not water and not an ion.
    # NOTE(review): LIGAND_SELECTION omits the "and" connectors that
    # HEAVY_LIGAND_SELECTION uses; confirm both parse as intended by ProDy.
    LIGAND_SELECTION = "hetero not water not ion"
    # Same as the ligand selection, but excluding hydrogens.
    HEAVY_LIGAND_SELECTION = "hetero and not water and not ion and not hydrogen"
    # Template for selecting the protein atoms of one or more chains by id.
    PROTEIN_CHAIN_TEMPLATE = "protein chain %s"

    def __init__(self):
        pass
def choose_main_chains(initial_pdb):
    """
    Pick the two longest protein chains of the structure.

    A model may contain extra complexes or duplicated chains covering the same
    space; only the two longest protein chains (by sequence length) are kept
    as the "main" chains.

    :param initial_pdb: The pdb (prody structure) we want to extract the chains.
    :return: An array containing the chain ids of the two main chains.
    """
    hierarchy = prody.HierView(initial_pdb.select("protein"))
    sized_chains = [
        (len(chain.getSequence()), chain.getChid())
        for chain in hierarchy.iterChains()
    ]
    sized_chains.sort()
    # The two largest entries are the main chains; drop the lengths.
    return [chain_id for _, chain_id in sized_chains[-2:]]
def process_water_structures(initial_pdb, main_chains, ligand):
    """
    Detects the waters we have to keep (important for the simulation) and returns
    a structure holding them.

    Important waters are the ones closer to Template residue 50 (Ile); the residue
    is not guaranteed to be conserved, which means we have to rely on the
    residue number to choose it, and take any offset into account if needed.

    Extra: water molecules must also be close to the binding site. We pick the
    water that has minimum combined distance to the binding site and residue 50.

    :param initial_pdb: The pdb (prody structure) we want to extract the chains.
    :param main_chains: Chain ids whose residue-50 neighbourhood is inspected.
    :param ligand: ProDy ligand selection, or None (then the centre of the whole
        structure is used as the "binding site").
    :return: A dictionary indexed by the water id (res. num. + chain id) holding the prody pdb
        structure of that water.
    """
    hw = prody.HierView(initial_pdb.select("protein"))
    water_structs = {}
    for chain in hw.iterChains():
        if chain.getChid() in main_chains:
            # We cannot do a direct selection, instead we iterate
            # NOTE(review): enumerate is 0-based, so the loop leaves `residue`
            # bound to the residue at index 50 (the 51st residue); confirm this
            # matches the intended "50th residue".
            for i, residue in enumerate(chain.iterResidues()):
                if i == 50:  # 50th residue
                    break
            residue_com = prody.calcCenter(residue)
            if ligand is None:
                # No ligand: approximate the binding site with the centre of
                # the whole structure.
                ligand_com = prody.calcCenter(initial_pdb)
            else:
                ligand_com = prody.calcCenter(ligand)
            # Identify closer water
            waters = initial_pdb.select("name O and water")
            if waters is not None:
                # Combined (not averaged) distance to residue 50 and to the
                # binding site; the water minimising the sum is kept.
                distance_to_R50 = numpy.sqrt(((residue_com - waters.getCoords())**2).sum(axis=1))
                distance_to_BindSite = numpy.sqrt(((ligand_com - waters.getCoords())**2).sum(axis=1))
                distances = distance_to_R50 + distance_to_BindSite
                min_dist = numpy.min(distances)
                min_dist_index = numpy.where(distances == min_dist)
                water_resnum = waters.getResnums()[min_dist_index]
                water_chid = waters.getChids()[min_dist_index][0]
                water_id = "%d:%s"%(water_resnum, water_chid)
                # We use a dict in order to get rid of repeats
                selection_string = "resnum %d and chain %s"%(water_resnum,
                                                             water_chid)
                # NOTE(review): `initial_pdb.water` is unusual for a ProDy
                # AtomGroup — confirm this attribute exists; a plain
                # initial_pdb.select(...) may have been intended.
                water_structs[water_id] = initial_pdb.water.select(selection_string).copy()
    return water_structs
def curate_struct(initial_pdb, main_chains, pdb_alignment, parameters):
    """
    Returns the "curated" pdb. A curated pdb has potentially 2 waters around residue
    50 of each chain, a ligand and two main (symmetric) chains; everything else must be
    deleted. This function will work even in the case that the 2 latter are not present,
    which can happen when processing any of the "mandatory" structures (those can pass
    the filters automatically).

    :param initial_pdb: The prody pdb structure we want to extract the chains.
    :param main_chains: Chain ids selected by choose_main_chains().
    :param pdb_alignment: Bookkeeping dict; "num_chains" and "waters" are
        recorded under its "pdb" key.
    :param parameters: Settings dict; "min_ligand_atoms" is the minimum size
        for a hetero group to count as the ligand.
    :return: The "curated" pdb and the ligand
    """
    # Get chain info (without ligand or waters)
    hw = prody.HierView(initial_pdb.select("protein"))
    pdb_alignment["pdb"]["num_chains"] = hw.numChains()
    # Pick main chains
    prot_struct = initial_pdb.select(CurationSelections.PROTEIN_CHAIN_TEMPLATE%(" ".join(main_chains))).copy()
    # Add the ligand (if found), must be part of other chains (not main_chains)
    ligand_struct = initial_pdb.select(CurationSelections.LIGAND_SELECTION)
    if ligand_struct is not None and ligand_struct.numAtoms() >= parameters["min_ligand_atoms"]:
        tmp_struct = prot_struct + ligand_struct.copy()
    else:
        tmp_struct = prot_struct
    # Add "important" waters, if found
    water_structs = process_water_structures(initial_pdb, main_chains, ligand_struct)
    pdb_alignment["pdb"]["waters"] = water_structs.keys()  # Keep track of added waters in the alignment file
    for water_id in water_structs:
        tmp_struct = tmp_struct + water_structs[water_id]
    return tmp_struct, ligand_struct
| 43.561983 | 110 | 0.658509 | 248 | 0.04705 | 0 | 0 | 0 | 0 | 0 | 0 | 2,364 | 0.448492 |
ad8f7bbcca004c832305ceeebbf23ba748e94eff | 933 | py | Python | Session 3/Dictionaries/Accessing, writing & deleting data.py | Tassneem04Hamdy/AUG-Problem-Solving-For-Bioinformatics-Level-1- | 7273610cc2e37acb65530dc384472e78ee8c30f7 | [
"MIT"
] | 4 | 2021-04-16T12:27:16.000Z | 2021-10-08T19:05:33.000Z | Session 3/Dictionaries/Accessing, writing & deleting data.py | Tassneem04Hamdy/AUG-Problem-Solving-For-Bioinformatics-Level-1- | 7273610cc2e37acb65530dc384472e78ee8c30f7 | [
"MIT"
] | null | null | null | Session 3/Dictionaries/Accessing, writing & deleting data.py | Tassneem04Hamdy/AUG-Problem-Solving-For-Bioinformatics-Level-1- | 7273610cc2e37acb65530dc384472e78ee8c30f7 | [
"MIT"
] | 5 | 2021-04-18T10:46:44.000Z | 2021-05-03T16:13:25.000Z | my_dictionary = {
'type': 'Fruits',
'name': 'Apple',
'color': 'Green',
'available': True,
'number': 25
}
print(my_dictionary)
print(my_dictionary['name'])
# searching with wrong key
print(my_dictionary['weight'])
###############################################################
# printing keys and values
for d in my_dictionary:
print(d, my_dictionary[d])
# printing values direct
for data in my_dictionary.values():
print(data)
###############################################################
# modify a value
my_dictionary['color'] = 'Red'
print(my_dictionary)
###############################################################
# inserting new item
my_dictionary['weight'] = '5k'
print(my_dictionary)
###############################################################
# deleting an item
del my_dictionary['weight']
print(my_dictionary)
###############################################################
| 21.697674 | 63 | 0.45552 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 551 | 0.590568 |
ad8fb8dc439638934875f09ce4b704d202abf421 | 18,426 | py | Python | old/data_handler_VALVE.py | dlaredo/NASA_RUL_-CMAPS- | b4fc4267e2abb4b0542e4658fd8ee931ba848fd1 | [
"BSD-3-Clause"
] | 27 | 2018-05-09T09:18:04.000Z | 2022-01-14T06:37:53.000Z | old/data_handler_VALVE.py | hard10086/NASA_RUL_-CMAPS- | b4fc4267e2abb4b0542e4658fd8ee931ba848fd1 | [
"BSD-3-Clause"
] | 1 | 2019-06-11T09:09:22.000Z | 2019-10-08T21:23:07.000Z | old/data_handler_VALVE.py | hard10086/NASA_RUL_-CMAPS- | b4fc4267e2abb4b0542e4658fd8ee931ba848fd1 | [
"BSD-3-Clause"
] | 9 | 2018-07-06T03:40:47.000Z | 2022-01-06T07:30:26.000Z | import numpy as np
import random
import pandas as pd
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import select
from sqlalchemy import and_
from sqlalchemy import between
from sqlalchemy.sql import exists
from sqlalchemy import desc
from datetime import datetime, timezone, timedelta
from damadicsDBMapping import *
from sequenced_data_handler import SequenceDataHandler
# IP Address: 169.236.181.40
# User: dbAdmin
# Password: dbAdmin
# Database: damadics
class ValveDataHandler(SequenceDataHandler):
'''
TODO: column information here
'''
#Method definition
    def __init__(self, start_time, end_time, selected_features, sequence_length = 1, sequence_stride = 1, data_scaler = None):
        """Create a handler for valve readings between start_time and end_time.

        :param start_time: inclusive lower bound for the timestamp query
        :param end_time: inclusive upper bound for the timestamp query
        :param selected_features: feature columns fed to the model
        :param sequence_length: length of each generated sequence window
        :param sequence_stride: stride between consecutive windows
        :param data_scaler: optional scaler forwarded to the parent class
        """
        # Public properties
        self._start_time = start_time
        self._end_time = end_time
        self._selected_features = selected_features
        self._rectify_labels = False
        self._data_scaler = data_scaler

        # Database connection
        # self._db_connection = mysql.connector.connect(user = 'root', password = 'Ying6102#', database = 'damadics')
        self._load_from_db = True  # forces a DB read on the first load_data() call
        # Mapping from raw column index to column name in the valve readings table.
        self._column_names = {0: 'timestamp', 1: 'externalControllerOutput', 2: 'undisturbedMediumFlow', 3: 'pressureValveInlet', 4:'pressureValveOutlet',
            5: 'mediumTemperature', 6: 'rodDisplacement', 7: 'disturbedMediumFlow', 8: 'selectedFault', 9: 'faultType', 10: 'faultIntensity'}

        # Entire Dataset
        self._df = None
        self._X = None
        self._y = None

        # Splitting. This is what is used to train
        self._df_train = None
        self._df_test = None

        # create one time session
        self._sqlsession = None
        print("init")

        # super init
        super().__init__(sequence_length, sequence_stride, len(selected_features), data_scaler)
    def connect_to_db(self, username, pasw, host, dbname):
        """Open a SQLAlchemy session against the given MySQL database.

        Builds a mysql+mysqldb connection string from the arguments. On
        failure the error is printed and self._sqlsession stays None.
        """
        # self.username = username
        # self.pasw = pasw
        # self.host = host
        self.dbname = dbname
        databaseString = "mysql+mysqldb://"+username+":"+pasw+"@"+host+"/"+dbname

        self._sqlsession = None
        try:
            sqlengine = sqlalchemy.create_engine(databaseString)
            SQLSession = sessionmaker(bind=sqlengine)
            self._sqlsession = SQLSession()
            print("Connection to " + databaseString + " successfull")
        except Exception as e:
            print("e:", e)
            print("Error in connection to the database")
    def extract_data_from_db(self):
        """Query valve readings in [start_time, end_time] into self._df and
        split them into feature matrix self._X and label vector self._y."""
        startTime = datetime.now()
        self._df = self._sqlsession.query(ValveReading).filter(ValveReading.timestamp.between (self._start_time,self._end_time) )
        self._df = pd.read_sql(self._df.statement, self._df.session.bind)
        #dataPoints = self._sqlsession.query(exists().where(ValveReading.timestamp == '2018-07-27 15:56:22')).scalar()
        #dataPoints = self._sqlsession.query(ValveReading).order_by(ValveReading.timestamp)

        # NOTE(review): the fixed column list below ignores
        # self._selected_features — confirm whether feature selection was
        # meant to be applied here.
        # Assumption: the label ('selectedFault') is the last entry of this list.
        column_names = ['externalControllerOutput', 'pressureValveInlet',
            'pressureValveOutlet', 'mediumTemperature','rodDisplacement', 'disturbedMediumFlow', 'selectedFault']
        self._X = self._df.loc[:, column_names[:-1]].values
        self._y = self._df.loc[:, column_names[len(column_names) - 1]].values

        print("Extracting data from database runtime:", datetime.now() - startTime)
def one_hot_encode(self, num_readings):
startTime = datetime.now()
fault_column = list()
one_hot_matrix = np.zeros((num_readings, 20))
fault_column = self._y
for i in range(num_readings):
one_hot_matrix[i, int(fault_column[i] - 1)] = 1
print("One-hot-encoding:", datetime.now() - startTime)
return one_hot_matrix
# Private
    def find_samples(self, data_samples):
        '''
        Assumptions made when using this functions
        1.) The value always starts of as NOT BROKEN. First faultType value is 20.
        2.) Function is used to entire dataset and not in chunks
        '''
        # TODO: handle cases when the first readings start of as a broken value
        # TODO: ask David if he wants a minimum amount of samples in the dataset
        # State machine over self._y: a "sample" is the run of readings from
        # one repair (label back to 20) up to and including the next repair.
        # NOTE(review): indentation reconstructed from the commented variant
        # below; a trailing unterminated sample appears to be dropped here —
        # confirm that is intended.
        startTime = datetime.now()
        small_list, big_list = list(), list()
        normal_status = 20.0
        isBroken = False
        counter = 0
        for i in range(len(self._y)):
            # If True, then the current status of the valve is that it is broken
            if (isBroken):
                # The valve has been fixed and is back to its normal status
                if (self._y[i] == normal_status):
                    isBroken = False
                    counter += 1
                    # Save everything from the small_list into the big_list
                    small_list = np.vstack(small_list)
                    big_list.append(small_list)
                    small_list = list()
                    small_list.append(data_samples[i, :])
            # The current status of the valve is that it is not broken
            else:
                if (self._y[i] != normal_status):
                    isBroken = True
                    # small_list = np.append(data_samples[i, :], small_list)
                small_list.append(data_samples[i, :])
        print("Splitting into samples:", datetime.now() - startTime)
        print("counter:", counter)
        return big_list, counter
#
#
#
#
#
#
#
# # Private
# def find_samples(self, data_samples):
#
# '''
# Assumptions made when using this function
# 1.) The valve always starts of as NOT BROKEN. First faultType value is 20.
# 2.) Function is used to entire dataset and not in chunks
# '''
#
# # TODO: handle cases when the first readings starts of as a broken valve
# # TODO: ask David if he wants a minimum amount of samples in the dataset
#
# small_list, big_list = list(), list()``
# normal_status = 20.0
# isBroken = False
# # Counter for the number of samples there are in the dataset
# counter = 0
#
# for i in range(len(self._y)):
# # If True, then the current status of the valve is that it is broken
# if (isBroken):
# # The valve has been fixed and is back to its normal status
# if (self._y[i] == normal_status):
# isBroken = False
# counter += 1
# # Save everything from the small_list into the big_list
# small_list = np.vstack(small_list)
# big_list.append(small_list)
# # Clear the small_list (reinitialize)
# small_list = list()
# small_list.append(data_samples[i, :])
# # The current status of the valve is that it is not broken
# else:
# # Broken valve discovered
# if (self._y[i] != normal_status):
# isBroken = True
# small_list.append(data_samples[i, :])
#
# # SPECIAL CASE: the simulation does not end with a fixed valve. Therefore we shall whatever is inside the small_list and say that it is an entire sample
# if (self._y[i] != 20):
# counter += 1
# small_list = np.vstack(small_list)
# big_list.append(small_list)
#
# return big_list, counter
# Public
    def load_data(self, verbose = 0, cross_validation_ratio = 0, test_ratio = 0, unroll = True):
        """Load the data using the specified parameters"""

        '''
        TODO: extracting data from MySQL database using SQLALCHEMY
        Functions called here: generate_df_with_rul(self, df), generate_train_arrays(self, cross_validation_ratio = 0), generate_test_arrays(self),
        create_sequenced_train_data(self), create_sequenced_test_data(self)
        X: df[timestamp, ..., selectedFault]
        y: df['faultType']
        '''
        # dataPoints = self._sqlsession.query(ValveReading)
        if verbose == 1:
            print("Loading data for dataset {} with window_size of {}, stride of {}. Cros-Validation ratio {}".format(self._dataset_number,
                self._sequence_length, self._sequence_stride, cross_validation_ratio))

        # Validate the split ratios before touching any data.
        if cross_validation_ratio < 0 or cross_validation_ratio > 1:
            print("Error, cross validation must be between 0 and 1")
            return

        if test_ratio < 0 or test_ratio > 1:
            print("Error, test ratio must be between 0 and 1")
            return

        if cross_validation_ratio + test_ratio > 1:
            print("Sum of cross validation and test ratios is greater than 1. Need to pick smaller ratios.")
            return

        # NOTE(review): num_samples is only assigned inside this branch; a
        # second call (when _load_from_db is False) would hit a NameError at
        # train_cv_test_split below — confirm and fix upstream.
        if self._load_from_db == True:
            print("Loading data from database")
            # These variables are where the entire data is saved at
            self.extract_data_from_db()
            # One hot encoding
            output_one_hot_matrix = self.one_hot_encode(self._df.shape[0])
            # Finds samples within the inputs
            self._X, num_samples = self.find_samples(self._X)
            self._y, _ = self.find_samples(output_one_hot_matrix)
            # self._df_train = self.load_db_into_df(self._file_train_data)
            # self._df_test = self.load_db_into_df(self._file_test_data)
            # self._df_train, num_units, trimmed_rul_train = self.generate_df_with_rul(self._df_train)
        else:
            print("Loading data from memory")

        # Reset arrays
        """
        self._X_train_list = list()
        self._X_crossVal_list = list()
        self._X_test_list = list()
        self._y_train_list = list()
        self._y_crossVal_list = list()
        self._y_test_list = list()
        """
        # Split up the data into its different samples
        # Modify properties in the parent class, and let the parent class finish the data processing
        self.train_cv_test_split(cross_validation_ratio, test_ratio, num_samples)
        self.print_sequence_shapes()

        # Unroll = True for ANN
        # Unroll = False for RNN
        self.generate_train_data(unroll)
        self.generate_crossValidation_data(unroll)
        self.generate_test_data(unroll)

        self._load_from_db = False  # As long as the dataframe doesnt change, there is no need to reload from file
# Private
def train_cv_test_split(self, cross_validation_ratio, test_ratio, num_samples):
''' From the dataframes generate the feature arrays and their labels'''
print("split_samples num_samples:", num_samples)
print("cross_validation_ratio:", cross_validation_ratio)
print("test_ratio:", test_ratio)
startTime = datetime.now()
X_train_list, y_train_list = list(), list()
X_crossVal_list, y_crossVal_list = list(), list()
X_test_list, y_test_list = list(), list()
if cross_validation_ratio < 0 or cross_validation_ratio > 1:
print("Error, cross validation must be between 0 and 1")
return
if test_ratio < 0 or test_ratio > 1:
print("Error, test ratio must be between 0 and 1")
return
if cross_validation_ratio != 0 or test_ratio != 0:
self._X_train_list, self._y_train_list, self._X_crossVal_list, self._y_crossVal_list, self._X_test_list, self._y_test_list = self.split_samples(cross_validation_ratio, test_ratio, num_samples)
print("Train, cv, and test splitting:",datetime.now() - startTime)
print()
# Private
    def split_samples(self, cross_validation_ratio, test_ratio, num_samples):
        '''Split the samples according to their respective ratios'''
        # Shuffle sample indices so the split is random.
        shuffled_samples = list(range(0, num_samples))
        random.shuffle(shuffled_samples)

        num_crossVal = int(cross_validation_ratio * num_samples)
        num_test = int(test_ratio * num_samples)
        num_train = num_samples - num_crossVal - num_test

        X_train_list, y_train_list = list(), list()
        X_crossVal_list, y_crossVal_list = list(), list()
        X_test_list, y_test_list = list(), list()

        print(self._y[0])

        # Training samples keep the full label sequence.
        for i in range(num_train):
            X_train_list.append(self._X[shuffled_samples[i]])
            y_train_list.append(self._y[shuffled_samples[i]])

        # NOTE(review): cross-validation and test labels keep only the LAST
        # time step (reshaped to (1, 20)), while training labels keep the
        # whole sequence — confirm this asymmetry is intentional (the
        # commented-out experiments nearby suggest it was still in flux).
        for j in range(num_train, num_train + num_crossVal):
            X_crossVal_list.append(self._X[shuffled_samples[j]])
            y_crossVal_list.append(self._y[shuffled_samples[j]][-1].reshape(1, 20))

        for k in range(num_train + num_crossVal, num_samples):
            X_test_list.append(self._X[shuffled_samples[k]])
            y_test_list.append(self._y[shuffled_samples[k]][-1].reshape(1, 20))

        return X_train_list, y_train_list, X_crossVal_list, y_crossVal_list, X_test_list, y_test_list
# def train_cv_test_split(self, cross_validation_ratio = 0, test_ratio = 0, num_samples):
# """From the dataframes generate the feature arrays and their labels"""
#
# '''
# Functions called here: split_samples(self, df, splitting_ratio), generate_cross_validation_from_df(self, df, sequence_length)
# '''
#
# X_train_list, y_train_list = list(), list()
# X_crossVal_list, y_crossVal_list = list(), list()
# X_test_list, y_test_list = list()
#
# if cross_validation_ratio < 0 or cross_validation_ratio > 1 :
# print("Error, cross validation must be between 0 and 1")
# return
#
# if test_ratio < 0 or test_ratio > 1 :
# print("Error, test ratio must be between 0 and 1")
# return
#
# if cross_validation_ratio != 0 or test_ratio != 0:
# X_train_list, X_test_list, X_crossVal_list, y_crossVal_list, y_train_list, y_test_list = self.split_samples(cross_validation_ratio, test_ratio, num_samples)
#
# return X_train_list, y_train_list, X_crossVal_list, y_crossVal_list, X_test_list, y_test_list
# Private
# def split_samples(self, cross_validation_ratio, test_ratio, num_samples):
# """Split the samples according to their respective ratios"""
#
# shuffled_samples = list(range(0, num_samples))
# random.shuffle(shuffled_samples)
#
# num_crossVal = int(cross_validation_ratio * num_samples)
# num_test = int(test_ratio * num_samples)
# num_train = num_samples - num_crossVal - num_test
#
# X_train_list, y_train_list = list(), list()
# X_crossVal, y_crossVal_list = list(), list()
# X_test_list, y_test_list = list(), list()
#
# for i in range(num_train):
# X_train_list.append(self._X[shuffled_samples[i]])
# y_train_list.append(self._y[shuffled_samples[i]])
#
# for j in range(num_train, num_train + num_crossVal):
# X_crossVal.append(self._X[shuffled_samples[j]])
# y_crossVal_list.append(self._y[shuffled_samples[j]])
#
# for k in range(num_train + num_crossVal, num_samples):
# X_test.append(self._X[shuffled_samples[k]])
# y_test_list.append(self._y[shuffled_samples[k]])
#
# return X_train_list, X_test, X_crossVal, y_crossVal_list, y_train_list, y_test
#Property definition
@property
def df(self):
return self._df
@df.setter
def df(self, df):
self._df = df
@property
def X(self):
return self.X
@X.setter
def X(self, X):
self.X = X
@property
def y(self):
return self._y
@y.setter
def df(self, y):
self._y = y
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self,start_time):
self._start_time = start_time
@property
def sqlsession(self):
return self._sqlsession
@sqlsession.setter
def sqlsession(self,sqlsession):
self._sqlsession = sqlsession
    def __str__(self):
        # NOTE(review): this formats attributes (self._timestamp,
        # self._externalControllerOutput, self.pressureValveInlet, ...) that
        # ValveDataHandler's __init__ never defines — the method looks copied
        # from the ValveReading ORM class and would raise AttributeError if
        # called; confirm and fix upstream.
        return "<ValveReading(timestamp='%s',externalControllerOutput='%s',undisturbedMediumFlow='%s',pressureValveInlet='%s',pressureValveOutlet='%s',mediumTemperature='%s',\
        rodDisplacement='%s',disturbedMediumFlow='%s',selectedFault='%s',faultType='%s',faultIntensity='%s')>"\
        %(str(self._timestamp),self._externalControllerOutput,self._undisturbedMediumFlow,self.pressureValveInlet,\
        self.pressureValveOutlet,self.mediumTemperature,self.rodDisplacement,self.disturbedMediumFlow,self.selectedFault,\
        self.faultType,self.faultIntensity)
# def selectedFeatures(self):
# return self._selectedFeatures
#
# @selectedFeatures.setter
# def selectedFeatures(self, selectedFeatures):
# self._selectedFeatures = selectedFeatures
#
# @property
# def max_rul(self):
# return self._max_rul
#
# @max_rul.setter
# def max_rul(self, max_rul):
# self._max_rul = max_rul
#
# @property
# def rectify_labels(self):
# return self._rectify_labels
#
# @rectify_labels.setter
# def rectify_labels(self, rectify_labels):
# self._rectify_labels = rectify_labels
#
# #ReadOnly Properties
#
# @property
# def dataset_number(self):
# return self._dataset_number
#
# @property
# def data_folder(self):
# return self._data_folder
#
# @property
# def file_train_data(self):
# return self._file_train_data
#
# @property
# def file_test_data(self):
# return self._file_test_data
#
# @property
# def file_rul(self):
# return self._file_rul
#
# @property
# def load_from_file(self):
# return self._load_from_db
#
# @property
# def column_names(self):
# return self._column_names
#
# @property
# def df_train(self):
# return self._df_train
#
# @property
# def df_test(self):
# return self._df_test
#
#
#
# #Auxiliary functions
#
# def compute_training_rul(df_row, *args):
# """Compute the RUL at each entry of the DF"""
#
# max_rul = args[1]
# rul_vector = args[0]
# rul_vector_index = int(df_row['Unit Number']) - 1
#
#
# if max_rul > 0 and rul_vector[rul_vector_index] - df_row['Cycle'] > max_rul:
# return max_rul
# else:
# return rul_vector[rul_vector_index] - df_row['Cycle']
| 31.714286 | 195 | 0.711712 | 16,349 | 0.887279 | 0 | 0 | 530 | 0.028764 | 0 | 0 | 11,121 | 0.603549 |
ad9068a44289e0f08b1e0f06b78ce22398e4bb52 | 350 | py | Python | Course 01 - Getting Started with Python/Extra Studies/Basics/ex036.py | marcoshsq/python_practical_exercises | 77136cd4bc0f34acde3380ffdc5af74f7a960670 | [
"MIT"
] | 9 | 2022-03-22T16:45:17.000Z | 2022-03-25T20:22:35.000Z | Course 01 - Getting Started with Python/Extra Studies/Basics/ex036.py | marcoshsq/python_practical_exercises | 77136cd4bc0f34acde3380ffdc5af74f7a960670 | [
"MIT"
] | null | null | null | Course 01 - Getting Started with Python/Extra Studies/Basics/ex036.py | marcoshsq/python_practical_exercises | 77136cd4bc0f34acde3380ffdc5af74f7a960670 | [
"MIT"
] | 3 | 2022-03-22T17:03:38.000Z | 2022-03-29T17:20:55.000Z | import math
# Extra Exercise 004
"""Write a program that asks for the radius of a circle, calculates and displays its area."""
radius = float(input("Enter the radius of the circle: "))
area = math.pi * radius**2
circumference = 2 * math.pi * radius
print(
f"The area of the circle is {area:.2f} and its circumference is {circumference:.2f}"
)
| 26.923077 | 93 | 0.708571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 231 | 0.66 |
ad908fb8710091b44d78b703219fe574d5101cb4 | 4,753 | py | Python | bw_tools/modules/bw_framer/bw_framer.py | ben-wilson-github/bw_tools | 5a0701a39b5af3fcd15021a2600ff2ff1ce41284 | [
"MIT"
] | 4 | 2021-10-21T08:28:43.000Z | 2022-03-17T04:01:55.000Z | bw_tools/modules/bw_framer/bw_framer.py | ben-wilson-github/bw_tools | 5a0701a39b5af3fcd15021a2600ff2ff1ce41284 | [
"MIT"
] | null | null | null | bw_tools/modules/bw_framer/bw_framer.py | ben-wilson-github/bw_tools | 5a0701a39b5af3fcd15021a2600ff2ff1ce41284 | [
"MIT"
] | null | null | null | from __future__ import annotations
import os
from functools import partial
from pathlib import Path
from typing import TYPE_CHECKING, Dict
from PySide2.QtGui import QIcon, QKeySequence
from bw_tools.common.bw_node import BWNode
from bw_tools.modules.bw_settings.bw_settings import BWModuleSettings
from PySide2.QtWidgets import QAction
from sd.api import sdbasetypes
from sd.api.sdgraph import SDGraph
from sd.api.sdgraphobject import SDGraphObject
from sd.api.sdgraphobjectframe import SDGraphObjectFrame
from sd.api.sdhistoryutils import SDHistoryUtils
from sd.api.sdnode import SDNode
if TYPE_CHECKING:
from bw_tools.common.bw_api_tool import BWAPITool
class BWFramerSettings(BWModuleSettings):
    """Settings for the framer module, loaded from bw_framer_settings.json.

    Keys mirror those produced by get_default_settings().
    """

    def __init__(self, file_path: Path):
        super().__init__(file_path)
        # "Section;value" paths are resolved by the BWModuleSettings base class.
        self.hotkey: str = self.get("Hotkey;value")
        self.margin: float = self.get("Margin;value")
        self.default_color: list = self.get("Default Color;value")
        self.default_title: str = self.get("Default Title;value")
        self.default_description: str = self.get("Default Description;value")
def get_frames(graph_objects: list[SDGraphObject]) -> list[SDGraphObjectFrame]:
    """Return only the frame objects from a list of graph objects."""
    frames: list[SDGraphObjectFrame] = []
    for graph_object in graph_objects:
        if isinstance(graph_object, SDGraphObjectFrame):
            frames.append(graph_object)
    return frames
def delete_frames(
    graph: SDGraph,
    frames: list[SDGraphObjectFrame],
):
    """Delete every given frame object from the graph.

    Fix: the previous version used a list comprehension purely for its side
    effects, building and discarding a list of None results; a plain loop
    states the intent and avoids the throwaway allocation.
    """
    for frame in frames:
        graph.deleteGraphObject(frame)
def run_framer(
    nodes: list[SDNode],
    graph_objects: list[SDGraphObject],
    graph: SDGraph,
    settings: BWFramerSettings,
):
    """Fit a frame around the given nodes.

    Reuses the left-most existing frame in the selection if one exists
    (deleting any others); otherwise creates a new frame with the default
    title/color/description from settings.
    """
    # Extreme nodes in each direction, by node-center position.
    x0 = min(nodes, key=lambda node: node.getPosition().x)
    x1 = max(nodes, key=lambda node: node.getPosition().x)
    y0 = max(nodes, key=lambda node: node.getPosition().y)
    y1 = min(nodes, key=lambda node: node.getPosition().y)
    x0 = BWNode(x0)
    x1 = BWNode(x1)
    y0 = BWNode(y0)
    y1 = BWNode(y1)

    min_x = x0.pos.x - x0.width / 2
    max_x = x1.pos.x - x1.width / 2
    # NOTE(review): the y extents use `.width` where `.height` looks intended
    # (y1/y0 are vertical extremes) — this only works for square nodes;
    # confirm against BWNode's conventions.
    min_y = y1.pos.y - y1.width / 2
    max_y = y0.pos.y - y0.width / 2

    width = (max_x - min_x) + x1.width + settings.margin * 2
    # Extra vertical margin (3x) leaves room for the frame's title bar.
    height = (max_y - min_y) + y0.height + settings.margin * 3

    frames = get_frames(graph_objects)
    if frames:
        # Reuse the left-most selected frame and delete the rest.
        frames.sort(key=lambda f: f.getPosition().x)
        frame = frames[0]
        delete_frames(graph, frames[1:])
    else:
        frame: SDGraphObjectFrame = SDGraphObjectFrame.sNew(graph)
        frame.setTitle(settings.default_title)
        frame.setColor(
            sdbasetypes.ColorRGBA(
                settings.default_color[0],
                settings.default_color[1],
                settings.default_color[2],
                settings.default_color[3],
            )
        )
        frame.setDescription(settings.default_description)

    frame.setPosition(sdbasetypes.float2(min_x - settings.margin, min_y - settings.margin * 2))
    frame.setSize(sdbasetypes.float2(width, height))
def on_clicked_run_framer(api: BWAPITool):
    """Toolbar handler: frame the current node selection.

    Bails out when the graph type is unsupported or the package file is
    read-only; wraps the operation in a single undo group.
    """
    if not api.current_graph_is_supported:
        api.log.error("Graph type is unsupported")
        return

    pkg = api.current_package
    file_path = Path(pkg.getFilePath())
    if not os.access(file_path, os.W_OK):
        api.log.error("Permission denied to write to package")
        return

    with SDHistoryUtils.UndoGroup("Framer"):
        settings = BWFramerSettings(Path(__file__).parent / "bw_framer_settings.json")
        nodes = api.current_node_selection
        if len(nodes) == 0:
            return  # nothing selected, nothing to frame
        run_framer(
            nodes,
            api.current_graph_object_selection,
            api.current_graph,
            settings,
        )
def on_graph_view_created(graph_view_id, api: BWAPITool):
    """Add the framer action (icon, tooltip, hotkey) to a new graph view's toolbar."""
    toolbar = api.get_graph_view_toolbar(graph_view_id)
    settings = BWFramerSettings(Path(__file__).parent / "bw_framer_settings.json")
    icon = Path(__file__).parent / "resources" / "bw_framer_icon.png"
    tooltip = f"""
    Frames the selected nodes by reusing an existing frame, or drawing
    a new one.

    Shortcut: {settings.hotkey}
    """
    action = QAction()
    action.setIcon(QIcon(str(icon.resolve())))
    action.setToolTip(tooltip)
    action.setShortcut(QKeySequence(settings.hotkey))
    # Capture `api` in the closure; Qt passes no useful arguments here.
    action.triggered.connect(lambda: on_clicked_run_framer(api))
    toolbar.add_action("bw_framer", action)
def on_initialize(api: BWAPITool):
    """Register the framer's graph-view-created hook with the API tool."""
    callback = partial(on_graph_view_created, api=api)
    api.register_on_graph_view_created_callback(callback)
def get_default_settings() -> Dict:
    """Return the factory-default framer settings.

    Each entry maps a display name to a ``{"widget": <type id>, "value": ...}``
    record used by the settings UI.
    """
    def _entry(widget_type: int, value) -> Dict:
        return {"widget": widget_type, "value": value}

    return {
        "Hotkey": _entry(1, "Alt+D"),
        "Margin": _entry(2, 32),
        "Default Color": _entry(6, [0.0, 0.0, 0.0, 0.25]),
        "Default Title": _entry(1, ""),
        "Default Description": _entry(1, ""),
    }
| 32.77931 | 95 | 0.676415 | 435 | 0.091521 | 0 | 0 | 0 | 0 | 0 | 0 | 547 | 0.115085 |
ad924de515f8ae85983543885c9a8879cf74af0c | 9,674 | py | Python | CodePipeline.py | larroy/codebuild_pipeline_skeleton | 20c180e6e9e92df86c7fc38f3a90ba96b1afc711 | [
"MIT"
] | null | null | null | CodePipeline.py | larroy/codebuild_pipeline_skeleton | 20c180e6e9e92df86c7fc38f3a90ba96b1afc711 | [
"MIT"
] | null | null | null | CodePipeline.py | larroy/codebuild_pipeline_skeleton | 20c180e6e9e92df86c7fc38f3a90ba96b1afc711 | [
"MIT"
] | 1 | 2020-03-05T23:49:04.000Z | 2020-03-05T23:49:04.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Github attached AWS Code Pipeline"""
__author__ = 'Pedro Larroy'
__version__ = '0.1'
import boto3
import os
import sys
import subprocess
import logging
from troposphere import Parameter, Ref, Template, iam
from troposphere.iam import Role
from troposphere.s3 import Bucket
from troposphere.codepipeline import (
Pipeline, Stages, Actions, ActionTypeId, OutputArtifacts, InputArtifacts, Webhook,
WebhookAuthConfiguration, WebhookFilterRule,
ArtifactStore, DisableInboundStageTransitions)
import troposphere.codebuild as cb
import argparse
from awacs.aws import Allow, Statement, Principal, PolicyDocument, Policy
from awacs.sts import AssumeRole
from util import *
def create_codebuild_project(template) -> cb.Project:
    """Build the CodeBuild project resource (and its IAM role) for the pipeline.

    The service role is added to *template* as a side effect; the Project
    itself is returned so the caller can add it where appropriate. The project
    sources from and publishes artifacts to CodePipeline (Type='CODEPIPELINE').
    """
    from troposphere.codebuild import Project, Environment, Artifacts, Source
    # Small standard Linux build image; buildspec comes from the source repo.
    environment = Environment(
        ComputeType='BUILD_GENERAL1_SMALL',
        Image='aws/codebuild/standard:3.0',
        Type='LINUX_CONTAINER',
    )
    # Role assumed by CodeBuild; broad managed policies for S3, CloudWatch and
    # CodeBuild administration.
    codebuild_role = template.add_resource(
        Role(
            "CodeBuildRole",
            AssumeRolePolicyDocument=Policy(
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[AssumeRole],
                        Principal=Principal("Service", ["codebuild.amazonaws.com"])
                    )
                ]
            ),
            ManagedPolicyArns=[
                'arn:aws:iam::aws:policy/AmazonS3FullAccess',
                'arn:aws:iam::aws:policy/CloudWatchFullAccess',
                'arn:aws:iam::aws:policy/AWSCodeBuildAdminAccess',
            ],
        )
    )
    # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codebuild-project-source.html
    return Project(
        "ContinuousCodeBuild",
        Name = "ContinuousCodeBuild",
        Description = 'Continous pipeline',
        Artifacts = Artifacts(Type='CODEPIPELINE'),
        Environment = environment,
        Source = Source(Type='CODEPIPELINE'),
        ServiceRole = Ref(codebuild_role)
    )
def create_pipeline_template(config) -> Template:
    """Assemble the CloudFormation template for a GitHub-triggered CodePipeline.

    The template contains: GitHub parameters (token/owner/repo/branch), an S3
    artifact bucket, IAM roles for CloudFormation and CodePipeline, the
    CodeBuild project, a two-stage (Source -> Build) pipeline and a GitHub
    webhook that triggers it on push. *config* is currently unused here;
    parameter defaults are baked into the template.
    """
    t = Template()
    github_token = t.add_parameter(Parameter(
        "GithubToken",
        Type = "String"
    ))
    github_owner = t.add_parameter(Parameter(
        "GitHubOwner",
        Type = 'String',
        Default = 'aiengines',
        AllowedPattern = "[A-Za-z0-9-_]+"
    ))
    github_repo = t.add_parameter(Parameter(
        "GitHubRepo",
        Type = 'String',
        Default = 'codebuild_pipeline_skeleton',
        AllowedPattern = "[A-Za-z0-9-_]+"
    ))
    github_branch = t.add_parameter(Parameter(
        "GitHubBranch",
        Type = 'String',
        Default = 'master',
        AllowedPattern = "[A-Za-z0-9-_]+"
    ))
    # Bucket where CodePipeline stores stage input/output artifacts.
    artifact_store_s3_bucket = t.add_resource(Bucket(
        "S3Bucket",
    ))
    # NOTE(review): this role is added to the template but not referenced by
    # any resource below — presumably intended for a future Deploy stage.
    cloudformationrole = t.add_resource(Role(
        "CloudformationRole",
        AssumeRolePolicyDocument = PolicyDocument(
            Version = "2012-10-17",
            Statement = [
                Statement(
                    Effect = Allow,
                    Action = [AssumeRole],
                    Principal = Principal("Service", ["cloudformation.amazonaws.com"])
                )
            ]
        ),
        ManagedPolicyArns = ['arn:aws:iam::aws:policy/AdministratorAccess']
    ))
    codepipelinerole = t.add_resource(Role(
        "CodePipelineRole",
        AssumeRolePolicyDocument = PolicyDocument(
            Statement = [
                Statement(
                    Effect = Allow,
                    Action = [AssumeRole],
                    Principal = Principal("Service", ["codepipeline.amazonaws.com"])
                )
            ]
        ),
        ManagedPolicyArns = ['arn:aws:iam::aws:policy/AdministratorAccess']
    ))
    codebuild_project = t.add_resource(create_codebuild_project(t))
    pipeline = t.add_resource(Pipeline(
        "CDPipeline",
        ArtifactStore = ArtifactStore(
            Type = "S3",
            Location = Ref(artifact_store_s3_bucket)
        ),
        # DisableInboundStageTransitions = [
        #     DisableInboundStageTransitions(
        #         StageName = "Release",
        #         Reason = "Disabling the transition until "
        #                  "integration tests are completed"
        #     )
        # ],
        RestartExecutionOnUpdate = True,
        RoleArn = codepipelinerole.GetAtt('Arn'),
        Stages = [
            Stages(
                Name = "Source",
                Actions = [
                    Actions(
                        Name = "SourceAction",
                        ActionTypeId = ActionTypeId(
                            Category = "Source",
                            Owner = "ThirdParty",
                            Provider = "GitHub",
                            Version = "1",
                        ),
                        OutputArtifacts = [
                            OutputArtifacts(
                                Name = "GitHubSourceCode"
                            )
                        ],
                        # PollForSourceChanges is off because the webhook
                        # below pushes change notifications instead.
                        Configuration = {
                            'Owner': Ref(github_owner),
                            'Repo': Ref(github_repo),
                            'Branch': Ref(github_branch),
                            'PollForSourceChanges': False,
                            'OAuthToken': Ref(github_token)
                        },
                        RunOrder = "1"
                    )
                ]
            ),
            Stages(
                Name = "Build",
                Actions = [
                    Actions(
                        Name = "BuildAction",
                        ActionTypeId = ActionTypeId(
                            Category = "Build",
                            Owner = "AWS",
                            Provider = "CodeBuild",
                            Version = "1"
                        ),
                        InputArtifacts = [
                            InputArtifacts(
                                Name = "GitHubSourceCode"
                            )
                        ],
                        OutputArtifacts = [
                            OutputArtifacts(
                                Name = "BuildArtifacts"
                            )
                        ],
                        Configuration = {
                            'ProjectName': Ref(codebuild_project),
                        },
                        RunOrder = "1"
                    )
                ]
            ),
        ],
    ))
    # GitHub push webhook; filter matches pushes to the configured branch.
    t.add_resource(Webhook(
        "GitHubWebHook",
        Authentication = 'GITHUB_HMAC',
        AuthenticationConfiguration = WebhookAuthConfiguration(
            SecretToken = Ref(github_token)
        ),
        Filters = [
            WebhookFilterRule(
                JsonPath = '$.ref',
                MatchEquals = 'refs/heads/{Branch}'
            )
        ],
        TargetPipeline = Ref(pipeline),
        # NOTE(review): per the AWS::CodePipeline::Webhook docs TargetAction
        # should be the *action* name ("SourceAction"), not the stage name
        # ("Source") — verify against a deployed stack.
        TargetAction = 'Source',
        TargetPipelineVersion = pipeline.GetAtt('Version')
    ))
    return t
def parameters_interactive(template: Template) -> List[dict]:
    """Prompt on stdin for a value for every parameter of *template*.

    Parameters with a ``Default`` show it in the prompt and fall back to it
    when the user just presses Enter.

    :param template: troposphere Template whose parameters are to be filled
    :return: list of ``{'ParameterKey': ..., 'ParameterValue': ...}`` dicts
        suitable for instantiating the template
    """
    print("Please provide values for the Cloud Formation template parameters.")
    values = []
    for name, parameter in template.parameters.items():
        spec = parameter.to_dict()
        if 'Default' in spec:
            default_value = spec['Default']
            # An empty answer (falsy string) selects the default.
            entered = input(f"{name} [{default_value}]: ") or default_value
        else:
            entered = input(f"{name}: ")
        values.append({'ParameterKey': name, 'ParameterValue': entered})
    return values
def config_logging():
    """Configure process-wide logging: UTC timestamps, level from $LOGLEVEL (default INFO)."""
    import time
    # LOGLEVEL may be a level name such as "DEBUG"; setLevel accepts both
    # names and numeric levels.
    logging.getLogger().setLevel(os.environ.get('LOGLEVEL', logging.INFO))
    # Quieten the noisy HTTP logging coming from the requests library.
    logging.getLogger("requests").setLevel(logging.WARNING)
    logging.basicConfig(format='{}: %(asctime)sZ %(levelname)s %(message)s'.format(script_name()))
    # Render %(asctime)s in UTC — hence the literal "Z" suffix in the format.
    logging.Formatter.converter = time.gmtime
def script_name() -> str:
    """Return the running script's file name, stripped of any directory part."""
    return os.path.basename(sys.argv[0])
def config_argparse() -> argparse.ArgumentParser:
    """Build the command-line parser: one optional config-file path argument."""
    parser = argparse.ArgumentParser(
        description="Code pipeline",
        epilog="""
    """,
    )
    # nargs='?' makes the positional optional; falls back to config.yaml.
    parser.add_argument('config', nargs='?', default='config.yaml', help='config file')
    return parser
def main():
    """Entry point: read the YAML config, (re)create the pipeline stack.

    Deletes any existing stack of the same name, prompts interactively for
    template parameter values and instantiates the generated template.
    Returns 0 on success (suitable for sys.exit).
    """
    config_logging()
    parser = config_argparse()
    args = parser.parse_args()
    with open(args.config, 'r') as fh:
        config = yaml.load(fh, Loader=yaml.SafeLoader)
    boto3.setup_default_session(region_name=config['aws_region'], profile_name=config['aws_profile'])
    template = create_pipeline_template(config)
    logging.info(f"Creating stack {config['stack_name']}")
    # FIX: the client was previously constructed twice in a row; one is enough.
    client = boto3.client('cloudformation')
    delete_stack(client, config['stack_name'])
    param_values_dict = parameters_interactive(template)
    tparams = dict(
        TemplateBody = template.to_yaml(),
        Parameters = param_values_dict,
        Capabilities=['CAPABILITY_IAM'],
        # NOTE: pass OnFailure='DELETE' here to auto-clean failed stacks.
    )
    instantiate_CF_template(template, config['stack_name'], **tparams)
    return 0
if __name__ == '__main__':
sys.exit(main())
| 32.139535 | 113 | 0.535766 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,216 | 0.229068 |
ad92a0289e1e8498d72a439196ee28b52ed801be | 2,723 | py | Python | src/huaytools/_demo/argparse_demo.py | imhuay/studies-gitbook | 69a31c20c91d131d0fafce0622f4035b9b95e93a | [
"MIT"
] | 100 | 2021-10-13T01:22:27.000Z | 2022-03-31T09:52:49.000Z | src/huaytools/_demo/argparse_demo.py | imhuay/studies-gitbook | 69a31c20c91d131d0fafce0622f4035b9b95e93a | [
"MIT"
] | null | null | null | src/huaytools/_demo/argparse_demo.py | imhuay/studies-gitbook | 69a31c20c91d131d0fafce0622f4035b9b95e93a | [
"MIT"
] | 27 | 2021-11-01T01:05:09.000Z | 2022-03-31T03:32:01.000Z | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Time:
2021-01-18 19:20
Author:
huayang
Subject:
argparse usage demo
References:
https://docs.python.org/zh-cn/3/library/argparse.html#the-add-argument-method
"""
import argparse
def get_args(test_arg_ls: list = None):
    """Parse demo arguments, showcasing ``ArgumentParser.add_argument``.

    When *test_arg_ls* is None, argparse reads ``sys.argv[1:]`` instead.

    Demonstrated features:
      * positional vs. keyword (``-``/``--`` prefixed) arguments
      * ``required``, ``type`` and ``choices`` validation
      * the ``store_const`` and ``store_false`` actions
    """
    parser = argparse.ArgumentParser(description='argparse demo')
    # Positional argument — positionals are always required.
    parser.add_argument(
        'foo',
        type=str,
        help='示例参数1:foo,这是一个位置参数',
    )
    # Keyword argument with a long and a short spelling; the value is
    # converted with int() and then validated against `choices`.
    parser.add_argument(
        '--bar', '-b',
        required=True,
        type=int,
        choices={1, 2, 3},
        help='示例参数2:bar,这是一个关键词参数,且是必须的',
    )
    # store_const: ccc == 'CCC' when the flag appears, otherwise None.
    parser.add_argument(
        '--ccc',
        action='store_const',
        const='CCC',
        help='示例参数3:这是一个 store_const 行为的参数',
    )
    # store_false: ddd defaults to True and becomes False when the flag appears.
    parser.add_argument(
        '--ddd',
        action='store_false',
        help='这是一个 bool 类型的参数,',
    )
    return parser.parse_args(test_arg_ls)
if __name__ == '__main__':
    """
    python argparse_demo.py FOO --bar 2 --ccc --ddd
    """
    # Simulate command-line arguments (instead of reading sys.argv).
    test_arg_ls = 'FOO --bar 2 --ccc --ddd'.split(' ')
    args = get_args(test_arg_ls)
    args.some_new = 1  # new attributes can be attached to the namespace directly
    for k, v in args.__dict__.items():
        print(k, v)
    """
    foo FOO
    bar 2
    ccc CCC
    ddd False
    """
    print()
    # Without --ddd: ddd keeps its store_false default of True.
    test_arg_ls = 'FOO --bar 2 --ccc'.split(' ')
    args = get_args(test_arg_ls)
    args.ccc = 'XXX'
    for k, v in args.__dict__.items():
        print(k, v)
    """
    foo FOO
    bar 2
    ccc XXX
    ddd True
    """
    print()
    # Minimal invocation: only the required arguments.
    test_arg_ls = 'FOO --bar 2'.split(' ')
    args = get_args(test_arg_ls)
    for k, v in args.__dict__.items():
        print(k, v)
    """
    foo FOO
    bar 2
    ccc None
    ddd True
    """
| 23.474138 | 89 | 0.517811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,432 | 0.700259 |
ad93a1fe02a22cf958094f14b1d5c32570598491 | 318 | py | Python | src/test.py | qitar888/ga2016_final_project | f573f683cc2b5cb73a863f3e83f90fc3ced6454a | [
"MIT"
] | null | null | null | src/test.py | qitar888/ga2016_final_project | f573f683cc2b5cb73a863f3e83f90fc3ced6454a | [
"MIT"
] | null | null | null | src/test.py | qitar888/ga2016_final_project | f573f683cc2b5cb73a863f3e83f90fc3ced6454a | [
"MIT"
] | null | null | null | import cost_function as cf
import pic
# Load the reference image as a 50x50 RGB array and register it as the
# cost function's comparison target.
target_image = pic.pic2rgb("../data/img03.jpg", 50, 50)
cf.set_target_image(target_image)
# S-expression genome; appears to describe nested horizontal (H) / vertical
# (V) splits with ratios and colored leaves (L) — TODO confirm against
# cost_function.to_array.
s = "(H 0.73 (V 0.451 (H 0.963 (L color)(L color))(V 0.549 (L color)(L color)))(L color))"
matrix = cf.to_array(s, 50, 50, 1)
#print(matrix)
# Render the evaluated 50x50 matrix to an image file (LAB color mode).
pic.rgb2pic(matrix, 'LAB', "./master_piece.png")
| 35.333333 | 90 | 0.68239 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 144 | 0.45283 |
ad942e6997ea2601c7bed854c8c21558ab9e8c01 | 444 | py | Python | vkquick/pretty_view.py | lordralinc/vkquick | 55ca7bb34f7ae5a5b4eb148e94483b995ef9caad | [
"MIT"
] | 47 | 2021-01-12T15:27:04.000Z | 2022-03-26T19:37:54.000Z | vkquick/pretty_view.py | lordralinc/vkquick | 55ca7bb34f7ae5a5b4eb148e94483b995ef9caad | [
"MIT"
] | 40 | 2020-07-21T15:36:01.000Z | 2021-01-10T15:42:34.000Z | vkquick/pretty_view.py | lordralinc/vkquick | 55ca7bb34f7ae5a5b4eb148e94483b995ef9caad | [
"MIT"
] | 23 | 2020-07-20T03:31:11.000Z | 2021-01-07T12:18:49.000Z | import json
import pygments.formatters
import pygments.lexers
def pretty_view(mapping: dict, /) -> str:
    """Serialize *mapping* to indented JSON and colorize it for terminal output.

    Args:
        mapping: JSON-serializable dictionary to render (positional-only).

    Returns:
        The JSON dump of ``mapping`` (4-space indent, non-ASCII preserved)
        with ANSI color codes applied by Pygments' terminal formatter
        configured for light backgrounds.
    """
    dumped_mapping = json.dumps(mapping, ensure_ascii=False, indent=4)
    pretty_mapping = pygments.highlight(
        dumped_mapping,
        pygments.lexers.JsonLexer(),  # noqa
        pygments.formatters.TerminalFormatter(bg="light"),  # noqa
    )
    return pretty_mapping
| 19.304348 | 70 | 0.657658 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.15991 |
ad9529080b4c0067a7c91cc54c080ae6193fb41f | 873 | py | Python | firefox/install.py | lfkeitel/dotfiles | 7fa891451fee1834b5347638f6405afa654a55a6 | [
"BSD-3-Clause"
] | 2 | 2018-11-19T07:57:00.000Z | 2020-04-01T22:42:45.000Z | firefox/install.py | lfkeitel/dotfiles | 7fa891451fee1834b5347638f6405afa654a55a6 | [
"BSD-3-Clause"
] | 19 | 2017-10-13T02:42:47.000Z | 2020-08-13T20:42:28.000Z | firefox/install.py | lfkeitel/dotfiles | 7fa891451fee1834b5347638f6405afa654a55a6 | [
"BSD-3-Clause"
] | null | null | null | from pathlib import Path
from configparser import ConfigParser
from utils.installer import Installer
from utils.chalk import print_header
from utils.utils import link_file
import utils.platform as platform
MOZILLA_DIR = Path.home().joinpath(".mozilla", "firefox")
SCRIPT_DIR = Path(__file__).parent
class Main(Installer):
    """Installer step that links this repo's user.js into the default Firefox profile."""

    def run(self):
        # Only the Linux ~/.mozilla layout is handled; skip on macOS.
        if platform.is_mac:
            return
        print_header("Setting up Firefox profile")
        profiles = ConfigParser()
        profiles.read(MOZILLA_DIR.joinpath("profiles.ini"))
        # profiles.ini marks the default profile with Default=1 in its section.
        default_profile = None
        for _, section in profiles.items():
            if section.get("Default", fallback=0) == "1":
                default_profile = section
                break
        # FIX: previously default_profile was initialised to "" and a missing
        # default profile crashed on str.get(); bail out explicitly instead.
        if default_profile is None:
            return
        profile_dir = MOZILLA_DIR.joinpath(default_profile.get("Path"))
        link_file(SCRIPT_DIR.joinpath("user.js"), profile_dir.joinpath("user.js"))
| 28.16129 | 82 | 0.667812 | 569 | 0.651775 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.113402 |
ad9650632cdafda2546e12eb7a9297786c36ea75 | 917 | py | Python | Example/wangyi.py | Willshon/Python | a10bba4a1e4b7deb3dce12fa11b4fee6f07f91e0 | [
"MIT"
] | null | null | null | Example/wangyi.py | Willshon/Python | a10bba4a1e4b7deb3dce12fa11b4fee6f07f91e0 | [
"MIT"
] | null | null | null | Example/wangyi.py | Willshon/Python | a10bba4a1e4b7deb3dce12fa11b4fee6f07f91e0 | [
"MIT"
] | null | null | null | # 网易云音乐批量下载
# By Tsing
# Python3.4.4
import requests
# FIX: `import urllib` alone does not load the `urllib.request` submodule;
# the call below only worked when another library had imported it already.
import urllib.request
# Chart playlists — batch download
# r = requests.get('http://music.163.com/api/playlist/detail?id=2884035') # NetEase original songs chart
# r = requests.get('http://music.163.com/api/playlist/detail?id=19723756') # Cloud Music surging chart
# r = requests.get('http://music.163.com/api/playlist/detail?id=3778678') # Cloud Music hot songs chart
r = requests.get('http://music.163.com/api/playlist/detail?id=3779629') # Cloud Music new songs chart
# User playlists — batch download
# r = requests.get('http://music.163.com/api/playlist/detail?id=123415635') # Cloud Music playlist (Chinese style)
# r = requests.get('http://music.163.com/api/playlist/detail?id=122732380') # Cloud Music playlist (ballads)
arr = r.json()['result']['tracks'] # the playlist holds 100 tracks
for i in range(10): # number of tracks to download, 1 to 100
    name = str(i+1) + ' ' + arr[i]['name'] + '.mp3'
    link = arr[i]['mp3Url']
    urllib.request.urlretrieve(link, '网易云音乐\\' + name) # the target folder must exist beforehand
print(name + ' 下载完成') | 38.208333 | 103 | 0.651036 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 928 | 0.792485 |
ad974a16f5e2eba5c65ce3b727dca372dec76010 | 1,353 | py | Python | cloudkittyclient/v1/info.py | mmariani/python-cloudkittyclient | 92f51ded48b261231f226669f75c52f199584d5c | [
"Apache-2.0"
] | null | null | null | cloudkittyclient/v1/info.py | mmariani/python-cloudkittyclient | 92f51ded48b261231f226669f75c52f199584d5c | [
"Apache-2.0"
] | null | null | null | cloudkittyclient/v1/info.py | mmariani/python-cloudkittyclient | 92f51ded48b261231f226669f75c52f199584d5c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from cloudkittyclient.v1 import base
class InfoManager(base.BaseManager):
    """Manager handling the /v1/info endpoint."""

    url = '/v1/info/{endpoint}/{metric_name}'

    def _fetch_json(self, endpoint, kwargs):
        # Resolve the endpoint URL and return the decoded JSON body.
        return self.api_client.get(self.get_url(endpoint, kwargs)).json()

    def get_metric(self, **kwargs):
        """Return info about a service; without metric_name, about all services.

        :param metric_name: Name of the service on which you want information
        :type metric_name: str
        """
        return self._fetch_json('metrics', kwargs)

    def get_config(self, **kwargs):
        """Return the current configuration."""
        return self._fetch_json('config', kwargs)
ad97e5e6daa0d77eaf62c07024df8c1d58d486a8 | 2,450 | py | Python | PiCN/Layers/PacketEncodingLayer/BasicPacketEncodingLayer.py | NikolaiRutz/PiCN | 7775c61caae506a88af2e4ec34349e8bd9098459 | [
"BSD-3-Clause"
] | null | null | null | PiCN/Layers/PacketEncodingLayer/BasicPacketEncodingLayer.py | NikolaiRutz/PiCN | 7775c61caae506a88af2e4ec34349e8bd9098459 | [
"BSD-3-Clause"
] | null | null | null | PiCN/Layers/PacketEncodingLayer/BasicPacketEncodingLayer.py | NikolaiRutz/PiCN | 7775c61caae506a88af2e4ec34349e8bd9098459 | [
"BSD-3-Clause"
] | null | null | null | """ De- and Encoding Layer, using a predefined Encoder """
import multiprocessing
from PiCN.Layers.PacketEncodingLayer.Encoder import BasicEncoder
from PiCN.Processes import LayerProcess
class BasicPacketEncodingLayer(LayerProcess):
    """De- and encoding layer, delegating to a predefined BasicEncoder.

    Queue elements are ``[face_id, packet]`` pairs; packets travelling down
    are encoded, packets travelling up are decoded.
    """

    def __init__(self, encoder: BasicEncoder=None, log_level=255):
        LayerProcess.__init__(self, logger_name="PktEncLayer", log_level=log_level)
        self._encoder: BasicEncoder = encoder

    @property
    def encoder(self):
        return self._encoder

    @encoder.setter
    def encoder(self, encoder):
        self._encoder = encoder

    def data_from_higher(self, to_lower: multiprocessing.Queue, to_higher: multiprocessing.Queue, data):
        """Encode a packet coming from the layer above and push it down."""
        face_id, packet = self.check_data(data)
        # FIX: use `is None` consistently (previously `face_id == None`).
        if face_id is None or packet is None:
            return
        self.logger.info("Packet from higher, Faceid: " + str(face_id) + ", Name: " + str(packet.name))
        encoded_packet = self.encode(packet)
        if encoded_packet is None:
            self.logger.info("Dropping Packet since None")
            return
        to_lower.put([face_id, encoded_packet])

    def data_from_lower(self, to_lower: multiprocessing.Queue, to_higher: multiprocessing.Queue, data):
        """Decode a packet coming from the layer below and push it up."""
        face_id, packet = self.check_data(data)
        # FIX: use `is None` consistently (previously `== None` twice).
        if face_id is None or packet is None:
            return
        decoded_packet = self.decode(packet)
        if decoded_packet is None:
            self.logger.info("Dropping Packet since None")
            return
        self.logger.info("Packet from lower, Faceid: " + str(face_id) + ", Name: " + str(decoded_packet.name))
        to_higher.put([face_id, decoded_packet])

    def encode(self, data):
        self.logger.info("Encode packet")
        return self._encoder.encode(data)

    def decode(self, data):
        self.logger.info("Decode packet")
        return self._encoder.decode(data)

    def check_data(self, data):
        """Validate a queue element; return (face_id, packet) or (None, None)."""
        if len(data) != 2:
            self.logger.warning("PacketEncoding Layer expects queue elements to have size 2")
            return (None, None)
        if type(data[0]) != int:
            self.logger.warning("PacketEncoding Layer expects first element to be a faceid (int)")
            return (None, None)
        #TODO test if data[1] has type packet or bin data? howto?
        return data[0], data[1]
| 38.888889 | 110 | 0.655102 | 2,259 | 0.922041 | 0 | 0 | 140 | 0.057143 | 0 | 0 | 529 | 0.215918 |
ad9a51daee278697203eb4c710d5438cdf50f670 | 6,187 | py | Python | bounce2.py | Yokohama-Miyazawa/bounce_games | 6dcc23a254cdae56e4bdf9bf40a7609d5995e4e6 | [
"MIT"
] | 4 | 2019-03-03T01:18:56.000Z | 2021-02-26T19:53:01.000Z | bounce2.py | Yokohama-Miyazawa/bounce_games | 6dcc23a254cdae56e4bdf9bf40a7609d5995e4e6 | [
"MIT"
] | null | null | null | bounce2.py | Yokohama-Miyazawa/bounce_games | 6dcc23a254cdae56e4bdf9bf40a7609d5995e4e6 | [
"MIT"
] | 1 | 2021-12-12T00:52:02.000Z | 2021-12-12T00:52:02.000Z | from tkinter import *
import random
import time
class Widget(object):
    """Base class for everything that moves on the canvas.

    Holds the owning canvas, geometry and a mutable ``[dx, dy]`` speed
    vector; subclasses create the actual canvas item and set ``self.id``.
    """

    def __init__(self, window, size, color, pos, speed=None):
        # BUGFIX: the default speed used to be the mutable literal [0, 0],
        # shared by every instance constructed without an explicit speed;
        # in-place mutations (e.g. COM_Racket's `self.speed[0] = ...`)
        # leaked across instances. Create a fresh list per instance instead.
        self.window = window
        self.size = size
        self.color = color
        self.pos = pos
        self.speed = [0, 0] if speed is None else speed

    def acty(self):
        # Move the canvas item by the current speed vector.
        self.window.move(self.id, self.speed[0], self.speed[1])

    def xturn(self):
        # Reverse the horizontal direction.
        self.speed[0] *= -1

    def yturn(self):
        # Reverse the vertical direction.
        self.speed[1] *= -1

    def current_speed(self):
        # Current [dx, dy] speed vector.
        return self.speed
class Ball(Widget):
    """Round ball widget; knows how to detect collisions with a bar."""

    def __init__(self, window, size, color, pos, speed):
        super().__init__(window, size, color, pos, speed)
        x, y = self.pos
        self.id = self.window.create_oval(
            x, y, x + self.size, y + self.size, fill=self.color)

    def current_place(self):
        # Bounding box [x0, y0, x1, y1] of the canvas item.
        return self.window.coords(self.id)

    def hit_check(self, obj):
        """Return 1 when the ball overlaps *obj* horizontally (by center) and vertically."""
        x0, y0, x1, y1 = self.current_place()
        ox0, oy0, ox1, oy1 = obj.current_place()
        center_x = (x0 + x1) / 2
        horizontal = ox0 < center_x < ox1
        vertical = y0 <= oy1 and y1 >= oy0
        return 1 if (horizontal and vertical) else 0
class Bar(Widget):
    """Rectangular widget with a score counter."""

    def __init__(self, window, size, color, pos):
        super().__init__(window, size, color, pos)
        self.point = 0
        x, y = self.pos
        width, height = self.size
        self.id = self.window.create_rectangle(
            x, y, x + width, y + height, fill=self.color)

    def current_place(self):
        # Bounding box [x0, y0, x1, y1] of the canvas item.
        return self.window.coords(self.id)

    def current_point(self):
        # Current score.
        return self.point

    def add_point(self, add=1):
        # Increase the score (by 1 unless told otherwise).
        self.point += add
class Player_Racket(Bar):
    """Keyboard-controlled racket: moves left/right by `step` pixels per key press."""

    def __init__(self, window, size, color, pos, step=10):
        super().__init__(window, size, color, pos)
        self.step = step
        self.window.bind_all('<Key>', self.control)

    def control(self, event):
        # Map arrow keys to a horizontal direction; ignore every other key.
        direction = {"Right": 1, "Left": -1}.get(event.keysym)
        if direction is None:
            return
        self.speed = [direction * self.step, 0]
        self.acty()
class COM_Racket(Bar): # Bar subclass: the computer-controlled racket
    def __init__(self, window, size, color, pos, step=10, count=10,
                 distance=100):
        super().__init__(window, size, color, pos)
        self.step = step                # max horizontal speed magnitude
        self.count_range = count        # frames between speed re-rolls
        self.counter = 0                # frame counter since the last re-roll
        self.distance = distance        # max horizontal distance allowed from the ball
    def control(self, obj):
        """Advance the racket one frame, loosely tracking *obj* (the ball)."""
        self.counter += 1
        # Every `count_range` frames pick a new random horizontal speed.
        if self.counter == self.count_range:
            self.counter = 0
            self.speed[0] = random.randrange(-self.step, self.step)
        # Turn around when moving away from the ball while already more than
        # `distance` pixels behind it (checked on either side).
        if (obj.current_place()[0] - self.current_place()[0] >=
                self.distance and self.speed[0] < 0) \
            or (self.current_place()[2] - obj.current_place()[2] >=
                self.distance and self.speed[0] > 0):
            self.xturn()
        self.acty()
# Window setup
tk = Tk()
canvas_size = [500, 400]
canvas = Canvas(tk, width=canvas_size[0], height=canvas_size[1])
tk.title("熱くなれよ!!!")
canvas.pack()
# Scoreboard: static labels...
canvas.create_text(50, 150, text='COM', fill='green', font=('メイリオ', 20))
canvas.create_text(50, 250, text='YOU', fill='red', font=('メイリオ', 20))
canvas.create_text(50, 200, text='TIME', fill='purple', font=('メイリオ', 20))
# ...and the dynamic items updated each frame via show_score()/show_time().
enemy_score = canvas.create_text(130, 150, fill='green', font=('メイリオ', 20))
my_score = canvas.create_text(130, 250, fill='red', font=('メイリオ', 20))
play_time = canvas.create_text(130, 200, fill='purple', font=('メイリオ', 20))
def show_score(player_score, score):
    # Update the given canvas text item to show the current score.
    canvas.itemconfig(player_score, text=str(score))
def show_time(time_text, time_game):
    # Update the given canvas text item to show elapsed seconds.
    canvas.itemconfig(time_text, text=str(time_game))
# Match settings: first to `finish_point` points wins
finish_point = 3
# Ball and racket geometry
ball_radius = 50
ball_start = [random.randrange(50, 400), random.randrange(50, 100)]
ball_init_speed = [2.0, 2.0]
bar_size = [100, 10]
player_start = [200, 340]
# COM (computer opponent) settings
com_start = [200, 50]
com_distance = 100
# Create the ball and both rackets
ball = Ball(canvas, ball_radius, 'blue', ball_start, ball_init_speed)
player_racket = Player_Racket(canvas, bar_size, 'red', pos=player_start)
com_racket = COM_Racket(canvas, bar_size, 'green', pos=com_start,
                        distance=com_distance)
# Clock setup: whole seconds from the monotonic performance counter
game_start = int(time.perf_counter())
game_time = game_start
while True:
    ball.acty() # move the ball one step
    ball_pos = ball.current_place()
    ball_speed = ball.current_speed()
    com_racket.control(ball) # move the COM racket
    # Refresh the on-screen scores and clock
    show_score(my_score, player_racket.current_point())
    show_score(enemy_score, com_racket.current_point())
    show_time(play_time, game_time-game_start)
    now_time = int(time.perf_counter())
    if now_time - game_time >= 1: # advance the displayed clock once per second
        game_time = now_time
    if player_racket.current_point() >= finish_point: # player wins
        judge_text = 'YOU WIN'
        judge_color = 'blue'
        break
    if com_racket.current_point() >= finish_point: # COM wins
        judge_text = 'YOU LOSE'
        judge_color = 'red'
        break
    if ball_pos[2] >= canvas_size[0] or ball_pos[0] <= 0:
        ball.xturn()
    if ball_pos[3] >= canvas_size[1]: # ball left the bottom edge: COM scores
        com_racket.add_point()
        ball.yturn()
    if ball_pos[1] <= 0: # ball left the top edge: player scores
        player_racket.add_point()
        ball.yturn()
    if (ball.hit_check(player_racket) == 1 and ball_speed[1] > 0) \
        or (ball.hit_check(com_racket) == 1 and ball_speed[1] < 0):
        ball.yturn() # bounce off whichever racket the ball is moving towards
    tk.update()
    time.sleep(0.01)
# Announce the result, then keep the window open for a moment
canvas.create_text(250, 200, text=judge_text,
                   fill=judge_color, font=('メイリオ', 30))
tk.update()
time.sleep(10)
| 31.090452 | 76 | 0.591563 | 3,673 | 0.536753 | 0 | 0 | 0 | 0 | 0 | 0 | 1,224 | 0.178869 |
ad9ab3e056d036c86285fad9ebbf157d0f0bf489 | 4,074 | py | Python | simpleblog/blog/views.py | GrayAn/simpleblog | a3be8e3a6edf25caad80ae8013fcc6ea4e8003d4 | [
"MIT"
] | null | null | null | simpleblog/blog/views.py | GrayAn/simpleblog | a3be8e3a6edf25caad80ae8013fcc6ea4e8003d4 | [
"MIT"
] | null | null | null | simpleblog/blog/views.py | GrayAn/simpleblog | a3be8e3a6edf25caad80ae8013fcc6ea4e8003d4 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
from django.http import JsonResponse, HttpResponse, HttpResponseForbidden
from django.views import generic
from .models import Post, Vote
class IndexView(generic.ListView):
    """Paginated list of all posts, orderable by creation time or rating."""
    context_object_name = 'posts'
    model = Post
    paginate_by = 50
    template_name = 'blog/index.html'

    # Ordering keys the user may request (?ordering=...), with display labels.
    available_orderings = {
        'created': 'Creation time',
        'rating': 'Rating',
    }

    def get_ordering(self):
        """Return the requested ordering (with optional '-' prefix), defaulting to '-created'."""
        ordering = self.request.GET.get('ordering')
        # Reject anything not whitelisted in available_orderings.
        if ordering is None or ordering.strip('-') not in self.available_orderings:
            ordering = '-created'
        return ordering

    def get_queryset(self):
        queryset = super().get_queryset()
        # Avoid an N+1 query for each post's author in the template.
        return queryset.prefetch_related('author')

    def get_context_data(self, *, object_list=None, **kwargs):
        """Add ordering info and the current user's votes ({post_id: up}) to the context."""
        context = super().get_context_data(object_list=object_list, **kwargs)
        context['ordering_with_direction'] = self.get_ordering()
        context['ordering'] = context['ordering_with_direction'].strip('-')
        context['available_orderings'] = self.available_orderings
        if self.request.user.is_authenticated:
            votes = Vote.objects.filter(author=self.request.user)
            context['votes'] = {vote.post_id: vote.up for vote in votes}
        else:
            context['votes'] = {}
        return context
class AuthorView(IndexView):
    """Post list restricted to a single author (URL kwarg `author_id`)."""
    template_name = 'blog/author.html'

    def get_queryset(self):
        return super().get_queryset().filter(author=self.kwargs['author_id'])

    def get_context_data(self, *, object_list=None, **kwargs):
        """Add the author object to the context when the user exists."""
        context = super().get_context_data(object_list=object_list, **kwargs)
        try:
            # Unknown author ids simply leave 'author' out of the context.
            context['author'] = User.objects.get(pk=self.kwargs['author_id'])
        except User.DoesNotExist:
            pass
        return context
class CreateView(generic.CreateView):
    """Create a new post authored by the logged-in user."""
    fields = ('title', 'text')
    model = Post
    template_name = 'blog/create.html'

    def form_valid(self, form):
        user = self.request.user
        # Anonymous users may not create posts.
        if not user.is_authenticated:
            return HttpResponse(status=401)
        form.instance.author = user
        return super().form_valid(form)
class UpdateView(generic.UpdateView):
    """Edit an existing post; only its author may save changes."""
    fields = ('title', 'text')
    model = Post
    pk_url_kwarg = 'post_id'
    template_name = 'blog/create.html'

    def form_valid(self, form):
        # Reject edits coming from anyone but the post's author.
        if form.instance.author != self.request.user:
            return HttpResponseForbidden()
        return super().form_valid(form)
class DetailView(generic.DetailView):
    """Single post page, including the current user's vote on it."""
    context_object_name = 'post'
    model = Post
    pk_url_kwarg = 'post_id'
    template_name = 'blog/details.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        user = self.request.user
        votes = {}
        if user.is_authenticated:
            # At most one vote: the query is filtered to this user and post.
            user_votes = Vote.objects.filter(
                author=user, post_id=self.kwargs['post_id'])
            votes = {vote.post_id: vote.up for vote in user_votes}
        context['votes'] = votes
        return context
def cast_vote(request, post_id, direction):
    """Cast, flip or retract the authenticated user's vote on a post.

    Any existing vote is removed first. A new vote is then created unless the
    request repeats the previous direction (which toggles the vote off).
    Returns a JsonResponse with the resulting 'vote' (True/False/None) plus
    the post's 'upvotes'/'downvotes'; 404 JSON when the post does not exist.
    """
    try:
        post = Post.objects.get(pk=post_id)
    except Post.DoesNotExist:
        return JsonResponse({'code': 404, 'msg': 'Post {} does not exist'.format(post_id)}, status=404)
    up = bool(direction)
    try:
        vote = Vote.objects.get(post=post, author=request.user)
    except Vote.DoesNotExist:
        previous = None
    else:
        previous = vote.up
        post = vote.post  # The same post but with updated ratings after vote removal
        vote.delete()
    # FIX: the original compared with `is not bool(direction)`, relying on
    # True/False being singletons; equality is the robust comparison here.
    if previous != up:
        vote = Vote()
        vote.post = post
        vote.author = request.user
        vote.up = up
        vote.save()
        data = {
            'vote': vote.up,
        }
    else:
        # Same direction as before: the vote was toggled off.
        data = {
            'vote': None,
        }
    data['upvotes'] = post.upvotes
    data['downvotes'] = post.downvotes
    return JsonResponse(data)
| 30.631579 | 103 | 0.633284 | 2,949 | 0.723859 | 0 | 0 | 0 | 0 | 0 | 0 | 495 | 0.121502 |
ad9d9fb7a38ac292e01525e196638ba0f6d199ab | 2,303 | py | Python | pyatv/protocols/mrp/protobuf/PlayerClientPropertiesMessage_pb2.py | crxporter/pyatv | e694a210b3810c64044116bf40e7b75420b5fe75 | [
"MIT"
] | null | null | null | pyatv/protocols/mrp/protobuf/PlayerClientPropertiesMessage_pb2.py | crxporter/pyatv | e694a210b3810c64044116bf40e7b75420b5fe75 | [
"MIT"
] | null | null | null | pyatv/protocols/mrp/protobuf/PlayerClientPropertiesMessage_pb2.py | crxporter/pyatv | e694a210b3810c64044116bf40e7b75420b5fe75 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pyatv/protocols/mrp/protobuf/PlayerClientPropertiesMessage.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pyatv.protocols.mrp.protobuf import ProtocolMessage_pb2 as pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2
from pyatv.protocols.mrp.protobuf import PlayerPath_pb2 as pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_PlayerPath__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n@pyatv/protocols/mrp/protobuf/PlayerClientPropertiesMessage.proto\x1a\x32pyatv/protocols/mrp/protobuf/ProtocolMessage.proto\x1a-pyatv/protocols/mrp/protobuf/PlayerPath.proto\"^\n\x1dPlayerClientPropertiesMessage\x12\x1f\n\nplayerPath\x18\x01 \x01(\x0b\x32\x0b.PlayerPath\x12\x1c\n\x14lastPlayingTimestamp\x18\x02 \x01(\x01:W\n\x1dplayerClientPropertiesMessage\x12\x10.ProtocolMessage\x18V \x01(\x0b\x32\x1e.PlayerClientPropertiesMessage')
PLAYERCLIENTPROPERTIESMESSAGE_FIELD_NUMBER = 86
playerClientPropertiesMessage = DESCRIPTOR.extensions_by_name['playerClientPropertiesMessage']
_PLAYERCLIENTPROPERTIESMESSAGE = DESCRIPTOR.message_types_by_name['PlayerClientPropertiesMessage']
PlayerClientPropertiesMessage = _reflection.GeneratedProtocolMessageType('PlayerClientPropertiesMessage', (_message.Message,), {
'DESCRIPTOR' : _PLAYERCLIENTPROPERTIESMESSAGE,
'__module__' : 'pyatv.protocols.mrp.protobuf.PlayerClientPropertiesMessage_pb2'
# @@protoc_insertion_point(class_scope:PlayerClientPropertiesMessage)
})
_sym_db.RegisterMessage(PlayerClientPropertiesMessage)
if _descriptor._USE_C_DESCRIPTORS == False:
pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.ProtocolMessage.RegisterExtension(playerClientPropertiesMessage)
DESCRIPTOR._options = None
_PLAYERCLIENTPROPERTIESMESSAGE._serialized_start=167
_PLAYERCLIENTPROPERTIESMESSAGE._serialized_end=261
# @@protoc_insertion_point(module_scope)
| 57.575 | 500 | 0.860617 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 958 | 0.415979 |
ad9e41e35c13ed20157dba440d5e3dc168b4b9c8 | 524 | py | Python | app_challenges_sections_units/migrations/0036_auto_20190619_1903.py | Audiotuete/backend_wagtail_api | 3c5a4a610ffdbb75d45a57fc670e2ae3b7178c62 | [
"MIT"
] | null | null | null | app_challenges_sections_units/migrations/0036_auto_20190619_1903.py | Audiotuete/backend_wagtail_api | 3c5a4a610ffdbb75d45a57fc670e2ae3b7178c62 | [
"MIT"
] | null | null | null | app_challenges_sections_units/migrations/0036_auto_20190619_1903.py | Audiotuete/backend_wagtail_api | 3c5a4a610ffdbb75d45a57fc670e2ae3b7178c62 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.8 on 2019-06-19 19:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0001_squashed_0021'),
('app_challenges_sections_units', '0035_auto_20190619_1847'),
]
operations = [
migrations.RenameModel(
old_name='Slideshow',
new_name='Gallery',
),
migrations.RenameModel(
old_name='SlideshowImage',
new_name='GalleryImage',
),
]
| 22.782609 | 69 | 0.603053 | 439 | 0.837786 | 0 | 0 | 0 | 0 | 0 | 0 | 188 | 0.358779 |
ad9eadfbff4068c3a66cb70f04ac1b2216de5ebe | 2,534 | py | Python | self_organising_systems/texture_ca/losses.py | google-research/self-organizing-systems | 2f96d0b0cd6781f8a65d446dad5c5b394e5adf93 | [
"Apache-2.0"
] | 2 | 2020-02-04T08:05:52.000Z | 2020-02-04T08:06:18.000Z | self_organising_systems/texture_ca/losses.py | google-research/self-organizing-systems | 2f96d0b0cd6781f8a65d446dad5c5b394e5adf93 | [
"Apache-2.0"
] | null | null | null | self_organising_systems/texture_ca/losses.py | google-research/self-organizing-systems | 2f96d0b0cd6781f8a65d446dad5c5b394e5adf93 | [
"Apache-2.0"
] | null | null | null | from self_organising_systems.texture_ca.config import cfg
from self_organising_systems.shared.util import imread
import tensorflow as tf
import numpy as np
style_layers = ['block%d_conv1'%i for i in range(1, 6)]
content_layer = 'block4_conv2'
class StyleModel:
def __init__(self, input_texture_path):
vgg = tf.keras.applications.vgg16.VGG16(include_top=False, weights='imagenet')
vgg.trainable = False
layers = style_layers + [content_layer]
layers = {name:vgg.get_layer(name).output for name in layers}
self.model = tf.keras.Model([vgg.input], layers)
self.style_img = imread(input_texture_path, cfg.texture_ca.vgg_input_img_size)
self.target_style, _ = self.calc_style_content(self.style_img[None,...])
def run_model(self, img):
img = img[..., ::-1]*255.0 - np.float32([103.939, 116.779, 123.68])
layers = self.model(img)
style = [layers[name] for name in style_layers]
return style, layers[content_layer]
def calc_style_content(self, img):
style_layers, content = self.run_model(img)
style = [self.gram_style(a) for a in style_layers]
return style, content
@tf.function
def __call__(self, x):
gs, content = self.calc_style_content(x)
sl = tf.reduce_mean(self.style_loss(gs, self.target_style))
return sl
@tf.function
def style_loss(self, a, b):
return tf.add_n([tf.reduce_mean(tf.square(x-y), [-2, -1]) for x, y in zip(a, b)])
def gram_style(self, a):
n, h, w, ch = tf.unstack(tf.shape(a))
a = tf.sqrt(a+1.0)-1.0
gram = tf.einsum('bhwc, bhwd -> bcd', a, a)
return gram / tf.cast(h*w, tf.float32)
class Inception:
def __init__(self, layer, ch):
with tf.io.gfile.GFile(cfg.texture_ca.inception_pb, 'rb') as f:
self.graph_def = tf.compat.v1.GraphDef.FromString(f.read())
self.layer = layer
self.ch = ch
avgpool0_idx = [n.name for n in self.graph_def.node].index('avgpool0')
del self.graph_def.node[avgpool0_idx:]
# use pre_relu layers for Concat nodes
node = {n.name:n for n in self.graph_def.node}[layer]
self.outputs = [layer+':0']
if 'Concat' in node.op:
self.outputs = [inp+'_pre_relu:0' for inp in node.input[1:]]
@tf.function
def __call__(self, x):
overflow_loss = tf.reduce_mean(tf.square(tf.clip_by_value(x, 0.0, 1.0)-x))
imgs = x*255.0-117.0
outputs = tf.import_graph_def(self.graph_def, {'input':imgs}, self.outputs)
a = tf.concat(outputs, -1)
return -tf.reduce_mean(a[...,self.ch]) + overflow_loss*cfg.texture_ca.overflow_loss_coef
| 36.724638 | 92 | 0.686267 | 2,284 | 0.901342 | 0 | 0 | 633 | 0.249803 | 0 | 0 | 142 | 0.056038 |
ad9eda249a4ad6c95e811e1a7a874b595d6c8d1f | 2,713 | py | Python | edflow/hooks/runtime_input.py | rromb/edflow | 8681cadf1770ca1bc1515535768dc14cb0758b0f | [
"MIT"
] | 2 | 2021-03-10T13:42:12.000Z | 2021-03-10T14:29:53.000Z | edflow/hooks/runtime_input.py | rromb/edflow | 8681cadf1770ca1bc1515535768dc14cb0758b0f | [
"MIT"
] | null | null | null | edflow/hooks/runtime_input.py | rromb/edflow | 8681cadf1770ca1bc1515535768dc14cb0758b0f | [
"MIT"
] | null | null | null | import numpy as np
import os
import traceback
import yaml
from edflow.hooks.hook import Hook
from edflow.util import walk, retrieve, contains_key
from edflow.custom_logging import get_logger
class RuntimeInputHook(Hook):
"""Given a textfile reads that at each step and passes the results to
a callback function."""
def __init__(self, update_file, callback):
"""Args:
update_file (str): path/to/yaml-file containing the parameters of
interest.
callback (Callable): Each time something changes in the update_file
this function is called with the content of the file as
argument.
"""
self.logger = get_logger(self)
self.ufile = update_file
self.callback = callback
self.last_updates = None
if not os.path.exists(self.ufile):
msg = (
"# Automatically created file. Changes made in here will "
"be recognized during runtime."
)
with open(self.ufile, "w+") as f:
f.write(msg)
def before_step(self, *args, **kwargs):
"""Checks if something changed and if yes runs the callback."""
try:
updates = yaml.full_load(open(self.ufile, "r"))
if self.last_updates is not None:
changes = {}
def is_changed(key, val, changes=changes):
if contains_key(key, updates):
other_val = retrieve(key, updates)
change = np.any(val != other_val)
else:
# This key is new -> Changes did happen!
change = True
changes[key] = change
self.logger.debug("Pre CHANGES: {}".format(changes))
walk(self.last_updates, is_changed, pass_key=True)
self.logger.debug("Post CHANGES: {}".format(changes))
if np.any(list(changes.values())):
self.callback(updates)
self.logger.debug("Runtime inputs received.")
self.logger.debug("{}".format(updates))
self.last_updates = updates
else:
if updates is not None:
self.callback(updates)
self.logger.info("Runtime inputs received.")
self.logger.debug("{}".format(updates))
self.last_updates = updates
except Exception as e:
self.logger.error("Something bad happend :(")
self.logger.error("{}".format(e))
self.logger.error(traceback.format_exc())
| 33.493827 | 79 | 0.542941 | 2,518 | 0.928124 | 0 | 0 | 0 | 0 | 0 | 0 | 724 | 0.266863 |
ad9f4705eca81ea52720a66a8b32f82f6946af08 | 11,392 | py | Python | tools/launcher.py | agentx-cgn/Hannibal | 8157261c28bd67755dad38ef6b7862d1b736e644 | [
"JasPer-2.0"
] | 189 | 2015-01-10T07:35:16.000Z | 2021-05-05T08:21:22.000Z | tools/launcher.py | agentx-cgn/Hannibal | 8157261c28bd67755dad38ef6b7862d1b736e644 | [
"JasPer-2.0"
] | 6 | 2015-02-02T19:18:34.000Z | 2017-12-07T11:19:23.000Z | tools/launcher.py | agentx-cgn/Hannibal | 8157261c28bd67755dad38ef6b7862d1b736e644 | [
"JasPer-2.0"
] | 15 | 2016-03-14T12:27:59.000Z | 2020-04-28T23:24:05.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
https://docs.python.org/2/library/subprocess.html#popen-objects
http://stackoverflow.com/questions/1606795/catching-stdout-in-realtime-from-subprocess
http://askubuntu.com/questions/458041/find-x-window-name
http://stackoverflow.com/questions/9681959/how-can-i-use-xdotool-from-within-a-python-module-script
http://manpages.ubuntu.com/manpages/trusty/en/man1/avconv.1.html
http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
xwininfo gives window info: xwininfo: Window id: 0x2800010 "0 A.D."
xdotool:
sudo apt-get install libx11-dev libxtst-dev libXinerama-dev
make
make install
https://github.com/nullkey/glc/wiki/Capture
glc-capture --start --fps=30 --resize=1.0 --disable-audio --out=pyro.glc ./launcher.py
glc-play pyro.glc -o - -y 1 | avconv -i - -an -y pyro.mp4
avconv -i pyro.mp4 -codec copy -ss 15 -y pyro01.mp4
qt-faststart pyro01.mp4 pyro02.mp4
mplayer pyro02.mp4
'''
VERSION = "0.2.0"
import os, sys, subprocess, time, json
from time import sleep
sys.dont_write_bytecode = True
## maps etc.
from data import data
bcolors = {
"Bold": "\033[1m",
"Header" : "\033[95m",
"LBlue" : "\033[94m", ## light blue
"DBlue" : "\033[34m", ## dark blue
"OKGreen" : "\033[32m", ## dark Green
"Green" : "\033[92m", ## light green
"Warn" : "\033[33m", ## orange
"Fail" : "\033[91m",
"End" : "\033[0m",
# orange='\033[33m'
}
def printc(color, text) :
print (bcolors[color] + text + bcolors["End"])
def stdc(color, text) :
sys.stdout.write (bcolors[color] + text + bcolors["End"])
folders = {
"pro" : "/home/noiv/Desktop/0ad", ## project
"rel" : "/usr/games/0ad", ## release
"trunk" : "/Daten/Projects/Osiris/ps/trunk", ## svn
"share" : "/home/noiv/.local/share", ## user mod
}
## the game binary
locations = {
"rel" : folders["rel"], ## release
"svn" : folders["trunk"] + "/binaries/system/pyrogenesis", ## svn
"hbl" : folders["share"] + "/0ad/mods/hannibal/simulation/ai/hannibal/", ## bot folder
"deb" : folders["share"] + "/0ad/mods/hannibal/simulation/ai/hannibal/_debug.js", ## bot folder
"log" : folders["pro"] + "/last.log", ## log file
"ana" : folders["pro"] + "/analysis/", ## analysis csv file
}
## Hannibal log/debug options + data, readable by JS and Python
DEBUG = {
## default map
"map": "scenarios/Arcadia 02",
## counter
"counter": [],
## num: 0=no numerus
## xdo: move window, sim speed
## fil can use files
## log: 0=silent, 1+=errors, 2+=warnings, 3+=info, 4=all
## col: log colors
## sup: suppress, bot does not intialize (saves startup time)
## tst: activate tester
"bots": {
"0" : {"num": 0, "xdo": 0, "fil": 0, "log": 4, "sup": 1, "tst": 0, "col": "" },
"1" : {"num": 1, "xdo": 1, "fil": 1, "log": 4, "sup": 0, "tst": 1, "col": "" },
"2" : {"num": 0, "xdo": 0, "fil": 0, "log": 3, "sup": 0, "tst": 1, "col": "" },
"3" : {"num": 0, "xdo": 0, "fil": 0, "log": 2, "sup": 1, "tst": 0, "col": "" },
"4" : {"num": 0, "xdo": 0, "fil": 0, "log": 2, "sup": 1, "tst": 0, "col": "" },
"5" : {"num": 0, "xdo": 0, "fil": 0, "log": 2, "sup": 1, "tst": 0, "col": "" },
"6" : {"num": 0, "xdo": 0, "fil": 0, "log": 2, "sup": 1, "tst": 0, "col": "" },
"7" : {"num": 0, "xdo": 0, "fil": 0, "log": 2, "sup": 1, "tst": 0, "col": "" },
"8" : {"num": 0, "xdo": 0, "fil": 0, "log": 2, "sup": 1, "tst": 0, "col": "" },
}
}
## keep track of open file handles
files = {}
## civs to choose from at start
civs = [
"athen",
"brit",
"cart",
"celt",
"gaul",
"hele",
"iber",
"mace",
"maur",
"pers",
"ptol",
"rome",
"sele",
"spart",
]
def buildCmd(typ="rel", map="Arcadia 02", bots=2) :
## see /ps/trunk/binaries/system/readme.txt
cmd = [
locations[typ],
"-quickstart", ## load faster (disables audio and some system info logging)
"-autostart=" + map, ## enables autostart and sets MAPNAME; TYPEDIR is skirmishes, scenarios, or random
"-mod=public", ## start the game using NAME mod
"-mod=charts",
"-mod=hannibal",
"-autostart-seed=0", ## sets random map SEED value (default 0, use -1 for random)
"-autostart-size=192", ## sets random map size in TILES (default 192)
# "-autostart-players=2", ## sets NUMBER of players on random map (default 2)
# "-autostart-ai=1:hannibal",
# "-autostart-civ=1:athen", ## sets PLAYER's civilisation to CIV (skirmish and random maps only)
# "-autostart-ai=2:hannibal", ## sets the AI for PLAYER (e.g. 2:petra)
# "-autostart-civ=2:cart", ## sets PLAYER's civilisation to CIV (skirmish and random maps only)
]
## svn does not autoload /user
if typ == "svn" : cmd.append("-mod=user")
## set # of players
cmd.append("-autostart-players=" + str(bots))
## add bots with civ
for bot in range(1, bots +1) :
cmd.append("-autostart-ai=" + str(bot) + ":hannibal")
cmd.append("-autostart-civ=" + str(bot) + ":" + civs[bot -1])
return cmd
def findWindow(title) :
process = subprocess.Popen("xdotool search --name '%s'" % (title), stdout=subprocess.PIPE, shell="FALSE")
windowid = process.stdout.readlines()[0].strip()
process.stdout.close()
return windowid
def xdotool(command) :
subprocess.call(("xdotool %s" % command).split(" "))
def cleanup() :
for k, v in files.iteritems() : v.close()
def writeDEBUG():
fTest = open(locations["deb"], 'w')
fTest.truncate()
fTest.write("var HANNIBAL_DEBUG = " + json.dumps(DEBUG, indent=2) + ";")
fTest.close()
def killDEBUG():
fTest = open(locations["deb"], 'w')
fTest.truncate()
fTest.close()
def processMaps():
proc0AD = None
DEBUG["OnUpdate"] = "print('#! terminate');"
for mp in data["testMaps"] :
DEBUG["map"] = mp
writeDEBUG()
cmd0AD = [pyrogenesis, "-quickstart", "-autostart=" + mp, "-mod=public", "-mod:hannibal", "-autostart-ai=1:hannibal"]
proc0AD = subprocess.Popen(cmd0AD, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print " > " + " ".join(cmd0AD)
try:
for line in iter(proc0AD.stdout.readline, b'') :
sline = line.strip()
if sline.startswith("#! terminate") :
proc0AD.terminate()
sleep(2)
if proc0AD : proc0AD.wait()
if proc0AD : proc0AD.kill()
break
else :
pass
# sys.stdout.write(line)
except KeyboardInterrupt, e :
if proc0AD : proc0AD.terminate()
break
print "done."
def launch(typ="rel", map="Arcadia 02", bots=2):
winX = 1520; winY = 20
doWrite = False
curFileNum = None
idWindow = None
proc0AD = None
def terminate() :
if proc0AD : proc0AD.terminate()
files["log"] = open(locations["log"], 'w')
files["log"].truncate()
DEBUG['map'] = map
writeDEBUG()
cmd0AD = buildCmd(typ, map, bots)
print (" cmd: %s" % " ".join(cmd0AD));
proc0AD = subprocess.Popen(cmd0AD, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
try:
for line in iter(proc0AD.stdout.readline, b'') :
## line has everything
## sline is stripped
## bline is active bot line after colon
sline = line.strip() ## removes nl and wp
bline = ""
id = 0
bot = DEBUG["bots"]["0"]
## detect bot id
if len(sline) >= 2 and sline[1:3] == "::" :
id = sline[0]
bot = DEBUG["bots"][id]
bline = "" if bot["log"] == 0 else sline[3:]
files["log"].write(line)
## terminate everything
if sline.startswith("#! terminate") :
if bot["xdo"] :
print(sline)
terminate()
return
## clear console
elif bline.startswith("#! clear") :
print(sline)
sys.stderr.write("\x1b[2J\x1b[H") ## why not ??
## xdo init
elif bot["xdo"] and bline.startswith("#! xdotool init") :
idWindow = findWindow("0 A.D")
printc("DBlue", " xdo: window id: %s" % idWindow)
xdotool("windowmove %s %s %s" % (idWindow, winX, winY))
## xdo command with echo
elif bot["xdo"] and bline.startswith("#! xdotool ") :
params = " ".join(bline.split(" ")[2:])
printc("DBlue", " X11: " + params)
xdotool(params)
## xdo command without echo
elif bot["xdo"] and bline.startswith("## xdotool ") : ## same, no echo
params = " ".join(bline.split(" ")[2:])
xdotool(params)
## xdo command suppress
elif not bot["xdo"] and bline.startswith("## xdotool ") :
pass
## file open
elif bot["fil"] and bline.startswith("#! open ") :
filenum = bline.split(" ")[2]
filename = bline.split(" ")[3]
files[filenum] = open(filename, 'w')
files[filenum].truncate()
## file append
elif bot["fil"] and bline.startswith("#! append ") :
filenum = bline.split(" ")[2]
dataLine = ":".join(bline.split(":")[1:])
files[filenum].write(dataLine + "\n")
## file write
elif bot["fil"] and bline.startswith("#! write ") :
print(bline)
filenum = bline.split(" ")[2]
filename = bline.split(" ")[3]
files[filenum] = open(filename, 'w')
files[filenum].truncate()
curFileNum = filenum
## file close
elif bot["fil"] and bline.startswith("#! close ") :
filenum = bline.split(" ")[2]
files[filenum].close()
print("#! closed %s at %s" % (filenum, os.stat(filename).st_size))
## bot output
elif bot["log"] > 0 and bline :
if bline.startswith("ERROR :") : stdc("Fail", id + "::" + bline + "\n")
elif bline.startswith("WARN :") : stdc("Warn", id + "::" + bline + "\n")
elif bline.startswith("INFO :") : stdc("OKGreen", id + "::" + bline + "\n")
else : sys.stdout.write("" + bline + "\n")
## suppressed bots - no output
elif bot["log"] == 0:
pass
## hannibal or map or 0AD output
elif line :
if line.startswith("ERROR :") : stdc("Fail", line + "\n")
elif line.startswith("WARN :") : stdc("Warn", line + "\n")
elif line.startswith("INFO :") : stdc("OKGreen", line + "\n")
elif line.startswith("TIMER| ") : pass ## suppress 0AD debugs
elif line.startswith("sys_cursor_create:") : pass
elif line.startswith("AL lib:") : pass
elif line.startswith("Sound:") : pass
else :
sys.stdout.write("" + line)
except KeyboardInterrupt, e :
terminate()
if __name__ == '__main__':
args = sys.argv[1:]
if args[0] == "maps" :
print (" processing maps...")
processMaps(args)
else:
typ = args[0] if len(args) > 0 else "rel"
map = args[1] if len(args) > 1 else "Arcadia 02"
bots = args[2] if len(args) > 2 else "2"
launch(typ, map, int(bots))
cleanup()
print ("\nBye\n")
| 30.297872 | 122 | 0.545207 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,937 | 0.433374 |
ad9f81a0cbf6847b29b2b1b0b3d68f028ae28dfe | 6,265 | py | Python | wurst/brightway/extract_database.py | kais-siala/wurst | 448dd4e9e0bfbde956c2913222222509ff2b14e1 | [
"BSD-2-Clause"
] | null | null | null | wurst/brightway/extract_database.py | kais-siala/wurst | 448dd4e9e0bfbde956c2913222222509ff2b14e1 | [
"BSD-2-Clause"
] | null | null | null | wurst/brightway/extract_database.py | kais-siala/wurst | 448dd4e9e0bfbde956c2913222222509ff2b14e1 | [
"BSD-2-Clause"
] | null | null | null | from bw2data.database import DatabaseChooser
try:
from bw2data.backends.peewee import SQLiteBackend, ActivityDataset, ExchangeDataset
except ImportError:
from bw2data.backends import SQLiteBackend, ActivityDataset, ExchangeDataset
from tqdm import tqdm
import copy
def _list_or_dict(obj):
if isinstance(obj, dict):
for key, value in obj.items():
cp = copy.deepcopy(value)
cp["name"] = key
yield cp
else:
for tmp in obj:
yield (tmp)
def extract_activity(proxy):
"""Get data in Wurst internal format for an ``ActivityDataset``"""
assert isinstance(proxy, ActivityDataset)
return {
"classifications": proxy.data.get("classifications", []),
"comment": proxy.data.get("comment", ""),
"location": proxy.location,
"database": proxy.database,
"code": proxy.code,
"name": proxy.name,
"reference product": proxy.product,
"unit": proxy.data.get("unit", ""),
"exchanges": [],
"parameters": {
obj["name"]: obj["amount"]
for obj in _list_or_dict(proxy.data.get("parameters", []))
},
"parameters full": list(_list_or_dict(proxy.data.get("parameters", []))),
}
def extract_exchange(proxy, add_properties=False):
"""Get data in Wurst internal format for an ``ExchangeDataset``"""
assert isinstance(proxy, ExchangeDataset)
uncertainty_fields = (
"uncertainty type",
"loc",
"scale",
"shape",
"minimum",
"maximum",
"amount",
"pedigree",
)
data = {key: proxy.data[key] for key in uncertainty_fields if key in proxy.data}
assert "amount" in data, "Exchange has no `amount` field"
if "uncertainty type" not in data:
data["uncertainty type"] = 0
data["loc"] = data["amount"]
data["type"] = proxy.type
data["production volume"] = proxy.data.get("production volume")
data["input"] = (proxy.input_database, proxy.input_code)
data["output"] = (proxy.output_database, proxy.output_code)
if add_properties:
data["properties"] = proxy.data.get("properties", {})
return data
def add_exchanges_to_consumers(activities, exchange_qs, add_properties=False):
"""Retrieve exchanges from database, and add to activities.
Assumes that activities are single output, and that the exchange code is the same as the activity code. This assumption is valid for ecoinvent 3.3 cutoff imported into Brightway2."""
lookup = {(o["database"], o["code"]): o for o in activities}
with tqdm(total=exchange_qs.count()) as pbar:
for i, exc in enumerate(exchange_qs):
exc = extract_exchange(exc, add_properties=add_properties)
output = tuple(exc.pop("output"))
lookup[output]["exchanges"].append(exc)
pbar.update(1)
return activities
def add_input_info_for_indigenous_exchanges(activities, names):
"""Add details on exchange inputs if these activities are already available"""
names = set(names)
lookup = {(o["database"], o["code"]): o for o in activities}
for ds in activities:
for exc in ds["exchanges"]:
if "input" not in exc or exc["input"][0] not in names:
continue
obj = lookup[exc["input"]]
exc["product"] = obj.get("reference product")
exc["name"] = obj.get("name")
exc["unit"] = obj.get("unit")
exc["location"] = obj.get("location")
exc["database"] = obj.get("database")
if exc["type"] == "biosphere":
exc["categories"] = obj.get("categories")
exc.pop("input")
def add_input_info_for_external_exchanges(activities, names):
"""Add details on exchange inputs from other databases"""
names = set(names)
cache = {}
for ds in tqdm(activities):
for exc in ds["exchanges"]:
if "input" not in exc or exc["input"][0] in names:
continue
if exc["input"] not in cache:
cache[exc["input"]] = ActivityDataset.get(
ActivityDataset.database == exc["input"][0],
ActivityDataset.code == exc["input"][1],
)
obj = cache[exc["input"]]
exc["name"] = obj.name
exc["product"] = obj.product
exc["unit"] = obj.data.get("unit")
exc["location"] = obj.location
exc["database"] = obj.database
if exc["type"] == "biosphere":
exc["categories"] = obj.data.get("categories")
def extract_brightway2_databases(database_names, add_properties=False):
"""Extract a Brightway2 SQLiteBackend database to the Wurst internal format.
``database_names`` is a list of database names. You should already be in the correct project.
Returns a list of dataset documents."""
ERROR = "Must pass list of database names"
if isinstance(database_names, str):
database_names = [database_names]
assert isinstance(database_names, (list, tuple, set)), ERROR
databases = [DatabaseChooser(name) for name in database_names]
ERROR = "Wrong type of database object (must be SQLiteBackend)"
assert all(isinstance(obj, SQLiteBackend) for obj in databases), ERROR
# Construct generators for both activities and exchanges
# Need to be clever to minimize copying and memory use
activity_qs = ActivityDataset.select().where(
ActivityDataset.database << database_names
)
exchange_qs = ExchangeDataset.select().where(
ExchangeDataset.output_database << database_names
)
# Retrieve all activity data
print("Getting activity data")
activities = [extract_activity(o) for o in tqdm(activity_qs)]
# Add each exchange to the activity list of exchanges
print("Adding exchange data to activities")
add_exchanges_to_consumers(activities, exchange_qs, add_properties)
# Add details on exchanges which come from our databases
print("Filling out exchange data")
add_input_info_for_indigenous_exchanges(activities, database_names)
add_input_info_for_external_exchanges(activities, database_names)
return activities
| 37.969697 | 186 | 0.635914 | 0 | 0 | 238 | 0.037989 | 0 | 0 | 0 | 0 | 1,975 | 0.315243 |
ad9faceb7907e8d07a0ac107b43606f9fd453a2b | 874 | py | Python | ex070.py | raphael-abrantes/exercises-python | 71f1e7cba2f56173c256d43e4fe33a43722b4484 | [
"MIT"
] | null | null | null | ex070.py | raphael-abrantes/exercises-python | 71f1e7cba2f56173c256d43e4fe33a43722b4484 | [
"MIT"
] | null | null | null | ex070.py | raphael-abrantes/exercises-python | 71f1e7cba2f56173c256d43e4fe33a43722b4484 | [
"MIT"
] | null | null | null | vTotal = 0
i = 0
vMenorValor = 0
cont = 1
vMenorValorItem = ''
while True:
vItem = str(input('Insira o nome do produto: '))
vValor = float(input('Valor do produto: R$'))
vTotal = vTotal + vValor
if vValor >= 1000:
i = i + 1
if cont == 1:
vMenorValor = vValor
vMenorValorItem = vItem
else:
if vValor < vMenorValor:
vMenorValor = vValor
vMenorValorItem = vItem
cont = cont + 1
vAns = ' '
while vAns not in 'YyNnSs':
vAns = str(input('Deseja continuar [Y/N]? '))
if vAns in 'Nn':
break
print('-'*40)
#print('{:-^40}'.format('Fim do Programa'))
print('Fim do Programa'.center(40,'-'))
print(f'Temos {i} produto(s) custando mais que R$1000.00')
print(f'O produto mais barato foi o {vMenorValorItem} custando R${vMenorValor:.2f}')
| 27.3125 | 85 | 0.565217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 288 | 0.329519 |
ada0c87b43dddcb6a0efe0af0db1a22cebfdc36f | 26,826 | py | Python | sdscli/adapters/hysds/configure.py | sdskit/sdscli | 9bb96e880c8251d1dce56b901c1289ed80f83ce7 | [
"Apache-2.0"
] | null | null | null | sdscli/adapters/hysds/configure.py | sdskit/sdscli | 9bb96e880c8251d1dce56b901c1289ed80f83ce7 | [
"Apache-2.0"
] | 24 | 2018-03-14T15:37:38.000Z | 2021-11-30T21:59:44.000Z | sdscli/adapters/hysds/configure.py | sdskit/sdscli | 9bb96e880c8251d1dce56b901c1289ed80f83ce7 | [
"Apache-2.0"
] | 13 | 2018-02-22T15:01:35.000Z | 2019-02-07T18:58:57.000Z | """
Configuration for HySDS cluster.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import open
from builtins import str
from future import standard_library
standard_library.install_aliases()
import os
import yaml
import pwd
import shutil
import hashlib
import traceback
from pkg_resources import resource_filename
from glob import glob
from prompt_toolkit.shortcuts import prompt, print_tokens
from prompt_toolkit.styles import style_from_dict
from prompt_toolkit.validation import Validator, ValidationError
from pygments.token import Token
from sdscli.log_utils import logger
from sdscli.conf_utils import get_user_config_path, get_user_files_path, SettingsConf
from sdscli.os_utils import validate_dir
from sdscli.prompt_utils import YesNoValidator
# Pygments token styles for the interactive prompt_toolkit prompts:
# Alert gets a red background, Username red text, Param green text.
prompt_style = style_from_dict({
    Token.Alert: 'bg:#D8060C',
    Token.Username: '#D8060C',
    Token.Param: '#3CFF33',
})
CFG_TMPL = """# HySDS config
TYPE: hysds
# mozart
MOZART_PVT_IP: {MOZART_PVT_IP}
MOZART_PUB_IP: {MOZART_PUB_IP}
MOZART_FQDN: {MOZART_FQDN}
# mozart rabbitmq
MOZART_RABBIT_PVT_IP: {MOZART_RABBIT_PVT_IP}
MOZART_RABBIT_PUB_IP: {MOZART_RABBIT_PUB_IP}
MOZART_RABBIT_FQDN: {MOZART_RABBIT_FQDN}
MOZART_RABBIT_USER: {MOZART_RABBIT_USER}
MOZART_RABBIT_PASSWORD: {MOZART_RABBIT_PASSWORD}
# mozart redis
MOZART_REDIS_PVT_IP: {MOZART_REDIS_PVT_IP}
MOZART_REDIS_PUB_IP: {MOZART_REDIS_PUB_IP}
MOZART_REDIS_FQDN: {MOZART_REDIS_FQDN}
MOZART_REDIS_PASSWORD: {MOZART_REDIS_PASSWORD}
# mozart ES
MOZART_ES_PVT_IP: {MOZART_ES_PVT_IP}
MOZART_ES_PUB_IP: {MOZART_ES_PUB_IP}
MOZART_ES_FQDN: {MOZART_ES_FQDN}
OPS_USER: {OPS_USER}
OPS_HOME: {OPS_HOME}
OPS_PASSWORD_HASH: {OPS_PASSWORD_HASH}
LDAP_GROUPS: {LDAP_GROUPS}
KEY_FILENAME: {KEY_FILENAME}
JENKINS_USER: {JENKINS_USER}
JENKINS_DIR: {JENKINS_DIR}
# metrics
METRICS_PVT_IP: {METRICS_PVT_IP}
METRICS_PUB_IP: {METRICS_PUB_IP}
METRICS_FQDN: {METRICS_FQDN}
# metrics redis
METRICS_REDIS_PVT_IP: {METRICS_REDIS_PVT_IP}
METRICS_REDIS_PUB_IP: {METRICS_REDIS_PUB_IP}
METRICS_REDIS_FQDN: {METRICS_REDIS_FQDN}
METRICS_REDIS_PASSWORD: {METRICS_REDIS_PASSWORD}
# metrics ES
METRICS_ES_PVT_IP: {METRICS_ES_PVT_IP}
METRICS_ES_PUB_IP: {METRICS_ES_PUB_IP}
METRICS_ES_FQDN: {METRICS_ES_FQDN}
# grq
GRQ_PVT_IP: {GRQ_PVT_IP}
GRQ_PUB_IP: {GRQ_PUB_IP}
GRQ_FQDN: {GRQ_FQDN}
GRQ_PORT: {GRQ_PORT}
# grq ES
GRQ_ES_PVT_IP: {GRQ_ES_PVT_IP}
GRQ_ES_PUB_IP: {GRQ_ES_PUB_IP}
GRQ_ES_FQDN: {GRQ_ES_FQDN}
# factotum
FACTOTUM_PVT_IP: {FACTOTUM_PVT_IP}
FACTOTUM_PUB_IP: {FACTOTUM_PUB_IP}
FACTOTUM_FQDN: {FACTOTUM_FQDN}
# continuous integration server
CI_PVT_IP: {CI_PVT_IP}
CI_PUB_IP: {CI_PUB_IP}
CI_FQDN: {CI_FQDN}
JENKINS_API_USER: {JENKINS_API_USER}
JENKINS_API_KEY: {JENKINS_API_KEY}
# verdi build
VERDI_PVT_IP: {VERDI_PVT_IP}
VERDI_PUB_IP: {VERDI_PUB_IP}
VERDI_FQDN: {VERDI_FQDN}
# other non-autoscale verdi hosts (optional)
OTHER_VERDI_HOSTS:
- VERDI_PVT_IP:
VERDI_PUB_IP:
VERDI_FQDN:
# WebDAV product server
DAV_SERVER: {DAV_SERVER}
DAV_USER: {DAV_USER}
DAV_PASSWORD: {DAV_PASSWORD}
# AWS settings for product bucket
DATASET_AWS_ACCESS_KEY: {DATASET_AWS_ACCESS_KEY}
DATASET_AWS_SECRET_KEY: {DATASET_AWS_SECRET_KEY}
DATASET_AWS_REGION: {DATASET_AWS_REGION}
DATASET_S3_ENDPOINT: {DATASET_S3_ENDPOINT}
DATASET_S3_WEBSITE_ENDPOINT: {DATASET_S3_WEBSITE_ENDPOINT}
DATASET_BUCKET: {DATASET_BUCKET}
# AWS settings for autoscale workers
AWS_ACCESS_KEY: {AWS_ACCESS_KEY}
AWS_SECRET_KEY: {AWS_SECRET_KEY}
AWS_REGION: {AWS_REGION}
S3_ENDPOINT: {S3_ENDPOINT}
CODE_BUCKET: {CODE_BUCKET}
VERDI_PRIMER_IMAGE: {VERDI_PRIMER_IMAGE}
VERDI_TAG: {VERDI_TAG}
VERDI_UID: {VERDI_UID}
VERDI_GID: {VERDI_GID}
VENUE: {VENUE}
QUEUES:
- QUEUE_NAME: dumby-job_worker-small
INSTANCE_TYPES:
- t2.medium
- t3a.medium
- t3.medium
- QUEUE_NAME: dumby-job_worker-large
INSTANCE_TYPES:
- t2.medium
- t3a.medium
- t3.medium
# git oauth token
GIT_OAUTH_TOKEN: {GIT_OAUTH_TOKEN}
# container registry
CONTAINER_REGISTRY: {CONTAINER_REGISTRY}
CONTAINER_REGISTRY_BUCKET: {CONTAINER_REGISTRY_BUCKET}
# DO NOT EDIT ANYTHING BELOW THIS
# user_rules_dataset
PROVES_URL: https://prov-es.jpl.nasa.gov/beta
PROVES_IMPORT_URL: https://prov-es.jpl.nasa.gov/beta/api/v0.1/prov_es/import/json
DATASETS_CFG: {DATASETS_CFG}
# system jobs queue
SYSTEM_JOBS_QUEUE: system-jobs-queue
MOZART_ES_CLUSTER: resource_cluster
METRICS_ES_CLUSTER: metrics_cluster
DATASET_QUERY_INDEX: grq
USER_RULES_DATASET_INDEX: user_rules
"""
# Default values used to seed the interactive prompts in configure().
# Each key names a prompt group (a cluster component); its value is an
# ordered list of [config key, default value] pairs. Defaults that depend
# on the current user (OPS_USER, OPS_HOME, VERDI_UID/GID, paths) are
# computed at import time from the invoking user's environment.
CFG_DEFAULTS = {
    "mozart": [
        ["MOZART_PVT_IP", ""],
        ["MOZART_PUB_IP", ""],
        ["MOZART_FQDN", ""],
    ],
    "mozart-rabbit": [
        ["MOZART_RABBIT_PVT_IP", ""],
        ["MOZART_RABBIT_PUB_IP", ""],
        ["MOZART_RABBIT_FQDN", ""],
        ["MOZART_RABBIT_USER", "guest"],
        ["MOZART_RABBIT_PASSWORD", "guest"],
    ],
    "mozart-redis": [
        ["MOZART_REDIS_PVT_IP", ""],
        ["MOZART_REDIS_PUB_IP", ""],
        ["MOZART_REDIS_FQDN", ""],
        ["MOZART_REDIS_PASSWORD", ""],
    ],
    "mozart-es": [
        ["MOZART_ES_PVT_IP", ""],
        ["MOZART_ES_PUB_IP", ""],
        ["MOZART_ES_FQDN", ""],
    ],
    "ops": [
        ["OPS_USER", pwd.getpwuid(os.getuid())[0]],
        ["OPS_HOME", os.path.expanduser('~')],
        ["OPS_PASSWORD_HASH", ""],
        ["LDAP_GROUPS", ""],
        ["KEY_FILENAME", ""],
        ["DATASETS_CFG", os.path.join(os.path.expanduser(
            '~'), 'verdi', 'etc', 'datasets.json')],
    ],
    "metrics": [
        ["METRICS_PVT_IP", ""],
        ["METRICS_PUB_IP", ""],
        ["METRICS_FQDN", ""],
    ],
    "metrics-redis": [
        ["METRICS_REDIS_PVT_IP", ""],
        ["METRICS_REDIS_PUB_IP", ""],
        ["METRICS_REDIS_FQDN", ""],
        ["METRICS_REDIS_PASSWORD", ""],
    ],
    "metrics-es": [
        ["METRICS_ES_PVT_IP", ""],
        ["METRICS_ES_PUB_IP", ""],
        ["METRICS_ES_FQDN", ""],
    ],
    "grq": [
        ["GRQ_PVT_IP", ""],
        ["GRQ_PUB_IP", ""],
        ["GRQ_FQDN", ""],
        ["GRQ_PORT", 8878],
    ],
    "grq-es": [
        ["GRQ_ES_PVT_IP", ""],
        ["GRQ_ES_PUB_IP", ""],
        ["GRQ_ES_FQDN", ""],
    ],
    "factotum": [
        ["FACTOTUM_PVT_IP", ""],
        ["FACTOTUM_PUB_IP", ""],
        ["FACTOTUM_FQDN", ""],
    ],
    "ci": [
        ["CI_PVT_IP", ""],
        ["CI_PUB_IP", ""],
        ["CI_FQDN", ""],
        ["JENKINS_USER", "jenkins"],
        ["JENKINS_DIR", os.path.join(os.path.expanduser('~'), 'jenkins')],
        ["JENKINS_API_USER", ""],
        ["JENKINS_API_KEY", ""],
        ["GIT_OAUTH_TOKEN", ""],
    ],
    "verdi": [
        ["VERDI_PVT_IP", ""],
        ["VERDI_PUB_IP", ""],
        ["VERDI_FQDN", ""],
        ["CONTAINER_REGISTRY", ""],
        ["CONTAINER_REGISTRY_BUCKET", ""],
    ],
    "webdav": [
        ["DAV_SERVER", ""],
        ["DAV_USER", ""],
        ["DAV_PASSWORD", ""],
    ],
    "aws-dataset": [
        ["DATASET_AWS_ACCESS_KEY", ""],
        ["DATASET_AWS_SECRET_KEY", ""],
        ["DATASET_AWS_REGION", "us-west-2"],
        ["DATASET_S3_ENDPOINT", "s3-us-west-2.amazonaws.com"],
        ["DATASET_S3_WEBSITE_ENDPOINT", "s3-website-us-west-2.amazonaws.com"],
        ["DATASET_BUCKET", ""],
    ],
    "aws-asg": [
        ["AWS_ACCESS_KEY", ""],
        ["AWS_SECRET_KEY", ""],
        ["AWS_REGION", "us-west-2"],
        ["S3_ENDPOINT", "s3-us-west-2.amazonaws.com"],
        ["CODE_BUCKET", ""],
        ["VERDI_PRIMER_IMAGE", ""],
        ["VERDI_TAG", ""],
        ["VERDI_UID", os.getuid()],
        ["VERDI_GID", os.getgid()],
        ["VENUE", "ops"],
    ]
}
def copy_files():
    """Copy templates and files to user config files.

    Seeds the user's SDS files directory (mode 0700) with the packaged
    HySDS adapter files. ``cluster.py`` is special-cased into the directory
    holding the user config file; everything else goes into the files
    directory. Existing destinations are never overwritten.
    """
    dest_root = get_user_files_path()
    logger.debug('files_path: %s' % dest_root)
    validate_dir(dest_root, mode=0o700)

    src_root = resource_filename(
        'sdscli', os.path.join('adapters', 'hysds', 'files'))
    for src in glob(os.path.join(src_root, '*')):
        base = os.path.basename(src)
        if base == 'cluster.py':
            # cluster.py lives next to the user config file, not under files/
            dest = os.path.join(
                os.path.dirname(get_user_config_path()), base)
            if not os.path.exists(dest):
                shutil.copy(src, dest)
            continue
        dest = os.path.join(dest_root, base)
        if os.path.exists(dest):
            # never clobber a user's existing customization
            continue
        if os.path.isdir(src):
            shutil.copytree(src, dest)
            logger.debug("Copying dir %s to %s" % (src, dest))
        elif os.path.isfile(src):
            shutil.copy(src, dest)
            logger.debug("Copying file %s to %s" % (src, dest))
def configure():
"""Configure SDS config file for HySDS."""
# copy templates/files
copy_files()
# config file
cfg_file = get_user_config_path()
if os.path.exists(cfg_file):
cont = prompt(get_prompt_tokens=lambda x: [(Token, cfg_file),
(Token, " already exists. "),
(Token.Alert,
"Customizations will be lost or overwritten!"),
(Token, " Continue [y/n]: ")],
validator=YesNoValidator(), style=prompt_style) == 'y'
# validator=YesNoValidator(), default='n', style=prompt_style) == 'y'
if not cont:
return 0
with open(cfg_file) as f:
cfg = yaml.load(f, Loader=yaml.FullLoader)
else:
cfg = {}
# mozart
for k, d in CFG_DEFAULTS['mozart']:
v = prompt(get_prompt_tokens=lambda x: [(Token, "Enter value for "),
(Token.Param, "%s" % k),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style)
cfg[k] = v
# mozart components
comps = [('mozart-rabbit', 'rabbitMQ'), ('mozart-redis', 'redis'),
('mozart-es', 'elasticsearch')]
for grp, comp in comps:
reuse = prompt("Is mozart %s on a different IP [y/n]: " % comp,
validator=YesNoValidator(), default='n') == 'n'
for k, d in CFG_DEFAULTS[grp]:
if reuse:
if k.endswith('_PVT_IP'):
cfg[k] = cfg['MOZART_PVT_IP']
continue
elif k.endswith('_PUB_IP'):
cfg[k] = cfg['MOZART_PUB_IP']
continue
elif k.endswith('_FQDN'):
cfg[k] = cfg['MOZART_FQDN']
continue
if k == 'MOZART_RABBIT_PASSWORD':
while True:
p1 = prompt(get_prompt_tokens=lambda x: [(Token, "Enter RabbitMQ password for user "),
(Token.Username, "%s" %
cfg['MOZART_RABBIT_USER']),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style,
is_password=True)
p2 = prompt(get_prompt_tokens=lambda x: [(Token, "Re-enter RabbitMQ password for user "),
(Token.Username, "%s" %
cfg['MOZART_RABBIT_USER']),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style,
is_password=True)
if p1 == p2:
if p1 == "":
print("Password can't be empty.")
continue
v = p1
break
print("Passwords don't match.")
elif k == 'MOZART_REDIS_PASSWORD':
while True:
p1 = prompt(get_prompt_tokens=lambda x: [(Token, "Enter Redis password: ")],
default=str(cfg.get(k, d)),
style=prompt_style,
is_password=True)
p2 = prompt(get_prompt_tokens=lambda x: [(Token, "Re-enter Redis password: ")],
default=str(cfg.get(k, d)),
style=prompt_style,
is_password=True)
if p1 == p2:
v = p1
break
print("Passwords don't match.")
else:
v = prompt(get_prompt_tokens=lambda x: [(Token, "Enter value for "),
(Token.Param, "%s" % k),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style)
cfg[k] = v
# ops
for k, d in CFG_DEFAULTS['ops']:
if k == 'OPS_PASSWORD_HASH':
while True:
p1 = prompt(get_prompt_tokens=lambda x: [(Token, "Enter web interface password for ops user "),
(Token.Username, "%s" %
cfg['OPS_USER']),
(Token, ": ")],
default="",
style=prompt_style,
is_password=True)
p2 = prompt(get_prompt_tokens=lambda x: [(Token, "Re-enter web interface password for ops user "),
(Token.Username, "%s" %
cfg['OPS_USER']),
(Token, ": ")],
default="",
style=prompt_style,
is_password=True)
if p1 == p2:
if p1 == "":
print("Password can't be empty.")
continue
v = hashlib.sha224(p1.encode()).hexdigest()
break
print("Passwords don't match.")
else:
v = prompt(get_prompt_tokens=lambda x: [(Token, "Enter value for "),
(Token.Param, "%s" % k),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style)
cfg[k] = v
# metrics
for k, d in CFG_DEFAULTS['metrics']:
v = prompt(get_prompt_tokens=lambda x: [(Token, "Enter value for "),
(Token.Param, "%s" % k),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style)
cfg[k] = v
# metrics components
comps = [('metrics-redis', 'redis'), ('metrics-es', 'elasticsearch')]
for grp, comp in comps:
reuse = prompt("Is metrics %s on a different IP [y/n]: " % comp,
validator=YesNoValidator(), default='n') == 'n'
for k, d in CFG_DEFAULTS[grp]:
if reuse:
if k.endswith('_PVT_IP'):
cfg[k] = cfg['METRICS_PVT_IP']
continue
elif k.endswith('_PUB_IP'):
cfg[k] = cfg['METRICS_PUB_IP']
continue
elif k.endswith('_FQDN'):
cfg[k] = cfg['METRICS_FQDN']
continue
if k == 'METRICS_REDIS_PASSWORD':
while True:
p1 = prompt(get_prompt_tokens=lambda x: [(Token, "Enter Redis password: ")],
default=str(cfg.get(k, d)),
style=prompt_style,
is_password=True)
p2 = prompt(get_prompt_tokens=lambda x: [(Token, "Re-enter Redis password: ")],
default=str(cfg.get(k, d)),
style=prompt_style,
is_password=True)
if p1 == p2:
v = p1
break
print("Passwords don't match.")
else:
v = prompt(get_prompt_tokens=lambda x: [(Token, "Enter value for "),
(Token.Param, "%s" % k),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style)
cfg[k] = v
# grq
for k, d in CFG_DEFAULTS['grq']:
v = prompt(get_prompt_tokens=lambda x: [(Token, "Enter value for "),
(Token.Param, "%s" % k),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style)
cfg[k] = v
# grq components
comps = [('grq-es', 'elasticsearch')]
for grp, comp in comps:
reuse = prompt("Is grq %s on a different IP [y/n]: " % comp,
validator=YesNoValidator(), default='n') == 'n'
for k, d in CFG_DEFAULTS[grp]:
if reuse:
if k.endswith('_PVT_IP'):
cfg[k] = cfg['GRQ_PVT_IP']
continue
elif k.endswith('_PUB_IP'):
cfg[k] = cfg['GRQ_PUB_IP']
continue
elif k.endswith('_FQDN'):
cfg[k] = cfg['GRQ_FQDN']
continue
v = prompt(get_prompt_tokens=lambda x: [(Token, "Enter value for "),
(Token.Param, "%s" % k),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style)
cfg[k] = v
# factotum
for k, d in CFG_DEFAULTS['factotum']:
v = prompt(get_prompt_tokens=lambda x: [(Token, "Enter value for "),
(Token.Param, "%s" % k),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style)
cfg[k] = v
# ci
for k, d in CFG_DEFAULTS['ci']:
if k in ('JENKINS_API_KEY', 'GIT_OAUTH_TOKEN'):
while True:
p1 = prompt(get_prompt_tokens=lambda x: [(Token, "Enter value for "),
(Token.Param, "%s" % k),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style,
is_password=True)
p2 = prompt(get_prompt_tokens=lambda x: [(Token, "Re-enter value for "),
(Token.Param, "%s" % k),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style,
is_password=True)
if p1 == p2:
v = p1
break
print("Values don't match.")
else:
v = prompt(get_prompt_tokens=lambda x: [(Token, "Enter value for "),
(Token.Param, "%s" % k),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style)
cfg[k] = v
# verdi
for k, d in CFG_DEFAULTS['verdi']:
v = prompt(get_prompt_tokens=lambda x: [(Token, "Enter value for "),
(Token.Param, "%s" % k),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style)
cfg[k] = v
# webdav
for k, d in CFG_DEFAULTS['webdav']:
if k == 'DAV_PASSWORD':
while True:
p1 = prompt(get_prompt_tokens=lambda x: [(Token, "Enter webdav password for user "),
(Token.Username, "%s" %
cfg['DAV_USER']),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style,
is_password=True)
p2 = prompt(get_prompt_tokens=lambda x: [(Token, "Re-enter webdav password for ops user "),
(Token.Username, "%s" %
cfg['DAV_USER']),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style,
is_password=True)
if p1 == p2:
v = p1
break
print("Passwords don't match.")
else:
v = prompt(get_prompt_tokens=lambda x: [(Token, "Enter value for "),
(Token.Param, "%s" % k),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style)
cfg[k] = v
# aws-dataset
for k, d in CFG_DEFAULTS['aws-dataset']:
if k == 'DATASET_AWS_SECRET_KEY':
if cfg['DATASET_AWS_ACCESS_KEY'] == "":
cfg['DATASET_AWS_SECRET_KEY'] = ""
continue
while True:
p1 = prompt(get_prompt_tokens=lambda x: [(Token, "Enter AWS secret key for "),
(Token.Username, "%s" %
cfg['DATASET_AWS_ACCESS_KEY']),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style,
is_password=True)
p2 = prompt(get_prompt_tokens=lambda x: [(Token, "Re-enter AWS secret key for "),
(Token.Username, "%s" %
cfg['DATASET_AWS_ACCESS_KEY']),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style,
is_password=True)
if p1 == p2:
v = p1
break
print("Keys don't match.")
elif k == 'DATASET_AWS_ACCESS_KEY':
v = prompt(get_prompt_tokens=lambda x: [(Token, "Enter value for "),
(Token.Param, "%s" % k),
(Token, ". If using instance roles, just press enter"),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style)
else:
v = prompt(get_prompt_tokens=lambda x: [(Token, "Enter value for "),
(Token.Param, "%s" % k),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style)
cfg[k] = v
# aws-asg
for k, d in CFG_DEFAULTS['aws-asg']:
if k == 'AWS_SECRET_KEY':
if cfg['AWS_ACCESS_KEY'] == "":
cfg['AWS_SECRET_KEY'] = ""
continue
while True:
p1 = prompt(get_prompt_tokens=lambda x: [(Token, "Enter AWS secret key for "),
(Token.Username, "%s" %
cfg['AWS_ACCESS_KEY']),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style,
is_password=True)
p2 = prompt(get_prompt_tokens=lambda x: [(Token, "Re-enter AWS secret key for "),
(Token.Username, "%s" %
cfg['AWS_ACCESS_KEY']),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style,
is_password=True)
if p1 == p2:
v = p1
break
print("Keys don't match.")
elif k == 'AWS_ACCESS_KEY':
v = prompt(get_prompt_tokens=lambda x: [(Token, "Enter value for "),
(Token.Param, "%s" % k),
(Token, ". If using instance roles, just press enter"),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style)
else:
v = prompt(get_prompt_tokens=lambda x: [(Token, "Enter value for "),
(Token.Param, "%s" % k),
(Token, ": ")],
default=str(cfg.get(k, d)),
style=prompt_style)
cfg[k] = v
# ensure directory exists
validate_dir(os.path.dirname(cfg_file), mode=0o700)
yml = CFG_TMPL.format(**cfg)
with open(cfg_file, 'w') as f:
f.write(yml)
| 38.213675 | 114 | 0.456348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,127 | 0.302952 |
a8db9e49f13dfef65d13b5c20b4f322d715e8a17 | 17,625 | py | Python | robot_control/iiwa_ros-master/iiwa_gazebo/scripts/gazebo_iiwa_keyboard_cmd.py | stanFurrer/Multimodal-solution-for-grasp-stability-prediction | b7d07a217e2a4846f3fe782fe7c3f4942f3299b3 | [
"MIT"
] | null | null | null | robot_control/iiwa_ros-master/iiwa_gazebo/scripts/gazebo_iiwa_keyboard_cmd.py | stanFurrer/Multimodal-solution-for-grasp-stability-prediction | b7d07a217e2a4846f3fe782fe7c3f4942f3299b3 | [
"MIT"
] | null | null | null | robot_control/iiwa_ros-master/iiwa_gazebo/scripts/gazebo_iiwa_keyboard_cmd.py | stanFurrer/Multimodal-solution-for-grasp-stability-prediction | b7d07a217e2a4846f3fe782fe7c3f4942f3299b3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (C) 2021 Learning Algorithms and Systems Laboratory, EPFL, Switzerland
# Authors:
# Lorenzo Panchett (lorenzo.panchetti@epfl.ch)
#
# Website: lasa.epfl.ch
#
# This file is part of iiwa_gazebo.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import rospy
import numpy as np
import argparse
import time
import tf2_ros
from tf2_geometry_msgs import PoseStamped
from std_msgs.msg import String
from geometry_msgs.msg import Pose
# ----- Argument parser -----
parser = argparse.ArgumentParser(description='User can decide to pass ee_pose_d (default) or to pass a ee_vel_d')
parser.add_argument('--velocity',
type=bool, default=False,
help='Set the ee_vel_d mode (default False)')
parser.add_argument('--v',
type=float, default=0,
help='Set desired velocity v m/s')
parser.add_argument('--nb_sub_rot',
type=int, default=1,
help='Number of integer sub_rotations e.g 4 sub_rot of 45 deg -> 180 deg')
args = parser.parse_args()
v_max = 1.0
v_min = 0
if args.v != 0:
if args.v > v_max:
args.v = v_max
elif args.v < v_min:
args.v = v_min
print('desired velocity is {}'.format(args.v))
v_des = args.v
if args.nb_sub_rot < 1:
args.nb_sub_rot = 1
# ----- Helper functions for quaternions-----
def euler_to_quaternion(roll, pitch, yaw):
qx = np.sin(roll/2) * np.cos(pitch/2) * np.cos(yaw/2) - np.cos(roll/2) * np.sin(pitch/2) * np.sin(yaw/2)
qy = np.cos(roll/2) * np.sin(pitch/2) * np.cos(yaw/2) + np.sin(roll/2) * np.cos(pitch/2) * np.sin(yaw/2)
qz = np.cos(roll/2) * np.cos(pitch/2) * np.sin(yaw/2) - np.sin(roll/2) * np.sin(pitch/2) * np.cos(yaw/2)
qw = np.cos(roll/2) * np.cos(pitch/2) * np.cos(yaw/2) + np.sin(roll/2) * np.sin(pitch/2) * np.sin(yaw/2)
return [qx, qy, qz, qw]
def normalize_quat(q):
""" Normalise the quaternion
Args:
q (Quaternion)
Return:
q (Quaternion): normalized quaternion
"""
norm = np.linalg.norm([q.x, q.y, q.z, q.w])
q.x = q.x / norm
q.y = q.y / norm
q.z = q.z / norm
q.w = q.w / norm
return q
def angle_between_quaternion(p1, p2):
"""Return angle between two quaternions
Args:
p1 (Quaternion): First Quaternion
p2 (Quaternion): Second Quaternion
Returns:
float: Angle between the quaternions [rad]
"""
q1 = np.array([p1.x, p1.y, p1.z, p1.w])
q2 = np.array([p2.x, p2.y, p2.z, p2.w])
# Inv of q2
q2_conj = q2 * [-1, -1, -1, 1]
q2_inv = q2_conj / np.linalg.norm(q2)**2
# norm [ vector component of q1 * q2^(-1) ] = sin(\theta / 2)
temp = q1[3] * q2_inv[:3] + q2_inv[3] * q1[:3] + np.cross(q1[:3], q2_inv[:3])
return 2 * np.arcsin(np.linalg.norm(temp))
# ----- Movements (predefined) -----
lat_displ = 0.1 # m
ang_rot = np.pi/1.5 # radians
# Desired rotation in the iiwa_link_7 reference frame
q_rot_cw = euler_to_quaternion(0.0, 0.0, ang_rot)
q_rot_ccw = euler_to_quaternion(0.0, 0.0, -ang_rot)
MOVEMENTS = {
"home": [0.8, 0.0, 0.4, 0.0, 0.7071068, 0.0, 0.7071068], # Reset default pose
"right": [0.0, -lat_displ, 0.0, 0.0, 0.0, 0.0, 0.0], # Move right
"left": [0.0, lat_displ, 0.0, 0.0, 0.0, 0.0, 0.0], # Move left
"down": [0.0, 0.0, -lat_displ, 0.0, 0.0, 0.0, 0.0], # Move downwards
"up": [0.0, 0.0, lat_displ, 0.0, 0.0, 0.0, 0.0], # Move upwards
"clockwise": [0.0, 0.0, 0.0, q_rot_cw[0], q_rot_cw[1], q_rot_cw[2], q_rot_cw[3]], # Rotate clockwise around x axis
"cclockwise": [0.0, 0.0, 0.0, q_rot_ccw[0], q_rot_ccw[1], q_rot_ccw[2], q_rot_ccw[3]], # Rotate counter-clockwise
"vertical": "vertical", # Sequence downward upward
"plane": "plane", # Sequence right left
"fullcw": "fullcw",
"fullccw": "fullccw",
}
# ----- Global Variables -----
TARGET = Pose()
MOVEMENT = MOVEMENTS["home"]
EE_POSE = Pose()
keyboard_cmd = False
tf_buffer = None
is_home = False
is_rotation_cw = False
is_rotation_ccw = False
is_sequence = False
end_cw = False
end_ccw = False
frame_id_ee = "iiwa_link_7"
delta_t = 0.0
# ----- Callbacks -----
def keyboard_callback(data):
"""Callback for keyboard commands
Args:
data (String): ROS `String` message
"""
global MOVEMENT
global keyboard_cmd
global is_home
global is_rotation_cw
global is_rotation_ccw
global is_sequence
# Check if known target, otherwise keep default pose
MOVEMENT = MOVEMENTS.get(data.data, MOVEMENTS["home"])
if data.data == "home":
is_home = True
if (data.data == "clockwise") or (data.data == "fullcw"):
is_rotation_cw = True
if (data.data == "cclockwise") or (data.data == "fullccw"):
is_rotation_ccw = True
if (data.data == "vertical") or (data.data == "plane") or (data.data == "fullcw") or (data.data == "fullccw"):
is_sequence = True
keyboard_cmd = True
def ee_pose_callback(data):
"""Callback for end effector pose
Args:
data (Pose): ROS `Pose` message
"""
global EE_POSE
EE_POSE = data
# ----- Helper functions for pose and target computation-----
def to_pose():
"""Transform corresponding MOVEMENT in Pose object
Return:
pose (Pose)
"""
pose = Pose()
pose.position.x = MOVEMENT[0]
pose.position.y = MOVEMENT[1]
pose.position.z = MOVEMENT[2]
pose.orientation.x = MOVEMENT[3]
pose.orientation.y = MOVEMENT[4]
pose.orientation.z = MOVEMENT[5]
pose.orientation.w = MOVEMENT[6]
return pose
def sum_poses(pose1, pose2):
"""Sum the position and orientation fields between two poses
Args:
pose1 (Pose)
pose2 (Pose)
Return:
sum_p (Pose)
"""
sum_p = Pose()
sum_p.position.x = pose1.position.x + pose2.position.x
sum_p.position.y = pose1.position.y + pose2.position.y
sum_p.position.z = pose1.position.z + pose2.position.z
sum_p.orientation.x = pose1.orientation.x + pose2.orientation.x
sum_p.orientation.y = pose1.orientation.y + pose2.orientation.y
sum_p.orientation.z = pose1.orientation.z + pose2.orientation.z
sum_p.orientation.w = pose1.orientation.w + pose2.orientation.w
if is_rotation_cw or is_rotation_ccw or is_home:
sum_p.orientation.x = pose2.orientation.x
sum_p.orientation.y = pose2.orientation.y
sum_p.orientation.z = pose2.orientation.z
sum_p.orientation.w = pose2.orientation.w
return check_feasible(sum_p)
def to_world_frame(pose_mvt):
"""Use tf transform to express the desired rotation in the world frame
Args:
pose_mvt (Pose):pose associated to the desired movement in the ee frame
Return:
rel_target_pose (Pose): relative desired pose in the world frame
"""
# Apply tf only if some rotation is involved
if (is_rotation_cw) or (is_rotation_ccw):
try:
pose_stamped = PoseStamped()
pose_stamped.pose = pose_mvt
pose_stamped.header.frame_id = frame_id_ee
# Hack to ensure that the target transformation is always in future
pose_stamped.header.stamp = rospy.Time.from_sec(rospy.Time.now().to_sec() + 0.5)
output_pose_stamped = tf_buffer.transform(pose_stamped, base_link, timeout=rospy.Duration(1.0))
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
raise
rel_target_pose = output_pose_stamped.pose
rel_target_pose.position = pose_mvt.position # The desired position was already expressed in the world frame
else:
rel_target_pose = pose_mvt
return rel_target_pose
def check_feasible(p):
"""Check if boundary conditions are respected, constraint ee rotation to physical limits
Args:
sum_p (Pose): computed sum of two poses
Return:
sum_p (Pose): saturate to limits if necessary
"""
global is_rotation_cw
global is_rotation_ccw
global end_cw
global end_ccw
if (is_rotation_cw):
if (not end_ccw and np.abs(p.orientation.w + p.orientation.y) < 0.3) or end_cw:
# Saturate all orientations to prevent overflow and keep quaternion normalized
p.orientation.x = 0.7071068
p.orientation.y = 0.0
p.orientation.z = 0.7071068
p.orientation.w = 0.0
rospy.loginfo("cw saturation")
end_cw = True
end_ccw = False
is_rotation_cw = False
if (is_rotation_ccw):
if (not end_cw and np.abs(p.orientation.w + p.orientation.y) < 0.3) or end_ccw:
p.orientation.x = 0.7071068
p.orientation.y = 0.0
p.orientation.z = 0.7071068
p.orientation.w = 0.0
rospy.loginfo("ccw saturation")
end_ccw = True
end_cw = False
is_rotation_ccw = False
return p
def get_error(p1, p2):
"""Return error between two poses
Args:
p1 (Pose): First Pose
p2 (Pose): Second Pose
Returns:
Tuple[float, float]: Tuple of position and orientation error
Position error: L2 distance between the poses
Orientation error: Angle between the poses [rad]
"""
pos_err = np.array([
np.linalg.norm([p1.position.x - p2.position.x]),
np.linalg.norm([p1.position.y - p2.position.y]),
np.linalg.norm([p1.position.z - p2.position.z]),
])
orient_err = np.abs(angle_between_quaternion(p1.orientation, p2.orientation))
return pos_err, orient_err
# ----- Trajectories definition -----
def pub_target(rel_mvt):
global TARGET
TARGET = sum_poses(EE_POSE, rel_mvt)
TARGET.orientation = normalize_quat(TARGET.orientation)
publisher.publish(TARGET)
def reach_lin_target(nb_axis, radius_norm = 0.01):
while not rospy.is_shutdown():
err = get_error(EE_POSE, TARGET)
if(np.linalg.norm(err[0][nb_axis]) < radius_norm): #define norm to change waypoint
break
def reach_ang_target(angular_norm = 0.3):
while not rospy.is_shutdown():
err = get_error(EE_POSE, TARGET)
if(np.linalg.norm(err[1]) < angular_norm):
break
def vertical_mvt(rep, lat_displ, dt):
global MOVEMENT
mvt = Pose()
mvt.position.x = 0
mvt.position.y = 0
mvt.position.z = 0
mvt.orientation.x = EE_POSE.orientation.x
mvt.orientation.y = EE_POSE.orientation.y
mvt.orientation.z = EE_POSE.orientation.z
mvt.orientation.w = EE_POSE.orientation.w
for i in range(rep):
if v_des != 0:
mvt.position.z = -v_des
TARGET = mvt
publisher_v.publish(TARGET)
t_init = time.time()
while (time.time() - t_init < dt):
pass
mvt.position.z = v_des
TARGET = mvt
publisher_v.publish(TARGET)
t_init = time.time()
while (time.time() - t_init < dt + 0.03):
pass
else:
rel_mvt = Pose()
rel_mvt.position.z = -lat_displ
pub_target(rel_mvt)
reach_lin_target(2)
rel_mvt.position.z = lat_displ
pub_target(rel_mvt)
reach_lin_target(2)
#Reset position
MOVEMENT = MOVEMENTS["home"]
TARGET = to_pose()
publisher.publish(TARGET)
def horizontal_mvt(rep, lat_displ, dt):
global MOVEMENT
mvt = Pose()
mvt.position.x = 0
mvt.position.y = 0
mvt.position.z = 0
mvt.orientation.x = EE_POSE.orientation.x
mvt.orientation.y = EE_POSE.orientation.y
mvt.orientation.z = EE_POSE.orientation.z
mvt.orientation.w = EE_POSE.orientation.w
for i in range(rep):
if v_des != 0:
mvt.position.y = -v_des
TARGET = mvt
publisher_v.publish(TARGET)
t_init = time.time()
while (time.time() - t_init < dt):
pass
mvt.position.y = v_des
TARGET = mvt
publisher_v.publish(TARGET)
t_init = time.time()
while (time.time() - t_init < dt):
pass
else:
rel_mvt = Pose()
rel_mvt.position.y = -lat_displ
pub_target(rel_mvt)
reach_lin_target(1)
rel_mvt.position.y = lat_displ
pub_target(rel_mvt)
reach_lin_target(1)
#Reset position
MOVEMENT = MOVEMENTS["home"]
TARGET = to_pose()
publisher.publish(TARGET)
def full_cw(ang_rot, rep = 2):
global MOVEMENT
global is_rotation_cw
global is_rotation_ccw
for i in range(rep):
is_rotation_cw = True #otherwise not computing tf
rel_mvt = Pose()
q_rot_cw = euler_to_quaternion(0.0, 0.0, ang_rot)
MOVEMENT = [0.0, 0.0, 0.0, q_rot_cw[0], q_rot_cw[1], q_rot_cw[2], q_rot_cw[3]]
pose_mvt = to_pose()
rel_mvt = to_world_frame(pose_mvt)
pub_target(rel_mvt)
reach_ang_target()
is_rotation_ccw =True #otherwise not computing tf
q_rot_ccw = euler_to_quaternion(0.0, 0.0, -ang_rot)
MOVEMENT = [0.0, 0.0, 0.0, q_rot_ccw[0], q_rot_ccw[1], q_rot_ccw[2], q_rot_ccw[3]]
pose_mvt = to_pose()
rel_mvt = to_world_frame(pose_mvt)
pub_target(rel_mvt)
reach_ang_target()
#Reset position
MOVEMENT = MOVEMENTS["home"]
TARGET = to_pose()
publisher.publish(TARGET)
def full_ccw(ang_rot, rep = 2):
global MOVEMENT
global is_rotation_cw
global is_rotation_ccw
for i in range(rep):
is_rotation_ccw = True
rel_mvt = Pose()
q_rot_ccw = euler_to_quaternion(0.0, 0.0, -ang_rot)
MOVEMENT = [0.0, 0.0, 0.0, q_rot_ccw[0], q_rot_ccw[1], q_rot_ccw[2], q_rot_ccw[3]]
pose_mvt = to_pose()
rel_mvt = to_world_frame(pose_mvt)
pub_target(rel_mvt)
reach_ang_target()
is_rotation_cw = True #otherwise not computing tf
q_rot_cw = euler_to_quaternion(0.0, 0.0, ang_rot)
MOVEMENT = [0.0, 0.0, 0.0, q_rot_cw[0], q_rot_cw[1], q_rot_cw[2], q_rot_cw[3]]
pose_mvt = to_pose()
rel_mvt = to_world_frame(pose_mvt)
pub_target(rel_mvt)
reach_ang_target()
#Reset position
MOVEMENT = MOVEMENTS["home"]
TARGET = to_pose()
publisher.publish(TARGET)
def run_sequence():
global delta_t
if v_des != 0:
delta_t = 0.3
if MOVEMENT == "vertical":
vertical_mvt(rep, lat_displ, delta_t)
if MOVEMENT == "plane":
horizontal_mvt(rep, lat_displ, delta_t)
if MOVEMENT == "fullcw":
full_cw(ang_rot)
if MOVEMENT == "fullccw":
full_ccw(ang_rot)
# ----- Main Script -----
if __name__ == '__main__':
rospy.init_node('gazebo_iiwa_keyboard_cmd')
rate = rospy.Rate(30)
base_link = rospy.get_param("~base_link", "world")
# Subscribers
tf_buffer = tf2_ros.Buffer()
listener = tf2_ros.TransformListener(tf_buffer)
rospy.Subscriber("iiwa/ee_info/Pose", Pose, ee_pose_callback)
rospy.Subscriber("iiwa/lib_cmd", String, keyboard_callback, queue_size=1)
# Publishers
publisher = rospy.Publisher("passive_control/pos_quat", Pose, queue_size=1)
#Publisher to ee_vel_d
publisher_v = rospy.Publisher("passive_control/vel_quat", Pose, queue_size=1)
# Main loop
while not rospy.is_shutdown():
rep = 2
if args.velocity and keyboard_cmd:
if is_home:
TARGET = to_pose()
is_home = False
TARGET.orientation = normalize_quat(TARGET.orientation)
publisher.publish(TARGET)
# Rotation
elif is_rotation_cw or is_rotation_ccw:
ang_rot = ang_rot/args.nb_sub_rot
run_sequence()
else: #Linear movement
v_des = args.v
if v_des == 0:
rospy.loginfo('ATTENTION DESIRED VELOCITY IS 0')
run_sequence()
is_sequence = False
keyboard_cmd = False
else:
if(is_sequence):
run_sequence()
is_sequence = False
keyboard_cmd = False
elif(keyboard_cmd):
if is_home:
TARGET = to_pose()
is_home = False
else:
pose_mvt = to_pose()
rel_target_pose = to_world_frame(pose_mvt)
TARGET = sum_poses(EE_POSE, rel_target_pose)
TARGET.orientation = normalize_quat(TARGET.orientation)
publisher.publish(TARGET)
keyboard_cmd = False
rospy.loginfo(TARGET) # print for sanity check
rate.sleep()
| 32.51845 | 119 | 0.607035 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,338 | 0.246128 |
a8dd9e16ff0637026c75f14c84e32a7e547064f3 | 2,829 | py | Python | python_code/Quadrotor/ProblemStatement/targetSlowResponse.py | cholazzzb/APF_Swarm_Control_Simulator | a58a1f55cd709f12928cc31d2320f7833d761c50 | [
"MIT"
] | 2 | 2021-12-21T00:39:46.000Z | 2022-02-28T11:11:27.000Z | python_code/Quadrotor/ProblemStatement/targetSlowResponse.py | cholazzzb/APF_Swarm_Control_Simulator | a58a1f55cd709f12928cc31d2320f7833d761c50 | [
"MIT"
] | 1 | 2021-02-03T13:24:13.000Z | 2021-02-03T23:56:33.000Z | python_code/Quadrotor/ProblemStatement/targetSlowResponse.py | cholazzzb/APF_Swarm_Control_Simulator | a58a1f55cd709f12928cc31d2320f7833d761c50 | [
"MIT"
] | 1 | 2021-04-16T18:25:15.000Z | 2021-04-16T18:25:15.000Z | import matplotlib.pyplot as plt
import math
import sys
sys.path.append('../')
from Report import Report
from QuadrotorARSim import QuadrotorARSim
from Ship import Ship
sys.path.append('../')
from Agent import Agent
from Target import Target
from SwarmController import SwarmController
# Build Object for Attitude and Position Controller
specs = {"mass": 0.445, "inertia": [
0.0027, 0.0029, 0.0053], "armLength": 0.125}
initialState = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0],
[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
initialState2 = [[-1.0, 1.0, 0.0], [0.0, 0.0, 0.0],
[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
initialState3 = [[7.0, -5.0, 0.0], [0.0, 0.0, 0.0],
[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
initialInput = [0.0, 0.0, 0.0, 0.0]
attitudeControllerPID = [[1.43, 0, 0.13], # PID phi
[1.52, 0, 0.14], # PID theta
[2.43, 0, 0.26], # PID psi
[88.02, 44.5, 0 ]] # PID z dot
positionControllerPID = [[0, 0, 0], # PID x
[0, 0, 0], # PID y
[5, 1, 2]] # PID z
AR1 = QuadrotorARSim(0, "AR1", specs, initialState,
initialInput, attitudeControllerPID, positionControllerPID)
AR2 = QuadrotorARSim(1, "AR2", specs, initialState2,
initialInput, attitudeControllerPID, positionControllerPID)
AR3 = QuadrotorARSim(2, "AR3", specs, initialState3,
initialInput, attitudeControllerPID, positionControllerPID)
Target1 = Ship([4, 4, 0], 0.75)
# For Plotting System Response
Report1 = Report(AR1)
Report2 = Report(AR2)
Report3 = Report(AR3)
# Build Object for Swarm Controller
TPFconfig = {"damping_factor": 1, "gain":1, "target_detecting_range":1}
OPFconfig = {"positiveGain1": 1, "positiveGain2":1, "detecting_range": 1}
SPFconfig = {"min_allowable_dist": 10}
SwarmController1 = SwarmController(TPFconfig, OPFconfig, SPFconfig)
AR1.connectToSwarmController(SwarmController1)
AR2.connectToSwarmController(SwarmController1)
AR3.connectToSwarmController(SwarmController1)
Target1.connectToSwarmController(SwarmController1)
for iteration in range (100):
print('-------Time:', AR1.t, '-------')
# SwarmController1.calculateAgentsForces()
# AR1.controlSwarm(SwarmController1)
# AR2.controlSwarm(SwarmController1)
# AR3.controlSwarm(SwarmController1)
AR1.controlPosition([0,0,1])
AR2.controlPosition([0,0,1])
AR3.controlPosition([0,0,1])
AR1.updateState()
AR2.updateState()
AR3.updateState()
Report1.updateReport(AR1.getState(), AR1.thrust, AR1.moments)
Report2.updateReport(AR2.getState(), AR2.thrust, AR2.moments)
Report3.updateReport(AR3.getState(), AR3.thrust, AR3.moments)
Report1.generateReport()
Report2.generateReport()
Report3.generateReport()
plt.pause(20) | 36.269231 | 78 | 0.645811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 514 | 0.18169 |
a8ded0f79cfee5cbfebfd883ed9fb8314fcc5c0e | 161 | py | Python | internetarchive/spew-shelf.py | wumpus/visigoth-data | 6887937f4547a5c8a9b52c5a0e75cb258cc55c97 | [
"Apache-2.0"
] | 1 | 2019-02-18T19:34:24.000Z | 2019-02-18T19:34:24.000Z | internetarchive/spew-shelf.py | wumpus/visigoth-data | 6887937f4547a5c8a9b52c5a0e75cb258cc55c97 | [
"Apache-2.0"
] | null | null | null | internetarchive/spew-shelf.py | wumpus/visigoth-data | 6887937f4547a5c8a9b52c5a0e75cb258cc55c97 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import shelve
import sys
for f in sys.argv[1:]:
with shelve.open(f, flag='r') as d:
for k in d:
print(k,d[k])
| 13.416667 | 39 | 0.552795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.15528 |
a8df969a1e348281f45c3f81e7d78518202537bb | 72 | py | Python | aioblescan/__init__.py | nasa-watchdog/aioblescan-ucsb | 1202906f6a96208f1887f0026a802c034019b068 | [
"MIT"
] | 2 | 2019-10-11T19:13:34.000Z | 2020-06-03T14:11:33.000Z | aioblescan/__init__.py | nasa-watchdog/aioblescan-ucsb | 1202906f6a96208f1887f0026a802c034019b068 | [
"MIT"
] | null | null | null | aioblescan/__init__.py | nasa-watchdog/aioblescan-ucsb | 1202906f6a96208f1887f0026a802c034019b068 | [
"MIT"
] | 4 | 2019-11-19T22:42:17.000Z | 2022-01-18T21:56:31.000Z | #
from .aioblescan import *
from . import plugins
__version__ = '0.2.1'
| 14.4 | 25 | 0.708333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.111111 |
a8dfb79a87fbf5130e974794f6839e17cb680477 | 1,767 | py | Python | cajas/users/api/views/validate_partner_withdraw.py | dmontoya1/cajas | 5eb3d5835250d5dafae398082200b79c1ca8063b | [
"MIT"
] | null | null | null | cajas/users/api/views/validate_partner_withdraw.py | dmontoya1/cajas | 5eb3d5835250d5dafae398082200b79c1ca8063b | [
"MIT"
] | null | null | null | cajas/users/api/views/validate_partner_withdraw.py | dmontoya1/cajas | 5eb3d5835250d5dafae398082200b79c1ca8063b | [
"MIT"
] | null | null | null |
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from cajas.users.models.partner import Partner
from cajas.loans.models.loan import Loan, LoanType
class ValidatePartnerWithdraw(APIView):
def post(self, request):
data = request.data
validate = self.validate_withdraw(data)
if validate == 'loan':
return Response(
"El socio tiene préstamos activos.",
status=status.HTTP_202_ACCEPTED
)
elif validate == 'value':
return Response(
"El socio no tiene los fondos suficientes en su caja para realizar el retiro.",
status=status.HTTP_202_ACCEPTED
)
else:
return Response(
"Validación exitosa. El socio puede hacer el retiro.",
status=status.HTTP_200_OK
)
def validate_withdraw(self, data):
if self.validate_loans(data):
return 'loan'
elif not self.validate_value(data):
return 'value'
return True
def validate_loans(self, data):
partner = get_object_or_404(Partner, pk=data['partner'])
loans = Loan.objects.filter(lender=partner.user, loan_type=LoanType.SOCIO_DIRECTO)
if loans.exists():
for loan in loans:
if loan.balance > 0:
return True
return False
return False
def validate_value(self, data):
partner = get_object_or_404(Partner, pk=data['partner'])
box = partner.box
if (int(data['value']) * 3) < box.balance:
return True
return False
| 31.553571 | 95 | 0.606678 | 1,498 | 0.846806 | 0 | 0 | 0 | 0 | 0 | 0 | 219 | 0.123799 |
a8dfd18189d8ca071a33df8e68b7e90fd7a7c3a0 | 2,666 | py | Python | pi88reader/pi88_to_excel.py | natter1/pi88reader | 9698b25c3df0f1175fcbcec6c6ab22f6fe3aca6b | [
"MIT"
] | null | null | null | pi88reader/pi88_to_excel.py | natter1/pi88reader | 9698b25c3df0f1175fcbcec6c6ab22f6fe3aca6b | [
"MIT"
] | null | null | null | pi88reader/pi88_to_excel.py | natter1/pi88reader | 9698b25c3df0f1175fcbcec6c6ab22f6fe3aca6b | [
"MIT"
] | null | null | null | """
todo: check pandas
"""
from openpyxl import Workbook
from openpyxl.styles import Font
from pi88reader.pi88_importer import PI88Measurement, SegmentType
def main():
filename = '..\\resources\\quasi_static_12000uN.tdm'
filename = '..\\resources\\AuSn_Creep\\1000uN 01 LC.tdm'
measurement = PI88Measurement(filename)
to_excel = PI88ToExcel(measurement)
to_excel.write("delme.xlsx")
class PI88ToExcel:
def __init__(self, pi88_measurement):
self.measurement = pi88_measurement
self.workbook = Workbook()
self.workbook.remove(self.workbook.active)
def write(self, filename):
self.add_sheet_quasi_static_data() # self.workbook.active)
self.add_sheet_segment_data()
self.workbook.save(filename=filename)
def add_sheet_quasi_static_data(self):
wb = self.workbook
#mws_title = self.measurement.filename.split('.')[2].split('\\')[2]
ws_title = self.measurement.filename.split('\\')[-1].split('.')[0]
ws = wb.create_sheet(title=ws_title)
data = self.measurement.get_quasi_static_curve()
self.write_data(ws, data)
def add_sheet_segment_data(self):
wb = self.workbook
ws_title = "segments"
ws = wb.create_sheet(title=ws_title)
ws.cell(row=1, column=1).value = "LOAD:"
data = self.measurement.get_segment_curve(SegmentType.LOAD)
self.write_data(ws, data, row=1, col=2)
ws.cell(row=1, column=5).value = "HOLD:"
data = self.measurement.get_segment_curve(SegmentType.HOLD)
self.write_data(ws, data, row=1, col=6)
ws.cell(row=1, column=9).value = "UNLOAD:"
data = self.measurement.get_segment_curve(SegmentType.UNLOAD)
self.write_data(ws, data, row=1, col=10)
@staticmethod
def write_row(ws, data, row, col):
font = Font(bold=True)
for i, value in enumerate(data):
ws.cell(row=row, column=col+i).value = value
ws.cell(row=row, column=col + i).font = font
@staticmethod
def write_cols(ws, data, row, col):
for i, value in enumerate(data[0]):
for j, column in enumerate(data):
ws.cell(row=row+i, column=col+j).value = column[i]
def write_data(self, ws, data, row=1, col=1):
header = data[0]
if header:
self.write_row(ws, header, row, col)
row += 1
self.write_cols(ws, data[1:], row, col)
# for i, value in enumerate(data[1]):
# for j, column in enumerate(data[1:]):
# ws.cell(row=row+i, column=col+j).value = column[i]
if __name__ == "__main__":
main()
| 32.91358 | 75 | 0.626782 | 2,216 | 0.831208 | 0 | 0 | 448 | 0.168042 | 0 | 0 | 404 | 0.151538 |
a8e1a924cdbc26ab3a5fbd0dc19d7a202d15d1ad | 55,228 | py | Python | caffe_files/caffe_traininglayers.py | excalib/interactive-deep-colorization | 8247da3cc83e54201b20d5a67b997120bb368436 | [
"MIT"
] | 1 | 2021-03-26T14:42:01.000Z | 2021-03-26T14:42:01.000Z | caffe_files/caffe_traininglayers.py | SleepProgger/interactive-deep-colorization | 8247da3cc83e54201b20d5a67b997120bb368436 | [
"MIT"
] | null | null | null | caffe_files/caffe_traininglayers.py | SleepProgger/interactive-deep-colorization | 8247da3cc83e54201b20d5a67b997120bb368436 | [
"MIT"
] | 1 | 2017-09-13T15:36:23.000Z | 2017-09-13T15:36:23.000Z | # **************************************
# ***** Richard Zhang / 2016.08.06 *****
# **************************************
import numpy as np
import warnings
import os
import sklearn.neighbors as nn
import caffe
from skimage import color
import matplotlib.pyplot as plt
import math
import platform
import cv2
import rz_fcns_nohdf5 as rz
# ***************************************
# ***** LAYERS FOR GLOBAL HISTOGRAM *****
# ***************************************
class SpatialRepLayer(caffe.Layer):
    '''Spatially tile a 1x1 feature map to match a reference blob.

    INPUTS
        bottom[0].data  NxCx1x1  features to replicate
        bottom[1].data  NxCxXxY  reference blob (only its spatial size is used)
    OUTPUTS
        top[0].data     NxCxXxY  bottom[0] repeated over every spatial position
    '''
    def setup(self, bottom, top):
        if(len(bottom) != 2):
            raise Exception("Layer needs 2 inputs")
        self.param_str_split = self.param_str.split(' ')
        (self.N, self.C, self.X, self.Y) = bottom[0].data.shape
        if(self.X != 1 or self.Y != 1):
            raise Exception("bottom[0] should have spatial dimensions 1x1")
        # target spatial size, taken from the reference blob
        self.Xref = bottom[1].data.shape[2]
        self.Yref = bottom[1].data.shape[3]
    def reshape(self, bottom, top):
        top[0].reshape(self.N, self.C, self.Xref, self.Yref)
    def forward(self, bottom, top):
        # numpy broadcasting expands the singleton spatial dims to Xref x Yref
        top[0].data[...] = bottom[0].data[:, :, :, :]
    def backward(self, top, propagate_down, bottom):
        # gradient wrt the 1x1 input is the sum of the top diff over space
        bottom[0].diff[:, :, 0, 0] = np.sum(np.sum(top[0].diff, axis=2), axis=2)
        bottom[1].diff[...] = 0  # the reference blob only supplies a shape
class ColorGlobalDropoutLayer(caffe.Layer):
    '''Randomly drop the entire global-color input, appending a keep flag.

    Inputs
        bottom[0].data  NxCx1x1
    Outputs
        top[0].data     Nx(C+1)x1x1  channel C is 1 iff the input was kept;
                                     channels 0..C-1 carry the (possibly
                                     zeroed) input
    '''
    def setup(self, bottom, top):
        if(len(bottom) == 0):
            raise Exception("Layer needs inputs")
        self.param_str_split = self.param_str.split(' ')
        self.keep_ratio = float(self.param_str_split[0])  # probability of keeping the whole input
        self.cnt = 0
        (self.N, self.C, self.X, self.Y) = bottom[0].data.shape
    def reshape(self, bottom, top):
        top[0].reshape(self.N, self.C + 1, self.X, self.Y)
    def forward(self, bottom, top):
        top[0].data[...] = 0
        # one Bernoulli draw per example: keep (1) or drop (0) the input
        keeps = np.random.binomial(1, self.keep_ratio, size=self.N)
        top[0].data[:, -1, :, :] = keeps[:, np.newaxis, np.newaxis]
        top[0].data[:, :-1, :, :] = bottom[0].data[...] * keeps[:, np.newaxis, np.newaxis, np.newaxis]
    def backward(self, top, propagate_down, bottom):
        pass  # backward not implemented
class ChooseOneDropoutLayer(caffe.Layer):
    '''Keep at most one input channel at random, with paired keep flags.

    Inputs
        bottom[0].data  NxCx1x1
    Outputs
        top[0].data     Nx2Cx1x1  even channels carry the kept bottom value,
                                  odd channels flag which channel was kept
                                  (all zero when everything is dropped)
    '''
    def setup(self, bottom, top):
        if(len(bottom) == 0):
            raise Exception("Layer needs inputs")
        self.param_str_split = self.param_str.split(' ')
        self.drop_all_ratio = float(self.param_str_split[0])  # probability of dropping every channel
        self.cnt = 0
        (self.N, self.C, self.X, self.Y) = bottom[0].data.shape
    def reshape(self, bottom, top):
        top[0].reshape(self.N, 2 * self.C, self.X, self.Y)
    def forward(self, bottom, top):
        top[0].data[...] = 0  # clear everything
        # first decide, per example, whether everything gets dropped
        drop_alls = np.random.binomial(1, self.drop_all_ratio, size=self.N)
        # then draw which single channel would be kept
        keep_inds = np.random.randint(self.C, size=self.N)
        for nn in range(self.N):
            if(drop_alls[nn] == 0):
                ki = keep_inds[nn]
                top[0].data[nn, 2 * ki, 0, 0] = bottom[0].data[nn, ki, 0, 0]
                top[0].data[nn, 2 * ki + 1, 0, 0] = 1
    def backward(self, top, propagate_down, bottom):
        pass  # backward not implemented
# **************************************
# ***** RANDOM REVEALING OF COLORS *****
# **************************************
class ColorRandPointLayer(caffe.Layer):
    ''' Layer which reveals random square chunks of the input color.

    INPUTS
        bottom[0].data  NxCxXxY  ab color map
    OUTPUTS
        top[0].data     Nx(C+1)xXxY  mean patch colors revealed in the first
                                     C channels; last channel is mask_mult
                                     wherever color was revealed
    '''
    def setup(self,bottom,top):
        if(len(bottom)==0):
            raise Exception("Layer needs inputs")
        self.param_str_split = self.param_str.split(' ')
        self.cnt = 0
        self.mask_mult = 110.
        self.N = bottom[0].data.shape[0]
        self.C = bottom[0].data.shape[1]
        self.X = bottom[0].data.shape[2]
        self.Y = bottom[0].data.shape[3]
        self.p_numpatch = 0.125 # probability for number of patches to use drawn from geometric distribution
        self.p_min_size = 0 # half-patch min size
        self.p_max_size = 4 # half-patch max size
        self.p_std = .25 # percentage of image for std where patch is located
        self.p_whole = .01 # probability of revealing whole image
    def reshape(self,bottom,top):
        top[0].reshape(self.N,self.C+1,self.X,self.Y) # output mask
    def forward(self,bottom,top):
        top[0].data[...] = 0
        # number of revealed patches per image (geometric distribution)
        Ns = np.random.geometric(p=self.p_numpatch,size=self.N)
        # half-patch sizes; np.random.random_integers is removed in new NumPy,
        # randint with an exclusive upper bound is the equivalent call
        Ps = np.random.randint(self.p_min_size,self.p_max_size+1,size=np.sum(Ns))
        # patch centers: cast to int so they are legal slice indices (floats
        # raise TypeError in modern NumPy), and clip to X-1/Y-1 so every
        # patch covers at least one pixel
        Xs = np.clip(np.random.normal(loc=self.X/2.,scale=self.X*self.p_std,size=np.sum(Ns)),0,self.X-1).astype('int')
        Ys = np.clip(np.random.normal(loc=self.Y/2.,scale=self.Y*self.p_std,size=np.sum(Ns)),0,self.Y-1).astype('int')
        use_wholes = np.random.binomial(1,self.p_whole,size=self.N)
        cnt = 0
        for nn in range(self.N):
            if(use_wholes[nn]==1): # reveal the whole image
                top[0].data[nn,:self.C,:,:] = bottom[0].data[nn,:,:,:]
                top[0].data[nn,-1,:,:] = self.mask_mult
                cnt = cnt+Ns[nn]
            else: # reveal sampled patches
                for nnn in range(Ns[nn]):
                    p = Ps[cnt]
                    # clamp the lower corner at 0: a negative slice start
                    # would wrap around and select an empty window, making
                    # np.mean below return NaN
                    x0 = max(Xs[cnt]-p,0)
                    y0 = max(Ys[cnt]-p,0)
                    x1 = Xs[cnt]+p+1
                    y1 = Ys[cnt]+p+1
                    # reveal the mean color of the patch, tiled over the patch
                    top[0].data[nn,:self.C,x0:x1,y0:y1] \
                        = np.mean(np.mean(bottom[0].data[nn,:,x0:x1,y0:y1],axis=1),axis=1)[:,np.newaxis,np.newaxis]
                    top[0].data[nn,-1,x0:x1,y0:y1] = self.mask_mult
                    cnt = cnt+1
    def backward(self,top,propagate_down,bottom):
        pass # backward not implemented
# Randomly reveal strokes
def gen_random_stroke(X, Nmin=0, Nmax=8, Lmin=4, Lmax=20):
    ''' Generate a random polyline stroke inside an XxX image.

    Starts at a random location with a random heading, then appends up to
    Nmax-1 segments whose heading drifts by a random per-stroke amount;
    generation stops early if the stroke leaves the image.

    INPUTS
        X     size of (square) image
        Nmin  min number of segments
        Nmax  max number of segments (exclusive)
        Lmin  min length of a segment
        Lmax  max length of a segment
    OUTPUTS
        (P,2) array of vertices, clipped to [0, X-1]
    '''
    theta = np.random.uniform(-math.pi, math.pi, size=1)
    pt = np.random.uniform(.1 * X, .9 * X, size=2)
    vertices = [pt.copy()]
    n_seg = np.random.randint(Nmin, Nmax)       # number of segments
    curve_bnd = np.random.uniform(-.4, .4)      # per-stroke curvature bound
    for _ in range(n_seg):
        dtheta = np.random.uniform(-curve_bnd * math.pi, curve_bnd * math.pi, size=1)
        seg_len = np.random.uniform(Lmin, Lmax, size=1)  # pixels
        theta = theta + dtheta
        step = np.array((seg_len * math.cos(theta), seg_len * math.sin(theta))).flatten()
        pt = pt + step
        vertices.append(pt.copy())
        if np.any(pt < 0) or np.any(pt > X - 1):  # went out of bounds
            break
    return np.array(np.clip(vertices, 0, X - 1))
def stroke2mask(pts, W, X, returnFlat=False):
    ''' Rasterize stroke vertices into a boolean mask.

    INPUTS
        pts         (P,2) stroke vertices
        W           line thickness in pixels
        X           size of (square) output mask
        returnFlat  if True, return the mask flattened to 1-D
    OUTPUTS
        XxX boolean mask (or its flattened form)
    '''
    pts = pts.astype('int')
    canvas = np.zeros((X, X, 1), dtype='uint8')
    # draw each consecutive pair of vertices as a thick white segment
    for p0, p1 in zip(pts[:-1], pts[1:]):
        canvas = cv2.line(canvas, (p0[0], p0[1]), (p1[0], p1[1]), (255, 255, 255), thickness=W)
    mask = canvas[:, :, 0] > 0
    if(returnFlat):
        return mask.flatten()
    return mask
class RandStrokePointLayer(caffe.Layer):
    ''' Layer which reveals random strokes and points of the input color.

    INPUTS
        bottom[0].data  NxCxXxY  ab color map
    OUTPUTS
        top[0].data     Nx(C+1)xXxY  revealed colors in the first C channels;
                                     last channel is mask_mult where revealed
    '''
    def setup(self,bottom,top):
        if(len(bottom)==0):
            raise Exception("Layer needs inputs")
        self.param_str_split = self.param_str.split(' ')
        self.cnt = 0
        self.mask_mult = 110.
        self.N = bottom[0].data.shape[0]
        self.C = bottom[0].data.shape[1]
        self.X = bottom[0].data.shape[2]
        self.Y = bottom[0].data.shape[3]
        self.p_numpatch = 0.125 # probability for number of points/strokes to use, drawn from geometric distribution
        self.p_stroke = 0.25 # probability of using a stroke (rather than a point)
        # patch settings
        self.p_min_size = 0 # half-patch min size
        self.p_max_size = 4 # half-patch max size
        self.p_std = .25 # percentage of image for std where patch is located
        self.p_whole = .01 # probability of revealing whole image
        # stroke settings
        self.l_min_thick=1; self.l_max_thick=8; # thickness
        self.l_min_seg=0; self.l_max_seg=10; # number of points per line
        self.l_min_len=0; self.l_max_len=10; # length of each line segment
    def reshape(self,bottom,top):
        top[0].reshape(self.N,self.C+1,self.X,self.Y) # output mask
    def forward(self,bottom,top):
        top[0].data[...] = 0
        # number of reveals (points or strokes) per image
        Ns = np.random.geometric(p=self.p_numpatch,size=self.N)
        use_wholes = np.random.binomial(1,self.p_whole,size=self.N)
        # Per-reveal parameters, one entry per reveal across the whole batch.
        # All of these must be indexed by the running counter cnt, never by
        # the per-image index.
        # randint replaces the removed np.random.random_integers (inclusive bound)
        Ps = np.random.randint(self.p_min_size,self.p_max_size+1,size=np.sum(Ns))
        # centers cast to int so they are valid slice indices, clipped so
        # every patch covers at least one pixel
        Xs = np.clip(np.random.normal(loc=self.X/2.,scale=self.X*self.p_std,size=np.sum(Ns)),0,self.X-1).astype('int')
        Ys = np.clip(np.random.normal(loc=self.Y/2.,scale=self.Y*self.p_std,size=np.sum(Ns)),0,self.Y-1).astype('int')
        # stroke or patch, and stroke thickness
        is_strokes = np.random.binomial(1,self.p_stroke,size=np.sum(Ns))
        Ws = np.random.randint(self.l_min_thick,self.l_max_thick,np.sum(Ns))
        cnt = 0
        for nn in range(self.N):
            if(use_wholes[nn]==1): # reveal the whole image
                top[0].data[nn,:self.C,:,:] = bottom[0].data[nn,:,:,:]
                top[0].data[nn,-1,:,:] = self.mask_mult
                cnt = cnt+Ns[nn]
            else: # sample points/strokes
                for nnn in range(Ns[nn]):
                    # BUGFIX: is_strokes/Ws are sized sum(Ns) and must be
                    # indexed by the global counter cnt (previously nnn, the
                    # per-image index, which reused the first entries)
                    if(not is_strokes[cnt]): # point mode
                        p = Ps[cnt]
                        # clamp the lower corner at 0 so the slice cannot
                        # wrap negative and produce an empty window (NaN mean)
                        x0 = max(Xs[cnt]-p,0)
                        y0 = max(Ys[cnt]-p,0)
                        x1 = Xs[cnt]+p+1
                        y1 = Ys[cnt]+p+1
                        top[0].data[nn,:self.C,x0:x1,y0:y1] \
                            = np.mean(np.mean(bottom[0].data[nn,:,x0:x1,y0:y1],axis=1),axis=1)[:,np.newaxis,np.newaxis]
                        top[0].data[nn,-1,x0:x1,y0:y1] = self.mask_mult
                    else: # stroke mode
                        stroke_pts = gen_random_stroke(self.X,Nmin=self.l_min_seg,Nmax=self.l_max_seg,\
                            Lmin=self.l_min_len,Lmax=self.l_max_len).astype('int')
                        cur_mask = stroke2mask(stroke_pts,Ws[cnt],self.X)
                        cur_mask_inds = rz.find_nd(cur_mask)
                        top[0].data[nn,:self.C,cur_mask_inds[:,0],cur_mask_inds[:,1]] \
                            = bottom[0].data[nn,:,cur_mask_inds[:,0],cur_mask_inds[:,1]]
                        top[0].data[nn,-1,cur_mask_inds[:,0],cur_mask_inds[:,1]] = self.mask_mult
                    cnt = cnt+1
    def backward(self,top,propagate_down,bottom):
        pass # backward not implemented
# **********************************
# ***** PREVIOUSLY MADE LAYERS *****
# **********************************
class DataDropoutLayer(caffe.Layer):
    ''' Layer which drops out chunks of the input.

    param_str: "<dropout_ratio> <dropout_size> <refresh_period> <channel_sync>"

    OUTPUTS
        top[0]  binary retain mask (block-structured, regenerated every
                refresh_period iterations and reused in between)
        top[1]  bottom[0] elementwise-multiplied by that mask

    NOTE(review): the mask shape assumes dropout_size divides X and Y evenly;
    otherwise the repeated block mask will not match top[0] — confirm inputs.
    '''
    def setup(self,bottom,top):
        if(len(bottom)==0):
            raise Exception("Layer needs inputs")
        self.param_str_split = self.param_str.split(' ')
        self.dropout_ratio = float(self.param_str_split[0]) # dropout frequency
        self.dropout_size = int(self.param_str_split[1]) # block size for dropout
        self.refresh_period = int(self.param_str_split[2]) # regenerate every few iterations
        self.channel_sync = bool(int(self.param_str_split[3])) # sync dropout through channels
        self.retain_ratio = 1 - self.dropout_ratio
        self.cnt = 0  # forward-pass counter, drives mask refresh
        self.N = bottom[0].data.shape[0]
        self.C = bottom[0].data.shape[1]
        self.X = bottom[0].data.shape[2]
        self.Y = bottom[0].data.shape[3]
        # mask is drawn at block resolution, then upsampled by dropout_size
        self.Xblock = self.X/self.dropout_size
        self.Yblock = self.Y/self.dropout_size
    def reshape(self,bottom,top):
        top[0].reshape(self.N,self.C,self.X,self.Y) # output mask
        top[1].reshape(self.N,self.C,self.X,self.Y) # masked input
    def forward(self,bottom,top):
        # only regenerate the mask every refresh_period calls; otherwise the
        # previous mask persisting in top[0].data is reused
        if(np.mod(self.cnt,self.refresh_period)==0):
            if(self.channel_sync):
                # one mask shared by all channels (broadcast over C)
                retain_block = np.random.binomial(1,self.retain_ratio,size=(self.N,1,self.Xblock,self.Yblock))
            else:
                retain_block = np.random.binomial(1,self.retain_ratio,size=(self.N,self.C,self.Xblock,self.Yblock))
            # upsample block mask to full resolution
            top[0].data[...] = retain_block.repeat(self.dropout_size,axis=2).repeat(self.dropout_size,axis=3)
        self.cnt = self.cnt+1
        top[1].data[...] = bottom[0].data[...]*top[0].data[...] # mask image
    def backward(self,top,propagate_down,bottom):
        0; # backward not implemented
class LossMeterLayer(caffe.Layer):
    ''' Layer acts as a "meter" to track loss values.

    NOTE(review): this class is redefined later in this file with identical
    logic but '%.3e' formatting; this first definition is shadowed at import
    time and therefore dead code.

    param_str: "<LOSS_DIR> <print_interval P> <history_size H> [<prefix>]"
    Keeps a rolling window of the last H values of each bottom loss, prints
    and appends their means to LOSS_DIR/loss_log every P forward passes, and
    persists the global iteration counter in LOSS_DIR/iter.npy so training
    can resume with a continuous count.
    '''
    def setup(self,bottom,top):
        if(len(bottom)==0):
            raise Exception("Layer needs inputs")
        self.param_str_split = self.param_str.split(' ')
        self.LOSS_DIR = self.param_str_split[0]
        self.P = int(self.param_str_split[1])
        self.H = int(self.param_str_split[2])
        if(len(self.param_str_split)==4):
            self.prefix = self.param_str_split[3]
        else:
            self.prefix = ''
        # self.P = 1000 # interval to print losses
        # self.H = 1000 # history size
        # self.LOSS_DIR = './loss_save'
        self.cnt = 0 # loss track counter
        # self.P = 1 # interval to print losses
        self.h = 0 # index into history
        self.L = len(bottom)
        # rolling history: one row per tracked loss, H columns
        self.losses = np.zeros((self.L,self.H))
        self.ITER_PATH = os.path.join(self.LOSS_DIR,'iter.npy')
        self.LOG_PATH = os.path.join(self.LOSS_DIR,'loss_log')
        rz.mkdir(self.LOSS_DIR)
        # resume the persisted iteration count if one exists
        if(os.path.exists(self.ITER_PATH)):
            self.iter = np.load(self.ITER_PATH)
        else:
            self.iter = 0 # iteration counter
        print 'Initial iteration: %i'%(self.iter+1)
    def reshape(self,bottom,top):
        0;
        # top[0].reshape(1)
        # print 'No'
    def forward(self,bottom,top):
        # record the current loss values into the rolling history
        for ll in range(self.L):
            self.losses[ll,self.h] = bottom[ll].data[...]
        if(np.mod(self.cnt,self.P)==self.P-1): # print
            # average the whole window once it has been filled, otherwise
            # only the entries recorded so far
            if(self.cnt >= self.H-1):
                tmp_str = 'NumAvg %i, Loss '%(self.H)
                for ll in range(self.L):
                    tmp_str += '%.3f, '%np.mean(self.losses[ll,:])
            else:
                tmp_str = 'NumAvg %i, Loss '%(self.h)
                for ll in range(self.L):
                    tmp_str += '%.3f, '%np.mean(self.losses[ll,:self.cnt+1])
            print_str = '%s: Iter %i, %s'%(self.prefix,self.iter+1,tmp_str)
            print print_str
            self.f = open(self.LOG_PATH,'a')
            self.f.write(print_str)
            self.f.write('\n')
            self.f.close()
            np.save(self.ITER_PATH,self.iter)
        self.h = np.mod(self.h+1,self.H) # roll through history
        self.cnt = self.cnt+1
        self.iter = self.iter+1
    def backward(self,top,propagate_down,bottom):
        # deliberately a no-op: a meter layer passes no gradient
        for ll in range(self.L):
            continue
class LossMeterLayer(caffe.Layer):
    ''' Layer acts as a "meter" to track loss values.

    This is the active definition (it shadows the identical class above,
    differing only in printing losses with '%.3e' scientific notation).

    param_str: "<LOSS_DIR> <print_interval P> <history_size H> [<prefix>]"
    Keeps a rolling window of the last H values of each bottom loss, prints
    and appends their means to LOSS_DIR/loss_log every P forward passes, and
    persists the global iteration counter in LOSS_DIR/iter.npy so training
    can resume with a continuous count.
    '''
    def setup(self,bottom,top):
        if(len(bottom)==0):
            raise Exception("Layer needs inputs")
        self.param_str_split = self.param_str.split(' ')
        self.LOSS_DIR = self.param_str_split[0]
        self.P = int(self.param_str_split[1])
        self.H = int(self.param_str_split[2])
        if(len(self.param_str_split)==4):
            self.prefix = self.param_str_split[3]
        else:
            self.prefix = ''
        # self.P = 1000 # interval to print losses
        # self.H = 1000 # history size
        # self.LOSS_DIR = './loss_save'
        self.cnt = 0 # loss track counter
        # self.P = 1 # interval to print losses
        self.h = 0 # index into history
        self.L = len(bottom)
        # rolling history: one row per tracked loss, H columns
        self.losses = np.zeros((self.L,self.H))
        self.ITER_PATH = os.path.join(self.LOSS_DIR,'iter.npy')
        self.LOG_PATH = os.path.join(self.LOSS_DIR,'loss_log')
        rz.mkdir(self.LOSS_DIR)
        # resume the persisted iteration count if one exists
        if(os.path.exists(self.ITER_PATH)):
            self.iter = np.load(self.ITER_PATH)
        else:
            self.iter = 0 # iteration counter
        print 'Initial iteration: %i'%(self.iter+1)
    def reshape(self,bottom,top):
        0;
        # top[0].reshape(1)
        # print 'No'
    def forward(self,bottom,top):
        # record the current loss values into the rolling history
        for ll in range(self.L):
            self.losses[ll,self.h] = bottom[ll].data[...]
        if(np.mod(self.cnt,self.P)==self.P-1): # print
            # average the whole window once it has been filled, otherwise
            # only the entries recorded so far
            if(self.cnt >= self.H-1):
                tmp_str = 'NumAvg %i, Loss '%(self.H)
                for ll in range(self.L):
                    tmp_str += '%.3e, '%np.mean(self.losses[ll,:])
            else:
                tmp_str = 'NumAvg %i, Loss '%(self.h)
                for ll in range(self.L):
                    tmp_str += '%.3e, '%np.mean(self.losses[ll,:self.cnt+1])
            print_str = '%s: Iter %i, %s'%(self.prefix,self.iter+1,tmp_str)
            print print_str
            self.f = open(self.LOG_PATH,'a')
            self.f.write(print_str)
            self.f.write('\n')
            self.f.close()
            np.save(self.ITER_PATH,self.iter)
        self.h = np.mod(self.h+1,self.H) # roll through history
        self.cnt = self.cnt+1
        self.iter = self.iter+1
    def backward(self,top,propagate_down,bottom):
        # deliberately a no-op: a meter layer passes no gradient
        for ll in range(self.L):
            continue
# ***********************************
# ***** PARSE LOSS LOG WRAPPERS *****
# ***********************************
def group_iter_losses(base_dirs,sets,LOSS_ROOTDIR,base_names=-1,set_names=-1,\
    return_min_max=False,min_maxes=1,mask_max=True):
    '''Gather (iters, losses) for several base directories x sets.

    INPUTS
        base_dirs       base subdirectories to search for loss logs in
        sets            subsubdirectories containing each loss_log
        LOSS_ROOTDIR    root directory attached to all base_dirs
        base_names      [base_dirs] dictionary keys for bases (-1: use base_dirs)
        set_names       [sets] dictionary keys for sets (-1: use sets)
        return_min_max  whether to also return per-base min/max of the losses
        min_maxes       per-base flag array, 0 for min, 1 for max
        mask_max        forwarded to the log parser
    OUTPUTS
        (iters, losses)                nested dicts keyed [base_name][set_name]
        (iters, losses, min_maxes)     when return_min_max is True
    '''
    base_dirs = np.array(base_dirs)
    sets = np.array(sets)
    B = base_dirs.size
    if(rz.check_value(base_names,-1)):
        base_names = base_dirs
    if(rz.check_value(set_names,-1)):
        set_names = sets
    min_maxes = rz.scalar_to_array(B,min_maxes)
    iters = {}
    losses = {}
    if(return_min_max):
        ret_min_maxes = {}
    for (bb,base) in enumerate(base_dirs):
        bname = base_names[bb]
        # one loss_log path per set, all under this base
        paths = ['%s/%s/loss_log'%(base,cur_set) for cur_set in sets]
        if(return_min_max):
            (iters[bname],losses[bname],ret_min_maxes[bname]) \
                = parse_loss_logs(set_names,paths,LOSS_ROOTDIR,return_min_max=True,min_maxes=min_maxes,mask_max=mask_max)
        else:
            (iters[bname],losses[bname]) \
                = parse_loss_logs(set_names,paths,LOSS_ROOTDIR,return_min_max=False,mask_max=mask_max)
    if(return_min_max):
        return (iters,losses,ret_min_maxes)
    return (iters,losses)
def parse_loss_logs(names,LOSS_LOG_PATHS,rootdir='',iter_norm_factor=1000,\
    return_min_max=False,min_maxes=1,mask_max=True):
    '''Parse several loss_log files into dictionaries keyed by name.'''
    LOSS_LOG_PATHS = np.array(LOSS_LOG_PATHS)
    names = np.array(names)
    N = names.size
    min_maxes = rz.scalar_to_array(N,min_maxes)
    iters = {}
    losses = {}
    if(return_min_max):
        ret_min_maxes = {}
        for (nn,name) in enumerate(names):
            full_path = os.path.join(rootdir,LOSS_LOG_PATHS[nn])
            (iters[name],losses[name],ret_min_maxes[name]) = parse_loss_log(full_path,
                iter_norm_factor=iter_norm_factor,return_min_max=True,
                min_max=min_maxes[nn],mask_max=mask_max)
        return (iters,losses,ret_min_maxes)
    for (nn,name) in enumerate(names):
        full_path = os.path.join(rootdir,LOSS_LOG_PATHS[nn])
        (iters[name],losses[name]) = parse_loss_log(full_path,
            iter_norm_factor=iter_norm_factor,return_min_max=False,mask_max=mask_max)
    return (iters,losses)
def parse_loss_log(LOSS_LOG_PATH,iter_norm_factor=1000,\
    return_min_max=False,min_max=1,mask_max=True):
    '''Parse a single loss_log file written by LossMeterLayer.

    Each line looks like:
        "<prefix>: Iter <i>, NumAvg <n>, Loss <l0>, <l1>, ..., "
    The last space-separated token of every comma field is parsed as a float,
    giving rows of [iter, num_avg, loss0, loss1, ...].

    INPUTS
        LOSS_LOG_PATH     path to the log file
        iter_norm_factor  iterations are divided by this (default: k-iters)
        return_min_max    also return min (min_max=0) or max (otherwise) of
                          each loss column
        mask_max          keep only rows whose NumAvg equals the maximum seen
                          (i.e. fully-averaged window entries)
    OUTPUTS
        (iters, losses[, min_max_vals]); (1,1) zero arrays when the file is
        missing or empty
    '''
    def _empty():
        # placeholder outputs for a missing/empty log
        if(return_min_max):
            return (np.zeros((1,1)),np.zeros((1,1)),np.zeros((1,1)))
        return (np.zeros((1,1)),np.zeros((1,1)))
    if(not os.path.exists(LOSS_LOG_PATH)):
        return _empty()
    recs = []
    # with-statement: the file is closed even if a line fails to parse
    with open(LOSS_LOG_PATH,'r') as f:
        for cur_line in f:
            # the trailing ", " leaves an empty last field; drop it
            parts = cur_line.split(',')[:-1]
            if(len(parts)==0):  # skip blank lines
                continue
            recs.append([float(part.split(' ')[-1]) for part in parts])
    if(len(recs)==0):  # an empty file previously crashed on recs[:,1]
        return _empty()
    recs = np.array(recs)
    if(mask_max):
        mask = recs[:,1]==np.max(recs[:,1])
    else:
        mask = np.ones(recs[:,1].size,dtype=bool)
    recs = recs[mask]
    iters = recs[:,0]
    losses = recs[:,2:]
    if(return_min_max):
        if(min_max==0):
            ret_min_max = np.min(losses,axis=0)
        else:
            ret_min_max = np.max(losses,axis=0)
        return (iters/iter_norm_factor,losses,ret_min_max)
    return (iters/iter_norm_factor,losses)
def cmap_to_color(cmap, bb, B):
    '''Map index bb out of B total onto a color from colormap cmap.'''
    frac = 1. * bb / B
    return cmap(frac)
def plot_losses(ax,iters,losses,base_names,set_names,\
    cmap=plt.cm.hsv_r,set_lines='-',inds=0,mults=1,toNorm=False):
    '''Plot one loss curve per (base, set) pair on axes ax.

    INPUTS
        ax          matplotlib axes to draw on
        iters       nested dict [base][set] of x-values
        losses      nested dict [base][set] of loss arrays (rows x columns)
        base_names  base keys; one color per base (from cmap)
        set_names   set keys; one linestyle per set (from set_lines)
        inds        per-base loss-column index to plot
        mults       per-base multiplier applied to the loss values
        toNorm      if True, normalize each curve by its final value
    '''
    # BUGFIX: convert to ndarray BEFORE reading .size (plain lists have no
    # .size attribute)
    base_names = np.array(base_names).flatten()
    set_names = np.array(set_names).flatten()
    B = base_names.size
    inds = rz.scalar_to_array(B,inds)
    mults = rz.scalar_to_array(B,mults)
    for (bb,base_name) in enumerate(base_names):
        for (ss,set_name) in enumerate(set_names):
            vals = mults[bb]*losses[base_name][set_name][:,inds[bb]]
            # BUGFIX: toNorm was previously forwarded to ax.plot, which does
            # not accept it; normalize here instead (as plot_losses_single does)
            if(toNorm):
                vals = vals/vals[-1]
            ax.plot(iters[base_name][set_name],vals,\
                set_lines[ss],color=cmap_to_color(cmap,bb,B),\
                linewidth=2,label='%s-%s'%(base_name,set_name))
def plot_losses_single(ax,iters,losses,set_names,\
    cmap=plt.cm.hsv_r,set_lines='-',chars='',toNorm=False):
    '''Plot every loss column of each set on axes ax, one color per column.

    If toNorm is True, each curve is divided by its final value.
    '''
    for (ss,set_name) in enumerate(set_names):
        cur_losses = losses[set_name]
        num_cols = cur_losses.shape[1]
        chars_use = rz.scalar_to_array(num_cols,chars)
        for col in range(num_cols):
            if(toNorm):
                vals = cur_losses[:,col]/(cur_losses[-1,col])
            else:
                vals = cur_losses[:,col]
            ax.plot(iters[set_name],vals,set_lines[ss],
                color=cmap_to_color(cmap,col,num_cols),linewidth=2,
                label='[%i]-%s-%s'%(col,set_name,chars_use[col]))
class GradientMagnitudeMeterLayer(caffe.Layer):
    ''' Layer which acts as a "meter" to measure gradient magnitude.

    A pass-through layer: data and diffs are copied unchanged. On the
    backward pass it records, every I iterations, the per-example L2 norm of
    each bottom diff into a rolling history of size H, and every P such
    recordings prints/appends the history means to ./grad_log.
    '''
    def setup(self,bottom,top):
        if(len(bottom)==0):
            raise Exception("Layer needs inputs")
        self.cnt = 0 # iteration counter
        self.I = 10 # interval of iterations to keep track
        self.pp = 0  # counts recordings modulo the print interval P
        # self.P = 1 # interval to print gradient magnitudes
        self.P = 10 # interval to print gradient magnitudes
        # self.P = 100 # interval to print gradient magnitudes
        self.h = 0 # index into history
        # self.H = 100 # history size
        self.H = 10 # history size
        self.H_reached = False  # becomes True once the window has been filled
        self.L = len(bottom)
        # per-input blob shapes, filled in reshape()
        self.Ns = np.zeros((self.L,),dtype=int)
        self.Cs = np.zeros((self.L,),dtype=int)
        self.Xs = np.zeros((self.L,),dtype=int)
        self.Ys = np.zeros((self.L,),dtype=int)
        # rolling history of gradient magnitudes, one row per input
        self.mags = np.zeros((self.L,self.H))
        self.LOG_PATH = './grad_log'
    def reshape(self,bottom,top):
        # print self.L
        # mirror each bottom's shape onto the corresponding top
        for ll in range(self.L):
            self.Ns[ll] = bottom[ll].data.shape[0]
            self.Cs[ll] = bottom[ll].data.shape[1]
            self.Xs[ll] = bottom[ll].data.shape[2]
            self.Ys[ll] = bottom[ll].data.shape[3]
            top[ll].reshape(self.Ns[ll],self.Cs[ll],self.Xs[ll],self.Ys[ll])
        # for ll in range(self.L):
    def forward(self,bottom,top):
        for ll in range(self.L):
            top[ll].data[...] = bottom[ll].data[...] # copy data through
    def backward(self,top,propagate_down,bottom):
        for ll in range(self.L):
            if not propagate_down[ll]:
                continue
            bottom[ll].diff[...] = top[ll].diff[...] # copy diff through
            if(np.mod(self.cnt,self.I)==0): # every Ith iteration, record
                # L2 norm of this input's diff, normalized by batch size
                self.mags[ll,self.h] = np.linalg.norm(bottom[ll].diff[...])/self.Ns[ll]
            # if(self.pp==0):
                # if(self.H_reached==True): # average whole history
                    # print('GradMag %i/%i (%i): %.3f'%(ll,self.L,self.H,np.mean(self.mags[ll,:])))
                # else: # haven't built whole history yet
                    # print('GradMag %i/%i (%i): %.3f'%(ll,self.L,self.h,np.mean(self.mags[ll,:self.h])))
            # self.pp = np.mod(self.pp+1,self.P)
        if(np.mod(self.cnt,self.I)==0): # every Ith iteration, record
            if(self.pp==0):
                # build one line with the mean magnitude of every input
                if(self.H_reached==True): # average whole history
                    tmp_str = '(%i)'%self.H
                    for ll in range(self.L):
                        tmp_str += ' / %.3f'%(np.mean(self.mags[ll,:]))
                else: # haven't built whole history yet
                    tmp_str = '(%i)'%self.h
                    for ll in range(self.L):
                        tmp_str += ' / %.3f'%(np.mean(self.mags[ll,:self.h+1]))
                print_str = 'GradMag: %s'%tmp_str
                print print_str
                self.f = open(self.LOG_PATH,'a')
                self.f.write(print_str)
                self.f.write('\n')
                self.f.close()
            self.pp = np.mod(self.pp+1,self.P)
            if((self.H_reached==False) and (self.h==self.H-1)):
                self.H_reached = True
            self.h = np.mod(self.h+1,self.H)
        self.cnt = self.cnt+1
class ManhattanLossLayer(caffe.Layer):
    '''L1 (Manhattan) loss, averaged over the batch and all elements.

    INPUTS
        bottom[0]  prediction
        bottom[1]  target
    OUTPUTS
        top[0]     scalar mean absolute difference
    '''
    def setup(self, bottom, top):
        if(len(bottom) != 2):
            raise Exception("Layer inputs != 2 (len(bottom)!=2)")
        self.N = bottom[0].data.shape[0]
        # number of elements per example (product of all non-batch dims)
        self.P = np.prod(np.array(bottom[0].data.shape[1:]))
    def reshape(self, bottom, top):
        top[0].reshape(1)  # single loss value
    def forward(self, bottom, top):
        resid = bottom[1].data[...] - bottom[0].data[...]
        top[0].data[...] = np.sum(np.abs(resid)) / (self.N * self.P)
    def backward(self, top, propagate_down, bottom):
        # d|t-p|/dp = -sign(t-p); symmetric positive sign for the target input
        grad = np.sign(bottom[1].data[...] - bottom[0].data[...]) / (self.N * self.P)
        for i in range(len(bottom)):
            if not propagate_down[i]:
                continue
            if(i == 0):
                bottom[i].diff[...] = -1. * grad
            else:
                bottom[i].diff[...] = 1. * grad
class NNEnc2Layer(caffe.Layer):
    ''' Layer which encodes an ab map into Q quantized colors.

    For every pixel, finds the 9 grid bins around its ab value (the bin it
    falls in plus its 8 neighbors), weights them with a Gaussian kernel on
    squared ab distance (sigma=5), and writes a soft one-hot distribution
    over the Q in-hull bins. Optionally also emits a hard (1-NN) encoding.

    INPUTS
        bottom[0]   Nx2xXxY   ab values
    OUTPUTS
        top[0].data NxQxXxY   soft encoding
        top[1].data NxQxXxY   hard (nearest-bin) encoding, if a 2nd top exists
    '''
    def setup(self,bottom,top):
        warnings.filterwarnings("ignore")
        if len(bottom) == 0:
            raise Exception("Layer should have inputs")
        self.NN = 9 # this is hard-coded into the forward
        # self.NN = 1 # this is hard-coded into the forward
        self.sigma = 5.
        self.ENC_DIR = './data/color_bins'
        # self.nnenc = NNEncode(self.NN,self.sigma,km_filepath=os.path.join(self.ENC_DIR,'pts_in_hull.npy'))
        self.pts_in_hull = np.load(os.path.join(self.ENC_DIR,'pts_in_hull.npy'))
        self.prior_probs = np.load(os.path.join(self.ENC_DIR,'prior_probs.npy'))
        # NOTE(review): the loads below repeat/overwrite the two above
        self.ENC_DIR = './data/color_bins'
        self.pts_in_hull = np.load(os.path.join(self.ENC_DIR,'pts_in_hull.npy'))
        self.pts_grid = np.load(os.path.join(self.ENC_DIR,'pts_grid.npy'))
        self.prior_probs = np.load(os.path.join(self.ENC_DIR,'prior_probs.npy'))
        self.prior_probs_full = np.load(os.path.join(self.ENC_DIR,'prior_probs_full.npy'))
        self.in_hull = np.load(os.path.join(self.ENC_DIR,'in_hull.npy'))
        # maps a full-grid bin index to its index among in-hull bins
        self.full_to_hull = np.cumsum(self.in_hull)-1
        self.min_pt = np.min(self.pts_grid)
        # grid spacing = gap between the two smallest distinct coordinates
        self.spacing = np.sort(np.unique(self.pts_grid))
        self.spacing = self.spacing[1] - self.spacing[0]
        self.S = np.sqrt(self.pts_grid.shape[0])  # full grid is SxS
        self.Q = self.pts_in_hull.shape[0]
        self.N = bottom[0].data.shape[0]
        self.X = bottom[0].data.shape[2]
        self.Y = bottom[0].data.shape[3]
        self.P = self.N*self.X*self.Y  # total number of pixels
        # preallocated scratch buffers, reused every forward pass
        self.dists_sq = np.zeros((self.P,self.NN))
        self.inds = np.zeros((self.P,self.NN),dtype='int')
        self.ab_enc_flt = np.zeros((self.P,self.Q))
        self.inds_P = np.arange(0,self.P,dtype='int')[:,rz.na()]
        self.ab_enc_flt_hard = np.zeros((self.P,self.Q))
        # a second top requests the additional hard encoding
        if(len(top)==1):
            self.HARD_ENC = False
        else:
            self.HARD_ENC = True
    def reshape(self, bottom, top):
        top[0].reshape(self.N,self.Q,self.X,self.Y) # soft encoding
        if(self.HARD_ENC):
            top[1].reshape(self.N,self.Q,self.X,self.Y) # hard encoding
    def forward(self, bottom, top):
        # print 'hello'
        self.ab_enc_flt[...] = 0
        # soft encoding
        ab_val = bottom[0].data[...]
        ab_val_flt = rz.flatten_nd_array(ab_val,axis=1)
        # nearest grid subscript of each ab value
        ab_enc_sub = np.round((ab_val-self.min_pt)/self.spacing)
        ab_enc_sub = np.clip(ab_enc_sub,1,self.S-1) # force points into margin
        # ab_enc_sub_flt = rz.flatten_nd_array(ab_enc_sub,axis=1)
        # inds_map = self.full_to_hull[rz.sub2ind2(ab_enc_sub_flt,np.array((self.S,self.S)))]
        t = rz.Timer()
        cnt = 0
        # visit the center bin and its 8 neighbors (offsets in a and b)
        for aa in np.array((0,-1,1)): # hard-coded to find 9-NN
            for bb in np.array((0,-1,1)):
        # for aa in np.array((0,)): # hard-coded to find 1-NN
            # for bb in np.array((0,)):
                tmp = ab_enc_sub.copy()
                tmp[:,0,:,:] = tmp[:,0,:,:]+aa
                tmp[:,1,:,:] = tmp[:,1,:,:]+bb
                ab_enc_sub_flt = rz.flatten_nd_array(tmp,axis=1)
                # convert grid subscripts -> flat grid index -> hull index
                inds_hull = self.full_to_hull[rz.sub2ind2(ab_enc_sub_flt,np.array((self.S,self.S)))]
                self.dists_sq[:,cnt] = np.sum((ab_val_flt-self.pts_in_hull[inds_hull,:])**2,axis=1)
                self.inds[:,cnt] = inds_hull
                cnt = cnt+1
        # print t.tocStr()
        # Gaussian kernel on squared distance, normalized over the 9 bins
        wts = np.exp(-self.dists_sq/(2*self.sigma**2))
        # print t.tocStr()
        wts = wts/np.sum(wts,axis=1)[:,rz.na()]
        # print t.tocStr()
        self.ab_enc_flt[self.inds_P,self.inds] = wts
        # print t.tocStr()
        top[0].data[...] = rz.unflatten_2d_array(self.ab_enc_flt,ab_val,axis=1)
        # print t.tocStr()
        # hard encoding
        if(self.HARD_ENC):
            # column 0 holds the center (nearest) bin index
            self.ab_enc_flt_hard[self.inds_P,self.inds[:,[0]]] = 1
            # print t.tocStr()
            top[1].data[...] = rz.unflatten_2d_array(self.ab_enc_flt_hard,ab_val,axis=1)
            # print t.tocStr()
            # zero the entries again so the scratch buffer is clean next pass
            self.ab_enc_flt_hard[self.inds_P,self.inds[:,[0]]] = 0
            # print t.tocStr()
    def backward(self, top, propagate_down, bottom):
        # no back-prop
        for i in range(len(bottom)):
            if not propagate_down[i]:
                continue
            bottom[i].diff[...] = np.zeros_like(bottom[i].data)
class NNEnc1HotLayer(caffe.Layer):
    ''' Layer which encodes an ab map into a one-hot over Q quantized colors.

    Each pixel's ab value is snapped to its single nearest in-hull grid bin
    (1-NN); the output is a hard one-hot encoding over the Q bins. The
    Gaussian soft weights are still computed into a scratch buffer but only
    the hard encoding is emitted.

    INPUTS
        bottom[0]   Nx2xXxY   ab values
    OUTPUTS
        top[0].data NxQxXxY   hard (nearest-bin) one-hot encoding
    '''
    def setup(self,bottom,top):
        warnings.filterwarnings("ignore")
        if len(bottom) == 0:
            raise Exception("Layer should have inputs")
        self.NN = 1 # this is hard-coded into the forward
        self.sigma = 5.
        self.ENC_DIR = './data/color_bins'
        # self.nnenc = NNEncode(self.NN,self.sigma,km_filepath=os.path.join(self.ENC_DIR,'pts_in_hull.npy'))
        self.pts_in_hull = np.load(os.path.join(self.ENC_DIR,'pts_in_hull.npy'))
        self.prior_probs = np.load(os.path.join(self.ENC_DIR,'prior_probs.npy'))
        # NOTE(review): the loads below repeat/overwrite the two above
        self.ENC_DIR = './data/color_bins'
        self.pts_in_hull = np.load(os.path.join(self.ENC_DIR,'pts_in_hull.npy'))
        self.pts_grid = np.load(os.path.join(self.ENC_DIR,'pts_grid.npy'))
        self.prior_probs = np.load(os.path.join(self.ENC_DIR,'prior_probs.npy'))
        self.prior_probs_full = np.load(os.path.join(self.ENC_DIR,'prior_probs_full.npy'))
        self.in_hull = np.load(os.path.join(self.ENC_DIR,'in_hull.npy'))
        # maps a full-grid bin index to its index among in-hull bins
        self.full_to_hull = np.cumsum(self.in_hull)-1
        self.min_pt = np.min(self.pts_grid)
        # grid spacing = gap between the two smallest distinct coordinates
        self.spacing = np.sort(np.unique(self.pts_grid))
        self.spacing = self.spacing[1] - self.spacing[0]
        self.S = np.sqrt(self.pts_grid.shape[0])  # full grid is SxS
        self.Q = self.pts_in_hull.shape[0]
    def reshape(self, bottom, top):
        # buffers are (re)allocated here because the batch size may change
        self.N = bottom[0].data.shape[0]
        self.X = bottom[0].data.shape[2]
        self.Y = bottom[0].data.shape[3]
        self.P = self.N*self.X*self.Y  # total number of pixels
        self.dists_sq = np.zeros((self.P,self.NN))
        self.inds = np.zeros((self.P,self.NN),dtype='int')
        self.ab_enc_flt = np.zeros((self.P,self.Q))
        self.inds_P = np.arange(0,self.P,dtype='int')[:,rz.na()]
        self.ab_enc_flt_hard = np.zeros((self.P,self.Q))
        top[0].reshape(self.N,self.Q,self.X,self.Y) # hard encoding
    def forward(self, bottom, top):
        self.ab_enc_flt[...] = 0
        # soft encoding
        ab_val = bottom[0].data[...]
        ab_val_flt = rz.flatten_nd_array(ab_val,axis=1)
        # nearest grid subscript of each ab value
        ab_enc_sub = np.round((ab_val-self.min_pt)/self.spacing)
        ab_enc_sub = np.clip(ab_enc_sub,1,self.S-1) # force points into margin
        t = rz.Timer()
        cnt = 0
        # only the center bin is visited (1-NN)
        for aa in np.array((0,)): # hard-coded to find 9-NN
            for bb in np.array((0,)):
                tmp = ab_enc_sub.copy()
                tmp[:,0,:,:] = tmp[:,0,:,:]+aa
                tmp[:,1,:,:] = tmp[:,1,:,:]+bb
                ab_enc_sub_flt = rz.flatten_nd_array(tmp,axis=1)
                # convert grid subscripts -> flat grid index -> hull index
                inds_hull = self.full_to_hull[rz.sub2ind2(ab_enc_sub_flt,np.array((self.S,self.S)))]
                self.dists_sq[:,cnt] = np.sum((ab_val_flt-self.pts_in_hull[inds_hull,:])**2,axis=1)
                self.inds[:,cnt] = inds_hull
                cnt = cnt+1
        # print t.tocStr()
        # Gaussian weights (trivially all ones after normalization for NN=1)
        wts = np.exp(-self.dists_sq/(2*self.sigma**2))
        # print t.tocStr()
        wts = wts/np.sum(wts,axis=1)[:,rz.na()]
        # print t.tocStr()
        # print np.sum(wts)
        self.ab_enc_flt[self.inds_P,self.inds] = wts
        # print t.tocStr()
        # top[0].data[...] = rz.unflatten_2d_array(self.ab_enc_flt,ab_val,axis=1)
        # print t.tocStr()
        # hard encoding
        # if(self.HARD_ENC):
        self.ab_enc_flt_hard[self.inds_P,self.inds[:,[0]]] = 1
        # print t.tocStr()
        top[0].data[...] = rz.unflatten_2d_array(self.ab_enc_flt_hard, ab_val, axis=1)
        # print t.tocStr()
        # self.ab_enc_flt_hard[self.inds_P,self.inds[:,[0]]] = 0
        # print t.tocStr()
    def backward(self, top, propagate_down, bottom):
        # no back-prop
        for i in range(len(bottom)):
            if not propagate_down[i]:
                continue
            bottom[i].diff[...] = np.zeros_like(bottom[i].data)
# ************************
# ***** CAFFE LAYERS *****
# ************************
class BGR2HSVLayer(caffe.Layer):
    ''' Layer converts BGR to HSV
    INPUTS
        bottom[0]   Nx3xXxY
    OUTPUTS
        top[0].data Nx3xXxY
    '''
    def setup(self, bottom, top):
        warnings.filterwarnings("ignore")
        # Fixed error-message grammar ("should a" -> "should have a").
        if(len(bottom)!=1):
            raise Exception("Layer should have a single input")
        if(bottom[0].data.shape[1]!=3):
            raise Exception("Input should be 3-channel BGR image")
        # Dimensions are captured once at setup time.
        self.N = bottom[0].data.shape[0]
        self.X = bottom[0].data.shape[2]
        self.Y = bottom[0].data.shape[3]
    def reshape(self, bottom, top):
        top[0].reshape(self.N,3,self.X,self.Y)
    def forward(self, bottom, top):
        # Per image: reverse BGR->RGB, move channels last for the skimage
        # converter, then restore the channels-first layout.
        # (Loop variable renamed from `nn`, which shadowed a module alias.)
        for ii in range(self.N):
            top[0].data[ii,:,:,:] = color.rgb2hsv(bottom[0].data[ii,::-1,:,:].astype('uint8').transpose((1,2,0))).transpose((2,0,1))
    def backward(self, top, propagate_down, bottom):
        # The color-space conversion is treated as non-differentiable; no
        # gradient is propagated (the original's diff-zeroing was disabled,
        # leaving an empty loop, replaced here by an explicit no-op).
        pass
class BGR2LabLayer(caffe.Layer):
    ''' Layer converts BGR to Lab
    INPUTS
        bottom[0]   Nx3xXxY
    OUTPUTS
        top[0].data Nx3xXxY
    '''
    def setup(self, bottom, top):
        warnings.filterwarnings("ignore")
        # Fixed error-message grammar ("should a" -> "should have a").
        if(len(bottom)!=1):
            raise Exception("Layer should have a single input")
        if(bottom[0].data.shape[1]!=3):
            raise Exception("Input should be 3-channel BGR image")
        # Dimensions are captured once at setup time.
        self.N = bottom[0].data.shape[0]
        self.X = bottom[0].data.shape[2]
        self.Y = bottom[0].data.shape[3]
    def reshape(self, bottom, top):
        top[0].reshape(self.N,3,self.X,self.Y)
    def forward(self, bottom, top):
        # Reverse BGR->RGB, transpose to (X,Y,N,C) so skimage converts the
        # whole batch in one call, then restore the NxCxXxY layout.
        top[0].data[...] = color.rgb2lab(bottom[0].data[:,::-1,:,:].astype('uint8').transpose((2,3,0,1))).transpose((2,3,0,1))
    def backward(self, top, propagate_down, bottom):
        # The color-space conversion is treated as non-differentiable; no
        # gradient is propagated (the original's diff-zeroing was disabled,
        # leaving an empty loop, replaced here by an explicit no-op).
        pass
class EncLayer(caffe.Layer):
    ''' Layer which does hard quantization into bins
    INPUTS
        bottom[0]   Nx1xXxY
    OUTPUTS
        top[0].data NxQ
    '''
    def setup(self, bottom, top):
        warnings.filterwarnings("ignore")
        if len(bottom) == 0:
            raise Exception("Layer should have inputs")
        # param_str carries three space-separated values: min, max, increment.
        self.param_str_split = self.param_str.split(' ')
        self.min = float(self.param_str_split[0])
        self.max = float(self.param_str_split[1])
        self.inc = float(self.param_str_split[2])
        shp = bottom[0].data.shape
        self.N = shp[0]
        self.X = shp[2]
        self.Y = shp[3]
    def reshape(self, bottom, top):
        top[0].reshape(self.N, 1, self.X, self.Y)
    def forward(self, bottom, top):
        # Map input values linearly into bin-index space.
        top[0].data[...] = (bottom[0].data[...] - self.min) / self.inc
    def backward(self, top, propagate_down, bottom):
        # Quantization is not differentiated; zero any requested gradients.
        for ii, blob in enumerate(bottom):
            if propagate_down[ii]:
                blob.diff[...] = np.zeros_like(blob.data)
class NNEncLayer(caffe.Layer):
    ''' Layer which encodes ab map into Q colors
    INPUTS
        bottom[0]   Nx2xXxY
    OUTPUTS
        top[0].data NxQ
    '''
    def setup(self, bottom, top):
        warnings.filterwarnings("ignore")
        if len(bottom) == 0:
            raise Exception("Layer should have inputs")
        self.NN = 10.
        self.sigma = 5.
        self.ENC_DIR = './data/color_bins'
        self.nnenc = NNEncode(self.NN, self.sigma, km_filepath=os.path.join(self.ENC_DIR, 'pts_in_hull.npy'))
        # A second, 1-NN encoder is only built when a hard-encoding top exists.
        self.HARD_FLAG = len(top) == 2
        if self.HARD_FLAG:
            self.nnenc2 = NNEncode(1, self.sigma, km_filepath=os.path.join(self.ENC_DIR, 'pts_in_hull.npy'))
        shp = bottom[0].data.shape
        self.N, self.X, self.Y = shp[0], shp[2], shp[3]
        self.Q = self.nnenc.K
    def reshape(self, bottom, top):
        shp = bottom[0].data.shape
        self.N, self.X, self.Y = shp[0], shp[2], shp[3]
        self.Q = self.nnenc.K
        top[0].reshape(self.N, self.Q, self.X, self.Y)
        if self.HARD_FLAG:
            top[1].reshape(self.N, self.Q, self.X, self.Y)
    def forward(self, bottom, top):
        # NOTE(review): top[0] is reshaped but intentionally never filled here
        # (the soft-encoding assignment is disabled in the original); only the
        # hard 1-NN encoding in top[1] is written.
        if self.HARD_FLAG:
            top[1].data[...] = self.nnenc2.encode_points_mtx_nd(bottom[0].data[...], axis=1)
    def backward(self, top, propagate_down, bottom):
        # No gradients flow through the encoding.
        for ii, blob in enumerate(bottom):
            if propagate_down[ii]:
                blob.diff[...] = np.zeros_like(blob.data)
class PriorBoostLayer(caffe.Layer):
    ''' Layer boosts ab values based on their rarity
    INPUTS
        bottom[0]   NxQxXxY
    OUTPUTS
        top[0].data Nx1xXxY
    '''
    def setup(self, bottom, top):
        if len(bottom) == 0:
            raise Exception("Layer should have inputs")
        self.ENC_DIR = './data/color_bins'
        self.gamma = .5
        self.alpha = 1.
        # PriorFactor converts the empirical color prior into per-bin boosts.
        self.pc = PriorFactor(self.alpha, gamma=self.gamma, priorFile=os.path.join(self.ENC_DIR, 'prior_probs.npy'))
        shp = bottom[0].data.shape
        self.N, self.Q, self.X, self.Y = shp[0], shp[1], shp[2], shp[3]
    def reshape(self, bottom, top):
        top[0].reshape(self.N, 1, self.X, self.Y)
    def forward(self, bottom, top):
        # One boost coefficient per pixel, chosen by the argmax bin.
        top[0].data[...] = self.pc.forward(bottom[0].data[...], axis=1)
    def backward(self, top, propagate_down, bottom):
        # No gradients flow through the boost computation.
        for ii, blob in enumerate(bottom):
            if propagate_down[ii]:
                blob.diff[...] = np.zeros_like(blob.data)
class NonGrayMaskLayer(caffe.Layer):
    ''' Layer outputs a mask based on if the image is grayscale or not
    INPUTS
        bottom[0]   Nx2xXxY     ab values
    OUTPUTS
        top[0].data Nx1xXxY     1 if image is NOT grayscale
                                0 if image is grayscale
    '''
    def setup(self, bottom, top):
        if len(bottom) == 0:
            raise Exception("Layer should have inputs")
        self.thresh = 5  # ab magnitude above which a pixel counts as colored
        shp = bottom[0].data.shape
        self.N, self.X, self.Y = shp[0], shp[2], shp[3]
    def reshape(self, bottom, top):
        top[0].reshape(self.N, 1, self.X, self.Y)
    def forward(self, bottom, top):
        # An image counts as color when ANY (a,b) value exceeds the threshold.
        exceeds = np.abs(bottom[0].data) > self.thresh
        is_color = np.sum(np.sum(np.sum(exceeds, axis=1), axis=1), axis=1) > 0
        top[0].data[...] = is_color[:, na(), na(), na()]
    def backward(self, top, propagate_down, bottom):
        # The mask is non-differentiable; zero any requested gradients.
        for ii, blob in enumerate(bottom):
            if propagate_down[ii]:
                blob.diff[...] = np.zeros_like(blob.data)
class ClassRebalanceMultLayer(caffe.Layer):
    ''' Pass-through on forward; on backward, the gradient into bottom[0]
    is boosted by pointwise multiplication (with singleton expansion)
    with the coefficients in bottom[1].
    INPUTS
        bottom[0]   NxMxXxY     feature map
        bottom[1]   Nx1xXxY     boost coefficients
    OUTPUTS
        top[0]      NxMxXxY     on forward, gets copied from bottom[0]
    '''
    def setup(self, bottom, top):
        # check input pair
        if len(bottom) == 0:
            raise Exception("Specify inputs")
    def reshape(self, bottom, top):
        shp = bottom[0].data.shape
        nd = bottom[0].data.ndim
        if nd == 1:
            top[0].reshape(shp[0])
        elif nd == 2:
            top[0].reshape(shp[0], shp[1])
        elif nd == 4:
            top[0].reshape(shp[0], shp[1], shp[2], shp[3])
    def forward(self, bottom, top):
        # Plain copy; multiplying here would corrupt the activations that
        # flow to upper layers (per the original author's note).
        top[0].data[...] = bottom[0].data[...]
    def backward(self, top, propagate_down, bottom):
        # Only bottom[0] ever receives a (boosted) gradient; bottom[1] holds
        # the coefficients and gets nothing.
        for ii in range(len(bottom)):
            if propagate_down[ii]:
                bottom[0].diff[...] = top[0].diff[...] * bottom[1].data[...]
# ***************************
# ***** SUPPORT CLASSES *****
# ***************************
class PriorFactor():
    ''' Class handles prior factor: converts the empirical per-bin color
    prior into per-bin boost coefficients so rare colors are not washed
    out during training. '''
    def __init__(self, alpha, gamma=0, verbose=True, priorFile=''):
        # INPUTS
        #   alpha       prior correction exponent: 0 ignores the prior,
        #               1 divides by it, alpha divides by prior**alpha
        #   gamma       fraction of uniform distribution mixed into the prior
        #   priorFile   .npy file of prior probabilities across classes
        self.alpha = alpha
        self.gamma = gamma
        self.verbose = verbose
        # empirical prior probability
        self.prior_probs = np.load(priorFile)
        # uniform probability over the bins that actually occur
        self.uni_probs = np.zeros_like(self.prior_probs)
        self.uni_probs[self.prior_probs != 0] = 1.
        self.uni_probs = self.uni_probs / np.sum(self.uni_probs)
        # convex combination of empirical prior and uniform distribution
        self.prior_mix = (1 - self.gamma) * self.prior_probs + self.gamma * self.uni_probs
        # prior factor, re-normalized so the expected boost under the prior is 1
        self.prior_factor = self.prior_mix ** -self.alpha
        self.prior_factor = self.prior_factor / np.sum(self.prior_probs * self.prior_factor)
        # implied empirical prior after boosting
        self.implied_prior = self.prior_probs * self.prior_factor
        self.implied_prior = self.implied_prior / np.sum(self.implied_prior)
        if self.verbose:
            self.print_correction_stats()
    def print_correction_stats(self):
        # Converted from Python-2 print statements to print() calls so the
        # module stays importable under both Python 2 and 3.
        print('Prior factor correction:')
        print(' (alpha,gamma) = (%.2f, %.2f)' % (self.alpha, self.gamma))
        print(' (min,max,mean,med,exp) = (%.2f, %.2f, %.2f, %.2f, %.2f)' % (
            np.min(self.prior_factor), np.max(self.prior_factor),
            np.mean(self.prior_factor), np.median(self.prior_factor),
            np.sum(self.prior_factor * self.prior_probs)))
    def forward(self, data_ab_quant, axis=1):
        # Boost coefficient of the argmax class along `axis`, with a singleton
        # dimension re-inserted at that axis so the result broadcasts back
        # over the input. (np.newaxis used directly instead of the na() helper.)
        data_ab_maxind = np.argmax(data_ab_quant, axis=axis)
        corr_factor = self.prior_factor[data_ab_maxind]
        if axis == 0:
            return corr_factor[np.newaxis, :]
        elif axis == 1:
            return corr_factor[:, np.newaxis, :]
        elif axis == 2:
            return corr_factor[:, :, np.newaxis, :]
        elif axis == 3:
            return corr_factor[:, :, :, np.newaxis]
class NNEncode():
    ''' Encode points using NN search and Gaussian kernel '''
    def __init__(self, NN, sigma, km_filepath='', cc=-1):
        # NN            number of nearest neighbors to soft-assign to
        # sigma         Gaussian kernel width for the soft weights
        # km_filepath   .npy file of cluster centers (used when cc is -1)
        # cc            cluster centers, passed directly
        if check_value(cc, -1):
            self.cc = np.load(km_filepath)
        else:
            self.cc = cc
        self.K = self.cc.shape[0]
        self.NN = int(NN)  # callers sometimes pass a float (e.g. 10.)
        self.sigma = sigma
        # Pass the int-cast count: sklearn requires an integer n_neighbors.
        self.nbrs = nn.NearestNeighbors(n_neighbors=self.NN, algorithm='ball_tree').fit(self.cc)
        self.alreadyUsed = False
    def encode_points_mtx_nd(self, pts_nd, axis=1, returnSparse=False, sameBlock=True):
        # Soft-encode each point over its NN nearest cluster centers with
        # Gaussian weights that sum to 1 per point.
        # (Removed an unused rz.Timer() and a duplicated P assignment.)
        pts_flt = flatten_nd_array(pts_nd, axis=axis)
        P = pts_flt.shape[0]
        if sameBlock and self.alreadyUsed:
            self.pts_enc_flt[...] = 0  # reuse the pre-allocated buffer
        else:
            self.alreadyUsed = True
            self.pts_enc_flt = np.zeros((P, self.K))
            self.p_inds = np.arange(0, P, dtype='int')[:, na()]
        (dists, inds) = self.nbrs.kneighbors(pts_flt)
        wts = np.exp(-dists**2 / (2 * self.sigma**2))
        wts = wts / np.sum(wts, axis=1)[:, na()]
        self.pts_enc_flt[self.p_inds, inds] = wts
        pts_enc_nd = unflatten_2d_array(self.pts_enc_flt, pts_nd, axis=axis)
        return pts_enc_nd
    def decode_points_mtx_nd(self, pts_enc_nd, axis=1):
        # Decode: expected ab value = encoding weights @ cluster centers.
        pts_enc_flt = flatten_nd_array(pts_enc_nd, axis=axis)
        pts_dec_flt = np.dot(pts_enc_flt, self.cc)
        pts_dec_nd = unflatten_2d_array(pts_dec_flt, pts_enc_nd, axis=axis)
        return pts_dec_nd
    def decode_1hot_mtx_nd(self, pts_enc_nd, axis=1, returnEncode=False):
        # Hard decode via the argmax bin.
        pts_1hot_nd = nd_argmax_1hot(pts_enc_nd, axis=axis)
        pts_dec_nd = self.decode_points_mtx_nd(pts_1hot_nd, axis=axis)
        if returnEncode:
            return (pts_dec_nd, pts_1hot_nd)
        else:
            return pts_dec_nd
# *****************************
# ***** Utility functions *****
# *****************************
def check_value(inds, val):
    ''' Check to see if an array is a single element equaling a particular value
    for pre-processing inputs in a function '''
    if np.array(inds).size != 1:
        return False
    return bool(inds == val)
def na():
    ''' Shorthand for np.newaxis, used to insert singleton dimensions. '''
    return np.newaxis
def flatten_nd_array(pts_nd, axis=1):
    ''' Flatten an nd array into a 2d array, keeping `axis` as the columns.
    INPUTS
        pts_nd      N0xN1x...xNd array
        axis        integer
    OUTPUTS
        pts_flt     prod(all N except N_axis) x N_axis array '''
    # Move the kept axis to the end (other axes stay in ascending order),
    # then collapse everything else into rows.
    moved = np.moveaxis(pts_nd, axis, -1)
    return moved.reshape(-1, pts_nd.shape[axis])
def unflatten_2d_array(pts_flt, pts_nd, axis=1, squeeze=False):
    ''' Unflatten a 2d array back to the shape of `pts_nd` along `axis`.
    INPUTS
        pts_flt     prod(all N except N_axis) x M array
        pts_nd      N0xN1x...xNd array whose shape is the template
        axis        integer
        squeeze     bool; if True, M == 1 and the axis is squeezed out
    OUTPUTS
        pts_out     N0xN1x...xNd array '''
    shp = np.array(pts_nd.shape)
    other_axes = np.setdiff1d(np.arange(pts_nd.ndim), np.array(axis))  # sorted ascending
    if squeeze:
        # The column dimension is dropped; just restore the other dims
        # (their order already matches, so no transpose is needed).
        return pts_flt.reshape(shp[other_axes].tolist())
    # Restore the other dims with the columns last, then move the column
    # dimension back into position `axis`.
    stacked = pts_flt.reshape(shp[other_axes].tolist() + [pts_flt.shape[1]])
    return np.moveaxis(stacked, -1, axis)
a8e1dced7a604f8c2b62fcf91f75b95634a99ffe | 22,631 | py | Python | services/python/app/lib/parsers/EmailParser.py | ace-ecosystem/eventsentry | 79cb67245f9c5bd49118d23e20764ee8feba8660 | [
"Apache-2.0"
] | null | null | null | services/python/app/lib/parsers/EmailParser.py | ace-ecosystem/eventsentry | 79cb67245f9c5bd49118d23e20764ee8feba8660 | [
"Apache-2.0"
] | null | null | null | services/python/app/lib/parsers/EmailParser.py | ace-ecosystem/eventsentry | 79cb67245f9c5bd49118d23e20764ee8feba8660 | [
"Apache-2.0"
] | null | null | null | import base64
import dateutil.parser
import email
import hashlib
import logging
import os
import re
from dateutil import tz
from email.header import decode_header, make_header
from urlfinderlib import find_urls
from lib import RegexHelpers
from lib.config import config
from lib.constants import HOME_DIR
from lib.indicator import Indicator
from lib.indicator import make_url_indicators
class EmailParser():
def __init__(self, smtp_path, whitelist):
# Initiate logging.
self.logger = logging.getLogger()
# Save the whitelist.
self.whitelist = whitelist
# Items we parse out of the email.
self.ace_url = ''
self.attachments = []
self.body = ''
self.cc_addresses = []
self.envelope_from = ''
self.envelope_to = ''
self.from_address = ''
self.headers = ''
self.html = ''
self.indicators = []
self.message_id = ''
self.original_recipient = ''
self.path = smtp_path
self.received = ''
self.received_time = ''
self.remediated = False
self.reply_to = ''
self.return_path = ''
self.screenshots = []
self.subject = ''
self.subject_decoded = ''
self.to_addresses = []
self.urls = []
self.x_auth_id = ''
self.x_mailer = ''
self.x_original_sender = ''
self.x_originating_ip = ''
self.x_sender = ''
self.x_sender_id = ''
self.x_sender_ip = ''
# Build the URL to the ACE alert.
ace_uuid_pattern = re.compile(r'([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})')
match = ace_uuid_pattern.search(self.path)
if match:
self.ace_url = '{}{}'.format(config['ace']['ace_alert_url'], match.group(1))
with open(self.path, encoding='utf-8', errors='ignore') as s:
smtp_stream = s.read().splitlines()
# Locate any screenshots for this email.
email_dir = os.path.dirname(self.path)
files = os.listdir(email_dir)
for f in files:
if 'text_html' in f and f.endswith('.png') and not f.startswith('email_screenshot'):
self.logger.debug('Found email screenshot: {}'.format(os.path.join(email_dir, f)))
self.screenshots.append(os.path.join(email_dir, f))
# Find the envelope from/to addresses. This will only work if given an
# "smtp.stream" file, since otherwise the SMTP commands will not exist.
envelope_address_pattern = re.compile(r'.*<(.*)>.*')
for line in smtp_stream:
if line.startswith('MAIL FROM:'):
try:
self.envelope_from = envelope_address_pattern.match(line).group(1)
except:
self.logger.exception('Unable to parse envelope from.')
if line.startswith('RCPT TO:'):
try:
self.envelope_to = envelope_address_pattern.match(line).group(1)
except:
self.logger.exception('Unable to parse envelope to.')
# Just in case we are dealing with an "smtp.stream" file that still has
# the SMTP commands above the actual e-mail, we need to strip those out.
# This will remove all lines prior to the Received: headers so that the
# email.parser can properly parse out the e-mail. If we were given an
# "smtp.email" type of file with the SMTP commands already removed, this
# should not affect anything. This is legacy code at this point.
try:
while not smtp_stream[0].startswith('Received:'):
smtp_stream.pop(0)
except IndexError:
smtp_stream = []
# Join the header lines into a single string.
self.email_text = '\n'.join(smtp_stream)
# Create the e-mail object.
email_obj = email.message_from_string(self.email_text)
# We want to try and parse an embedded/attached e-mail if there is one.
# Walk the full e-mail's parts.
for part in email_obj.walk():
# Continue if the part looks like a valid e-mail.
if part.get_content_type() == 'message/rfc822':
# Split the part lines into a list.
part_text = str(part).splitlines()
if any('Received:' in line for line in part_text):
# Make sure our part starts with the Received: headers.
try:
while not part_text[0].startswith('Received:'):
part_text.pop(0)
except IndexError:
pass
part_text = '\n'.join(part_text)
# Make the new e-mail object.
email_obj = email.message_from_string(part_text)
# Parse the e-mail object for its content.
parsed_email = self._parse_content(email_obj)
# Now that we have the e-mail object, parse out some of the interesting parts.
self.headers = self._get_all_headers_string(email_obj)
self.received = self.get_header(email_obj, 'received')
# Get the e-mail's plaintext body, HTML body, and the visible text from the HTML.
self.body = parsed_email['body']
self.html = parsed_email['html']
# Get any e-mail attachments.
self.attachments = parsed_email['attachments']
# From address
try:
self.from_address = self._get_address_list(email_obj, 'from')[0][1]
self.indicators.append(Indicator('Email - Address', self.from_address, tags=['from_address']))
except:
pass
# From domain
try:
self.indicators.append(Indicator('URI - Domain Name', self.from_address.split('@')[1], tags=['from_domain']))
except:
pass
# Reply-To address
try:
self.reply_to = self._get_address_list(email_obj, 'reply-to')[0][1]
self.indicators.append(Indicator('Email - Address', self.reply_to, tags=['reply_to']))
except:
pass
# X-Sender address
try:
self.x_sender = self._get_address_list(email_obj, 'X-Sender')[0][1]
self.indicators.append(Indicator('Email - Address', self.x_sender, tags=['x_sender']))
except:
pass
# X-Sender-Id address
try:
self.x_sender_id = self._get_address_list(email_obj, 'X-Sender-Id')[0][1]
self.indicators.append(Indicator('Email - Address', self.x_sender_id, tags=['x_sender_id']))
except:
pass
# X-Auth-Id address
try:
self.x_auth_id = self._get_address_list(email_obj, 'X-Auth-ID')[0][1]
self.indicators.append(Indicator('Email - Address', self.x_auth_id, tags=['x_auth_id']))
except:
pass
# Return-Path address
try:
self.return_path = self._get_address_list(email_obj, 'return_path')[0][1]
self.indicators.append(Indicator('Email - Address', self.return_path, tags=['return_path']))
except:
pass
# X-MS-Exchange-Organization-OriginalEnvelopeRecipients address
try:
self.original_recipient = self._get_address_list(email_obj, 'X-MS-Exchange-Organization-OriginalEnvelopeRecipients')[0][1].lower()
self.indicators.append(Indicator('Email - Address', self.original_recipient, status='Informational', tags=['original_recipient']))
except:
pass
# If the original_recipient was not found, check if this is a POTENTIAL PHISH e-mail and use the from address.
if not self.original_recipient and 'Subject: [POTENTIAL PHISH]' in self.email_text:
try:
temp_email_obj = email.message_from_string(self.email_text)
self.original_recipient = self._get_address_list(temp_email_obj, 'from')[0][1]
self.indicators.append(Indicator('Email - Address', self.original_recipient, status='Informational', tags=['original_recipient']))
except:
self.logger.exception('Error parsing original recipient from POTENTIAL PHISH e-mail.')
# Subject
try:
self.subject = ''.join(self.get_header(email_obj, 'subject')[0].splitlines())
if not self.subject.startswith('[POTENTIAL PHISH]'):
self.indicators.append(Indicator('Email - Subject', self.subject))
except:
pass
# Decoded subject
try:
self.subject_decoded = ''.join(str(make_header(decode_header(self.get_header(email_obj, 'subject')[0]))).splitlines())
if not self.subject_decoded.startswith('[POTENTIAL PHISH]'):
self.indicators.append(Indicator('Email - Subject', self.subject_decoded))
except:
pass
# To addresses
self.to_addresses = [x[1].lower() for x in self._get_address_list(email_obj, 'to')]
# CC addresses
self.cc_addresses = [x[1].lower() for x in self._get_address_list(email_obj, 'cc')]
# Message-Id
try:
self.message_id = self.get_header(email_obj, 'message-id')[0]
self.indicators.append(Indicator('Email Message ID', self.message_id, status='Informational'))
except:
pass
# X-Mailer
try:
self.x_mailer = self.get_header(email_obj, 'x-mailer')[0]
self.indicators.append(Indicator('Email - Xmailer', self.x_mailer, status='Informational'))
except:
pass
# X-Original-Sender address
try:
self.x_original_sender = self.get_header(email_obj, 'x-original-sender')[0]
self.indicators.append(Indicator('Email - Address', self.x_original_sender, tags=['x_original_sender']))
except:
pass
# X-Originating-Ip
try:
x_originating_ip = self.get_header(email_obj, 'x-originating-ip')[0]
# Sometimes this field is in the form: [1.1.1.1]
# Make sure we remove any non-IP characters.
ip = RegexHelpers.find_ip_addresses(x_originating_ip)
if ip:
self.x_originating_ip = ip[0]
self.indicators.append(Indicator('Address - ipv4-addr', self.x_originating_ip, tags=['x_originating_ip']))
except:
pass
# X-Sender-Ip
try:
x_sender_ip = self.get_header(email_obj, 'x-sender-ip')[0]
# Make sure like the X-Originating-IP that we only
# get the IP address and no other characters.
ip = RegexHelpers.find_ip_addresses(x_sender_ip)
if ip:
self.x_sender_ip = ip[0]
self.indicators.append(Indicator('Address - ipv4-addr', self.x_sender_ip, tags=['x_sender_ip']))
except:
pass
self.received_time = self._get_received_time(email_obj)
if not self.received_time:
self.received_time = self._get_date_time()
# Find any URLs in the plaintext body.
text_urls = find_urls(self.body)
# Find any URLs in the HTML body.
html_urls = find_urls(self.html)
# Get any strings URLs.
strings_urls = []
"""
for file in self.attachments:
try:
strings_urls += file['strings_urls']
except:
pass
"""
# Try and remove any URLs that look like partial versions of other URLs.
all_urls = set.union(text_urls, html_urls)
unique_urls = set()
for u in all_urls:
if not any(other_url.startswith(u) and other_url != u for other_url in all_urls):
unique_urls.add(u)
# Get rid of any invalid URLs.
self.urls = [u for u in unique_urls if RegexHelpers.is_url(u)]
# Make indicators for the URLs.
self.indicators += make_url_indicators(self.urls, from_email_content=True)
# Get rid of any invalid indicators.
self.indicators = [i for i in self.indicators if i.value]
# Add any extra tags to each indicator.
for i in self.indicators:
i.tags.append('phish')
def __eq__(self, other):
""" Returns True if the headers are equal. """
return self.headers.lower() == other.headers.lower()
def __hash__(self):
""" Use the headers as the hash. """
return hash((self.headers.lower()))
@property
def json(self):
""" Return a JSON compatible view of the email. """
json = {}
json['ace_url'] = self.ace_url
json['attachments'] = self.attachments
json['body'] = self.body
json['cc_addresses'] = self.cc_addresses
json['envelope_from'] = self.envelope_from
json['envelope_to'] = self.envelope_to
json['from_address'] = self.from_address
json['headers'] = self.headers
json['html'] = self.html
json['message_id'] = self.message_id
json['original_recipient'] = self.original_recipient
json['path'] = self.path
json['received'] = self.received
json['received_time'] = self.received_time
json['remediated'] = self.remediated
json['reply_to'] = self.reply_to
json['return_path'] = self.return_path
json['screenshots'] = self.screenshots
json['subject'] = self.subject
json['subject_decoded'] = self.subject_decoded
json['to_addresses'] = self.to_addresses
json['urls'] = self.urls
json['x_auth_id'] = self.x_auth_id
json['x_mailer'] = self.x_mailer
json['x_original_sender'] = self.x_original_sender
json['x_originating_ip'] = self.x_originating_ip
json['x_sender'] = self.x_sender
json['x_sender_id'] = self.x_sender_id
json['x_sender_ip'] = self.x_sender_ip
return json
def get_header(self, email_obj, header_name):
return email_obj.get_all(header_name, [])
def _get_all_headers_string(self, email_obj):
header_string = ''
try:
bad_headers = config['wiki']['ignore_headers']
except:
bad_headers = []
for header in email_obj.items():
if not any(bad_header in header[0] for bad_header in bad_headers):
header_string += ': '.join(header) + '\n'
return header_string
def _get_address_list(self, email_obj, header_name):
header = email_obj.get_all(header_name, [])
return email.utils.getaddresses(header)
def _get_date_time(self):
for line in self.email_text.splitlines():
if 'Date:' in line:
date_pattern = re.compile(r'[A-Z][a-z]{2,3},\s+\d+\s+[A-Z][a-z]{2,3}\s+[0-9]{4}\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\s*(\+\d+|\-\d+)*')
date_time = re.search(date_pattern, line)
if date_time:
datetime_obj = dateutil.parser.parse(date_time.group(0), ignoretz=False)
localtime = dateutil.tz.tzlocal()
try:
localtime_string = str(datetime_obj.astimezone(localtime))
except ValueError:
localtime_string = str(datetime_obj)
return localtime_string
return ''
def _get_received_time(self, email_obj):
header=email_obj.get_all('received', [])
try:
last_received_lines = header[0]
except IndexError:
last_received_lines = ''
received_time_pattern = re.compile(r'[A-Z][a-z]{2,3},\s+\d+\s+[A-Z][a-z]{2,3}\s+[0-9]{4}\s+[0-9]{2}:[0-9]{2}:[0-9]{2}\s*(\+\d+|\-\d+)*')
last_received_time = re.search(received_time_pattern, last_received_lines)
if last_received_time:
datetime_obj = dateutil.parser.parse(last_received_time.group(0), ignoretz=False)
localtime = dateutil.tz.tzlocal()
try:
localtime_string = str(datetime_obj.astimezone(localtime))
except ValueError:
localtime_string = str(datetime_obj)
return localtime_string
else:
return ''
def _get_received_for_address(self, email_obj):
received_header = email_obj.get_all('received', [])
receivedfor_info = email.utils.getaddresses(received_header)
for tup in receivedfor_info:
if 'for' in tup[0] and '@' in tup[1]:
return tup[1]
return None
def _get_charset(self, obj, default='ascii'):
if obj.get_content_charset():
return obj.get_content_charset()
if obj.get_charset():
return obj.get_charset()
return default
# Adapted from: https://www.ianlewis.org/en/parsing-email-attachments-python
def _parse_content(self, email_obj):
attachments = []
body = ''
html = ''
for part in email_obj.walk():
charset = self._get_charset(part, self._get_charset(email_obj))
attachment = self._parse_attachment(part, charset)
# Only add the attachment to the list if we were able to get the MD5.
if attachment and attachment['md5']:
attachments.append(attachment)
elif part.get_content_type() == 'text/plain':
try:
body += part.get_payload(decode=True).decode(charset, errors='ignore')
except LookupError:
if 'windows-' in charset:
charset = charset.replace('windows-', 'cp')
body += part.get_payload(decode=True).decode(charset, errors='ignore')
elif part.get_content_type() == 'text/html':
try:
html += part.get_payload(decode=True).decode(charset, errors='ignore')
except LookupError:
if 'windows-' in charset:
charset = charset.replace('windows-', 'cp')
html += part.get_payload(decode=True).decode(charset, errors='ignore')
return {
'body' : body,
'html' : html,
'attachments': attachments
}
# Adapted from: https://www.ianlewis.org/en/parsing-email-attachments-python
    def _parse_attachment(self, message_part, charset):
        """Return a dict describing *message_part* if it looks like an attachment, else None.

        A part is treated as an attachment when the literal string
        'attachment' appears anywhere in its header names or values
        (typically the Content-Disposition header).  The returned dict
        contains 'content_type', 'size', 'md5', 'sha256', 'name',
        'create_date', 'mod_date', 'read_date', and — for base64-encoded
        parts — 'strings_urls'.  Side effect: appends file-name and hash
        Indicator objects to self.indicators.
        """
        part_items = message_part.items()
        # Scan both header names and header values for the marker string.
        for tup in part_items:
            for value in tup:
                if 'attachment' in value:
                    file_data = message_part.get_payload()
                    attachment_dict = {}
                    if message_part.get('Content-Transfer-Encoding', None) == 'base64':
                        # Payload here is the raw base64 text (get_payload()
                        # without decode=True), so strip the line breaks first.
                        file_data_b64 = file_data.replace('\n', '')
                        # For some reason, sometimes the attachments don't have the proper
                        # padding. Add a couple "==" on the end for good measure. This doesn't
                        # seem to harm correctly encoded attachments.
                        file_data_decoded = base64.b64decode(file_data_b64 + '==')
                        # Try and get strings out of the attachment.
                        strings_list = RegexHelpers.find_strings(file_data_decoded)
                        strings = ' '.join(strings_list)
                        # Look for any URLs that were in the strings.
                        # NOTE(review): bare find_urls here vs. RegexHelpers.find_strings
                        # above — presumably a module-level helper; verify the import.
                        strings_urls = find_urls(strings)
                        attachment_dict['strings_urls'] = strings_urls
                    elif message_part.get_content_type() == 'text/html':
                        # Normalize HTML attachments to UTF-8 bytes so hashing
                        # below is charset-independent.
                        file_data_decoded = message_part.get_payload(decode=True).decode(charset).encode('utf-8')
                    else:
                        # Fallback: hash the payload as-is.  NOTE(review): this
                        # may be a str rather than bytes, in which case the
                        # hashers below raise TypeError and the hash stays ''.
                        file_data_decoded = file_data
                    # Hash the decoded payload; an empty hash string marks a
                    # payload we could not hash (caller drops such attachments).
                    try:
                        md5_hasher = hashlib.md5()
                        md5_hasher.update(file_data_decoded)
                        md5_hash = md5_hasher.hexdigest()
                    except TypeError:
                        md5_hash = ''
                    try:
                        sha256_hasher = hashlib.sha256()
                        sha256_hasher.update(file_data_decoded)
                        sha256_hash = sha256_hasher.hexdigest()
                    except TypeError:
                        sha256_hash = ''
                    attachment_dict['content_type'] = message_part.get_content_type()
                    attachment_dict['size'] = len(file_data_decoded)
                    attachment_dict['md5'] = md5_hash
                    attachment_dict['sha256'] = sha256_hash
                    attachment_dict['name'] = ''
                    # Date fields are placeholders; nothing below fills them in.
                    attachment_dict['create_date'] = ''
                    attachment_dict['mod_date'] = ''
                    attachment_dict['read_date'] = ''
                    # Find the attachment name. Normally this follows a specific format
                    # and is called 'filename=' but recently I've seen some that are in
                    # different locations and are just called 'name='... Hence removing
                    # old code and replacing with a regex statement to account for either
                    # name in any location in the message part.
                    attachment_name_pattern = re.compile(r'(file)?name="?([^"]+)"?')
                    for tup in part_items:
                        for item in tup:
                            # Headers can be folded across lines; check each line.
                            item_lines = item.splitlines()
                            for item_line in item_lines:
                                attachment_name = attachment_name_pattern.search(item_line)
                                if attachment_name:
                                    # Decode RFC 2047 style "=?utf-8?B?...?=" names.
                                    attachment_dict['name'] = RegexHelpers.decode_utf_b64_string(attachment_name.groups()[1])
                                    # Unquoted names can drag in the trailing
                                    # header parameter separator; strip it.
                                    if attachment_dict['name'].endswith(';'):
                                        attachment_dict['name'] = attachment_dict['name'][:-1]
                    # Make the attachment indicators.
                    self.indicators.append(Indicator('Windows - FileName', attachment_dict['name'], tags=['attachment']))
                    self.indicators.append(Indicator('Hash - MD5', attachment_dict['md5'], tags=['attachment']))
                    self.indicators.append(Indicator('Hash - SHA256', attachment_dict['sha256'], tags=['attachment']))
                    # Stop at the first header hit: one dict per message part.
                    return attachment_dict
        return None
| 41.601103 | 146 | 0.57271 | 22,239 | 0.982679 | 0 | 0 | 1,438 | 0.063541 | 0 | 0 | 5,716 | 0.252574 |