hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5423f2c125c3cb768b4a0cd17051477a73148c1a | 16,691 | py | Python | gbot/libs/helper.py | dgw/goshu | 3cba300d92f9bde58cf7348ddc3183d52b4c4bcf | [
"ISC"
] | 5 | 2015-01-03T00:08:58.000Z | 2017-05-05T11:57:03.000Z | gbot/libs/helper.py | dgw/goshu | 3cba300d92f9bde58cf7348ddc3183d52b4c4bcf | [
"ISC"
] | 3 | 2016-02-07T07:35:13.000Z | 2016-11-26T19:29:02.000Z | gbot/libs/helper.py | dgw/goshu | 3cba300d92f9bde58cf7348ddc3183d52b4c4bcf | [
"ISC"
] | 1 | 2020-11-12T09:09:06.000Z | 2020-11-12T09:09:06.000Z | #!/usr/bin/env python3
# Goshu IRC Bot
# written by Daniel Oaks <daniel@danieloaks.net>
# licensed under the ISC license
"""extends several builtin functions and provides helper functions
The default Python library is extensive and well-stocked. There are some
times however, you wish a small task was taken care of for you. This module
if chock full of little extensions and helper functions I've needed while
writing Goshu.
Small, interesting, self-contained functions that can probably be reused
elsewhere.
"""
import collections.abc
import datetime
import imp
import json
import os
import re
import string
import sys
import urllib.parse
from girc.formatting import escape
from http_status import Status
from pyquery import PyQuery as pq
import importlib
import requests
import xml.sax.saxutils as saxutils
import yaml
# Characters considered safe to keep when sanitising local filenames
# (used as the default whitelist by filename_escape() below).
valid_filename_chars = string.ascii_letters + string.digits + '#._- '
def true_or_false(in_str):
    """Map a string onto the boolean it represents, or None if unrecognised.

    The check is prefix-based and case-insensitive, so 'Yes', 'y', 'TRUE',
    '1' and 'on' all map to True; 'no', 'false', '0' and 'off' map to False.
    """
    lowered = in_str.lower()
    for prefixes, value in ((('true', 'y', '1', 'on'), True),
                            (('false', 'n', '0', 'off'), False)):
        if lowered.startswith(prefixes):
            return value
    return None
def split_num(line, chars=' ', maxsplits=1, empty=''):
    """/lazy/ wrapper, to stop us having to bounds-check when splitting.

    Arguments:
        line -- line to split
        chars -- character(s) to split line on
        maxsplits -- how many split items are returned
        empty -- character to put in place of nothing

    Returns:
        line.split(chars, maxsplits), padded with *empty* until exactly
        `maxsplits + 1` values are present."""
    parts = line.split(chars, maxsplits)
    padding_needed = maxsplits + 1 - len(parts)
    return parts + [empty] * padding_needed
def is_ok(func, prompt, blank='', clearline=False):
    """Ask a yes/no question through *func* and return True/False.

    Arguments:
        func -- callable used to ask the user (e.g. ``input``)
        prompt -- prompt text shown to the user
        blank -- if True (or False), an empty answer returns that value; the
                 default '' keeps re-asking until a clear answer is given
        clearline -- unused, kept for interface compatibility

    Returns:
        True if the user accepts, False if the user declines."""
    while True:
        answer = func(prompt).lower().strip()
        if answer:
            first = answer[0]
            if first in 'yt1':  # yes, true, 1
                return True
            if first in 'nf0':  # no, false, 0
                return False
            # Unrecognised answer: ask again.
        elif blank is True:
            return True
        elif blank is False:
            return False
        # blank == '' with an empty answer: ask again.
def bytes_to_str(bytes, base=2, precision=0):
    """Convert number of bytes to a human-readable format

    Arguments:
        bytes -- number of bytes
        base -- base 2 'regular' multiplexer, or base 10 'storage' multiplexer
        precision -- number of decimal places to output

    Returns:
        Human-readable string such as '1.32M' (or '1.32mB' for base 10),
        or None when *base* is neither 2 nor 10.
    """
    if base == 2:
        step = 1024
    elif base == 10:
        step = 1000
    else:
        return None  # unsupported base
    fmt = '%.' + str(precision) + 'f'
    output = None
    mebi_convert = True
    # Walk the unit ladder from largest to smallest.
    for power, suffix in ((4, 'T'), (3, 'G'), (2, 'M'), (1, 'K')):
        threshold = step ** power
        if bytes >= threshold:
            output = (fmt % float(bytes / threshold)) + suffix
            break
    if output is None:
        # Smaller than one kilo-unit: plain byte count.
        output = (fmt % float(bytes)) + 'B'
        mebi_convert = False
    # mebibytes and gibibytes all those weird HDD manufacturer terms
    if base == 10 and mebi_convert:
        output = output[:-1] + output[-1].lower() + 'B'
    return output
def time_metric(secs=60, mins=0):
    """Return a compact human-readable duration string such as '1w2d3h4m5s'.

    Note: a unit is emitted only when the remaining total is strictly
    greater than that unit, so exactly 60 seconds renders as '60s' and a
    zero duration renders as ''."""
    total = secs + mins * 60
    pieces = []
    for unit_secs, unit_char in ((7 * 24 * 3600, 'w'),
                                 (24 * 3600, 'd'),
                                 (3600, 'h'),
                                 (60, 'm')):
        if total > unit_secs:
            count = int(total / unit_secs)
            pieces.append('{}{}'.format(count, unit_char))
            total -= count * unit_secs
    if total > 0:
        pieces.append('{}s'.format(total))
    return ''.join(pieces)
def metric(num, metric_list=None, additive=False):
    """Returns user-readable string representing given value.

    Arguments:
        num -- the base value we're converting
        metric_list -- sequence of (threshold, suffix) pairs, largest first;
            defaults to base-10 B/M/k thresholds
        additive -- whether we add the various values together, or separate
            them (separate: only the largest matching unit is used)

    Return:
        a string such as '345.0k' or '23w6d2h53s'; str(num) when no
        threshold matched."""
    if metric_list is None:
        # Immutable default avoids the shared mutable-default-argument
        # pitfall; behaviour is identical to the old list-of-lists default.
        metric_list = ((10 ** 9, 'B'), (10 ** 6, 'M'), (10 ** 3, 'k'))
    output = ''
    for metric_count, metric_char in metric_list:
        if num > metric_count:
            if additive:
                format_str = '{}{}'
            else:
                format_str = '{:.1f}{}'
            num = num / metric_count
            if not additive:
                num = float(num)
            output += format_str.format(num, metric_char)
            if not additive:
                # Non-additive mode only reports the largest matching unit.
                break
    # just in case no output
    if output == '':
        output = str(num)
    return output
def get_url(url, **kwargs):
    """Gets a url, handles all the icky requests stuff.

    Returns the ``requests`` Response (with an extra ``status`` attribute
    attached) on success, or a human-readable error string on failure."""
    kwargs.setdefault('timeout', 20)
    try:
        response = requests.get(url, **kwargs)
        response.status = Status(response.status_code)
        if not response.ok:
            return 'HTTP Error - {} {} - {}'.format(
                response.status.code,
                response.status.name,
                response.status.description)
    except requests.exceptions.Timeout:
        return 'Connection timed out'
    except requests.exceptions.RequestException as exc:
        return '{}'.format(exc.__class__.__name__)
    return response
def json_element(input_dict, query, default=None):
    """Runs through a data structure and returns the selected element.

    Arguments:
        input_dict -- nested structure of dicts and lists/tuples
        query -- sequence of dict keys and int indexes describing the path
        default -- value returned when any step of the path cannot be
            resolved

    Returns:
        the selected element, or *default* if the path does not exist."""
    current = input_dict
    for element in query:
        if isinstance(element, int) and isinstance(current, (list, tuple)):
            # Bounds-check so an out-of-range index returns *default*
            # instead of raising IndexError (negative indexes are allowed,
            # matching normal Python sequence semantics).
            if -len(current) <= element < len(current):
                current = current[element]
            else:
                return default
        elif isinstance(current, dict) and element in current:
            current = current[element]
        else:
            # Either the key is missing or the current node cannot be
            # indexed this way (previously this could raise TypeError).
            return default
    return current
def filename_escape(unsafe, replace_char='_', valid_chars=valid_filename_chars):
    """Escapes a string to provide a safe local filename

    Arguments:
        unsafe -- Unsafe string to escape
        replace_char -- Character to replace unsafe characters with
        valid_chars -- Valid filename characters

    Returns:
        Safe local filename string
    """
    if not unsafe:
        return ''
    return ''.join(char if char in valid_chars else replace_char
                   for char in unsafe)
# Entities that saxutils.unescape() does not translate for us (numeric
# apostrophes and double quotes); each is normalised to a plain apostrophe.
_unescape_map = {
    '&#39;': "'",
    '&#039;': "'",
    '&quot;': "'",
}


def html_unescape(input):
    """Turns any html-escaped characters back to their normal equivalents."""
    result = saxutils.unescape(input)
    for entity, replacement in _unescape_map.items():
        result = result.replace(entity, replacement)
    return result
def utf8_bom(input):
    """Strips BOM from a utf8 string, because open() leaves it in for some
    reason."""
    return input.replace('\ufeff', '')
# timedelta functions
_td_str_map = [
('d', 'days'),
('h', 'hours'),
('m', 'minutes'),
('s', 'seconds'),
]
_str_td = r''
for istr, td in _td_str_map:
_str_td += r'\s*(?:(?P<' + td + r'>[0-9]+)\s*' + istr + r')?'
_TD_STR_REGEX = re.compile(_str_td)
def timedelta_to_string(delta):
    """Converts a timedelta dict (e.g. {'days': 1, 'hours': 2}) to a string
    such as '1d2h', using the unit order defined by _td_str_map."""
    parts = []
    for unit_char, unit_name in _td_str_map:
        if unit_name in delta:
            parts.append('{}{}'.format(delta[unit_name], unit_char))
    return ''.join(parts)
def string_to_timedelta(td_string):
    """Converts a string to a timedelta dict."""
    # Every group in _TD_STR_REGEX is optional, so match() always succeeds;
    # an unparseable string simply yields an empty dict.
    match = _TD_STR_REGEX.match(td_string)
    delta = {}
    for istr, td in _td_str_map:
        if match.group(td):
            # NOTE(review): the pattern only captures [0-9]+ (no dot), so
            # this float branch looks unreachable -- confirm before removing.
            if '.' in match.group(td):
                val = float(match.group(td))
            else:
                val = int(match.group(td))
            delta[td] = val
    return delta
# path
| 30.795203 | 94 | 0.549038 |
542466b53c52821ceb40707c73e0ab32ca5a0262 | 8,707 | py | Python | ptf/lib/runner.py | opennetworkinglab/tassen | 6e42ba79f83caa1bd6ecb40fd9bd1e9f8768ec09 | [
"Apache-2.0"
] | 4 | 2020-07-08T22:04:35.000Z | 2020-07-14T15:09:37.000Z | ptf/lib/runner.py | opennetworkinglab/tassen | 6e42ba79f83caa1bd6ecb40fd9bd1e9f8768ec09 | [
"Apache-2.0"
] | 1 | 2020-07-07T08:12:40.000Z | 2020-07-07T08:12:41.000Z | ptf/lib/runner.py | opennetworkinglab/tassen | 6e42ba79f83caa1bd6ecb40fd9bd1e9f8768ec09 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
# Copyright 2013-present Barefoot Networks, Inc.
# SPDX-FileCopyrightText: 2018-present Open Networking Foundation
#
# SPDX-License-Identifier: Apache-2.0
import Queue
import argparse
import json
import logging
import os
import re
import subprocess
import sys
import threading
import time
from collections import OrderedDict
import google.protobuf.text_format
import grpc
from p4.v1 import p4runtime_pb2, p4runtime_pb2_grpc
# Directory containing this script (used to locate PTF resources).
PTF_ROOT = os.path.dirname(os.path.realpath(__file__))

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("PTF runner")
def check_ifaces(ifaces):
    """
    Checks that required interfaces exist.
    """
    # NOTE(review): this file targets Python 2 (see shebang / Queue import);
    # on Python 3 check_output() returns bytes, which would break the str
    # regex below -- confirm before porting.
    ifconfig_out = subprocess.check_output(['ifconfig'])
    # Interface names are the alphanumeric tokens at the start of each line.
    iface_list = re.findall(r'^([a-zA-Z0-9]+)', ifconfig_out, re.S | re.M)
    present_ifaces = set(iface_list)
    ifaces = set(ifaces)
    # True only when every requested interface is present on the host.
    return ifaces <= present_ifaces
def build_bmv2_config(bmv2_json_path):
    """
    Builds the device config for BMv2
    """
    with open(bmv2_json_path) as config_file:
        contents = config_file.read()
    return contents
def run_test(p4info_path, grpc_addr, device_id, cpu_port, ptfdir, port_map_path,
             extra_args=()):
    """
    Runs PTF tests included in provided directory.
    Device must be running and configfured with appropriate P4 program.

    Returns True when the PTF run completed with exit code 0, False on any
    failure (missing interfaces, subprocess error, non-zero exit).
    """
    # TODO: check schema?
    # "ptf_port" is ignored for now, we assume that ports are provided by
    # increasing values of ptf_port, in the range [0, NUM_IFACES[.
    port_map = OrderedDict()
    with open(port_map_path, 'r') as port_map_f:
        port_list = json.load(port_map_f)
        for entry in port_list:
            p4_port = entry["p4_port"]
            iface_name = entry["iface_name"]
            port_map[p4_port] = iface_name
    if not check_ifaces(port_map.values()):
        # NOTE(review): error() is not visible in this excerpt; presumably a
        # logging helper defined elsewhere in this file -- confirm.
        error("Some interfaces are missing")
        return False
    ifaces = []
    # FIXME
    # find base_test.py
    # Make this script's directory importable by the spawned PTF process.
    # Note: this mutates os.environ for the whole process, and repeated
    # calls keep appending to PYTHONPATH.
    pypath = os.path.dirname(os.path.abspath(__file__))
    if 'PYTHONPATH' in os.environ:
        os.environ['PYTHONPATH'] += ":" + pypath
    else:
        os.environ['PYTHONPATH'] = pypath
    # One '-i <p4_port>@<iface>' pair per mapped port.
    for iface_idx, iface_name in port_map.items():
        ifaces.extend(['-i', '{}@{}'.format(iface_idx, iface_name)])
    cmd = ['ptf']
    cmd.extend(['--test-dir', ptfdir])
    cmd.extend(ifaces)
    # Semicolon-separated key='value' pairs consumed by the PTF tests.
    test_params = 'p4info=\'{}\''.format(p4info_path)
    test_params += ';grpcaddr=\'{}\''.format(grpc_addr)
    test_params += ';device_id=\'{}\''.format(device_id)
    test_params += ';cpu_port=\'{}\''.format(cpu_port)
    cmd.append('--test-params={}'.format(test_params))
    cmd.extend(extra_args)
    debug("Executing PTF command: {}".format(' '.join(cmd)))
    try:
        # we want the ptf output to be sent to stdout
        p = subprocess.Popen(cmd)
        p.wait()
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
    # consider narrowing to Exception.
    except:
        error("Error when running PTF tests")
        return False
    return p.returncode == 0
# noinspection PyTypeChecker
if __name__ == '__main__':
    # NOTE(review): main() is not visible in this excerpt; presumably defined
    # elsewhere in this file (argument parsing + run_test) -- confirm.
    main()
| 31.547101 | 91 | 0.605949 |
5427881b2cdb695dc79fdf0dbaacbc4dd2f6b718 | 178 | py | Python | rsebs/__init__.py | gabrielperezs/recycling-snapshots | b0707e883bb6037505af815877e4ef8ce544e35e | [
"Apache-2.0"
] | 1 | 2017-05-23T05:58:47.000Z | 2017-05-23T05:58:47.000Z | rsebs/__init__.py | gabrielperezs/recycling-snapshots | b0707e883bb6037505af815877e4ef8ce544e35e | [
"Apache-2.0"
] | null | null | null | rsebs/__init__.py | gabrielperezs/recycling-snapshots | b0707e883bb6037505af815877e4ef8ce544e35e | [
"Apache-2.0"
] | null | null | null | from .snapshots import set_client
from .snapshots import get_snapshots
from .snapshots import tag_snapshot
from .snapshots import set_drymode
from .snapshots import unset_drymode | 35.6 | 36 | 0.865169 |
54286060601c97e4e84de6381203dae2af8365e8 | 1,184 | py | Python | predict_form.py | HuginnM/UsedCarsUA | aa871c1bc6cdc1a84810db265c732b04cb4935f0 | [
"Apache-2.0"
] | null | null | null | predict_form.py | HuginnM/UsedCarsUA | aa871c1bc6cdc1a84810db265c732b04cb4935f0 | [
"Apache-2.0"
] | null | null | null | predict_form.py | HuginnM/UsedCarsUA | aa871c1bc6cdc1a84810db265c732b04cb4935f0 | [
"Apache-2.0"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import SubmitField, SelectField, IntegerField, FloatField, StringField
from wtforms.validators import DataRequired
import pandas as pd
# Pre-load the unique categorical values used to populate form choices.
# NOTE(review): assumes data/unique_cat_vals.csv exists relative to the
# working directory at import time -- confirm against the app's launcher.
uniq_vals = pd.read_csv("data/unique_cat_vals.csv", index_col=0)
| 56.380952 | 121 | 0.754223 |
5429177713786c59d64d5d6d11764c591147502b | 2,764 | py | Python | color.py | laplacetw/color-codes-bot | e7afb5b09e7c4a5dde6608917781cc6a0ea05287 | [
"MIT"
] | 1 | 2020-10-21T01:26:09.000Z | 2020-10-21T01:26:09.000Z | color.py | laplacetw/color-codes-bot | e7afb5b09e7c4a5dde6608917781cc6a0ea05287 | [
"MIT"
] | null | null | null | color.py | laplacetw/color-codes-bot | e7afb5b09e7c4a5dde6608917781cc6a0ea05287 | [
"MIT"
] | null | null | null | #!usr/bin/env python3
# Foundation shade codes mapped to four numeric components each.
# NOTE(review): the component semantics are not documented in this file --
# they look like colour-space coordinates (with a trailing 0.0 placeholder);
# confirm against the code that consumes these charts.
color_chart = {
    '1C1':[13.24, 88.89, 228.98, 0.], '1N1':[14.2, 95.37, 233.82, 0.], '1N2':[12.95, 91.79, 219.5, 0.],
    '1W1':[14.67, 103.64, 229.41, 0.], '1W2':[14.69, 106.34, 227.28, 0.], '2C0':[15.73, 134.68, 222.32, 0.],
    '2C1':[14.57, 125.89, 220.69, 0.], '2C3':[13.7, 103.72, 199.46, 0.], '2N1':[15., 104.25, 225.8, 0.],
    '2W0':[15., 110.11, 224.22, 0.], '2W1':[14.42, 125.06, 224.55, 0.], '2W2':[17.13, 141.58, 209.99, 0.],
    '3C1':[15.7, 118.18, 212.01, 0.], '3C2':[15.7, 118.18, 212.01, 0.], '3N1':[16.1, 150.1, 189.09, 0.],
    '3N2':[15.18, 140.68, 202.63, 0.], '3W1':[15.66, 129.81, 209.44, 0.], '3W2':[17.05, 161.56, 184.85, 0.],
    '4C3':[14.23, 148.1, 198.74, 0.], '4N1':[15.92, 159.35, 190.71, 0.], '4N2':[17.29, 166.95, 195.76, 0.],
    '4W1':[14.67, 143.61, 208.85, 0.], '4W2':[17.76, 162.02, 189.44, 0.], '5C1':[13.09, 179.49, 160.58, 0.],
    '5N1':[15.43, 187.36, 180.34, 0.], '5N2':[16.66, 207.88, 147.84, 0.], '5W1':[15.66, 163.85, 182.07, 0.],
    '5W2':[14.95, 160.63, 189.17, 0.], '6C2':[12.85, 179.52, 131.66, 0.], '6N1':[14.94, 185.61, 162.16, 0.],
    '6N2':[15.7, 183.46, 138.37, 0.], '6W1':[14.76, 166.57, 166.78, 0.], '6W2':[13.79, 176.99, 142.22, 0.],
    '7C1':[12.2, 191.5, 121.34, 0.], '7N1':[12.7, 162.67, 109.41, 0.], '7W1':[13.25, 165.64, 126.03, 0.],
    '8N1':[12.5, 191.83, 95.43, 0.], 'CR1':[14.09, 173.14, 163.66, 0.]}

# Updated measurements for the same shade codes; note '8N1' has no 'CR1'
# counterpart here (one fewer entry than color_chart).
color_chart_new = {
    '1C1':[14.63, 79.35, 239.58, 0.], '1N1':[16.89, 77.75, 243.46, 0.], '1N2':[13.27, 104.13, 231.18, 0.],
    '1W1':[17.78, 104.99, 236.54, 0.], '1W2':[16., 117.24, 234.86, 0.], '2C0':[17.16, 80.90, 240.48, 0.],
    '2C1':[14., 116.60, 237.21, 0.], '2C3':[13.36, 94.80, 231.17, 0.], '2N1':[16., 115.65, 238.19, 0.],
    '2W0':[15.79, 108.95, 237.93, 0.], '2W1':[15.01, 120.45, 240.01, 0.], '2W2':[17.97, 125.56, 243.83, 0.],
    '3C1':[10.99, 115.63, 226.18, 0.], '3C2':[10.84, 117.73, 219.17, 0.], '3N1':[11.9, 126.73, 228.04, 0.],
    '3N2':[11.43, 126.97, 224.13, 0.], '3W1':[13.14, 148.12, 229.10, 0.], '3W2':[14.01, 133.06, 234.48, 0.],
    '4C3':[11.68, 150.85, 219.34, 0.], '4N1':[12., 151.75, 190.41, 0.], '4N2':[12.24, 138.18, 206.75, 0.],
    '4W1':[12., 151.31, 224.04, 0.], '4W2':[12., 165.62, 201.74, 0.], '5C1':[10.4, 184.48, 176.72, 0.],
    '5N1':[11.68, 188.46, 210.23, 0.], '5N2':[10.98, 183.80, 195.04, 0.], '5W1':[12.73, 185.75, 221.30, 0.],
    '5W2':[10.83, 162.54, 211.10, 0.], '6C2':[9.29, 217.70, 111.99, 0.], '6N1':[11.24, 180.30, 156.76, 0.],
    '6N2':[11., 173.55, 145.55, 0.], '6W1':[11.09, 188.43, 171.41, 0.], '6W2':[11., 182.77, 151.02, 0.],
    '7C1':[8.07, 199.37, 115.59, 0.], '7N1':[9.93, 187.51, 122.57, 0.], '7W1':[9.86, 192.48, 135.62, 0.],
    '8N1':[8.64, 181.83, 109.53, 0.]}
5429df166b3efe8e9b12e537d9c5a2b68d7af8f7 | 235 | py | Python | leetCode/algorithms/easy/occurrences_after_bigram.py | ferhatelmas/algo | a7149c7a605708bc01a5cd30bf5455644cefd04d | [
"WTFPL"
] | 25 | 2015-01-21T16:39:18.000Z | 2021-05-24T07:01:24.000Z | leetCode/algorithms/easy/occurrences_after_bigram.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | [
"WTFPL"
] | 2 | 2020-09-30T19:39:36.000Z | 2020-10-01T17:15:16.000Z | leetCode/algorithms/easy/occurrences_after_bigram.py | ferhatelmas/algo | a7149c7a605708bc01a5cd30bf5455644cefd04d | [
"WTFPL"
] | 15 | 2015-01-21T16:39:27.000Z | 2020-10-01T17:00:22.000Z | from typing import List
| 29.375 | 87 | 0.595745 |
542a62b48d45febc53b82e238fe6ed286841ea91 | 454 | py | Python | src/pyuwds3/utils/egocentric_spatial_relations.py | LAAS-HRI/uwds3 | 42390f62ed5701a32710341b01faa10efc448078 | [
"MIT"
] | 2 | 2020-08-19T06:15:14.000Z | 2021-05-23T09:55:18.000Z | src/pyuwds3/utils/egocentric_spatial_relations.py | LAAS-HRI/uwds3 | 42390f62ed5701a32710341b01faa10efc448078 | [
"MIT"
] | 5 | 2021-01-06T09:00:35.000Z | 2021-01-20T13:22:19.000Z | src/pyuwds3/utils/egocentric_spatial_relations.py | LAAS-HRI/uwds3 | 42390f62ed5701a32710341b01faa10efc448078 | [
"MIT"
] | 2 | 2020-11-18T17:34:43.000Z | 2021-05-23T16:14:17.000Z |
import math
from scipy.spatial.distance import euclidean
from ..types.bbox import BoundingBox
| 19.73913 | 44 | 0.634361 |
542b4553e4da40bd25e9c35ead38f8985d1d5c31 | 2,883 | py | Python | machine_replacement_action_probs.py | dsbrown1331/broil | 3c06e15c560db3242c0e331a2b16cc578a843606 | [
"MIT"
] | 1 | 2021-03-29T09:53:53.000Z | 2021-03-29T09:53:53.000Z | machine_replacement_action_probs.py | dsbrown1331/broil | 3c06e15c560db3242c0e331a2b16cc578a843606 | [
"MIT"
] | 1 | 2020-11-22T15:05:48.000Z | 2020-11-25T00:10:17.000Z | machine_replacement_action_probs.py | dsbrown1331/broil | 3c06e15c560db3242c0e331a2b16cc578a843606 | [
"MIT"
] | null | null | null | import bayesian_irl
import mdp_worlds
import utils
import mdp
import numpy as np
import scipy
import random
import generate_efficient_frontier
from machine_replacement import generate_posterior_samples
if __name__=="__main__":
    # Fix every RNG involved so the experiment is reproducible.
    seed = 1234
    np.random.seed(seed)
    scipy.random.seed(seed)
    random.seed(seed)
    # MDP / posterior hyper-parameters.
    num_states = 4
    num_samples = 2000
    gamma = 0.95
    alpha = 0.99  # CVaR confidence level
    # Sample reward posteriors and take the mean reward per (state, action).
    posterior = generate_posterior_samples(num_samples)
    r_sa = np.mean(posterior, axis=1)
    init_distribution = np.ones(num_states)/num_states #uniform distribution
    mdp_env = mdp.MachineReplacementMDP(num_states, r_sa, gamma, init_distribution)
    print("mean MDP reward", r_sa)
    # Solve the mean-reward MDP via the LP formulation (occupancy measures).
    u_sa = mdp.solve_mdp_lp(mdp_env, debug=True)
    print("mean policy from posterior")
    utils.print_stochastic_policy_action_probs(u_sa, mdp_env)
    print("MAP/Mean policy from posterior")
    utils.print_policy_from_occupancies(u_sa, mdp_env)
    print("rewards")
    print(mdp_env.r_sa)
    print("expected value = ", np.dot(u_sa, r_sa))
    stoch_pi = utils.get_optimal_policy_from_usa(u_sa, mdp_env)
    print("expected return", mdp.get_policy_expected_return(stoch_pi, mdp_env))
    print("values", mdp.get_state_values(u_sa, mdp_env))
    print('q-values', mdp.get_q_values(u_sa, mdp_env))
    #run CVaR optimization, just the robust version
    u_expert = np.zeros(mdp_env.num_actions * mdp_env.num_states)
    posterior_probs = np.ones(num_samples) / num_samples #uniform dist since samples from MCMC
    #generate efficient frontier
    # lambda trades off expected return vs. CVaR (0 = pure robust).
    lambda_range = [0.0, 0.3, 0.75, 0.95, 1.0]
    import matplotlib.pyplot as plt
    from matplotlib.pyplot import cm
    # Grouped-bar plot settings: one bar group per state, one bar per lambda.
    bar_width = 0.15
    opacity = 0.9
    color=iter(cm.rainbow(np.linspace(0,1,6)))
    cnt = 0
    index = np.arange(num_states)
    for i,lamda in enumerate(lambda_range):
        print("lambda = ", lamda)
        # Optimize the CVaR objective for this lambda.
        cvar_opt_usa, cvar, exp_ret = mdp.solve_max_cvar_policy(mdp_env, u_expert, posterior, posterior_probs, alpha, False, lamda)
        print('action probs')
        utils.print_stochastic_policy_action_probs(cvar_opt_usa, mdp_env)
        stoch_pi = utils.get_optimal_policy_from_usa(cvar_opt_usa, mdp_env)
        print(stoch_pi[:,1])
        c = next(color)
        plt.figure(1)
        label = r"$\lambda={}$".format(lamda)
        # Column 0 of the policy = probability of the "do nothing" action.
        rects1 = plt.bar(index + cnt * bar_width,stoch_pi[:,0], bar_width,
                         alpha=opacity, label=label, color=c)
        cnt += 1
    plt.figure(1)
    plt.axis([-1,5,0, 1])
    plt.yticks(fontsize=18)
    plt.xticks(index + 2*bar_width, ('1', '2', '3', '4'), fontsize=18)
    plt.legend(loc='best', fontsize=16)
    plt.xlabel('State',fontsize=20)
    plt.ylabel('Pr(Do Nothing $\mid$ State)',fontsize=20)
    plt.tight_layout()
    plt.savefig("./figs/machine_replacement/action_probs_machine_replacement.png")
    plt.show()
542b464eeb35182c67fc88683f7b87c523d2bec7 | 5,982 | py | Python | sequential/seq_smnist/train_args_seq_smnist.py | mariacer/cl_in_rnns | 333b8e03391600a8e3df7d684a3f171b135d273a | [
"Apache-2.0"
] | 26 | 2020-06-17T08:44:15.000Z | 2022-03-20T04:21:13.000Z | sequential/seq_smnist/train_args_seq_smnist.py | mariacer/cl_in_rnns | 333b8e03391600a8e3df7d684a3f171b135d273a | [
"Apache-2.0"
] | null | null | null | sequential/seq_smnist/train_args_seq_smnist.py | mariacer/cl_in_rnns | 333b8e03391600a8e3df7d684a3f171b135d273a | [
"Apache-2.0"
] | 4 | 2020-10-26T02:19:38.000Z | 2021-12-26T02:26:05.000Z | #!/usr/bin/env python3
# Copyright 2019 Benjamin Ehret, Maria Cervera
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @title :sequential/smnist/train_args_seq_smnist.py
# @author :be
# @contact :behret@ethz.ch
# @created :24/03/2020
# @version :1.0
# @python_version :3.6.8
"""
Command-line arguments and default values for the sequential SMNIST task are
handled here.
"""
import argparse
import warnings
import utils.cli_args as cli
import sequential.train_args_sequential as seq
def parse_cmd_arguments(default=False, argv=None):
    """Parse command-line arguments.

    Args:
        default (optional): If True, command-line arguments will be ignored and
            only the default values will be parsed.
        argv (optional): If provided, it will be treated as a list of command-
            line argument that is passed to the parser in place of sys.argv.

    Returns:
        The Namespace object containing argument names and values.
    """
    description = 'Continual learning on sequential SMNIST task.'
    parser = argparse.ArgumentParser(description=description)
    # Continual-learning and training argument groups (project CLI helpers;
    # the d*-prefixed keyword arguments set the defaults for this task).
    cli.cl_args(parser, show_beta=True, dbeta=0.005,
                show_from_scratch=True, show_multi_head=True,
                show_split_head_cl3=False, show_cl_scenario=False,
                show_num_tasks=True, dnum_tasks=45)
    cli.train_args(parser, show_lr=True, show_epochs=False,
                   dbatch_size=64, dn_iter=5000,
                   dlr=1e-3, show_clip_grad_value=False, show_clip_grad_norm=True,
                   show_momentum=False, show_adam_beta1=True)
    seq.rnn_args(parser, drnn_arch='256', dnet_act='tanh')
    cli.hypernet_args(parser, dhyper_chunks=-1, dhnet_arch='50,50',
                      dtemb_size=32, demb_size=32, dhnet_act='relu')
    # Args of new hnets.
    nhnet_args = cli.hnet_args(parser, allowed_nets=['hmlp', 'chunked_hmlp',
        'structured_hmlp', 'hdeconv', 'chunked_hdeconv'], dhmlp_arch='50,50',
        show_cond_emb_size=True, dcond_emb_size=32, dchmlp_chunk_size=1000,
        dchunk_emb_size=32, show_use_cond_chunk_embs=True,
        dhdeconv_shape='512,512,3', prefix='nh_',
        pf_name='new edition of a hyper-', show_net_act=True, dnet_act='relu',
        show_no_bias=True, show_dropout_rate=True, ddropout_rate=-1,
        show_specnorm=True, show_batchnorm=False, show_no_batchnorm=False)
    seq.new_hnet_args(nhnet_args)
    cli.init_args(parser, custom_option=False, show_normal_init=False,
                  show_hyper_fan_init=True)
    cli.eval_args(parser, dval_iter=250, show_val_set_size=True,
                  dval_set_size=1000)
    magroup = cli.miscellaneous_args(parser, big_data=False,
        synthetic_data=True, show_plots=True, no_cuda=True,
        show_publication_style=False)
    # Regularizer-specific options (EWC, SI, context modulation).
    seq.ewc_args(parser, dewc_lambda=5000., dn_fisher=-1, dtbptt_fisher=-1,
                 dts_weighting_fisher='last')
    seq.si_args(parser, dsi_lambda=1.)
    seq.context_mod_args(parser, dsparsification_reg_type='l1',
        dsparsification_reg_strength=1., dcontext_mod_init='constant')
    seq.miscellaneous_args(magroup, dmask_fraction=0.8, dclassification=True,
        dts_weighting='last', show_use_ce_loss=False,
        show_early_stopping_thld=True)
    # Replay arguments.
    rep_args = seq.replay_args(parser)
    cli.generator_args(rep_args, dlatent_dim=100)
    cli.main_net_args(parser, allowed_nets=['simple_rnn'],
        dsrnn_rec_layers='256', dsrnn_pre_fc_layers='',
        dsrnn_post_fc_layers='',
        show_net_act=True, dnet_act='tanh', show_no_bias=True,
        show_dropout_rate=False, show_specnorm=False, show_batchnorm=False,
        prefix='dec_', pf_name='replay decoder')
    # Task-specific options (defined below in this module).
    seq_args(parser)
    args = None
    if argv is not None:
        if default:
            warnings.warn('Provided "argv" will be ignored since "default" ' +
                          'option was turned on.')
        args = argv
    if default:
        # Parsing an empty list yields pure defaults.
        args = []
    config = parser.parse_args(args=args)

    ### Check argument values!
    cli.check_invalid_argument_usage(config)
    seq.check_invalid_args_sequential(config)

    if config.train_from_scratch:
        # FIXME We could get rid of this warning by properly checkpointing and
        # loading all networks.
        warnings.warn('When training from scratch, only during accuracies ' +
                      'make sense. All other outputs should be ignored!')

    return config
def seq_args(parser):
    """This is a helper function of function :func:`parse_cmd_arguments` to add
    specific arguments to the argument group related to seq smnist task.

    Arguments specified in this function:
        - `ssmnist_seq_len`
        - `ssmnist_two_classes`

    Args:
        parser: Object of class :class:`argparse.ArgumentParser`.
    """
    sgroup = parser.add_argument_group('SSMNIST options')
    sgroup.add_argument('--ssmnist_seq_len', type=int, default=2,
                        help='The number of digits used in a sequence. ' +
                             'Default: %(default)s.')
    sgroup.add_argument('--ssmnist_two_classes', action='store_true',
                        help='If used, every task will have only 2 classes. ' +
                             'Instead of classifying every possible sequence ' +
                             'individually, sequences are randomly grouped ' +
                             'into 2 classes.')
if __name__=='__main__':
    # This module is import-only; running it directly does nothing.
    pass
542b4d4125780654fe2bbd178dc02f72ba260ddd | 2,490 | py | Python | examples/compare.py | guo-yong-zhi/wordcloud2 | 43d34766323e8eec45d46eeaa98537849f48cd37 | [
"MIT"
] | null | null | null | examples/compare.py | guo-yong-zhi/wordcloud2 | 43d34766323e8eec45d46eeaa98537849f48cd37 | [
"MIT"
] | null | null | null | examples/compare.py | guo-yong-zhi/wordcloud2 | 43d34766323e8eec45d46eeaa98537849f48cd37 | [
"MIT"
] | null | null | null | from wordcloud2 import wordcloud as W
import os
from PIL import Image
# Words excluded from both wordclouds in addition to the English stopwords.
stwords = {"us", "will"}

print("==Obama's==")
# Shared colour scheme and angle set so both clouds are visually comparable.
cs = W.randomscheme() #:Set1_8
as_ = W.randomangles() #(0,90,45,-45)
dens = 0.5 #not too high
wca = W.wordcloud(
    W.processtext(open(W.pkgdir(W.WordCloud)+"/res/Barack Obama's First Inaugural Address.txt").read(),
                  stopwords=set(W.stopwords_en).union(stwords)),
    colors = cs,
    angles = as_,
    density = dens)
wca.generate()
#md# ### Then generate the wordcloud on the right
print("==Trump's==")
# Second cloud reuses the first cloud's mask, colours and angles, but defers
# layout (run=W.identity) so shared words can be positioned to match.
wcb = W.wordcloud(
    W.processtext(open(W.pkgdir(W.WordCloud)+"/res/Donald Trump's Inaugural Address.txt").read(),
                  stopwords=set(W.stopwords_en).union(stwords)),
    mask = wca.getsvgmask(),
    colors = cs,
    angles = as_,
    density = dens,
    run = W.identity, #turn off the useless initimage! and placement! in advance
    )
#md# Follow these steps to generate a wordcloud: initimage! -> placement! -> generate!
# Words appearing in both speeches get identical colour/angle in both clouds.
samewords = list(set(wca.getwords()).intersection(wcb.getwords()))
print(len(samewords), "same words")
for w in samewords:
    wcb.setcolors(w, wca.getcolors(w))
    wcb.setangles(w, wca.getangles(w))
wcb.initimages()
wcb.setstate(":placement!")
print("=ignore defferent words=")
with wcb.keep(samewords) as wcb:
    assert set(wcb.getwords()) == set(samewords)
    # Seed shared words at the same positions as in the first cloud.
    centers = wca.getpositions(samewords, type=W.Ju.getcenter)
    wcb.setpositions(samewords, centers, type=W.Ju.setcenter_b) #manually initialize the position,
    wcb.setstate(":placement!") #and set the state flag
    wcb.generate(1000, patient=-1, retry=1) #patient=-1 means no teleport; retry=1 means no rescale
print("=pin same words=")
with wcb.pin(samewords):
    wcb.placement()
    wcb.generate(1000, retry=1) #allow teleport but dont allow rescale
if wcb.getstate() != ":generate!":
    print("=overall tuning=")
    wcb.generate(1000, patient=-1, retry=2) #allow rescale but dont allow teleport
# Paint both clouds side by side into one RGBA canvas.
ma = wca.paint()
mb = wcb.paint()
sp = ma.width//20
cmp = Image.new('RGBA', (ma.width*2+sp, ma.height))
cmp.paste(ma, (0, 0, ma.width, ma.height))
cmp.paste(mb, (ma.width+sp, 0, ma.width*2+sp, ma.height))
os.makedirs('address_compare', exist_ok=True)
print("results are saved in address_compare")
cmp.save("address_compare/compare.png")
# Record per-cloud animations of the layout process.
gif = W.GIF("address_compare")
wca.record("Obama", gif)
wcb.record("Trump", gif)
W.gif_generate(gif, framerate=1)
#md# 
#md# 
542b9661a1d12114a162b51bacab5cac808471e8 | 3,520 | py | Python | modules/insight/nbCurvesLevelSet.py | chrisidefix/devide | 99bfe156e710fa47ba7ae88b0ce1eef592a3a439 | [
"BSD-3-Clause"
] | 25 | 2015-08-24T16:05:14.000Z | 2020-12-09T20:07:14.000Z | modules/insight/nbCurvesLevelSet.py | chrisidefix/devide | 99bfe156e710fa47ba7ae88b0ce1eef592a3a439 | [
"BSD-3-Clause"
] | 1 | 2016-02-16T21:18:10.000Z | 2016-02-16T21:18:10.000Z | modules/insight/nbCurvesLevelSet.py | chrisidefix/devide | 99bfe156e710fa47ba7ae88b0ce1eef592a3a439 | [
"BSD-3-Clause"
] | 5 | 2016-02-16T20:05:37.000Z | 2020-01-31T11:27:39.000Z | # Copyright (c) Charl P. Botha, TU Delft
# All rights reserved.
# See COPYRIGHT for details.
import itk
import module_kits.itk_kit as itk_kit
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
| 32 | 76 | 0.600284 |
58087fdf8d89ae3ca538e157ca99613c2f7a205f | 2,835 | py | Python | setup.py | ThomasChiroux/ejabberd_external_auth_jwt | fce68cca70ca578b3c1c002a4dea2aa65e3150c1 | [
"MIT"
] | null | null | null | setup.py | ThomasChiroux/ejabberd_external_auth_jwt | fce68cca70ca578b3c1c002a4dea2aa65e3150c1 | [
"MIT"
] | null | null | null | setup.py | ThomasChiroux/ejabberd_external_auth_jwt | fce68cca70ca578b3c1c002a4dea2aa65e3150c1 | [
"MIT"
] | null | null | null | #
#
# Copyright 2018-2019 Happineo
#
"""setuptools installer for zamita."""
import os
import uuid

from setuptools import find_packages
from setuptools import setup
from setuptools.command.build_py import build_py

# local imports
from build_scripts.version import VersionInfo

HERE = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(HERE, "README.md"), encoding="UTF-8").read()
NEWS = open(os.path.join(HERE, "NEWS.md"), encoding="UTF-8").read()

PROJECT_NAME = "ejabberd_external_auth_jwt"

# Resolve the package version: try the VCS-derived version first, then the
# RELEASE-VERSION file shipped inside the package, and finally fall back to
# "0.0.0".
VERSION = None
try:
    VERSION = VersionInfo().version
except Exception:
    pass
if VERSION is None or not VERSION:
    try:
        VERSION_FILE = open(f"{PROJECT_NAME}/RELEASE-VERSION", "r")
        try:
            VERSION = VERSION_FILE.readlines()[0]
            VERSION = VERSION.strip()
        except Exception:
            VERSION = "0.0.0"
        finally:
            VERSION_FILE.close()
    except IOError:
        VERSION = "0.0.0"

# Runtime dependencies come straight from requirements.txt; drop a leading
# "-i <index-url>" line if present (pipenv-style requirement files).
with open("requirements.txt") as f:
    requirements = f.read().splitlines()
if requirements[0].startswith("-i"):
    requirements = requirements[1:]

setup(
    name=PROJECT_NAME,
    version=VERSION,
    description="ejabberd_external_auth_jwt",
    long_description=README + "\n\n" + NEWS,
    # NOTE(review): CustomBuild is not defined in this excerpt -- presumably a
    # build_py subclass defined elsewhere in this file; verify.
    cmdclass={"build_py": CustomBuild},
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Operating System :: Linux",
    ],
    keywords="",
    author="Thomas Chiroux",
    author_email="",
    url="https://www.github.com/ThomasChiroux/ejabberd_external_auth_jwt",
    license="LICENSE.txt",
    packages=find_packages(exclude=["ez_setup"]),
    package_data={"": ["*.rst", "*.md", "*.yaml", "*.cfg"]},
    include_package_data=True,
    zip_safe=False,
    test_suite="pytest",
    tests_require=[],
    install_requires=requirements,
    entry_points={
        "console_scripts": [
            "ejabberd_external_auth_jwt=ejabberd_external_auth_jwt.main:main_sync"
        ]
    },
)
| 27.794118 | 82 | 0.618342 |
5808c926d701d604229b7c9061a8576e5eb62676 | 4,724 | py | Python | Analysis/Feb2021/common_plotting.py | TimChild/dat_analysis | 2902e5cb2f2823a1c7a26faf6b3b6dfeb7633c73 | [
"MIT"
] | 2 | 2021-03-07T03:17:13.000Z | 2021-03-07T03:17:16.000Z | Analysis/Feb2021/common_plotting.py | TimChild/dat_analysis | 2902e5cb2f2823a1c7a26faf6b3b6dfeb7633c73 | [
"MIT"
] | 1 | 2021-03-09T00:00:52.000Z | 2021-03-09T00:00:52.000Z | Analysis/Feb2021/common_plotting.py | TimChild/dat_analysis | 2902e5cb2f2823a1c7a26faf6b3b6dfeb7633c73 | [
"MIT"
] | null | null | null | """
Sep 21 -- A few of the plots used in analysis, very far from a complete list, and probably most are too specific to be
useful again.
Moved useful functions from here.
"""
from __future__ import annotations
from typing import List, Callable, Optional, Union, TYPE_CHECKING
import numpy as np
from dat_analysis.analysis_tools.entropy import dat_integrated_sub_lin
from dat_analysis.plotting.plotly.hover_info import HoverInfo
if TYPE_CHECKING:
pass
def common_dat_hover_infos(datnum=True,
                           heater_bias=False,
                           fit_entropy_name: Optional[str] = None,
                           fit_entropy=False,
                           int_info_name: Optional[str] = None,
                           output_name: Optional[str] = None,
                           integrated_entropy=False,
                           sub_lin: bool = False,
                           sub_lin_width: Optional[Union[float, Callable]] = None,
                           int_info=False,
                           amplitude=False,
                           theta=False,
                           gamma=False,
                           ) -> List[HoverInfo]:
    """
    Returns a list of HoverInfos for the specified parameters. To do more complex things, append specific
    HoverInfos before/after this.

    Examples:
        hover_infos = common_dat_hover_infos(datnum=True, amplitude=True, theta=True)
        hover_group = HoverInfoGroup(hover_infos)

    Args:
        datnum ():
        heater_bias ():
        fit_entropy_name (): Name of saved fit_entropy if wanting fit_entropy
        fit_entropy ():
        int_info_name (): Name of int_info if wanting int_info or integrated_entropy
        output_name (): Name of SE output to integrate (defaults to int_info_name)
        integrated_entropy ():
        sub_lin (): Whether to subtract linear term from integrated_info first
        sub_lin_width (): Width of transition to avoid in determining linear terms
        int_info (): amp/dT/sf from int_info
        amplitude (): accepted but currently unused (no hover info emitted)
        theta (): accepted but currently unused (no hover info emitted)
        gamma (): accepted but currently unused (no hover info emitted)

    Returns:
        List[HoverInfo]:
    """
    hover_infos = []
    if datnum:
        hover_infos.append(HoverInfo(name='Dat', func=lambda dat: dat.datnum, precision='.d', units=''))
    if heater_bias:
        hover_infos.append(HoverInfo(name='Bias', func=lambda dat: dat.AWG.max(0) / 10, precision='.1f', units='nA'))
    if fit_entropy:
        hover_infos.append(HoverInfo(name='Fit Entropy',
                                     func=lambda dat: dat.Entropy.get_fit(name=fit_entropy_name,
                                                                          check_exists=True).best_values.dS,
                                     precision='.2f', units='kB'), )
    if integrated_entropy:
        if output_name is None:
            output_name = int_info_name
        if sub_lin:
            if sub_lin_width is None:
                raise ValueError('Must specify sub_lin_width if subtracting linear term from integrated entropy')
            elif not isinstance(sub_lin_width, Callable):
                # Bug fix: the original did `sub_lin_width = lambda _: sub_lin_width`,
                # which (due to late binding) made the lambda return ITSELF rather
                # than the scalar width. Capture the scalar in a separate name first.
                width_value = sub_lin_width
                sub_lin_width = lambda _: width_value
            data = lambda dat: dat_integrated_sub_lin(dat, signal_width=sub_lin_width(dat), int_info_name=int_info_name,
                                                      output_name=output_name)
            hover_infos.append(HoverInfo(name='Sub lin width', func=sub_lin_width, precision='.1f', units='mV'))
        else:
            data = lambda dat: dat.Entropy.get_integrated_entropy(
                name=int_info_name,
                data=dat.SquareEntropy.get_Outputs(
                    name=output_name).average_entropy_signal)
        # Average of the last 10 points of the integrated entropy signal.
        hover_infos.append(HoverInfo(name='Integrated Entropy',
                                     func=lambda dat: np.nanmean(data(dat)[-10:]),
                                     precision='.2f', units='kB'))
    if int_info:
        info = lambda dat: dat.Entropy.get_integration_info(name=int_info_name)
        hover_infos.append(HoverInfo(name='SF amp',
                                     func=lambda dat: info(dat).amp,
                                     precision='.3f',
                                     units='nA'))
        hover_infos.append(HoverInfo(name='SF dT',
                                     func=lambda dat: info(dat).dT,
                                     precision='.3f',
                                     units='mV'))
        hover_infos.append(HoverInfo(name='SF',
                                     func=lambda dat: info(dat).sf,
                                     precision='.3f',
                                     units=''))

    return hover_infos
| 44.990476 | 120 | 0.556308 |
580a05b1f8e364040a8ccda54856a6eead097400 | 9,980 | py | Python | Code/sphero_learn.py | rvarga601/IER | 1cf05e641dea2fb3b4ad5329e3e556713cc199fe | [
"MIT"
] | null | null | null | Code/sphero_learn.py | rvarga601/IER | 1cf05e641dea2fb3b4ad5329e3e556713cc199fe | [
"MIT"
] | null | null | null | Code/sphero_learn.py | rvarga601/IER | 1cf05e641dea2fb3b4ad5329e3e556713cc199fe | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 10 23:54:16 2021

@author: rolandvarga
"""

import gym
import numpy as np
import matplotlib.pyplot as plt
import time
from scipy.signal import savgol_filter
import pickle

#%matplotlib qt
#%matplotlib inline

# Set to 1 to repeat SARSA learning (With Intel Core i7-8750H it takes
# around 70 minutes), 0 for loading previous result
REPEAT_LEARNING = 0

# Parameter to set which tests to do
DO_TEST1 = 1 # Simulate the system once and plot the trajectory
DO_TEST2 = 0 # Simulate the system 1000 times and plot success-rate

# Set to 1 to plot a projection of the state-value function V
PLOT_STATEVALUE = 1

#%% Load previous result
if REPEAT_LEARNING == 0:
    filename='train_6x6x20x60000.pickle'
    # NOTE(review): unpickles a locally produced training artifact; only load
    # files you trust -- pickle can execute arbitrary code.
    with open(filename, 'rb') as f:
        cell_nums, dhat, durations, Q, reward_set, rhat, start_time, end_time, states_high, max_steps = pickle.load(f)
#%% SARSA learning
env = gym.make('SphericalRobot-v0')

# NOTE(review): choose_action(), discretize_state() and update() are called
# below but are not defined in this excerpt; the two orphaned comments here
# presumably belonged to those definitions -- verify against the full file.
#Function to choose the next action
#Convert continuous state-space to discrete

if REPEAT_LEARNING == 1:
    # Learning parameters
    epsilon = 0.3 # For start (epsilon-greedy exploration rate)
    total_episodes = 100
    max_steps = 300
    alpha = 0.1   # presumably the learning rate used by update() -- confirm
    gamma = 0.99  # presumably the discount factor used by update() -- confirm

    # The discretization of the states
    states_high = np.array([6,6,2*np.pi/env.c]) # Set boundaries for the values
    cell_nums = np.array([6,6,20]) # Set the number of discrete cells

    #Initializing the Q-matrix (state cells x 3x3 discrete actions)
    Q = np.ones(np.append(cell_nums,[3,3]))

    #Function to update the Q-value

    #Initializing the reward
    # reward=0
    reward_set = []  # cumulative reward of each episode
    durations = []   # number of steps of each episode

    start_time = time.time()
    # Starting the SARSA learning
    for episode in range(total_episodes):
        t = 0
        cumm_reward = 0
        state1 = env.reset()
        state1_d = discretize_state(state1, -states_high, states_high, cell_nums)
        action1 = choose_action(tuple(state1_d), epsilon)
        states = [state1]
        while t < max_steps:
            # Visualizing the training, TODO
            # env.render()

            # Getting the next state
            state2, reward, done, info = env.step(action1)
            # Note: The 3rd state is the difference between the wheel angles
            state1_d = discretize_state(np.array([state1[0],state1[1], state1[2]-state1[3]]),
                                        -states_high, states_high, cell_nums)
            state2_d = discretize_state(np.array([state2[0],state2[1], state2[2]-state2[3]]),
                                        -states_high, states_high, cell_nums)

            # Choosing the next action
            action2 = choose_action(tuple(state2_d), epsilon)

            # Updating the Q-value
            update(tuple(state1_d), tuple(state2_d), reward, tuple(action1), tuple(action2))

            # Update variables for next iteration
            state1 = state2
            action1 = action2

            # Save state to be able to plot trajectories
            states.append(state2)

            #Updating the respective values
            t += 1
            cumm_reward += reward

            #If at the end of learning process
            if done:
                break

        reward_set.append(cumm_reward)
        durations.append(t)

        # plt.figure(0)
        # x = np.array(states)[:,0]
        # y = np.array(states)[:,1]
        # plt.scatter(x, y)
        # plt.xlim(-5, 5)
        # plt.ylim(-5, 5)
        # plt.show()

    # Print time it took to run the learning
    end_time = time.time()
    print("--- %s seconds ---" % (end_time - start_time))

    # Plot the filtered rewards during the learning
    plt.figure(1)
    #plt.plot(reward_set)
    rhat = savgol_filter(reward_set, 501, 3) # window size 501, polynomial order 3
    plt.plot(rhat)
    #plt.ylim(-500, 500)
    plt.xlabel(r"Episode [-]")
    plt.ylabel(r"Reward [-]")
    plt.legend()
    plt.savefig('reward_learning.eps', format='eps', bbox_inches='tight')
    plt.show()

    # Plot the filtered episode lengths during the learning
    plt.figure(2)
    #plt.plot(durations)
    dhat = savgol_filter(durations, 51, 3) # window size 51, polynomial order 3
    plt.plot(dhat)
    plt.show()
#%% Test 1: Generate one trajectory
if DO_TEST1 == 1:
    t = 0
    cumm_reward = 0
    state1 = env.reset()
    state1_d = discretize_state(state1, -states_high, states_high, cell_nums)
    # epsilon = 0.0 -> act greedily w.r.t. the learned Q (no exploration)
    action1 = choose_action(tuple(state1_d), 0.0)
    states = [state1]
    actions = [action1]
    while t < max_steps:
        #Visualizing the training
        # env.render()

        #Getting the next state
        state2, reward, done, info = env.step(action1)
        state1_d = discretize_state(np.array([state1[0],state1[1], state1[2]-state1[3]]),
                                    -states_high, states_high, cell_nums)
        state2_d = discretize_state(np.array([state2[0],state2[1], state2[2]-state2[3]]),
                                    -states_high, states_high, cell_nums)

        #Choosing the next action
        action2 = choose_action(tuple(state2_d), 0.0)

        #Learning the Q-value (disabled during evaluation)
        #update(tuple(state1_d), tuple(state2_d), reward, tuple(action1), tuple(action2))

        state1 = state2
        action1 = action2
        states.append(state2)
        actions.append(action2)

        #Updating the respective values
        t += 1
        cumm_reward += reward

        #If at the end of learning process
        if done:
            break

    print(reward)

    # Plot trajectory on 2D plot
    plt.figure(3)
    x = np.array(states)[:,0]
    y = np.array(states)[:,1]
    plt.scatter(x, y)
    plt.xlim(-5, 5)
    plt.ylim(-5, 5)
    plt.xticks(np.arange(-5, 6, 1))
    plt.yticks(np.arange(-5, 6, 1))
    plt.gca().set_aspect('equal', adjustable='box')
    plt.xlabel(r"$x_1$ [m]")
    plt.ylabel(r"$x_2$ [m]")
    plt.legend()
    plt.savefig('trajectory.eps', format='eps', bbox_inches='tight')
    plt.show()

    # Plot position states separately
    plt.figure(4)
    plt.plot(x, label="x1")
    plt.plot(y, label="x2")
    plt.xlabel(r"Time step [-]")
    plt.ylabel(r"Coordinate [m]")
    plt.legend()
    plt.savefig('trajectory_plot.eps', format='eps', bbox_inches='tight')
    plt.show()
#%% Test 2: Successful-unsuccessful tries
if DO_TEST2 == 1:
    cumm_rewards = []
    for k in range(1000):
        t = 0
        cumm_reward = 0
        state1 = env.reset()
        state1_d = discretize_state(state1, -states_high, states_high, cell_nums)
        # Greedy policy (epsilon = 0.0) for evaluation.
        action1 = choose_action(tuple(state1_d), 0.0)
        while t < max_steps:
            #Visualizing the training
            # env.render()

            #Getting the next state
            state2, reward, done, info = env.step(action1)
            state1_d = discretize_state(np.array([state1[0],state1[1], state1[2]-state1[3]]),
                                        -states_high, states_high, cell_nums)
            state2_d = discretize_state(np.array([state2[0],state2[1], state2[2]-state2[3]]),
                                        -states_high, states_high, cell_nums)

            #Choosing the next action
            action2 = choose_action(tuple(state2_d), 0.0)

            #Learning the Q-value (disabled during evaluation)
            #update(tuple(state1_d), tuple(state2_d), reward, tuple(action1), tuple(action2))

            state1 = state2
            action1 = action2
            #states.append(state2)
            #actions.append(action2)

            #Updating the respective values
            t += 1
            cumm_reward += reward

            #If at the end of learning process
            if done:
                break

        cumm_rewards.append(cumm_reward)

    print("Average reward out of 1000 try: " + str(np.average(np.array(cumm_rewards))))

    plt.figure(5)
    # Two bins: episodes ending with negative vs. positive cumulative reward.
    plt.hist(cumm_rewards,np.array([-1000,0,1000]))
    plt.show()
#%% Additional plot: State-value function
if PLOT_STATEVALUE == 1:
    # V(x1, x2) = max over the remaining state/action dimensions of Q.
    V = np.zeros([cell_nums[0],cell_nums[1]])
    for k in range(V.shape[0]):
        for l in range(V.shape[1]):
            V[k,l]=np.amax(Q[k,l,:])

    plt.figure(6)
    plt.imshow(V, cmap='coolwarm', interpolation='nearest')
    plt.colorbar()
    plt.savefig('state_value.eps', format='eps', bbox_inches='tight')
    plt.show()
| 30.993789 | 118 | 0.577154 |
580b61225012c491f65cb5e42655216093dbdb35 | 8,952 | py | Python | HW7/kernel_eigenface.py | joycenerd/Machine_Learning_2021 | ecb634a9f2f1112a393a9707ce69c3bc751c4542 | [
"MIT"
] | 1 | 2021-11-18T09:22:21.000Z | 2021-11-18T09:22:21.000Z | HW7/kernel_eigenface.py | joycenerd/Machine_Learning_2021 | ecb634a9f2f1112a393a9707ce69c3bc751c4542 | [
"MIT"
] | null | null | null | HW7/kernel_eigenface.py | joycenerd/Machine_Learning_2021 | ecb634a9f2f1112a393a9707ce69c3bc751c4542 | [
"MIT"
] | null | null | null | from scipy.spatial.distance import cdist
from numpy.linalg import eig, norm, pinv
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import argparse
import ntpath
import glob
import os
# Command-line options for choosing the task and the kernel configuration.
parser = argparse.ArgumentParser()
parser.add_argument("--option", type=str, default="PCA",
                    help="Choose which task to do: [PCA, LDA]")
parser.add_argument("--img-size", type=int, default=50,
                    help="image resize shape")
parser.add_argument("--kernel-type", type=str, default="linear",
                    help="kernel type for PCA/LDA: [linear, polynomial, rbf]")
parser.add_argument("--gamma", type=float, default=1,
                    help="gamma value for polynomial or rbf kernel")
parser.add_argument("--coeff", type=int, default=2,
                    help="coeff value for polynomial kernel")
parser.add_argument("--degree", type=int, default=20,
                    help="degree value for polynomial kernel")
args = parser.parse_args()

# Dataset location and output directory.
DATA_PATH = "./Yale_Face_Database/"
SAVE_PATH = "./results/"
if __name__ == "__main__":
    option = args.option
    kernel_type = args.kernel_type

    # read training and testing data
    # NOTE(review): read_data(), pca(), lda(), get_kernel(), draw_eigenface(),
    # reconstruct(), project() and face_recognition() are not defined in this
    # excerpt -- verify against the full file.
    train_data, train_filepath, train_label = read_data(DATA_PATH+"Training/")
    test_data, test_filepath, test_label = read_data(DATA_PATH+"Testing/")
    data = np.vstack((train_data, test_data)) # (165,10000)
    filepath = np.hstack((train_filepath, test_filepath)) # (165,)
    label = np.hstack((train_label, test_label)) # (165,)
    num_of_data = label.shape[0]
    print(f"Num of data: {num_of_data}")

    if option == "PCA":
        # Plain PCA: eigenfaces, reconstruction of 10 random samples and
        # face recognition on the projected data.
        rand_idx = np.random.randint(num_of_data, size=10)
        samples = data[rand_idx, :] # (10,10000)
        x_bar, W = pca(data)
        draw_eigenface(W, "eigenface")
        print("eigenface completed...")
        reconstruct(samples, W, "pca", x_bar)
        print("reconstruction completed...")
        train_proj, test_proj = project(train_data, test_data, W, x_bar)
        face_recognition(train_proj, train_label, test_proj, test_label)
        print("pca face recognition completed...\n")

        # Kernel PCA, example invocations:
        # python kernel_eigenface.py --option PCA --kernel-type polynomial --gamma 5 --coeff 1 --degree 2
        # python kernel_eigenface.py --option PCA --kernel-type rbf --gamma 1e-7
        kernel = get_kernel(data)
        _, W = pca(data, kernel_type, kernel)
        train_kernel = kernel[:train_label.shape[0], :]
        test_kernel = kernel[train_label.shape[0]:, :]
        train_proj, test_proj = project(train_kernel, test_kernel, W)
        face_recognition(train_proj, train_label, test_proj, test_label)
        print(
            f"kernel pca with {kernel_type} kernel face recognition completed...")

    if option == "LDA":
        rand_idx = np.random.randint(num_of_data, size=10)
        samples = data[rand_idx, :] # (10,10000)
        W = lda(data, label)
        draw_eigenface(W, "fisherface")
        print("fisherface completed...")
        reconstruct(samples, W, "lda")
        print("reconstruction completed...")
        train_proj, test_proj = project(train_data, test_data, W)
        face_recognition(train_proj, train_label, test_proj, test_label)
        print("lda face recognition completed...\n")

        # Kernel LDA, example invocations:
        # python kernel_eigenface.py --option LDA --kernel-type polynomial --gamma 1 --coeff 2 --degree 20
        # python kernel_eigenface.py --option PCA --kernel-type rbf --gamma 1e-4
        kernel = get_kernel(data.T)
        # NOTE(review): lda() receives kernel_type here, while the plain-LDA
        # call above passed label as the second argument -- confirm lda()'s
        # signature supports both call forms.
        W = lda(kernel, kernel_type)
        train_kernel = kernel[:train_label.shape[0], :]
        test_kernel = kernel[train_label.shape[0]:, :]
        train_proj, test_proj = project(train_kernel, test_kernel, W)
        face_recognition(train_proj, train_label, test_proj, test_label)
        print(
            f"kernel lda with {kernel_type} kernel face recognition completed...")
580c8290606fc382a91ddcb30034d1076a50dc58 | 18,427 | py | Python | duqo/optimization/predict.py | canbooo/pyRDO | f7143438aa30cc79587c9f35fc9ff6aa262fc4d3 | [
"BSD-3-Clause"
] | 11 | 2021-08-17T05:55:01.000Z | 2022-02-03T13:16:42.000Z | duqo/optimization/predict.py | canbooo/pyRDO | f7143438aa30cc79587c9f35fc9ff6aa262fc4d3 | [
"BSD-3-Clause"
] | null | null | null | duqo/optimization/predict.py | canbooo/pyRDO | f7143438aa30cc79587c9f35fc9ff6aa262fc4d3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 9 15:33:47 2019
@author: Bogoclu
"""
import typing
import multiprocessing as mp
import warnings
import numpy as np
from scipy import stats
from .space import FullSpace
from duqo.proba import DS, MC, SUSE, ISPUD, FORM
from duqo.doe.lhs import make_doe
def _check_obj_wgt(obj_weights, num_obj):
""" Check obj_wgt argument passed to CondMom """
if obj_weights is None:
return None
try:
_ = obj_weights[0]
except (TypeError, IndexError):
obj_weights = np.ones(num_obj) * obj_weights
if len(obj_weights) != num_obj:
msg = f"Mismatch between the number of entries ({len(obj_weights)} in "
msg += f"obj_wgt and the number of stochastic objectives ({num_obj})."
raise ValueError(msg)
return np.array(obj_weights).ravel()
def _check_std_inds(use_std, num_obj):
""" Check use_std argument passed to CondMom and
convert it to a slice definition
"""
if isinstance(use_std, bool):
inds = [use_std] * num_obj
if len(inds) != num_obj:
msg = "Mismatch between the number of entries in "
msg += "use_std and the number of stochastic objectives."
raise ValueError(msg)
return np.array(use_std, dtype=bool)
def _find_integrator_cls(integrator):
    """Resolve the integrator class referred to by the string integrator."""
    integrator = integrator.upper()
    registry = {"DS": DS, "MC": MC, "ISPUD": ISPUD, "FORM": FORM, "SUSE": SUSE}
    cls = registry.get(integrator)
    if cls is None:
        msg = f"Requested integrator {integrator} is not found."
        raise ValueError(msg)
    return cls
def _make_chain(methods: list):
"""Makes the chain given a list of method names"""
try:
first = methods[0]
except TypeError:
raise TypeError(f"methods must be a list of strings or classes, not {type(methods)}")
try:
_ = first.upper()
except AttributeError:
return methods
return [_find_integrator_cls(name.upper()) for name in methods]
def _n_para_chk(num_parallel: int = None):
""" Check the num_parallel argument as passed to CondProb """
n_procs = max(1, mp.cpu_count()) # could cpu_count ever be < 1?
if num_parallel is None or num_parallel > n_procs:
print(f"Number of parallel processes was set to {n_procs}")
return n_procs
return num_parallel
def _default_init(targ_prob: float, acc_max: float, num_inp: int,
num_para: int):
"""Decide the default integrator chain methods and arguments depending
on the problem
Parameters
----------
targ_prob : float
target failure probability
acc_max : float
target tolerance for the estimation
num_inp : int
number of stochastic inputs of the constraints
num_para : int
number of parallel processes to use
Returns
-------
integrators : list
Integrator classes, that are to be initiated
int_args : dict
Keyword arguments to pass to integrators
"""
if targ_prob * acc_max >= 1e-5:
if targ_prob * acc_max >= 1e-4:
integrators = ["MC"]
else:
integrators = ["SUSE", "MC"]
int_args = {"num_starts": 1, "batch_size": 1e5}
elif num_inp < 15:
integrators = ["SUSE", "DS"]
int_args = {"num_starts": 1}
else:
integrators = ["SUSE"]
int_args = {"num_starts": num_para}
print("Using", integrators, "as default chain.")
return integrators, int_args
def _is_worker(workers, name):
    """Return True when an integrator matching name is present in workers."""
    target = name.upper()
    return any(target in read_integrator_name(worker).upper()
               for worker in workers)
def read_integrator_name(worker):
    """Return the alphanumeric-only class name of the integrator worker."""
    tail = str(worker).rsplit(".", 1)[-1]
    return "".join(filter(str.isalnum, tail))
def gen_doe(self, x_opt):
    """Get DoE for the Moment estimation for x_opt.

    Scales the cached standardized base DoE (if any) to the input
    distribution conditioned on the optimization point x_opt; without a
    cached design, a fresh DoE is drawn from the stochastic objective space.
    """
    if x_opt.ndim == 1:
        x_opt = x_opt.reshape((1, -1))
    if self.base_doe is None:
        # No cached design: delegate generation to the input space.
        return self.full_space.inp_space.sto_obj_doe(x_opt, self._doe_size)
    mean, std = self.full_space.inp_space.opt_moms(x_opt)
    names = self.full_space.inp_space.mulvar.names
    names = [names[i] for i in self.full_space.inp_space.mv_inds("sto_obj")]
    # Translating is not sufficient for lognormal and truncated normal
    inds = [i for i, x in enumerate(names) if "log" in x or "trunc" in x]
    if not inds:
        # All marginals can be obtained by a simple affine transform.
        return self.base_doe * std + mean
    # Handle Lognormal (and truncated) marginals: affine-transform the other
    # columns and map the special columns through their inverse CDF.
    binds = np.ones(self.base_doe.shape[1], dtype=bool)
    binds[inds] = False
    base_doe = self.base_doe.copy()
    base_doe[:, binds] = base_doe[:, binds] * std[binds] + mean[binds]
    # NOTE(review): mean/std are sliced below but never used afterwards --
    # looks like dead code; confirm nothing relies on it.
    mean = mean[inds]
    std = std[inds]
    cur_mv = self.full_space.inp_space.opt_mulvar(x_opt, domain="sto_obj")
    for ind in inds:
        # NOTE(review): assumes these base_doe columns hold probabilities in
        # (0, 1) so that ppf() is applicable -- confirm how base_doe is built.
        base_doe[:, ind] = cur_mv.dists[ind].marg.ppf(base_doe[:, ind])
    return base_doe
def est_mom(self, x_opt):
    """ Estimate conditional moments for a single optimization point x_opt

    Conditional moments are E[Y | x_opt] and Var[Y | x_opt]

    Parameters
    ----------
    x_opt : numpy.ndarray
        the coordinates of the optimization variables to compute
        the moments

    Returns
    -------
    mus : numpy.ndarray
        Estimated means, or if obj_wgt was not None,
        the combined mu + obj_wgt * sigma
    sigmas : numpy.ndarray
        Estimated variances or std. dev. depending on the settings.
        only returned if obj_wgt is None.
    """
    if x_opt.ndim == 1:
        x_opt = x_opt.reshape((1, -1))
    doe = self.gen_doe(x_opt)
    res = self.full_space.sto_obj(doe, x_opt)
    mus = np.mean(res, axis=0)
    sigmas = np.zeros(mus.shape)
    # self.use_std selects, per objective, whether the spread is reported as
    # standard deviation (True) or variance (False); both use ddof=1.
    std_inds = self.use_std
    sigmas[std_inds] = np.std(res[:, std_inds], axis=0, ddof=1)
    var_inds = np.logical_not(std_inds)
    sigmas[var_inds] = np.var(res[:, var_inds], axis=0, ddof=1)
    if self.obj_wgt is None:
        return mus, sigmas
    # Scalarize: combine mean and spread into a single robust objective.
    return mus + self.obj_wgt * sigmas
class CondProba:
    """A chain of integtrators for the calculation of the probability

    This starts with a fast integrator to get an initial guess. If the
    guess is too far away from target_pf, this stops further calculations
    and returns the failure probability. Used for accelerating the
    optimization process. Chains with a single element are also possible.

    Parameters
    ----------
    num_inputs : int
        Number of stochastic inputs used for the constraints

    target_fail_prob : float
        Target failure probability. If unsure, just set it sufficiently low
        i.e. >=1e-6. Note that Numerical unstabilities start at 1e-9 due to
        scipy stats returning nans and infs

    num_parallel : int
        Number of parallel computations, if the used integrator supports it.
        If passed, the entry in call_args will override this.

    methods : None or list of str
        Names of the methods to use for the estimation. If None, a default
        chain will be selected depending the problem definition, which is
        recommended for new users.
        Currently the following names are supported:
            MC - Crude Monte Carlo
            DS - Directional simulation
            FORM - First order reliability method
            ISPUD - Importance sampling using design point (MPP)

    call_args : None or list
        keyword argument dict to pass to the integrator calc_prob_fail
        as call arguments. Any argument in this will override the
        initialization arguments with the same name i.e. target_fp and
        num_parallel

    target_tol : float
        Target tolerance for the failure probability. Also used
        for stopping the chain, if the computed failure probability
        is either smaller than target_fp * target_tol or larger than
        target_fp / target_tol.
    """

    # NOTE(review): __init__ (which must set self.workers, self.call_args,
    # self._tar_fp and self._tar_tol used below) is not visible in this
    # excerpt -- verify against the full file.

    def _prob_tol(self):
        """Derive the probability tolerance and store it in call_args."""
        prob_tol = self._tar_fp * self._tar_tol
        if _is_worker(self.workers, "MC") and prob_tol < 1e-6:
            msg = "Crude Monte Carlo can be very inefficient for "
            msg += "such low probabilities of failure."
            warnings.warn(msg)
        self.call_args["prob_tol"] = prob_tol

    def calc_fail_prob(self, input_mv, constraints, const_args, verbose: int = 0):
        """ Calculate failure probability using the worker chain

        Parameters
        ----------
        input_mv : MultiVar instance
            Definition of the multivariate input
        constraints : list
            constraint functions to initialize the integrator
        const_args : None or list
            arguments to pass to the constraints
        verbose : int
            0 silent, 1 report the final estimate, >1 report every estimate

        Returns:
        --------
        pof : float
            probability of failure
        feasible : bool
            pof <= target_pf
        """
        if not self.workers:
            raise ValueError("No estimators defined")
        for worker in self.workers:
            estimator = worker(input_mv, constraints, const_args)
            try:
                pof = estimator.calc_fail_prob(**self.call_args)[0]
            except ValueError:
                # Estimation failed; fall through to the next (usually more
                # robust) worker. Only the last worker pessimistically
                # reports a failure probability of 100%.
                if worker == self.workers[-1]:
                    print("Fatal error while calculating probability of failure with", worker)
                    print(input_mv)
                    print("Setting it to 100%.")
                    pof = 1.
                continue
            if verbose > 1:
                name = read_integrator_name(worker)
                print(f"{name} estimated the failure probability as {pof:.2e}.")
            # prob_ratio in (0, 1]: how close the estimate is to the target.
            if pof > self._tar_fp:
                prob_ratio = self._tar_fp / pof
            else:
                prob_ratio = pof / self._tar_fp
            if prob_ratio <= self._tar_tol:
                # Estimate is decisively far from the target; no need to
                # refine it with the remaining workers.
                break
        if verbose > 0:
            try:
                name = read_integrator_name(worker)
                print(f"{name} estimated the failure probability as {pof:.2e}.")
            except NameError:
                # Defensive: only possible if no estimate was produced.
                pass
        return pof, pof <= self._tar_fp
| 35.920078 | 115 | 0.61475 |
580d37ef443f31d16e61142142999c038e7fd18f | 5,352 | py | Python | mymodule/twitter_json_parsing.py | sobkovych/TwitterFriendsMap | 7fb1a844264334fba443feba3830cca2c86b55c9 | [
"MIT"
] | null | null | null | mymodule/twitter_json_parsing.py | sobkovych/TwitterFriendsMap | 7fb1a844264334fba443feba3830cca2c86b55c9 | [
"MIT"
] | null | null | null | mymodule/twitter_json_parsing.py | sobkovych/TwitterFriendsMap | 7fb1a844264334fba443feba3830cca2c86b55c9 | [
"MIT"
] | 1 | 2020-02-26T09:20:17.000Z | 2020-02-26T09:20:17.000Z | """Parse json files."""
import json
import re
def search_for_key(final_key: str, tree: dict, space: list = None):
    """Recursively collect every value stored under *final_key* in *tree*.

    Matched keys are popped from *tree* (the input is mutated); main() relies
    on this to save the pruned tree afterwards.

    :param final_key: the key to search for
    :param tree: nested structure of dicts/lists to search
    :param space: accumulator list; a fresh list is created when omitted
    :return: all found values (None for non-container leaves)
    """
    # Bug fix: the original used a mutable default (`space=[]`) and relied on
    # that shared list for the recursive calls, so results leaked between
    # separate top-level invocations. Use None and thread the accumulator
    # through the recursion explicitly.
    if space is None:
        space = []
    if isinstance(tree, dict) and final_key in tree:
        space.append(tree.pop(final_key))
    if isinstance(tree, dict):
        for key in tree:
            search_for_key(final_key, tree[key], space)
    elif isinstance(tree, list):
        for item in tree:
            search_for_key(final_key, item, space)
    else:
        return None
    return space
def check_response(prompt: str, to_return: bool = False,
                   field: (tuple, None) = ({"yes", "y", "true", "t", "1"},
                                           {"no", "n", "false", "f", "0"}),
                   expression: str = None,
                   max_len: int = None,
                   min_len: int = None) -> (bool, str):
    """Prompt the user repeatedly until the response passes one of the checks.

    :param prompt: input message
    :param to_return: whether to return the response string instead of True
    :param field: (affirmative, negative) answer sets; a negative answer
        returns False, any other passing check returns the success value
    :param expression: full-match regular expression check
    :param max_len: maximum length check
    :param min_len: minimum length check
    :return: the lower-cased response (if to_return is True) or a boolean
    """
    if field:
        affirm = field[0] if field[0] else None
        negat = field[1] if field[1] else None
    else:
        affirm = negat = None
    while True:
        resp = input(prompt).lower()
        ret_value = resp if to_return else True
        if affirm and resp in affirm:
            return ret_value
        if negat and resp in negat:
            return False
        # Bug fix: removed a leftover debug statement that printed the
        # compiled regex object (`print(re.compile(expression))`) before
        # every regex check.
        if expression and re.fullmatch(expression, resp):
            return ret_value
        if min_len and len(resp) >= min_len:
            return ret_value
        if max_len and len(resp) <= max_len:
            return ret_value
        else:
            # NOTE(review): this message is only printed when max_len is falsy
            # (the `else` binds to the max_len check); with max_len set and a
            # too-long response the loop repeats silently -- confirm intent.
            print("The response is incorrect, try again!")
def get_step_by_step(obj):
    """Parse obj step by step.

    Interactively walks the nested structure: at each dict the user picks a
    key, at each list an index, until a non-container leaf is reached. The
    visited path is kept in `space` so the user can backtrack.

    :param obj: list, dict or other
    :return: found value or None
    """
    space = [(obj, "JSON")]  # breadcrumb trail of (object, label) pairs
    unsure = check_response("Ask to come back at every step?\n")
    while True:
        if isinstance(obj, dict):
            print("This obj is a dict. These are the available keys:")
            # Print the keys two per row, left-aligned.
            fill_len = len(max(obj.keys(), key=len)) + 10
            for i, key in enumerate(obj):
                if i % 2 == 0:
                    row = "{}.){}".format(i+1, key)
                    row = row.ljust(fill_len, " ")
                else:
                    row = "{}.){}\n".format(i+1, key)
                print(row, end='')
            key = check_response("\nChose your key by name: ",
                                 True, field=(obj, None))
            obj = obj[key]
        elif isinstance(obj, list):
            print("This obj is a list.")
            last_key = len(obj)-1
            key = check_response(
                "Choose an index from 0 to {}: ".format(last_key),
                to_return=True,
                field=({str(i) for i in range(last_key+1)}, None)
            )
            obj = obj[int(key)]
        else:
            # Reached a leaf: offer to return it, backtrack, or give up.
            print("Your final obj is: {}.".format(obj))
            if check_response("Return: {} (y/n)?\n".format(obj)):
                return obj
            elif check_response("Come back to any step?\n"):
                for i, step in enumerate(space):
                    print("Step {}: {}".format(i+1, step[1]))
                l_space = len(space)
                step = check_response("Which step to come back to "
                                      "within range "
                                      "[1, {}]?\n".format(l_space),
                                      to_return=True,
                                      field=(
                                          {str(i+1) for i in range(l_space)},
                                          None
                                      ))
                step = int(step)
                # Rewind to the chosen step and drop everything after it.
                obj = space[step-1][0]
                del space[step:]
                continue
            else:
                print("Returning None...")
                return None
        space.append((obj, key))
        if unsure:
            # Optionally step back one level at a time after every choice.
            while (len(space) > 1 and
                   check_response("Come back to previous step(y/n)?\n")):
                space.pop()
                obj = space[-1][0]
                print("Now at step {}: {}".format(len(space), space[-1][1]))
def main(get: str, store: str = None, mode: str = "step"):
    """Interactively explore the JSON file at *get*.

    Offers a step-by-step walk and/or a recursive key search. Note that
    search_for_key() pops matched keys from the tree, so the tree dumped to
    *store* is the pruned remainder.

    :param get: path of the JSON file to load
    :param store: where to store the result tree (skipped when falsy)
    :param mode: currently unused -- never referenced in the body
    """
    with open(get, encoding="utf-8") as f:
        tree = json.load(f)
    if check_response("Analyse step by step(y/n)?\n"):
        print(get_step_by_step(tree))
    if check_response("Search for key(y/n)?\n"):
        user_key = input("Enter your key: ")
        print(search_for_key(user_key, tree=tree))
    if store:
        with open(store, mode="w+", encoding="utf-8") as outfile:
            json.dump(tree, outfile, indent=4, ensure_ascii=False)


if __name__ == "__main__":
    main("form.json")
| 32.047904 | 77 | 0.496076 |
580d445ca9f82fbb66ddc5c165290139ca728a53 | 2,795 | py | Python | meet/migrations/0001_initial.py | bjones-tech/speedy-meety | a7d557788a544b69fd6ad454d921d9cf02cfa636 | [
"MIT"
] | null | null | null | meet/migrations/0001_initial.py | bjones-tech/speedy-meety | a7d557788a544b69fd6ad454d921d9cf02cfa636 | [
"MIT"
] | null | null | null | meet/migrations/0001_initial.py | bjones-tech/speedy-meety | a7d557788a544b69fd6ad454d921d9cf02cfa636 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-17 02:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import meet.models
| 43.671875 | 131 | 0.586047 |
580de9ae168cc442b87908dac6e8235e1d9361f3 | 284 | py | Python | setup.py | jrspruitt/pyfa_gpio | d0f189724b34a2a888dd01b33d237b79ace5becf | [
"MIT"
] | null | null | null | setup.py | jrspruitt/pyfa_gpio | d0f189724b34a2a888dd01b33d237b79ace5becf | [
"MIT"
] | null | null | null | setup.py | jrspruitt/pyfa_gpio | d0f189724b34a2a888dd01b33d237b79ace5becf | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup,find_packages
version = '0.1'
setup(
name='pyfa_gpio',
version=version,
description='',
author='Jason Pruitt',
url='https://github.com/jrspruitt/pyfa_gpio',
license='MIT',
packages = find_packages(),
)
| 17.75 | 49 | 0.661972 |
580ec4cbc90960d845dfc3bbcd5951593510c1c2 | 4,093 | py | Python | dps/env/basic/path_discovery.py | alcinos/dps | 5467db1216e9f9089376d2c71f524ced2382e4f6 | [
"Apache-2.0"
] | null | null | null | dps/env/basic/path_discovery.py | alcinos/dps | 5467db1216e9f9089376d2c71f524ced2382e4f6 | [
"Apache-2.0"
] | null | null | null | dps/env/basic/path_discovery.py | alcinos/dps | 5467db1216e9f9089376d2c71f524ced2382e4f6 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
import numpy as np
from dps.register import RegisterBank
from dps.env import TensorFlowEnv
from dps.utils import Param, Config
# Module-level experiment configuration for the path_discovery environment.
config = Config(
    build_env=build_env,  # environment factory; defined elsewhere in this module
    # Curriculum stages: the grid grows while the pass threshold tightens.
    # NOTE(review): each stage's shape presumably overrides the default
    # `shape` below — verify against the curriculum runner.
    curriculum=[
        dict(shape=(2, 2), threshold=6),
        dict(shape=(3, 3), threshold=4),
        dict(shape=(4, 4), threshold=2)
    ],
    env_name='path_discovery',
    shape=(3, 3),  # default grid shape
    T=10,  # assumes T is the episode horizon (steps per episode) — TODO confirm
    stopping_criteria="reward_per_ep,max",
)
| 36.221239 | 109 | 0.590765 |
5810e3bb40adfc4d345436082de3af836eeff704 | 14,812 | py | Python | utils/github/query.py | malkfilipp/ClickHouse | 79a206b092cd465731020f331bc41f6951dbe751 | [
"Apache-2.0"
] | 1 | 2019-09-16T11:07:32.000Z | 2019-09-16T11:07:32.000Z | utils/github/query.py | malkfilipp/ClickHouse | 79a206b092cd465731020f331bc41f6951dbe751 | [
"Apache-2.0"
] | null | null | null | utils/github/query.py | malkfilipp/ClickHouse | 79a206b092cd465731020f331bc41f6951dbe751 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import requests
| 41.96034 | 128 | 0.369498 |
5811d6d7e749badbaa3acffda48486b057d48a0e | 4,404 | py | Python | mortgage/mortgage.py | roelbertens/mortgages | b5fe415024933c772e6c7c57f041bf065ac86176 | [
"MIT"
] | 1 | 2019-08-19T07:09:58.000Z | 2019-08-19T07:09:58.000Z | mortgage/mortgage.py | roelbertens/mortgages | b5fe415024933c772e6c7c57f041bf065ac86176 | [
"MIT"
] | null | null | null | mortgage/mortgage.py | roelbertens/mortgages | b5fe415024933c772e6c7c57f041bf065ac86176 | [
"MIT"
] | null | null | null | from typing import List
import matplotlib.pyplot as plt
def _align_mortgages(periods_a: List[int],
periods_b: List[int],
fees_a: List[int],
fees_b: List[int]) -> (List[int], List[int]):
""" Align periods and fees of two mortgages and compute the exact fee for each period.
:param periods_a: periods for Mortgage a
:param periods_b: periods for Mortgage b
:param fees_a: monthly fees for Mortgage a
:param fees_b: monthly fees for Mortgage b
:return: tuple of aligned periods and fees for the combined Mortgages a and b
"""
periods_a, periods_b, fees_a, fees_b = \
periods_a.copy(), periods_b.copy(), fees_a.copy(), fees_b.copy()
if not periods_a:
if not periods_b:
return [], []
else:
return periods_b, fees_b
elif not periods_b:
return periods_a, fees_a
if periods_b[0] < periods_a[0]:
periods_a, periods_b = periods_b, periods_a
fees_a, fees_b = fees_b, fees_a
first_period_fee = ([periods_a[0]], [fees_a[0] + fees_b[0]])
if periods_a[0] == periods_b[0]:
recursive_result = _align_mortgages(periods_a[1:], periods_b[1:], fees_a[1:], fees_b[1:])
else:
periods_b[0] -= periods_a[0]
recursive_result = _align_mortgages(periods_a[1:], periods_b, fees_a[1:], fees_b)
return tuple(a + b for a, b in zip(first_period_fee, recursive_result))
| 38.631579 | 98 | 0.584923 |
58127a028ca7d4bb09bc84dec02f9d31b1e190c3 | 32,827 | py | Python | training/wml_train.py | corvy/MAX-Object-Detector | 2a21183e6bb9d0c35bac297ee3cf1fc67f4c048f | [
"Apache-2.0"
] | 1 | 2019-10-25T11:36:46.000Z | 2019-10-25T11:36:46.000Z | training/wml_train.py | karankrish/MAX-Image-Segmenter | 2d5d080f4a3d7db1aa4cf320ab35b3e157a6f485 | [
"Apache-2.0"
] | 1 | 2019-07-08T17:58:45.000Z | 2019-09-05T18:07:45.000Z | training/wml_train.py | karankrish/MAX-Image-Segmenter | 2d5d080f4a3d7db1aa4cf320ab35b3e157a6f485 | [
"Apache-2.0"
] | 1 | 2019-10-30T20:42:46.000Z | 2019-10-30T20:42:46.000Z | #!/usr/bin/env python
#
# Copyright 2018-2019 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import re
import shutil
import sys
import tarfile
import time
from enum import Enum
from zipfile import ZipFile
from utils.debug import debug
from utils.os_util import copy_dir
from utils.config import YAMLReader, ConfigParseError, ConfigurationError
from utils.wml import WMLWrapper, WMLWrapperError
from utils.cos import COSWrapper, COSWrapperError, BucketNotFoundError
TRAINING_LOG_NAME = 'training-log.txt' # fixed; do not change
TRAINING_OUTPUT_ARCHIVE_NAME = 'model_training_output.tar.gz' # do not change
# --------------------------------------------------------
# Process command line parameters
# --------------------------------------------------------
def process_cmd_parameters():
    """
    Process command line parameters. This function terminates the
    application if an invocation error was detected.

    :returns: dict containing three properties: 'config_file',
        'command' and 'training_id'; 'training_id' is None unless the
        'package' command was invoked with an explicit training id
    :rtype: dict
    """
    if len(sys.argv) <= 1:
        # no arguments were provided; display usage information
        display_usage()
        sys.exit(ExitCode.SUCCESS.value)

    # first positional argument must be an existing configuration file
    if not os.path.isfile(sys.argv[1]):
        print('Invocation error. "{}" is not a file.'.format(sys.argv[1]))
        display_usage()
        sys.exit(ExitCode.INCORRECT_INVOCATION.value)

    if len(sys.argv) < 3:
        print('Invocation error. You must specify a command.')
        display_usage()
        sys.exit(ExitCode.INCORRECT_INVOCATION.value)

    # commands are accepted case-insensitively and with surrounding blanks
    cmd_parameters = {
        'config_file': sys.argv[1],
        'command': sys.argv[2].strip().lower(),
        'training_id': None
    }

    if cmd_parameters['command'] not in ('clean',
                                         'prepare',
                                         'train',
                                         'package'):
        print('Invocation error. "{}" is not a valid command.'
              .format(sys.argv[2]))
        display_usage()
        sys.exit(ExitCode.INCORRECT_INVOCATION.value)

    if cmd_parameters['command'] == 'package':
        # package accepts as optional parameter an existing training id
        if len(sys.argv) == 4:
            cmd_parameters['training_id'] = sys.argv[3]

    return cmd_parameters
cmd_parameters = process_cmd_parameters()
# --------------------------------------------------------
# Verify that the required environment variables are set
# --------------------------------------------------------
verify_env_settings()
# --------------------------------------------------------
# Process configuration file
# --------------------------------------------------------
print_banner('Validating configuration file "{}" ...'
.format(cmd_parameters['config_file']))
config = None
try:
r = YAMLReader(cmd_parameters['config_file'])
config = r.read()
except ConfigurationError as ce:
for missing_setting in ce.get_missing_settings():
print('Error. Configuration file "{}" does not'
' define setting "{}".'
.format(cmd_parameters['config_file'],
missing_setting.get('yaml_path')))
sys.exit(ExitCode.CONFIGURATION_ERROR.value)
except ConfigParseError as cpe:
print('Error. Configuration file "{}" is invalid: {}'
.format(cmd_parameters['config_file'],
str(cpe)))
sys.exit(ExitCode.CONFIGURATION_ERROR.value)
except FileNotFoundError:
print('Error. Configuration file "{}" was not found.'
.format(cmd_parameters['config_file']))
sys.exit(ExitCode.INVOCATION_ERROR.value)
debug('Using the following configuration settings: ', config)
cw = None # COS wrapper handle
w = None # WML wrapper handle
training_guid = cmd_parameters.get('training_id', None)
if cmd_parameters['command'] == 'package' and training_guid is not None:
# monitor status of an existing training run; skip preparation steps
try:
# instantiate Cloud Object Storage wrapper
cw = COSWrapper(os.environ['AWS_ACCESS_KEY_ID'],
os.environ['AWS_SECRET_ACCESS_KEY'])
except COSWrapperError as cwe:
print('Error. Cloud Object Storage preparation failed: {}'.format(cwe))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
print_banner('Verifying that "{}" is a valid training id ...'
.format(training_guid))
try:
# instantiate Watson Machine Learning wrapper
w = WMLWrapper(os.environ['ML_ENV'],
os.environ['ML_APIKEY'],
os.environ['ML_INSTANCE'])
# verify that the provided training id is valid
if not w.is_known_training_id(training_guid):
print('Error. "{}" is an unknown training id.'
.format(training_guid))
sys.exit(ExitCode.INCORRECT_INVOCATION.value)
except WMLWrapperError as wmle:
print(wmle)
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
except Exception as ex:
print(' Exception type: {}'.format(type(ex)))
print(' Exception: {}'.format(ex))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
else:
# --------------------------------------------------------
# Remove existing model training artifacts
# --------------------------------------------------------
print_banner('Removing temporary work files ...')
for file in [config['model_code_archive']]:
if os.path.isfile(file):
os.remove(file)
# terminate if the "clean" command was specified
# when the utility was invoked
if cmd_parameters['command'] == 'clean':
print('Skipping model training.')
sys.exit(ExitCode.SUCCESS.value)
# --------------------------------------------------------
# Verify the Cloud Object Storage configuration:
# - the results bucket must exist
# --------------------------------------------------------
print_banner('Verifying Cloud Object Storage setup ...')
try:
# instantiate the Cloud Object Storage wrapper
cw = COSWrapper(os.environ['AWS_ACCESS_KEY_ID'],
os.environ['AWS_SECRET_ACCESS_KEY'])
print(' Verifying that training results bucket "{}" exists. '
' It will be created if necessary ...'
.format(config['results_bucket']))
# make sure the training results bucket exists;
# it can be empty, but doesn't have to be
cw.create_bucket(config['results_bucket'],
exist_ok=True)
print(' Verifying that training data bucket "{}" exists. '
' It will be created if necessary ...'
.format(config['training_bucket']))
# make sure the training data bucket exists;
cw.create_bucket(config['training_bucket'],
exist_ok=True)
# if there are any initial_model artifacts in ther training bucket
# remove them
im_object_list = cw.get_object_list(config['training_bucket'],
key_name_prefix='initial_model')
if len(im_object_list) > 0:
print(' Removing model artifacts from training bucket "{}" ... '
.format(config['training_bucket']))
cw.delete_objects(config['training_bucket'], im_object_list)
# is there training data in the bucket?
no_training_data = cw.is_bucket_empty(config['training_bucket'])
if config.get('local_data_dir') and \
os.path.isdir(config['local_data_dir']):
config['local_data_dir'] = \
os.path.abspath(config['local_data_dir'])
# add initial_model artifacts to bucket
if config.get('local_data_dir') and \
os.path.isdir(config['local_data_dir']):
initial_model_path = os.path.join(config['local_data_dir'],
'initial_model')
print(' Looking for model artifacts in "{}" ... '
.format(initial_model_path))
for file in glob.iglob(initial_model_path + '/**/*',
recursive=True):
if os.path.isfile(file):
print(' Uploading model artifact "{}" to '
'training data bucket "{}" ...'
.format(file[len(initial_model_path):].lstrip('/'),
config['training_bucket']))
cw.upload_file(file,
config['training_bucket'],
'initial_model',
file[len(initial_model_path):]
.lstrip('/'))
print(' Looking for training data in bucket "{}" ... '
.format(config['training_bucket']))
# if there's no training data in the training data bucket
# upload whatever is found locally
if no_training_data:
print(' No training data was found.')
if config.get('local_data_dir', None) is None:
# error. there is no local training data either;
# abort processing
print('Error. No local training data was found. '
'Please check your configuration settings.')
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
# verify that local_data_dir is a directory
if not os.path.isdir(config['local_data_dir']):
print('Error. "{}" is not a directory or cannot be accessed.'
.format(config['local_data_dir']))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
# upload training data from the local data directory
print(' Looking for training data in "{}" ... '
.format(config['local_data_dir']))
file_count = 0
ignore_list = []
ignore_list.append(os.path.join(config['local_data_dir'],
'README.md'))
for file in glob.iglob(config['local_data_dir'] + '/**/*',
recursive=True):
if file in ignore_list or file.startswith(initial_model_path):
continue
if os.path.isfile(file):
print(' Uploading "{}" to training data bucket "{}" ...'
.format(file[len(config['local_data_dir']):]
.lstrip('/'),
config['training_bucket']))
cw.upload_file(file,
config['training_bucket'],
config.get('training_data_key_prefix'),
file[len(config['local_data_dir']):]
.lstrip('/'))
file_count += 1
if file_count == 0:
print('Error. No local training data was found in "{}".'
.format(config['local_data_dir']))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
else:
print('Uploaded {} data files to training data bucket "{}".'
.format(file_count, config['training_bucket']))
else:
print(' Found data in training data bucket "{}". Skipping upload.'
.format(config['training_bucket']))
except ValueError as ve:
print('Error. {}'.format(ve))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
except BucketNotFoundError as bnfe:
print('Error. {}'.format(bnfe))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
except FileNotFoundError as fnfe:
print('Error. {}'.format(fnfe))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
except COSWrapperError as cwe:
print('Error. Cloud Object Storage preparation failed: {}'.format(cwe))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
# --------------------------------------------------------
# Create model building ZIP
# --------------------------------------------------------
print_banner('Locating model building files ...')
#
# 1. Assure that the model building directory
# config['model_building_code_dir'] exists
# 2. If there are no files in config['model_building_code_dir']:
# - determine whether model-building code is stored in a COS bucket
# - download model-building code to config['model_building_code_dir']
# 3. ZIP files in config['model_building_code_dir']
try:
# task 1: make sure the specified model building code directory exists
os.makedirs(config['model_building_code_dir'], exist_ok=True)
except Exception as ex:
debug(' Exception type: {}'.format(type(ex)))
print('Error. Model building code preparation failed: {}'.format(ex))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
if len(os.listdir(config['model_building_code_dir'])) == 0:
# Task 2: try to download model building code from Cloud Object Storage
# bucket
#
print('No model building code was found in "{}".'
.format(config['model_building_code_dir']))
try:
if config.get('model_bucket') is None or \
cw.is_bucket_empty(config['model_bucket'],
config.get('model_key_prefix')):
print('Error. Model building code preparation failed: '
'No source code was found locally in "{}" or '
' in Cloud Object Storage.'
.format(config['model_building_code_dir']))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
print('Found model building code in bucket "{}".'
.format(config['model_bucket']))
for object_key in cw.get_object_list(config['model_bucket'],
config.get(
'model_key_prefix')):
cw.download_file(config['model_bucket'],
object_key,
config['model_building_code_dir'])
except BucketNotFoundError as bnfe:
print('Error. {}'.format(bnfe))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
except COSWrapperError as cwe:
print('Error. {}'.format(cwe))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
except Exception as ex:
debug(' Exception type: {}'.format(type(ex)))
print('Error. {}'.format(ex))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
print_banner('Packaging model building files in "{}" ...'
.format(config['model_building_code_dir']))
try:
shutil.make_archive(re.sub('.zip$', '', config['model_code_archive']),
'zip',
config['model_building_code_dir'])
except Exception as ex:
print('Error. Packaging failed: {}'.format(str(ex)))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
if os.path.isfile(config['model_code_archive']):
# display archive content
print('Model building package "{}" contains the following entries:'
.format(config['model_code_archive']))
with ZipFile(config['model_code_archive'], 'r') as archive:
for entry in sorted(archive.namelist()):
print(' {}'.format(entry))
# check archive size; WML limits size to 4MB
archive_size = os.path.getsize(config['model_code_archive'])
archive_size_limit = 1024 * 1024 * 4
if archive_size > archive_size_limit:
print('Error. Your model building code archive "{}" is too large '
'({:.2f} MB). WLM rejects archives larger than {} MB. '
'Please remove unnecessary files from the "{}" directory '
'and try again.'
.format(config['model_code_archive'],
archive_size / (1024 * 1024),
archive_size_limit / (1024 * 1024),
config['model_building_code_dir']))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
# Status:
# - The model training job can now be started.
if cmd_parameters['command'] == 'prepare':
print('Skipping model training and post processing steps.')
sys.exit(ExitCode.SUCCESS.value)
# ---------------------------------------------------------
# Start model training
# --------------------------------------------------------
print_banner('Starting model training ...')
try:
# instantiate the WML client
w = WMLWrapper(os.environ['ML_ENV'],
os.environ['ML_APIKEY'],
os.environ['ML_INSTANCE'])
except WMLWrapperError as wmle:
print(wmle)
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
# define training metadata
model_definition_metadata = {
w.get_client().repository.DefinitionMetaNames.NAME:
config['training_run_name'],
w.get_client().repository.DefinitionMetaNames.DESCRIPTION:
config['training_run_description'],
w.get_client().repository.DefinitionMetaNames.AUTHOR_NAME:
config['author_name'],
w.get_client().repository.DefinitionMetaNames.FRAMEWORK_NAME:
config['framework_name'],
w.get_client().repository.DefinitionMetaNames.FRAMEWORK_VERSION:
config['framework_version'],
w.get_client().repository.DefinitionMetaNames.RUNTIME_NAME:
config['runtime_name'],
w.get_client().repository.DefinitionMetaNames.RUNTIME_VERSION:
config['runtime_version'],
w.get_client().repository.DefinitionMetaNames.EXECUTION_COMMAND:
config['training_run_execution_command']
}
training_configuration_metadata = {
w.get_client().training.ConfigurationMetaNames.NAME:
config['training_run_name'],
w.get_client().training.ConfigurationMetaNames.AUTHOR_NAME:
config['author_name'],
w.get_client().training.ConfigurationMetaNames.DESCRIPTION:
config['training_run_description'],
w.get_client().training.ConfigurationMetaNames.COMPUTE_CONFIGURATION:
{'name': config['training_run_compute_configuration_name']},
w.get_client().training.ConfigurationMetaNames
.TRAINING_DATA_REFERENCE: {
'connection': {
'endpoint_url': config['cos_endpoint_url'],
'access_key_id': os.environ['AWS_ACCESS_KEY_ID'],
'secret_access_key': os.environ['AWS_SECRET_ACCESS_KEY']
},
'source': {
'bucket': config['training_bucket'],
},
'type': 's3'
},
w.get_client().training.ConfigurationMetaNames
.TRAINING_RESULTS_REFERENCE: {
'connection': {
'endpoint_url': config['cos_endpoint_url'],
'access_key_id': os.environ['AWS_ACCESS_KEY_ID'],
'secret_access_key': os.environ['AWS_SECRET_ACCESS_KEY']
},
'target': {
'bucket': config['results_bucket'],
},
'type': 's3'
}
}
print('Training configuration summary:')
print(' Training run name : {}'.format(config['training_run_name']))
print(' Training data bucket : {}'.format(config['training_bucket']))
print(' Results bucket : {}'.format(config['results_bucket']))
print(' Model-building archive: {}'.format(config['model_code_archive']))
try:
training_guid = w.start_training(config['model_code_archive'],
model_definition_metadata,
training_configuration_metadata)
except Exception as ex:
print('Error. Model training could not be started: {}'.format(ex))
sys.exit(ExitCode.TRAINING_FAILED.value)
print('Model training was started. Training id: {}'.format(training_guid))
# --------------------------------------------------------
# Monitor the training run until it completes
# successfully or throws an error
# --------------------------------------------------------
#
print('Checking model training status every {} seconds.'
' Press Ctrl+C once to stop monitoring or '
' press Ctrl+C twice to cancel training.'
.format(config['training_progress_monitoring_interval']))
print('Status - (p)ending (r)unning (e)rror (c)ompleted or canceled:')
try:
training_in_progress = True
while training_in_progress:
try:
# poll training status; ignore server errors (e.g. caused
# by temporary issues not specific to our training run)
status = w.get_training_status(training_guid,
ignore_server_error=True)
if status:
training_status = status.get('state') or '?'
else:
# unknown status; continue and leave it up to the user
# to terminate monitoring
training_status = '?'
# display training status indicator
# [p]ending
# [r]unning
# [c]ompleted
# [e]rror
# [?]
print(training_status[0:1], end='', flush=True)
if training_status == 'completed':
# training completed successfully
print('\nTraining completed.')
training_in_progress = False
elif training_status == 'error':
print('\nTraining failed.')
# training ended with error
training_in_progress = False
elif training_status == 'canceled':
print('\nTraining canceled.')
# training ended with error
training_in_progress = False
else:
time.sleep(
int(config['training_progress_monitoring_interval']))
except KeyboardInterrupt:
print('\nTraining monitoring was stopped.')
try:
input('Press Ctrl+C again to cancel model training or '
'any other key to continue training.')
print('To resume monitoring, run "python {} {} {} {}"'
.format(sys.argv[0],
sys.argv[1],
'package',
training_guid))
sys.exit(ExitCode.SUCCESS.value)
except KeyboardInterrupt:
try:
w.cancel_training(training_guid)
print('\nModel training was canceled.')
except Exception as ex:
print('Model training could not be canceled: {}'
.format(ex))
debug(' Exception type: {}'.format(type(ex)))
debug(' Exception: {}'.format(ex))
sys.exit(ExitCode.TRAINING_FAILED.value)
except Exception as ex:
print('Error. Model training monitoring failed with an exception: {}'
.format(ex))
debug(' Exception type: {}'.format(type(ex)))
debug(' Exception: {}'.format(ex))
sys.exit(ExitCode.TRAINING_FAILED.value)
# Status:
# - The model training job completed.
# - The training log file TRAINING_LOG_NAME can now be downloaded from COS.
results_references = None
try:
# --------------------------------------------------------
# Identify where the training artifacts are stored on COS
# {
# 'bucket': 'ademoout3',
# 'model_location': 'training-BA8P0BgZg'
# }
# Re-try to fetch information multiple times in case the WML service
# encounters a temporary issue
max_tries = 5
ise = True
for count in range(max_tries):
results_references = \
w.get_training_results_references(training_guid,
ignore_server_error=ise)
if results_references:
# got a response; move on
break
if count + 1 == max_tries:
# last attempt; if it fails stop trying
ise = False
time.sleep(3)
# --------------------------------------------------------
# Download the training log file from the results
# bucket on COS to config['local_download_directory']
# --------------------------------------------------------
print_banner('Downloading training log file "{}" ...'
.format(TRAINING_LOG_NAME))
training_log = cw.download_file(results_references['bucket'],
TRAINING_LOG_NAME,
config['local_download_directory'],
results_references['model_location'])
if training_status in ['error', 'canceled']:
# Training ended with an error or was canceled.
# Notify the user where the training log file was stored and exit.
print('The training log file "{}" was saved in "{}".'
.format(TRAINING_LOG_NAME,
config['local_download_directory']))
sys.exit(ExitCode.TRAINING_FAILED.value)
except Exception as ex:
print('Error. Download of training log file "{}" failed: {}'
.format(TRAINING_LOG_NAME, ex))
sys.exit(ExitCode.DOWNLOAD_FAILED.value)
# terminate if the "train" command was specified
# when the utility was invoked
if cmd_parameters['command'] == 'train':
print('Skipping post-processing steps.')
sys.exit(ExitCode.SUCCESS.value)
# - If training completed successfully, the trained model archive
# TRAINING_OUTPUT_ARCHIVE_NAME can now be downloaded from COS.
try:
# --------------------------------------------------------
# Download the trained model archive from the results
# bucket on COS to LOCAL_DOWNLOAD_DIRECTORY
# --------------------------------------------------------
print_banner('Downloading trained model archive "{}" ...'
.format(TRAINING_OUTPUT_ARCHIVE_NAME))
training_output = cw.download_file(results_references['bucket'],
TRAINING_OUTPUT_ARCHIVE_NAME,
config['local_download_directory'],
results_references['model_location'])
except Exception as ex:
print('Error. Trained model archive "{}" could not be '
'downloaded from Cloud Object Storage bucket "{}": {}'
.format(TRAINING_OUTPUT_ARCHIVE_NAME,
results_references['bucket'],
ex))
sys.exit(ExitCode.DOWNLOAD_FAILED.value)
# Status:
# - The trained model archive and training log file were
# downloaded to the directory identified by
# config['local_download_directory'].
# --------------------------------------------------------
# Extract the downloaded model archive
# --------------------------------------------------------
archive = os.path.join(config['local_download_directory'],
TRAINING_OUTPUT_ARCHIVE_NAME)
print_banner('Extracting trained model artifacts from "{}" ...'
.format(archive))
extraction_ok = False
try:
if tarfile.is_tarfile(archive):
tf = tarfile.open(archive,
mode='r:gz')
for file in tf.getnames():
print(file)
tf.extractall(config['local_download_directory'])
print('Trained model artifacts are located in the "{}" directory.'
.format(config['local_download_directory']))
extraction_ok = True
else:
print('Error. The downloaded file "{}" is not a valid tar file.'
.format(archive))
except FileNotFoundError:
print('Error. "{}" was not found.'.format(archive))
except tarfile.TarError as te:
print(te)
if extraction_ok is False:
sys.exit(ExitCode.EXTRACTION_FAILED.value)
# Status:
# - The trained model archive was downloaded to LOCAL_DOWNLOAD_DIRECTORY.
# The directory structure inshould look as follows:
# /trained_model/<framework-name-1>/<format>/<file-1>
# /trained_model/<framework-name-1>/<format>/<file-2>
# /trained_model/<framework-name-1>/<format-2>/<subdirectory>/<file-3>
# /trained_model/<framework-name-2>/<file-4>
# -------------------------------------------------------------------
# Copy the appropriate framework and format specific artifacts
# to the final destination, where the Docker build will pick them up
# -------------------------------------------------------------------
trained_model_path = config['trained_model_path']
trained_assets_dir = os.path.join(config['local_download_directory'],
trained_model_path)
print_banner('Copying trained model artifacts from "{}" to "{}" ...'
.format(trained_assets_dir,
config['docker_model_asset_directory']))
try:
copy_dir(trained_assets_dir,
config['docker_model_asset_directory'])
except Exception as ex:
print('Error. Trained model files could not be copied: {}'.format(str(ex)))
sys.exit(ExitCode.COPY_FAILED.value)
# Status:
# - The trained model artifacts were copied to the Docker image's asset
# directory, where the model-serving microservice will load them from.
print('Done')
sys.exit(ExitCode.SUCCESS.value)
| 41.03375 | 79 | 0.559113 |
58146fc12bca47d19303bba6584622a1dcef7fcd | 57 | py | Python | tests/unit/sim_client/__init__.py | rkm/bluebird | 2325ebb151724d4444c092c095a040d7365dda79 | [
"MIT"
] | 8 | 2019-01-29T15:19:39.000Z | 2020-07-16T03:55:36.000Z | tests/unit/sim_client/__init__.py | rkm/bluebird | 2325ebb151724d4444c092c095a040d7365dda79 | [
"MIT"
] | 46 | 2019-02-08T14:23:11.000Z | 2021-04-06T13:45:10.000Z | tests/unit/sim_client/__init__.py | rkm/bluebird | 2325ebb151724d4444c092c095a040d7365dda79 | [
"MIT"
] | 3 | 2019-05-06T14:18:07.000Z | 2021-06-17T10:39:59.000Z | """
Module contains tests for the sim_client package
"""
| 14.25 | 48 | 0.736842 |
581495876b03363b5fef74a09d461c434b90c0d7 | 8,344 | py | Python | glog.py | leoll2/python-glog | c809d16352bf061d0ee38e590c6f28d553d740e7 | [
"BSD-2-Clause"
] | null | null | null | glog.py | leoll2/python-glog | c809d16352bf061d0ee38e590c6f28d553d740e7 | [
"BSD-2-Clause"
] | null | null | null | glog.py | leoll2/python-glog | c809d16352bf061d0ee38e590c6f28d553d740e7 | [
"BSD-2-Clause"
] | null | null | null | """A simple Google-style logging wrapper."""
import logging
import time
import traceback
import os
import sys
import gflags as flags
FLAGS = flags.FLAGS
debug = logging.debug
info = logging.info
warning = logging.warning
warn = logging.warning
error = logging.error
exception = logging.exception
fatal = logging.fatal
log = logging.log
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
WARN = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL
_level_names = {
DEBUG: "DEBUG",
INFO: "INFO",
WARN: "WARN",
ERROR: "ERROR",
FATAL: "FATAL",
}
_level_letters = [name[0] for name in _level_names.values()]
GLOG_PREFIX_REGEX = (
(
r"""
(?x) ^
(?P<severity>[%s])
(?P<month>\d\d)(?P<day>\d\d)\s
(?P<hour>\d\d):(?P<minute>\d\d):(?P<second>\d\d)
\.(?P<microsecond>\d{6})\s+
(?P<process_id>-?\d+)\s
(?P<filename>[a-zA-Z<_][\w._<>-]+):(?P<line>\d+)
\]\s
"""
)
% "".join(_level_letters)
)
"""Regex you can use to parse glog line prefixes."""
global_logger = logging.getLogger()
stdout_handler = logging.StreamHandler(sys.stdout)
stderr_handler = logging.StreamHandler(sys.stderr)
file_handlers = dict()
flags.DEFINE_flag(CaptureWarningsFlag())
flags.DEFINE(
parser=VerbosityParser(),
serializer=flags.ArgumentSerializer(),
name="verbosity",
default=logging.INFO,
help="Logging verbosity",
)
init(global_logger)
# Define functions emulating C++ glog check-macros
# https://htmlpreview.github.io/?https://github.com/google/glog/master/doc/glog.html#check
def format_stacktrace(stack):
    """Render an extract_stack()-style frame list as readable lines.

    Each frame tuple (filename, lineno, funcname, text) becomes one
    tab-indented line of the form "\\t<basename>::<funcname>:<lineno>\\t<text>",
    with the file path reduced to its basename. No frames are dropped;
    the caller is responsible for slicing the stack if truncation after
    the check failure is desired.
    """
    return ["\t%s:%d\t%s" % (os.path.basename(frame[0]) + "::" + frame[2],
                             frame[1],
                             frame[3])
            for frame in stack]
def _fail_if(failed, message, default_fmt, obj1, obj2):
    """Report a failed binary check via check_failed().

    The default message is formatted only when the check actually failed,
    so str() is never invoked on the operands in the success path (same
    laziness as the original per-function implementations).
    """
    if failed:
        if message is None:
            message = default_fmt % (str(obj1), str(obj2))
        check_failed(message)


def check(condition, message=None):
    """Raise exception with message if condition is False."""
    if not condition:
        if message is None:
            message = "Check failed."
        check_failed(message)


def check_eq(obj1, obj2, message=None):
    """Raise exception with message if obj1 != obj2."""
    _fail_if(obj1 != obj2, message, "Check failed: %s != %s", obj1, obj2)


def check_ne(obj1, obj2, message=None):
    """Raise exception with message if obj1 == obj2."""
    _fail_if(obj1 == obj2, message, "Check failed: %s == %s", obj1, obj2)


def check_le(obj1, obj2, message=None):
    """Raise exception with message if not (obj1 <= obj2)."""
    _fail_if(obj1 > obj2, message, "Check failed: %s > %s", obj1, obj2)


def check_ge(obj1, obj2, message=None):
    """Raise exception with message unless (obj1 >= obj2)."""
    _fail_if(obj1 < obj2, message, "Check failed: %s < %s", obj1, obj2)


def check_lt(obj1, obj2, message=None):
    """Raise exception with message unless (obj1 < obj2)."""
    _fail_if(obj1 >= obj2, message, "Check failed: %s >= %s", obj1, obj2)


def check_gt(obj1, obj2, message=None):
    """Raise exception with message unless (obj1 > obj2)."""
    _fail_if(obj1 <= obj2, message, "Check failed: %s <= %s", obj1, obj2)


def check_notnone(obj, message=None):
    """Raise exception with message if obj is None."""
    if obj is None:
        if message is None:
            message = "Check failed: Object is None."
        check_failed(message)
| 27.447368 | 90 | 0.615532 |
581495ab37cf4df801b88c86040220d6464bbc32 | 4,141 | py | Python | ref_rna.py | entn-at/warp-rna | f6bf19634564068f23f9906373754e04f9b653a3 | [
"MIT"
] | 39 | 2019-08-11T09:06:55.000Z | 2022-03-30T03:24:34.000Z | ref_rna.py | entn-at/warp-rna | f6bf19634564068f23f9906373754e04f9b653a3 | [
"MIT"
] | null | null | null | ref_rna.py | entn-at/warp-rna | f6bf19634564068f23f9906373754e04f9b653a3 | [
"MIT"
] | 6 | 2019-12-11T03:02:48.000Z | 2021-11-29T09:01:51.000Z | """
Python reference implementation of the Recurrent Neural Aligner.
Author: Ivan Sorokin
Based on the papers:
- "Recurrent Neural Aligner: An Encoder-Decoder Neural Network Model for Sequence to Sequence Mapping"
Hasim Sak, et al., 2017
- "Extending Recurrent Neural Aligner for Streaming End-to-End Speech Recognition in Mandarin"
Linhao Dong, et al., 2018
"""
import numpy as np
NEG_INF = -float("inf")
def logsumexp(*args):
"""
Stable log sum exp.
"""
if all(a == NEG_INF for a in args):
return NEG_INF
a_max = max(args)
lsp = np.log(sum(np.exp(a - a_max) for a in args))
return a_max + lsp
def log_softmax(acts, axis):
"""
Log softmax over the last axis of the 3D array.
"""
acts = acts - np.max(acts, axis=axis, keepdims=True)
probs = np.sum(np.exp(acts), axis=axis, keepdims=True)
log_probs = acts - np.log(probs)
return log_probs
if __name__ == "__main__":
test()
| 26.544872 | 103 | 0.59744 |
581517f5427032699dff194265e55b485b52ab39 | 2,994 | py | Python | tests/coretests.py | thomasms/coiny | 1f51eac2542e46b03abd9f66fd3b58fbd80cb177 | [
"MIT"
] | null | null | null | tests/coretests.py | thomasms/coiny | 1f51eac2542e46b03abd9f66fd3b58fbd80cb177 | [
"MIT"
] | null | null | null | tests/coretests.py | thomasms/coiny | 1f51eac2542e46b03abd9f66fd3b58fbd80cb177 | [
"MIT"
] | null | null | null | import unittest
from typing import Any
from coiny.core import CoinPrice, CoinyQueue, CoinySession, price_now_url, price_task
from coiny.utils import NullCoinPrice
__all__ = ["PriceTaskTests"]
| 34.022727 | 85 | 0.62024 |
5816e949ba4a9d3600362e45768d66548fbd4d4b | 969 | py | Python | legacy/dx/simulator/simulator_diagnoser/test/graph/traversal/forward_test.py | GaloisInc/adapt | 2ccff778d3e77505899266572f8f7caacb5b630f | [
"BSD-3-Clause"
] | 2 | 2020-04-09T13:04:25.000Z | 2021-09-24T14:17:26.000Z | legacy/dx/simulator/simulator_diagnoser/test/graph/traversal/forward_test.py | GaloisInc/adapt | 2ccff778d3e77505899266572f8f7caacb5b630f | [
"BSD-3-Clause"
] | null | null | null | legacy/dx/simulator/simulator_diagnoser/test/graph/traversal/forward_test.py | GaloisInc/adapt | 2ccff778d3e77505899266572f8f7caacb5b630f | [
"BSD-3-Clause"
] | 3 | 2019-09-20T20:49:54.000Z | 2021-09-02T17:33:47.000Z | import unittest
from simulator_diagnoser.graph import InmemoryGraph
from simulator_diagnoser.graph.traversal import ForwardAnalysis
if __name__ == '__main__':
unittest.main()
| 24.846154 | 69 | 0.49742 |
581774fbaaecfebcc97c105cd9ba5717bc57c3de | 5,396 | py | Python | SONOS/sonos-fadein-alarm.py | tksunw/IoT | 2148c49e9a90822400f195be7b1de3f8e8b8ba2a | [
"MIT"
] | 1 | 2018-01-30T23:30:27.000Z | 2018-01-30T23:30:27.000Z | SONOS/sonos-fadein-alarm.py | tksunw/IoT | 2148c49e9a90822400f195be7b1de3f8e8b8ba2a | [
"MIT"
] | 1 | 2018-02-14T19:58:56.000Z | 2018-02-14T19:58:56.000Z | SONOS/sonos-fadein-alarm.py | tksunw/IoT | 2148c49e9a90822400f195be7b1de3f8e8b8ba2a | [
"MIT"
] | 2 | 2018-02-13T18:52:09.000Z | 2021-09-29T14:27:49.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
sonos-fadein-alarm.py - a gentle alarm using Sonos Favorites.
This module allows a user to choose a SONOS favorite channel to
play for a gentle alarm. Select the maximum desired volume, the
number of minutes over which to ramp volume from 0 to the chosen
maxium, and choose a favorite to use (by title), and the script
will do the rest.
2017-01-21 my new alarm clock.
2017-09-15 added ability to group a second speaker to the main speaker
also aded the ability to specify 'all' to group all
available speakers to the main speaker.
'''
import argparse
import datetime
import time
import os.path
import soco
# Set some default values. These are mine. The channel is listed
# by name, and comes from the Sonos players 'favorites'. Volume
# on the player(s) specified will ramp up from 0 to MAXVOL over
# the number of minutes specified. For me, I like a 30 minute
# ramp from 0 to 12. So the volume will increase by 1 every 2.5
# minutes.
# Set _WEEKEND days to skip certain days of the week, if you want
# to skip your days off work.
_SPEAKER = 'master bedroom'
_CHANNEL = 'Everybody Talks Radio'
_MINUTES = 30
_MAXVOL = 12
_WEEKEND = ('Saturday', 'Sunday')
def get_sonos_favorites(from_speaker):
''' get_sonos_favorites: gets the saved "favorites" from a Sonos speaker.
Args:
from_speaker (soco.core.Soco object): the speaker to pull favorites from.
Returns:
favs (list): a list of Sonos Favorites (title, meta, uri)
'''
favs = from_speaker.get_sonos_favorites()['favorites']
return favs
def main():
''' main function:
Args:
None
Returns:
None
Process command line arguments, and turn a Sonos speaker into an alarm
clock, with the flexibility to ramp the volume slowly over a defined
time period, to a "max vol" limit.
'''
parser = argparse.ArgumentParser(description='Sonos/Favorites ramping alarm.')
parser.add_argument('-S', '--speaker', type=str,
help='The Sonos speaker to use for the alarm',
default=_SPEAKER)
parser.add_argument('-s', '--slave', type=str,
help='The Sonos speaker(s) to join to a group for the alarm. Use the word "all" to join all available players.')
parser.add_argument('-c', '--channel', type=str,
help='The Sonos Favorite Channel to use for the alarm',
default=_CHANNEL)
parser.add_argument('-m', '--minutes', type=int,
help='The number of minutes the alarm will ramp up over',
default=_MINUTES)
parser.add_argument('-v', '--volume', type=int,
help='Set the maximum volume for the alarm',
default=_MAXVOL)
parser.add_argument('-p', '--pause',
help='Pause a speaker that is playing.',
action='store_true')
parser.epilog = "The channel you select must be a Sonos Favorite. Because\n"
parser.epilog += "I'm lazy and didn't feel like figuring out SoCo to get\n"
parser.epilog += "it working directly with Pandora, which SoCo doesn't seem\n"
parser.epilog += "to work with yet."
args = parser.parse_args()
speakers = soco.discover()
player = [x for x in speakers if x.player_name.lower() == args.speaker.lower()][0]
if args.slave:
if args.slave.lower() == 'all':
[x.join(player) for x in speakers if x.player_name.lower() != player.player_name.lower()]
else:
slave = [x for x in speakers if x.player_name.lower() == args.slave.lower()][0]
slave.join(player)
if args.pause:
''' this will stop the indicated sonos speaker. even if the alarm is
still running.
'''
player.stop()
else:
favorites = get_sonos_favorites(player)
for favorite in favorites:
if args.channel.lower() in favorite['title'].lower():
my_choice = favorite
break
print "Playing {} on {}".format(my_choice['title'], player.player_name)
player.play_uri(uri=my_choice['uri'], meta=my_choice['meta'], start=True)
if args.minutes == 0:
player.volume = args.volume
else:
player.volume = 0
seconds = args.minutes * 60
ramp_interval = seconds / args.volume
for _ in xrange(args.volume):
player.volume += 1
time.sleep(ramp_interval)
if __name__ == "__main__":
today = datetime.datetime.today().strftime('%A')
date = datetime.datetime.today().strftime('%Y-%m-%d')
holidays = set(line.strip() for line in open('holidays.txt'))
if today in _WEEKEND:
print today, 'is a scheduled weekend day. Not running.'
elif date in holidays:
print date, 'is a scheduled holiday. Not running.'
elif os.path.isfile('/tmp/holiday'):
''' /tmp/holiday allows us to mark when we don't want the alarm to run
tomorrow. Especially when we're using cron. Just touch the file.
'''
print "Today is marked as a holiday via /tmp/holiday, not running the alarm"
else:
main()
else:
print "This file is not intended to be included by other scripts."
| 38.542857 | 137 | 0.623981 |
58183b1abecb86537c0a52b35966e7d8ef3e9a5f | 5,775 | py | Python | Agent5_a_0_5_knots_512_d_0_02/step_node_Agent6_rewards.py | schigeru/Bachelorarbeit_Code | 261b2552221f768e7022abc60a4e5a7d2fedbbae | [
"MIT"
] | null | null | null | Agent5_a_0_5_knots_512_d_0_02/step_node_Agent6_rewards.py | schigeru/Bachelorarbeit_Code | 261b2552221f768e7022abc60a4e5a7d2fedbbae | [
"MIT"
] | null | null | null | Agent5_a_0_5_knots_512_d_0_02/step_node_Agent6_rewards.py | schigeru/Bachelorarbeit_Code | 261b2552221f768e7022abc60a4e5a7d2fedbbae | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import math
import os
import numpy as np
import time
import sys
import copy
import rospy
import moveit_msgs.msg
import geometry_msgs.msg
import random
import csv
from sensor_msgs.msg import JointState
from gazebo_msgs.msg import LinkStates
from gazebo_msgs.msg import LinkState
from std_msgs.msg import Float64
from std_msgs.msg import String
from sensor_msgs.msg import Joy
import moveit_commander
from panda_rl.srv import StepAction, StepActionResponse
group_name = "panda_arm_hand"
move_group = moveit_commander.MoveGroupCommander(group_name)
quat_goal = np.array([1, 0, 0.0075, 0])
joint1_threshold_min = -2.8973
joint2_threshold_min = -1.7628
joint3_threshold_min = -2.8973
joint4_threshold_min = -3.0718
joint5_threshold_min = -2.8973
joint6_threshold_min = -0.0175
joint1_threshold_max = 2.8973
joint2_threshold_max = 1.7628
joint3_threshold_max = 2.8973
joint4_threshold_max = -0.0698
joint5_threshold_max = 2.8973
joint6_threshold_max = 3.7525
rospy.init_node('step_service', anonymous=False)
print("step_nodeaktiv")
s = rospy.Service('step_env', StepAction, take_action)
rospy.spin()
| 35.429448 | 151 | 0.675152 |
5818909f1789bffb946f4dcc647ac54b08e00f22 | 10,043 | py | Python | pwnlib/elf/corefile.py | jdsecurity/binjitsu | 999ad632004bfc3e623eead20eb11de98fc1f4dd | [
"MIT"
] | 5 | 2018-05-15T13:00:56.000Z | 2020-02-09T14:29:00.000Z | pwnlib/elf/corefile.py | FDlucifer/binjitsu | 999ad632004bfc3e623eead20eb11de98fc1f4dd | [
"MIT"
] | null | null | null | pwnlib/elf/corefile.py | FDlucifer/binjitsu | 999ad632004bfc3e623eead20eb11de98fc1f4dd | [
"MIT"
] | 6 | 2017-09-07T02:31:11.000Z | 2021-07-05T16:59:18.000Z | import collections
import ctypes
import elftools
from elftools.common.utils import roundup, struct_parse
from elftools.common.py3compat import bytes2str
from elftools.construct import CString
from ..context import context
from ..log import getLogger
from .datatypes import *
from .elf import ELF
from ..tubes.tube import tube
log = getLogger(__name__)
types = {
'i386': elf_prstatus_i386,
'amd64': elf_prstatus_amd64,
}
# Slightly modified copy of the pyelftools version of the same function,
# until they fix this issue:
# https://github.com/eliben/pyelftools/issues/93
| 34.631034 | 103 | 0.54416 |
5819716bac9c4b729336569c993ab6648380ee01 | 2,875 | py | Python | kNN.py | skywind3000/ml | d3ac3d6070b66d84e25537915ee634723ddb8c51 | [
"MIT"
] | 9 | 2019-03-25T02:14:23.000Z | 2020-05-19T20:46:10.000Z | kNN.py | skywind3000/ml | d3ac3d6070b66d84e25537915ee634723ddb8c51 | [
"MIT"
] | null | null | null | kNN.py | skywind3000/ml | d3ac3d6070b66d84e25537915ee634723ddb8c51 | [
"MIT"
] | 2 | 2020-07-06T04:44:02.000Z | 2022-02-17T01:27:55.000Z | from __future__ import print_function
import numpy as np
import operator
import os
import sys
if sys.version_info[0] >= 3:
xrange = range
# kNN classifier
# load image
# hand writing classifier
# testing case
if __name__ == '__main__':
test3()
| 33.823529 | 85 | 0.619478 |
5819a9286725e2bb1d31cefd9b8edf4e2e05b208 | 642 | py | Python | simfin/revenue/personal_taxes.py | CREEi-models/simfin | a7c632ac8bc8f795cd46028c1a49e65a1c1b44eb | [
"MIT"
] | 1 | 2021-06-11T15:16:13.000Z | 2021-06-11T15:16:13.000Z | simfin/revenue/personal_taxes.py | CREEi-models/simfin | a7c632ac8bc8f795cd46028c1a49e65a1c1b44eb | [
"MIT"
] | 1 | 2021-06-07T14:39:27.000Z | 2021-06-07T14:39:27.000Z | simfin/revenue/personal_taxes.py | CREEi-models/simfin | a7c632ac8bc8f795cd46028c1a49e65a1c1b44eb | [
"MIT"
] | 1 | 2021-03-17T03:52:21.000Z | 2021-03-17T03:52:21.000Z | from simfin.tools import account
| 33.789474 | 95 | 0.65109 |
5819cc4c01f213155dbdad2c086e2c95b1ccd432 | 16,094 | py | Python | pandaserver/brokerage/PandaSiteIDs.py | rybkine/panda-server | 30fdeaa658a38fe2049849446c300c1e1f5b5231 | [
"Apache-2.0"
] | 1 | 2019-08-30T13:47:51.000Z | 2019-08-30T13:47:51.000Z | pandaserver/brokerage/PandaSiteIDs.py | mkycanopus/panda-server | 0f7c36800c033fada8bbde53dceaab98770b6df2 | [
"Apache-2.0"
] | null | null | null | pandaserver/brokerage/PandaSiteIDs.py | mkycanopus/panda-server | 0f7c36800c033fada8bbde53dceaab98770b6df2 | [
"Apache-2.0"
] | null | null | null | # !!!!!!! This file is OBSOLETE. Its content has been absorbed into pilotController.py in the autopilot repository.
# !!!!!!! Questions to Torre Wenaus.
PandaSiteIDs = {
'AGLT2' : {'nickname':'AGLT2-condor','status':'OK'},
'ALBERTA-LCG2' : {'nickname':'ALBERTA-LCG2-lcgce01-atlas-lcgpbs','status':'OK'},
'ANALY_AGLT2' : {'nickname':'ANALY_AGLT2-condor','status':'OK'},
'ANALY_ALBERTA' : {'nickname':'ALBERTA-LCG2-lcgce01-atlas-lcgpbs','status':'OK'},
'ANALY_BEIJING' : {'nickname':'BEIJING-LCG2-lcg002-atlas-lcgpbs','status':'OK'},
'ANALY_BNL' : {'nickname':'BNL_ATLAS_1-condor','status':'OK'},
'ANALY_BNL_ATLAS_1' : {'nickname':'BNL_ATLAS_1-condor','status':'OK'},
'ANALY_BNL_ATLAS_2' : {'nickname':'BNL_ATLAS_2-condor','status':'OK'},
#'ANALY_BNL_LOCAL' : {'nickname':'BNL_ATLAS_1-condor','status':'OK'},
'ANALY_BNL_test' : {'nickname':'BNL_ATLAS_1-condor','status':'OK'},
'ANALY_BNL_test2' : {'nickname':'ANALY_BNL_ATLAS_1-condor','status':'OK'},
'ANALY_BNL_test3' : {'nickname':'BNL_ATLAS_1-condor','status':'OK'},
'ANALY_BRUNEL' : {'nickname':'UKI-LT2-Brunel-dgc-grid-44-atlas-lcgpbs','status':'notOK'},
'ANALY_CERN' : {'nickname':'CERN-PROD-ce123-grid_atlas-lcglsf','status':'notOK'},
'ANALY_CNAF' : {'nickname':'INFN-CNAF-gridit-ce-001-lcg-lcgpbs','status':'notOK'},
'ANALY_CPPM' : {'nickname':'IN2P3-CPPM-marce01-atlas-pbs','status':'OK'},
'ANALY_FZK' : {'nickname':'FZK-LCG2-ce-5-fzk-atlasXS-pbspro','status':'OK'},
'ANALY_GLASGOW' : {'nickname':'UKI-SCOTGRID-GLASGOW-svr021-q3d-lcgpbs','status':'OK'},
'ANALY_GLOW-ATLAS' : {'nickname':'GLOW-ATLAS-condor','status':'OK'},
'ANALY_GRIF-IRFU' : {'nickname':'GRIF-IRFU-node07-atlas-lcgpbs','status':'OK'},
'ANALY_GRIF-LAL' : {'nickname':'GRIF-LAL-grid10-atlasana-pbs','status':'notOK'},
'ANALY_GRIF-LPNHE' : {'nickname':'GRIF-LPNHE-lpnce-atlas-pbs','status':'notOK'},
'ANALY_HU_ATLAS_Tier2' : {'nickname':'ANALY_HU_ATLAS_Tier2-lsf','status':'OK'},
'ANALY_LANCS' : {'nickname':'UKI-NORTHGRID-LANCS-HEP-fal-pygrid-18-atlas-lcgpbs','status':'notOK'},
'ANALY_LAPP' : {'nickname':'IN2P3-LAPP-lapp-ce01-atlas-pbs','status':'notOK'},
'ANALY_LIV' : {'nickname':'UKI-NORTHGRID-LIV-HEP-hepgrid2-atlas-lcgpbs','status':'notOK'},
'ANALY_LONG_BNL' : {'nickname':'BNL_ATLAS_1-condor','status':'OK'},
'ANALY_LONG_BNL_ATLAS' : {'nickname':'BNL_ATLAS_2-condor','status':'OK'},
'ANALY_LONG_BNL_LOCAL' : {'nickname':'BNL_ATLAS_1-condor','status':'OK'},
'ANALY_LONG_LYON' : {'nickname':'IN2P3-CC-T2-cclcgceli05-long-bqs','status':'OK'},
'ANALY_LPC' : {'nickname':'IN2P3-LPC-clrlcgce03-atlas-lcgpbs','status':'notOK'},
'ANALY_LPSC' : {'nickname':'IN2P3-LPSC-lpsc-ce-atlas-pbs','status':'OK'},
'ANALY_LYON' : {'nickname':'IN2P3-CC-T2-cclcgceli05-medium-bqs','status':'OK'},
'ANALY_MANC' : {'nickname':'UKI-NORTHGRID-MAN-HEP-ce01-atlas-lcgpbs','status':'OK'},
'ANALY_MCGILL' : {'nickname':'MCGILL-LCG2-atlas-ce-atlas-lcgpbs','status':'OK'},
'ANALY_MWT2' : {'nickname':'ANALY_MWT2-condor','status':'notOK'},
'ANALY_MWT2_SHORT' : {'nickname':'ANALY_MWT2_SHORT-pbs','status':'notOK'},
'ANALY_NET2' : {'nickname':'ANALY_NET2-pbs','status':'OK'},
'ANALY_OU_OCHEP_SWT2' : {'nickname':'ANALY_OU_OCHEP_SWT2-condor','status':'notOK'},
'ANALY_PIC' : {'nickname':'pic-ce07-gshort-lcgpbs','status':'OK'},
'ANALY_RAL' : {'nickname':'RAL-LCG2-lcgce01-atlasL-lcgpbs','status':'OK'},
'ANALY_ROMANIA02' : {'nickname':'RO-02-NIPNE-tbat01-atlas-lcgpbs','status':'notOK'},
'ANALY_ROMANIA07' : {'nickname':'RO-07-NIPNE-tbit01-atlas-lcgpbs','status':'notOK'},
'ANALY_SARA' : {'nickname':'SARA-MATRIX-mu6-short-pbs','status':'notOK'},
'ANALY_SFU' : {'nickname':'SFU-LCG2-snowpatch-hep-atlas-lcgpbs','status':'notOK'},
'ANALY_SHEF' : {'nickname':'UKI-NORTHGRID-SHEF-HEP-lcgce0-atlas-lcgpbs','status':'OK'},
'ANALY_SLAC' : {'nickname':'ANALY_SLAC-lsf','status':'OK'},
'ANALY_SWT2_CPB' : {'nickname':'ANALY_SWT2_CPB-pbs','status':'OK'},
'ANALY_TAIWAN' : {'nickname':'Taiwan-LCG2-w-ce01-atlas-lcgpbs','status':'OK'},
'ANALY_TEST' : {'nickname':'ANALY_TEST','status':'OK'},
'ANALY_TORONTO' : {'nickname':'TORONTO-LCG2-bigmac-lcg-ce2-atlas-pbs','status':'OK'},
'ANALY_TOKYO' : {'nickname':'TOKYO-LCG2-lcg-ce01-atlas-lcgpbs','status':'OK'},
'ANALY_TRIUMF' : {'nickname':'TRIUMF-LCG2-ce1-atlas-lcgpbs','status':'OK'},
'ANALY_UBC' : {'nickname':'UBC-pbs','status':'OK'},
'ANALY_UIUC-HEP' : {'nickname':'ANALY_UIUC-HEP-condor','status':'OK'},
'ANALY_UTA' : {'nickname':'UTA-DPCC-pbs','status':'OK'},
'ANALY_UTA-DPCC' : {'nickname':'UTA-DPCC-test-pbs','status':'notOK'},
'ANALY_VICTORIA' : {'nickname':'VICTORIA-LCG2-lcg-ce-general-lcgpbs','status':'OK'},
'AUVERGRID' : {'nickname':'AUVERGRID-iut15auvergridce01-atlas-lcgpbs','status':'notOK'},
'ASGC' : {'nickname':'Taiwan-LCG2-w-ce01-atlas-lcgpbs','status':'OK'},
'ASGC_REPRO' : {'nickname':'ASGC_REPRO','status':'notOK'},
'Australia-ATLAS' : {'nickname':'Australia-ATLAS-agh2-atlas-lcgpbs','status':'OK'},
'BARNETT_TEST' : {'nickname':'BARNETT_TEST','status':'notOK'},
'BEIJING' : {'nickname':'BEIJING-LCG2-lcg002-atlas-lcgpbs','status':'OK'},
'BNLPROD' : {'nickname':'BNL_ATLAS_1-condor','status':'notOK'},
'BNL_ATLAS_1' : {'nickname':'BNL_ATLAS_1-condor','status':'OK'},
'BNL_ATLAS_2' : {'nickname':'BNL_ATLAS_2-condor','status':'OK'},
'BNL_ATLAS_DDM' : {'nickname':'BNL_DDM-condor','status':'notOK'},
'BNL_ATLAS_test' : {'nickname':'BNL_ATLAS_2-condor','status':'notOK'},
'BU_ATLAS_Tier2' : {'nickname':'BU_ATLAS_Tier2-pbs','status':'OK'},
'BU_ATLAS_Tier2o' : {'nickname':'BU_ATLAS_Tier2o-pbs','status':'OK'},
'BU_ATLAS_test' : {'nickname':'BU_ATLAS_Tier2-pbs','status':'NOTOK'},
'HU_ATLAS_Tier2' : {'nickname':'HU_ATLAS_Tier2-lsf','status':'OK'},
'CERN-BUILDS' : {'nickname':'CERN-BUILDS','status':'notOK'},
'CERN-RELEASE' : {'nickname':'CERN-RELEASE','status':'notOK'},
'CERN-UNVALID' : {'nickname':'CERN-UNVALID','status':'notOK'},
'CGG' : {'nickname':'CGG-LCG2-ce1-atlas-lcgpbs','status':'notOK'},
'CHARMM' : {'nickname':'CHARMM','status':'notOK'},
'CNR-ILC-PISA' : {'nickname':'CNR-ILC-PISA-gridce-atlas-lcgpbs','status':'notOK'},
'CPPM' : {'nickname':'IN2P3-CPPM-marce01-atlas-pbs','status':'OK'},
'CSCS-LCG2' : {'nickname':'CSCS-LCG2-ce01-egee48h-lcgpbs','status':'OK'},
'csTCDie' : {'nickname':'csTCDie-gridgate-himem-pbs','status':'OK'},
'CYF' : {'nickname':'CYFRONET-LCG2-ce-atlas-pbs','status':'OK'},
'DESY-HH' : {'nickname':'DESY-HH-grid-ce3-default-lcgpbs','status':'OK'},
'DESY-ZN' : {'nickname':'DESY-ZN-lcg-ce0-atlas-lcgpbs','status':'OK'},
'EFDA-JET' : {'nickname':'EFDA-JET-grid002-atlas-lcgpbs','status':'notok'},
'FZK-LCG2' : {'nickname':'FZK-LCG2-ce-1-fzk-atlasXL-pbspro','status':'OK'},
'FZK_REPRO' : {'nickname':'FZK_REPRO','status':'notOK'},
'FZU' : {'nickname':'praguelcg2-golias25-lcgatlas-lcgpbs','status':'OK'},
'GLOW' : {'nickname':'GLOW-CMS-cmsgrid02-atlas-condor','status':'notOK'},
'GLOW-ATLAS' : {'nickname':'GLOW-ATLAS-condor','status':'OK'},
'GoeGrid' : {'nickname':'GoeGrid-ce-goegrid-atlas-lcgpbs','status':'OK'},
'GRIF-IRFU' : {'nickname':'GRIF-IRFU-node07-atlas-lcgpbs','status':'OK'},
'GRIF-LAL' : {'nickname':'GRIF-LAL-grid10-atlas-pbs','status':'OK'},
'GRIF-LPNHE' : {'nickname':'GRIF-LPNHE-lpnce-atlas-pbs','status':'OK'},
'HEPHY-UIBK' : {'nickname':'HEPHY-UIBK-hepx4-atlas-lcgpbs','status':'OK'},
'IFAE' : {'nickname':'ifae-ifaece01-ifae-lcgpbs','status':'OK'},
'IFIC' : {'nickname':'IFIC-LCG2-ce01-atlas-pbs','status':'OK'},
'IHEP' : {'nickname':'BEIJING-LCG2-lcg002-atlas-lcgpbs','status':'OK'},
'ITEP' : {'nickname':'ITEP-ceglite-atlas-lcgpbs','status':'OK'},
'IN2P3-LPSC' : {'nickname':'IN2P3-LPSC-lpsc-ce-atlas-pbs','status':'OK'},
'JINR-LCG2' : {'nickname':'JINR-LCG2-lcgce01-atlas-lcgpbs', 'status':'OK'},
'LAPP' : {'nickname':'IN2P3-LAPP-lapp-ce01-atlas-pbs','status':'OK'},
'LIP-COIMBRA' : {'nickname':'LIP-Coimbra-grid006-atlas-lcgpbs','status':'OK'},
'LIP-LISBON' : {'nickname':'LIP-Lisbon-ce02-atlasgrid-lcgsge','status':'OK'},
'LLR' : {'nickname':'GRIF-LLR-polgrid1-atlas-pbs','status':'notOK'},
'LPC' : {'nickname':'IN2P3-LPC-clrlcgce03-atlas-lcgpbs','status':'OK'},
'LRZ' : {'nickname':'LRZ-LMU-lcg-lrz-ce-atlas-sge','status':'OK'},
'LYON' : {'nickname':'IN2P3-CC-cclcgceli02-long-bqs','status':'OK'},
'LYON_REPRO' : {'nickname':'LYON_REPRO','status':'notOK'},
'Lyon-T2' : {'nickname':'IN2P3-CC-T2-cclcgceli05-long-bqs','status':'OK'},
'LTU_CCT' : {'nickname':'LTU_CCT-pbs','status':'OK'},
'MANC' : {'nickname':'UKI-NORTHGRID-MAN-HEP-ce02-atlas-lcgpbs','status':'OK'},
'MCGILL-LCG2' : {'nickname':'MCGILL-LCG2-atlas-ce-atlas-pbs','status':'OK'},
'MONTREAL' : {'nickname':'Umontreal-LCG2-lcg-ce-atlas-lcgpbs','status':'notOK'},
'MPP' : {'nickname':'MPPMU-grid-ce-long-sge','status':'OK'},
'MWT2_IU' : {'nickname':'MWT2_IU-pbs','status':'OK'},
'MWT2_UC' : {'nickname':'MWT2_UC-pbs','status':'OK'},
'NDGF' : {'nickname':'NDGF-condor','status':'OK'},
'NIKHEF-ELPROD' : {'nickname':'NIKHEF-ELPROD-gazon-atlas-pbs','status':'OK'},
'NIKHEF_REPRO' : {'nickname':'NIKHEF_REPRO','status':'notOK'},
'OUHEP_ITB' : {'nickname':'OUHEP_ITB-condor','status':'notOK'},
'OU_PAUL_TEST' : {'nickname':'OU_OCHEP_SWT2-condor','status':'notOK'},
'OU_OCHEP_SWT2' : {'nickname':'OU_OCHEP_SWT2-condor','status':'OK'},
'OU_OSCER_ATLAS' : {'nickname':'OU_OSCER_ATLAS-lsf','status':'OK'},
'OU_OSCER_ATLASdeb' : {'nickname':'OU_OSCER_ATLASdeb-lsf','status':'notOK'},
'PSNC' : {'nickname':'PSNC-ce-atlas-pbs','status':'OK'},
'PIC' : {'nickname':'pic-ce05-glong-lcgpbs','status':'OK'},
'PIC_REPRO' : {'nickname':'PIC_REPRO','status':'notOK'},
'prague_cesnet_lcg2' : {'nickname':'prague_cesnet_lcg2-skurut17-egee_atlas-lcgpbs','status':'notOK'},
'RAL' : {'nickname':'RAL-LCG2-lcgce02-grid1000M-lcgpbs','status':'OK'},
'RAL_REPRO' : {'nickname':'RAL_REPRO','status':'notOK'},
'ru-Moscow-SINP-LCG2' : {'nickname':'ru-Moscow-SINP-LCG2-lcg02-atlas-lcgpbs','status':'OK'},
'ru-PNPI' : {'nickname':'ru-PNPI-cluster-atlas-pbs','status':'OK'},
'RDIGTEST' : {'nickname':'RDIGTEST','status':'notOK'},
'ROMANIA02' : {'nickname':'RO-02-NIPNE-tbat01-atlas-lcgpbs','status':'OK'},
'ROMANIA07' : {'nickname':'RO-07-NIPNE-tbit01-atlas-lcgpbs','status':'OK'},
'RRC-KI' : {'nickname':'RRC-KI-gate-atlas-lcgpbs','status':'OK'},
'RU-Protvino-IHEP' : {'nickname':'RU-Protvino-IHEP-ce0003-atlas-lcgpbs','status':'OK'},
'SARA_REPRO' : {'nickname':'SARA_REPRO','status':'notOK'},
'SFU-LCG2' : {'nickname':'SFU-LCG2-snowpatch-atlas-lcgpbs','status':'OK'},
'SLACXRD' : {'nickname':'SLACXRD-lsf','status':'OK'},
'SLAC_PAUL_TEST' : {'nickname':'SLACXRD-lsf','status':'notOK'},
'SNS-PISA' : {'nickname':'SNS-PISA-gridce-atlas-lcgpbs','status':'notOK'},
'SPACI-CS-IA64' : {'nickname':'SPACI-CS-IA64-square-atlas-lsf','status':'notOK'},
'SWT2_CPB' : {'nickname':'SWT2_CPB-pbs','status':'OK'},
'Taiwan-IPAS-LCG2' : {'nickname':'Taiwan-IPAS-LCG2-atlasce-atlas-lcgcondor','status':'notOK'},
'TEST1' : {'nickname':'TEST1','status':'notOK'},
'TEST2' : {'nickname':'TEST2','status':'notOK'},
'TEST3' : {'nickname':'TEST3','status':'notOK'},
'TEST4' : {'nickname':'TEST4','status':'notOK'},
'TESTCHARMM' : {'nickname':'TESTCHARMM','status':'notOK'},
'TESTGLIDE' : {'nickname':'TESTGLIDE','status':'notOK'},
'TOKYO' : {'nickname':'TOKYO-LCG2-lcg-ce01-atlas-lcgpbs','status':'OK'},
'TORONTO-LCG2' : {'nickname':'TORONTO-LCG2-bigmac-lcg-ce2-atlas-pbs','status':'OK'},
'TPATHENA' : {'nickname':'TPATHENA','status':'notOK'},
'TPPROD' : {'nickname':'TPPROD','status':'notOK'},
'TRIUMF' : {'nickname':'TRIUMF-LCG2-ce1-atlas-lcgpbs','status':'OK'},
'TRIUMF_DDM' : {'nickname':'TRIUMF_DDM','status':'notOK'},
'TRIUMF_REPRO' : {'nickname':'TRIUMF_REPRO','status':'notOK'},
'TW-FTT' : {'nickname':'TW-FTT-f-ce01-atlas-lcgpbs','status':'OK'},
'TWTEST' : {'nickname':'TWTEST','status':'notOK'},
'TestPilot' : {'nickname':'TestPilot','status':'notOK'},
'UAM-LCG2' : {'nickname':'UAM-LCG2-grid003-atlas-lcgpbs','status':'OK'},
'UBC' : {'nickname':'UBC-pbs','status':'OK'},
'UBC_PAUL_TEST' : {'nickname':'UBC-pbs','status':'notOK'},
'UIUC-HEP' : {'nickname':'UIUC-HEP-condor','status':'OK'},
'UCITB_EDGE7' : {'nickname':'UCITB_EDGE7-pbs','status':'OK'},
'UC_ATLAS_MWT2' : {'nickname':'UC_ATLAS_MWT2-condor','status':'OK'},
'UC_ATLAS_test' : {'nickname':'UC_ATLAS_MWT2-condor','status':'OK'},
'UC_Teraport' : {'nickname':'UC_Teraport-pbs','status':'notOK'},
'UMESHTEST' : {'nickname':'UMESHTEST','status':'notOK'},
'UNI-FREIBURG' : {'nickname':'UNI-FREIBURG-ce-atlas-pbs','status':'OK'},
'UTA-DPCC' : {'nickname':'UTA-DPCC-pbs','status':'OK'},
'UTA-DPCC-test' : {'nickname':'UTA-DPCC-test-pbs','status':'OK'},
'UTA_PAUL_TEST' : {'nickname':'UTA-SWT2-pbs','status':'notOK'},
'UTA_SWT2' : {'nickname':'UTA-SWT2-pbs','status':'OK'},
'UTD-HEP' : {'nickname':'UTD-HEP-pbs','status':'OK'},
'VICTORIA-LCG2' : {'nickname':'VICTORIA-LCG2-lcg-ce-general-lcgpbs','status':'OK'},
'Wuppertal' : {'nickname':'wuppertalprod-grid-ce-dg_long-lcgpbs','status':'OK'},
}
# cloud-MoverID mapping
PandaMoverIDs = {
'US' : 'BNL_ATLAS_DDM',
'CA' : 'TRIUMF_DDM',
'FR' : 'TRIUMF_DDM',
'IT' : 'TRIUMF_DDM',
'NL' : 'TRIUMF_DDM',
'DE' : 'TRIUMF_DDM',
'TW' : 'TRIUMF_DDM',
'UK' : 'TRIUMF_DDM',
'ES' : 'TRIUMF_DDM',
}
| 80.874372 | 115 | 0.541258 |
581d47d6e3101d07297475a1a84d27b2898647b8 | 1,002 | py | Python | explain.py | jcsalterego/gh-contest | 033f87c5338e3066ee4c80df2ee8e1ae4d6f1c7b | [
"BSD-3-Clause"
] | 1 | 2015-11-05T02:50:57.000Z | 2015-11-05T02:50:57.000Z | explain.py | jcsalterego/gh-contest | 033f87c5338e3066ee4c80df2ee8e1ae4d6f1c7b | [
"BSD-3-Clause"
] | null | null | null | explain.py | jcsalterego/gh-contest | 033f87c5338e3066ee4c80df2ee8e1ae4d6f1c7b | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from pprint import pprint
from matchmaker.database import *
import sys
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 22.266667 | 58 | 0.505988 |
581e242497be1d7d21237861371ea688ae66e1e5 | 3,862 | py | Python | qiskit/pulse/commands/command.py | EnriqueL8/qiskit-terra | 08b801f1f8598c4e44680b4a75c232ed92db0262 | [
"Apache-2.0"
] | 2 | 2019-06-28T19:58:42.000Z | 2019-07-26T05:04:02.000Z | qiskit/pulse/commands/command.py | EnriqueL8/qiskit-terra | 08b801f1f8598c4e44680b4a75c232ed92db0262 | [
"Apache-2.0"
] | 3 | 2018-11-13T17:33:37.000Z | 2018-12-03T09:35:00.000Z | qiskit/pulse/commands/command.py | EnriqueL8/qiskit-terra | 08b801f1f8598c4e44680b4a75c232ed92db0262 | [
"Apache-2.0"
] | 2 | 2017-12-03T15:48:14.000Z | 2018-03-11T13:08:03.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Base command.
"""
import re
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Union
import numpy as np
from qiskit.pulse.exceptions import PulseError
from qiskit.pulse.channels import Channel
def __eq__(self, other: 'Command'):
"""Two Commands are the same if they are of the same type
and have the same duration and name.
Args:
other: other Command
Returns:
bool: are self and other equal
"""
return (type(self) is type(other)) and (self.duration == other.duration)
def __hash__(self):
return hash((type(self), self.duration, self.name))
def __repr__(self):
return '%s(duration=%d, name="%s")' % (self.__class__.__name__,
self.duration,
self.name)
| 31.398374 | 98 | 0.599689 |
581f418d3f23d0acfebe881f3102cd64dfbdffef | 6,654 | py | Python | data_loader/data_loaders.py | brendanwallison/birds | b70c01b8c953dfa172c65a51f7bdf100e47853d3 | [
"MIT"
] | null | null | null | data_loader/data_loaders.py | brendanwallison/birds | b70c01b8c953dfa172c65a51f7bdf100e47853d3 | [
"MIT"
] | null | null | null | data_loader/data_loaders.py | brendanwallison/birds | b70c01b8c953dfa172c65a51f7bdf100e47853d3 | [
"MIT"
] | null | null | null | from torchvision import datasets, transforms
from torchvision.transforms import functional as TF
from base import BaseDataLoader
from six.moves import urllib
from parse_config import ConfigParser
# downloads
import requests
import json
from collections import Counter
import os
import errno
import csv
import numpy as np
import pandas as pd
import splitfolders
import pathlib
import torchaudio
import torch
# Note: horizontal dimension = 2 * time_window * sample_rate // n_fft + 1
# vertical crop = n_fft // 2 + 1
# Assumes one image file
# Assumes one image file
| 37.382022 | 172 | 0.622483 |
5820628189dcbe4c683064fd6478349ee7f02524 | 5,855 | py | Python | stockscanner/model/portfolio/portfolio.py | adityazagade/StockScanner | 4aecf886a8858757e4720b68d0af5ed94f4d371a | [
"Apache-2.0"
] | null | null | null | stockscanner/model/portfolio/portfolio.py | adityazagade/StockScanner | 4aecf886a8858757e4720b68d0af5ed94f4d371a | [
"Apache-2.0"
] | null | null | null | stockscanner/model/portfolio/portfolio.py | adityazagade/StockScanner | 4aecf886a8858757e4720b68d0af5ed94f4d371a | [
"Apache-2.0"
] | null | null | null | from datetime import date
from typing import List
from stockscanner.model.asset.asset_type import AssetType
from stockscanner.model.exceptions.exceptions import AssetNotFoundException
from stockscanner.model.asset.asset import Asset
from stockscanner.model.asset.cash import Cash
from stockscanner.model.asset.debt import Debt
from stockscanner.model.asset.equity import Equity
from stockscanner.model.strategies.strategy import Strategy
| 37.056962 | 113 | 0.640649 |
5820f326461279dab8c970a64d716534511d2f87 | 2,478 | py | Python | python/zdl/error_logger/error_logger/url_rules/report.py | wjiec/packages | 4ccaf8f717265a1f8a9af533f9a998b935efb32a | [
"MIT"
] | null | null | null | python/zdl/error_logger/error_logger/url_rules/report.py | wjiec/packages | 4ccaf8f717265a1f8a9af533f9a998b935efb32a | [
"MIT"
] | 1 | 2016-09-15T07:06:15.000Z | 2016-09-15T07:06:15.000Z | python/zdl/error_logger/error_logger/url_rules/report.py | wjiec/packages | 4ccaf8f717265a1f8a9af533f9a998b935efb32a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (C) 2017
import json
import time
from error_logger.url_rules import _base_url_rule
# from error_logger.net import sms_notification, email_notification
from error_logger.utils import generic
| 36.441176 | 84 | 0.506053 |
58230301eafe03e15cb587a17b91ac8b8de815f2 | 246 | py | Python | cli/commands/update.py | gamesbrainiac/cli | bba7285607a8644911f720d1ceb1404ab502bf00 | [
"Apache-2.0"
] | null | null | null | cli/commands/update.py | gamesbrainiac/cli | bba7285607a8644911f720d1ceb1404ab502bf00 | [
"Apache-2.0"
] | null | null | null | cli/commands/update.py | gamesbrainiac/cli | bba7285607a8644911f720d1ceb1404ab502bf00 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import click
from .. import cli
| 16.4 | 52 | 0.630081 |
5823914afc52a344ae37dba70fad832cd069531a | 2,397 | py | Python | test/test_sl/test_model/test_data.py | jlaumonier/mlsurvey | 373598d067c7f0930ba13fe8da9756ce26eecbaf | [
"MIT"
] | null | null | null | test/test_sl/test_model/test_data.py | jlaumonier/mlsurvey | 373598d067c7f0930ba13fe8da9756ce26eecbaf | [
"MIT"
] | null | null | null | test/test_sl/test_model/test_data.py | jlaumonier/mlsurvey | 373598d067c7f0930ba13fe8da9756ce26eecbaf | [
"MIT"
] | null | null | null | import unittest
import numpy as np
import pandas as pd
import mlsurvey as mls
| 35.776119 | 93 | 0.570296 |
582469a40acf21b2f0921b0060688c700c098a03 | 1,126 | py | Python | baidu_verify_response.py | CodingDogzxg/Verifycode_ocr | 6f1bdac2137993695cb4591afd1b47931680b204 | [
"MIT"
] | null | null | null | baidu_verify_response.py | CodingDogzxg/Verifycode_ocr | 6f1bdac2137993695cb4591afd1b47931680b204 | [
"MIT"
] | null | null | null | baidu_verify_response.py | CodingDogzxg/Verifycode_ocr | 6f1bdac2137993695cb4591afd1b47931680b204 | [
"MIT"
] | null | null | null | # encoding:utf-8
import requests
import base64
import time
'''
'''
# Baidu OCR "general_basic" endpoint; the access token must be generated in
# the Baidu AI console and filled in below before running.
request_url = "https://aip.baidubce.com/rest/2.0/ocr/v1/general_basic"
access_token = ''  # AItoken access
request_url = request_url + "?access_token=" + access_token
headers = {'content-type': 'application/x-www-form-urlencoded'}
for file_index in range(10000):
    file_name = 'vcode_imgs/' + str(file_index) + '.png'
    # Read and base64-encode the captcha image; the context manager closes
    # the file even if the read raises.
    with open(file_name, 'rb') as f_obj:
        img = base64.b64encode(f_obj.read())
    params = {"image": img}
    response = requests.post(request_url, data=params, headers=headers)
    if response:  # truthy for successful HTTP status codes; failures skipped
        answer = response.content.decode().split(",")[-1].split("\"")[-2].replace(' ', '').lower()
        # Keep the OCR result only when it looks like a plausible captcha
        # (fewer than 5 characters); otherwise record an empty answer.
        record = answer if len(answer) < 5 else ''
        with open('baidu_ocr_verify_response.json', 'a') as f:
            f.write('{}:{}\n'.format(str(file_index) + '.png', record))
    print('{}.png '.format(file_index))
    time.sleep(0.2)  # throttle requests to stay under the API rate limit
5824ba4bea2f64074dbcd56d9e462c95a3407e0f | 11,478 | py | Python | nets/efficientdet_training.py | BikesSaver/efficientdet-pytorch | c1e02484733cf2080ecb2ee57c184038a77a09e8 | [
"MIT"
] | 1 | 2020-09-17T00:51:38.000Z | 2020-09-17T00:51:38.000Z | nets/efficientdet_training.py | BikesSaver/efficientdet-pytorch | c1e02484733cf2080ecb2ee57c184038a77a09e8 | [
"MIT"
] | null | null | null | nets/efficientdet_training.py | BikesSaver/efficientdet-pytorch | c1e02484733cf2080ecb2ee57c184038a77a09e8 | [
"MIT"
] | null | null | null |
from random import shuffle
import numpy as np
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
import cv2
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
from PIL import Image
from .RepulsionLoss.my_repulsion_loss import repulsion
| 37.756579 | 141 | 0.560202 |
58252e686b16a8b93824251a6782b7d24afd2761 | 267 | py | Python | project/wsgi.py | devluci/django-rest-base-boilerplate | 0cf512e00aca66ebf9908351527d701cd421ccd4 | [
"MIT"
] | null | null | null | project/wsgi.py | devluci/django-rest-base-boilerplate | 0cf512e00aca66ebf9908351527d701cd421ccd4 | [
"MIT"
] | null | null | null | project/wsgi.py | devluci/django-rest-base-boilerplate | 0cf512e00aca66ebf9908351527d701cd421ccd4 | [
"MIT"
] | null | null | null | import os
from django.core.wsgi import get_wsgi_application
from rest_base.utils import dotenv
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings')
dotenv.load(os.path.join(os.path.dirname(__file__), '../.env'))
application = get_wsgi_application()
| 26.7 | 67 | 0.797753 |
5825efbd85281c5ef1426be58d4c0871b10dcdf9 | 3,445 | py | Python | tests/test_coco_dataset.py | petersiemen/CVND---Image-Captioning-Project | 53d15c5f2b9d5e04b007f4f8b1e4f9dd17425c06 | [
"MIT"
] | null | null | null | tests/test_coco_dataset.py | petersiemen/CVND---Image-Captioning-Project | 53d15c5f2b9d5e04b007f4f8b1e4f9dd17425c06 | [
"MIT"
] | null | null | null | tests/test_coco_dataset.py | petersiemen/CVND---Image-Captioning-Project | 53d15c5f2b9d5e04b007f4f8b1e4f9dd17425c06 | [
"MIT"
] | null | null | null | from .context import CoCoDataset
import os
from torchvision import transforms
import torch.utils.data as data
from src.data_loader import get_loader
from context import COCO_SMALL
from context import clean_sentence
| 39.147727 | 93 | 0.608128 |
5828ffc478a57b5d3a54d1d5409d86dcb72100d1 | 5,019 | py | Python | test/retro-fuse-test.py | jaylogue/retro-fuse | b300865c1aa4c38930adea66de364f182c73b3b5 | [
"Apache-2.0"
] | 28 | 2021-02-23T06:00:16.000Z | 2022-02-28T13:38:48.000Z | test/retro-fuse-test.py | jaylogue/retro-fuse | b300865c1aa4c38930adea66de364f182c73b3b5 | [
"Apache-2.0"
] | 3 | 2021-09-22T12:37:59.000Z | 2022-02-01T00:33:25.000Z | test/retro-fuse-test.py | jaylogue/retro-fuse | b300865c1aa4c38930adea66de364f182c73b3b5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright 2021 Jay Logue
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file A test driver for testing retro-fuse filesystem handlers.
#
import os
import sys
import unittest
import argparse
scriptName = os.path.basename(__file__)
scriptDirName = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
# Parse command line arguments
argParser = argparse.ArgumentParser()
argParser.add_argument('-s', '--simh', dest='simhCmd', default='pdp11',
help='Path to pdp11 simh executable')
argParser.add_argument('-v', '--verbose', dest='verbosity', action='store_const', const=2, default=1,
help='Verbose output')
argParser.add_argument('-q', '--quiet', dest='verbosity', action='store_const', const=0,
help='Quiet output')
argParser.add_argument('-f', '--failfast', dest='failfast', action='store_true', default=False,
help='Stop on first test failure')
argParser.add_argument('-k', '--keep', dest='keepFS', action='store_true', default=False,
help='Retain the test filesystem on exit')
argParser.add_argument('-i', '--fs-image', dest='fsImage',
help='Use specified file/device as backing store for test filesystem (implies -k)')
argParser.add_argument('fsHandler', help='Filesystem handler executable to be tested')
testOpts = argParser.parse_args()
if testOpts.fsImage is not None:
testOpts.keepFS = True
# Verify access to filesystem handler executable
if not os.access(testOpts.fsHandler, os.F_OK):
print(f'{scriptName}: File not found: {testOpts.fsHandler}', file=sys.stderr)
sys.exit(1)
if not os.access(testOpts.fsHandler, os.X_OK):
print(f'{scriptName}: Unable to execute filesystem handler: {testOpts.fsHandler}', file=sys.stderr)
sys.exit(1)
# Load the appropriate test cases
fsHandlerBaseName = os.path.basename(testOpts.fsHandler)
if fsHandlerBaseName == 'bsd29fs':
import BSD29Tests
testSuite = unittest.TestLoader().loadTestsFromModule(BSD29Tests)
elif fsHandlerBaseName == 'v7fs':
import V7Tests
testSuite = unittest.TestLoader().loadTestsFromModule(V7Tests)
elif fsHandlerBaseName == 'v6fs':
import V6Tests
testSuite = unittest.TestLoader().loadTestsFromModule(V6Tests)
else:
print(f'{scriptName}: Unknown filesystem handler: {testOpts.fsHandler}', file=sys.stderr)
print('Expected a file named v6fs, v7fs or bsd29fs', file=sys.stderr)
sys.exit(1)
# Run the tests
if testOpts.verbosity > 0:
resultStream = sys.stderr
else:
resultStream = open(os.devnull, 'a')
testRunner = unittest.TextTestRunner(stream=resultStream, resultclass=TestResult, verbosity=testOpts.verbosity, failfast=testOpts.failfast)
result = testRunner.run(testSuite)
sys.exit(0 if result.wasSuccessful() else 1)
| 38.312977 | 139 | 0.695557 |
582a2d15de4e22e6a4241b45670672383e57c857 | 387 | py | Python | docker/app.py | dramasamy/kubernetes_training | a5f48d540b7b6e9a79b5ab60f62a13a792f1b0e5 | [
"Apache-2.0"
] | 1 | 2022-03-22T22:31:32.000Z | 2022-03-22T22:31:32.000Z | docker/app.py | dramasamy/training | af7b9352b56c10aaa957062f24f1302a7a4c5797 | [
"Apache-2.0"
] | null | null | null | docker/app.py | dramasamy/training | af7b9352b56c10aaa957062f24f1302a7a4c5797 | [
"Apache-2.0"
] | null | null | null | #! /bin/python
from flask import Flask
# Flask application instance; route handlers elsewhere register against it.
app = Flask(__name__)
if __name__ == '__main__':
    # Bind to all interfaces so the server is reachable from outside the
    # host/container (development server, not for production use).
    app.run(host='0.0.0.0')
| 13.821429 | 40 | 0.596899 |
582b2e616da4b6c095b0fcc22d4f757b4b8fddc7 | 4,374 | py | Python | creme/cluster/k_means.py | tweakyllama/creme | 6bb8e34789947a943e7e6a8a1af1341e4c1de144 | [
"BSD-3-Clause"
] | null | null | null | creme/cluster/k_means.py | tweakyllama/creme | 6bb8e34789947a943e7e6a8a1af1341e4c1de144 | [
"BSD-3-Clause"
] | null | null | null | creme/cluster/k_means.py | tweakyllama/creme | 6bb8e34789947a943e7e6a8a1af1341e4c1de144 | [
"BSD-3-Clause"
] | 2 | 2021-06-20T09:29:38.000Z | 2021-06-23T07:47:21.000Z | import collections
import numpy as np
from sklearn import utils
from .. import base
__all__ = ['KMeans']
| 37.384615 | 123 | 0.633973 |
582ee3ae3eed760c8ee30d3cb820c5796139122b | 42,165 | py | Python | fasttrips/TAZ.py | pedrocamargo/fast-trips | a2549936b2707b00d6c21b4e6ae4be8fefd0aa46 | [
"Apache-2.0"
] | 3 | 2017-11-03T00:18:23.000Z | 2020-11-30T18:54:46.000Z | fasttrips/TAZ.py | pedrocamargo/fast-trips | a2549936b2707b00d6c21b4e6ae4be8fefd0aa46 | [
"Apache-2.0"
] | null | null | null | fasttrips/TAZ.py | pedrocamargo/fast-trips | a2549936b2707b00d6c21b4e6ae4be8fefd0aa46 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import object
__copyright__ = "Copyright 2015 Contributing Entities"
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import os
import numpy as np
import pandas as pd
from .Error import NetworkInputError
from .Logger import FastTripsLogger
from .Route import Route
from .Stop import Stop
from .Transfer import Transfer
| 57.681259 | 157 | 0.628958 |
58309191f39ca5397068401c1360251a2a11c48a | 2,686 | py | Python | tests/test_stardist2D.py | ianbgroves/stardist | 6524c27d01c625dabfd75b1443dd46ccb1cb3dcd | [
"BSD-3-Clause"
] | 1 | 2021-02-05T11:59:39.000Z | 2021-02-05T11:59:39.000Z | tests/test_stardist2D.py | ianbgroves/stardist | 6524c27d01c625dabfd75b1443dd46ccb1cb3dcd | [
"BSD-3-Clause"
] | 1 | 2020-06-17T09:06:29.000Z | 2020-06-17T09:06:29.000Z | tests/test_stardist2D.py | ianbgroves/stardist | 6524c27d01c625dabfd75b1443dd46ccb1cb3dcd | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from stardist import star_dist, relabel_image_stardist
import pytest
from utils import random_image, real_image2d, check_similar, circle_image
if __name__ == '__main__':
lbl1, lbl2 = test_relabel_consistency(32,eps = (.7,1), plot = True)
| 36.794521 | 95 | 0.655249 |
583228f93313973cc02c96e9d032138aeb10b053 | 26,395 | py | Python | all_call/infer_new.py | jbudis/dante | 90177c33825d5f9ce3fba5463092fbcf20b72fe2 | [
"Apache-2.0"
] | 4 | 2018-09-28T14:50:47.000Z | 2021-08-09T12:46:12.000Z | all_call/infer_new.py | jbudis/dante | 90177c33825d5f9ce3fba5463092fbcf20b72fe2 | [
"Apache-2.0"
] | 6 | 2019-01-02T13:08:31.000Z | 2021-03-25T21:45:40.000Z | all_call/infer_new.py | jbudis/dante | 90177c33825d5f9ce3fba5463092fbcf20b72fe2 | [
"Apache-2.0"
] | 1 | 2017-12-12T10:38:26.000Z | 2017-12-12T10:38:26.000Z | import math
import functools
from scipy.stats import binom
import numpy as np
import itertools
import sys
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
from copy import copy
def combine_distribs(deletes, inserts):
    """
    Combine insert and delete models/distributions by discrete convolution.

    Each insert probability ``inserts[i]`` shifts the delete distribution
    right by ``i`` positions and scales it; the shifted copies are summed.
    Insert terms past the zero-tail of ``deletes`` are only included while
    their probability exceeds 1e-4, for speed.

    :param deletes: ndarray - delete distribution
    :param inserts: ndarray - insert distribution (same length as deletes)
    :return: ndarray - combined distribution of the same length
    """
    # Number of insert terms worth convolving: at least one past the zero
    # tail of the delete distribution, extended while insert mass remains
    # non-negligible.
    to_fill = sum(deletes == 0.0) + 1
    while to_fill < len(inserts) and inserts[to_fill] > 0.0001:
        to_fill += 1
    len_del = len(deletes)
    end_distr = np.zeros_like(deletes, dtype=float)
    # Shift-and-add: an insert of size i moves delete mass up by i slots.
    for i, a in enumerate(inserts[:to_fill]):
        end_distr[i:] += (deletes * a)[:len_del - i]
    return end_distr
def const_rate(n, p1=0.0, p2=1.0, p3=1.0):
    """
    Rate function that ignores the allele number entirely.

    Shares the (n, p1, p2, p3) signature with the other rate functions so
    that they are interchangeable; only ``p1`` is used.

    :param n: int - allele number (unused)
    :param p1: float - constant rate returned for every n
    :param p2: float - unused, kept for signature compatibility
    :param p3: float - unused, kept for signature compatibility
    :return: float - p1
    """
    return p1
def linear_rate(n, p1=0.0, p2=1.0, p3=1.0):
    """
    Affine rate function of the allele number.

    :param n: int - allele number
    :param p1: float - intercept
    :param p2: float - slope
    :param p3: float - unused, kept for signature compatibility
    :return: float - p1 + p2 * n
    """
    slope_term = p2 * n
    return p1 + slope_term
def n2_rate(n, p1=0.0, p2=1.0, p3=1.0):
    """
    Quadratic rate function of the allele number.

    :param n: int - allele number
    :param p1: float - constant coefficient
    :param p2: float - linear coefficient
    :param p3: float - quadratic coefficient
    :return: float - p1 + p2 * n + p3 * n * n
    """
    quadratic_term = p3 * n * n
    return p1 + p2 * n + quadratic_term
def exp_rate(n, p1=0.0, p2=1.0, p3=1.0):
    """
    Exponential rate function of the allele number.

    :param n: int - allele number
    :param p1: float - constant offset
    :param p2: float - multiplier of the exponential term
    :param p3: float - growth rate inside the exponential
    :return: float - p1 + p2 * e^(p3 * n)
    """
    exponential_term = p2 * math.exp(p3 * n)
    return p1 + exponential_term
def clip(value, minimal, maximal):
    """
    Clamp ``value`` into the closed interval <minimal, maximal>.

    The lower bound is applied first, then the upper bound, matching
    ``min(max(minimal, value), maximal)`` exactly.

    :param value: ? - value to clamp
    :param minimal: ? - lower bound
    :param maximal: ? - upper bound
    :return: ? - clipped value
    """
    lower_bounded = max(minimal, value)
    return min(lower_bounded, maximal)
def model_full(rng, model_params, n, rate_func=linear_rate):
    """
    Build the combined insert/delete binomial model for one target allele.

    :param rng: int - max_range of the distribution (number of points)
    :param model_params: 4-tuple - (p1, p2, p3) delete-rate parameters and
        q, the per-unit insert probability
    :param n: int - target allele number
    :param rate_func: function - rate function for deletes
    :return: ndarray - combined distribution of length rng
    """
    p1, p2, p3, q = model_params
    support = np.arange(rng)
    # Delete success probability is the complement of the delete rate,
    # clamped into the valid probability range [0, 1].
    delete_prob = clip(1 - rate_func(n, p1, p2, p3), 0.0, 1.0)
    deletes = binom.pmf(support, n, delete_prob)
    inserts = binom.pmf(support, n, q)
    return combine_distribs(deletes, inserts)
def model_template(rng, model_params, rate_func=linear_rate):
    """
    Pre-bind everything except the allele number in :func:`model_full`.

    :param rng: int - max_range of the distribution
    :param model_params: 4-tuple - parameters for inserts and deletes
    :param rate_func: function - rate function for deletes
    :return: callable taking a single argument n (target allele number)
    """
    bound_model = functools.partial(model_full, rng, model_params, rate_func=rate_func)
    return bound_model
| 44.21273 | 183 | 0.633567 |
583374a576c3edb6be71e460848c9177cb1eee6a | 18,398 | py | Python | createbag.py | axfelix/moveit | a0d4207fdd90af8f05a5c55b4b247757cd6d7bb2 | [
"Unlicense"
] | null | null | null | createbag.py | axfelix/moveit | a0d4207fdd90af8f05a5c55b4b247757cd6d7bb2 | [
"Unlicense"
] | null | null | null | createbag.py | axfelix/moveit | a0d4207fdd90af8f05a5c55b4b247757cd6d7bb2 | [
"Unlicense"
] | null | null | null | """
GUI tool to create a Bag from a filesystem folder.
"""
import sys
import os
import shutil
import bagit
import platform
import random
import string
import re
from time import strftime
import subprocess
from paramiko import SSHClient
from paramiko import AutoAddPolicy
from paramiko import AuthenticationException
from scp import SCPClient
from distutils.dir_util import copy_tree
import zipfile
import hashlib
import tempfile
from zipfile import ZipFile
import platform
pyversion = platform.python_version_tuple()[0]
if pyversion == "2":
from urllib import urlencode
import urllib2
else:
from urllib.parse import urlencode
import urllib.request as urllib2
# These are toggled at build time. TODO: switch to argument parser.
# toggle this if depositing to an Active Directory server
internalDepositor = 0
# toggle this if depositing to SFU Library
radar = 0
# toggle this if bypassing the Bagit step
nobag = 0
# toggle this if bypassing the transfer and only creating a Bag on desktop
ziponly = 1
bagit_checksum_algorithms = ['md5']
confirmation_message_win = "The transfer package will be created and placed on your\n desktop after this; large packages may take a moment.\n\nAre all the transfer details correct?\n\n"
#confirmation_message_mac = "The transfer package will be created and placed on your desktop after this; large packages may take a moment.\n\nAre all the transfer details correct?\n\n"
confirmation_message_mac = "The transfer package will be created and placed on your desktop after this; large packages may take a moment.\n\n"
session_message = "Session Number"
session_message_final_win = "The transfer package will be created and placed on your\n desktop after this; large packages may take a moment.\n\nSession Number"
session_message_final_mac = "The transfer package will be created and placed on your desktop after this; large packages may take a moment.\n\nSession Number"
transfer_message = "Transfer Number"
if internalDepositor == 0:
username_message = "Username"
password_message = "Password"
else:
username_message = "SFU Computing ID"
password_message = "SFU Computing password"
close_session_message = "Is this the final session for this transfer?\nThe transfer will begin in the background after this \nand let you know when it is complete."
close_session_osx_title = "Is this the final session for this transfer?"
close_session_osx_informative = "The transfer will begin in the background and let you know when it is complete."
if radar == 0:
sfu_success_message = "Files have been successfuly transferred to SFU Archives. \nAn archivist will be in contact with you if further attention is needed."
bag_success_message = "Files have been successfully packaged and placed in a new folder on your desktop for transfer."
else:
sfu_success_message = "Files have been successfuly transferred to SFU Library. \nA librarian will be in contact with you if further attention is needed."
password_message = "Please input your SFU Computing password. \nTransfer will commence after clicking OK and you will be notified when it is complete."
sfu_failure_message = "Transfer did not complete successfully. \nPlease contact moveit@sfu.ca for help."
if platform.system() != 'Darwin' and platform.system() != 'Windows':
# The Linux/Gtk config has been removed for now
from gi.repository import Gtk
elif platform.system() == 'Windows':
from PyQt4 import QtGui, QtCore
elif platform.system() == 'Darwin':
# Sets up Cocoadialog for error message popup on OSX.
CD_PATH = os.path.join("~/.createbag/", "CocoaDialog.app/Contents/MacOS/CocoaDialog")
# Dummied temporarily because of issues w/ CocoaDialog under High Sierra
# Windows/Qt-specific code (can also work on Linux but Gtk is nicer)
if platform.system() == 'Windows':
app = QtGui.QApplication(sys.argv)
ex = QtChooserWindow()
sys.exit(app.exec_())
# OSX-specific code.
elif platform.system() == 'Darwin':
# add progress bar code eventually
# Python 3 needs .decode() because Cocoa returns bytestrings
archivesUsername = cocoaUsername().decode()
if ziponly == 0:
archivesPassword = cocoaPassword().decode()
else:
archivesPassword = ""
transferno = cocoaTransferNo().decode()
sessionno = cocoaSessionNo().decode()
confirmation_mac = confirmation_message_mac + "\nUsername: " + archivesUsername + "\nTransfer: " + transferno + "-" + sessionno
confirmation = cocoaConfirmation(confirmation_mac)
bag_dir = make_bag(sys.argv[1])
parent_path = os.path.basename(os.path.normpath(sys.argv[1]))
if ziponly == 0:
close_session = cocoaCloseSession()
else:
close_session = 0
script_output = check_zip_and_send(bag_dir, sessionno, transferno, archivesUsername, archivesPassword, close_session, parent_path)
if script_output == "bagged":
cocoaTransferSuccess(bag_success_message)
else:
cocoaTransferSuccess(sfu_success_message)
| 35.655039 | 236 | 0.72709 |
583491d9c92a8b53e562e95c5e8cebcf67dc3f00 | 10,937 | py | Python | code/python-neo/neo/core/basesignal.py | qniksefat/macaque_brain_causality_test | 24cd5caee3ae79066ca37844cab931d04dcad977 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null | code/python-neo/neo/core/basesignal.py | qniksefat/macaque_brain_causality_test | 24cd5caee3ae79066ca37844cab931d04dcad977 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null | code/python-neo/neo/core/basesignal.py | qniksefat/macaque_brain_causality_test | 24cd5caee3ae79066ca37844cab931d04dcad977 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
'''
This module implements :class:`BaseSignal`, an array of signals.
This is a parent class from which all signal objects inherit:
:class:`AnalogSignal` and :class:`IrregularlySampledSignal`
:class:`BaseSignal` inherits from :class:`quantities.Quantity`, which
inherits from :class:`numpy.array`.
Inheritance from :class:`numpy.array` is explained here:
http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
In brief:
* Constructor :meth:`__new__` for :class:`BaseSignal` doesn't exist.
Only child objects :class:`AnalogSignal` and :class:`IrregularlySampledSignal`
can be created.
'''
# needed for Python 3 compatibility
from __future__ import absolute_import, division, print_function
import copy
import logging
import numpy as np
import quantities as pq
from neo.core.baseneo import BaseNeo, MergeError, merge_annotations
from neo.core.dataobject import DataObject, ArrayDict
from neo.core.channelindex import ChannelIndex
logger = logging.getLogger("Neo")
| 37.713793 | 98 | 0.633172 |
5835a4f4779f435b367bd40c05663242713c67ad | 3,038 | py | Python | Morocco model/scripts/cropland_processing.py | KTH-dESA/FAO | 74459217a9e8ad8107b1d3a96fd52eebd93daebd | [
"MIT"
] | 3 | 2020-09-17T11:12:52.000Z | 2021-03-31T09:24:02.000Z | Morocco model/scripts/cropland_processing.py | KTH-dESA/FAO | 74459217a9e8ad8107b1d3a96fd52eebd93daebd | [
"MIT"
] | 101 | 2019-10-02T10:16:28.000Z | 2021-06-05T06:42:55.000Z | Morocco model/scripts/cropland_processing.py | KTH-dESA/FAO | 74459217a9e8ad8107b1d3a96fd52eebd93daebd | [
"MIT"
] | 2 | 2020-02-23T13:28:00.000Z | 2021-03-31T10:02:46.000Z | import sys
sys.path.append("..") #this is to add the avobe folder to the package directory
import geopandas as gpd
import pandas as pd
import numpy as np
import os
from nexustool.gis_tools import download_data, create_time_data, get_area_share, get_zonal_stats
from nexustool.weap_tools import reproject_raster, sample_raster
## Downloading solar irradiation and water table depth data
url = 'https://biogeo.ucdavis.edu/data/worldclim/v2.1/base/wc2.1_30s_srad.zip'
file_path = os.path.join('data', 'gis', 'srad', 'wc2.1_30s_srad.zip')
download_data(url, file_path)
url = 'https://souss-massa-dev.s3.us-east-2.amazonaws.com/post_build/Africa_model_wtd_v2.nc'
file_path = os.path.join('data', 'gis', 'wtd', 'Africa_model_wtd_v2.nc')
download_data(url, file_path)
## Reading the input data
demand_path = str(snakemake.input.demand_points)
cropland_path = str(snakemake.input.cropland)
crop_df = pd.read_csv(cropland_path, encoding='utf-8')
geometry = crop_df['WKT'].map(shapely.wkt.loads)
cropland = gpd.GeoDataFrame(crop_df.drop(columns=['WKT']), crs="EPSG:26192", geometry=geometry)
provinces = gpd.read_file(os.path.join('data', 'gis', 'admin', 'provinces.gpkg'), encoding='utf-8')
output_file = str(snakemake.output)
output_folder = output_file.split(os.path.basename(output_file))[0]
## Convert coordenate reference system (crs)
MerchidSudMoroc = 26192
for gdf in [provinces, provinces]:
gdf.to_crs(epsg=MerchidSudMoroc, inplace=True)
cropland = cropland.loc[cropland.area_m2>=100] #choose
## Solar irradiation zonal statistics
Loops through the 12 months of the year and gets the mean solar irradiation of each month within each cropland polygon
cropland.to_crs(epsg=4326, inplace=True)
for month in range(1, 13):
cropland = get_zonal_stats(cropland,
os.path.join('data', 'gis', 'srad',
f'wc2.1_30s_srad_{str(month).zfill(2)}.tif'),
['mean'], all_touched=True).rename(columns={'mean': f'srad{month}'})
## Water table depth zonal statistics
cropland.crs = 4326
cropland = get_zonal_stats(cropland,
os.path.join('data', 'gis', 'wtd',
'Africa_model_wtd_v2.nc'),
['mean'], all_touched=True).rename(columns={'mean': 'wtd'})
cropland.crs = 4326
cropland.to_crs(epsg=MerchidSudMoroc, inplace=True)
## Creating time series data
df_cropland = create_time_data(cropland, 2019, 2050)
## Calculating the area share of each croplan area within each province
cropland.loc[cropland['province']=='Inezgane-At Melloul', 'province'] = 'Taroudannt' #Including Inezgane-At Melloul irrigated area into results from Taroudant due to lack of data for the former
cropland['area_share'] = get_area_share(cropland, 'province', 'area_m2')
df_cropland = pd.merge(df_cropland, cropland[['Demand point', 'area_share']], on='Demand point')
os.makedirs(output_folder, exist_ok = True)
df_cropland.to_csv(output_file, index=False) | 40.506667 | 195 | 0.711982 |
58394701554d3a507c68ce7bd347905779a7cb27 | 891 | py | Python | dl_data_validation_toolset/framework/report_gen/group.py | kwierman/dl_data_validation_toolset | fb0486abd000ba28c6474f8979762c92fb4ee038 | [
"MIT"
] | 1 | 2017-08-24T00:46:47.000Z | 2017-08-24T00:46:47.000Z | dl_data_validation_toolset/framework/report_gen/group.py | kwierman/dl_data_validation_toolset | fb0486abd000ba28c6474f8979762c92fb4ee038 | [
"MIT"
] | 177 | 2017-04-10T23:03:27.000Z | 2022-03-28T22:07:54.000Z | dl_data_validation_toolset/framework/report_gen/group.py | HEP-DL/dl_data_validation_toolset | fb0486abd000ba28c6474f8979762c92fb4ee038 | [
"MIT"
] | null | null | null | from .file import FileGenerator
from ..report.group import GroupReport
import logging
import asyncio
import os
| 31.821429 | 75 | 0.725028 |
583a1302a3f7562a97c1476d70bc500c24d60c4f | 174 | py | Python | glanceclient/common/exceptions.py | citrix-openstack-build/python-glanceclient | 32d9c42816b608220ae5692e573142dab6534604 | [
"Apache-2.0"
] | 1 | 2019-09-11T11:56:19.000Z | 2019-09-11T11:56:19.000Z | tools/dockerize/webportal/usr/lib/python2.7/site-packages/glanceclient/common/exceptions.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | null | null | null | tools/dockerize/webportal/usr/lib/python2.7/site-packages/glanceclient/common/exceptions.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | null | null | null | # This is here for compatability purposes. Once all known OpenStack clients
# are updated to use glanceclient.exc, this file should be removed
from glanceclient.exc import *
| 43.5 | 75 | 0.804598 |
583a2eef001a72cf9b9737ee6ef5ed10dc5f494d | 1,458 | py | Python | api/scpca_portal/views/filter_options.py | AlexsLemonade/scpca-portal | d60d6db5abe892ed58764128269df936778c6fd7 | [
"BSD-3-Clause"
] | null | null | null | api/scpca_portal/views/filter_options.py | AlexsLemonade/scpca-portal | d60d6db5abe892ed58764128269df936778c6fd7 | [
"BSD-3-Clause"
] | 85 | 2021-07-27T14:33:55.000Z | 2022-03-28T20:18:41.000Z | api/scpca_portal/views/filter_options.py | AlexsLemonade/scpca-portal | d60d6db5abe892ed58764128269df936778c6fd7 | [
"BSD-3-Clause"
] | null | null | null | from django.http import JsonResponse
from rest_framework import status, viewsets
from scpca_portal.models import Project
| 33.136364 | 75 | 0.577503 |
583a4439342b3be3a1f5a61fbbd79630bf4f80cd | 409 | py | Python | cords/selectionstrategies/SL/__init__.py | krishnatejakk/AUTOMATA | fd0cf58058e39660f88d9d6b4101e30a497f6ce2 | [
"MIT"
] | null | null | null | cords/selectionstrategies/SL/__init__.py | krishnatejakk/AUTOMATA | fd0cf58058e39660f88d9d6b4101e30a497f6ce2 | [
"MIT"
] | null | null | null | cords/selectionstrategies/SL/__init__.py | krishnatejakk/AUTOMATA | fd0cf58058e39660f88d9d6b4101e30a497f6ce2 | [
"MIT"
] | 1 | 2022-03-16T05:55:12.000Z | 2022-03-16T05:55:12.000Z | from .craigstrategy import CRAIGStrategy
from .dataselectionstrategy import DataSelectionStrategy
from .glisterstrategy import GLISTERStrategy
from .randomstrategy import RandomStrategy
from .submodularselectionstrategy import SubmodularSelectionStrategy
from .gradmatchstrategy import GradMatchStrategy
from .fixedweightstrategy import FixedWeightStrategy
from .adapweightsstrategy import AdapWeightsStrategy | 51.125 | 68 | 0.904645 |
583a53eef1dad89d42938f5028c87aba4efb30bb | 10,917 | py | Python | pycost/rocch.py | tfawcett/pycost | 69f96866295dba937a23f36c8f24f2f6acdaccbd | [
"BSD-3-Clause"
] | 1 | 2019-10-05T10:37:47.000Z | 2019-10-05T10:37:47.000Z | pycost/rocch.py | tfawcett/pycost | 69f96866295dba937a23f36c8f24f2f6acdaccbd | [
"BSD-3-Clause"
] | null | null | null | pycost/rocch.py | tfawcett/pycost | 69f96866295dba937a23f36c8f24f2f6acdaccbd | [
"BSD-3-Clause"
] | 1 | 2020-06-12T17:13:14.000Z | 2020-06-12T17:13:14.000Z | """
Metrics to calculate and manipulate the ROC Convex Hull on a classification task given scores.
"""
# Author: Tom Fawcett <tom.fawcett@gmail.com>
from collections import namedtuple
from math import sqrt
from typing import List, Dict, Tuple, Union
# DESCRIPTION:
#
# This program computes the convex hull of a set of ROC points
# (technically, the upper left triangular convex hull, bounded
# by (0,0) and (1,1)). The ROC Convex Hull is used to find dominant
# (and locally best) classifiers in ROC space. For more information
# on the ROC convex hull and its uses, see the references below.
#
# FP and TP are the False Positive (X axis) and True Positive (Y axis)
# values for the point.
#
#
# REFERENCES:
#
# The first paper below is probably best for an introduction and
# general discussion of the ROC Convex Hull and its uses.
#
# 1) Provost, F. and Fawcett, T. "Analysis and visualization of
# classifier performance: Comparison under imprecise class and cost
# distributions". In Proceedings of the Third International
# Conference on Knowledge Discovery and Data Mining (KDD-97),
# pp.43-48. AAAI Press.
#
# 2) Provost, F. and Fawcett, T. "Robust Classification Systems for
# Imprecise Environments".
#
# 3) Provost, F., Fawcett, T., and Kohavi, R. "The Case
# Against Accuracy Estimation for Comparing Induction Algorithms".
# Available from:
#
#
# BUG REPORTS / SUGGESTIONS / QUESTIONS: Tom Fawcett <tom.fawcett@gmail.com>
#
#
"""
Typical use is something like this:
rocch = ROCCH(keep_intermediate=False)
for clf in classifiers:
y_scores = clf.decision_function(y_test)
rocch.fit(clfname, roc_curve(y_scores, y_true))
...
plt.plot(rocch.hull())
rocch.describe()
"""
Point = namedtuple( "Point", ["x", "y", "clfname"] )
Point.__new__.__defaults__ = ("",) # make clfname optional
INFINITY: float = float( "inf" )
def calculate_slope(pt1: "Point", pt2: "Point"):
    """
    Return the slope of the line from pt1 to pt2.

    A vertical segment (pt2.x == pt1.x) yields positive infinity instead of
    raising ZeroDivisionError.

    :param pt1: Point - first point
    :param pt2: Point - second point
    :return: float - (pt2.y - pt1.y) / (pt2.x - pt1.x), or inf when vertical
    """
    dx = pt2.x - pt1.x
    dy = pt2.y - pt1.y
    if dx == 0:
        return float("inf")
    else:
        return dy / dx
def _check_hull(hull):
    """Verify that a list of hull points is convex.

    Simple testing utility: raises AssertionError on the first concave
    triple of consecutive points. Colinear segments (turn == 0) are not
    considered violations.

    :param hull: A list of Point instances describing an ROC convex hull.
    :return: None
    """
    # Slide a window of three consecutive points along the hull.
    for segment in zip(hull, hull[1:], hull[2:]):
        assert turn(*segment) <= 0, f"Concavity in hull: {list(segment)}"
def ROC_order(pt1: "Point", pt2: "Point") -> bool:
    """Predicate for determining ROC order for sorting.

    True when pt1 strictly precedes pt2 lexicographically: either pt1's x is
    ahead of pt2's x, or the x's are equal and pt1's y is ahead of pt2's y.

    :param pt1: Point - first point
    :param pt2: Point - second point
    :return: bool - True if pt1 sorts before pt2
    """
    return (pt1.x < pt2.x) or (pt1.x == pt2.x and pt1.y < pt2.y)
def compute_theta(p1: "Point", p2: "Point") -> float:
    """Compute theta, an ordering function on a point pair.

    Theta has the same properties as the angle between the horizontal axis
    and the line segment between the points, but is much faster to compute
    than arctangent. Range is 0 to 360. Defined on P.353 of
    _Algorithms in C_.

    :param p1: Point - origin of the segment
    :param p2: Point - end of the segment
    :return: float - pseudo-angle in [0, 360)
    """
    dx = p2.x - p1.x
    dy = p2.y - p1.y
    if dx == 0 and dy == 0:
        # Coincident points: define theta as 0.
        t = 0
    else:
        # First-quadrant pseudo-angle in [0, 1].
        t = dy / (abs(dx) + abs(dy))
    # Adjust for quadrants two through four.
    if dx < 0:
        t = 2 - t
    elif dy < 0:
        t = 4 + t
    return t * 90.0
def euclidean(p1: "Point", p2: "Point") -> float:
    """Return the Euclidean distance between two points.

    :param p1: Point - first point
    :param p2: Point - second point
    :return: float - straight-line distance between p1 and p2
    """
    dx = p1.x - p2.x
    dy = p1.y - p2.y
    return sqrt(dx ** 2 + dy ** 2)
def turn(a, b, c: Point) -> float:
    """Cross product of (a->b) and (a->c): the turn direction at b.

    Going a -> b -> c:
    positive => CCW (counterclockwise)
    negative => CW (clockwise)
    zero     => colinear
    See: https://algs4.cs.princeton.edu/91primitives/

    >>> a = Point(1,1)
    >>> b = Point(2,2)
    >>> turn(a, b, Point(3,2))
    -1
    >>> turn(a, b, Point(2,3))
    1
    >>> turn(a, b, Point(3,3))
    0
    >>> turn(a, b, Point(1.5, 1.5)) == 0
    True
    >>> turn(a, b, Point(1.5,1.7)) > 0
    True
    :param Point a:
    :param Point b:
    :param Point c:
    :rtype: float
    """
    ab_x = b.x - a.x
    ab_y = b.y - a.y
    ac_x = c.x - a.x
    ac_y = c.y - a.y
    return ab_x * ac_y - ac_x * ab_y
# When executed as a script, run the embedded doctests (e.g. those in turn()).
if __name__ == "__main__":
    import doctest
    doctest.testmod()
# End of rocch.py
| 33.798762 | 100 | 0.612989 |
583a8bbe4d63a96ce53555ed1fbf8f8d31b49bdb | 846 | py | Python | all_raspi_code_backup/DriveTesting.py | lord-pradhan/SnowBot | 82a0b3439dc203bf27725e293d6e56bcad720c09 | [
"MIT"
] | 2 | 2020-05-31T07:37:59.000Z | 2021-03-24T13:43:39.000Z | all_raspi_code_backup/DriveTesting.py | lord-pradhan/SnowBot | 82a0b3439dc203bf27725e293d6e56bcad720c09 | [
"MIT"
] | null | null | null | all_raspi_code_backup/DriveTesting.py | lord-pradhan/SnowBot | 82a0b3439dc203bf27725e293d6e56bcad720c09 | [
"MIT"
] | 1 | 2019-12-13T19:21:12.000Z | 2019-12-13T19:21:12.000Z | """
Program: DriveTesting.py
Revised On: 12/01/2019
"""
### Library Imports
from DriveArduino import DriveArduino
import numpy as np
from time import sleep
from sys import exit
from signal import signal, SIGINT
###
### CTRL + C Signal Handler & Resource Cleanup
def signal_handler(sig, frame):
    """Handler for CTRL + C clean exit.

    Registered with signal(SIGINT, ...); ``sig`` and ``frame`` are supplied
    by the signal machinery and unused. Delegates to cleanup(), which also
    terminates the process, so this never returns.
    """
    print('Quitting program.')
    cleanup()
def cleanup():
    """Resource cleanup.

    Closes the Arduino drive connection, then terminates the program with
    exit status 0 - callers never return from this function.
    """
    drive.close()
    print('Resource cleanup completed.')
    exit(0)
# Install the CTRL + C handler so the drive connection is closed cleanly.
signal(SIGINT, signal_handler)
###
### Arduino Configuration
addr = 0x08  # I2C address of the drive Arduino
drive = DriveArduino(addr)
###
### Main Program
print('Press CTRL + C to exit.')
# Loop forever: command fixed wheel RPM setpoints once per second and echo
# back the measured RPM/PWM values. The only exit path is SIGINT -> cleanup().
while True:
    # RPM targets for the four wheels - presumably two forward, two reverse
    # for an in-place turn; confirm wheel ordering against DriveArduino.
    setpoints = np.array([25, 25, -25, -25])
    drive.set_rpm(setpoints)
    sleep(1)
    drive.update()
    print(drive.rpm)
    print(drive.pwm)
    print()
###
583ba4ab4b346b94532e02cbbc5e159874800f72 | 363 | py | Python | src/sentry/utils/strings.py | rogerhu/sentry | ee2b190e92003abe0f538b2df5b686e425df1200 | [
"BSD-3-Clause"
] | 1 | 2015-12-13T18:27:54.000Z | 2015-12-13T18:27:54.000Z | src/sentry/utils/strings.py | simmetria/sentry | 9731f26adb44847d1c883cca108afc0755cf21cc | [
"BSD-3-Clause"
] | null | null | null | src/sentry/utils/strings.py | simmetria/sentry | 9731f26adb44847d1c883cca108afc0755cf21cc | [
"BSD-3-Clause"
] | null | null | null | def truncatechars(value, arg):
"""
Truncates a string after a certain number of chars.
Argument: Number of chars to truncate after.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
if len(value) > length:
return value[:length] + '...'
return value
| 25.928571 | 55 | 0.606061 |
583d59db015ae71e12d80d6cb5e3e2aba7e8e79c | 817 | py | Python | setup.py | Ozencb/cli-pto | 445e5133340adb25dcf5d14c4203643b7a8741c2 | [
"MIT"
] | 6 | 2020-04-30T18:32:38.000Z | 2020-07-28T15:37:04.000Z | setup.py | Ozencb/cli-pto | 445e5133340adb25dcf5d14c4203643b7a8741c2 | [
"MIT"
] | 1 | 2020-04-30T18:34:08.000Z | 2020-05-01T10:16:49.000Z | setup.py | Ozencb/cli-pto | 445e5133340adb25dcf5d14c4203643b7a8741c2 | [
"MIT"
] | null | null | null | import os
import re
from setuptools import find_packages, setup
setup(
name='cli-pto',
author='zen Bilgili',
description='A CLI text editor with encryption.',
version=get_version('cli_pto'),
url='https://github.com/ozencb/cli-pto',
packages=find_packages(),
install_requires=['prompt-toolkit', 'Pygments', 'pycryptodome'],
entry_points={'console_scripts': 'cli-pto = cli_pto.clipto:main'},
license=open('LICENSE').read(),
keywords=['text', 'editor', 'encryption', 'encrypted', 'password', 'manager']
)
| 31.423077 | 85 | 0.641371 |
583f4f6dd761e12a8aa4ad8d387f0bdd2b82f1de | 9,545 | py | Python | users/models.py | scoremaza/church_alive_backend | 2ee7260aea51ec39972588dc4a346aa152356aa3 | [
"MIT"
] | null | null | null | users/models.py | scoremaza/church_alive_backend | 2ee7260aea51ec39972588dc4a346aa152356aa3 | [
"MIT"
] | null | null | null | users/models.py | scoremaza/church_alive_backend | 2ee7260aea51ec39972588dc4a346aa152356aa3 | [
"MIT"
] | null | null | null | import os
import uuid
from django.db import models
from django.contrib.auth.models import User
from django.db.models.base import Model
from django.db.models.enums import Choices, ChoicesMeta
from django.db.models.fields.related import ForeignKey
from django.utils.deconstruct import deconstructible
user_profile_image_path = GenerateProfileImagePath()
| 32.355932 | 103 | 0.683394 |
5840120e03a13bb96c98c4c82966a3349be1a938 | 1,012 | py | Python | format_errors.py | drupchen/correct-ewts | 0a23db216b2fb78a8c73476ca55cebf23a7d2706 | [
"Apache-2.0"
] | null | null | null | format_errors.py | drupchen/correct-ewts | 0a23db216b2fb78a8c73476ca55cebf23a7d2706 | [
"Apache-2.0"
] | null | null | null | format_errors.py | drupchen/correct-ewts | 0a23db216b2fb78a8c73476ca55cebf23a7d2706 | [
"Apache-2.0"
] | null | null | null | import re
from collections import defaultdict
with open('input/errors-ewts.csv') as f:
raw = f.read()
#raw = raw.replace('`not expected', '` not expected')
lines = raw.split('\n')
data = []
for line in lines:
columns = re.split(r'(?:^"|","|",,"|"$)', line)
msgs = [a for a in columns[3].split(',') if a != '']
entry = [columns[1], columns[2], msgs]
data.append(entry)
error_types = []
by_error_type = defaultdict(list)
for entry in data:
msgs = entry[2]
for msg in msgs:
msg = msg.replace('line 1: ', '')
error_pattern = re.sub(r'`[^`]*`', r'`X`', msg)
error_types.append(error_pattern)
by_error_type[error_pattern].append(entry)
error_types = sorted(list(set(error_types)))
for type, entries in by_error_type.items():
print('{} occurences:\t\t{}'.format(len(entries), type))
etc_count = 0
for line in lines:
if 'character `.`.' in line:
etc_count += 1
print('number of lines with misplaced dots:', etc_count)
print('ok') | 27.351351 | 60 | 0.614625 |
5840ef989a734ba50cfa0c0f408fab21378c995e | 344 | py | Python | exercise-django/user/views.py | theseana/goodfellas | 9ad9d9759d193cd64ec71876b1dab155bb9ba2c7 | [
"MIT"
] | null | null | null | exercise-django/user/views.py | theseana/goodfellas | 9ad9d9759d193cd64ec71876b1dab155bb9ba2c7 | [
"MIT"
] | null | null | null | exercise-django/user/views.py | theseana/goodfellas | 9ad9d9759d193cd64ec71876b1dab155bb9ba2c7 | [
"MIT"
] | null | null | null | from django.shortcuts import render
# Create your views here.
from user.forms import UserForm
| 24.571429 | 78 | 0.659884 |
5841ecc637b36ee324105b2737f2b6315d8d0459 | 3,609 | py | Python | shark/example/env/catch_ball_env.py | 7starsea/shark | 5030f576da6f5998728d80170480e68a3debfe79 | [
"MIT"
] | null | null | null | shark/example/env/catch_ball_env.py | 7starsea/shark | 5030f576da6f5998728d80170480e68a3debfe79 | [
"MIT"
] | null | null | null | shark/example/env/catch_ball_env.py | 7starsea/shark | 5030f576da6f5998728d80170480e68a3debfe79 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
import PIL
import torch
import torchvision.transforms as TF
from types import SimpleNamespace
from gym import spaces, Env
from .SharkExampleEnv import CatchBallSimulate
# internal_screen_h, internal_screen_w = 80, 140
| 31.112069 | 107 | 0.600443 |
5842b3ae714ec5029aefbd5f4f522395e8920892 | 4,652 | py | Python | examples/launch_tor_with_simplehttpd.py | kneufeld/txtorcon | fbe2fc70cae00aa6228a2920ef048b282872dbab | [
"MIT"
] | null | null | null | examples/launch_tor_with_simplehttpd.py | kneufeld/txtorcon | fbe2fc70cae00aa6228a2920ef048b282872dbab | [
"MIT"
] | null | null | null | examples/launch_tor_with_simplehttpd.py | kneufeld/txtorcon | fbe2fc70cae00aa6228a2920ef048b282872dbab | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Create a new tor node and add a simple http server to it, serving a given
directory over http. The server is single-threaded and very limited.
There are two arguments that can be passed via the commandline:
-p\tThe internet-facing port the hidden service should listen on
-d\tThe directory to serve via http
Example:
./launch_tor_with_simplehttpd.py -p 8080 -d /opt/files/
'''
import SimpleHTTPServer
import SocketServer
import functools
import getopt
import os
import sys
import tempfile
import thread
from twisted.internet import reactor
import txtorcon
if __name__ == '__main__':
sys.exit(main())
| 33.710145 | 96 | 0.635211 |
5842cd8ea1a4359a03a5653c005a52f4e2eeeb68 | 5,123 | py | Python | homeroom/wsgi.py | openshift-labs/workshop-homeroom | a0f0c144eef679e35a93201d11973329be9924fb | [
"Apache-2.0"
] | 14 | 2019-09-28T20:42:29.000Z | 2021-11-23T13:12:42.000Z | homeroom/wsgi.py | openshift-homeroom/workshop-homeroom | a0f0c144eef679e35a93201d11973329be9924fb | [
"Apache-2.0"
] | 1 | 2019-10-15T02:55:57.000Z | 2019-10-15T02:55:57.000Z | homeroom/wsgi.py | openshift-homeroom/workshop-homeroom | a0f0c144eef679e35a93201d11973329be9924fb | [
"Apache-2.0"
] | 3 | 2020-02-11T16:55:59.000Z | 2021-08-13T13:16:27.000Z | import os
import json
import threading
import time
import yaml
from flask import Flask
from flask import render_template
from kubernetes.client.rest import ApiException
from kubernetes.client.configuration import Configuration
from kubernetes.config.incluster_config import load_incluster_config
from kubernetes.client.api_client import ApiClient
from openshift.dynamic import DynamicClient
from openshift.dynamic.exceptions import ResourceNotFoundError
# Work out namespace operating in.
service_account_path = '/var/run/secrets/kubernetes.io/serviceaccount'
with open(os.path.join(service_account_path, 'namespace')) as fp:
namespace = fp.read().strip()
# Setup REST API client access.
load_incluster_config()
import urllib3
urllib3.disable_warnings()
instance = Configuration()
instance.verify_ssl = False
Configuration.set_default(instance)
api_client = DynamicClient(ApiClient())
try:
route_resource = api_client.resources.get(
api_version='route.openshift.io/v1', kind='Route')
except ResourceNotFoundError:
route_resource = None
ingress_resource = api_client.resources.get(
api_version='extensions/v1beta1', kind='Ingress')
# Setup loading or workshops or live monitor.
workshops = []
application_name = os.environ.get('APPLICATION_NAME', 'homeroom')
if os.path.exists('/opt/app-root/configs/workshops.yaml'):
with open('/opt/app-root/configs/workshops.yaml') as fp:
content = fp.read()
if content:
workshops = list(filter_out_hidden(yaml.safe_load(content)))
if os.path.exists('/opt/app-root/configs/workshops.json'):
with open('/opt/app-root/configs/workshops.json') as fp:
content = fp.read()
workshops = list(filter_out_hidden(json.loads(content)))
if not workshops:
monitor_thread = threading.Thread(target=monitor_workshops)
monitor_thread.daemon = True
monitor_thread.start()
# Setup the Flask application.
app = Flask(__name__)
banner_images = {
'homeroom': 'homeroom-logo.png',
'openshift': 'openshift-logo.svg',
'dedicated': 'openshift-dedicated-logo.svg',
'okd': 'okd-logo.svg',
}
| 31.819876 | 87 | 0.622877 |
584381c8993e76aeeaae4fc35eb8cf9d4869915b | 3,417 | py | Python | rever/__init__.py | limecrayon/rever | 0446ad9707fb1e81b3101625959fd16bdaac1853 | [
"MIT"
] | 2 | 2018-02-16T08:31:48.000Z | 2018-11-19T02:31:07.000Z | rever/__init__.py | limecrayon/rever | 0446ad9707fb1e81b3101625959fd16bdaac1853 | [
"MIT"
] | null | null | null | rever/__init__.py | limecrayon/rever | 0446ad9707fb1e81b3101625959fd16bdaac1853 | [
"MIT"
] | null | null | null | import functools
import time
__all__ = ('ReachedMaxRetries', 'rever')
def rever(**rever_kwargs):
"""
rever_kwargs default values defined:
If backoff is True, then times and pause will not be initialized, but they will be calculated.
backoff: True
total_pause: 30
steps: 10
exception: BaseException
raises: True
prior: None
If backoff is False, then total_pause and steps will be initialized, but do not get used.
backoff: False
times: 1
pause: 0
exception: BaseException
raises: True
prior: None
"""
backoff = True
total_pause = 1
steps = 10
times = 1
pause = 0
exception = BaseException
raises = True
prior = None
if "backoff" not in rever_kwargs:
rever_kwargs["backoff"] = backoff
if "total_pause" not in rever_kwargs:
rever_kwargs["total_pause"] = total_pause
if "steps" not in rever_kwargs:
rever_kwargs["steps"] = steps
if "times" not in rever_kwargs:
if not rever_kwargs["backoff"]:
rever_kwargs["times"] = times
if "pause" not in rever_kwargs:
if not rever_kwargs["backoff"]:
rever_kwargs["pause"] = pause
if "exception" not in rever_kwargs:
rever_kwargs["exception"] = exception
if "raises" not in rever_kwargs:
rever_kwargs["raises"] = raises
if "prior" not in rever_kwargs:
rever_kwargs["prior"] = prior
initialized_kwargs = {key: rever_kwargs[key] for key in rever_kwargs}
return rever_decorator
| 30.238938 | 116 | 0.550776 |
5844f2ad1f289327e37c42bac510107e36f8f9d5 | 25,811 | py | Python | gui(12102018).py | hanhydro/T2H | f4922ce721eb450c7d91370f180e6c860e9ec6be | [
"MIT"
] | null | null | null | gui(12102018).py | hanhydro/T2H | f4922ce721eb450c7d91370f180e6c860e9ec6be | [
"MIT"
] | null | null | null | gui(12102018).py | hanhydro/T2H | f4922ce721eb450c7d91370f180e6c860e9ec6be | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'gui.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
import os
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import (QApplication, QDialog,
QProgressBar, QPushButton, QMessageBox)
import matplotlib.pyplot as plt
from matplotlib import style
import T2H, PLOT
import flopy
from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5
if is_pyqt5():
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
else:
from matplotlib.backends.backend_qt4agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
#%%
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| 47.975836 | 109 | 0.618728 |
584603df6f6456851f5001f52a65f8c0ba217511 | 226 | py | Python | py/loadpage.py | katiehuang1221/onl_ds5_project_2 | dc9243d6bdc0c1952a761b2ed3e91a8548202b42 | [
"MIT"
] | null | null | null | py/loadpage.py | katiehuang1221/onl_ds5_project_2 | dc9243d6bdc0c1952a761b2ed3e91a8548202b42 | [
"MIT"
] | 1 | 2021-01-20T02:34:07.000Z | 2021-01-20T02:34:07.000Z | py/loadpage.py | katiehuang1221/onl_ds5_project_2 | dc9243d6bdc0c1952a761b2ed3e91a8548202b42 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import requests
from IPython.core.display import display, HTML
| 22.6 | 46 | 0.734513 |
58483a9eb35db037bda84433b79608b84ed9f2c4 | 1,912 | py | Python | hard-gists/5409581/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/5409581/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/5409581/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z | '''
rename_selected_relation_box.py
Written by Alex Forsythe (awforsythe.com)
When executed, attempts to locate any selected box within any relation
constraint in the scene. If a selected relation box is found, prompts the user
to enter a new name for that box. Allows relation boxes to be given more
descriptive names. I'd recommend binding this script to a keyboard shortcut
(see MotionBuilder/bin/config/Scripts/ActionScript.txt) for quick access.
'''
from pyfbsdk import *
def get_first(f, xs):
    '''
    Return the first element of xs satisfying predicate f, or None if no
    element matches.
    '''
    return next((x for x in xs if f(x)), None)
def get_selected_relation_box():
    '''
    Return a relation-constraint box that the user has selected, or None
    when no relation boxes are selected anywhere in the scene.
    '''
    relations = (c for c in FBSystem().Scene.Constraints
                 if c.Is(FBConstraintRelation_TypeInfo()))
    for relation in relations:
        selected = get_first(lambda box: box.Selected, relation.Boxes)
        if selected:
            return selected
    return None
def get_new_box_name(box):
    '''
    Show a rename dialog for *box*. Return the entered name when the user
    confirms, or None when the user cancels.
    '''
    button, entered = FBMessageBoxGetUserValue(
        'Rename Box?',
        'Current name: %s' % box.Name,
        box.Name,
        FBPopupInputType.kFBPopupString,
        'Rename',
        'Cancel')
    # Button index 1 is the 'Rename' (confirm) button.
    if button == 1:
        return entered
    return None
def rename_selected_relation_box():
    '''
    Prompt the user to rename the currently selected relation-constraint
    box. Does nothing when no box is selected or the dialog is cancelled.
    '''
    box = get_selected_relation_box()
    if not box:
        return
    new_name = get_new_box_name(box)
    if new_name:
        box.Name = new_name
# MotionBuilder has historically run scripts under the '__builtin__' module
# name, so treat that as "executed directly" too.
if __name__ in ('__main__', '__builtin__'):
    rename_selected_relation_box()
584861b23601a5bd9f5d5e6bce09eb691a44f1c2 | 4,010 | py | Python | osu_scene_switcher.py | FunOrange/osu-scene-switcher | 471fc654fe4a222abaf4fbcf062e8302dd52bb18 | [
"MIT"
] | 4 | 2021-05-22T20:56:36.000Z | 2022-03-02T00:19:45.000Z | osu_scene_switcher.py | FunOrange/obs-osu-noise-suppression-switcher | 471fc654fe4a222abaf4fbcf062e8302dd52bb18 | [
"MIT"
] | null | null | null | osu_scene_switcher.py | FunOrange/obs-osu-noise-suppression-switcher | 471fc654fe4a222abaf4fbcf062e8302dd52bb18 | [
"MIT"
] | 1 | 2021-01-29T18:28:04.000Z | 2021-01-29T18:28:04.000Z | import os
import time
import obspython as obs
initial_load = False
status_file = ''
idle_scene = ''
playing_scene = ''
"""
Checks if status file exists and both scenes exist, then starts the main script timer
"""
"""
Checks the osu! status file for 'Playing',
then toggles Noise Suppression accordingly
"""
previous_status = ''
| 33.416667 | 122 | 0.721696 |
5849254d7b154fa7533602568ea01800f7eb9d68 | 3,386 | py | Python | donkey_gym/envs/donkey_env.py | mint26/donkey_gym | 4d0302da5818d56f92247b9dbf389994961f487e | [
"MIT"
] | 5 | 2018-11-01T18:48:03.000Z | 2021-03-11T14:36:22.000Z | donkey_gym/envs/donkey_env.py | mint26/donkey_gym | 4d0302da5818d56f92247b9dbf389994961f487e | [
"MIT"
] | null | null | null | donkey_gym/envs/donkey_env.py | mint26/donkey_gym | 4d0302da5818d56f92247b9dbf389994961f487e | [
"MIT"
] | 7 | 2018-10-13T19:48:14.000Z | 2021-10-31T15:10:52.000Z | '''
file: donkey_env.py
author: Tawn Kramer
date: 2018-08-31
'''
import os
from threading import Thread
import numpy as np
import gym
from gym import error, spaces, utils
from gym.utils import seeding
from donkey_gym.envs.donkey_sim import DonkeyUnitySimContoller
from donkey_gym.envs.donkey_proc import DonkeyUnityProcess
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ##
| 27.528455 | 99 | 0.60189 |
5849a619f304aa85187564eba6cb5913a8f7354f | 2,403 | py | Python | tests/unit/backend/corpora/common/entities/datasets/test_revision.py | chanzuckerberg/dcp-prototype | 24d2323ba5ae1482395da35ea11c42708e3a52ce | [
"MIT"
] | 2 | 2020-02-07T18:12:12.000Z | 2020-02-11T14:59:03.000Z | tests/unit/backend/corpora/common/entities/datasets/test_revision.py | HumanCellAtlas/dcp-prototype | 44ca66a266004124f39d7d3e3dd75e9076012ff0 | [
"MIT"
] | 173 | 2020-01-29T17:48:02.000Z | 2020-03-20T02:52:58.000Z | tests/unit/backend/corpora/common/entities/datasets/test_revision.py | HumanCellAtlas/dcp-prototype | 44ca66a266004124f39d7d3e3dd75e9076012ff0 | [
"MIT"
] | 1 | 2020-03-20T17:06:54.000Z | 2020-03-20T17:06:54.000Z | from tests.unit.backend.corpora.common.entities.datasets import TestDataset
| 52.23913 | 117 | 0.665418 |
584a11d14b64edf45f4d6711e52adb48c3e934c3 | 3,966 | py | Python | main.py | parzibyte/login-flask | 165e10980f6a34c7587a53578ed361506eb37475 | [
"MIT"
] | 2 | 2021-09-21T16:37:41.000Z | 2021-12-09T17:38:18.000Z | main.py | parzibyte/login-flask | 165e10980f6a34c7587a53578ed361506eb37475 | [
"MIT"
] | null | null | null | main.py | parzibyte/login-flask | 165e10980f6a34c7587a53578ed361506eb37475 | [
"MIT"
] | 1 | 2021-08-16T01:36:58.000Z | 2021-08-16T01:36:58.000Z | """
____ _____ _ _ _
| _ \ | __ \ (_) | | |
| |_) |_ _ | |__) |_ _ _ __ _____| |__ _ _| |_ ___
| _ <| | | | | ___/ _` | '__|_ / | '_ \| | | | __/ _ \
| |_) | |_| | | | | (_| | | / /| | |_) | |_| | || __/
|____/ \__, | |_| \__,_|_| /___|_|_.__/ \__, |\__\___|
__/ | __/ |
|___/ |___/
____________________________________
/ Si necesitas ayuda, contctame en \
\ https://parzibyte.me /
------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
Creado por Parzibyte (https://parzibyte.me).
------------------------------------------------------------------------------------------------
| IMPORTANTE |
Si vas a borrar este encabezado, considera:
Seguirme: https://parzibyte.me/blog/sigueme/
Y compartir mi blog con tus amigos
Tambin tengo canal de YouTube: https://www.youtube.com/channel/UCroP4BTWjfM0CkGB6AFUoBg?sub_confirmation=1
Twitter: https://twitter.com/parzibyte
Facebook: https://facebook.com/parzibyte.fanpage
Instagram: https://instagram.com/parzibyte
Hacer una donacin va PayPal: https://paypal.me/LuisCabreraBenito
------------------------------------------------------------------------------------------------
"""
from flask import Flask, render_template, request, redirect, session, flash
app = Flask(__name__)
"""
Clave secreta. Esta debe ser aleatoria, puedes generarla t.
Primero instala Python y agrega python a la PATH: https://parzibyte.me/blog/2019/10/08/instalar-python-pip-64-bits-windows/
Luego abre una terminal y ejecuta:
python
Entrars a la CLI de Python, ah ejecuta:
import os; print(os.urandom(16));
Eso te dar algo como:
b'\x11\xad\xec\t\x99\x8f\xfa\x86\xe8A\xd9\x1a\xf6\x12Z\xf4'
Simplemente remplaza la clave que se ve a continuacin con los bytes aleatorios que generaste
"""
app.secret_key = b'\xaa\xe4V}y~\x84G\xb5\x95\xa0\xe0\x96\xca\xa7\xe7'
"""
Definicin de rutas
"""
# Protegida. Solo pueden entrar los que han iniciado sesin
# Formulario para iniciar sesin
# Manejar login
# Cerrar sesin
# Un "middleware" que se ejecuta antes de responder a cualquier ruta. Aqu verificamos si el usuario ha iniciado sesin
# Iniciar el servidor
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8000, debug=True)
| 36.054545 | 137 | 0.594049 |
584b5746e6a8959beb85942376ecc9e56d8276af | 707 | py | Python | events/kawacon2016/migrations/0003_auto_20160127_1924.py | jlaunonen/turska | fc6ec4e0ae50a823e931152ce8835098b96f5966 | [
"CC-BY-3.0"
] | null | null | null | events/kawacon2016/migrations/0003_auto_20160127_1924.py | jlaunonen/turska | fc6ec4e0ae50a823e931152ce8835098b96f5966 | [
"CC-BY-3.0"
] | null | null | null | events/kawacon2016/migrations/0003_auto_20160127_1924.py | jlaunonen/turska | fc6ec4e0ae50a823e931152ce8835098b96f5966 | [
"CC-BY-3.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-27 17:24
from __future__ import unicode_literals
from django.db import migrations, models
| 33.666667 | 291 | 0.700141 |
584b6938b21baa80544be5899accf3e8f5524589 | 218 | py | Python | Modulo 3/HelloWorld.py | antonio343/clase | fda04a606246695aa5d93c8b2b5e2890a16d5973 | [
"MIT"
] | null | null | null | Modulo 3/HelloWorld.py | antonio343/clase | fda04a606246695aa5d93c8b2b5e2890a16d5973 | [
"MIT"
] | null | null | null | Modulo 3/HelloWorld.py | antonio343/clase | fda04a606246695aa5d93c8b2b5e2890a16d5973 | [
"MIT"
] | null | null | null | import sys
print("Hello world, I am",sys.executable, sys.version)
x=input("Dame un numero mayor que cero: ")
x = int(x)
if x < 0:
print('Negative changed to zero')
x = 0
print(f"El valor final de x es: {x}") | 19.818182 | 54 | 0.646789 |
584b955b3560453b5439bd686f05b35e554caf34 | 436 | py | Python | rasiberryPiGPIOBaseController/driver/test.py | onwebbe/rasiberryPiBaseController | bdb81cb5a0e62414fa091635a83db799017249e7 | [
"MIT"
] | null | null | null | rasiberryPiGPIOBaseController/driver/test.py | onwebbe/rasiberryPiBaseController | bdb81cb5a0e62414fa091635a83db799017249e7 | [
"MIT"
] | null | null | null | rasiberryPiGPIOBaseController/driver/test.py | onwebbe/rasiberryPiBaseController | bdb81cb5a0e62414fa091635a83db799017249e7 | [
"MIT"
] | null | null | null |
main() | 22.947368 | 63 | 0.713303 |
584bc0b37c22b1a874521a0f4acbe34fb62b2cde | 17,670 | py | Python | Acquire/Client/_user.py | michellab/BioSimSpaceCloud | 456b146a2131565e354352872d3e75a08c3652d1 | [
"Apache-2.0"
] | 2 | 2019-02-15T16:04:19.000Z | 2019-02-19T15:42:27.000Z | Acquire/Client/_user.py | michellab/BioSimSpaceCloud | 456b146a2131565e354352872d3e75a08c3652d1 | [
"Apache-2.0"
] | null | null | null | Acquire/Client/_user.py | michellab/BioSimSpaceCloud | 456b146a2131565e354352872d3e75a08c3652d1 | [
"Apache-2.0"
] | null | null | null |
import os as _os
from enum import Enum as _Enum
from datetime import datetime as _datetime
import time as _time
from Acquire.Service import call_function as _call_function
from Acquire.Service import Service as _Service
from Acquire.ObjectStore import bytes_to_string as _bytes_to_string
from Acquire.ObjectStore import string_to_bytes as _string_to_bytes
from Acquire.Crypto import PrivateKey as _PrivateKey
from Acquire.Crypto import PublicKey as _PublicKey
from ._qrcode import create_qrcode as _create_qrcode
from ._qrcode import has_qrcode as _has_qrcode
from ._errors import UserError, LoginError
# If we can, import socket to get the hostname and IP address
try:
import socket as _socket
_has_socket = True
except:
_has_socket = False
__all__ = ["User", "username_to_uid", "uid_to_username", "get_session_keys"]
def _get_identity_url():
"""Function to discover and return the default identity url"""
return "http://130.61.60.88:8080/t/identity"
def _get_identity_service(identity_url=None):
    """Fetch, deserialise and validate the identity Service at *identity_url*.

    :param identity_url: URL of the identity service; defaults to the
        system identity URL from _get_identity_url()
    :return: the deserialised Service object (with its URL updated to
        *identity_url* if the remote reports a different one)
    :raises LoginError: if the service info cannot be parsed, or the
        remote service is not an identity service
    """
    if identity_url is None:
        identity_url = _get_identity_url()

    # Ask the service to encrypt its response against a fresh private key.
    privkey = _PrivateKey()
    response = _call_function(identity_url, response_key=privkey)

    try:
        service = _Service.from_data(response["service_info"])
    except Exception:
        # Fix: was a bare "except:", which would also convert
        # KeyboardInterrupt/SystemExit into a LoginError.
        raise LoginError("Have not received the identity service info from "
                         "the identity service at '%s' - got '%s'" %
                         (identity_url, response))

    if not service.is_identity_service():
        raise LoginError(
            "You can only use a valid identity service to log in! "
            "The service at '%s' is a '%s'" %
            (identity_url, service.service_type()))

    # Keep the URL the caller actually used (the service may report another).
    if identity_url != service.service_url():
        service.update_service_url(identity_url)

    return service
def uid_to_username(user_uid, identity_url=None):
    """Return the username registered for *user_uid*.

    Falls back to the system default identity service when no
    identity_url is supplied.
    """
    service_url = _get_identity_url() if identity_url is None else identity_url
    response = _call_function(service_url, "whois",
                              user_uid=str(user_uid))
    return response["username"]
def username_to_uid(username, identity_url=None):
    """Return the unique user ID registered for *username*.

    Falls back to the system default identity service when no
    identity_url is supplied.
    """
    service_url = _get_identity_url() if identity_url is None else identity_url
    response = _call_function(service_url, "whois",
                              username=str(username))
    return response["user_uid"]
def get_session_keys(username=None, user_uid=None, session_uid=None,
                     identity_url=None):
    """Return the session keys for the specified login session.

    :param username: username of the user (this or *user_uid* is required)
    :param user_uid: unique ID of the user (this or *username* is required)
    :param session_uid: UID of the login session (required)
    :param identity_url: identity service URL; defaults to the system one
    :return: the "whois" response dict; the "public_key" and "public_cert"
        entries are deserialised to PublicKey objects when present and valid
    :raises ValueError: if neither username nor user_uid is given, or
        session_uid is missing
    """
    if username is None and user_uid is None:
        raise ValueError("You must supply either the username or user_uid!")

    if session_uid is None:
        raise ValueError("You must supply a valid UID for a login session")

    if identity_url is None:
        identity_url = _get_identity_url()

    response = _call_function(identity_url, "whois",
                              username=username,
                              user_uid=user_uid,
                              session_uid=session_uid)

    # Best-effort deserialisation: leave the raw value in place when the
    # key/cert is absent or unparsable. Fix: narrowed the bare "except:"
    # clauses to "except Exception" so KeyboardInterrupt/SystemExit are
    # no longer swallowed.
    try:
        response["public_key"] = _PublicKey.from_data(response["public_key"])
    except Exception:
        pass

    try:
        response["public_cert"] = _PublicKey.from_data(response["public_cert"])
    except Exception:
        pass

    return response
| 32.244526 | 79 | 0.590323 |
584c241bf384f1ee86da8eb49a7b42c532f3a92a | 8,007 | py | Python | botasky/utils/MyMAIL.py | 5atouristspot/sql_audit | 54c6d5ac9f8178ab1a17b7ff2d04ff738f14e0b7 | [
"MIT"
] | null | null | null | botasky/utils/MyMAIL.py | 5atouristspot/sql_audit | 54c6d5ac9f8178ab1a17b7ff2d04ff738f14e0b7 | [
"MIT"
] | null | null | null | botasky/utils/MyMAIL.py | 5atouristspot/sql_audit | 54c6d5ac9f8178ab1a17b7ff2d04ff738f14e0b7 | [
"MIT"
] | null | null | null | #! /usr/bin/python2.7
# -*- coding: utf-8 -*-
"""
Created on 2017-4-06
@module: MyMAIL
@used: send mail
"""
import smtplib
import mimetypes
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.image import MIMEImage
from MyLOG import MyLog
from botasky.utils.MyFILE import project_abdir, recursiveSearchFile
logConfig = recursiveSearchFile(project_abdir, '*logConfig.ini')[0]
mylog = MyLog(logConfig, 'MyMAIL.py')
logger = mylog.outputLog()
__all__ = ['MyMail']
__author__ = 'zhihao'
# SMTP connection settings for the 163.com mail service.
# NOTE(review): real credentials are hard-coded in source control - move
# them to a config file or environment variables and rotate this password.
mail_info = {'mail_host': 'smtp.163.com',
             'mail_user': '15895890858',
             'mail_pass': 'zhi@hao@111',
             'mail_postfix': '163.com'}
if __name__ == '__main__':
'''
mail_info = {'mail_host': 'smtp.163.com',
'mail_user': '15002283621',
'mail_pass': 'zhihao1206',
'mail_postfix': '163.com'}
#to_list = ['15002283621@163.com']
to_list = ['1204207658@qq.com']
subject = 'xxxxxxxxxxxxx'
content = 'xxxxxxxxxxxxx'
#attachment_list = ['F:\img\img.rar', 'F:\img\img2.rar']
attachment_list = []
#img_list = ['F:\img\\1025.jpg', 'F:\img\\1041.jpg']
img_list = []
mail = MyMail(mail_info)
mail.send_mail(to_list, 'plain', subject, content, attachment_list, img_list)
'''
import MyMAIL
help(MyMAIL)
| 36.729358 | 114 | 0.429374 |
584dcc24968eeec28c6969e280feb5d4d108b6e6 | 7,694 | py | Python | db_adapter/curw_fcst/source/source_utils.py | CUrW-SL/curw_db_adapter | 9d9ef24f42080910e0bd251bc7f001b0a4b0ab31 | [
"MIT"
] | 2 | 2019-04-26T07:50:33.000Z | 2019-09-28T20:15:33.000Z | db_adapter/curw_fcst/source/source_utils.py | CUrW-SL/curw_db_adapter | 9d9ef24f42080910e0bd251bc7f001b0a4b0ab31 | [
"MIT"
] | 1 | 2019-04-03T09:30:38.000Z | 2019-04-20T18:11:59.000Z | db_adapter/curw_fcst/source/source_utils.py | shadhini/curw_db_adapter | 4db8e1ea8794ffbd0dce29ac954a13315e83d843 | [
"MIT"
] | null | null | null | import json
import traceback
from db_adapter.exceptions import DatabaseAdapterError
from db_adapter.logger import logger
"""
Source JSON Object would looks like this
e.g.:
{
'model' : 'wrfSE',
'version' : 'v3',
'parameters': { }
}
{
'model' : 'OBS_WATER_LEVEL',
'version' : '',
'parameters': {
"CHANNEL_CELL_MAP" : {
"594" : "Wellawatta", "1547": "Ingurukade", "3255": "Yakbedda", "3730": "Wellampitiya",
"7033": "Janakala Kendraya"
}, "FLOOD_PLAIN_CELL_MAP": { }
}
}
"""
def get_source_by_id(pool, id_):
    """
    Fetch the full `source` row with the given primary key.

    :param pool: database connection pool
    :param id_: source id
    :return: the matching row if it exists, else None
    :raises Exception: re-raises any database error after logging it
    """
    conn = pool.connection()
    try:
        with conn.cursor() as cursor:
            matched = cursor.execute("SELECT * FROM `source` WHERE `id`=%s", id_)
            return cursor.fetchone() if matched > 0 else None
    except Exception as db_error:
        logger.error("Retrieving source with source_id {} failed".format(id_))
        traceback.print_exc()
        raise db_error
    finally:
        if conn is not None:
            conn.close()
def get_source_id(pool, model, version) -> str:
    """
    Look up the id of the source identified by (model, version).

    :param pool: database connection pool
    :param model: source model name
    :param version: source version string
    :return: str: source id if the source exists in the database, else None
    :raises Exception: re-raises any database error after logging it
    """
    conn = pool.connection()
    try:
        with conn.cursor() as cursor:
            query = "SELECT `id` FROM `source` WHERE `model`=%s and `version`=%s"
            if cursor.execute(query, (model, version)) > 0:
                return cursor.fetchone()['id']
            return None
    except Exception as db_error:
        logger.error("Retrieving source id: model={} and version={} failed.".format(model, version))
        traceback.print_exc()
        raise db_error
    finally:
        if conn is not None:
            conn.close()
def add_source(pool, model, version, parameters=None):
"""
Insert sources into the database
:param pool: database connection pool
:param model: string
:param version: string
:param parameters: JSON
:return: True if the source has been added to the "Source' table of the database, else False
"""
connection = pool.connection()
try:
if get_source_id(pool=pool, model=model, version=version) is None:
with connection.cursor() as cursor:
sql_statement = "INSERT INTO `source` (`model`, `version`, `parameters`) VALUES ( %s, %s, %s)"
row_count = cursor.execute(sql_statement, (model, version, json.dumps(parameters)))
connection.commit()
return True if row_count > 0 else False
else:
logger.info("Source with model={} and version={} already exists in the database".format(model, version))
return False
except Exception as exception:
connection.rollback()
error_message = "Insertion of source: model={}, version={} and parameters={} failed".format(model, version, parameters)
logger.error(error_message)
traceback.print_exc()
raise exception
finally:
if connection is not None:
connection.close()
def add_sources(sources, pool):
"""
Add sources into Source table
:param sources: list of json objects that define source attributes
e.g.:
{
'model' : 'wrfSE',
'version' : 'v3',
'parameters': { }
}
{
'model' : 'OBS_WATER_LEVEL',
'version' : '',
'parameters': {
"CHANNEL_CELL_MAP" : {
"594" : "Wellawatta", "1547": "Ingurukade", "3255": "Yakbedda", "3730": "Wellampitiya",
"7033": "Janakala Kendraya"
}, "FLOOD_PLAIN_CELL_MAP": { }
}
}
:return:
"""
for source in sources:
print(add_source(pool=pool, model=source.get('model'), version=source.get('version'),
parameters=source.get('parameters')))
print(source.get('model'))
def delete_source(pool, model, version):
"""
Delete source from Source table, given model and version
:param pool: database connection pool
:param model: str
:param version: str
:return: True if the deletion was successful
"""
connection = pool.connection()
try:
with connection.cursor() as cursor:
sql_statement = "DELETE FROM `source` WHERE `model`=%s and `version`=%s"
row_count = cursor.execute(sql_statement, (model, version))
connection.commit()
if row_count > 0:
return True
else:
logger.info("There's no record of source in the database with model={} and version={}".format(model, version))
return False
except Exception as exception:
connection.rollback()
error_message = "Deleting source with model={} and version={} failed.".format(model, version)
logger.error(error_message)
traceback.print_exc()
raise exception
finally:
if connection is not None:
connection.close()
def delete_source_by_id(pool, id_):
"""
Delete source from Source table by id
:param pool: database connection pool
:param id_:
:return: True if the deletion was successful, else False
"""
connection = pool.connection()
try:
with connection.cursor() as cursor:
sql_statement = "DELETE FROM `source` WHERE `id`=%s"
row_count = cursor.execute(sql_statement, id_)
connection.commit()
if row_count > 0 :
return True
else:
logger.info("There's no record of source in the database with the source id {}".format(id_))
return False
except Exception as exception:
connection.rollback()
error_message = "Deleting source with id {} failed.".format(id_)
logger.error(error_message)
traceback.print_exc()
raise exception
finally:
if connection is not None:
connection.close()
def get_source_parameters(pool, model, version):
"""
Retrieve Source parameters
:param pool: database connection pool
:param model:
:param version:
:return: str: json object parameters if source exists in the database, else None
"""
connection = pool.connection()
try:
with connection.cursor() as cursor:
sql_statement = "SELECT `parameters` FROM `source` WHERE `model`=%s and `version`=%s"
row_count = cursor.execute(sql_statement, (model, version))
if row_count > 0:
return cursor.fetchone()['parameters']
else:
return None
except Exception as exception:
error_message = "Retrieving source parameters: model={} and version={} failed.".format(model, version)
logger.error(error_message)
traceback.print_exc()
raise exception
finally:
if connection is not None:
connection.close()
| 32.601695 | 127 | 0.583442 |
584f6d166970adb6f3793037f401b85f026ce2ab | 511 | py | Python | tests/kyu_7_tests/test_binary_addition.py | the-zebulan/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | 40 | 2016-03-09T12:26:20.000Z | 2022-03-23T08:44:51.000Z | tests/kyu_7_tests/test_binary_addition.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | null | null | null | tests/kyu_7_tests/test_binary_addition.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | 36 | 2016-11-07T19:59:58.000Z | 2022-03-31T11:18:27.000Z | import unittest
from katas.kyu_7.binary_addition import add_binary
| 24.333333 | 54 | 0.675147 |
584ff888d14bb4a1085d283e99cd26c1976fee31 | 739 | py | Python | var/spack/repos/builtin/packages/netdata/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/netdata/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/netdata/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
| 32.130435 | 96 | 0.70636 |
5850feed17b8dae7b2795290112a605c61fbeef1 | 1,727 | py | Python | examples/my_quickstart.py | 87boy/sisu | 823d12c9a8126ab41bb14b6d91cad9acbb95bc47 | [
"Apache-2.0"
] | null | null | null | examples/my_quickstart.py | 87boy/sisu | 823d12c9a8126ab41bb14b6d91cad9acbb95bc47 | [
"Apache-2.0"
] | null | null | null | examples/my_quickstart.py | 87boy/sisu | 823d12c9a8126ab41bb14b6d91cad9acbb95bc47 | [
"Apache-2.0"
] | null | null | null | import flask
import flask.ext.sqlalchemy
import flask.ext.restless
# Create the Flask application and the Flask-SQLAlchemy object.
app = flask.Flask(__name__)
app.config['DEBUG'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = flask.ext.sqlalchemy.SQLAlchemy(app)
# Create your Flask-SQLALchemy models as usual but with the following two
# (reasonable) restrictions:
# 1. They must have a primary key column of type sqlalchemy.Integer or
# type sqlalchemy.Unicode.
# 2. They must have an __init__ method which accepts keyword arguments for
# all columns (the constructor in flask.ext.sqlalchemy.SQLAlchemy.Model
# supplies such a method, so you don't need to declare a new one).
# Create the database tables.
db.create_all()
# Create the Flask-Restless API manager.
manager = flask.ext.restless.APIManager(app, flask_sqlalchemy_db=db)
# Create API endpoints, which will be available at /api/<tablename> by
# default. Allowed HTTP methods can be specified as well.
manager.create_api(Person, methods=['GET', 'POST', 'DELETE'])
manager.create_api(Computer, methods=['GET'])
# start the flask loop
app.run()
| 35.244898 | 76 | 0.711639 |
5853ac3ad2b07e0bcfbda162b15356c29c25cefe | 4,687 | py | Python | src/crypto_wallet/crypto_wallet.py | Sedosa/Blockchain-Analytics | a09de9cfd308c70e38a05d4127fb372af5b919b7 | [
"MIT"
] | null | null | null | src/crypto_wallet/crypto_wallet.py | Sedosa/Blockchain-Analytics | a09de9cfd308c70e38a05d4127fb372af5b919b7 | [
"MIT"
] | null | null | null | src/crypto_wallet/crypto_wallet.py | Sedosa/Blockchain-Analytics | a09de9cfd308c70e38a05d4127fb372af5b919b7 | [
"MIT"
] | null | null | null | """
This is a script that takes a calculates the value of a cryptocurrency portfolio
It uses JSON in the with quantities of different cryptocurrencies in the form
{
"ticker" : volume,
"ticker" : volume
}
gets the live price from an API endpoint and returns the price of each item in the portfolio and the total
It also writes these into a sqlite3 database for future reference with a timestamp
"""
import os, logging, argparse, json
import sqlite3
import requests
import datetime
import time
"""
TODO: Error handling & logging
"""
# Need API from https://min-api.cryptocompare.com/
API_KEY = os.getenv("CRYPTO_API_KEY")
HEADER = {"authorization": f"Apikey {API_KEY}"}
# Taken from https://docs.python.org/3/library/sqlite3.html#registering-an-adapter-callable
sqlite3.register_adapter(datetime.datetime, adapt_datetime)
def setup_db(db_path):
"""
Initialises a local sqlite3 database and create the table required to hold data.
Parameters
-------------
db_path
string : A filepath to a target sqlite database
Returns
-------------
con:
Connection : Returns a connection to that database
"""
con = sqlite3.connect(db_path)
# Create table
with con:
con.execute(
"""CREATE TABLE IF NOT EXISTS CRYPTO_PRICE
(DATE timestamp, TICKER text, QTY real, PRICE real, VALUE real )"""
)
logging.info("Database and table created")
return con
def insert_into_db(connection, ticker, price, dict):
"""
Writes crypto price data to specified sqlite3 database
Parameters
-------------
connection
string : Connection to sqlite3 database output of setup_db() fn
ticker
string : String of the Ticker for a cryptocurrency e.g. BTC
price
float : Price of a cryptocurrency
dict
Dictionary : Dictionary loaded from portfolio JSON. output of parse_json() fn
"""
now = datetime.datetime.now()
with connection as con:
if ticker != "SUM":
con.execute(
"""insert into CRYPTO_PRICE
values (?,?,?,?,?)""",
(now, ticker, dict[ticker], price, price * dict[ticker]),
)
else:
con.execute(
"""insert into CRYPTO_PRICE
values (?,?,?,?,?)""",
(now, ticker, 0, price, price),
)
logging.info(f"Inserted {ticker} values into database")
def parse_json(json_path):
"""
Loads portfolio in JSON into a python dictionary.
Parameters
-------------
json_path
string : Path to portfolio JSON described in header documentation
Returns
-------------
crypto_dict
Dictionary : Dictionary loaded from portfolio json. output of parse_json() fn
"""
with open(json_path) as j:
crypto_dict = json.load(j)
return crypto_dict
def get_price(ticker):
"""
Returns the live price of a unit a cryptocurrency in GBP.
Parameters
-------------
ticker
string : String of the Ticker for a cryptocurrency e.g. BTC
Returns
-------------
price
float : Price of a cryptocurrency
"""
API_ENDPOINT = f"https://min-api.cryptocompare.com/data/price?fsym={ticker}&tsyms=GBP"
response = requests.get(API_ENDPOINT, headers=HEADER)
price = response.json()["GBP"]
return price
if __name__ == "__main__":
logging.basicConfig(
level=logging.INFO, format="[%(levelname)s: %(asctime)s] %(filename)s, %(funcName)s, line %(lineno)d : %(message)s"
)
parser = argparse.ArgumentParser()
parser.add_argument(
"--filepath_in", required=False, type=str, default=os.getcwd(), help="Filepath to json holding volumes of crypto"
)
parser.add_argument(
"--db_path", required=False, type=str, default=f"{os.getcwd()}/crypto.db", help="Filepath to sqlite database"
)
args = parser.parse_args()
FILEPATH_IN = args.filepath_in
con = setup_db(args.db_path)
main(FILEPATH_IN, con)
con.close()
| 26.480226 | 123 | 0.631961 |
5853d100285433e6202ec4adff867b94b7270769 | 1,685 | py | Python | np43s.py | Muraru-taketa/100_knocks | d34c0157d15a0fda45ac60e41e93bd6b73cebb58 | [
"MIT"
] | null | null | null | np43s.py | Muraru-taketa/100_knocks | d34c0157d15a0fda45ac60e41e93bd6b73cebb58 | [
"MIT"
] | null | null | null | np43s.py | Muraru-taketa/100_knocks | d34c0157d15a0fda45ac60e41e93bd6b73cebb58 | [
"MIT"
] | null | null | null | #np43.py
#43.
"""
"""
import re
#
separator = re.compile('\t|,')
#
kakari = re.compile(r'''(?:\*\s\d+\s) #
(-?\d+) # ()
''', re.VERBOSE)
# ,Chunk
def append_sentence(chunks, sentences):
#
for i, chunk in enumerate(chunks):
if chunk.dst != -1:
chunks[chunk.dst].srcs.append(i)
sentences.append(chunks)
return sentences
import np41sss
sentences = np41sss.Ai_morphs()
sentence = sentences[1]
for chunk in sentence:
if int(chunk.dst) != -1:
modifier = ''.join([morph.surface if morph.pos != '' else '' for morph in chunk.morphs])
modifier_pos = [morph.pos for morph in chunk.morphs]#chunk,morphs
modifiee = ''.join([morph.surface if morph.pos != '' else '' for morph in sentence[int(chunk.dst)].morphs])
modifiee_pos = [morph.pos for morph in sentence[int(chunk.dst)].morphs]
if '' in modifier_pos and '' in modifiee_pos:#
print(modifier, modifiee, sep='\t')# | 30.636364 | 113 | 0.619585 |
585453c1a7dceaddf108fc0199e9890c1f5860d6 | 4,026 | py | Python | backend/presentation/Viewsets/comment_view.py | Weida-W/CMPUT404-project-socialdistribution | 41d8a7f7f013723d2a3878156953fbc11c2e6156 | [
"W3C-20150513"
] | null | null | null | backend/presentation/Viewsets/comment_view.py | Weida-W/CMPUT404-project-socialdistribution | 41d8a7f7f013723d2a3878156953fbc11c2e6156 | [
"W3C-20150513"
] | 75 | 2021-01-13T23:48:48.000Z | 2021-04-16T19:39:38.000Z | backend/presentation/Viewsets/comment_view.py | Weida-W/CMPUT404-project-socialdistribution | 41d8a7f7f013723d2a3878156953fbc11c2e6156 | [
"W3C-20150513"
] | 12 | 2021-01-13T23:22:35.000Z | 2021-04-28T08:13:38.000Z | from presentation.models import Author, Follower, Post, Comment
from django.shortcuts import get_object_or_404
from presentation.Serializers.comment_serializer import CommentSerializer
from rest_framework import viewsets, status
from django.http import JsonResponse
from rest_framework.response import Response
import uuid
from urllib.parse import urlparse
from . import urlutil
'''
URL: ://service/author/{author_id}/posts/{post_id}/comments access
GET get comments of the post
POST if you post an object of type:comment, it will add your comment to the post
'''
| 37.981132 | 115 | 0.655986 |
5854bedf049dafa402041ca2798dee49d6f30c6d | 11,520 | py | Python | bundle/vim-pandoc-master/python3/vim_pandoc/command.py | ian-mitchell-001/my-vim-configs | 198747234df311179185ce9fb8424bb1c1c64771 | [
"Unlicense"
] | null | null | null | bundle/vim-pandoc-master/python3/vim_pandoc/command.py | ian-mitchell-001/my-vim-configs | 198747234df311179185ce9fb8424bb1c1c64771 | [
"Unlicense"
] | null | null | null | bundle/vim-pandoc-master/python3/vim_pandoc/command.py | ian-mitchell-001/my-vim-configs | 198747234df311179185ce9fb8424bb1c1c64771 | [
"Unlicense"
] | null | null | null | # encoding=utf-8
import vim
import re
import sys
import os.path
import argparse
import shlex
from subprocess import Popen, PIPE
from itertools import chain
from vim_pandoc.utils import plugin_enabled_modules, ensure_string
from vim_pandoc.bib.vim_completer import find_bibfiles
from vim_pandoc.helpparser import PandocInfo
pandoc = PandocCommand()
| 44.307692 | 118 | 0.521354 |
58560f5398484c07794db5199083195112cafef3 | 10,955 | py | Python | databricks/koalas/strings.py | mercileesb/koalas | 685176c512f31166f0e472aa0f461d0f1449fb0c | [
"Apache-2.0"
] | 1 | 2021-01-17T18:26:33.000Z | 2021-01-17T18:26:33.000Z | databricks/koalas/strings.py | mercileesb/koalas | 685176c512f31166f0e472aa0f461d0f1449fb0c | [
"Apache-2.0"
] | null | null | null | databricks/koalas/strings.py | mercileesb/koalas | 685176c512f31166f0e472aa0f461d0f1449fb0c | [
"Apache-2.0"
] | null | null | null | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
String functions on Koalas Series
"""
from typing import TYPE_CHECKING
import numpy as np
from pyspark.sql.types import StringType, BinaryType, BooleanType
from databricks.koalas.base import _wrap_accessor_pandas
if TYPE_CHECKING:
import databricks.koalas as ks
| 31.033994 | 76 | 0.583204 |
585693264a6958d193fa10022658456c7350638b | 807 | py | Python | python/turbodbc_test/test_cursor_async_io.py | fjetter/turbodbc | b11f0a1bc7d67bc3cbc60f564594f0e735f524f4 | [
"MIT"
] | null | null | null | python/turbodbc_test/test_cursor_async_io.py | fjetter/turbodbc | b11f0a1bc7d67bc3cbc60f564594f0e735f524f4 | [
"MIT"
] | null | null | null | python/turbodbc_test/test_cursor_async_io.py | fjetter/turbodbc | b11f0a1bc7d67bc3cbc60f564594f0e735f524f4 | [
"MIT"
] | null | null | null | import pytest
import six
from turbodbc import connect
from query_fixture import query_fixture
from helpers import for_one_database, open_cursor
| 36.681818 | 84 | 0.629492 |
5856c891983edcd6b2efc2d720455bfccf0cdf79 | 1,491 | py | Python | llist_gameboard/urls.py | Plongesam/data-structures-game | a47c849ea97763eff1005273a58aa3d8ab663ff2 | [
"Apache-2.0"
] | 2 | 2021-03-02T20:06:34.000Z | 2021-03-31T02:51:35.000Z | llist_gameboard/urls.py | Plongesam/data-structures-game | a47c849ea97763eff1005273a58aa3d8ab663ff2 | [
"Apache-2.0"
] | 68 | 2021-03-02T20:20:21.000Z | 2021-05-13T02:21:57.000Z | llist_gameboard/urls.py | Plongesam/data-structures-game | a47c849ea97763eff1005273a58aa3d8ab663ff2 | [
"Apache-2.0"
] | null | null | null | """
URL's for the LList Game Board app.
"""
from django.urls import path
from llist_gameboard.api import llist_api
from . import views
urlpatterns = [
# Views
path('', views.llist_game_board, name='llist-game-board'),
#Game Play API Calls For Linked List
path('llist_api', llist_api.api_overview, name='llist-game-board-api_overview'),
path('llist_api/start_game/<str:difficulty>/<str:player_ids>/<str:data_structures>', llist_api.start_game, name='llist-game-board-start_game'),
path('llist_api/board/<str:game_id>', llist_api.board, name='llist-game-board-game_status'),
path('llist_api/dig_tunnel/<str:game_id>/<str:origin>/<str:destination>', llist_api.dig_tunnel, name='llist-game-board-dig_tunnel'),
path('llist_api/dig_chamber/<str:game_id>/<str:origin>/<str:move_ant>/<str:ant>', llist_api.dig_chamber, name='llist-game-board-dig_chamber'),
path('llist_api/fill_chamber/<str:game_id>/<str:to_fill>', llist_api.fill_chamber, name='llist-game-board-fill_chamber'),
path('llist_api/spawn_ant/<str:game_id>', llist_api.spawn_ant, name='llist-game-board-spawn_ant'),
path('llist_api/forage/<str:game_id>/<str:difficulty>/<str:dest>', llist_api.forage, name='llist-game-board-forage'),
path('llist_api/move_food/<str:game_id>/<str:start>/<str:dest>', llist_api.move_food, name='llist-game-board-move_food'),
path('llist_api/move_ant/<str:game_id>/<str:start>/<str:dest>', llist_api.move_ant, name='llist-game-board-move_ant'),
] | 59.64 | 147 | 0.733736 |
5857c8cf49629013e2ff3dd558ee69aaefccf283 | 208 | py | Python | tests/test_most_invoices.py | swimmio/sqlalchemy_swimm | d24accb7792743cf586bd7062531d108e7063eba | [
"MIT"
] | null | null | null | tests/test_most_invoices.py | swimmio/sqlalchemy_swimm | d24accb7792743cf586bd7062531d108e7063eba | [
"MIT"
] | null | null | null | tests/test_most_invoices.py | swimmio/sqlalchemy_swimm | d24accb7792743cf586bd7062531d108e7063eba | [
"MIT"
] | null | null | null | from src import most_invoices
EXPECTED_RESULT = (14, 'Berlin')
| 23.111111 | 63 | 0.774038 |
58593da1cc559e0383548c77af9516f78e6dbe07 | 8,223 | py | Python | VIP_modules/widgets/ResultCanvas_QTAgg.py | Nikolaj-K/lab-control-GUI | 3c7811de57f110870cf4740743fd84b76d918ad3 | [
"MIT"
] | 17 | 2017-05-24T13:31:31.000Z | 2021-12-04T22:47:33.000Z | VIP_modules/widgets/ResultCanvas_QTAgg.py | Nikolaj-K/lab-control-GUI | 3c7811de57f110870cf4740743fd84b76d918ad3 | [
"MIT"
] | null | null | null | VIP_modules/widgets/ResultCanvas_QTAgg.py | Nikolaj-K/lab-control-GUI | 3c7811de57f110870cf4740743fd84b76d918ad3 | [
"MIT"
] | 6 | 2017-11-21T01:32:33.000Z | 2020-12-15T05:28:17.000Z | import random
import numpy as np
import operator
from scipy import optimize
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg
from matplotlib.figure import Figure as MatplotlibFigure
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm as color_map
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import interface.auxiliary_functions as auxi
import dictionaries.constants as cs
#################################################################################
#################################################################################
#################################################################################
| 46.721591 | 147 | 0.531558 |
585a68e41b2ee9276af7dd0a8f001bc6f258c0ac | 4,538 | py | Python | data/external/repositories_2to3/42139/KDDCup13Track2-master/cluster_kruskal.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/42139/KDDCup13Track2-master/cluster_kruskal.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/42139/KDDCup13Track2-master/cluster_kruskal.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z | #!/usr/bin/env python
# Given weighted graph, perform kruskal-based clustering
from common import *
from cluster_common import *
import argparse
import csv
import pickle as pickle
from collections import defaultdict
if __name__ == "__main__":
main()
| 29.855263 | 133 | 0.642794 |
585b50403351ad785a902fa91bf54e0474f5e68a | 4,019 | py | Python | third_party/gsutil/oauth2_plugin/oauth2_helper.py | bdero/depot_tools | 685577439cbf9cb8c660e3da39bdcbb64c197c95 | [
"BSD-3-Clause"
] | 20 | 2015-12-07T06:08:27.000Z | 2021-11-08T11:06:18.000Z | third_party/gsutil/oauth2_plugin/oauth2_helper.py | bdero/depot_tools | 685577439cbf9cb8c660e3da39bdcbb64c197c95 | [
"BSD-3-Clause"
] | 1 | 2019-01-14T00:36:35.000Z | 2019-01-14T00:36:35.000Z | third_party/gsutil/oauth2_plugin/oauth2_helper.py | bdero/depot_tools | 685577439cbf9cb8c660e3da39bdcbb64c197c95 | [
"BSD-3-Clause"
] | 23 | 2015-05-05T08:22:59.000Z | 2021-11-10T06:24:46.000Z | # Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper routines to facilitate use of oauth2_client in gsutil."""
import sys
import time
import webbrowser
import oauth2_client
GSUTIL_CLIENT_ID = '909320924072.apps.googleusercontent.com'
# Google OAuth2 clients always have a secret, even if the client is an installed
# application/utility such as gsutil. Of course, in such cases the "secret" is
# actually publicly known; security depends entirly on the secrecy of refresh
# tokens, which effectively become bearer tokens.
GSUTIL_CLIENT_NOTSOSECRET = 'p3RlpR10xMFh9ZXBS/ZNLYUu'
GOOGLE_OAUTH2_PROVIDER_LABEL = 'Google'
GOOGLE_OAUTH2_PROVIDER_AUTHORIZATION_URI = (
'https://accounts.google.com/o/oauth2/auth')
GOOGLE_OAUTH2_PROVIDER_TOKEN_URI = (
'https://accounts.google.com/o/oauth2/token')
OOB_REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
| 38.644231 | 91 | 0.736502 |
585b570f1181a34255df0bd7a81ffc1c67034916 | 5,311 | py | Python | csl-tracking-dependents.py | Marcool04/utilities | d9bf0aae7decdad111fc0c8cefacf10c230ce9ee | [
"MIT"
] | 10 | 2015-04-14T16:49:43.000Z | 2020-06-01T14:31:04.000Z | csl-tracking-dependents.py | Marcool04/utilities | d9bf0aae7decdad111fc0c8cefacf10c230ce9ee | [
"MIT"
] | 23 | 2015-01-20T04:13:35.000Z | 2021-09-07T18:36:00.000Z | csl-tracking-dependents.py | Marcool04/utilities | d9bf0aae7decdad111fc0c8cefacf10c230ce9ee | [
"MIT"
] | 6 | 2015-01-10T13:00:37.000Z | 2021-09-19T09:25:22.000Z | # -*- coding: utf-8 -*-
# Python script to manage automatically generated dependents
# Author: Rintze M. Zelle
# Version: 2014-04-17
# * Requires lxml library (http://lxml.de/)
import os, glob, re, inspect, shutil
from lxml import etree
# http://stackoverflow.com/questions/50499
folderPath = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentFolderPath = os.path.dirname (folderPath)
path = os.path.join(parentFolderPath, 'styles')
pathGeneratedStyles = os.path.join(parentFolderPath, 'utilities', 'generate_dependent_styles', 'generated_styles', 'aacr')
pathRemovedStyles = os.path.join(parentFolderPath, 'removed-styles')
dependentStyles = []
commentMatchingStyles = []
parentMatchingStyles = []
timestampMatchingStyles = []
generatedStyles = []
for stylepath in glob.glob( os.path.join(path, 'dependent', '*.csl') ):
dependentStyles.append(os.path.join(stylepath))
for stylepath in glob.glob( os.path.join(pathGeneratedStyles, '*.csl') ):
generatedStyles.append(os.path.basename(stylepath))
#Filter dependent styles by their parent (set A), print number
#Of set A, print style ID if XML comment doesn't match that of dependent style template
#Of set A, print style ID if timestamp doesn't match that of dependent style template
#Have a toggle to move remaining styles out of root folder
#(it would be better to filter by the XML comment on the first pass, since styles from
#a set may have different parents, but XML comments aren't currently unique to a set)
for style in dependentStyles:
parser = etree.XMLParser(remove_blank_text=True)
parsedStyle = etree.parse(style, parser)
styleElement = parsedStyle.getroot()
parentLink = styleElement.find(".//{http://purl.org/net/xbiblio/csl}link[@rel='independent-parent']")
if(parentLink.attrib.get("href") == "http://www.zotero.org/styles/american-association-for-cancer-research"):
parentMatchingStyles.append(os.path.basename(style))
comments = styleElement.xpath("//comment()", namespaces={"cs": "http://purl.org/net/xbiblio/csl"})
for comment in comments:
if(comment.text == " Generated with https://github.com/citation-style-language/utilities/tree/master/generate_dependent_styles/data/aacr "):
commentMatchingStyles.append(os.path.basename(style))
timestamp = styleElement.find(".//{http://purl.org/net/xbiblio/csl}updated")
if(timestamp.text == "2014-04-23T12:00:00+00:00"):
timestampMatchingStyles.append(os.path.basename(style))
print("Number of dependent styles with selected parent: " + str(len(parentMatchingStyles)))
print("Number of generated styles: " + str(len(generatedStyles)))
for style in parentMatchingStyles:
badStyle = False
if not (style in commentMatchingStyles):
print "bad comment!: " + style
badStyle = True
if not (style in timestampMatchingStyles):
print "bad timestamp!: " + style
badStyle = True
if not (style in generatedStyles):
print "not generated!: " + style
badStyle = True
if badStyle:
parentMatchingStyles.remove(style)
print("Number of consistent styles: " + str(len(parentMatchingStyles)))
moveStyles = False
if moveStyles == True:
#move styles out of "styles/dependent" folder
if not os.path.exists(pathRemovedStyles):
os.makedirs(pathRemovedStyles)
for style in parentMatchingStyles:
shutil.move(os.path.join(path, 'dependent', style), os.path.join(pathRemovedStyles, style))
# counter = []
# for infoNodeIndex, infoNode in enumerate(csInfo):
# # check if node is an element
# if isinstance(infoNode.tag, basestring):
# # get rid of namespace
# infoElement = infoNode.tag.replace("{http://purl.org/net/xbiblio/csl}","")
# if(infoElement == "link"):
# infoElement += "[@" + infoNode.get("rel") + "]"
# if((infoElement == "category") & (infoNode.get("citation-format") is not None)):
# infoElement += "[@citation-format]"
# if((infoElement == "category") & (infoNode.get("field") is not None)):
# infoElement += "[@field]"
# # check if node is a comment
# elif (etree.tostring(infoNode, encoding='UTF-8', xml_declaration=False) == ("<!--" + infoNode.text.encode("utf-8") + "-->")):
# # keep comments that precede any element at the top
# if(sum(counter) == 0):
# counter.append(desiredOrder.index("preceding-comment"))
# # keep a comment at the end at the end
# elif(len(counter) == (len(csInfo) - 1)):
# counter.append(desiredOrder.index("end-comment"))
# # keep other comments with preceding element
# else:
# counter.append(counter[-1])
#
# # Possible improvements:
# # * exceptions for recognizable comments (issn, category)
# else:
# print(infoNode)
#
# # Reorder attributes on cs:link
# try:
# links = styleElement.findall(".//{http://purl.org/net/xbiblio/csl}link")
# for link in links:
# rel = link.get("rel")
# del link.attrib["rel"]
# link.set("rel",rel)
# except:
# pass
| 44.630252 | 148 | 0.656562 |
585f06a860286b312d33973ef25ef2866dfc0808 | 642 | py | Python | selenium_browser/__resources/constants.py | kkristof200/selenium_browser | b8144fe935073367911e90b50f078bfa985d6c0f | [
"MIT"
] | 1 | 2021-06-25T06:55:43.000Z | 2021-06-25T06:55:43.000Z | selenium_browser/__resources/constants.py | kkristof200/selenium_browser | b8144fe935073367911e90b50f078bfa985d6c0f | [
"MIT"
] | null | null | null | selenium_browser/__resources/constants.py | kkristof200/selenium_browser | b8144fe935073367911e90b50f078bfa985d6c0f | [
"MIT"
] | null | null | null | # ------------------------------------------------------- class: Constants ------------------------------------------------------- #
# -------------------------------------------------------------------------------------------------------------------------------- # | 45.857143 | 132 | 0.311526 |
585fbd132230f1c1b7c7d02416766ecbbe4a68a2 | 2,893 | py | Python | api/models/__init__.py | victorabarros/challenge-alloy-card | a3188fea298541130c24ebf4639d2af4700ba362 | [
"MIT"
] | null | null | null | api/models/__init__.py | victorabarros/challenge-alloy-card | a3188fea298541130c24ebf4639d2af4700ba362 | [
"MIT"
] | null | null | null | api/models/__init__.py | victorabarros/challenge-alloy-card | a3188fea298541130c24ebf4639d2af4700ba362 | [
"MIT"
] | null | null | null | },
6: {
{ii: Piece(self.player_1, "pawn", ii, (6, ii))
}
}
pieces = {
self.player_0: {
'rook': {0: self.board[0][0], 1: self.board[0][7]},
'knight': {0: self.board[0][1], 1: self.board[0][6]},
'bishop': {0: self.board[0][2], 1: self.board[0][5]},
'king': {0: self.board[0][3]},
'queen': {0: self.board[0][4]},
'pawn': {}
},
self.player_1: {
'rook': {0: self.board[7][0], 1: self.board[7][7]},
'knight': {0: self.board[7][1], 1: self.board[7][6]},
'bishop': {0: self.board[7][2], 1: self.board[7][5]},
'king': {0: self.board[7][3]},
'queen': {0: self.board[7][4]},
'pawn': {}
}
}
for ii in range(0, 8):
pieces[self.player_0]["pawn"][ii] = self.board[1][ii]
pieces[self.player_1]["pawn"][ii] = [6][ii]
self.pieces = pieces
def find_piece(self, x_coordinate, y_coordinate):
piece = self.board.get(x_coordinate, {}).get(y_coordinate)
return piece
def to_dict(self):
return {'current_player_turn': self.current_player_turn,
'pieces': self.pieces}
class Piece:
def __init__(self, player, kind, ii, coordinate):
self.player = player
self.kind = kind
self.ii = ii
self.coordinate = coordinate
| 36.620253 | 69 | 0.444867 |
58610c3f91576fd189f2c5eb7bc06289b39922a3 | 50,976 | py | Python | spinta/manifests/tabular/helpers.py | atviriduomenys/spinta | 77a10e201f8cdc63143fce7996fd0898acb1ff58 | [
"MIT"
] | 2 | 2019-03-14T06:41:14.000Z | 2019-03-26T11:48:14.000Z | spinta/manifests/tabular/helpers.py | sirex/spinta | 77a10e201f8cdc63143fce7996fd0898acb1ff58 | [
"MIT"
] | 44 | 2019-04-05T15:52:45.000Z | 2022-03-30T07:41:33.000Z | spinta/manifests/tabular/helpers.py | sirex/spinta | 77a10e201f8cdc63143fce7996fd0898acb1ff58 | [
"MIT"
] | 1 | 2019-04-01T09:54:27.000Z | 2019-04-01T09:54:27.000Z | from __future__ import annotations
import csv
import pathlib
import textwrap
from operator import itemgetter
from typing import Any
from typing import Callable
from typing import Dict
from typing import IO
from typing import Iterable
from typing import Iterator
from typing import List
from typing import NamedTuple
from typing import Optional
from typing import Set
from typing import Tuple
from typing import TypeVar
from typing import Union
from typing import cast
import openpyxl
import xlsxwriter
from lark import ParseError
from spinta import commands
from spinta import spyna
from spinta.backends import Backend
from spinta.backends.components import BackendOrigin
from spinta.components import Context
from spinta.datasets.components import Resource
from spinta.dimensions.comments.components import Comment
from spinta.dimensions.enum.components import EnumItem
from spinta.components import Model
from spinta.components import Namespace
from spinta.components import Property
from spinta.core.enums import Access
from spinta.core.ufuncs import unparse
from spinta.datasets.components import Dataset
from spinta.dimensions.enum.components import Enums
from spinta.dimensions.lang.components import LangData
from spinta.dimensions.prefix.components import UriPrefix
from spinta.exceptions import MultipleErrors
from spinta.exceptions import PropertyNotFound
from spinta.manifests.components import Manifest
from spinta.manifests.helpers import load_manifest_nodes
from spinta.manifests.tabular.components import ACCESS
from spinta.manifests.tabular.components import BackendRow
from spinta.manifests.tabular.components import BaseRow
from spinta.manifests.tabular.components import CommentData
from spinta.manifests.tabular.components import DESCRIPTION
from spinta.manifests.tabular.components import DatasetRow
from spinta.manifests.tabular.components import ParamRow
from spinta.manifests.tabular.components import EnumRow
from spinta.manifests.tabular.components import ID
from spinta.manifests.tabular.components import MANIFEST_COLUMNS
from spinta.manifests.tabular.components import ManifestColumn
from spinta.manifests.tabular.components import ManifestRow
from spinta.manifests.tabular.components import ManifestTableRow
from spinta.manifests.tabular.components import ModelRow
from spinta.manifests.tabular.components import PREPARE
from spinta.manifests.tabular.components import PROPERTY
from spinta.manifests.tabular.components import PrefixRow
from spinta.manifests.tabular.components import PropertyRow
from spinta.manifests.tabular.components import REF
from spinta.manifests.tabular.components import ResourceRow
from spinta.manifests.tabular.components import SOURCE
from spinta.manifests.tabular.components import TITLE
from spinta.manifests.tabular.components import TabularFormat
from spinta.manifests.tabular.constants import DATASET
from spinta.manifests.tabular.formats.gsheets import read_gsheets_manifest
from spinta.spyna import SpynaAST
from spinta.types.datatype import Ref
from spinta.utils.data import take
from spinta.utils.schema import NA
from spinta.utils.schema import NotAvailable
# A (row number, parsed row data) pair produced by the tabular readers.
ParsedRow = Tuple[int, Dict[str, Any]]
# Hierarchical dimensions of the tabular manifest format, ordered from the
# outermost (dataset) to the innermost (property) scope.
MAIN_DIMENSIONS = [
    'dataset',
    'resource',
    'base',
    'model',
    'property',
]
# Auxiliary dimensions that can appear in the type column; the empty
# string marks a continuation (append) row.
EXTRA_DIMENSIONS = [
    '',
    'prefix',
    'enum',
    'param',
    'comment',
    'ns',
    'lang',
]
def _parse_property_ref(ref: str) -> Tuple[str, List[str]]:
if '[' in ref:
ref = ref.rstrip(']')
ref_model, ref_props = ref.split('[', 1)
ref_props = [p.strip() for p in ref_props.split(',')]
else:
ref_model = ref
ref_props = []
return ref_model, ref_props
# Maps each dimension name (the value of the manifest's type column) to
# the reader class that parses rows of that dimension.  The reader
# classes are presumably defined elsewhere in this module.
READERS = {
    # Main dimensions
    'dataset': DatasetReader,
    'resource': ResourceReader,
    'base': BaseReader,
    'model': ModelReader,
    'property': PropertyReader,
    # Extra dimensions
    '': AppendReader,
    'prefix': PrefixReader,
    'ns': NamespaceReader,
    'param': ParamReader,
    'enum': EnumReader,
    'lang': LangReader,
    'comment': CommentReader,
}
def striptable(table):
    """Dedent an inline ASCII manifest table and strip the surrounding
    whitespace, so triple-quoted tables can be written indented."""
    dedented = textwrap.dedent(table)
    return dedented.strip()
def _join_escapes(row: List[str]) -> List[str]:
res = []
for v in row:
if res and res[-1] and res[-1].endswith('\\'):
res[-1] = res[-1][:-1] + '|' + v
else:
res.append(v)
return res
def load_ascii_tabular_manifest(
    context: Context,
    manifest: Manifest,
    manifest_ascii_table: str,
    *,
    strip: bool = False,
) -> None:
    """Parse an ASCII-table manifest string, load the resulting schemas
    into `manifest` and link the loaded nodes."""
    load_manifest_nodes(
        context,
        manifest,
        read_ascii_tabular_manifest(manifest_ascii_table, strip=strip),
    )
    commands.link(context, manifest)
def get_relative_model_name(dataset: dict, name: str) -> str:
    """Resolve a model name relative to its dataset.

    Absolute names (leading ``/``) are returned without the slash;
    otherwise the dataset name is prepended, unless no dataset is given.
    """
    if name.startswith('/'):
        return name[1:]
    if dataset is None:
        return name
    return f"{dataset['name']}/{name}"
def to_relative_model_name(model: Model, dataset: Dataset = None) -> str:
    """Convert an absolute model name into a name relative to `dataset`.

    Models outside the dataset keep their absolute name with a leading
    ``/`` marker; without a dataset the name is returned unchanged.
    """
    name = model.name
    if dataset is None:
        return name
    if not name.startswith(dataset.name):
        return '/' + name
    # Drop the "<dataset name>/" prefix.
    return name[len(dataset.name) + 1:]
def tabular_eid(model: Model):
    """Sort key: the model's eid when it is an int, otherwise 0."""
    return model.eid if isinstance(model.eid, int) else 0
# Sorting strategies, keyed by strategy name, used when serializing a
# manifest back to tabular form.  `OrderBy` pairs a key function with a
# sort direction; the key functions are presumably defined elsewhere in
# this module.
DATASETS_ORDER_BY = {
    'access': OrderBy(_order_datasets_by_access, reverse=True),
    'default': OrderBy(_order_datasets_by_name),
}
MODELS_ORDER_BY = {
    'access': OrderBy(_order_models_by_access, reverse=True),
    'default': OrderBy(tabular_eid),
}
PROPERTIES_ORDER_BY = {
    'access': OrderBy(_order_properties_by_access, reverse=True),
}
# Type variable covering the manifest node types the ordering helpers
# operate on.
T = TypeVar('T', Dataset, Model, Property, EnumItem)
def _order_enums_by_access(item: EnumItem):
    """Sort key: the enum item's access level, defaulting to private."""
    if item.access:
        return item.access
    return Access.private
# Sorting strategies for enum items, same structure as the other
# *_ORDER_BY mappings in this module.
ENUMS_ORDER_BY = {
    'access': OrderBy(_order_enums_by_access, reverse=True),
}
def torow(keys, values) -> ManifestRow:
    """Build a row dict containing exactly `keys`, filling missing
    entries with None."""
    return {key: values.get(key) for key in keys}
def render_tabular_manifest(
    manifest: Manifest,
    cols: List[ManifestColumn] = None,
    *,
    sizes: Dict[ManifestColumn, int] = None,
) -> str:
    """Render the whole manifest as an aligned ASCII table string."""
    return render_tabular_manifest_rows(
        datasets_to_tabular(manifest),
        cols,
        sizes=sizes,
    )
def render_tabular_manifest_rows(
    rows: Iterable[ManifestRow],
    cols: List[ManifestColumn] = None,
    *,
    sizes: Dict[ManifestColumn, int] = None,
) -> str:
    """Render manifest rows as an aligned ASCII table.

    The hierarchical dimension columns (dataset..property) share one
    visual column: deeper dimensions are indented further and borrow
    width from the `property` column.  `sizes` maps column -> width;
    when not given, widths are computed from the data, which requires
    materializing `rows` into a list so it can be iterated twice.
    """
    cols = cols or MANIFEST_COLUMNS
    hs = 1 if ID in cols else 0  # hierarchical cols start
    he = cols.index(PROPERTY)  # hierarchical cols end
    hsize = 1  # hierarchical column size
    bsize = 3  # border size
    if sizes is None:
        # Initial widths: flat columns get their header length,
        # hierarchical columns start at width 1.
        sizes = dict(
            [(c, len(c)) for c in cols[:hs]] +
            [(c, 1) for c in cols[hs:he]] +
            [(c, len(c)) for c in cols[he:]]
        )
        rows = list(rows)
        for row in rows:
            for i, col in enumerate(cols):
                val = '' if row[col] is None else str(row[col])
                if col == ID:
                    # Only the first two characters of an id are shown.
                    sizes[col] = 2
                elif i < he:
                    # Hierarchical value: if it overflows the shared
                    # column, widen PROPERTY to absorb the difference.
                    size = (hsize + bsize) * (he - hs - i) + sizes[PROPERTY]
                    if size < len(val):
                        sizes[PROPERTY] += len(val) - size
                elif sizes[col] < len(val):
                    sizes[col] = len(val)
    # Header line: column names truncated/padded to their widths.
    line = []
    for col in cols:
        size = sizes[col]
        line.append(col[:size].ljust(size))
    lines = [line]
    for row in rows:
        if ID in cols:
            line = [row[ID][:2] if row[ID] else '  ']
        else:
            line = []
        # Determine which hierarchical dimension this row belongs to.
        for i, col in enumerate(cols[hs:he + 1]):
            val = row[col] or ''
            if val:
                depth = i
                break
        else:
            val = ''
            depth = 0
        # Indent by depth, then pad the value to the remaining shared width.
        line += [' ' * hsize] * depth
        size = (hsize + bsize) * (he - hs - depth) + sizes[PROPERTY]
        line += [val.ljust(size)]
        for col in cols[he + 1:]:
            val = '' if row[col] is None else str(row[col])
            # Escape literal pipes so they survive the ' | ' cell separator.
            val = val.replace('|', '\\|')
            size = sizes[col]
            line.append(val.ljust(size))
        lines.append(line)
    lines = [' | '.join(line) for line in lines]
    lines = [l.rstrip() for l in lines]
    return '\n'.join(lines)
# One-letter aliases for the dimension/column names used in manifest
# tables.
SHORT_NAMES = {
    'd': 'dataset',
    'r': 'resource',
    'b': 'base',
    'm': 'model',
    'p': 'property',
    't': 'type',
}
| 27.779837 | 80 | 0.568464 |