hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4f0516d6a42f51c998184fee233eec9bb65f272a | 427 | py | Python | codewars/7kyu/mohamedashrafamin/Ultimate_Array_Reverser/test_bench.py | mohamedashrafamin/Training_one | 11748fdde85cdc9083e2b0bde7519b51a7acfa62 | [
"MIT"
] | null | null | null | codewars/7kyu/mohamedashrafamin/Ultimate_Array_Reverser/test_bench.py | mohamedashrafamin/Training_one | 11748fdde85cdc9083e2b0bde7519b51a7acfa62 | [
"MIT"
] | 2 | 2019-01-22T10:53:42.000Z | 2019-01-31T08:02:48.000Z | codewars/7kyu/mohamedashrafamin/Ultimate_Array_Reverser/test_bench.py | mohamedashrafamin/Training_one | 11748fdde85cdc9083e2b0bde7519b51a7acfa62 | [
"MIT"
] | 13 | 2019-01-22T10:37:42.000Z | 2019-01-25T13:30:43.000Z | from main import reverse, reverse1
def test1(benchmark):
    # Benchmark the `reverse` implementation and check the expected output.
    words = ["I", "like", "big", "butts", "and", "I", "cannot", "lie!"]
    expected = ["!", "eilt", "onn", "acIdn", "ast", "t", "ubgibe", "kilI"]
    assert benchmark(reverse, words) == expected
def test(benchmark):
    # Benchmark the alternative `reverse1` implementation on the same input.
    words = ["I", "like", "big", "butts", "and", "I", "cannot", "lie!"]
    expected = ["!", "eilt", "onn", "acIdn", "ast", "t", "ubgibe", "kilI"]
    assert benchmark(reverse1, words) == expected
# py.test | 32.846154 | 96 | 0.508197 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 181 | 0.423888 |
4f0673060aa2a1a406e83c4abf1c5a23f61d0412 | 226 | py | Python | scripts/instagramy.py | lathropd/instanewsroom | 2e78a490f7c37b34d7f3020a6fab2cfcf4ebd46a | [
"Artistic-2.0"
] | null | null | null | scripts/instagramy.py | lathropd/instanewsroom | 2e78a490f7c37b34d7f3020a6fab2cfcf4ebd46a | [
"Artistic-2.0"
] | null | null | null | scripts/instagramy.py | lathropd/instanewsroom | 2e78a490f7c37b34d7f3020a6fab2cfcf4ebd46a | [
"Artistic-2.0"
] | null | null | null | from instagram.client import InstagramAPI
from pprint import pprint
# SECURITY: hardcoded API credential committed to source control — this token
# should be revoked and loaded from an environment variable or config file.
test_access_token = "1890268.7c3f1ab.e1c64ca8df38410099d98bff8a868bb6"
api = InstagramAPI(access_token=test_access_token)
# Dump the authenticated user's recent media to stdout.
pprint( api.user_recent_media())
| 25.111111 | 70 | 0.849558 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.221239 |
4f06852d79a4234cb404a8d1df2fcff4ed2b0dcb | 2,356 | py | Python | rodario/registry.py | haliphax/rodario | c31f9a99f80a19cd911edb61e86f4050003dbd90 | [
"MIT"
] | 4 | 2015-08-18T16:54:38.000Z | 2021-02-21T13:07:08.000Z | rodario/registry.py | haliphax/rodario | c31f9a99f80a19cd911edb61e86f4050003dbd90 | [
"MIT"
] | 14 | 2015-08-29T06:04:52.000Z | 2017-04-12T17:58:09.000Z | rodario/registry.py | haliphax/rodario | c31f9a99f80a19cd911edb61e86f4050003dbd90 | [
"MIT"
] | 2 | 2016-10-14T21:27:54.000Z | 2017-05-07T12:59:08.000Z | """ Actor registry for rodario framework """
# local
from rodario import get_redis_connection
from rodario.exceptions import RegistrationException
# pylint: disable=C1001
class _RegistrySingleton(object):
    """ Singleton backing store for the actor registry.

    Keeps the set of registered actor UUIDs in a redis set.
    """

    def __init__(self, prefix=None):
        """ Connect to redis and build the registry set's key name.

        :param str prefix: Optional prefix for redis key names
        """
        self._redis = get_redis_connection()
        self._list = '{prefix}actors'.format(prefix=prefix)

    @property
    def actors(self):
        """ The set of registered actor UUIDs.

        :rtype: :class:`set`
        """
        return self._redis.smembers(self._list)

    def register(self, uuid):
        """ Register a new actor.

        :param str uuid: The UUID of the actor to register
        :raises RegistrationException: if the UUID could not be added
        """
        added = self._redis.sadd(self._list, uuid)
        if not added:
            raise RegistrationException('Failed adding member to set')

    def unregister(self, uuid):
        """ Remove an existing actor from the registry.

        :param str uuid: The UUID of the actor to unregister
        """
        self._redis.srem(self._list, uuid)

    def exists(self, uuid):
        """ Check whether an actor UUID is registered.

        :param str uuid: UUID of the actor to check for
        :rtype: :class:`bool`
        """
        return 1 == self._redis.sismember(self._list, uuid)

    # pylint: disable=R0201
    def get_proxy(self, uuid):
        """ Build an ActorProxy for the given UUID.

        :param str uuid: The UUID to return a proxy object for
        :rtype: :class:`rodario.actors.ActorProxy`
        """
        # import lazily to avoid a cyclic module dependency
        actors_module = __import__('rodario.actors',
                                   fromlist=('ActorProxy',))
        return actors_module.ActorProxy(uuid=uuid)
# pylint: disable=R0903
class Registry(object):
    """ Actor registry class (singleton wrapper) """

    _instance = None  # shared _RegistrySingleton, created on first use

    def __new__(cls, prefix=None):
        """ Return the shared registry singleton, creating it on first use.

        :param str prefix: Optional prefix for redis key names
            (only honored on the very first instantiation)
        :rtype: :class:`rodario.registry._RegistrySingleton`
        """
        if cls._instance is None:
            cls._instance = _RegistrySingleton(prefix=prefix)
        return cls._instance
| 24.28866 | 70 | 0.604414 | 2,154 | 0.914261 | 0 | 0 | 180 | 0.076401 | 0 | 0 | 1,250 | 0.53056 |
4f080165a526d26946f0e9abfb0e9d778b0984e7 | 1,851 | py | Python | research/recommend/Fat-DeepFFM/eval310.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | research/recommend/Fat-DeepFFM/eval310.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | research/recommend/Fat-DeepFFM/eval310.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""postprocess."""
import argparse
import os
import numpy as np
from mindspore import Tensor
from src.config import ModelConfig
from src.metrics import AUCMetric
# Command-line arguments for the 310 postprocess step.
# NOTE(review): the help strings ('Dataset path' / 'Checkpoint path') look
# copy-pasted from another script — they actually point at the inference
# result binaries and the label binaries; confirm and fix the help text.
parser = argparse.ArgumentParser(description='CTR Prediction')
parser.add_argument('--result_path', type=str, default="./result_Files", help='Dataset path')
parser.add_argument('--label_path', type=str, default="./CriteoBinary/batch_labels", help='Checkpoint path')
args = parser.parse_args()
def get_acc():
    ''' Compute the AUC over all inference-result/label binary pairs.

    For each file in --label_path, the matching prediction binary
    "<name>_0.bin" is read from --result_path, both are reshaped to
    (batch_size, 1), fed to AUCMetric, and the final AUC is printed.
    '''
    config = ModelConfig()
    batch_size = config.batch_size
    auc_metric = AUCMetric()
    files = os.listdir(args.label_path)
    for f in files:
        # prediction binary is named after the label file's stem plus "_0.bin"
        rst_file = os.path.join(args.result_path, f.split('.')[0] + '_0.bin')
        label_file = os.path.join(args.label_path, f)
        logit = Tensor(np.fromfile(rst_file, np.float32).reshape(batch_size, 1))
        label = Tensor(np.fromfile(label_file, np.float32).reshape(batch_size, 1))
        res = []
        # NOTE(review): logit is appended twice on purpose? AUCMetric.update
        # apparently expects a 3-tuple — presumably (loss, predict, label)
        # with the logits standing in for the loss slot; confirm against the
        # AUCMetric implementation in src/metrics.py.
        res.append(logit)
        res.append(logit)
        res.append(label)
        auc_metric.update(*res)
    auc = auc_metric.eval()
    print("auc : {}".format(auc))


if __name__ == '__main__':
    get_acc()
| 31.913793 | 108 | 0.67477 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 843 | 0.455429 |
4f0807b07f68c7bfcd9808bf07ec2c1b431bde18 | 2,834 | py | Python | organized/cli.py | lukeplausin/organised | d60e87e3ba94ad5d5673904e75f34eda9b471a43 | [
"MIT"
] | null | null | null | organized/cli.py | lukeplausin/organised | d60e87e3ba94ad5d5673904e75f34eda9b471a43 | [
"MIT"
] | null | null | null | organized/cli.py | lukeplausin/organised | d60e87e3ba94ad5d5673904e75f34eda9b471a43 | [
"MIT"
] | null | null | null | import click
import logging
from . import logger, log_handler
from .organize import main, ORGANIZER_NAMES
from .storage.amazon_s3_storage import AWS_CLI_PARAMETERS
# Root click command group. A docstring is deliberately NOT added here
# because click would surface it as the group's help text and change the
# CLI output; --debug raises the package logger/handler to DEBUG level.
@click.group()
@click.option('--debug/--no-debug', default=False)
def organize(debug=False):
    if debug:
        logger.setLevel(logging.DEBUG)
        log_handler.setLevel(logging.DEBUG)
        logger.debug('Debug mode is ON.')
# Options shared by every organize subcommand.
GENERIC_CLI_PARAMETERS = [
    click.Option(
        ['--dry-run/--no-dry-run'], required=False, default=False,
        show_default=True,
        help='If specified, does nothing but explains what would change.'
    ),
    click.Option(
        ['--prompt/--no-prompt'], required=False, default=True,
        show_default=True,
        help='If specified, ask user for confirmation before doing anything.'
    ),
]

# Generic options plus the AWS/S3-specific ones defined by the storage layer.
# (A leftover debug `print(AWS_CLI_PARAMETERS)` that ran on every import of
# this module has been removed.)
ALL_CLI_PARAMETERS = GENERIC_CLI_PARAMETERS + AWS_CLI_PARAMETERS
def _add_click_parameters(params, **kwargs):
    """
    Decorator factory that attaches a list of click parameter objects
    to a click command.

    :param params: list of :class:`click.Parameter` objects to attach
    :returns: decorator that registers the parameters on the command
    """
    def decorator(f):
        # click builds the final parameter list by prepending, so add the
        # params back-to-front to preserve their declared order on the CLI.
        # NOTE: click.decorators._param_memo is a private click API and may
        # break on a click upgrade.
        for param in reversed(params):
            click.decorators._param_memo(f, param)
        return f
    return decorator
from .helpers.camera_organizer import DEFAULT_BASE_PATH, DEFAULT_EXTENSIONS, DEFAULT_FILE_PATH
# Subcommand: file photos from INPUT_DIR into a directory/filename layout
# built from their exif tags. (A commented-out `--organizers` option that
# was dead code has been removed.)
@organize.command(help='Organize photograph files based on data in the exif tags.')
@click.argument('input-dir')
@click.option(
    '--base-path', default=DEFAULT_BASE_PATH, show_default=True,
    help='Where should I send the photos to?')
@click.option(
    '--file-path', default=DEFAULT_FILE_PATH, show_default=True,
    help='''What should I save the photos as?
    Use the exif tags as in the python format function.
    ''')
# Add generic options
@_add_click_parameters(ALL_CLI_PARAMETERS)
def camera(input_dir, **kwargs):
    # Delegate to main() with only the 'camera' organizer enabled.
    main(organizers=['camera'], input_dir=input_dir, **kwargs)
from .helpers.junkfile_organizer import JUNK_FILENAMES, JUNK_EXTENSIONS
# Subcommand: delete junk files/extensions and (optionally) empty dirs.
# NOTE(review): unlike `camera`, the generic --dry-run/--prompt options
# (ALL_CLI_PARAMETERS) are not attached here — confirm that is intended.
@organize.command(help='Organize files and directories according to your preferences')
@click.argument('input-dir')
@click.option(
    '--cleanup-empty-dirs/--no-cleanup-empty-dirs', default=True, show_default=True,
    help='If specified, deletes empty directories.')
@click.option(
    '--junk-filenames', default=JUNK_FILENAMES, show_default=True,
    help='Any files matching these filenames will be deleted.')
@click.option(
    '--junk-extensions', default=JUNK_EXTENSIONS, show_default=True,
    help='Any files with these filename extensions will be deleted.')
def dejunk(input_dir, **kwargs):
    # Delegate to main() with only the 'junk' organizer enabled.
    main(organizers=['junk'], input_dir=input_dir, **kwargs)
| 34.987654 | 94 | 0.711009 | 0 | 0 | 0 | 0 | 1,600 | 0.564573 | 0 | 0 | 1,031 | 0.363797 |
4f08474d84da7c04739199fefa825d37fe709888 | 5,798 | py | Python | scripts/msresults.py | aw32/sched | b6ef35c5b517875a5954c70e2dc366fab3721a60 | [
"BSD-2-Clause"
] | null | null | null | scripts/msresults.py | aw32/sched | b6ef35c5b517875a5954c70e2dc366fab3721a60 | [
"BSD-2-Clause"
] | null | null | null | scripts/msresults.py | aw32/sched | b6ef35c5b517875a5954c70e2dc366fab3721a60 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2019, Alex Wiens <awiens@mail.upb.de>, Achim Lösch <achim.loesch@upb.de>
# SPDX-License-Identifier: BSD-2-Clause
# Reads ms task measurement results from CSV files.
#
# If the path to the results is not given it is read from the following environment variables:
# * SCHED_MSRESULTS
# * SCHED_RESULTS and SCHED_HOST: $SCHED_RESULTS/$SCHED_HOST/ms_results
#
# Use MSResults.load_results() get a MSResults object containing the results.
import os
import os.path
import re
import csv
# Regex for the ms result CSV filename format.
# Captures: (task name, size, resource name, result type).
# e.g. "ms_markov(1024)@IntelXeon_energy.csv"
# e.g. "ms_gaussblur(512)@NvidiaTesla_time.csv"
# Now a raw string (the bare "\(" escapes raised DeprecationWarnings on
# modern Python) and the literal '.' before "csv" is escaped so it no
# longer matches any character.
re_msresult_filename = re.compile(r"^ms_([^(]+)\(([0-9]+)\)@([^_]+)_(energy|time)\.csv")
# Contains measurement results read from energy and time CSV files
class MSResult:
    """Measurement results for one (task, size, resource) combination,
    read from the ms "*_time.csv" and "*_energy.csv" files.

    Column averages are computed lazily on first access of the avg_*
    accessors.
    """

    def __init__(self, task, size, res):
        self.task = task        # task name, e.g. "markov"
        self.size = size        # problem size
        self.res = res          # resource name, e.g. "IntelXeon"
        self.time = []          # all time csv entries (rows of floats)
        self.energy = []        # all energy csv entries (rows of floats)
        self.avgtime = []       # per-column averages over all time rows
        self.avgenergy = []     # per-column averages over all energy rows

    def avg_time(self):
        """Return column 3 of the averaged time rows (presumably the
        task's compute time — confirm against the CSV header)."""
        if len(self.avgtime) == 0:
            self.computeAvg()
        return self.avgtime[3]

    def avg_init(self):
        """Return column 4 of the averaged time rows (presumably the
        initialization time)."""
        if len(self.avgtime) == 0:
            self.computeAvg()
        return self.avgtime[4]

    def avg_fini(self):
        """Return column 6 of the averaged time rows (presumably the
        finalization time)."""
        if len(self.avgtime) == 0:
            self.computeAvg()
        return self.avgtime[6]

    def avg_energy(self):
        """Return column 2 of the averaged energy rows."""
        if len(self.avgenergy) == 0:
            self.computeAvg()
        return self.avgenergy[2]

    @staticmethod
    def read(task, size, resource, mspath):
        """Read the time and energy CSVs for (task, size, resource) from
        mspath and return a populated MSResult, or None if neither file
        could be read.

        Declared @staticmethod: it is called as MSResult.read(...) and never
        uses an instance; without the decorator an instance call would have
        bound self to the task argument.
        """
        res = MSResult(task, size, resource)
        time = []
        energy = []
        try:
            csvpath = os.path.join(mspath,"ms_"+task+"("+str(size)+")@"+resource+"_time.csv")
            with open(csvpath,"r") as f:
                csvr = csv.reader(f, delimiter=";")
                for ix,row in enumerate(csvr):
                    if ix < 2:  # skip the two header rows
                        continue
                    # drop empty cells (trailing delimiters), parse the rest
                    time.append([float(s) for s in row if s != ""])
        except Exception as e:
            print("MSResults read failed: ",e,"time",task,resource,size)
        try:
            csvpath = os.path.join(mspath,"ms_"+task+"("+str(size)+")@"+resource+"_energy.csv")
            with open(csvpath,"r") as f:
                csvr = csv.reader(f, delimiter=";")
                for ix,row in enumerate(csvr):
                    if ix < 2:  # skip the two header rows
                        continue
                    energy.append([float(s) for s in row if s != ""])
        except Exception as e:
            print("MSResults read failed:",e,"energy",task,resource,size)
        if len(time) == 0 and len(energy) == 0:
            return None
        res.time = time
        res.energy = energy
        res.computeAvg()
        return res

    def computeAvg(self):
        """Compute the per-column (0..6) averages of the time and energy
        rows into self.avgtime / self.avgenergy. Missing columns average
        to 0."""
        self.avgtime = []
        self.avgenergy = []
        for c in range(7):
            col = [r[c] for r in self.time]
            self.avgtime.append(sum(col)/len(col) if col else 0)
        for c in range(7):
            col = [r[c] for r in self.energy]
            self.avgenergy.append(sum(col)/len(col) if col else 0.0)
# Contains list of available results and loaded task measurments
# MSResult objects are created lazily on first access
class MSResults:
    """Registry of available ms task measurement results.

    Holds the list of (task, size, resource) combinations found on disk
    and lazily loads the corresponding MSResult objects on first access.
    """

    def __init__(self, mspath=None):
        self.mspath = mspath    # path to the ms_results directory
        self.result_list = []   # available (task, size, resource) tuples
        self.results = {}       # cache: (task, size, resource) -> MSResult

    def task_sizes(self, task):
        """ Return the sorted list of available sizes for task."""
        return sorted({t[1] for t in self.result_list if t[0] == task})

    def task_res_sizes(self, task, res):
        """ Return the sorted list of available sizes for task on resource res."""
        return sorted({t[1] for t in self.result_list if t[0] == task and t[2] == res})

    @staticmethod
    def load_results(mspath=None):
        """ Read the ms results directory with task measurement results.

        If mspath is not given, it is derived from the SCHED_MSRESULTS or
        SCHED_RESULTS/SCHED_HOST environment variables.  Returns an
        MSResults object, or None if the path cannot be located.

        Declared @staticmethod: the docstring-documented call form is
        MSResults.load_results(); without the decorator an instance call
        would have bound self to mspath.
        """
        if mspath is None:
            # try to find mspath from the environment
            if "SCHED_MSRESULTS" in os.environ:
                mspath = os.environ["SCHED_MSRESULTS"]
            elif "SCHED_RESULTS" in os.environ or "SCHED_HOST" in os.environ:
                mspath = os.path.join(os.environ["SCHED_RESULTS"], os.environ["SCHED_HOST"], "ms_results")
        if mspath is None:
            print("Error: SCHED_MSRESULTS or SCHED_RESULTS and SCHED_HOST environment variables not defined, can't locate ms results")
            return None
        if not os.path.isdir(mspath):
            print("Error: ms results path seems not to exist: ", mspath)
            print("Check SCHED_MSRESULTS, SCHED_RESULTS and SCHED_HOST environment variables")
            return None
        msres = MSResults(mspath=mspath)
        allresults = []
        results = []
        # Collect files in mspath matching the re_msresult_filename regex
        for dirname, dirnames, filenames in os.walk(mspath):
            for name in filenames:
                match = re_msresult_filename.match(name)
                if match is None:
                    continue
                task, size, res, restype = match.groups()
                entry = (task, int(size), res)
                if entry not in results:
                    results.append(entry)
                allresults.append(match.groups())
        # Only keep combinations for which BOTH the energy and the time
        # CSV files were found
        for entry in results:
            task, size, res = entry
            eentry = (task, str(size), res, "energy")
            tentry = (task, str(size), res, "time")
            if eentry in allresults and tentry in allresults:
                msres.result_list.append(entry)
        return msres

    def result(self, task, size, res):
        """ Return the MSResult for task with size on resource res,
        or None if that combination is not available."""
        if (task, size, res) not in self.result_list:
            print("task", task, size, res, "not in ms results" )
            return None
        # load lazily on first access and cache in the dict
        if (task, size, res) not in self.results:
            self.results[(task, size, res)] = MSResult.read(task, size, res, self.mspath)
        return self.results[(task, size, res)]
| 31.340541 | 126 | 0.666954 | 4,875 | 0.840662 | 0 | 0 | 0 | 0 | 0 | 0 | 1,931 | 0.332988 |
4f086a50fd782142ca6d65148089e454c15ce8c9 | 931 | py | Python | py_temper_exporter/main.py | srl295/py-temper-exporter | e44249eaac63bae97babbddbe771c38c5bf9b708 | [
"Apache-2.0"
] | null | null | null | py_temper_exporter/main.py | srl295/py-temper-exporter | e44249eaac63bae97babbddbe771c38c5bf9b708 | [
"Apache-2.0"
] | null | null | null | py_temper_exporter/main.py | srl295/py-temper-exporter | e44249eaac63bae97babbddbe771c38c5bf9b708 | [
"Apache-2.0"
] | null | null | null | import time
from prometheus_client import start_http_server, Gauge, Enum
from temper import Temper
def main():
port = 9204
t = Temper()
label_names = ['vendorid','productid','busnum','devnum']
temp_c = Gauge('temper_internal_temperature_celsius', 'Temperature in °C', label_names)
humid = Gauge('temper_internal_humidity_percent', 'Humidity in percent', label_names)
report_time = Gauge('temper_time', 'Time of report', label_names)
print('Listening on port %d' % port)
start_http_server(port)
while True:
data = t.read()
# print(data)
for d in data:
l = []
for label in label_names:
l.append(str(d[label]))
# print(l)
temp_c.labels(*l).set(d['internal temperature'])
humid.labels(*l).set(d['internal humidity'])
report_time.labels(*l).set_to_current_time()
time.sleep(500)
| 35.807692 | 91 | 0.625134 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 264 | 0.283262 |
4f0910b1680523eb838126b34d6516b834cc9d25 | 217 | py | Python | test/test_mssql.py | samirsilwal/pyodbc | 53f6bd17fe52a180729b4f0db2201a569f697582 | [
"MIT"
] | 20 | 2019-11-22T02:47:37.000Z | 2022-03-18T19:25:28.000Z | test/test_mssql.py | samirsilwal/pyodbc | 53f6bd17fe52a180729b4f0db2201a569f697582 | [
"MIT"
] | 29 | 2019-09-09T07:17:19.000Z | 2020-08-06T04:18:13.000Z | test/test_mssql.py | samirsilwal/pyodbc | 53f6bd17fe52a180729b4f0db2201a569f697582 | [
"MIT"
] | 19 | 2019-09-30T18:36:24.000Z | 2022-01-04T16:53:42.000Z | ''' Tests for MSSQL. '''
from util import exec_query, MSSQL
def test_connection():
    ''' Test connection to MSSQL Server. '''
    # Round-trip a constant string through the server; exec_query is
    # presumably expected to return the scalar query result — defined in
    # the project's util module.
    result = exec_query(MSSQL, "SELECT 'It works!'")
    assert result == 'It works!'
| 21.7 | 52 | 0.645161 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 95 | 0.437788 |
4f0960ee0a22ce87b6a0db2ab3c9c96334279396 | 61,597 | py | Python | fraunhofer/specfit.py | dnidever/fraunhofer | 49d34ff879501ce0e959f06046e2f32b0ee7ca43 | [
"MIT"
] | null | null | null | fraunhofer/specfit.py | dnidever/fraunhofer | 49d34ff879501ce0e959f06046e2f32b0ee7ca43 | [
"MIT"
] | null | null | null | fraunhofer/specfit.py | dnidever/fraunhofer | 49d34ff879501ce0e959f06046e2f32b0ee7ca43 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""SPECFIT.PY - Generic stellar abundance determination software
"""
from __future__ import print_function
__authors__ = 'David Nidever <dnidever@montana.edu>'
__version__ = '20200711' # yyyymmdd
import os
import shutil
import contextlib, io, sys
import numpy as np
import warnings
from astropy.io import fits
from astropy.table import Table
from dlnpyutils.minpack import curve_fit
from dlnpyutils.least_squares import least_squares
from scipy.interpolate import interp1d
from dlnpyutils import utils as dln, bindata, astro
import doppler
from doppler.spec1d import Spec1D
from doppler import (cannon,utils,reader)
import copy
import logging
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.legend import Legend
import tempfile
from . import models
from synple import synple
# Ignore these warnings, it's a bug
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
cspeed = 2.99792458e5 # speed of light in km/s
def synmodel(spec,params,alinefile=None,mlinefile=None,verbose=False,normalize=True):
    """
    Generate a synthetic spectrum matched to an observed spectrum.

    Parameters
    ----------
    spec : Spec1D object or str
        Observed spectrum (or the name of a spectrum file) whose wavelength
        grid, LSF and continuum function the model is matched to.
    params : dict
        Stellar parameters/abundances to use (e.g. TEFF); keys are
        case-insensitive.
    alinefile : str, optional
        Atomic linelist to use; synple's default is used when None.
    mlinefile : str, optional
        Molecular linelist to use; synple's default is used when None.
    verbose : int, optional
        Verbosity level (0, 1 or 2); 2 is for debugging.
    normalize : bool, optional
        Renormalize the synthetic spectrum with the observed spectrum's
        continuum function, mimicking the observed normalization.
        Default is True.

    Returns
    -------
    model : Spec1D object
        The synthetic spectrum; the "true" continuum is in model.cont.

    Example
    -------

    .. code-block:: python

         model = synmodel(spec,params)

    """
    # Load the spectrum from disk if a filename was given
    if isinstance(spec,str):
        fname = spec
        spec = doppler.read(fname)
        if spec is None:
            print('Problem loading '+fname)
            return
    # Downstream code expects all parameter keys in upper case
    upparams = {key.upper(): value for key, value in params.items()}
    # SpecFitter needs at least one "fit" parameter; TEFF is a dummy here
    fitter = SpecFitter(spec, upparams, fitparams=['TEFF'], verbose=(verbose>=2),
                        alinefile=alinefile, mlinefile=mlinefile)
    fitter.norm = normalize   # normalize the synthetic spectrum
    model = fitter.model(spec.wave.flatten(), upparams['TEFF'], retobj=True)
    model.instrument = 'Model'
    return model
class SpecFitter:
    """Model-fitting helper that generates synthetic spectra matched to an
    observed Spec1D spectrum and provides model() and jac() callables for
    least-squares fitting.  All parameter keys are upper-cased internally;
    synspec works in AIR wavelengths, so the observed grid is converted as
    needed."""

    def __init__ (self,spec,params,fitparams=None,norm=True,verbose=False,
                  alinefile=None,mlinefile=None):
        """Set up the fitter from an observed Spec1D spectrum.

        params : dict of initial/fixed stellar parameters.
        fitparams : list of parameter names to fit (defaults to all keys
            of params).
        norm : renormalize synthetic spectra with the observed continuum
            function.
        alinefile/mlinefile : optional atomic/molecular linelists.
        """
        # Parameters
        self.params = params
        if fitparams is not None:
            self.fitparams = fitparams
        else:
            self.fitparams = list(params.keys()) # by default fit all parameters
        self.nsynfev = 0 # number of synthetic spectra made
        self.njac = 0 # number of times jacobian called
        # Save spectrum information
        self.spec = spec.copy()
        self.flux = spec.flux.flatten()
        self.err = spec.err.flatten()
        self.wave = spec.wave.flatten()
        self.lsf = spec.lsf.copy()
        self.lsf.wavevac = spec.wavevac # need this later for synspec prep
        self.wavevac = spec.wavevac
        self.verbose = verbose
        self.norm = norm # normalize
        self.continuum_func = spec.continuum_func
        self.alinefile = alinefile
        self.mlinefile = mlinefile
        # Convert vacuum to air wavelengths
        # synspec uses air wavelengths
        if spec.wavevac is True:
            wave = astro.vactoair(spec.wave.copy().flatten()).reshape(spec.wave.shape)
        else:
            wave = spec.wave.copy()
        if wave.ndim==1:
            wave = np.atleast_2d(wave).T
        # Figure out the wavelength parameters
        npix = spec.npix
        norder = spec.norder
        # sample every 20th pixel when probing the LSF FWHM
        xp = np.arange(npix//20)*20
        wr = np.zeros((spec.lsf.norder,2),np.float64)
        dw = np.zeros(spec.lsf.norder,np.float64)
        mindw = np.zeros(norder,np.float64)
        for o in range(spec.norder):
            dw[o] = np.median(dln.slope(wave[:,o]))
            wr[o,0] = np.min(wave[:,o])
            wr[o,1] = np.max(wave[:,o])
            fwhm = spec.lsf.fwhm(wave[xp,o],xtype='Wave',order=o)
            # FWHM is in units of lsf.xtype, convert to wavelength/angstroms, if necessary
            if spec.lsf.xtype.lower().find('pix')>-1:
                fwhm *= np.abs(dw[o])
            # need at least ~4 pixels per LSF FWHM across the spectrum
            # using 3 affects the final profile shape
            mindw[o] = np.min(fwhm/4)
        self._dwair = np.min(mindw)  # IN AIR WAVELENGTHS!!
        self._w0air = np.min(wave)
        self._w1air = np.max(wave)
        # parameters to save (history of every model evaluation)
        self._all_pars = []
        self._all_model = []
        self._all_chisq = []
        self._jac_array = None

    @property
    def params(self):
        """ Dictionary of initial/fixed parameters (keys all CAPS)."""
        return self._params

    @params.setter
    def params(self,params):
        """ Dictionary, keys must be all CAPS."""
        self._params = dict((key.upper(), value) for (key, value) in params.items())  # all CAPS

    @property
    def fitparams(self):
        """ List of parameter names being fit (all CAPS)."""
        return self._fitparams

    @fitparams.setter
    def fitparams(self,fitparams):
        """ list, keys must be all CAPS."""
        self._fitparams = [v.upper() for v in fitparams] # all CAPS

    def mkinputs(self,args):
        """ Make INPUTS dictionary: fixed params overlaid with the current
        fit values plus the wavelength grid parameters (DW/W0/W1)."""
        # Create INPUTS with all arguments needed to make the spectrum
        inputs = self.params.copy()  # initialize with initial/fixed values
        for k in range(len(self.fitparams)):  # this overwrites the values for the fitted values
            inputs[self.fitparams[k]] = args[k]
        inputs['DW'] = self._dwair  # add in wavelength parameters
        inputs['W0'] = self._w0air
        inputs['W1'] = self._w1air
        return inputs

    def chisq(self,model):
        """ Reduced chi value of a model flux vs. the observed spectrum."""
        return np.sqrt( np.sum( (self.flux-model)**2/self.err**2 )/len(self.flux) )

    def model(self, xx, *args, retobj=False):
        """ Return a model spectrum flux with the given input arguments."""
        # The input arguments correspond to FITPARAMS
        # This corrects for air/vacuum wavelength differences
        if self.verbose:
            print(args)
        # The arguments correspond to the fitting parameters
        inputs = self.mkinputs(args)
        if self.verbose:
            print(inputs)
        # Create the synthetic spectrum
        synspec = model_spectrum(inputs,verbose=self.verbose,  # always returns air wavelengths
                                 alinefile=self.alinefile,mlinefile=self.mlinefile)
        self.nsynfev += 1
        # Convolve with the LSF and do air/vacuum wave conversion
        pspec = prepare_synthspec(synspec,self.lsf,norm=self.norm,
                                  continuum_func=self.continuum_func)
        # Save models/pars/chisq
        self._all_pars.append(list(args).copy())
        self._all_model.append(pspec.flux.flatten().copy())
        self._all_chisq.append(self.chisq(pspec.flux.flatten()))
        # Return flattened spectrum
        if retobj:
            return pspec
        else:
            return pspec.flux.flatten()

    def getstep(self,name,val,relstep=0.02):
        """ Calculate step for a parameter (used for the one-sided
        numerical derivative in jac()); relstep is currently unused."""
        # It mainly deals with edge cases
        #if val != 0.0:
        #    step = relstep*val
        #else:
        #    if name=='RV':
        #        step = 1.0
        #    elif name=='VROT':
        #        step = 0.5
        #    elif name=='VMICRO':
        #        step = 0.5
        #    elif name.endswith('_H'):
        #        step = 0.02
        #    else:
        #        step = 0.02
        if name=='TEFF':
            step = 5.0
        elif name=='RV':
            step = 0.1
        elif name=='VROT':
            step = 0.5
        elif name=='VMICRO':
            step = 0.5
        elif name.endswith('_H'):
            step = 0.01
        else:
            step = 0.01
        return step
        return step  # NOTE(review): unreachable duplicate return — dead code

    def jac(self,x,*args):
        """ Compute the Jacobian matrix (an m-by-n matrix, where element (i, j)
        is the partial derivative of f[i] with respect to x[j]). """
        if hasattr(self,'logger') is False:
            logger = dln.basiclogger()
        else:
            logger = self.logger
        logger.info(args)
        if self.verbose:
            logger.info(' ')
            logger.info('##### Calculating Jacobian Matrix #####')
            logger.info(' ')
        # A new synthetic spectrum does not need to be generated RV, vmicro or vsini.
        # Some time can be saved by not remaking those.
        # Use a one-sided derivative.
        # Boundaries
        lbounds,ubounds = mkbounds(self.fitparams)
        relstep = 0.02
        npix = len(x)
        npar = len(args)
        # Get INPUTS dictionary and make keys all CAPS
        inputs = self.mkinputs(args)
        inputs = dict((key.upper(), value) for (key, value) in inputs.items())
        # Some important parameters
        w0 = inputs['W0']
        w1 = inputs['W1']
        dw = inputs['DW']
        rv = inputs.get('RV')
        vrot = inputs.get('VROT')
        vmicro = inputs.get('VMICRO')
        # Create synthetic spectrum at current values
        # set vrot=vmicro=rv=0, will modify later if necessary
        if self.verbose:
            logger.info('--- Current values ---')
            logger.info(args)
        tinputs = inputs.copy()
        tinputs['VMICRO'] = 0
        tinputs['VROT'] = 0
        tinputs['RV'] = 0
        origspec = model_spectrum(tinputs,keepextend=True,  # always are wavelengths
                                  alinefile=self.alinefile,mlinefile=self.mlinefile)
        self.nsynfev += 1
        # Smooth and shift
        smorigspec = smoothshift_spectrum(origspec,vrot=vrot,vmicro=vmicro,rv=rv)
        # Trim to final wavelengths
        smorigspec = trim_spectrum(smorigspec,w0,w1)
        # Convolve with the LSF and do air/vacuum wave conversion
        pspec = prepare_synthspec(smorigspec,self.lsf,norm=self.norm,
                                  continuum_func=self.continuum_func)
        # Flatten the spectrum
        f0 = pspec.flux.flatten()
        # Save models/pars/chisq
        self._all_pars.append(list(args).copy())
        self._all_model.append(f0.copy())
        self._all_chisq.append(self.chisq(f0))
        chisq = np.sqrt( np.sum( (self.flux-f0)**2/self.err**2 )/len(self.flux) )
        self  # NOTE(review): stray no-op expression statement — leftover debugging?
        if self.verbose:
            logger.info('chisq = '+str(chisq))
        # MASK PIXELS!?
        # Initialize jacobian matrix
        jac = np.zeros((npix,npar),np.float64)
        # Loop over parameters, perturbing one at a time
        for i in range(npar):
            pars = np.array(copy.deepcopy(args))
            step = self.getstep(self.fitparams[i],pars[i],relstep)
            # Check boundaries, if above upper boundary
            # go the opposite way
            if pars[i]>ubounds[i]:
                step *= -1
            pars[i] += step
            tinputs = self.mkinputs(pars)
            if self.verbose:
                logger.info(' ')
                logger.info('--- '+str(i+1)+' '+self.fitparams[i]+' '+str(pars[i])+' ---')
                logger.info(pars)
            # VROT/VMICRO/RV, just shift/smooth original spectrum
            if self.fitparams[i]=='VROT' or self.fitparams[i]=='VMICRO' or self.fitparams[i]=='RV':
                tvrot = tinputs.get('VROT')
                tvmicro = tinputs.get('VMICRO')
                trv = tinputs.get('RV')
                #import pdb; pdb.set_trace()
                # Smooth and shift
                synspec = smoothshift_spectrum(origspec,vrot=tvrot,vmicro=tvmicro,rv=trv)
                # Trim to final wavelengths
                synspec = trim_spectrum(synspec,w0,w1)
            else:
                synspec = model_spectrum(tinputs,alinefile=self.alinefile,
                                         mlinefile=self.mlinefile)  # always returns air wavelengths
                self.nsynfev += 1
            # Convert to vacuum wavelengths if necessary
            if self.wavevac:
                synspec.wave = astro.airtovac(synspec.wave)
                synspec.wavevac = True
            # Convolve with the LSF and do air/vacuum wave conversion
            pspec = prepare_synthspec(synspec,self.lsf,norm=self.norm,
                                      continuum_func=self.continuum_func)
            # Flatten the spectrum
            f1 = pspec.flux.flatten()
            # Save models/pars/chisq
            self._all_pars.append(list(pars).copy())
            self._all_model.append(f1.copy())
            self._all_chisq.append(self.chisq(f1))
            if np.sum(~np.isfinite(f1))>0:
                print('some nans/infs')
                import pdb; pdb.set_trace()
            # one-sided finite difference
            jac[:,i] = (f1-f0)/step
        if np.sum(~np.isfinite(jac))>0:
            print('some nans/infs')
            import pdb; pdb.set_trace()
        self._jac_array = jac.copy()  # keep a copy
        self.njac += 1
        return jac
def trim_spectrum(spec,w0,w1):
    """ Trim a synthetic spectrum to the wavelength range [w0,w1].
    Assumes a single-order spectrum.  Returns the input object unchanged
    (not a copy) when no trimming is needed."""
    _, lo = dln.closest(spec.wave,w0)
    _, hi = dln.closest(spec.wave,w1)
    # Already spans exactly the requested range
    if lo==0 and hi==(spec.npix-1):
        return spec
    out = spec.copy()
    sl = slice(lo,hi+1)
    out.flux = out.flux[sl]
    out.wave = out.wave[sl]
    if out.err is not None:
        out.err = out.err[sl]
    if out.mask is not None:
        out.mask = out.mask[sl]
    if hasattr(out,'cont') and out.cont is not None:
        out.cont = out.cont[sl]
    out.npix = len(out.flux)
    return out
def getabund(inputs,verbose=False):
    """ Grab the abundances out of the input file and return array of abundances.

    Parameters
    ----------
    inputs : dict
        Input parameters.  Must contain the metallicity (key 'FEH' or 'FE_H')
        and 'modelfile' (the model atmosphere file).  May also contain
        individual abundances as [X/H] keys (e.g. 'MG_H') and a global
        'ALPHA_H' used to fill in unset alpha elements.
    verbose : bool, optional
        Verbose output.  Default is False.

    Returns
    -------
    abu : numpy array
        The 99-element abundance array expected by synple, converted to
        linear values relative to N(H).  The first two elements are the
        H and He abundances taken from the model atmosphere.
    """
    # Create the input 99-element abundance array
    codedir = os.path.dirname(os.path.abspath(__file__))
    pertab = Table.read(codedir+'/data/periodic_table.txt',format='ascii')
    # Global metallicity; accept either FEH or FE_H as the key
    feh = inputs.get('FEH')
    if feh is None:
        feh = inputs.get('FE_H')
    if feh is None:
        raise ValueError('FE_H missing from inputs')
    # Read model atmosphere
    modelfile = inputs.get('modelfile')
    if modelfile is None:
        raise ValueError('modelfile missing from inputs')
    atmostype, teff, logg, vmicro2, mabu, nd, atmos = synple.read_model(modelfile,verbose=verbose)
    mlines = dln.readlines(modelfile)
    # Solar abundances (logarithmic)
    # first two entries are Teff/logg placeholders; they are replaced by the
    # model atmosphere's H and He abundances at the end of this function
    solar_abund = np.array([ 4750., 2.5,
                             -10.99, -10.66, -9.34, -3.61, -4.21,
                             -3.35, -7.48, -4.11, -5.80, -4.44,
                             -5.59, -4.53, -6.63, -4.92, -6.54,
                             -5.64, -7.01, -5.70, -8.89, -7.09,
                             -8.11, -6.40, -6.61, -4.54, -7.05,
                             -5.82, -7.85, -7.48, -9.00, -8.39,
                             -9.74, -8.70, -9.50, -8.79, -9.52,
                             -9.17, -9.83, -9.46, -10.58, -10.16,
                             -20.00, -10.29, -11.13, -10.47, -11.10,
                             -10.33, -11.24, -10.00, -11.03, -9.86,
                             -10.49, -9.80, -10.96, -9.86, -10.94,
                             -10.46, -11.32, -10.62, -20.00, -11.08,
                             -11.52, -10.97, -11.74, -10.94, -11.56,
                             -11.12, -11.94, -11.20, -11.94, -11.19,
                             -12.16, -11.19, -11.78, -10.64, -10.66,
                             -10.42, -11.12, -10.87, -11.14, -10.29,
                             -11.39, -20.00, -20.00, -20.00, -20.00,
                             -20.00, -20.00, -12.02, -20.00, -12.58,
                             -20.00, -20.00, -20.00, -20.00, -20.00,
                             -20.00, -20.00])
    # Deal with alpha abundances
    # only add the individual alpha abundance if it's not already there
    # sometimes we might fit a single alpha element but want to use
    # ALPHA_H to set the rest of them
    if inputs.get('ALPHA_H') is not None:
        alpha = inputs['ALPHA_H']
        elem = ['O','MG','SI','S','CA','TI']
        for k in range(len(elem)):
            if inputs.get(elem[k]+'_H') is None:
                inputs[elem[k]+'_H'] = alpha
    # Scale global metallicity (everything except the two placeholder entries)
    abu = solar_abund.copy()
    abu[2:] += feh
    # Now offset the elements with [X/Fe], [X/Fe]=[X/H]-[Fe/H]
    # match any '*_H' input key (except FE_H) to its periodic-table entry
    g, = np.where( (np.char.array(list(inputs.keys())).find('_H') != -1) &
                   (np.char.array(list(inputs.keys())) != 'FE_H') )
    if len(g)>0:
        ind1,ind2 = dln.match(np.char.array(list(inputs.keys()))[g],np.char.array(pertab['symbol']).upper()+'_H')
        for k in range(len(ind1)):
            key1 = np.char.array(list(inputs.keys()))[g[ind1[k]]]
            abu[ind2[k]] += float(inputs[key1]) - feh
            if verbose:
                print('%s %f' % (key1,float(inputs[key1])))
    # convert to linear
    abu[2:] = 10**abu[2:]
    # Divide by N(H), read from the model atmosphere's ABUNDANCE SCALE line
    g, = np.where(np.char.array(mlines).find('ABUNDANCE SCALE') != -1)
    nhtot = np.float64(mlines[g[0]].split()[6])
    abu[2:] /= nhtot
    # use model values for H and He
    abu[0:2] = mabu[0:2]
    return abu
def synple_wrapper(inputs,verbose=False,tmpbase='/tmp',alinefile=None,mlinefile=None):
    """ This is a wrapper around synple to generate a new synthetic spectrum.

    Parameters
    ----------
    inputs : dict
        Input parameters: Teff, logg, [Fe/H], optional [X/H] abundances and
        the wavelength parameters (W0, W1, DW).  Keys are case-insensitive.
    verbose : bool, optional
        Verbose output.  Default is False.
    tmpbase : str, optional
        Base directory for synple's temporary working directory.  Default is '/tmp'.
    alinefile : str, optional
        Atomic linelist filename.  Default is None (use synple's default).
    mlinefile : str, optional
        Molecular linelist filename.  Default is None (use synple's default).

    Returns
    -------
    (wave, flux, cont) : tuple of numpy arrays
        Wavelength (AIR!!), flux and continuum of the synthetic spectrum.
    """
    # Wavelengths are all AIR!!
    # Make temporary directory for synple to work in
    curdir = os.path.abspath(os.curdir)
    tdir = os.path.abspath(tempfile.mkdtemp(prefix="syn",dir=tmpbase))
    os.chdir(tdir)
    try:
        # Linelists to use
        linelist = ['gfallx3_bpo.19','kmol3_0.01_30.20']  # default values
        if alinefile is not None:   # atomic linelist input
            linelist[0] = alinefile
        if mlinefile is not None:   # molecular linelist input
            linelist[1] = mlinefile
        if verbose:
            print('Using linelist: ',linelist)
        # Make key names all CAPS
        inputs = dict((key.upper(), value) for (key, value) in inputs.items())
        # Make the model atmosphere file
        teff = inputs['TEFF']
        logg = inputs['LOGG']
        metal = inputs['FE_H']
        tid,modelfile = tempfile.mkstemp(prefix="mod",dir=".")
        os.close(tid)   # close the open file
        # Limit values to the model grid coverage
        # (of course the logg/feh ranges vary with Teff)
        mteff = dln.limit(teff,3500.0,60000.0)
        mlogg = dln.limit(logg,0.0,5.0)
        mmetal = dln.limit(metal,-2.5,0.5)
        model, header, tail = models.mkmodel(mteff,mlogg,mmetal,modelfile)
        inputs['modelfile'] = modelfile
        if os.path.exists(modelfile) is False or os.stat(modelfile).st_size==0:
            # Fail loudly instead of dropping into the debugger
            raise FileNotFoundError('model atmosphere file '+modelfile+' does NOT exist or is empty')
        # Create the synspec synthetic spectrum
        w0 = inputs['W0']
        w1 = inputs['W1']
        dw = inputs['DW']
        vmicro = inputs.get('VMICRO')
        vrot = inputs.get('VROT')
        if vrot is None:
            vrot = 0.0
        # Get the abundances
        abu = getabund(inputs,verbose=verbose)
        wave,flux,cont = synple.syn(modelfile,(w0,w1),dw,vmicro=vmicro,vrot=vrot,
                                    abu=list(abu),verbose=verbose,linelist=linelist)
    finally:
        # Always restore the working directory and delete the temporary files,
        # even if the model-atmosphere creation or synple itself raised
        os.chdir(curdir)
        shutil.rmtree(tdir)
    return (wave,flux,cont)
def smoothshift_spectrum(inpspec,vmicro=None,vrot=None,rv=None):
    """ This smoothes the spectrum by Vrot+Vmicro and
    shifts it by RV.

    Parameters
    ----------
    inpspec : Spec1D object
        The input spectrum (left unmodified; a copy is returned).
    vmicro : float, optional
        Microturbulent (Gaussian) broadening velocity in km/s.
    vrot : float, optional
        Rotational broadening velocity (vsini) in km/s.
    rv : float, optional
        Doppler shift in km/s.

    Returns
    -------
    spec : Spec1D object
        A copy of the input spectrum, broadened and/or shifted.
    """
    # Nothing to do
    if vmicro is None and vrot is None and rv is None:
        return inpspec.copy()
    # Initialize output spectrum
    spec = inpspec.copy()
    # Some broadening
    if vmicro is not None or vrot is not None:
        flux = utils.broaden(spec.wave,spec.flux,vgauss=vmicro,vsini=vrot)
        spec.flux = flux
    ## Vrot/Vsini (km/s) and Vmicro (in km/s)
    #if vrot is not None or vmicro is not None:
    #    wave, flux = synple.call_rotin(wave, flux, vrot, fwhm, space, steprot, stepfwhm, clean=False, reuseinputfiles=True)
    # Doppler shift only (in km/s)
    if rv is not None:
        if rv != 0.0:
            # Non-relativistic Doppler shift of the wavelength array
            shiftwave = spec.wave*(1+rv/cspeed)
            # gd: pixels covered by the shifted wavelength range;
            # bd: pixels outside it that must be filled in afterwards
            gd,ngd,bd,nbd = dln.where( (spec.wave >= np.min(shiftwave)) & (spec.wave <= np.max(shiftwave)), comp=True)
            # Doppler shift and interpolate onto wavelength array
            if hasattr(spec,'cont'):
                cont = synple.interp_spl(spec.wave[gd], shiftwave, spec.cont)
                spec.cont *= 0
                spec.cont[gd] = cont
                # interpolate the continuum to the missing pixels
                if nbd>0:
                    contmissing = dln.interp(spec.wave[gd],spec.cont[gd],spec.wave[bd],kind='linear',assume_sorted=False)
                    spec.cont[bd] = contmissing
            flux = synple.interp_spl(spec.wave[gd], shiftwave, spec.flux)
            spec.flux *= 0
            spec.flux[gd] = flux
            if nbd>0:
                # Fill in missing values with values from a quadratic fit
                # to the good pixels
                if np.sum(np.isfinite(spec.flux[gd]))>0:
                    coef = dln.poly_fit(spec.wave[gd],spec.flux[gd],2)
                    fluxmissing = dln.poly(spec.wave[bd],coef)
                    spec.flux[bd] = fluxmissing
                # Mask these pixels since they are extrapolated, not measured
                if spec.mask is None:
                    spec.mask = np.zeros(len(spec.flux),bool)
                spec.mask[bd] = True
    return spec
def model_spectrum(inputs,verbose=False,keepextend=False,alinefile=None,mlinefile=None):
    """
    This creates a model spectrum given the inputs:
    RV, Teff, logg, vmicro, vsini, [Fe/H], [X/Fe], w0, w1, dw.
    This creates the new synthetic spectrum and then convolves with vmicro, vsini and
    shifts to velocity RV.

    The returned spectrum always uses AIR wavelengths!!!

    Parameters
    ----------
    inputs : dictionary
        Input parameters, stellar parameters, abundances.
    keepextend : bool, optional
        Keep the extensions on the ends. Default is False.
    alinefile : str, optional
        Atomic linelist filename. Default is None (use synple's default one).
    mlinefile : str, optional
        Molecular linelist filename. Default is None (use synple's default one).
    verbose : bool, optional
        Verbose output. Default is False.

    Returns
    -------
    synspec : Spec1D
        The synthetic spectrum as Spec1D object.
    """
    # Make key names all CAPS
    inputs = dict((key.upper(), value) for (key, value) in inputs.items())
    # Extend on the ends for RV/convolution purposes
    w0 = inputs['W0']
    w1 = inputs['W1']
    dw = inputs['DW']
    rv = inputs.get('RV')
    vrot = inputs.get('VROT')
    vmicro = inputs.get('VMICRO')
    inputsext = inputs.copy()
    if rv is not None or vrot is not None or vmicro is not None:
        # Pad using the wavelength shift of a 1500 km/s RV so that smoothing
        # and shifting don't run off the ends of the spectrum.
        # NOTE(review): numext is a wavelength offset in Angstroms but is then
        # applied as a pixel count (numext*dw) — verify the intended padding.
        numext = int(np.ceil(w1*(1.0+1500/cspeed)-w1))
        inputsext['W0'] = w0-numext*dw
        inputsext['W1'] = w1+numext*dw
        if verbose:
            print('Extending wavelength by '+str(numext)+' pixels on each end')
    # Create the synthetic spectrum
    # set vrot=vmicro=0, will convolve later if necessary
    inputsext['VMICRO'] = 0
    inputsext['VROT'] = 0
    wave1,flux1,cont1 = synple_wrapper(inputsext,verbose=verbose,alinefile=alinefile,
                                       mlinefile=mlinefile)
    # Normalized synthetic spectrum (flux/continuum), air wavelengths
    synspec = Spec1D(flux1/cont1,err=flux1*0,wave=wave1,lsfpars=np.array(0.0))
    synspec.cont = cont1
    synspec.wavevac = False
    # Smooth and shift
    if rv is not None or vrot is not None or vmicro is not None:
        synspec = smoothshift_spectrum(synspec,vrot=vrot,vmicro=vmicro,rv=rv)
    # Trim back to the requested wavelength range
    if keepextend is False:
        synspec = trim_spectrum(synspec,w0,w1)
    return synspec
def prepare_synthspec(synspec,lsf,norm=True,continuum_func=None):
    """ Prepare a synthetic spectrum to be compared to an observed spectrum.

    Performs the air<->vacuum wavelength conversion, rebins the model if it is
    much finer than the observed resolution, convolves with the observed LSF,
    and interpolates onto the observed wavelength array.

    Parameters
    ----------
    synspec : Spec1D
        The synthetic spectrum.
    lsf : Lsf object
        The LSF (and wavelength array) of the observed spectrum.
    norm : bool, optional
        Normalize the output spectrum.  Default is True.
    continuum_func : function, optional
        Continuum-normalization function to attach to the output spectrum.

    Returns
    -------
    pspec : Spec1D
        The prepared synthetic spectrum on the observed wavelength grid.
    """
    # Convert wavelength from air->vacuum or vice versa
    if synspec.wavevac != lsf.wavevac:
        # Air -> Vacuum
        if synspec.wavevac is False:
            synspec.wave = astro.airtovac(synspec.wave)
            synspec.wavevac = True
        # Vacuum -> Air
        else:
            # was "synspec.dispersion = ...", which left .wave unconverted;
            # assign .wave to mirror the air->vacuum branch above
            synspec.wave = astro.vactoair(synspec.wave)
            synspec.wavevac = False
    # Initialize the output spectrum
    if lsf.wave.ndim==2:
        npix,norder = lsf.wave.shape
    else:
        npix = len(lsf.wave)
        norder = 1
    pspec = Spec1D(np.zeros((npix,norder),np.float32),err=np.zeros((npix,norder),np.float32),
                   wave=lsf.wave,lsfpars=lsf.pars,lsftype=lsf.lsftype,lsfxtype=lsf.xtype)
    pspec.cont = np.zeros((npix,norder),np.float32)
    if continuum_func is not None:
        pspec.continuum_func = continuum_func
    # Loop over orders (promote 1-D wavelengths to a single-order 2-D array)
    if lsf.wave.ndim==1:
        wave = np.atleast_2d(lsf.wave.copy()).T
    else:
        wave = lsf.wave.copy()
    for o in range(lsf.norder):
        wobs = wave[:,o]
        dw = np.median(dln.slope(wobs))
        # Trim the model to the observed range plus a 2-pixel buffer
        wv1,ind1 = dln.closest(synspec.wave,np.min(wobs)-2*np.abs(dw))
        wv2,ind2 = dln.closest(synspec.wave,np.max(wobs)+2*np.abs(dw))
        modelflux = synspec.flux[ind1:ind2+1]
        modelwave = synspec.wave[ind1:ind2+1]
        modelcont = synspec.cont[ind1:ind2+1]
        # Rebin, if necessary
        # get LSF FWHM (A) for a handful of positions across the spectrum
        xp = np.arange(npix//20)*20
        fwhm = lsf.fwhm(wobs[xp],xtype='Wave',order=o)
        # FWHM is in units of lsf.xtype, convert to wavelength/angstroms, if necessary
        if lsf.xtype.lower().find('pix')>-1:
            fwhm *= np.abs(dw)
        # convert FWHM (A) in number of model pixels at those positions
        dwmod = dln.slope(modelwave)
        dwmod = np.hstack((dwmod,dwmod[-1]))
        xpmod = dln.interp(modelwave,np.arange(len(modelwave)),wobs[xp],kind='cubic',assume_sorted=False,extrapolate=True)
        xpmod = np.round(xpmod).astype(int)
        fwhmpix = np.abs(fwhm/dwmod[xpmod])
        # need at least ~4 pixels per LSF FWHM across the spectrum
        # using 3 affects the final profile shape
        nbin = np.round(np.min(fwhmpix)//4).astype(int)
        if np.min(fwhmpix) < 3.7:
            warnings.warn('Model has lower resolution than the observed spectrum. Only '+str(np.min(fwhmpix))+' model pixels per resolution element')
        if np.min(fwhmpix) < 2.8:
            raise Exception('Model has lower resolution than the observed spectrum. Only '+str(np.min(fwhmpix))+' model pixels per resolution element')
        if nbin>1:
            # NOTE(review): npix2 is derived from the FULL synthetic spectrum
            # length, not the trimmed modelflux length — verify this is intended
            npix2 = np.round(len(synspec.flux) // nbin).astype(int)
            modelflux = dln.rebin(modelflux[0:npix2*nbin],npix2)
            modelwave = dln.rebin(modelwave[0:npix2*nbin],npix2)
            modelcont = dln.rebin(modelcont[0:npix2*nbin],npix2)
        # Convolve with the LSF evaluated on the model wavelength grid
        lsf2d = lsf.anyarray(modelwave,xtype='Wave',order=o,original=False)
        cflux = utils.convolve_sparse(modelflux,lsf2d)
        # Interpolate onto final wavelength array
        flux = synple.interp_spl(wobs, modelwave, cflux)
        cont = synple.interp_spl(wobs, modelwave, modelcont)
        pspec.flux[:,o] = flux
        pspec.cont[:,o] = cont
    pspec.normalized = True
    # Normalize
    if norm is True:
        newcont = pspec.continuum_func(pspec)
        pspec.flux /= newcont
        pspec.cont *= newcont
    return pspec
def mkbounds(params,paramlims=None):
    """ Make lower and upper boundaries for parameters.

    Parameters
    ----------
    params : list of str
        Parameter names (case-insensitive).
    paramlims : dict, optional
        Dictionary of [lower,upper] limits that override the defaults.

    Returns
    -------
    bounds : tuple of numpy arrays
        (lower, upper) boundary arrays, one element per parameter.
    """
    pars = np.char.array(params).upper()
    npar = len(pars)
    lower = np.zeros(npar, np.float64)
    upper = np.zeros(npar, np.float64)
    # Default ranges for the standard stellar/broadening parameters
    defaults = {'TEFF': (3500, 60000), 'LOGG': (0, 5), 'FE_H': (-3, 1),
                'VMICRO': (0, 5), 'VROT': (0, 500), 'RV': (-1500, 1500)}
    for key, (lo, hi) in defaults.items():
        hits, = np.where(pars == key)
        if len(hits) > 0:
            lower[hits[0]] = lo
            upper[hits[0]] = hi
    # Any abundance parameter ([X/H], excluding FE_H) gets a generic range
    ab, = np.where((pars.find('_H') != -1) & (pars != 'FE_H'))
    if len(ab) > 0:
        lower[ab] = -3
        upper[ab] = 10
    # Input parameter limits override the defaults
    if paramlims is not None:
        limkeys = np.char.array(list(paramlims.keys())).upper()
        for j, name in enumerate(pars):
            hit, = np.where(limkeys == name)
            if len(hit) > 0:
                lower[j] = paramlims[limkeys[hit[0]]][0]
                upper[j] = paramlims[limkeys[hit[0]]][1]
    return (lower, upper)
def mkdxlim(fitparams):
    """ Make array of parameter changes at which curve_fit should finish.

    Parameters
    ----------
    fitparams : list of str
        Names of the parameters being fit.

    Returns
    -------
    dx_lim : numpy array
        Convergence threshold for each parameter.
    """
    # Per-parameter convergence steps; abundances and everything else
    # fall through to the endswith/_default branches below
    special = {'TEFF': 1.0, 'LOGG': 0.005, 'VMICRO': 0.1, 'VROT': 0.1, 'RV': 0.01}
    dx_lim = np.zeros(len(fitparams), float)
    for idx, name in enumerate(fitparams):
        if name in special:
            dx_lim[idx] = special[name]
        elif name.endswith('_H'):
            dx_lim[idx] = 0.005
        else:
            dx_lim[idx] = 0.01
    return dx_lim
def initpars(params,fitparams,bounds=None):
    """ Make initial set of parameters given PARAMS and FITPARAMS.

    Parameters
    ----------
    params : dict
        Known parameter values (case-insensitive keys).
    fitparams : list of str
        Names of the parameters to initialize (case-insensitive).
    bounds : tuple of arrays, optional
        (lower, upper) boundary arrays used to clip the initial values.

    Returns
    -------
    pinit : numpy array
        Initial value for each fit parameter.
    """
    pdict = {key.upper(): value for key, value in params.items()}   # all CAPS
    names = [v.upper() for v in fitparams]                          # all CAPS
    # Fallback starting values for parameters not given in PARAMS
    fallback = {'RV': 0.0, 'VMICRO': 2.0, 'VROT': 0.0, 'TEFF': 5000.0, 'LOGG': 3.0}
    pinit = np.zeros(len(names), np.float64)
    for j, name in enumerate(names):
        if name in pdict:
            # Value supplied directly in PARAMS
            pinit[j] = pdict[name]
        elif name in fallback:
            pinit[j] = fallback[name]
        elif name.endswith('_H'):
            # Abundances start at the metallicity when available
            pinit[j] = pdict.get('FE_H', 0.0)
        else:
            pinit[j] = 0.0
    # Clip initial parameters to the boundary limits
    if bounds is not None:
        for j in range(len(names)):
            pinit[j] = dln.limit(pinit[j], bounds[0][j], bounds[1][j])
    return pinit
def specfigure(figfile,spec,fmodel,out,original=None,verbose=True,figsize=10):
    """ Make diagnostic figure.

    Plots the observed (masked) spectrum and the best-fit model, optionally
    overplotting the original unmasked spectrum, and saves it to FIGFILE.

    Parameters
    ----------
    figfile : str
        Output figure filename (removed first if it already exists).
    spec : Spec1D object
        The observed (masked) spectrum.
    fmodel : Spec1D object
        The best-fit model spectrum.
    out : numpy structured array
        Catalog of best-fit values (only used by the commented-out annotation).
    original : Spec1D object, optional
        The original, unmasked spectrum to overplot.  Default is None.
    verbose : bool, optional
        Print the output filename.  Default is True.
    figsize : float, optional
        Figure width in inches.  Default is 10.
    """
    #import matplotlib
    matplotlib.use('Agg')   # non-interactive backend, file output only
    #import matplotlib.pyplot as plt
    if os.path.exists(figfile): os.remove(figfile)
    norder = spec.norder
    nlegcol = 2
    if original is not None: nlegcol=3
    # Single-order plot
    if norder==1:
        fig,ax = plt.subplots()
        fig.set_figheight(figsize*0.5)
        fig.set_figwidth(figsize)
        if original is not None:
            plt.plot(original.wave,original.flux,color='green',label='Original',linewidth=1)
        plt.plot(spec.wave,spec.flux,'b',label='Masked Data',linewidth=0.5)
        plt.plot(fmodel.wave,fmodel.flux,'r',label='Model',linewidth=0.5,alpha=0.8)
        leg = ax.legend(loc='upper left', frameon=True, framealpha=0.8, ncol=nlegcol)
        plt.xlabel('Wavelength (Angstroms)')
        plt.ylabel('Normalized Flux')
        xr = dln.minmax(spec.wave)
        # Y-range spans data and model, padded, then clipped to [-0.2, 2.0]
        yr = [np.min([spec.flux,fmodel.flux]), np.max([spec.flux,fmodel.flux])]
        if original is not None:
            yr = [np.min([original.flux,spec.flux,fmodel.flux]), np.max([spec.flux,fmodel.flux])]
        yr = [yr[0]-dln.valrange(yr)*0.15,yr[1]+dln.valrange(yr)*0.005]
        yr = [np.max([yr[0],-0.2]), np.min([yr[1],2.0])]
        plt.xlim(xr)
        plt.ylim(yr)
        snr = np.nanmedian(spec.flux/spec.err)
        plt.title(spec.filename)
        #ax.annotate(r'S/N=%5.1f Teff=%5.1f$\pm$%5.1f logg=%5.2f$\pm$%5.2f [Fe/H]=%5.2f$\pm$%5.2f Vrel=%5.2f$\pm$%5.2f chisq=%5.2f' %
        #            (snr, out['TEFF'], out['tefferr'], out['LOGG'], out['loggerr'], out['FE_H'], out['feherr'], out['RV'], out['vrelerr'], out['chisq']),
        #            xy=(np.mean(xr), yr[0]+dln.valrange(yr)*0.05),ha='center')
    # Multi-order plot, one panel per order
    else:
        fig,ax = plt.subplots(norder)
        fig.set_figheight(figsize)
        fig.set_figwidth(figsize)
        for i in range(norder):
            if original is not None:
                ax[i].plot(original.wave[:,i],original.flux[:,i],color='green',label='Original',linewidth=1)
            ax[i].plot(spec.wave[:,i],spec.flux[:,i],'b',label='Masked Data',linewidth=0.5)
            ax[i].plot(fmodel.wave[:,i],fmodel.flux[:,i],'r',label='Model',linewidth=0.5,alpha=0.8)
            if i==0:
                leg = ax[i].legend(loc='upper left', frameon=True, framealpha=0.8, ncol=nlegcol)
            ax[i].set_xlabel('Wavelength (Angstroms)')
            ax[i].set_ylabel('Normalized Flux')
            xr = dln.minmax(spec.wave[:,i])
            yr = [np.min([spec.flux[:,i],fmodel.flux[:,i]]), np.max([spec.flux[:,i],fmodel.flux[:,i]])]
            if original is not None:
                yr = [np.min([original.flux[:,i],spec.flux[:,i],fmodel.flux[:,i]]), np.max([spec.flux[:,i],fmodel.flux[:,i]])]
            yr = [yr[0]-dln.valrange(yr)*0.05,yr[1]+dln.valrange(yr)*0.05]
            if i==0:
                yr = [yr[0]-dln.valrange(yr)*0.15,yr[1]+dln.valrange(yr)*0.05]
            yr = [np.max([yr[0],-0.2]), np.min([yr[1],2.0])]
            ax[i].set_xlim(xr)
            ax[i].set_ylim(yr)
            # legend
            if i==0:
                snr = np.nanmedian(spec.flux/spec.err)
                ax[i].set_title(spec.filename)
                #ax[i].annotate(r'S/N=%5.1f Teff=%5.1f$\pm$%5.1f logg=%5.2f$\pm$%5.2f [Fe/H]=%5.2f$\pm$%5.2f Vrel=%5.2f$\pm$%5.2f chisq=%5.2f' %
                #               (snr,out['teff'],out['tefferr'],out['logg'],out['loggerr'],out['feh'],out['feherr'],out['vrel'],out['vrelerr'],out['chisq']),
                #               xy=(np.mean(xr), yr[0]+dln.valrange(yr)*0.05),ha='center')
    plt.savefig(figfile,bbox_inches='tight')
    plt.close(fig)
    if verbose is True: print('Figure saved to '+figfile)
def dopvrot_lsq(spec,models=None,initpar=None,verbose=False,logger=None):
    """
    Least Squares fitting with forward modeling of the spectrum.

    Parameters
    ----------
    spec : Spec1D object
        The observed spectrum to match.
    models : list of Cannon models, optional
        A list of Cannon models to use. The default is to load all of the Cannon
        models in the data/ directory and use those.
    initpar : numpy array, optional
        Initial estimate for [teff, logg, feh, RV, vsini], optional.
    verbose : bool, optional
        Verbose output of the various steps. This is False by default.
    logger : logging object, optional
        Logging object.

    Returns
    -------
    out : numpy structured array
        The output structured array of the final derived RVs, stellar parameters and errors.
    bmodel : Spec1D object
        The best-fitting Cannon model spectrum (as Spec1D object).

    Example
    -------

    .. code-block:: python

         out, bmodel = dopvrot_lsq(spec)

    """
    if logger is None:
        logger = dln.basiclogger()
    # Load and prepare the Cannon models
    #-------------------------------------------
    if models is None:
        models = cannon.models.copy()
        models.prepare(spec)
    # Get initial estimates
    if initpar is None:
        initpar = np.array([6000.0, 2.5, -0.5, 0.0, 0.0])
    initpar = np.array(initpar).flatten()
    # Calculate the bounds
    # start inverted (low=+1e5, high=-1e5) and grow to the union of the
    # individual Cannon model Teff/logg/feh ranges
    lbounds = np.zeros(5,float)+1e5
    ubounds = np.zeros(5,float)-1e5
    for p in models:
        lbounds[0:3] = np.minimum(lbounds[0:3],np.min(p.ranges,axis=1))
        ubounds[0:3] = np.maximum(ubounds[0:3],np.max(p.ranges,axis=1))
    # Fixed bounds for RV (km/s) and vsini (km/s)
    lbounds[3] = -1000
    ubounds[3] = 1000
    lbounds[4] = 0.0
    ubounds[4] = 500.0
    bounds = (lbounds, ubounds)
    # function to use with curve_fit
    def spec_interp_vsini(x,teff,logg,feh,rv,vsini):
        """ This returns the interpolated model for a given spectrum."""
        # The "models" and "spec" must already exist outside of this function
        m = models(teff=teff,logg=logg,feh=feh,rv=rv)
        if m is None:      # there was a problem; return a huge residual
            return np.zeros(spec.flux.shape,float).flatten()+1e30
        # Broaden to vsini, order by order
        if spec.norder>1:
            smflux = spec.flux*0
            for k in range(spec.norder):
                smflux[:,k] = utils.broaden(m.wave[:,k],m.flux[:,k],vsini=vsini)
        else:
            smflux = utils.broaden(m.wave.flatten(),m.flux.flatten(),vsini=vsini)
        return smflux.flatten()
    def spec_interp_vsini_jac(x,*args):
        """ Compute the Jacobian matrix (an m-by-n matrix, where element (i, j)
        is the partial derivative of f[i] with respect to x[j]). """
        # One-sided finite differences with a 2% relative step
        relstep = 0.02
        npix = len(x)
        npar = len(args)
        # Current values
        f0 = spec_interp_vsini(x,*args)
        # Initialize jacobian matrix
        jac = np.zeros((npix,npar),np.float64)
        # Loop over parameters
        for i in range(npar):
            pars = np.array(copy.deepcopy(args))
            step = relstep*pars[i]
            # Fall back to an absolute step when the parameter is <= 0
            if step<=0.0:
                step = 0.02
            pars[i] += step
            f1 = spec_interp_vsini(x,*pars)
            jac[:,i] = (f1-f0)/step
        return jac
    # Use curve_fit
    lspars, lscov = curve_fit(spec_interp_vsini, spec.wave.flatten(), spec.flux.flatten(), sigma=spec.err.flatten(),
                              p0=initpar, bounds=bounds, jac=spec_interp_vsini_jac)
    # If it hits a boundary then the solution won't change much compared to initpar
    # setting absolute_sigma=True gives crazy low lsperror values
    lsperror = np.sqrt(np.diag(lscov))
    if verbose is True:
        logger.info('Least Squares RV and stellar parameters:')
        for k,n in enumerate(['Teff','logg','[Fe/H]','RV','Vsini']):
            logger.info('%s = %f' % (n,lspars[k]))
    lsmodel = spec_interp_vsini(spec.wave,teff=lspars[0],logg=lspars[1],feh=lspars[2],rv=lspars[3],vsini=lspars[4])
    # RMS of the error-weighted residuals (sqrt of chisq per pixel)
    lschisq = np.sqrt(np.sum(((spec.flux.flatten()-lsmodel)/spec.err.flatten())**2)/len(lsmodel))
    if verbose is True: logger.info('chisq = %5.2f' % lschisq)
    # Put it into the output structure
    npar = len(lspars)
    dtype = np.dtype([('pars',float,npar),('parerr',float,npar),('parcov',float,(npar,npar)),('chisq',float)])
    out = np.zeros(1,dtype=dtype)
    out['pars'] = lspars
    out['parerr'] = lsperror
    out['parcov'] = lscov
    out['chisq'] = lschisq
    return out, lsmodel
def fit_elem(spec,params,elem,verbose=0,alinefile=None,mlinefile=None,logger=None):
    """ Fit an individual element.

    Performs a coarse grid search over the abundance (starting at -2 dex in
    1 dex steps) followed by two refinement rounds that bisect around the
    minimum, and a final quadratic interpolation of chisq to locate the best
    abundance.

    Parameters
    ----------
    spec : Spec1D object
        The observed spectrum to match.
    params : dict
        Parameters/elements to hold fixed.
    elem : list
        The abundance parameter name(s) to fit (e.g. ['MG_H']); all are
        varied together with a single abundance value.
    verbose : int, optional
        Verbosity level (0, 1, or 2).  Default is 0.
    alinefile : str, optional
        Atomic linelist filename.  Default is None (use synple's default).
    mlinefile : str, optional
        Molecular linelist filename.  Default is None (use synple's default).
    logger : logging object, optional
        Logging object.

    Returns
    -------
    out : numpy structured array
        Catalog of best-fit values.
    model : numpy array
        The best-fit synthetic spectrum (flattened).
    """
    t0 = time.time()
    if logger is None:
        logger = dln.basiclogger()
    # Create fitparams
    #fitparams = [e+'_H' for e in elem]
    fitparams = elem.copy()
    if verbose>0:
        logger.info('Fitting: '+', '.join(fitparams))
    # Initialize the fitter
    spfitter = SpecFitter(spec,params,fitparams=fitparams,verbose=(verbose>=2),
                          alinefile=alinefile,mlinefile=mlinefile)
    spfitter.logger = logger
    spfitter.norm = True   # normalize the synthetic spectrum
    #spfitter.verbose = True
    bounds = mkbounds(elem)
    pinit = initpars(params,elem,bounds)
    # Initalize output
    npar = len(fitparams)
    dtyp = []
    for f in fitparams:
        dtyp += [(f,float)]
    dtyp += [('pars',float,npar),('chisq',float),('nsynfev',int)]
    dtype = np.dtype(dtyp)
    out = np.zeros(1,dtype=dtype)
    # Loop over elemental abundances
    flag = 0
    abund = -2.0
    dabund = 1.0
    count = 0
    abundarr = []
    chisq = []
    modelarr = []
    # Loop from -2 to +1 or until we get through the minimum
    while (flag==0):
        model = spfitter.model(spec.wave.flatten(),abund)
        chisq1 = spfitter.chisq(model)
        abundarr.append(abund)
        modelarr.append(model)
        chisq.append(chisq1)
        if verbose>0:
            logger.info('%f %f' % (abund,chisq1))
        # Are we done?  Stop once we are past +1 dex AND chisq is rising again
        if (abund>=1) and (chisq1 != np.min(np.array(chisq))):
            flag = 1
        # Hard upper limit on the abundance
        if (abund >= 10):
            flag = 1
        # Increment the abundance
        abund += dabund
        count += 1
    # Best value is at the end, just return that value
    # (no bracket around the minimum, so refinement is not possible)
    bestind = np.argmin(chisq)
    if (bestind==0) or (bestind==len(chisq)-1):
        bestabund = abundarr[bestind]
        for k,f in enumerate(fitparams):
            out[f] = bestabund
        out['pars'] = bestabund
        out['chisq'] = np.min(chisq)
        out['nsynfev'] = spfitter.nsynfev
        model = modelarr[bestind]
        if verbose>0:
            logger.info('%f %f' % (bestabund,np.min(chisq)))
            logger.info('nfev = %i' % spfitter.nsynfev)
            logger.info('dt = %.2f sec.' % (time.time()-t0))
            logger.info(' ')
        return out, model
    # Now refine twice
    for i in range(2):
        # Get best value
        bestind = np.argmin(np.array(chisq))
        # get values half-way to left and right
        # Left
        lftind = bestind-1
        lftabund = np.mean([abundarr[lftind],abundarr[bestind]])
        lftmodel = spfitter.model(spec.wave.flatten(),lftabund)
        lftchisq = spfitter.chisq(lftmodel)
        abundarr.append(lftabund)
        modelarr.append(lftmodel)
        chisq.append(lftchisq)
        if verbose>0:
            logger.info('%f %f' % (lftabund,lftchisq))
        # Right
        rgtind = bestind+1
        rgtabund = np.mean([abundarr[bestind],abundarr[rgtind]])
        rgtmodel = spfitter.model(spec.wave.flatten(),rgtabund)
        rgtchisq = spfitter.chisq(rgtmodel)
        abundarr.append(rgtabund)
        modelarr.append(rgtmodel)
        chisq.append(rgtchisq)
        if verbose>0:
            logger.info('%f %f' % (rgtabund,rgtchisq))
        # Sort arrays so the next bisection uses proper neighbors
        si = np.argsort(abundarr)
        abundarr = [abundarr[k] for k in si]
        chisq = [chisq[k] for k in si]
        modelarr = [modelarr[k] for k in si]
    # Now interpolate to find the best value
    abundarr2 = np.linspace(np.min(abundarr),np.max(abundarr),1000)
    chisq2 = interp1d(abundarr,chisq,kind='quadratic')(abundarr2)
    bestind = np.argmin(chisq2)
    bestabund = abundarr2[bestind]
    # Get the model at the best value
    model = spfitter.model(spec.wave.flatten(),bestabund)
    bestchisq = spfitter.chisq(model)
    # Populate output structure
    for k,f in enumerate(fitparams):
        out[f] = bestabund
    out['pars'] = bestabund
    out['chisq'] = bestchisq
    out['nsynfev'] = spfitter.nsynfev
    if verbose>0:
        logger.info('%f %f' % (bestabund,bestchisq))
        logger.info('nfev = %i' % spfitter.nsynfev)
        logger.info('dt = %.2f sec.' % (time.time()-t0))
        logger.info(' ')
    return out, model
def fit_lsq(spec,params,fitparams=None,fparamlims=None,verbose=0,alinefile=None,mlinefile=None,logger=None):
    """
    Fit a spectrum with a synspec synthetic spectrum and determine stellar parameters and
    abundances using least-squares.

    Parameters
    ----------
    spec : Spec1D object
        The observed spectrum to match.
    params : dict
        Dictionary of initial values to use or parameters/elements to hold fixed.
    fitparams : list, optional
        List of parameter names to fit (e.g., TEFF, LOGG, FE_H, RV). By default all values
        in PARAMS are fit.
    fparamlims : dict, optional
        Dictionary of lower and upper limits for each of the fitparams.
    verbose : int, optional
        Verbosity level (0, 1, or 2). The default is 0 and verbose=2 is for debugging.
    alinefile : str, optional
        The atomic linelist to use. Default is None which means the default synple linelist is used.
    mlinefile : str, optional
        The molecular linelist to use. Default is None which means the default synple linelist is used.
    logger : logging object, optional
        Logging object.

    Returns
    -------
    out : numpy structured array
        Catalog of best-fit values.
    model : numpy array
        The best-fit synthetic stellar spectrum.

    Example
    -------

    .. code-block:: python

         spec = doppler.read(file)
         params = {'teff':5500,'logg':3.0,'fe_h':-1.0,'rv':0.0,'ca_h':-1.0}
         fitparams = ['teff','logg','fe_h','rv','ca_h']
         out,model = specfit.fit_lsq(spec,params,fitparams=fitparams)

    """
    t0 = time.time()
    if logger is None:
        logger = dln.basiclogger()
    # Normalize the spectrum
    if spec.normalized==False:
        spec.normalize()
    # Capitalize the inputs
    # Make key names all CAPS
    params = dict((key.upper(), value) for (key, value) in params.items())
    # Fitting parameters
    if fitparams is None:
        fitparams = list(params.keys())
    fitparams = [v.upper() for v in fitparams]   # all CAPS
    npar = len(fitparams)
    # Initialize the fitter
    spfitter = SpecFitter(spec,params,fitparams=fitparams,verbose=(verbose>=2),
                          alinefile=alinefile,mlinefile=mlinefile)
    spfitter.logger = logger
    spfitter.norm = True   # normalize the synthetic spectrum
    bounds = mkbounds(fitparams,fparamlims)
    pinit = initpars(params,fitparams,bounds)
    if verbose>0:
        logger.info('Fitting: '+', '.join(fitparams))
    # Fit the spectrum using curve_fit
    # NOTE(review): dx_lim is not a scipy.optimize.curve_fit keyword —
    # presumably a patched/custom curve_fit is imported; confirm the import
    dx_lim = mkdxlim(fitparams)
    pars, cov = curve_fit(spfitter.model,spfitter.wave,spfitter.flux,dx_lim=dx_lim,
                          sigma=spfitter.err,p0=pinit,bounds=bounds,jac=spfitter.jac)
    error = np.sqrt(np.diag(cov))
    if verbose>0:
        logger.info('Best values:')
        for k in range(npar):
            logger.info('%s = %.3f +/- %.3f' % (fitparams[k],pars[k],error[k]))
    model = spfitter.model(spfitter.wave,*pars)
    # RMS of the error-weighted residuals (sqrt of chisq per pixel)
    chisq = np.sqrt(np.sum(((spfitter.flux-model)/spfitter.err)**2)/len(model))
    if verbose>0:
        logger.info('chisq = %.2f' % chisq)
        logger.info('nfev = %i' % spfitter.nsynfev)
        logger.info('dt = %.2f sec.' % (time.time()-t0))
    # Put it into the output structure
    dtyp = []
    for f in fitparams:
        dtyp += [(f,float),(f+'_ERR',float)]
    dtyp += [('pars',float,npar),('parerr',float,npar),('parcov',float,(npar,npar)),('chisq',float),('nsynfev',int)]
    dtype = np.dtype(dtyp)
    out = np.zeros(1,dtype=dtype)
    for k,f in enumerate(fitparams):
        out[f] = pars[k]
        out[f+'_ERR'] = error[k]
    out['pars'] = pars
    out['parerr'] = error
    out['parcov'] = cov
    out['chisq'] = chisq
    out['nsynfev'] = spfitter.nsynfev
    # Reshape final model spectrum to the observed spectrum's shape
    model = model.reshape(spec.flux.shape)
    return out, model
def fit(spec,params=None,elem=None,figfile=None,fitvsini=False,fitvmicro=False,
fparamlims=None,verbose=1,alinefile=None,mlinefile=None,logger=None):
"""
Fit a spectrum with a synspec synthetic spectrum and determine stellar parameters and
abundances using a multi-step iterative method.
Step 1: Fit Teff/logg/[Fe/H]/RV using Doppler
Step 2: Fit Teff/logg/[Fe/H]/RV + vsini with Doppler model
Step 3: Fit stellar parameters (Teff/logg/[Fe/H]/[alpha/H]), RV and broadening (Vrot/Vmicro)
Step 4: Fit each element one at a time holding everything else fixed.
Step 5: Fit everything simultaneously
Parameters
----------
spec : Spec1D object
The observed spectrum to match.
params : dict, optional
Dictionary of initial values to use or parameters/elements to hold fixed.
elem : list, optional
List of elements to fit. The default is:
elem = ['C','N','O','NA','MG','AL','SI','K','CA','TI','V','CR','MN','CO','NI','CU','CE','ND']
Input an empty list [] to fit no elements.
figfile : string, optional
The filename for a diagnostic plot showing the observed spectrum and model spectrum.
fitvsini : bool, optional
Fit rotational velocity (vsini). By default, Vsini will be fit initially with a Doppler
model, but only included in the final fit if it improved chisq.
fitvmicro : bool, optional
Fit Vmicro. Default is False. By default, Vmicro is set (if not included in PARAMS)
logg>=3.8: vmicro = 2.0
logg<3.8: vmicro = 10^(0.226−0.0228*logg+0.0297*(logg)^2−0.0113*(logg)^3 )
fparamlims : dict, optional
Dictionary of lower and upper limits for each of the fitted parameter.
For example, if params is {'teff': 9000, 'logg': 4.00, 'rv': -16.124}, fparamlims
could be {'teff': [8000,10000], 'logg': [3.50,4.50], 'rv': [-20.124,-12.124]}.
verbose : int, optional
Verbosity level (0, 1, or 2). The default is 0 and verbose=2 is for debugging.
alinefile : str, optional
The atomic linelist to use. Default is None which means the default synple linelist is used.
mlinefile : str, optional
The molecular linelist to use. Default is None which means the default synple linelist is used.
logger : logging object, optional
Logging object.
Returns
-------
out : numpy structured array
Catalog of best-fit values.
model : numpy array
The best-fit synthetic stellar spectrum.
Example
-------
.. code-block:: python
spec = doppler.read(file)
out,model = specfit.fit(spec)
"""
t0 = time.time()
if logger is None:
logger = dln.basiclogger()
logger.handlers[0].setFormatter(logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s"))
logger.handlers[0].setStream(sys.stdout)
# Default set of elements
if elem is None:
elem = ['C','N','O','NA','MG','AL','SI','K','CA','TI','V','CR','MN','CO','NI','CU','SR','CE','ND']
# Normalize the spectrum
if spec.normalized==False:
spec.normalize()
# Print out inputs
if verbose>0:
logger.info('Inputs:')
if params is not None:
logger.info('PARAMS:')
for k,n in enumerate(params.keys()):
logger.info('%s = %f' % (n,params[n]))
else:
logger.info('PARAMS: None')
if fitvmicro:
logger.info('Fitting VMICRO')
if fitvsini:
logger.info('Fitting VSINI')
if len(elem)>0:
logger.info('Elements to fit: '+', '.join(elem))
else:
logger.info('No elements to fit')
logger.info(' ')
# Input linelists
if verbose and alinefile is not None:
logger.info('Using input atomic linelist: ',alinefile)
if verbose and mlinefile is not None:
logger.info('Using input molecular linelist: ',mlinefile)
# 1) Doppler (Teff, logg, feh, RV)
#---------------------------------
t1 = time.time()
if verbose>0:
logger.info('Step 1: Running Doppler')
# Use Doppler to get initial guess of stellar parameters and RV
dopout, dopfmodel, dopspecm = doppler.fit(spec)
if verbose>0:
logger.info('Teff = %.2f +/- %.2f' % (dopout['teff'][0],dopout['tefferr'][0]))
logger.info('logg = %.3f +/- %.3f' % (dopout['logg'][0],dopout['loggerr'][0]))
logger.info('[Fe/H] = %.3f +/- %.3f' % (dopout['feh'][0],dopout['feherr'][0]))
logger.info('Vrel = %.4f +/- %.4f' % (dopout['vrel'][0],dopout['vrelerr'][0]))
logger.info('chisq = %.3f' % dopout['chisq'][0])
logger.info('dt = %.2f sec.' % (time.time()-t1))
# typically 5 sec
# 2) Fit vsini as well with Doppler model
#-----------------------------------------
t2 = time.time()
if verbose>0:
logger.info(' ')
logger.info('Step 2: Fitting vsini with Doppler model')
# For APOGEE resolution you need vsini~4 km/s or greater to see an effect
initpar2 = [dopout['teff'][0], dopout['logg'][0], dopout['feh'][0], dopout['vrel'][0], 5.0]
out2, model2 = dopvrot_lsq(spec,initpar=initpar2,verbose=verbose,logger=logger)
if verbose>0:
logger.info('Teff = %.2f +/- %.2f' % (out2['pars'][0][0],out2['parerr'][0][0]))
logger.info('logg = %.3f +/- %.3f' % (out2['pars'][0][1],out2['parerr'][0][1]))
logger.info('[Fe/H] = %.3f +/- %.3f' % (out2['pars'][0][2],out2['parerr'][0][2]))
logger.info('Vrel = %.4f +/- %.4f' % (out2['pars'][0][3],out2['parerr'][0][3]))
logger.info('Vsini = %.3f +/- %.3f' % (out2['pars'][0][4],out2['parerr'][0][4]))
logger.info('chisq = %.3f' % out2['chisq'][0])
logger.info('dt = %.2f sec.' % (time.time()-t2))
# typically 5 sec
if out2['chisq'][0] > dopout['chisq'][0]:
if verbose>0:
logger.info('Doppler Vrot=0 chisq is better')
out2['pars'][0] = [dopout['teff'][0],dopout['logg'][0],dopout['feh'][0],dopout['vrel'][0],0.0]
# Initialize params
if params is None:
params = {}
else:
params = dict((key.upper(), value) for (key, value) in params.items()) # all CAPS
# Using input values when possible, otherwise Doppler values
for k,name in enumerate(['TEFF','LOGG','FE_H','RV','VROT']):
if params.get(name) is None:
params[name] = out2['pars'][0][k]
# Get Vmicro using Teff/logg relation
# APOGEE DR14 vmicro relation (Holtzman et al. 2018)
# for stars with [M/H]>-1 and logg<3.8
# vmicro = 10^(0.226−0.0228*logg+0.0297*(logg)^2−0.0113*(logg)^3 )
# coef = [0.226,0.0228,0.0297,−0.0113]
# only giants, was fit in dwarfs
if params.get('VMICRO') is None:
vmicro = 2.0 # default
if params['LOGG']<3.8:
vmcoef = [0.226,0.0228,0.0297,-0.0113]
vmicro = 10**dln.poly(params['LOGG'],vmcoef[::-1])
params['VMICRO'] = vmicro
# for giants
# vmacro = 10^(0.741−0.0998*logg−0.225[M/H])
# maximum of 15 km/s
# 3) Fit stellar parameters (Teff, logg, feh, alpha, RV, Vsini)
#--------------------------------------------------------------
t3 = time.time()
if verbose>0:
logger.info(' ')
logger.info('Step 3: Fitting stellar parameters, RV and broadening')
params3 = params.copy()
fitparams3 = ['TEFF','LOGG','FE_H','ALPHA_H','RV']
if params3['VROT']>0 or fitvsini is True:
fitparams3.append('VROT')
# Fit Vmicro as well if it's a dwarf
if params3['LOGG']>3.8 or params3['TEFF']>8000 or fitvmicro is True:
fitparams3.append('VMICRO')
out3, model3 = fit_lsq(spec,params3,fitparams3,fparamlims,verbose=verbose,
alinefile=alinefile,mlinefile=mlinefile,logger=logger)
# typically 9 min.
# Should we fit C_H and N_H as well??
# Tweak the continuum
if verbose is not None:
logger.info('Tweaking continuum using best-fit synthetic model')
tmodel = Spec1D(model3,wave=spec.wave.copy(),lsfpars=np.array(0.0))
spec = doppler.rv.tweakcontinuum(spec,tmodel)
# 4) Fit each element separately
#-------------------------------
t4 = time.time()
if verbose>0:
logger.info(' ')
logger.info('Step 4: Fitting each element separately')
params4 = params3.copy()
for k in range(len(fitparams3)):
params4[fitparams3[k]] = out3['pars'][0][k]
nelem = len(elem)
if nelem>0:
if verbose>0:
logger.info('Elements: '+', '.join(elem))
elemcat = np.zeros(nelem,dtype=np.dtype([('name',np.str,10),('par',np.float64),('parerr',np.float64)]))
elemcat['name'] = elem
for k in range(nelem):
t4b = time.time()
parselem = params4.copy()
if elem[k] in ['O','MG','SI','S','CA','TI']:
parselem[elem[k]+'_H'] = params4['ALPHA_H']
else:
parselem[elem[k]+'_H'] = params4['FE_H']
fitparselem = [elem[k]+'_H']
#out4, model4 = fit_lsq(spec,parselem,fitparselem,verbose=verbose,logger=logger)
out4, model4 = fit_elem(spec,parselem,fitparselem,verbose=verbose,
alinefile=alinefile,mlinefile=mlinefile,logger=logger)
elemcat['par'][k] = out4['pars'][0]
#elemcat['parerr'][k] = out4['parerr'][0]
if verbose>0:
logger.info('dt = %f sec.' % (time.time()-t4))
logger.info(' ')
else:
if verbose>0:
logger.info('No elements to fit')
# about 50 min.
# 5) Fit all parameters simultaneously
#---------------------------------------
# if NO elements to fit, then nothing to do
if nelem>0:
t5 = time.time()
if verbose>0:
logger.info('Step 5: Fit all parameters simultaneously')
params5 = params4.copy()
for k in range(nelem):
params5[elem[k]+'_H'] = elemcat['par'][k]
if params5.get('ALPHA_H') is not None:
del params5['ALPHA_H']
fitparams5 = ['TEFF','LOGG','FE_H','RV']
if 'VROT' in fitparams3 or fitvsini is True:
fitparams5.append('VROT')
if 'VMICRO' in fitparams3 or fitvmicro is True:
fitparams5.append('VMICRO')
fitparams5 = fitparams5+list(np.char.array(elem)+'_H')
out5, model5 = fit_lsq(spec,params5,fitparams5,fparamlims,verbose=verbose,
alinefile=alinefile,mlinefile=mlinefile,logger=logger)
else:
out5 = out3
model5 = model3
fitparams5 = fitparams3
# Make final structure and save the figure
out = out5
dtyp = []
npar = len(fitparams5)
for f in fitparams5:
dtyp += [(f,float),(f+'_ERR',float)]
dtyp += [('pars',float,npar),('parerr',float,npar),('parcov',float,(npar,npar)),('chisq',float),('vhelio',float)]
dtype = np.dtype(dtyp)
out = np.zeros(1,dtype=dtype)
for k,f in enumerate(fitparams5):
out[f] = out5['pars'][0][k]
out[f+'_ERR'] = out5['parerr'][0][k]
out['pars'] = out5['pars'][0]
out['parerr'] = out5['parerr'][0]
out['parcov'] = out5['parcov'][0]
out['chisq'] = out5['chisq'][0]
out['vhelio'] = out5['RV']+spec.barycorr()
if verbose>0:
logger.info('Vhelio = %.3f' % out['vhelio'])
# Final model
model = Spec1D(model5,wave=spec.wave.copy(),lsfpars=np.array(0.0))
model.lsf = spec.lsf.copy()
# Make figure
if figfile is not None:
specfigure(figfile,spec,model,out,verbose=(verbose>=2))
if verbose>0:
logger.info('dt = %.2f sec.' % (time.time()-t0))
return out, model
| 37.720147 | 157 | 0.572674 | 10,974 | 0.178118 | 0 | 0 | 483 | 0.00784 | 0 | 0 | 20,710 | 0.336141 |
4f09d6a22b4bd4f753a23f83daa8bf2c0724d4b9 | 2,832 | py | Python | src/split_data.py | sunsiche/DSCI522-2020-g22 | bf13a13a47053496fc68a624240b4d81071b60b0 | [
"MIT"
] | null | null | null | src/split_data.py | sunsiche/DSCI522-2020-g22 | bf13a13a47053496fc68a624240b4d81071b60b0 | [
"MIT"
] | 30 | 2020-11-19T00:03:14.000Z | 2020-12-15T07:32:34.000Z | src/split_data.py | sunsiche/DSCI522-2020-g22 | bf13a13a47053496fc68a624240b4d81071b60b0 | [
"MIT"
] | 5 | 2020-11-18T22:21:40.000Z | 2020-11-28T15:13:48.000Z | # author: Kevin Shahnazari
# date: 2020-11-25
"""
This script Splits the raw cleaned data to train and test splits
based on the user input and saves them into two separate csv files
Usage: clean_data.py --input_file_path=<input_file_path> --saving_path_train=<saving_path_train> --saving_path_test=<saving_path_test> --test_size=<test_size>
Options:
--input_file_path=<file_path> Path to the cleaned input data file
--saving_path_train=<saving_path_train> Path the training data file must be saved as csv file
--saving_path_test=<saving_path_test> Path the testing data file must be saved as csv file
--test_size=<saving_path_test> The proportion of test data to all the data. must be between 0 and 1.
"""
import pandas as pd
from docopt import docopt
from sklearn.model_selection import train_test_split
opt = docopt(__doc__)
def main(input_file_path, saving_path_train, saving_path_test, test_size):
    """Split the cleaned data into train and test portions and save them.

    Args:
        input_file_path (str): path to the cleaned csv data file.
        saving_path_train (str): csv path the train split is written to.
        saving_path_test (str): csv path the test split is written to.
        test_size (str or float): proportion of rows assigned to the test
            split; must parse as a number between 0 and 1.

    Returns:
        int: 0 on success, -1 on any failure (a message is printed).
    """
    # read in data
    try:
        df = pd.read_csv(input_file_path)
    except Exception as e:
        print(f"The script failed to open the cleaned data file with the error {e}")
        return -1
    # Check test size is valid
    try:
        test_size = float(test_size)
        if test_size < 0 or test_size > 1:
            print("The test_size argument must be between 0 and 1")
            return -1
    except (TypeError, ValueError):
        # float() raises these for non-numeric input; the previous bare
        # `except:` also hid unrelated programming errors.
        print("The test_size argument must be a numeric number")
        return -1
    # Split dataframe (fixed seed keeps the split reproducible)
    try:
        train_data, test_data = train_test_split(
            df, test_size=test_size, random_state=123
        )
    except Exception as e:
        print(f"The script failed to split data with error {e}")
        return -1
    # Save data
    try:
        # save train portion
        train_data.to_csv(saving_path_train, index_label=False, index=False)
        # save test portion
        test_data.to_csv(saving_path_test, index_label=False, index=False)
    except Exception as e:
        print(f"The script failed to save the save train or test with the error {e}")
        return -1
    return 0
# Script entry point: forward the docopt-parsed CLI options to main().
if __name__ == "__main__":
    main(
        opt["--input_file_path"],
        opt["--saving_path_train"],
        opt["--saving_path_test"],
        opt["--test_size"],
    )
| 31.820225 | 161 | 0.673376 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,778 | 0.627825 |
4f0a3de229c7c8c0a0849ab5caedb8c1a65f7b3b | 3,607 | py | Python | minerva/controllers/dataset.py | takuseno/minerva | c073cd309e84bf1785de8f5b8b5d46d3599d1010 | [
"MIT"
] | 68 | 2020-09-09T04:22:35.000Z | 2022-03-20T10:15:51.000Z | minerva/controllers/dataset.py | takuseno/minerva | c073cd309e84bf1785de8f5b8b5d46d3599d1010 | [
"MIT"
] | 3 | 2021-05-15T03:15:47.000Z | 2021-07-09T21:35:22.000Z | minerva/controllers/dataset.py | takuseno/minerva | c073cd309e84bf1785de8f5b8b5d46d3599d1010 | [
"MIT"
] | 5 | 2020-09-09T02:35:10.000Z | 2022-03-04T02:23:00.000Z | import base64
import json
import os
import tempfile
import uuid
import zipfile
from io import BytesIO
import werkzeug
from flask import Blueprint, jsonify, request
from ..config import get_config
from ..dataset import convert_ndarray_to_image, import_csv_as_mdp_dataset
from ..models.dataset import Dataset, DatasetSchema
from .generator import generate_for_model
dataset_route = Blueprint("dataset", __name__)
generate_for_model(dataset_route, Dataset, DatasetSchema)
@dataset_route.route("/upload", methods=["POST"])
def upload_dataset():
    """Create a Dataset record from an uploaded csv (plus optional image zip).

    Expects a multipart POST with a `dataset` csv file, an `is_image` form
    flag, and — when `is_image` is "true" — a `zip_file` archive of images.
    Returns the serialized Dataset on success, or a 400 JSON error.
    """
    # validation: reject requests that carry no dataset file at all
    if "dataset" not in request.files:
        return jsonify({"status": "dataset is empty"}), 400
    # save uploaded files and create MDPDataset inside a scratch directory
    # that is removed automatically when the block exits
    with tempfile.TemporaryDirectory() as dname:
        # save file (secure_filename strips path components from the name)
        file = request.files["dataset"]
        file_name = werkzeug.utils.secure_filename(file.filename)
        file_path = os.path.join(dname, file_name)
        file.save(file_path)
        # save image files
        is_image = request.form.get("is_image") == "true"
        if is_image:
            # save zip file
            zip_file = request.files["zip_file"]
            zip_file_name = werkzeug.utils.secure_filename(zip_file.filename)
            zip_file_path = os.path.join(dname, zip_file_name)
            zip_file.save(zip_file_path)
            # decompress zip file next to the csv — presumably so relative
            # image paths inside the csv resolve (TODO confirm against
            # import_csv_as_mdp_dataset)
            with zipfile.ZipFile(zip_file_path) as zip_fd:
                zip_fd.extractall(dname)
        # convert uploaded data to MDPDataset
        try:
            mdp_dataset = import_csv_as_mdp_dataset(file_path, image=is_image)
        except ValueError:
            return jsonify({"status": "dataset conversion failed."}), 400
        # save MDPDataset object under a fresh uuid so file names never collide
        dataset_name = str(uuid.uuid1()) + ".h5"
        dataset_path = os.path.join(get_config("DATASET_DIR"), dataset_name)
        mdp_dataset.dump(dataset_path)
        # get dataset size: bytes on disk, episode count, total steps
        data_size = os.path.getsize(dataset_path)
        episode_size = len(mdp_dataset)
        step_size = sum(map(len, mdp_dataset))
        # compute statistics
        stats = mdp_dataset.compute_stats()
        stats["observation_shape"] = mdp_dataset.get_observation_shape()
        stats["action_size"] = mdp_dataset.get_action_size()
        # handle ndarray serialization: jsonify encodes the ndarrays, then
        # json.dumps of that payload gives a plain string for the DB column
        stats_json = json.dumps(jsonify(stats).json)
        # insert record
        dataset = Dataset.create(
            file_name,
            dataset_name,
            episode_size,
            step_size,
            data_size,
            is_image,
            mdp_dataset.is_action_discrete(),
            stats_json,
        )
        # return json
        return jsonify(DatasetSchema().dump(dataset))
@dataset_route.route("/<dataset_id>/example", methods=["GET"])
def get_example_vector_observation(dataset_id):
    """Return a small sample of observations for the given dataset.

    Image datasets yield the first 3 observations as base64-encoded PNGs;
    vector datasets yield up to the first 100 raw observation vectors.
    """
    dataset = Dataset.get(dataset_id, raise_404=True)
    # take care of computational cost
    mdp_dataset = dataset.load_mdp_dataset()
    if not dataset.is_image:
        # vector case: clip to at most the first 100 samples
        limit = min(100, mdp_dataset.observations.shape[0])
        observations = mdp_dataset.observations[:limit]
    else:
        def _to_base64_png(ndarray):
            # render the ndarray as a PNG and base64-encode it as text
            image = convert_ndarray_to_image(ndarray)
            buffer = BytesIO()
            image.save(buffer, format="PNG")
            return base64.b64encode(buffer.getvalue()).decode().replace("'", "")

        # image case: only the first 3 samples are encoded
        observations = [_to_base64_png(arr) for arr in mdp_dataset.observations[:3]]
    return jsonify({"observations": observations})
| 31.365217 | 78 | 0.667868 | 0 | 0 | 0 | 0 | 3,128 | 0.867203 | 0 | 0 | 631 | 0.174938 |
4f0a4e418f04b1eac7667ced923ca16a38945606 | 234 | py | Python | onos_ric_sdk_py/__init__.py | MatthewWEdwards/onos-ric-sdk-py | 2c38542616b8c0d087315b05f12de6eefb2bb3f3 | [
"Apache-2.0"
] | null | null | null | onos_ric_sdk_py/__init__.py | MatthewWEdwards/onos-ric-sdk-py | 2c38542616b8c0d087315b05f12de6eefb2bb3f3 | [
"Apache-2.0"
] | null | null | null | onos_ric_sdk_py/__init__.py | MatthewWEdwards/onos-ric-sdk-py | 2c38542616b8c0d087315b05f12de6eefb2bb3f3 | [
"Apache-2.0"
] | null | null | null | # SPDX-FileCopyrightText: © 2021 Open Networking Foundation <support@opennetworking.org>
# SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from .e2 import E2Client, Subscription
from .sdl import SDLClient
| 29.25 | 88 | 0.816239 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 126 | 0.53617 |
4f0cae19f8a210a02c51f07cf3ab2e800f500a42 | 11,936 | py | Python | bump_release/__init__.py | frague59/bump-release | e1f0ae5c971075901fa4205dc6e85dfb14e160a2 | [
"MIT"
] | null | null | null | bump_release/__init__.py | frague59/bump-release | e1f0ae5c971075901fa4205dc6e85dfb14e160a2 | [
"MIT"
] | null | null | null | bump_release/__init__.py | frague59/bump-release | e1f0ae5c971075901fa4205dc6e85dfb14e160a2 | [
"MIT"
] | null | null | null | """
Update release numbers in various places, according to a release.ini file places at the project root
"""
import configparser
import logging
import sys
from configparser import ConfigParser
from pathlib import Path
from typing import Optional, Tuple
import click
from bump_release import helpers
from bump_release.helpers import split_version
# region Globals
__version__ = VERSION = "0.9.6"
RELEASE_FILE: Optional[Path] = None
RELEASE_CONFIG: Optional[ConfigParser] = None
# endregion Globals
@click.command()
@click.option(
    "-r",
    "--release-file",
    "release_file",
    help="Release file path, default `./release.ini`",
)
@click.option(
    "-n",
    "--dry-run",
    "dry_run",
    is_flag=True,
    help="If set, no operation are performed on files",
    default=False,
)
@click.option(
    "-d",
    "--debug",
    "debug",
    is_flag=True,
    help="If set, more traces are printed for users",
    default=False,
)
@click.version_option(version=__version__)
@click.argument("release")
def bump_release(
    release: str,
    release_file: Optional[str] = None,
    dry_run: bool = False,
    debug: bool = False,
) -> int:
    """
    Updates the files according to the release.ini file
    :param release: Version number, as "X.X.X"
    :param release_file: path to the release.ini config file
    :param dry_run: If `True`, no operation performed
    :param debug: If `True`, more traces !
    :return: 0 if no error...
    """
    # Loads the release.ini file; the resolved path and the parsed config
    # are stored in module globals so the per-file updaters can read them.
    global RELEASE_CONFIG, RELEASE_FILE
    if release_file is None:
        RELEASE_FILE = Path.cwd() / "release.ini"
    else:
        RELEASE_FILE = Path(release_file)
    if not RELEASE_FILE.exists():
        print(f"Unable to find release.ini file in the current directory {Path.cwd()}", file=sys.stderr)
        return 1
    RELEASE_CONFIG = helpers.load_release_file(release_file=RELEASE_FILE)
    try:
        return process_update(release_file=RELEASE_FILE, release=release, dry_run=dry_run, debug=debug)
    except Exception as e:
        # catch-all boundary for the CLI: report the error and exit with 2
        print(f"ERROR: {e}", file=sys.stderr)
        return 2
def process_update(release_file: Path, release: str, dry_run: bool, debug: bool = False) -> int:
    """Run every update step declared in the loaded release.ini config.

    Each step is optional: a step whose section/path is missing raises
    NothingToDoException inside its helper, which is logged as a warning
    and the step is skipped. The release.ini file itself is rewritten last.

    :param release_file: path to the release.ini file
    :param release: version number, as "X.X.X"
    :param dry_run: if `True`, files are inspected but not modified
    :param debug: if `True`, DEBUG-level logging is enabled
    :return: 0 (per-step "nothing to do" conditions are not fatal)
    """
    version = split_version(release)
    # Initialize the logging
    if debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    # region Updates the main project (DJANGO_SETTINGS_MODULE file for django projects, __init__.py file...)
    try:
        new_row = update_main_file(version=version, dry_run=dry_run)
        if new_row is not None:
            logging.debug(f"process_update() `main_project`: new_row = {new_row.strip()}")
    except helpers.NothingToDoException as e:
        logging.warning(f"process_update() No release section for `main_project`: {e}")
    # endregion
    # region Updates sonar-scanner properties
    try:
        new_row = update_sonar_properties(version=version, dry_run=dry_run)
        if new_row is not None:
            logging.debug(f"process_update() `sonar`: new_row = {new_row.strip()}")
    except helpers.NothingToDoException as e:
        logging.warning(f"process_update() No release section for `sonar`: {e}")
    # endregion
    # region Updates setup.py file
    try:
        new_row = update_setup_file(version=version, dry_run=dry_run)
        if new_row is not None:
            logging.debug(f"process_update() `setup`: new_row = {new_row.strip()}")
    except helpers.NothingToDoException as e:
        logging.warning(f"process_update() No release section for `setup`: {e}")
    # endregion
    # region Updates sphinx file
    try:
        new_row = update_docs_conf(version=version, dry_run=dry_run)
        if new_row is not None:
            logging.debug(f"process_update() `docs`: new_row = {new_row.strip()}")
    except helpers.NothingToDoException as e:
        logging.warning(f"process_update() No release section for `docs`: {e}")
    # endregion
    # region Updates node packages file
    # NOTE: logged without .strip() — the node helper's return value is not
    # guaranteed to be a plain string here.
    try:
        new_row = update_node_package(version=version, dry_run=dry_run)
        if new_row is not None:
            logging.debug(
                f"process_update() `node`: new_row = {new_row}",
            )
    except helpers.NothingToDoException as e:
        logging.warning(f"process_update() No release section for `node`: {e}")
    # endregion
    # region Updates YAML file
    try:
        new_row = update_ansible_vars(version=version, dry_run=dry_run)
        if new_row is not None:
            logging.debug(f"process_update() `ansible`: new_row = {new_row.strip()}")
    except helpers.NothingToDoException as e:
        logging.warning(f"process_update() No release section for `ansible`: {e}")
    # endregion
    # region Updates the release.ini file with the new release number
    new_row = update_release_ini(path=release_file, version=version, dry_run=dry_run)
    if new_row is not None:
        logging.warning(f"process_update() `release.ini`: new_row = {new_row.strip()}")
    # endregion
    return 0
def update_main_file(version: Tuple[str, str, str], dry_run: bool = True) -> Optional[str]:
    """
    Updates the main django settings file, or a python script with
    :param version: Release number tuple (major, minor, release)
    :param dry_run: If `True`, no operation performed
    :return: changed string
    :raises helpers.NothingToDoException: when the section or its path is missing
    """
    assert RELEASE_CONFIG is not None
    if not RELEASE_CONFIG.has_section("main_project"):
        raise helpers.NothingToDoException("No `main_project` section in release.ini file")
    try:
        section = RELEASE_CONFIG["main_project"]
        raw_path = section.get("path")
        if raw_path is None:
            raise helpers.NothingToDoException("No action to perform for main project: No path provided.")
        target = Path(raw_path)
        # quoted values in the ini file keep their quotes; strip them and
        # fall back to the package defaults when empty
        search_pattern = section.get("pattern", "").strip('"') or helpers.MAIN_PROJECT_PATTERN
        row_template = section.get("template", "").strip('"') or helpers.MAIN_PROJECT_TEMPLATE
    except configparser.Error as e:
        raise helpers.NothingToDoException("Unable to update main project file", e)
    return helpers.update_file(path=target, pattern=search_pattern, template=row_template, version=version, dry_run=dry_run)
def update_setup_file(version: Tuple[str, str, str], dry_run: bool = False) -> Optional[str]:
    """
    Updates the setup.py file
    :param version: Release number tuple (major, minor, release)
    :param dry_run: If `True`, no operation performed
    :return: changed string
    :raises helpers.NothingToDoException: if the `setup` section is absent,
        has no path, or the config cannot be read
    """
    assert RELEASE_CONFIG is not None
    if not RELEASE_CONFIG.has_section("setup"):
        raise helpers.NothingToDoException("No `setup` section in release.ini file")
    try:
        _path = RELEASE_CONFIG["setup"].get("path")
        # Guard against a missing `path` option: Path(None) would raise an
        # uncaught TypeError (same guard as in update_main_file).
        if _path is None:
            raise helpers.NothingToDoException("No action to perform for setup file: No path provided.")
        path = Path(_path)
        pattern = RELEASE_CONFIG["setup"].get("pattern", "").strip('"') or helpers.SETUP_PATTERN
        template = RELEASE_CONFIG["setup"].get("template", "").strip('"') or helpers.SETUP_TEMPLATE
    except configparser.Error as e:
        raise helpers.NothingToDoException("No action to perform for setup file", e)
    return helpers.update_file(path=path, pattern=pattern, template=template, version=version, dry_run=dry_run)
def update_sonar_properties(version: Tuple[str, str, str], dry_run: bool = False) -> Optional[str]:
    """
    Updates the sonar-project.properties file with the new release number
    :param version: Release number tuple (major, minor, release)
    :param dry_run: If `True`, no operation performed
    :return: changed string
    :raises helpers.NothingToDoException: if the `sonar` section is absent,
        has no path, or the config cannot be read
    """
    assert RELEASE_CONFIG is not None
    if not RELEASE_CONFIG.has_section("sonar"):
        raise helpers.NothingToDoException("No `sonar` section in release.ini file")
    try:
        _path = RELEASE_CONFIG["sonar"].get("path")
        # Guard against a missing `path` option: Path(None) would raise an
        # uncaught TypeError (same guard as in update_main_file).
        if _path is None:
            raise helpers.NothingToDoException("No action to perform for sonar file: No path provided.")
        path = Path(_path)
        pattern = RELEASE_CONFIG["sonar"].get("pattern", "").strip('"') or helpers.SONAR_PATTERN
        template = RELEASE_CONFIG["sonar"].get("template", "").strip('"') or helpers.SONAR_TEMPLATE
    except configparser.Error as e:
        raise helpers.NothingToDoException("No action to perform for sonar file", e)
    return helpers.update_file(path=path, pattern=pattern, template=template, version=version, dry_run=dry_run)
def update_docs_conf(version: Tuple[str, str, str], dry_run: bool = False) -> Optional[str]:
    """
    Updates the Sphinx conf.py file with the new release number
    Both the `release` and the `version` strings of conf.py are rewritten.
    :param version: Release number tuple (major, minor, release)
    :param dry_run: If `True`, no operation performed
    :return: concatenation of the changed rows, or None if nothing changed
    :raises helpers.NothingToDoException: if the `docs` section is absent,
        has no path, or the config cannot be read
    """
    assert RELEASE_CONFIG is not None
    if not RELEASE_CONFIG.has_section("docs"):
        raise helpers.NothingToDoException("No `docs` section in release.ini file")
    try:
        _path = RELEASE_CONFIG["docs"].get("path")
        # Guard against a missing `path` option: Path(None) would raise an
        # uncaught TypeError (same guard as in update_main_file).
        if _path is None:
            raise helpers.NothingToDoException("No action to perform for docs file: No path provided.")
        path = Path(_path)
        pattern_release = RELEASE_CONFIG["docs"].get("pattern_release", "").strip('"') or helpers.DOCS_RELEASE_PATTERN
        template_release = RELEASE_CONFIG["docs"].get("template_release", "").strip('"') or helpers.DOCS_RELEASE_FORMAT
        pattern_version = RELEASE_CONFIG["docs"].get("pattern_version", "").strip('"') or helpers.DOCS_VERSION_PATTERN
        template_version = RELEASE_CONFIG["docs"].get("template_version", "").strip('"') or helpers.DOCS_VERSION_FORMAT
    except configparser.Error as e:
        raise helpers.NothingToDoException("No action to perform for docs file", e)
    update_release = helpers.update_file(
        path=path,
        pattern=pattern_release,
        template=template_release,
        version=version,
        dry_run=dry_run,
    )
    update_version = helpers.update_file(
        path=path,
        pattern=pattern_version,
        template=template_version,
        version=version,
        dry_run=dry_run,
    )
    # Join only the rows that actually changed; the previous
    # `str(a) + str(b)` embedded the literal text "None" in the result
    # whenever one of the two updates was a no-op.
    changed = [row for row in (update_release, update_version) if row is not None]
    return "".join(changed) if changed else None
def update_node_package(version: Tuple[str, str, str], dry_run: bool = False) -> Optional[str]:
    """
    Updates the nodejs package file with the new release number
    :param version: Release number tuple (major, minor, release)
    :param dry_run: If `True`, no operation performed
    :return: changed string
    """
    assert RELEASE_CONFIG is not None
    try:
        # a missing section or missing `path` option both raise a
        # configparser.Error subclass, caught below
        package_path = Path(RELEASE_CONFIG.get("node", "path"))
        package_key = RELEASE_CONFIG.get("node", "key", fallback=helpers.NODE_KEY)  # noqa
    except configparser.Error as error:
        raise helpers.NothingToDoException("No action to perform for node packages file", error)
    return helpers.update_node_packages(path=package_path, version=version, key=package_key, dry_run=dry_run)
def update_ansible_vars(version: Tuple[str, str, str], dry_run: bool = False) -> Optional[str]:
    """
    Updates the ansible project variables file with the new release number
    :param version: Release number tuple (major, minor, release)
    :param dry_run: If `True`, no operation performed
    :return: changed string
    """
    assert RELEASE_CONFIG is not None
    try:
        # a missing section or missing `path` option both raise a
        # configparser.Error subclass, caught below
        vars_path = Path(RELEASE_CONFIG.get("ansible", "path"))
        vars_key = RELEASE_CONFIG.get("ansible", "key", fallback=helpers.ANSIBLE_KEY)  # noqa
    except configparser.Error as error:
        raise helpers.NothingToDoException("No action to perform for ansible file", error)
    return helpers.updates_yaml_file(path=vars_path, version=version, key=vars_key, dry_run=dry_run)
def update_release_ini(path: Path, version: Tuple[str, str, str], dry_run: bool = False) -> Optional[str]:
    """
    Updates the release.ini file with the new release number
    :param path: Release file path
    :param version: release number, as (<major>, <minor>, <release>)
    :param dry_run: If `True`, the operation WILL NOT be performed
    :return: Updated lines
    """
    # release.ini always uses the package-level pattern/template constants
    update_kwargs = {
        "path": path,
        "pattern": helpers.RELEASE_INI_PATTERN,
        "template": helpers.RELEASE_INI_TEMPLATE,
        "version": version,
        "dry_run": dry_run,
    }
    return helpers.update_file(**update_kwargs)
| 37.652997 | 119 | 0.683311 | 0 | 0 | 0 | 0 | 1,594 | 0.133546 | 0 | 0 | 4,428 | 0.370979 |
4f0d4ff16fed95ed4b85490e43c5507bd62fbc4e | 1,748 | py | Python | sense/core.py | pygeo/sense | 4610fe5247cfba04124a30a7d3db33ea1feb8c80 | [
"Apache-2.0"
] | 1 | 2020-05-27T07:05:04.000Z | 2020-05-27T07:05:04.000Z | sense/core.py | pygeo/sense | 4610fe5247cfba04124a30a7d3db33ea1feb8c80 | [
"Apache-2.0"
] | 6 | 2017-06-23T18:11:32.000Z | 2017-06-30T04:35:22.000Z | sense/core.py | pygeo/sense | 4610fe5247cfba04124a30a7d3db33ea1feb8c80 | [
"Apache-2.0"
] | 3 | 2017-07-25T11:48:39.000Z | 2020-09-28T05:13:24.000Z | import numpy as np
import matplotlib.pyplot as plt
class Fresnel0(object):
    """Nadir (normal-incidence) Fresnel reflectivity, Ulaby (2014), eq. 10.36."""
    def __init__(self, e):
        """
        calculate the Nadir Fresnel reflectivity
        e.g. Ulaby (2014), eq. 10.36
        Parameters
        ----------
        e : complex
            complex relative dielectric permitivity
        """
        # store the reflectivity on attribute `x`
        self.x = self._calc(e)

    def _calc(self, e):
        # |(1 - sqrt(e)) / (1 + sqrt(e))|^2
        root = np.sqrt(e)
        ratio = (1. - root) / (1. + root)
        return np.abs(ratio) ** 2.
class Reflectivity(object):
    """
    calculate the reflectivity for H and V polarization
    """
    def __init__(self, eps, theta):
        """
        table 2.5 Ulaby (2014)
        assumes specular surface
        Parameters
        ----------
        eps : complex
            relative dielectric permitivity
        theta : float, ndarray
            incidence angle [rad]
            can be specified
        """
        self.eps = eps
        self.theta = theta
        self._calc_reflection_coefficients()
        # power reflectivities are the squared magnitudes of the coefficients
        self.v = np.abs(self.rho_v) ** 2.
        self.h = np.abs(self.rho_h) ** 2.

    def _calc_reflection_coefficients(self):
        """
        calculate reflection coefficients
        Woodhouse, 2006; Eq. 5.54, 5.55
        """
        cos_t = np.cos(self.theta)
        sin2_t = np.sin(self.theta) ** 2.
        root = np.sqrt(self.eps - sin2_t)
        self.rho_v = (self.eps * cos_t - root) / (self.eps * cos_t + root)
        self.rho_h = (cos_t - root) / (cos_t + root)

    def plot(self):
        # plot both polarizations against incidence angle in degrees
        fig = plt.figure()
        axis = fig.add_subplot(111)
        axis.plot(np.rad2deg(self.theta), self.v, color='red', linestyle='-', label='V')
        axis.plot(np.rad2deg(self.theta), self.h, color='blue', linestyle='--', label='H')
        axis.grid()
        axis.legend()
877bfe2267259aa2db8872716813b19fd65e5f86 | 6,319 | py | Python | src/cs_519_files/ML_Project_v1.0.py | CJCascalheira/ml-gender-dysphoria | 2b20c19020342bd5b3c09aa0c107f26770aa541c | [
"MIT"
] | null | null | null | src/cs_519_files/ML_Project_v1.0.py | CJCascalheira/ml-gender-dysphoria | 2b20c19020342bd5b3c09aa0c107f26770aa541c | [
"MIT"
] | null | null | null | src/cs_519_files/ML_Project_v1.0.py | CJCascalheira/ml-gender-dysphoria | 2b20c19020342bd5b3c09aa0c107f26770aa541c | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import matplotlib.pyplot as plt
import contractions # Expanding contractions
from nltk.stem.wordnet import WordNetLemmatizer
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
# Banner / credits printed at start-up.
print(' ------------------------------------------')
print('| Classifying Gender Dysphoria Disclosures |')
print('| on Social Media with Machine Learning. |')
print(' ------------------------------------------')
print()
print('Team members: Cory J. Cascalheira')
print(' Ivan Nieto Gomez ')
print(' Edgar Corrales Sotelo')
print()
print('Data Processing....')
print()
#num_of_lines = 2
# Load the labeled corpus; df_truth.csv is expected in the working directory.
dataset = pd.read_csv('df_truth.csv')
dataset.tail()
#print('Dataset size: ',dataset.shape)
# ------ ORIGINAL DATA --------
#print('Original Dataset: \n',dataset)
headers = list(dataset.columns.values)
#print(headers)
# Column 1 holds the raw post text — presumably named 'text'; TODO confirm
# against the csv schema.
text = dataset.iloc[:,1] # text = dataset['text']
#print(text.shape)
#print(text)
# ---------------- EXPANDING CONTRACTIONS -------------------
# Lowercase each document, then expand contractions word by word and
# rejoin with single spaces.
n_text = []
for i in range(len(text)):
    words = str(text[i]).lower().split()
    expanded = [contractions.fix(word) for word in words]
    n_text.append(' '.join(expanded))
mySeries = pd.Series(n_text)
# ----------------------------------------------------------
# Strip digits and punctuation/special characters, then remove English
# stop words from each document.
new_text = []
w_stopwords_text = []
# Characters to delete (includes some mojibake observed in the raw corpus).
punc = '''!()-[]{};:'"\,“”<>’./?@#$%^&*ðÿ˜=∆+_~'''
for k in range(len(mySeries)):
    a = str(mySeries[k])
    # remove numbers
    text_ = ''.join([i for i in a if not i.isdigit()])
    # remove special characters and punctuation in a single C-level pass
    # (equivalent to the per-character replace loop, just faster)
    text_ = text_.translate(str.maketrans('', '', punc))
    new_text.append(text_)
# -------------------- REMOVING STOP WORDS -------------------
# Build the stop-word set once: calling stopwords.words('english') inside
# the loop re-read the corpus for every document and made each membership
# test O(n) instead of O(1).
english_stopwords = set(stopwords.words('english'))
for j in range(len(new_text)):
    text_tokens = word_tokenize(new_text[j])
    tokens_without_sw = [word for word in text_tokens if word not in english_stopwords]
    w_stopwords_text.append(" ".join(tokens_without_sw))
col_text = pd.DataFrame(w_stopwords_text)
final_text = col_text[0]
# -------------------------------- NORMALIZING WORDS VIA LEMMATIZATION ---------------------------------
# Reuse a single lemmatizer: the original constructed a new
# WordNetLemmatizer for every word, which is pure overhead.
lemmatizer = WordNetLemmatizer()
yyy = []
for count in range(len(w_stopwords_text)):
    words_sent = str(w_stopwords_text[count]).split()
    lemmas = [lemmatizer.lemmatize(word) for word in words_sent]
    yyy.append(' '.join(lemmas))
col_text = pd.DataFrame(yyy)
final_text = col_text[0]
# --------------- CLEANED DATA PLACED IN COLUMN #2 -----------
dataset.insert(2, 'new_text', final_text)
print('1. Text Preprocessing Done!')
# Features are the cleaned posts; labels are the binary dysphoria flag.
X = dataset['new_text'].values
y = dataset['dysphoria'].values
y_labels = np.unique(y)
# 70/30 split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=1)
#print(X_train.shape)
#print(X_test.shape)
# Fit TF-IDF on the training split only, then transform the test split
# with the same vocabulary (avoids leakage).
vectorizer = TfidfVectorizer()
X_train = vectorizer.fit_transform(X_train)
X_test = vectorizer.transform(X_test)
# ---------------------------------------------------------------------------------
print('2. Classifiers')
print()
# ---------------------------------------------------------------------------------
print('2.1. Support Vector Machine (SVM - RBF)')
print()
svm = SVC(kernel = 'rbf', gamma = 0.1, C = 10.0, random_state = 1)
svm.fit(X_train,y_train)
y_pred = svm.predict(X_test)
svm_predictions = svm.predict(X_test)
print(' Misclassified samples (linear model): %d'%(y_test!=y_pred).sum())
print(' Accuracy: %.3f'%accuracy_score(y_test,y_pred))
print(classification_report(y_test, svm_predictions))
# ---------------------------------------------------------------------------------
print('2.2. Decision Tree')
print()
dt = DecisionTreeClassifier(criterion="entropy", random_state = 1)
dt.fit(X_train,y_train)
y_pred = dt.predict(X_test)
dt_predictions = dt.predict(X_test)
print(' Misclassified samples: %d'%(y_test!=y_pred).sum())
print(' Accuracy: %.2f'%accuracy_score(y_test,y_pred))
print(classification_report(y_test, dt_predictions))
print()
# ---------------------------------------------------------------------------------
print('2.3. Logistic Regression')
print()
log_reg = LogisticRegression(penalty='l2', C = 10, random_state = 1)
log_reg.fit(X_train, y_train)
y_pred = log_reg.predict(X_test)
log_reg_predictions = log_reg.predict(X_test)
print(' Misclassified samples: %d'%(y_test!=y_pred).sum())
print(' Accuracy: %.2f'%accuracy_score(y_test,y_pred))
print(classification_report(y_test, log_reg_predictions))
print()
# ---------------------------------------------------------------------------------
#print('2.4. Linear Regression')
#print()
#lr = LogisticRegression()
#lr.fit(X_train, y_train)
#y_pred = lr.predict(X_test)
#lr_predictions = lr.predict(X_test)
#print(' Misclassified samples: %d'%(y_test!=y_pred).sum())
#print(' Accuracy: %.2f'%accuracy_score(y_test,y_pred))
#print(classification_report(y_test, lr_predictions))
#print()
# ---------------------------------------------------------------------------------
| 33.08377 | 140 | 0.59424 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,723 | 0.430174 |
877c2a87452398113982e4fe50b67db15a8d4d93 | 796 | py | Python | applications/startupconfort/templatetags/item_extra.py | guinslym/python-color-palette | 3643e4d1d7d12f9a57b130337f50ee4e072ea08f | [
"MIT"
] | null | null | null | applications/startupconfort/templatetags/item_extra.py | guinslym/python-color-palette | 3643e4d1d7d12f9a57b130337f50ee4e072ea08f | [
"MIT"
] | null | null | null | applications/startupconfort/templatetags/item_extra.py | guinslym/python-color-palette | 3643e4d1d7d12f9a57b130337f50ee4e072ea08f | [
"MIT"
] | null | null | null | from django import template
import datetime
from applications.startupconfort.models import CartItem
#https://github.com/guinslym/mywadiyabi/blob/master/applications/wadiyabi/templatetags/registration.py
register = template.Library()
@register.filter
def replace_commas(string):
    """Template filter: swap every comma in *string* for an underscore."""
    return '_'.join(string.split(','))
@register.simple_tag
def get_total_for_this_cart(items):
    """Return the cart total as a 2-decimal string: 8 (flat shipping) plus
    price * quantity for each CartItem of the cart's owner.

    `items` is a CartItem queryset; an empty cart yields "0.00".
    """
    try:
        user = items.first().customer
    except AttributeError:
        # items.first() returned None -> empty cart. The previous bare
        # `except:` also hid real programming errors; keep it narrow.
        return ("{0:.2f}".format(0))
    number_of_products = CartItem.objects.filter(customer=user).count()
    if (number_of_products > 0):
        # flat shipping fee of 8 plus the price of every line item
        total = 8 + sum([item.product.price * item.quantity for item in CartItem.objects.filter(customer=user) ] )
    else:
        total = 0
    return ("{0:.2f}".format(total))
| 26.533333 | 114 | 0.704774 | 0 | 0 | 0 | 0 | 553 | 0.694724 | 0 | 0 | 157 | 0.197236 |
877c7d49d1b9f81b157093af1820422a112322bb | 4,069 | py | Python | main.py | artin222/Password-generator | 7649377b578267b899737b4f7d312bc2b489cc0e | [
"MIT"
] | 1 | 2021-11-20T13:47:27.000Z | 2021-11-20T13:47:27.000Z | main.py | artin222/Password-generator | 7649377b578267b899737b4f7d312bc2b489cc0e | [
"MIT"
] | null | null | null | main.py | artin222/Password-generator | 7649377b578267b899737b4f7d312bc2b489cc0e | [
"MIT"
] | null | null | null | from csv import reader , writer
from random import randrange
#These are lists of characters that program can use to generate password
smlLetters = ["a" , "b" , "c" , "d" , "e" , "f" , "g" , "h" , "i" , "j" ,
"k" , "l" , "m" , "n" , "o" , "p" , "q" , "r" , "s" , "t" , "u" , "v" , "w" ,
"x" , "y" , "z"]
capLetters = ["A" , "B" , "C" , "D" , "E" , "F" , "G" , "H" , "I" , "J" , "K" ,
"L" , "M" , "N" , "O" , "P" , "Q" , "R" , "S" , "T" , "U" , "V" , "W" , "X" ,
"Y" , "Z"]
numbers = ["1" , "2" , "3" , "4" , "5" , "6" , "7" , "8" , "9" , "0"]
symbols = ["!" , "@" , "#" , "$" , "%"]
#genpass function generate password.
def genpass(useNumbers , useSymbols , useCapLetters , useSmlLetters , numbers , symbols , capLetters , lenOfPassword , nameOfPassword):
    """Generate a random password and append it to ./file.csv.

    The character pools are the `numbers`, `symbols` and `capLetters`
    arguments plus the module-level `smlLetters` list; the boolean flags
    select which pools participate. Each character is drawn by first
    picking a pool at random, then a character from that pool. The row
    written to the csv is [nameOfPassword, password].

    Raises:
        ValueError: if no character set is enabled.
    """
    # pools of characters the generator may draw from
    liOfChars = []
    if useNumbers:
        liOfChars.append(numbers)
    if useSymbols:
        liOfChars.append(symbols)
    if useCapLetters:
        liOfChars.append(capLetters)
    if useSmlLetters:
        liOfChars.append(smlLetters)  # NOTE: module-level list, not a parameter
    if not liOfChars:
        raise ValueError("At least one character set must be enabled")
    password = ""
    for _ in range(lenOfPassword):
        # randrange(len(...)) replaces the original `index(last) + 1`
        # trick, which only yielded the length because every pool entry
        # happened to be unique.
        choosingList = liOfChars[randrange(len(liOfChars))]
        password += choosingList[randrange(len(choosingList))]
    # append the record; newline="" stops csv from emitting blank rows on
    # Windows, and the unused csv reader on the append-mode handle is gone
    with open("./file.csv", "a", newline="") as csvFile:
        writer(csvFile).writerow([nameOfPassword, password])
def boolInput(prompt):
    """Prompt repeatedly until the user types True or False; return a bool."""
    answer = input(prompt).capitalize().strip()
    while answer not in ("True", "False"):
        print("Input is not True of False. please write with True of False")
        answer = input(prompt).capitalize().strip()
    return answer == "True"
numOfPasswords = input("Please enter how many password you want: ")
# BUG FIX: the original condition evaluated `numOfPasswords > 0` on a
# *string*, raising TypeError for any non-numeric input. Validate that the
# text is all digits before converting.
while not (numOfPasswords.isdigit() and int(numOfPasswords) > 0):
    print("\n" + "Entered value is not natural number. please enter natural number")
    numOfPasswords = input("Please enter how many password you want: ")
numOfPasswords = int(numOfPasswords)
for run in range(numOfPasswords):
    # Ask which character classes to include; at least one must be enabled.
    useNumbers = boolInput("\n" + "Do you want to use numbers? True of False: ")
    useSymbols = boolInput("Do you want to use symbols? True of False: ")
    useCapLetters = boolInput("Do you want to use capital letters? True of False: ")
    useSmlLetters = boolInput("Do you want to use small letters? True of False: ")
    while not (useNumbers or useSymbols or useCapLetters or useSmlLetters):
        print("\n" + "\n" + "All of options of password was False. please make one of them True. \n")
        useNumbers = boolInput("Do you want to use numbers? True of False: ")
        useSymbols = boolInput("Do you want to use symbols? True of False: ")
        useCapLetters = boolInput("Do you want to use capital letters? True of False: ")
        useSmlLetters = boolInput("Do you want to use small letters? True of False: ")
    nameOfPassword = input("\n" + "What is name of this password? ")
    while nameOfPassword == "":
        nameOfPassword = input("You must choose name for password. please enter something: ")
    lenOfPassword = int(input("\n" + "How much is length of password?(maximum is 4096) "))
    while lenOfPassword > 4096 or lenOfPassword < 1:
        print("\n" + "You entered number that was bigger than 4096 or lower than 1. please enter smaller or bigger number.(maximum is 4096)")
        lenOfPassword = int(input("How much is length of password?(maximum is 4096) "))
    genpass(useNumbers, useSymbols, useCapLetters, useSmlLetters, numbers, symbols, capLetters, lenOfPassword, nameOfPassword)
| 42.831579 | 141 | 0.632834 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,572 | 0.386336 |
877c8d0d7d7e4e7123818535f0c941d6e42050fa | 328 | py | Python | prefect/token_test.py | kvnkho/demos | c8c33993e00baf6a25d0ffdc44db924b327cbffa | [
"MIT"
] | 13 | 2021-05-13T23:07:17.000Z | 2022-03-19T00:00:41.000Z | prefect/token_test.py | kvnkho/demos | c8c33993e00baf6a25d0ffdc44db924b327cbffa | [
"MIT"
] | null | null | null | prefect/token_test.py | kvnkho/demos | c8c33993e00baf6a25d0ffdc44db924b327cbffa | [
"MIT"
] | 7 | 2021-06-16T18:16:55.000Z | 2022-03-21T03:34:43.000Z | from prefect import task, Flow, Parameter
from prefect.tasks.prefect import StartFlowRun
from prefect.storage import GitHub
with Flow("token-test") as flow:
StartFlowRun(project_name="testing", flow_name="flow_must_fail")()
flow.storage = GitHub(repo="kvnkho/demos", path="/prefect/token_test.py")
flow.register("testing") | 36.444444 | 73 | 0.780488 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 84 | 0.256098 |
877caa671ed5d222230ea90a669e72fef113794e | 792 | py | Python | setup.py | turbolent/spacy-thrift | 9b45caca91ae7ceb1e790c5467f994c6b2d9ecef | [
"MIT"
] | 5 | 2017-06-15T05:19:48.000Z | 2021-11-21T16:17:14.000Z | setup.py | turbolent/spacy-thrift | 9b45caca91ae7ceb1e790c5467f994c6b2d9ecef | [
"MIT"
] | null | null | null | setup.py | turbolent/spacy-thrift | 9b45caca91ae7ceb1e790c5467f994c6b2d9ecef | [
"MIT"
] | 1 | 2020-01-25T21:46:22.000Z | 2020-01-25T21:46:22.000Z | #!/usr/bin/env python
from setuptools import setup
with open('README.md') as f:
long_description = f.read()
setup(name='spacy-thrift',
version='0.5.0',
description='spaCy-as-a-service using Thrift',
long_description=long_description,
long_description_content_type="text/markdown",
keywords=["natural language processing", "nlp"],
url='https://github.com/turbolent/spacy-thrift',
author='Bastian Mueller',
author_email='bastian@turbolent.com',
classifiers=[
"License :: OSI Approved :: MIT License",
"Topic :: Text Processing",
],
packages=['spacyThrift'],
install_requires=[
"click==6.7",
"thrift==0.11.0",
"spacy==2.0.12",
"coloredlogs==10.0"
])
| 28.285714 | 54 | 0.602273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 359 | 0.453283 |
877cfe2e99691a97f1e6e9c1d95453bf03ed6513 | 471 | py | Python | swexpert/d3/sw_2817_1.py | ruslanlvivsky/python-algorithm | 2b49bed33cd0e95b8a1e758008191f4392b3f667 | [
"MIT"
] | 3 | 2021-07-18T14:40:24.000Z | 2021-08-14T18:08:13.000Z | swexpert/d3/sw_2817_1.py | jinsuSang/python-algorithm | 524849a0a7e71034d329fef63c4f384930334177 | [
"MIT"
] | null | null | null | swexpert/d3/sw_2817_1.py | jinsuSang/python-algorithm | 524849a0a7e71034d329fef63c4f384930334177 | [
"MIT"
] | null | null | null | test_cases = int(input().strip())
def sum_sub_nums(idx, value):
    """Count subsequences of the module-level `nums` (from index `idx` on)
    whose running sum `value` reaches exactly K; tally into global `result`."""
    global result
    if value == K:
        result += 1
        return
    if idx >= N or value > K:
        return
    # Branch 1: skip nums[idx]; branch 2: take it.
    sum_sub_nums(idx + 1, value)
    sum_sub_nums(idx + 1, value + nums[idx])
for t in range(1, test_cases + 1):
    # N: sequence length, K: target subsequence sum.
    N, K = map(int, input().strip().split())
    nums = list(map(int, input().strip().split()))
    result = 0  # reset the counter before each test case
    sum_sub_nums(0, 0)
    print('#{} {}'.format(t, result))
| 21.409091 | 50 | 0.556263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.016985 |
877d10e737235988632bc1400d9926fa9155da16 | 1,463 | py | Python | data_action/data_loader.py | namanshrimali/simplif-ai | d4178bd2d82dd651789904f4889c99f0929c8578 | [
"MIT"
] | null | null | null | data_action/data_loader.py | namanshrimali/simplif-ai | d4178bd2d82dd651789904f4889c99f0929c8578 | [
"MIT"
] | null | null | null | data_action/data_loader.py | namanshrimali/simplif-ai | d4178bd2d82dd651789904f4889c99f0929c8578 | [
"MIT"
] | null | null | null | from data_action.transformations import *
class Data_Loader:
    """Factory for CIFAR10 train/test DataLoaders.

    Stores the target device, batch size, dataset name (only 'CIFAR10' is
    handled), normalisation mean/std, and the transform preset name that is
    forwarded to ``get_transforms``.
    """

    def __init__(self, device, batch_size, dataset, mean, std, transform_type='pmda'):
        self.device = device
        self.batch_size = batch_size
        self.transform_type = transform_type
        self.dataset = dataset
        self.mean = mean
        self.std = std
        # Worker processes and pinned memory only pay off when feeding a GPU.
        self.kwargs = {'num_workers': 2, 'pin_memory': True} if device == "cuda" else {}

    def load_training_data(self):
        """Return a shuffled CIFAR10 training loader, or None for other datasets."""
        print(f'Loading training data. Dataset: {self.dataset}')
        trainloader = None
        if self.dataset == 'CIFAR10':
            train_set = torchvision.datasets.CIFAR10(
                root='../data', train=True, download=True,
                transform=get_transforms(self.transform_type, self.mean, self.std))
            trainloader = torch.utils.data.DataLoader(
                train_set, self.batch_size, shuffle=True, **self.kwargs)
        print('Training data loaded\n')
        return trainloader

    def load_testing_data(self):
        """Return an unshuffled CIFAR10 test loader, or None for other datasets."""
        print('Loading testing data.')
        testloader = None
        if self.dataset == 'CIFAR10':
            # NOTE(review): the transform preset is hard-coded to 'pmda' here,
            # unlike load_training_data — presumably intentional; confirm.
            test_set = torchvision.datasets.CIFAR10(
                root='../data', train=False, download=True,
                transform=get_transforms('pmda', self.mean, self.std))
            testloader = torch.utils.data.DataLoader(
                test_set, self.batch_size, shuffle=False, **self.kwargs)
        print('Test data loaded\n')
        return testloader
| 40.638889 | 157 | 0.607656 | 1,419 | 0.969925 | 0 | 0 | 0 | 0 | 0 | 0 | 195 | 0.133288 |
877ea49df20cc55a5d0735462644f88406735b14 | 291 | py | Python | src/dots/token.py | Mokin711/dots-python | 6bc0c98daa331302df9c9829a7579be6e1bd828c | [
"MIT"
] | 1 | 2021-06-14T18:43:53.000Z | 2021-06-14T18:43:53.000Z | src/dots/token.py | Mokin711/dots-python | 6bc0c98daa331302df9c9829a7579be6e1bd828c | [
"MIT"
] | 1 | 2021-11-15T21:33:27.000Z | 2021-11-16T19:22:34.000Z | src/dots/token.py | Mokin711/dots-python | 6bc0c98daa331302df9c9829a7579be6e1bd828c | [
"MIT"
] | 1 | 2022-02-09T19:39:15.000Z | 2022-02-09T19:39:15.000Z | import base64
import dots
def get_auth_token():
if dots.client_id == None or dots.api_key == None:
raise AssertionError('api_key and/or client_id not set')
token = base64.b64encode(bytes(dots.client_id + ':' + dots.api_key, 'utf-8')).decode('utf-8')
return token
| 24.25 | 97 | 0.670103 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.175258 |
877eab8db5158dc55512e44d21ca46730b5e208f | 946 | py | Python | setup.py | ctsit/lineman | d90e876d70fbc3d6ca18425d2748d70eb00ab485 | [
"Apache-2.0"
] | null | null | null | setup.py | ctsit/lineman | d90e876d70fbc3d6ca18425d2748d70eb00ab485 | [
"Apache-2.0"
] | 2 | 2017-05-23T18:45:01.000Z | 2017-09-26T17:02:34.000Z | setup.py | ctsit/lineman | d90e876d70fbc3d6ca18425d2748d70eb00ab485 | [
"Apache-2.0"
] | 3 | 2017-04-28T13:35:34.000Z | 2017-05-16T14:01:13.000Z | from setuptools import setup
#bring in __version__ from sourcecode
#per https://stackoverflow.com/a/17626524
#and https://stackoverflow.com/a/2073599
with open('lineman/version.py') as ver:
exec(ver.read())
setup(name='lineman',
version=__version__,
description='Lineman fixes data problems that will keep your data from going into redcap.',
url='http://github.com/ctsit/lineman',
author='Patrick White',
author_email='pfwhite9@gmail.com',
license='Apache License 2.0',
packages=['lineman'],
entry_points={
'console_scripts': [
'lineman = lineman.__main__:cli_run',
],
},
install_requires=['cappy==1.1.1',
'docopt==0.6.2',
'pyyaml==3.12',
'python-dateutil==2.6.1'],
dependency_links=["git+https://github.com/ctsit/cappy@1.1.1#egg=cappy-1.1.1"],
zip_safe=False)
| 32.62069 | 97 | 0.598309 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 500 | 0.528541 |
877f7ac996e50b2661470b15b06e9881cac05a69 | 2,266 | py | Python | main.py | a2824256/pyrealsense_collection | 75e931f1744138b10336a3a62eb569b352ef171d | [
"MIT"
] | null | null | null | main.py | a2824256/pyrealsense_collection | 75e931f1744138b10336a3a62eb569b352ef171d | [
"MIT"
] | null | null | null | main.py | a2824256/pyrealsense_collection | 75e931f1744138b10336a3a62eb569b352ef171d | [
"MIT"
] | null | null | null | import png
import pyrealsense2 as rs
import logging
logging.basicConfig(level=logging.INFO)
import numpy as np
import cv2
import os
def make_directories():
    """Create the output folders (colour JPEGs, 16-bit and 8-bit depth PNGs)
    in the current working directory if they do not already exist."""
    for folder in ("JPEGImages/", "depth/", "8bit_depth/"):
        if not os.path.exists(folder):
            os.makedirs(folder)
if __name__ == "__main__":
make_directories()
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
profile = pipeline.start(config)
frames = pipeline.wait_for_frames()
color_frame = frames.get_color_frame()
intr = color_frame.profile.as_video_stream_profile().intrinsics
align_to = rs.stream.color
align = rs.align(align_to)
number = 0
while True:
filecad = "JPEGImages/%s.jpg" % number
filedepth = "depth/%s.png" % number
filedepth_8b = "8bit_depth/%s.png" % number
frames = pipeline.wait_for_frames()
aligned_frames = align.process(frames)
aligned_depth_frame = aligned_frames.get_depth_frame()
color_frame = aligned_frames.get_color_frame()
if not aligned_depth_frame or not color_frame:
continue
d = np.asanyarray(aligned_depth_frame.get_data())
d8 = cv2.convertScaleAbs(d, alpha=0.3)
pos = np.where(d8 == 0)
d8[pos] = 255
c = np.asanyarray(color_frame.get_data())
cv2.imshow('COLOR IMAGE', c)
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.imwrite(filecad, c)
writer16 = png.Writer(width=d.shape[1], height=d.shape[0],
bitdepth=16, greyscale=True)
writer8 = png.Writer(width=d.shape[1], height=d.shape[0],
bitdepth=8, greyscale=True)
with open(filedepth, 'wb') as f:
zgray2list = d.tolist()
writer16.write(f, zgray2list)
with open(filedepth_8b, 'wb') as f2:
zgray2list_b8 = d8.tolist()
writer8.write(f2, zgray2list_b8)
number += 1
cv2.destroyAllWindows()
| 30.621622 | 71 | 0.609003 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.067961 |
877f91e2ba9ede10de324960019c237eee7ed5b7 | 1,711 | py | Python | VoC/Librispeech_enhancement/use_model.py | dugudongfangshuo/DeepLearn | c9689abedd7f6de0efd4effffb204aa32a8e4ef3 | [
"Apache-2.0"
] | 1 | 2021-07-20T06:44:05.000Z | 2021-07-20T06:44:05.000Z | VoC/Librispeech_enhancement/use_model.py | dugudongfangshuo/DeepLearn | c9689abedd7f6de0efd4effffb204aa32a8e4ef3 | [
"Apache-2.0"
] | null | null | null | VoC/Librispeech_enhancement/use_model.py | dugudongfangshuo/DeepLearn | c9689abedd7f6de0efd4effffb204aa32a8e4ef3 | [
"Apache-2.0"
] | null | null | null | import torch
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
# Script: load a trained speech-enhancement model and run it on one file.
hparams_file, overrides = 'train.yaml',''
PATH = './results/4234/save/CKPT+2021-04-17+16-05-06+00/model.ckpt'
# Load the hyperparameter (HyperPyYAML) file.
with open(hparams_file) as fin:
    hparams = load_hyperpyyaml(fin, overrides)
# Load the model checkpoint and switch to eval mode.
model=hparams["model"]
model=model.eval()
state_dict = torch.load(PATH)
model.load_state_dict(state_dict)
# Input audio file.
wav = ".\\data\\LibriSpeech\\test-clean\\1089\\134686\\1089-134686-0000.flac"
# Generate a noisy version of the clean file.
def generat_noisy(wav):
    """Read `wav` and corrupt it with the env_corruption pipeline from hparams."""
    clean_sig = sb.dataio.dataio.read_audio(wav)
    noisy_sig = hparams["env_corruption"](
        clean_sig.unsqueeze(0), torch.ones(1)
    ).squeeze(0)
    return noisy_sig
noisy_wav = generat_noisy(wav)
# Save the noisy file.
tmpfile = './noisy.wav'
sb.dataio.dataio.write_audio(tmpfile, noisy_wav, 16000)
# Compute spectral features.
def compute_feats(wavs):
    """Returns corresponding log-spectral features of the input waveforms.
    Arguments
    ---------
    wavs : torch.Tensor
        The batch of waveforms to convert to log-spectral features.
    """
    # Log-spectral features
    feats = hparams['compute_STFT'](wavs)
    feats = sb.processing.features.spectral_magnitude(feats, power=0.5)
    # Log1p reduces the emphasis on small differences
    feats = torch.log1p(feats)
    return feats
noisy_wav = noisy_wav.unsqueeze(0)
inputdata = compute_feats(noisy_wav)
# Run the model (the output acts as a spectral mask).
with torch.no_grad():
    output = model(inputdata)
# Apply the mask to the noisy spectrum.
predict_spec = torch.mul(output, inputdata)
# Resynthesize a time-domain audio signal from the predicted spectrum.
predict_wav =hparams['resynth'](
    torch.expm1(predict_spec), noisy_wav
)
predict_wav = predict_wav.squeeze(0)
# Save the enhanced file.
tmpfile_au = './agument.wav'
sb.dataio.dataio.write_audio(tmpfile_au, predict_wav, 16000)
877f98e93dce8cdcf07b8ca45bcea83be78b902e | 3,737 | py | Python | vit_jax/inference_time.py | fensence/Mixup-VT | 50eebaa04ffc974ecce4fac4a631d75a3f2361cc | [
"Apache-2.0"
] | 4,825 | 2020-10-21T12:35:18.000Z | 2022-03-31T13:35:21.000Z | vit_jax/inference_time.py | fensence/Mixup-VT | 50eebaa04ffc974ecce4fac4a631d75a3f2361cc | [
"Apache-2.0"
] | 154 | 2020-10-23T14:49:48.000Z | 2022-03-21T13:19:01.000Z | vit_jax/inference_time.py | fensence/Mixup-VT | 50eebaa04ffc974ecce4fac4a631d75a3f2361cc | [
"Apache-2.0"
] | 681 | 2020-10-23T01:34:20.000Z | 2022-03-30T05:51:57.000Z | # Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
import time
from absl import logging
from clu import metric_writers
import flax
import flax.jax_utils as flax_utils
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
import tensorflow as tf
from vit_jax import checkpoint
from vit_jax import models
from vit_jax.configs import models as config_lib
def inference_time(config: ml_collections.ConfigDict, workdir: str):
  """Runs a number of steps and measures inference time.

  Replicates a VisionTransformer across local devices, runs
  `config.initial_steps` warm-up inferences, then `config.steps` timed
  inferences, and writes imgs/sec/core statistics to `workdir`.
  """
  assert config.batch, f'Expected --config.batch={config.batch} > 0'
  assert config.num_classes, (
      f'Expected --config.num_classes={config.num_classes} > 0')
  assert config.image_size, (
      f'Expected --config.image_size={config.image_size} > 0')

  # Build VisionTransformer architecture
  model_config = config_lib.MODEL_CONFIGS[config.model_name]
  model = models.VisionTransformer(
      num_classes=config.num_classes, **model_config)

  # Make sure initial model parameters (before replication) are on CPU only.
  @functools.partial(jax.jit, backend='cpu')
  def init(rng):
    return model.init(
        rng,
        # Discard the "num_local_devices" dimension for initialization.
        inputs=jnp.ones([1, config.image_size, config.image_size, 3],
                        jnp.float32),
        train=False)

  variables = init(jax.random.PRNGKey(0))
  params_repl = flax_utils.replicate(variables['params'])

  # pmap replicates the models over all TPUs/GPUs
  vit_fn_repl = jax.pmap(functools.partial(model.apply, train=False))
  # One dummy batch per local device.
  images = jnp.ones([
      jax.local_device_count(), config.batch // jax.local_device_count(),
      config.image_size, config.image_size, 3
  ], jnp.float32)

  writer = metric_writers.create_default_writer(workdir, asynchronous=False)
  writer.write_hparams(config.to_dict())

  # First call triggers XLA compilation; exclude it from all measurements.
  logging.info('Starting training loop; initial compile can take a while...')
  logits = vit_fn_repl(flax.core.FrozenDict(params=params_repl), images)
  logits.block_until_ready()
  logging.info('Done.')

  logging.info('Going to run %d inferences WITHOUT measuring...',
               config.initial_steps)
  for _ in range(config.initial_steps):
    logits = vit_fn_repl(flax.core.FrozenDict(params=params_repl), images)
    logits.block_until_ready()

  logging.info('Going to run %d inferences measuring...', config.steps)
  times = []
  # BUG FIX: this loop previously iterated config.initial_steps, which
  # contradicted the log line above; it now runs config.steps iterations.
  for _ in range(config.steps):
    t0 = time.time()
    logits = vit_fn_repl(flax.core.FrozenDict(params=params_repl), images)
    logits.block_until_ready()
    times.append(time.time() - t0)
  logging.info('times=%s', times)
  imgs_sec_core = config.batch / jax.local_device_count() / np.array(times)
  logging.info('imgs_sec_core_min=%f', imgs_sec_core.min())
  logging.info('imgs_sec_core_max=%f', imgs_sec_core.max())
  logging.info('imgs_sec_core_mean=%f', imgs_sec_core.mean())
  logging.info('imgs_sec_core_std=%f', imgs_sec_core.std())
  writer.write_scalars(
      0,
      dict(
          imgs_sec_core_min=imgs_sec_core.min(),
          imgs_sec_core_max=imgs_sec_core.max(),
          imgs_sec_core_mean=imgs_sec_core.mean(),
          imgs_sec_core_std=imgs_sec_core.std(),
      ))
| 35.932692 | 77 | 0.730265 | 0 | 0 | 0 | 0 | 296 | 0.079208 | 0 | 0 | 1,268 | 0.33931 |
87823ea34fec0208210e9f22e5e30fc8a7e93695 | 52 | py | Python | vpos/exceptions.py | txiocoder/django-vpos | 7ee39c7b90675689b0b1b9c83767d48149da539a | [
"MIT"
] | 3 | 2021-11-18T16:31:05.000Z | 2022-01-21T13:41:58.000Z | vpos/exceptions.py | txiocoder/django-vpos | 7ee39c7b90675689b0b1b9c83767d48149da539a | [
"MIT"
] | 1 | 2021-11-22T19:11:38.000Z | 2021-11-22T20:21:29.000Z | vpos/exceptions.py | txiocoder/django-vpos | 7ee39c7b90675689b0b1b9c83767d48149da539a | [
"MIT"
] | null | null | null |
class VposConfigurationError(Exception):
    """Error raised for invalid vpos configuration."""
    pass
| 10.4 | 40 | 0.769231 | 49 | 0.942308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
87829589d5f65d2ab2ae28e5f39a9cb66fefb9e4 | 629 | py | Python | PythonExercicios/ex039.py | lordvinick/Python | c03fd08d4c204104bf0196b0bd129427fd2067ae | [
"MIT"
] | null | null | null | PythonExercicios/ex039.py | lordvinick/Python | c03fd08d4c204104bf0196b0bd129427fd2067ae | [
"MIT"
] | null | null | null | PythonExercicios/ex039.py | lordvinick/Python | c03fd08d4c204104bf0196b0bd129427fd2067ae | [
"MIT"
] | null | null | null | print('\033[36m='*12, '\033[32mAlistamento Militar', '\033[36m='*12)
from datetime import date
ano = int(input('Em que ano você nasceu? '))
atual = date.today().year
idade = atual - ano
print(f'Você tem \033[32m{idade} \033[36manos em \033[32m{atual}.')
if idade == 18:
print('\033[36mTa na hora de se alistar.')
elif idade < 18:
print(f'\033[36mAinda falta(m) \033[32m{18 - idade} \033[36mano(s) para se alistar \nSeu alistamento será em \033[32m{ano+18}.')
elif idade > 18:
print(f'\033[31mVocê deveria ter se alistado há \033[32m{idade - 18} \033[31manos atrás.\n\033[36mSeu alistamento foi em \033[32m{ano+18}.')
| 48.384615 | 144 | 0.682035 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 432 | 0.680315 |
8783f86fdd42c8b014bb807d8e86e286c8b1658b | 2,548 | py | Python | code/stats/dim_reduction.py | wethepeopleonline/law-net | e8b01136360078c89b666e2b127672644ed0c54b | [
"MIT"
] | 17 | 2016-09-02T19:39:11.000Z | 2021-11-15T21:22:48.000Z | code/stats/dim_reduction.py | wethepeopleonline/law-net | e8b01136360078c89b666e2b127672644ed0c54b | [
"MIT"
] | 7 | 2016-09-04T17:19:13.000Z | 2017-01-19T19:17:10.000Z | code/stats/dim_reduction.py | idc9/law-net | e8b01136360078c89b666e2b127672644ed0c54b | [
"MIT"
] | 8 | 2017-01-19T04:24:09.000Z | 2021-09-13T20:22:58.000Z | import numpy as np
from scipy import stats
import pandas as pd
from sklearn.cross_decomposition import PLSRegression
def standardize_vector(v, center=True, scale=False):
    """Center (and optionally scale) a vector.

    Centering subtracts the mean; scaling divides by the standard deviation
    unless it is zero, in which case the (centered) vector is returned as-is.
    """
    if center:
        v = v - np.mean(v)
    if scale:
        if np.std(v) == 0:
            return v
        else:
            return (v + 0.0) / np.std(v)
    # BUG FIX: the original fell off the end here and returned None
    # whenever scale was falsy.
    return v
def standardize_vec(v, center='mean', scale='std'):
    """Standardize a vector by centering and scaling it.

    `center` may be False/None (no centering), 'mean', 'median', or a number;
    `scale` may be False/None (no scaling), 'max', 'std', 'mean', 'median',
    or a number. Scaling is skipped when the computed scale value is zero,
    to avoid division by zero.
    """
    # choose the center value
    if not center:
        cent_val = 0.0
    elif center == 'mean':
        cent_val = np.mean(v)
    elif center == 'median':
        cent_val = np.median(v)
    elif isinstance(center, (float, int)):
        cent_val = center
    else:
        raise ValueError('improper center value')

    # choose the scale value
    if not scale:
        # BUG FIX: the original assigned `scale = 1.0` here, leaving
        # scale_val undefined and raising NameError at the division below.
        scale_val = 1.0
    elif scale == 'max':
        scale_val = max(v)
    elif scale == 'std':
        scale_val = np.std(v)
    elif scale == 'mean':
        scale_val = np.mean(v)
    elif scale == 'median':
        scale_val = np.median(v)
    elif isinstance(scale, (float, int)):
        scale_val = scale
    else:
        raise ValueError('improper scale value')

    # don't scale if scale value is zero
    if scale_val == 0:
        scale_val = 1

    return (v - cent_val + 0.0) / scale_val
def get_PCA(X, scale=False):
    """
    Return the PCA decomposition X = U D V^t of data frame X.

    Rows of X are observations, columns are features. Columns are mean
    centered (and optionally scaled by their standard deviation) before
    the SVD is taken.

    Output
    ------
    U, D, V
    """
    if type(X) == np.ndarray:
        X = pd.DataFrame(X)

    # Mean-center (and optionally scale) each column.
    centered = X.apply(lambda col: standardize_vector(col,
                                                      center=True,
                                                      scale=scale))

    return np.linalg.svd(centered, full_matrices=False)
def get_pls(X, Y, n_comp):
    """
    Return the PLS scores and loadings.

    parameters
    ----------
    X: pandas data frame
    Y: list
    n_comp: number of PLS components
    """
    # Center and scale both the X columns and the y vector.
    x_std = X.apply(lambda col: standardize_vector(col, center=True,
                                                   scale=True))
    x = np.array(x_std)
    y = standardize_vector(Y, center=True, scale=True)

    # Fit the PLS directions.
    pls = PLSRegression(n_components=int(n_comp), scale=True)
    pls.fit(x, y)

    return np.array(pls.x_scores_), pls.x_loadings_
| 25.227723 | 77 | 0.582418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 807 | 0.316719 |
8784142013ef93ac4ae61c954de934c0b7a1cd9b | 2,247 | py | Python | cloudmarker/test/test_azwebapphttp20event.py | TinLe/cloudmarker | 29698420457a86d5d8a0bac156bc98bd656198e1 | [
"MIT"
] | 208 | 2019-04-10T05:15:11.000Z | 2022-03-16T17:41:29.000Z | cloudmarker/test/test_azwebapphttp20event.py | TinLe/cloudmarker | 29698420457a86d5d8a0bac156bc98bd656198e1 | [
"MIT"
] | 88 | 2018-12-17T18:24:13.000Z | 2021-05-15T04:19:53.000Z | cloudmarker/test/test_azwebapphttp20event.py | TinLe/cloudmarker | 29698420457a86d5d8a0bac156bc98bd656198e1 | [
"MIT"
] | 15 | 2019-01-03T04:18:33.000Z | 2021-06-03T09:24:31.000Z | """Tests for AzWebAppHttp20Event plugin."""
import copy
import unittest
from cloudmarker.events import azwebapphttp20event
# Canonical well-formed Azure `web_app_config` record; each test below
# deep-copies this template and mutates a single field.
base_record = {
    'ext': {
        'record_type': 'web_app_config',
        'cloud_type': 'azure',
        'http20_enabled': True
    },
    'com': {
        'cloud_type': 'azure'
    }
}
class AzWebAppHttp20EventTest(unittest.TestCase):
    """Tests for AzWebAppHttp20Event plugin."""

    def _eval(self, record):
        """Run the plugin under test on ``record`` and return emitted events."""
        plugin = azwebapphttp20event.AzWebAppHttp20Event()
        return list(plugin.eval(record))

    def test_com_bucket_missing(self):
        record = copy.deepcopy(base_record)
        record['com'] = None
        self.assertEqual(self._eval(record), [])

    def test_cloud_type_non_azure(self):
        record = copy.deepcopy(base_record)
        record['com']['cloud_type'] = 'non_azure'
        self.assertEqual(self._eval(record), [])

    def test_ext_bucket_missing(self):
        record = copy.deepcopy(base_record)
        record['ext'] = None
        self.assertEqual(self._eval(record), [])

    def test_record_type_non_web_app_config(self):
        record = copy.deepcopy(base_record)
        record['ext']['record_type'] = 'non_web_app_config'
        self.assertEqual(self._eval(record), [])

    def test_http20_enabled(self):
        record = copy.deepcopy(base_record)
        record['ext']['http20_enabled'] = True
        self.assertEqual(self._eval(record), [])

    def test_http20_disabled(self):
        # The only case that should emit: HTTP 2.0 switched off.
        record = copy.deepcopy(base_record)
        record['ext']['http20_enabled'] = False
        events = self._eval(record)
        self.assertEqual(len(events), 1)
        self.assertEqual(events[0]['ext']['record_type'],
                         'web_app_http20_event')
        self.assertEqual(events[0]['com']['record_type'],
                         'web_app_http20_event')
| 32.565217 | 59 | 0.640854 | 1,923 | 0.855808 | 0 | 0 | 0 | 0 | 0 | 0 | 377 | 0.167779 |
8784b5c8526896e5048632a3a82e5b228aabb640 | 31,270 | gyp | Python | build/pomdog.gyp | bis83/pomdog | 133a9262958d539ae6d93664e6cb2207b5b6c7ff | [
"MIT"
] | null | null | null | build/pomdog.gyp | bis83/pomdog | 133a9262958d539ae6d93664e6cb2207b5b6c7ff | [
"MIT"
] | null | null | null | build/pomdog.gyp | bis83/pomdog | 133a9262958d539ae6d93664e6cb2207b5b6c7ff | [
"MIT"
] | null | null | null | {
'includes': ['common.gypi'],
'conditions': [
['OS == "win"', {
'variables': {
'application_platform%': 'Win32',
'renderers%': ['Direct3D11', 'GL4'],
'audio%': 'XAudio2',
'input_devices%': ['DirectInput'],
},
}],
['OS == "mac"', {
'variables': {
'application_platform%': 'Cocoa',
'renderers%': ['GL4'],
'audio%': 'OpenAL',
'input_devices%': [],
},
}],
['OS == "ios"', {
'variables': {
'application_platform%': 'CocoaTouch',
'renderers%': ['Metal'],
'audio%': 'OpenAL',
'input_devices%': [],
},
}],
['OS == "linux" or OS == "freebsd" or OS == "openbsd"', {
'variables': {
'application_platform%': 'X11',
'renderers%': ['GL4'],
'audio%': 'OpenAL',
'input_devices%': [],
},
}],
],
'variables': {
'pomdog_third_party_dir%': '../third-party',
'pomdog_library_core_sources': [
'../include/Pomdog/Application/Duration.hpp',
'../include/Pomdog/Application/Game.hpp',
'../include/Pomdog/Application/GameClock.hpp',
'../include/Pomdog/Application/GameHost.hpp',
'../include/Pomdog/Application/GameWindow.hpp',
'../include/Pomdog/Application/MouseCursor.hpp',
'../include/Pomdog/Application/Timer.hpp',
'../include/Pomdog/Application/TimePoint.hpp',
# '../include/Pomdog/Async/Helpers.hpp',
# '../include/Pomdog/Async/ImmediateScheduler.hpp',
# '../include/Pomdog/Async/QueuedScheduler.hpp',
# '../include/Pomdog/Async/Scheduler.hpp',
# '../include/Pomdog/Async/Task.hpp',
'../include/Pomdog/Audio/AudioClip.hpp',
'../include/Pomdog/Audio/AudioChannels.hpp',
'../include/Pomdog/Audio/AudioEmitter.hpp',
'../include/Pomdog/Audio/AudioEngine.hpp',
'../include/Pomdog/Audio/AudioListener.hpp',
'../include/Pomdog/Audio/SoundEffect.hpp',
'../include/Pomdog/Audio/SoundState.hpp',
'../include/Pomdog/Audio/detail/ForwardDeclarations.hpp',
'../include/Pomdog/Basic/Export.hpp',
'../include/Pomdog/Basic/Platform.hpp',
'../include/Pomdog/Basic/Version.hpp',
'../include/Pomdog/Content/AssetManager.hpp',
'../include/Pomdog/Content/AssetBuilders/Builder.hpp',
'../include/Pomdog/Content/AssetBuilders/PipelineStateBuilder.hpp',
'../include/Pomdog/Content/AssetBuilders/ShaderBuilder.hpp',
'../include/Pomdog/Content/Utility/BinaryFileStream.hpp',
'../include/Pomdog/Content/Utility/BinaryReader.hpp',
'../include/Pomdog/Content/Utility/MakeFourCC.hpp',
'../include/Pomdog/Content/Utility/PathHelper.hpp',
'../include/Pomdog/Content/detail/AssetDictionary.hpp',
'../include/Pomdog/Content/detail/AssetLoaderContext.hpp',
'../include/Pomdog/Content/detail/AssetLoader.hpp',
'../include/Pomdog/Content/detail/AssetLoaders/AudioClipLoader.hpp',
'../include/Pomdog/Content/detail/AssetLoaders/Texture2DLoader.hpp',
'../include/Pomdog/Graphics/Blend.hpp',
'../include/Pomdog/Graphics/BlendDescription.hpp',
'../include/Pomdog/Graphics/BlendFunction.hpp',
'../include/Pomdog/Graphics/BufferUsage.hpp',
'../include/Pomdog/Graphics/ClearOptions.hpp',
'../include/Pomdog/Graphics/ComparisonFunction.hpp',
'../include/Pomdog/Graphics/ConstantBuffer.hpp',
'../include/Pomdog/Graphics/CullMode.hpp',
'../include/Pomdog/Graphics/DepthFormat.hpp',
'../include/Pomdog/Graphics/DepthStencilDescription.hpp',
'../include/Pomdog/Graphics/DepthStencilOperation.hpp',
'../include/Pomdog/Graphics/EffectAnnotation.hpp',
'../include/Pomdog/Graphics/EffectConstantDescription.hpp',
'../include/Pomdog/Graphics/EffectReflection.hpp',
'../include/Pomdog/Graphics/EffectVariableClass.hpp',
'../include/Pomdog/Graphics/EffectVariableType.hpp',
'../include/Pomdog/Graphics/EffectVariable.hpp',
'../include/Pomdog/Graphics/FillMode.hpp',
'../include/Pomdog/Graphics/GraphicsCommandList.hpp',
'../include/Pomdog/Graphics/GraphicsCommandQueue.hpp',
'../include/Pomdog/Graphics/GraphicsDevice.hpp',
'../include/Pomdog/Graphics/IndexBuffer.hpp',
'../include/Pomdog/Graphics/IndexElementSize.hpp',
'../include/Pomdog/Graphics/InputClassification.hpp',
'../include/Pomdog/Graphics/InputElement.hpp',
'../include/Pomdog/Graphics/InputElementFormat.hpp',
'../include/Pomdog/Graphics/InputLayoutDescription.hpp',
'../include/Pomdog/Graphics/InputLayoutHelper.hpp',
'../include/Pomdog/Graphics/PipelineState.hpp',
'../include/Pomdog/Graphics/PipelineStateDescription.hpp',
'../include/Pomdog/Graphics/PresentationParameters.hpp',
'../include/Pomdog/Graphics/PrimitiveTopology.hpp',
'../include/Pomdog/Graphics/RasterizerDescription.hpp',
'../include/Pomdog/Graphics/RenderTarget2D.hpp',
'../include/Pomdog/Graphics/RenderTargetBlendDescription.hpp',
'../include/Pomdog/Graphics/SamplerDescription.hpp',
'../include/Pomdog/Graphics/SamplerState.hpp',
'../include/Pomdog/Graphics/Shader.hpp',
'../include/Pomdog/Graphics/ShaderLanguage.hpp',
'../include/Pomdog/Graphics/ShaderPipelineStage.hpp',
'../include/Pomdog/Graphics/SurfaceFormat.hpp',
'../include/Pomdog/Graphics/StencilOperation.hpp',
'../include/Pomdog/Graphics/Texture.hpp',
'../include/Pomdog/Graphics/Texture2D.hpp',
'../include/Pomdog/Graphics/TextureAddressMode.hpp',
'../include/Pomdog/Graphics/TextureFilter.hpp',
'../include/Pomdog/Graphics/VertexBuffer.hpp',
'../include/Pomdog/Graphics/VertexBufferBinding.hpp',
'../include/Pomdog/Graphics/Viewport.hpp',
'../include/Pomdog/Graphics/detail/ForwardDeclarations.hpp',
'../include/Pomdog/Graphics/detail/EffectBinaryParameter.hpp',
'../include/Pomdog/Graphics/ShaderCompilers/GLSLCompiler.hpp',
'../include/Pomdog/Graphics/ShaderCompilers/HLSLCompiler.hpp',
'../include/Pomdog/Input/ButtonState.hpp',
'../include/Pomdog/Input/Gamepad.hpp',
'../include/Pomdog/Input/GamepadButtons.hpp',
'../include/Pomdog/Input/GamepadCapabilities.hpp',
'../include/Pomdog/Input/GamepadDPad.hpp',
'../include/Pomdog/Input/GamepadState.hpp',
'../include/Pomdog/Input/GamepadThumbSticks.hpp',
'../include/Pomdog/Input/GamepadType.hpp',
'../include/Pomdog/Input/Keyboard.hpp',
'../include/Pomdog/Input/KeyboardState.hpp',
'../include/Pomdog/Input/KeyState.hpp',
'../include/Pomdog/Input/Keys.hpp',
'../include/Pomdog/Input/Mouse.hpp',
'../include/Pomdog/Input/MouseState.hpp',
'../include/Pomdog/Input/PlayerIndex.hpp',
'../include/Pomdog/Input/TouchLocation.hpp',
'../include/Pomdog/Input/TouchLocationState.hpp',
'../include/Pomdog/Logging/Log.hpp',
'../include/Pomdog/Logging/LogChannel.hpp',
'../include/Pomdog/Logging/LogEntry.hpp',
'../include/Pomdog/Logging/LogLevel.hpp',
'../include/Pomdog/Logging/LogStream.hpp',
'../include/Pomdog/Math/BoundingBox.hpp',
'../include/Pomdog/Math/BoundingBox2D.hpp',
'../include/Pomdog/Math/BoundingCircle.hpp',
'../include/Pomdog/Math/BoundingSphere.hpp',
'../include/Pomdog/Math/Color.hpp',
'../include/Pomdog/Math/ContainmentType.hpp',
'../include/Pomdog/Math/Degree.hpp',
'../include/Pomdog/Math/MathHelper.hpp',
'../include/Pomdog/Math/Matrix2x2.hpp',
'../include/Pomdog/Math/Matrix3x2.hpp',
'../include/Pomdog/Math/Matrix3x3.hpp',
'../include/Pomdog/Math/Matrix4x4.hpp',
'../include/Pomdog/Math/Point2D.hpp',
'../include/Pomdog/Math/Point3D.hpp',
'../include/Pomdog/Math/Quaternion.hpp',
'../include/Pomdog/Math/Radian.hpp',
'../include/Pomdog/Math/Ray.hpp',
'../include/Pomdog/Math/Rectangle.hpp',
'../include/Pomdog/Math/Vector2.hpp',
'../include/Pomdog/Math/Vector3.hpp',
'../include/Pomdog/Math/Vector4.hpp',
'../include/Pomdog/Math/detail/Coordinate2D.hpp',
'../include/Pomdog/Math/detail/Coordinate2DImplementation.hpp',
'../include/Pomdog/Math/detail/Coordinate3D.hpp',
'../include/Pomdog/Math/detail/Coordinate3DImplementation.hpp',
'../include/Pomdog/Math/detail/FloatingPointMatrix2x2.hpp',
'../include/Pomdog/Math/detail/FloatingPointMatrix3x2.hpp',
'../include/Pomdog/Math/detail/FloatingPointMatrix3x3.hpp',
'../include/Pomdog/Math/detail/FloatingPointMatrix4x4.hpp',
'../include/Pomdog/Math/detail/FloatingPointQuaternion.hpp',
'../include/Pomdog/Math/detail/FloatingPointVector2.hpp',
'../include/Pomdog/Math/detail/FloatingPointVector3.hpp',
'../include/Pomdog/Math/detail/FloatingPointVector4.hpp',
'../include/Pomdog/Math/detail/ForwardDeclarations.hpp',
'../include/Pomdog/Math/detail/TaggedArithmetic.hpp',
'../include/Pomdog/Signals/Connection.hpp',
'../include/Pomdog/Signals/ConnectionList.hpp',
'../include/Pomdog/Signals/Event.hpp',
'../include/Pomdog/Signals/EventQueue.hpp',
'../include/Pomdog/Signals/Helpers.hpp',
'../include/Pomdog/Signals/ScopedConnection.hpp',
'../include/Pomdog/Signals/Signal.hpp',
'../include/Pomdog/Signals/detail/EventBody.hpp',
'../include/Pomdog/Signals/detail/ForwardDeclarations.hpp',
'../include/Pomdog/Signals/detail/SignalBody.hpp',
'../include/Pomdog/Utility/Any.hpp',
'../include/Pomdog/Utility/Assert.hpp',
'../include/Pomdog/Utility/Exception.hpp',
'../include/Pomdog/Utility/Optional.hpp',
'../include/Pomdog/Utility/StringHelper.hpp',
'../include/Pomdog/Utility/detail/CRC32.hpp',
'../include/Pomdog/Utility/detail/FileSystem.hpp',
'../include/Pomdog/Utility/detail/Tagged.hpp',
'../src/Application/GameClock.cpp',
'../src/Application/SubsystemScheduler.hpp',
'../src/Application/SystemEvents.hpp',
'../src/Application/Timer.cpp',
'../src/Application/TimeSource.hpp',
# '../src/Async/ImmediateScheduler.cpp',
# '../src/Async/QueuedScheduler.cpp',
# '../src/Async/Task.cpp',
'../src/Audio/AudioClip.cpp',
'../src/Audio/AudioEngine.cpp',
'../src/Audio/SoundEffect.cpp',
'../src/Content/AssetDictionary.cpp',
'../src/Content/AssetLoaderContext.cpp',
'../src/Content/AssetManager.cpp',
'../src/Content/AssetBuilders/PipelineStateBuilder.cpp',
'../src/Content/AssetBuilders/ShaderBuilder.cpp',
'../src/Content/AssetLoaders/AudioClipLoader.cpp',
'../src/Content/AssetLoaders/Texture2DLoader.cpp',
'../src/Content/Utility/DDSTextureReader.cpp',
'../src/Content/Utility/DDSTextureReader.hpp',
'../src/Content/Utility/MSWaveAudioLoader.cpp',
'../src/Content/Utility/MSWaveAudioLoader.hpp',
'../src/Content/Utility/PNGTextureReader.cpp',
'../src/Content/Utility/PNGTextureReader.hpp',
'../src/Graphics/ClearOptions.cpp',
'../src/Graphics/ConstantBuffer.cpp',
'../src/Graphics/EffectBinaryParameter.cpp',
'../src/Graphics/EffectReflection.cpp',
'../src/Graphics/GraphicsCommandList.cpp',
'../src/Graphics/GraphicsCommandQueue.cpp',
'../src/Graphics/GraphicsDevice.cpp',
'../src/Graphics/IndexBuffer.cpp',
'../src/Graphics/InputLayoutHelper.cpp',
'../src/Graphics/PipelineState.cpp',
'../src/Graphics/RenderTarget2D.cpp',
'../src/Graphics/SamplerState.cpp',
'../src/Graphics/Texture2D.cpp',
'../src/Graphics/Viewport.cpp',
'../src/Graphics/VertexBuffer.cpp',
'../src/Graphics/ShaderCompilers/GLSLCompiler.cpp',
'../src/Graphics/ShaderCompilers/HLSLCompiler.cpp',
'../src/Input/KeyboardState.cpp',
'../src/InputSystem/InputDeviceCreator.hpp',
'../src/InputSystem/InputDeviceFactory.cpp',
'../src/InputSystem/InputDeviceFactory.hpp',
'../src/Logging/Log.cpp',
'../src/Logging/LogChannel.cpp',
'../src/Logging/LogStream.cpp',
'../src/Math/BoundingBox.cpp',
'../src/Math/BoundingBox2D.cpp',
'../src/Math/BoundingCircle.cpp',
'../src/Math/BoundingSphere.cpp',
'../src/Math/Color.cpp',
'../src/Math/MathHelper.cpp',
'../src/Math/Ray.cpp',
'../src/Math/Rectangle.cpp',
'../src/Math/detail/FloatingPointMatrix2x2.cpp',
'../src/Math/detail/FloatingPointMatrix3x2.cpp',
'../src/Math/detail/FloatingPointMatrix3x3.cpp',
'../src/Math/detail/FloatingPointMatrix4x4.cpp',
'../src/Math/detail/FloatingPointQuaternion.cpp',
'../src/Math/detail/FloatingPointVector2.cpp',
'../src/Math/detail/FloatingPointVector3.cpp',
'../src/Math/detail/FloatingPointVector4.cpp',
'../src/RenderSystem/GraphicsCapabilities.hpp',
'../src/RenderSystem/GraphicsCommandListImmediate.cpp',
'../src/RenderSystem/GraphicsCommandListImmediate.hpp',
'../src/RenderSystem/GraphicsCommandQueueImmediate.cpp',
'../src/RenderSystem/GraphicsCommandQueueImmediate.hpp',
'../src/RenderSystem/GraphicsContext.cpp',
'../src/RenderSystem/GraphicsContext.hpp',
'../src/RenderSystem/NativeBuffer.hpp',
'../src/RenderSystem/NativeEffectReflection.hpp',
'../src/RenderSystem/NativeGraphicsCommandList.hpp',
'../src/RenderSystem/NativeGraphicsCommandQueue.hpp',
'../src/RenderSystem/NativeGraphicsContext.hpp',
'../src/RenderSystem/NativeGraphicsDevice.hpp',
'../src/RenderSystem/NativePipelineState.hpp',
'../src/RenderSystem/NativeRenderTarget2D.hpp',
'../src/RenderSystem/NativeSamplerState.hpp',
'../src/RenderSystem/NativeTexture2D.hpp',
'../src/RenderSystem/ShaderBytecode.hpp',
'../src/RenderSystem/ShaderCompileOptions.hpp',
'../src/RenderSystem/SurfaceFormatHelper.cpp',
'../src/RenderSystem/SurfaceFormatHelper.hpp',
'../src/RenderSystem/TextureHelper.cpp',
'../src/RenderSystem/TextureHelper.hpp',
'../src/Signals/Connection.cpp',
'../src/Signals/ConnectionList.cpp',
'../src/Signals/EventQueue.cpp',
'../src/Signals/ScopedConnection.cpp',
'../src/Utility/CRC32.cpp',
'../src/Utility/Noncopyable.hpp',
'../src/Utility/PathHelper.cpp',
'../src/Utility/ScopeGuard.hpp',
'../src/Utility/StringHelper.cpp',
],
'pomdog_library_opengl4_sources': [
'../src/RenderSystem.GL4/BlendStateGL4.cpp',
'../src/RenderSystem.GL4/BlendStateGL4.hpp',
'../src/RenderSystem.GL4/BufferGL4.cpp',
'../src/RenderSystem.GL4/BufferGL4.hpp',
'../src/RenderSystem.GL4/DepthStencilStateGL4.cpp',
'../src/RenderSystem.GL4/DepthStencilStateGL4.hpp',
'../src/RenderSystem.GL4/EffectReflectionGL4.cpp',
'../src/RenderSystem.GL4/EffectReflectionGL4.hpp',
'../src/RenderSystem.GL4/ErrorChecker.cpp',
'../src/RenderSystem.GL4/ErrorChecker.hpp',
'../src/RenderSystem.GL4/GraphicsContextGL4.cpp',
'../src/RenderSystem.GL4/GraphicsContextGL4.hpp',
'../src/RenderSystem.GL4/GraphicsDeviceGL4.cpp',
'../src/RenderSystem.GL4/GraphicsDeviceGL4.hpp',
'../src/RenderSystem.GL4/InputLayoutGL4.cpp',
'../src/RenderSystem.GL4/InputLayoutGL4.hpp',
'../src/RenderSystem.GL4/OpenGLContext.hpp',
'../src/RenderSystem.GL4/OpenGLPrerequisites.hpp',
'../src/RenderSystem.GL4/PipelineStateGL4.cpp',
'../src/RenderSystem.GL4/PipelineStateGL4.hpp',
'../src/RenderSystem.GL4/RasterizerStateGL4.cpp',
'../src/RenderSystem.GL4/RasterizerStateGL4.hpp',
'../src/RenderSystem.GL4/RenderTarget2DGL4.cpp',
'../src/RenderSystem.GL4/RenderTarget2DGL4.hpp',
'../src/RenderSystem.GL4/SamplerStateGL4.cpp',
'../src/RenderSystem.GL4/SamplerStateGL4.hpp',
'../src/RenderSystem.GL4/ShaderGL4.cpp',
'../src/RenderSystem.GL4/ShaderGL4.hpp',
'../src/RenderSystem.GL4/Texture2DGL4.cpp',
'../src/RenderSystem.GL4/Texture2DGL4.hpp',
'../src/RenderSystem.GL4/TypesafeGL4.hpp',
'../src/RenderSystem.GL4/TypesafeHelperGL4.hpp',
],
'pomdog_library_openal_sources': [
'../src/SoundSystem.OpenAL/AudioClipAL.cpp',
'../src/SoundSystem.OpenAL/AudioClipAL.hpp',
'../src/SoundSystem.OpenAL/AudioEngineAL.cpp',
'../src/SoundSystem.OpenAL/AudioEngineAL.hpp',
'../src/SoundSystem.OpenAL/ContextOpenAL.cpp',
'../src/SoundSystem.OpenAL/ContextOpenAL.hpp',
'../src/SoundSystem.OpenAL/ErrorCheckerAL.cpp',
'../src/SoundSystem.OpenAL/ErrorCheckerAL.hpp',
'../src/SoundSystem.OpenAL/PrerequisitesOpenAL.hpp',
'../src/SoundSystem.OpenAL/SoundEffectAL.cpp',
'../src/SoundSystem.OpenAL/SoundEffectAL.hpp',
],
'pomdog_library_apple_sources': [
'../src/Platform.Apple/FileSystemApple.mm',
'../src/Platform.Apple/TimeSourceApple.cpp',
'../src/Platform.Apple/TimeSourceApple.hpp',
],
'pomdog_library_cocoa_sources': [
'../include/Pomdog/Platform/Cocoa/Bootstrap.hpp',
'../include/Pomdog/Platform/Cocoa/PomdogOpenGLView.hpp',
'../src/Platform.Cocoa/Bootstrap.mm',
'../src/Platform.Cocoa/CocoaWindowDelegate.hpp',
'../src/Platform.Cocoa/CocoaWindowDelegate.mm',
'../src/Platform.Cocoa/GameHostCocoa.hpp',
'../src/Platform.Cocoa/GameHostCocoa.mm',
'../src/Platform.Cocoa/GameWindowCocoa.hpp',
'../src/Platform.Cocoa/GameWindowCocoa.mm',
'../src/Platform.Cocoa/KeyboardCocoa.hpp',
'../src/Platform.Cocoa/KeyboardCocoa.cpp',
'../src/Platform.Cocoa/MouseCocoa.hpp',
'../src/Platform.Cocoa/MouseCocoa.cpp',
'../src/Platform.Cocoa/OpenGLContextCocoa.hpp',
'../src/Platform.Cocoa/OpenGLContextCocoa.mm',
'../src/Platform.Cocoa/PomdogOpenGLView.mm',
],
'pomdog_library_dxgi_sources': [
'../src/RenderSystem.DXGI/DXGIFormatHelper.cpp',
'../src/RenderSystem.DXGI/DXGIFormatHelper.hpp',
],
'pomdog_library_direct3d_sources': [
'../src/RenderSystem.Direct3D/HLSLCompiling.cpp',
'../src/RenderSystem.Direct3D/HLSLCompiling.hpp',
'../src/RenderSystem.Direct3D/HLSLReflectionHelper.cpp',
'../src/RenderSystem.Direct3D/HLSLReflectionHelper.hpp',
'../src/RenderSystem.Direct3D/PrerequisitesDirect3D.hpp',
],
'pomdog_library_direct3d11_sources': [
'../src/RenderSystem.Direct3D11/BufferDirect3D11.cpp',
'../src/RenderSystem.Direct3D11/BufferDirect3D11.hpp',
'../src/RenderSystem.Direct3D11/EffectReflectionDirect3D11.cpp',
'../src/RenderSystem.Direct3D11/EffectReflectionDirect3D11.hpp',
'../src/RenderSystem.Direct3D11/GraphicsContextDirect3D11.cpp',
'../src/RenderSystem.Direct3D11/GraphicsContextDirect3D11.hpp',
'../src/RenderSystem.Direct3D11/GraphicsDeviceDirect3D11.cpp',
'../src/RenderSystem.Direct3D11/GraphicsDeviceDirect3D11.hpp',
'../src/RenderSystem.Direct3D11/PipelineStateDirect3D11.cpp',
'../src/RenderSystem.Direct3D11/PipelineStateDirect3D11.hpp',
'../src/RenderSystem.Direct3D11/PrerequisitesDirect3D11.hpp',
'../src/RenderSystem.Direct3D11/RenderTarget2DDirect3D11.cpp',
'../src/RenderSystem.Direct3D11/RenderTarget2DDirect3D11.hpp',
'../src/RenderSystem.Direct3D11/SamplerStateDirect3D11.cpp',
'../src/RenderSystem.Direct3D11/SamplerStateDirect3D11.hpp',
'../src/RenderSystem.Direct3D11/ShaderDirect3D11.cpp',
'../src/RenderSystem.Direct3D11/ShaderDirect3D11.hpp',
'../src/RenderSystem.Direct3D11/Texture2DDirect3D11.cpp',
'../src/RenderSystem.Direct3D11/Texture2DDirect3D11.hpp',
],
'pomdog_library_xaudio2_sources': [
'../src/SoundSystem.XAudio2/AudioClipXAudio2.cpp',
'../src/SoundSystem.XAudio2/AudioClipXAudio2.hpp',
'../src/SoundSystem.XAudio2/AudioEngineXAudio2.cpp',
'../src/SoundSystem.XAudio2/AudioEngineXAudio2.hpp',
'../src/SoundSystem.XAudio2/PrerequisitesXAudio2.hpp',
'../src/SoundSystem.XAudio2/SoundEffectXAudio2.cpp',
'../src/SoundSystem.XAudio2/SoundEffectXAudio2.hpp',
],
'pomdog_library_directinput_sources': [
'../src/InputSystem.DirectInput/DeviceContextDirectInput.cpp',
'../src/InputSystem.DirectInput/DeviceContextDirectInput.hpp',
'../src/InputSystem.DirectInput/PrerequisitesDirectInput.hpp',
],
'pomdog_library_win32_sources': [
'../include/Pomdog/Platform/Win32/Bootstrap.hpp',
'../include/Pomdog/Platform/Win32/BootstrapSettingsWin32.hpp',
'../include/Pomdog/Platform/Win32/PrerequisitesWin32.hpp',
'../src/Platform.Win32/Bootstrap.cpp',
'../src/Platform.Win32/GameHostWin32.cpp',
'../src/Platform.Win32/GameHostWin32.hpp',
'../src/Platform.Win32/GameWindowWin32.cpp',
'../src/Platform.Win32/GameWindowWin32.hpp',
'../src/Platform.Win32/FileSystemWin32.cpp',
'../src/Platform.Win32/KeyboardWin32.cpp',
'../src/Platform.Win32/KeyboardWin32.hpp',
'../src/Platform.Win32/MouseWin32.cpp',
'../src/Platform.Win32/MouseWin32.hpp',
'../src/Platform.Win32/TimeSourceWin32.cpp',
'../src/Platform.Win32/TimeSourceWin32.hpp',
],
'pomdog_library_win32_opengl_sources': [
'../src/Platform.Win32/OpenGLContextWin32.cpp',
'../src/Platform.Win32/OpenGLContextWin32.hpp',
],
'pomdog_library_x11_sources': [
'../include/Pomdog/Platform/X11/Bootstrap.hpp',
'../src/Platform.X11/Bootstrap.cpp',
'../src/Platform.X11/GameHostX11.cpp',
'../src/Platform.X11/GameHostX11.hpp',
'../src/Platform.X11/GameWindowX11.cpp',
'../src/Platform.X11/GameWindowX11.hpp',
'../src/Platform.X11/KeyboardX11.cpp',
'../src/Platform.X11/KeyboardX11.hpp',
'../src/Platform.X11/MouseX11.cpp',
'../src/Platform.X11/MouseX11.hpp',
'../src/Platform.X11/OpenGLContextX11.cpp',
'../src/Platform.X11/OpenGLContextX11.hpp',
'../src/Platform.X11/X11AtomCache.hpp',
'../src/Platform.X11/X11Context.cpp',
'../src/Platform.X11/X11Context.hpp',
],
'pomdog_library_linux_sources': [
'../src/Platform.Linux/FileSystemLinux.cpp',
'../src/Platform.Linux/TimeSourceLinux.cpp',
'../src/Platform.Linux/TimeSourceLinux.hpp',
],
},
'target_defaults': {
'dependencies': [
'<@(pomdog_third_party_dir)/libpng/libpng.gyp:libpng_static',
],
'include_dirs': [
'../include',
'<@(pomdog_third_party_dir)/libpng',
],
'sources': [
'<@(pomdog_library_core_sources)',
'../include/Pomdog/Pomdog.hpp',
],
'msbuild_settings': {
'ClCompile': {
'WarningLevel': 'Level4', # /W4
'TreatWarningAsError': 'true', # /WX
},
},
'xcode_settings': {
'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
'CLANG_CXX_LANGUAGE_STANDARD': 'c++14',
'CLANG_CXX_LIBRARY': 'libc++',
# Warnings (Clang):
'CLANG_WARN_BOOL_CONVERSION': 'YES',
'CLANG_WARN_CONSTANT_CONVERSION': 'YES',
'CLANG_WARN_EMPTY_BODY': 'YES',
'CLANG_WARN_ENUM_CONVERSION': 'YES',
'CLANG_WARN_INT_CONVERSION': 'YES',
'CLANG_WARN_UNREACHABLE_CODE': 'YES',
# Warnings (GCC and Clang):
'GCC_WARN_64_TO_32_BIT_CONVERSION': 'YES',
'GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS': 'YES',
'GCC_WARN_ABOUT_MISSING_FIELD_INITIALIZERS': 'YES',
'GCC_WARN_ABOUT_MISSING_NEWLINE': 'YES',
'GCC_WARN_ABOUT_RETURN_TYPE': 'YES_ERROR',
'GCC_WARN_CHECK_SWITCH_STATEMENTS': 'YES',
'GCC_WARN_HIDDEN_VIRTUAL_FUNCTIONS': 'YES',
'GCC_WARN_MISSING_PARENTHESES': 'YES',
'GCC_WARN_NON_VIRTUAL_DESTRUCTOR': 'YES',
'GCC_WARN_SHADOW': 'YES',
'GCC_WARN_SIGN_COMPARE': 'YES',
'GCC_WARN_TYPECHECK_CALLS_TO_PRINTF': 'YES',
'GCC_WARN_UNINITIALIZED_AUTOS': 'YES_AGGRESSIVE',
'GCC_WARN_UNKNOWN_PRAGMAS': 'YES',
'GCC_WARN_UNUSED_FUNCTION': 'YES',
'GCC_WARN_UNUSED_LABEL': 'YES',
'GCC_WARN_UNUSED_VALUE': 'YES',
'GCC_WARN_UNUSED_VARIABLE': 'YES',
# Warnings - Objective-C:
'CLANG_WARN_DIRECT_OBJC_ISA_USAGE': 'YES_ERROR',
'CLANG_WARN__DUPLICATE_METHOD_MATCH': 'YES',
'GCC_WARN_ALLOW_INCOMPLETE_PROTOCOL': 'YES',
'GCC_WARN_UNDECLARED_SELECTOR': 'YES',
'CLANG_WARN_OBJC_ROOT_CLASS': 'YES_ERROR',
# Warning Policies:
'GCC_TREAT_WARNINGS_AS_ERRORS': 'YES',
'WARNING_CFLAGS': [
'-Wall',
],
# Symbols:
'CLANG_ENABLE_OBJC_ARC': 'YES',
'GCC_INLINES_ARE_PRIVATE_EXTERN': 'YES', # '-fvisibility-inlines-hidden'
'GCC_SYMBOLS_PRIVATE_EXTERN': 'YES', # '-fvisibility=hidden'
},
'conditions': [
['"Direct3D11" in renderers', {
'defines': ['POMDOG_ENABLE_DIRECT3D11'],
'sources': [
'<@(pomdog_library_dxgi_sources)',
'<@(pomdog_library_direct3d_sources)',
'<@(pomdog_library_direct3d11_sources)',
],
'link_settings': {
'libraries': [
'-ldxgi.lib',
'-ld3d11.lib',
'-ld3dcompiler.lib',
'-ldxguid.lib', # using _IID_ID3D11ShaderReflection
],
},
}],
['"GL4" in renderers', {
'defines': ['POMDOG_ENABLE_GL4'],
'sources': [
'<@(pomdog_library_opengl4_sources)',
],
}],
['"GL4" in renderers and OS == "win"', {
'sources': [
'<@(pomdog_library_win32_opengl_sources)',
],
'defines': [
'GLEW_STATIC',
],
'dependencies': [
'<@(pomdog_third_party_dir)/glew/glew.gyp:glew_static',
],
'include_dirs': [
'<@(pomdog_third_party_dir)/glew/include',
],
'link_settings': {
'libraries': [
'-lopengl32.lib',
],
},
}],
['"Metal" in renderers and OS == "mac"', {
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Metal.framework',
],
},
}],
['"Metal" in renderers and OS == "ios"', {
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Metal.framework',
'$(SDKROOT)/System/Library/Frameworks/MetalKit.framework',
'$(SDKROOT)/System/Library/Frameworks/ModelIO.framework',
],
},
}],
['audio == "OpenAL"', {
'sources': [
'<@(pomdog_library_openal_sources)',
],
}],
['audio == "OpenAL" and OS == "mac"', {
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/AudioToolBox.framework',
'$(SDKROOT)/System/Library/Frameworks/OpenAL.framework',
],
},
}],
['audio == "OpenAL" and OS == "ios"', {
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/AudioToolBox.framework',
'$(SDKROOT)/System/Library/Frameworks/OpenAL.framework',
],
},
}],
['audio == "OpenAL" and OS == "linux"', {
'link_settings': {
'libraries': [
'-lopenal',
],
},
}],
['audio == "XAudio2"', {
'sources': [
'<@(pomdog_library_xaudio2_sources)',
],
'link_settings': {
'libraries': [
'-lxaudio2.lib',
],
},
}], # audio == "XAudio2"
['"DirectInput" in input_devices', {
'sources': [
'<@(pomdog_library_directinput_sources)',
],
'link_settings': {
'libraries': [
'-ldinput8.lib',
'-ldxguid.lib',
],
},
}],
['application_platform == "X11"', {
'sources': [
'<@(pomdog_library_x11_sources)',
],
}],
['application_platform == "X11" and OS == "linux"', {
'defines': [
'GLEW_STATIC',
],
'dependencies': [
'<@(pomdog_third_party_dir)/glew/glew.gyp:glew_static',
],
'include_dirs': [
'/usr/X11R6/include',
'<@(pomdog_third_party_dir)/glew/include',
],
'library_dirs': [
'/usr/X11R6/lib',
],
'link_settings': {
'libraries': [
'-lX11',
'-lGL',
],
},
}],
['application_platform == "Cocoa"', {
'sources': [
'<@(pomdog_library_cocoa_sources)',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Cocoa.framework',
'$(SDKROOT)/System/Library/Frameworks/OpenGL.framework',
'$(SDKROOT)/System/Library/Frameworks/QuartzCore.framework',
],
},
}],
['OS == "mac" or OS == "ios"', {
'sources': [
'<@(pomdog_library_apple_sources)',
],
}],
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9',
},
}],
['OS == "ios"', {
'xcode_settings': {
'IPHONEOS_DEPLOYMENT_TARGET': '9.0',
'SDKROOT': 'iphoneos',
},
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
'$(SDKROOT)/System/Library/Frameworks/UIKit.framework',
],
},
}],
['OS == "win"', {
'sources': [
'<@(pomdog_library_win32_sources)',
],
'link_settings': {
'libraries': [
'-lkernel32.lib',
'-luser32.lib',
'-lgdi32.lib',
'-lole32.lib',
'-lwinmm.lib',
#'-lws2_32.lib',
#'-lwinspool.lib',
#'-lcomdlg32.lib',
#'-ladvapi32.lib',
'-lshell32.lib',
#'-loleaut32.lib',
#'-luuid.lib',
#'-lodbc32.lib',
#'-lodbccp32.lib',
],
},
}], # OS == "win"
['OS == "linux"', {
'sources': [
'<@(pomdog_library_linux_sources)',
],
}],
],
},
'targets': [
{
'target_name': 'pomdog-static',
'product_name': 'pomdog',
'type': 'static_library',
'xcode_settings': {
'SKIP_INSTALL': 'YES',
},
},
{
'target_name': 'pomdog-shared',
'product_name': 'Pomdog',
'type': 'shared_library',
'msvs_guid': 'A8F27BAE-660F-42B4-BC27-D5A435EF94BF',
'mac_bundle': 1,
'defines': ['POMDOG_BUILDING_LIBRARY_EXPORTS=1'],
'xcode_settings': {
'PRODUCT_NAME': 'Pomdog', #'$(TARGET_NAME)',
'PRODUCT_BUNDLE_IDENTIFIER': 'net.enginetrouble.pomdog',
'INFOPLIST_FILE': '../src/Platform.Apple/Info.plist',
'INSTALL_PATH': '$(LOCAL_LIBRARY_DIR)/Frameworks',
'SKIP_INSTALL': 'YES',
#'DEFINES_MODULE': 'YES',
'DYLIB_INSTALL_NAME_BASE': '@rpath',
'LD_RUNPATH_SEARCH_PATHS': [
'$(inherited)',
'@executable_path/../Frameworks',
'@loader_path/Frameworks',
],
},
},
],
}
| 41.362434 | 78 | 0.620979 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24,229 | 0.774832 |
8786ee9f99a430b3faffc52d450268f38a85e338 | 368 | py | Python | pythonforandroid/recipes/android/__init__.py | inclement/p4a-experiment | 4e120e08cc3c33af89948307628c8b28fdf76b87 | [
"MIT"
] | 1 | 2015-06-09T21:12:09.000Z | 2015-06-09T21:12:09.000Z | pythonforandroid/recipes/android/__init__.py | inclement/p4a-experiment | 4e120e08cc3c33af89948307628c8b28fdf76b87 | [
"MIT"
] | null | null | null | pythonforandroid/recipes/android/__init__.py | inclement/p4a-experiment | 4e120e08cc3c33af89948307628c8b28fdf76b87 | [
"MIT"
] | null | null | null |
from pythonforandroid.toolchain import CythonRecipe, shprint, ensure_dir, current_directory, ArchAndroid, IncludedFilesBehaviour
import sh
from os.path import exists, join
class AndroidRecipe(IncludedFilesBehaviour, CythonRecipe):
    """Recipe that builds the `android` support module for python-for-android.

    The sources ship with the recipe itself (see ``src_filename``), so
    ``version`` and ``url`` are intentionally ``None`` — nothing is
    downloaded at build time.
    """
    # name = 'android'
    version = None
    url = None
    # presumably requires the pygame bootstrap to be built first — TODO confirm
    depends = ['pygame']
    src_filename = 'src'


# Module-level instance picked up by the p4a recipe loader.
recipe = AndroidRecipe()
| 23 | 128 | 0.75 | 165 | 0.44837 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.084239 |
878807ddada6f3595dcfacaf864041d2dd562b4d | 2,811 | py | Python | sportcred/backend/resources/userComments.py | aviral25/SportCredAPP | 68c8a81d316b6d949ec065fd9ee8d2d0845b031c | [
"MIT"
] | null | null | null | sportcred/backend/resources/userComments.py | aviral25/SportCredAPP | 68c8a81d316b6d949ec065fd9ee8d2d0845b031c | [
"MIT"
] | null | null | null | sportcred/backend/resources/userComments.py | aviral25/SportCredAPP | 68c8a81d316b6d949ec065fd9ee8d2d0845b031c | [
"MIT"
] | null | null | null | from flask_restful import Resource
from flask import request, Blueprint, jsonify
from models import db, Post, Comment
from datetime import datetime
commentBp = Blueprint('commentBp', __name__)
class UserComments():
    """REST endpoints for creating, listing, deleting and voting on comments."""

    @commentBp.route("/addComment", methods=['POST'])
    def addComment():
        """Create a comment on a post.

        Expects JSON with ``post_id``, ``username`` and ``content``.
        Returns the serialized comment and HTTP 201 on success.
        """
        json_data = request.get_json(force=True)
        if not json_data:
            return jsonify({'message': 'No input data provided'}), 400
        comment = Comment(
            post_id=json_data['post_id'],
            username=json_data['username'],
            content=json_data['content']
        )
        db.session.add(comment)
        db.session.commit()
        # BUG FIX: the original called Comment.serialize(Comment) on the class
        # and put the raw model instance into jsonify(), which is not
        # JSON-serializable; serialize the committed instance instead.
        return jsonify({"status": 'success', 'data': comment.serialize()}), 201

    @commentBp.route("/getComment", methods=['GET'])
    def getComment():
        """Return all comments, ordered by ``query_by`` ('top' or 'latest')."""
        json_data = request.get_json(force=True)
        if not json_data:
            return jsonify({'message': 'No input data provided'}), 400
        if json_data['query_by'] == 'top':
            comments = Comment.query.order_by(Comment.upvotes.desc()).all()
        else:  # json_data['query_by'] == 'latest'
            comments = Comment.query.order_by(Comment.timestamp.desc()).all()
        # BUG FIX: the original tested the still-empty accumulator list
        # before filling it, so 404 was returned even when comments existed.
        if not comments:
            return jsonify({'message': 'No comments found'}), 404
        comment_list = [c.serialize() for c in comments]
        return jsonify({"status": str(comment_list)}), 200

    @commentBp.route("/deleteComment", methods=['DELETE'])
    def deleteComment():
        """Delete a comment; only its author is allowed to delete it."""
        json_data = request.get_json(force=True)
        if not json_data:
            return jsonify({'message': 'No input data provided'}), 400
        comment = Comment.query.filter_by(id=json_data['comment_id']).first()
        if not comment:
            return jsonify({'message': 'no such comment found'}), 400
        if json_data['username'] == comment.username:
            db.session.delete(comment)
            db.session.commit()
            return jsonify({"status": 'success'}), 200
        # fixed typo in the response message ("authhorized")
        return jsonify({"status": 'not authorized'}), 401

    # BUG FIX: the original route string was "/upvote'" (stray quote),
    # registering an unreachable URL.
    @commentBp.route("/upvote", methods=['PUT'])
    def upvote():
        """Adjust a comment's vote count: state 'true' adds one, else removes one."""
        json_data = request.get_json(force=True)
        if not json_data:
            return jsonify({'message': 'No input data provided'}), 400
        # BUG FIX: the original bound the query result to a misspelled name
        # (``commment``) and then referenced ``comment``/``comments`` below —
        # a guaranteed NameError on every request.
        comment = Comment.query.filter_by(id=json_data['id']).one()
        if json_data['state'] == 'true':
            comment.upvotes += 1
        else:
            comment.upvotes -= 1
        result = comment.upvotes
        db.session.commit()
        return jsonify({"status": 'success', 'data': result}), 201
8789474f74a671f6ddfeccf4cb104d024e738101 | 32 | py | Python | poetry/console/commands/cache/__init__.py | hongquan/poetry | d12f6421b1c34067e3968ddec2d821ae7f316af7 | [
"MIT"
] | 1 | 2020-12-22T12:51:11.000Z | 2020-12-22T12:51:11.000Z | poetry/console/commands/cache/__init__.py | hongquan/poetry | d12f6421b1c34067e3968ddec2d821ae7f316af7 | [
"MIT"
] | null | null | null | poetry/console/commands/cache/__init__.py | hongquan/poetry | d12f6421b1c34067e3968ddec2d821ae7f316af7 | [
"MIT"
] | null | null | null | from .cache import CacheCommand
| 16 | 31 | 0.84375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
878978a0b479e9b1eb623ecca1b047a1ad925fe1 | 6,660 | py | Python | aalpy/utils/AutomatonGenerators.py | MDrey101/AALpy | bd8a4994db6c27438db61a3247ae404fc6e4074b | [
"MIT"
] | 61 | 2021-04-01T10:38:52.000Z | 2022-03-28T13:44:23.000Z | aalpy/utils/AutomatonGenerators.py | MDrey101/AALpy | bd8a4994db6c27438db61a3247ae404fc6e4074b | [
"MIT"
] | 16 | 2021-04-03T20:14:08.000Z | 2022-02-16T10:21:48.000Z | aalpy/utils/AutomatonGenerators.py | haubitzer/AALpy | e5b51742d886d5c5c72ab3e9c20eb349c56e2469 | [
"MIT"
] | 9 | 2021-04-05T13:43:17.000Z | 2022-03-09T14:06:17.000Z | import random
from aalpy.automata import Dfa, DfaState, MdpState, Mdp, MealyMachine, MealyState, \
MooreMachine, MooreState, OnfsmState, Onfsm, MarkovChain, McState
from aalpy.utils.HelperFunctions import random_string_generator
def generate_random_mealy_machine(num_states, input_alphabet, output_alphabet, compute_prefixes=False) -> MealyMachine:
    """
    Generates a random Mealy machine.

    Args:
        num_states: number of states
        input_alphabet: input alphabet
        output_alphabet: output alphabet
        compute_prefixes: if true, shortest path to reach each state will be computed (Default value = False)

    Returns:
        Mealy machine with num_states states
    """
    states = [MealyState(idx) for idx in range(num_states)]
    # Every (state, input) pair gets a random successor and a random output.
    for st in states:
        for letter in input_alphabet:
            st.transitions[letter] = random.choice(states)
            st.output_fun[letter] = random.choice(output_alphabet)
    machine = MealyMachine(states[0], states)
    if compute_prefixes:
        for st in states:
            st.prefix = machine.get_shortest_path(machine.initial_state, st)
    return machine
def generate_random_moore_machine(num_states, input_alphabet, output_alphabet, compute_prefixes=False) -> MooreMachine:
    """
    Generates a random Moore machine.

    Args:
        num_states: number of states
        input_alphabet: input alphabet
        output_alphabet: output alphabet
        compute_prefixes: if true, shortest path to reach each state will be computed (Default value = False)

    Returns:
        Moore machine with num_states states
    """
    # Outputs are attached to states (Moore semantics), not to transitions.
    states = [MooreState(idx, random.choice(output_alphabet)) for idx in range(num_states)]
    for st in states:
        for letter in input_alphabet:
            st.transitions[letter] = random.choice(states)
    machine = MooreMachine(states[0], states)
    if compute_prefixes:
        for st in states:
            st.prefix = machine.get_shortest_path(machine.initial_state, st)
    return machine
def generate_random_dfa(num_states, alphabet, num_accepting_states=1, compute_prefixes=False) -> Dfa:
    """
    Generates a random DFA.

    Args:
        num_states: number of states
        alphabet: input alphabet
        num_accepting_states: number of accepting states (Default value = 1)
        compute_prefixes: if true, shortest path to reach each state will be computed (Default value = False)

    Returns:
        DFA
    """
    assert num_states >= num_accepting_states
    states = [DfaState(idx) for idx in range(num_states)]
    # Pool of not-yet-used targets: while it lasts, each state is picked as a
    # transition target exactly once, so no state is left unreferenced.
    unused_targets = list(states)
    for st in states:
        for letter in alphabet:
            if unused_targets:
                target = random.choice(unused_targets)
                unused_targets.remove(target)
            else:
                target = random.choice(states)
            st.transitions[letter] = target
    for _ in range(num_accepting_states):
        random.choice(states).is_accepting = True
    dfa = Dfa(states[0], states)
    if compute_prefixes:
        for st in states:
            st.prefix = dfa.get_shortest_path(dfa.initial_state, st)
    return dfa
def generate_random_mdp(num_states, len_input, custom_outputs=None, num_unique_outputs=None):
    """
    Generates random MDP.

    Args:
        num_states: number of states
        len_input: number of inputs
        custom_outputs: user predefined outputs
        num_unique_outputs: number of outputs

    Returns:
        random MDP
    """
    if not num_unique_outputs:
        num_unique_outputs = num_states
    outputs = [random_string_generator(random.randint(3, 7)) for _ in range(num_unique_outputs)]
    if custom_outputs:
        outputs = custom_outputs
    # Pad with duplicates until there is one output per state.
    while len(outputs) < num_states:
        outputs.append(random.choice(outputs))
    possible_probabilities = [1.0, 1.0, 1.0, 1.0, 0.8, 0.5, 0.9]
    states = [MdpState(f'q{idx}', outputs.pop()) for idx in range(num_states)]
    for st in states:
        for inp in range(len_input):
            prob = random.choice(possible_probabilities)
            if prob == 1.:
                st.transitions[inp].append((random.choice(states), prob))
            else:
                # Split the probability mass over two distinct successors.
                candidates = list(states)
                first = random.choice(candidates)
                candidates.remove(first)
                st.transitions[inp].append((first, prob))
                st.transitions[inp].append((random.choice(candidates), round(1 - prob, 2)))
    return Mdp(states[0], states), list(range(len_input))
def generate_random_ONFSM(num_states, num_inputs, num_outputs, multiple_out_prob=0.1):
    """
    Randomly generate an observable non-deterministic finite-state machine.

    Args:
        num_states: number of states
        num_inputs: number of inputs
        num_outputs: number of outputs
        multiple_out_prob: probability that state will have multiple outputs (Default value = 0.1)

    Returns:
        randomly generated ONFSM
    """
    inputs = [random_string_generator(random.randint(1, 3)) for _ in range(num_inputs)]
    outputs = [random_string_generator(random.randint(3, 7)) for _ in range(num_outputs)]
    states = [OnfsmState(f's{idx}') for idx in range(num_states)]
    for st in states:
        for inp in inputs:
            # With probability multiple_out_prob a state answers an input with
            # several distinct outputs (the source of non-determinism).
            num_choices = 1
            if random.random() <= multiple_out_prob and num_outputs > 1:
                num_choices = random.randint(2, num_outputs)
            chosen = random.sample(outputs, num_choices)
            for out in chosen:
                st.transitions[inp].append((out, random.choice(states)))
    return Onfsm(states[0], states)
def generate_random_markov_chain(num_states):
    """
    Generates a random Markov chain with num_states states.

    Each state (except the last) moves to its successor either with
    probability 1, or with probability p plus a (1 - p) "detour" to a random
    other state.

    Args:
        num_states: number of states (must be >= 3)

    Returns:
        randomly generated MarkovChain
    """
    assert num_states >= 3
    possible_probabilities = [1.0, 1.0, 0.8, 0.5, 0.9]
    states = [McState(f'q{idx}', idx) for idx in range(num_states)]
    for index, st in enumerate(states[:-1]):
        prob = random.choice(possible_probabilities)
        successor = states[index + 1]
        if prob == 1.:
            st.transitions.append((successor, prob))
        else:
            others = list(states)
            others.remove(successor)
            detour = random.choice(others)
            st.transitions.append((successor, prob))
            st.transitions.append((detour, round(1 - prob, 2)))
    return MarkovChain(states[0], states)
| 30.135747 | 119 | 0.651652 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,657 | 0.248799 |
878c2f822d6aa9a6f51618cb1ba0c0ab5a958df0 | 4,104 | py | Python | tools/_init.py | zhengshou/AutoLoc | 82c7e51a28eb740a8f002a460ce5c918fc61731b | [
"MIT"
] | 84 | 2018-09-03T02:40:04.000Z | 2022-03-01T15:17:57.000Z | tools/_init.py | zhengshou/AutoLoc | 82c7e51a28eb740a8f002a460ce5c918fc61731b | [
"MIT"
] | 10 | 2018-10-15T22:42:53.000Z | 2020-05-17T01:23:06.000Z | tools/_init.py | zhengshou/AutoLoc | 82c7e51a28eb740a8f002a460ce5c918fc61731b | [
"MIT"
] | 19 | 2018-12-29T01:32:32.000Z | 2022-01-26T16:36:49.000Z | import os
import sys
import os.path as osp
from contextlib import contextmanager
############################################################
# Setup path
def add_path(path):
    """Prepend *path* to sys.path unless it is already importable from there."""
    if path in sys.path:
        return
    sys.path.insert(0, path)
# Resolve the project's lib/ directory relative to this file and make it
# importable before pulling in the modules that live there.
curdir = osp.dirname(__file__)
lib_path = osp.join(curdir, '..', 'lib')
add_path(lib_path)

############################################################
# Import modules from the lib
from config import cfg
from utils.ops import may_create
from utils.proto import prototxt_from_template
@contextmanager
def workenv():
    """Temporarily chdir to the parent directory of ``curdir`` for the body
    of the ``with`` block; the caller's working directory is always restored."""
    previous = os.getcwd()
    os.chdir(osp.join(curdir, '..'))
    try:
        yield
    finally:
        # Restore even if the body raised.
        os.chdir(previous)
def setup(phase_key, dataset, expname, rsltname):
    '''Setup paths & general args after possible merge from config file.

    Mutates the global ``cfg`` in place: dataset/experiment identity, the
    visualization folder name, every derived path, and the result file path.
    '''
    # Save args to config
    cfg.DATASET = dataset
    cfg.EXP = expname
    cfg.NUM_CLASSES = {
        'TH14': 20,
        'AN': 100,
    }[cfg.DATASET]
    # AN.train == TH14.val; AN.val == TH14.test
    # if cfg.DATASET == 'AN':
    #     cfg[phase_key].STAGE = {
    #         'val': 'train',
    #         'test': 'val',
    #         'train': 'train',
    #         'val': 'val',
    #     }[cfg[phase_key].STAGE]
    # Setup <infix> first, resulting in
    # '' => ''; 'infix' => '.infix' so that we can uniformly insert it.
    ret_infix = cfg.INFIX if not cfg.INFIX.startswith('.') else cfg.INFIX[1:]
    ret_infix = '' if ret_infix == '' else '.{}'.format(ret_infix)
    cfg.INFIX = ret_infix
    # Setup <viz_folder> name
    norm_str = 'normed' if cfg.FEAT.NORM else 'unnormed'
    avt_str = {
        True: '{avt}',
        False: '{avt}{trh}'
    }[cfg.FEAT.THRESH is None].format(avt=cfg.FEAT.ACTIVATION,
                                      trh=cfg.FEAT.THRESH)
    cfg.VIZ.FOLDER_NAME = '{}_{}_{}_{}'.format(cfg[phase_key].STAGE, cfg.FEAT.MODE,
                                               norm_str, avt_str)
    if not cfg.VIZ.FIX_WIDTH:
        cfg.VIZ.FOLDER_NAME += '_fixwidth'
    # Then several paths: <proto>, <log>, <local_snapshots>, <viz>
    cfg.EXP_PATH = osp.join(cfg.EXP_DIR, cfg.DATASET, cfg.EXP)
    cfg.PROTO_PATH = osp.join(cfg.EXP_PATH, 'proto')
    cfg.LOG_PATH = osp.join(cfg.EXP_PATH, 'log')
    cfg.LOCAL_SNAPSHOT_PATH = osp.join(cfg.EXP_PATH, 'snapshot')
    # Example: exp/TH14/experiment100/val_mul_normed_relu10_fixwidth
    cfg.VIZ_PATH = osp.join(cfg.EXP_PATH, cfg.VIZ.FOLDER_NAME)
    cfg.RSLT_PATH = osp.join(cfg.EXP_PATH, 'rslt')
    path2check = [cfg.PROTO_PATH, cfg.LOG_PATH, cfg.LOCAL_SNAPSHOT_PATH,
                  cfg.VIZ_PATH, cfg.RSLT_PATH]
    # BUG FIX: map() returns a lazy iterator on Python 3, so the original
    # ``map(may_create, path2check)`` never created any directory. An explicit
    # loop runs the side effect on both Python 2 and 3.
    for path in path2check:
        may_create(path)
    cfg.SL_PATH = osp.join(cfg.PROTO_PATH,
                           'solver{}.prototxt'.format(cfg.INFIX))
    cfg.TR_PATH = osp.join(cfg.PROTO_PATH,
                           'train{}.prototxt'.format(cfg.INFIX))
    # Currently we share the prototxt between training and testing.
    cfg.TE_PATH = cfg.TR_PATH
    cfg.SNAPSHOT_PATH = osp.join(cfg.LOCAL_SNAPSHOT_PATH, {
        True: rsltname.replace('.pc', '.caffemodel'),
        False: '{}_iter{}.caffemodel'.format(rsltname, cfg.MAX_ITER)
    }[rsltname.endswith('.pc')])
    # Setup `videoids_lst` template.
    cfg.DSPEC.VID_LST = osp.join(cfg.DATA_DIR, cfg.DATASET, '{stage}_videoid.lst')
    # Specify training input.
    cfg[phase_key].DATA_PATH = osp.join(cfg.DATA_DIR, cfg.DATASET,
                                        cfg[phase_key].DATA_FILE)
    phase_ = phase_key.lower() + '.'
    # Processing rsltname in following logic in order:
    # (1) rsltname should start with '<phase>.';
    # (2) rslname with '.pc' should be directly used;
    # (3) otherwise it should be recorded with the iteration.
    if not rsltname.startswith(phase_):
        rsltname = phase_ + rsltname
    # Finally the result pickle file.
    cfg[phase_key].RSLT_PATH = osp.join(cfg.RSLT_PATH, {
        True: rsltname,
        False: '{}_iter{}.pc'.format(rsltname, cfg.MAX_ITER)
    }[rsltname.endswith('.pc')])
    # Generate prototxt from template
    prototxt_from_template()
| 31.813953 | 83 | 0.595517 | 0 | 0 | 137 | 0.033382 | 153 | 0.037281 | 0 | 0 | 1,335 | 0.325292 |
878d22338b37587b5be27d20237c97a67a7ed572 | 618 | py | Python | chap4/exchange_keys_and_values.py | marble-git/python-laoqi | 74c4bb5459113e54ce64443e5da5a9c6a3052d6a | [
"MIT"
] | null | null | null | chap4/exchange_keys_and_values.py | marble-git/python-laoqi | 74c4bb5459113e54ce64443e5da5a9c6a3052d6a | [
"MIT"
] | null | null | null | chap4/exchange_keys_and_values.py | marble-git/python-laoqi | 74c4bb5459113e54ce64443e5da5a9c6a3052d6a | [
"MIT"
] | null | null | null | #coding:utf-8
'''
filename:exchange_keys_and_values.py
chap:4
subject:10
conditions:a dict
solution:exchanged keys and values
'''
origin_dict = {'book':['python','djang','data'],'author':'laoqi','publisher':'phei'}


def ishashable(obj):
    """Return True if obj can be hashed (i.e. used as a dict key)."""
    try:
        hash(obj)
        return True
    except TypeError:
        # hash() raises TypeError for unhashable types; a bare except here
        # would also swallow unrelated errors (e.g. KeyboardInterrupt).
        return False


# Invert the mapping: each hashable value becomes a key pointing back at its
# original key; list values contribute one entry per element.
xchange_dict = {}
for key, value in origin_dict.items():
    if ishashable(value):
        xchange_dict.update({value: key})
    else:
        for v in value:
            xchange_dict.update({v: key})
print(f'''origin_dict : {origin_dict}
xchange_dict{xchange_dict}''')
| 17.166667 | 84 | 0.619741 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 272 | 0.440129 |
878d43b49692a27640a8a1fd757b78585e00815a | 760 | py | Python | datadog_checks_dev/tests/test_subprocess.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | 4 | 2021-06-21T19:21:49.000Z | 2021-06-23T21:21:55.000Z | datadog_checks_dev/tests/test_subprocess.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | 1 | 2018-08-15T05:50:17.000Z | 2018-08-15T05:50:17.000Z | datadog_checks_dev/tests/test_subprocess.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | 1 | 2021-06-21T19:21:51.000Z | 2021-06-21T19:21:51.000Z | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import sys
from datadog_checks.dev.subprocess import run_command
class TestRunCommand:
    """Integration checks for run_command against the current interpreter."""

    def test_output(self):
        # Echo the interpreter's version from a child process and compare it
        # with the parent's view of the same value.
        cmd = '{} -c "import sys;print(sys.version)"'.format(sys.executable)
        result = run_command(cmd, capture='out')
        assert result.stdout.strip() == sys.version.strip()

    def test_env(self):
        # A variable injected into the child environment must be visible
        # inside the spawned interpreter.
        env = dict(os.environ)
        env['DDEV_ENV_VAR'] = 'is_set'
        cmd = '{} -c "import os;print(os.getenv(\'DDEV_ENV_VAR\'))"'.format(sys.executable)
        result = run_command(cmd, capture='out', env=env)
        assert result.stdout.strip() == 'is_set'
| 26.206897 | 90 | 0.6 | 574 | 0.755263 | 0 | 0 | 0 | 0 | 0 | 0 | 237 | 0.311842 |
878e2e53576e2b93158335b6f62791d259a663a0 | 186 | py | Python | output/models/ms_data/datatypes/facets/any_uri/any_uri_b002_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/ms_data/datatypes/facets/any_uri/any_uri_b002_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/ms_data/datatypes/facets/any_uri/any_uri_b002_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.ms_data.datatypes.facets.any_uri.any_uri_b002_xsd.any_uri_b002 import (
Bar,
Ct,
Root,
St,
)
# Names re-exported as this package's public API.
__all__ = [
    "Bar",
    "Ct",
    "Root",
    "St",
]
| 13.285714 | 90 | 0.580645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.102151 |
878e9c375065b11c93ecd7ccacefc88bd6387185 | 3,476 | py | Python | tests/fixtures/statement_lists.py | TylerYep/wolfbot | 8d4786ce9542bab344b227e0571bb24bc354298d | [
"MIT"
] | 3 | 2018-06-16T00:03:30.000Z | 2021-12-26T20:48:45.000Z | tests/fixtures/statement_lists.py | TylerYep/wolfbot | 8d4786ce9542bab344b227e0571bb24bc354298d | [
"MIT"
] | null | null | null | tests/fixtures/statement_lists.py | TylerYep/wolfbot | 8d4786ce9542bab344b227e0571bb24bc354298d | [
"MIT"
] | 2 | 2021-03-03T09:31:35.000Z | 2021-03-03T10:02:55.000Z | import pytest
from wolfbot.enums import Role, SwitchPriority
from wolfbot.statements import Statement
@pytest.fixture(scope="session")
def example_statement() -> Statement:
return Statement(
"test",
((2, frozenset({Role.ROBBER})), (0, frozenset({Role.SEER}))),
((SwitchPriority.ROBBER, 2, 0),),
Role.ROBBER,
)
@pytest.fixture(scope="session")
def small_statement_list() -> tuple[Statement, ...]:
return (
Statement("I am a Villager.", ((0, frozenset({Role.VILLAGER})),)),
Statement(
"I am a Robber and I swapped with Player 2. I am now a Seer.",
((1, frozenset({Role.ROBBER})), (2, frozenset({Role.SEER}))),
((SwitchPriority.ROBBER, 1, 2),),
),
Statement(
"I am a Seer and I saw that Player 1 was a Robber.",
((2, frozenset({Role.SEER})), (1, frozenset({Role.ROBBER}))),
),
)
@pytest.fixture(scope="session")
def medium_statement_list() -> tuple[Statement, ...]:
return (
Statement(
"I am a Seer and I saw that Player 2 was a Drunk.",
((0, frozenset({Role.SEER})), (2, frozenset({Role.DRUNK}))),
),
Statement(
"I am a Seer and I saw that Player 3 was a Minion.",
((1, frozenset({Role.SEER})), (3, frozenset({Role.MINION}))),
),
Statement(
"I am a Drunk and I swapped with Center 0.",
((2, frozenset({Role.DRUNK})),),
((SwitchPriority.DRUNK, 2, 5),),
),
Statement(
"I am a Robber and I swapped with Player 2. I am now a Drunk.",
((3, frozenset({Role.ROBBER})), (2, frozenset({Role.DRUNK}))),
((SwitchPriority.ROBBER, 3, 2),),
),
Statement(
"I am a Seer and I saw that Player 1 was a Wolf.",
((4, frozenset({Role.SEER})), (1, frozenset({Role.WOLF}))),
),
)
@pytest.fixture(scope="session")
def large_statement_list() -> tuple[Statement, ...]:
return (
Statement(
"I am a Robber and I swapped with Player 6. I am now a Drunk.",
((0, frozenset({Role.ROBBER})), (6, frozenset({Role.DRUNK}))),
((SwitchPriority.ROBBER, 6, 0),),
),
Statement(
"I am a Robber and I swapped with Player 0. I am now a Seer.",
((1, frozenset({Role.ROBBER})), (0, frozenset({Role.SEER}))),
((SwitchPriority.ROBBER, 0, 1),),
),
Statement(
"I am a Seer and I saw that Player 3 was a Villager.",
((2, frozenset({Role.SEER})), (3, frozenset({Role.VILLAGER}))),
),
Statement("I am a Villager.", ((3, frozenset({Role.VILLAGER})),)),
Statement(
"I am a Mason. The other Mason is Player 5.",
((4, frozenset({Role.MASON})), (5, frozenset({Role.MASON}))),
),
Statement(
"I am a Mason. The other Mason is Player 4.",
((5, frozenset({Role.MASON})), (4, frozenset({Role.MASON}))),
),
Statement(
"I am a Drunk and I swapped with Center 1.",
((6, frozenset({Role.DRUNK})),),
((SwitchPriority.ROBBER, 9, 6),),
),
Statement(
"I am a Robber and I swapped with Player 5. I am now a Seer.",
((7, frozenset({Role.ROBBER})), (5, frozenset({Role.SEER}))),
((SwitchPriority.ROBBER, 5, 7),),
),
)
| 35.469388 | 75 | 0.52244 | 0 | 0 | 0 | 0 | 3,361 | 0.966916 | 0 | 0 | 813 | 0.23389 |
8791411f9352e21475daedce2828ad0066226068 | 419 | py | Python | ranking/migrations/0056_auto_20201128_2316.py | horacexd/clist | 9759dfea97b86514bec9825d2430abc36decacf0 | [
"Apache-2.0"
] | 166 | 2019-05-16T23:46:08.000Z | 2022-03-31T05:20:23.000Z | ranking/migrations/0056_auto_20201128_2316.py | horacexd/clist | 9759dfea97b86514bec9825d2430abc36decacf0 | [
"Apache-2.0"
] | 92 | 2020-01-18T22:51:53.000Z | 2022-03-12T01:23:57.000Z | ranking/migrations/0056_auto_20201128_2316.py | VadVergasov/clist | 4afcdfe88250d224043b28efa511749347cec71c | [
"Apache-2.0"
] | 23 | 2020-02-09T17:38:43.000Z | 2021-12-09T14:39:07.000Z | # Generated by Django 2.2.13 on 2020-11-28 23:16
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a composite index on the
    # statistics model over (place_as_int, -created). Applied migrations
    # should not be hand-edited.

    dependencies = [
        ('ranking', '0055_auto_20201009_0735'),
    ]

    operations = [
        migrations.AddIndex(
            model_name='statistics',
            index=models.Index(fields=['place_as_int', '-created'], name='ranking_sta_place_a_42252c_idx'),
        ),
    ]
| 23.277778 | 107 | 0.637232 | 325 | 0.775656 | 0 | 0 | 0 | 0 | 0 | 0 | 150 | 0.357995 |
879159c7ecfa20cd2c5df431af5a9c4a1ec6e23d | 489 | py | Python | aioboto3/resources/base.py | dacevedo12/aioboto3 | 289e08d7778dcbfa520c2fdd889206a9e98575be | [
"Apache-2.0"
] | null | null | null | aioboto3/resources/base.py | dacevedo12/aioboto3 | 289e08d7778dcbfa520c2fdd889206a9e98575be | [
"Apache-2.0"
] | null | null | null | aioboto3/resources/base.py | dacevedo12/aioboto3 | 289e08d7778dcbfa520c2fdd889206a9e98575be | [
"Apache-2.0"
] | null | null | null | import logging
import warnings
from boto3.resources.base import ServiceResource
logger = logging.getLogger(__name__)
class AIOBoto3ServiceResource(ServiceResource):
    """ServiceResource usable as an async context manager.

    Entering returns the resource itself; exiting delegates cleanup to the
    underlying client's ``__aexit__``.
    """

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # Propagate the exit to the wrapped client so it can release its
        # resources (connections etc.).
        await self.meta.client.__aexit__(exc_type, exc_val, exc_tb)

    def close(self):
        # Deprecated entry point kept for backward compatibility; callers
        # should rely on the async context manager instead.
        warnings.warn("This should not be called anymore", DeprecationWarning)
        return self.meta.client.close()
| 25.736842 | 78 | 0.738241 | 367 | 0.750511 | 0 | 0 | 0 | 0 | 168 | 0.343558 | 35 | 0.071575 |
8791725d30f44e82a659356c39e4c7bccfcfd512 | 11,728 | py | Python | AdvBox/examples/utils.py | Tough2011/PaddleSleeve | e654598a370220fbbc96ae527833676183909383 | [
"Apache-2.0"
] | 2 | 2022-02-24T08:47:04.000Z | 2022-02-24T08:47:13.000Z | AdvBox/examples/utils.py | Tough2011/PaddleSleeve | e654598a370220fbbc96ae527833676183909383 | [
"Apache-2.0"
] | null | null | null | AdvBox/examples/utils.py | Tough2011/PaddleSleeve | e654598a370220fbbc96ae527833676183909383 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
utility tools.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import distutils.util
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import os
import time
# Root folder where generated/adversarial images are written.
OUTPUT = './output/'
# Standard ImageNet per-channel normalization constants, shaped (3, 1, 1)
# so they broadcast over C,H,W image arrays.
img_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
img_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
class bcolors:
    """ANSI terminal escape codes for colored/emphasized console output."""
    RED = "\033[1;31m"
    BLUE = "\033[1;34m"
    CYAN = "\033[1;36m"
    GREEN = "\033[1;32m"
    RESET = "\033[0;0m"
    REVERSE = "\033[;7m"
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    # BOLD was originally defined twice ("\033[;1m" and then '\033[1m');
    # the second definition always won, so only the effective value is kept.
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def print_arguments(args):
    """Print every attribute of an argparse namespace, sorted by name.

    Usage:

    .. code-block:: python

        parser = argparse.ArgumentParser()
        parser.add_argument("name", default="Jonh", type=str, help="User name.")
        args = parser.parse_args()
        print_arguments(args)

    :param args: Input argparse.Namespace for printing.
    :type args: argparse.Namespace
    """
    print("----------- Configuration Arguments -----------")
    arg_map = vars(args)
    for name in sorted(arg_map):
        print("%s: %s" % (name, arg_map[name]))
    print("------------------------------------------------")
def add_arguments(argname, type, default, help, argparser, **kwargs):
    """Register a ``--<argname>`` option on *argparser*.

    Usage:

    .. code-block:: python

        parser = argparse.ArgumentParser()
        add_argument("name", str, "Jonh", "User name.", parser)
        args = parser.parse_args()
    """
    # bool("False") is truthy, so boolean options are parsed via strtobool
    # instead of the plain bool constructor.
    if type == bool:
        type = distutils.util.strtobool
    argparser.add_argument(
        "--" + argname,
        default=default,
        type=type,
        help=help + ' Default: %(default)s.',
        **kwargs)
def check_output_directory(type):
    """
    Create the output directory OUTPUT/<type> (both levels) if missing.

    Args:
        type: name of picture set for test
    """
    # os.makedirs(..., exist_ok=True) creates both levels in one call and,
    # unlike the previous exists()/mkdir() pairs, is free of the
    # check-then-act race when several processes start at once.
    os.makedirs(OUTPUT + "/" + type, 0o755, exist_ok=True)
def convert_net(img_example):
    """
    Undo dataset normalization and return a displayable H,W,C uint8 image.

    Args:
        img_example: array data of img (flattened or C,H,W normalized floats)
    """
    out = np.reshape(img_example.astype('float32'), (3, 224, 224))
    # In-place ops keep the float32 dtype while de-normalizing:
    # x -> (x * std + mean) * 255
    out *= img_std
    out += img_mean
    out *= 255
    out = np.reshape(out.astype(np.uint8), (3, 224, 224))
    # C,H,W -> H,W,C for image writers/viewers.
    return out.transpose((1, 2, 0))
def save_image(output_img, path):
    """
    Persist an image array to *path* as a PNG file.

    Args:
        output_img: array data of the image (as accepted by PIL.Image.fromarray)
        path: directory and filename
    """
    Image.fromarray(output_img).save(path, 'png')
def generation_image(id, org_img, org_label, adv_img, adv_label, attack_method='FGSM'):
    """
    Save the original image, the adversarial image, and their absolute
    difference as PNGs under OUTPUT/imagenet/.

    Args:
        id: numeric identifier used in the filenames
        org_img: array data of test img
        org_label: the inference label of test image
        adv_img: array data of adv img
        adv_label: the adverarial label of adv image
        attack_method: the adverarial example generation method
    """
    DATA_TYPE = "imagenet"
    check_output_directory(DATA_TYPE)
    base = OUTPUT + DATA_TYPE
    org_path = base + "/%d_original-%d-by-%s.png" % (id, org_label, attack_method)
    adv_path = base + "/%d_adversary-%d-by-%s.png" % (id, adv_label, attack_method)
    diff_path = base + "/%d_diff-x-by-%s.png" % (id, attack_method)
    org_output = convert_net(org_img)
    adv_output = convert_net(adv_img)
    diff_output = abs(adv_output - org_output)
    save_image(org_output, org_path)
    save_image(adv_output, adv_path)
    save_image(diff_output, diff_path)
    print("--------------------------------------------------")
def show_images_diff(original_img, original_label, adversarial_img, adversarial_label):
    """
    show original image, adversarial image and their difference
    Args:
        original_img: original image, numpy
        original_label: original label, int
        adversarial_img: adversarial image
        adversarial_label: adversarial label
    Returns:
        None; saves the figure under output/ and shows it.
    """
    plt.figure()
    plt.subplot(131)
    plt.title('Original')
    plt.imshow(original_img)
    plt.axis('off')
    plt.subplot(132)
    plt.title('Adversarial')
    plt.imshow(adversarial_img)
    plt.axis('off')
    plt.subplot(133)
    plt.title('Adversarial-Original')
    difference = adversarial_img - original_img
    # l0 = number of changed array elements; l2 = Euclidean norm of the
    # perturbation.
    l0 = np.where(difference != 0)[0].shape[0]
    l2 = np.linalg.norm(difference)
    print("l0={} l2={}".format(l0, l2))
    # Rescale the signed difference into the displayable (0,1) range:
    # (-1,1) -> (0,1)
    difference = difference / abs(difference).max() / 2.0 + 0.5
    plt.imshow(difference, cmap=plt.cm.gray)
    plt.axis('off')
    plt.tight_layout()
    # Timestamp makes each saved figure's filename unique.
    ts = time.localtime(time.time())
    ts = time.strftime("%Y-%m-%d %H:%M:%S", ts)
    if not os.path.exists('output'):
        os.makedirs('output')
    plt.savefig("output/orig_adv_diff_{}_{}.png".format(adversarial_label, ts))
    plt.show()
def show_images_diff_denoising(image_a, image_a_label, image_b, image_b_label, image_a_title='Input', image_b_title='output'):
    """
    Show two images side by side together with their pixel-wise difference.

    Args:
        image_a: original image, ndarray
        image_a_label: original label, int
        image_b: adversarial image, ndarray
        image_b_label: adversarial label
        image_a_title: the title of the image a
        image_b_title: the title of the image b
    Returns:
        None; saves the figure under output/ and shows it.
    """
    plt.figure()
    plt.subplot(131)
    plt.title(image_a_title)
    plt.imshow(image_a)
    plt.axis('off')
    plt.subplot(132)
    plt.title(image_b_title)
    plt.imshow(image_b)
    plt.axis('off')
    plt.subplot(133)
    plt.title(image_a_title + '-' + image_b_title)
    difference = image_a - image_b
    # l0 = number of changed array elements; l2 = Euclidean norm.
    l0 = np.where(difference != 0)[0].shape[0]
    l2 = np.linalg.norm(difference)
    print("l0={} l2={}".format(l0, l2))
    # (-1,1) -> (0,1)
    difference = difference / abs(difference).max() / 2.0 + 0.5
    plt.imshow(difference, cmap=plt.cm.gray)
    plt.axis('off')
    plt.tight_layout()
    ts = time.localtime(time.time())
    ts = time.strftime("%Y-%m-%d %H:%M:%S", ts)
    # BUG FIX: the existence check used to test 'examples/image_cls/output'
    # while the figure is saved under 'output/', so savefig could fail when
    # that unrelated directory already existed. Check the directory actually
    # written to.
    if not os.path.exists('output'):
        os.makedirs('output')
    plt.savefig("output/{}_{}_diff_{}_{}_{}.png".format(image_a_title, image_b_title, image_a_label, image_b_label, ts))
    plt.show()
def _short_label(label):
    """Compact a Top-1 class-name string for display.

    Keeps the text before the first comma; if that part is longer than 10
    characters, keeps only its last space-separated word. Matches the
    behavior of the four duplicated scanning loops it replaces.
    """
    label = label.split(',', 1)[0]
    if len(label) > 10:
        label = label.split(' ')[-1]
    return label


def show_input_adv_and_denoise(image_a, image_b, image_c, image_d,
                               image_a_label, image_b_label, image_c_label, image_d_label,
                               image_a_title='Input', image_b_title='Adversary',
                               image_c_title='Adv-Denoise', image_d_title='In-Denoise',
                               method='Default'):
    """
    Show the original image, the adversarial image, and their denoising
    results side by side in one figure.

    Args:
        image_a: original image, ndarray
        image_a_label: original label, str
        image_b: adversarial image, ndarray
        image_b_label: adversarial label
        image_c: denoising result of the adversarial image, ndarray
        image_c_label: the predicted class label after denoising of the adv-image
        image_d: denoising result of the original input image, ndarray
        image_d_label: the predicted class label after denoising of the input image
        image_*_title: panel titles
        method: denoising method name, used in the figure title and filename
    Returns:
        None; saves the figure under output/ and shows it.
    """
    # The four copy-pasted character-scanning loops of the original are
    # folded into one helper with identical results.
    a_label = _short_label(image_a_label)
    b_label = _short_label(image_b_label)
    c_label = _short_label(image_c_label)
    d_label = _short_label(image_d_label)
    # Text positions for the caption lines under each panel. (The original
    # also computed an unused width 'w'; dropped.)
    h = image_c.shape[1] if image_c.shape[1] > image_d.shape[1] else image_d.shape[1]
    x = 0        # initial horizontal position of the first line
    y = h + 10   # initial vertical position of the first line
    xos = 15     # offset to x of the second line
    yos = 10     # offset to y of the second line
    fig = plt.figure()
    title = 'Denoise method: ' + method
    fig.suptitle(title, fontsize=12, fontweight='bold', y=0.80)
    panels = (
        (141, image_a_title, image_a, a_label),
        (142, image_b_title, image_b, b_label),
        (143, image_c_title, image_c, c_label),
        (144, image_d_title, image_d, d_label),
    )
    for pos, panel_title, img, label in panels:
        plt.subplot(pos)
        plt.title(panel_title)
        plt.imshow(img)
        plt.text(x, y, 'Top1 label:')
        plt.text(x + xos, y + yos, label)
        plt.axis('off')
    plt.tight_layout()
    # BUG FIX: the existence check used to test 'examples/image_cls/output'
    # while the figure is saved under 'output/'; check the directory used.
    if not os.path.exists('output'):
        os.makedirs('output')
    plt.savefig("output/{}_Denoising_Comparison.png".format(method))
    plt.show()
def get_best_weigthts_from_folder(folder, pdparams_file_starter):
    """Return the path of the best ``.pdparams`` checkpoint in *folder*.

    Files are expected to look like ``<starter>.<acc>.pdparams``; the second
    dot-separated field is compared as a string and the largest one wins.
    Returns None when no file matches the prefix/extension.
    """
    prefix = pdparams_file_starter.lower()
    candidates = [
        name for name in os.listdir(folder)
        if name.lower().endswith('.pdparams') and name.lower().startswith(prefix)
    ]
    if not candidates:
        return None
    # max() keeps the first maximal element, matching the original
    # acc_list.index(max(acc_list)) tie-breaking.
    best = max(candidates, key=lambda name: name.split('.')[1])
    best_weight_path = os.path.join(folder, best)
    print('Loaded: ', best_weight_path)
    return best_weight_path
| 29.541562 | 126 | 0.612892 | 369 | 0.031463 | 0 | 0 | 0 | 0 | 0 | 0 | 4,630 | 0.394782 |
87930ac2ba2d5084ba1fa7dbb011e1fe84ad4946 | 399 | py | Python | shield.com/metahumans/validators.py | euribates/curso-django | 06f71a7d5b239e294c0cc6b3972861ed8d1c59d0 | [
"CC-BY-4.0"
] | 1 | 2020-04-14T11:08:11.000Z | 2020-04-14T11:08:11.000Z | shield.com/metahumans/validators.py | euribates/curso-django | 06f71a7d5b239e294c0cc6b3972861ed8d1c59d0 | [
"CC-BY-4.0"
] | null | null | null | shield.com/metahumans/validators.py | euribates/curso-django | 06f71a7d5b239e294c0cc6b3972861ed8d1c59d0 | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
def between_zero_and_one_hundred(value):
    """Django validator: reject power levels outside the inclusive [0, 100] range."""
    if value > 100 or value < 0:
        raise ValidationError('El nivel de poder debe oscilar entre 0 y 100.')
| 26.6 | 78 | 0.774436 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.218045 |
8793f9ea482d0f9374dda4215d86b8945f6bf480 | 1,709 | py | Python | api/tiposrefeicao.py | Guergeiro/bd2-group2 | 34520e40ef915dc7bb89486ac2322f8994ac9862 | [
"MIT"
] | null | null | null | api/tiposrefeicao.py | Guergeiro/bd2-group2 | 34520e40ef915dc7bb89486ac2322f8994ac9862 | [
"MIT"
] | 12 | 2019-11-17T21:22:49.000Z | 2020-01-19T18:10:00.000Z | api/tiposrefeicao.py | Guergeiro/bd2-group2 | 34520e40ef915dc7bb89486ac2322f8994ac9862 | [
"MIT"
] | 2 | 2019-11-15T16:47:21.000Z | 2019-11-17T13:59:40.000Z | from flask import Blueprint, request
import json
import databaseutils as utils
tiposrefeicao = Blueprint("tiposrefeicao", __name__)
tiposrefeicaoColumns = ["cod_tiporefeicao", "designacao"]
@tiposrefeicao.route("/api/tiposrefeicao", methods=["GET"])
@tiposrefeicao.route("/api/tiposrefeicao/", methods=["GET"])
def get_tiposrefeicao():
return utils.getAll(tiposrefeicaoColumns,
f"SELECT * FROM selecttiposrefeicao();")
@tiposrefeicao.route("/api/tiposrefeicao/<cod_TipoRefeicao>", methods=["GET"])
@tiposrefeicao.route("/api/tiposrefeicao/<cod_TipoRefeicao>/", methods=["GET"])
def get_tiporefeicao(cod_TipoRefeicao):
return utils.getOne(
tiposrefeicaoColumns,
f"SELECT * FROM selecttiporefeicao('{cod_TipoRefeicao}');")
@tiposrefeicao.route("/api/tiposrefeicao", methods=["POST"])
@tiposrefeicao.route("/api/tiposrefeicao/", methods=["POST"])
def post_tiporefeicao():
return utils.postOne(
tiposrefeicaoColumns,
f"SELECT * FROM inserttiposrefeicao('{json.dumps(request.json)}');")
@tiposrefeicao.route("/api/tiposrefeicao/<cod_TipoRefeicao>", methods=["PUT"])
@tiposrefeicao.route("/api/tiposrefeicao/<cod_TipoRefeicao>/", methods=["PUT"])
def put_tiporefeicao(cod_TipoRefeicao):
return utils.putOne(
f"CALL updatetiposrefeicao('{cod_TipoRefeicao}', '{json.dumps(request.json)}');"
)
@tiposrefeicao.route("/api/tiposrefeicao/<cod_TipoRefeicao>",
methods=["DELETE"])
@tiposrefeicao.route("/api/tiposrefeicao/<cod_TipoRefeicao>/",
methods=["DELETE"])
def delete_tiporefeicao(cod_TipoRefeicao):
return utils.deleteOne(f"CALL deletetiposrefeicao('{cod_TipoRefeicao}');") | 37.152174 | 88 | 0.715038 | 0 | 0 | 0 | 0 | 1,503 | 0.879462 | 0 | 0 | 716 | 0.418958 |
87952a504dcf731fe7d1f7b6f19e351c3c8eec5a | 1,464 | py | Python | environment/postgresql_backups/scripts/insert.py | DarkSideMoon/metrics-dotnet-samples | 55fb9969370b500b76c01946039aa06421895f4f | [
"MIT"
] | null | null | null | environment/postgresql_backups/scripts/insert.py | DarkSideMoon/metrics-dotnet-samples | 55fb9969370b500b76c01946039aa06421895f4f | [
"MIT"
] | 8 | 2021-11-28T21:58:19.000Z | 2022-03-01T00:04:06.000Z | environment/postgresql_backups/scripts/insert.py | DarkSideMoon/metrics-dotnet-samples | 55fb9969370b500b76c01946039aa06421895f4f | [
"MIT"
] | null | null | null | #!/usr/bin/env /usr/local/opt/python@3.9/bin/python3.9
import sys
import psycopg2
from faker import Faker
from contextlib import closing
from random import randint
import time
if __name__ == "__main__":
total = 10
batch_size = 1000
if len(sys.argv) > 1:
total = int(sys.argv[1])
if len(sys.argv) > 2:
batch_size = int(sys.argv[2])
fake = Faker(["en_US"], use_weighting=False)
with closing(psycopg2.connect(dbname='hla', user='postgres',
password='postgres', host='localhost', port = 5432)) as conn:
with closing(conn.cursor()) as cursor:
start_time = time.monotonic()
for i in range(total):
author = fake.first_name() + " " + fake.last_name()
title = fake.text(max_nb_chars=20).replace(".", "")
year = fake.date_between(start_date='-75y', end_date='today').year
category_id = randint(1,3)
record = (category_id, author, title, year)
cursor.execute("INSERT INTO books (id, category_id, author, title, year) VALUES (nextval('books_seq'), %s, %s, %s, %s)", record)
if i>0 and i%batch_size == 0:
print("commit at",i)
conn.commit()
conn.commit()
elapsed = round(time.monotonic() - start_time, 2)
print()
print("Inserted ", total, " records in", elapsed, "sec") | 32.533333 | 144 | 0.561475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 272 | 0.185792 |
87957d42b517250e340c85eabe5b2c0215cd012e | 2,092 | py | Python | DataVis/faradaysky_disp.py | EmmaAlexander/possum-tools | 051ebca682cd97b68fa2a89c9d67e99cf85b09c7 | [
"MIT"
] | 5 | 2021-11-18T13:27:30.000Z | 2021-12-05T00:15:33.000Z | DataVis/faradaysky_disp.py | EmmaAlexander/possum-tools | 051ebca682cd97b68fa2a89c9d67e99cf85b09c7 | [
"MIT"
] | null | null | null | DataVis/faradaysky_disp.py | EmmaAlexander/possum-tools | 051ebca682cd97b68fa2a89c9d67e99cf85b09c7 | [
"MIT"
] | null | null | null | import os
import sys
import matplotlib.pyplot as plt
import numpy as np
import cmasher as cmr
import astropy.units as u
import astropy.coordinates as coord
from astropy.io import ascii
from astropy.io import fits
from astropy.wcs import WCS
from functions import *
plt.rcParams['mathtext.fontset'] = 'cm'
plt.rcParams['font.family'] = 'cmu serif'
SMALL_SIZE = 8
MEDIUM_SIZE = 8
BIGGER_SIZE = 12
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
fig_directory='/Users/emma/OneDrive/PhD/thesis/Figures/'
cmap_blue = cmr.get_sub_cmap('twilight_shifted', 0, 0.5)
cmap_red = cmr.get_sub_cmap('twilight', 0.5, 1)
cmap_redblue=cmr.get_sub_cmap('twilight_shifted', 0.1, 0.9)
cmap=plt.cm.twilight_shifted
def main():
    """Display the pilot Faraday-depth cutout and print the RM value
    (with uncertainty) at each source position listed in source_coords.txt."""
    faradaysky, header = fitsopen('/Volumes/TARDIS/Work/askap/Faraday_cutout_pilot.fits')
    # Only the data plane of the error map is needed; its header was unused.
    faradayuncertainty, _ = fitsopen('/Volumes/TARDIS/Work/askap/Faraday_error_pilot.fits')
    print(faradaysky.shape)
    wcs = WCS(header)
    # Each row: RA (hour-angle string), Dec (degree string).
    sources = np.loadtxt('source_coords.txt', dtype='str')
    plt.figure()
    ax = plt.subplot(projection=wcs)
    c = ax.imshow(faradaysky, origin='lower', cmap=cmap_redblue, vmin=-50, vmax=50)
    # Colorbar return value was bound to an unused name; the call is kept.
    plt.colorbar(c, fraction=0.046, pad=0.04)
    for i in range(0, sources.shape[0]):
        # Convert the catalogue coordinates to pixel indices on this WCS.
        ra_ha = coord.Angle(sources[i, 0], unit=u.hourangle)
        ra = coord.Angle(ra_ha, unit=u.degree)
        dec = coord.Angle(sources[i, 1], unit=u.degree)
        coords = coord.SkyCoord(ra=ra, dec=dec)
        pixcoords = wcs.world_to_pixel(coords)
        x = int(round(float(pixcoords[0])))
        y = int(round(float(pixcoords[1])))
        plt.scatter(pixcoords[0], pixcoords[1], marker='.', color='k')
        # Numpy arrays index as [row, col] == [y, x].
        RM = faradaysky[y, x]
        RMerr = faradayuncertainty[y, x]
        print(sources[i, 0], sources[i, 1])
        print('{} +/- {}'.format(RM, RMerr))
    plt.show()
if __name__ == "__main__":
main() | 30.318841 | 91 | 0.736616 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 503 | 0.24044 |
8797dc3ef1031456c069cbbb89dbdaafc3b5a76e | 137 | py | Python | ty.py | IsSveshuD/lab_2_12 | e7a276292fed67764526fff4dda582a86f2ddf45 | [
"MIT"
] | null | null | null | ty.py | IsSveshuD/lab_2_12 | e7a276292fed67764526fff4dda582a86f2ddf45 | [
"MIT"
] | null | null | null | ty.py | IsSveshuD/lab_2_12 | e7a276292fed67764526fff4dda582a86f2ddf45 | [
"MIT"
] | null | null | null | import re
def c(text, chars=" !?"):
rx = re.compile(f'{chars}')
text = rx.sub(r'-', text)
print(text)
# Demo call: substitutes the default character set (' ', '!', '?').
a = 'dsf !?#'
c(a)
| 11.416667 | 31 | 0.489051 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.20438 |
8799ab51d7c6f840177cf89a06ffd228899158aa | 2,859 | py | Python | polyA/fill_confidence_matrix.py | TravisWheelerLab/polyA | cbab7f2416066fd24340913fbf5328fb36432131 | [
"BSD-3-Clause"
] | 3 | 2021-01-15T11:39:30.000Z | 2021-01-26T07:28:32.000Z | polyA/fill_confidence_matrix.py | TravisWheelerLab/polyA | cbab7f2416066fd24340913fbf5328fb36432131 | [
"BSD-3-Clause"
] | 21 | 2020-12-09T23:07:43.000Z | 2021-09-23T03:05:35.000Z | polyA/fill_confidence_matrix.py | TravisWheelerLab/polyA | cbab7f2416066fd24340913fbf5328fb36432131 | [
"BSD-3-Clause"
] | null | null | null | from typing import Dict, List, Tuple
from .confidence_cm import confidence_cm
from .matrices import ConfidenceMatrix
from .performance import timeit
@timeit()
def fill_confidence_matrix(
    column_count: int,
    subfam_counts: Dict[str, float],
    subfams: List[str],
    active_cells: Dict[int, List[int]],
    align_matrix: Dict[Tuple[int, int], float],
) -> ConfidenceMatrix:
    """
    Convert the alignment matrix into a confidence matrix.  Each column of
    the alignment matrix holds the scores of the annotations competing at
    that position; those scores are handed to confidence_cm and the
    resulting confidence values are written back to the same cells of the
    output matrix.

    Inputs:

    column_count - number of columns to process
    subfam_counts - mapping of subfamily names to their prior counts
    subfams - list of subfamily names taken from the original alignments
    active_cells - map of column index to the list of non-empty row
    indices for that column
    align_matrix - the alignment matrix from fill_align_matrix

    Outputs:

    confidence_matrix - sparse 2D matrix implemented as a dict keyed by
    (row, col).  Rows are subfamilies from the input alignment file, cols
    are nucleotide positions, and each value is the confidence score
    computed from all alignment scores in that column.

    >>> align_mat = {
    ...     (0, 0): 10, (0, 1): 10, (0, 2): 10, (0, 3): 10, (0, 4): 10,
    ...     (1, 1): 42, (1, 2): 41, (1, 3): 41, (2, 1): 45, (2, 2): 43, (2, 3): 39,
    ... }
    >>> active = {0: [0], 1: [0, 1, 2], 2: [0, 1, 2], 3: [0, 1, 2], 4: [0]}
    >>> col_count = 5
    >>> counts = {"skip": 0.4, "s1": .33, "s2": .33}
    >>> subs = ["skip", "s1", "s2"]
    >>> conf_mat = fill_confidence_matrix(col_count, counts, subs, active, align_mat)
    >>> f"{conf_mat[1,1]:.4f}"
    '0.1100'
    >>> f"{conf_mat[2,1]:.4f}"
    '0.8800'
    >>> f"{conf_mat[1,2]:.4f}"
    '0.1980'
    >>> f"{conf_mat[2,2]:.4f}"
    '0.7920'
    >>> f"{conf_mat[1,3]:.4f}"
    '0.7920'
    >>> f"{conf_mat[2,3]:.4f}"
    '0.1980'
    """
    confidence_matrix: ConfidenceMatrix = {}
    for col in range(column_count):
        rows = active_cells[col]
        # Gather the competing alignment scores for this column.
        column_scores: List[float] = [align_matrix[row, col] for row in rows]
        column_confidence: List[float] = confidence_cm(
            column_scores,
            subfam_counts,
            subfams,
            rows,
            0,
            False,
        )
        # Write each confidence value back into its (row, col) cell.
        for idx, row in enumerate(rows):
            confidence_matrix[row, col] = column_confidence[idx]
    return confidence_matrix
| 33.635294 | 85 | 0.622595 | 0 | 0 | 0 | 0 | 2,706 | 0.946485 | 0 | 0 | 1,784 | 0.623994 |
8799edb410a9b593ae1ec6c87d90f8b46275cfe2 | 1,005 | py | Python | website/articles.py | ceyeoh/fyp_doppler | 4805378d57870560f8a8b450ec49b6c72a85962a | [
"MIT"
] | null | null | null | website/articles.py | ceyeoh/fyp_doppler | 4805378d57870560f8a8b450ec49b6c72a85962a | [
"MIT"
] | null | null | null | website/articles.py | ceyeoh/fyp_doppler | 4805378d57870560f8a8b450ec49b6c72a85962a | [
"MIT"
] | null | null | null | from flask import Blueprint, render_template
from flask_login import login_required, current_user
# Blueprint collecting the static FGR (fetal growth restriction) article pages.
articles = Blueprint(
    "articles",
    __name__,
)
@articles.route("/intro-fgr")
@login_required
def intro():
return render_template("article-intro-fgr.html", user=current_user)
@articles.route("/causes-fgr")
@login_required
def causes():
return render_template("article-causes-fgr.html", user=current_user)
@articles.route("/twinsrisk-fgr")
@login_required
def twinsrisk():
return render_template("article-twinsrisk-fgr.html", user=current_user)
@articles.route("/symptoms-fgr")
@login_required
def symptoms():
return render_template("article-symptoms-fgr.html", user=current_user)
@articles.route("/diagnosis-fgr")
@login_required
def diagnosis():
return render_template("article-diagnosis-fgr.html", user=current_user)
@articles.route("/preventions-fgr")
@login_required
def preventions():
return render_template("article-preventions-fgr.html", user=current_user)
| 22.840909 | 77 | 0.763184 | 0 | 0 | 0 | 0 | 834 | 0.829851 | 0 | 0 | 262 | 0.260697 |
879aa68e685cec97b9d594f9347dab0f1569f23f | 6,181 | py | Python | src/main.py | neeraj310/DLfM_BrandManagement | 1df952ed38018391c876b822338f30ff9c9f6568 | [
"Apache-2.0"
] | null | null | null | src/main.py | neeraj310/DLfM_BrandManagement | 1df952ed38018391c876b822338f30ff9c9f6568 | [
"Apache-2.0"
] | null | null | null | src/main.py | neeraj310/DLfM_BrandManagement | 1df952ed38018391c876b822338f30ff9c9f6568 | [
"Apache-2.0"
] | null | null | null | from flask import Flask, render_template, request, redirect, url_for
import tensorflow as tf
from keras.models import load_model
from keras.backend import set_session
from src.utils import image_preprocessing
from src.utils import overall_class_label
from src.utils import infinite_scraper
# sessions and default graphs are needed to make tensorflow work properly
# NOTE(review): 'global' at module level is a no-op in Python; kept only to
# signal that these names are shared with the request handlers below.
global sess
global graph
sess = tf.Session()
graph = tf.get_default_graph()
set_session(sess)
# One binary classifier per brand attribute, loaded once at startup.
num_attributes = 4
model = [[] for i in range(num_attributes)]
model[0] = load_model('./model/augmented/glamarous_model.h5')
model[1] = load_model('./model/augmented/rugged_model.h5')
model[2] = load_model('./model/augmented/fun_model.h5')
model[3] = load_model('./model/augmented/healthy_model.h5')
app = Flask(__name__)
# this function collects images from the official and the unofficial (hashtag) page
def data_collection(official, unofficial):
    # specify number of images to retrieve
    # Note: you do not retrieve exactly 36 images but 36 + the last batch
    LIMIT_IMAGE_COUNT = 36
    # specify your 'personal' instagram page, needed to get access to the API
    # Note: Instagram will block access to their API when you retrieve too many images within a short amount of time
    # SECURITY NOTE(review): real account credentials are committed in source.
    # Move them to environment variables / a secrets store and rotate them.
    user_name = 'chenpeling@hotmail.com'
    password = 'Instagram2020'
    # retrieve lists of URLs for both the official and unofficial account
    official_images = infinite_scraper.official(user_name, password, LIMIT_IMAGE_COUNT, official)
    unofficial_images = infinite_scraper.unofficial(user_name, password, LIMIT_IMAGE_COUNT, unofficial)
    return official_images, unofficial_images
# this function reformats the collected images to make them usable as model input (X_test)
# the images are stored in python objects, that way the user does not have to download images on his/her computer
def data_preprocessing(official_images, unofficial_images):
    """Reformat both URL lists into model-ready input (kept in memory)."""
    # The same preprocessing pipeline is applied to each account's images.
    return (
        image_preprocessing.preprocessing(official_images),
        image_preprocessing.preprocessing(unofficial_images),
    )
# this function takes the preprocessed images and feeds them into the pretrained models
# as output we get a list with the predicted labels
def make_prediction(preprocessed_data):
    """Run every attribute model over the preprocessed images.

    Returns:
        (prediction, total): list with one label name per image
        ('glamorous' / 'rugged' / 'fun' / 'healthy') and its length.
    """
    X_test = preprocessed_data
    # tensorflow specifics to correctly use a pretrained model
    with graph.as_default():
        set_session(sess)
        # One prediction array per attribute model (replaces the redundant
        # pre-allocation + index-assignment loop of the original).
        y_pred = [m.predict(X_test) for m in model]
    # Reduce the per-attribute scores to one encoded class index per image.
    y_pred_label = overall_class_label.give_ovr_class_label_output(y_pred)
    # map back to original label name (.get mirrors the original map() call,
    # yielding None for an unexpected code instead of raising)
    code2label = {0: 'glamorous', 1: 'rugged', 2: 'fun', 3: 'healthy'}
    prediction = [code2label.get(code) for code in y_pred_label.tolist()]
    total = len(prediction)
    return prediction, total
# the homepage the user sees when starting the application
@app.route("/", methods=["POST", "GET"])
def index():
# once the user entered the data and clicked on 'Predict', the data is captured and redirected to the predict page
if request.method == "POST":
official = request.form["official"]
unofficial = request.form["unofficial"]
return redirect(url_for("predict", official=official, unofficial=unofficial))
else:
return render_template("index.html")
# the page the user gets redirected to after hitting the 'Predict' button on the homepage
# Note: the entire pipeline takes a couple of minutes since we feed every picture in each of the four models
@app.route("/predict/", methods=["POST", "GET"])
def predict():
official = request.args.get('official')
unofficial = request.args.get('unofficial')
official_images, unofficial_images = data_collection(official, unofficial)
preprocessed_data_official, preprocessed_data_unofficial = data_preprocessing(official_images, unofficial_images)
prediction_official, total_official = make_prediction(preprocessed_data_official)
prediction_unofficial, total_unofficial = make_prediction(preprocessed_data_unofficial)
# generate the numbers to be displayed in the analysis table
# for official:
fun_official = prediction_official.count('fun')
glamorous_official = prediction_official.count('glamorous')
healthy_official = prediction_official.count('healthy')
rugged_official = prediction_official.count('rugged')
# for unofficial:
fun_unofficial = prediction_unofficial.count('fun')
glamorous_unofficial = prediction_unofficial.count('glamorous')
healthy_unofficial = prediction_unofficial.count('healthy')
rugged_unofficial = prediction_unofficial.count('rugged')
# for relative table:
fun_official_rel = round(fun_official/total_official*100)
fun_unofficial_rel = round(fun_unofficial/total_unofficial*100)
glamorous_official_rel = round(glamorous_official / total_official*100)
glamorous_unofficial_rel = round(glamorous_unofficial / total_unofficial*100)
healthy_official_rel = round(healthy_official / total_official*100)
healthy_unofficial_rel = round(healthy_unofficial / total_unofficial*100)
rugged_official_rel = round(rugged_official / total_official*100)
rugged_unofficial_rel = round(rugged_unofficial / total_unofficial*100)
return render_template("predict.html", fo=fun_official, fu=fun_unofficial, fo_rel=fun_official_rel, fu_rel=fun_unofficial_rel,
go=glamorous_official, gu=glamorous_unofficial, go_rel=glamorous_official_rel, gu_rel=glamorous_unofficial_rel,
ho=healthy_official, hu=healthy_unofficial, ho_rel=healthy_official_rel, hu_rel=healthy_unofficial_rel,
ro=rugged_official, ru=rugged_unofficial, ro_rel=rugged_official_rel, ru_rel=rugged_unofficial_rel,
to=total_official, tu=total_unofficial, unofficial=unofficial)
if __name__ == "__main__":
app.run(host="127.0.0.1", port=8080, debug=True) | 49.055556 | 138 | 0.761204 | 0 | 0 | 0 | 0 | 2,789 | 0.451221 | 0 | 0 | 1,867 | 0.302055 |
879ae2125f2be56cca379202ae8598161954d149 | 2,777 | py | Python | rvaconnect/circles/migrations/0001_initial.py | rva-data/rvaconnect | dc7e387dd35971ff5514f2675532e29094843ae2 | [
"BSD-3-Clause"
] | 1 | 2015-01-27T05:24:13.000Z | 2015-01-27T05:24:13.000Z | rvaconnect/circles/migrations/0001_initial.py | rva-data/rvaconnect | dc7e387dd35971ff5514f2675532e29094843ae2 | [
"BSD-3-Clause"
] | null | null | null | rvaconnect/circles/migrations/0001_initial.py | rva-data/rvaconnect | dc7e387dd35971ff5514f2675532e29094843ae2 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the circles_group table."""
        # Adding model 'Group'
        db.create_table(u'circles_group', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
            ('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=100)),
            ('description_markdown', self.gf('django.db.models.fields.TextField')(default='')),
            ('description', self.gf('django.db.models.fields.TextField')(null=True)),
            ('status', self.gf('model_utils.fields.StatusField')(default='active', max_length=100, no_check_for_status=True)),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
            ('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('notes', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ))
        db.send_create_signal(u'circles', ['Group'])
    def backwards(self, orm):
        """Reverse the migration: drop the circles_group table."""
        # Deleting model 'Group'
        db.delete_table(u'circles_group')
models = {
u'circles.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'description_markdown': ('django.db.models.fields.TextField', [], {'default': "''"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'status': ('model_utils.fields.StatusField', [], {'default': "'active'", 'max_length': '100', 'no_check_for_status': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['circles'] | 55.54 | 138 | 0.605329 | 2,645 | 0.952467 | 0 | 0 | 0 | 0 | 0 | 0 | 1,476 | 0.531509 |
879af2c105954003dfafd0a1947df6869a7f44fa | 512 | py | Python | PythonExercicios/ex017.py | gabjohann/python_3 | 380cb622669ed82d6b22fdd09d41f02f1ad50a73 | [
"MIT"
] | null | null | null | PythonExercicios/ex017.py | gabjohann/python_3 | 380cb622669ed82d6b22fdd09d41f02f1ad50a73 | [
"MIT"
] | null | null | null | PythonExercicios/ex017.py | gabjohann/python_3 | 380cb622669ed82d6b22fdd09d41f02f1ad50a73 | [
"MIT"
] | null | null | null | # Faça um programa que leia o comprimento do cateto oposto e a do cateto adjacente de um triângulo retângulo.
# Compute and display the length of the hypotenuse.
from math import hypot
catoposto = float(input('Cateto oposto: '))
catadjacente = float(input('Cateto adjacente: '))
# hypot(a, b) == sqrt(a**2 + b**2)
print(hypot(catoposto, catadjacente))
'''
Solução da aula:
co = float(input('Comprimento do cateto oposto: '))
ca = float(input('Comprimento do cateto adjacente: '))
hi = hypot(co, ca)
print('A hipotenusa mede {:.2f}'.format(hi))
'''
| 30.117647 | 109 | 0.732422 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 393 | 0.760155 |
879b31bea6b24323ed8fa97a0bdf6f6174418a2f | 2,837 | py | Python | code/tmp_rtrip/tkinter/messagebox.py | emilyemorehouse/ast-and-me | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | [
"MIT"
] | 24 | 2018-01-23T05:28:40.000Z | 2021-04-13T20:52:59.000Z | code/tmp_rtrip/tkinter/messagebox.py | emilyemorehouse/ast-and-me | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | [
"MIT"
] | 17 | 2017-12-21T18:32:31.000Z | 2018-12-18T17:09:50.000Z | code/tmp_rtrip/tkinter/messagebox.py | emilyemorehouse/ast-and-me | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | [
"MIT"
] | null | null | null | from tkinter.commondialog import Dialog
# Icon names accepted by the Tk messageBox command.
ERROR = 'error'
INFO = 'info'
QUESTION = 'question'
WARNING = 'warning'
# Dialog type names (which button set the dialog shows).
ABORTRETRYIGNORE = 'abortretryignore'
OK = 'ok'
OKCANCEL = 'okcancel'
RETRYCANCEL = 'retrycancel'
YESNO = 'yesno'
YESNOCANCEL = 'yesnocancel'
# Possible reply values returned by the dialogs.
ABORT = 'abort'
RETRY = 'retry'
IGNORE = 'ignore'
OK = 'ok'  # NOTE: re-assigns OK defined above; harmless since both are 'ok'.
CANCEL = 'cancel'
YES = 'yes'
NO = 'no'
class Message(Dialog):
    """A message box"""
    # Tcl command executed by the Dialog base class.
    command = 'tk_messageBox'
def _show(title=None, message=None, _icon=None, _type=None, **options):
    """Assemble the option dict, run a Message dialog and normalize the reply.

    The _icon/_type hints only apply when the caller did not already supply
    'icon'/'type' in **options.
    """
    if _icon:
        options.setdefault('icon', _icon)
    if _type:
        options.setdefault('type', _type)
    if title:
        options['title'] = title
    if message:
        options['message'] = message
    res = Message(**options).show()
    # A boolean reply is mapped onto the YES/NO string constants.
    if isinstance(res, bool):
        return YES if res else NO
    # Any other reply (possibly a Tcl object) is forced to a plain string.
    return str(res)
# Convenience wrappers around _show, one per icon/button combination.
def showinfo(title=None, message=None, **options):
    """Show an info message"""
    return _show(title, message, INFO, OK, **options)
def showwarning(title=None, message=None, **options):
    """Show a warning message"""
    return _show(title, message, WARNING, OK, **options)
def showerror(title=None, message=None, **options):
    """Show an error message"""
    return _show(title, message, ERROR, OK, **options)
def askquestion(title=None, message=None, **options):
    """Ask a question"""
    return _show(title, message, QUESTION, YESNO, **options)
def askokcancel(title=None, message=None, **options):
    """Ask if operation should proceed; return true if the answer is ok"""
    s = _show(title, message, QUESTION, OKCANCEL, **options)
    return s == OK
def askyesno(title=None, message=None, **options):
    """Ask a question; return true if the answer is yes"""
    s = _show(title, message, QUESTION, YESNO, **options)
    return s == YES
def askyesnocancel(title=None, message=None, **options):
    """Ask a question; return true if the answer is yes, None if cancelled."""
    s = _show(title, message, QUESTION, YESNOCANCEL, **options)
    # str() guards against a non-string reply before comparing to CANCEL.
    s = str(s)
    if s == CANCEL:
        return None
    return s == YES
def askretrycancel(title=None, message=None, **options):
    """Ask if operation should be retried; return true if the answer is yes"""
    s = _show(title, message, WARNING, RETRYCANCEL, **options)
    return s == RETRY
if __name__ == '__main__':
    # Manual smoke test: opens each dialog in turn and prints the replies.
    print('info', showinfo('Spam', 'Egg Information'))
    print('warning', showwarning('Spam', 'Egg Warning'))
    print('error', showerror('Spam', 'Egg Alert'))
    print('question', askquestion('Spam', 'Question?'))
    print('proceed', askokcancel('Spam', 'Proceed?'))
    print('yes/no', askyesno('Spam', 'Got it?'))
    print('yes/no/cancel', askyesnocancel('Spam', 'Want it?'))
    print('try again', askretrycancel('Spam', 'Try again?'))
| 28.656566 | 78 | 0.646458 | 76 | 0.026789 | 0 | 0 | 0 | 0 | 0 | 0 | 813 | 0.28657 |
879cdfa799a43a3cd06f5d3f201e4e357ab443c1 | 550 | py | Python | models/tree.py | pigaov10/tree_manager | c85aa03d59536ebe6b8fac0407fd285094df3a65 | [
"Apache-2.0"
] | null | null | null | models/tree.py | pigaov10/tree_manager | c85aa03d59536ebe6b8fac0407fd285094df3a65 | [
"Apache-2.0"
] | null | null | null | models/tree.py | pigaov10/tree_manager | c85aa03d59536ebe6b8fac0407fd285094df3a65 | [
"Apache-2.0"
] | null | null | null | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
def configure(app):
    """Bind the shared SQLAlchemy instance to *app* and expose it as app.db."""
    db.init_app(app)
    app.db = db
class Tree(db.Model):
    """SQLAlchemy model for one catalogued tree."""
    __tablename__ = 'tree'
    id = db.Column(db.Integer, primary_key=True)
    code = db.Column(db.String(50), nullable=False)
    description = db.Column(db.String(255), nullable=False)
    age = db.Column(db.Integer(), nullable=False)
    # Future FK to a species table (left disabled in the original source):
    # specie_id = db.Column(db.Integer, db.ForeignKey('specie.id'), nullable=False)

    def __repr__(self):
        return '<Tree %r>' % self.description
879f2916cb315017dfa31f66e1aa41f28cecd9f9 | 1,159 | py | Python | plugin.video.youtube/resources/lib/youtube_plugin/kodion/impl/xbmc/xbmc_progress_dialog.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | 2 | 2018-11-02T19:55:30.000Z | 2020-08-14T02:22:20.000Z | plugin.video.youtube/resources/lib/youtube_plugin/kodion/impl/xbmc/xbmc_progress_dialog.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | null | null | null | plugin.video.youtube/resources/lib/youtube_plugin/kodion/impl/xbmc/xbmc_progress_dialog.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | 3 | 2019-12-17T20:47:00.000Z | 2021-02-11T19:03:59.000Z | # -*- coding: utf-8 -*-
"""
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
Copyright (C) 2016-2018 plugin.video.youtube
SPDX-License-Identifier: GPL-2.0-only
See LICENSES/GPL-2.0-only for more information.
"""
from six import string_types
import xbmcgui
from ..abstract_progress_dialog import AbstractProgressDialog
class XbmcProgressDialog(AbstractProgressDialog):
def __init__(self, heading, text):
AbstractProgressDialog.__init__(self, 100)
self._dialog = xbmcgui.DialogProgress()
self._dialog.create(heading, text)
# simple reset because KODI won't do it :(
self._position = 1
self.update(steps=-1)
def close(self):
if self._dialog:
self._dialog.close()
self._dialog = None
def update(self, steps=1, text=None):
self._position += steps
position = int(float((100.0 // self._total)) * self._position)
if isinstance(text, string_types):
self._dialog.update(position, text)
else:
self._dialog.update(position)
def is_aborted(self):
return self._dialog.iscanceled()
| 26.953488 | 70 | 0.648835 | 813 | 0.701467 | 0 | 0 | 0 | 0 | 0 | 0 | 275 | 0.237274 |
87a0c198aaf64fb0040e08381e0e0853c27aa4ce | 1,483 | py | Python | settings.py | vboginskey/cribfinder | fe89717b5ed503a42100069cad966f82b495d7b5 | [
"MIT"
] | null | null | null | settings.py | vboginskey/cribfinder | fe89717b5ed503a42100069cad966f82b495d7b5 | [
"MIT"
] | null | null | null | settings.py | vboginskey/cribfinder | fe89717b5ed503a42100069cad966f82b495d7b5 | [
"MIT"
] | null | null | null | import os
# Craigslist search scope.
CRAIGSLIST_SITE = 'newjersey'
CRAIGSLIST_CATEGORY = 'apa'
# Listing filters: minimum square footage and price band.
MIN_FT2 = 900
MIN_PRICE = 1500
MAX_PRICE = 3000
# Maximum distance to a transit stop -- units not stated here; presumably
# miles, TODO confirm against the consumer of this setting.
MAX_TRANSIT_DISTANCE = 0.75
BOXES = {
"Hoboken": [40.734966101, -74.0439891815, 40.7529789172, -74.0192699432],
"The Heights": [40.7332100782, -74.0573787689, 40.7615609255, -74.0378093719],
"Downtown": [40.7111582926, -74.05626297, 40.7357465407, -74.0299129486],
"Journal Square": [40.7131100727, -74.0830850601, 40.7402664072, -74.0509414673]
}
NEIGHBORHOODS = ["hoboken", "journal square", "heights", "newport", "grove",
"downtown", "paulus hook", "powerhouse", "exchange place",
"waterfront", "jersey city"]
STATIONS = {
"9th St light rail": [40.748874, -74.038552],
"2nd St light rail": [40.741594, -74.042730],
"Newport light rail": [40.726828, -74.036256],
"Harsimus Cove light rail": [40.722663, -74.037288],
"Harborside light rail": [40.719514, -74.034019],
"Exchange Pl light rail": [40.715993, -74.034118],
"Essex St light rail": [40.712847, -74.036114],
"Marin Blvd light rail": [40.714425, -74.043264],
"Jersey Ave light rail": [40.715001, -74.048428],
"Exchange Pl PATH": [40.716738, -74.032397],
"Grove St PATH": [40.719609, -74.042642],
"Newport PATH": [40.726998, -74.033818],
"Journal Square PATH": [40.733014, -74.062882],
"Hoboken PATH": [40.734937, -74.027545]
}
SLACK_TOKEN = os.getenv('SLACK_TOKEN', '')
SLACK_CHANNEL = '#cribs'
| 34.488372 | 84 | 0.645314 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 493 | 0.332434 |
87a23d912466c34c97307a017f8d2956a06cdbc3 | 1,729 | py | Python | examples/black_lives/create_progmem.py | fejiso/PxMatrix | fc53edf18af43ab3d0459890c0575243a3592445 | [
"BSD-3-Clause"
] | 599 | 2018-03-31T21:56:45.000Z | 2022-03-26T03:31:30.000Z | examples/black_lives/create_progmem.py | fejiso/PxMatrix | fc53edf18af43ab3d0459890c0575243a3592445 | [
"BSD-3-Clause"
] | 291 | 2018-03-29T11:59:26.000Z | 2022-03-24T19:44:32.000Z | examples/black_lives/create_progmem.py | fejiso/PxMatrix | fc53edf18af43ab3d0459890c0575243a3592445 | [
"BSD-3-Clause"
] | 144 | 2018-03-31T04:45:50.000Z | 2022-03-29T15:00:22.000Z | #!/usr/bin/python
import binascii
import sys
import glob, os
import pdb
# Number of .rgb files discovered.
file_no=0;
# Names of the discovered animation files, in glob order.
file_names=[];
# 1 => emit RGB565 (2 bytes/pixel); 0 => emit RGB888 (3 bytes/pixel).
RGB565=1;
# Accumulates the generated C source text.
out_string="";
def printrgb565(red, green, blue):
    """Pack an 8-bit RGB triple into RGB565 and return it as C source text.

    Emits the low byte first, then the high byte ("0xLL,0xHH,"), matching
    the original output order.
    """
    x1 = (red & 0xF8) | (green >> 5)
    x2 = ((green & 0x1C) << 3) | (blue >> 3)
    # '{:02x}' produces the same lowercase hex that binascii.hexlify(chr(x))
    # yielded on Python 2, but also works on Python 3 (where hexlify(chr(x))
    # raises because chr() returns str, not bytes).
    return "0x{:02x},0x{:02x},".format(x2, x1)
def printrgb888(red, green, blue):
    """Return one RGB888 pixel ("0xRR,0xGG,0xBB,") as C source text.

    Arguments are single-byte values as read from the .rgb file.
    """
    # ord() accepts a 1-byte str (Py2) or a 1-byte bytes object (Py3);
    # the original str(binascii.hexlify(red)) was Python-2 specific.
    return "0x{:02x},0x{:02x},0x{:02x},".format(ord(red), ord(green), ord(blue))
out_string="uint8_t animation_lengths[]={";
for file in glob.glob("*.rgb"):
file_no=file_no+1;
file_names.append(str(file))
size = os.path.getsize(str(file))/64/32/3
out_string+=str(size)+ ",";
out_string=out_string[:-1];
out_string+="};\nconst uint8_t animations[] PROGMEM = {";
print (out_string)
byte_count=0;
for file_name in file_names:
size = os.path.getsize(str(file_name))
print(str(file_name)+ "- source_size: " + str(size));
with open(file_name, 'rb') as f:
byte0 = f.read(1)
while byte0 != "":
byte1 = f.read(1)
byte2 = f.read(1)
# Do stuff with byte.
if (RGB565):
out_string+=printrgb565(ord(byte0), ord(byte1), ord(byte2))
byte_count=byte_count+2;
else:
out_string+=printrgb888(byte0, byte1, byte2,out_string)
byte_count=byte_count+3;
if ((byte_count%10)==0):
out_string+="\n";
byte0 = f.read(1)
#print(str(file_name)+ "- out_size: " + str(byte_count));
out_string+="0x00};";
out_file = open("anim_data.h", "w");
out_file.write(out_string);
out_file.close();
| 27.444444 | 65 | 0.626952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 284 | 0.164257 |
87a2ad1b1b371bea7b87cb1eb38aeedd4b3b8d00 | 210 | py | Python | q5.py | fairoz-ahmed/Python_Practice | e498f81fca02f0773f1c6e9f93e5f1cf1f94eb89 | [
"MIT"
] | null | null | null | q5.py | fairoz-ahmed/Python_Practice | e498f81fca02f0773f1c6e9f93e5f1cf1f94eb89 | [
"MIT"
] | null | null | null | q5.py | fairoz-ahmed/Python_Practice | e498f81fca02f0773f1c6e9f93e5f1cf1f94eb89 | [
"MIT"
] | null | null | null | class Input_data():
    def __init__(self):
        # Buffer for the string read from standard input.
        self.s=''
    def getString(self):
        """Read one line from standard input and store it."""
        self.s = input()
    def printString(self):
        """Print the stored string in upper case."""
        print(self.s.upper())
# Demo: read a line and echo it back in upper case.
strobj=Input_data()
strobj.getString()
strobj.printString()
87a4472be6c42e97657d22b4feb81b29914bd105 | 95 | py | Python | SP/Modul_02/skrypt_01.py | edu-sense-com/OSE-Python-Course | cbf93e18b0cdbcaf54483f6fac5faafd372de068 | [
"MIT"
] | null | null | null | SP/Modul_02/skrypt_01.py | edu-sense-com/OSE-Python-Course | cbf93e18b0cdbcaf54483f6fac5faafd372de068 | [
"MIT"
] | null | null | null | SP/Modul_02/skrypt_01.py | edu-sense-com/OSE-Python-Course | cbf93e18b0cdbcaf54483f6fac5faafd372de068 | [
"MIT"
] | null | null | null | print("To jest pierwsza linia.")
print("To jest druga linia.")
print("To jest trzecia linia.")
| 23.75 | 32 | 0.715789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.747368 |
87a487207e754b62b27676fbeca5d8fa0f49a8b7 | 7,340 | py | Python | lisa_flexbe_states_flexbe_behaviors/src/lisa_flexbe_states_flexbe_behaviors/test_multiple_sm.py | lawrence-iviani/lisa-flexbe-states | 5a228b7a9139394c9bd9ea386725226fef7844ac | [
"BSD-3-Clause"
] | null | null | null | lisa_flexbe_states_flexbe_behaviors/src/lisa_flexbe_states_flexbe_behaviors/test_multiple_sm.py | lawrence-iviani/lisa-flexbe-states | 5a228b7a9139394c9bd9ea386725226fef7844ac | [
"BSD-3-Clause"
] | null | null | null | lisa_flexbe_states_flexbe_behaviors/src/lisa_flexbe_states_flexbe_behaviors/test_multiple_sm.py | lawrence-iviani/lisa-flexbe-states | 5a228b7a9139394c9bd9ea386725226fef7844ac | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from lisa_flexbe_states_flexbe_states.lisa_utter_state import LisaUtterState
from lisa_flexbe_states_flexbe_states.lisa_utter_actionlib_state import LisaUtterActionState
from lisa_flexbe_states_flexbe_states.lisa_utter_and_wait_for_intent_state import LisaUtterAndWaitForIntentState
from flexbe_states.check_condition_state import CheckConditionState
from lisa_flexbe_states_flexbe_states.lisa_extract_payload_key import LisaGetPayloadKeyState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Mon Nov 25 2020
@author: lawrence iviani
'''
class test_multipleSM(Behavior):
    '''
    A test of interactions with several repeated blocks.

    Flow: utter an intro text, run a "GetTime" intent interaction, utter a
    second text, run a "YesNo" intent interaction, then either finish (answer
    "Yes") or utter a repeat prompt and start over.

    NOTE: this class body is FlexBE-generated code; manual changes outside the
    [MANUAL_*] tag pairs may be lost on regeneration.
    '''

    def __init__(self):
        super(test_multipleSM, self).__init__()
        self.name = 'test_multiple'

        # parameters of this behavior

        # references to used behaviors

        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]

        # [/MANUAL_INIT]

        # Behavior comments:

    def create(self):
        # Tunable timings (seconds) and the intents used by the two interaction steps.
        wait_time_utter = 5
        context_id = "test_multiple"
        intent_1 = ["GetTime"]
        intent_2 = ["YesNo"]
        suspend_time = 1.5
        wait_time_interaction = 10
        # x:633 y:607, x:643 y:65
        _state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
        # Texts uttered by the individual states (remapped via userdata keys).
        _state_machine.userdata.utter_1 = "Utterance example 1"
        _state_machine.userdata.utter_2 = "Utterance example 2, a little bit longer"
        _state_machine.userdata.utter_repeat = "Repeat the test"
        _state_machine.userdata.utter_and_intent_1 = "Intent is Get Time"
        _state_machine.userdata.utter_and_intent_2 = "Intent is Continue Yes or no"

        # Additional creation code can be added inside the following tags
        # [MANUAL_CREATE]

        # [/MANUAL_CREATE]

        # State wiring: Utter_1 -> UtterAndWaitForIntent_1 (GetTime) -> Utter_2
        # -> UtterAndWaitForIntent_2 (YesNo) -> get_answer -> check_finish,
        # with *_not_recogn states retrying their interaction and UtterNoTimeout
        # looping back to Utter_1 when the answer is not "Yes".
        with _state_machine:
            # x:62 y:59
            OperatableStateMachine.add('Utter_1',
                                       LisaUtterState(context_id=context_id, wait_time=wait_time_utter, suspend_time=suspend_time),
                                       transitions={'done': 'UtterAndWaitForIntent_1', 'preempt': 'finished', 'timeouted': 'UtterAndWaitForIntent_1', 'error': 'failed'},
                                       autonomy={'done': Autonomy.Off, 'preempt': Autonomy.Off, 'timeouted': Autonomy.Off, 'error': Autonomy.Off},
                                       remapping={'text_to_utter': 'utter_1', 'error_reason': 'error_reason'})

            # x:1173 y:38
            OperatableStateMachine.add('UtterActionLib',
                                       LisaUtterActionState(text_to_utter='Intent Not Recognized', wait_time=0),
                                       transitions={'uttered_all': 'finished', 'timeout': 'failed', 'command_error': 'failed'},
                                       autonomy={'uttered_all': Autonomy.Off, 'timeout': Autonomy.Off, 'command_error': Autonomy.Off},
                                       remapping={'error_reason': 'error_reason'})

            # x:596 y:287
            OperatableStateMachine.add('Utter_2',
                                       LisaUtterState(context_id=context_id, wait_time=wait_time_utter, suspend_time=suspend_time),
                                       transitions={'done': 'UtterAndWaitForIntent_2', 'preempt': 'finished', 'timeouted': 'UtterAndWaitForIntent_2', 'error': 'failed'},
                                       autonomy={'done': Autonomy.Off, 'preempt': Autonomy.Off, 'timeouted': Autonomy.Off, 'error': Autonomy.Off},
                                       remapping={'text_to_utter': 'utter_2', 'error_reason': 'error_reason'})

            # x:1045 y:374
            OperatableStateMachine.add('UtterAndWaitForIntent_2',
                                       LisaUtterAndWaitForIntentState(context_id=context_id, intents=intent_2, wait_time=wait_time_interaction),
                                       transitions={'intent_recognized': 'get_answer', 'intent_not_recognized': 'utter_not_recogn_2', 'preempt': 'finished', 'timeouted': 'utter_not_recogn_2', 'error': 'failed'},
                                       autonomy={'intent_recognized': Autonomy.Off, 'intent_not_recognized': Autonomy.Off, 'preempt': Autonomy.Off, 'timeouted': Autonomy.Off, 'error': Autonomy.Off},
                                       remapping={'text_to_utter': 'utter_and_intent_2', 'payload': 'payload', 'original_sentence': 'original_sentence', 'error_reason': 'error_reason', 'intent_recognized': 'intent_recognized'})

            # x:17 y:609
            OperatableStateMachine.add('UtterNoTimeout',
                                       LisaUtterState(context_id=context_id, wait_time=0, suspend_time=0),
                                       transitions={'done': 'Utter_1', 'preempt': 'finished', 'timeouted': 'Utter_1', 'error': 'failed'},
                                       autonomy={'done': Autonomy.Off, 'preempt': Autonomy.Off, 'timeouted': Autonomy.Off, 'error': Autonomy.Off},
                                       remapping={'text_to_utter': 'utter_repeat', 'error_reason': 'error_reason'})

            # x:1373 y:612
            OperatableStateMachine.add('check_finish',
                                       CheckConditionState(predicate=lambda x: x=="Yes"),
                                       transitions={'true': 'UtterActionLib', 'false': 'UtterNoTimeout'},
                                       autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
                                       remapping={'input_value': 'answer'})

            # x:171 y:264
            OperatableStateMachine.add('utter_not_recogn_1',
                                       LisaUtterActionState(text_to_utter="Intent 1 not recognized try again", wait_time=wait_time_utter),
                                       transitions={'uttered_all': 'UtterAndWaitForIntent_1', 'timeout': 'UtterAndWaitForIntent_1', 'command_error': 'failed'},
                                       autonomy={'uttered_all': Autonomy.Off, 'timeout': Autonomy.Off, 'command_error': Autonomy.Off},
                                       remapping={'error_reason': 'error_reason'})

            # x:937 y:513
            OperatableStateMachine.add('utter_not_recogn_2',
                                       LisaUtterActionState(text_to_utter="Intent 2 not recognized try again", wait_time=wait_time_utter),
                                       transitions={'uttered_all': 'UtterAndWaitForIntent_2', 'timeout': 'UtterAndWaitForIntent_2', 'command_error': 'failed'},
                                       autonomy={'uttered_all': Autonomy.Off, 'timeout': Autonomy.Off, 'command_error': Autonomy.Off},
                                       remapping={'error_reason': 'error_reason'})

            # x:289 y:121
            OperatableStateMachine.add('UtterAndWaitForIntent_1',
                                       LisaUtterAndWaitForIntentState(context_id=context_id, intents=intent_1, wait_time=wait_time_interaction),
                                       transitions={'intent_recognized': 'Utter_2', 'intent_not_recognized': 'utter_not_recogn_1', 'preempt': 'finished', 'timeouted': 'utter_not_recogn_1', 'error': 'failed'},
                                       autonomy={'intent_recognized': Autonomy.Off, 'intent_not_recognized': Autonomy.Off, 'preempt': Autonomy.Off, 'timeouted': Autonomy.Off, 'error': Autonomy.Off},
                                       remapping={'text_to_utter': 'utter_and_intent_1', 'payload': 'payload', 'original_sentence': 'original_sentence', 'error_reason': 'error_reason', 'intent_recognized': 'intent_recognized'})

            # x:1342 y:454
            OperatableStateMachine.add('get_answer',
                                       LisaGetPayloadKeyState(payload_key='confirm'),
                                       transitions={'done': 'check_finish', 'error': 'failed'},
                                       autonomy={'done': Autonomy.Off, 'error': Autonomy.Off},
                                       remapping={'payload': 'payload', 'payload_value': 'answer'})

        return _state_machine

    # Private functions can be added inside the following tags
    # [MANUAL_FUNC]

    # [/MANUAL_FUNC]
| 49.261745 | 198 | 0.700409 | 6,212 | 0.846322 | 0 | 0 | 0 | 0 | 0 | 0 | 3,478 | 0.473842 |
87a653834398311d35699ada567936f2e6f4ca64 | 410 | py | Python | data_preparation/jobScreening_cvpr17/extract_spectograms.py | segurac/richEmbeddings | 3279714c4b70db09740152822951cd0359fda8c8 | [
"Apache-2.0"
] | null | null | null | data_preparation/jobScreening_cvpr17/extract_spectograms.py | segurac/richEmbeddings | 3279714c4b70db09740152822951cd0359fda8c8 | [
"Apache-2.0"
] | null | null | null | data_preparation/jobScreening_cvpr17/extract_spectograms.py | segurac/richEmbeddings | 3279714c4b70db09740152822951cd0359fda8c8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import pickle
import numpy as np
from scipy.io import wavfile
import python_speech_features as fextract
# Compute log Mel filterbank features for a WAV file and pickle the
# resulting feature matrix.
# Usage: extract_spectograms.py <input.wav> <output.pkl>
wav_path = sys.argv[1]
out_path = sys.argv[2]

sample_rate, signal = wavfile.read(wav_path)
features = fextract.logfbank(signal, samplerate=sample_rate)

with open(out_path, 'wb') as sink:
    pickle.dump(features, sink)
| 15.769231 | 51 | 0.74878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.119512 |
87a6bfa19863925f03157d3ac9abad941247fbad | 2,999 | py | Python | 1.3.BayesianInference/exercises/srcs/5/connected_waxman.py | mihaighidoveanu/machine-learning-examples | e5a7ab71e52ae2809115eb7d7c943b46ebf394f3 | [
"MIT"
] | null | null | null | 1.3.BayesianInference/exercises/srcs/5/connected_waxman.py | mihaighidoveanu/machine-learning-examples | e5a7ab71e52ae2809115eb7d7c943b46ebf394f3 | [
"MIT"
] | null | null | null | 1.3.BayesianInference/exercises/srcs/5/connected_waxman.py | mihaighidoveanu/machine-learning-examples | e5a7ab71e52ae2809115eb7d7c943b46ebf394f3 | [
"MIT"
] | 1 | 2021-05-02T13:12:21.000Z | 2021-05-02T13:12:21.000Z | import numpy as np
import pymc as pm
import networkx as nx
from matplotlib import pyplot as plt
# Waxman model parameters: edge probability scale alpha, decay beta,
# and the linear domain size L.
alpha = 0.5
beta = 0.1
L = 9.0

# Start the chain from the complete graph on nodes 1..10 (edges added in
# the same (i, j), i < j order as a nested loop would produce).
G0 = nx.Graph()
G0.add_edges_from((i, j) for i in range(1, 10) for j in range(i + 1, 11))
#G0.add_path(range(1, 11))
#G0.add_path(range(1, 11))
#G0.remove_edge(2, 3)
#G0.remove_edge(3, 4)
#G0.add_edge(2, 4)
#G0.add_edge(3, 7)
#G0.add_edge(8, 10)
# nx.draw(G0, with_labels=True, font_weight='bold')
# plt.show()
@pm.stochastic(dtype=nx.Graph)
def cwg(value = G0, alpha = alpha, beta = beta, L = L):
    """Waxman-model log-probability of a graph on nodes 1..len(value):
    edge (i, j) is present with probability alpha * exp(-(j - i) / (beta * L))."""
    node_count = len(value)
    logp = 0
    for i in range(1, node_count):
        for j in range(i + 1, node_count + 1):
            if value.has_edge(i, j):
                logp += np.log(alpha) - ((j - i) / (beta * L))
            else:
                logp += np.log(1 - alpha * np.exp((i - j) / (beta * L)))
    return logp
class CWGMetropolis(pm.Metropolis):
    """PyMC step method that walks on connected Waxman graphs.

    Each step draws two distinct nodes at random and toggles the link
    between them: an existing edge is proposed for deletion (only kept
    if the graph stays connected), a missing edge for insertion. The
    move is its own inverse, so no Hastings correction is needed.
    """

    def __init__(self, stochastic):
        # Delegate all sampler bookkeeping to the plain Metropolis method.
        pm.Metropolis.__init__(self, stochastic, scale=1., verbose=0, tally=False)

    def propose(self):
        """Toggle the edge between two randomly chosen distinct nodes."""
        graph = self.stochastic.value
        # The chosen pair is stored on the graph itself so reject() can undo it.
        graph.u_new = np.random.choice(graph.nodes())
        graph.v_new = np.random.choice(graph.nodes())
        while graph.u_new == graph.v_new:
            graph.v_new = np.random.choice(graph.nodes())
        if graph.has_edge(graph.u_new, graph.v_new):
            graph.remove_edge(graph.u_new, graph.v_new)
            if not nx.is_connected(graph):
                # Removal would disconnect the graph: put the edge back,
                # leaving the state unchanged (such a move is always accepted).
                graph.add_edge(graph.u_new, graph.v_new)
        else:
            graph.add_edge(graph.u_new, graph.v_new)
        self.stochastic.value = graph

    def reject(self):
        """Undo the last proposal by toggling the considered edge back."""
        graph = self.stochastic.value
        if graph.has_edge(graph.u_new, graph.v_new):
            graph.remove_edge(graph.u_new, graph.v_new)
        else:
            graph.add_edge(graph.u_new, graph.v_new)
        self.rejected += 1
        self.stochastic.value = graph
@pm.deterministic
def average_degree(G = cwg):
    """Mean node degree of the current graph state (networkx 1.x dict API)."""
    degree_values = list(G.degree().values())
    return np.sum(degree_values) / len(G)
# Run the chain, then histogram the last 10% of the average-degree trace.
mcmc = pm.MCMC([cwg, average_degree])
mcmc.use_step_method(CWGMetropolis, cwg)
mcmc.sample(100000)

avgd_samples = mcmc.trace("average_degree")[:]
plt.hist(avgd_samples[90000:])
plt.show()

# Show the current graph state, advance the chain by 100 steps, and repeat,
# so the evolution of the sampled graph is visible across three snapshots.
for snapshot in range(3):
    nx.draw(cwg.value, with_labels=True, font_weight='bold')
    plt.show()
    if snapshot < 2:
        mcmc.sample(100)
87a6ec55d7fc458c9d50fc876766d5e4b737fb6f | 452 | py | Python | urls.py | markbate/whiskerboard | fe157c1eff068c089f6948ac5cf21f5a6ff36600 | [
"MIT"
] | 20 | 2015-03-31T09:43:43.000Z | 2021-06-12T23:41:28.000Z | urls.py | ametaireau/whiskerboard | b539337416069e0c794b4c3e4dfdd1afc64562cb | [
"MIT"
] | 5 | 2015-01-19T23:07:52.000Z | 2021-06-10T17:38:37.000Z | urls.py | ametaireau/whiskerboard | b539337416069e0c794b4c3e4dfdd1afc64562cb | [
"MIT"
] | 6 | 2015-05-14T21:05:31.000Z | 2018-04-07T22:40:39.000Z | from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
from board.feeds import EventFeed
from board.views import IndexView, ServiceView
# Register all ModelAdmin classes declared by installed apps.
admin.autodiscover()

# Status-board routes: index page, per-service detail, event feed, and admin.
urlpatterns = patterns(
    '',
    url(r'^$', IndexView.as_view(), name='index'),
    url(r'^services/(?P<slug>[-\w]+)$', ServiceView.as_view(), name='service'),
    url(r'^feed$', EventFeed(), name='feed'),
    url(r'^admin/', include(admin.site.urls)),
)
| 30.133333 | 79 | 0.692478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.172566 |
87a7a5a455773a2830eecd85f18579f1935a4094 | 20,715 | py | Python | antipetros_discordbot/cogs/general_cogs/image_manipulation_cog.py | Giddius/Antipetros_Discord_Bot | 2c139a5c0fc410385e936999989513fc1e7ebc8b | [
"MIT"
] | null | null | null | antipetros_discordbot/cogs/general_cogs/image_manipulation_cog.py | Giddius/Antipetros_Discord_Bot | 2c139a5c0fc410385e936999989513fc1e7ebc8b | [
"MIT"
] | 13 | 2021-02-19T02:22:28.000Z | 2021-02-20T03:19:11.000Z | antipetros_discordbot/cogs/general_cogs/image_manipulation_cog.py | Giddius/Antipetros_Discord_Bot | 2c139a5c0fc410385e936999989513fc1e7ebc8b | [
"MIT"
] | 2 | 2020-11-19T10:21:06.000Z | 2021-12-14T00:27:45.000Z |
# region [Imports]
# * Standard Library Imports ---------------------------------------------------------------------------->
import os
import asyncio
from io import BytesIO
from pathlib import Path
from datetime import datetime
from tempfile import TemporaryDirectory
from textwrap import dedent
# * Third Party Imports --------------------------------------------------------------------------------->
import discord
from PIL import Image, ImageEnhance
from pytz import timezone
from discord.ext import commands, flags
# * Gid Imports ----------------------------------------------------------------------------------------->
import gidlogger as glog
# * Local Imports --------------------------------------------------------------------------------------->
from antipetros_discordbot.utility.misc import make_config_name
from antipetros_discordbot.utility.enums import WatermarkPosition
from antipetros_discordbot.utility.checks import allowed_channel_and_allowed_role_2, command_enabled_checker, allowed_requester
from antipetros_discordbot.utility.embed_helpers import make_basic_embed
from antipetros_discordbot.utility.gidtools_functions import loadjson, pathmaker
from antipetros_discordbot.init_userdata.user_data_setup import ParaStorageKeeper
from antipetros_discordbot.utility.poor_mans_abc import attribute_checker
from antipetros_discordbot.utility.enums import CogState
from antipetros_discordbot.utility.replacements.command_replacement import auto_meta_info_command
# endregion[Imports]
# region [TODO]
# TODO: create regions for this file
# TODO: Document and Docstrings
# endregion [TODO]
# region [Logging]
# Module-level logger provided by gidlogger; the import is announced on load.
log = glog.aux_logger(__name__)
glog.import_notification(log, __name__)

# endregion[Logging]

# region [Constants]

# Central app-data and config handles shared by all cogs.
APPDATA = ParaStorageKeeper.get_appdata()
BASE_CONFIG = ParaStorageKeeper.get_config('base_config')
COGS_CONFIG = ParaStorageKeeper.get_config('cogs_config')
THIS_FILE_DIR = os.path.abspath(os.path.dirname(__file__))  # location of this file, does not work if app gets compiled to exe with pyinstaller

COG_NAME = "ImageManipulationCog"

# Section name for this cog in COGS_CONFIG, derived from the cog name.
CONFIG_NAME = make_config_name(COG_NAME)

# Callable reporting whether a given command is enabled in this cog's config section.
get_command_enabled = command_enabled_checker(CONFIG_NAME)

# endregion [Constants]
class ImageManipulatorCog(commands.Cog, command_attrs={'hidden': False, "name": COG_NAME}):
    """
    Commands that create or modify images: stamping/watermarking uploaded
    images and member avatars with configurable stamp overlays.
    """
    # region [ClassAttributes]

    # Section name for this cog in COGS_CONFIG.
    config_name = CONFIG_NAME
    # File extensions accepted for stamp and input images (loaded from appdata json).
    allowed_stamp_formats = set(loadjson(APPDATA["image_file_extensions.json"]))
    # Maps the user-facing position keywords to WatermarkPosition flags.
    stamp_positions = {'top': WatermarkPosition.Top, 'bottom': WatermarkPosition.Bottom, 'left': WatermarkPosition.Left, 'right': WatermarkPosition.Right, 'center': WatermarkPosition.Center}
    # Metadata consumed by the bot's readme/status tooling.
    docattrs = {'show_in_readme': True,
                'is_ready': (CogState.WORKING | CogState.OPEN_TODOS | CogState.UNTESTED | CogState.FEATURE_MISSING | CogState.NEEDS_REFRACTORING | CogState.DOCUMENTATION_MISSING,
                             "2021-02-06 05:09:20",
                             "f166431cb83ae36c91d70d7d09020e274a7ebea84d5a0c724819a3ecd2230b9eca0b3e14c2d473563d005671b7a2bf9d87f5449544eb9b57bcab615035b0f83d")}

    # Config keys this cog expects in its config section.
    required_config_data = dedent(""" avatar_stamp = ASLOGO1
                                      avatar_stamp_fraction = 0.2
                                      stamps_margin = 5
                                      stamp_fraction = 0.3""")
    # endregion[ClassAttributes]

    # region [Init]

    def __init__(self, bot):
        self.bot = bot
        self.support = self.bot.support
        self.stamp_location = APPDATA['stamps']
        # stamp name (upper-cased, underscored) -> file path; filled by _get_stamps().
        self.stamps = {}
        # Dispatch table: combined horizontal|vertical WatermarkPosition flags -> paste routine.
        self.stamp_pos_functions = {WatermarkPosition.Right | WatermarkPosition.Bottom: self._to_bottom_right,
                                    WatermarkPosition.Right | WatermarkPosition.Top: self._to_top_right,
                                    WatermarkPosition.Right | WatermarkPosition.Center: self._to_center_right,
                                    WatermarkPosition.Left | WatermarkPosition.Bottom: self._to_bottom_left,
                                    WatermarkPosition.Left | WatermarkPosition.Top: self._to_top_left,
                                    WatermarkPosition.Left | WatermarkPosition.Center: self._to_center_left,
                                    WatermarkPosition.Center | WatermarkPosition.Center: self._to_center_center,
                                    WatermarkPosition.Center | WatermarkPosition.Bottom: self._to_bottom_center,
                                    WatermarkPosition.Center | WatermarkPosition.Top: self._to_top_center}

        # self.base_map_image = Image.open(r"D:\Dropbox\hobby\Modding\Ressources\Arma_Ressources\maps\tanoa_v3_2000_w_outposts.png")
        # self.outpost_overlay = {'city': Image.open(r"D:\Dropbox\hobby\Modding\Ressources\Arma_Ressources\maps\tanoa_v2_2000_city_marker.png"),
        #                         'volcano': Image.open(r"D:\Dropbox\hobby\Modding\Ressources\Arma_Ressources\maps\tanoa_v2_2000_volcano_marker.png"),
        #                         'airport': Image.open(r"D:\Dropbox\hobby\Modding\Ressources\Arma_Ressources\maps\tanoa_v2_2000_airport_marker.png")}
        self.old_map_message = None
        self._get_stamps()
        # Per-category permission lookups backed by this cog's config section.
        self.allowed_channels = allowed_requester(self, 'channels')
        self.allowed_roles = allowed_requester(self, 'roles')
        self.allowed_dm_ids = allowed_requester(self, 'dm_ids')
        glog.class_init_notification(log, self)

    # endregion[Init]

    # region [Setup]

    async def on_ready_setup(self):
        # Re-scan the stamp folder once the bot is connected.
        self._get_stamps()
        log.debug('setup for cog "%s" finished', str(self))

    async def update(self, typus):
        return
        # NOTE(review): this debug call is unreachable because of the bare
        # `return` above -- presumably intentional while updating is a no-op.
        log.debug('cog "%s" was updated', str(self))

    # endregion[Setup]

    # region [Properties]

    @property
    def target_stamp_fraction(self):
        # Fraction of the input image size a stamp should occupy.
        return COGS_CONFIG.getfloat(CONFIG_NAME, 'stamp_fraction')

    @property
    def stamp_margin(self):
        # Margin in pixels between stamp and image border.
        return COGS_CONFIG.getint(CONFIG_NAME, 'stamps_margin')

    @property
    def avatar_stamp_fraction(self):
        # Fraction of the avatar size the avatar stamp should occupy.
        return COGS_CONFIG.getfloat(CONFIG_NAME, 'avatar_stamp_fraction')

    @property
    def avatar_stamp(self):
        # The configured avatar stamp image, at full opacity.
        return self._get_stamp_image(COGS_CONFIG.get(CONFIG_NAME, 'avatar_stamp').upper(), 1)

    # endregion[Properties]

    def _get_stamps(self):
        """Scan the stamp folder and rebuild the name -> path mapping."""
        self.stamps = {}
        for file in os.scandir(self.stamp_location):
            if os.path.isfile(file.path) is True and os.path.splitext(file.name)[1] in self.allowed_stamp_formats:
                # Normalize file names to upper-case, underscore-separated keys.
                name = file.name.split('.')[0].replace(' ', '_').strip().upper()
                self.stamps[name] = file.path

    def _get_stamp_image(self, stamp_name, stamp_opacity):
        """Load a stamp by name and scale its alpha channel by `stamp_opacity`."""
        image = Image.open(self.stamps.get(stamp_name))
        # Band 4 is the alpha channel of the (assumed RGBA) stamp image.
        alpha = image.split()[3]
        alpha = ImageEnhance.Brightness(alpha).enhance(stamp_opacity)
        image.putalpha(alpha)
        return image.copy()

    @staticmethod
    def _stamp_resize(input_image, stamp_image, factor):
        """Resize the stamp so it covers roughly `factor` of the input image size."""
        input_image_width, input_image_height = input_image.size
        input_image_width_fractioned = input_image_width * factor
        input_image_height_fractioned = input_image_height * factor
        transform_factor_width = input_image_width_fractioned / stamp_image.size[0]
        transform_factor_height = input_image_height_fractioned / stamp_image.size[1]
        # Average the per-axis factors to keep the stamp's aspect ratio.
        transform_factor = (transform_factor_width + transform_factor_height) / 2
        return stamp_image.resize((round(stamp_image.size[0] * transform_factor), round(stamp_image.size[1] * transform_factor)), resample=Image.LANCZOS)

    def _to_bottom_right(self, input_image, stamp_image, factor):
        """Paste the resized stamp into the bottom-right corner of `input_image`."""
        log.debug('pasting image to bottom_right')
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (input_image_width - _resized_stamp.size[0] - self.stamp_margin, input_image_height - _resized_stamp.size[1] - self.stamp_margin),
                          _resized_stamp)
        return input_image

    def _to_top_right(self, input_image, stamp_image, factor):
        """Paste the resized stamp into the top-right corner of `input_image`."""
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (input_image_width - _resized_stamp.size[0] - self.stamp_margin, 0 + self.stamp_margin),
                          _resized_stamp)
        return input_image

    def _to_center_right(self, input_image, stamp_image, factor):
        """Paste the resized stamp at the vertical center of the right edge."""
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (input_image_width - _resized_stamp.size[0] - self.stamp_margin, round((input_image_height / 2) - (_resized_stamp.size[1] / 2))),
                          _resized_stamp)
        return input_image

    def _to_bottom_left(self, input_image, stamp_image, factor):
        """Paste the resized stamp into the bottom-left corner of `input_image`."""
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (0 + self.stamp_margin, input_image_height - _resized_stamp.size[1] - self.stamp_margin),
                          _resized_stamp)
        return input_image

    def _to_top_left(self, input_image, stamp_image, factor):
        """Paste the resized stamp into the top-left corner of `input_image`."""
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (0 + self.stamp_margin, 0 + self.stamp_margin),
                          _resized_stamp)
        return input_image

    def _to_center_left(self, input_image, stamp_image, factor):
        """Paste the resized stamp at the vertical center of the left edge."""
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (0 + self.stamp_margin, round((input_image_height / 2) - (_resized_stamp.size[1] / 2))),
                          _resized_stamp)
        return input_image

    def _to_center_center(self, input_image, stamp_image, factor):
        """Paste the resized stamp at the center of `input_image`."""
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (round((input_image_width / 2) - (_resized_stamp.size[0] / 2)), round((input_image_height / 2) - (_resized_stamp.size[1] / 2))),
                          _resized_stamp)
        return input_image

    def _to_top_center(self, input_image, stamp_image, factor):
        """Paste the resized stamp at the horizontal center of the top edge."""
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (round((input_image_width / 2) - (_resized_stamp.size[0] / 2)), 0 + self.stamp_margin),
                          _resized_stamp)
        return input_image

    def _to_bottom_center(self, input_image, stamp_image, factor):
        """Paste the resized stamp at the horizontal center of the bottom edge."""
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (round((input_image_width / 2) - (_resized_stamp.size[0] / 2)), input_image_height - _resized_stamp.size[1] - self.stamp_margin),
                          _resized_stamp)
        return input_image

    async def _send_image(self, ctx, image, name, message_title, message_text=None, image_format=None, delete_after=None):
        """Serialize `image` to an in-memory file and post it as an embed attachment."""
        image_format = 'png' if image_format is None else image_format
        with BytesIO() as image_binary:
            image.save(image_binary, image_format.upper(), optimize=True)
            image_binary.seek(0)
            file = discord.File(fp=image_binary, filename=name.replace('_', '') + '.' + image_format)
            embed = discord.Embed(title=message_title, description=message_text, color=self.support.cyan.discord_color, timestamp=datetime.now(tz=timezone("Europe/Berlin")), type='image')
            embed.set_author(name='AntiPetros', icon_url="https://www.der-buntspecht-shop.de/wp-content/uploads/Baumwollstoff-Camouflage-olivegruen-2.jpg")
            embed.set_image(url=f"attachment://{name.replace('_','')}.{image_format}")
            await ctx.send(embed=embed, file=file, delete_after=delete_after)

    @flags.add_flag("--stamp-image", "-si", type=str, default='ASLOGO1')
    @flags.add_flag("--first-pos", '-fp', type=str, default="bottom")
    @flags.add_flag("--second-pos", '-sp', type=str, default="right")
    @flags.add_flag("--stamp-opacity", '-so', type=float, default=1.0)
    @flags.add_flag('--factor', '-f', type=float, default=None)
    @auto_meta_info_command(enabled=get_command_enabled("stamp_image"), cls=flags.FlagCommand)
    @allowed_channel_and_allowed_role_2(in_dm_allowed=False)
    @commands.max_concurrency(1, per=commands.BucketType.guild, wait=True)
    async def stamp_image(self, ctx, **flags):
        """
        Stamps an image with a small image from the available stamps.

        Usefull for watermarking images.

        Get all available stamps with '@AntiPetros available_stamps'
        """
        async with ctx.channel.typing():
            # Validate that there is an attachment, a known stamp name, and a
            # valid position-flag combination before doing any image work.
            if len(ctx.message.attachments) == 0:
                # TODO: make as embed
                await ctx.send('! **there is NO image to antistasify** !')
                return
            if flags.get('stamp_image') not in self.stamps:
                # TODO: make as embed
                await ctx.send("! **There is NO stamp with that name** !")
                return
            first_pos = self.stamp_positions.get(flags.get("first_pos").casefold(), None)
            second_pos = self.stamp_positions.get(flags.get("second_pos").casefold(), None)
            if any(_pos is None for _pos in [first_pos, second_pos]) or first_pos | second_pos not in self.stamp_pos_functions:
                # TODO: make as embed
                await ctx.send("! **Those are NOT valid position combinations** !")
                return
            for _file in ctx.message.attachments:
                # TODO: maybe make extra attribute for input format, check what is possible and working. else make a generic format list
                if any(_file.filename.endswith(allowed_ext) for allowed_ext in self.allowed_stamp_formats):
                    _stamp = self._get_stamp_image(flags.get('stamp_image'), flags.get('stamp_opacity'))
                    _stamp = _stamp.copy()
                    with TemporaryDirectory(prefix='temp') as temp_dir:
                        # Save the attachment to disk, then do the PIL work in a
                        # worker thread so the event loop stays responsive.
                        temp_file = Path(pathmaker(temp_dir, 'temp_file.png'))
                        log.debug("Tempfile '%s' created", temp_file)
                        await _file.save(temp_file)
                        in_image = await self.bot.execute_in_thread(Image.open, temp_file)
                        in_image = await self.bot.execute_in_thread(in_image.copy)
                        factor = self.target_stamp_fraction if flags.get('factor') is None else flags.get('factor')
                        pos_function = self.stamp_pos_functions.get(first_pos | second_pos)
                        in_image = await self.bot.execute_in_thread(pos_function, in_image, _stamp, factor)
                        name = 'antistasified_' + os.path.splitext(_file.filename)[0]
                        await ctx.message.delete()
                        # TODO: make as embed
                        await self._send_image(ctx, in_image, name, f"__**{name}**__")

    @auto_meta_info_command(enabled=get_command_enabled("available_stamps"))
    @allowed_channel_and_allowed_role_2(in_dm_allowed=False)
    @commands.cooldown(1, 120, commands.BucketType.channel)
    async def available_stamps(self, ctx):
        """
        Posts all available stamps.
        """
        await ctx.message.delete()
        await ctx.send(embed=await make_basic_embed(title="__**Currently available Stamps are:**__", footer="These messages will be deleted in 120 seconds", symbol='photo'), delete_after=120)
        # Post a 128x128 thumbnail embed per stamp; all of them auto-delete.
        for name, image_path in self.stamps.items():
            thumb_image = Image.open(image_path)
            thumb_image.thumbnail((128, 128))
            with BytesIO() as image_binary:
                # Yield to the event loop between stamps.
                await asyncio.sleep(0)
                thumb_image.save(image_binary, 'PNG', optimize=True)
                image_binary.seek(0)
                _file = discord.File(image_binary, filename=name + '.png')
                embed = discord.Embed(title="Available Stamp")
                embed.add_field(name='Stamp Name:', value=name)
                embed.set_image(url=f"attachment://{name}.png")
                await ctx.send(embed=embed, file=_file, delete_after=120)

    @auto_meta_info_command(enabled=get_command_enabled("member_avatar"))
    @allowed_channel_and_allowed_role_2(in_dm_allowed=False)
    @commands.cooldown(1, 300, commands.BucketType.member)
    async def member_avatar(self, ctx):
        """
        Stamps the avatar of a Member with the Antistasi Crest.

        Returns the new stamped avatar as a .PNG image that the Member can save and replace his orginal avatar with.
        """
        avatar_image = await self.get_avatar_from_user(ctx.author)
        stamp = self.avatar_stamp
        modified_avatar = await self.bot.execute_in_thread(self._to_bottom_right, avatar_image, stamp, self.avatar_stamp_fraction)
        name = f"{ctx.author.name}_Member_avatar"
        await self._send_image(ctx, modified_avatar, name, "**Your New Avatar**")  # change completion line to "Pledge your allegiance to the Antistasi Rebellion!"?

    async def get_avatar_from_user(self, user):
        """Download `user`'s avatar and return it as an RGB PIL image."""
        avatar = user.avatar_url
        temp_dir = TemporaryDirectory()
        temp_file = pathmaker(temp_dir.name, 'user_avatar.png')
        log.debug("Tempfile '%s' created", temp_file)
        await avatar.save(temp_file)
        avatar_image = Image.open(temp_file)
        # Copy before cleanup so the image no longer depends on the temp file.
        avatar_image = avatar_image.copy()
        avatar_image = avatar_image.convert('RGB')
        temp_dir.cleanup()
        return avatar_image

    def map_image_handling(self, base_image, marker_name, color, bytes_out):
        """Recolor the named marker overlay, composite it onto the map, and
        serialize the result into `bytes_out` (rewound for reading)."""
        log.debug("creating changed map, changed_location: '%s', changed_color: '%s'", marker_name, color)
        marker_image = self.outpost_overlay.get(marker_name)
        marker_alpha = marker_image.getchannel('A')
        marker_image = Image.new('RGBA', marker_image.size, color=color)
        marker_image.putalpha(marker_alpha)
        base_image.paste(marker_image, mask=marker_alpha)
        base_image.save(bytes_out, 'PNG', optimize=True)
        bytes_out.seek(0)
        return base_image, bytes_out

    # @commands.command(aliases=get_aliases("map_changed"), enabled=get_command_enabled("map_changed"))
    # @allowed_channel_and_allowed_role_2(in_dm_allowed=False)
    # @commands.max_concurrency(1, per=commands.BucketType.guild, wait=False)
    # async def map_changed(self, ctx, marker, color):
    #     """
    #     Proof of concept for future real time server map.
    #     """
    #     log.info("command was initiated by '%s'", ctx.author.name)
    #     with BytesIO() as image_binary:
    #         self.base_map_image, image_binary = await self.bot.execute_in_thread(self.map_image_handling, self.base_map_image, marker, color, image_binary)
    #         if self.old_map_message is not None:
    #             await self.old_map_message.delete()
    #         delete_time = None
    #         embed = discord.Embed(title='Current Server Map State', color=self.support.green.discord_color, timestamp=datetime.now(tz=timezone("Europe/Berlin")), type="image")
    #         embed.set_author(name='Antistasi Community Server 1', icon_url="https://s3.amazonaws.com/files.enjin.com/1218665/site_logo/NEW%20LOGO%20BANNER.png", url="https://a3antistasi.enjin.com/")
    #         embed.set_image(url="attachment://map.png")
    #         self.old_map_message = await ctx.send(embed=embed, file=discord.File(fp=image_binary, filename="map.png"), delete_after=delete_time)
    #     log.debug("finished 'map_changed' command")

    # region [SpecialMethods]

    def __repr__(self):
        return f"{self.__class__.__name__}({self.bot.__class__.__name__})"

    def __str__(self):
        return self.qualified_name

    def cog_unload(self):
        log.debug("Cog '%s' UNLOADED!", str(self))

    # endregion[SpecialMethods]
def setup(bot):
    """Mandatory extension entry point: register this Cog with the bot."""
    cog = ImageManipulatorCog(bot)
    bot.add_cog(attribute_checker(cog))
| 50.647922 | 200 | 0.662901 | 18,326 | 0.884673 | 0 | 0 | 6,385 | 0.308231 | 5,900 | 0.284818 | 5,403 | 0.260825 |
87a8759b1ef4d1a86ddceb592ba202006fe0e88e | 75,810 | py | Python | sdk/python/pulumi_gcp/iam/workload_identity_pool_provider.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | [
"ECL-2.0",
"Apache-2.0"
] | 121 | 2018-06-18T19:16:42.000Z | 2022-03-31T06:06:48.000Z | sdk/python/pulumi_gcp/iam/workload_identity_pool_provider.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | [
"ECL-2.0",
"Apache-2.0"
] | 492 | 2018-06-22T19:41:03.000Z | 2022-03-31T15:33:53.000Z | sdk/python/pulumi_gcp/iam/workload_identity_pool_provider.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | [
"ECL-2.0",
"Apache-2.0"
] | 43 | 2018-06-19T01:43:13.000Z | 2022-03-23T22:43:37.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['WorkloadIdentityPoolProviderArgs', 'WorkloadIdentityPoolProvider']
@pulumi.input_type
class WorkloadIdentityPoolProviderArgs:
    """
    Constructor (input) arguments for a `WorkloadIdentityPoolProvider` resource.

    `workload_identity_pool_id` and `workload_identity_pool_provider_id` are
    required; all other arguments are optional. Per the property docs below,
    `aws` and `oidc` are each not compatible with the other, so supply at most
    one of them.
    """

    def __init__(__self__, *,
                 workload_identity_pool_id: pulumi.Input[str],
                 workload_identity_pool_provider_id: pulumi.Input[str],
                 attribute_condition: Optional[pulumi.Input[str]] = None,
                 attribute_mapping: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 aws: Optional[pulumi.Input['WorkloadIdentityPoolProviderAwsArgs']] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 disabled: Optional[pulumi.Input[bool]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 oidc: Optional[pulumi.Input['WorkloadIdentityPoolProviderOidcArgs']] = None,
                 project: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a WorkloadIdentityPoolProvider resource.
        :param pulumi.Input[str] workload_identity_pool_id: The ID used for the pool, which is the final component of the pool resource name. This
               value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
               `gcp-` is reserved for use by Google, and may not be specified.
        :param pulumi.Input[str] workload_identity_pool_provider_id: The ID for the provider, which becomes the final component of the resource name. This
               value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
               `gcp-` is reserved for use by Google, and may not be specified.
        :param pulumi.Input[str] attribute_condition: [A Common Expression Language](https://opensource.google/projects/cel) expression, in
               plain text, to restrict what otherwise valid authentication credentials issued by the
               provider should not be accepted.
               The expression must output a boolean representing whether to allow the federation.
               The following keywords may be referenced in the expressions:
               * `assertion`: JSON representing the authentication credential issued by the provider.
               * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`.
               * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`.
               The maximum length of the attribute condition expression is 4096 characters. If
               unspecified, all valid authentication credential are accepted.
               The following example shows how to only allow credentials with a mapped `google.groups`
               value of `admins`:
               ```python
               import pulumi
               ```
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] attribute_mapping: Maps attributes from authentication credentials issued by an external identity provider
               to Google Cloud attributes, such as `subject` and `segment`.
               Each key must be a string specifying the Google Cloud IAM attribute to map to.
               The following keys are supported:
               * `google.subject`: The principal IAM is authenticating. You can reference this value
                 in IAM bindings. This is also the subject that appears in Cloud Logging logs.
                 Cannot exceed 127 characters.
               * `google.groups`: Groups the external identity belongs to. You can grant groups
                 access to resources using an IAM `principalSet` binding; access applies to all
                 members of the group.
               You can also provide custom attributes by specifying `attribute.{custom_attribute}`,
               where `{custom_attribute}` is the name of the custom attribute to be mapped. You can
               define a maximum of 50 custom attributes. The maximum length of a mapped attribute key
               is 100 characters, and the key may only contain the characters [a-z0-9_].
               You can reference these attributes in IAM policies to define fine-grained access for a
               workload to Google Cloud resources. For example:
               * `google.subject`:
                 `principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}`
               * `google.groups`:
                 `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}`
               * `attribute.{custom_attribute}`:
                 `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}`
               Each value must be a [Common Expression Language](https://opensource.google/projects/cel)
               function that maps an identity provider credential to the normalized attribute specified
               by the corresponding map key.
               You can use the `assertion` keyword in the expression to access a JSON representation of
               the authentication credential issued by the provider.
               The maximum length of an attribute mapping expression is 2048 characters. When evaluated,
               the total size of all mapped attributes must not exceed 8KB.
               For AWS providers, the following rules apply:
               - If no attribute mapping is defined, the following default mapping applies:
               ```python
               import pulumi
               ```
               - If any custom attribute mappings are defined, they must include a mapping to the
                 `google.subject` attribute.
               For OIDC providers, the following rules apply:
               - Custom attribute mappings must be defined, and must include a mapping to the
                 `google.subject` attribute. For example, the following maps the `sub` claim of the
                 incoming credential to the `subject` attribute on a Google token.
               ```python
               import pulumi
               ```
        :param pulumi.Input['WorkloadIdentityPoolProviderAwsArgs'] aws: An Amazon Web Services identity provider. Not compatible with the property oidc.
               Structure is documented below.
        :param pulumi.Input[str] description: A description for the provider. Cannot exceed 256 characters.
        :param pulumi.Input[bool] disabled: Whether the provider is disabled. You cannot use a disabled provider to exchange tokens.
               However, existing tokens still grant access.
        :param pulumi.Input[str] display_name: A display name for the provider. Cannot exceed 32 characters.
        :param pulumi.Input['WorkloadIdentityPoolProviderOidcArgs'] oidc: An OpenId Connect 1.0 identity provider. Not compatible with the property aws.
               Structure is documented below.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        """
        pulumi.set(__self__, "workload_identity_pool_id", workload_identity_pool_id)
        pulumi.set(__self__, "workload_identity_pool_provider_id", workload_identity_pool_provider_id)
        # Optional arguments are recorded only when explicitly provided, so
        # unset values stay absent from the resource's input map.
        if attribute_condition is not None:
            pulumi.set(__self__, "attribute_condition", attribute_condition)
        if attribute_mapping is not None:
            pulumi.set(__self__, "attribute_mapping", attribute_mapping)
        if aws is not None:
            pulumi.set(__self__, "aws", aws)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if disabled is not None:
            pulumi.set(__self__, "disabled", disabled)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)
        if oidc is not None:
            pulumi.set(__self__, "oidc", oidc)
        if project is not None:
            pulumi.set(__self__, "project", project)

    @property
    @pulumi.getter(name="workloadIdentityPoolId")
    def workload_identity_pool_id(self) -> pulumi.Input[str]:
        """
        The ID used for the pool, which is the final component of the pool resource name. This
        value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
        `gcp-` is reserved for use by Google, and may not be specified.
        """
        return pulumi.get(self, "workload_identity_pool_id")

    @workload_identity_pool_id.setter
    def workload_identity_pool_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "workload_identity_pool_id", value)

    @property
    @pulumi.getter(name="workloadIdentityPoolProviderId")
    def workload_identity_pool_provider_id(self) -> pulumi.Input[str]:
        """
        The ID for the provider, which becomes the final component of the resource name. This
        value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
        `gcp-` is reserved for use by Google, and may not be specified.
        """
        return pulumi.get(self, "workload_identity_pool_provider_id")

    @workload_identity_pool_provider_id.setter
    def workload_identity_pool_provider_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "workload_identity_pool_provider_id", value)

    @property
    @pulumi.getter(name="attributeCondition")
    def attribute_condition(self) -> Optional[pulumi.Input[str]]:
        """
        [A Common Expression Language](https://opensource.google/projects/cel) expression, in
        plain text, to restrict what otherwise valid authentication credentials issued by the
        provider should not be accepted.
        The expression must output a boolean representing whether to allow the federation.
        The following keywords may be referenced in the expressions:
        * `assertion`: JSON representing the authentication credential issued by the provider.
        * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`.
        * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`.
        The maximum length of the attribute condition expression is 4096 characters. If
        unspecified, all valid authentication credential are accepted.
        The following example shows how to only allow credentials with a mapped `google.groups`
        value of `admins`:
        ```python
        import pulumi
        ```
        """
        return pulumi.get(self, "attribute_condition")

    @attribute_condition.setter
    def attribute_condition(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "attribute_condition", value)

    @property
    @pulumi.getter(name="attributeMapping")
    def attribute_mapping(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Maps attributes from authentication credentials issued by an external identity provider
        to Google Cloud attributes, such as `subject` and `segment`.
        Each key must be a string specifying the Google Cloud IAM attribute to map to.
        The following keys are supported:
        * `google.subject`: The principal IAM is authenticating. You can reference this value
          in IAM bindings. This is also the subject that appears in Cloud Logging logs.
          Cannot exceed 127 characters.
        * `google.groups`: Groups the external identity belongs to. You can grant groups
          access to resources using an IAM `principalSet` binding; access applies to all
          members of the group.
        You can also provide custom attributes by specifying `attribute.{custom_attribute}`,
        where `{custom_attribute}` is the name of the custom attribute to be mapped. You can
        define a maximum of 50 custom attributes. The maximum length of a mapped attribute key
        is 100 characters, and the key may only contain the characters [a-z0-9_].
        You can reference these attributes in IAM policies to define fine-grained access for a
        workload to Google Cloud resources. For example:
        * `google.subject`:
          `principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}`
        * `google.groups`:
          `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}`
        * `attribute.{custom_attribute}`:
          `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}`
        Each value must be a [Common Expression Language](https://opensource.google/projects/cel)
        function that maps an identity provider credential to the normalized attribute specified
        by the corresponding map key.
        You can use the `assertion` keyword in the expression to access a JSON representation of
        the authentication credential issued by the provider.
        The maximum length of an attribute mapping expression is 2048 characters. When evaluated,
        the total size of all mapped attributes must not exceed 8KB.
        For AWS providers, the following rules apply:
        - If no attribute mapping is defined, the following default mapping applies:
        ```python
        import pulumi
        ```
        - If any custom attribute mappings are defined, they must include a mapping to the
          `google.subject` attribute.
        For OIDC providers, the following rules apply:
        - Custom attribute mappings must be defined, and must include a mapping to the
          `google.subject` attribute. For example, the following maps the `sub` claim of the
          incoming credential to the `subject` attribute on a Google token.
        ```python
        import pulumi
        ```
        """
        return pulumi.get(self, "attribute_mapping")

    @attribute_mapping.setter
    def attribute_mapping(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "attribute_mapping", value)

    @property
    @pulumi.getter
    def aws(self) -> Optional[pulumi.Input['WorkloadIdentityPoolProviderAwsArgs']]:
        """
        An Amazon Web Services identity provider. Not compatible with the property oidc.
        Structure is documented below.
        """
        return pulumi.get(self, "aws")

    @aws.setter
    def aws(self, value: Optional[pulumi.Input['WorkloadIdentityPoolProviderAwsArgs']]):
        pulumi.set(self, "aws", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A description for the provider. Cannot exceed 256 characters.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def disabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the provider is disabled. You cannot use a disabled provider to exchange tokens.
        However, existing tokens still grant access.
        """
        return pulumi.get(self, "disabled")

    @disabled.setter
    def disabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disabled", value)

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[str]]:
        """
        A display name for the provider. Cannot exceed 32 characters.
        """
        return pulumi.get(self, "display_name")

    @display_name.setter
    def display_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "display_name", value)

    @property
    @pulumi.getter
    def oidc(self) -> Optional[pulumi.Input['WorkloadIdentityPoolProviderOidcArgs']]:
        """
        An OpenId Connect 1.0 identity provider. Not compatible with the property aws.
        Structure is documented below.
        """
        return pulumi.get(self, "oidc")

    @oidc.setter
    def oidc(self, value: Optional[pulumi.Input['WorkloadIdentityPoolProviderOidcArgs']]):
        pulumi.set(self, "oidc", value)

    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)
@pulumi.input_type
class _WorkloadIdentityPoolProviderState:
    """
    State properties used for looking up and filtering
    `WorkloadIdentityPoolProvider` resources. Unlike the Args type, every
    field here is optional, and it additionally carries the server-populated
    outputs `name` and `state`.
    """

    def __init__(__self__, *,
                 attribute_condition: Optional[pulumi.Input[str]] = None,
                 attribute_mapping: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 aws: Optional[pulumi.Input['WorkloadIdentityPoolProviderAwsArgs']] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 disabled: Optional[pulumi.Input[bool]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 oidc: Optional[pulumi.Input['WorkloadIdentityPoolProviderOidcArgs']] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 state: Optional[pulumi.Input[str]] = None,
                 workload_identity_pool_id: Optional[pulumi.Input[str]] = None,
                 workload_identity_pool_provider_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering WorkloadIdentityPoolProvider resources.
        :param pulumi.Input[str] attribute_condition: [A Common Expression Language](https://opensource.google/projects/cel) expression, in
               plain text, to restrict what otherwise valid authentication credentials issued by the
               provider should not be accepted.
               The expression must output a boolean representing whether to allow the federation.
               The following keywords may be referenced in the expressions:
               * `assertion`: JSON representing the authentication credential issued by the provider.
               * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`.
               * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`.
               The maximum length of the attribute condition expression is 4096 characters. If
               unspecified, all valid authentication credential are accepted.
               The following example shows how to only allow credentials with a mapped `google.groups`
               value of `admins`:
               ```python
               import pulumi
               ```
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] attribute_mapping: Maps attributes from authentication credentials issued by an external identity provider
               to Google Cloud attributes, such as `subject` and `segment`.
               Each key must be a string specifying the Google Cloud IAM attribute to map to.
               The following keys are supported:
               * `google.subject`: The principal IAM is authenticating. You can reference this value
                 in IAM bindings. This is also the subject that appears in Cloud Logging logs.
                 Cannot exceed 127 characters.
               * `google.groups`: Groups the external identity belongs to. You can grant groups
                 access to resources using an IAM `principalSet` binding; access applies to all
                 members of the group.
               You can also provide custom attributes by specifying `attribute.{custom_attribute}`,
               where `{custom_attribute}` is the name of the custom attribute to be mapped. You can
               define a maximum of 50 custom attributes. The maximum length of a mapped attribute key
               is 100 characters, and the key may only contain the characters [a-z0-9_].
               You can reference these attributes in IAM policies to define fine-grained access for a
               workload to Google Cloud resources. For example:
               * `google.subject`:
                 `principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}`
               * `google.groups`:
                 `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}`
               * `attribute.{custom_attribute}`:
                 `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}`
               Each value must be a [Common Expression Language](https://opensource.google/projects/cel)
               function that maps an identity provider credential to the normalized attribute specified
               by the corresponding map key.
               You can use the `assertion` keyword in the expression to access a JSON representation of
               the authentication credential issued by the provider.
               The maximum length of an attribute mapping expression is 2048 characters. When evaluated,
               the total size of all mapped attributes must not exceed 8KB.
               For AWS providers, the following rules apply:
               - If no attribute mapping is defined, the following default mapping applies:
               ```python
               import pulumi
               ```
               - If any custom attribute mappings are defined, they must include a mapping to the
                 `google.subject` attribute.
               For OIDC providers, the following rules apply:
               - Custom attribute mappings must be defined, and must include a mapping to the
                 `google.subject` attribute. For example, the following maps the `sub` claim of the
                 incoming credential to the `subject` attribute on a Google token.
               ```python
               import pulumi
               ```
        :param pulumi.Input['WorkloadIdentityPoolProviderAwsArgs'] aws: An Amazon Web Services identity provider. Not compatible with the property oidc.
               Structure is documented below.
        :param pulumi.Input[str] description: A description for the provider. Cannot exceed 256 characters.
        :param pulumi.Input[bool] disabled: Whether the provider is disabled. You cannot use a disabled provider to exchange tokens.
               However, existing tokens still grant access.
        :param pulumi.Input[str] display_name: A display name for the provider. Cannot exceed 32 characters.
        :param pulumi.Input[str] name: The resource name of the provider as
               'projects/{project_number}/locations/global/workloadIdentityPools/{workload_identity_pool_id}/providers/{workload_identity_pool_provider_id}'.
        :param pulumi.Input['WorkloadIdentityPoolProviderOidcArgs'] oidc: An OpenId Connect 1.0 identity provider. Not compatible with the property aws.
               Structure is documented below.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        :param pulumi.Input[str] state: The state of the provider. * STATE_UNSPECIFIED: State unspecified. * ACTIVE: The provider is active, and may be used to
               validate authentication credentials. * DELETED: The provider is soft-deleted. Soft-deleted providers are permanently
               deleted after approximately 30 days. You can restore a soft-deleted provider using UndeleteWorkloadIdentityPoolProvider.
               You cannot reuse the ID of a soft-deleted provider until it is permanently deleted.
        :param pulumi.Input[str] workload_identity_pool_id: The ID used for the pool, which is the final component of the pool resource name. This
               value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
               `gcp-` is reserved for use by Google, and may not be specified.
        :param pulumi.Input[str] workload_identity_pool_provider_id: The ID for the provider, which becomes the final component of the resource name. This
               value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
               `gcp-` is reserved for use by Google, and may not be specified.
        """
        # Every field is optional state; only record those explicitly provided.
        if attribute_condition is not None:
            pulumi.set(__self__, "attribute_condition", attribute_condition)
        if attribute_mapping is not None:
            pulumi.set(__self__, "attribute_mapping", attribute_mapping)
        if aws is not None:
            pulumi.set(__self__, "aws", aws)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if disabled is not None:
            pulumi.set(__self__, "disabled", disabled)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if oidc is not None:
            pulumi.set(__self__, "oidc", oidc)
        if project is not None:
            pulumi.set(__self__, "project", project)
        if state is not None:
            pulumi.set(__self__, "state", state)
        if workload_identity_pool_id is not None:
            pulumi.set(__self__, "workload_identity_pool_id", workload_identity_pool_id)
        if workload_identity_pool_provider_id is not None:
            pulumi.set(__self__, "workload_identity_pool_provider_id", workload_identity_pool_provider_id)

    @property
    @pulumi.getter(name="attributeCondition")
    def attribute_condition(self) -> Optional[pulumi.Input[str]]:
        """
        [A Common Expression Language](https://opensource.google/projects/cel) expression, in
        plain text, to restrict what otherwise valid authentication credentials issued by the
        provider should not be accepted.
        The expression must output a boolean representing whether to allow the federation.
        The following keywords may be referenced in the expressions:
        * `assertion`: JSON representing the authentication credential issued by the provider.
        * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`.
        * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`.
        The maximum length of the attribute condition expression is 4096 characters. If
        unspecified, all valid authentication credential are accepted.
        The following example shows how to only allow credentials with a mapped `google.groups`
        value of `admins`:
        ```python
        import pulumi
        ```
        """
        return pulumi.get(self, "attribute_condition")

    @attribute_condition.setter
    def attribute_condition(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "attribute_condition", value)

    @property
    @pulumi.getter(name="attributeMapping")
    def attribute_mapping(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Maps attributes from authentication credentials issued by an external identity provider
        to Google Cloud attributes, such as `subject` and `segment`.
        Each key must be a string specifying the Google Cloud IAM attribute to map to.
        The following keys are supported:
        * `google.subject`: The principal IAM is authenticating. You can reference this value
          in IAM bindings. This is also the subject that appears in Cloud Logging logs.
          Cannot exceed 127 characters.
        * `google.groups`: Groups the external identity belongs to. You can grant groups
          access to resources using an IAM `principalSet` binding; access applies to all
          members of the group.
        You can also provide custom attributes by specifying `attribute.{custom_attribute}`,
        where `{custom_attribute}` is the name of the custom attribute to be mapped. You can
        define a maximum of 50 custom attributes. The maximum length of a mapped attribute key
        is 100 characters, and the key may only contain the characters [a-z0-9_].
        You can reference these attributes in IAM policies to define fine-grained access for a
        workload to Google Cloud resources. For example:
        * `google.subject`:
          `principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}`
        * `google.groups`:
          `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}`
        * `attribute.{custom_attribute}`:
          `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}`
        Each value must be a [Common Expression Language](https://opensource.google/projects/cel)
        function that maps an identity provider credential to the normalized attribute specified
        by the corresponding map key.
        You can use the `assertion` keyword in the expression to access a JSON representation of
        the authentication credential issued by the provider.
        The maximum length of an attribute mapping expression is 2048 characters. When evaluated,
        the total size of all mapped attributes must not exceed 8KB.
        For AWS providers, the following rules apply:
        - If no attribute mapping is defined, the following default mapping applies:
        ```python
        import pulumi
        ```
        - If any custom attribute mappings are defined, they must include a mapping to the
          `google.subject` attribute.
        For OIDC providers, the following rules apply:
        - Custom attribute mappings must be defined, and must include a mapping to the
          `google.subject` attribute. For example, the following maps the `sub` claim of the
          incoming credential to the `subject` attribute on a Google token.
        ```python
        import pulumi
        ```
        """
        return pulumi.get(self, "attribute_mapping")

    @attribute_mapping.setter
    def attribute_mapping(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "attribute_mapping", value)

    @property
    @pulumi.getter
    def aws(self) -> Optional[pulumi.Input['WorkloadIdentityPoolProviderAwsArgs']]:
        """
        An Amazon Web Services identity provider. Not compatible with the property oidc.
        Structure is documented below.
        """
        return pulumi.get(self, "aws")

    @aws.setter
    def aws(self, value: Optional[pulumi.Input['WorkloadIdentityPoolProviderAwsArgs']]):
        pulumi.set(self, "aws", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A description for the provider. Cannot exceed 256 characters.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def disabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the provider is disabled. You cannot use a disabled provider to exchange tokens.
        However, existing tokens still grant access.
        """
        return pulumi.get(self, "disabled")

    @disabled.setter
    def disabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disabled", value)

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[str]]:
        """
        A display name for the provider. Cannot exceed 32 characters.
        """
        return pulumi.get(self, "display_name")

    @display_name.setter
    def display_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "display_name", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The resource name of the provider as
        'projects/{project_number}/locations/global/workloadIdentityPools/{workload_identity_pool_id}/providers/{workload_identity_pool_provider_id}'.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def oidc(self) -> Optional[pulumi.Input['WorkloadIdentityPoolProviderOidcArgs']]:
        """
        An OpenId Connect 1.0 identity provider. Not compatible with the property aws.
        Structure is documented below.
        """
        return pulumi.get(self, "oidc")

    @oidc.setter
    def oidc(self, value: Optional[pulumi.Input['WorkloadIdentityPoolProviderOidcArgs']]):
        pulumi.set(self, "oidc", value)

    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)

    @property
    @pulumi.getter
    def state(self) -> Optional[pulumi.Input[str]]:
        """
        The state of the provider. * STATE_UNSPECIFIED: State unspecified. * ACTIVE: The provider is active, and may be used to
        validate authentication credentials. * DELETED: The provider is soft-deleted. Soft-deleted providers are permanently
        deleted after approximately 30 days. You can restore a soft-deleted provider using UndeleteWorkloadIdentityPoolProvider.
        You cannot reuse the ID of a soft-deleted provider until it is permanently deleted.
        """
        return pulumi.get(self, "state")

    @state.setter
    def state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "state", value)

    @property
    @pulumi.getter(name="workloadIdentityPoolId")
    def workload_identity_pool_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID used for the pool, which is the final component of the pool resource name. This
        value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
        `gcp-` is reserved for use by Google, and may not be specified.
        """
        return pulumi.get(self, "workload_identity_pool_id")

    @workload_identity_pool_id.setter
    def workload_identity_pool_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "workload_identity_pool_id", value)

    @property
    @pulumi.getter(name="workloadIdentityPoolProviderId")
    def workload_identity_pool_provider_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID for the provider, which becomes the final component of the resource name. This
        value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
        `gcp-` is reserved for use by Google, and may not be specified.
        """
        return pulumi.get(self, "workload_identity_pool_provider_id")

    @workload_identity_pool_provider_id.setter
    def workload_identity_pool_provider_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "workload_identity_pool_provider_id", value)
class WorkloadIdentityPoolProvider(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
attribute_condition: Optional[pulumi.Input[str]] = None,
attribute_mapping: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
aws: Optional[pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderAwsArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
display_name: Optional[pulumi.Input[str]] = None,
oidc: Optional[pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderOidcArgs']]] = None,
project: Optional[pulumi.Input[str]] = None,
workload_identity_pool_id: Optional[pulumi.Input[str]] = None,
workload_identity_pool_provider_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A configuration for an external identity provider.
To get more information about WorkloadIdentityPoolProvider, see:
* [API documentation](https://cloud.google.com/iam/docs/reference/rest/v1beta/projects.locations.workloadIdentityPools.providers)
* How-to Guides
* [Managing workload identity providers](https://cloud.google.com/iam/docs/manage-workload-identity-pools-providers#managing_workload_identity_providers)
## Example Usage
### Iam Workload Identity Pool Provider Aws Basic
```python
import pulumi
import pulumi_gcp as gcp
pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool",
opts=pulumi.ResourceOptions(provider=google_beta))
example = gcp.iam.WorkloadIdentityPoolProvider("example",
workload_identity_pool_id=pool.workload_identity_pool_id,
workload_identity_pool_provider_id="example-prvdr",
aws=gcp.iam.WorkloadIdentityPoolProviderAwsArgs(
account_id="999999999999",
),
opts=pulumi.ResourceOptions(provider=google_beta))
```
### Iam Workload Identity Pool Provider Aws Full
```python
import pulumi
import pulumi_gcp as gcp
pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool",
opts=pulumi.ResourceOptions(provider=google_beta))
example = gcp.iam.WorkloadIdentityPoolProvider("example",
workload_identity_pool_id=pool.workload_identity_pool_id,
workload_identity_pool_provider_id="example-prvdr",
display_name="Name of provider",
description="AWS identity pool provider for automated test",
disabled=True,
attribute_condition="attribute.aws_role==\"arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole\"",
attribute_mapping={
"google.subject": "assertion.arn",
"attribute.aws_account": "assertion.account",
"attribute.environment": "assertion.arn.contains(\":instance-profile/Production\") ? \"prod\" : \"test\"",
},
aws=gcp.iam.WorkloadIdentityPoolProviderAwsArgs(
account_id="999999999999",
),
opts=pulumi.ResourceOptions(provider=google_beta))
```
### Iam Workload Identity Pool Provider Oidc Basic
```python
import pulumi
import pulumi_gcp as gcp
pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool",
opts=pulumi.ResourceOptions(provider=google_beta))
example = gcp.iam.WorkloadIdentityPoolProvider("example",
workload_identity_pool_id=pool.workload_identity_pool_id,
workload_identity_pool_provider_id="example-prvdr",
attribute_mapping={
"google.subject": "assertion.sub",
},
oidc=gcp.iam.WorkloadIdentityPoolProviderOidcArgs(
issuer_uri="https://sts.windows.net/azure-tenant-id",
),
opts=pulumi.ResourceOptions(provider=google_beta))
```
### Iam Workload Identity Pool Provider Oidc Full
```python
import pulumi
import pulumi_gcp as gcp
pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool",
opts=pulumi.ResourceOptions(provider=google_beta))
example = gcp.iam.WorkloadIdentityPoolProvider("example",
workload_identity_pool_id=pool.workload_identity_pool_id,
workload_identity_pool_provider_id="example-prvdr",
display_name="Name of provider",
description="OIDC identity pool provider for automated test",
disabled=True,
attribute_condition="\"e968c2ef-047c-498d-8d79-16ca1b61e77e\" in assertion.groups",
attribute_mapping={
"google.subject": "\"azure::\" + assertion.tid + \"::\" + assertion.sub",
"attribute.tid": "assertion.tid",
"attribute.managed_identity_name": \"\"\" {
"8bb39bdb-1cc5-4447-b7db-a19e920eb111":"workload1",
"55d36609-9bcf-48e0-a366-a3cf19027d2a":"workload2"
}[assertion.oid]
\"\"\",
},
oidc=gcp.iam.WorkloadIdentityPoolProviderOidcArgs(
allowed_audiences=[
"https://example.com/gcp-oidc-federation",
"example.com/gcp-oidc-federation",
],
issuer_uri="https://sts.windows.net/azure-tenant-id",
),
opts=pulumi.ResourceOptions(provider=google_beta))
```
## Import
WorkloadIdentityPoolProvider can be imported using any of these accepted formats
```sh
$ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}
```
```sh
$ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default {{project}}/{{workload_identity_pool_id}}/{{workload_identity_pool_provider_id}}
```
```sh
$ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default {{workload_identity_pool_id}}/{{workload_identity_pool_provider_id}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] attribute_condition: [A Common Expression Language](https://opensource.google/projects/cel) expression, in
plain text, to restrict what otherwise valid authentication credentials issued by the
provider should not be accepted.
The expression must output a boolean representing whether to allow the federation.
The following keywords may be referenced in the expressions:
* `assertion`: JSON representing the authentication credential issued by the provider.
* `google`: The Google attributes mapped from the assertion in the `attribute_mappings`.
* `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`.
The maximum length of the attribute condition expression is 4096 characters. If
unspecified, all valid authentication credential are accepted.
The following example shows how to only allow credentials with a mapped `google.groups`
value of `admins`:
```python
import pulumi
```
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] attribute_mapping: Maps attributes from authentication credentials issued by an external identity provider
to Google Cloud attributes, such as `subject` and `segment`.
Each key must be a string specifying the Google Cloud IAM attribute to map to.
The following keys are supported:
* `google.subject`: The principal IAM is authenticating. You can reference this value
in IAM bindings. This is also the subject that appears in Cloud Logging logs.
Cannot exceed 127 characters.
* `google.groups`: Groups the external identity belongs to. You can grant groups
access to resources using an IAM `principalSet` binding; access applies to all
members of the group.
You can also provide custom attributes by specifying `attribute.{custom_attribute}`,
where `{custom_attribute}` is the name of the custom attribute to be mapped. You can
define a maximum of 50 custom attributes. The maximum length of a mapped attribute key
is 100 characters, and the key may only contain the characters [a-z0-9_].
You can reference these attributes in IAM policies to define fine-grained access for a
workload to Google Cloud resources. For example:
* `google.subject`:
`principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}`
* `google.groups`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}`
* `attribute.{custom_attribute}`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}`
Each value must be a [Common Expression Language](https://opensource.google/projects/cel)
function that maps an identity provider credential to the normalized attribute specified
by the corresponding map key.
You can use the `assertion` keyword in the expression to access a JSON representation of
the authentication credential issued by the provider.
The maximum length of an attribute mapping expression is 2048 characters. When evaluated,
the total size of all mapped attributes must not exceed 8KB.
For AWS providers, the following rules apply:
- If no attribute mapping is defined, the following default mapping applies:
```python
import pulumi
```
- If any custom attribute mappings are defined, they must include a mapping to the
`google.subject` attribute.
For OIDC providers, the following rules apply:
- Custom attribute mappings must be defined, and must include a mapping to the
`google.subject` attribute. For example, the following maps the `sub` claim of the
incoming credential to the `subject` attribute on a Google token.
```python
import pulumi
```
:param pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderAwsArgs']] aws: An Amazon Web Services identity provider. Not compatible with the property oidc.
Structure is documented below.
:param pulumi.Input[str] description: A description for the provider. Cannot exceed 256 characters.
:param pulumi.Input[bool] disabled: Whether the provider is disabled. You cannot use a disabled provider to exchange tokens.
However, existing tokens still grant access.
:param pulumi.Input[str] display_name: A display name for the provider. Cannot exceed 32 characters.
:param pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderOidcArgs']] oidc: An OpenId Connect 1.0 identity provider. Not compatible with the property aws.
Structure is documented below.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] workload_identity_pool_id: The ID used for the pool, which is the final component of the pool resource name. This
value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
:param pulumi.Input[str] workload_identity_pool_provider_id: The ID for the provider, which becomes the final component of the resource name. This
value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: WorkloadIdentityPoolProviderArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A configuration for an external identity provider.
To get more information about WorkloadIdentityPoolProvider, see:
* [API documentation](https://cloud.google.com/iam/docs/reference/rest/v1beta/projects.locations.workloadIdentityPools.providers)
* How-to Guides
* [Managing workload identity providers](https://cloud.google.com/iam/docs/manage-workload-identity-pools-providers#managing_workload_identity_providers)
## Example Usage
### Iam Workload Identity Pool Provider Aws Basic
```python
import pulumi
import pulumi_gcp as gcp
pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool",
opts=pulumi.ResourceOptions(provider=google_beta))
example = gcp.iam.WorkloadIdentityPoolProvider("example",
workload_identity_pool_id=pool.workload_identity_pool_id,
workload_identity_pool_provider_id="example-prvdr",
aws=gcp.iam.WorkloadIdentityPoolProviderAwsArgs(
account_id="999999999999",
),
opts=pulumi.ResourceOptions(provider=google_beta))
```
### Iam Workload Identity Pool Provider Aws Full
```python
import pulumi
import pulumi_gcp as gcp
pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool",
opts=pulumi.ResourceOptions(provider=google_beta))
example = gcp.iam.WorkloadIdentityPoolProvider("example",
workload_identity_pool_id=pool.workload_identity_pool_id,
workload_identity_pool_provider_id="example-prvdr",
display_name="Name of provider",
description="AWS identity pool provider for automated test",
disabled=True,
attribute_condition="attribute.aws_role==\"arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole\"",
attribute_mapping={
"google.subject": "assertion.arn",
"attribute.aws_account": "assertion.account",
"attribute.environment": "assertion.arn.contains(\":instance-profile/Production\") ? \"prod\" : \"test\"",
},
aws=gcp.iam.WorkloadIdentityPoolProviderAwsArgs(
account_id="999999999999",
),
opts=pulumi.ResourceOptions(provider=google_beta))
```
### Iam Workload Identity Pool Provider Oidc Basic
```python
import pulumi
import pulumi_gcp as gcp
pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool",
opts=pulumi.ResourceOptions(provider=google_beta))
example = gcp.iam.WorkloadIdentityPoolProvider("example",
workload_identity_pool_id=pool.workload_identity_pool_id,
workload_identity_pool_provider_id="example-prvdr",
attribute_mapping={
"google.subject": "assertion.sub",
},
oidc=gcp.iam.WorkloadIdentityPoolProviderOidcArgs(
issuer_uri="https://sts.windows.net/azure-tenant-id",
),
opts=pulumi.ResourceOptions(provider=google_beta))
```
### Iam Workload Identity Pool Provider Oidc Full
```python
import pulumi
import pulumi_gcp as gcp
pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool",
opts=pulumi.ResourceOptions(provider=google_beta))
example = gcp.iam.WorkloadIdentityPoolProvider("example",
workload_identity_pool_id=pool.workload_identity_pool_id,
workload_identity_pool_provider_id="example-prvdr",
display_name="Name of provider",
description="OIDC identity pool provider for automated test",
disabled=True,
attribute_condition="\"e968c2ef-047c-498d-8d79-16ca1b61e77e\" in assertion.groups",
attribute_mapping={
"google.subject": "\"azure::\" + assertion.tid + \"::\" + assertion.sub",
"attribute.tid": "assertion.tid",
"attribute.managed_identity_name": \"\"\" {
"8bb39bdb-1cc5-4447-b7db-a19e920eb111":"workload1",
"55d36609-9bcf-48e0-a366-a3cf19027d2a":"workload2"
}[assertion.oid]
\"\"\",
},
oidc=gcp.iam.WorkloadIdentityPoolProviderOidcArgs(
allowed_audiences=[
"https://example.com/gcp-oidc-federation",
"example.com/gcp-oidc-federation",
],
issuer_uri="https://sts.windows.net/azure-tenant-id",
),
opts=pulumi.ResourceOptions(provider=google_beta))
```
## Import
WorkloadIdentityPoolProvider can be imported using any of these accepted formats
```sh
$ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}
```
```sh
$ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default {{project}}/{{workload_identity_pool_id}}/{{workload_identity_pool_provider_id}}
```
```sh
$ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default {{workload_identity_pool_id}}/{{workload_identity_pool_provider_id}}
```
:param str resource_name: The name of the resource.
:param WorkloadIdentityPoolProviderArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(WorkloadIdentityPoolProviderArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
attribute_condition: Optional[pulumi.Input[str]] = None,
attribute_mapping: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
aws: Optional[pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderAwsArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
display_name: Optional[pulumi.Input[str]] = None,
oidc: Optional[pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderOidcArgs']]] = None,
project: Optional[pulumi.Input[str]] = None,
workload_identity_pool_id: Optional[pulumi.Input[str]] = None,
workload_identity_pool_provider_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = WorkloadIdentityPoolProviderArgs.__new__(WorkloadIdentityPoolProviderArgs)
__props__.__dict__["attribute_condition"] = attribute_condition
__props__.__dict__["attribute_mapping"] = attribute_mapping
__props__.__dict__["aws"] = aws
__props__.__dict__["description"] = description
__props__.__dict__["disabled"] = disabled
__props__.__dict__["display_name"] = display_name
__props__.__dict__["oidc"] = oidc
__props__.__dict__["project"] = project
if workload_identity_pool_id is None and not opts.urn:
raise TypeError("Missing required property 'workload_identity_pool_id'")
__props__.__dict__["workload_identity_pool_id"] = workload_identity_pool_id
if workload_identity_pool_provider_id is None and not opts.urn:
raise TypeError("Missing required property 'workload_identity_pool_provider_id'")
__props__.__dict__["workload_identity_pool_provider_id"] = workload_identity_pool_provider_id
__props__.__dict__["name"] = None
__props__.__dict__["state"] = None
super(WorkloadIdentityPoolProvider, __self__).__init__(
'gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
attribute_condition: Optional[pulumi.Input[str]] = None,
attribute_mapping: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
aws: Optional[pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderAwsArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
display_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
oidc: Optional[pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderOidcArgs']]] = None,
project: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
workload_identity_pool_id: Optional[pulumi.Input[str]] = None,
workload_identity_pool_provider_id: Optional[pulumi.Input[str]] = None) -> 'WorkloadIdentityPoolProvider':
"""
Get an existing WorkloadIdentityPoolProvider resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] attribute_condition: [A Common Expression Language](https://opensource.google/projects/cel) expression, in
plain text, to restrict what otherwise valid authentication credentials issued by the
provider should not be accepted.
The expression must output a boolean representing whether to allow the federation.
The following keywords may be referenced in the expressions:
* `assertion`: JSON representing the authentication credential issued by the provider.
* `google`: The Google attributes mapped from the assertion in the `attribute_mappings`.
* `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`.
The maximum length of the attribute condition expression is 4096 characters. If
unspecified, all valid authentication credential are accepted.
The following example shows how to only allow credentials with a mapped `google.groups`
value of `admins`:
```python
import pulumi
```
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] attribute_mapping: Maps attributes from authentication credentials issued by an external identity provider
to Google Cloud attributes, such as `subject` and `segment`.
Each key must be a string specifying the Google Cloud IAM attribute to map to.
The following keys are supported:
* `google.subject`: The principal IAM is authenticating. You can reference this value
in IAM bindings. This is also the subject that appears in Cloud Logging logs.
Cannot exceed 127 characters.
* `google.groups`: Groups the external identity belongs to. You can grant groups
access to resources using an IAM `principalSet` binding; access applies to all
members of the group.
You can also provide custom attributes by specifying `attribute.{custom_attribute}`,
where `{custom_attribute}` is the name of the custom attribute to be mapped. You can
define a maximum of 50 custom attributes. The maximum length of a mapped attribute key
is 100 characters, and the key may only contain the characters [a-z0-9_].
You can reference these attributes in IAM policies to define fine-grained access for a
workload to Google Cloud resources. For example:
* `google.subject`:
`principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}`
* `google.groups`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}`
* `attribute.{custom_attribute}`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}`
Each value must be a [Common Expression Language](https://opensource.google/projects/cel)
function that maps an identity provider credential to the normalized attribute specified
by the corresponding map key.
You can use the `assertion` keyword in the expression to access a JSON representation of
the authentication credential issued by the provider.
The maximum length of an attribute mapping expression is 2048 characters. When evaluated,
the total size of all mapped attributes must not exceed 8KB.
For AWS providers, the following rules apply:
- If no attribute mapping is defined, the following default mapping applies:
```python
import pulumi
```
- If any custom attribute mappings are defined, they must include a mapping to the
`google.subject` attribute.
For OIDC providers, the following rules apply:
- Custom attribute mappings must be defined, and must include a mapping to the
`google.subject` attribute. For example, the following maps the `sub` claim of the
incoming credential to the `subject` attribute on a Google token.
```python
import pulumi
```
:param pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderAwsArgs']] aws: An Amazon Web Services identity provider. Not compatible with the property oidc.
Structure is documented below.
:param pulumi.Input[str] description: A description for the provider. Cannot exceed 256 characters.
:param pulumi.Input[bool] disabled: Whether the provider is disabled. You cannot use a disabled provider to exchange tokens.
However, existing tokens still grant access.
:param pulumi.Input[str] display_name: A display name for the provider. Cannot exceed 32 characters.
:param pulumi.Input[str] name: The resource name of the provider as
'projects/{project_number}/locations/global/workloadIdentityPools/{workload_identity_pool_id}/providers/{workload_identity_pool_provider_id}'.
:param pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderOidcArgs']] oidc: An OpenId Connect 1.0 identity provider. Not compatible with the property aws.
Structure is documented below.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] state: The state of the provider. * STATE_UNSPECIFIED: State unspecified. * ACTIVE: The provider is active, and may be used to
validate authentication credentials. * DELETED: The provider is soft-deleted. Soft-deleted providers are permanently
deleted after approximately 30 days. You can restore a soft-deleted provider using UndeleteWorkloadIdentityPoolProvider.
You cannot reuse the ID of a soft-deleted provider until it is permanently deleted.
:param pulumi.Input[str] workload_identity_pool_id: The ID used for the pool, which is the final component of the pool resource name. This
value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
:param pulumi.Input[str] workload_identity_pool_provider_id: The ID for the provider, which becomes the final component of the resource name. This
value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _WorkloadIdentityPoolProviderState.__new__(_WorkloadIdentityPoolProviderState)
__props__.__dict__["attribute_condition"] = attribute_condition
__props__.__dict__["attribute_mapping"] = attribute_mapping
__props__.__dict__["aws"] = aws
__props__.__dict__["description"] = description
__props__.__dict__["disabled"] = disabled
__props__.__dict__["display_name"] = display_name
__props__.__dict__["name"] = name
__props__.__dict__["oidc"] = oidc
__props__.__dict__["project"] = project
__props__.__dict__["state"] = state
__props__.__dict__["workload_identity_pool_id"] = workload_identity_pool_id
__props__.__dict__["workload_identity_pool_provider_id"] = workload_identity_pool_provider_id
return WorkloadIdentityPoolProvider(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="attributeCondition")
def attribute_condition(self) -> pulumi.Output[Optional[str]]:
"""
[A Common Expression Language](https://opensource.google/projects/cel) expression, in
plain text, to restrict what otherwise valid authentication credentials issued by the
provider should not be accepted.
The expression must output a boolean representing whether to allow the federation.
The following keywords may be referenced in the expressions:
* `assertion`: JSON representing the authentication credential issued by the provider.
* `google`: The Google attributes mapped from the assertion in the `attribute_mappings`.
* `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`.
The maximum length of the attribute condition expression is 4096 characters. If
unspecified, all valid authentication credential are accepted.
The following example shows how to only allow credentials with a mapped `google.groups`
value of `admins`:
```python
import pulumi
```
"""
return pulumi.get(self, "attribute_condition")
@property
@pulumi.getter(name="attributeMapping")
def attribute_mapping(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Maps attributes from authentication credentials issued by an external identity provider
to Google Cloud attributes, such as `subject` and `segment`.
Each key must be a string specifying the Google Cloud IAM attribute to map to.
The following keys are supported:
* `google.subject`: The principal IAM is authenticating. You can reference this value
in IAM bindings. This is also the subject that appears in Cloud Logging logs.
Cannot exceed 127 characters.
* `google.groups`: Groups the external identity belongs to. You can grant groups
access to resources using an IAM `principalSet` binding; access applies to all
members of the group.
You can also provide custom attributes by specifying `attribute.{custom_attribute}`,
where `{custom_attribute}` is the name of the custom attribute to be mapped. You can
define a maximum of 50 custom attributes. The maximum length of a mapped attribute key
is 100 characters, and the key may only contain the characters [a-z0-9_].
You can reference these attributes in IAM policies to define fine-grained access for a
workload to Google Cloud resources. For example:
* `google.subject`:
`principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}`
* `google.groups`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}`
* `attribute.{custom_attribute}`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}`
Each value must be a [Common Expression Language](https://opensource.google/projects/cel)
function that maps an identity provider credential to the normalized attribute specified
by the corresponding map key.
You can use the `assertion` keyword in the expression to access a JSON representation of
the authentication credential issued by the provider.
The maximum length of an attribute mapping expression is 2048 characters. When evaluated,
the total size of all mapped attributes must not exceed 8KB.
For AWS providers, the following rules apply:
- If no attribute mapping is defined, the following default mapping applies:
```python
import pulumi
```
- If any custom attribute mappings are defined, they must include a mapping to the
`google.subject` attribute.
For OIDC providers, the following rules apply:
- Custom attribute mappings must be defined, and must include a mapping to the
`google.subject` attribute. For example, the following maps the `sub` claim of the
incoming credential to the `subject` attribute on a Google token.
```python
import pulumi
```
"""
return pulumi.get(self, "attribute_mapping")
@property
@pulumi.getter
def aws(self) -> pulumi.Output[Optional['outputs.WorkloadIdentityPoolProviderAws']]:
"""
An Amazon Web Services identity provider. Not compatible with the property oidc.
Structure is documented below.
"""
return pulumi.get(self, "aws")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description for the provider. Cannot exceed 256 characters.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def disabled(self) -> pulumi.Output[Optional[bool]]:
"""
Whether the provider is disabled. You cannot use a disabled provider to exchange tokens.
However, existing tokens still grant access.
"""
return pulumi.get(self, "disabled")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[Optional[str]]:
"""
A display name for the provider. Cannot exceed 32 characters.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The resource name of the provider as
'projects/{project_number}/locations/global/workloadIdentityPools/{workload_identity_pool_id}/providers/{workload_identity_pool_provider_id}'.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def oidc(self) -> pulumi.Output[Optional['outputs.WorkloadIdentityPoolProviderOidc']]:
"""
An OpenId Connect 1.0 identity provider. Not compatible with the property aws.
Structure is documented below.
"""
return pulumi.get(self, "oidc")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
The state of the provider. * STATE_UNSPECIFIED: State unspecified. * ACTIVE: The provider is active, and may be used to
validate authentication credentials. * DELETED: The provider is soft-deleted. Soft-deleted providers are permanently
deleted after approximately 30 days. You can restore a soft-deleted provider using UndeleteWorkloadIdentityPoolProvider.
You cannot reuse the ID of a soft-deleted provider until it is permanently deleted.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="workloadIdentityPoolId")
def workload_identity_pool_id(self) -> pulumi.Output[str]:
"""
The ID used for the pool, which is the final component of the pool resource name. This
value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
"""
return pulumi.get(self, "workload_identity_pool_id")
@property
@pulumi.getter(name="workloadIdentityPoolProviderId")
def workload_identity_pool_provider_id(self) -> pulumi.Output[str]:
"""
The ID for the provider, which becomes the final component of the resource name. This
value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
"""
return pulumi.get(self, "workload_identity_pool_provider_id")
| 57.042889 | 237 | 0.669371 | 75,306 | 0.993352 | 0 | 0 | 71,900 | 0.948424 | 0 | 0 | 58,794 | 0.775544 |
87a8977fe67b0ca481dbef216d59dbccffefcd3d | 694 | py | Python | python/get_pj_urls.py | cookie1986/csa-chat-predict | 76fb07160930b0b824306b410b42c13e24983c19 | [
"MIT"
] | null | null | null | python/get_pj_urls.py | cookie1986/csa-chat-predict | 76fb07160930b0b824306b410b42c13e24983c19 | [
"MIT"
] | null | null | null | python/get_pj_urls.py | cookie1986/csa-chat-predict | 76fb07160930b0b824306b410b42c13e24983c19 | [
"MIT"
] | 1 | 2021-06-08T11:23:34.000Z | 2021-06-08T11:23:34.000Z | import requests
from bs4 import BeautifulSoup as soup
import pandas as pd

# PJ chatlog archive index, listing all chat logs by screen name.
ARCHIVE_URL = 'http://perverted-justice.com/?archive=byName'
# Destination for the exported URL list.
OUTPUT_CSV = 'C:/Users/Darren Cook/Documents/PhD Research/chat_logs/notes/chatlog_url.csv'


def get_chatlog_urls():
    """Scrape the archive index and return the list of chatlog URLs.

    Only links of the form ``./?archive=<name>`` are kept; each is turned into
    an absolute URL with ``&nocomm=true`` appended (fetch the log without
    reader comments). Duplicates are removed, so ordering is not guaranteed.
    """
    req_main = requests.get(ARCHIVE_URL)
    # Fail loudly instead of silently parsing an HTTP error page.
    req_main.raise_for_status()
    main_soup = soup(req_main.text, "html.parser")
    hrefs = [str(link.get('href')) for link in main_soup.find_all('a')]
    return list(set(
        'http://perverted-justice.com' + href + '&nocomm=true'
        for href in hrefs
        if href.startswith('./?archive=')
    ))


def main():
    """Collect the chatlog URLs and export them to CSV."""
    url_link = get_chatlog_urls()
    pd.DataFrame(data=url_link).to_csv(OUTPUT_CSV)


if __name__ == '__main__':
    # Guarding the entry point keeps the scrape from running on import.
    main()
| 34.7 | 120 | 0.75072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 333 | 0.479827 |
87a9f6cce805832e964e1ac6f9e5aea510c38d7e | 496 | py | Python | mtpipeline/ephem/mt_logging.py | STScI-Citizen-Science/MTPipeline | 903743afe55592ab67a240237d924c7c7383eec7 | [
"Unlicense"
] | 4 | 2015-10-03T02:30:50.000Z | 2016-04-07T04:07:19.000Z | mtpipeline/ephem/mt_logging.py | STScI-Citizen-Science/MTPipeline | 903743afe55592ab67a240237d924c7c7383eec7 | [
"Unlicense"
] | 3 | 2022-02-10T23:02:22.000Z | 2022-02-10T23:02:51.000Z | mtpipeline/ephem/mt_logging.py | STScI-Citizen-Science/MTPipeline | 903743afe55592ab67a240237d924c7c7383eec7 | [
"Unlicense"
] | null | null | null | """
This module contains the basic logging setup for the project.
"""
import datetime
import logging
import os
def setup_logging(module):
    """Configure root logging to write to a timestamped file for *module*.

    The log file lives under /astro/3/mutchler/mt/logs/<module>/ and is named
    <module>_<YYYY-MM-DD-HH-MM>.log.
    """
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')
    basename = '{}_{}.log'.format(module, timestamp)
    log_file = os.path.join('/astro/3/mutchler/mt/logs/', module, basename)
    logging.basicConfig(
        filename=log_file,
        format='%(asctime)s %(levelname)s: %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S %p',
        level=logging.INFO)
87ae65900773758ea1e8c983d49e6f56cd93b9ec | 795 | py | Python | clone/views.py | kurian-thomas/neuralcloning | 1458aeeaef6be32a11079a66d09fb94feeeba8c1 | [
"MIT"
] | 9 | 2018-11-15T16:50:59.000Z | 2021-10-05T09:58:36.000Z | clone/views.py | kurian-thomas/neuralcloning | 1458aeeaef6be32a11079a66d09fb94feeeba8c1 | [
"MIT"
] | null | null | null | clone/views.py | kurian-thomas/neuralcloning | 1458aeeaef6be32a11079a66d09fb94feeeba8c1 | [
"MIT"
] | 6 | 2018-11-26T05:38:34.000Z | 2021-03-30T16:30:21.000Z | from django.shortcuts import render, HttpResponse
from django.views.decorators.csrf import csrf_exempt
import os
import base64
# from .celery_test import test
# from clone_script import clone
def index(request):
    """Render the main page template."""
    return render(request, 'clone/index.html')


def results(request):
    """Render the results page template."""
    return render(request, 'clone/results.html')


def login(request):
    """Render the login page template."""
    return render(request, 'clone/login.html')
@csrf_exempt
def upload(request):
    """Accept a POSTed data-URI image and store it as ``STEPS/0.jpg``.

    Expects a ``base64_img`` form field shaped like
    ``data:image/<ext>;base64,<payload>``. Returns "success" once the decoded
    payload has been written, "fail" for non-POST requests.
    """
    if request.method == 'POST':
        data = request.POST["base64_img"]
        # Split the data-URI header from the base64 payload. Renamed from
        # `format`, which shadowed the builtin of the same name.
        header, imgstr = data.split(';base64,')
        # NOTE(review): the real extension is available via
        # header.split('/')[-1], but the file has always been saved as .jpg;
        # kept as-is so downstream paths stay stable.
        directory = "STEPS/0"
        filename = directory + ".jpg"
        with open(filename, "wb") as fh:
            fh.write(base64.b64decode(imgstr))
        # clone.delay()
        return HttpResponse("success")
    return HttpResponse("fail")
87ae6a9d8005591fc2347c0f2c5ef9bc24403258 | 1,777 | py | Python | wis2box/metadata/base.py | webb-ben/wis2node | e577d2bea5524f74872f47eee9deb35d6c510460 | [
"Apache-2.0"
] | 7 | 2021-10-05T11:48:50.000Z | 2022-02-04T12:47:15.000Z | wis2box/metadata/base.py | webb-ben/wis2node | e577d2bea5524f74872f47eee9deb35d6c510460 | [
"Apache-2.0"
] | 47 | 2022-02-15T18:24:22.000Z | 2022-03-31T11:32:52.000Z | wis2box/metadata/base.py | webb-ben/wis2node | e577d2bea5524f74872f47eee9deb35d6c510460 | [
"Apache-2.0"
] | 3 | 2022-02-16T19:36:36.000Z | 2022-03-14T08:14:20.000Z | ###############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
###############################################################################
import logging
from typing import Union
from pygeometa.core import read_mcf
LOGGER = logging.getLogger(__name__)
class BaseMetadata:
    """Base class for discovery-metadata generators.

    Subclasses implement :meth:`generate` for a specific output schema;
    :meth:`parse_record` is shared and turns raw MCF content into a dict.
    """

    def __init__(self):
        pass

    def generate(self, mcf: dict, schema: str = None) -> Union[dict, str]:
        """Generate metadata in a given schema.

        :param mcf: `dict` of MCF file
        :param schema: `str` of metadata schema to generate

        :returns: `dict` or `str` of metadata representation
        """
        raise NotImplementedError()

    def parse_record(self, metadata_record: bytes) -> dict:
        """Parse MCF metadata into a dict.

        :param metadata_record: string of metadata

        :return: `dict` of MCF
        """
        LOGGER.debug('reading MCF')
        mcf = read_mcf(metadata_record)
        return mcf
| 30.118644 | 79 | 0.630838 | 709 | 0.398987 | 0 | 0 | 0 | 0 | 0 | 0 | 1,319 | 0.742262 |
87ae933f0ebeb6f0ea11f62d21063be61ac683ec | 3,406 | py | Python | packages/core/minos-microservice-common/minos/common/setup.py | minos-framework/minos-python | 9a6ad6783361f3d8a497a088808b55ea7a938c6c | [
"MIT"
] | 247 | 2022-01-24T14:55:30.000Z | 2022-03-25T12:06:17.000Z | packages/core/minos-microservice-common/minos/common/setup.py | minos-framework/minos-python | 9a6ad6783361f3d8a497a088808b55ea7a938c6c | [
"MIT"
] | 168 | 2022-01-24T14:54:31.000Z | 2022-03-31T09:31:09.000Z | packages/core/minos-microservice-common/minos/common/setup.py | minos-framework/minos-python | 9a6ad6783361f3d8a497a088808b55ea7a938c6c | [
"MIT"
] | 21 | 2022-02-06T17:25:58.000Z | 2022-03-27T04:50:29.000Z | from __future__ import (
annotations,
)
import logging
import warnings
from pathlib import (
Path,
)
from typing import (
TYPE_CHECKING,
Optional,
Type,
TypeVar,
Union,
)
from .object import (
Object,
)
if TYPE_CHECKING:
from .config import (
Config,
)
logger = logging.getLogger(__name__)
S = TypeVar("S", bound="SetupMixin")
class SetupMixin(Object):
    """Setup Mixin class.

    Adds an async setup/destroy lifecycle to a class: ``setup()`` and
    ``destroy()`` are idempotent (guarded by ``_already_setup``) and are also
    driven by the async context-manager protocol (``async with``).
    """

    def __init__(self, *args, already_setup: bool = False, **kwargs):
        super().__init__(**kwargs)
        # Lifecycle flag: True once _setup() has run and until _destroy() runs.
        self._already_setup = already_setup

    @property
    def already_setup(self) -> bool:
        """Already Setup getter.

        :return: A boolean value.
        """
        return self._already_setup

    @property
    def already_destroyed(self) -> bool:
        """Already Destroy getter.

        :return: A boolean value.
        """
        return not self._already_setup

    @classmethod
    def from_config(cls: Type[S], config: Optional[Union[Config, Path]] = None, **kwargs) -> S:
        """Build a new instance from config.

        :param config: Config instance. If `None` is provided, default config is chosen.
        :param kwargs: Additional named arguments.
        :return: A instance of the called class.
        """
        # Config/Inject are imported lazily here, presumably to avoid import
        # cycles with minos.common.config — TODO confirm.
        if isinstance(config, Path):
            from .config import (
                Config,
            )

            config = Config(config)

        if config is None:
            from .config import (
                Config,
            )
            from .injections import (
                Inject,
            )

            # No explicit config given: resolve the injected default one.
            config = Inject.resolve(Config)

        logger.info(f"Building a {cls.__name__!r} instance from config...")
        return cls._from_config(config=config, **kwargs)

    @classmethod
    def _from_config(cls: Type[S], config: Config, **kwargs) -> S:
        # Default hook: subclasses override to pull values out of the config.
        return cls(**kwargs)

    async def __aenter__(self: S) -> S:
        await self.setup()
        return self

    async def setup(self) -> None:
        """Setup miscellaneous repository things.

        :return: This method does not return anything.
        """
        # Idempotent: only the first call actually runs _setup().
        if not self._already_setup:
            logger.debug(f"Setting up a {type(self).__name__!r} instance...")
            await self._setup()
            self._already_setup = True

    async def _setup(self) -> None:
        # Hook for subclasses; base implementation does nothing.
        return

    async def __aexit__(self, exc_type, exc_value, exc_traceback):
        await self.destroy()

    async def destroy(self) -> None:
        """Destroy miscellaneous repository things.

        :return: This method does not return anything.
        """
        # Idempotent counterpart of setup().
        if self._already_setup:
            logger.debug(f"Destroying a {type(self).__name__!r} instance...")
            await self._destroy()
            self._already_setup = False

    async def _destroy(self) -> None:
        """Destroy miscellaneous repository things."""

    def __del__(self):
        # Warn when a still-set-up instance is garbage-collected without
        # destroy(); getattr default guards partially-initialized instances.
        if not getattr(self, "already_destroyed", True):
            warnings.warn(
                f"A not destroyed {type(self).__name__!r} instance is trying to be deleted...", ResourceWarning
            )
class MinosSetup(SetupMixin):
    """Deprecated alias of :class:`SetupMixin`, kept for backwards compatibility."""

    def __init__(self, *args, **kwargs):
        # stacklevel=2 points the warning at the caller instantiating the
        # deprecated class instead of at this __init__ frame; both class
        # names are rendered with !r for a consistent message.
        warnings.warn(
            f"{MinosSetup!r} has been deprecated. Use {SetupMixin!r} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
| 25.609023 | 111 | 0.591897 | 3,020 | 0.886671 | 0 | 0 | 1,296 | 0.380505 | 976 | 0.286553 | 1,028 | 0.30182 |
87afa90fb2718364a61c00608eb9958c15861cad | 6,389 | py | Python | addons/base_address_extended/models/res_partner.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/base_address_extended/models/res_partner.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/base_address_extended/models/res_partner.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import re
from odoo import api, fields, models, _
from odoo.exceptions import UserError
class Partner(models.Model):
    """Extends ``res.partner`` with structured street sub-fields.

    ``street`` stays the source of truth; ``street_name``, ``street_number``
    and ``street_number2`` are computed from it (and written back into it)
    following the country's ``street_format`` pattern.
    """
    _inherit = ['res.partner']

    # Sub-fields of `street`; all share the same compute/inverse pair.
    street_name = fields.Char(
        'Street Name', compute='_compute_street_data', inverse='_inverse_street_data', store=True)
    street_number = fields.Char(
        'House', compute='_compute_street_data', inverse='_inverse_street_data', store=True)
    street_number2 = fields.Char(
        'Door', compute='_compute_street_data', inverse='_inverse_street_data', store=True)

    def _inverse_street_data(self):
        """Updates the street field.
        Writes the `street` field on the partners when one of the sub-fields in STREET_FIELDS
        has been touched"""
        street_fields = self._get_street_fields()
        for partner in self:
            street_format = (partner.country_id.street_format or
                '%(street_number)s/%(street_number2)s %(street_name)s')
            previous_field = None
            previous_pos = 0
            street_value = ""
            separator = ""
            # iter on fields in street_format, detected as '%(<field_name>)s'
            for re_match in re.finditer(r'%\(\w+\)s', street_format):
                # [2:-2] is used to remove the extra chars '%(' and ')s'
                field_name = re_match.group()[2:-2]
                field_pos = re_match.start()
                if field_name not in street_fields:
                    raise UserError(_("Unrecognized field %s in street format.", field_name))
                if not previous_field:
                    # first iteration: add heading chars in street_format
                    if partner[field_name]:
                        street_value += street_format[0:field_pos] + partner[field_name]
                else:
                    # get the substring between 2 fields, to be used as separator
                    separator = street_format[previous_pos:field_pos]
                    # only emit the separator when both neighbours have values
                    if street_value and partner[field_name]:
                        street_value += separator
                    if partner[field_name]:
                        street_value += partner[field_name]
                previous_field = field_name
                previous_pos = re_match.end()
            # add trailing chars in street_format
            street_value += street_format[previous_pos:]
            partner.street = street_value

    @api.depends('street')
    def _compute_street_data(self):
        """Splits street value into sub-fields.
        Recomputes the fields of STREET_FIELDS when `street` of a partner is updated"""
        street_fields = self._get_street_fields()
        for partner in self:
            if not partner.street:
                # empty street: clear every sub-field
                for field in street_fields:
                    partner[field] = None
                continue

            street_format = (partner.country_id.street_format or
                '%(street_number)s/%(street_number2)s %(street_name)s')
            street_raw = partner.street
            vals = self._split_street_with_params(street_raw, street_format)
            # assign the values to the fields
            for k, v in vals.items():
                partner[k] = v
            # sub-fields absent from the parse result are reset
            for k in set(street_fields) - set(vals):
                partner[k] = None

    def _split_street_with_params(self, street_raw, street_format):
        """Parse `street_raw` following `street_format`.

        Returns a dict mapping street sub-field names to the substring of
        `street_raw` matched for each of them. `previous_greedy` tracks a
        space-separated `street_name` so that extra space-separated words are
        re-attached to it instead of being assigned to the next field.
        """
        street_fields = self._get_street_fields()
        vals = {}
        previous_pos = 0
        field_name = None
        # iter on fields in street_format, detected as '%(<field_name>)s'
        for re_match in re.finditer(r'%\(\w+\)s', street_format):
            field_pos = re_match.start()
            if not field_name:
                #first iteration: remove the heading chars
                street_raw = street_raw[field_pos:]
            # get the substring between 2 fields, to be used as separator
            separator = street_format[previous_pos:field_pos]
            field_value = None
            if separator and field_name:
                #maxsplit set to 1 to unpack only the first element and let the rest untouched
                tmp = street_raw.split(separator, 1)
                if previous_greedy in vals:
                    # attach part before space to preceding greedy field
                    append_previous, sep, tmp[0] = tmp[0].rpartition(' ')
                    street_raw = separator.join(tmp)
                    vals[previous_greedy] += sep + append_previous
                if len(tmp) == 2:
                    field_value, street_raw = tmp
                    vals[field_name] = field_value
            if field_value or not field_name:
                previous_greedy = None
                if field_name == 'street_name' and separator == ' ':
                    previous_greedy = field_name
                # select next field to find (first pass OR field found)
                # [2:-2] is used to remove the extra chars '%(' and ')s'
                field_name = re_match.group()[2:-2]
            else:
                # value not found: keep looking for the same field
                pass
            if field_name not in street_fields:
                raise UserError(_("Unrecognized field %s in street format.", field_name))
            previous_pos = re_match.end()
        # last field value is what remains in street_raw minus trailing chars in street_format
        trailing_chars = street_format[previous_pos:]
        if trailing_chars and street_raw.endswith(trailing_chars):
            vals[field_name] = street_raw[:-len(trailing_chars)]
        else:
            vals[field_name] = street_raw
        return vals

    def write(self, vals):
        """Standard write, plus street recomposition on country change.

        A new country may define a different `street_format`, so `street` is
        re-rendered from the sub-fields unless the caller set it explicitly.
        """
        res = super(Partner, self).write(vals)
        if 'country_id' in vals and 'street' not in vals:
            self._inverse_street_data()
        return res

    def _formatting_address_fields(self):
        """Returns the list of address fields usable to format addresses."""
        return super(Partner, self)._formatting_address_fields() + self._get_street_fields()

    def _get_street_fields(self):
        """Returns the fields that can be used in a street format.
        Overwrite this function if you want to add your own fields."""
        return ['street_name', 'street_number', 'street_number2']
| 45.635714 | 98 | 0.594616 | 6,197 | 0.969948 | 0 | 0 | 876 | 0.137111 | 0 | 0 | 1,901 | 0.297543 |
87afb24e4081ff8ae5ac4df3accb708ca9bd5693 | 10,032 | py | Python | third_party/aten/src/ATen/gen.py | AIHGF/caffe2 | b9e61b72cd460f4c9f644294a7bf0b6306df17fc | [
"Apache-2.0"
] | 58 | 2019-01-03T02:20:41.000Z | 2022-02-25T14:24:13.000Z | third_party/aten/src/ATen/gen.py | AIHGF/caffe2 | b9e61b72cd460f4c9f644294a7bf0b6306df17fc | [
"Apache-2.0"
] | 6 | 2019-02-12T03:52:08.000Z | 2020-12-17T02:40:37.000Z | third_party/aten/src/ATen/gen.py | AIHGF/caffe2 | b9e61b72cd460f4c9f644294a7bf0b6306df17fc | [
"Apache-2.0"
] | 5 | 2019-01-03T06:46:04.000Z | 2019-10-29T07:40:11.000Z | from optparse import OptionParser
import yaml
import cwrap_parser
import nn_parse
import native_parse
import preprocess_declarations
import function_wrapper
import dispatch_macros
import copy_wrapper
from code_template import CodeTemplate
# Command-line interface: -s locates the ATen source tree (templates and
# NativeFunctions.h are resolved relative to it), -o switches the script into
# "list output dependencies only" mode, -n disables CUDA code generation.
parser = OptionParser()
parser.add_option('-s', '--source-path', help='path to source director for tensorlib',
                  action='store', default='.')
parser.add_option('-o', '--output-dependencies',
                  help='only output a list of dependencies', action='store')
parser.add_option('-n', '--no-cuda', action='store_true')
options, files = parser.parse_args()
if options.output_dependencies is not None:
    # In dependency mode, write() records file names here instead of writing.
    output_dependencies_file = open(options.output_dependencies, 'w')

# Code templates that the generated Storage/Type/Tensor sources come from.
TEMPLATE_PATH = options.source_path + "/templates"
GENERATOR_DERIVED = CodeTemplate.from_file(
    TEMPLATE_PATH + "/GeneratorDerived.h")
STORAGE_DERIVED_CPP = CodeTemplate.from_file(
    TEMPLATE_PATH + "/StorageDerived.cpp")
STORAGE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/StorageDerived.h")
TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.cpp")
TYPE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.h")
TYPE_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Type.h")
TYPE_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/Type.cpp")
TENSOR_DERIVED_CPP = CodeTemplate.from_file(
    TEMPLATE_PATH + "/TensorDerived.cpp")
TENSOR_SPARSE_CPP = CodeTemplate.from_file(
    TEMPLATE_PATH + "/TensorSparse.cpp")
TENSOR_DENSE_CPP = CodeTemplate.from_file(
    TEMPLATE_PATH + "/TensorDense.cpp")
TENSOR_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorDerived.h")
TENSOR_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Tensor.h")
TENSOR_METHODS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorMethods.h")
FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Functions.h")
NATIVE_FUNCTIONS_PATH = options.source_path + "/NativeFunctions.h"

# Per-backend generator header metadata consumed by GeneratorDerived.h.
generators = {
    'CPUGenerator.h': {
        'name': 'CPU',
        'th_generator': 'THGenerator * generator;',
        'header': 'TH/TH.h',
    },
    'CUDAGenerator.h': {
        'name': 'CUDA',
        'th_generator': '',
        'header': 'THC/THC.h'
    },
}

backends = ['CPU']
if not options.no_cuda:
    backends.append('CUDA')
densities = ['Dense', 'Sparse']

# Each tuple is (ScalarName, c_type, accumulator scalar name, TH scalar type);
# unpacked in generate_storage_type_and_tensor().
scalar_types = [
    ('Byte', 'uint8_t', 'Long', 'uint8_t'),
    ('Char', 'int8_t', 'Long', 'int8_t'),
    ('Double', 'double', 'Double', 'double'),
    ('Float', 'float', 'Double', 'float'),
    ('Int', 'int', 'Long', 'int32_t'),
    ('Long', 'int64_t', 'Long', 'int64_t'),
    ('Short', 'int16_t', 'Long', 'int16_t'),
    ('Half', 'Half', 'Double', 'THHalf'),
]

# shared environment for non-derived base classes Type.h Tensor.h Storage.h
top_env = {
    'type_registrations': [],
    'type_headers': [],
    'type_method_declarations': [],
    'type_method_definitions': [],
    'type_method_inline_definitions': [],
    'tensor_method_declarations': [],
    'tensor_method_definitions': [],
    'function_declarations': [],
    'function_definitions': [],
    'type_ids': [],
}
def write(filename, s):
    """Write *s* to ATen/<filename>, or just record the path in -o mode."""
    path = "ATen/" + filename
    if options.output_dependencies is None:
        with open(path, "w") as out:
            out.write(s)
    else:
        # dependency-listing mode: record the would-be output file only
        output_dependencies_file.write(path + ";")
def format_yaml(data):
    """Serialize *data* to YAML without anchors/aliases.

    Returns "" in dependency-listing mode, where the (slow) dump would be
    discarded anyway.
    """
    if options.output_dependencies:
        # yaml formatting is slow so don't do it if we will ditch it.
        return ""

    # Subclass instead of monkey-patching yaml.dumper.SafeDumper: assigning
    # ignore_aliases on the shared class mutated it for every other user of
    # SafeDumper in the process.
    class NoAliasDumper(yaml.dumper.SafeDumper):
        def ignore_aliases(self, data):
            return True

    return yaml.dump(data, default_flow_style=False, Dumper=NoAliasDumper)
def generate_storage_type_and_tensor(backend, density, scalar_type, declarations):
    """Emit the Storage/Type/Tensor sources for one (backend, density, scalar).

    Builds the template environment `env`, writes the derived .h/.cpp files
    via write(), and appends this type's registration/header/id entries to the
    module-global `top_env`. Returns `env` for later cross-type passes.
    """
    scalar_name, c_type, accreal, th_scalar_type = scalar_type
    env = {}
    density_tag = 'Sparse' if density == 'Sparse' else ''
    th_density_tag = 'S' if density == 'Sparse' else ''
    env['Density'] = density
    env['ScalarName'] = scalar_name
    env['ScalarType'] = c_type
    env['THScalarType'] = th_scalar_type
    env['AccScalarName'] = accreal
    env['Storage'] = "{}{}Storage".format(backend, scalar_name)
    env['Type'] = "{}{}{}Type".format(density_tag, backend, scalar_name)
    env['Tensor'] = "{}{}{}Tensor".format(density_tag, backend, scalar_name)
    env['SparseTensor'] = "Sparse{}{}Tensor".format(backend, scalar_name)
    env['Backend'] = density_tag + backend

    # used for generating switch logic for external functions
    tag = density_tag + backend + scalar_name
    env['TypeID'] = 'TypeID::' + tag
    top_env['type_ids'].append(tag + ',')

    if backend == 'CUDA':
        env['th_headers'] = ['#include <THC/THC.h>',
                             '#include <THCUNN/THCUNN.h>',
                             '#undef THNN_',
                             '#undef THCIndexTensor_']
        # if density == 'Sparse':
        env['th_headers'] += ['#include <THCS/THCS.h>',
                              '#undef THCIndexTensor_']

        # THC drops the "Float" infix from its float type names.
        sname = '' if scalar_name == "Float" else scalar_name
        env['THType'] = 'Cuda{}'.format(sname)
        env['THStorage'] = 'THCuda{}Storage'.format(sname)
        if density == 'Dense':
            env['THTensor'] = 'THCuda{}Tensor'.format(sname)
        else:
            env['THTensor'] = 'THCS{}Tensor'.format(scalar_name)
        env['THIndexTensor'] = 'THCudaLongTensor'
        env['state'] = ['context->thc_state']
        env['isCUDA'] = 'true'
        env['storage_device'] = 'return storage->device;'
        env['Generator'] = 'CUDAGenerator'
    else:
        env['th_headers'] = ['#include <TH/TH.h>',
                             '#include <THNN/THNN.h>',
                             '#undef THNN_']
        # if density == 'Sparse':
        env['th_headers'].append('#include <THS/THS.h>')

        env['THType'] = scalar_name
        env['THStorage'] = "TH{}Storage".format(scalar_name)
        env['THTensor'] = 'TH{}{}Tensor'.format(th_density_tag, scalar_name)
        env['THIndexTensor'] = 'THLongTensor'
        env['state'] = []
        env['isCUDA'] = 'false'
        env['storage_device'] = 'throw std::runtime_error("CPU storage has no device");'
        env['Generator'] = 'CPUGenerator'
    env['AS_REAL'] = env['ScalarType']
    # Half and Long need explicit conversion helpers between TH and ATen types.
    if scalar_name == "Half":
        env['SparseTensor'] = 'Tensor'
        if backend == "CUDA":
            env['to_th_type'] = 'HalfFix<__half,Half>'
            env['to_at_type'] = 'HalfFix<Half,__half>'
            env['AS_REAL'] = 'convert<half,double>'
            env['THScalarType'] = 'half'
        else:
            env['to_th_type'] = 'HalfFix<THHalf,Half>'
            env['to_at_type'] = 'HalfFix<Half,THHalf>'
    elif scalar_name == 'Long':
        env['to_th_type'] = 'long'
        env['to_at_type'] = 'int64_t'
    else:
        env['to_th_type'] = ''
        env['to_at_type'] = ''

    declarations, definitions = function_wrapper.create_derived(
        env, declarations)
    env['type_derived_method_declarations'] = declarations
    env['type_derived_method_definitions'] = definitions

    if density != 'Sparse':
        # there are no special storage types for Sparse, they are composed
        # of Dense tensors
        write(env['Storage'] + ".cpp", STORAGE_DERIVED_CPP.substitute(env))
        write(env['Storage'] + ".h", STORAGE_DERIVED_H.substitute(env))
        env['TensorDenseOrSparse'] = TENSOR_DENSE_CPP.substitute(env)
        env['THTensor_nDimension'] = 'tensor->nDimension'
    else:
        env['TensorDenseOrSparse'] = TENSOR_SPARSE_CPP.substitute(env)
        env['THTensor_nDimension'] = 'tensor->nDimensionI + tensor->nDimensionV'

    write(env['Type'] + ".cpp", TYPE_DERIVED_CPP.substitute(env))
    write(env['Type'] + ".h", TYPE_DERIVED_H.substitute(env))
    write(env['Tensor'] + ".cpp", TENSOR_DERIVED_CPP.substitute(env))
    write(env['Tensor'] + ".h", TENSOR_DERIVED_H.substitute(env))

    # C++ snippet that registers this concrete Type in the runtime context.
    type_register = (('context->type_registry[static_cast<int>(Backend::{})]' +
                      '[static_cast<int>(ScalarType::{})].reset(new {}(context));')
                     .format(env['Backend'], scalar_name, env['Type']))
    top_env['type_registrations'].append(type_register)
    top_env['type_headers'].append(
        '#include "ATen/{}.h"'.format(env['Type']))

    return env
# Positional arguments are a mix of .cwrap specs and NN .yaml/.h headers.
cwrap_files = [f for f in files if f.endswith('.cwrap')]
nn_files = [f for f in files if f.endswith('.yaml') or f.endswith('.h')]

# Collect and normalize every function declaration from all three sources.
declarations = [d
                for file in cwrap_files
                for d in cwrap_parser.parse(file)]
print(nn_files)
declarations += nn_parse.run(nn_files)
declarations += native_parse.parse(NATIVE_FUNCTIONS_PATH)
declarations = preprocess_declarations.run(declarations)
for fname, env in generators.items():
    write(fname, GENERATOR_DERIVED.substitute(env))

# note: this will fill in top_env['type/tensor_method_declarations/definitions']
# and modify the declarations to include any information that will all_backends
# be used by function_wrapper.create_derived
output_declarations = function_wrapper.create_generic(top_env, declarations)

write("Declarations.yaml", format_yaml(output_declarations))

# populated by generate_storage_type_and_tensor
all_types = []
for backend in backends:
    for density in densities:
        for scalar_type in scalar_types:
            if density == 'Sparse' and scalar_type[0] == 'Half':
                # THS does not do half type yet.
                continue
            all_types.append(generate_storage_type_and_tensor(
                backend, density, scalar_type, declarations))

# Emit the backend-independent headers now that top_env is fully populated.
write('Type.h', TYPE_H.substitute(top_env))
write('Type.cpp', TYPE_CPP.substitute(top_env))
write('Tensor.h', TENSOR_H.substitute(top_env))
write('TensorMethods.h', TENSOR_METHODS_H.substitute(top_env))
write('Functions.h', FUNCTIONS_H.substitute(top_env))
write('Dispatch.h', dispatch_macros.create(all_types))
write('Copy.cpp', copy_wrapper.create(all_types))

if options.output_dependencies is not None:
    output_dependencies_file.close()
| 37.856604 | 88 | 0.650817 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,580 | 0.356858 |
87b2ac0cb62dd997f1308319e2198c8f122f2f8d | 458 | py | Python | tests/mock_module.py | nullpsifer/cryptosploit | e33cfca07397c05dffa734274c202acc7ff597b4 | [
"MIT"
] | null | null | null | tests/mock_module.py | nullpsifer/cryptosploit | e33cfca07397c05dffa734274c202acc7ff597b4 | [
"MIT"
] | null | null | null | tests/mock_module.py | nullpsifer/cryptosploit | e33cfca07397c05dffa734274c202acc7ff597b4 | [
"MIT"
] | null | null | null | from modules.abstract_module import *
class MockModule(AbstractModule):
    """Minimal AbstractModule implementation used by the test suite."""

    executed = False
    name = "mock_module"
    description = "Module for testing purposes."
    # One required argument followed by two optional ones.
    arguments = [
        ModuleArgumentDescription(arg_name, arg_desc, required)
        for arg_name, arg_desc, required in (
            ("Arg1", "Argument 1", True),
            ("Arg2", "Argument 2", False),
            ("Arg3", "Argument 3", False),
        )
    ]

    def execute(self):
        """Record that the module was run."""
        self.executed = True
87b40c595c59158b89d0513124bb59d2d086ec52 | 149 | py | Python | kornia/augmentation/_3d/intensity/__init__.py | Ishticode/kornia | 974abb43ec72d12dbd244a2fb247bbbab8498de0 | [
"ECL-2.0",
"Apache-2.0"
] | 418 | 2018-10-02T22:31:36.000Z | 2019-01-16T14:15:45.000Z | kornia/augmentation/_3d/intensity/__init__.py | Ishticode/kornia | 974abb43ec72d12dbd244a2fb247bbbab8498de0 | [
"ECL-2.0",
"Apache-2.0"
] | 94 | 2019-01-17T22:10:45.000Z | 2019-05-22T23:47:58.000Z | kornia/augmentation/_3d/intensity/__init__.py | Ishticode/kornia | 974abb43ec72d12dbd244a2fb247bbbab8498de0 | [
"ECL-2.0",
"Apache-2.0"
] | 25 | 2018-10-02T22:50:04.000Z | 2019-01-13T18:14:11.000Z | from kornia.augmentation._3d.intensity.equalize import RandomEqualize3D
from kornia.augmentation._3d.intensity.motion_blur import RandomMotionBlur3D
| 49.666667 | 76 | 0.892617 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
87b46b1723a9e240a2245cee77024e18ad09f5ff | 3,270 | py | Python | tmtrader/exchange_for_backtest/position_manager.py | reouno/tomatorader | b781206051129fa59439a0f314f4f1ed647a6852 | [
"MIT"
] | null | null | null | tmtrader/exchange_for_backtest/position_manager.py | reouno/tomatorader | b781206051129fa59439a0f314f4f1ed647a6852 | [
"MIT"
] | null | null | null | tmtrader/exchange_for_backtest/position_manager.py | reouno/tomatorader | b781206051129fa59439a0f314f4f1ed647a6852 | [
"MIT"
] | null | null | null | from logging import getLogger
from typing import Dict, List, Optional
from tmtrader.entity.order import FilledBasicOrder
from tmtrader.entity.position import ClosedPosition, Position, Positions, \
PositionsRef
from tmtrader.exchange_for_backtest.usecase.order_to_share import from_order
logger = getLogger(__name__)

# product_id -> open Positions held for that product
PositionsDict = Dict[int, Positions]
# positions closed by applying a single fill
ClosedPositions = List[ClosedPosition]
class PositionManager:
    """Tracks open positions per product and applies order fills to them."""

    def __init__(self):
        # product_id -> Positions currently held for that product
        self.__positions_dic: PositionsDict = dict()

    def current_positions_of(self, product_id: int) -> Optional[PositionsRef]:
        """Return a read-only view of the positions for *product_id*, or None."""
        if product_id in self.__positions_dic:
            return self.__positions_dic[product_id].to_ref()
        return None

    def current_positions(self) -> Dict[int, PositionsRef]:
        """Return read-only views of all non-empty positions, keyed by product."""
        return {k: v.to_ref() for k, v in self.__positions_dic.items() if
                v.len}

    def update_position(self,
                        order: FilledBasicOrder) -> ClosedPositions:
        """Apply a filled order to the held positions.

        A fill on the same side as the held positions extends them; an
        opposite-side fill closes them (possibly flipping direction with any
        leftover contracts). Returns the positions closed by this fill.
        """
        logger.debug(f'Got filled order at PositionManager: {order}')
        pid = order.product_id
        self.__log_size(pid, 'before')

        new_shares = from_order(order)
        # Remove the held positions (if any); they are re-inserted below.
        positions = self.__positions_dic.pop(pid, None)

        closed_pos: ClosedPositions = []
        if positions:
            # Same side as the held positions -> extend; opposite -> close.
            # This single comparison replaces the four symmetric long/short
            # buy/sell branches of the original implementation.
            if order.is_buy == positions.is_long:
                self.__add_positions(pid, positions, new_shares)
            else:
                closed_pos = self.__close_and_may_open(pid, positions,
                                                      new_shares)
        else:
            # Nothing held for this product: open a fresh position.
            self.__positions_dic[pid] = Positions(pid, new_shares,
                                                  order.is_buy)

        self.__log_size(pid, 'after')
        return closed_pos

    def __log_size(self, pid: int, when: str) -> None:
        """Debug-log how many positions are currently held for *pid*."""
        size = self.__positions_dic[pid].len if pid in self.__positions_dic else 0
        logger.debug(f'position size {when} update: {size}')

    def __add_positions(self, pid: int, positions: Positions,
                        new_shares: List[Position]):
        """Extend *positions* with *new_shares* and store them back."""
        positions.add_positions(new_shares)
        self.__positions_dic[pid] = positions

    def __close_and_may_open(self, pid: int, positions: Positions,
                             new_shares: List[Position]) -> ClosedPositions:
        """Close held positions against *new_shares*; flip side on overshoot."""
        closed = positions.close_positions(new_shares)
        if closed.remaining_contracts:
            # The fill exceeded the held size: open the leftover contracts in
            # the opposite direction.
            self.__positions_dic[pid] = Positions(pid,
                                                  closed.remaining_contracts,
                                                  is_long=not
                                                  positions.is_long)
        elif positions.len:
            # Partially closed: keep the remainder on its original side.
            self.__positions_dic[pid] = positions
        return closed.closed
| 38.023256 | 89 | 0.585627 | 2,868 | 0.877064 | 0 | 0 | 0 | 0 | 0 | 0 | 236 | 0.072171 |
87b5a69588b76f251c9cd5a2072d8ea5e658ab2d | 850 | py | Python | service/server.py | IsraelAbebe/fake-news-classification | 3c8c46d8e4222a5f70daea423b7a90480cb2044c | [
"MIT"
] | null | null | null | service/server.py | IsraelAbebe/fake-news-classification | 3c8c46d8e4222a5f70daea423b7a90480cb2044c | [
"MIT"
] | null | null | null | service/server.py | IsraelAbebe/fake-news-classification | 3c8c46d8e4222a5f70daea423b7a90480cb2044c | [
"MIT"
] | null | null | null |
import grpc
from concurrent import futures
import time
import sys
sys.path.insert(0, 'service/')
from service_spec import fake_news_pb2
from service_spec import fake_news_pb2_grpc
import json
import test
class fake_news_classificationServicer(fake_news_pb2_grpc.fake_news_classificationServicer):
    """gRPC servicer that runs the fake-news classifier over request text."""

    def classify(self, request, context):
        """Classify ``request.value`` and return an OutputMessage with the result."""
        reply = fake_news_pb2.OutputMessage()
        reply.result = test.predict(request.value)
        return reply
# Serve on a small thread pool; each RPC runs test.predict synchronously.
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
fake_news_pb2_grpc.add_fake_news_classificationServicer_to_server(fake_news_classificationServicer(), server)

print('Starting server. Listening on port 7011.')
server.add_insecure_port('0.0.0.0:7011')
server.start()

# grpc's start() does not block: idle in day-long sleeps until Ctrl-C,
# then stop the server with no grace period.
try:
    while True:
        time.sleep(86400)
except KeyboardInterrupt:
    server.stop(0)
| 23.611111 | 109 | 0.787059 | 262 | 0.308235 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.077647 |
87b849087828668df3e9bdba8cecbc7f2e80f614 | 3,807 | py | Python | authorization/models.py | Joost-dm/jooster | 04f3fa979b689b61a4aeae8a5309a09277404fd6 | [
"MIT"
] | 1 | 2020-06-17T02:51:35.000Z | 2020-06-17T02:51:35.000Z | authorization/models.py | Joost-dm/jooster | 04f3fa979b689b61a4aeae8a5309a09277404fd6 | [
"MIT"
] | 7 | 2020-06-06T18:32:37.000Z | 2022-03-12T00:33:42.000Z | authorization/models.py | Joost-dm/jooster | 04f3fa979b689b61a4aeae8a5309a09277404fd6 | [
"MIT"
] | null | null | null | from io import BytesIO
from PIL import Image
from django.contrib.auth.models import AbstractUser
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.mail import send_mail
from django.db import models
from resizeimage import resizeimage
from rest_framework.authtoken.models import Token
from main.settings import USER_SETTINGS
from django.db.models import ObjectDoesNotExist
def generate_avatar_path(obj, filename):
    """Build the per-user upload path ``images/avatars/<user id>/<filename>``."""
    return f'images/avatars/{obj.id}/{filename}'
class CustomUser(AbstractUser):
    """Extends the base django user model with display name and avatar handling."""
    # Public display name, distinct from the login username.
    displayed = models.CharField(
        max_length=40,
        unique=True,
        verbose_name='отоброжаемое имя'
    )
    # Uploaded avatar; stored under images/avatars/<id>/ via generate_avatar_path.
    avatar = models.ImageField(
        default='images/avatars/default_avatar.png',
        upload_to=generate_avatar_path,
        blank=True,
        null=True,
        verbose_name='аватар',
    )
    # Avatar hosted elsewhere (e.g. a social-login provider).
    foreign_avatar_url = models.URLField(
        null=True,
        blank=True,
        verbose_name='аватар из стороних источников'
    )

    def save(self, *args, **kwargs):
        """User's profile update handler.

        Runs the avatar pipeline before saving. ObjectDoesNotExist signals a
        brand-new user (no DB row yet) and triggers the notification mail;
        note it is also raised by get_avatar_ext() when no avatar file is
        attached, which would send the same mail — TODO confirm intended.
        """
        try:
            self.avatar_update_handler()
        except ObjectDoesNotExist:
            send_mail('Новый пользователь!', 'Зарегистрирован новый пользователь: ' + self.displayed,
                      'joost.mail@gmail.com', ['vasiliishirokov@gmail.com'], fail_silently=True)
        super(CustomUser, self).save(*args, **kwargs)

    def avatar_update_handler(self):
        """Downloaded avatar image update handler.

        Compares the stored avatar with the in-memory one and, if it changed,
        reprocesses it (extension parse, rename, resize).
        """
        user = CustomUser.objects.get(id=self.id)
        if user.avatar != self.avatar:
            self.get_avatar_ext()
            self.generate_avatar_name()
            self.resize_avatar()
            # self.delete_current_avatar()

    def get_avatar_ext(self):
        """Parses an avatar image extension into self.user_avatar_ext.

        'JPG' is normalized to 'jpeg' (PIL's format name). AttributeError
        (no avatar file) falls back to the default image and re-raises as
        ObjectDoesNotExist for save() to catch.
        """
        try:
            user_avatar_ext = self.avatar.name.split('.')[-1]
            if user_avatar_ext.upper() == 'JPG':
                user_avatar_ext = 'jpeg'
            self.user_avatar_ext = user_avatar_ext
        except AttributeError:
            self.avatar = 'images/avatars/default_avatar.png'
            raise ObjectDoesNotExist

    def resize_avatar(self):
        """Compresses user's avatar image. New sizes declared at project settings.

        Crops/resizes in memory with resizeimage and replaces self.avatar
        with an InMemoryUploadedFile — nothing touches disk here.
        """
        user_avatar = Image.open(self.avatar)
        avatar_settings = USER_SETTINGS['USER_AVATAR_SETTINGS']
        new_user_avatar = resizeimage.resize_cover(
            user_avatar,
            [avatar_settings['COMPRESSED_WIDTH'], avatar_settings['COMPRESSED_HEIGHT']]
        )
        new_user_avatar_io = BytesIO()
        new_user_avatar.save(new_user_avatar_io, format=self.user_avatar_ext)
        self.avatar = InMemoryUploadedFile(new_user_avatar_io, None, self.avatar.name, 'image/' + self.user_avatar_ext,
                                           new_user_avatar_io.tell(), None)

    # For using with local storage
    """
    def delete_current_avatar(self):
        try:
            user = CustomUser.objects.get(id=self.id)
        except IntegrityError:
            raise ValidationError('Некорректный пользователь.')
        storage, path = user.avatar.storage, user.avatar.path
        if self.avatar.name in path:
            storage.delete(path)"""

    def generate_avatar_name(self):
        """Generates an user's avatar image name according project settings."""
        avatar_settings = USER_SETTINGS['USER_AVATAR_SETTINGS']
        self.avatar.name = avatar_settings['AVATAR_IMAGE_NAME'] + '.' + self.user_avatar_ext

    AVATAR_FIELD = 'avatar'
    REQUIRED_FIELDS = ['email', 'avatar', 'displayed', 'foreign_avatar_url']
| 34.926606 | 119 | 0.653008 | 3,335 | 0.849249 | 0 | 0 | 0 | 0 | 0 | 0 | 1,350 | 0.343774 |
87b8e5d4a4c403c1e33d8ea9c1529f056cb42aa0 | 646 | py | Python | mapnik_style_generator/utils/PandasMerge.py | sjsafranek/GeoTileServer | 4f39ce3b6dc5bd374d4a683fd7a425e950b218f0 | [
"MIT"
] | 1 | 2017-09-29T21:30:33.000Z | 2017-09-29T21:30:33.000Z | mapnik_style_generator/utils/PandasMerge.py | sjsafranek/go-tileserver | 5453186b5c00281588aeba82311bea8ddf8d8ae2 | [
"BSD-3-Clause"
] | null | null | null | mapnik_style_generator/utils/PandasMerge.py | sjsafranek/go-tileserver | 5453186b5c00281588aeba82311bea8ddf8d8ae2 | [
"BSD-3-Clause"
] | null | null | null | #table join
#stefan safranek
import csv, os, sys, time
import pandas as pd
start_time = time.time() #start time
#File 1
IMPORT_FILE = "Burns_wO_LnLID.csv"
SAVE_FILE = IMPORT_FILE.replace('.csv','') + " MATCHED.csv"
df1 = pd.read_csv("Burns HD 58.csv", index_col=None, usecols=[0,1,2,4], parse_dates=True)
df2 = pd.read_csv(IMPORT_FILE, index_col=None, usecols=[0,1,2], parse_dates=True)
df = pd.merge(df1,df2)
df.to_csv(SAVE_FILE,on='Voters_StateVoterID',how='right')
#df.to_csv(SAVE_FILE,on='Full_Name',how='right')
end_time = time.time()
run_time = end_time - start_time
print run_time
#time.sleep(15)
| 22.275862 | 90 | 0.690402 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 201 | 0.311146 |
87b94e9ea498eb18e633d7320caae4f23a5511f9 | 3,637 | py | Python | arcticdem/ee_setsm_meta2file.py | whyjz/ArcticDEM-Batch-Pipeline | 7710c3ed5b466eef63619ee088256cf12f522bbd | [
"Apache-2.0"
] | null | null | null | arcticdem/ee_setsm_meta2file.py | whyjz/ArcticDEM-Batch-Pipeline | 7710c3ed5b466eef63619ee088256cf12f522bbd | [
"Apache-2.0"
] | null | null | null | arcticdem/ee_setsm_meta2file.py | whyjz/ArcticDEM-Batch-Pipeline | 7710c3ed5b466eef63619ee088256cf12f522bbd | [
"Apache-2.0"
] | null | null | null | import csv
from itertools import islice
import os
from glob import glob
import time
def demmeta(folder, mfile, errorlog):
    """Scrape SETSM strip-DEM metadata from every *.txt file under *folder*.

    One CSV row per successfully parsed metadata file is appended to *mfile*;
    paths of files that fail to parse are logged to *errorlog*. Both CSVs are
    truncated and re-created with a header row on each call.

    NOTE(review): the CSVs are opened in 'wb' mode, the Python 2 csv idiom;
    under Python 3 this would need mode 'w' with newline='' instead.
    """
    # Recursively collect every candidate metadata text file below the folder.
    metasource = [y for x in os.walk(folder) for y in glob(os.path.join(x[0], '*.txt'))]
    # Create the output CSV and write its header row.
    with open(mfile,'wb') as csvfile:
        writer=csv.DictWriter(csvfile,fieldnames=["id_no", "system:time_start", "platform", "catId1","catId2", "noDataValue", "releaseVersion", "srcImg1","srcImg2","setsmVersion","resolution","bitdepth","acqDate","minelv","maxelv","units"], delimiter=',')
        writer.writeheader()
    # Create the error-log CSV and write its header row.
    with open(errorlog,'wb') as csvfile:
        writer=csv.DictWriter(csvfile,fieldnames=["id_no"], delimiter=',')
        writer.writeheader()
    for files in metasource:
        print(files)
        with open(files,'r') as myfile:
            a=myfile.readlines()
        try:
            # str(a) is the repr of the whole list of lines; each field is
            # pulled out of that single string by splitting on its
            # "<key> = " marker. Fragile but works for SETSM meta files.
            demid=str(a).split('stripDemId = "')[1].split('v2.0";')[0]+"v20_dem"
            platform=str(a).split('platform = "')[1].split('";')[0]
            catId1 = str(a).split('catId1 = "')[1].split('";')[0]
            catId2 = str(a).split('catId2 = "')[1].split('";')[0]
            noDataValue = str(a).split('noDataValue = ')[1].split(';')[0]
            date_time = str(a).split('stripCreationTime = ')[1].split('T')[0]
            rls=str(a).split('releaseVersion = "')[1].split('";')[0]
            sim=str(a).split('sourceImage1 = "')[1].split('";')[0]
            sim2=str(a).split('sourceImage2 = "')[1].split('";')[0]
            setv=str(a).split('setsmVersion = ')[1].split(';')[0]
            rs=str(a).split('outputResolution = ')[1].split(';')[0]
            bp=str(a).split('bitsPerPixel = ')[1].split(';')[0]
            acq=str(a).split('acqDate = ')[1].split(';')[0]
            minelv=str(a).split('minElevValue = ')[1].split(';')[0]
            maxelv=str(a).split('maxElevValue = ')[1].split(';')[0]
            units=str(a).split('horizontalCoordSysUnits = "')[1].split('";')[0]
            # Convert both dates to epoch milliseconds. time.mktime treats
            # the struct_time as LOCAL time, so the result is timezone
            # dependent.
            pattern = '%Y-%m-%d'
            epoch = int(time.mktime(time.strptime(date_time, pattern)))*1000
            acqtime=int(time.mktime(time.strptime(acq, pattern)))*1000
            print("DEM ID",demid)
            print("Platform",platform)
            print("Acquisition Time",acqtime)
            print("Strip Creation Time",epoch)
            print('CatID1',catId1)
            print('CatID2',catId2)
            print("noDataValue",noDataValue)
            print("Release Version",rls)
            print("SourceImage 1",sim)
            print('SourceImage 2',sim2)
            print('SETSM Version',setv)
            print("BitsPerPixel",bp)
            print("Unit",units)
            print("Minimum Elevation",format(float(minelv),'.2f'))
            print("Maximum Elevation",format(float(maxelv),'.2f'))
            print("Output Resolution",format(float(rs),'.2f'))
            # Append the parsed record to the output CSV; the with-block
            # closes the file (the original's extra close() was redundant).
            with open(mfile,'a') as csvfile:
                writer=csv.writer(csvfile,delimiter=',',lineterminator='\n')
                writer.writerow([demid,epoch,platform,catId1,catId2,noDataValue,rls,sim,sim2,setv,format(float(rs),'.2f'),bp,acqtime,format(float(minelv),'.2f'),format(float(maxelv),'.2f'),units])
        except Exception:
            # BUG FIX: the original referenced an undefined name `infilename`
            # here, so any parse failure crashed with NameError instead of
            # being logged; `files` is the path actually being processed.
            print(files)
            with open(errorlog,'a') as csvfile:
                writer=csv.writer(csvfile,delimiter=',',lineterminator='\n')
                writer.writerow([files])
if __name__ == '__main__':
    # NOTE(review): main() is not defined anywhere in this module, so running
    # this file as a script raises NameError. Presumably a call such as
    # demmeta(<folder>, <mfile>, <errorlog>) was intended — confirm.
    main()
| 54.283582 | 255 | 0.534781 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 840 | 0.23096 |
87ba433641ad537815e75200ac70b792bbb16c42 | 152 | py | Python | ai/bertpredict/urls.py | thdwlsgus0/relay_15 | b5cb66ca250afd5402b0f19c870fda1852b77a28 | [
"MIT"
] | 3 | 2020-08-07T05:35:28.000Z | 2020-08-10T00:48:11.000Z | ai/bertpredict/urls.py | thdwlsgus0/relay_15 | b5cb66ca250afd5402b0f19c870fda1852b77a28 | [
"MIT"
] | null | null | null | ai/bertpredict/urls.py | thdwlsgus0/relay_15 | b5cb66ca250afd5402b0f19c870fda1852b77a28 | [
"MIT"
] | 11 | 2020-07-28T00:45:51.000Z | 2020-11-22T18:34:52.000Z | from django.urls import path
from . import views
app_name = 'bertpredict'
urlpatterns = [
path('bertpredict/', views.BertPredictView.as_view()),
] | 19 | 58 | 0.730263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.177632 |
87bcc23da78418d37f7d6aad89e6e754fd2069ff | 3,493 | py | Python | examples/eager_examples/scripts/example_custom_env.py | eager-dev/eager | f10ccbd7452acb3a29881ecd95c759f632c91da9 | [
"Apache-2.0"
] | 16 | 2021-07-02T14:48:53.000Z | 2022-02-23T02:53:01.000Z | examples/eager_examples/scripts/example_custom_env.py | eager-dev/eager | f10ccbd7452acb3a29881ecd95c759f632c91da9 | [
"Apache-2.0"
] | 37 | 2021-06-30T12:10:29.000Z | 2022-02-02T09:46:34.000Z | examples/eager_examples/scripts/example_custom_env.py | eager-dev/eager | f10ccbd7452acb3a29881ecd95c759f632c91da9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# ROS packages required
import rospy
from eager_core.eager_env import BaseEagerEnv
from eager_core.objects import Object
from eager_core.wrappers.flatten import Flatten
from eager_bridge_webots.webots_engine import WebotsEngine # noqa: F401
from eager_bridge_pybullet.pybullet_engine import PyBulletEngine # noqa: F401
from eager_process_safe_actions.safe_actions_processor import SafeActionsProcessor
from gym import spaces
import numpy as np
from stable_baselines3 import PPO
class MyEnv(BaseEagerEnv):
    """Custom EAGER environment: a UR5e arm observed by a Multisense S21 camera.

    Episodes run for a fixed number of steps; actions are joint commands
    filtered through a SafeActionsProcessor before being applied.
    """
    def __init__(self, engine, name="custom_env"):
        super().__init__(engine, name=name)

        # Fixed episode length (steps per rollout).
        self.STEPS_PER_ROLLOUT = 100
        self.steps = 0

        # Create ur5e robot
        self.ur5e = Object.create('ur5e1', 'eager_robot_ur5e', 'ur5e')

        # Add preprocessing so that commanded actions are safe
        # (collision/velocity checked joint commands).
        processor = SafeActionsProcessor(duration=0.1,
                                         checks_per_rad=15,
                                         vel_limit=3.0,
                                         robot_type='ur5e',
                                         collision_height=0.01,
                                         )
        # Raw actions live in [-pi, pi] per joint; the processor maps them to
        # safe commands using the robot's own observations.
        self.ur5e.actuators['joints'].add_preprocess(
            processor=processor,
            observations_from_objects=[self.ur5e],
            action_space=spaces.Box(low=-np.pi, high=np.pi, shape=(6,)))

        # Stereo camera used only for rendering (not part of observations).
        self.camera = Object.create('ms21', 'eager_sensor_multisense_s21', 'dual_cam')

        self._init_nodes([self.camera, self.ur5e])

        # The env exposes the robot's spaces directly.
        self.observation_space = self.ur5e.observation_space
        self.action_space = self.ur5e.action_space

    def step(self, action):
        """Apply one action, advance the simulation, and return (obs, reward, done, info)."""
        # Set actions before stepping
        self.ur5e.set_action(action)

        # Step the environment
        self._step()
        self.steps += 1

        # Get observations
        obs = self.ur5e.get_obs()
        return obs, self._get_reward(obs), self._is_done(obs), self.ur5e.get_state()

    def reset(self) -> object:
        """Reset the arm to a fixed pose and return the first observation."""
        self.steps = 0

        # Set desired reset state
        reset_states = dict()
        reset_states['joint_pos'] = np.array([0, -np.pi / 2, 0, 0, 0, 0], dtype='float32')
        reset_states['joint_vel'] = np.array([0, 0, 0, 0, 0, 0], dtype='float32')

        self.ur5e.reset(states=reset_states)

        # Reset the environment
        self._reset()

        # Get new observations
        return self.ur5e.get_obs()

    def render(self, mode, **kwargs):
        # Use camera to render rgb images (drop the depth channel of the RGBD frame).
        rgbd = self.camera.sensors['camera_right'].get_obs()
        return rgbd[:, :, :3]

    def _get_reward(self, obs):
        # Negative squared distance of the last joint's position from 2 rad.
        return -(self.ur5e.get_state(['joint_pos'])['joint_pos'][5] - 2)**2  # You may come up with something here, Bas

    def _is_done(self, obs):
        # Episode ends after a fixed number of steps.
        return self.steps >= self.STEPS_PER_ROLLOUT
if __name__ == '__main__':
    # EAGER environments communicate over ROS, so a node must exist first.
    rospy.init_node('ur5e_example', anonymous=True, log_level=rospy.WARN)

    # Engine specific parameters
    # engine = WebotsEngine(world='$(find ur5e_example)/worlds/ur5e_cam.wbt')
    engine = PyBulletEngine(gui=True)

    env = MyEnv(engine, name="my_env")
    env = Flatten(env)  # flatten dict observations/actions for SB3
    env.seed(42)

    # Warm-up rollout with random actions before training.
    obs = env.reset()
    for i in range(1000):
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
        env.render()
        if done:
            obs = env.reset()

    # Train a PPO policy on the environment.
    model = PPO('MlpPolicy', env, verbose=1)
    model.learn(total_timesteps=100000)

    env.close()
| 30.911504 | 109 | 0.619238 | 2,334 | 0.668194 | 0 | 0 | 0 | 0 | 0 | 0 | 668 | 0.19124 |
87c0c5db07800916d6e2f834710cacb09b92bba1 | 11,190 | py | Python | DataFormats/FWLite/test/RefTest_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | DataFormats/FWLite/test/RefTest_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | DataFormats/FWLite/test/RefTest_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | # Configuration file for RefTest_t
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.load("FWCore.Framework.test.cmsExceptionsFatal_cff")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(2)
)
process.source = cms.Source("EmptySource")
process.WhatsItESProducer = cms.ESProducer("WhatsItESProducer")
process.DoodadESSource = cms.ESSource("DoodadESSource")
process.Thing = cms.EDProducer("ThingProducer",
offsetDelta = cms.int32(1)
)
process.OtherThing = cms.EDProducer("OtherThingProducer")
process.thingProducer = cms.EDProducer("ThingProducer",
offsetDelta = cms.int32(100),
nThings = cms.int32(50)
)
process.trackOfThingsProducerA = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(0, 1, 2, 3, 4, 5, 6, 7, 8)
)
process.trackOfThingsProducerB = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(0, 1, 2, 3)
)
process.trackOfThingsProducerC = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(4, 5, 6, 7)
)
process.trackOfThingsProducerD = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(10, 11, 12, 13, 14, 15, 16, 17, 18)
)
process.trackOfThingsProducerDMinus = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(10, 11, 12, 13, 14, 15, 16, 17)
)
process.trackOfThingsProducerDPlus = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(10, 11, 12, 13, 14, 15, 16, 17, 18, 21)
)
process.trackOfThingsProducerE = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(10, 11, 12, 13, 14)
)
process.trackOfThingsProducerF = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(14, 15, 16, 17)
)
process.trackOfThingsProducerG = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(20, 21, 22, 23, 24, 25, 26, 27, 28)
)
process.trackOfThingsProducerH = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(20, 21, 22, 23)
)
process.trackOfThingsProducerI = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(24, 25, 26, 27)
)
process.trackOfThingsProducerJ = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(30, 31, 32, 33, 34, 35, 36, 37, 38)
)
process.trackOfThingsProducerK = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(30, 31, 32, 33)
)
process.trackOfThingsProducerL = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(34, 35, 36, 37)
)
process.trackOfThingsProducerM = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(40, 41, 42, 43, 44, 45, 46, 47, 48)
)
process.trackOfThingsProducerN = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(40, 41, 42, 43)
)
process.trackOfThingsProducerO = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(44, 45, 46, 47)
)
process.thinningThingProducerA = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thingProducer'),
trackTag = cms.InputTag('trackOfThingsProducerA'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(50)
)
process.thinningThingProducerB = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerA'),
trackTag = cms.InputTag('trackOfThingsProducerB'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerC = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerA'),
trackTag = cms.InputTag('trackOfThingsProducerC'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerD = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thingProducer'),
trackTag = cms.InputTag('trackOfThingsProducerD'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(50)
)
process.thinningThingProducerE = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerD'),
trackTag = cms.InputTag('trackOfThingsProducerE'),
offsetToThinnedKey = cms.uint32(10),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerF = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerD'),
trackTag = cms.InputTag('trackOfThingsProducerF'),
offsetToThinnedKey = cms.uint32(10),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerG = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thingProducer'),
trackTag = cms.InputTag('trackOfThingsProducerG'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(50)
)
process.thinningThingProducerH = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerG'),
trackTag = cms.InputTag('trackOfThingsProducerH'),
offsetToThinnedKey = cms.uint32(20),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerI = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerG'),
trackTag = cms.InputTag('trackOfThingsProducerI'),
offsetToThinnedKey = cms.uint32(20),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerJ = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thingProducer'),
trackTag = cms.InputTag('trackOfThingsProducerJ'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(50)
)
process.thinningThingProducerK = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerJ'),
trackTag = cms.InputTag('trackOfThingsProducerK'),
offsetToThinnedKey = cms.uint32(30),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerL = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerJ'),
trackTag = cms.InputTag('trackOfThingsProducerL'),
offsetToThinnedKey = cms.uint32(30),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerM = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thingProducer'),
trackTag = cms.InputTag('trackOfThingsProducerM'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(50)
)
process.thinningThingProducerN = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerM'),
trackTag = cms.InputTag('trackOfThingsProducerN'),
offsetToThinnedKey = cms.uint32(40),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerO = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerM'),
trackTag = cms.InputTag('trackOfThingsProducerO'),
offsetToThinnedKey = cms.uint32(40),
expectedCollectionSize = cms.uint32(9)
)
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('goodDataFormatsFWLite.root'),
outputCommands = cms.untracked.vstring(
'keep *',
'drop *_thingProducer_*_*',
'drop *_thinningThingProducerD_*_*',
'drop *_thinningThingProducerH_*_*',
'drop *_thinningThingProducerI_*_*',
'drop *_thinningThingProducerJ_*_*',
'drop *_thinningThingProducerK_*_*',
'drop *_thinningThingProducerL_*_*',
'drop *_thinningThingProducerM_*_*',
'drop *_thinningThingProducerN_*_*',
)
)
process.out2 = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('good2DataFormatsFWLite.root')
)
process.out_other = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring('drop *',
'keep edmtestOtherThings_*_*_*',
'keep *_TriggerResults_*_*'),
fileName = cms.untracked.string('other_onlyDataFormatsFWLite.root')
)
process.thinningTestPath = cms.Path(process.thingProducer
* process.trackOfThingsProducerA
* process.trackOfThingsProducerB
* process.trackOfThingsProducerC
* process.trackOfThingsProducerD
* process.trackOfThingsProducerDMinus
* process.trackOfThingsProducerDPlus
* process.trackOfThingsProducerE
* process.trackOfThingsProducerF
* process.trackOfThingsProducerG
* process.trackOfThingsProducerH
* process.trackOfThingsProducerI
* process.trackOfThingsProducerJ
* process.trackOfThingsProducerK
* process.trackOfThingsProducerL
* process.trackOfThingsProducerM
* process.trackOfThingsProducerN
* process.trackOfThingsProducerO
* process.thinningThingProducerA
* process.thinningThingProducerB
* process.thinningThingProducerC
* process.thinningThingProducerD
* process.thinningThingProducerE
* process.thinningThingProducerF
* process.thinningThingProducerG
* process.thinningThingProducerH
* process.thinningThingProducerI
* process.thinningThingProducerJ
* process.thinningThingProducerK
* process.thinningThingProducerL
* process.thinningThingProducerM
* process.thinningThingProducerN
* process.thinningThingProducerO
)
process.p = cms.Path(process.Thing*process.OtherThing)
process.outp = cms.EndPath(process.out*process.out2*process.out_other)
| 39.401408 | 77 | 0.67471 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,378 | 0.212511 |
87c130e031505973ff9591b6295aca73c9bc126b | 158 | py | Python | keycards/urls.py | oangervuori/namubufferi | b9353b1d1a32e18e93cb1e9bd2b591950d54269a | [
"MIT"
] | 2 | 2016-12-05T03:31:47.000Z | 2017-02-13T20:10:39.000Z | keycards/urls.py | oangervuori/namubufferi | b9353b1d1a32e18e93cb1e9bd2b591950d54269a | [
"MIT"
] | 1 | 2016-12-14T10:53:15.000Z | 2016-12-17T18:52:25.000Z | keycards/urls.py | oangervuori/namubufferi | b9353b1d1a32e18e93cb1e9bd2b591950d54269a | [
"MIT"
] | 1 | 2017-01-14T10:56:28.000Z | 2017-01-14T10:56:28.000Z | from django.urls import path
from . import views
app_name = "keycards"
urlpatterns = [
path(f"", views.KeycardLoginFormView.as_view(), name="login"),
]
| 17.555556 | 66 | 0.708861 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 20 | 0.126582 |
87c2898d1865ba6df1980805dbc8552821fbbfba | 2,859 | py | Python | src/drias2020/__init__.py | jpvergnes/pybrgmlib | b768cd75635abd85f09c68efc4ab92d1857f792a | [
"MIT"
] | null | null | null | src/drias2020/__init__.py | jpvergnes/pybrgmlib | b768cd75635abd85f09c68efc4ab92d1857f792a | [
"MIT"
] | null | null | null | src/drias2020/__init__.py | jpvergnes/pybrgmlib | b768cd75635abd85f09c68efc4ab92d1857f792a | [
"MIT"
] | null | null | null | from drias2020.extract_drias2020 import extract_drias2020
from drias2020.convert_drias2020 import convert_drias2020
alls_models = [
('CNRM-CM5-LR', 'ALADIN63', 'historical'),
('CNRM-CM5-LR', 'ALADIN63', 'rcp2.6'),
('CNRM-CM5-LR', 'ALADIN63', 'rcp4.5'),
('CNRM-CM5-LR', 'ALADIN63', 'rcp8.5'),
('CNRM-CM5-LR', 'RACMO22E', 'historical'),
('CNRM-CM5-LR', 'RACMO22E', 'rcp2.6'),
('CNRM-CM5-LR', 'RACMO22E', 'rcp4.5'),
('CNRM-CM5-LR', 'RACMO22E', 'rcp8.5'),
('EC-EARTH', 'RACMO22E', 'historical'),
('EC-EARTH', 'RACMO22E', 'rcp2.6'),
('EC-EARTH', 'RACMO22E', 'rcp4.5'),
('EC-EARTH', 'RACMO22E', 'rcp8.5'),
('EC-EARTH', 'RCA4', 'historical'),
('EC-EARTH', 'RCA4', 'rcp2.6'),
('EC-EARTH', 'RCA4', 'rcp4.5'),
('EC-EARTH', 'RCA4', 'rcp8.5'),
('HadGEM2-ES', 'CCLM4-8-17', 'historical'),
('HadGEM2-ES', 'CCLM4-8-17', 'rcp4.5'),
('HadGEM2-ES', 'CCLM4-8-17', 'rcp8.5'),
('HadGEM2-ES', 'RegCM4-6', 'historical'),
('HadGEM2-ES', 'RegCM4-6', 'rcp2.6'),
('HadGEM2-ES', 'RegCM4-6', 'rcp8.5'),
('IPSL-CM5A-MR', 'RCA4', 'historical'),
('IPSL-CM5A-MR', 'RCA4', 'rcp4.5'),
('IPSL-CM5A-MR', 'RCA4', 'rcp8.5'),
('IPSL-CM5A-MR', 'WRF381P', 'historical'),
('IPSL-CM5A-MR', 'WRF381P', 'rcp4.5'),
('IPSL-CM5A-MR', 'WRF381P', 'rcp8.5'),
('MPI-ESM-LR', 'CCLM4-8-17', 'historical'),
('MPI-ESM-LR', 'CCLM4-8-17', 'rcp2.6'),
('MPI-ESM-LR', 'CCLM4-8-17', 'rcp4.5'),
('MPI-ESM-LR', 'CCLM4-8-17', 'rcp8.5'),
('MPI-ESM-LR', 'REMO2009', 'historical'),
('MPI-ESM-LR', 'REMO2009', 'rcp2.6'),
('MPI-ESM-LR', 'REMO2009', 'rcp4.5'),
('MPI-ESM-LR', 'REMO2009', 'rcp8.5'),
('NorESM1-M', 'HIRHAM5', 'historical'),
('NorESM1-M', 'HIRHAM5', 'rcp4.5'),
('NorESM1-M', 'HIRHAM5', 'rcp8.5'),
('NorESM1-M', 'REMO2015', 'historical'),
('NorESM1-M', 'REMO2015', 'rcp2.6'),
('NorESM1-M', 'REMO2015', 'rcp8.5')
]
short_list_mf = [
('CNRM-CM5-LR', 'ALADIN63', 'historical'),
('CNRM-CM5-LR', 'ALADIN63', 'rcp2.6'),
('CNRM-CM5-LR', 'ALADIN63', 'rcp4.5'),
('CNRM-CM5-LR', 'ALADIN63', 'rcp8.5'),
('EC-EARTH', 'RACMO22E', 'historical'),
('EC-EARTH', 'RACMO22E', 'rcp2.6'),
('EC-EARTH', 'RACMO22E', 'rcp4.5'),
('EC-EARTH', 'RACMO22E', 'rcp8.5'),
('HadGEM2-ES', 'CCLM4-8-17', 'historical'),
('HadGEM2-ES', 'CCLM4-8-17', 'rcp4.5'),
('HadGEM2-ES', 'CCLM4-8-17', 'rcp8.5'),
('IPSL-CM5A-MR', 'WRF381P', 'historical'),
('IPSL-CM5A-MR', 'WRF381P', 'rcp4.5'),
('IPSL-CM5A-MR', 'WRF381P', 'rcp8.5'),
('MPI-ESM-LR', 'CCLM4-8-17', 'historical'),
('MPI-ESM-LR', 'CCLM4-8-17', 'rcp2.6'),
('MPI-ESM-LR', 'CCLM4-8-17', 'rcp4.5'),
('MPI-ESM-LR', 'CCLM4-8-17', 'rcp8.5'),
]
exemple_variables = [
'prtotAdjust',
'evspsblpotAdjust_Hg0175',
'prsnAdjust',
'tasAdjust',
] | 38.12 | 57 | 0.535852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,920 | 0.671563 |
87c3057e22efbeaed539521bd317f016e2009be3 | 2,118 | py | Python | userbot/plugins/_light.py | justteen/BUZZ-USERBOT | 55651cce150e1d04d2c61efb2565ef9f46b42933 | [
"BSL-1.0"
] | null | null | null | userbot/plugins/_light.py | justteen/BUZZ-USERBOT | 55651cce150e1d04d2c61efb2565ef9f46b42933 | [
"BSL-1.0"
] | null | null | null | userbot/plugins/_light.py | justteen/BUZZ-USERBOT | 55651cce150e1d04d2c61efb2565ef9f46b42933 | [
"BSL-1.0"
] | null | null | null | # Rewritten by @keinshin
import io
from userbot import CMD_LIST, ALIVE_NAME, bot as light
from userbot import CMD_HELP
from userbot.utils import lightning_cmd
import asyncio
from var import Var
# Display name used by the userbot; falls back to a reminder message when the
# ALIVE_NAME Heroku config var is unset.
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "Pls Go To Heroku Vars Then in `ALIVE_NAME`place You Telegram `Your Desired Name` "
@light.on(lightning_cmd(pattern="help ?(.*)"))
async def cmd_list(event):
    """Handle the `.help` command: list plugins, a plugin's commands, or show inline help."""
    # Ignore messages that start with a letter or a common bot prefix so the
    # handler only reacts to the userbot's own command trigger characters.
    if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
        lightningusername = Var.TG_BOT_USER_NAME_BF_HER
        input_str = event.pattern_match.group(1)
        # No helper-bot configured, or explicit `.help text`: plain text list.
        if lightningusername is None or input_str == "text":
            string = ""
            for i in CMD_LIST:
                string += "ℹ️ " + i + "\n"
                for iter_list in CMD_LIST[i]:
                    string += "    `" + str(iter_list) + "`"
                    string += "\n"
                string += "\n"
            if len(string) > 4095:
                # Telegram message limit exceeded — send a short hint instead.
                # NOTE(review): `borg` is not defined in this module; it is
                # presumably injected as a global by the userbot framework —
                # confirm, otherwise this branch raises NameError.
                await borg.send_message(event.chat_id, "`Lol Try .help`")
                await asyncio.sleep(5)
            else:
                await event.edit(string)
        elif input_str:
            # `.help <plugin>`: list the commands registered by that plugin.
            if input_str in CMD_LIST:
                string = "Commands found in {}:\n".format(input_str)
                for i in CMD_LIST[input_str]:
                    string += "\n " + i
                    string += "\n"
                await event.edit(string)
            else:
                await event.edit("`Wait Checking..`")
                await asyncio.sleep(2)
                await event.edit(input_str + " ☹️ is not a valid plugin😞😞!")
        else:
            # Bare `.help`: show the interactive inline help via the helper bot.
            light_help_strin = """**𝐁𝐔𝐙𝐙 Heres With The Detailed Help For CMDs** 😉😉 !\n If Faced Any Bug Please Give The Feed Back at [𝐁𝐔𝐙𝐙 Support](https://t.me/ossuport):"""
            # NOTE(review): `bot` is also an undefined module-level name here
            # (only `light` is imported) — likely framework-injected; confirm.
            results = await bot.inline_query(  # pylint:disable=E0602
                lightningusername, light_help_strin
            )
            # Post the first inline result into the chat, then remove the
            # triggering `.help` message.
            await results[0].click(
                event.chat_id, reply_to=event.reply_to_msg_id, hide_via=True
            )
            await event.delete()
| 39.962264 | 175 | 0.54627 | 0 | 0 | 0 | 0 | 1,810 | 0.837188 | 1,763 | 0.815449 | 481 | 0.222479 |
87c4398ff0b611f2762232d295fa661ec24f4e12 | 4,593 | py | Python | model/regressors/Hourglass.py | meansnothing/Deep-vSlam | 2e0197f52b0520f134ac4144ae7364d201765208 | [
"MIT"
] | null | null | null | model/regressors/Hourglass.py | meansnothing/Deep-vSlam | 2e0197f52b0520f134ac4144ae7364d201765208 | [
"MIT"
] | null | null | null | model/regressors/Hourglass.py | meansnothing/Deep-vSlam | 2e0197f52b0520f134ac4144ae7364d201765208 | [
"MIT"
] | 1 | 2021-06-28T11:40:06.000Z | 2021-06-28T11:40:06.000Z | import torch
import torch.nn as nn
from torchvision import models
import torch.nn.functional as F
def model_parser(model, sum_mode=False, dropout_rate=0.0, bayesian=False):
    """Build a pose-regression network for the requested backbone.

    Args:
        model: Backbone family name; only 'Resnet' (ResNet-34) is supported.
        sum_mode: Merge encoder skip connections by summation instead of
            channel concatenation in the hourglass decoder.
        dropout_rate: Dropout probability applied before the regressors.
        bayesian: Keep dropout active at inference time (MC dropout).

    Returns:
        An HourglassNet wrapping a pretrained ResNet-34 encoder.

    Raises:
        ValueError: If *model* names an unsupported backbone.
    """
    if model == 'Resnet':
        base_model = models.resnet34(pretrained=True)
        return HourglassNet(base_model, sum_mode, dropout_rate, bayesian)
    # BUG FIX: the original used `assert 'Unvalid Model'`, which asserts a
    # non-empty (always truthy) string and therefore never fired; execution
    # then crashed later with base_model=None. Fail fast and loudly instead.
    raise ValueError("Unsupported model %r; expected 'Resnet'" % (model,))
class HourglassNet(nn.Module):
    """Hourglass encoder/decoder for 6-DoF camera pose regression.

    The encoder is a ResNet-34-style backbone; the decoder upsamples back to
    1/4 resolution with transposed convolutions, merging encoder skip
    connections either by summation (``sum_mode=True``) or by channel
    concatenation. Two fully-connected heads regress translation (3 values)
    and rotation as a quaternion (4 values).

    NOTE(review): fc_dim_reduce expects a 56x56x32 decoder output, which
    implies 224x224 network input — confirm against the data pipeline.
    """

    def __init__(self, base_model, sum_mode=False, dropout_rate=0.0, bayesian=False):
        """
        Args:
            base_model: ResNet-style backbone exposing layer1..layer4; its
                first four children form the stem (conv/bn/relu/maxpool).
            sum_mode: Sum skip connections instead of concatenating them.
            dropout_rate: Dropout probability before the regression heads.
            bayesian: Keep dropout active in eval mode (MC dropout).
        """
        super(HourglassNet, self).__init__()

        self.bayesian = bayesian
        self.dropout_rate = dropout_rate
        self.sum_mode = sum_mode

        # Encoding blocks: backbone stem plus the four residual stages.
        self.init_block = nn.Sequential(*list(base_model.children())[:4])
        self.res_block1 = base_model.layer1
        self.res_block2 = base_model.layer2
        self.res_block3 = base_model.layer3
        self.res_block4 = base_model.layer4

        # Decoding blocks. With concatenation the decoder inputs carry the
        # skip channels as well, hence the doubled in-channel counts.
        if sum_mode:
            self.deconv_block1 = nn.ConvTranspose2d(512, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False, output_padding=1)
            self.deconv_block2 = nn.ConvTranspose2d(256, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False, output_padding=1)
            self.deconv_block3 = nn.ConvTranspose2d(128, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False, output_padding=1)
            self.conv_block = nn.Conv2d(64, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        else:
            self.deconv_block1 = nn.ConvTranspose2d(512, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False, output_padding=1)
            self.deconv_block2 = nn.ConvTranspose2d(512, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False, output_padding=1)
            self.deconv_block3 = nn.ConvTranspose2d(256, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False, output_padding=1)
            self.conv_block = nn.Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)

        # Regression heads: shared dimensionality reduction, then separate
        # translation (xyz) and rotation (quaternion) outputs.
        self.fc_dim_reduce = nn.Linear(56 * 56 * 32, 1024)
        self.fc_trans = nn.Linear(1024, 3)
        self.fc_rot = nn.Linear(1024, 4)

        # Kaiming-initialize the newly added (non-backbone) layers.
        init_modules = [self.deconv_block1, self.deconv_block2, self.deconv_block3, self.conv_block,
                        self.fc_dim_reduce, self.fc_trans, self.fc_rot]

        for module in init_modules:
            # BUG FIX: the original tested isinstance(module, nn.Conv3d),
            # which never matches the 2-D conv_block listed above, silently
            # leaving it at PyTorch's default initialization. Test nn.Conv2d
            # so every listed module is actually initialized.
            if isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        """Return (translation, rotation) predictions for an image batch."""
        # Encoder.
        x = self.init_block(x)
        x_res1 = self.res_block1(x)
        x_res2 = self.res_block2(x_res1)
        x_res3 = self.res_block3(x_res2)
        x_res4 = self.res_block4(x_res3)

        # Decoder with skip connections (sum or channel concat per sum_mode).
        x_deconv1 = self.deconv_block1(x_res4)
        if self.sum_mode:
            x_deconv1 = x_res3 + x_deconv1
        else:
            x_deconv1 = torch.cat((x_res3, x_deconv1), dim=1)

        x_deconv2 = self.deconv_block2(x_deconv1)
        if self.sum_mode:
            x_deconv2 = x_res2 + x_deconv2
        else:
            x_deconv2 = torch.cat((x_res2, x_deconv2), dim=1)

        x_deconv3 = self.deconv_block3(x_deconv2)
        if self.sum_mode:
            x_deconv3 = x_res1 + x_deconv3
        else:
            x_deconv3 = torch.cat((x_res1, x_deconv3), dim=1)

        x_conv = self.conv_block(x_deconv3)
        x_linear = x_conv.view(x_conv.size(0), -1)
        x_linear = self.fc_dim_reduce(x_linear)
        x_linear = F.relu(x_linear)

        # Dropout stays active at inference when bayesian (MC dropout).
        dropout_on = self.training or self.bayesian
        if self.dropout_rate > 0:
            x_linear = F.dropout(x_linear, p=self.dropout_rate, training=dropout_on)

        trans = self.fc_trans(x_linear)
        rot = self.fc_rot(x_linear)
        return trans, rot
if __name__ == '__main__':
    # Smoke test: build the ResNet-34 hourglass and print its layer structure.
    # NOTE(review): downloads pretrained ImageNet weights on first run.
    model=model_parser('Resnet')
    print(model)
| 39.93913 | 142 | 0.627477 | 4,089 | 0.890268 | 0 | 0 | 0 | 0 | 0 | 0 | 471 | 0.102547 |
87c6d0a0ba7a3214919a21e563389329abb8370f | 5,414 | py | Python | data/faces.py | FabianSchuetze/ssd.pytorch | 4508da2384aa74e42757d86c5a794242fd15f980 | [
"MIT"
] | null | null | null | data/faces.py | FabianSchuetze/ssd.pytorch | 4508da2384aa74e42757d86c5a794242fd15f980 | [
"MIT"
] | null | null | null | data/faces.py | FabianSchuetze/ssd.pytorch | 4508da2384aa74e42757d86c5a794242fd15f980 | [
"MIT"
] | null | null | null | r"""
Class to the python faces
"""
from typing import List, Tuple
import xml.etree.ElementTree as ET
import torch
import torch.utils.data as data
import cv2
import numpy as np
from PIL import Image
from torchvision.transforms import functional as F
class FacesDB(data.Dataset):
    """Face-landmark detection dataset backed by a dlib-style XML database.

    Each ``<image>`` element in the XML file references an image on disk
    and contains ``<box>`` elements for the landmarks glabella, left eye,
    right eye and nose tip.

    Arguments:
        database (string): path to the XML annotation file.
        transform (callable, optional): augmentation applied to the image
            and its target, called as ``transform(img, boxes, labels)``
            (SSD-style).
    """

    def __init__(self, database: str, transform=None):
        self._database = database
        self.ids = self._load_images()
        # Landmark name -> integer class id.
        self._conversion = {'glabella': 0, 'left_eye': 1, 'right_eye': 2,
                            'nose_tip': 3}
        self.transform = transform
        self.name = 'Faces'

    def _load_images(self):
        """Return all ``<image>`` elements of the XML database."""
        tree = ET.parse(self._database)
        return tree.findall('images/image')

    def _convert_to_box(self, box: ET.Element) -> List[int]:
        """Convert a ``<box>`` element into ``[xmin, ymin, xmax, ymax]``."""
        xmin = int(box.get('left'))
        ymin = int(box.get('top'))
        xmax = xmin + int(box.get('width'))
        ymax = ymin + int(box.get('height'))
        return [xmin, ymin, xmax, ymax]

    def _append_label(self, box: ET.Element) -> int:
        """Return the integer class id of the box's ``<label>`` element."""
        label = box.find('label').text
        return self._conversion[label]

    def _load_sample(self, idx) -> Tuple[List]:
        """Return ``(PIL image, targets, height, width)`` for sample ``idx``.

        Each target is ``[xmin, ymin, xmax, ymax, label]`` with the
        coordinates normalized to [0, 1] by the image width (x) and
        height (y).
        """
        # The original implementation re-bound the loop-source variable
        # ("sample") inside the loop body; distinct names are used here.
        image_tag = self.ids[idx]
        img = Image.open(image_tag.get('file'))
        height, width = img.height, img.width
        res = []
        for tag in image_tag.findall('box'):
            box = self._convert_to_box(tag)
            # Even indices are x coordinates, odd indices y coordinates.
            target = [box[i] / (width if i % 2 == 0 else height)
                      for i in range(4)]
            target.append(self._append_label(tag))
            res.append(target)
        return img, res, height, width

    def pull_item(self, index: int):
        """Return ``(image_tensor, target, height, width)`` for ``index``.

        When a transform is set, ``target`` is an ndarray of
        ``[xmin, ymin, xmax, ymax, label]`` rows; otherwise it is the raw
        list produced by :meth:`_load_sample`.
        """
        img, target, height, width = self._load_sample(index)
        # HWC float array in [0, 1] for the augmentation pipeline.
        img = F.to_tensor(img).numpy().transpose(1, 2, 0)
        if self.transform is not None:
            target = np.array(target)
            img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])
            target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
        img = F.to_tensor(img)
        return img, target, height, width

    def __getitem__(self, index):
        img, gts = self.pull_item(index)[:2]
        return img, gts

    def __len__(self):
        return len(self.ids)

    def pull_image(self, index):
        '''Return the image at ``index`` as loaded by ``cv2.imread``.

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to show
        Return:
            BGR numpy array (not a PIL image)
        '''
        sample = self.ids[index]
        img = cv2.imread(sample.get('file'))
        return img

    def pull_tensor(self, index):
        '''Returns the original image at an index in tensor form

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to show
        Return:
            tensorized version of img, squeezed
        '''
        return torch.Tensor(self.pull_image(index)).unsqueeze_(0)
| 33.8375 | 81 | 0.577392 | 5,161 | 0.953269 | 0 | 0 | 0 | 0 | 0 | 0 | 2,647 | 0.488918 |
87ca3a57df23d8770609b8c197e911f0d1988dd7 | 1,473 | py | Python | exchanges/okex.py | soulmachine/crypto-market-data | 1dbf1cfd28754a37dd054777feadc1554e1cccaf | [
"Apache-2.0"
] | null | null | null | exchanges/okex.py | soulmachine/crypto-market-data | 1dbf1cfd28754a37dd054777feadc1554e1cccaf | [
"Apache-2.0"
] | null | null | null | exchanges/okex.py | soulmachine/crypto-market-data | 1dbf1cfd28754a37dd054777feadc1554e1cccaf | [
"Apache-2.0"
] | null | null | null | from typing import Any, Dict, List
from .utils import get_json
def fetch_markets(market_type: str) -> List[Dict[str, Any]]:
    """Return every trading market of the given type from the exchange.

    ``market_type`` must be one of ``future``, ``option``, ``spot`` or
    ``swap``; anything else raises ``ValueError``.
    """
    if market_type == 'future':
        return _fetch_future_markets()
    if market_type == 'option':
        return _fetch_option_markets()
    if market_type == 'spot':
        return _fetch_spot_markets()
    if market_type == 'swap':
        return _fetch_swap_markets()
    raise ValueError(f'Unknown market type: {market_type}')
def _fetch_future_markets() -> List[Dict[str, Any]]:
    """Return all futures instruments listed on OKEx."""
    return get_json('https://www.okex.com/api/futures/v3/instruments')
def _fetch_spot_markets() -> List[Dict[str, Any]]:
    """Return all spot instruments, ordered by instrument id."""
    markets = get_json('https://www.okex.com/api/spot/v3/instruments')
    return sorted(markets, key=lambda market: market['instrument_id'])
def _fetch_swap_markets() -> List[Dict[str, Any]]:
    """Return all perpetual-swap instruments listed on OKEx."""
    return get_json('https://www.okex.com/api/swap/v3/instruments')
def _fetch_option_markets_underlying(underlying: str) -> List[Dict[str, Any]]:
    """Return the option instruments for a single underlying index."""
    return get_json(f'https://www.okex.com/api/option/v3/instruments/{underlying}')
def _fetch_option_markets() -> List[Dict[str, Any]]:
    """Return the option instruments across all supported underlyings."""
    underlying = ["BTC-USD", "ETH-USD", "EOS-USD"]
    return [
        market
        for underlying_symbol in underlying
        for market in _fetch_option_markets_underlying(underlying_symbol)
    ]
| 30.6875 | 78 | 0.680923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 364 | 0.247115 |
87cc1d411068ff6128e0109b9dbc111a69527c35 | 3,218 | py | Python | client/netmgr.py | kendase3/every | 83b543c54a2dd071f0a34f128c5baa20a7e58def | [
"BSD-2-Clause"
] | 1 | 2017-06-12T18:36:20.000Z | 2017-06-12T18:36:20.000Z | client/netmgr.py | kendase3/every | 83b543c54a2dd071f0a34f128c5baa20a7e58def | [
"BSD-2-Clause"
] | null | null | null | client/netmgr.py | kendase3/every | 83b543c54a2dd071f0a34f128c5baa20a7e58def | [
"BSD-2-Clause"
] | null | null | null |
from twisted.internet import defer, reactor
from twisted.protocols import basic
from twisted.internet.protocol import Protocol, ReconnectingClientFactory
from time import sleep
# local
import screen
import stevent
class NetMgr:
	"""Client-side network manager (Python 2 / Twisted).

	new abstraction:
		1) this module receives the latest screen from the server.
		2) this module sends all user input in the form of an event list to the server.
	"""
	HOST = "localhost"  # default server host
	PORT = 50025  # default server port
	LINE_ENDING = "\r\n"  # protocol line terminator expected by LineReceiver
	#TODO: should eventually prompt for host and port
	# and remember a list of previously used hosts and ports
	# (or at least the last one)
	def __init__(self, host=None):
		# Connect (with automatic reconnection, see IngressClientFactory)
		# and start the reactor non-blocking; the caller must then pump it
		# via iterate().
		if host == None:
			host = NetMgr.HOST
		reactor.connectTCP(host, NetMgr.PORT, IngressClientFactory(self))
		reactor.startRunning(False)
		self.screen = None  # latest screen pushed by the server (None = nothing pending)
		self.client = None  # active IngressClient, set by the factory on connect
		self.quit = False  # set when the connection is lost/failed
		self.failed = False  # set when the initial connection attempt fails
	def iterate(self):
		"""Pump the Twisted reactor once; call from the main loop."""
		reactor.iterate()
	def sendEvents(self, outgoing):
		"""Serialize each event via stevent.byte and send it to the server."""
		if self.client == None:
			print "No client! =0 "
			return
		#TODO: what if an event happens to match '\r\n'? it could happen.
		for event in outgoing:
			outBytes = stevent.byte(event)
			self.client.sendMessage(str(outBytes))
	def receiveScreen(self, screenBytes):
		"""
		the server has sent us an updated screen object
		"""
		self.screen = screen.unbyte(screenBytes)
	def popScreen(self):
		"""
		gets the screen and clears the
		screen object so we know
		when we get a new one
		"""
		ret = self.screen
		self.screen = None
		return ret
	def hasScreen(self):
		"""
		returns whether or not there is a screen update
		"""
		if self.screen == None:
			return False
		else:
			return True
	def cleanup(self):
		"""Close the connection and shut the reactor down cleanly."""
		if self.client != None:
			self.client.transport.loseConnection()
		#FIXME: should use deferreds and wait exact amount
		sleep(1)
		reactor.stop()
		# allow the stop to actually be processed
		reactor.iterate()
class IngressClient(basic.LineReceiver):
	"""Line-based protocol: receives screen updates, sends event bytes."""
	def __init__(self, netMgr):
		self.netMgr = netMgr
	def lineReceived(self, line):
		"""Handle one server line; every line is treated as a screen update."""
		# we assume it is a screen update
		# and pass it up to netMgr
		#print "USING LINE MODE!"
		self.netMgr.receiveScreen(line)
	def sendMessage(self, line):
		"""Write one message to the server, terminated by LINE_ENDING."""
		# send out the message
		#print "sending|%s|" % (line + NetMgr.LINE_ENDING)
		#print "\nlen=%d" % len(line + NetMgr.LINE_ENDING)
		self.transport.write(line + NetMgr.LINE_ENDING)
class IngressClientFactory(ReconnectingClientFactory):
	"""Factory that rebuilds IngressClient connections and flags netMgr state."""
	def __init__(self, netMgr):
		self.netMgr = netMgr
	def startedConnecting(self, connector):
		print 'Started to connect.'
	def buildProtocol(self, addr):
		"""Create a new IngressClient and register it with the manager."""
		print 'Connected.'
		print 'Resetting connection delay.'
		self.resetDelay()
		ic = IngressClient(self.netMgr)
		# the latest client is always the correct one
		self.netMgr.client = ic
		return ic
	def clientConnectionLost(self, connector, reason):
		print 'Lost connection!\n'
		#print 'Reason: %s' % reason
		# NOTE(review): this invokes the base class's clientConnectionFailed
		# rather than clientConnectionLost -- looks like a copy-paste slip;
		# confirm intended reconnection behavior.
		ReconnectingClientFactory.clientConnectionFailed(
			self, connector, reason)
		self.netMgr.quit = True
	def clientConnectionFailed(self, connector, reason):
		self.netMgr.failed = True
		print 'Failed to connect to server. Are you sure one is running at %s on port %d?' % (NetMgr.HOST, NetMgr.PORT)
		self.netMgr.quit = True
| 25.951613 | 114 | 0.711933 | 2,995 | 0.930702 | 0 | 0 | 0 | 0 | 0 | 0 | 1,163 | 0.361405 |
87cd80e00b84a57e97e8bf916ce62b8c5fca17b5 | 6,336 | py | Python | dataplaybook/tasks/io_mongo.py | kellerza/data-playbook | 382c369505a5cb0c28bd786ebdda51c05417165c | [
"Apache-2.0"
] | 3 | 2018-07-06T08:34:46.000Z | 2021-05-27T23:29:04.000Z | dataplaybook/tasks/io_mongo.py | kellerza/data-playbook | 382c369505a5cb0c28bd786ebdda51c05417165c | [
"Apache-2.0"
] | null | null | null | dataplaybook/tasks/io_mongo.py | kellerza/data-playbook | 382c369505a5cb0c28bd786ebdda51c05417165c | [
"Apache-2.0"
] | null | null | null | """MongoDB IO tasks."""
import logging
from typing import List, Optional, Sequence
from urllib.parse import urlparse
import attr
from icecream import ic # noqa pylint: disable=unused-import
from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError
import voluptuous as vol
from dataplaybook import Columns, Table, task
from dataplaybook.utils import PlaybookError
_LOGGER = logging.getLogger(__name__)
def _clean_netloc(db_netloc: str) -> str:
if "/" not in db_netloc:
return db_netloc
try:
res = urlparse(db_netloc)
return res.netloc
except AttributeError as err:
_LOGGER.error("could not parse URL: %s: %s", db_netloc, err)
raise err
@attr.s(slots=True)
class MongoURI:
    """A parsed MongoDB URI: netloc, database, collection and optional set id."""

    netloc = attr.ib(converter=_clean_netloc)
    database = attr.ib()
    collection = attr.ib()
    set_id = attr.ib(default="")

    @staticmethod
    def new_from_string(db_uri: str, set_id=None):
        """Parse ``mdb://host:port/database/collection/[set_id]``.

        Raises:
            vol.Invalid: if the scheme is unknown, or if ``set_id`` is
                given both as an argument and inside the URI path.
        """
        try:
            res = urlparse(db_uri)
        except AttributeError as err:
            _LOGGER.error("could not parse URL: %s: %s", db_uri, err)
            raise err
        if res.scheme not in ["mdb", "mongodb", "db"]:
            raise vol.Invalid("mdb://host:port/database/collection/[set_id]")
        pth = res.path.split("/")
        if len(pth) == 4:
            if set_id:
                # Fixed: was "vol.InInvalid", which does not exist in
                # voluptuous and raised AttributeError instead of a
                # validation error.
                raise vol.Invalid("set_id specified, not allowed in mdb URI")
            set_id = pth[3]
        return MongoURI(
            netloc=res.netloc,
            database=pth[1],
            collection=pth[2],
            set_id=set_id,
        )

    def __str__(self) -> str:
        return f"{self.netloc}/{self.database}/{self.collection}/{self.set_id}"

    def get_client(self, connect=True) -> MongoClient:
        """Return a MongoClient for this URI's netloc."""
        return MongoClient(self.netloc, connect=connect)
@task()
def read_mongo(
    mdb: MongoURI,
    *,
    set_id: Optional[str] = None,
) -> Table:
    """Read data from a MongoDB collection.

    Yields each document with the ``_id`` and ``_sid`` bookkeeping fields
    stripped.  The explicit ``set_id`` argument overrides the one embedded
    in ``mdb``.
    """
    client = MongoClient(mdb.netloc, connect=True)
    collection = client[mdb.database][mdb.collection]
    sid = set_id if set_id else mdb.set_id
    if sid:
        cursor = collection.find({"_sid": sid})
    else:
        cursor = collection.find()
    cursor.batch_size(200)
    for document in cursor:
        document.pop("_sid", None)
        document.pop("_id", None)
        yield document
@task()
def write_mongo(
    table: Table, mdb: MongoURI, *, set_id: Optional[str] = None, force=False
):
    """Write data to a MongoDB collection.

    With a set id (explicit or from ``mdb``), existing documents of that
    set are replaced; without one, rows are simply inserted.  An empty
    ``table`` does not wipe an existing set unless ``force`` is True.

    Raises:
        PlaybookError: if the MongoDB server cannot be reached.
    """
    if not set_id:
        set_id = mdb.set_id
    try:
        client = MongoClient(mdb.netloc, connect=True)
        col = client[mdb.database][mdb.collection]
        if not set_id:
            _LOGGER.info("Writing %s documents", len(table))
            client[mdb.database][mdb.collection].insert_many(table)
            return
        filtr = {"_sid": set_id}
        # count_documents replaces Collection.count, which was deprecated
        # in PyMongo 3.7 and removed in PyMongo 4.
        existing_count = col.count_documents(filtr)
        if not force and existing_count > 0 and not table:
            _LOGGER.error(
                "Trying to replace %s documents with an empty set", existing_count
            )
            return
        _LOGGER.info(
            "Replacing %s documents matching %s, %s new",
            existing_count,
            set_id,
            len(table),
        )
        col.delete_many(filtr)
        if table:
            col.insert_many([dict(d, _sid=set_id) for d in table])
    except ServerSelectionTimeoutError as err:
        raise PlaybookError(f"Could not open connection to mdb {mdb}") from err
@task
def columns_to_list(table: Table, *, list_column: str, columns: Columns) -> None:
    """Collapse boolean columns into one list-valued column.

    For each row, the names of the given columns whose value is truthy
    are collected into ``list_column``; every listed column is removed
    from the row (truthy or not).
    """
    for row in table:
        present = []
        for name in columns:
            if row.pop(name, False):
                present.append(name)
        row[list_column] = present
@task
def list_to_columns(table: Table, *, list_column: str, columns: Columns) -> None:
    """Expand a list-valued column into boolean columns.

    A column is set to True on a row when its name appears in the row's
    ``list_column``; the list column itself is then removed.
    """
    for row in table:
        members = row[list_column]
        for name in columns:
            if name in members:
                row[name] = True
        del row[list_column]
@task
def mongo_list_sids(mdb: MongoURI) -> List[str]:
    """Return the distinct ``_sid`` values present in the collection."""
    client = MongoClient(mdb.netloc, connect=True)
    collection = client[mdb.database][mdb.collection]
    return collection.distinct("_sid")
@task
def mongo_delete_sids(*, mdb: MongoURI, sids: List[str]):
    """Delete every document belonging to the given set ids.

    The literal string "None" (or None itself) selects documents that
    have no ``_sid`` field at all.
    """
    client = MongoClient(mdb.netloc, connect=True)
    collection = client[mdb.database][mdb.collection]
    for sid in sids:
        if sid is None or sid == "None":
            collection.delete_many({"_sid": {"$exists": False}})
        else:
            collection.delete_many({"_sid": sid})
@task
def mongo_sync_sids(
    *, mdb_local: MongoURI, mdb_remote: MongoURI, ignore_remote: Sequence[str] = None
):
    """Sync two MongoDB collections. Only sync _sid's where the count is different.

    Sets listed in ``ignore_remote`` are never deleted from the remote,
    even when they no longer exist locally.
    """
    # Per-set document counts on each side.
    agg = [{"$group": {"_id": "$_sid", "count": {"$sum": 1}}}]
    # get local
    l_db = mdb_local.get_client()[mdb_local.database][mdb_local.collection]
    lsc = {i["_id"]: i["count"] for i in l_db.aggregate(agg)}
    # get remote
    r_db = mdb_remote.get_client()[mdb_remote.database][mdb_remote.collection]
    rsc = {i["_id"]: i["count"] for i in r_db.aggregate(agg)}
    for sid, lval in lsc.items():
        # pop() so that whatever remains in rsc afterwards exists only remotely.
        rval = rsc.pop(sid, None)
        if rval != lval:
            # counts are different! push the whole local set to the remote
            mdb_local.set_id = sid
            lcl = read_mongo(mdb=mdb_local)
            write_mongo(mdb=mdb_remote, table=lcl, set_id=sid)
    extra = list(set(rsc.keys()) - set(ignore_remote or []))
    ic(extra)  # debug print of the remote-only sets about to be deleted
    if extra:
        mongo_delete_sids(mdb=mdb_remote, sids=extra)
| 30.757282 | 88 | 0.613163 | 1,437 | 0.226799 | 535 | 0.084438 | 5,589 | 0.882102 | 0 | 0 | 1,484 | 0.234217 |
87cedabd60c632947a95a1fb25c64135907bd605 | 422 | py | Python | Lista 1/exe8.py | jesset27/Listas-de-exercicios-python | ab238f22f896a95d38b7a4ad64cf8848ad68a232 | [
"MIT"
] | 1 | 2021-11-30T13:16:36.000Z | 2021-11-30T13:16:36.000Z | Lista 1/exe8.py | jesset27/Listas-de-exercicios-python | ab238f22f896a95d38b7a4ad64cf8848ad68a232 | [
"MIT"
] | null | null | null | Lista 1/exe8.py | jesset27/Listas-de-exercicios-python | ab238f22f896a95d38b7a4ad64cf8848ad68a232 | [
"MIT"
] | 1 | 2021-11-20T00:00:50.000Z | 2021-11-20T00:00:50.000Z | '''8. Faça um programa que receba o valor de um depósito e o valor da taxa de juros, calcule e
mostre o valor do rendimento e o valor total depois do rendimento de um mês.'''
#entrada
deposito = float(input('valor de deposito: '))
juros = float(input("Taxa de juros: "))
#processamento
rendimento = deposito * juros / 100
total = deposito + rendimento
#saida
print("Rendimento: ",rendimento)
print("Valor total: ",total) | 35.166667 | 95 | 0.732227 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 273 | 0.642353 |
87d24049a1ea45c4c31c143ecc5594de2fbc0702 | 7,639 | py | Python | tests/test_patterns.py | golly-splorts/golly-maps | b1845dae65c289f26f8ee54e265e25732e487acb | [
"MIT"
] | null | null | null | tests/test_patterns.py | golly-splorts/golly-maps | b1845dae65c289f26f8ee54e265e25732e487acb | [
"MIT"
] | null | null | null | tests/test_patterns.py | golly-splorts/golly-maps | b1845dae65c289f26f8ee54e265e25732e487acb | [
"MIT"
] | null | null | null | import os
import unittest
from golly_maps.patterns import (
get_patterns,
get_pattern,
get_pattern_size,
get_grid_pattern,
pattern_union,
)
# Directory containing this test module.
HERE = os.path.split(os.path.abspath(__file__))[0]
# Every pattern name the golly_maps.patterns module is expected to provide.
ALL_PATTERNS = [
    "78p70",
    "acorn",
    "airforce",
    "backrake2",
    "bheptomino",
    "block",
    "cheptomino",
    "coespaceship",
    "crabstretcher",
    "cthulhu",
    "dinnertable",
    "dinnertablecenter",
    "dinnertableedges",
    "eheptomino",
    "glider",
    "harbor",
    "heavyweightspaceship",
    "justyna",
    "koksgalaxy",
    "lightweightspaceship",
    "middleweightspaceship",
    "multuminparvo",
    "piheptomino",
    "quadrupleburloaferimeter",
    "rabbit",
    "ring64",
    "rpentomino",
    "spaceshipgrower",
    "switchengine",
    "tagalong",
    "timebomb",
    "twoglidermess",
    "unidimensionalinfinitegrowth",
    "unidimensionalsixgliders",
    "vring64",
    "wickstretcher",
    "x66",
]
# Expected pattern dimensions, keyed by name: [nrows, ncols]
PATTERN_SIZES = {
    "block": (2, 2),
    "cheptomino": (3, 4),
    "justyna": (17, 22),
    "multuminparvo": (4, 6),
    "quadrupleburloaferimeter": (16, 16),
    "x66": (11, 9),
}
class PatternsTest(unittest.TestCase):
    """
    Test patterns functionality in golly_maps
    """

    def test_get_patterns(self):
        """
        Compare the list of patterns returned to the hard-coded list.
        """
        patterns = get_patterns()
        self.assertEqual(len(patterns), len(ALL_PATTERNS))
        for pattern_name in ALL_PATTERNS:
            self.assertIn(pattern_name, patterns)

    def test_get_pattern(self):
        """
        Check the get_pattern() method and its arguments.
        This doesn't check the contents of patterns returned, only the sizes.
        """
        for pattern_name in ALL_PATTERNS:
            # Test the function with each argument specified
            p = get_pattern(pattern_name)
            hp = get_pattern(pattern_name, hflip=True)
            vp = get_pattern(pattern_name, vflip=True)
            rot90 = get_pattern(pattern_name, rotdeg=90)
            rot180 = get_pattern(pattern_name, rotdeg=180)
            rot270 = get_pattern(pattern_name, rotdeg=270)
            rot360 = get_pattern(pattern_name, rotdeg=360)
            # Assert things that are unconditional on symmetry
            self.assertGreater(len(p), 0)
            self.assertGreater(len(hp), 0)
            self.assertGreater(len(vp), 0)
            self.assertGreater(len(rot90), 0)
            self.assertGreater(len(rot180), 0)
            self.assertGreater(len(rot270), 0)
            self.assertGreater(len(rot360), 0)
            # A full rotation is the identity; 180-degree rotation keeps
            # the row count, 90/270 swap rows and columns.
            self.assertEqual(p, rot360)
            self.assertEqual(len(p), len(rot180))
            self.assertEqual(len(rot90), len(rot270))

    def test_get_pattern_size(self):
        """
        Check that the get_pattern_size() method returns the correct sizes.
        Check that hflip/vflip/rotdeg arguments modify the size correctly.
        """
        for pattern_name, (pattern_r, pattern_c) in PATTERN_SIZES.items():
            (r, c) = get_pattern_size(pattern_name)
            self.assertEqual(r, pattern_r)
            self.assertEqual(c, pattern_c)
            # Flips never change the size.
            (hr, hc) = get_pattern_size(pattern_name, hflip=True)
            self.assertEqual(hr, pattern_r)
            self.assertEqual(hc, pattern_c)
            (vr, vc) = get_pattern_size(pattern_name, vflip=True)
            self.assertEqual(vr, pattern_r)
            self.assertEqual(vc, pattern_c)
            # 90/270-degree rotations swap rows and columns.
            (r90, c90) = get_pattern_size(pattern_name, rotdeg=90)
            self.assertEqual(r90, pattern_c)
            self.assertEqual(c90, pattern_r)
            (r180, c180) = get_pattern_size(pattern_name, rotdeg=180)
            self.assertEqual(r180, pattern_r)
            self.assertEqual(c180, pattern_c)
            (r270, c270) = get_pattern_size(pattern_name, rotdeg=270)
            self.assertEqual(r270, pattern_c)
            self.assertEqual(c270, pattern_r)
            # Rotations must be multiples of 90 degrees.
            with self.assertRaises(Exception):
                _ = get_pattern_size(pattern_name, rotdeg=111)

    def test_get_grid_pattern(self):
        """
        Call the get_grid_pattern() function with its various arguments.
        Check that the grid patterns returned are the correct size.
        """
        for pattern_name, (pattern_r, pattern_c) in PATTERN_SIZES.items():
            # Test get pattern
            _ = get_pattern(pattern_name)
            # Test get grid pattern with every flip/rotation variant.
            rows = 80
            cols = 80
            xoffset = 40
            yoffset = 40
            variants = [
                {},
                {"hflip": True},
                {"vflip": True},
                {"rotdeg": 90},
                {"rotdeg": 180},
                {"rotdeg": 270},
                {"rotdeg": 360},
            ]
            grid_patterns = [
                get_grid_pattern(
                    pattern_name,
                    rows,
                    cols,
                    xoffset=xoffset,
                    yoffset=yoffset,
                    **variant,
                )
                for variant in variants
            ]
            for gp in grid_patterns:
                self.assertEqual(len(gp), rows)
                self.assertEqual(len(gp[0]), cols)
            # Each invalid call gets its own assertRaises context: a single
            # context stops at the first exception, so the original version
            # only ever exercised the first call.
            with self.assertRaises(Exception):
                get_grid_pattern(pattern_name, rows=-1, cols=-1)
            with self.assertRaises(Exception):
                get_grid_pattern(pattern_name, rows=0, cols=0)
            with self.assertRaises(Exception):
                get_grid_pattern(pattern_name, rows=1, cols=1)
            with self.assertRaises(Exception):
                get_grid_pattern(
                    pattern_name, rows=10, cols=10, xoffset=100, yoffset=100
                )
            with self.assertRaises(Exception):
                get_grid_pattern(
                    pattern_name,
                    rows,
                    cols,
                    xoffset=xoffset,
                    yoffset=yoffset,
                    rotdeg=111,
                )

    def test_pattern_union(self):
        """The union of two complementary patterns should be fully alive."""
        pattern1 = [".......ooo", ".......ooo", "...ooooooo", "...ooooooo"]
        pattern2 = ["ooooooo...", "ooooooo...", "ooo.......", "ooo......."]
        union = pattern_union([pattern1, pattern2])
        for row in union:
            for ch in row:
                self.assertEqual(ch, "o")
| 30.678715 | 78 | 0.51512 | 6,477 | 0.847886 | 0 | 0 | 0 | 0 | 0 | 0 | 1,446 | 0.189292 |
87d609e8f00dbd73a5e4178b9a5f87334cd537e4 | 929 | py | Python | leetcode/1986.py | Cannizza-zzk/python_review | 5a04b3dbc8baa835780c039386529e20e69af81c | [
"Apache-2.0"
] | null | null | null | leetcode/1986.py | Cannizza-zzk/python_review | 5a04b3dbc8baa835780c039386529e20e69af81c | [
"Apache-2.0"
] | null | null | null | leetcode/1986.py | Cannizza-zzk/python_review | 5a04b3dbc8baa835780c039386529e20e69af81c | [
"Apache-2.0"
] | null | null | null | class Solution:
def minSessions(self, tasks: List[int], sessionTime: int) -> int:
n = len(tasks)
initial_state = int('0b'+'1'*n,2)
dp = {}
def find_dp(state):
if state in dp:
return dp[state]
if state == 0:
dp[state] = (1, 0)
else:
ans = (float('inf'), float('inf'))
for i in range(n):
if state & (1<<i):
pieces, last = find_dp(state - (1 << i))
full = (last + tasks[i] > sessionTime)
ans = min(ans, (pieces + full, tasks[i] + (1-full)*last))
dp[state] = ans
return dp[state]
return find_dp((1<<n)-1)[0]
# reference: https://leetcode.com/problems/minimum-number-of-work-sessions-to-finish-the-tasks/discuss/1431829/Python-dynamic-programming-on-subsets-explained
| 33.178571 | 158 | 0.481163 | 767 | 0.825619 | 0 | 0 | 0 | 0 | 0 | 0 | 175 | 0.188375 |
87d62ac325e3ca26d040b943dc34b2166788628d | 8,350 | py | Python | spimstitch/commands/stitch_cmd.py | chunglabmit/spimstitch | ab6f7578e9f4116680536f9707936de171e4fe6b | [
"MIT"
] | 1 | 2022-02-02T01:25:02.000Z | 2022-02-02T01:25:02.000Z | spimstitch/commands/stitch_cmd.py | chunglabmit/spimstitch | ab6f7578e9f4116680536f9707936de171e4fe6b | [
"MIT"
] | 1 | 2020-08-11T14:32:06.000Z | 2020-08-12T10:37:47.000Z | spimstitch/commands/stitch_cmd.py | chunglabmit/spimstitch | ab6f7578e9f4116680536f9707936de171e4fe6b | [
"MIT"
] | null | null | null | import argparse
import json
import numpy as np
import typing
from blockfs.directory import Directory
import logging
from precomputed_tif.blockfs_stack import BlockfsStack
from precomputed_tif.ngff_stack import NGFFStack
import os
import sys
from spimstitch.ngff import NGFFDirectory
from ..stitch import get_output_size, StitchSrcVolume, run
def parse_args(args=sys.argv[1:]):
    """Parse the command-line arguments for the oblique stitching tool."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input",
        help="The root directory of the oblique volume tree. The program expects "
             "blockfs Neuroglancer volumes in directories whose name is in the "
             "format, <x>_<y> where <x> and <y> are the X and Y coordinates of "
             "the top left corner of the volume.",
        required=True)
    parser.add_argument(
        "--output",
        help="The directory for the precomputed volume output"
    )
    parser.add_argument(
        "--levels",
        help="The number of mipmap levels in the precomputed volume",
        default=5,
        type=int)
    parser.add_argument(
        "--log-level",
        help="The log level for logging",
        default="WARNING")
    parser.add_argument(
        "--n-writers",
        help="The number of writer processes for writing blockfs files",
        default=min(12, os.cpu_count()),
        type=int)
    parser.add_argument(
        "--n-workers",
        help="The number of worker processes for the processing pipeline",
        default=min(12, os.cpu_count()),
        type=int)
    parser.add_argument(
        "--silent",
        help="Turn off progress bars",
        action="store_true")
    parser.add_argument(
        "--x-step-size",
        help="X step size in microns",
        default=1.28,
        type=float)
    parser.add_argument(
        "--y-voxel-size",
        help="Size of a voxel in the Y direction in microns",
        default=1.8,
        type=float)
    parser.add_argument(
        "--z-offset",
        help="# of voxels of offset between the start of the stack above "
             "in Z and the stack underneath it",
        default=2048,
        type=int
    )
    parser.add_argument(
        "--output-size",
        help="Size of the output volume (x,y,z). Defaults to the extent of all "
             "prestitched volumes.")
    parser.add_argument(
        "--output-offset",
        help="Offset of the output volume. Only use with --output-size. ")
    parser.add_argument(
        "--alignment",
        help="Alignment file from oblique-align. Default is use static "
             "alignment"
    )
    parser.add_argument(
        "--y-illum-corr",
        help="Fractional brightness of y[2047] with respect to y[0] for "
             "each subvolume. Default is properly corrected",
        type=float
    )
    parser.add_argument(
        "--compute-y-illum-corr",
        help="If present, compute fractional brightness at overlaps "
             "between volumes",
        action="store_true"
    )
    parser.add_argument(
        "--n-y-illum-patches",
        help="Number of patches to take to compute the y illumination "
             "correction",
        type=int,
        default=1000
    )
    parser.add_argument(
        "--min-y-illum-mean",
        help="For an illum patch, the minimum allowed value of the mean "
             "intensity of the patch",
        type=int,
        default=100
    )
    parser.add_argument(
        "--min-y-illum-corr-coef",
        help="The two overlapping volumes in an illumination patch must "
             "have at least this correlation coefficient "
             "(0 <= min-y-illum-corr-coef < 1) to be included",
        type=float,
        default=.80
    )
    parser.add_argument(
        "--ngff",
        help="Output an NGFF volume instead of blockfs",
        action="store_true"
    )
    return parser.parse_args(args)
def main(args=sys.argv[1:]):
    """Stitch the oblique sub-volumes found under --input into one volume."""
    opts = parse_args(args)
    logging.basicConfig(level=getattr(logging,opts.log_level))
    volume_paths = []
    zs = []
    # Collect every level-1 blockfs directory file under the input tree,
    # recording the Z coordinate encoded in the grandparent directory name.
    for root, folders, files in os.walk(opts.input, followlinks=True):
        if os.path.split(root)[-1] == "1_1_1":
            for file in files:
                if file == BlockfsStack.DIRECTORY_FILENAME:
                    volume_paths.append(os.path.join(root, file))
            # NOTE(review): zs is appended once per "1_1_1" directory even
            # when no directory file was found above -- confirm the two
            # lists cannot get out of sync.
            try:
                zs.append(int(os.path.split(os.path.dirname(root))[1]))
            except ValueError:
                logging.warning(
                    "Non-numeric Z found in stack path: %s" % root)
    all_z = sorted(set(zs))
    if opts.alignment is not None:
        with open(opts.alignment) as fd:
            align_z = json.load(fd)["align-z"]
    else:
        align_z = False
    # Z offsets in microns: either directly from the stack Z (align-z mode)
    # or from the stack's rank times the configured per-stack offset.
    if align_z:
        z_offsets = [z / 10 for z in zs]
    else:
        z_offsets = [opts.z_offset * all_z.index(z) * opts.x_step_size for z in zs]
    volumes = [
        StitchSrcVolume(volume_path,
                        opts.x_step_size,
                        opts.y_voxel_size,
                        z_offset)
        for volume_path, z_offset in zip(volume_paths, z_offsets)]
    z_too = adjust_alignments(opts, volumes)
    StitchSrcVolume.rebase_all(volumes, z_too=z_too)
    # Y illumination correction: computed from overlaps, taken from the
    # command line, or disabled.
    if opts.compute_y_illum_corr:
        y_illum_corr = StitchSrcVolume.compute_illum_corr(
            volumes,
            n_patches=opts.n_y_illum_patches,
            min_mean=opts.min_y_illum_mean,
            min_corr_coef=opts.min_y_illum_corr_coef,
            n_workers=opts.n_workers
        )
    elif opts.y_illum_corr is not None:
        y_illum_corr = opts.y_illum_corr
    else:
        y_illum_corr = None
    if y_illum_corr is not None:
        # Linear ramp from 1.0 at y=0 down to y_illum_corr at y=2047.
        y_illum_corr = \
            (1 - y_illum_corr) * (2047 - np.arange(2048)) / 2047 + \
            y_illum_corr
    if opts.output_size is None:
        zs, ys, xs = get_output_size(volumes)
        x0 = y0 = z0 = 0
    else:
        xs, ys, zs = [int(_) for _ in opts.output_size.split(",")]
        if opts.output_offset is None:
            x0 = y0 = z0 = 0
        else:
            x0, y0, z0 = [int(_) for _ in opts.output_offset.split(",")]
    if not os.path.exists(opts.output):
        os.mkdir(opts.output)
    l1_dir = os.path.join(opts.output, "1_1_1")
    if not os.path.exists(l1_dir):
        os.mkdir(l1_dir)
    if opts.ngff:
        # NOTE(review): the shape passed here is (xs, ys, xs) -- the
        # repeated "xs" looks like a typo for "zs"; confirm against
        # NGFFStack's expected axis order before relying on NGFF output.
        output = NGFFStack((xs, ys, xs), opts.output)
        output.create()
    else:
        output = BlockfsStack((zs, ys, xs), opts.output)
    # Voxel size in nanometers (x-step for both X and Z).
    voxel_size = (opts.x_step_size * 1000,
                  opts.y_voxel_size * 1000,
                  opts.x_step_size * 1000)
    output.write_info_file(opts.levels, voxel_size)
    if opts.ngff:
        directory = NGFFDirectory(output)
        directory.create()
    else:
        directory_path = os.path.join(l1_dir, BlockfsStack.DIRECTORY_FILENAME)
        directory = Directory(xs, ys, zs, volumes[0].directory.dtype,
                              directory_path,
                              n_filenames=opts.n_writers)
        directory.create()
        directory.start_writer_processes()
    run(volumes, directory, x0, y0, z0, opts.n_workers, opts.silent,
        y_illum_corr)
    directory.close()
    # Build the mipmap pyramid above level 1.
    for level in range(2, opts.levels + 1):
        output.write_level_n(level,
                             silent=opts.silent,
                             n_cores=opts.n_writers)
def adjust_alignments(opts, volumes: typing.Sequence["StitchSrcVolume"]):
    """
    Adjust the volume coordinates based on alignments recorded by
    oblique-align or similar.

    :param opts: the command-line options; the --alignment argument, if
        given, names a JSON file produced by oblique-align.
    :param volumes: the volumes to be adjusted in place
    :return: True if the alignment file requests Z alignment, else False
    """
    # Bug fix: align_z was only assigned inside the "if" block, so calling
    # this without --alignment raised UnboundLocalError at the return
    # statement (main() calls this unconditionally).
    align_z = False
    if opts.alignment is not None:
        alignments = {}
        with open(opts.alignment) as fd:
            d: dict = json.load(fd)
            if "alignments" in d:
                # Keys are JSON-encoded [x, y, z] triples; index by (x, y).
                for k, v in d["alignments"].items():
                    alignments[tuple(json.loads(k)[:-1])] = v
            align_z = d.get("align-z", False)
        for volume in volumes:
            k = (volume.x0, volume.y0)
            if k in alignments:
                if align_z:
                    volume.x0, volume.y0, volume.z0 = alignments[k]
                else:
                    volume.x0, volume.y0, _ = alignments[k]
    return align_z
if __name__ == "__main__":
main()
| 33.943089 | 83 | 0.586707 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,110 | 0.252695 |
87d6924e02630e60ee58f3ea397ba2d39e4eb305 | 442 | py | Python | set_input.py | oinu32/discordpy-startup | 9ef948d1811beba71aa44051b23b5a59adac0500 | [
"MIT"
] | 1 | 2020-01-09T16:29:25.000Z | 2020-01-09T16:29:25.000Z | set_input.py | oinu32/discordpy-startup | 9ef948d1811beba71aa44051b23b5a59adac0500 | [
"MIT"
] | null | null | null | set_input.py | oinu32/discordpy-startup | 9ef948d1811beba71aa44051b23b5a59adac0500 | [
"MIT"
] | null | null | null | #AGNET
# 該当のチャンネルのID
ID_CHANNEL_README = 771383900814573598
#たすきる
ID_ROLE_TSKL = 671354476044615680
#たすきる用ちゃんねるID(とつかんり)
ID_TSKILL = 624668843444273164
#とつ予定(とつかんり)
ID_totu = 739498595329376487
#とつ予定じゃり
ID_totu2 = 750321508219355227
#さーばーID
ID_SRV = 539773033724772362
#agnet
ID_agnet = 549971775828656168
ID_alist = 750236871967244379
#RKME
#たすきる用ちゃんねるID
ID_Channel_RKME = 795869377882226708
#さーばーID
ID_SRV_RKME = 795355793524129823
| 15.785714 | 38 | 0.830317 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 236 | 0.41115 |
87d835e669333bab23cb9ebd086673441ab97685 | 1,821 | py | Python | src/issues/search_indexes.py | ofirr/OpenCommunity | 7786ac2996530af8f545f4398c071793c73634c8 | [
"BSD-3-Clause"
] | null | null | null | src/issues/search_indexes.py | ofirr/OpenCommunity | 7786ac2996530af8f545f4398c071793c73634c8 | [
"BSD-3-Clause"
] | null | null | null | src/issues/search_indexes.py | ofirr/OpenCommunity | 7786ac2996530af8f545f4398c071793c73634c8 | [
"BSD-3-Clause"
] | null | null | null | from haystack import indexes
from issues.models import Issue, Proposal
from haystack.fields import IntegerField, CharField, BooleanField, DateField, DateTimeField
from datetime import date, datetime, timedelta
class IssueIndex(indexes.ModelSearchIndex, indexes.Indexable):
    """Haystack search index over ``Issue`` (indexes title and abstract)."""
    # Community the issue belongs to, stored for per-community filtering.
    community = IntegerField(model_attr='community_id')
    # Confidentiality flag -- presumably used to filter results for
    # non-members; verify against the search views.
    is_confidential = BooleanField(model_attr='is_confidential')
    class Meta:
        model = Issue
        fields = ['title', 'abstract']
    # Note that regular ``SearchIndex`` methods apply.
    def index_queryset(self, using=None):
        "Used when the entire index for model is updated."
        return Issue.objects.active()
class ProposalIndex(indexes.ModelSearchIndex, indexes.Indexable):
    """Haystack search index over ``Proposal``.

    The document field is template-rendered; the remaining fields are stored
    copies of model attributes used for filtering/sorting search results.
    """
    text = CharField(document=True, use_template=True)
    active = BooleanField(model_attr='active')
    title = CharField(model_attr='title')
    # Community is reached through the parent issue.
    community = IntegerField(model_attr='issue__community_id')
    status = IntegerField(model_attr='status')
    task_completed = BooleanField(model_attr='task_completed')
    type = IntegerField(model_attr='type')
    # Computed fields: filled by the prepare_* hooks below.
    decided_at = DateTimeField()
    assignee = CharField()
    due_by = DateField(model_attr='due_by', null=True)
    is_confidential = BooleanField(model_attr='is_confidential')
    def get_model(self):
        """Model class this index covers."""
        return Proposal
    def prepare_assignee(self, obj):
        """Display name of the assigned user, or an empty string."""
        return u'' if not obj.assigned_to_user else \
            obj.assigned_to_user.display_name
    def prepare_decided_at(self, obj):
        """Decision timestamp: the meeting date if decided at one, else creation time."""
        return obj.created_at if not obj.decided_at_meeting \
            else obj.decided_at_meeting.held_at
    # Note that regular ``SearchIndex`` methods apply.
    def index_queryset(self, using=None):
        "Used when the entire index for model is updated."
        return Proposal.objects.active()
| 37.163265 | 91 | 0.721032 | 1,605 | 0.881384 | 0 | 0 | 0 | 0 | 0 | 0 | 342 | 0.187809 |
87d8507d0c1a4a250f2a1f63328c068e8ca77ba8 | 16,437 | py | Python | options/opts.py | apple/ml-cvnets | 84d992f413e52c0468f86d23196efd9dad885e6f | [
"AML"
] | 209 | 2021-10-30T08:32:10.000Z | 2022-03-31T16:18:03.000Z | options/opts.py | apple/ml-cvnets | 84d992f413e52c0468f86d23196efd9dad885e6f | [
"AML"
] | 12 | 2021-12-04T10:47:11.000Z | 2022-03-31T15:39:40.000Z | options/opts.py | apple/ml-cvnets | 84d992f413e52c0468f86d23196efd9dad885e6f | [
"AML"
] | 50 | 2021-11-01T08:15:02.000Z | 2022-03-29T08:17:34.000Z | #
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#
import argparse
from typing import Optional
from data.sampler import arguments_sampler
from data.collate_fns import arguments_collate_fn
from options.utils import load_config_file
from data.datasets import arguments_dataset
from cvnets import arguments_model, arguments_nn_layers, arguments_ema
from cvnets.anchor_generator import arguments_anchor_gen
from loss_fn import arguments_loss_fn
from optim import arguments_optimizer
from optim.scheduler import arguments_scheduler
from common import SUPPORTED_MODALITIES
from data.transforms import arguments_augmentation
from metrics import arguments_stats
from data.video_reader import arguments_video_reader
from cvnets.matcher_det import arguments_box_matcher
from utils import logger
class ParseKwargs(argparse.Action):
    """Argparse action that parses ``key=value`` override pairs.

    Each value is coerced to the type of the matching entry already present
    in the namespace: ``true``/``false`` become booleans; for entries whose
    current value is ``None``, comma-separated values become a list (of ints
    when possible); otherwise the existing value's type is applied.  The
    resulting dict is stored on the namespace as ``override_args`` (``None``
    when no pairs were supplied).
    """

    def __call__(self, parser, namespace, values, option_string=None):
        namespace_dict = vars(namespace)
        if len(values) == 0:
            setattr(namespace, "override_args", None)
            return
        override_dict = {}
        # values are list of key-value pairs
        for value in values:
            key = None
            try:
                key, value = value.split("=")
            except ValueError:
                logger.error(
                    "For override arguments, a key-value pair of the form key=value is expected"
                )
            if key in namespace_dict:
                value_namespace = namespace_dict[key]
                if value_namespace is None and value is None:
                    value = None
                elif value_namespace is None and value is not None:
                    # possibly a string or list of strings or list of integers
                    value = value.split(",")
                    if len(value) == 1:
                        # single element: treat as a plain string
                        value = str(value[0])
                        # empty string / "none" means explicit None
                        if value == "" or value.lower() == "none":
                            value = None
                    else:
                        try:
                            # try to interpret as a list of integers
                            value = [int(v) for v in value]
                        except ValueError:
                            # not integers: keep the list of strings.
                            # (Fix: was a bare ``except`` that also swallowed
                            # KeyboardInterrupt/SystemExit.)
                            pass
                else:
                    try:
                        if value.lower() == "true":  # check for boolean
                            value = True
                        elif value.lower() == "false":
                            value = False
                        else:
                            # coerce to the type of the existing value
                            desired_type = type(value_namespace)
                            value = desired_type(value)
                    except ValueError:
                        logger.warning(
                            "Type mismatch while over-riding. Skipping key: {}".format(
                                key
                            )
                        )
                        continue
            # Unknown keys are recorded with their raw string value.
            override_dict[key] = value
        setattr(namespace, "override_args", override_dict)
def arguments_common(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
    """Register common (experiment-wide) command-line arguments on *parser*.

    Covers run bookkeeping (seed, config file, result/checkpoint locations),
    training mechanics (mixed precision, gradient accumulation and clipping),
    logging backends, and the generic ``--common.override-kwargs`` escape
    hatch handled by ``ParseKwargs``.  Returns the same parser.
    """
    group = parser.add_argument_group(
        title="Common arguments", description="Common arguments"
    )
    group.add_argument("--common.seed", type=int, default=0, help="Random seed")
    group.add_argument(
        "--common.config-file", type=str, default=None, help="Configuration file"
    )
    group.add_argument(
        "--common.results-loc",
        type=str,
        default="results",
        help="Directory where results will be stored",
    )
    group.add_argument(
        "--common.run-label",
        type=str,
        default="run_1",
        help="Label id for the current run",
    )
    group.add_argument(
        "--common.resume", type=str, default=None, help="Resume location"
    )
    group.add_argument(
        "--common.finetune_imagenet1k",
        type=str,
        default=None,
        help="Checkpoint location to be used for finetuning",
    )
    group.add_argument(
        "--common.finetune_imagenet1k-ema",
        type=str,
        default=None,
        help="EMA Checkpoint location to be used for finetuning",
    )
    group.add_argument(
        "--common.mixed-precision", action="store_true", help="Mixed precision training"
    )
    # Gradient accumulation: effective batch size = batch size * accum-freq.
    group.add_argument(
        "--common.accum-freq",
        type=int,
        default=1,
        help="Accumulate gradients for this number of iterations",
    )
    group.add_argument(
        "--common.accum-after-epoch",
        type=int,
        default=0,
        help="Start accumulation after this many epochs",
    )
    group.add_argument(
        "--common.log-freq",
        type=int,
        default=100,
        help="Display after these many iterations",
    )
    group.add_argument(
        "--common.auto-resume",
        action="store_true",
        help="Resume training from the last checkpoint",
    )
    group.add_argument(
        "--common.grad-clip", type=float, default=None, help="Gradient clipping value"
    )
    group.add_argument(
        "--common.k-best-checkpoints",
        type=int,
        default=5,
        help="Keep k-best checkpoints",
    )
    group.add_argument(
        "--common.inference-modality",
        type=str,
        default="image",
        choices=SUPPORTED_MODALITIES,
        help="Inference modality. Image or videos",
    )
    group.add_argument(
        "--common.channels-last",
        action="store_true",
        default=False,
        help="Use channel last format during training. "
        "Note 1: that some models may not support it, so we recommend to use it with caution"
        "Note 2: Channel last format does not work with 1-, 2-, and 3- tensors. "
        "Therefore, we support it via custom collate functions",
    )
    group.add_argument(
        "--common.tensorboard-logging",
        action="store_true",
        help="Enable tensorboard logging",
    )
    group.add_argument(
        "--common.bolt-logging", action="store_true", help="Enable bolt logging"
    )
    # Free-form key=value overrides, parsed by the ParseKwargs action above.
    group.add_argument(
        "--common.override-kwargs",
        nargs="*",
        action=ParseKwargs,
        help="Override arguments. Example. To override the value of --sampler.vbs.crop-size-width, "
        "we can pass override argument as "
        "--common.override-kwargs sampler.vbs.crop_size_width=512 \n "
        "Note that keys in override arguments do not contain -- or -",
    )
    group.add_argument(
        "--common.enable-coreml-compatible-module",
        action="store_true",
        help="Use coreml compatible modules (if applicable) during inference",
    )
    group.add_argument(
        "--common.debug-mode",
        action="store_true",
        help="You can use this flag for debugging purposes.",
    )
    return parser
def arguments_ddp(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
    """Register distributed-data-parallel (DDP) options on *parser* and return it."""
    ddp_group = parser.add_argument_group(
        title="DDP arguments", description="DDP arguments"
    )
    add = ddp_group.add_argument
    add("--ddp.disable", action="store_true", help="Don't use DDP")
    add("--ddp.rank", type=int, default=0, help="Node rank for distributed training")
    add("--ddp.world-size", type=int, default=-1, help="World size for DDP")
    add("--ddp.dist-url", type=str, default=None, help="DDP URL")
    add(
        "--ddp.dist-port",
        type=int,
        default=30786,
        help="DDP Port. Only used when --ddp.dist-url is not specified",
    )
    add("--ddp.device-id", type=int, default=None, help="Device ID")
    add("--ddp.no-spawn", action="store_true", help="Don't use DDP with spawn")
    add("--ddp.backend", type=str, default="nccl", help="DDP backend. Default is nccl")
    add(
        "--ddp.find-unused-params",
        action="store_true",
        help="Find unused params in model. useful for debugging with DDP",
    )
    return parser
def get_training_arguments(parse_args: Optional[bool] = True):
    """Build the full training argument parser.

    When *parse_args* is true, parse ``sys.argv``, merge in the YAML config
    file and return the resulting options namespace; otherwise return the
    parser itself so callers can extend it further.
    """
    parser = argparse.ArgumentParser(description="Training arguments", add_help=True)
    # Each helper registers one argument group on the shared parser; the
    # order below matches the original registration order.
    argument_builders = (
        arguments_sampler,      # data sampler
        arguments_dataset,      # dataset
        arguments_anchor_gen,   # anchor generator (detection)
        arguments_box_matcher,  # box matcher (detection)
        arguments_video_reader, # video reader
        arguments_collate_fn,   # batch collation
        arguments_augmentation, # data transforms
        arguments_nn_layers,    # low-level NN layers
        arguments_model,        # model
        arguments_ema,          # EMA of model weights
        arguments_loss_fn,      # loss function
        arguments_optimizer,    # optimizer
        arguments_scheduler,    # LR scheduler
        arguments_ddp,          # distributed training
        arguments_stats,        # metrics / stats
        arguments_common,       # common experiment options
    )
    for builder in argument_builders:
        parser = builder(parser=parser)
    if not parse_args:
        return parser
    opts = parser.parse_args()
    return load_config_file(opts)
def get_eval_arguments(parse_args=True):
    """Evaluation shares the full training argument surface; delegate directly."""
    return get_training_arguments(parse_args=parse_args)
def get_conversion_arguments():
    """Training arguments plus CoreML-conversion options; parse and return opts."""
    parser = get_training_arguments(parse_args=False)
    # Arguments related to coreml conversion
    group = parser.add_argument_group("Conversion arguments")
    group.add_argument(
        "--conversion.coreml-extn",
        type=str,
        default="mlmodel",
        help="Extension for converted model. Default is mlmodel",
    )
    group.add_argument(
        "--conversion.input-image-path",
        type=str,
        default=None,
        help="Path of the image to be used for conversion",
    )
    # Arguments related to server.
    group.add_argument(
        "--conversion.bucket-name", type=str, help="Model job's bucket name"
    )
    group.add_argument("--conversion.task-id", type=str, help="Model job's id")
    group.add_argument(
        "--conversion.viewers",
        type=str,
        nargs="+",
        default=None,
        help="Users who can view your models on server",
    )
    # parse args
    opts = parser.parse_args()
    opts = load_config_file(opts)
    return opts
def get_bencmarking_arguments():
    """Training arguments plus benchmarking options; parse and return opts.

    Note: the function name carries a historical typo ("bencmarking"); it is
    kept for backward compatibility and a correctly spelled alias is defined
    below this function.
    """
    parser = get_training_arguments(parse_args=False)
    #
    group = parser.add_argument_group("Benchmarking arguments")
    group.add_argument(
        "--benchmark.batch-size",
        type=int,
        default=1,
        help="Batch size for benchmarking",
    )
    group.add_argument(
        "--benchmark.warmup-iter", type=int, default=10, help="Warm-up iterations"
    )
    group.add_argument(
        "--benchmark.n-iter",
        type=int,
        default=100,
        help="Number of iterations for benchmarking",
    )
    group.add_argument(
        "--benchmark.use-jit-model",
        action="store_true",
        help="Convert the model to JIT and then benchmark it",
    )
    # parse args
    opts = parser.parse_args()
    opts = load_config_file(opts)
    return opts


# Correctly spelled, backward-compatible alias; prefer this in new code.
get_benchmarking_arguments = get_bencmarking_arguments
def get_segmentation_eval_arguments():
    """Training arguments plus segmentation-evaluation options; parse and return opts."""
    parser = get_training_arguments(parse_args=False)
    group = parser.add_argument_group("Segmentation evaluation related arguments")
    group.add_argument(
        "--evaluation.segmentation.apply-color-map",
        action="store_true",
        help="Apply color map to different classes in segmentation masks. Useful in visualization "
        "+ some competitions (e.g, PASCAL VOC) accept submissions with colored segmentation masks",
    )
    group.add_argument(
        "--evaluation.segmentation.save-overlay-rgb-pred",
        action="store_true",
        help="enable this flag to visualize predicted masks on top of input image",
    )
    group.add_argument(
        "--evaluation.segmentation.save-masks",
        action="store_true",
        help="save predicted masks without colormaps. Useful for submitting to "
        "competitions like Cityscapes",
    )
    group.add_argument(
        "--evaluation.segmentation.overlay-mask-weight",
        default=0.5,
        type=float,
        help="Contribution of mask when overlaying on top of RGB image. ",
    )
    group.add_argument(
        "--evaluation.segmentation.mode",
        type=str,
        default="validation_set",
        required=False,
        choices=["single_image", "image_folder", "validation_set"],
        # Fix: help text was copy-pasted from --overlay-mask-weight.
        help="Evaluation mode: a single image, a folder of images, or the validation set",
    )
    group.add_argument(
        "--evaluation.segmentation.path",
        type=str,
        default=None,
        help="Path of the image or image folder (only required for single_image and image_folder modes)",
    )
    # NOTE(review): ``type=str`` although this is a count of classes --
    # downstream code may be converting it; confirm before changing to int.
    group.add_argument(
        "--evaluation.segmentation.num-classes",
        type=str,
        default=None,
        help="Number of segmentation classes used during training",
    )
    group.add_argument(
        "--evaluation.segmentation.resize-input-images",
        action="store_true",
        help="Resize input images",
    )
    # parse args
    opts = parser.parse_args()
    opts = load_config_file(opts)
    return opts
def get_detection_eval_arguments():
    """Training arguments plus detection-evaluation options; parse and return opts."""
    parser = get_training_arguments(parse_args=False)
    group = parser.add_argument_group("Detection evaluation related arguments")
    group.add_argument(
        "--evaluation.detection.save-overlay-boxes",
        action="store_true",
        # Fix: help text said "masks" -- this flag overlays predicted boxes.
        help="enable this flag to visualize predicted boxes on top of input image",
    )
    group.add_argument(
        "--evaluation.detection.mode",
        type=str,
        default="validation_set",
        required=False,
        choices=["single_image", "image_folder", "validation_set"],
        # Fix: help text was copy-pasted from the segmentation overlay option.
        help="Evaluation mode: a single image, a folder of images, or the validation set",
    )
    group.add_argument(
        "--evaluation.detection.path",
        type=str,
        default=None,
        help="Path of the image or image folder (only required for single_image and image_folder modes)",
    )
    # NOTE(review): ``type=str`` although this is a count of classes --
    # downstream code may be converting it; confirm before changing to int.
    group.add_argument(
        "--evaluation.detection.num-classes",
        type=str,
        default=None,
        # Fix: help text said "segmentation" classes in the detection group.
        help="Number of detection classes used during training",
    )
    group.add_argument(
        "--evaluation.detection.resize-input-images",
        action="store_true",
        default=False,
        help="Resize the input images",
    )
    # parse args
    opts = parser.parse_args()
    opts = load_config_file(opts)
    return opts
def get_loss_landscape_args():
    """Training arguments plus the loss-landscape grid bounds; parse and return opts."""
    parser = get_training_arguments(parse_args=False)
    grid_group = parser.add_argument_group("Loss landscape related arguments")
    add = grid_group.add_argument
    add(
        "--loss-landscape.n-points",
        type=int,
        default=11,
        help="No. of grid points. Default is 11, so we have 11x11 grid",
    )
    add("--loss-landscape.min-x", type=float, default=-1.0, help="Min. value along x-axis")
    add("--loss-landscape.max-x", type=float, default=1.0, help="Max. value along x-axis")
    add("--loss-landscape.min-y", type=float, default=-1.0, help="Min. value along y-axis")
    add("--loss-landscape.max-y", type=float, default=1.0, help="Max. value along y-axis")
    opts = parser.parse_args()
    return load_config_file(opts)
| 31.609615 | 105 | 0.607288 | 2,691 | 0.163716 | 0 | 0 | 0 | 0 | 0 | 0 | 5,991 | 0.364483 |
87da34bc55c43a75ad4a47d5920ff97f1a7ed071 | 1,012 | py | Python | grokking-the-coding-interview/dfs/Path-for-Max-Sum-(medium).py | huandrew99/LeetCode | aa36b48d06100ce5f0bc64c789a906ec29409440 | [
"MIT"
] | 36 | 2021-12-23T15:44:41.000Z | 2022-03-31T04:26:26.000Z | grokking-the-coding-interview/dfs/Path-for-Max-Sum-(medium).py | wzy0766/LeetCode-1 | 3070e672c519e8af74966811b8058a9baef8c0bc | [
"MIT"
] | null | null | null | grokking-the-coding-interview/dfs/Path-for-Max-Sum-(medium).py | wzy0766/LeetCode-1 | 3070e672c519e8af74966811b8058a9baef8c0bc | [
"MIT"
] | 11 | 2022-02-26T22:41:26.000Z | 2022-03-02T07:18:41.000Z | """
Given a binary tree, find the root-to-leaf path with the maximum sum.
"""
class TreeNode:
    """Node of a binary tree."""

    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


def find_paths(root):
    """Return the root-to-leaf path (list of values) with the maximum sum.

    Returns an empty list for an empty tree.  NOTE(review): because the
    running maximum starts at 0, a path is only recorded if its sum is
    positive -- confirm this is acceptable for all-negative trees.
    """
    global max_sum
    # Bug fix: reset the shared maximum so repeated calls start fresh.
    # Previously the stale value from an earlier call made every subsequent
    # call return an empty path.
    max_sum = 0
    res = []
    find_max(root, res, [], 0)
    return res


# Running maximum shared with find_max(); reset by find_paths().
max_sum = 0


def find_max(root, res, cur_path, cur_sum):
    """DFS helper: track the best root-to-leaf path seen so far in *res*."""
    global max_sum
    if not root:
        return
    cur_sum += root.val
    cur_path.append(root.val)
    if (not root.left) and (not root.right):
        # Leaf reached: record the path if it beats the current best.
        if cur_sum > max_sum:
            max_sum = cur_sum
            res.clear()
            res.extend(cur_path)
    find_max(root.left, res, cur_path, cur_sum)
    find_max(root.right, res, cur_path, cur_sum)
    cur_path.pop()  # backtrack before returning to the parent
def main():
    """Build a small sample tree and print its maximum-sum root-to-leaf path."""
    root = TreeNode(12)
    root.left = TreeNode(8)
    root.right = TreeNode(1)
    root.left.left = TreeNode(4)
    root.right.left = TreeNode(10)
    root.right.right = TreeNode(5)
    # Removed the unused ``required_sum`` local (leftover from a related
    # exercise; it was never read).
    print("Tree paths max sum " + str(find_paths(root)))


main()
"""
Time O(N)
Space O(N)
"""
| 17.448276 | 69 | 0.649209 | 128 | 0.126482 | 0 | 0 | 0 | 0 | 0 | 0 | 126 | 0.124506 |
87dab20f9aff7e74782e7e82bc9833799c25002f | 9,867 | py | Python | django-ng/django_ng_project/django_ng_app/views.py | bounouhmahmoud/ensi | 209e57340359af3ea063c064982198848dc36c5f | [
"MIT"
] | 1 | 2018-07-10T06:59:05.000Z | 2018-07-10T06:59:05.000Z | django-ng/django_ng_project/django_ng_app/views.py | Arsalen/BusinessStrategies | 209e57340359af3ea063c064982198848dc36c5f | [
"MIT"
] | null | null | null | django-ng/django_ng_project/django_ng_app/views.py | Arsalen/BusinessStrategies | 209e57340359af3ea063c064982198848dc36c5f | [
"MIT"
] | null | null | null | # coding=utf-8
from django.shortcuts import render
from rest_framework import viewsets, generics, status
from .serializers import ClientSerializer, MessageSerializer, SearchSerializer, ManageSearchSerializer, PreferenceSerializer
from .models import Client, Message, Search, ManageSearch, Preference
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.views import APIView
from django.http import Http404
import simplejson
from django.contrib.auth.models import User
from rest_framework.permissions import IsAuthenticated
class ClientList(APIView):
    """List every client or register a new one."""

    permission_classes = (IsAuthenticated,)

    def get(self, request, format=None):
        """Return all ``Client`` records, serialized."""
        serializer = ClientSerializer(Client.objects.all(), many=True)
        return Response(serializer.data)

    def post(self, request, format=None):
        """Create a ``Client`` from the request payload (400 on invalid data)."""
        serializer = ClientSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class ClientDetail(APIView):
    """Retrieve, update or delete a single ``Client``."""

    permission_classes = (IsAuthenticated,)

    def get_object(self, pk):
        """Return the client with primary key *pk*, or raise 404."""
        try:
            return Client.objects.get(pk=pk)
        except Client.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        """Return the serialized client."""
        snippet = self.get_object(pk)
        serializer = ClientSerializer(snippet)
        return Response(serializer.data)

    def put(self, request, pk, format=None):
        """Update the client identified by the payload's ``id``.

        When the URL pk equals the payload id, the caller is editing their
        own record and must supply the correct ``oldpassword``; any other pk
        is applied directly (the pre-existing non-self update path).
        """
        snippet = self.get_object(request.data['id'])
        if int(pk) == request.data['id']:
            # NOTE(review): plaintext password comparison -- the model appears
            # to store unhashed passwords; confirm and consider hashing.
            if snippet.password != request.data['oldpassword']:
                # Bug fix: this path previously fell through to
                # ``serializer.errors`` with ``serializer`` unbound, raising
                # NameError (HTTP 500). Reject with 400 instead.
                return Response(status=status.HTTP_400_BAD_REQUEST)
        serializer = ClientSerializer(snippet, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, format=None):
        """Delete the client and return 204."""
        snippet = self.get_object(pk)
        snippet.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class ClientAuth(APIView):
    """Authenticate a client by e-mail / password / admin flag."""

    # NOTE(review): the permission guard is commented out in the original --
    # presumably deliberate for a login endpoint, but confirm.
    #permission_classes = (IsAuthenticated,)

    def get_object(self, request):
        """Return the client matching the supplied credentials, or raise 404."""
        try:
            # NOTE(review): plaintext password comparison in the DB query --
            # confirm whether hashed passwords were intended.
            return Client.objects.get(email=request['email'], password=request['password'], admn=request['admin'])
        except Client.DoesNotExist:
            raise Http404

    def post(self, request, format=None):
        """Log a client in; held (suspended) accounts are rejected with 400."""
        snippet = self.get_object(request.data)
        serializer = ClientSerializer(snippet)
        if serializer.data['held'] == False:
            return Response(serializer.data)
        # Bug fix: the original returned ``serializer.errors`` here, which
        # raises for a read-only serializer (``is_valid()`` was never
        # called) and produced a 500. Return a plain 400 instead.
        return Response(status=status.HTTP_400_BAD_REQUEST)
################################################################################################################################
class MessageList(APIView):
    """List every message or create a new one."""

    permission_classes = (IsAuthenticated,)

    def get(self, request, format=None):
        """Return all ``Message`` records, serialized."""
        serializer = MessageSerializer(Message.objects.all(), many=True)
        return Response(serializer.data)

    def post(self, request, format=None):
        """Create a ``Message`` from the request payload (400 on invalid data)."""
        serializer = MessageSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class MessageDetail(APIView):
    """Retrieve, update or delete a single ``Message``."""
    permission_classes = (IsAuthenticated,)
    def get_object(self, pk):
        """Return the message with primary key *pk*, or raise 404."""
        try:
            return Message.objects.get(pk=pk)
        except Message.DoesNotExist:
            raise Http404
    def get(self, request, pk, format=None):
        """Return the serialized message."""
        snippet = self.get_object(pk)
        serializer = MessageSerializer(snippet)
        return Response(serializer.data)
    def put(self, request, pk, format=None):
        """Replace the message with the request payload (400 on invalid data)."""
        snippet = self.get_object(pk)
        serializer = MessageSerializer(snippet, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def delete(self, request, pk, format=None):
        """Delete the message and return 204."""
        snippet = self.get_object(pk)
        snippet.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
################################################################################################################################
class SearchList(APIView):
    """List every search or create a new one."""

    permission_classes = (IsAuthenticated,)

    def get(self, request, format=None):
        """Return all ``Search`` records, serialized."""
        serializer = SearchSerializer(Search.objects.all(), many=True)
        return Response(serializer.data)

    def post(self, request, format=None):
        """Create a ``Search`` from the request payload (400 on invalid data)."""
        serializer = SearchSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class SearchDetail(APIView):
    """Retrieve, update or delete a single ``Search``."""
    permission_classes = (IsAuthenticated,)
    def get_object(self, pk):
        """Return the search with primary key *pk*, or raise 404."""
        try:
            return Search.objects.get(pk=pk)
        except Search.DoesNotExist:
            raise Http404
    def get(self, request, pk, format=None):
        """Return the serialized search."""
        snippet = self.get_object(pk)
        serializer = SearchSerializer(snippet)
        return Response(serializer.data)
    def put(self, request, pk, format=None):
        """Replace the search with the request payload (400 on invalid data)."""
        snippet = self.get_object(pk)
        serializer = SearchSerializer(snippet, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def delete(self, request, pk, format=None):
        """Delete the search and return 204."""
        snippet = self.get_object(pk)
        snippet.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
################################################################################################################################
class ManageSearchList(APIView):
    """List every managed search or create a new one."""

    permission_classes = (IsAuthenticated,)

    def get(self, request, format=None):
        """Return all ``ManageSearch`` records, serialized."""
        serializer = ManageSearchSerializer(ManageSearch.objects.all(), many=True)
        return Response(serializer.data)

    def post(self, request, format=None):
        """Create a ``ManageSearch`` from the request payload (400 on invalid data)."""
        serializer = ManageSearchSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class ManageSearchDetail(APIView):
    """Retrieve, update or delete a single ``ManageSearch``."""
    permission_classes = (IsAuthenticated,)
    def get_object(self, pk):
        """Return the managed search with primary key *pk*, or raise 404."""
        try:
            return ManageSearch.objects.get(pk=pk)
        except ManageSearch.DoesNotExist:
            raise Http404
    def get(self, request, pk, format=None):
        """Return the serialized managed search."""
        snippet = self.get_object(pk)
        serializer = ManageSearchSerializer(snippet)
        return Response(serializer.data)
    def put(self, request, pk, format=None):
        """Replace the managed search with the request payload (400 on invalid data)."""
        snippet = self.get_object(pk)
        serializer = ManageSearchSerializer(snippet, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def delete(self, request, pk, format=None):
        """Delete the managed search and return 204."""
        snippet = self.get_object(pk)
        snippet.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
################################################################################################################################
class PreferenceList(APIView):
    """List every preference or create a new one."""

    permission_classes = (IsAuthenticated,)

    def get(self, request, format=None):
        """Return all ``Preference`` records, serialized."""
        serializer = PreferenceSerializer(Preference.objects.all(), many=True)
        return Response(serializer.data)

    def post(self, request, format=None):
        """Create a ``Preference`` from the request payload (400 on invalid data)."""
        serializer = PreferenceSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class PreferenceDetail(APIView):
    """Retrieve, update or delete a single ``Preference``."""
    permission_classes = (IsAuthenticated,)
    def get_object(self, pk):
        """Return the preference with primary key *pk*, or raise 404."""
        try:
            return Preference.objects.get(pk=pk)
        except Preference.DoesNotExist:
            raise Http404
    def get(self, request, pk, format=None):
        """Return the serialized preference."""
        snippet = self.get_object(pk)
        serializer = PreferenceSerializer(snippet)
        return Response(serializer.data)
    def put(self, request, pk, format=None):
        """Replace the preference with the request payload (400 on invalid data)."""
        snippet = self.get_object(pk)
        serializer = PreferenceSerializer(snippet, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def delete(self, request, pk, format=None):
        """Delete the preference and return 204."""
        snippet = self.get_object(pk)
        snippet.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
################################################################################################################################
| 35.365591 | 144 | 0.629371 | 9,249 | 0.937367 | 0 | 0 | 0 | 0 | 0 | 0 | 827 | 0.083815 |