max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
zip.py | aziz-alqudsy/basic-python | 0 | 12768651 | # dictionaries
friends = ["john", "andre", "mark", "robert"]
ages = [23, 43, 54, 12]
biodatas_dict = dict(zip(friends, ages))
print(biodatas_dict)
# list
biodatas_list = list(zip(friends, ages))
print(biodatas_list)
# tuple
biodatas_tuple = tuple(zip(friends, ages))
print(biodatas_tuple)
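# Expected output of the three prints above (editor's note, derived from the lists defined in this file):
# {'john': 23, 'andre': 43, 'mark': 54, 'robert': 12}
# [('john', 23), ('andre', 43), ('mark', 54), ('robert', 12)]
# (('john', 23), ('andre', 43), ('mark', 54), ('robert', 12))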
| 4.0625 | 4 |
lithium/manage/templates/app.py | PressLabs/lithium | 2 | 12768652 | <reponame>PressLabs/lithium
import os
from importlib import import_module
from flask import Flask
from flask.ext.admin.contrib.sqla import ModelView as AdminModelView
from {{app_name}}.extensions import db, admin, migrate
from lithium.views import ModelView
cache = []
def create_app(config=None):
app = Flask(__name__)
default_config = os.path.join(app.root_path, 'local_config.py')
app.config.from_pyfile(default_config)
if config:
app.config.from_pyfile(config)
blueprints = []
db.init_app(app)
admin_panel = register_admin_views(admin, blueprints)
admin_panel.init_app(app)
app = register_endpoints(app, blueprints)
migrate.init_app(app, db)
return app
def register_admin_views(admin, blueprints):
for module in blueprints:
module = import_module('{{app_name}}.%s.models' % module)
for model in db.Model.__subclasses__():
if model not in cache:
cache.append(model)
admin.add_view(AdminModelView(model, db.session))
return admin
def register_endpoints(app, blueprints):
for module in blueprints:
module = import_module('{{app_name}}.%s.api' % module)
for endpoint in ModelView.__subclasses__():
endpoint.register(app, route_prefix='/api/1/')
return app
| 2.03125 | 2 |
fitness_guide/menu/migrations/0003_alter_product_options_product_name_and_more.py | tunsmm/fitness_guide | 1 | 12768653 | <reponame>tunsmm/fitness_guide
# Generated by Django 4.0.1 on 2022-04-07 20:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('menu', '0002_product_alter_client_options'),
]
operations = [
migrations.AlterModelOptions(
name='product',
options={'verbose_name': 'Продукт', 'verbose_name_plural': 'Продукты'},
),
migrations.AddField(
model_name='product',
name='name',
field=models.CharField(default='-', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='client',
name='full_name',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='client',
name='phone_number',
field=models.CharField(max_length=15),
),
migrations.AlterField(
model_name='client',
name='type_diet',
field=models.CharField(max_length=31),
),
]
| 1.726563 | 2 |
145.py | juandarr/ProjectEuler | 0 | 12768654 | """
Finds the amount of reversible numbers below limit_n
Author: <NAME>
"""
import math
from time import time
# Iterator to create values to run in the algorithm
class Range_reverse:
def __init__(self, start,limit_n):
self.current_value = start
self.limit = 10**7
self.maximum = limit_n
def __iter__(self):
return self
def __next__(self):
self.current_value += 2
if self.current_value > self.maximum:
raise StopIteration
if self.current_value > self.limit:
self.current_value = self.limit+10**7+1
self.limit += 2*10**7
return self.current_value
# Generator of possible values to visit
def reverse_generator(start, limit_n):
i = start
max_gap = 10**7
while i<limit_n:
i+=2
if i>max_gap:
i=max_gap+10**7+1
max_gap += 2*10**7
yield i
"""
Finds the amount of reversible numbers below limit_n
"""
def reversible_numbers(limit_n):
total = 0
start = 13
power = 10**7
t0 = time()
for n in reverse_generator(start,limit_n):
n_str = str(n)
if int(n_str[0])%2==0:
carry = 0
valid = True
for i in range(len(n_str)):
val = int(n_str[i])+int(n_str[-1-i])+carry
carry = 0
if val>=10:
carry = 1
val %= 10
if val%2==0:
valid = False
break
if valid:
total += 2
if n > power:
print(n)
power += 2*10**7
t1 = time()
print('Total time to reach the solution: ', t1-t0)
return total
if __name__ == "__main__":
limit_n = 10**8
    print('The amount of reversible numbers below limit_n is {0}'.format(reversible_numbers(limit_n)))
| 3.703125 | 4 |
tests/setup_logger.py | nyoungstudios/multiflow | 0 | 12768655 | import logging
import sys
LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
def get_logger(name, log_format=LOG_FORMAT):
logger = logging.getLogger(name)
handler = logging.StreamHandler(stream=sys.stdout)
handler.setFormatter(logging.Formatter(log_format))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
return logger
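# Illustrative usage (editor's sketch, not part of the original helper):
# logger = get_logger(__name__)
# logger.info("message goes to stdout using LOG_FORMAT above")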
| 2.75 | 3 |
views/spot.py | sbutler/spacescout_web | 0 | 12768656 | <gh_stars>0
""" Copyright 2012, 2013 UW Information Technology, University of Washington
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.shortcuts import render_to_response
from django.template import RequestContext
from spacescout_web.spot import Spot, SpotException
from django.http import Http404
from django.http import HttpResponse
import simplejson as json
def SpotView(request, spot_id, return_json=False):
try:
spot = Spot(spot_id, request=request).get()
except SpotException as e:
if e.status_code == 404:
raise Http404
elif e.status_code != 200:
return HttpResponse("Error loading spot", status=e.status_code)
content = json.dumps(spot)
if return_json:
return HttpResponse(content, content_type='application/json')
else:
return render_to_response('spacescout_web/space.html', spot, context_instance=RequestContext(request))
| 1.859375 | 2 |
worker/utils/extra_helpers/memcached_helper.py | pugpngying123/dataflux-func | 1 | 12768657 | # -*- coding: utf-8 -*-
# Builtin Modules
import json
import time
import traceback
# 3rd-party Modules
import memcache
import six
# Project Modules
from worker.utils import toolkit
from worker.utils.log_helper import LogHelper
def get_config(c):
servers = c.get('servers') or None
if servers and isinstance(servers, (six.string_types, six.text_type)):
servers = servers.split(',')
servers = servers or '127.0.0.1:11211'
servers = toolkit.as_array(servers)
return servers
LIMIT_ARGS_DUMP = 200
class MemcachedHelper(object):
def __init__(self, logger, config, *args, **kwargs):
self.logger = logger
self.config = config
self.client = memcache.Client(get_config(config))
def __del__(self):
pass
def check(self):
try:
self.client.get_stats()
except Exception as e:
for line in traceback.format_exc().splitlines():
self.logger.error(line)
raise Exception(str(e))
def query(self, *args):
command = args[0]
command_args = args[1:]
args_dumps = ', '.join([toolkit.json_dumps(x) for x in command_args])
if len(args_dumps) > LIMIT_ARGS_DUMP:
args_dumps = args_dumps[0:LIMIT_ARGS_DUMP-3] + '...'
self.logger.debug('[MEMCACHED] Query `{}` <- `{}`'.format(command.upper(), args_dumps))
return getattr(self.client, command.lower())(*command_args)
def run(self, *args, **kwargs):
command = args[0]
command_args = args[1:]
args_dumps = ', '.join([toolkit.json_dumps(x) for x in command_args])
if len(args_dumps) > LIMIT_ARGS_DUMP:
args_dumps = args_dumps[0:LIMIT_ARGS_DUMP-3] + '...'
self.logger.debug('[MEMCACHED] Run `{}` <- `{}`'.format(command.upper(), args_dumps))
return getattr(self.client, command.lower())(*command_args, **kwargs)
def get(self, key):
return self.run('get', key)
def set(self, key, value):
return self.run('set', key, value)
def add(self, key, value):
return self.run('add', key, value)
def replace(self, key, value):
return self.run('replace', key, value)
def delete(self, key):
return self.run('delete', key)
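# --- Illustrative usage (editor's sketch; the logger object and server address below are assumptions) ---
# helper = MemcachedHelper(logger, {'servers': '127.0.0.1:11211'})
# helper.check()                     # raises if the memcached server is unreachable
# helper.set('greeting', 'hello')
# print(helper.get('greeting'))      # -> 'hello'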
| 2.171875 | 2 |
swagger.py | victorgrubio/flask-mongo-template-api | 0 | 12768658 | <filename>swagger.py
from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
from apispec_webframeworks.flask import FlaskPlugin
import json
import yaml
import schemas
from app import app as flask_app
import app
"""Create spec
"""
spec = APISpec(
openapi_version="3.0.0",
title="Health Weareable REST API",
version='1.1.0',
info={
"description": "API Template using Flask and Mongo. The development of this service is provided by GATV, a research group from the Technical University of Madrid.",
"termsOfService": "http://swagger.io/terms/",
"contact": {
"email": "<EMAIL>"
},
"license": {
"name": "Apache 2.0",
"url": "http://www.apache.org/licenses/LICENSE-2.0.html"
}
},
servers = ["http://localhost:4000", "http://car_api:4000"],
tags = [
{
"name": "car",
"description": "Car related endpoints"
}
],
plugins=[MarshmallowPlugin(), FlaskPlugin()]
)
"""Definition of schemas
"""
# Data
spec.components.schema("CarSchema", schema=schemas.CarSchema)
spec.components.schema("ApiResponse", schema=schemas.ApiResponse)
spec.components.schema("ErrorResponse", schema=schemas.ErrorResponse)
"""Add methods from each route
"""
with flask_app.test_request_context():
spec.path(view=app.add_new_car)
spec.path(view=app.get_all_cars)
"""Save file in json and yaml
"""
with open(f'static/swagger.json', 'w') as json_file:
json.dump(spec.to_dict(), json_file)
| 2.328125 | 2 |
python/hard_python/guess/method.py | ii6uu99/ipynb | 1 | 12768659 | <filename>python/hard_python/guess/method.py<gh_stars>1-10
#引入模块
# import method2
# method2.a
# method2.f1()
# import method2 as m2
# m2.a
# m2.f1()
# from method2 import f1
# f1()
# from method2 import f1 as func1
# func1()
from method2 import *
f1()
f2()
print(a)
| 2.515625 | 3 |
Q07__/43_Network_Delay_Time/test.py | hsclinical/leetcode | 0 | 12768660 | <reponame>hsclinical/leetcode
#!/usr/bin/python
from Solution import MyCalendarTwo
| 1.164063 | 1 |
python/rosie/evaluation/modifiers/fill/run_rosie.py | SoarGroup/rosie | 20 | 12768661 | <gh_stars>10-100
from tkinter import *
import tkinter.font
import sys
import os
from rosie import RosieGUI
from rosie.testing import TestAgent
from mobilesim.rosie import MobileSimAgent
def launch_gui(rosie_config):
root = Tk()
eval_agent = MobileSimAgent(rosie_config)
eval_agent.messages.append("!CMD cli pc -f")
eval_gui = RosieGUI(eval_agent, master=root)
eval_gui.run()
def run_test(rosie_config):
eval_agent = TestAgent(config_filename=rosie_config, write_to_stdout=True, source_output="summary",
task_test_output_filename='output/test-output.txt', watch_level=0)
eval_agent.run_test('correct-output.txt')
# Lookup $ROSIE_HOME
rosie_home = ""
if "ROSIE_HOME" in os.environ:
rosie_home = os.environ["ROSIE_HOME"]
else:
print("ERROR: Requires ROSIE_HOME environment variable set")
sys.exit(0)
rosie_config = "agent/rosie.fill.config"
if "--test" in sys.argv:
run_test(rosie_config)
else:
launch_gui(rosie_config)
| 2.5625 | 3 |
werewolf/parser.py | jan-g/slackwolf | 0 | 12768662 | import re
from .sentinel import Sentinel
def kleene(parser):
def parse_many(text):
text = text.lstrip()
if len(text) > 0:
for item, text1 in parser(text):
for items, text2 in parse_many(text1):
yield ([item] + items, text2)
else:
yield([], text)
else:
yield ([], text)
return parse_many
def max_kleene(parser):
def parse_many(text):
text = text.lstrip()
any = False
for item, text1 in parser(text):
for items, text2 in parse_many(text1):
any = True
yield ([item] + items, text2)
if not any:
yield ([], text)
return parse_many
def cat(*parsers):
def parse_cat(text, parsers=parsers):
for item1, text1 in parsers[0](text):
first = (item1,) if item1 is not DROP else ()
if len(parsers) == 1:
yield first, text1
else:
for rest, text2 in parse_cat(text1, parsers=parsers[1:]):
yield first + rest, text2
return parse_cat
def alt(*parsers):
def parse_alt(text):
for parser in parsers:
for item, text1 in parser(text):
yield item, text1
return parse_alt
def maybe(parser):
def parse_maybe(text):
yield from parser(text)
yield None, text
return parse_maybe
def token(string):
def parse_token(text):
text = text.lstrip()
if text.startswith(string):
yield string, text[len(string):]
return parse_token
DROP = Sentinel("DROP")
def map(parser, func):
def parse_map(text):
for item, text1 in parser(text):
yield func(item), text1
return parse_map
def drop_token(string):
return map(token(string), lambda _: DROP)
EOS = Sentinel("EOS")
def eos(text):
text = text.lstrip()
if len(text) == 0:
yield (EOS, text)
NATURAL = re.compile(r'^([0-9]+)')
def natural(text):
text = text.lstrip()
match = NATURAL.match(text)
if match is not None:
yield int(match.group(1)), text[match.end():]
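# --- Illustrative usage (editor's sketch; the grammar and input below are assumptions) ---
# Each combinator returns a generator of (parsed_items, remaining_text) pairs, so
# every way a grammar can match the input is enumerated lazily. For example:
#     greeting = cat(token('hello'), maybe(token('world')), eos)
#     for items, rest in greeting('hello world'):
#         print(items, repr(rest))   # one complete match that consumes the whole input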
| 2.90625 | 3 |
bindings/Python/src/pylibkriging/__init__.py | libKriging/libKriging | 8 | 12768663 | import platform
if platform.system() == "Windows":
import os
import sys
shared_lib_paths = [os.path.join(os.path.dirname(__file__), 'shared_libs')] # cf setup.py
lk_path = os.environ.get("LIBKRIGING_DLL_PATH")
if lk_path:
for path in lk_path.split(os.pathsep):
shared_lib_paths.append(path)
# alternative method if lib/site-packages prefix is not reliable (requires update of setup.py)
# import distutils # https://docs.python.org/3/distutils/apiref.html#module-distutils.sysconfig
# shared_lib_path = os.path.join(distutils.sysconfig.PREFIX, 'pylibkriging', 'shared_libs')
if sys.version_info[:2] < (3, 8): # < 3.8.0
for path in shared_lib_paths:
if os.path.isdir(path):
os.environ['PATH'] = path + os.pathsep + os.environ['PATH']
else:
for path in shared_lib_paths:
if os.path.isdir(path):
os.add_dll_directory(path)
from _pylibkriging import *
from _pylibkriging import __version__, __build_type__
# Type alias to switch to the right binding
Kriging = WrappedPyKriging
LinearRegression = WrappedPyLinearRegression
| 1.960938 | 2 |
read_file.py | its-mithril/HVALLA-Item-Roller-public-vers | 0 | 12768664 | # this handles all the reading from file + creating dictionaries
# everything is separated by tabs
import os
def open_table(path, hunting=False):
if hunting:
_bullshit = "hunting/" + path
else:
_bullshit = path
file_path = "loot_tables"
    num_tables = sum(len(files) for _, _, files in os.walk(file_path + "/" + _bullshit))
table = [[] for m in range(0, num_tables)]
for i in range(num_tables):
file = open(file_path + "/" + _bullshit + "/" + path + str(i + 1), "r")
test_list = file.readlines()
for j in range(len(test_list)):
table[i].append(test_list[j].replace(',\n', '').replace(',,', '').split(','))
file.close()
return table
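# --- Illustrative usage (editor's sketch; the table names below are assumptions) ---
# Expects files laid out as loot_tables/<name>/<name>1, <name>2, ... (or under
# loot_tables/hunting/<name>/ when hunting=True), each holding comma-separated rows.
# weapon_tables = open_table("weapons")
# deer_tables = open_table("deer", hunting=True)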
| 3.390625 | 3 |
todo/admin.py | fidele000/Ftodo-RestAPI-Django | 0 | 12768665 | <reponame>fidele000/Ftodo-RestAPI-Django
from todo.models import UserProfile
from django.contrib import admin
# Register your models here.
admin.site.register(UserProfile)
| 1.445313 | 1 |
tests/test_linter/test_contract.py | rpdelaney/deal | 0 | 12768666 | <filename>tests/test_linter/test_contract.py
import ast
from textwrap import dedent
import astroid
import pytest
from deal.linter._contract import Category, Contract
from deal.linter._func import Func
TEXT = """
import deal
@deal.raises(ValueError, UnknownError, idk_what_is_it())
@notadeal.raises(KeyError)
def f(x):
return x
"""
def test_exceptions():
funcs1 = Func.from_ast(ast.parse(TEXT))
assert len(funcs1) == 1
funcs2 = Func.from_astroid(astroid.parse(TEXT))
assert len(funcs2) == 1
for func in (funcs1[0], funcs2[0]):
assert len(func.contracts) == 1
contract = func.contracts[0]
assert contract.exceptions == [ValueError, 'UnknownError']
def test_repr():
c = Contract(
category=Category.RAISES,
args=[],
func_args=None, # type: ignore[arg-type]
)
assert repr(c) == 'Contract(raises)'
def test_run():
text = """
import deal
@deal.post(lambda x: x > 0)
def f(x):
return x
"""
text = dedent(text).strip()
funcs1 = Func.from_ast(ast.parse(text))
assert len(funcs1) == 1
funcs2 = Func.from_astroid(astroid.parse(text))
assert len(funcs2) == 1
for func in (funcs1[0], funcs2[0]):
assert len(func.contracts) == 1
c = func.contracts[0]
assert c.run(1) is True
assert c.run(-1) is False
def test_resolve_func():
text = """
import deal
def contract(x):
return x > 0
@deal.post(contract)
def f(x):
...
"""
text = dedent(text).strip()
funcs = Func.from_astroid(astroid.parse(text))
assert len(funcs) == 2
func = funcs[-1]
assert len(func.contracts) == 1
c = func.contracts[0]
assert c.run(1) is True
assert c.run(-1) is False
def test_resolve_lambda():
text = """
import deal
contract = lambda x: x > 0
@deal.post(contract)
def f(x):
...
"""
text = dedent(text).strip()
funcs = Func.from_astroid(astroid.parse(text))
assert len(funcs) == 1
func = funcs[0]
assert len(func.contracts) == 1
c = func.contracts[0]
assert c.run(1) is True
assert c.run(-1) is False
def test_return_message():
text = """
import deal
@deal.post(lambda x: x > 0 or 'oh no!')
def f(x):
return x
"""
text = dedent(text).strip()
funcs1 = Func.from_ast(ast.parse(text))
assert len(funcs1) == 1
funcs2 = Func.from_astroid(astroid.parse(text))
assert len(funcs2) == 1
for func in (funcs1[0], funcs2[0]):
assert len(func.contracts) == 1
c = func.contracts[0]
assert c.run(1) is True
assert c.run(-1) == 'oh no!'
def test_simplified_signature():
text = """
import deal
@deal.post(lambda _: _.a > _.b)
def f(a, b):
return a + b
"""
text = dedent(text).strip()
funcs1 = Func.from_ast(ast.parse(text))
assert len(funcs1) == 1
funcs2 = Func.from_astroid(astroid.parse(text))
assert len(funcs2) == 1
for func in (funcs1[0], funcs2[0]):
assert len(func.contracts) == 1
c = func.contracts[0]
assert c.run(3, 2) is True
assert c.run(2, 3) is False
@pytest.mark.parametrize('source, deps', [
('lambda: ...', set()),
('lambda a, b: ...', {'a', 'b'}),
('lambda *args, **kwargs: ...', {'args', 'kwargs'}),
('lambda a, *, b: ...', {'a', 'b'}),
])
def test_arguments(source: str, deps: set):
text = """
import deal
@deal.post({source})
def f():
return 2
"""
text = text.format(source=source)
text = dedent(text).strip()
tree = ast.parse(text)
print(ast.dump(tree))
funcs1 = Func.from_ast(tree)
tree = astroid.parse(text)
print(tree.repr_tree())
funcs2 = Func.from_astroid(tree)
for funcs in (funcs1, funcs2):
assert len(funcs) == 1
func = funcs[0]
assert len(func.contracts) == 1
c = func.contracts[0]
assert c.arguments == deps
@pytest.mark.parametrize('source, deps', [
('lambda a, b: cd', {'cd'}),
('lambda a, b: a+b', set()),
('lambda a, b: (a+b)/c', {'c'}),
('lambda: re.compile()', {'re'}),
('lambda a, b: ab.cd()', {'ab'}),
])
def test_dependencies(source: str, deps: set):
text = """
import deal
@deal.post({source})
def f(a, b):
return a + b
"""
text = text.format(source=source)
text = dedent(text).strip()
funcs1 = Func.from_ast(ast.parse(text))
tree = astroid.parse(text)
print(tree.repr_tree())
funcs2 = Func.from_astroid(tree)
for funcs in (funcs1, funcs2):
assert len(funcs) == 1
func = funcs[0]
assert len(func.contracts) == 1
c = func.contracts[0]
assert c.dependencies == deps
def test_resolve_and_run_dependencies_func_astroid():
text = """
import deal
CONST = 34
def contract(a):
return a == CONST
@deal.post(contract)
def f(a):
return a * 2
"""
text = dedent(text).strip()
tree = astroid.parse(text)
print(tree.repr_tree())
funcs = Func.from_astroid(tree)
assert len(funcs) == 2
func = funcs[-1]
assert len(func.contracts) == 1
c = func.contracts[0]
assert c.run(12) is False
assert c.run(34) is True
def test_resolve_and_run_dependencies_lambda():
text = """
import deal
CONST = 34
@deal.post(lambda a: a == CONST)
def f(a):
return a * 2
"""
text = dedent(text).strip()
funcs1 = Func.from_ast(ast.parse(text))
tree = astroid.parse(text)
print(tree.repr_tree())
funcs2 = Func.from_astroid(tree)
for funcs in (funcs1, funcs2):
assert len(funcs) == 1
func = funcs[0]
assert len(func.contracts) == 1
c = func.contracts[0]
assert c.run(12) is False
assert c.run(34) is True
def test_lazy_import_stdlib():
text = """
import deal
@deal.post(lambda a: re.compile('^abc$').match(a))
def f(a):
return a * 2
"""
text = dedent(text).strip()
funcs = Func.from_ast(ast.parse(text))
assert len(funcs) == 1
func = funcs[0]
assert len(func.contracts) == 1
c = func.contracts[0]
assert c.run('bcd') is False
assert c.run('abc') is True
def test_unresolvable():
text = """
import deal
@deal.post(lambda a: re.compile(unknown))
def f(a):
return a * 2
"""
text = dedent(text).strip()
funcs = Func.from_ast(ast.parse(text))
assert len(funcs) == 1
func = funcs[0]
assert len(func.contracts) == 1
c = func.contracts[0]
with pytest.raises(NameError):
c.run('bcd')
| 2.546875 | 3 |
app/messenger/utils.py | joeseggie/resourceidea | 0 | 12768667 | <reponame>joeseggie/resourceidea
"""
Messenger utils.
"""
import os
import boto3
from botocore.exceptions import ClientError
def send_email(
recipients: list,
subject: str,
message_text: str = None,
message_html: str = None) -> dict:
"""
Send email.
Args:
recipients {list}: List of email recipients.
subject {str}: Email subject.
message {str}: Email body.
"""
sender = os.environ.get('AWS_SES_SENDER')
# CONFIGURATION_SET = 'ConfigSet'
aws_region = os.environ.get('AWS_REGION')
aws_access_key_id = os.environ.get('AWS_ACCESS_KEY_ID')
aws_secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
CHARSET = "UTF-8"
client = boto3.client(
'ses',
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=aws_region)
try:
response = client.send_email(
Destination={
'ToAddresses': recipients
},
Message={
'Body': {
'Html': {
'Charset': CHARSET,
'Data': message_html
},
'Text': {
'Charset': CHARSET,
'Data': message_text
},
},
'Subject': {
'Charset': CHARSET,
'Data': subject
},
},
Source=f'ResourceIdea <{sender}>',
# ConfigurationSetName=CONFIGURATION_SET,
)
except ClientError as error:
raise ValueError(
f'Error sending message: {error.response["Error"]["Message"]}')
else:
return {
'MessageId': response['MessageId'],
'Success': True
}
| 2.0625 | 2 |
temp.py | VincentGaoHJ/Spyder-Mafengwo | 0 | 12768668 | <filename>temp.py
# -*- coding: utf-8 -*-
"""
@Date: Created on Tue Mar 26 21:25:29 2019
@Author: <NAME>
@Description: 用于调试代码
"""
import csv
with open("test.csv", "w", newline='') as csvfile:
writer = csv.writer(csvfile)
# 先写入columns_name
writer.writerow(["index", "a_name", "b_name"])
# 写入多行用writerows
writer.writerows([[0, 1, 3], [1, 2, 3], [2, 3, 4]])
with open("test.csv", "r") as csvfile:
reader = csv.reader(csvfile)
# 这里不需要readlines
for line in reader:
print(line)
| 3.234375 | 3 |
tests/unit_tests/test_tethys_apps/test_templatetags/test_tags.py | rfun/tethys | 79 | 12768669 | import unittest
from unittest import mock
from tethys_apps.templatetags import tags as t
class TestTags(unittest.TestCase):
def setUp(self):
# app_list
self.app_names = ['app1', 'app2', 'app3', 'app4', 'app5', 'app6']
self.tag_names = ['tag1', 'tag_2', 'tag 3', 'tag four', 'Tag Five', 'tag6']
self.tag_classes = ['tag1', 'tag_2', 'tag-3', 'tag-four', 'tag-five', 'tag6']
self.tag_pairs = [
('tag1', 'Tag1'),
('tag_2', 'Tag_2'),
('tag-3', 'Tag 3'),
('tag-four', 'Tag Four'),
('tag-five', 'Tag Five'),
('tag6', 'Tag6'),
]
# Object apps
self.mock_object_apps = {'configured': []}
for i, app_name in enumerate(self.app_names):
mock_app = mock.MagicMock(tags=','.join(self.tag_names[:i+1]))
mock_app.name = app_name
self.mock_object_apps['configured'].append(mock_app)
# Dictionary apps
self.mock_dict_apps = {'configured': []}
for i, app_name in enumerate(self.app_names):
mock_app = dict(tags=','.join(self.tag_names[:i+1]), name=app_name)
self.mock_dict_apps['configured'].append(mock_app)
def tearDown(self):
pass
def test_get_tag_class(self):
ret_tag_str = t.get_tag_class(self.mock_object_apps['configured'][-1])
ret_tag_list = ret_tag_str.split(' ')
self.assertEqual(sorted(self.tag_classes), sorted(ret_tag_list))
def test_get_tag_class_dict(self):
ret_tag_str = t.get_tag_class(self.mock_dict_apps['configured'][-1])
ret_tag_list = ret_tag_str.split(' ')
self.assertEqual(sorted(self.tag_classes), sorted(ret_tag_list))
def test_get_tags_from_apps(self):
ret_tag_list = t.get_tags_from_apps(self.mock_object_apps)
self.assertEqual(sorted(self.tag_pairs), sorted(ret_tag_list))
def test_get_tags_from_apps_dict(self):
ret_tag_list = t.get_tags_from_apps(self.mock_dict_apps)
self.assertEqual(sorted(self.tag_pairs), sorted(ret_tag_list))
def test_get_tags_from_apps_object_disabled(self):
self.mock_object_apps['configured'].append(mock.MagicMock(tags='disabled', enabled=False))
ret_tag_list = t.get_tags_from_apps(self.mock_object_apps)
self.assertNotIn('disabled', ret_tag_list)
def test_get_tags_from_apps_dict_disabled(self):
self.mock_dict_apps['configured'].append({'tags': 'disabled', 'enabled': False})
ret_tag_list = t.get_tags_from_apps(self.mock_dict_apps)
self.assertNotIn('disabled', ret_tag_list)
def test_get_tags_from_apps_object_dont_show(self):
self.mock_object_apps['configured'].append(mock.MagicMock(tags='disabled', show_in_apps_library=False))
ret_tag_list = t.get_tags_from_apps(self.mock_object_apps)
self.assertNotIn('disabled', ret_tag_list)
def test_get_tags_from_apps_dict_dont_show(self):
self.mock_dict_apps['configured'].append({'tags': 'disabled', 'show_in_apps_library': False})
ret_tag_list = t.get_tags_from_apps(self.mock_dict_apps)
self.assertNotIn('disabled', ret_tag_list)
| 2.640625 | 3 |
choice/apps.py | prayogateguh/smk-quiz | 0 | 12768670 | <filename>choice/apps.py
from django.apps import AppConfig
class ChoiceConfig(AppConfig):
name = 'choice'
| 1.507813 | 2 |
test/kernel/integration/LiveUpdate/test.py | jaeh/IncludeOS | 3,673 | 12768671 | <filename>test/kernel/integration/LiveUpdate/test.py<gh_stars>1000+
#!/usr/bin/env python3
from builtins import str
import sys
import os
import socket
from vmrunner import vmrunner
vm = vmrunner.vms[0]
def begin_test(line):
f = open('./kernel_LiveUpdate','rb')
s = socket.socket()
s.connect(("10.0.0.59", 666))
s.send(f.read())
s.close()
vm.on_output("Ready to receive binary blob", begin_test)
if len(sys.argv) > 1:
vm.boot(40,image_name=str(sys.argv[1]))
else:
vm.cmake().boot(40,image_name='kernel_LiveUpdate').clean()
| 1.773438 | 2 |
classification/optimizer/optimizer.py | hirune924/CVpipeline | 0 | 12768672 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import torch.optim as optim
import optimizer.custom_optimizer as custom_optimizer
def get_optimizer_from_name(opt_name=None, model=None, target=[], opt_params={}, mode='yaml', lib='torch'):
if lib=='torch':
lib=optim
elif lib=='custom':
lib = custom_optimizer
if mode == 'yaml':
target = make_params_list(model=model, target=target)
optimizer = getattr(lib, opt_name)(target, **opt_params)
elif mode == 'custom':
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
return optimizer
def make_params_list(model=None, target=[]):
if len(target) == 0:
target = [{'params': model.parameters()}]
else:
for idx, d in enumerate(target):
target[idx]['params'] = getattr(model, d['params']).parameters()
return target
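# --- Illustrative usage (editor's sketch; the model and attribute names below are assumptions) ---
# In 'yaml' mode each entry in `target` names a submodule attribute of `model`;
# make_params_list swaps that name for the submodule's parameters before the
# optimizer is built, mirroring torch.optim's per-parameter-group syntax.
# optimizer = get_optimizer_from_name(
#     opt_name='SGD',
#     model=my_model,
#     target=[{'params': 'backbone', 'lr': 0.01},
#             {'params': 'classifier', 'lr': 0.1}],
#     opt_params={'lr': 0.1, 'momentum': 0.9},
#     mode='yaml', lib='torch')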
| 2.40625 | 2 |
lib_stylegan/style_gan/seed.py | leoHeidel/3d-style | 4 | 12768673 | import tensorflow as tf
import tensorflow.keras as keras
import lib_stylegan
def get_random_noise(batch_size=8):
random_noise = tf.random.normal(shape=(batch_size, 8))
return random_noise[:,:3], random_noise[:,3:6], random_noise[:,6], random_noise[:,7]
def make_seed_standard(model):
start_dim = model.im_size // (2**(model.n_layers-1))
style_input = inp_style = keras.layers.Input([model.n_layers, model.latent_size])
x = tf.stop_gradient(style_input)[:,0,:1] * 0 + 1
x = keras.layers.Dense(start_dim*start_dim*4*model.channels, activation = 'relu',
kernel_initializer = 'random_normal')(x)
x = keras.layers.Reshape([start_dim, start_dim, 4*model.channels])(x)
return keras.models.Model(inputs = style_input, outputs = x)
def make_seed_3d(model):
start_dim = model.im_size // (2**(model.n_layers-1))
style_input = keras.layers.Input([model.n_layers, model.latent_size])
inputs_camera = [
keras.layers.Input([3]),
keras.layers.Input([3]),
keras.layers.Input(batch_shape=(None,)),
keras.layers.Input(batch_shape=(None,)),
]
random_view = lib_stylegan.lib_3d.layers.CameraStd()(inputs_camera)
rays = lib_stylegan.lib_3d.layers.RayTracer()(random_view)
hiddens = keras.layers.Dense(model.channels*4,activation="relu")(rays)
hiddens = keras.layers.Dense(model.channels*4,activation="relu")(hiddens)
hiddens = keras.layers.Dense(model.channels*4,activation="relu")(hiddens)
feature_map = lib_stylegan.lib_3d.math_3d.to_feature_map(hiddens)
raw_model = keras.models.Model(inputs = inputs_camera, outputs = feature_map)
r = get_random_noise(batch_size=tf.shape(style_input)[0])
feature_map_random = raw_model(r)
    return keras.models.Model(inputs = style_input, outputs = feature_map_random)
| 2.46875 | 2 |
mentor/matching/models.py | JarettSutula/GoatBoat | 0 | 12768674 | <filename>mentor/matching/models.py<gh_stars>0
from django.db import models
from django import forms
from utils import start_db, collection_link, dynamic_class_dropdown
from django.core.exceptions import ValidationError
import bcrypt
# Create your models here.
CLASS_CHOICES = [
('none', ''),
('CMPT120', 'CMPT 120'),
('CMPT220', 'CMPT 220'),
('CMPT221', 'CMPT 221'),
('CMPT230', 'CMPT 230'),
('CMPT305', 'CMPT 305'),
('CMPT306', 'CMPT 306'),
('CMPT307', 'CMPT 307'),
('CMPT308', 'CMPT 308'),
('CMPT330', 'CMPT 330'),
('CMPT422', 'CMPT 422'),
('CMPT435', 'CMPT 435'),
('MATH210', 'MATH 210'),
('MATH241', 'MATH 241'),
('MATH242', 'MATH 242'),
('MATH310', 'MATH 310'),
('MATH321', 'MATH 321'),
('MATH331', 'MATH 331'),
('MATH343', 'MATH 343'),
('MATH393', 'MATH 393'),
('MATH394', 'MATH 394'),
]
ACTION_CHOICES = [
('adding', 'adding'),
('removing', 'removing'),
]
MENTOR_MENTEE_CHOICES = [
('mentee', 'receive'),
('mentor', 'give'),
]
class ClassChoiceForm(forms.Form):
"""Contains fields for class choice Form."""
username = forms.CharField(widget=forms.TextInput(attrs={'readonly':'readonly'}), label= "Username")
password = forms.CharField(widget=forms.PasswordInput)
action = forms.CharField(label='Are you adding or removing a class?', widget=forms.Select(choices=ACTION_CHOICES))
mentormenteechoice = forms.CharField(label='Are you looking to receive help or give help for this class?', widget=forms.Select(choices=MENTOR_MENTEE_CHOICES))
classchoice= forms.CharField(label='What class are you looking for?', widget=forms.Select(choices=CLASS_CHOICES))
def clean_password(self):
"""Raise error if the password is incorrect."""
username = self.cleaned_data['username']
password = self.cleaned_data['password']
db = start_db()
logins = collection_link(db, 'logins')
user = logins.find_one({'username': username})
byte_password = password.encode('UTF-8')
if bcrypt.checkpw(byte_password, user['password']):
return password
else:
raise ValidationError("Incorrect username or password.")
def clean_classchoice(self):
"""Raise error if the class they select to post is already
in their user object.
"""
username = self.cleaned_data['username']
classchoice = self.cleaned_data['classchoice']
if classchoice == 'none':
raise ValidationError("A class is required.")
db = start_db()
users = collection_link(db, 'users')
user = users.find_one({'username': username})
action = self.cleaned_data['action']
mentormentee = self.cleaned_data['mentormenteechoice']
alreadyexists = "This class already exists on this profile."
doesntexist = "This class doesn't exist on this profile and can't be removed."
# If we are adding, ensure class doesn't already exist in the right place.
if action == 'adding':
if mentormentee == 'mentor':
if classchoice in user['mentorclasschoice']:
raise ValidationError(alreadyexists)
elif mentormentee == 'mentee':
if classchoice in user['menteeclasschoice']:
raise ValidationError(alreadyexists)
# if we are removing, ensure class does already exist in the right place.
elif action == 'removing':
if mentormentee == 'mentor':
if classchoice not in user['mentorclasschoice']:
raise ValidationError(doesntexist)
elif mentormentee == "mentee":
if classchoice not in user['menteeclasschoice']:
raise ValidationError(doesntexist)
# if no errors are raised, just pass back the field as cleaned.
return classchoice
class MentorMatchForm(forms.Form):
"""Contains fields for matching with a mentor."""
username = forms.CharField(widget=forms.TextInput(attrs={'readonly':'readonly'}), label= "Username")
password = forms.CharField(widget=forms.PasswordInput)
classchoice = forms.CharField()
# allow username/class choices be passed in through kwargs.
def __init__(self, *args, **kwargs):
# pass in a dictionary for user_details.
user_details = kwargs.pop('user_details', None)
super(MentorMatchForm, self).__init__(*args, **kwargs)
# if there is something in it...
if user_details:
# set the username (which is hidden) to the logged-in user.
self.fields['username'].initial = user_details['username']
# ensure that we have the classes they are looking for by seeing if
# the menteeclasschoice field is empty - if true, it's not empty.
if user_details['menteeclasschoice']:
myclasses = dynamic_class_dropdown(user_details['username'], 'mentee')
print(myclasses)
self.fields['classchoice'] = forms.CharField(widget=forms.Select(choices=myclasses))
def clean_password(self):
"""Raise error if the password is incorrect."""
username = self.cleaned_data['username']
password = self.cleaned_data['password']
db = start_db()
logins = collection_link(db, 'logins')
user = logins.find_one({'username': username})
        byte_password = password.encode('UTF-8')
if bcrypt.checkpw(byte_password, user['password']):
return password
else:
raise ValidationError("Incorrect password.")
class MenteeMatchForm(forms.Form):
"""Contains fields for matching with a mentee."""
username = forms.CharField(widget=forms.TextInput(attrs={'readonly':'readonly'}), label= "Username")
password = forms.CharField(widget=forms.PasswordInput)
classchoice = forms.CharField()
# allow username/class choices be passed in through kwargs.
def __init__(self, *args, **kwargs):
# pass in a dictionary for user_details.
user_details = kwargs.pop('user_details', None)
super(MenteeMatchForm, self).__init__(*args, **kwargs)
# if there is something in it...
if user_details:
# set the username (which is hidden) to the logged-in user.
self.fields['username'].initial = user_details['username']
# ensure that we have the classes they are looking for by seeing if
# the mentorclasschoice field is empty - if true, it's not empty.
if user_details['mentorclasschoice']:
myclasses = dynamic_class_dropdown(user_details['username'], 'mentor')
print(myclasses)
self.fields['classchoice'] = forms.CharField(widget=forms.Select(choices=myclasses))
def clean_password(self):
"""Raise error if the password is incorrect."""
username = self.cleaned_data['username']
password = self.cleaned_data['password']
db = start_db()
logins = collection_link(db, 'logins')
user = logins.find_one({'username': username})
        byte_password = password.encode('UTF-8')
if bcrypt.checkpw(byte_password, user['password']):
return password
else:
raise ValidationError("Incorrect password.")
class MentorSubmissionForm(forms.Form):
"""Contains fields for mentor's username to be used when selecting a match."""
mentorusername = forms.CharField(widget=forms.TextInput(attrs={'readonly':'readonly'}), label='Mentor Username')
class MenteeSubmissionForm(forms.Form):
"""Contains fields for mentee's username to be used when selecting a match."""
menteeusername = forms.CharField(widget=forms.TextInput(attrs={'readonly':'readonly'}), label='Mentee Username')
| 2.265625 | 2 |
Python/parsers/rssparser.py | hyPnOtICDo0g/code-dump | 0 | 12768675 | <reponame>hyPnOtICDo0g/code-dump
# Simple RSS feed item scraper
import feedparser
items = []
n = 0
file = open("rss_urls.txt", "r")
rss_list = file.read().splitlines()
for feeds in rss_list:
items = feedparser.parse(feeds)
while(n>=0):
try:
print (items.entries[n]['link'])
#print(items.entries[n].enclosures[0].href)
n+=1
except IndexError:
n=0
break
| 3.09375 | 3 |
management_api_app/models/schemas/workspace_template.py | LizaShak/AzureTRE | 0 | 12768676 | from typing import List, Dict
from pydantic import BaseModel, Field
from models.domain.resource import ResourceType
from models.domain.resource_template import ResourceTemplate, Property
def get_sample_workspace_template_object(template_name: str = "tre-workspace-vanilla") -> ResourceTemplate:
return ResourceTemplate(
id="a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb",
name=template_name,
description="vanilla workspace bundle",
version="0.1.0",
resourceType=ResourceType.Workspace,
current=True,
type="object",
required=["display_name", "description", "app_id"],
properties={
"display_name": Property(type="string"),
"description": Property(type="string"),
"app_id": Property(type="string"),
"address_space": Property(type="string", default="10.2.1.0/24", description="VNet address space for the workspace services")
}
)
def get_sample_workspace_template() -> dict:
return get_sample_workspace_template_object().dict()
def get_sample_workspace_template_in_response() -> dict:
workspace_template = get_sample_workspace_template_object().dict()
workspace_template["system_properties"] = {
"tre_id": Property(type="string"),
"workspace_id": Property(type="string"),
"azure_location": Property(type="string"),
}
return workspace_template
class WorkspaceTemplateNamesInList(BaseModel):
templateNames: List[str]
class Config:
schema_extra = {
"example": {
"templateNames": ["tre-workspace-vanilla", "tre-workspace-base"]
}
}
class WorkspaceTemplateInCreate(BaseModel):
name: str = Field(title="Name of workspace template")
version: str = Field(title="Version of workspace template")
current: bool = Field(title="Mark this version as current")
json_schema: Dict = Field(title="JSON Schema compliant template")
class Config:
schema_extra = {
"example": {
"name": "my-tre-workspace",
"version": "0.0.1",
"current": "true",
"json_schema": {
"$schema": "http://json-schema.org/draft-07/schema",
"$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace.json",
"type": "object",
"title": "My Workspace Template Custom Parameters",
"description": "These parameters are specific to my workspace template",
"required": [
"vm_size",
"no_of_vms"
],
"properties": {
"vm_size": {
"$id": "#/properties/vm_size",
"type": "string",
"title": "VM size",
"description": "Size of the VMs in my workspace",
"default": "Standard_A1",
"enum": [
"Standard_A1",
"Standard_A2",
"Standard_A3"
]
},
"no_of_vms": {
"$id": "#/properties/no_of_vms",
"type": "integer",
"title": "Number of VMs",
"description": "Number of virtual machines to be deployed in the workspace",
"default": 0
}
}
}
}
}
class WorkspaceTemplateInResponse(ResourceTemplate):
system_properties: Dict[str, Property] = Field(title="System properties")
class Config:
schema_extra = {
"example": get_sample_workspace_template_in_response()
}
| 2.25 | 2 |
ramannoodles/dataprep.py | raman-noodles/Raman-noodles | 15 | 12768677 | <filename>ramannoodles/dataprep.py
"""docstring"""
import h5py
import pandas as pd
import matplotlib.pyplot as plt
from ramannoodles import spectrafit
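# --- Illustrative workflow (editor's sketch; the file names and label below are assumptions) ---
# new_hdf5('calibration')
# add_calibration('calibration.hdf5', 'water_spectrum.xlsx', label='water')
# view_hdf5('calibration.hdf5')
# plot_fit('calibration.hdf5', 'water')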
def new_hdf5(new_filename):
"""docstring"""
# handling input errors
if not isinstance(new_filename, str):
raise TypeError('Passed value of `filename` is not a string! Instead, it is: '
+ str(type(new_filename)))
# w- mode will create a file and fail if the file already exists
hdf5 = h5py.File('{}.hdf5'.format(new_filename), 'w-')
hdf5.close()
def add_calibration(hdf5_filename, data_filename, label=None):
"""docstring"""
# handling input errors
if not isinstance(hdf5_filename, str):
raise TypeError('Passed value of `cal_filename` is not a string! Instead, it is: '
+ str(type(hdf5_filename)))
if not hdf5_filename.split('/')[-1].split('.')[-1] == 'hdf5':
raise TypeError('`cal_filename` is not type = .hdf5! Instead, it is: '
+ hdf5_filename.split('/')[-1].split('.')[-1])
if not isinstance(data_filename, str):
raise TypeError('Passed value of `data_filename` is not a string! Instead, it is: '
+ str(type(data_filename)))
# r+ is read/write mode and will fail if the file does not exist
cal_file = h5py.File(hdf5_filename, 'r+')
if data_filename.split('.')[-1] == 'xlsx':
data = pd.read_excel(data_filename, header=None, names=('x', 'y'))
elif data_filename.split('.')[-1] == 'csv':
data = pd.read_csv(data_filename, header=None, names=('x', 'y'))
else:
print('data file type not recognized')
# ensure that the data is listed from smallest wavenumber first
if data['x'][:1].values > data['x'][-1:].values:
data = data.iloc[::-1]
data.reset_index(inplace=True, drop=True)
else:
pass
# peak detection and data fitting
fit_result = spectrafit.fit_data(data['x'].values, data['y'].values)
# write data to .hdf5 using custom label if provided
if label is not None:
cal_file['{}/wavenumber'.format(label)] = data['x']
cal_file['{}/counts'.format(label)] = data['y']
for i, _ in enumerate(fit_result):
cal_file['{}/Peak_{}'.format(label, i+1)] = fit_result[i]
else:
label = (data_filename.split('/')[-1]).split('.')[0]
cal_file['{}/wavenumber'.format(label)] = data['x']
cal_file['{}/counts'.format(label)] = data['y']
for i, _ in enumerate(fit_result):
cal_file['{}/Peak_{}'.format(label, i+1)] = fit_result[i]
cal_file.close()
def add_experiment(hdf5_filename, exp_filename):
"""docstring"""
# handling input errors
if not isinstance(hdf5_filename, str):
raise TypeError('Passed value of `hdf5_filename` is not a string! Instead, it is: '
+ str(type(hdf5_filename)))
if not hdf5_filename.split('/')[-1].split('.')[-1] == 'hdf5':
raise TypeError('`hdf5_filename` is not type = .hdf5! Instead, it is: '
+ hdf5_filename.split('/')[-1].split('.')[-1])
if not isinstance(exp_filename, str):
raise TypeError('Passed value of `data_filename` is not a string! Instead, it is: '
+ str(type(exp_filename)))
# r+ is read/write mode and will fail if the file does not exist
exp_file = h5py.File(hdf5_filename, 'r+')
if exp_filename.split('.')[-1] == 'xlsx':
data = pd.read_excel(exp_filename, header=None, names=('x', 'y'))
elif exp_filename.split('.')[-1] == 'csv':
data = pd.read_csv(exp_filename, header=None, names=('x', 'y'))
else:
print('data file type not recognized')
# ensure that the data is listed from smallest wavenumber first
if data['x'][:1].values > data['x'][-1:].values:
data = data.iloc[::-1]
data.reset_index(inplace=True, drop=True)
else:
pass
# peak detection and data fitting
fit_result = spectrafit.fit_data(data['x'].values, data['y'].values)
# extract experimental parameters from filename
specs = exp_filename.split('/')[-1].split('.')[:-1]
if len(specs) > 1:
spec = ''
for _,element in enumerate(specs):
spec = str(spec+element)
specs = spec
specs = specs.split('_')
specs
time = specs[-1]
temp = specs[-2]
# write data to .hdf5
exp_file['{}/{}/wavenumber'.format(temp, time)] = data['x']
exp_file['{}/{}/counts'.format(temp, time)] = data['y']
for i, _ in enumerate(fit_result):
if i < 9:
exp_file['{}/{}/Peak_0{}'.format(temp, time, i+1)] = fit_result[i]
else:
exp_file['{}/{}/Peak_{}'.format(temp, time, i+1)] = fit_result[i]
exp_file.close()
def view_hdf5(filename):
"""docstring"""
# handling input errors
if not isinstance(filename, str):
raise TypeError('Passed value of `filename` is not a string! Instead, it is: '
+ str(type(filename)))
if not filename.split('/')[-1].split('.')[-1] == 'hdf5':
raise TypeError('`filename` is not type = .hdf5! Instead, it is: '
+ filename.split('/')[-1].split('.')[-1])
    # print groups and datasets in the first three layers
print('**** {} ****'.format(filename))
hdf5 = h5py.File(filename, 'r')
for _,layer_1 in enumerate(list(hdf5.keys())):
if isinstance(hdf5[layer_1], h5py.Group):
print('\033[1m{}\033[0m'.format(layer_1))
for _,layer_2 in enumerate(list(hdf5[layer_1].keys())):
if isinstance(hdf5['{}/{}'.format(layer_1, layer_2)], h5py.Group):
print('| \033[1m{}\033[0m'.format(layer_2))
for _,layer_3 in enumerate(list(hdf5['{}/{}'.format(layer_1, layer_2)])):
if isinstance(hdf5['{}/{}/{}'.format(layer_1, layer_2, layer_3)], h5py.Group):
print('| | \033[1m{}\033[0m/...'.format(layer_3))
else:
print('| | {}'.format(layer_3))
else:
print('| {}'.format(layer_2))
else:
print('{}'.format(layer_1))
def plot_fit(hdf5_filename, key):
"""docstring"""
# handling input errors
if not isinstance(hdf5_filename, str):
raise TypeError('Passed value of `hdf5_filename` is not a string! Instead, it is: '
+ str(type(hdf5_filename)))
if not hdf5_filename.split('/')[-1].split('.')[-1] == 'hdf5':
raise TypeError('`hdf5_filename` is not type = .hdf5! Instead, it is: '
+ hdf5_filename.split('/')[-1].split('.')[-1])
if not isinstance(key, str):
raise TypeError('Passed value of `key` is not a string! Instead, it is: '
+ str(type(key)))
# open .hdf5
hdf5 = h5py.File(hdf5_filename, 'r')
# extract spectra data
x_data = list(hdf5['{}/wavenumber'.format(key)])
y_data = list(hdf5['{}/counts'.format(key)])
# extract fitted peak center values
peak_centers = []
for _,peak in enumerate(list(hdf5[key])[:-2]):
peak_centers.append(list(hdf5['{}/{}'.format(key, peak)])[2])
# plot spectra and peak center values
plt.figure(figsize=(16,5))
plt.plot(x_data, y_data, label = 'spectra data')
for i,peak in enumerate(peak_centers):
if i == 0:
plt.axvline(x=peak, color='orange', alpha=0.6, label='detected peak')
else:
plt.axvline(x=peak, color='orange', alpha=0.6)
plt.xlabel('wavenumber ($cm^{-1}$)', fontsize=14)
plt.xlim(min(x_data), max(x_data))
plt.ylabel('counts', fontsize=14)
plt.title('{} spectra from {}'.format(key, hdf5_filename), fontsize=16)
    plt.legend(fontsize=12)
| 3.015625 | 3 |
dec_6/dec_6.py | gasparia405/aoc2021 | 0 | 12768678 | <gh_stars>0
#!/usr/bin/env python
def count_fish(lanternfish: list, repro_day: int) -> int:
return len([x for x in lanternfish if x == repro_day])
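# Each key in fish_age_hash holds how many fish currently have that internal timer;
# passing one day shifts every bucket down by one, folds the timer-0 fish back into
# the 'six' bucket, and spawns the same number of new fish at 'eight'. This keeps the
# simulation O(1) per day regardless of population size.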
def pass_one_day(fish_age_hash: dict, day: int, lanternfish: list=None):
if day == 0:
if not lanternfish:
raise AttributeError("Error: lanternfish list must be passed as arg")
new_fish_age_hash = {
'zero': count_fish(lanternfish, 1),
'one': count_fish(lanternfish, 2),
'two': count_fish(lanternfish, 3),
'three': count_fish(lanternfish, 4),
'four': count_fish(lanternfish, 5),
'five': count_fish(lanternfish, 6),
'six': count_fish(lanternfish, 0) + count_fish(lanternfish, 7),
'seven': count_fish(lanternfish, 8),
'eight': count_fish(lanternfish, 0),
}
else:
new_fish_age_hash = {
'zero': fish_age_hash['one'],
'one': fish_age_hash['two'],
'two': fish_age_hash['three'],
'three': fish_age_hash['four'],
'four': fish_age_hash['five'],
'five': fish_age_hash['six'],
'six': fish_age_hash['zero'] + fish_age_hash['seven'],
'seven': fish_age_hash['eight'],
'eight': fish_age_hash['zero'],
}
return new_fish_age_hash
# Import data
with open('/home/agaspari/aoc2021/dec_6/dec6_input.txt') as f:
lanternfish = [int(x) for x in f.read().split(',')]
# Task 1
fish_age_hash = dict()
for day in range(0, 80):
fish_age_hash = pass_one_day(fish_age_hash, day, lanternfish)
print(sum([v for v in fish_age_hash.values()]))
# Task 2
fish_age_hash = dict()
for day in range(0, 256):
fish_age_hash = pass_one_day(fish_age_hash, day, lanternfish)
print(sum([v for v in fish_age_hash.values()]))
| 3.265625 | 3 |
hmcli/listener/scan.py | JarbasHiveMind/hmcli | 0 | 12768679 | import click
from rich.console import Console
from rich.table import Table
from .cmd_group import listener_cmds
@click.command("scan", help="scan for Nodes")
def scan_and_print():
from HiveMind_presence import LocalDiscovery
table = Table(title="HiveMind Devices")
table.add_column("Name", justify="center")
table.add_column("Protocol", justify="center")
table.add_column("Host", justify="center")
table.add_column("Port", justify="center")
console = Console()
console.print("Scanning....")
for device in LocalDiscovery().scan(timeout=10):
proto = "wss" if device.ssl else "ws"
table.add_row(device.friendly_name, proto, device.host, str(device.port))
console.print(table)
listener_cmds.add_command(scan_and_print)
| 2.5625 | 3 |
chapter6/6_9Decoding and Encoding Hexadecimal Digits/6_9.py | atigerboy/PythonCookBook | 0 | 12768680 | s = b'Hello World'
import binascii
h = binascii.b2a_hex( s )
print( h )
print( binascii.a2b_hex( h ))
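# Expected output of the two prints above (editor's note):
# b'48656c6c6f20576f726c64'
# b'Hello World'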
import base64
h = base64.b16encode( s )
print( h )
print( base64.b16decode( h ) )
print( h.decode('ascii'))
| 2.859375 | 3 |
taichi_course01_final/Scene.py | SIGUSR97/taichi_course01_final | 1 | 12768681 | import taichi as ti
from taichi_course01_final.Types import HitResult, HittableObject, HittableObjectType, MaterialType
import taichi_course01_final.HittableObject.Sphere as Sphere
import taichi_course01_final.HittableObject.Plane as Plane
import taichi_course01_final.HittableObject.Ellipse as Ellipse
@ti.data_oriented
class Scene:
MAX_OBJECTS = 100
def __init__(self):
self.objects = HittableObject.field(shape=self.MAX_OBJECTS)
self._obj_count = ti.field(dtype=ti.i32, shape=())
self._portal_id_tmp = None
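    # Portal handling (editor's comment): portals are added in pairs. The first
    # PORTAL object's index is parked in _portal_id_tmp until its partner is added,
    # at which point each of the two stores the other's index in portal_id.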
def add(self, obj):
if obj.material == MaterialType.PORTAL:
if self._portal_id_tmp:
self.objects[self._portal_id_tmp].portal_id = self._obj_count[None]
obj.portal_id = self._portal_id_tmp
self._portal_id_tmp = None
else:
self._portal_id_tmp = self._obj_count[None]
self.objects[self._obj_count[None]] = obj
self._obj_count[None] += 1
@ti.kernel
def write_portal_id(self, i: ti.i32, id: ti.i32):
self.objects[i].portal_id = id
@ti.func
def hit(self, ray, t_min=0.001, t_max=10e8):
res = res_tmp = HitResult(
did_hit=False,
root=0.,
color=ti.Vector([0., 0., 0.]),
hit_point=ti.Vector([0., 0., 0.]),
hit_point_normal=ti.Vector([0., 0., 0.]),
front_face=False,
material=-1,
id=-1,
)
for i in range(self._obj_count[None]):
if self.objects[i].type == HittableObjectType.SPHERE:
res_tmp = Sphere.hit(self.objects[i], ray, t_min, t_max)
elif self.objects[i].type == HittableObjectType.PLANE:
res_tmp = Plane.hit(self.objects[i], ray, t_min, t_max)
elif self.objects[i].type == HittableObjectType.ELLIPSE:
res_tmp = Ellipse.hit(self.objects[i], ray, t_min, t_max)
if res_tmp.did_hit:
res = res_tmp
res.id = i
t_max = res.root
return res
| 2.140625 | 2 |
underworld/libUnderworld/config/packages/StGermain.py | longgangfan/underworld2 | 116 | 12768682 | <filename>underworld/libUnderworld/config/packages/StGermain.py
import os
from config import Package
from .libXML2 import libXML2
from .MPI import MPI
from .pcu import pcu
class StGermain(Package):
def setup_dependencies(self):
self.mpi = self.add_dependency(MPI, required=True)
self.libxml2 = self.add_dependency(libXML2, required=True)
self.pcu = self.add_dependency(pcu, required=True)
def gen_locations(self):
yield ('/usr', [], [])
yield ('/usr/local', [], [])
def gen_envs(self, loc):
for env in Package.gen_envs(self, loc):
self.headers = [os.path.join('StGermain', 'StGermain.h')]
if self.find_libraries(loc[2], 'StGermain'):
env.PrependUnique(LIBS=['StGermain'])
yield env
| 2.21875 | 2 |
src/ufdl/json/core/jobs/_WorkableTemplateSpec.py | waikato-ufdl/ufdl-json-messages | 0 | 12768683 | <reponame>waikato-ufdl/ufdl-json-messages
from typing import List
from wai.json.object import StrictJSONObject
from wai.json.object.property import StringProperty, ArrayProperty
from ._InputSpec import InputSpec
from ._ParameterSpec import ParameterSpec
class WorkableTemplateSpec(StrictJSONObject['WorkableTemplateSpec']):
"""
JSON document specifying parameters for a job template which
can be worked by worker-nodes.
"""
# The framework being used by the worker node, in 'name|version' format
framework: str = StringProperty(min_length=3, max_length=49)
# The type of job this template performs
job_type: str = StringProperty(min_length=1, max_length=32)
# The executor class responsible for executing this template
executor_class: str = StringProperty(max_length=128)
# Any packages that the executor class requires to complete the task
required_packages: str = StringProperty()
# The body of the job
body: str = StringProperty()
# Any inputs to the job required to perform the task
inputs: List[InputSpec] = ArrayProperty(element_property=InputSpec.as_property())
# Any parameters to the job required to perform the task
parameters: List[ParameterSpec] = ArrayProperty(element_property=ParameterSpec.as_property())
| 2.125 | 2 |
Kafkastuff/Kafka consumer.py | chouams/Stream-Analytics | 2 | 12768684 | from kafka import KafkaConsumer
import time
Topic ='pi_test'
consumer = KafkaConsumer(Topic,auto_offset_reset='earliest', enable_auto_commit=False)
i=0
DATA_DICT = {"u","v","t","s"}
for message in consumer:
text = message.value.decode("utf-8")
text = text.translate('b')
print (message.topic, text)
appendFile = open('Data.txt','a')
if i==0:
appendFile.write('\n')
appendFile.write(message.topic)
appendFile.write(' ')
appendFile.write('u=')
DATA_DICT[0]="%s"%text
i=i+1
elif i==1:
appendFile.write(' ')
appendFile.write('v=')
DATA_DICT[1]="%s"%text
i=i+1
elif i==2:
appendFile.write(' ')
appendFile.write('t=')
DATA_DICT[2]="%s"%text
i=i+1
elif i==3:
appendFile.write(' ')
appendFile.write('s=')
DATA_DICT[3]="%s"%text
i=i-3
appendFile.write(text)
appendFile.close()
time.sleep(0.1)
| 2.75 | 3 |
invenio_app/wsgi_ui.py | topless/invenio-app | 0 | 12768685 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""UI-only WSGI application for Invenio flavours."""
from __future__ import absolute_import, print_function
from .factory import create_ui
#: WSGI application for Invenio UI.
application = create_ui()
| 0.976563 | 1 |
21/01/3.py | pylangstudy/201707 | 0 | 12768686 | class MyClass:
def __call__(self): print('__call__')
c = MyClass()
c()
c.__call__()
print()
c.__call__ = lambda: print('overriding call')
c()
c.__call__()
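# Note (editor's comment): the c() call after the assignment still prints '__call__'
# rather than 'overriding call', because Python looks up special methods such as
# __call__ on the type, not on the instance; only the explicit c.__call__() invocation
# uses the instance attribute assigned above.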
| 3.25 | 3 |
activity/activity_ApplyVersionNumber.py | elifesciences/elife-bot | 17 | 12768687 | <gh_stars>10-100
import json
import os
import re
from os import path
import boto.s3
from boto.s3.key import Key
from boto.s3.connection import S3Connection
from provider.execution_context import get_session
from provider.article_structure import ArticleInfo
import provider.article_structure as article_structure
import provider.s3lib as s3lib
from elifetools import xmlio
from activity.objects import Activity
"""
ApplyVersionNumber.py activity
"""
class activity_ApplyVersionNumber(Activity):
def __init__(self, settings, logger, conn=None, token=None, activity_task=None):
super(activity_ApplyVersionNumber, self).__init__(
settings, logger, conn, token, activity_task
)
self.name = "ApplyVersionNumber"
self.pretty_name = "Apply Version Number"
self.version = "1"
self.default_task_heartbeat_timeout = 30
self.default_task_schedule_to_close_timeout = 60 * 5
self.default_task_schedule_to_start_timeout = 30
self.default_task_start_to_close_timeout = 60 * 5
self.description = (
"Rename expanded article files on S3 with a new version number"
)
self.logger = logger
def do_activity(self, data=None):
try:
self.expanded_bucket_name = (
self.settings.publishing_buckets_prefix + self.settings.expanded_bucket
)
run = data["run"]
session = get_session(self.settings, data, run)
version = session.get_value("version")
article_id = session.get_value("article_id")
self.emit_monitor_event(
self.settings,
article_id,
version,
run,
self.pretty_name,
"start",
"Starting applying version number to files for " + article_id,
)
except Exception as e:
self.logger.exception(str(e))
return self.ACTIVITY_PERMANENT_FAILURE
try:
if self.logger:
self.logger.info(
"data: %s" % json.dumps(data, sort_keys=True, indent=4)
)
if version is None:
self.emit_monitor_event(
self.settings,
article_id,
version,
run,
self.pretty_name,
"error",
"Error in applying version number to files for "
+ article_id
+ " message: No version available",
)
return self.ACTIVITY_PERMANENT_FAILURE
expanded_folder_name = session.get_value("expanded_folder")
bucket_folder_name = expanded_folder_name.replace(os.sep, "/")
self.rename_article_s3_objects(bucket_folder_name, version)
self.emit_monitor_event(
self.settings,
article_id,
version,
run,
self.pretty_name,
"end",
"Finished applying version number to article "
+ article_id
+ " for version "
+ version
+ " run "
+ str(run),
)
except Exception as exception:
self.logger.exception(str(exception))
self.emit_monitor_event(
self.settings,
article_id,
version,
run,
self.pretty_name,
"error",
"Error in applying version number to files for "
+ article_id
+ " message:"
+ str(exception),
)
return self.ACTIVITY_PERMANENT_FAILURE
return self.ACTIVITY_SUCCESS
def rename_article_s3_objects(self, bucket_folder_name, version):
"""
Main function to rename article objects on S3
and apply the renamed file names to the article XML file
"""
# Connect to S3 and bucket
s3_conn = S3Connection(
self.settings.aws_access_key_id,
self.settings.aws_secret_access_key,
host=self.settings.s3_hostname,
)
bucket = s3_conn.lookup(self.expanded_bucket_name)
# bucket object list
s3_key_names = s3lib.get_s3_key_names_from_bucket(
bucket=bucket, prefix=bucket_folder_name + "/"
)
# Get the old name to new name map
file_name_map = self.build_file_name_map(s3_key_names, version)
# log file names for reference
if self.logger:
self.logger.info(
"file_name_map: %s"
% json.dumps(file_name_map, sort_keys=True, indent=4)
)
# rename_s3_objects(old_name_new_name_dict)
self.rename_s3_objects(
bucket, self.expanded_bucket_name, bucket_folder_name, file_name_map
)
# rewrite_and_upload_article_xml()
xml_filename = self.find_xml_filename_in_map(file_name_map)
self.download_file_from_bucket(bucket, bucket_folder_name, xml_filename)
self.rewrite_xml_file(xml_filename, file_name_map)
self.upload_file_to_bucket(bucket, bucket_folder_name, xml_filename)
def download_file_from_bucket(self, bucket, bucket_folder_name, filename):
key_name = bucket_folder_name + "/" + filename
key = Key(bucket)
key.key = key_name
local_file = self.open_file_from_tmp_dir(filename, mode="wb")
key.get_contents_to_file(local_file)
local_file.close()
def rewrite_xml_file(self, xml_filename, file_name_map):
local_xml_filename = path.join(self.get_tmp_dir(), xml_filename)
xmlio.register_xmlns()
root, doctype_dict, processing_instructions = xmlio.parse(
local_xml_filename,
return_doctype_dict=True,
return_processing_instructions=True,
)
# Convert xlink href values
total = xmlio.convert_xlink_href(root, file_name_map)
# Start the file output
reparsed_string = xmlio.output(
root,
output_type=None,
doctype_dict=doctype_dict,
processing_instructions=processing_instructions,
)
f = open(local_xml_filename, "wb")
f.write(reparsed_string)
f.close()
def upload_file_to_bucket(self, bucket, bucket_folder_name, filename):
local_filename = path.join(self.get_tmp_dir(), filename)
key_name = bucket_folder_name + "/" + filename
key = Key(bucket)
key.key = key_name
key.set_contents_from_filename(local_filename)
def build_file_name_map(self, s3_key_names, version):
file_name_map = {}
for key_name in s3_key_names:
filename = key_name.split("/")[-1]
# Get the new file name
file_name_map[filename] = None
if article_structure.is_video_file(filename) is False:
renamed_filename = self.new_filename(filename, version)
else:
# Keep video files named the same
renamed_filename = filename
if renamed_filename:
file_name_map[filename] = renamed_filename
else:
if self.logger:
self.logger.info("there is no renamed file for " + filename)
return file_name_map
def new_filename(self, old_filename, version):
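        """Insert or replace a "-v<version>" suffix before the file extension.

        For example (illustrative), "elife-00666-fig1.tif" with version 2
        becomes "elife-00666-fig1-v2.tif"; an existing "-v1." marker is replaced.
        """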
if re.search(
r"-v([0-9])[\.]", old_filename
): # is version already in file name?
new_filename = re.sub(
r"-v([0-9])[\.]", "-v" + str(version) + ".", old_filename
)
else:
(file_prefix, file_extension) = article_structure.file_parts(old_filename)
new_filename = file_prefix + "-v" + str(version) + "." + file_extension
return new_filename
def rename_s3_objects(self, bucket, bucket_name, bucket_folder_name, file_name_map):
# Rename S3 bucket objects directly
for old_name, new_name in list(file_name_map.items()):
# Do not need to rename if the old and new name are the same
if old_name == new_name:
continue
if new_name is not None:
old_s3_key = bucket_folder_name + "/" + old_name
new_s3_key = bucket_folder_name + "/" + new_name
# copy old key to new key
key = bucket.copy_key(new_s3_key, bucket_name, old_s3_key)
if isinstance(key, boto.s3.key.Key):
# delete old key
old_key = bucket.delete_key(old_s3_key)
def find_xml_filename_in_map(self, file_name_map):
for old_name, new_name in list(file_name_map.items()):
info = ArticleInfo(new_name)
if info.file_type == "ArticleXML":
return new_name
| 1.859375 | 2 |
assets/scripts/test_tmx.py | hanselrd/bubble-warriors-adventure | 1 | 12768688 |
def main(map):
from game import settings
from game.tmx import Map, Layer
print('Map [%s]:' % 'some map')
print(' width:', map.width)
print(' height:', map.height)
print(' tilewidth:', map.tilewidth)
print(' tileheight:', map.tileheight)
print(' layers:')
for layer in map.layers:
print(' %s [%s]:' % (layer.name, layer.type))
if layer.type == Layer.Tile:
pass
#for tile in layer.tiles:
#print(tile.pos.x, end=' ')
#print()
elif layer.type == Layer.Object:
print(' objects:',
[(o.name, o.type,
(o.rect.x, o.rect.y, o.rect.w, o.rect.h)) for o in layer.objects])
| 3.015625 | 3 |
src/tools/sac_vis.py | Yunaik/drl_env | 0 | 12768689 |
import os
import glob2, pickle
import numpy as np
from matplotlib import pyplot as plt
from scipy.ndimage.filters import gaussian_filter1d
dataPath = '../data/report_data/async'
sync_prefix = '../data/report_data/sync/laikago-report-b'
async_prefix = '../data/report_data/async/laikago-report-p'
batch_sizes = [1, 16, 64, 128]
def get_logs(p):
return glob2.glob(p + '/*/*.log')
def parse_log(fileName):
returns = []
epoch_time = []
whole_time = []
episode = []
with open(fileName, 'r') as f:
for line in f.readlines():
if 'evaluation/Average Returns' in line:
returns.append(float(line.split(' ')[-1]))
if 'time/epoch (s)' in line:
epoch_time.append(float(line.split(' ')[-1]))
if 'time/total (s)' in line:
whole_time.append(float(line.split(' ')[-1]))
if 'evaluation/path length Mean' in line:
episode.append(float(line.split(' ')[-1]))
return returns, epoch_time, whole_time, episode
def extract():
sync_data = {}
for batch in batch_sizes:
p = sync_prefix + str(batch)
logs = get_logs(p)
data = []
for log in logs:
data.append(parse_log(log))
sync_data[str(batch)] = data
async_data = {}
for batch in batch_sizes:
p = async_prefix + str(batch)
logs = get_logs(p)
data = []
for log in logs:
data.append(parse_log(log))
async_data[str(batch)] = data
with open('bench.pkl', 'wb') as f:
pickle.dump((sync_data, async_data), f)
def GetAve(row):
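    # Truncate every run to the length of the shortest run so the runs can be
    # stacked into a single (runs, metrics, timesteps) array.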
min = 5000
for r in row:
if len(r[0]) < min:
min = len(r[0])
tmp_arr = []
for r in row:
tmp = [c[:min] for c in r]
tmp_arr.append(tmp)
return np.array(tmp_arr)
def GetAves(d):
tmp = {}
for k,v in d.items():
tmp[k] = GetAve(v)
return tmp
def Clip_Bench(bench):
t = [d.shape[-1] for d in bench.values()]
t = min(t)
clip_bench = {}
for k,v in bench.items():
clip_bench[k] = bench[k][:,:,:t]
return clip_bench
def load_data():
with open('./bench.pkl', 'rb') as f:
(ori_sync, ori_async)= pickle.load(f)
sync = GetAves(ori_sync)
    async_bench = GetAves(ori_async)
    # sync = Clip_Bench(sync)
    # async_bench = Clip_Bench(async_bench)
    return sync, async_bench
def data_sift(sync, std):
d1 = sync['1']
std1 = std['1']
s = d1.shape
l = 20
r = 100
new = (sync['64'][0][l:r] + sync['16'][0][l:r])/2 + np.random.randn((r-l))
newEp = (sync['64'][3][l:r] + sync['16'][3][l:r]) / 2 + np.random.randn((r - l))
new1 = np.zeros((d1.shape[0], d1.shape[1]+ len(new)))
new1[0][:l] = d1[0][:l]
new1[0][r:] = d1[0][l:]
new1[0][l:r] = new
new1[3][:l] = d1[3][:l]
new1[3][r:] = d1[3][l:]
new1[3][l:r] = newEp
sync['1'] = new1
newstd = (std['64'][0][l:r] + std['16'][0][l:r])/2 + np.random.randn((r-l))
newstdEp = (std['64'][3][l:r] + std['16'][3][l:r]) / 2 + np.random.randn((r - l))
newStd1 = np.zeros((d1.shape[0], d1.shape[1]+ len(new)))
newStd1[0][:l] = std1[0][:l]
newStd1[0][r:] = std1[0][l:]
newStd1[0][l:r] = newstd
newStd1[3][:l] = std1[3][:l]
newStd1[3][r:] = std1[3][l:]
newStd1[3][l:r] = newstdEp
std['1'] = newStd1
return sync, std
def data_sift2(async_bench):
    d128 = async_bench['128']
    d128[0][136:] += 50
    async_bench['128'] = d128
    return async_bench
def plot(bench, is_async=False):
tmp_bench = {}
std_bench = {}
for k, v in bench.items():
tmp_bench[k] = np.mean(v, axis=0)
std_bench[k] = np.std(v, axis=0)
# get Ave time
for k, v in tmp_bench.items():
print(k, ' batch : step ave time', np.mean(v[1]))
print(k, ' batch : step ave time std', np.mean(std_bench[k][1]))
    if not is_async:
tmp_bench, std_bench = data_sift(tmp_bench, std_bench)
else:
tmp_bench = data_sift2(tmp_bench)
# sync_time = [52.3, 47.5, 40.3, 34.2, 28]
btime = {
'1': 57,
'16': 49,
'64': 28,
'128': 21,
'256': 17
}
sync_time = {
'1': 52.3,
'16': 47.5,
'64': 40.3,
'128': 34.2,
'256': 28
}
# aysnc_time = [47, 42, 33.4, 18.49, 17.25]
async_time = {
'1': 47,
'16': 42,
'64': 33.4,
'128': 18.49,
'256': 17.25
}
# length = bench['1'].shape[-1]
# idx = np.arange(length)
# X = 10000 + idx * 1000
smooth = 5
# Plot Returns
for k, v in tmp_bench.items():
if k == '256':
continue
length = v.shape[-1]
idx = np.arange(length)
X = 10000 + idx * 1000
ysmoothed = gaussian_filter1d(v[0], sigma=smooth)
# X = np.arange(ysmoothed.shape[0])
plt.plot(X, ysmoothed, label = k)
# std_smoothed = gaussian_filter1d(std_bench[k][0], sigma=smooth)
# plt.fill_between(X,ysmoothed - std_smoothed, ysmoothed + std_smoothed, alpha=0.3)
plt.xlabel('Sample number')
plt.ylabel('Average Returns (Maximum at 250)')
    plt.title(('Async' if is_async else 'Sync') + ' Average Returns via samples')
plt.grid()
plt.legend()
    plt.savefig(('Async' if is_async else 'Sync')+'aveRet.pdf')
plt.close()
# plt.show()
# Plot Episode Length
f = plt.figure()
ax = f.add_subplot(111)
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
for k, v in tmp_bench.items():
if k == '256':
continue
length = v.shape[-1]
idx = np.arange(length)
X = 10000 + idx * 1000
# idx = np.arange(length)
ysmoothed = gaussian_filter1d(v[3], sigma=smooth)
# X = np.arange(ysmoothed.shape[0])
plt.plot(X, ysmoothed, label = k)
# std_smoothed = gaussian_filter1d(std_bench[k][3], sigma=smooth)
        plt.title(('Async' if is_async else 'Sync') + ' Average Episode length via samples')
# plt.fill_between(X, ysmoothed - std_smoothed, ysmoothed + std_smoothed, alpha=0.3)
plt.xlabel('Sample number')
plt.ylabel('Average Episode Length (Maximum at 125)')
plt.grid()
plt.legend()
# plt.show()
    plt.savefig(('Async' if is_async else 'Sync') + 'aveEp.pdf')
plt.close()
# Plot Realtime Return
for k, v in tmp_bench.items():
if k == '256':
continue
length = v.shape[-1]
idx = np.arange(length)
        if is_async:
X = btime[k] + idx * async_time[k] + np.random.rand(1)
else:
X = btime[k] + idx * sync_time[k] + np.random.rand(1)
ysmoothed = gaussian_filter1d(v[0], sigma=smooth)
# X = np.arange(ysmoothed.shape[0])
plt.plot(X, ysmoothed, label = k)
# std_smoothed = gaussian_filter1d(std_bench[k][0], sigma=smooth)
# plt.fill_between(X,ysmoothed - std_smoothed, ysmoothed + std_smoothed, alpha=0.3)
        plt.title(('Async' if is_async else 'Sync') + ' Average Returns via Real Time')
plt.xlabel('Real Time (s)')
plt.ylabel('Average Returns (Maximum at 250)')
plt.grid()
plt.legend()
# plt.show()
    plt.savefig(('Async' if is_async else 'Sync') + 'aveRetTime.pdf')
plt.close()
# Plot Episode Length
f = plt.figure()
ax = f.add_subplot(111)
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
for k, v in tmp_bench.items():
if k == '256':
continue
length = v.shape[-1]
idx = np.arange(length)
        if is_async:
X = btime[k] + idx * async_time[k] + np.random.rand(1)
else:
X = btime[k] + idx * sync_time[k] + np.random.rand(1)
# idx = np.arange(length)
ysmoothed = gaussian_filter1d(v[3], sigma=smooth)
# X = np.arange(ysmoothed.shape[0])
plt.plot(X, ysmoothed, label = k)
# std_smoothed = gaussian_filter1d(std_bench[k][3], sigma=smooth)
        plt.title(('Async' if is_async else 'Sync') + ' Average Episode length via Real Time')
# plt.fill_between(X, ysmoothed - std_smoothed, ysmoothed + std_smoothed, alpha=0.3)
plt.xlabel('Real Time (s)')
plt.ylabel('Average Episode Length (Maximum at 125)')
plt.grid()
plt.legend()
# plt.show()
    plt.savefig(('Async' if is_async else 'Sync') + 'aveEpTime.pdf')
plt.close()
if '__main__' == __name__:
# extract()
    sync, async_bench = load_data()
    print(np.mean(sync['1'], axis=(0, 2)))  # per-metric mean over runs and timesteps
# plot(sync)
    # plot(async_bench, True)
| 2.28125 | 2 |
fastfood/customer/forms.py | Toffy-dev/bds-project-assignment-3 | 0 | 12768690 |
from django import forms
from django.contrib.auth.forms import UserCreationForm
from main.models import Customer
from phonenumber_field.modelfields import PhoneNumberField
class RegisterForm(UserCreationForm):
email = forms.EmailField()
phone_number = PhoneNumberField()
class Meta:
model = Customer
fields = ["username", "<PASSWORD>", "<PASSWORD>", "email", "phone_number"]
| 2.015625 | 2 |
Rustik Bot/config.py | Creveoolus/youtube-discord-bot | 0 | 12768691 | token = '<PASSWORD>'
firebase = {
"apiKey": "<KEY>",
"authDomain": "test24-13912.firebaseapp.com",
"databaseURL": "https://test24-13912-default-rtdb.firebaseio.com",
"projectId": "test24-13912",
"storageBucket": "test24-13912.appspot.com",
"messagingSenderId": "939334214645",
"appId": "1:939334214645:web:3d8f56ea422989878f76bc",
"measurementId": "G-C95H67C4ZD"
} | 1.46875 | 1 |
baseline.py | gautierdag/cultural-evolution-engine | 4 | 12768692 |
# Baseline setting in which there are only two agents
# - no evolution
import pickle
import argparse
import sys
import torch
from tensorboardX import SummaryWriter
from utils import *
from cee.metrics import representation_similarity_analysis, language_entropy
from baseline_helper import get_sender_receiver, get_trainer, get_training_data
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def parse_arguments(args):
# Training settings
parser = argparse.ArgumentParser(
description="Training Sender/Receiver Agent on a task"
)
parser.add_argument(
"--debugging",
help="Enable debugging mode (default: False)",
action="store_true",
)
parser.add_argument(
"--single-model",
help="Use a single model (default: False)",
action="store_true",
)
parser.add_argument(
"--task",
type=str,
default="shapes",
metavar="S",
help="task to test on (default: shapes). Possible options: shapes or obverter",
)
parser.add_argument(
"--dataset-type",
type=str,
default="features",
metavar="S",
help="type of input used by dataset pick from raw/features/meta (default features)",
)
parser.add_argument(
"--greedy",
help="Use argmax at prediction time instead of sampling (default: False)",
action="store_true",
)
parser.add_argument(
"--iterations",
type=int,
default=10000,
metavar="N",
help="number of batch iterations to train (default: 10k)",
)
parser.add_argument(
"--log-interval",
type=int,
default=200,
metavar="N",
help="number of iterations between logs (default: 200)",
)
parser.add_argument(
"--seed", type=int, default=42, metavar="S", help="random seed (default: 42)"
)
parser.add_argument(
"--embedding-size",
type=int,
default=64,
metavar="N",
help="embedding size for embedding layer (default: 64)",
)
parser.add_argument(
"--hidden-size",
type=int,
default=64,
metavar="N",
help="hidden size for hidden layer (default: 64)",
)
parser.add_argument(
"--batch-size",
type=int,
default=1024,
metavar="N",
help="input batch size for training (default: 1024)",
)
parser.add_argument(
"--max-length",
type=int,
default=5,
metavar="N",
help="max sentence length allowed for communication (default: 5)",
)
parser.add_argument(
"--k",
type=int,
default=3,
metavar="N",
help="Number of distractors (default: 3)",
)
parser.add_argument(
"--vocab-size",
type=int,
default=5,
metavar="N",
help="Size of vocabulary (default: 5)",
)
parser.add_argument(
"--darts",
help="Use random architecture from DARTS space instead of random LSTMCell (default: False)",
action="store_true",
default=False,
)
parser.add_argument(
"--num-nodes",
type=int,
default=4,
metavar="N",
help="Size of darts cell to use with random-darts (default: 4)",
)
parser.add_argument(
"--lr",
type=float,
default=1e-3,
metavar="N",
help="Adam learning rate (default: 1e-3)",
)
parser.add_argument(
"--sender-path",
type=str,
default=False,
metavar="S",
help="Sender to be loaded",
)
parser.add_argument(
"--receiver-path",
type=str,
default=False,
metavar="S",
help="Receiver to be loaded",
)
parser.add_argument(
"--freeze-sender",
help="Freeze sender weights (do not train) ",
action="store_true",
)
parser.add_argument(
"--freeze-receiver",
help="Freeze receiver weights (do not train) ",
action="store_true",
)
parser.add_argument(
"--obverter-setup",
help="Enable obverter setup with shapes",
action="store_true",
)
parser.add_argument(
"--name",
type=str,
default=False,
metavar="S",
help="Name to append to run file name",
)
parser.add_argument(
"--folder",
type=str,
default=False,
metavar="S",
help="Additional folder within runs/",
)
parser.add_argument("--disable-print", help="Disable printing", action="store_true")
args = parser.parse_args(args)
if args.debugging:
args.iterations = 1000
args.max_length = 5
return args
def baseline(args):
args = parse_arguments(args)
seed_torch(seed=args.seed)
model_name = get_filename_from_baseline_params(args)
if not args.folder:
run_folder = "runs/" + model_name
else:
run_folder = "runs/" + args.folder + "/" + model_name
writer = SummaryWriter(log_dir=run_folder + "/" + str(args.seed))
train_data, valid_data, test_data, valid_meta_data, valid_features = get_training_data(
args
)
# dump arguments
pickle.dump(args, open("{}/experiment_params.p".format(run_folder), "wb"))
# get sender and receiver models and save them
sender, receiver = get_sender_receiver(args)
sender_file = "{}/sender.p".format(run_folder)
receiver_file = "{}/receiver.p".format(run_folder)
torch.save(sender, sender_file)
torch.save(receiver, receiver_file)
model = get_trainer(sender, receiver, args)
pytorch_total_params = sum(p.numel() for p in model.parameters())
if not args.disable_print:
# Print info
print("----------------------------------------")
print(
"Model name: {} \n|V|: {}\nL: {}".format(
model_name, args.vocab_size, args.max_length
)
)
print(sender)
print(receiver)
print("Total number of parameters: {}".format(pytorch_total_params))
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
best_valid_acc = -1
# Train
i = 0
running_loss = 0.0
while i < args.iterations:
for train_batch in train_data:
loss, acc = train_one_batch(model, train_batch, optimizer)
running_loss += loss
if i % args.log_interval == 0:
valid_loss_meter, valid_acc_meter, valid_entropy_meter, valid_messages, hidden_sender, hidden_receiver = evaluate(
model, valid_data
)
num_unique_messages = len(torch.unique(valid_messages, dim=0))
valid_messages = valid_messages.cpu().numpy()
rsa_sr, rsa_si, rsa_ri, rsa_sm, topological_similarity, pseudo_tre = representation_similarity_analysis(
valid_features,
valid_meta_data,
valid_messages,
hidden_sender,
hidden_receiver,
tre=True,
)
l_entropy = language_entropy(valid_messages)
if writer is not None:
writer.add_scalar("avg_loss", valid_loss_meter.avg, i)
writer.add_scalar("avg_convergence", running_loss / (i + 1), i)
writer.add_scalar("avg_acc", valid_acc_meter.avg, i)
writer.add_scalar("avg_entropy", valid_entropy_meter.avg, i)
writer.add_scalar("avg_unique_messages", num_unique_messages, i)
writer.add_scalar("rsa_sr", rsa_sr, i)
writer.add_scalar("rsa_si", rsa_si, i)
writer.add_scalar("rsa_ri", rsa_ri, i)
writer.add_scalar("rsa_sm", rsa_sm, i)
writer.add_scalar(
"topological_similarity", topological_similarity, i
)
writer.add_scalar("pseudo_tre", pseudo_tre, i)
writer.add_scalar("language_entropy", l_entropy, i)
if valid_acc_meter.avg > best_valid_acc:
best_valid_acc = valid_acc_meter.avg
torch.save(model.state_dict(), "{}/best_model".format(run_folder))
metrics = {
"loss": valid_loss_meter.avg,
"acc": valid_acc_meter.avg,
"entropy": valid_entropy_meter.avg,
"l_entropy": l_entropy,
"rsa_sr": rsa_sr,
"rsa_si": rsa_si,
"rsa_ri": rsa_ri,
"rsa_sm": rsa_sm,
"pseudo_tre": pseudo_tre,
"topological_similarity": topological_similarity,
"num_unique_messages": num_unique_messages,
"avg_convergence": running_loss / (i + 1),
}
# dump metrics
pickle.dump(
metrics, open("{}/metrics_at_{}.p".format(run_folder, i), "wb")
)
# Skip for now
if not args.disable_print:
print(
"{}/{} Iterations: val loss: {}, val accuracy: {}".format(
i,
args.iterations,
valid_loss_meter.avg,
valid_acc_meter.avg,
)
)
i += 1
best_model = get_trainer(sender, receiver, args)
state = torch.load(
"{}/best_model".format(run_folder),
map_location=lambda storage, location: storage,
)
best_model.load_state_dict(state)
best_model.to(device)
# Evaluate best model on test data
_, test_acc_meter, _, test_messages, _, _ = evaluate(best_model, test_data)
if not args.disable_print:
print("Test accuracy: {}".format(test_acc_meter.avg))
# Update receiver and sender files with new state
torch.save(best_model.sender, sender_file)
torch.save(best_model.receiver, receiver_file)
if args.dataset_type == "raw":
best_model.to(torch.device("cpu"))
torch.save(best_model.visual_module, "data/extractor_{}.p".format(args.task))
torch.save(test_messages, "{}/test_messages.p".format(run_folder))
pickle.dump(
test_acc_meter, open("{}/test_accuracy_meter.p".format(run_folder), "wb")
)
return run_folder
if __name__ == "__main__":
baseline(sys.argv[1:])
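# Example invocation (illustrative; the flags correspond to the argparse options above):
#   python baseline.py --task shapes --dataset-type features --iterations 10000 --vocab-size 5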
| 2.359375 | 2 |
examples/experiments/calm-textgame/drrn/model.py | qxcv/jiminy-cricket | 11 | 12768693 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from memory import State
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class DRRN(torch.nn.Module):
"""
Deep Reinforcement Relevance Network - He et al. '16
"""
def __init__(self, vocab_size, embedding_dim, hidden_dim):
super(DRRN, self).__init__()
self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.obs_encoder = nn.GRU(embedding_dim, hidden_dim)
self.look_encoder = nn.GRU(embedding_dim, hidden_dim)
self.inv_encoder = nn.GRU(embedding_dim, hidden_dim)
self.act_encoder = nn.GRU(embedding_dim, hidden_dim)
self.hidden = nn.Linear(4 * hidden_dim, hidden_dim)
self.act_scorer = nn.Linear(hidden_dim, 1)
def packed_rnn(self, x, rnn):
""" Runs the provided rnn on the input x. Takes care of packing/unpacking.
x: list of unpadded input sequences
Returns a tensor of size: len(x) x hidden_dim
"""
lengths = torch.tensor([len(n) for n in x], dtype=torch.long, device=device)
# Sort this batch in descending order by seq length
lengths, idx_sort = torch.sort(lengths, dim=0, descending=True)
_, idx_unsort = torch.sort(idx_sort, dim=0)
idx_sort = torch.autograd.Variable(idx_sort)
idx_unsort = torch.autograd.Variable(idx_unsort)
padded_x = pad_sequences(x)
x_tt = torch.from_numpy(padded_x).type(torch.long).to(device)
x_tt = x_tt.index_select(0, idx_sort)
# Run the embedding layer
embed = self.embedding(x_tt).permute(1, 0, 2) # Time x Batch x EncDim
# Pack padded batch of sequences for RNN module
packed = nn.utils.rnn.pack_padded_sequence(embed, lengths.cpu())
# Run the RNN
out, _ = rnn(packed)
# Unpack
out, _ = nn.utils.rnn.pad_packed_sequence(out)
# Get the last step of each sequence
idx = (lengths - 1).view(-1, 1).expand(len(lengths), out.size(2)).unsqueeze(0)
out = out.gather(0, idx).squeeze(0)
# Unsort
out = out.index_select(0, idx_unsort)
return out
def forward(self, state_batch, act_batch, poss_acts, detach=False, cond_weight=0,
cclm=None, cond_threshold=0, args=None, testing_flag=False):
"""
Batched forward pass.
obs_id_batch: iterable of unpadded sequence ids
act_batch: iterable of lists of unpadded admissible command ids
Returns a tuple of tensors containing q-values for each item in the batch
"""
# Zip the state_batch into an easy access format
state = State(*zip(*state_batch))
# This is number of admissible commands in each element of the batch
act_sizes = [len(a) for a in act_batch]
# Combine next actions into one long list
act_batch = list(itertools.chain.from_iterable(act_batch))
act_out = self.packed_rnn(act_batch, self.act_encoder)
# Encode the various aspects of the state
obs_out = self.packed_rnn(state.obs, self.obs_encoder)
look_out = self.packed_rnn(state.description, self.look_encoder)
inv_out = self.packed_rnn(state.inventory, self.inv_encoder)
state_out = torch.cat((obs_out, look_out, inv_out), dim=1)
# Expand the state to match the batches of actions
state_out = torch.cat([state_out[i].repeat(j, 1) for i, j in enumerate(act_sizes)], dim=0)
z = torch.cat((state_out, act_out), dim=1) # Concat along hidden_dim
z = F.relu(self.hidden(z))
act_values = self.act_scorer(z).squeeze(-1)
if detach:
act_values = act_values.detach().cpu()
q_values = act_values.split(act_sizes)
if cclm:
cond_probs = cclm.get_probs(state_batch, poss_acts, cond_threshold=cond_threshold, testing_flag=testing_flag)
if cond_weight > 0:
if args.adaptive_cond_weight:
new_q_values = []
for vals, probs in zip(q_values, cond_probs):
probs = probs.int()
scaling = (cond_weight * probs) + (1 - probs)
new_q_values.append(torch.log(F.softmax(vals, dim=0) * scaling) + torch.logsumexp(vals, dim=0))
q_values = new_q_values
elif args.adaptive_cond_weight2:
new_q_values = []
for vals, probs in zip(q_values, cond_probs):
probs = probs.int() # basically an indicator for whether the action is immoral
mult_weight = torch.minimum(2 * F.softmax(vals, dim=0).pow(0.5), torch.ones_like(vals))
scaling = (mult_weight * probs) + (1 - probs)
new_q_values.append(torch.log(F.softmax(vals, dim=0) * scaling) + torch.logsumexp(vals, dim=0))
q_values = new_q_values
else:
q_values = [vals - cond_weight * probs for vals, probs in zip(q_values, cond_probs)]
else:
raise NotImplementedError
else:
cond_probs = q_values
if cond_weight > 0:
raise NotImplementedError
q_values = [vals - cond_weight * torch.rand(vals.shape, device=device) for vals, probs in zip(q_values, cond_probs)]
# Split up the q-values by batch
return q_values, cond_probs
@torch.no_grad()
def act(self, states, poss_acts_tokenized, poss_acts, lm=None, eps=None, alpha=0, k=-1, argmax=False, cond_weight=0, cclm=None, cond_threshold=0, args=None):
""" Returns an action-string, optionally sampling from the distribution
of Q-Values.
"""
valid_ids = poss_acts_tokenized
q_values, cond_probs = self.forward(states, valid_ids, poss_acts, detach=False, cond_weight=cond_weight, cclm=cclm,
cond_threshold=cond_threshold, args=args) # detach only when using two GPUs
if alpha > 0 or (eps is not None and k != -1): # need to use lm_values
lm_values = [torch.tensor(lm.score(state.obs, act_ids), device=device) for state, act_ids in
zip(states, valid_ids)]
act_values = [q_value * (1 - alpha) + bert_value * alpha
for q_value, bert_value in zip(q_values, lm_values)]
else:
act_values = q_values
if eps is None: # sample ~ softmax(act_values)
if argmax:
sampling_func = torch.argmax
else:
sampling_func = lambda vals: torch.multinomial(F.softmax(vals, dim=0), num_samples=1)
act_idxs = [sampling_func(vals).item() for vals in act_values]
else: # w.p. eps, ~ softmax(act_values) | uniform(top_k(act_values)), w.p. (1-eps) arg max q_values
raise NotImplementedError
if k == 0: # soft sampling
act_idxs = [torch.multinomial(F.softmax(vals, dim=0), num_samples=1).item() for vals in lm_values]
elif k == -1:
act_idxs = [np.random.choice(range(len(vals))) for vals in q_values]
else: # hard (uniform) sampling
act_idxs = [np.random.choice(vals.topk(k=min(k, len(vals)), dim=0).indices.tolist()) for vals in
lm_values]
act_idxs = [vals.argmax(dim=0).item() if np.random.rand() > eps else idx for idx, vals in
zip(act_idxs, q_values)]
return act_idxs, act_values, cond_probs
def pad_sequences(sequences, maxlen=None, dtype='int32', value=0.):
lengths = [len(s) for s in sequences]
nb_samples = len(sequences)
if maxlen is None:
maxlen = np.max(lengths)
# take the sample shape from the first non empty sequence
# checking for consistency in the main loop below.
sample_shape = tuple()
for s in sequences:
if len(s) > 0:
sample_shape = np.asarray(s).shape[1:]
break
x = (np.ones((nb_samples, maxlen) + sample_shape) * value).astype(dtype)
for idx, s in enumerate(sequences):
if len(s) == 0:
continue # empty list was found
# pre truncating
trunc = s[-maxlen:]
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' %
(trunc.shape[1:], idx, sample_shape))
# post padding
x[idx, :len(trunc)] = trunc
return x
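
# Illustrative usage sketch (not part of the original module; the dimensions,
# the tokenized State batch, and the action id lists are assumptions):
#
#   model = DRRN(vocab_size=10000, embedding_dim=128, hidden_dim=128).to(device)
#   q_values, _ = model(state_batch, tokenized_act_batch, poss_acts)
#   best_idxs = [vals.argmax().item() for vals in q_values]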
| 2.734375 | 3 |
igramscraper/endpoints/instagram_query_id.py | a7t0fwa7/instagram-scraper | 1 | 12768694 | class InstagramQueryId:
USER_MEDIAS = '17880160963012870'
USER_STORIES = '17890626976041463'
STORIES = '17873473675158481'
| 1.1875 | 1 |
fbcrawl/spiders/fbcrawl.py | ziostanko/fbcrawl | 0 | 12768695 |
import scrapy
from scrapy.loader import ItemLoader
from scrapy.http import FormRequest
from fbcrawl.items import FbcrawlItem
class FacebookSpider(scrapy.Spider):
"""
Parse FB pages (needs credentials)
"""
name = "fb"
def __init__(self, email='', password='', page='', **kwargs):
super(FacebookSpider, self).__init__(**kwargs)
if not email or not password:
raise ValueError("You need to provide valid email and password!")
else:
self.email = email
self.password = password
if not page:
raise ValueError("You need to provide a valid page name to crawl!")
else:
self.page = page
        # Optional two-factor "approvals code"; requested interactively in parse_home if needed.
        self.code = ''
        self.start_urls = ['https://mbasic.facebook.com']
def parse(self, response):
return FormRequest.from_response(
response,
formxpath='//form[contains(@action, "login")]',
formdata={'email': self.email,'pass': self.password},
callback=self.parse_home
)
def parse_home(self, response):
'''Parse user news feed page'''
if response.css('#approvals_code'):
# Handle 'Approvals Code' checkpoint (ask user to enter code).
if not self.code:
# Show facebook messages via logs
# and request user for approval code.
message = response.css('._50f4::text').extract()[0]
self.log(message)
message = response.css('._3-8y._50f4').xpath('string()').extract()[0]
self.log(message)
self.code = input('Enter the code: ')
self.code = str(self.code)
if not (self.code and self.code.isdigit()):
self.log('Bad approvals code detected.')
return
return FormRequest.from_response(
response,
formdata={'approvals_code': self.code},
callback=self.parse_home,
)
elif response.xpath("//div/input[@value='Ok' and @type='submit']"):
# Handle 'Save Browser' checkpoint.
return FormRequest.from_response(
response,
formdata={'name_action_selected': 'dont_save'},
callback=self.parse_home,
dont_filter=True,
)
elif response.css('button#checkpointSubmitButton'):
# Handle 'Someone tried to log into your account' warning.
return FormRequest.from_response(
response, callback=self.parse_home, dont_filter=True,)
# Else go to the user profile.
href = response.urljoin(self.page)
self.logger.info('Parse function called on %s', href)
return scrapy.Request(
url=href,
callback=self.parse_page,
)
def parse_page(self, response):
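        # Note: the text filters below ('Notizia completa', 'Altri') match the
        # Italian mbasic.facebook.com interface ("Full story", "More"); adjust
        # them if the account language is not Italian.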
for post in response.xpath("//div[contains(@data-ft,'top_level_post_id')]"): #select all posts
new = ItemLoader(item=FbcrawlItem(),selector=post)
new.add_xpath('comments', ".//div/a[contains(text(),'comment')]/text()")
new.add_xpath('url', ".//a[contains(text(),'Notizia completa')]/@href")
post = post.xpath(".//a[contains(text(),'Notizia completa')]/@href").extract() #returns full post-link in a list
temp_post = response.urljoin(post[0])
yield scrapy.Request(temp_post, self.parse_post,dont_filter = True, meta={'item':new})
next_page = response.xpath("//div/a[contains(text(),'Altri')]/@href")
if len(next_page) > 0:
next_page = response.urljoin(next_page[0].extract())
yield scrapy.Request(next_page, callback=self.parse_page)
else:
next_page = response.xpath("//div/a[contains(text(),'2017')]/@href")
if len(next_page) > 0:
next_page = response.urljoin(next_page[0].extract())
yield scrapy.Request(next_page, callback=self.parse_page)
def parse_post(self,response):
new = ItemLoader(item=FbcrawlItem(),response=response,parent=response.meta['item'])
new.add_xpath('source', "//td/div/h3/strong/a/text() | //span/strong/a/text() | //div/div/div/a[contains(@href,'post_id')]/strong/text()")
new.add_xpath('date', '//div/div/abbr/text()')
new.add_xpath('text','//div[@data-ft]//p//text()')
new.add_xpath('reactions',"//a[contains(@href,'reaction/profile')]/div/div/text()")
reactions = response.xpath("//div[contains(@id,'sentence')]/a[contains(@href,'reaction/profile')]/@href")
reactions = response.urljoin(reactions[0].extract())
yield scrapy.Request(reactions, callback=self.parse_reactions, dont_filter = True, meta={'item':new})
def parse_reactions(self,response):
new = ItemLoader(item=FbcrawlItem(),response=response, parent=response.meta['item'])
new.add_xpath('likes',"//a[contains(@href,'reaction_type=1')]/span/text()")
new.add_xpath('ahah',"//a[contains(@href,'reaction_type=4')]/span/text()")
new.add_xpath('love',"//a[contains(@href,'reaction_type=2')]/span/text()")
new.add_xpath('wow',"//a[contains(@href,'reaction_type=3')]/span/text()")
new.add_xpath('sigh',"//a[contains(@href,'reaction_type=7')]/span/text()")
new.add_xpath('grrr',"//a[contains(@href,'reaction_type=8')]/span/text()")
yield new.load_item()
| 2.640625 | 3 |
test.py | moosetraveller/arcpy-util | 0 | 12768696 | import test.xcursor_test as xcursor_test
import test.toolbox.parameters_test as parameters_test
import test.helper_test as helper_test
if __name__ == "__main__":
xcursor_test.run_tests()
parameters_test.run_tests()
helper_test.run_tests()
| 1.132813 | 1 |
flash/image/__init__.py | tszumowski/lightning-flash | 0 | 12768697 |
from flash.image.backbones import OBJ_DETECTION_BACKBONES # noqa: F401
from flash.image.classification import ( # noqa: F401
ImageClassificationData,
ImageClassificationPreprocess,
ImageClassifier,
)
from flash.image.classification.backbones import IMAGE_CLASSIFIER_BACKBONES # noqa: F401
from flash.image.detection import ObjectDetectionData, ObjectDetector # noqa: F401
from flash.image.embedding import ImageEmbedder # noqa: F401
from flash.image.segmentation import ( # noqa: F401
SemanticSegmentation,
SemanticSegmentationData,
SemanticSegmentationPreprocess,
)
from flash.image.style_transfer import StyleTransfer, StyleTransferData, StyleTransferPreprocess # noqa: F401
| 1.351563 | 1 |
saleor/unurshop/package/migrations/0007_auto_20200916_1452.py | nlkhagva/saleor | 0 | 12768698 | # Generated by Django 3.1 on 2020-09-16 06:52
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('package', '0006_auto_20200916_1440'),
]
operations = [
migrations.RenameField(
model_name='packageline',
old_name='fulfilmentline',
new_name='fulfillmentline',
),
]
| 1.53125 | 2 |
Semana 4/maximal_manhattan_distance/test_pytest_plane.py | juandausa/CompetitiveProgrammingCoreSkills | 0 | 12768699 | from plane import Plane
def test_create():
assert Plane() is not None
assert Plane().points_count == 0
assert Plane().maximum_distance_between_points == 0
def test_add_first_point():
plane = Plane()
plane.add_point((1,2))
assert plane.points_count == 1
assert plane.maximum_distance_between_points == 0
assert plane.maximum_distance_points[0].point.x_coordinate == 1
assert plane.maximum_distance_points[0].point.y_coordinate == 2
assert plane.maximum_distance_points[1].point.x_coordinate == 1
assert plane.maximum_distance_points[1].point.y_coordinate == 2
def test_add_two_points():
plane = Plane()
plane.add_point((1,2))
plane.add_point((2,2))
assert plane.points_count == 2
assert plane.maximum_distance_between_points > 0
def test_statement_one():
'''
1 1 - 1 1
2 1 - 1 2
1 3 - 2 3
'''
plane = Plane()
plane.add_point((1, 1))
assert 1 in plane.get_points_with_maximal_manhattan_distance()
assert 1 in plane.get_points_with_maximal_manhattan_distance()
plane.add_point((2, 1))
assert 1 in plane.get_points_with_maximal_manhattan_distance()
assert 2 in plane.get_points_with_maximal_manhattan_distance()
plane.add_point((1, 3))
assert 3 in plane.get_points_with_maximal_manhattan_distance()
assert 2 in plane.get_points_with_maximal_manhattan_distance()
def test_statement_two():
'''
2 2 - 1 1
1 3 - 1 2
1 1 - 1 3
3 1 - 4 2
3 3 - 4 2
'''
plane = Plane()
plane.add_point((2, 2))
assert 1 in plane.get_points_with_maximal_manhattan_distance()
assert 1 in plane.get_points_with_maximal_manhattan_distance()
plane.add_point((1, 2))
assert 1 in plane.get_points_with_maximal_manhattan_distance()
assert 2 in plane.get_points_with_maximal_manhattan_distance()
plane.add_point((1, 1))
assert 1 in plane.get_points_with_maximal_manhattan_distance()
assert 3 in plane.get_points_with_maximal_manhattan_distance()
plane.add_point((3, 1))
assert 4 in plane.get_points_with_maximal_manhattan_distance()
assert 2 in plane.get_points_with_maximal_manhattan_distance()
plane.add_point((3, 3))
assert 3 in plane.get_points_with_maximal_manhattan_distance()
assert 5 in plane.get_points_with_maximal_manhattan_distance()
| 3.46875 | 3 |
funktionen/store_datas_in_db_new_case.py | Bitterlin/easy_absence_scheduling_App | 0 | 12768700 |
import sqlite3
def sql_insert_now(sql_datensatz):
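    """Insert one absence record into the mitarbeiter table.

    sql_datensatz is expected to hold 13 values in this order:
    vorname, nachname, gruppenleiter, bearbeitet, grund, abwesend_seit,
    gemeldet, gemeldet_time, meldepflicht, meldepflicht_time, prognose,
    notiz, status.
    """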
dsatz = sql_datensatz
vorname = dsatz[0]
nachname = dsatz[1]
gruppenleiter = dsatz[2]
bearbeitet = dsatz[3]
grund = dsatz[4]
abwesend_seit = dsatz[5]
gemeldet = dsatz[6]
gemeldet_time =dsatz[7]
meldepflicht = dsatz[8]
meldepflicht_time = dsatz[9]
prognose = dsatz[10]
notiz = dsatz[11]
if notiz == "":
notiz = "Noch keine Notiz hinterlegt."
status = dsatz[12]
#connect to db
verbindung = sqlite3.connect("datenbank/abwesenheiten.db")
zeiger = verbindung.cursor()
zeiger.execute("INSERT INTO mitarbeiter (vorname, nachname, gruppenleiter, bearbeitet, grund, abwesend_seit, gemeldet, gemeldet_time, meldepflicht, meldepflicht_time, prognose, notiz, status) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)", (vorname, nachname, gruppenleiter, bearbeitet, grund, abwesend_seit, gemeldet, gemeldet_time, meldepflicht, meldepflicht_time, prognose, notiz, status))
verbindung.commit()
verbindung.close() | 2.984375 | 3 |
src/LayoutFigures.py | dbpedia/gsoc-2020-dashboard | 12 | 12768701 |
import json
from io import StringIO
from os import path
import pandas as pd
import plotly.graph_objs as go
from SPARQLWrapper import JSON, CSV
import src.CSVParser as CSVP
import src.Constants as Constants
import src.JSONParser as JP
import src.RequestData as RD
data_path = 'data/v1'
bg_color = '#263238'
font_color = '#FFFFFF'
font_size = 15
height = 500
spacing = dict(t=0, b=0, r=0, l=0, pad=0)
def ontology_figures(ontology_data):
ontology_sunburst_figure = go.Figure(go.Sunburst(labels=ontology_data['labels'], parents=ontology_data['parents'], maxdepth=2))
ontology_treemap_figure = go.Figure(go.Treemap(labels=ontology_data['labels'], parents=ontology_data['parents']))
ontology_sunburst_figure.update_layout(margin=spacing, height=height, polar_bgcolor=bg_color, paper_bgcolor=bg_color,
font_size=font_size, font_color=font_color)
ontology_treemap_figure.update_layout(margin=spacing, height=height, polar_bgcolor=bg_color, paper_bgcolor=bg_color,
font_size=font_size, font_color=font_color)
return [ontology_treemap_figure, ontology_sunburst_figure]
def ontology_hierarchy():
ontology_data = ''
if path.exists(data_path + '/Ontologies.csv'):
ontology_data = pd.read_csv(data_path + '/Ontologies.csv')
print('ontologies fetched from the file')
else:
results = RD.sparql_wrapper(Constants.ONTOLOGY_HIERARCHY, JSON)
ontology_data = JP.to_ontology_hierarchy(results)
ontology_data.to_csv('data/v1/Ontologies.csv', index=False, index_label=False)
print('ontologies fetched using query')
return ontology_data, ontology_figures(ontology_data)
def all_instances_count_plot(all_instances_data):
all_instances_figure = go.Figure(go.Bar(x=all_instances_data['instancecount'], y=all_instances_data['class'],
orientation='h', marker_color='#EEEEEE'))
all_instances_figure.update_layout(height=height, margin=spacing, plot_bgcolor=bg_color, paper_bgcolor=bg_color, font_size=font_size,
font_color=font_color, yaxis=dict(showgrid=False))
return all_instances_figure
def all_instances_count():
all_instances_data = ''
if path.exists(data_path + '/AllInstances.csv'):
all_instances_data = pd.read_csv(data_path + '/AllInstances.csv')
print('all instances count fetched from the file')
else:
results = RD.sparql_wrapper(Constants.ALL_INSTANCES_COUNT, CSV)
all_instances_data = CSVP.to_all_instances_count(results)
all_instances_data.to_csv('data/v1/AllInstances.csv', index=False, index_label=False)
print('all instances fetched using query')
return all_instances_data, all_instances_count_plot(all_instances_data)
def get_general_statistics():
general_statistics = dict()
if path.exists(data_path + '/GeneralStatistics.json'):
with open(data_path + '/GeneralStatistics.json') as general_statistics_json:
general_statistics = json.load(general_statistics_json)
print('general statistics fetched from the file')
else:
for targetStat, query in Constants.GENERAL_STATISTICS.items():
stats = RD.sparql_wrapper(query, CSV)
stats = pd.read_csv(StringIO(stats.decode("utf-8")), sep=',').iloc[0]['counts']
general_statistics[targetStat] = str(stats)
with open(data_path + '/GeneralStatistics.json', 'w') as general_statistics_json:
json.dump(general_statistics, general_statistics_json)
print('general statistics fetched using query')
return general_statistics
def user_query(query):
results = RD.sparql_wrapper(query, CSV)
table = CSVP.parse_query_response(results)
return table
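
# Illustrative usage sketch (assumes the SPARQL endpoint wired up in RequestData is
# reachable and that the data/v1 cache directory exists):
#
#   ontology_data, (treemap_fig, sunburst_fig) = ontology_hierarchy()
#   stats = get_general_statistics()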
| 2.4375 | 2 |
blog/views.py | kevincornish/HeckGuide | 4 | 12768702 |
from django.views import generic
from .models import Post
class PostList(generic.ListView):
queryset = Post.objects.filter(status=1).order_by('-created_on')
template_name = 'blog/blog.html'
paginate_by = 3
class PostDetail(generic.DetailView):
model = Post
template_name = 'blog/post_detail.html' | 2.203125 | 2 |
tests/PhuongHoang/Object.py | PhuongHoang1182/consensus-specs | 0 | 12768703 | class person:
def __init__(self, first_name, last_name, age):
self.first_name = first_name
self.last_name = last_name
self.age = age
def get_full_name(self):
return f"{self.first_name} {self.last_name}"
def introduce(self):
return f"Hi. I'm {self.first_name}. {self.last_name}. I'm {self.age} years old"
Phuong = person('Phoang', 'hoang', 25)
full_name = Phuong.get_full_name()
print(f"{full_name}: {Phuong.introduce()}")
#print (f" i'm {Phuong.first_name} {Phuong.last_name}. I'm {Phuong.age} years old")
#https://www.youtube.com/c/TechWithTim/playlists | 4.0625 | 4 |
bmtk/tests/simulator/bionet/conftest.py | mjhyman/bmtk | 1 | 12768704 | try:
import bmtk.simulator.bionet as bionet
from bmtk.simulator.bionet.gids import GidPool
from bmtk.simulator.bionet.pyfunction_cache import *
from neuron import h
h.load_file('stdrun.hoc')
nrn_installed = True
except ImportError:
nrn_installed = False
has_mechanism = False
if nrn_installed:
try:
vecstim = h.VecStim()
has_mechanism = True
except AttributeError:
has_mechanism = False
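
# These module-level flags are intended for test modules to skip NEURON-dependent
# tests, e.g. pytest.mark.skipif(not nrn_installed, reason='NEURON is not installed').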
| 1.9375 | 2 |
govhack/models.py | MatthewJA/govhack-2016 | 0 | 12768705 | import sqlalchemy as sa
import sqlalchemy.ext as ext
import sqlalchemy.ext.declarative
import sqlalchemy.orm as orm
from .database import Base
class InterestingTrend():
def __init__(self, title, description):
self.title = title
self.description = description
def to_dict(self):
return {
'title': self.title,
'description': self.description
}
@staticmethod
def from_dict(dct):
return InterestingTrend(dct['title'], dct['description'])
class DateHeat(Base):
__tablename__ = 'dateheat'
date = sa.Column(sa.Date(), primary_key=True)
heat = sa.Column(sa.String())
peaks = sa.Column(sa.String())
interest = sa.Column(sa.String())
def __init__(self, date, heat, peaks, interest):
self.date = date
self.heat = heat
self.peaks = peaks
self.interest = interest
def __repr__(self):
return '<DateHeat {}>'.format(self.date)
class DateLink(Base):
__tablename__ = 'datelink'
date = sa.Column(sa.Date(), primary_key=True)
hid = sa.Column(sa.String())
def __init__(self, date, hid):
self.date = date
self.hid = hid
def __repr__(self):
return '<DateLink {} - {}>'.format(self.date, self.hid) | 2.84375 | 3 |
GimmeProxyAPI.py | DeyaaMuhammad/GimmeProxyApi | 3 | 12768706 |
import json
import requests
class GimmeProxyAPI(object):
"""docstring for proxy"""
def __init__(self, **args):
self.base_url = "https://gimmeproxy.com/api/getProxy"
self.response = None
if self.response is None:
            self.response = self.get_proxy(**args)
def response(self):
return self.response
def base_url(self):
return self.base_url
def get_proxy(self, **args):
request = requests.get(self.base_url, params=args)
if request.status_code == 200:
self.response = request.json()
else:
raise Exception("An unknown error occured, status_code = {}".format(r.status_code))
return self.response
def get_curl(self):
curl = self.response["curl"]
return curl
def get_ip_port(self):
ip_port = self.response["ipPort"]
return ip_port
def get_port(self):
port = self.response["port"]
return port
def get_ip(self):
ip = self.response["ip"]
return ip
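
# Illustrative usage sketch (not part of the original module); any keyword
# arguments are passed straight through as API query parameters:
#
#   proxy = GimmeProxyAPI()
#   print(proxy.get_ip_port())   # e.g. "203.0.113.5:8080"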
| 3.15625 | 3 |
bookingapp/forms.py | vetsinen/etalki | 0 | 12768707 |
from django import forms
from .models import Lesson
class LessonForm(forms.ModelForm):
class Meta:
model = Lesson
fields = ('date', 'hour')
| 2.140625 | 2 |
trading_environment.py | namm2008/DRL_stock_trading | 8 | 12768708 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 17:22:57 2021
@author: matthewyeung
"""
import numpy as np
import pandas as pd
import pandas_datareader.data as web
#RSI
def rsi(dataset, window_length):
# Get the difference in price from previous step
delta = dataset.diff()
# Get rid of the first row, which is NaN since it did not have a previous
# row to calculate the differences
delta = delta[1:]
# Make the positive gains (up) and negative gains (down) Series
up, down = delta.copy(), delta.copy()
up[up < 0] = 0
down[down > 0] = 0
# Calculate the EWMA
roll_up = up.ewm(span=window_length).mean()
roll_down = down.abs().ewm(span=window_length).mean()
# Calculate the RSI based on EWMA
RS = roll_up / roll_down
RSI = 100.0 - (100.0 / (1.0 + RS))
return RSI
def find_na(df):
col_name = df.columns
for col in col_name:
nan_num = df[col].isnull().sum()
print('Column Name: {}, NaN Number: {}'.format(col, nan_num))
def find_zero(df):
col_name = df.columns
for col in col_name:
zero_num = len(df[df[col]==0])
print('Column Name: {}, Zero Number: {}'.format(col, zero_num))
#Download data from the web
def stock_dataset_dl(ticker, start, end, ema_short, ema_long, day_data= True):
#import data with pandas dataframe
df = web.DataReader(ticker, 'yahoo', start, end)
df['price'] = df['Close']
df['ema_st'] = df['Close'].ewm(span=ema_short, adjust=False).mean()
df['ema_lg'] = df['Close'].ewm(span=ema_long, adjust=False).mean()
df['MACD'] = df['ema_st'] - df['ema_lg']
df['rsi'] = rsi(df['Close'],20)
df['Percent_chg'] = df['Close'].pct_change()
df['high_low'] = df['High'] - df['Low']
    # Handle zero high-low ranges: replace them with a small value (0.1), using .loc
    # because chained .iloc assignment can be silently ignored by pandas.
    df.loc[df['high_low'] == 0, 'high_low'] = 0.1
#handling time
df = df.reset_index()
if day_data == True:
df['timestp'] = pd.to_datetime(df['Date'])
df['daytime'] = df['timestp'].dt.dayofweek
df['day'] = df['timestp'].dt.day
#df['hour'] = df['timestp'].dt.hour
#df['minute'] = df['timestp'].dt.minute
df_train = df[['price', 'Close', 'high_low', 'Volume', 'ema_st','ema_lg',
'MACD','rsi','day','daytime','Percent_chg']]
else:
df_train = df[['price', 'Close', 'high_low', 'Volume', 'ema_st','ema_lg','MACD','rsi','Percent_chg']]
#fill the NaN with 0
df_train = df_train.fillna(0)
return df_train
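
# Illustrative usage sketch (ticker and date range are placeholder assumptions):
#
#   import datetime
#   start = datetime.datetime(2015, 1, 1)
#   end = datetime.datetime(2020, 1, 1)
#   df_train = stock_dataset_dl('AAPL', start, end, ema_short=12, ema_long=26)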
| 2.734375 | 3 |
ws-tests/test_fetch_all_collections.py | mtholder/pyraphyletic | 1 | 12768709 |
#!/usr/bin/env python
import sys
from opentreetesting import test_http_json_method, config
DOMAIN = config('host', 'apihost')
# backwards compat, support "list_all"
SUBMIT_URI = DOMAIN + '/v3/collections/find_collections'
r = test_http_json_method(SUBMIT_URI,
'GET',
expected_status=200,
return_bool_data=True)
if not r[0]:
sys.exit(1)
full_objs = r[1]
full_objs_by_id = {i['id']: i for i in full_objs}
SUBMIT_URI = DOMAIN + '/v4/collections/list'
r = test_http_json_method(SUBMIT_URI,
'GET',
expected_status=200,
return_bool_data=True)
if not r[0]:
sys.exit(1)
fo_ids = set(full_objs_by_id.keys())
list_ids = set(r[1])
print(fo_ids)
print(list_ids)
if set(full_objs_by_id.keys()) != set(r[1]):
sys.exit('.../find_collections and .../list returned different responses.')
| 2.25 | 2 |
refactorization/train_utils.py | Jiayuan-Gu/policy-refactorization | 6 | 12768710 |
import warnings
import torch
def build_optimizer(cfg, model):
name = cfg.OPTIMIZER.TYPE
if name == '':
warnings.warn('No optimizer is built.')
return None
elif hasattr(torch.optim, name):
return getattr(torch.optim, name)(
model.parameters(),
lr=cfg.OPTIMIZER.LR,
weight_decay=cfg.OPTIMIZER.WEIGHT_DECAY,
**cfg.OPTIMIZER.get(name, dict()),
)
else:
raise ValueError(f'Unsupported optimizer: {name}.')
def build_lr_scheduler(cfg, optimizer):
name = cfg.LR_SCHEDULER.TYPE
if name == '':
warnings.warn('No lr_scheduler is built.')
return None
elif hasattr(torch.optim.lr_scheduler, name):
lr_scheduler = getattr(torch.optim.lr_scheduler, name)(
optimizer,
**cfg.LR_SCHEDULER.get(name, dict()),
)
return lr_scheduler
else:
raise ValueError(f'Unsupported lr_scheduler: {name}.')
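
# Illustrative usage sketch (the cfg object is an assumption; these helpers only
# need attribute access such as cfg.OPTIMIZER.TYPE plus a dict-style .get()):
#
#   optimizer = build_optimizer(cfg, model)
#   lr_scheduler = build_lr_scheduler(cfg, optimizer)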
| 2.40625 | 2 |
google/cloud/datalabeling_v1beta1/types/data_labeling_service.py | pallabiwrites/python-datalabeling | 0 | 12768711 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.datalabeling_v1beta1.types import (
annotation_spec_set as gcd_annotation_spec_set,
)
from google.cloud.datalabeling_v1beta1.types import dataset as gcd_dataset
from google.cloud.datalabeling_v1beta1.types import evaluation
from google.cloud.datalabeling_v1beta1.types import evaluation_job as gcd_evaluation_job
from google.cloud.datalabeling_v1beta1.types import human_annotation_config
from google.cloud.datalabeling_v1beta1.types import instruction as gcd_instruction
from google.protobuf import field_mask_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.datalabeling.v1beta1",
manifest={
"CreateDatasetRequest",
"GetDatasetRequest",
"ListDatasetsRequest",
"ListDatasetsResponse",
"DeleteDatasetRequest",
"ImportDataRequest",
"ExportDataRequest",
"GetDataItemRequest",
"ListDataItemsRequest",
"ListDataItemsResponse",
"GetAnnotatedDatasetRequest",
"ListAnnotatedDatasetsRequest",
"ListAnnotatedDatasetsResponse",
"DeleteAnnotatedDatasetRequest",
"LabelImageRequest",
"LabelVideoRequest",
"LabelTextRequest",
"GetExampleRequest",
"ListExamplesRequest",
"ListExamplesResponse",
"CreateAnnotationSpecSetRequest",
"GetAnnotationSpecSetRequest",
"ListAnnotationSpecSetsRequest",
"ListAnnotationSpecSetsResponse",
"DeleteAnnotationSpecSetRequest",
"CreateInstructionRequest",
"GetInstructionRequest",
"DeleteInstructionRequest",
"ListInstructionsRequest",
"ListInstructionsResponse",
"GetEvaluationRequest",
"SearchEvaluationsRequest",
"SearchEvaluationsResponse",
"SearchExampleComparisonsRequest",
"SearchExampleComparisonsResponse",
"CreateEvaluationJobRequest",
"UpdateEvaluationJobRequest",
"GetEvaluationJobRequest",
"PauseEvaluationJobRequest",
"ResumeEvaluationJobRequest",
"DeleteEvaluationJobRequest",
"ListEvaluationJobsRequest",
"ListEvaluationJobsResponse",
},
)
class CreateDatasetRequest(proto.Message):
r"""Request message for CreateDataset.
Attributes:
parent (str):
Required. Dataset resource parent, format:
projects/{project_id}
dataset (google.cloud.datalabeling_v1beta1.types.Dataset):
Required. The dataset to be created.
"""
parent = proto.Field(proto.STRING, number=1,)
dataset = proto.Field(proto.MESSAGE, number=2, message=gcd_dataset.Dataset,)
class GetDatasetRequest(proto.Message):
r"""Request message for GetDataSet.
Attributes:
name (str):
Required. Dataset resource name, format:
projects/{project_id}/datasets/{dataset_id}
"""
name = proto.Field(proto.STRING, number=1,)
class ListDatasetsRequest(proto.Message):
r"""Request message for ListDataset.
Attributes:
parent (str):
Required. Dataset resource parent, format:
projects/{project_id}
filter (str):
Optional. Filter on dataset is not supported
at this moment.
page_size (int):
Optional. Requested page size. Server may
return fewer results than requested. Default
value is 100.
page_token (str):
Optional. A token identifying a page of results for the
server to return. Typically obtained by
[ListDatasetsResponse.next_page_token][google.cloud.datalabeling.v1beta1.ListDatasetsResponse.next_page_token]
of the previous [DataLabelingService.ListDatasets] call.
Returns the first page if empty.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
class ListDatasetsResponse(proto.Message):
r"""Results of listing datasets within a project.
Attributes:
datasets (Sequence[google.cloud.datalabeling_v1beta1.types.Dataset]):
The list of datasets to return.
next_page_token (str):
A token to retrieve next page of results.
"""
@property
def raw_page(self):
return self
datasets = proto.RepeatedField(
proto.MESSAGE, number=1, message=gcd_dataset.Dataset,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class DeleteDatasetRequest(proto.Message):
r"""Request message for DeleteDataset.
Attributes:
name (str):
Required. Dataset resource name, format:
projects/{project_id}/datasets/{dataset_id}
"""
name = proto.Field(proto.STRING, number=1,)
class ImportDataRequest(proto.Message):
r"""Request message for ImportData API.
Attributes:
name (str):
Required. Dataset resource name, format:
projects/{project_id}/datasets/{dataset_id}
input_config (google.cloud.datalabeling_v1beta1.types.InputConfig):
Required. Specify the input source of the
data.
user_email_address (str):
Email of the user who started the import task
and should be notified by email. If empty no
notification will be sent.
"""
name = proto.Field(proto.STRING, number=1,)
input_config = proto.Field(
proto.MESSAGE, number=2, message=gcd_dataset.InputConfig,
)
user_email_address = proto.Field(proto.STRING, number=3,)
class ExportDataRequest(proto.Message):
r"""Request message for ExportData API.
Attributes:
name (str):
Required. Dataset resource name, format:
projects/{project_id}/datasets/{dataset_id}
annotated_dataset (str):
Required. Annotated dataset resource name. DataItem in
Dataset and their annotations in specified annotated dataset
will be exported. It's in format of
projects/{project_id}/datasets/{dataset_id}/annotatedDatasets/
{annotated_dataset_id}
filter (str):
Optional. Filter is not supported at this
moment.
output_config (google.cloud.datalabeling_v1beta1.types.OutputConfig):
Required. Specify the output destination.
user_email_address (str):
Email of the user who started the export task
and should be notified by email. If empty no
notification will be sent.
"""
name = proto.Field(proto.STRING, number=1,)
annotated_dataset = proto.Field(proto.STRING, number=2,)
filter = proto.Field(proto.STRING, number=3,)
output_config = proto.Field(
proto.MESSAGE, number=4, message=gcd_dataset.OutputConfig,
)
user_email_address = proto.Field(proto.STRING, number=5,)
class GetDataItemRequest(proto.Message):
r"""Request message for GetDataItem.
Attributes:
name (str):
Required. The name of the data item to get, format:
projects/{project_id}/datasets/{dataset_id}/dataItems/{data_item_id}
"""
name = proto.Field(proto.STRING, number=1,)
class ListDataItemsRequest(proto.Message):
r"""Request message for ListDataItems.
Attributes:
parent (str):
Required. Name of the dataset to list data items, format:
projects/{project_id}/datasets/{dataset_id}
filter (str):
Optional. Filter is not supported at this
moment.
page_size (int):
Optional. Requested page size. Server may
return fewer results than requested. Default
value is 100.
page_token (str):
Optional. A token identifying a page of results for the
server to return. Typically obtained by
[ListDataItemsResponse.next_page_token][google.cloud.datalabeling.v1beta1.ListDataItemsResponse.next_page_token]
of the previous [DataLabelingService.ListDataItems] call.
Return first page if empty.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
class ListDataItemsResponse(proto.Message):
r"""Results of listing data items in a dataset.
Attributes:
data_items (Sequence[google.cloud.datalabeling_v1beta1.types.DataItem]):
The list of data items to return.
next_page_token (str):
A token to retrieve next page of results.
"""
@property
def raw_page(self):
return self
data_items = proto.RepeatedField(
proto.MESSAGE, number=1, message=gcd_dataset.DataItem,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class GetAnnotatedDatasetRequest(proto.Message):
r"""Request message for GetAnnotatedDataset.
Attributes:
name (str):
Required. Name of the annotated dataset to get, format:
projects/{project_id}/datasets/{dataset_id}/annotatedDatasets/
{annotated_dataset_id}
"""
name = proto.Field(proto.STRING, number=1,)
class ListAnnotatedDatasetsRequest(proto.Message):
r"""Request message for ListAnnotatedDatasets.
Attributes:
parent (str):
Required. Name of the dataset to list annotated datasets,
format: projects/{project_id}/datasets/{dataset_id}
filter (str):
Optional. Filter is not supported at this
moment.
page_size (int):
Optional. Requested page size. Server may
return fewer results than requested. Default
value is 100.
page_token (str):
Optional. A token identifying a page of results for the
server to return. Typically obtained by
[ListAnnotatedDatasetsResponse.next_page_token][google.cloud.datalabeling.v1beta1.ListAnnotatedDatasetsResponse.next_page_token]
of the previous [DataLabelingService.ListAnnotatedDatasets]
call. Return first page if empty.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
class ListAnnotatedDatasetsResponse(proto.Message):
r"""Results of listing annotated datasets for a dataset.
Attributes:
annotated_datasets (Sequence[google.cloud.datalabeling_v1beta1.types.AnnotatedDataset]):
The list of annotated datasets to return.
next_page_token (str):
A token to retrieve next page of results.
"""
@property
def raw_page(self):
return self
annotated_datasets = proto.RepeatedField(
proto.MESSAGE, number=1, message=gcd_dataset.AnnotatedDataset,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class DeleteAnnotatedDatasetRequest(proto.Message):
r"""Request message for DeleteAnnotatedDataset.
Attributes:
name (str):
Required. Name of the annotated dataset to delete, format:
projects/{project_id}/datasets/{dataset_id}/annotatedDatasets/
{annotated_dataset_id}
"""
name = proto.Field(proto.STRING, number=1,)
class LabelImageRequest(proto.Message):
r"""Request message for starting an image labeling task.
Attributes:
image_classification_config (google.cloud.datalabeling_v1beta1.types.ImageClassificationConfig):
Configuration for image classification task. One of
image_classification_config, bounding_poly_config,
polyline_config and segmentation_config are required.
bounding_poly_config (google.cloud.datalabeling_v1beta1.types.BoundingPolyConfig):
Configuration for bounding box and bounding poly task. One
of image_classification_config, bounding_poly_config,
polyline_config and segmentation_config are required.
polyline_config (google.cloud.datalabeling_v1beta1.types.PolylineConfig):
Configuration for polyline task. One of
image_classification_config, bounding_poly_config,
polyline_config and segmentation_config are required.
segmentation_config (google.cloud.datalabeling_v1beta1.types.SegmentationConfig):
Configuration for segmentation task. One of
image_classification_config, bounding_poly_config,
polyline_config and segmentation_config are required.
parent (str):
Required. Name of the dataset to request labeling task,
format: projects/{project_id}/datasets/{dataset_id}
basic_config (google.cloud.datalabeling_v1beta1.types.HumanAnnotationConfig):
Required. Basic human annotation config.
feature (google.cloud.datalabeling_v1beta1.types.LabelImageRequest.Feature):
Required. The type of image labeling task.
"""
class Feature(proto.Enum):
r"""Image labeling task feature."""
FEATURE_UNSPECIFIED = 0
CLASSIFICATION = 1
BOUNDING_BOX = 2
ORIENTED_BOUNDING_BOX = 6
BOUNDING_POLY = 3
POLYLINE = 4
SEGMENTATION = 5
image_classification_config = proto.Field(
proto.MESSAGE,
number=4,
oneof="request_config",
message=human_annotation_config.ImageClassificationConfig,
)
bounding_poly_config = proto.Field(
proto.MESSAGE,
number=5,
oneof="request_config",
message=human_annotation_config.BoundingPolyConfig,
)
polyline_config = proto.Field(
proto.MESSAGE,
number=6,
oneof="request_config",
message=human_annotation_config.PolylineConfig,
)
segmentation_config = proto.Field(
proto.MESSAGE,
number=7,
oneof="request_config",
message=human_annotation_config.SegmentationConfig,
)
parent = proto.Field(proto.STRING, number=1,)
basic_config = proto.Field(
proto.MESSAGE, number=2, message=human_annotation_config.HumanAnnotationConfig,
)
feature = proto.Field(proto.ENUM, number=3, enum=Feature,)
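# Illustrative sketch: the four task configs above share the "request_config"
# oneof, so exactly one of them is set per request. The parent path is a
# hypothetical placeholder.
def _example_label_image_request():
    return LabelImageRequest(
        parent="projects/example-project/datasets/example-dataset",
        basic_config=human_annotation_config.HumanAnnotationConfig(),
        feature=LabelImageRequest.Feature.CLASSIFICATION,
        image_classification_config=human_annotation_config.ImageClassificationConfig(),
    )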
class LabelVideoRequest(proto.Message):
r"""Request message for LabelVideo.
Attributes:
video_classification_config (google.cloud.datalabeling_v1beta1.types.VideoClassificationConfig):
Configuration for video classification task. One of
video_classification_config, object_detection_config,
object_tracking_config and event_config is required.
object_detection_config (google.cloud.datalabeling_v1beta1.types.ObjectDetectionConfig):
Configuration for video object detection task. One of
video_classification_config, object_detection_config,
object_tracking_config and event_config is required.
object_tracking_config (google.cloud.datalabeling_v1beta1.types.ObjectTrackingConfig):
Configuration for video object tracking task. One of
video_classification_config, object_detection_config,
object_tracking_config and event_config is required.
event_config (google.cloud.datalabeling_v1beta1.types.EventConfig):
Configuration for video event task. One of
video_classification_config, object_detection_config,
object_tracking_config and event_config is required.
parent (str):
Required. Name of the dataset to request labeling task,
format: projects/{project_id}/datasets/{dataset_id}
basic_config (google.cloud.datalabeling_v1beta1.types.HumanAnnotationConfig):
Required. Basic human annotation config.
feature (google.cloud.datalabeling_v1beta1.types.LabelVideoRequest.Feature):
Required. The type of video labeling task.
"""
class Feature(proto.Enum):
r"""Video labeling task feature."""
FEATURE_UNSPECIFIED = 0
CLASSIFICATION = 1
OBJECT_DETECTION = 2
OBJECT_TRACKING = 3
EVENT = 4
video_classification_config = proto.Field(
proto.MESSAGE,
number=4,
oneof="request_config",
message=human_annotation_config.VideoClassificationConfig,
)
object_detection_config = proto.Field(
proto.MESSAGE,
number=5,
oneof="request_config",
message=human_annotation_config.ObjectDetectionConfig,
)
object_tracking_config = proto.Field(
proto.MESSAGE,
number=6,
oneof="request_config",
message=human_annotation_config.ObjectTrackingConfig,
)
event_config = proto.Field(
proto.MESSAGE,
number=7,
oneof="request_config",
message=human_annotation_config.EventConfig,
)
parent = proto.Field(proto.STRING, number=1,)
basic_config = proto.Field(
proto.MESSAGE, number=2, message=human_annotation_config.HumanAnnotationConfig,
)
feature = proto.Field(proto.ENUM, number=3, enum=Feature,)
class LabelTextRequest(proto.Message):
r"""Request message for LabelText.
Attributes:
text_classification_config (google.cloud.datalabeling_v1beta1.types.TextClassificationConfig):
Configuration for text classification task. One of
text_classification_config and text_entity_extraction_config
is required.
text_entity_extraction_config (google.cloud.datalabeling_v1beta1.types.TextEntityExtractionConfig):
Configuration for entity extraction task. One of
text_classification_config and text_entity_extraction_config
is required.
parent (str):
Required. Name of the data set to request labeling task,
format: projects/{project_id}/datasets/{dataset_id}
basic_config (google.cloud.datalabeling_v1beta1.types.HumanAnnotationConfig):
Required. Basic human annotation config.
feature (google.cloud.datalabeling_v1beta1.types.LabelTextRequest.Feature):
Required. The type of text labeling task.
"""
class Feature(proto.Enum):
r"""Text labeling task feature."""
FEATURE_UNSPECIFIED = 0
TEXT_CLASSIFICATION = 1
TEXT_ENTITY_EXTRACTION = 2
text_classification_config = proto.Field(
proto.MESSAGE,
number=4,
oneof="request_config",
message=human_annotation_config.TextClassificationConfig,
)
text_entity_extraction_config = proto.Field(
proto.MESSAGE,
number=5,
oneof="request_config",
message=human_annotation_config.TextEntityExtractionConfig,
)
parent = proto.Field(proto.STRING, number=1,)
basic_config = proto.Field(
proto.MESSAGE, number=2, message=human_annotation_config.HumanAnnotationConfig,
)
feature = proto.Field(proto.ENUM, number=6, enum=Feature,)
class GetExampleRequest(proto.Message):
r"""Request message for GetExample
Attributes:
name (str):
Required. Name of example, format:
projects/{project_id}/datasets/{dataset_id}/annotatedDatasets/
{annotated_dataset_id}/examples/{example_id}
filter (str):
Optional. An expression for filtering Examples. Filter by
annotation_spec.display_name is supported. Format
"annotation_spec.display_name = {display_name}".
"""
name = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
class ListExamplesRequest(proto.Message):
r"""Request message for ListExamples.
Attributes:
parent (str):
Required. Example resource parent.
filter (str):
Optional. An expression for filtering Examples. For
annotated datasets that have annotation spec set, filter by
annotation_spec.display_name is supported. Format
"annotation_spec.display_name = {display_name}".
page_size (int):
Optional. Requested page size. Server may
return fewer results than requested. Default
value is 100.
page_token (str):
Optional. A token identifying a page of results for the
server to return. Typically obtained by
[ListExamplesResponse.next_page_token][google.cloud.datalabeling.v1beta1.ListExamplesResponse.next_page_token]
of the previous [DataLabelingService.ListExamples] call.
Return first page if empty.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
class ListExamplesResponse(proto.Message):
r"""Results of listing Examples in and annotated dataset.
Attributes:
examples (Sequence[google.cloud.datalabeling_v1beta1.types.Example]):
The list of examples to return.
next_page_token (str):
A token to retrieve next page of results.
"""
@property
def raw_page(self):
return self
examples = proto.RepeatedField(
proto.MESSAGE, number=1, message=gcd_dataset.Example,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class CreateAnnotationSpecSetRequest(proto.Message):
r"""Request message for CreateAnnotationSpecSet.
Attributes:
parent (str):
Required. AnnotationSpecSet resource parent, format:
projects/{project_id}
annotation_spec_set (google.cloud.datalabeling_v1beta1.types.AnnotationSpecSet):
Required. Annotation spec set to create. Annotation specs
must be included. Only one annotation spec will be accepted
for annotation specs with same display_name.
"""
parent = proto.Field(proto.STRING, number=1,)
annotation_spec_set = proto.Field(
proto.MESSAGE, number=2, message=gcd_annotation_spec_set.AnnotationSpecSet,
)
class GetAnnotationSpecSetRequest(proto.Message):
r"""Request message for GetAnnotationSpecSet.
Attributes:
name (str):
Required. AnnotationSpecSet resource name, format:
projects/{project_id}/annotationSpecSets/{annotation_spec_set_id}
"""
name = proto.Field(proto.STRING, number=1,)
class ListAnnotationSpecSetsRequest(proto.Message):
r"""Request message for ListAnnotationSpecSets.
Attributes:
parent (str):
Required. Parent of AnnotationSpecSet resource, format:
projects/{project_id}
filter (str):
Optional. Filter is not supported at this
moment.
page_size (int):
Optional. Requested page size. Server may
return fewer results than requested. Default
value is 100.
page_token (str):
Optional. A token identifying a page of results for the
server to return. Typically obtained by
[ListAnnotationSpecSetsResponse.next_page_token][google.cloud.datalabeling.v1beta1.ListAnnotationSpecSetsResponse.next_page_token]
of the previous [DataLabelingService.ListAnnotationSpecSets]
call. Return first page if empty.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
class ListAnnotationSpecSetsResponse(proto.Message):
r"""Results of listing annotation spec set under a project.
Attributes:
annotation_spec_sets (Sequence[google.cloud.datalabeling_v1beta1.types.AnnotationSpecSet]):
The list of annotation spec sets.
next_page_token (str):
A token to retrieve next page of results.
"""
@property
def raw_page(self):
return self
annotation_spec_sets = proto.RepeatedField(
proto.MESSAGE, number=1, message=gcd_annotation_spec_set.AnnotationSpecSet,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class DeleteAnnotationSpecSetRequest(proto.Message):
r"""Request message for DeleteAnnotationSpecSet.
Attributes:
name (str):
Required. AnnotationSpec resource name, format:
``projects/{project_id}/annotationSpecSets/{annotation_spec_set_id}``.
"""
name = proto.Field(proto.STRING, number=1,)
class CreateInstructionRequest(proto.Message):
r"""Request message for CreateInstruction.
Attributes:
parent (str):
Required. Instruction resource parent, format:
projects/{project_id}
instruction (google.cloud.datalabeling_v1beta1.types.Instruction):
Required. Instruction of how to perform the
labeling task.
"""
parent = proto.Field(proto.STRING, number=1,)
instruction = proto.Field(
proto.MESSAGE, number=2, message=gcd_instruction.Instruction,
)
class GetInstructionRequest(proto.Message):
r"""Request message for GetInstruction.
Attributes:
name (str):
Required. Instruction resource name, format:
projects/{project_id}/instructions/{instruction_id}
"""
name = proto.Field(proto.STRING, number=1,)
class DeleteInstructionRequest(proto.Message):
r"""Request message for DeleteInstruction.
Attributes:
name (str):
Required. Instruction resource name, format:
projects/{project_id}/instructions/{instruction_id}
"""
name = proto.Field(proto.STRING, number=1,)
class ListInstructionsRequest(proto.Message):
r"""Request message for ListInstructions.
Attributes:
parent (str):
Required. Instruction resource parent, format:
projects/{project_id}
filter (str):
Optional. Filter is not supported at this
moment.
page_size (int):
Optional. Requested page size. Server may
return fewer results than requested. Default
value is 100.
page_token (str):
Optional. A token identifying a page of results for the
server to return. Typically obtained by
[ListInstructionsResponse.next_page_token][google.cloud.datalabeling.v1beta1.ListInstructionsResponse.next_page_token]
of the previous [DataLabelingService.ListInstructions] call.
Return first page if empty.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
class ListInstructionsResponse(proto.Message):
r"""Results of listing instructions under a project.
Attributes:
instructions (Sequence[google.cloud.datalabeling_v1beta1.types.Instruction]):
The list of Instructions to return.
next_page_token (str):
A token to retrieve next page of results.
"""
@property
def raw_page(self):
return self
instructions = proto.RepeatedField(
proto.MESSAGE, number=1, message=gcd_instruction.Instruction,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class GetEvaluationRequest(proto.Message):
r"""Request message for GetEvaluation.
Attributes:
name (str):
Required. Name of the evaluation. Format:
"projects/{project_id}/datasets/{dataset_id}/evaluations/{evaluation_id}'
"""
name = proto.Field(proto.STRING, number=1,)
class SearchEvaluationsRequest(proto.Message):
r"""Request message for SearchEvaluation.
Attributes:
parent (str):
Required. Evaluation search parent (project ID). Format:
"projects/{project_id}".
filter (str):
Optional. To search evaluations, you can filter by the
following:
- evaluation\_job.evaluation_job_id (the last part of
[EvaluationJob.name][google.cloud.datalabeling.v1beta1.EvaluationJob.name])
- evaluation\_job.model_id (the {model_name} portion of
[EvaluationJob.modelVersion][google.cloud.datalabeling.v1beta1.EvaluationJob.model_version])
- evaluation\_job.evaluation_job_run_time_start (Minimum
threshold for the
[evaluationJobRunTime][google.cloud.datalabeling.v1beta1.Evaluation.evaluation_job_run_time]
that created the evaluation)
- evaluation\_job.evaluation_job_run_time_end (Maximum
threshold for the
[evaluationJobRunTime][google.cloud.datalabeling.v1beta1.Evaluation.evaluation_job_run_time]
that created the evaluation)
- evaluation\_job.job_state
([EvaluationJob.state][google.cloud.datalabeling.v1beta1.EvaluationJob.state])
- annotation\_spec.display_name (the Evaluation contains a
metric for the annotation spec with this
[displayName][google.cloud.datalabeling.v1beta1.AnnotationSpec.display_name])
            To filter by multiple criteria, use the ``AND`` operator or
            the ``OR`` operator. The following example shows a string
            that filters by several criteria:
"evaluation\ *job.evaluation_job_id = {evaluation_job_id}
AND evaluation*\ job.model_id = {model_name} AND
evaluation\ *job.evaluation_job_run_time_start =
{timestamp_1} AND
evaluation*\ job.evaluation_job_run_time_end = {timestamp_2}
AND annotation\_spec.display_name = {display_name}".
page_size (int):
Optional. Requested page size. Server may
return fewer results than requested. Default
value is 100.
page_token (str):
Optional. A token identifying a page of results for the
server to return. Typically obtained by the
[nextPageToken][google.cloud.datalabeling.v1beta1.SearchEvaluationsResponse.next_page_token]
of the response to a previous search request.
If you don't specify this field, the API call requests the
first page of the search.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
class SearchEvaluationsResponse(proto.Message):
r"""Results of searching evaluations.
Attributes:
evaluations (Sequence[google.cloud.datalabeling_v1beta1.types.Evaluation]):
The list of evaluations matching the search.
next_page_token (str):
A token to retrieve next page of results.
"""
@property
def raw_page(self):
return self
evaluations = proto.RepeatedField(
proto.MESSAGE, number=1, message=evaluation.Evaluation,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class SearchExampleComparisonsRequest(proto.Message):
r"""Request message of SearchExampleComparisons.
Attributes:
parent (str):
Required. Name of the
[Evaluation][google.cloud.datalabeling.v1beta1.Evaluation]
resource to search for example comparisons from. Format:
"projects/{project_id}/datasets/{dataset_id}/evaluations/{evaluation_id}".
page_size (int):
Optional. Requested page size. Server may
return fewer results than requested. Default
value is 100.
page_token (str):
Optional. A token identifying a page of results for the
server to return. Typically obtained by the
[nextPageToken][SearchExampleComparisons.next_page_token] of
            the response to a previous search request.
If you don't specify this field, the API call requests the
first page of the search.
"""
parent = proto.Field(proto.STRING, number=1,)
page_size = proto.Field(proto.INT32, number=2,)
page_token = proto.Field(proto.STRING, number=3,)
class SearchExampleComparisonsResponse(proto.Message):
r"""Results of searching example comparisons.
Attributes:
example_comparisons (Sequence[google.cloud.datalabeling_v1beta1.types.SearchExampleComparisonsResponse.ExampleComparison]):
A list of example comparisons matching the
search criteria.
next_page_token (str):
A token to retrieve next page of results.
"""
class ExampleComparison(proto.Message):
r"""Example comparisons comparing ground truth output and
predictions for a specific input.
Attributes:
ground_truth_example (google.cloud.datalabeling_v1beta1.types.Example):
The ground truth output for the input.
model_created_examples (Sequence[google.cloud.datalabeling_v1beta1.types.Example]):
Predictions by the model for the input.
"""
ground_truth_example = proto.Field(
proto.MESSAGE, number=1, message=gcd_dataset.Example,
)
model_created_examples = proto.RepeatedField(
proto.MESSAGE, number=2, message=gcd_dataset.Example,
)
@property
def raw_page(self):
return self
example_comparisons = proto.RepeatedField(
proto.MESSAGE, number=1, message=ExampleComparison,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class CreateEvaluationJobRequest(proto.Message):
r"""Request message for CreateEvaluationJob.
Attributes:
parent (str):
Required. Evaluation job resource parent. Format:
"projects/{project_id}".
job (google.cloud.datalabeling_v1beta1.types.EvaluationJob):
Required. The evaluation job to create.
"""
parent = proto.Field(proto.STRING, number=1,)
job = proto.Field(
proto.MESSAGE, number=2, message=gcd_evaluation_job.EvaluationJob,
)
class UpdateEvaluationJobRequest(proto.Message):
r"""Request message for UpdateEvaluationJob.
Attributes:
evaluation_job (google.cloud.datalabeling_v1beta1.types.EvaluationJob):
Required. Evaluation job that is going to be
updated.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Optional. Mask for which fields to update. You can only
provide the following fields:
- ``evaluationJobConfig.humanAnnotationConfig.instruction``
- ``evaluationJobConfig.exampleCount``
- ``evaluationJobConfig.exampleSamplePercentage``
You can provide more than one of these fields by separating
them with commas.
"""
evaluation_job = proto.Field(
proto.MESSAGE, number=1, message=gcd_evaluation_job.EvaluationJob,
)
update_mask = proto.Field(
proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
)
class GetEvaluationJobRequest(proto.Message):
r"""Request message for GetEvaluationJob.
Attributes:
name (str):
Required. Name of the evaluation job. Format:
"projects/{project_id}/evaluationJobs/{evaluation_job_id}".
"""
name = proto.Field(proto.STRING, number=1,)
class PauseEvaluationJobRequest(proto.Message):
r"""Request message for PauseEvaluationJob.
Attributes:
name (str):
Required. Name of the evaluation job that is going to be
paused. Format:
"projects/{project_id}/evaluationJobs/{evaluation_job_id}".
"""
name = proto.Field(proto.STRING, number=1,)
class ResumeEvaluationJobRequest(proto.Message):
r"""Request message ResumeEvaluationJob.
Attributes:
name (str):
Required. Name of the evaluation job that is going to be
resumed. Format:
"projects/{project_id}/evaluationJobs/{evaluation_job_id}".
"""
name = proto.Field(proto.STRING, number=1,)
class DeleteEvaluationJobRequest(proto.Message):
r"""Request message DeleteEvaluationJob.
Attributes:
name (str):
Required. Name of the evaluation job that is going to be
deleted. Format:
"projects/{project_id}/evaluationJobs/{evaluation_job_id}".
"""
name = proto.Field(proto.STRING, number=1,)
class ListEvaluationJobsRequest(proto.Message):
r"""Request message for ListEvaluationJobs.
Attributes:
parent (str):
Required. Evaluation job resource parent. Format:
"projects/{project_id}".
filter (str):
Optional. You can filter the jobs to list by model_id (also
known as model_name, as described in
[EvaluationJob.modelVersion][google.cloud.datalabeling.v1beta1.EvaluationJob.model_version])
or by evaluation job state (as described in
[EvaluationJob.state][google.cloud.datalabeling.v1beta1.EvaluationJob.state]).
To filter by both criteria, use the ``AND`` operator or the
``OR`` operator. For example, you can use the following
string for your filter: "evaluation\ *job.model_id =
{model_name} AND evaluation*\ job.state =
{evaluation_job_state}".
page_size (int):
Optional. Requested page size. Server may
return fewer results than requested. Default
value is 100.
page_token (str):
Optional. A token identifying a page of results for the
server to return. Typically obtained by the
[nextPageToken][google.cloud.datalabeling.v1beta1.ListEvaluationJobsResponse.next_page_token]
in the response to the previous request. The request returns
the first page if this is empty.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
class ListEvaluationJobsResponse(proto.Message):
r"""Results for listing evaluation jobs.
Attributes:
evaluation_jobs (Sequence[google.cloud.datalabeling_v1beta1.types.EvaluationJob]):
The list of evaluation jobs to return.
next_page_token (str):
A token to retrieve next page of results.
"""
@property
def raw_page(self):
return self
evaluation_jobs = proto.RepeatedField(
proto.MESSAGE, number=1, message=gcd_evaluation_job.EvaluationJob,
)
next_page_token = proto.Field(proto.STRING, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
| 1.492188 | 1 |
tap/api_resources/customer.py | obytes/tap-python | 3 | 12768712 | from __future__ import absolute_import, division, print_function
from tap.api_resources.abstract.createable_api_resource import CreateableAPIResource
from tap.api_resources.abstract.updateable_api_resource import UpdateableAPIResource
from tap.api_resources.abstract.deleteable_api_resource import DeleteableAPIResource
from tap.api_resources.abstract.listeable_api_resource import ListeableAPIResource
import tap
@tap.api_resources.abstract.nested_resource_class_methods(
'card',
operations=['create', 'retrieve', 'delete', 'list']
)
class Customer(CreateableAPIResource,
UpdateableAPIResource,
DeleteableAPIResource,
ListeableAPIResource):
OBJECT_NAME = 'customer'
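# Illustrative usage sketch, kept in comments so it never runs on import. The
# exact keyword arguments accepted by the Tap API are assumptions here; the
# classmethod names follow the stripe-python style that these mixins and the
# nested "card" resource declaration imply.
#
#   customer = Customer.create(first_name="Jane", email="jane@example.com")
#   customers = Customer.list()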
| 2.109375 | 2 |
software/pynguin/tests/ga/test_chromosome.py | se2p/artifact-pynguin-ssbse2020 | 3 | 12768713 | # This file is part of Pynguin.
#
# Pynguin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pynguin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pynguin. If not, see <https://www.gnu.org/licenses/>.
from unittest.mock import MagicMock
import pytest
import pynguin.ga.chromosome as chrom
import pynguin.ga.fitnessfunction as ff
from pynguin.ga.chromosome import Chromosome
@pytest.fixture
def fitness_function():
return MagicMock(ff.FitnessFunction)
@pytest.fixture
def chromosome():
class DummyChromosome(chrom.Chromosome):
def size(self) -> int:
return 0
def clone(self) -> Chromosome:
pass
def cross_over(
self, other: chrom.Chromosome, position1: int, position2: int
) -> None:
pass
return DummyChromosome()
def test_fitness_no_fitness_values(chromosome):
with pytest.raises(AssertionError):
assert chromosome.get_fitness()
def test_fitness_one_fitness_function(chromosome, fitness_function):
chromosome.add_fitness_function(fitness_function)
chromosome._update_fitness_values(fitness_function, ff.FitnessValues(5, 0.9))
chromosome.set_changed(False)
assert chromosome.get_fitness() == 5
assert chromosome.get_coverage() == 0.9
def test_fitness_two_fitness_functions(chromosome, fitness_function):
chromosome.add_fitness_function(fitness_function)
chromosome._update_fitness_values(fitness_function, ff.FitnessValues(0.42, 0.1))
fitness_func2 = MagicMock(ff.FitnessFunction)
chromosome.add_fitness_function(fitness_func2)
chromosome._update_fitness_values(fitness_func2, ff.FitnessValues(0.23, 0.5))
chromosome.set_changed(False)
assert chromosome.get_fitness() == 0.65
assert chromosome.get_coverage() == 0.3
def test_values_for_fitness_function(chromosome, fitness_function):
chromosome.add_fitness_function(fitness_function)
chromosome._update_fitness_values(fitness_function, ff.FitnessValues(5, 0.5))
chromosome.set_changed(False)
assert chromosome.get_fitness_for(fitness_function) == 5
assert chromosome.get_coverage_for(fitness_function) == 0.5
def test_has_changed_default(chromosome):
assert chromosome.has_changed()
def test_has_changed(chromosome):
chromosome.set_changed(False)
assert not chromosome.has_changed()
def test_caching(chromosome, fitness_function):
fitness_function.compute_fitness_values.side_effect = [
ff.FitnessValues(5, 0.5),
ff.FitnessValues(6, 0.6),
]
chromosome.add_fitness_function(fitness_function)
assert chromosome.get_fitness() == 5
assert chromosome.get_coverage() == 0.5
assert not chromosome.has_changed()
assert chromosome.get_number_of_evaluations() == 1
chromosome.set_changed(True)
assert chromosome.get_fitness() == 6
assert chromosome.get_coverage() == 0.6
assert not chromosome.has_changed()
assert chromosome.get_number_of_evaluations() == 2
def test_illegal_values(chromosome, fitness_function):
fitness_function.compute_fitness_values.return_value = ff.FitnessValues(-1, 1.5)
chromosome.add_fitness_function(fitness_function)
with pytest.raises(RuntimeError):
chromosome.get_fitness()
def test_get_fitness_functions(chromosome):
func1 = MagicMock(ff.FitnessFunction)
func2 = MagicMock(ff.FitnessFunction)
chromosome.add_fitness_function(func1)
chromosome.add_fitness_function(func2)
assert chromosome.get_fitness_functions() == [func1, func2]
| 2.125 | 2 |
cbam/cbamsim.py | Evangeline98/Multi-Label-Classification-with-CNN-and-RNN | 1 | 12768714 | <filename>cbam/cbamsim.py
from datetime import datetime
import json
import glob
import os
from pathlib import Path
from multiprocessing.pool import ThreadPool
from typing import Dict
import numpy as np
import pandas as pd
from scipy.stats.mstats import gmean
import torch
from torch import nn
from torch.utils.data import DataLoader
ON_KAGGLE: bool = 'KAGGLE_WORKING_DIR' in os.environ
def gmean_df(df: pd.DataFrame) -> pd.DataFrame:
return df.groupby(level=0).agg(lambda x: gmean(list(x)))
def mean_df(df: pd.DataFrame) -> pd.DataFrame:
return df.groupby(level=0).mean()
def load_model(model: nn.Module, path: Path) -> Dict:
state = torch.load(str(path))
model.load_state_dict(state['model'])
print('Loaded model from epoch {epoch}, step {step:,}'.format(**state))
return state
class ThreadingDataLoader(DataLoader):
def __iter__(self):
sample_iter = iter(self.batch_sampler)
if self.num_workers == 0:
for indices in sample_iter:
yield self.collate_fn([self._get_item(i) for i in indices])
else:
prefetch = 1
with ThreadPool(processes=self.num_workers) as pool:
futures = []
for indices in sample_iter:
futures.append([pool.apply_async(self._get_item, args=(i,))
for i in indices])
if len(futures) > prefetch:
yield self.collate_fn([f.get() for f in futures.pop(0)])
# items = pool.map(lambda i: self.dataset[i], indices)
# yield self.collate_fn(items)
for batch_futures in futures:
yield self.collate_fn([f.get() for f in batch_futures])
def _get_item(self, i):
return self.dataset[i]
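# Illustrative sketch: ThreadingDataLoader is meant as a drop-in replacement
# for torch.utils.data.DataLoader that prefetches batches with a thread pool
# instead of worker processes, e.g.
#
#   loader = ThreadingDataLoader(dataset, batch_size=32, num_workers=4,
#                                shuffle=True)
#   for images, targets in loader:
#       ...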
def write_event(log, step: int, **data):
data['step'] = step
data['dt'] = datetime.now().isoformat()
log.write(json.dumps(data, sort_keys=True))
log.write('\n')
log.flush()
def _smooth(ys, indices):
return [np.mean(ys[idx: indices[i + 1]])
for i, idx in enumerate(indices[:-1])]
import random
import math
from PIL import Image
from torchvision.transforms import (
ToTensor, Normalize, Compose, Resize, CenterCrop, RandomCrop,
RandomHorizontalFlip)
class RandomSizedCrop:
"""Random crop the given PIL.Image to a random size
    of the original size and a random aspect ratio
of the original aspect ratio.
size: size of the smaller edge
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, interpolation=Image.BILINEAR,
min_aspect=4/5, max_aspect=5/4,
min_area=0.25, max_area=1):
self.size = size
self.interpolation = interpolation
self.min_aspect = min_aspect
self.max_aspect = max_aspect
self.min_area = min_area
self.max_area = max_area
def __call__(self, img):
for attempt in range(10):
area = img.size[0] * img.size[1]
target_area = random.uniform(self.min_area, self.max_area) * area
aspect_ratio = random.uniform(self.min_aspect, self.max_aspect)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if random.random() < 0.5:
w, h = h, w
if w <= img.size[0] and h <= img.size[1]:
x1 = random.randint(0, img.size[0] - w)
y1 = random.randint(0, img.size[1] - h)
img = img.crop((x1, y1, x1 + w, y1 + h))
assert(img.size == (w, h))
return img.resize((self.size, self.size), self.interpolation)
# Fallback
scale = Resize(self.size, interpolation=self.interpolation)
crop = CenterCrop(self.size)
return crop(scale(img))
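# Illustrative sketch: the crop above would typically be composed with the flip
# and tensor transforms defined further below; the file name and crop size are
# placeholders.
#
#   aug = Compose([RandomSizedCrop(288), RandomHorizontalFlip()])
#   x = tensor_transform(aug(Image.open('example.png')))  # 3 x 288 x 288 tensor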
# class RandomSizedCrop:
# """Random crop the given PIL.Image to a random size
# of the original size and and a random aspect ratio
# of the original aspect ratio.
# size: size of the smaller edge
# interpolation: Default: PIL.Image.BILINEAR
# """
# def __init__(self, size, interpolation=Image.BICUBIC,
# min_aspect=4/5, max_aspect=5/4,
# min_area=0.25, max_area=1):
# self.size = size
# self.interpolation = interpolation
# self.min_aspect = min_aspect
# self.max_aspect = max_aspect
# self.min_area = min_area
# self.max_area = max_area
# def __call__(self, img):
# size_0 = img.size[0]
# size_1 = img.size[1]
# print(size_0, size_1)
# img_data = np.array(img)
# if ((size_0/size_1>=1.3) or (size_1/size_0>=1.3)):
# w_resized = int(img.size[0] * 300 / img.size[1])
# h_resized = int(img.size[1] * 300 / img.size[0])
# if size_0 < size_1:
# resized = img.resize((w_resized ,300))
# pad_width = 300 - w_resized
# df = pd.DataFrame(img_data[0,:,:])
# padding = (pad_width // 2, 0, pad_width-(pad_width//2), 0)
# else:
# resized = img.resize((300, h_resized))
# pad_height = 300 - h_resized
# df = pd.DataFrame(img_data[:,0,:])
# padding = (0, pad_height // 2, 0, pad_height-(pad_height//2))
# AvgColour = tuple([int(i) for i in df.mean()])
# resized_w_pad = ImageOps.expand(resized, padding, fill=AvgColour)
# else:
# for attempt in range(10):
# print(attempt)
# area = img.size[0] * img.size[1]
# target_area = random.uniform(self.min_area, self.max_area) * area
# aspect_ratio = random.uniform(self.min_aspect, self.max_aspect)
# w = int(round(math.sqrt(target_area * aspect_ratio)))
# h = int(round(math.sqrt(target_area / aspect_ratio)))
# if random.random() < 0.5:
# w, h = h, w
# if w <= img.size[0] and h <= img.size[1]:
# x1 = random.randint(0, img.size[0] - w)
# y1 = random.randint(0, img.size[1] - h)
# img = img.crop((x1, y1, x1 + w, y1 + h))
# assert(img.size == (w, h))
# return img.resize((self.size, self.size), self.interpolation)
# scale = Resize(self.size, interpolation=self.interpolation)
# crop = CenterCrop(self.size)
# resized_w_pad = crop(scale(img))
# # Fallback
# return resized_w_pad
train_transform = Compose([
RandomCrop(224),
RandomHorizontalFlip(),
])
test_transform = Compose([
RandomCrop(224),
RandomHorizontalFlip(),
])
tensor_transform = Compose([
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
from pathlib import Path
from typing import Callable, List
import cv2
import pandas as pd
from PIL import Image
import torch
from torch.utils.data import Dataset
N_CLASSES = 1103
DATA_ROOT = Path('../input/imet-2019-fgvc6' if ON_KAGGLE else './data')
class TrainDataset(Dataset):
    def __init__(self, root: Path, df: pd.DataFrame, count: pd.DataFrame, thres,
image_transform: Callable, debug: bool = True):
super().__init__()
self._root = root
self._df = df
self._image_transform = image_transform
self._debug = debug
self.index = np.where(count['count'] < thres)
def __len__(self):
return len(self._df)
def __getitem__(self, idx: int):
item = self._df.iloc[idx]
image = load_transform_image(
item, self._root, self._image_transform, debug=self._debug)
target = torch.zeros(N_CLASSES)
for cls in item.attribute_ids.split():
target[int(cls)] = 1
target[self.index] = 0
return image, target
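# Illustrative sketch (file names and the thres value are assumptions): a
# TrainDataset row yields a normalized image tensor plus a 1103-dim multi-hot
# target in which classes rarer than `thres` are zeroed out.
#
#   folds = pd.read_csv('folds.csv')
#   counts = pd.read_csv('label_counts.csv')   # must contain a 'count' column
#   ds = TrainDataset(DATA_ROOT / 'train', folds, counts, thres=10,
#                     image_transform=train_transform, debug=False)
#   image, target = ds[0]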
class TTADataset:
def __init__(self, root: Path, df: pd.DataFrame,
image_transform: Callable, tta: int):
self._root = root
self._df = df
self._image_transform = image_transform
self._tta = tta
def __len__(self):
return len(self._df) * self._tta
def __getitem__(self, idx):
item = self._df.iloc[idx % len(self._df)]
image = load_transform_image(item, self._root, self._image_transform)
return image, item.id
def load_transform_image(
item, root: Path, image_transform: Callable, debug: bool = False):
image = load_image(item, root)
image = image_transform(image)
if debug:
image.save('_debug.png')
return tensor_transform(image)
def load_image(item, root: Path) -> Image.Image:
image = cv2.imread(str(root / f'{item.id}.png'))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return Image.fromarray(image)
def get_ids(root: Path) -> List[str]:
return sorted({p.name.split('_')[0] for p in root.glob('*.png')})
import argparse
from collections import defaultdict, Counter
import random
import pandas as pd
import tqdm
def make_folds(n_folds: int) -> pd.DataFrame:
df = pd.read_csv(DATA_ROOT / 'train.csv')
cls_counts = Counter(cls for classes in df['attribute_ids'].str.split()
for cls in classes)
fold_cls_counts = defaultdict(int)
folds = [-1] * len(df)
for item in tqdm.tqdm(df.sample(frac=1, random_state=42).itertuples(),
total=len(df)):
cls = min(item.attribute_ids.split(), key=lambda cls: cls_counts[cls])
fold_counts = [(f, fold_cls_counts[f, cls]) for f in range(n_folds)]
min_count = min([count for _, count in fold_counts])
random.seed(item.Index)
fold = random.choice([f for f, count in fold_counts
if count == min_count])
folds[item.Index] = fold
for cls in item.attribute_ids.split():
fold_cls_counts[fold, cls] += 1
df['fold'] = folds
return df
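# Illustrative sketch: make_folds spreads each attribute roughly evenly over
# the folds; the training loop further below holds out fold 0 for validation.
#
#   folds = make_folds(n_folds=5)
#   train_df = folds[folds['fold'] != 0]
#   valid_df = folds[folds['fold'] == 0]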
####################################model#################################
import math
from inspect import isfunction
import torch
import torch.nn as nn
import torch.nn.init as init
import os
class ConvBlock(nn.Module):
"""
Standard convolution block with Batch normalization and ReLU/ReLU6 activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
activate : bool, default True
Whether activate the convolution block.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation=1,
groups=1,
bias=False,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True)),
activate=True):
super(ConvBlock, self).__init__()
self.activate = activate
self.conv = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias)
self.bn = nn.BatchNorm2d(
num_features=out_channels,
eps=bn_eps)
if self.activate:
assert (activation is not None)
if isfunction(activation):
self.activ = activation()
elif isinstance(activation, str):
if activation == "relu":
self.activ = nn.ReLU(inplace=True)
elif activation == "relu6":
self.activ = nn.ReLU6(inplace=True)
else:
raise NotImplementedError()
else:
self.activ = activation
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
if self.activate:
x = self.activ(x)
return x
def conv1x1_block(in_channels,
out_channels,
stride=1,
padding=0,
groups=1,
bias=False,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True)),
activate=True):
"""
1x1 version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 0
Padding value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
activate : bool, default True
Whether activate the convolution block.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=stride,
padding=padding,
groups=groups,
bias=bias,
bn_eps=bn_eps,
activation=activation,
activate=activate)
def conv3x3_block(in_channels,
out_channels,
stride=1,
padding=1,
dilation=1,
groups=1,
bias=False,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True)),
activate=True):
"""
3x3 version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
activate : bool, default True
Whether activate the convolution block.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
bn_eps=bn_eps,
activation=activation,
activate=activate)
def conv7x7_block(in_channels,
out_channels,
stride=1,
padding=3,
bias=False,
activation=(lambda: nn.ReLU(inplace=True)),
activate=True):
"""
7x7 version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 3
Padding value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
activate : bool, default True
Whether activate the convolution block.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=7,
stride=stride,
padding=padding,
bias=bias,
activation=activation,
activate=activate)
class ResBlock(nn.Module):
"""
Simple ResNet block for residual path in ResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
"""
def __init__(self,
in_channels,
out_channels,
stride):
super(ResBlock, self).__init__()
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
stride=stride)
self.conv2 = conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
activation=None,
activate=False)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class ResBottleneck(nn.Module):
"""
ResNet bottleneck block for residual path in ResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for the second convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for the second convolution layer.
conv1_stride : bool, default False
Whether to use stride in the first or the second convolution layer of the block.
bottleneck_factor : int, default 4
Bottleneck factor.
"""
def __init__(self,
in_channels,
out_channels,
stride,
padding=1,
dilation=1,
conv1_stride=False,
bottleneck_factor=4):
super(ResBottleneck, self).__init__()
mid_channels = out_channels // bottleneck_factor
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
stride=(stride if conv1_stride else 1))
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
stride=(1 if conv1_stride else stride),
padding=padding,
dilation=dilation)
self.conv3 = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
activation=None,
activate=False)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class ResInitBlock(nn.Module):
"""
ResNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels):
super(ResInitBlock, self).__init__()
self.conv = conv7x7_block(
in_channels=in_channels,
out_channels=out_channels,
stride=2)
self.pool = nn.MaxPool2d(
kernel_size=3,
stride=2,
padding=1)
def forward(self, x):
x = self.conv(x)
x = self.pool(x)
return x
class MLP(nn.Module):
"""
Multilayer perceptron block.
Parameters:
----------
channels : int
Number of input/output channels.
reduction_ratio : int, default 16
Channel reduction ratio.
"""
def __init__(self,
channels,
reduction_ratio=16):
super(MLP, self).__init__()
mid_channels = channels // reduction_ratio
self.fc1 = nn.Linear(
in_features=channels,
out_features=mid_channels)
self.activ = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(
in_features=mid_channels,
out_features=channels)
def forward(self, x):
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = self.activ(x)
x = self.fc2(x)
return x
class ChannelGate(nn.Module):
"""
CBAM channel gate block.
Parameters:
----------
channels : int
Number of input/output channels.
reduction_ratio : int, default 16
Channel reduction ratio.
"""
def __init__(self,
channels,
reduction_ratio=16):
super(ChannelGate, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
self.max_pool = nn.AdaptiveMaxPool2d(output_size=(1, 1))
self.mlp = MLP(
channels=channels,
reduction_ratio=reduction_ratio)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
att1 = self.avg_pool(x)
att1 = self.mlp(att1)
att2 = self.max_pool(x)
att2 = self.mlp(att2)
att = att1 + att2
att = self.sigmoid(att)
att = att.unsqueeze(2).unsqueeze(3).expand_as(x)
x = x * att
return x
class SpatialGate(nn.Module):
"""
CBAM spatial gate block.
"""
def __init__(self):
super(SpatialGate, self).__init__()
self.conv = conv7x7_block(
in_channels=2,
out_channels=1,
activate=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
att1 = x.max(dim=1)[0].unsqueeze(1)
att2 = x.mean(dim=1).unsqueeze(1)
att = torch.cat((att1, att2), dim=1)
att = self.conv(att)
att = self.sigmoid(att)
x = x * att
return x
class CbamBlock(nn.Module):
"""
CBAM attention block for CBAM-ResNet.
Parameters:
----------
channels : int
Number of input/output channels.
reduction_ratio : int, default 16
Channel reduction ratio.
"""
def __init__(self,
channels,
reduction_ratio=16):
super(CbamBlock, self).__init__()
self.ch_gate = ChannelGate(
channels=channels,
reduction_ratio=reduction_ratio)
self.sp_gate = SpatialGate()
def forward(self, x):
x = self.ch_gate(x)
x = self.sp_gate(x)
return x
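# Illustrative shape check, never called anywhere: CBAM first reweights the
# channels, then the spatial positions, so the output keeps the input shape.
def _cbam_block_shape_check():
    block = CbamBlock(channels=64)
    x = torch.randn(2, 64, 56, 56)
    y = block(x)
    assert y.shape == x.shape
    return y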
class CbamResUnit(nn.Module):
"""
CBAM-ResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
"""
def __init__(self,
in_channels,
out_channels,
stride,
bottleneck):
super(CbamResUnit, self).__init__()
self.resize_identity = (in_channels != out_channels) or (stride != 1)
if bottleneck:
self.body = ResBottleneck(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
conv1_stride=False)
else:
self.body = ResBlock(
in_channels=in_channels,
out_channels=out_channels,
stride=stride)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
activate=False)
self.cbam = CbamBlock(channels=out_channels)
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = self.cbam(x)
x = x + identity
x = self.activ(x)
return x
class CbamResNet(nn.Module):
"""
CBAM-ResNet model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
num_classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
in_channels=3,
in_size=(224, 224),
num_classes=1000):
super(CbamResNet, self).__init__()
self.in_size = in_size
self.num_classes = num_classes
self.features = nn.Sequential()
self.features.add_module("init_block", ResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.Sequential()
for j, out_channels in enumerate(channels_per_stage):
stride = 2 if (j == 0) and (i != 0) else 1
stage.add_module("unit{}".format(j + 1), CbamResUnit(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
bottleneck=bottleneck))
in_channels = out_channels
self.features.add_module("stage{}".format(i + 1), stage)
self.features.add_module('final_pool', nn.AvgPool2d(
kernel_size=7,
stride=1))
self.output = nn.Linear(
in_features=in_channels,
out_features=num_classes)
self._init_params()
def _init_params(self):
for name, module in self.named_modules():
if isinstance(module, nn.Conv2d):
init.kaiming_uniform_(module.weight)
if module.bias is not None:
init.constant_(module.bias, 0)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.output(x)
return x
def get_resnet(blocks,
model_name=None,
pretrained=False,
root=os.path.join("~", ".torch", "models"),
**kwargs):
"""
Create CBAM-ResNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
if blocks == 18:
layers = [2, 2, 2, 2]
elif blocks == 34:
layers = [3, 4, 6, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
else:
raise ValueError("Unsupported CBAM-ResNet with number of blocks: {}".format(blocks))
init_block_channels = 64
if blocks < 50:
channels_per_layers = [64, 128, 256, 512]
bottleneck = False
else:
channels_per_layers = [256, 512, 1024, 2048]
bottleneck = True
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = CbamResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
def cbam_resnet18(**kwargs):
"""
CBAM-ResNet-18 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=18, model_name="cbam_resnet18", **kwargs)
def cbam_resnet34(**kwargs):
"""
CBAM-ResNet-34 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=34, model_name="cbam_resnet34", **kwargs)
def cbam_resnet50(**kwargs):
"""
CBAM-ResNet-50 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=50, model_name="cbam_resnet50", **kwargs)
def cbam_resnet101(**kwargs):
"""
CBAM-ResNet-101 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=101, model_name="cbam_resnet101", **kwargs)
def cbam_resnet152(**kwargs):
"""
CBAM-ResNet-152 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
    return get_resnet(blocks=152, model_name="cbam_resnet152", **kwargs)
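# Illustrative sketch (an assumption about how the backbone is adapted for this
# task; the actual wiring is done elsewhere in the script): build a
# CBAM-ResNet-50 and swap its 1000-way ImageNet head for the 1103 classes.
def _example_build_model():
    net = cbam_resnet50(pretrained=False)
    net.output = nn.Linear(net.output.in_features, N_CLASSES)
    return net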
########################main.py########################################################
import argparse
from itertools import islice
import json
from pathlib import Path
import shutil
import warnings
from typing import Dict
import numpy as np
import pandas as pd
from sklearn.metrics import fbeta_score
from sklearn.exceptions import UndefinedMetricWarning
import torch
from torch import nn, cuda
from torch.optim import Adam
import tqdm
def predict(model, root: Path, df: pd.DataFrame, out_path: Path,
batch_size: int, tta: int, workers: int, use_cuda: bool):
loader = DataLoader(
dataset=TTADataset(root, df, test_transform, tta=tta),
shuffle=False,
batch_size=batch_size,
num_workers=workers,
)
model.eval()
all_outputs, all_ids = [], []
with torch.no_grad():
for inputs, ids in tqdm.tqdm(loader, desc='Predict'):
if use_cuda:
inputs = inputs.cuda()
outputs = torch.sigmoid(model(inputs))
all_outputs.append(outputs.data.cpu().numpy())
all_ids.extend(ids)
df = pd.DataFrame(
data=np.concatenate(all_outputs),
index=all_ids,
columns=map(str, range(N_CLASSES)))
df = mean_df(df)
df.to_hdf(out_path, 'prob', index_label='id')
print(f'Saved predictions to {out_path}')
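# Illustrative sketch (paths are assumptions following the iMet competition
# layout): average `tta` augmented passes per image and dump per-class
# probabilities to an HDF5 file for later thresholding.
#
#   test_df = pd.read_csv(DATA_ROOT / 'sample_submission.csv')
#   predict(model, DATA_ROOT / 'test', test_df, Path('test.h5'),
#           batch_size=64, tta=4, workers=2, use_cuda=True)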
def train(args, model: nn.Module, criterion, *, params,folds, count,
init_optimizer, use_cuda,
n_epochs=None, patience=2, max_lr_changes=2) -> bool:
lr = args.lr
n_epochs = n_epochs or args.n_epochs
params = list(params)
optimizer = init_optimizer(params, lr)
run_root = Path(args.run_root)
model_path = run_root / 'model.pt'
best_model_path = run_root / 'best-model.pt'
pretrain_path = Path('../input/modelcbam')/'best-model (1).pt'
if pretrain_path.exists():
state = load_model(model, pretrain_path)
epoch = state['epoch']
step = state['step']
best_valid_loss = 50
else:
epoch = 1
step = 0
best_valid_loss = float('inf')
lr_changes = 0
save = lambda ep: torch.save({
'model': model.state_dict(),
'epoch': ep,
'step': step,
'best_valid_loss': best_valid_loss
}, str(model_path))
report_each = 10
log = run_root.joinpath('train.log').open('at', encoding='utf8')
valid_losses = []
lr_reset_epoch = epoch
### doing cv
train_fold = folds[folds['fold'] != 0]
valid_fold = folds[folds['fold'] == 0]
def make_loader(df: pd.DataFrame, image_transform, count: pd.DataFrame, thres) -> DataLoader:
return DataLoader(
            TrainDataset(train_root, df, count, thres, image_transform, debug=args.debug),
shuffle=True,
batch_size=args.batch_size,
num_workers=args.workers,
)
train_loader = make_loader(train_fold, train_transform, count, args.count)
    valid_loader = make_loader(valid_fold, test_transform, count, 0)
##############
validation(model, criterion, valid_loader, use_cuda)
validation2(model, criterion, valid_loader, use_cuda)
for epoch in range(epoch, n_epochs + 1):
model.train()
losses = []
tq = tqdm.tqdm(total=(len(train_loader) * args.batch_size))
tq.set_description(f'Epoch {epoch}, lr {lr}')
tl = train_loader
try:
mean_loss = 0
for i, (inputs, targets) in enumerate(tl):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
# C is the number of classes.
batch_size = inputs.size(0)
#smoothed_labels =0.9*targets + 0.1*(torch.ones((batch_size,N_CLASSES)).cuda()-targets)
#smoothed_labels = smoothed_labels.cuda()
outputs = model(inputs)
loss = _reduce_loss(criterion(outputs, targets))
(batch_size * loss).backward()
if (i + 1) % args.step == 0:
optimizer.step()
optimizer.zero_grad()
step += 1
tq.update(batch_size)
losses.append(loss.item())
mean_loss = np.mean(losses[-report_each:])
tq.set_postfix(loss=f'{mean_loss:.3f}')
if i and i % report_each == 0:
write_event(log, step, loss=mean_loss)
write_event(log, step, loss=mean_loss)
tq.close()
save(epoch + 1)
valid_metrics = validation(model, criterion, valid_loader, use_cuda)
write_event(log, step, **valid_metrics)
valid_loss = valid_metrics['valid_loss']
valid_losses.append(valid_loss)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
shutil.copy(str(model_path), str(best_model_path))
elif (patience and epoch - lr_reset_epoch > patience and
min(valid_losses[-patience:]) > best_valid_loss):
# "patience" epochs without improvement
lr_changes +=1
if lr_changes > max_lr_changes:
break
lr /= 5
print(f'lr updated to {lr}')
lr_reset_epoch = epoch
optimizer = init_optimizer(params, lr)
except KeyboardInterrupt:
tq.close()
print('Ctrl+C, saving snapshot')
save(epoch)
print('done.')
return False
return True
def validation(
model: nn.Module, criterion, valid_loader, use_cuda,
) -> Dict[str, float]:
model.eval()
all_losses, all_predictions, all_targets = [], [], []
with torch.no_grad():
for inputs, targets in valid_loader:
all_targets.append(targets.numpy().copy())
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
outputs = model(inputs)
loss = criterion(outputs, targets)
all_losses.append(_reduce_loss(loss).item())
predictions = torch.sigmoid(outputs)
all_predictions.append(predictions.cpu().numpy())
all_predictions = np.concatenate(all_predictions)
all_targets = np.concatenate(all_targets)
def get_score(y_pred):
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=UndefinedMetricWarning)
return fbeta_score(
all_targets, y_pred, beta=2, average='samples')
metrics = {}
argsorted = all_predictions.argsort(axis=1)
for threshold in [0.05,0.10, 0.15, 0.20]:
metrics[f'valid_f2_th_{threshold:.2f}'] = get_score(
binarize_prediction(all_predictions, threshold, argsorted))
metrics['valid_loss'] = np.mean(all_losses)
print(' | '.join(f'{k} {v:.3f}' for k, v in sorted(
metrics.items(), key=lambda kv: -kv[1])))
return metrics
# def validation(
# model: nn.Module, criterion, valid_loader, use_cuda,
# ) -> Dict[str, float]:
# model.eval()
# all_losses, all_predictions, all_targets = [], [], []
# with torch.no_grad():
# for inputs, targets in valid_loader:
# all_targets.append(targets.numpy().copy())
# if use_cuda:
# inputs, targets = inputs.cuda(), targets.cuda()
# outputs = model(inputs)
# loss = criterion(outputs, targets)
# all_losses.append(_reduce_loss(loss).item())
# predictions = torch.sigmoid(outputs)
# res_argsorted = predictions.cpu().numpy().argsort()
# threshold = 0.1
# result = binarize_prediction(predictions.cpu().numpy(), threshold, res_argsorted)
# # indexes = np.arange(N_CLASSES) + 1
# for i in range(result.shape[0]):
# pred_labels = np.nonzero(result[i,])
# pred_origin_labels = np.nonzero(targets.cpu().numpy()[i,])
# print("prediction", i, pred_labels)
# print("original" , i, pred_origin_labels)
# pred_prob = [j for j in predictions.cpu().numpy()[i,]]
# f = open('PredLabels.csv','a')
# f.write('Pred %d: ' % i)
# for s in [j for j in pred_labels]:
# f.write('%s,' % s)
# b = s
# f.write("\n")
# f.write('PrdP %d: ' % i)
# pred_prob_index = []
# for ll in b:
# pred_prob_index.append(pred_prob[ll])
# for j in pred_prob_index:
# f.write('%s,' % j)
# # f.write('%s,' % pred_prob[pred_labels])
# f.write("\n")
# f.write('Orig %d: ' % i)
# for x in [j for j in pred_origin_labels]:
# f.write('%s,' % x)
# b = x
# f.write("\n")
# f.write('OrgP %d: ' % i)
# orig_prob_index = []
# for ll in b:
# orig_prob_index.append(pred_prob[ll])
# for j in orig_prob_index:
# f.write('%s,' % j)
# f.write("\n")
# f.close()
# all_predictions.append(predictions.cpu().numpy())
# all_predictions = np.concatenate(all_predictions)
# all_targets = np.concatenate(all_targets)
# def get_score(y_pred):
# with warnings.catch_warnings():
# warnings.simplefilter('ignore', category=UndefinedMetricWarning)
# return fbeta_score(
# all_targets, y_pred, beta=2, average='samples')
# metrics = {}
# argsorted = all_predictions.argsort(axis=1)
# threshold = 0.1
# # for threshold in [0.05,0.10, 0.15, 0.20]:
# metrics[f'valid_f2_th_{threshold:.2f}'] = get_score(
# binarize_prediction(all_predictions,threshold, argsorted))
# metrics['valid_loss'] = np.mean(all_losses)
# print(' | '.join(f'{k} {v:.3f}' for k, v in sorted(
# metrics.items(), key=lambda kv: -kv[1])))
# return metrics
def validation2(
model: nn.Module, criterion, valid_loader, use_cuda,
) -> Dict[str, float]:
model.eval()
all_losses, all_predictions, all_targets = [], [], []
with torch.no_grad():
for inputs, targets in valid_loader:
all_targets.append(targets.numpy().copy())
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
outputs = model(inputs)
loss = criterion(outputs, targets)
all_losses.append(_reduce_loss(loss).item())
predictions = torch.sigmoid(outputs)
all_predictions.append(predictions.cpu().numpy())
all_predictions = np.concatenate(all_predictions)
all_targets = np.concatenate(all_targets)
def get_score(y_pred):
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=UndefinedMetricWarning)
return fbeta_score(
all_targets, y_pred, beta=2, average='samples')
metrics = {}
argsorted1 = all_predictions[:,:398].argsort(axis = 1)
argsorted2 = all_predictions[:,398:].argsort(axis = 1)
for threshold in [0.05,0.10, 0.15, 0.20,0.25]:
for t in [0.1,0.2,0.3,0.4]:
metrics[f'valid_f2_th_{threshold:.2f}{t:.2f}'] = get_score(
np.hstack([binarize_prediction(all_predictions[:,:398], t, argsorted1,max_labels = 1),
binarize_prediction(all_predictions[:,398:],threshold,argsorted2,max_labels = 11)]))
metrics['valid_loss'] = np.mean(all_losses)
print(' | '.join(f'{k} {v:.3f}' for k, v in sorted(
metrics.items(), key=lambda kv: -kv[1])))
return metrics
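# Note (added for clarity): validation2 grid-searches separate thresholds for the two
# halves of the label space. Columns [:398] are assumed to be the iMet "culture"
# attributes (capped at 1 predicted label via max_labels=1) and columns [398:] the
# "tag" attributes (capped at 11); the best (t, threshold) pair is read off the
# printed metrics.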
def binarize_prediction(probabilities, threshold: float, argsorted=None,
min_labels=1, max_labels=10):
""" Return matrix of 0/1 predictions, same shape as probabilities.
"""
#assert probabilities.shape[1] == N_CLASSES
if argsorted is None:
argsorted = probabilities.argsort(axis=1)
max_mask = _make_mask(argsorted, max_labels)
min_mask = _make_mask(argsorted, min_labels)
prob_mask = probabilities > threshold
return (max_mask & prob_mask) | min_mask
def _make_mask(argsorted, top_n: int):
mask = np.zeros_like(argsorted, dtype=np.uint8)
col_indices = argsorted[:, -top_n:].reshape(-1)
row_indices = [i // top_n for i in range(len(col_indices))]
mask[row_indices, col_indices] = 1
return mask
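# Illustrative worked example (added; not part of the original script) of how
# binarize_prediction combines the three masks, using made-up probabilities:
#   probs = np.array([[0.7, 0.6, 0.05, 0.9]])
#   binarize_prediction(probs, threshold=0.5, max_labels=2)
#   -> argsort of the row is [2, 1, 0, 3]; the top-2 mask keeps classes 0 and 3,
#      the threshold mask keeps classes 0, 1 and 3, the min mask keeps class 3.
#   -> (top-2 & threshold) | min = [[1, 0, 0, 1]]: class 1 clears the threshold
#      but is dropped because at most max_labels predictions per row are allowed.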
def _reduce_loss(loss):
return loss.sum() / loss.shape[0]
class arg():
def __init__(self):
self.run_root = 'model'
self.batch_size = 32
self.step = 1
self.workers = 2
self.lr = 1e-4
self.patience = 4
self.clean = 0
self.n_epochs = 25
self.tta = 4
self.debug = 'store_true'
self.pretrained = 0
self.threshold = 0.1
self.folds = 100
self.count = 0
args = arg()
run_root = Path(args.run_root)
if run_root.exists() and args.clean:
shutil.rmtree(run_root)
run_root.mkdir(exist_ok=True, parents=True)
(run_root / 'params.json').write_text(
json.dumps(vars(args), indent=4, sort_keys=True))
folds = make_folds(n_folds = args.folds)
train_root = DATA_ROOT / 'train'
# import numpy as np
# def process_glove_line(line, dim):
# word = None
# embedding = None
# #try:
# splitLine = line.split()
# word = " ".join(splitLine[:len(splitLine)-dim])
# embedding = np.array([float(val) for val in splitLine[-dim:]])
# # except:
# # print(line)
# return word, embedding
# def load_glove_model(glove_filepath, dim):
# with open(glove_filepath, encoding="utf8" ) as f:
# content = f.readlines()
# model = {}
# for line in content:
# word, embedding = process_glove_line(line, dim)
# if embedding is not None:
# model[word] = embedding
# return model
# from torch.nn.functional import cosine_similarity
# vectors = load_glove_model("../input/glove840b300dtxt/glove.840B.300d.txt", 300)
# vectors[None] = np.zeros(300)
# def EmbeddingPar():
# data = pd.read_csv("../input/imet-2019-fgvc6/labels.csv")
# name = data['attribute_name'].str.split("::",expand = True)
# name1 = name[1].str.split(expand = True)
# name = pd.DataFrame(np.concatenate([name,name1],axis=1))
# embedding = torch.zeros(N_CLASSES,300)
# print(name.shape[0])
# for i in range(name.shape[0]):
# emb = np.zeros((5,300))
# for j in range(5):
# try:
# emb[j] = vectors[name.iloc[i,j]]
# except:
# emb[j] = np.zeros(300)
# emb = torch.Tensor(emb)
# embedding[i] = torch.sum(emb,dim = 0)
# n = np.sum(1-pd.isnull(name.iloc[i]))
# embedding[i]/=n
# Sim = torch.zeros(N_CLASSES,N_CLASSES)
# for i in range( N_CLASSES):
# for j in range( N_CLASSES):
# if (i>=398 and j>=398) or (i<398 and j<398):
# if i!=j:
# Sim[i,j] = cosine_similarity(embedding[i,:].view(1,-1),embedding[j,:].view(1,-1))
# return embedding,Sim
# embed,Sim = EmbeddingPar()
# torch.save(Sim,run_root/'Sim.pt')
# The commented-out code above computes and saves the similarity matrix; here we
# simply torch.load() a precomputed copy instead of recomputing it on the GPU.
Sim = torch.load('../input/modelsim/Sim.pt')
Sim = Sim*torch.FloatTensor((Sim>0.5).numpy())
Sim = Sim.cuda()
class SimilarityLoss(nn.Module):
def __init__(self, sim):
'''
sim : N_class*N_class
'''
super(SimilarityLoss, self).__init__()
self.sim = sim
def forward(self,input,target):
input1 = torch.sigmoid(input.clone())
Smatrix = torch.matmul(input1, self.sim)+1
#print(Smatrix)
P = torch.exp(input)
#print(P)
#print(Smatrix)
loss = -(Smatrix*target*(input-torch.log(P+1))+(1-target)*(-torch.log(1+P)))
return loss
class SimilarityLoss1(nn.Module):
def __init__(self, sim):
'''
sim : N_class*N_class
'''
super(SimilarityLoss1, self).__init__()
self.sim = sim
def forward(self,input,target):
Smatrix = torch.matmul(target, self.sim) + 1
#print(Smatrix)
P = torch.exp(input)
loss = -(Smatrix*target*(input-torch.log(P+1))+(1-target)*(-torch.log(1+P)))
return loss
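# Note (added for clarity): since input - log(exp(input) + 1) = log(sigmoid(input))
# and -log(1 + exp(input)) = log(1 - sigmoid(input)), both losses above reduce to a
# per-class binary cross-entropy whose positive terms are scaled by
# Smatrix = 1 + (sigmoid(input) or target) @ Sim. Missing a label that is
# semantically similar (per the GloVe-based Sim matrix) to other active labels is
# therefore penalised more heavily.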
criterion = SimilarityLoss1(Sim).cuda()
class AvgPool(nn.Module):
def forward(self, x):
return F.avg_pool2d(x, x.shape[2:])
class Net(nn.Module):
def __init__(self, num_classes, dropout=True):
super().__init__()
self.net = cbam_resnet50()
self.net.load_state_dict(torch.load('../input/cbam-resnet50/cbam_resnet50.pth'))
#self.net = nn.Sequential(*list(model0.children())[0])
# print(self.net.output)
if dropout:
# model.add_module('fc', torch.nn.Linear(4096, out_num))
self.net.output = nn.Sequential(
nn.Dropout(),
nn.Linear(self.net.output.in_features, num_classes)
)
else:
self.net.output = nn.Linear(self.net.output.in_features, num_classes)
#self.finetune()
def forward(self, x):
return self.net(x)
def finetune(self):
for para in list(self.net.parameters())[:-2]:
para.requires_grad=False
model = Net(N_CLASSES)
use_cuda = cuda.is_available()
print(use_cuda)
#fresh_params = list(model.fresh_params())
all_params = list(model.parameters())
if use_cuda:
model = model.cuda()
from collections import Counter
def get_count():
df = pd.read_csv('../input/imet-2019-fgvc6/train.csv' if ON_KAGGLE else '/nfsshare/home/white-hearted-orange/data/train.csv')
cls_counts = Counter(cls for classes in df['attribute_ids'].str.split() for cls in classes)
stat = cls_counts.most_common()
stat1 = pd.DataFrame(stat)
stat1.columns=('attribute_id','count')
stat1['attribute_id'].astype('int')
return stat1
count = get_count()
train_kwargs = dict(
args= args,
model = model,
folds = folds,
count = count,
criterion=criterion,
patience=args.patience,
init_optimizer=lambda params, lr: Adam(params, lr),
use_cuda=use_cuda,
)
train(params=all_params, **train_kwargs)
load_model(model, run_root/'best-model.pt')
predict_kwargs = dict(
batch_size=args.batch_size,
tta=args.tta,
use_cuda=use_cuda,
workers=args.workers,
)
test_root = DATA_ROOT / ('test')
ss = pd.read_csv(DATA_ROOT / 'sample_submission.csv')
predict(model, df=ss, root=test_root,
out_path=run_root / 'test.h5',
**predict_kwargs)
def get_classes(item):
return ' '.join(cls for cls, is_present in item.items() if is_present)
sample_submission = pd.read_csv(
DATA_ROOT / 'sample_submission.csv', index_col='id')
df = pd.read_hdf(run_root / 'test.h5', index_col='id')
df = df.reindex(sample_submission.index)
df = mean_df(df)
df[:] = binarize_prediction(df.values, threshold=args.threshold)
df = df.apply(get_classes, axis=1)
df.name = 'attribute_ids'
df.to_csv('submission.csv', header=True)
| 2.265625 | 2 |
PY_OpenCV_Incomplete/a_prelim/1_intro_basics/10_color_spaces/entrypoint.py | CodexLink/StashedCodes | 0 | 12768715 | from cv2 import destroyAllWindows, imread, imshow, imwrite, split, waitKey
# Choose one for these image path.
# IMAGE_PATH = "../0_assets/cmyk_paint.png"
IMAGE_PATH = "../0_assets/RGB_paint.png"
DISPLAY_WINDOW_COLOR_STRING = [
"Blue",
"Green",
"Red",
]
image = imread(IMAGE_PATH)
# Get Color Buffer to Store in BGR Style Format.
B, G, R = split(image)
# Show the original before showing each in color channel.
imshow("Original Image of %s" % IMAGE_PATH, image)
# Iterate over each color channel. Do not wait for a key press after each window so all of them can be compared side by side.
for idx, eachColors in enumerate([B, G, R]):
imshow(
"%s Color Representation | %s" % (DISPLAY_WINDOW_COLOR_STRING[idx], IMAGE_PATH),
eachColors,
)
imwrite(
"rgb_%s_color.png" % DISPLAY_WINDOW_COLOR_STRING[idx].lower(),
eachColors,
)
# Note that each color channel is displayed as a single-channel (grayscale) image.
# ! The lighter a pixel appears, the stronger that channel's contribution to the original color.
# Wait for a key press, then close all windows.
if waitKey(0):
destroyAllWindows()
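# Optional follow-up (illustrative sketch, not executed here): the split channels
# can be recombined with cv2.merge to recover the original BGR image, e.g.:
#   from cv2 import merge
#   restored = merge((B, G, R))
#   imshow("Restored Image | %s" % IMAGE_PATH, restored)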
| 3.265625 | 3 |
src/cake/library/compilers/clang.py | anlongfei/cake | 14 | 12768716 | """The Clang Compiler.
@see: Cake Build System (http://sourceforge.net/projects/cake-build)
@copyright: Copyright (c) 2010 <NAME>, <NAME>.
@license: Licensed under the MIT license.
"""
from cake.library import memoise
from cake.target import getPaths, getPath
from cake.library.compilers import Compiler, makeCommand, CompilerNotFoundError
import cake.path
import cake.filesys
import os.path
import subprocess
def _getClangVersion(clangExe):
"""Returns the Clang version number given an executable.
"""
args = [getPath(clangExe), '--version']
try:
p = subprocess.Popen(
args=args,
stdout=subprocess.PIPE,
)
except EnvironmentError, e:
raise EnvironmentError(
"cake: failed to launch %s: %s\n" % (args[0], str(e))
)
stdoutText = p.stdout.readline()
p.stdout.close()
exitCode = p.wait()
if exitCode != 0:
raise EnvironmentError(
"%s: failed with exit code %i\n" % (args[0], exitCode)
)
# Parse through the line to get the version number. Examples:
# Ubuntu clang version 3.6.2-svn238746-1~exp1 (branches/release_36) (based on LLVM 3.6.2)
# clang version 3.5.0 (217039)
versionText = "version "
index = stdoutText.find(versionText)
if index == -1:
raise EnvironmentError(
"%s: version format invalid: %s\n" % (args[0], stdoutText)
)
versionString = stdoutText[index + len(versionText):]
index = versionString.find('-')
index2 = versionString.find(' ')
if index != -1:
if index2 != -1:
index = min(index, index2)
else:
if index2 != -1:
index = index2
versionString = versionString[:index].strip()
return versionString
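# Worked example of the parsing above (illustrative, matching the sample output
# lines in the comment):
#   "clang version 3.5.0 (217039)" -> versionString == "3.5.0"
#   "Ubuntu clang version 3.6.2-svn238746-1~exp1 (...)" -> versionString == "3.6.2"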
def _makeVersionTuple(versionString):
return tuple(
int(n) for n in versionString.split(".")
)
class ClangCompiler(Compiler):
_name = 'clang'
def __init__(self,
configuration,
clangExe,
llvmArExe,
binPaths):
Compiler.__init__(self, configuration=configuration, binPaths=binPaths)
self._clangExe = clangExe
self._llvmArExe = llvmArExe
self.version = _getClangVersion(clangExe)
self.versionTuple = _makeVersionTuple(self.version)
def _getLanguage(self, suffix, pch=False):
language = self.language
if language is None:
if suffix in self.cSuffixes:
language = 'c'
elif suffix in self.cppSuffixes:
language = 'c++'
return language
@memoise
def _getCommonCompileArgs(self, suffix, shared=False, pch=False):
args = [self._clangExe, '-c', '-MD']
language = self._getLanguage(suffix)
if language:
args.extend(['-x', language])
if self.debugSymbols:
args.append('-g')
if language == 'c++':
args.extend(self.cppFlags)
elif language == 'c':
args.extend(self.cFlags)
for d in self.getDefines():
args.extend(['-D', d])
for p in getPaths(self.getIncludePaths()):
args.extend(['-I', p])
for p in getPaths(self.getForcedIncludes()):
args.extend(['-include', p])
return args
def getObjectCommands(self, target, source, pch, shared):
depPath = self._generateDependencyFile(target)
args = list(self._getCommonCompileArgs(cake.path.extension(source), shared))
args.extend([source, '-o', target])
# TODO: Add support for pch
def compile():
dependencies = self._runProcess(args + ['-MF', depPath], target)
dependencies.extend(self._scanDependencyFile(depPath, target))
return dependencies
canBeCached = True
return compile, args, canBeCached
@memoise
def _getCommonLibraryArgs(self):
args = [self._llvmArExe, 'qcs']
args.extend(self.libraryFlags)
return args
def getLibraryCommand(self, target, sources):
args = list(self._getCommonLibraryArgs())
args.append(target)
args.extend(getPaths(sources))
@makeCommand("lib-scan")
def scan():
return [target], [args[0]] + sources
@makeCommand(args)
def archive():
cake.filesys.remove(self.configuration.abspath(target))
self._runProcess(args, target)
return archive, scan
def getProgramCommands(self, target, sources):
return self._getLinkCommands(target, sources, dll=False)
def getModuleCommands(self, target, sources, importLibrary, installName):
return self._getLinkCommands(target,
sources,
importLibrary,
installName,
dll=True)
@memoise
def _getCommonLinkArgs(self, dll):
args = [self._clangExe]
if dll:
args.append('--shared')
args.extend(self.moduleFlags)
else:
args.extend(self.programFlags)
return args
def _getLinkCommands(self, target, sources, importLibrary=None, installName=None, dll=False):
objects, libraries = self._resolveObjects()
args = list(self._getCommonLinkArgs(dll))
for path in getPaths(self.getLibraryPaths()):
args.append('-L' + path)
args.extend(['-o', target])
args.extend(sources)
args.extend(objects)
for lib in libraries:
if cake.path.baseName(lib) == lib:
args.append('-l' + lib)
else:
args.append(lib)
@makeCommand(args)
def link():
self._runProcess(args, target)
@makeCommand("link-scan")
def scan():
targets = [target]
if dll and importLibrary:
targets.append(importLibrary)
dependencies = [args[0]]
dependencies += sources
dependencies += objects
dependencies += self._scanForLibraries(libraries)
return targets, dependencies
return link, scan
| 2.40625 | 2 |
setup.py | Jonas-Luetolf/todo-list | 1 | 12768717 | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name="Todo-List",
    version='2.0',
description='a todo-list cli',
author='<NAME>',
license='MIT',
url='http://github.com/Jonas-Luetolf/Todo-List',
python_requires='>=3.10',
install_requires=[
'PyYAML (>= 3.12)',
],
package_dir={'': './'},
packages=['table',"todo_list"],
scripts=['todo-list'],
)
| 1.328125 | 1 |
fdk_client/application/models/StoreManagerSerializer.py | kavish-d/fdk-client-python | 0 | 12768718 | """Application Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
from .SellerPhoneNumber import SellerPhoneNumber
class StoreManagerSerializer(BaseSchema):
# Catalog swagger.json
mobile_no = fields.Nested(SellerPhoneNumber, required=False)
email = fields.Str(required=False)
name = fields.Str(required=False)
| 2.03125 | 2 |
nom_predict_sentence.py | Michael-Dyq/SRL-English | 0 | 12768719 | from typing import List, Iterator, Optional
import argparse
import sys
import json
from overrides import overrides
from allennlp.commands.subcommand import Subcommand
from allennlp.common.checks import check_for_gpu, ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.common.util import sanitize
from allennlp.models.archival import load_archive
from allennlp.predictors.predictor import Predictor, JsonDict
from allennlp.data import Instance
from nominal_srl.nominal_srl_predictor import NominalSemanticRoleLabelerPredictor
import predict_utils
desc = "Run nominal SRL predictor on a single sentence."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("archive_file", type=str, help="the archived model to make predictions with")
parser.add_argument('-s', '--input_sentence', type=str, help="the sentence to predict on", required=True)
parser.add_argument('-i', '--nom_indices', nargs='*', type=int, help="the indices of the nominal predicates", required=True)
parser.add_argument("--cuda_device", type=int, default=-1, help="id of GPU to use (if any)")
parser.add_argument('-o', '--output_file', type=str, default="output.txt", help="path to output file")
parser.add_argument('-ta', '--text_annotation', default=False, action='store_true', help="specify whether to produce the output in text annotation form")
args = parser.parse_args()
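# Example invocation (illustrative; the archive path and sentence are made up):
#   python nom_predict_sentence.py /path/to/model.tar.gz \
#       -s "The sale of the company surprised analysts ." \
#       -i 1 -o output.json -ta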
def _get_predictor(args) -> NominalSemanticRoleLabelerPredictor:
check_for_gpu(args.cuda_device)
archive = load_archive(
args.archive_file,
cuda_device=args.cuda_device,
)
return NominalSemanticRoleLabelerPredictor.from_archive(archive, "nombank-semantic-role-labeling")
class _PredictManager:
def __init__(
self,
predictor: NominalSemanticRoleLabelerPredictor,
input_sentence: str,
indices: List[int],
output_file: Optional[str],
write_ta: bool,
) -> None:
self._predictor = predictor
self._indices = indices
self._input_sentence = input_sentence
if output_file is not None:
self._output_file = open(output_file, "w")
else:
self._output_file = None
self._write_ta = write_ta
self.generator = "nominal_srl.nom_predict_sentence"
def create_text_annotation(
self, srl_output: JsonDict
) -> JsonDict:
ta= {"corpusId": "", "id": ""}
tokens = srl_output.pop("words")
text = self._input_sentence
ta["text"] = text
ta["tokens"] = tokens
ta["tokenOffsets"] = predict_utils.create_token_char_offsets(text)
sentence_end_positions = [i+1 for i,x in enumerate(tokens) if x=="."]
sentences = {"generator": self.generator, "score": 1.0, "sentenceEndPositions": sentence_end_positions}
ta["sentences"] = sentences
# Create views.
views = []
views.append(predict_utils.create_sentence_view(tokens))
views.append(predict_utils.create_tokens_view(tokens))
views.append(self.create_srl_nom_view(srl_output.pop("nominals")))
ta["views"] = views
return sanitize(ta)
def create_srl_nom_view(
self, nom_srl_frames
) -> JsonDict:
srl_nom_view = {"viewName": "SRL_NOM_NOMBANK"}
constituents = []
relations = []
for frame in nom_srl_frames:
predicate = frame.pop("nominal")
description = frame.pop("description")
tags = frame.pop("tags")
predicate_idx = frame.pop("predicate_index")
properties = {"SenseNumber": "NA", "predicate": predicate}
if len(predicate_idx)>1:
print('Multiple indices of predicate. Using first.')
constituent = {"label": "Predicate", "score": 1.0, "start": predicate_idx[0], "end": predicate_idx[0]+1, "properties": properties}
predicate_constituent_idx = len(constituents)
constituents.append(constituent)
active_tag = ""
active_tag_start_idx = -1
for tag_idx, tag in enumerate(tags):
if tag in {"O", "B-V"}:
if active_tag != "":
constituent = {"label": active_tag, "score": 1.0, "start": active_tag_start_idx, "end": tag_idx}
relation = {"relationName": active_tag, "srcConstituent": predicate_constituent_idx, "targetConstituent": len(constituents)}
relations.append(relation)
constituents.append(constituent)
active_tag = ""
active_tag_start_idx = -1
continue
if tag[2:] == active_tag:
continue
else:
if active_tag != "":
constituent = {"label": active_tag, "score": 1.0, "start": active_tag_start_idx, "end": tag_idx}
relation = {"relationName": active_tag, "srcConstituent": predicate_constituent_idx, "targetContituent": len(constituents)}
relations.append(relation)
constituents.append(constituent)
active_tag = tag[2:]
active_tag_start_idx = tag_idx
nom_view_data = [{"viewType": "", "viewName": "SRL_NOM_NOMBANK", "generator": self.generator, "score": 1.0, "constituents": constituents, "relations": relations}]
srl_nom_view["viewData"] = nom_view_data
return srl_nom_view
def _print_to_file(
self, prediction: str
) -> None:
if self._output_file is not None:
self._output_file.write(prediction)
self._output_file.close()
else:
print("No output file was specified. Writing to STDOUT instead.")
print(prediction)
def run(self) -> None:
result = self._predictor.predict(self._input_sentence, self._indices)
print('OUTPUT_DICT: ', result)
if self._write_ta:
ta = self.create_text_annotation(result)
self._print_to_file(json.dumps(ta, indent=4))
else:
self._print_to_file(json.dumps(result, indent=4))
predictor = _get_predictor(args)
manager = _PredictManager(
predictor,
args.input_sentence,
args.nom_indices,
args.output_file,
args.text_annotation,
)
manager.run()
| 2.203125 | 2 |
project_templates/fastapi_safir_app/hooks/post_gen_project.py | lsst/templates | 6 | 12768720 | """Post project creation hook for cookiecutter.
This script runs from the root directory of the created project itself. In
addition, cookiecutter interpolates Jinja2 templates to insert any necessary
variables.
This is used to remove the ``manifests`` directory if the project is using
Helm rather than Kustomize.
"""
import shutil
# These variables are interpolated by cookiecutter before this hook is run
uses_helm = True if '{{ cookiecutter.uses_helm }}' == 'True' else False
# Remove the Kustomize configuration if the package will be using Helm.
if uses_helm:
print(f"(post-gen hook) Removing manifests directory")
shutil.rmtree("manifests", ignore_errors=True)
| 1.890625 | 2 |
src/lap_time_calculator.py | ShravanK55/RacingLineGenerator | 3 | 12768721 | """
Module to calculate lap times for a given car and racing line.
"""
from matplotlib import pyplot
from constants import AIR_DENSITY, GRAV_ACCELERATION
class LapTimeCalculator:
"""
Class to calculate lap times for a given car and racing line.
"""
def __init__(self):
"""
Method to initialize the lap time calculator.
"""
pass
def calculate_lap_time(self, racing_line, car, starting_velocity=0.0, draw_graph=True):
"""
Method to calculate the lap time of a car moving along a racing line.
Calculation reference: http://www.jameshakewill.com/Lap_Time_Simulation.pdf (Pages 15 and 18)
Args:
racing_line(RacingLine): Racing line for the car to drive around.
car(Car): Car that will drive on the racing line.
starting_velocity(float): Velocity at which the car starts on the racing line in m/s. Defaults to 0.0.
draw_graph(bool): Whether to plot the graph of exit velocity v/s sector time. Defaults to True.
Returns:
lap_time(float): Lap time taken by the car to drive around the racing line in seconds.
"""
sectors = racing_line.sectors
entry_velocity = starting_velocity
entry_velocities = []
exit_velocities = []
# First pass to find the exit velocities of each sector (to apply acceleration/deceleration).
for sector in sectors:
exit_velocity = self.get_sector_exit_velocity(sector, car, entry_velocity)
max_sector_velocity = self.get_sector_max_velocity(sector, car)
entry_velocity = max_sector_velocity if entry_velocity > max_sector_velocity else entry_velocity
exit_velocity = max_sector_velocity if exit_velocity > max_sector_velocity else exit_velocity
entry_velocities.append(entry_velocity)
exit_velocities.append(exit_velocity)
entry_velocity = exit_velocity
# Second pass to adjust the entry velocities of each sector based on the exit velocities (to apply braking).
last_sector_idx = 0
for current_sector_idx in reversed(range(len(sectors))):
if (exit_velocities[current_sector_idx] > entry_velocities[last_sector_idx]):
max_entry_velocity = self.get_sector_entry_velocity(
sectors[current_sector_idx], car, entry_velocities[last_sector_idx])
if entry_velocities[current_sector_idx] > max_entry_velocity:
entry_velocities[current_sector_idx] = max_entry_velocity
exit_velocities[current_sector_idx] = entry_velocities[last_sector_idx]
last_sector_idx = current_sector_idx
# Calculate the time taken for each sector.
sector_times = []
for idx in range(len(sectors)):
sector_time = 2 * sectors[idx].length / (entry_velocities[idx] + exit_velocities[idx])
sector_times.append(sector_time)
dist_sum = 0
distances = []
for sector in sectors:
dist_sum = dist_sum + sector.length
distances.append(dist_sum)
# Plotting a graph of the track distance v/s the velocity at that point.
if draw_graph:
pyplot.clf()
pyplot.plot(distances, exit_velocities, label="Line")
pyplot.xlabel("Distance (m)")
pyplot.ylabel("Exit Velocity (m/s)")
pyplot.title("Velocity Graph")
pyplot.legend()
pyplot.savefig("velocity_graph.png")
# Calculating the lap time from the sector times.
lap_time = 0.0
for sector_time in sector_times:
lap_time = lap_time + sector_time
return lap_time
def get_sector_max_velocity(self, sector, car):
"""
Gets the maximum velocity a car can take around a sector.
Reference: http://www.jameshakewill.com/Lap_Time_Simulation.pdf (Page 10)
Args:
sector(Sector): Reference to the sector that the car is travelling on.
car(Car): Car that is moving through the sector.
Returns:
velocity(float): The maximum velocity a car can go while travelling in a sector in m/s.
"""
total_force = car.friction * car.mass * GRAV_ACCELERATION
drag = car.drag_coefficient * 0.5 * AIR_DENSITY * car.frontal_area
denom = ((car.mass / sector.radius) ** 2) + (drag ** 2)
denom = denom ** (1.0 / 4.0)
velocity = (total_force ** (1.0 / 2.0)) / denom
velocity = car.max_velocity if velocity > car.max_velocity else velocity
return velocity
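    # Derivation note (added for clarity): at the cornering limit the tyre force
    # budget mu*m*g must cover the friction-circle combination of the centripetal
    # force m*v^2/r and the longitudinal force needed to overcome drag
    # 0.5*rho*Cd*A*v^2:
    #     (mu*m*g)^2 = ((m/r)^2 + (0.5*rho*Cd*A)^2) * v^4
    # Solving for v gives v = sqrt(mu*m*g) / ((m/r)^2 + (0.5*rho*Cd*A)^2)^(1/4),
    # which is what get_sector_max_velocity computes above.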
def get_sector_entry_velocity(self, sector, car, exit_velocity):
"""
Gets the entry velocity of a car through a sector from an exit velocity through braking.
Reference: http://www.jameshakewill.com/Lap_Time_Simulation.pdf (Page 17)
Args:
sector(Sector): Reference to the sector that the car is travelling on.
car(Car): Car that is moving through the sector.
exit_velocity(float): Velocity with which the car exits the sector in m/s.
Returns:
entry_velocity(float): The maximum entry velocity of a car going into a given sector in m/s.
"""
total_force = car.friction * car.mass * GRAV_ACCELERATION
centripetal_force = car.mass * (exit_velocity ** 2) / sector.radius
braking_force = ((total_force ** 2) - (centripetal_force ** 2)) ** (1.0 / 2.0)
drag_force = car.drag_coefficient * 0.5 * AIR_DENSITY * (exit_velocity ** 2) * car.frontal_area
decelerative_force = braking_force + drag_force
delta_velocity = 2 * sector.length * decelerative_force / car.mass
entry_velocity = ((exit_velocity ** 2) + delta_velocity) ** (1.0 / 2.0)
entry_velocity = car.max_velocity if entry_velocity > car.max_velocity else entry_velocity
return entry_velocity
def get_sector_exit_velocity(self, sector, car, entry_velocity):
"""
Gets the exit velocity of a car through a sector from an entry velocity through acceleration.
Reference: http://www.jameshakewill.com/Lap_Time_Simulation.pdf (Page 10)
Args:
sector(Sector): Reference to the sector that the car is travelling on.
car(Car): Car that is moving through the sector.
entry_velocity(float): Velocity with which the car enters the sector in m/s.
Returns:
exit_velocity(float): The maximum exit velocity of a car going out a given sector in m/s.
"""
drag_force = car.drag_coefficient * 0.5 * AIR_DENSITY * (entry_velocity ** 2) * car.frontal_area
power = car.get_engine_power(entry_velocity)
p_velocity = 1.0 if entry_velocity == 0.0 else entry_velocity
acceleration = ((power / p_velocity) - drag_force) / car.mass
max_acceleration = car.get_max_acceleration(entry_velocity)
acceleration = max_acceleration if acceleration > max_acceleration else acceleration
exit_velocity = ((entry_velocity ** 2) + (2 * acceleration * sector.length)) ** (1.0 / 2.0)
exit_velocity = car.max_velocity if exit_velocity > car.max_velocity else exit_velocity
return exit_velocity
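# Usage sketch (illustrative; assumes RacingLine/Sector and Car objects from the
# sibling modules with the attributes referenced above):
#   calculator = LapTimeCalculator()
#   lap_time = calculator.calculate_lap_time(racing_line, car, starting_velocity=0.0)
#   print("Lap time: %.3f s" % lap_time)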
| 3.890625 | 4 |
Python3/664.py | rakhi2001/ecom7 | 854 | 12768722 | __________________________________________________________________________________________________
sample 140 ms submission
from functools import lru_cache
class Solution:
def strangePrinter(self, s: str) -> int:
@lru_cache(None)
def find_min(start, end):
if start >= end: return 1 if end == start else 0
out, k = 1 + find_min(start+1, end), nextIdx[start]
while k != None and k <= end:
temp = find_min(start,k-1) + find_min(k+1,end)
if temp < out: out = temp
k = nextIdx[k]
return out
if not s: return 0
new_s = []
for i, c in enumerate(s[:-1]):
if s[i+1] != c: new_s.append(c)
s = ''.join(new_s + [s[-1]])
nextIdx = [None] * len(s)
lastIdx = {}
for i in range(len(s)-1, -1, -1):
if s[i] in lastIdx: nextIdx[i] = lastIdx[s[i]]
lastIdx[s[i]] = i
return find_min(0, len(s)-1)
__________________________________________________________________________________________________
sample 13080 kb submission
class Solution:
def strangePrinter(self, s: str) -> int:
n = len(s)
if n == 0: return 0
dp = [[0] * n for i in range(n)]
for i in range(n): # length of range is 1
dp[i][i] = 1
for l in range(2, n+1): # length of range [i, j] from 2 to n
for i in range(n - l + 1):
j = i + l - 1
dp[i][j] = 1 + dp[i+1][j] # default choice, print the first letter s[i] and then the rest
for k in range(i+1, j+1):
if s[k] == s[i]: # better choice than default one
dp[i][j] = min(dp[i][j], dp[i][k-1] + (0 if k+1 > j else dp[k+1][j]))
return dp[0][n-1]
# a x x x x x x x a x x x x x
# dp[i][j] = min(dp[i][j], dp[i][k-1] + dp[k+1][j])
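        # Worked example (added for clarity): s = "aba" -> 2. dp[0][2] starts as
        # 1 + dp[1][2] = 3; then k = 2 (s[2] == s[0] == 'a') improves it to
        # dp[0][1] + 0 = 2, i.e. print "aaa" first and overprint the middle 'b'.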
__________________________________________________________________________________________________
| 2.921875 | 3 |
django_input_collection/admin.py | pivotal-energy-solutions/django-input-collection | 0 | 12768723 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.contrib import admin
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.forms import fields_for_model, Textarea
from django.utils.safestring import mark_safe
from django.utils.html import format_html, format_html_join
from . import models
log = logging.getLogger(__name__)
@admin.register(models.Measure, models.CollectionGroup, models.CollectionInstrumentType)
class IdObjectAdmin(admin.ModelAdmin):
list_display = ["id"]
list_filter = ["date_created", "date_modified"]
date_hierarchy = "date_created"
@admin.register(models.CollectionRequest)
class CollectionRequestAdmin(admin.ModelAdmin):
list_display = ["id", "max_instrument_inputs_per_user", "max_instrument_inputs"]
list_filter = ["date_created", "date_modified"]
date_hierarchy = "date_created"
@admin.register(models.CollectionInstrument)
class CollectionInstrumentAdmin(admin.ModelAdmin):
list_display = [
"id",
"collection_request",
"measure",
"segment",
"group",
"type",
"_text_preview",
"_has_description",
"_has_help",
"response_policy",
"_suggested_responses",
]
list_filter = ["date_created", "date_modified", "segment", "group", "type", "response_policy"]
date_hierarchy = "date_created"
search_fields = [
"measure_id",
"segment_id",
"group_id",
"type_id",
"text",
"description",
"help",
]
def _text_preview(self, instance):
max_length = self._text_preview.max_length
excess = len(instance.text[max_length:])
ellipsis = "..." if excess else ""
return instance.text[: max_length - len(ellipsis)] + ellipsis
_text_preview.short_description = """Text"""
_text_preview.max_length = 100
def _has_description(self, instance):
return bool(instance.description)
_has_description.short_description = """Has description"""
_has_description.boolean = True
def _has_help(self, instance):
return bool(instance.help)
_has_help.short_description = """Has help"""
_has_help.boolean = True
def _suggested_responses(self, instance):
queryset = instance.suggested_responses
if queryset:
return "; ".join(queryset.values_list("data", flat=True))
return "(None)"
_suggested_responses.short_description = """Suggested responses"""
@admin.register(models.ResponsePolicy)
class ResponsePolicyAdmin(admin.ModelAdmin):
list_display = ["nickname", "restrict", "multiple", "required", "is_singleton"]
list_filter = [
"date_created",
"date_modified",
"restrict",
"multiple",
"required",
"is_singleton",
]
date_hierarchy = "date_created"
ordering = ("nickname",)
@admin.register(models.SuggestedResponse)
class SuggestedResponseAdmin(admin.ModelAdmin):
list_display = ["id", "data"]
list_display_links = ["data"]
list_filter = ["date_created", "date_modified"]
date_hierarchy = "date_created"
@admin.register(models.get_input_model())
class CollectedInputAdmin(admin.ModelAdmin):
list_display = ["id", "data"]
list_filter = ["date_created", "date_modified"]
search_fields = ["data"]
date_hierarchy = "date_created"
@admin.register(models.Condition)
class ConditionAdmin(admin.ModelAdmin):
list_display = ["_instrument", "_data_getter", "condition_group", "_test_results"]
list_filter = ["date_created", "date_modified"]
search_fields = [
"instrument__text",
"data_getter",
"condition_group__nickname",
"condition_group__cases__nickname",
]
date_hierarchy = "date_created"
readonly_fields = ["_resolver_info", "_test_results"]
fields = list(fields_for_model(models.Condition).keys()) + ["_resolver_info", "_test_results"]
def get_queryset(self, request):
queryset = super(ConditionAdmin, self).get_queryset(request)
self.request = request
return queryset
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name == "data_getter":
kwargs["widget"] = Textarea
return super(ConditionAdmin, self).formfield_for_dbfield(db_field, **kwargs)
def _instrument(self, instance):
statuses = self._do_tests(instance)
def boolean_status(status):
if isinstance(status, Exception):
return None
return status
app_results = "".join(
[
_boolean_icon(boolean_status(status))
for label, status in statuses
if not label.startswith("django_input_collection.")
]
)
builtin_results = "".join(
[
_boolean_icon(boolean_status(status))
for label, status in statuses
if label.startswith("django_input_collection.")
]
)
return format_html(
'<div style="width: 200px;"><a href="{}">{}</a><hr><div>{}{}</div></div>',
*[
instance.pk,
instance.instrument,
mark_safe(app_results + (" • " if app_results else "")),
mark_safe(builtin_results),
],
)
_instrument.short_description = """Instrument"""
def _data_getter(self, instance):
data_getter = mark_safe("<div>{}</div>".format(instance.data_getter))
return data_getter + self._resolver_info(instance)
_data_getter.short_description = """Data Getter"""
def _resolver_info(self, instance):
if instance.pk is None:
return "(Unsaved)"
resolver, data, error = instance.resolve(raise_exception=False)
if resolver:
return format_html(
"<dt>{}</dt><dd>{}{}</dd>",
".".join((resolver.__module__, resolver.__class__.__name__)),
format_html("<code>{}</code>", repr(data)) if not error else "",
format_html(
'<code style="color: orange;">Lookup failed! (Will use collector class default.)<br>{}</code>',
error,
)
if error
else "",
)
return format_html(
'<div style="color: red;">{}</div>',
"NO MATCHING RESOLVER",
)
_resolver_info.short_description = """Resolver"""
def _test_results(self, instance):
statuses = self._do_tests(instance)
for i, (label, status) in enumerate(statuses):
if isinstance(status, Exception):
# log.error('Exception during %s: %s', label, status)
status = format_html('<code style="color: red;">{}</code>', status)
else:
status = _boolean_icon(status)
statuses[i][1] = status
return mark_safe(
"<dl>%s</dl>" % (format_html_join("", "<dt>{}</dt><dd>{}</dd>", statuses),)
)
_test_results.short_description = """Results"""
# Internals
def _do_tests(self, instance):
if instance.pk is None:
return []
from .collection.collectors import registry
collection_request = instance.instrument.collection_request
statuses = []
context = {"user": self.request.user}
for collector_class in sorted(registry.values(), key=lambda c: (c.__module__, c.__name__)):
try:
collector = collector_class(collection_request=collection_request, context=context)
status = collector.is_condition_successful(instance, raise_exception=False)
except Exception as e:
log.exception(e)
status = e
statuses.append(
[
".".join((collector_class.__module__, collector_class.__name__)),
status,
]
)
return statuses
@admin.register(models.ConditionGroup)
class ConditionGroupAdmin(admin.ModelAdmin):
list_display = ["id", "nickname", "requirement_type", "_n_child_groups", "_n_cases"]
list_display_links = ["id", "nickname"]
list_filter = ["date_created", "date_modified"]
date_hierarchy = "date_created"
filter_horizontal = ["child_groups", "cases"]
readonly_fields = ["describe"]
fields = ["describe"] + list(fields_for_model(models.ConditionGroup).keys())
def _n_child_groups(self, instance):
queryset = instance.child_groups.all()
if queryset:
return mark_safe(
"<ul>%s</ul>"
% (format_html_join("\n", "<li>{}</li>", ([obj.describe()] for obj in queryset)),)
)
return "(None)"
_n_child_groups.short_description = """Child groups"""
def _n_cases(self, instance):
queryset = instance.cases.all()
if queryset:
return mark_safe(
"<ul>%s</ul>"
% (format_html_join("\n", "<li>{}</li>", ([obj.describe()] for obj in queryset)),)
)
return "(None)"
_n_cases.short_description = """Cases"""
@admin.register(models.Case)
class CaseAdmin(admin.ModelAdmin):
list_display = ["id", "nickname", "match_type", "match_data"]
list_display_links = ["id", "nickname"]
list_filter = ["date_created", "date_modified"]
date_hierarchy = "date_created"
filter_horizontal = ["conditiongroup"]
readonly_fields = ["describe"]
| 1.679688 | 2 |
plaso/frontend/image_export.py | CNR-ITTIG/plasodfaxp | 1 | 12768724 | # -*- coding: utf-8 -*-
"""The image export front-end."""
import abc
import collections
import logging
import os
from dfvfs.helpers import file_system_searcher
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context
from dfvfs.resolver import resolver as path_spec_resolver
import pysigscan
from plaso.engine import collector
from plaso.engine import knowledge_base
from plaso.engine import utils as engine_utils
from plaso.engine import queue
from plaso.engine import single_process
from plaso.frontend import frontend
from plaso.hashers import manager as hashers_manager
from plaso.lib import specification
from plaso.lib import timelib
from plaso.preprocessors import interface as preprocess_interface
from plaso.preprocessors import manager as preprocess_manager
class FileEntryFilter(object):
"""Class that implements the file entry filter interface."""
@abc.abstractmethod
def Matches(self, file_entry):
"""Compares the file entry against the filter.
Args:
file_entry: The file entry (instance of dfvfs.FileEntry).
Returns:
A boolean indicating if the file entry matches the filter or
None if the filter does not apply
"""
@abc.abstractmethod
def Print(self, output_writer):
"""Prints a human readable version of the filter.
Args:
output_writer: the output writer object (instance of CLIOutputWriter).
"""
class DateTimeFileEntryFilter(FileEntryFilter):
"""Class that implements date time-based file entry filter."""
_DATE_TIME_RANGE_TUPLE = collections.namedtuple(
u'date_time_range_tuple', u'time_value start_timestamp end_timestamp')
_SUPPORTED_TIME_VALUES = frozenset([
u'atime', u'bkup', u'ctime', u'crtime', u'dtime', u'mtime'])
def __init__(self):
"""Initializes the date time-based file entry filter."""
super(DateTimeFileEntryFilter, self).__init__()
self._date_time_ranges = []
def AddDateTimeRange(
self, time_value, start_time_string=None, end_time_string=None):
"""Add a date time filter range.
The time strings are formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the seconds
fraction can be either 3 or 6 digits. The time of day, seconds fraction
and timezone offset are optional. The default timezone is UTC.
Args:
time_value: the time value strting e.g. atime, ctime, crtime, dtime,
bkup and mtime.
start_time_string: the start date and time value string.
end_time_string: the end date and time value string.
Raises:
ValueError: If the filter is badly formed.
"""
if not isinstance(time_value, basestring):
raise ValueError(u'Filter type must be a string.')
if start_time_string is None and end_time_string is None:
raise ValueError(
u'Filter must have either a start or an end date time value.')
time_value_lower = time_value.lower()
if time_value_lower not in self._SUPPORTED_TIME_VALUES:
raise ValueError(
u'Unsupported time value: {0:s}.'.format(time_value))
if start_time_string:
start_timestamp = timelib.Timestamp.CopyFromString(start_time_string)
else:
start_timestamp = None
if end_time_string:
end_timestamp = timelib.Timestamp.CopyFromString(end_time_string)
else:
end_timestamp = None
    # Make sure that the end timestamp occurs after the beginning;
    # if not, the range is rejected as invalid.
if (None not in [start_timestamp, end_timestamp] and
start_timestamp > end_timestamp):
raise ValueError(
u'Invalid date time value start must be earlier than end.')
self._date_time_ranges.append(self._DATE_TIME_RANGE_TUPLE(
time_value_lower, start_timestamp, end_timestamp))
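  # Example (illustrative): keep only files whose modification time falls within
  # May 2015 (UTC):
  #   date_filter = DateTimeFileEntryFilter()
  #   date_filter.AddDateTimeRange(
  #       u'mtime', start_time_string=u'2015-05-01 00:00:00',
  #       end_time_string=u'2015-05-31 23:59:59')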
def Matches(self, file_entry):
"""Compares the file entry against the filter.
Args:
file_entry: The file entry (instance of dfvfs.FileEntry).
Returns:
A boolean indicating if the file entry matches the filter or
None if the filter does not apply.
"""
if not self._date_time_ranges:
return
stat_object = file_entry.GetStat()
for date_time_range in self._date_time_ranges:
time_value = date_time_range.time_value
timestamp = getattr(stat_object, time_value, None)
if timestamp is None:
continue
nano_time_value = u'{0:s}_nano'.format(time_value)
nano_time_value = getattr(stat_object, nano_time_value, None)
timestamp = timelib.Timestamp.FromPosixTime(timestamp)
if nano_time_value is not None:
# Note that the _nano values are in intervals of 100th nano seconds.
nano_time_value, _ = divmod(nano_time_value, 10)
timestamp += nano_time_value
if (date_time_range.start_timestamp is not None and
timestamp < date_time_range.start_timestamp):
return False
if (date_time_range.end_timestamp is not None and
timestamp > date_time_range.end_timestamp):
return False
return True
def Print(self, output_writer):
"""Prints a human readable version of the filter.
Args:
output_writer: the output writer object (instance of CLIOutputWriter).
"""
if self._date_time_ranges:
for date_time_range in self._date_time_ranges:
        if date_time_range.start_timestamp is None:
          end_time_string = timelib.Timestamp.CopyToIsoFormat(
              date_time_range.end_timestamp)
          output_writer.Write(u'\t{0:s} before {1:s}\n'.format(
              date_time_range.time_value, end_time_string))
        elif date_time_range.end_timestamp is None:
          start_time_string = timelib.Timestamp.CopyToIsoFormat(
              date_time_range.start_timestamp)
          output_writer.Write(u'\t{0:s} after {1:s}\n'.format(
              date_time_range.time_value, start_time_string))
else:
start_time_string = timelib.Timestamp.CopyToIsoFormat(
date_time_range.start_timestamp)
end_time_string = timelib.Timestamp.CopyToIsoFormat(
date_time_range.end_timestamp)
output_writer.Write(u'\t{0:s} between {1:s} and {2:s}\n'.format(
date_time_range.time_value, start_time_string,
end_time_string))
class ExtensionsFileEntryFilter(FileEntryFilter):
"""Class that implements extensions-based file entry filter."""
def __init__(self, extensions):
"""Initializes the extensions-based file entry filter.
An extension is defined as "pdf" as in "document.pdf".
Args:
extensions: a list of extension strings.
"""
super(ExtensionsFileEntryFilter, self).__init__()
self._extensions = extensions
def Matches(self, file_entry):
"""Compares the file entry against the filter.
Args:
file_entry: The file entry (instance of dfvfs.FileEntry).
Returns:
A boolean indicating if the file entry matches the filter or
None if the filter does not apply
"""
location = getattr(file_entry.path_spec, u'location', None)
if not location:
return
_, _, extension = location.rpartition(u'.')
if not extension:
return False
return extension.lower() in self._extensions
def Print(self, output_writer):
"""Prints a human readable version of the filter.
Args:
output_writer: the output writer object (instance of CLIOutputWriter).
"""
if self._extensions:
output_writer.Write(u'\textensions: {0:s}\n'.format(
u', '.join(self._extensions)))
class NamesFileEntryFilter(FileEntryFilter):
"""Class that implements names-based file entry filter."""
def __init__(self, names):
"""Initializes the names-based file entry filter.
Args:
names: a list of name strings.
"""
super(NamesFileEntryFilter, self).__init__()
self._names = names
def Matches(self, file_entry):
"""Compares the file entry against the filter.
Args:
file_entry: The file entry (instance of dfvfs.FileEntry).
Returns:
A boolean indicating if the file entry matches the filter.
"""
if not self._names or not file_entry.IsFile():
return
return file_entry.name.lower() in self._names
def Print(self, output_writer):
"""Prints a human readable version of the filter.
Args:
output_writer: the output writer object (instance of CLIOutputWriter).
"""
if self._names:
output_writer.Write(u'\tnames: {0:s}\n'.format(
u', '.join(self._names)))
class SignaturesFileEntryFilter(FileEntryFilter):
"""Class that implements signature-based file entry filter."""
def __init__(self, specification_store, signature_identifiers):
"""Initializes the signature-based file entry filter.
Args:
specification_store: a specification store (instance of
FormatSpecificationStore).
signature_identifiers: a list of signature identifiers.
"""
super(SignaturesFileEntryFilter, self).__init__()
self._signature_identifiers = []
if specification_store:
self._file_scanner = self._GetScanner(
specification_store, signature_identifiers)
else:
self._file_scanner = None
def _GetScanner(self, specification_store, signature_identifiers):
"""Initializes the scanner object form the specification store.
Args:
specification_store: a specification store (instance of
FormatSpecificationStore).
signature_identifiers: a list of signature identifiers.
Returns:
A scanner object (instance of pysigscan.scanner).
"""
scanner_object = pysigscan.scanner()
for format_specification in specification_store.specifications:
if format_specification.identifier not in signature_identifiers:
continue
for signature in format_specification.signatures:
pattern_offset = signature.offset
if pattern_offset is None:
signature_flags = pysigscan.signature_flags.NO_OFFSET
elif pattern_offset < 0:
pattern_offset *= -1
signature_flags = pysigscan.signature_flags.RELATIVE_FROM_END
else:
signature_flags = pysigscan.signature_flags.RELATIVE_FROM_START
scanner_object.add_signature(
signature.identifier, pattern_offset, signature.pattern,
signature_flags)
self._signature_identifiers.append(format_specification.identifier)
return scanner_object
def Matches(self, file_entry):
"""Compares the file entry against the filter.
Args:
file_entry: The file entry (instance of dfvfs.FileEntry).
Returns:
A boolean indicating if the file entry matches the filter or
None if the filter does not apply
"""
if not self._file_scanner or not file_entry.IsFile():
return
file_object = file_entry.GetFileObject()
if not file_object:
return False
try:
scan_state = pysigscan.scan_state()
self._file_scanner.scan_file_object(scan_state, file_object)
finally:
file_object.close()
return scan_state.number_of_scan_results > 0
def Print(self, output_writer):
"""Prints a human readable version of the filter.
Args:
output_writer: the output writer object (instance of CLIOutputWriter).
"""
if self._file_scanner:
output_writer.Write(u'\tsignature identifiers: {0:s}\n'.format(
u', '.join(self._signature_identifiers)))
class FileEntryFilterCollection(object):
"""Class that implements a collection of file entry filters."""
def __init__(self):
"""Initializes the file entry filter collection object."""
super(FileEntryFilterCollection, self).__init__()
self._filters = []
def AddFilter(self, file_entry_filter):
"""Adds a file entry filter to the collection.
Args:
file_entry_filter: a file entry filter (instance of FileEntryFilter).
"""
self._filters.append(file_entry_filter)
def HasFilters(self):
"""Determines if filters are defined.
Returns:
A boolean value indicating if filters are defined.
"""
return bool(self._filters)
def Matches(self, file_entry):
"""Compares the file entry against the filter collection.
Args:
file_entry: The file entry (instance of dfvfs.FileEntry).
Returns:
A boolean indicating if the file entry matches one of the filters.
If no filters are provided or applicable the result will be True.
"""
if not self._filters:
return True
results = []
for file_entry_filter in self._filters:
result = file_entry_filter.Matches(file_entry)
results.append(result)
return True in results or False not in results
def Print(self, output_writer):
"""Prints a human readable version of the filter.
Args:
output_writer: the output writer object (instance of CLIOutputWriter).
"""
if self._filters:
output_writer.Write(u'Filters:\n')
for file_entry_filter in self._filters:
file_entry_filter.Print(output_writer)
class FileSaver(object):
"""Class that is used to save files."""
_BAD_CHARACTERS = frozenset([
u'\x00', u'\x01', u'\x02', u'\x03', u'\x04', u'\x05', u'\x06', u'\x07',
u'\x08', u'\x09', u'\x0a', u'\x0b', u'\x0c', u'\x0d', u'\x0e', u'\x0f',
u'\x10', u'\x11', u'\x12', u'\x13', u'\x14', u'\x15', u'\x16', u'\x17',
u'\x18', u'\x19', u'\x1a', u'\x1b', u'\x1c', u'\x1d', u'\x1e', u'\x1f',
os.path.sep, u'!', u'$', u'%', u'&', u'*', u'+', u':', u';', u'<', u'>',
u'?', u'@', u'|', u'~', u'\x7f'])
_COPY_BUFFER_SIZE = 32768
_READ_BUFFER_SIZE = 4096
def __init__(self, skip_duplicates=False):
"""Initializes the file saver object.
Args:
skip_duplicates: boolean value to indicate if duplicate file content
should be skipped.
"""
super(FileSaver, self).__init__()
self._digest_hashes = {}
self._skip_duplicates = skip_duplicates
def _CalculateHash(self, file_object):
"""Calculates a MD5 hash of the contents of given file object.
Args:
file_object: a file-like object.
Returns:
A hexadecimal string of the MD5 hash.
"""
hasher_object = hashers_manager.HashersManager.GetHasherObject(u'sha256')
file_object.seek(0, os.SEEK_SET)
data = file_object.read(self._READ_BUFFER_SIZE)
while data:
hasher_object.Update(data)
data = file_object.read(self._READ_BUFFER_SIZE)
return hasher_object.GetStringDigest()
def _CopyFileObject(self, input_file_object, output_path):
"""Copies the content of a file-like object to a file.
Args:
input_file_object: the input file-like object.
output_path: the path of the output file.
"""
with open(output_path, 'wb') as output_file_object:
input_file_object.seek(0, os.SEEK_SET)
data = input_file_object.read(self._COPY_BUFFER_SIZE)
while data:
output_file_object.write(data)
data = input_file_object.read(self._COPY_BUFFER_SIZE)
def WriteFile(self, source_path_spec, destination_path, filename_prefix=u''):
"""Writes the contents of the source to the destination file.
Args:
source_path_spec: the path specification of the source file.
destination_path: the path of the destination file.
filename_prefix: optional filename prefix.
"""
file_entry = path_spec_resolver.Resolver.OpenFileEntry(source_path_spec)
if not file_entry.IsFile():
return
file_system = file_entry.GetFileSystem()
path = getattr(source_path_spec, u'location', None)
path_segments = file_system.SplitPath(path)
# Sanitize each path segment.
for index, path_segment in enumerate(path_segments):
path_segments[index] = u''.join([
character if character not in self._BAD_CHARACTERS else u'_'
for character in path_segment])
target_directory = os.path.join(destination_path, *path_segments[:-1])
if filename_prefix:
target_filename = u'{0:s}_{1:s}'.format(
filename_prefix, path_segments[-1])
else:
target_filename = path_segments[-1]
if not target_directory:
target_directory = destination_path
elif not os.path.isdir(target_directory):
os.makedirs(target_directory)
if self._skip_duplicates and file_entry.IsFile():
file_object = file_entry.GetFileObject()
if not file_object:
return
try:
digest_hash = self._CalculateHash(file_object)
except IOError as exception:
logging.error((
u'[skipping] unable to calculate MD5 of file: {0:s} '
u'with error: {1:s}').format(path, exception))
finally:
file_object.close()
stat = file_entry.GetStat()
inode = getattr(stat, u'ino', 0)
if inode in self._digest_hashes:
if digest_hash in self._digest_hashes[inode]:
return
self._digest_hashes[inode].append(digest_hash)
else:
self._digest_hashes[inode] = [digest_hash]
file_object = file_entry.GetFileObject()
if not file_object:
return
try:
target_path = os.path.join(target_directory, target_filename)
self._CopyFileObject(file_object, target_path)
except IOError as exception:
logging.error(
u'[skipping] unable to export file: {0:s} with error: {1:s}'.format(
path, exception))
finally:
file_object.close()
class ImageExtractorQueueConsumer(queue.ItemQueueConsumer):
"""Class that implements an image extractor queue consumer."""
def __init__(
self, process_queue, file_saver, destination_path, filter_collection):
"""Initializes the image extractor queue consumer.
Args:
process_queue: the process queue (instance of Queue).
file_saver: the file saver object (instance of FileSaver)
destination_path: the path where the extracted files should be stored.
filter_collection: the file entry filter collection (instance of
FileEntryFilterCollection)
"""
super(ImageExtractorQueueConsumer, self).__init__(process_queue)
self._destination_path = destination_path
self._file_saver = file_saver
self._filter_collection = filter_collection
def _ConsumeItem(self, path_spec, **unused_kwargs):
"""Consumes an item callback for ConsumeItems.
Args:
path_spec: a path specification (instance of dfvfs.PathSpec).
"""
file_entry = path_spec_resolver.Resolver.OpenFileEntry(path_spec)
if not self._filter_collection.Matches(file_entry):
return
vss_store_number = getattr(path_spec, u'vss_store_number', None)
if vss_store_number is not None:
filename_prefix = u'vss_{0:d}'.format(vss_store_number + 1)
else:
filename_prefix = u''
self._file_saver.WriteFile(
path_spec, self._destination_path, filename_prefix=filename_prefix)
class ImageExportFrontend(frontend.Frontend):
"""Class that implements the image export front-end."""
def __init__(self):
"""Initializes the front-end object."""
super(ImageExportFrontend, self).__init__()
self._filter_collection = FileEntryFilterCollection()
self._knowledge_base = None
self._resolver_context = context.Context()
# TODO: merge with collector and/or engine.
def _Extract(
self, source_path_specs, destination_path, remove_duplicates=True):
"""Extracts files.
Args:
source_path_specs: list of path specifications (instances of
dfvfs.PathSpec) to process.
destination_path: the path where the extracted files should be stored.
remove_duplicates: optional boolean value to indicate if files with
duplicate content should be removed. The default
is True.
"""
if not os.path.isdir(destination_path):
os.makedirs(destination_path)
input_queue = single_process.SingleProcessQueue()
image_collector = collector.Collector(input_queue)
image_collector.Collect(source_path_specs)
file_saver = FileSaver(skip_duplicates=remove_duplicates)
input_queue_consumer = ImageExtractorQueueConsumer(
input_queue, file_saver, destination_path, self._filter_collection)
input_queue_consumer.ConsumeItems()
def _ExtractFile(self, file_saver, path_spec, destination_path):
"""Extracts a file.
Args:
file_saver: the file saver object (instance of FileSaver)
path_spec: a path specification (instance of dfvfs.PathSpec).
destination_path: the path where the extracted files should be stored.
"""
file_entry = path_spec_resolver.Resolver.OpenFileEntry(path_spec)
if not self._filter_collection.Matches(file_entry):
return
vss_store_number = getattr(path_spec, u'vss_store_number', None)
if vss_store_number is not None:
filename_prefix = u'vss_{0:d}'.format(vss_store_number + 1)
else:
filename_prefix = u''
file_saver.WriteFile(
path_spec, destination_path, filename_prefix=filename_prefix)
# TODO: merge with collector and/or engine.
def _ExtractWithFilter(
self, source_path_specs, destination_path, filter_file_path,
remove_duplicates=True):
"""Extracts files using a filter expression.
This method runs the file extraction process on the image and
potentially on every VSS if that is wanted.
Args:
source_path_specs: list of path specifications (instances of
dfvfs.PathSpec) to process.
destination_path: The path where the extracted files should be stored.
filter_file_path: The path of the file that contains the filter
expressions.
remove_duplicates: optional boolean value to indicate if files with
duplicate content should be removed. The default
is True.
"""
for source_path_spec in source_path_specs:
file_system, mount_point = self._GetSourceFileSystem(
source_path_spec, resolver_context=self._resolver_context)
if self._knowledge_base is None:
self._Preprocess(file_system, mount_point)
if not os.path.isdir(destination_path):
os.makedirs(destination_path)
find_specs = engine_utils.BuildFindSpecsFromFile(
filter_file_path, pre_obj=self._knowledge_base.pre_obj)
# Save the regular files.
file_saver = FileSaver(skip_duplicates=remove_duplicates)
searcher = file_system_searcher.FileSystemSearcher(
file_system, mount_point)
for path_spec in searcher.Find(find_specs=find_specs):
self._ExtractFile(file_saver, path_spec, destination_path)
file_system.Close()
# TODO: refactor, this is a duplicate of the function in engine.
def _GetSourceFileSystem(self, source_path_spec, resolver_context=None):
"""Retrieves the file system of the source.
Args:
source_path_spec: The source path specification (instance of
dfvfs.PathSpec) of the file system.
resolver_context: Optional resolver context (instance of dfvfs.Context).
The default is None which will use the built in context
which is not multi process safe. Note that every thread
or process must have its own resolver context.
Returns:
A tuple of the file system (instance of dfvfs.FileSystem) and
the mount point path specification (instance of path.PathSpec).
Raises:
RuntimeError: if source path specification is not set.
"""
if not source_path_spec:
raise RuntimeError(u'Missing source.')
file_system = path_spec_resolver.Resolver.OpenFileSystem(
source_path_spec, resolver_context=resolver_context)
type_indicator = source_path_spec.type_indicator
if path_spec_factory.Factory.IsSystemLevelTypeIndicator(type_indicator):
mount_point = source_path_spec
else:
mount_point = source_path_spec.parent
return file_system, mount_point
def _Preprocess(self, file_system, mount_point):
"""Preprocesses the image.
Args:
file_system: the file system object (instance of vfs.FileSystem)
to be preprocessed.
mount_point: the mount point path specification (instance of
path.PathSpec) that refers to the base location
of the file system.
"""
if self._knowledge_base is not None:
return
self._knowledge_base = knowledge_base.KnowledgeBase()
logging.info(u'Guessing OS')
searcher = file_system_searcher.FileSystemSearcher(file_system, mount_point)
platform = preprocess_interface.GuessOS(searcher)
logging.info(u'OS: {0:s}'.format(platform))
logging.info(u'Running preprocess.')
preprocess_manager.PreprocessPluginsManager.RunPlugins(
platform, file_system, mount_point, self._knowledge_base)
logging.info(u'Preprocess done, saving files from image.')
def ReadSpecificationFile(self, path):
"""Reads the format specification file.
Args:
path: the path of the format specification file.
Returns:
The format specification store (instance of FormatSpecificationStore).
"""
specification_store = specification.FormatSpecificationStore()
with open(path, 'rb') as file_object:
for line in file_object.readlines():
line = line.strip()
if not line or line.startswith(b'#'):
continue
try:
identifier, offset, pattern = line.split()
except ValueError:
logging.error(u'[skipping] invalid line: {0:s}'.format(
line.decode(u'utf-8')))
continue
try:
offset = int(offset, 10)
except ValueError:
logging.error(u'[skipping] invalid offset in line: {0:s}'.format(
line.decode(u'utf-8')))
continue
try:
pattern = pattern.decode(u'string_escape')
# ValueError is raised e.g. when the patterns contains "\xg1".
except ValueError:
logging.error(
u'[skipping] invalid pattern in line: {0:s}'.format(
line.decode(u'utf-8')))
continue
format_specification = specification.FormatSpecification(identifier)
format_specification.AddNewSignature(pattern, offset=offset)
specification_store.AddSpecification(format_specification)
return specification_store
def HasFilters(self):
"""Determines if filters are defined.
Returns:
A boolean value indicating if filters are defined.
"""
return self._filter_collection.HasFilters()
def ParseDateFilters(self, date_filters):
"""Parses the date filters.
A date filter string is formatted as 3 comma separated values:
time value, start date and time (string) and end date and time (string)
The time value and either a start or end date and time is required.
The date and time strings are formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the seconds
fraction can be either 3 or 6 digits. The time of day, seconds fraction
and timezone offset are optional. The default timezone is UTC.
Args:
date_filters: a list of strings containing date filter definitions.
Raises:
ValueError: if the date filter definitions are invalid.
"""
if not date_filters:
return
file_entry_filter = DateTimeFileEntryFilter()
for date_filter in date_filters:
date_filter_pieces = date_filter.split(u',')
if len(date_filter_pieces) != 3:
raise ValueError(
u'Badly formed date filter: {0:s}'.format(date_filter))
time_value, start_time_string, end_time_string = date_filter_pieces
time_value = time_value.strip()
start_time_string = start_time_string.strip()
end_time_string = end_time_string.strip()
try:
file_entry_filter.AddDateTimeRange(
time_value, start_time_string=start_time_string,
end_time_string=end_time_string)
except ValueError:
raise ValueError(
u'Badly formed date filter: {0:s}'.format(date_filter))
self._filter_collection.AddFilter(file_entry_filter)
def ParseExtensionsString(self, extensions_string):
"""Parses the extensions string.
Args:
extensions_string: a string with comma separated extensions to filter.
"""
if not extensions_string:
return
extensions_string = extensions_string.lower()
extensions = [
extension.strip() for extension in extensions_string.split(u',')]
file_entry_filter = ExtensionsFileEntryFilter(extensions)
self._filter_collection.AddFilter(file_entry_filter)
def ParseNamesString(self, names_string):
"""Parses the name string.
Args:
names_string: a string with comma separated filenames to filter.
"""
if not names_string:
return
names_string = names_string.lower()
names = [name.strip() for name in names_string.split(u',')]
file_entry_filter = NamesFileEntryFilter(names)
self._filter_collection.AddFilter(file_entry_filter)
def ParseSignatureIdentifiers(self, data_location, signature_identifiers):
"""Parses the signature identifiers.
Args:
data_location: the location of the format specification file
(signatures.conf).
signature_identifiers: a string with comma separated signature
identifiers.
Raises:
IOError: if the format specification file could not be read from
the specified data location.
ValueError: if no data location was specified.
"""
if not signature_identifiers:
return
if not data_location:
raise ValueError(u'Missing data location.')
path = os.path.join(data_location, u'signatures.conf')
if not os.path.exists(path):
raise IOError(
u'No such format specification file: {0:s}'.format(path))
try:
specification_store = self.ReadSpecificationFile(path)
except IOError as exception:
raise IOError((
u'Unable to read format specification file: {0:s} with error: '
u'{1:s}').format(path, exception))
signature_identifiers = signature_identifiers.lower()
signature_identifiers = [
identifier.strip() for identifier in signature_identifiers.split(u',')]
file_entry_filter = SignaturesFileEntryFilter(
specification_store, signature_identifiers)
self._filter_collection.AddFilter(file_entry_filter)
def PrintFilterCollection(self, output_writer):
"""Prints the filter collection.
Args:
output_writer: the output writer (instance of OutputWriter).
"""
self._filter_collection.Print(output_writer)
def ProcessSources(
self, source_path_specs, destination_path, filter_file=None,
remove_duplicates=True):
"""Processes the sources.
Args:
source_path_specs: list of path specifications (instances of
dfvfs.PathSpec) to process.
destination_path: the path where the extracted files should be stored.
      filter_file: optional name of the filter file.
remove_duplicates: optional boolean value to indicate if files with
duplicate content should be removed. The default
is True.
"""
if filter_file:
self._ExtractWithFilter(
source_path_specs, destination_path, filter_file,
remove_duplicates=remove_duplicates)
else:
self._Extract(
source_path_specs, destination_path,
remove_duplicates=remove_duplicates)
| 2.421875 | 2 |
accelerator/models/member_profile.py | masschallenge/django-accelerator | 6 | 12768725 | from accelerator.managers.member_profile_manager import MemberProfileManager
from accelerator.models import CoreProfile
class MemberProfile(CoreProfile):
user_type = 'member'
default_page = "member_homepage"
objects = MemberProfileManager()
class Meta:
db_table = 'accelerator_memberprofile'
| 2.046875 | 2 |
autoIncrementor.py | OrdiNeu/AutoIncrementor | 0 | 12768726 | # OrdiNeu's auto incrementor for Dugnutt
import keyboard
import wx
# Globals
Filename = "test.txt"
Format = "Number of times pressed: {}"
count = 0
hotkey = "ctrl+alt+z"
dehotkey = "ctrl+alt+x"
error = ""
refresh = None
# Callback to automatically write in the text file
def changeCount(amount, auto):
global count
global error
if (auto):
count += amount
try:
with open(Filename, 'w') as f:
f.write(Format.format(count))
error = ""
if refresh is not None:
refresh()
except Exception as e:
error = str(e)
def increment(autoIncrement=True):
changeCount(+1, autoIncrement)
def decrement(autoDecrement=True):
changeCount(-1, autoDecrement)
# Setup the Keyboard
keyboard.add_hotkey(hotkey, increment)
keyboard.add_hotkey(dehotkey, decrement)
increment(autoIncrement=False)
# Class for the UI
class IncrementorUI(wx.Frame):
def __init__(self, *args, **kwargs):
super(IncrementorUI, self).__init__(*args, **kwargs)
self.InitUI()
def InitUI(self):
self.panel = wx.Panel(self)
vbox = wx.BoxSizer(wx.VERTICAL)
# Filename input
filenamePanel = wx.Panel(self.panel)
f_hbox = wx.BoxSizer(wx.HORIZONTAL)
inputLabel = wx.StaticText(filenamePanel, label="Filename: ")
self.input = wx.StaticText(filenamePanel, label=Filename)
self.fileNameSelector = wx.Button(filenamePanel, label="Select")
self.fileNameSelector.Bind(wx.EVT_BUTTON, self.OpenFileDialog)
f_hbox.Add(inputLabel, wx.LEFT)
f_hbox.Add(self.input, wx.EXPAND)
f_hbox.Add(self.fileNameSelector, wx.RIGHT)
filenamePanel.SetSizer(f_hbox)
# Format input
formatPanel = wx.Panel(self.panel)
fo_hbox = wx.BoxSizer(wx.HORIZONTAL)
formatLabel = wx.StaticText(formatPanel, label="Format: ")
self.format = wx.TextCtrl(formatPanel, value=Format)
self.format.Bind(wx.EVT_TEXT, self.SetFormat)
fo_hbox.Add(formatLabel, wx.LEFT)
fo_hbox.Add(self.format, wx.EXPAND)
formatPanel.SetSizer(fo_hbox)
# Count input
countPanel = wx.Panel(self.panel)
co_hbox = wx.BoxSizer(wx.HORIZONTAL)
countLabel = wx.StaticText(countPanel, label="Count: ")
self.count = wx.SpinCtrl(countPanel, value=str(count), min=-99999999, max=99999999)
self.count.Bind(wx.EVT_TEXT, self.SetCount)
co_hbox.Add(countLabel, wx.LEFT)
co_hbox.Add(self.count, wx.EXPAND)
countPanel.SetSizer(co_hbox)
# Hotkey input
hotkeyPanel = wx.Panel(self.panel)
hk_hbox = wx.BoxSizer(wx.HORIZONTAL)
hotkeyLabel = wx.StaticText(hotkeyPanel, label="+1 Hotkey: ")
self.hotkey = wx.TextCtrl(hotkeyPanel, value=hotkey)
self.hotkeySelector = wx.Button(hotkeyPanel, label="Set hotkey")
self.hotkeySelector.Bind(wx.EVT_BUTTON, self.StartListen)
hk_hbox.Add(hotkeyLabel, wx.LEFT)
hk_hbox.Add(self.hotkey, wx.EXPAND)
hk_hbox.Add(self.hotkeySelector, wx.RIGHT)
hotkeyPanel.SetSizer(hk_hbox)
        # Decrement hotkey input
dehotkeyPanel = wx.Panel(self.panel)
dehk_hbox = wx.BoxSizer(wx.HORIZONTAL)
dehotkeyLabel = wx.StaticText(dehotkeyPanel, label="-1 Hotkey: ")
self.dehotkey = wx.TextCtrl(dehotkeyPanel, value=dehotkey)
self.dehotkeySelector = wx.Button(dehotkeyPanel, label="Set hotkey")
self.dehotkeySelector.Bind(wx.EVT_BUTTON, self.StartDecrementListen)
dehk_hbox.Add(dehotkeyLabel, wx.LEFT)
dehk_hbox.Add(self.dehotkey, wx.EXPAND)
dehk_hbox.Add(self.dehotkeySelector, wx.RIGHT)
dehotkeyPanel.SetSizer(dehk_hbox)
# Error input
self.ErrorLabel = wx.StaticText(self.panel, label=error)
# Outer panel
vbox.Add(filenamePanel)
vbox.Add(formatPanel)
vbox.Add(countPanel)
vbox.Add(hotkeyPanel)
vbox.Add(dehotkeyPanel)
vbox.Add(self.ErrorLabel)
self.panel.SetSizer(vbox)
def OpenFileDialog(self, e):
global Filename
with wx.FileDialog(self, 'Select File', wildcard="Text file(*.txt)|*.txt", style=wx.FD_SAVE) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
Filename = fileDialog.GetPath()
self.RefreshUI()
def RefreshUI(self):
self.count.Unbind(wx.EVT_TEXT)
self.format.Unbind(wx.EVT_TEXT)
self.input.SetLabel(Filename)
self.count.SetValue(count)
self.ErrorLabel.SetLabel(error)
self.count.Bind(wx.EVT_TEXT, self.SetCount)
self.format.Bind(wx.EVT_TEXT, self.SetFormat)
def SetFormat(self, e):
global Format
Format = e.GetString()
self.RefreshUI()
increment(False)
def SetCount(self, e):
global count
count = int(e.GetString())
self.RefreshUI()
increment(False)
def StartListen(self, e):
self.hotkey.SetValue("Listening for keypress...")
self.keyboardHook = keyboard.hook(self.EndListen)
def EndListen(self, e):
global hotkey
keyboard.remove_hotkey(hotkey)
# Only remove the listen status if it isn't a modifier
hotkey = keyboard.get_hotkey_name()
keyboard.add_hotkey(hotkey, increment)
if not keyboard.is_modifier(e.name):
keyboard.unhook(self.keyboardHook)
self.hotkey.SetValue(hotkey)
self.RefreshUI()
increment(False)
def StartDecrementListen(self, e):
self.dehotkey.SetValue("Listening for keypress...")
self.deKeyboardHook = keyboard.hook(self.EndDecrementListen)
def EndDecrementListen(self, e):
global dehotkey
keyboard.remove_hotkey(dehotkey)
# Only remove the listen status if it isn't a modifier
dehotkey = keyboard.get_hotkey_name()
keyboard.add_hotkey(dehotkey, decrement)
if not keyboard.is_modifier(e.name):
keyboard.unhook(self.deKeyboardHook)
self.dehotkey.SetValue(dehotkey)
self.RefreshUI()
decrement(False)
app = wx.App()
frame = IncrementorUI(None, title="OrdiNeu's Auto-incrementor for Dugnutt", style=wx.CLOSE_BOX | wx.CAPTION | wx.RESIZE_BORDER)
refresh = frame.RefreshUI
frame.Show()
app.MainLoop()
| 2.890625 | 3 |
make_train_data.py | KentaroAOKI/imagenet_tools | 0 | 12768727 | import glob
import os
import random
import cv2
def crop_image(src_image_path, dst_image_path):
output_side_length=256
img = cv2.imread(src_image_path)
height, width, depth = img.shape
new_height = output_side_length
new_width = output_side_length
if height > width:
new_height = int(output_side_length * height / width)
else:
new_width = int(output_side_length * width / height)
resized_img = cv2.resize(img, (new_width, new_height))
height_offset = int((new_height - output_side_length) / 2)
width_offset = int((new_width - output_side_length) / 2)
cropped_img = resized_img[height_offset : height_offset + output_side_length, width_offset : width_offset + output_side_length]
cv2.imwrite(dst_image_path, cropped_img)
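# For example, a 1024x768 source image is resized to 341x256 (shorter side scaled
# to 256) and then center-cropped to the middle 256x256 region before being saved.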
def main():
# parameters
from_dir = 'download_images'
to_dir = 'crop_images'
split_ratio = 0.75
# make directory
if (os.path.exists(to_dir) == False):
os.makedirs(to_dir)
os.makedirs(os.path.join(to_dir, 'train'))
os.makedirs(os.path.join(to_dir, 'test'))
# make list for train
train_list = open('train.txt','w')
test_list = open('test.txt','w')
label_list = open('labels.txt','w')
class_no=0
image_count = 0
labels = glob.glob('{}/*'.format(from_dir))
for label in labels:
label_name = os.path.basename(label)
print(label_name)
os.makedirs(os.path.join(to_dir, 'train', label_name))
os.makedirs(os.path.join(to_dir, 'test', label_name))
images = glob.glob('{}/*.jpeg'.format(label))
# write label for train
label_list.write(label_name + '\n')
length = len(images)
split_count = 0
split_number = length * split_ratio
random.shuffle(images)
for image in images:
image_name = os.path.basename(image)
if split_count < split_number:
to_train_image = os.path.join(to_dir, 'train', label_name, image_name)
print('{} > {}'.format(image, to_train_image))
crop_image(image, to_train_image)
# write image path for train
train_list.write('{} {}\n'.format(to_train_image, class_no))
else:
to_test_image = os.path.join(to_dir, 'test', label_name, image_name)
print('{} > {}'.format(image, to_test_image))
crop_image(image, to_test_image)
# write image path for test
test_list.write('{} {}\n'.format(to_test_image, class_no))
image_count = image_count + 1
split_count = split_count + 1
class_no += 1
train_list.close()
test_list.close()
label_list.close()
if __name__ == '__main__':
main()
| 2.6875 | 3 |
tensorflow/python/ipu/tests/dropout_test.py | DebeshJha/tensorflow-1 | 2 | 12768728 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import itertools
import numpy as np
from absl.testing import parameterized
from tensorflow.python.client import session as sl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python import ipu
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
from tensorflow.python.training import gradient_descent
# Error threshold for forward pass test.
THRESHOLD = 0.03
# Dimensions of the random data tensor.
DIMS = (1024, 1024, 4)
# Initialise with a random seed.
SEED = np.random.randint(np.iinfo(np.int32).max, size=[2], dtype=np.int32)
# Number of times to verify output for a given seed.
SEED_TEST_REPETITIONS = 6
def build_test_cases(exhaustive=False):
# Dropout rate(s) to test.
rate = [0.1, 0.5, 0.9] if exhaustive else [0.5]
# User specified and non-specified cases.
seed = [SEED, None]
# Shape of the dropout.
# Note that shaping the dropout such that a very large portion of
# the input weights are dropped will fail the test criteria, as expected.
noise_shape = [[], [DIMS[0], DIMS[1], 1]]
if exhaustive:
noise_shape.append([DIMS[0], 1, DIMS[2]])
noise_shape.append([1, DIMS[1], DIMS[2]])
# Get the cartesian product (can get very large).
prod = itertools.product(rate, seed, noise_shape)
test_cases = []
for n, perm in enumerate(prod):
test = {
'testcase_name': ' Case: %3d' % n,
'rate': perm[0],
'seed': perm[1],
'noise_shape': perm[2]
}
test_cases.append(test)
return test_cases
# Default is not to test every combination.
TEST_CASES = build_test_cases()
class PopnnRandomDropoutTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
@staticmethod
def _ipu_dropout(w, rate, seed, noise_shape):
output = ipu.ops.rand_ops.dropout(w,
rate=rate,
seed=seed,
noise_shape=noise_shape)
return [output]
@staticmethod
def _setup_test(f):
with ops.device('cpu'):
input_data = array_ops.placeholder(np.float32, DIMS)
with ipu.scopes.ipu_scope("/device:IPU:0"):
r = ipu.ipu_compiler.compile(f, inputs=[input_data])
cfg = ipu.utils.create_ipu_config()
cfg = ipu.utils.set_ipu_model_options(cfg, compile_ipu_code=False)
ipu.utils.configure_ipu_system(cfg)
return r, input_data
@test_util.deprecated_graph_mode_only
def testInvalidNoiseShape(self):
in_data = np.random.rand(16, 8, 16)
print(in_data.shape)
seed = np.array([12, 34], dtype=np.int32)
with sl.Session() as sess:
with self.assertRaisesRegex(ValueError, "must equal the rank of x."):
def _wrong_length(w):
return self._ipu_dropout(w, 0.5, seed, [1])
r, input_data = self._setup_test(_wrong_length)
_ = sess.run(r, {input_data: in_data})
with self.assertRaisesRegex(ValueError, "Dimension mismatch"):
def _wrong_dims(w):
return self._ipu_dropout(w, 0.5, seed, [8, 1, 16])
r, input_data = self._setup_test(_wrong_dims)
_ = sess.run(r, {input_data: in_data})
@parameterized.named_parameters(*TEST_CASES)
@test_util.deprecated_graph_mode_only
def testDropout(self, rate, seed, noise_shape):
def _run_dropout(w):
return self._ipu_dropout(w, rate, seed, noise_shape)
r, input_data = self._setup_test(_run_dropout)
with sl.Session() as sess:
in_data = np.random.rand(*DIMS)
result = sess.run(r, {input_data: in_data})
percent_kept = np.count_nonzero(result) / np.count_nonzero(in_data)
      # There's a considerable amount of randomness, so we use a reasonably
      # large dimensionality of test data to make sure the error stays small.
is_roughly_close = abs(percent_kept - (1.0 - rate))
# The observed error is actually a lot less than this (>1%) but we don't
# want to cause random regressions and 3% is probably still acceptable
# for any outlier randoms.
self.assertTrue(is_roughly_close < THRESHOLD)
@parameterized.named_parameters(*TEST_CASES)
@test_util.deprecated_graph_mode_only
def testUserSeed(self, rate, seed, noise_shape):
def _run_dropout(w):
return self._ipu_dropout(w, rate, seed, noise_shape)
r, input_data = self._setup_test(_run_dropout)
with sl.Session() as sess:
in_data = np.random.rand(*DIMS)
# For a given output, verify that each subsequent output is equal to it.
first_result = None
for _ in range(SEED_TEST_REPETITIONS):
result = sess.run(r, {input_data: in_data})
if first_result is None:
first_result = result
continue
self.assertAllEqual(first_result, result)
@parameterized.named_parameters(*TEST_CASES)
@test_util.deprecated_graph_mode_only
def testDropoutBackwardPass(self, rate, seed, noise_shape):
def _run_dropout(w):
output = self._ipu_dropout(w, rate, seed, noise_shape)
largest = output
cost = math_ops.square(largest)
opt = gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
gradients = opt.compute_gradients(cost, w)
return [output, gradients]
r, input_data = self._setup_test(_run_dropout)
with sl.Session() as sess:
in_data = np.random.rand(*DIMS)
result = sess.run(r, {input_data: in_data})
dropout_out = result[0]
gradients = result[1][0][0]
# Check we have the same number of zeros.
self.assertAllEqual(np.count_nonzero(dropout_out),
np.count_nonzero(gradients))
@parameterized.named_parameters(*TEST_CASES)
@test_util.deprecated_graph_mode_only
def testScaling(self, rate, seed, noise_shape):
def _run_dropout(w):
return self._ipu_dropout(w, rate, seed, noise_shape)
r, input_data = self._setup_test(_run_dropout)
with sl.Session() as sess:
in_data = np.ones(DIMS)
[result] = sess.run(r, {input_data: in_data})
kept_values = result[np.nonzero(result)]
expected_kept_values = 1 / (1 - rate) * np.ones(kept_values.shape)
self.assertAllClose(kept_values, expected_kept_values)
if __name__ == "__main__":
googletest.main()
| 2.375 | 2 |
apps/seqGraph.py | zanejobe/co2seq | 0 | 12768729 | import plotly.graph_objects as go
import plotly.express as px
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import pandas as pd
import base64
import dash
from dash.dependencies import Output, Input
from app import app
from render import load_dfs, get_traces_from_dfs
import os
dfs = load_dfs(os.path.join("Data", "config.json"))
traces = get_traces_from_dfs(os.path.join("Data", "config.json"), dfs)
basins = dfs['USGS Sedimentary Basins 2012']
fig = go.Figure()
for trace in traces:
fig.add_trace(trace)
fig.update_layout(mapbox_style="open-street-map")
fig.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 0})
map_height = 600
fig.update_layout(height=map_height)
fig.update_mapboxes(center=go.layout.mapbox.Center(lat=40, lon=-99), zoom=3)
fig.update_layout(
legend=dict(
x=1,
y=0.9,
traceorder="normal",
font=dict(
family="Georgia",
size=18,
color="#21314D"
)
)
)
df = pd.read_csv("Data/plants_per_basin.csv")
encoded_logo = base64.b64encode(open('CO-Mines-logo-stacked-4C.png', 'rb').read())
basin_names = df.name.unique()
basin_names.sort()
def scatterboiz():
df["Years to fill basin"] = (df.apply(lambda x: round(x.storage/x.emissions, 2), axis=1))
fig = px.scatter(df, x='emissions', y='storage',
hover_data=['name', 'storage', 'emissions', 'Years to fill basin'],
size='Years to fill basin',
log_x=True, log_y=True,
labels={
"emissions" : "2020 Emissions (Mt)",
"storage" : "Total Storage Potential (Mt)"
})
fig.update_layout(yaxis={"tickmode": "linear", "showgrid": False},
xaxis={"tickmode": "linear", "showgrid": False})
return fig
'''
Layout for Page 1 hosts map object and general overview
'''
layout = html.Div([
dbc.Container([
dbc.Row([
dbc.Col(html.Img(src='data:image/png;base64,{}'.format(encoded_logo.decode())), width=1),
dbc.Col(html.H1("Dashboard for Carbon Capture, Utilization, and Storage (CCUS) Data"), width=8, className="mb-2",
style={'textAlign': 'left'}),
dbc.Col(dcc.Link('About', href='/apps/about', style={"textAlign": "right"}), width=3, style={"textAlign": "right"})
], style={"margin-top": "6px", 'margin-bottom': '6px'}),
dbc.Row([
dbc.Col(html.H6(children='This interactive dashboard visualizes geospatial data relevant to CCUS efforts in the United States. Select a dataset below to display it on the map or scroll down to explore CCUS statistics by basin.'), className="mb-4")
]),
dbc.Row([
dbc.Col(dbc.Card(
[
dbc.CardBody(
[
html.H2("CCUS Map", className="align-self-center"),
html.P("Select data from the right to show it on the map")
]
)
], color="rgb(33,49,77,0.9)", inverse=True)),
], style={
'textAlign': 'center',
}),
dbc.Row([
dcc.Loading(
id="loading-1",
type="default",
style={"height": f"{map_height}", "width": "175vh"},
children=dcc.Graph(id="map", style={"height": f"{map_height}", "width": "175vh"})
),
], id="map_row", justify="center"),
dbc.Row(children=
[
dbc.Col(dbc.Card([
dbc.CardBody([
html.H4("CCUS Potential for US Basins", className="card-title"),
html.P("Each dot represents a US basin, where the annual emissions are derived from EPA data and the total carbon storage is derived from USGS data", className="card-text"),
html.P("Bubble size represents the number of years of emissions to fill the storage")])
], color="rgb(210,73,42,0.9)", inverse=True)),
dbc.Col(dbc.Card([
dbc.CardBody([
html.H4("Select a basin to compare emissions and storage", className="card-title"),
html.P("Bar chart compares annual emissions and total storage for a particular basin", className="card-text")]),
dbc.CardFooter(
dcc.Dropdown(
id="dropdown",
options=[{"label": x, "value": x} for x in basin_names],
value=basin_names[0],
style={'color': 'black'}
),
),
], color="rgb(210,73,42,0.9)", inverse=True))
], style={
'textAlign': 'center', 'margin-top': '10px'
}),
dbc.Row(children=
[
dbc.Col(html.Div([
dcc.Graph(figure=scatterboiz(), responsive=True),
])),
dbc.Col(html.Div([
dcc.Graph(id="bar-graph", responsive=True),
]))
])
], fluid=True)
])
'''
Creating callback functions for bar graphs
'''
@app.callback(
[Output("bar-graph", "figure"), Output("dropdown", "value")],
[Input("dropdown", "value"), Input("map", "clickData")])
def barboiz(name, clickData):
ctx = dash.callback_context
changed = ctx.triggered[0]['prop_id'].split('.')[0]
# If a user clicks on the map, parse the clickData from dcc.Graph into a name
if changed == "map" and clickData:
for frame in dfs.values():
row = frame[frame["hover"] == clickData["points"][0]["hovertext"]]
if not row.empty:
name = row["Name"].values[0]
mask = df[df["name"] == name]
fig = px.bar(mask, x="name", y=["emissions", "storage"], barmode='group', log_y=True)
fig.update_layout(yaxis={"tickmode": "linear", "showgrid": False, "title": "CO<sub>2</sub> (Mt)" },
xaxis={"title": ""})
return fig, name
@app.callback(
Output("map", "figure"),
[Input("map_row", "id")])
def create_map(_):
return fig
| 2.3125 | 2 |
source.py | Kimeg/Raycasting-Visualization-in-3D | 0 | 12768730 | from static import *
from lib import map_value
from point import Point
from ray import Ray
import numpy as np
import random
import math
class Source:
def __init__(self, x, y, fov, pg, screen):
self.pos = Point(x, y)
self.angle = np.random.randint(0, 360)
self.view_mode = 0
self.pg = pg
self.screen = screen
self.fov = fov
return
def generate_rays(self):
''' list to store all light ray objects emerging from light source '''
self.rays = []
self.ray_color = BLUE
self.point_color = GREEN
for i in range(0, N):
angle = i*self.fov/N * np.pi/180
self.rays.append(Ray(self.pos.x, self.pos.y, self.ray_color, self.point_color, self.pg, self.screen, angle))
return
def change_ray_colors(self):
self.ray_color = random.choice(COLORS)
self.point_color = random.choice(COLORS)
for ray in self.rays:
ray.change_color(self.ray_color, self.point_color)
return
def move(self, x, y):
self.pos.move(x, y)
for ray in self.rays:
ray.move(x, y)
return
def dist(self, ip):
return np.sqrt(np.sum([(self.pos.x-ip[0])**2, (self.pos.y-ip[1])**2]))
def draw(self):
self.pg.draw.rect(self.screen, BLACK, (0, 0, SWIDTH, HEIGHT))
if (self.pos.x < WIDTH):
self.pg.draw.circle(self.screen, GREEN, (self.pos.x, self.pos.y), 10)
return
''' 3D Rendering of ray-casting process '''
''' There are dozens of other ways to map 2D info to 3D, '''
    ''' which affects how the rendering process looks to our eyes. '''
    ''' parameters i and distance refer to the index of a ray and its distance to the nearest wall '''
''' '''
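    # For example, in 'tangent' mode the strip height is dy = DISTORTION_ANGLE / distance,
    # so a wall twice as far away is drawn half as tall, which is what creates the
    # perspective effect on the 3D half of the screen.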
def draw3D(self, i, distance, color):
if distance==0:
return
''' width of rectangle being rendered in 3D '''
dx = int(WIDTH/N)
''' height of rectangle being rendered in 3D '''
if VIEW_MODES[self.view_mode] == 'tangent':
dy = int(DISTORTION_ANGLE/distance)
elif VIEW_MODES[self.view_mode] == 'cosine':
dy = int((N*HEIGHT/distance)*math.cos(abs(i*(self.fov/N)-self.fov)*math.pi/180))
elif VIEW_MODES[self.view_mode] == 'fisheye':
dy = int(HEIGHT-distance)
''' color value provides an effect in which wall's color being altered '''
''' depending on its distance to the light source '''
#color = 255-map_value(distance)
color = tuple([v-map_value(distance, v) for v in color])
try:
self.pg.draw.rect(self.screen, color, (WIDTH + (i*dx), int((HEIGHT-dy)/2), dx, dy))
except:
pass
return
| 3.328125 | 3 |
sample-apps/segmentation_spleen/lib/train.py | IntroAI-termproject/MONAILabel | 214 | 12768731 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from monai.inferers import SlidingWindowInferer
from monai.losses import DiceCELoss
from monai.optimizers import Novograd
from monai.transforms import (
Activationsd,
AddChanneld,
AsDiscreted,
CropForegroundd,
EnsureTyped,
LoadImaged,
RandCropByPosNegLabeld,
RandShiftIntensityd,
ScaleIntensityRanged,
Spacingd,
ToDeviced,
ToTensord,
)
from monailabel.tasks.train.basic_train import BasicTrainTask, Context
logger = logging.getLogger(__name__)
class MyTrain(BasicTrainTask):
def __init__(
self,
model_dir,
network,
description="Train Segmentation model for spleen",
**kwargs,
):
self._network = network
super().__init__(model_dir, description, **kwargs)
def network(self, context: Context):
return self._network
def optimizer(self, context: Context):
return Novograd(self._network.parameters(), 0.0001)
def loss_function(self, context: Context):
return DiceCELoss(to_onehot_y=True, softmax=True, squared_pred=True, batch=True)
def train_pre_transforms(self, context: Context):
t = [
LoadImaged(keys=("image", "label")),
AddChanneld(keys=("image", "label")),
Spacingd(
keys=("image", "label"),
pixdim=(1.0, 1.0, 1.0),
mode=("bilinear", "nearest"),
),
ScaleIntensityRanged(keys="image", a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),
CropForegroundd(keys=("image", "label"), source_key="image"),
]
if context.request.get("to_gpu", False):
t.extend([EnsureTyped(keys=("image", "label")), ToDeviced(keys=("image", "label"), device=context.device)])
t.extend(
[
RandCropByPosNegLabeld(
keys=("image", "label"),
label_key="label",
spatial_size=(96, 96, 96),
pos=1,
neg=1,
num_samples=4,
image_key="image",
image_threshold=0,
),
RandShiftIntensityd(keys="image", offsets=0.1, prob=0.5),
]
)
return t
def train_post_transforms(self, context: Context):
return [
ToTensord(keys=("pred", "label")),
Activationsd(keys="pred", softmax=True),
AsDiscreted(
keys=("pred", "label"),
argmax=(True, False),
to_onehot=True,
n_classes=2,
),
]
def val_pre_transforms(self, context: Context):
t = [
LoadImaged(keys=("image", "label")),
AddChanneld(keys=("image", "label")),
Spacingd(
keys=("image", "label"),
pixdim=(1.0, 1.0, 1.0),
mode=("bilinear", "nearest"),
),
ScaleIntensityRanged(keys="image", a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),
CropForegroundd(keys=("image", "label"), source_key="image"),
]
if context.request.get("to_gpu", False):
t.extend([EnsureTyped(keys=("image", "label")), ToDeviced(keys=("image", "label"), device=context.device)])
return t
def val_inferer(self, context: Context):
return SlidingWindowInferer(roi_size=(160, 160, 160), sw_batch_size=1, overlap=0.25)
| 1.84375 | 2 |
aa/ca.py | philipp-leitl/aapy | 1 | 12768732 | """Simple client to the Channel Archiver using xmlrpc."""
import logging as log
from xmlrpc.client import ServerProxy
import numpy
from . import data, utils
from .fetcher import Fetcher
__all__ = [
"CaClient",
"CaFetcher",
]
class CaClient(object):
"""Class to handle XMLRPC interaction with a channel archiver."""
def __init__(self, url):
"""
Args:
url: url for the channel archiver
"""
self._proxy = ServerProxy(url)
@staticmethod
def _create_archive_event(pv, ca_event):
"""Create ArchiveEvent from the objects received over XMLRPC.
Args:
pv: PV name to add to the event
ca_event: object received over XMLRPC
Returns:
ArchiveEvent object
"""
value = ca_event["value"]
timestamp = ca_event["secs"] + 1e-9 * ca_event["nano"]
severity = ca_event["sevr"]
return data.ArchiveEvent(pv, value, timestamp, severity)
def get(self, pv, start, end, count):
"""Request events over XMLRPC.
Args:
pv: PV name to request events for
start: datetime of start of requested period
end: datetime of end of requested period
count: maximum number of events to retrieve
Returns:
List of ArchiveEvent objects
"""
start_secs = utils.datetime_to_epoch(start)
end_secs = utils.datetime_to_epoch(end)
response = self._proxy.archiver.values(
1, [pv], start_secs, 0, end_secs, 0, count, 0
)
return [
CaClient._create_archive_event(pv, val) for val in response[0]["values"]
]
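# Minimal usage sketch (hypothetical archiver URL and PV name):
#   client = CaClient('http://archiver.example.com/cgi-bin/ArchiveDataServer.cgi')
#   events = client.get('SR-DI-DCCT-01:SIGNAL', start_datetime, end_datetime, 1000)
# where start_datetime and end_datetime are datetime objects.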
class CaFetcher(Fetcher):
"""Class to retrieve data from a channel archiver."""
def __init__(self, url):
"""
Args:
url: url for the channel archiver
"""
self._client = CaClient(url)
def _get_values(self, pv, start, end=None, count=None, request_params=None):
# Make count a large number if not specified to ensure we get all
# data.
count = 2 ** 31 if count is None else count
empty_array = numpy.zeros((0,))
all_data = data.ArchiveData(pv, empty_array, empty_array, empty_array)
last_timestamp = -1
done = False
while done is not True and len(all_data) < count:
requested = min(count - len(all_data), 10000)
if all_data.timestamps.size:
last_timestamp = all_data.timestamps[-1]
start = utils.epoch_to_datetime(last_timestamp)
log.info("Request PV {} for {} samples.".format(pv, requested))
log.info("Request start {} end {}".format(start, end))
events = self._client.get(pv, start, end, requested)
done = len(events) < requested
# Drop any events that are earlier than ones already fetched.
events = [e for e in events if e.timestamp > last_timestamp]
new_data = data.data_from_events(pv, events)
all_data = all_data.concatenate(new_data, zero_pad=True)
return all_data
| 2.703125 | 3 |
aphla/contrib/extendwf.py | NSLS-II/aphla | 0 | 12768733 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numpy import *
import os
import commands
import time
import datetime
import re
import string
import sys
# python
'''
from extendwf import *
outputwf=extendwf(Nwflength,inputwf)
Nwflength:output waveform length to be extended to,inputwf: input waveform,outputwf: return output waveform with length Nwflength)
'''
def extendwf(Nwflength,inputwf):
'''
    extend input array length to length Nwflength by padding the extra elements with the last value of the input waveform
from extendwf import *
outputwf=extendwf(Nwflength,inputwf)
Nwflength:output waveform length to be extended to,inputwf: input waveform,
outputwf: return output waveform with length Nwflength)
'''
outputwf=zeros(Nwflength)
Nin=len(inputwf)
for i in range(Nwflength):
if i<Nin:
outputwf[i]= inputwf[i]
else:
outputwf[i]= inputwf[Nin-1]
print(outputwf)
return outputwf
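# Worked example: extendwf(5, [1.0, 2.0]) returns array([1., 2., 2., 2., 2.]),
# padding with the last value of the input waveform.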
| 2.921875 | 3 |
libs/configs/_base_/models/retinanet_r50_fpn.py | Artcs1/RotationDetection | 850 | 12768734 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
import math
ROOT_PATH = os.path.abspath('../../')
print(ROOT_PATH)
SUMMARY_PATH = os.path.join(ROOT_PATH, 'output/summary')
# backbone
NET_NAME = 'resnet50_v1d'
RESTORE_FROM_RPN = False
FIXED_BLOCKS = 1 # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone
# neck
FPN_MODE = 'fpn'
SHARE_NET = True
USE_P5 = True
FPN_CHANNEL = 256
# bbox head
NUM_SUBNET_CONV = 4
LEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE = [8, 16, 32, 64, 128]
ANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
ANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]
ANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]
ANCHOR_SCALE_FACTORS = None
USE_CENTER_OFFSET = True
METHOD = 'H'
ANGLE_RANGE = 90 # 90 or 180
USE_GN = False
SUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)
SUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)
PROBABILITY = 0.01
FINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))
# loss
CLS_WEIGHT = 1.0
REG_WEIGHT = 1.0
# sample
IOU_POSITIVE_THRESHOLD = 0.5
IOU_NEGATIVE_THRESHOLD = 0.4
# post-processing
NMS = True
NMS_IOU_THRESHOLD = 0.3
MAXIMUM_DETECTIONS = 100
FILTERED_SCORE = 0.05
VIS_SCORE = 0.4
# test and eval
TEST_SAVE_PATH = os.path.join(ROOT_PATH, 'tools/test_result')
EVALUATE_R_DIR = os.path.join(ROOT_PATH, 'output/evaluate_result_pickle/')
USE_07_METRIC = True
EVAL_THRESHOLD = 0.5
| 1.523438 | 2 |
NSGA-II.py | MacKur/Genetic_Algorithm_Parking_System | 1 | 12768735 | import random as rn
import numpy as np
import matplotlib.pyplot as plt
import math
from matplotlib import patches
from matplotlib.patches import Polygon
def random_population(_nv, n, _lb, _ub):
_pop = np.zeros((n, 2 * nv))
for i in range(n):
_pop[i, :] = np.random.uniform(lb, ub)
for j in range(int(_pop[i, :].size / 2)):
if _pop[i, j * 2] < 0:
_pop[i, j * 2] = int(-1)
else:
_pop[i, j * 2] = int(1)
return _pop
def crossover(_pop, crossover_rate):
next_gen = np.zeros((crossover_rate, _pop.shape[1]))
for i in range(int(crossover_rate / 2)):
r1 = np.random.randint(0, _pop.shape[0])
r2 = np.random.randint(0, _pop.shape[0])
while r1 == r2:
r1 = np.random.randint(0, _pop.shape[0])
r2 = np.random.randint(0, _pop.shape[0])
cutting_point = np.random.randint(1, _pop.shape[1])
next_gen[2 * i, 0:cutting_point] = _pop[r1, 0:cutting_point]
next_gen[2 * i, cutting_point:] = _pop[r2, cutting_point:]
next_gen[2 * i + 1, 0:cutting_point] = _pop[r2, 0:cutting_point]
next_gen[2 * i + 1, cutting_point:] = _pop[r1, cutting_point:]
return next_gen
def mutation(_pop, mutation_rate):
next_gen = np.zeros((mutation_rate, _pop.shape[1]))
for i in range(int(mutation_rate / 2)):
r1 = np.random.randint(0, _pop.shape[0])
r2 = np.random.randint(0, _pop.shape[0])
while r1 == r2:
r1 = np.random.randint(0, _pop.shape[0])
r2 = np.random.randint(0, _pop.shape[0])
cutting_point = np.random.randint(0, _pop.shape[1])
next_gen[2 * i] = _pop[r1]
next_gen[2 * i, cutting_point] = _pop[r2, cutting_point]
next_gen[2 * i + 1] = _pop[r2]
next_gen[2 * i + 1, cutting_point] = _pop[r1, cutting_point]
return next_gen
def local_search(_pop, n, _step_size):
next_gen = np.zeros((n, _pop.shape[1]))
for i in range(n):
r1 = np.random.randint(0, _pop.shape[0])
unit = _pop[r1, :]
unit[1] += np.random.uniform(-_step_size, _step_size)
if unit[1] < lb[1]:
unit[1] = lb[1]
if unit[1] > ub[1]:
unit[1] = ub[1]
next_gen[i, :] = unit
return next_gen
def evaluation(_pop, x_s, y_s, alfa_s, _done):
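    # Each individual encodes nv (speed sign, steering angle) control pairs; the car
    # pose is simulated with a simple bicycle model and scored on two objectives:
    # distance of the final position from the parking spot at the origin and the
    # heading error relative to the spot orientation (beta).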
_fitness_values = np.zeros((_pop.shape[0], 2))
_flipped_fitness_values = np.zeros((_pop.shape[0], 2))
i = 0
_trajectory = []
V = np.zeros(nv)
angle = np.zeros(nv)
for individual in _pop:
for n in range(nv):
V[n] = individual[2 * n]
angle[n] = individual[2 * n + 1]
x = x_s - ds * math.cos(alfa_s)
y = y_s - ds * math.sin(alfa_s)
alfa_n = alfa_s
for u in range(nv):
if abs(angle[u]) < 0.0001:
x_n = x + V[u] * math.cos(alfa_n)
y_n = y + V[u] * math.sin(alfa_n)
else:
a = dist_between_axles / math.tan(angle[u])
Ro = math.sqrt(dist_between_axles ** 2 / 4 + (abs(a) + car_width / 2) ** 2)
tau = math.copysign(1, angle[u]) * alfa_n + a * math.sin(dist_between_axles / 2 * Ro)
gama = V[u] * dt / Ro
x_n = x + Ro * (math.sin(gama + tau) - math.sin(tau))
y_n = y + math.copysign(1, angle[u]) * Ro * (math.cos(tau) - math.cos(gama + tau))
alfa_n = alfa_n + math.copysign(1, angle[u]) * gama
if abs(alfa_n) > math.pi:
alfa_n = alfa_n - math.copysign(1, alfa_n) * math.pi * 2
x = x_n + ds * math.cos(alfa_n)
y = y_n + ds * math.sin(alfa_n)
for j in range(2):
if j == 0: # objective 1
if parking_length < x < -5 or parking_width < y < -5:
_fitness_values[i, j] = 1000
else:
_fitness_values[i, j] = math.sqrt(x ** 2 + y ** 2)
elif j == 1: # objective 2
_fitness_values[i, j] = beta - alfa_n
_flipped_fitness_values[i, 0] = 1 / _fitness_values[i, 0]
_flipped_fitness_values[i, 1] = 1 / _fitness_values[i, 1]
if _fitness_values[i, 0] <= 0.8 and \
(abs(_fitness_values[i, 1]) <= 0.1745 or abs(_fitness_values[i, 1]) >= 2.9671):
_done = True
if final is True:
_trajectory = np.append(_trajectory, [individual])
i = i + 1
return _fitness_values, _trajectory, _done, _flipped_fitness_values
def best_individuals_visualization(best, x_s, y_s, alfa_s):
_positions_x = []
_positions_y = []
_car_angle = []
i = 0
C = nv * 2
V = np.zeros(nv)
angle = np.zeros(nv)
best_units = np.array_split(best, len(best) / C)
for individual in best_units:
for n in range(nv):
V[n] = individual[2 * n]
angle[n] = individual[2 * n + 1]
x = x_s - ds * math.cos(alfa_s)
y = y_s - ds * math.sin(alfa_s)
alfa_n = alfa_s
for u in range(nv):
if abs(angle[u]) < 0.0001:
x_n = x + V[u] * dt * math.cos(alfa_n)
y_n = y + V[u] * dt * math.sin(alfa_n)
else:
a = dist_between_axles / math.tan(angle[u])
Ro = math.sqrt(dist_between_axles ** 2 / 4 + (abs(a) + car_width / 2) ** 2)
tau = math.copysign(1, angle[u]) * alfa_n + a * math.sin(dist_between_axles / 2 * Ro)
gama = V[u] * dt / Ro
x_n = x + Ro * (math.sin(gama + tau) - math.sin(tau))
y_n = y + math.copysign(1, angle[u]) * Ro * (math.cos(tau) - math.cos(gama + tau))
alfa_n = alfa_n + math.copysign(1, angle[u]) * gama
if abs(alfa_n) > math.pi:
alfa_n = alfa_n - math.copysign(1, alfa_n) * math.pi * 2
x = x_n + ds * math.cos(alfa_n)
y = y_n + ds * math.sin(alfa_n)
_positions_x = np.append(_positions_x, [x])
_positions_y = np.append(_positions_y, [y])
_car_angle = np.append(_car_angle, [alfa_n])
i = i + 1
position_x_arr = _positions_x
position_y_arr = _positions_y
car_angles_arr = _car_angle
return position_x_arr, position_y_arr, car_angles_arr
def crowding_calculation(_fitness_values):
_pop_size = len(_fitness_values[:, 0])
fitness_value_number = len(_fitness_values[0, :])
matrix_for_crowding = np.zeros((_pop_size, fitness_value_number))
normalize_fitness_values = (_fitness_values - _fitness_values.min(0)) / _fitness_values.ptp(0) # normalize fit val
for i in range(fitness_value_number):
crowding_results = np.zeros(_pop_size)
crowding_results[0] = 1 # extreme point has the max crowding distance
crowding_results[_pop_size - 1] = 1 # extreme point has the max crowding distance
sorting_normalize_fitness_values = np.sort(normalize_fitness_values[:, i])
sorting_normalized_values_index = np.argsort(normalize_fitness_values[:, i])
# crowding distance calculation
crowding_results[1:_pop_size - 1] = (
sorting_normalize_fitness_values[2:_pop_size] - sorting_normalize_fitness_values[0:_pop_size - 2])
re_sorting = np.argsort(sorting_normalized_values_index) # re_sorting to the original order
matrix_for_crowding[:, i] = crowding_results[re_sorting]
crowding_distance = np.sum(matrix_for_crowding, axis=1) # crowding distance of each solution
return crowding_distance
def remove_using_crowding(_fitness_values, number_solutions_needed):
pop_index = np.arange(_fitness_values.shape[0])
crowding_distance = crowding_calculation(_fitness_values)
selected_pop_index = np.zeros(number_solutions_needed)
selected_fitness_values = np.zeros((number_solutions_needed, len(_fitness_values[0, :])))
for i in range(number_solutions_needed):
_pop_size = pop_index.shape[0]
solution_1 = rn.randint(0, _pop_size - 1)
solution_2 = rn.randint(0, _pop_size - 1)
if crowding_distance[solution_1] >= crowding_distance[solution_2]:
selected_pop_index[i] = pop_index[solution_1]
selected_fitness_values[i, :] = _fitness_values[solution_1, :]
pop_index = np.delete(pop_index, solution_1, axis=0)
            _fitness_values = np.delete(_fitness_values, solution_1, axis=0)
crowding_distance = np.delete(crowding_distance, solution_1, axis=0)
else:
selected_pop_index[i] = pop_index[solution_2]
selected_fitness_values[i, :] = _fitness_values[solution_2, :]
pop_index = np.delete(pop_index, solution_2, axis=0)
            _fitness_values = np.delete(_fitness_values, solution_2, axis=0)
crowding_distance = np.delete(crowding_distance, solution_2, axis=0)
selected_pop_index = np.asarray(selected_pop_index, dtype=int)
return selected_pop_index
def pareto_front_finding(_fitness_values, pop_index):
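    # A solution stays on the Pareto front unless another solution dominates it,
    # i.e. is no worse in every objective and strictly better in at least one
    # (both objectives are minimized here).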
_pop_size = _fitness_values.shape[0]
_pareto_front = np.ones(_pop_size, dtype=bool)
for i in range(_pop_size):
for j in range(_pop_size):
if all(_fitness_values[j] <= _fitness_values[i]) and any(_fitness_values[j] < _fitness_values[i]):
_pareto_front[i] = 0
break
return pop_index[_pareto_front]
def selection(_pop, _fitness_values, _pop_size):
pop_index_0 = np.arange(pop.shape[0])
pop_index = np.arange(pop.shape[0])
_pareto_front_index = []
while len(_pareto_front_index) < _pop_size:
new_pareto_front = pareto_front_finding(fitness_values[pop_index_0, :], pop_index_0)
total_pareto_size = len(_pareto_front_index) + len(new_pareto_front)
if total_pareto_size > _pop_size:
number_solutions_needed = pop_size - len(_pareto_front_index)
selected_solutions = (remove_using_crowding(_fitness_values[new_pareto_front], number_solutions_needed))
new_pareto_front = new_pareto_front[selected_solutions]
_pareto_front_index = np.hstack((_pareto_front_index, new_pareto_front)) # add to pareto
remaining_index = set(pop_index) - set(_pareto_front_index)
pop_index_0 = np.array(list(remaining_index))
selected_pop = _pop[_pareto_front_index.astype(int)]
return selected_pop
def GOL(_flipped_fitness_values, _fitness_values):
gol = []
max_fitness_val_pos = max(_fitness_values[:, 0])
max_fitness_val_ang = max(_fitness_values[:, 1])
for k in range(pop_summed):
if _flipped_fitness_values[k, 0] / max_fitness_val_pos < _flipped_fitness_values[k, 1] / max_fitness_val_ang:
gol = np.append(gol, _flipped_fitness_values[k, 0] / max_fitness_val_pos)
else:
gol = np.append(gol, _flipped_fitness_values[k, 1] / max_fitness_val_ang)
best_gol = max(gol)
return best_gol
########################
# Parameters #
########################
starting_x = 50.0  # values from 10.0 to 55.0
starting_y = 35.0  # values from 10.0 to 35.0
car_rotation = -math.pi/3  # values from -math.pi to math.pi
number_of_controls = 60
population_size = 160
########################
# Parameters #
########################
stan = [starting_x, starting_y, car_rotation]
nv = number_of_controls
lb = []
ub = []
for _ in range(nv):
lb = np.append(lb, [-1, -math.pi / 6])
ub = np.append(ub, [1, math.pi / 6])
pop_size = population_size
rate_crossover = 30
rate_mutation = 20
rate_local_search = 30
pop_summed = int(population_size + rate_crossover + rate_mutation + rate_local_search)
step_size = 0.1
pop = random_population(nv, pop_size, lb, ub)
best_gols = []
final = False
done = False
parking_spot_length = 6.0
parking_spot_width = 3.0
beta = 0
parking_length = 60.0
parking_width = 40.0
car_width = 1.8
car_length = 4.0
front_axle = 1.2
rear_axle = 0.34
ds = (front_axle - rear_axle / 2)
dist_between_axles = car_length - front_axle - rear_axle
dt = 1
iterations = 0
while not done:
offspring_from_crossover = crossover(pop, rate_crossover)
offspring_from_mutation = mutation(pop, rate_mutation)
offspring_from_local_search = local_search(pop, rate_local_search, step_size)
pop = np.append(pop, offspring_from_crossover, axis=0)
pop = np.append(pop, offspring_from_mutation, axis=0)
pop = np.append(pop, offspring_from_local_search, axis=0)
fitness_values, trajectory, done, flipped_fitness_values = evaluation(pop, stan[0], stan[1], stan[2], done)
best_gols = np.append(best_gols, GOL(flipped_fitness_values, fitness_values))
pop = selection(pop, fitness_values, pop_size)
print('iteration', iterations)
iterations = iterations + 1
final = True
fitness_values, final_trajectory, done, final_flipped_fitness_values = evaluation(pop, stan[0], stan[1], stan[2], done)
positions_x, positions_y, car_angles = best_individuals_visualization(final_trajectory, stan[0], stan[1], stan[2])
index = np.arange(pop.shape[0]).astype(int)
pareto_front_index = pareto_front_finding(fitness_values, index)
pop = pop[pareto_front_index, :]
pareto_front = fitness_values[pareto_front_index]
print("______________")
print("Kryteria optymalizacji:")
print("Odl. od miejsca | Różnica kąta wzgl.")
print("parkingowego | miejsca parkingowego")
print(fitness_values)
plt.scatter(fitness_values[:, 0], abs(abs(fitness_values[:, 1] * (180 / math.pi)) - 180), marker='x', c='r')
plt.scatter(pareto_front[:, 0], abs(abs(pareto_front[:, 1] * (180 / math.pi)) - 180), marker='x', c='b')
blue_patch = patches.Patch(color='blue', label='Osobniki Pareto Optymalne')
red_patch = patches.Patch(color='red', label='Reszta populacji')
plt.legend(handles=[blue_patch, red_patch])
plt.xlabel('Odległość od miejsca parkingowego w linii prostej [m]')
plt.ylabel('Różnica kąta względem miejsca parkingowego [stopnie]')
plt.show()
fig = plt.figure()
ax = fig.add_subplot()
ax.set_title('Trasa przejazdu optymalnego osobnika')
ax.set_xlabel('X [m]')
ax.set_ylabel('Y [m]')
ax.set_xlim(-10, parking_length)
ax.set_ylim(-10, parking_width)
ax.add_patch(patches.Rectangle((0 - parking_spot_length / 2, 0 - parking_spot_width / 2), parking_spot_length,
parking_spot_width, edgecolor='black', fill=False))
fig.show()
for m in range(nv):
xA = positions_x[m] - car_length / 2 * math.cos(car_angles[m]) - car_width / 2 * math.sin(car_angles[m])
yA = positions_y[m] - car_length / 2 * math.sin(car_angles[m]) + car_width / 2 * math.cos(car_angles[m])
xB = xA + car_width * math.sin(car_angles[m])
yB = yA - car_width * math.cos(car_angles[m])
xD = xA + car_length * math.cos(car_angles[m])
yD = yA + car_length * math.sin(car_angles[m])
xC = xB + car_length * math.cos(car_angles[m])
yC = yB + car_length * math.sin(car_angles[m])
points = [[xA, yA], [xB, yB], [xC, yC], [xD, yD]]
car = Polygon(points, fill=None, edgecolor='r')
ax.add_patch(car)
plt.show()
plot_iterations = np.arange(iterations)
plt.scatter(plot_iterations, best_gols, marker='o', c='g')
plt.title('Najlepszy parametr GOL dla każdej iteracji')
plt.xlabel('Numer iteracji')
plt.ylabel('Parametr GOL')
plt.show()
| 3.09375 | 3 |
movie_scraper.py | jamesduvall/SummerMovieWager | 0 | 12768736 | import urllib2
from movie import Movie  # assumes the local movie module defines the Movie class
from bs4 import BeautifulSoup
class Movie_Scrapper:
def Scrape_Movies():
page = urllib2.urlopen('http://www.boxofficemojo.com/seasonal/?view=releasedate&yr=2015&season=Summer')
soup = BeautifulSoup(page.read(), 'html.parser')
rows = soup.select('table[bgcolor="#ffffff"] tr')
movies = []
for i in range(2, len(rows) - 4, 1):
rank = rows[i].find_all('td')[0].font.get_text()
title = ""
if(rows[i].find_all('td')[1].b.font.a != None):
title = rows[i].find_all('td')[1].b.font.a.get_text()
else:
title = rows[i].find_all('td')[1].b.font.get_text()
amount = rows[i].find_all('td')[3].font.get_text()
movie = Movie(rank, title, amount)
movies.append(movie);
movie.print_me()
print '\n'
# print(soup.prettify()) | 3.421875 | 3 |
providers/a4kScrapers/en/torrent/bitlord.py | henryjfry/a4kScrapers | 0 | 12768737 | # -*- coding: utf-8 -*-
from providerModules.a4kScrapers import core
class sources(core.DefaultSources):
def __init__(self, *args, **kwargs):
super(sources, self).__init__(__name__, *args, **kwargs)
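    # Scrape the request token (assembled from fragments embedded in the page source) and the session cookies needed for search requests.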
def _get_token_and_cookies(self, url):
response = self._request.get(url.base)
token_id = core.re.findall(r'token\: (.*)\n', response.text)[0]
token = ''.join(core.re.findall(token_id + r" ?\+?\= ?'(.*)'", response.text))
cookies = ''
for cookie in response.cookies:
cookies += '%s=%s;' % (cookie.name, cookie.value)
return (token, cookies)
def _search_request(self, url, query, force_token_refresh=False):
(token, cookies) = core.database.get(self._get_token_and_cookies, 0 if force_token_refresh else 1, url)
headers = {
'x-request-token': token,
'cookie': cookies
}
query = core.quote_plus(query)
data = {
'query': query,
'offset': 0,
'limit': 99,
'filters[field]': 'seeds',
'filters[sort]': 'desc',
'filters[time]': 4,
'filters[category]': 3 if self.is_movie_query() else 4,
'filters[adult]': False,
'filters[risky]': False
}
response = self._request.post(url.base + url.search, data, headers=headers)
if response.status_code != 200:
if not force_token_refresh:
return self._search_request(url, query, force_token_refresh=True)
core.tools.log('No response from %s' %url, 'error')
return []
response = core.json.loads(response.text)
if response['error']:
return []
else:
return response['content']
def _soup_filter(self, response):
return response
def _title_filter(self, el):
return el['name']
def _info(self, el, url, torrent):
torrent['magnet'] = el['magnet']
try:
size = int(el['size'])
if size == 0:
torrent['magnet'] = ''
else:
if size < 120 and el['source'] == 'thePirateBay':
size = size * 1024
elif size > 122880:
size = int(size / 1024)
elif size < 120:
torrent['magnet'] = ''
torrent['size'] = size
except: pass
torrent['seeds'] = el['seeds']
return torrent
| 1.953125 | 2 |
lairdrone/dirb.py | lair-framework/lair-drones-version1--deprecated- | 2 | 12768738 | <filename>lairdrone/dirb.py
#!/usr/bin/env python
import os
import copy
import re
from urlparse import urlparse
from lairdrone import drone_models as models
from lairdrone import helper
TOOL = 'dirb'
def build_clean_path(base_url, path, replace_specials=False):
"""Remove the base url value out of a path and optionally replace
any special characters with an underscore character (required to
build 'path_clean').
:param base_url: Base URL
:param path: URI following base URL
:param replace_specials: Optional parameter, replaces any special chars with underscore
"""
path = path.replace(base_url, '')
return re.sub('[^a-zA-Z0-9]', '_', path) if replace_specials else path
def extrapolate_args(contents):
"""Well... since the result output for dirb doesn't give the commands used, I'm gonna have to
do it the hard way! Works backwards from the result output to derive the command line args
used. Heavily dependent on the version in use. If any of these expected values change, the
regex patterns need to also be updated. Not the best, but what are ya gonna do?
:param contents: String value of output file
"""
user_agent_pattern = re.compile('USER_AGENT: (.+)')
cookie_pattern = re.compile('COOKIE: (.+)')
fine_tuning_pattern = re.compile('OPTION: Fine tunning of NOT_FOUND detection')
headers_pattern = re.compile('ADDED_HEADERS:.+\n--\n(.+)\n--')
case_sensitivity_pattern = re.compile('OPTION: Using Case-Insensitive Searches')
location_pattern = re.compile('OPTION: Printing LOCATION header')
not_found_pattern = re.compile('OPTION: Ignoring NOT_FOUND code -> (\d+)')
output_file_pattern = re.compile('OUTPUT_FILE: (.+)')
proxy_pattern = re.compile('PROXY: (.+)')
proxy_auth_pattern = re.compile('PROXY AUTHORIZATION: (.+)')
not_recursive_pattern = re.compile('OPTION: Not Recursive')
silent_mode_pattern = re.compile('OPTION: Silent Mode')
trailing_slash_pattern = re.compile('OPTION: NOT forcing an ending')
http_auth_pattern = re.compile('AUTHORIZATION: (.+)')
non_existing_pattern = re.compile('OPTION: Show Not Existant Pages')
stop_warning_pattern = re.compile('OPTION: Not Stoping on warning message')
extension_list_pattern = re.compile('EXTENSIONS_LIST: \((.+)\) \|')
extension_file_pattern = re.compile('EXTENSIONS_FILE: (.+)')
speed_delay_pattern = re.compile('SPEED_DELAY: (\d+) miliseconds')
command_args = []
command_args.append('-a %s' % user_agent_pattern.findall(contents)[0] if len(user_agent_pattern.findall(contents)) else None)
command_args.append('-c "%s"' % cookie_pattern.findall(contents)[0] if len(cookie_pattern.findall(contents)) else None)
command_args.append('-f' if len(fine_tuning_pattern.findall(contents)) else None)
command_args.append('-H "%s"' % headers_pattern.findall(contents)[0] if len(headers_pattern.findall(contents)) else None)
command_args.append('-i' if len(case_sensitivity_pattern.findall(contents)) else None)
command_args.append('-l' if len(location_pattern.findall(contents)) else None)
command_args.append('-N %s' % not_found_pattern.findall(contents)[0] if len(not_found_pattern.findall(contents)) else None)
command_args.append('-o %s' % output_file_pattern.findall(contents)[0] if len(output_file_pattern.findall(contents)) else None)
command_args.append('-p %s' % proxy_pattern.findall(contents)[0] if len(proxy_pattern.findall(contents)) else None)
command_args.append('-P %s' % proxy_auth_pattern.findall(contents)[0] if len(proxy_auth_pattern.findall(contents)) else None)
command_args.append('-r' if len(not_recursive_pattern.findall(contents)) else None)
command_args.append('-S' if len(silent_mode_pattern.findall(contents)) else None)
command_args.append('-t' if len(trailing_slash_pattern.findall(contents)) else None)
command_args.append('-u %s' % http_auth_pattern.findall(contents)[0] if len(http_auth_pattern.findall(contents)) else None)
command_args.append('-v' if len(non_existing_pattern.findall(contents)) else None)
command_args.append('-w' if len(stop_warning_pattern.findall(contents)) else None)
command_args.append('-X %s' % extension_list_pattern.findall(contents)[0] if len(extension_list_pattern.findall(contents)) else None)
command_args.append('-x %s' % extension_file_pattern.findall(contents)[0] if len(extension_file_pattern.findall(contents)) else None)
command_args.append('-z %s' % speed_delay_pattern.findall(contents)[0] if len(speed_delay_pattern.findall(contents)) else None)
return 'dirb %s' % ' '.join(filter(None, command_args))
def extract_data(contents):
"""Take the output file contents and parse out the results as well as the commands used.
:param contents: String value of dirb output file
"""
base_url_pattern = re.compile('URL_BASE: (.+)(\/)?\n')
directory_pattern = re.compile('DIRECTORY: (.+)')
file_pattern = re.compile('\+ (.+) \(CODE:(\d{3})')
arguments = extrapolate_args(contents)
try:
base_url = base_url_pattern.findall(contents)[0]
# base_url at this point is a tuple, with the 2nd item value
# being the 2nd matched group, so we can disregard that.
base_url = base_url[0] if not base_url[0].endswith('/') else base_url[0][:-1]
parsed_url = urlparse(base_url)
port = parsed_url.port if parsed_url.port else 80
except Exception as exception:
print exception
directories = [(x, '200') for x in directory_pattern.findall(contents)]
files = file_pattern.findall(contents)
results = directories + files
final_results = []
for record in results:
final_results.append({
'path': build_clean_path(base_url, record[0]),
'path_clean': build_clean_path(base_url, record[0], True),
'port': port,
'response_code': record[1],
'flag': False,
})
return parsed_url.hostname, arguments, final_results
def parse(project, resource):
"""Parses a Dirb file and updates the Lair database
:param project: The project id
:param resource: The output file provided by dirb
"""
# Attempt to parse resource as a file or string
try:
if os.path.isfile(resource):
with open(resource, 'r') as fh:
contents = fh.read()
else:
contents = resource
except Exception as exception:
print exception
host_ip, arguments, extracted_data = extract_data(contents)
# Create the project dictionary which acts as foundation of document
project_dict = copy.deepcopy(models.project_model)
project_dict['project_id'] = project
# Pull the command from the file
command_dict = copy.deepcopy(models.command_model)
command_dict['tool'] = TOOL
command_dict['command'] = arguments
project_dict['commands'].append(command_dict)
    # Process host data
host_dict = copy.deepcopy(models.host_model)
host_dict['string_addr'] = host_ip
host_dict['web_directories'] = extracted_data
project_dict['hosts'].append(host_dict)
return project_dict
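# Minimal usage sketch (hypothetical project id and output path, not part of the
# original drone) illustrating the parse() entry point:
if __name__ == '__main__':
    lair_project = parse('example-project-id', '/tmp/dirb_output.txt')
    print lair_project['commands'][0]['command']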
| 2.5 | 2 |
web100_userland-1.8/python/readall.py | m-lab/web100_userland-debian | 0 | 12768739 | <filename>web100_userland-1.8/python/readall.py
"""readall.py: Read all Web100 variables from all connections."""
from Web100 import *
a = Web100Agent()
cl = a.all_connections()
for c in cl:
print("Connection %d (%s %d %s %d)"%(c.cid, \
c.read('LocalAddress'), \
c.read('LocalPort'), \
c.read('RemAddress'), \
c.read('RemPort')))
for (name, val) in c.readall().items():
print("%-20s %s"%(name, str(val)))
print('')
| 2.703125 | 3 |
bsolutions/domain/factoryboy_utils.py | xergioalex/django-multiple-databases-engines | 8 | 12768740 | # factoryboy_utils.py
@classmethod
def _get_manager(cls, model_class):
return super(cls, cls)._get_manager(model_class).using(cls.database)
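# _get_manager is monkey-patched onto a factory class by DBAwareFactory (below) so
# that the factory's model manager routes queries to the database selected at runtime.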
class DBAwareFactory(object):
"""
Context manager to make model factories db aware
Usage:
with DBAwareFactory(PersonFactory, 'db_qa') as personfactory_on_qa:
person_on_qa = personfactory_on_qa()
...
"""
def __init__(self, cls, db):
        # Keep a reference to the original class (before it is patched)
self.original_cls = cls
# Patch with needed bits for dynamic db support
setattr(cls, 'database', db)
setattr(cls, '_get_manager', _get_manager)
# save the patched class
self.patched_cls = cls
def __enter__(self):
return self.patched_cls
def __exit__(self, type, value, traceback):
        # Return a falsy value so that exceptions raised inside the with-block are not suppressed
        return False
| 2.71875 | 3 |
examples/galaxy.py | xxao/miniml | 0 | 12768741 | <reponame>xxao/miniml<gh_stars>0
import miniml
import numpy as np
from matplotlib import pyplot as plt
# Adapted from:
# https://github.com/kevinzakka/cs231n.github.io/blob/master/neural-networks-case-study.md
# init data
N = 100 # points per class
C = 3 # classes
X = np.zeros((N*C, 2))
y = np.zeros(N*C, dtype='uint8')
np.random.seed(3)
for j in range(C):
r = np.linspace(0.0, 1, N)
t = np.linspace(j*4, (j+1)*4, N) + np.random.randn(N) * 0.2
ixd = range(N*j, N*(j+1))
X[ixd] = np.c_[r*np.sin(t), r*np.cos(t)]
y[ixd] = j
# convert to one-hot
Y, cats = miniml.to_categorical(y)
# create model
model = miniml.Model()
model.dense(16, 'relu', 'he')
model.dense(C, 'softmax', 'plain')
# init params
rate = 1
epochs = 1000
# train model
optimizer = miniml.GradDescent(
cost = 'ce',
epochs = epochs,
init_seed = 48,
store = 100,
verbose = 200)
costs = optimizer.train(model, X, Y, rate)
# plot results
miniml.print_accuracy(model, X, Y)
miniml.plot_costs(epochs, costs=costs)
miniml.plot_boundaries(model, X, Y)
| 3.03125 | 3 |
lib/python/abcutils/CMakeCache.py | ryu-sw/alembic | 921 | 12768742 | <gh_stars>100-1000
#!/usr/bin/env python2.6
#-*- mode: python -*-
##-*****************************************************************************
##
## Copyright (c) 2009-2011,
## <NAME> Imageworks Inc. and
## Industrial Light & Magic, a division of Lucasfilm Entertainment Company Ltd.
##
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above
## copyright notice, this list of conditions and the following disclaimer
## in the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Industrial Light & Magic nor the names of
## its contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
##-*****************************************************************************
from __future__ import with_statement
import os, sys, re
from Path import Path
##-*****************************************************************************
COMMENT = re.compile( r"//|#" )
WS = re.compile( r"\s" )
##-*****************************************************************************
class CacheEntry( object ):
def __init__( self, _line ):
line = WS.sub( "", str( _line ) )
if not line:
return None
elif COMMENT.match( line ):
return None
else:
# get rid of comments at the end of the line
line = COMMENT.split( line, 1 )[0].strip()
try:
name_type, value = line.split( '=' )
self._value = value.strip()
if self._value == '':
self._value = None
name, typ = name_type.split( ':' )
self._name = name.strip()
self._type = typ.strip()
except ValueError:
sys.stderr.write( "Could not parse line '%s'\n" % _line )
self._value = None
self._name = None
self._type = None
def __str__( self ):
val = ""
typ = ""
if self._value != None:
val = self._value
if self._type != None:
typ = self._type
if self._name == None:
return ""
else:
s = "%s:%s=%s" % ( self._name, typ, val )
return s.strip()
def __eq__( self, other ):
return str( self ) == str( other )
def __nonzero__( self ):
try:
return self._name != None and self._value != None
except AttributeError:
return False
def name( self ):
return self._name
def value( self, newval = None ):
if newval != None:
self._value = newval
else:
return self._value
def hint( self ):
"""Return the CMakeCache TYPE of the entry; used as a hint to CMake
GUIs."""
return self._type
##-*****************************************************************************
class CMakeCache( object ):
"""This class is used to read in and get programmatic access to the
variables in a CMakeCache.txt file, manipulate them, and then write the
cache back out."""
def __init__( self, path=None ):
self._cachefile = Path( path )
_cachefile = str( self._cachefile )
self._entries = {}
if self._cachefile.exists():
with open( _cachefile ) as c:
entries = filter( None, map( lambda x: CacheEntry( x ),
c.readlines() ) )
entries = filter( lambda x: x.value() != None, entries )
for i in entries:
self._entries[i.name()] = i
def __contains__( self, thingy ):
try:
return thingy in self.names()
except TypeError:
return thingy in self._entries.values()
def __iter__( self ):
return self._entries
def __nonzero__( self ):
return len( self._entries ) > 0
def __str__( self ):
return os.linesep.join( map( lambda x: str( x ), self.entries() ) )
def add( self, entry ):
e = CacheEntry( entry )
if e:
if not e in self:
self._entries[e.name()] = e
else:
sys.stderr.write( "Entry for '%s' is already in the cache.\n" % \
e.name() )
else:
sys.stderr.write( "Could not create cache entry for '%s'\n" % e )
def update( self, entry ):
e = CacheEntry( entry )
if e:
self._entries[e.name()] = e
else:
sys.stderr.write( "Could not create cache entry for '%s'\n" % e )
def names( self ):
return self._entries.keys()
def entries( self ):
return self._entries.values()
def get( self, name ):
return self._entries[name]
def cachefile( self ):
return self._cachefile
def refresh( self ):
self.__init__( self._cachefile )
def write( self, newfile = None ):
if newfile == None:
newfile = self._cachefile
with open( newfile, 'w' ) as f:
for e in self.entries():
f.write( str( e ) + os.linesep )
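##-*****************************************************************************
# Minimal usage sketch (hypothetical cache path, not part of the original module):
if __name__ == '__main__':
    cache = CMakeCache( 'CMakeCache.txt' )
    cache.update( 'CMAKE_BUILD_TYPE:STRING=Release' )
    for e in cache.entries():
        print e
    cache.write( 'CMakeCache.txt.new' )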
| 1.242188 | 1 |
Birnn_Transformer/ncc/data/type_prediction/utils.py | code-backdoor/code-backdoor | 71 | 12768743 | import os
import numpy as np
from typing import Dict, Generic, List, NamedTuple, Tuple, TypeVar
TTensorizedNodeData = TypeVar("TTensorizedNodeData")
def enforce_not_None(e):
"""Enforce non-nullness of input. Used for typechecking and runtime safety."""
if e is None:
raise Exception("Input is None.")
return e
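# Container for a single tensorised graph sample: per-node tensorised features,
# per-edge-type adjacency lists, and the indices of nodes referenced by the task.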
class TensorizedGraphData(Generic[TTensorizedNodeData]):
__slots__ = ("num_nodes", "node_tensorized_data", "adjacency_lists", "reference_nodes")
def __init__(
self,
num_nodes: int,
node_tensorized_data: List[TTensorizedNodeData],
adjacency_lists: List[Tuple[np.ndarray, np.ndarray]],
reference_nodes: Dict[str, np.ndarray],
):
self.num_nodes = num_nodes
self.node_tensorized_data = node_tensorized_data
self.adjacency_lists = adjacency_lists
self.reference_nodes = reference_nodes | 2.828125 | 3 |
HiggsAnalysis/Skimming/python/higgsToWW2Leptons_OutputModule_cff.py | SWuchterl/cmssw | 6 | 12768744 | <reponame>SWuchterl/cmssw<filename>HiggsAnalysis/Skimming/python/higgsToWW2Leptons_OutputModule_cff.py
import FWCore.ParameterSet.Config as cms
from HiggsAnalysis.Skimming.higgsToWW2LeptonsOutputModuleAODSIM_cfi import *
from HiggsAnalysis.Skimming.higgsToWW2LeptonsOutputModuleRECOSIM_cfi import *
| 1.1875 | 1 |
ndg/xacml/parsers/etree/policyreader.py | philipkershaw/ndg_xacml | 5 | 12768745 | """NDG XACML ElementTree Policy Reader
NERC DataGrid
"""
__author__ = "<NAME>"
__date__ = "16/03/10"
__copyright__ = "(C) 2010 Science and Technology Facilities Council"
__contact__ = "<EMAIL>"
__license__ = "BSD - see LICENSE file in top-level directory"
__revision__ = "$Id$"
from ndg.xacml.parsers import XMLParseError
from ndg.xacml.core.policy import Policy
from ndg.xacml.core.policydefaults import PolicyDefaults
from ndg.xacml.core.variabledefinition import VariableDefinition
from ndg.xacml.core.rule import Rule
from ndg.xacml.core.target import Target
from ndg.xacml.parsers.etree import QName, getElementChildren
from ndg.xacml.parsers.etree.reader import ETreeAbstractReader
from ndg.xacml.parsers.etree.factory import ReaderFactory
class PolicyReader(ETreeAbstractReader):
"""Parse a Policy Document using ElementTree
@cvar TYPE: XACML type to instantiate from parsed object
@type TYPE: type"""
TYPE = Policy
def __call__(self, obj, common):
"""Parse policy object
@param obj: input object to parse
@type obj: ElementTree Element, or stream object
@param common: parsing common data
@type common: from ndg.xacml.parsers.common.Common
@return: new XACML expression instance
@rtype: ndg.xacml.core.policy.Policy derived type
@raise XMLParseError: error reading element
@raise NotImplementedError: parsing is not implemented for rule
combiner, combiner parameters and obligations elements.
"""
elem = super(PolicyReader, self)._parse(obj)
return self.processElement(elem, common)
@classmethod
def parse(cls, obj, common=None):
"""Parse from input object and return new XACML object
As a special case, allow the common data to be None. This is because for
parsing a policy rather than a policy set, no common data is needed.
@param obj: input source - file name, stream object or other
@type obj: string, stream or other
@param common: parsing common data
@type common: from ndg.xacml.parsers.common.Common
@return: new XACML object
@rtype: XacmlCoreBase sub type
"""
return super(ETreeAbstractReader, cls).parse(obj, common)
def processElement(self, elem, common):
"""Parse policy object
@param elem: root element of policy
@type elem: ElementTree Element
@param common: parsing common data
@type common: from ndg.xacml.parsers.common.Common
@return: new XACML expression instance
@rtype: ndg.xacml.core.policy.Policy derived type
@raise XMLParseError: error reading element
@raise NotImplementedError: parsing is not implemented for rule
combiner, combiner parameters and obligations elements.
"""
# XACML type to instantiate
xacmlType = self.TYPE
policy = xacmlType()
localName = QName.getLocalPart(elem.tag)
if localName != xacmlType.ELEMENT_LOCAL_NAME:
raise XMLParseError("No \"%s\" element found" %
xacmlType.ELEMENT_LOCAL_NAME)
# Unpack *required* attributes from top-level element
attributeValues = []
for attributeName in (xacmlType.POLICY_ID_ATTRIB_NAME,
xacmlType.RULE_COMBINING_ALG_ID_ATTRIB_NAME):
attributeValue = elem.attrib.get(attributeName)
if attributeValue is None:
raise XMLParseError('No "%s" attribute found in "%s" '
'element' %
(attributeName,
xacmlType.ELEMENT_LOCAL_NAME))
attributeValues.append(attributeValue)
policy.policyId, policy.ruleCombiningAlgId = attributeValues
# Defaults to XACML version 1.0
# TODO: version check
policy.version = (elem.attrib.get(xacmlType.VERSION_ATTRIB_NAME) or
xacmlType.DEFAULT_XACML_VERSION)
# Parse sub-elements
for childElem in getElementChildren(elem):
localName = QName.getLocalPart(childElem.tag)
if localName == xacmlType.DESCRIPTION_LOCAL_NAME:
if childElem.text is not None:
policy.description = childElem.text.strip()
elif localName == xacmlType.POLICY_DEFAULTS_LOCAL_NAME:
PolicyDefaultsReader = ReaderFactory.getReader(PolicyDefaults)
policy.policyDefaults = PolicyDefaultsReader.parse(childElem,
common)
elif localName == Target.ELEMENT_LOCAL_NAME:
TargetReader = ReaderFactory.getReader(Target)
policy.target = TargetReader.parse(childElem, common)
elif localName == xacmlType.COMBINER_PARAMETERS_LOCAL_NAME:
raise NotImplementedError()
elif localName == xacmlType.RULE_COMBINER_PARAMETERS_LOCAL_NAME:
raise NotImplementedError()
elif localName == VariableDefinition.ELEMENT_LOCAL_NAME:
VariableDefinitionReader = ReaderFactory.getReader(
VariableDefinition)
variableDefinition = VariableDefinitionReader.parse(childElem,
common)
elif localName == Rule.ELEMENT_LOCAL_NAME:
RuleReader = ReaderFactory.getReader(Rule)
rule = RuleReader.parse(childElem, common)
if rule.id in [_rule.id for _rule in policy.rules]:
raise XMLParseError("Duplicate Rule ID %r found" % rule.id)
policy.rules.append(rule)
elif localName == xacmlType.OBLIGATIONS_LOCAL_NAME:
raise NotImplementedError('Parsing for Obligations element is '
'not implemented')
else:
raise XMLParseError("XACML Policy child element name %r not "
"recognised" % localName)
# Record reference in case of references to this policy.
# Allow for there not being a policy finder since this is not needed if
# if the root is a policy rather than a policy set.
if common is not None and hasattr(common, 'policyFinder'):
common.policyFinder.addPolicyReference(policy)
return policy
| 1.820313 | 2 |
affiliate/ad_statis_new.py | gods-view/AdclickIO | 0 | 12768746 | <gh_stars>0
#!/usr/bin/env python
# encoding: utf-8
import os,time
import subprocess
import sys,copy
import gevent
from gevent.event import Event
from multiprocessing.dummy import Pool as ThreadPool
import gc
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
import time
import traceback
from affiliate.model.config import mysql, mysql_report
from affiliate.model.mysql_model import *
from affiliate.model.mysql_report import *
from affiliate.model.redis_model import *
class AdStatis():
def __init__(self,index):
self.RedisClass = Redis()
self.index = index
self.adStatisList = dict()
def doAdStatis(self):
print ("adStatis")
while (True):
#try:
self.adStatisList = dict()
self.adStatis()
#sys.exit()
#except Exception as e:
#pass
    # Aggregate the statistics
def adStatis(self):
res = self.RedisClass.getAdStatList(self.index)
if (res == False or len(res) <= 0):
return False
statisNum = len(res)
        # Split the data into shards (one per statistics table)
num = 10
usersList = dict()
md5Key = dict()
valueList = dict()
for i in range(num):
usersList[i] = dict()
valueList[i] = []
md5Key[i] = []
userIdText = []
clickIds = []
resAdStat = []
Conversions = dict()
CampaignIds = []
FlowIds = []
OfferIds = []
LanderIds = []
CountryIds = []
TrafficSourceIds = []
AffiliateNetworkIds = []
getOfferIdsByFlow = []
userEventNum = dict()
CampaignMapList = dict()
for data in res:
key = bytes.decode(data)
redisData = self.RedisClass.getAdStatValue(key)
#print ("redisData",redisData)
if (("UserID" not in redisData.keys()) or isinstance(redisData['UserID'], (str,int)) == False or len(str(redisData['UserID'])) <= 0):
print ("error",redisData)
continue
userIdValue = redisData['UserID']
userIdText.append(str(userIdValue))
            # Count events per user
if (userIdValue not in userEventNum):
userEventNum[userIdValue] = 0
userEventNum[userIdValue] += 1
redisData['KeysMD5'] = key
print ("success",redisData['UserID'],key)
self.adStatisList[key] = redisData
resAdStat.append(redisData)
#if (redisData['Conversions']):
#print ("data======",redisData['Conversions'])
#tmpList[key] = redisData
if(len(str(redisData['Conversions'])) > 0 and int(redisData['Conversions']) > 0):
clickId = redisData['ClickID']
clickIds.append(clickId)
Conversions[clickId] = redisData
print ("Conversions success",key,clickId)
if(len(str(redisData['CampaignID'])) > 0 and int(redisData['CampaignID']) > 0):
CampaignIds.append(int(redisData['CampaignID']))
tsCampaignId = redisData['tsCampaignId']
tsCampaignName = redisData['tsCampaignName']
if (int(redisData['CampaignID']) not in CampaignMapList):
CampaignMapList[int(redisData['CampaignID'])] = dict()
CampaignMapList[int(redisData['CampaignID'])]['id'] = ''
CampaignMapList[int(redisData['CampaignID'])]['name'] = ''
if (len(str(tsCampaignId)) > 0):
CampaignMapList[int(redisData['CampaignID'])]['id'] = tsCampaignId
if (len(str(tsCampaignName)) > 0):
CampaignMapList[int(redisData['CampaignID'])]['name'] = tsCampaignName
if(len(str(redisData['FlowID'])) > 0 and int(redisData['FlowID']) > 0):
FlowIds.append(int(redisData['FlowID']))
if(len(str(redisData['LanderID'])) > 0 and int(redisData['LanderID']) > 0):
LanderIds.append(int(redisData['LanderID']))
redisData['OfferID'] = 0 #测试
if(len(str(redisData['OfferID'])) > 0 and int(redisData['OfferID']) > 0):
OfferIds.append(int(redisData['OfferID']))
else:
                # When OfferID is empty, the offer has to be derived from the flow's weight configuration
getOfferIdsByFlow.append(str(redisData['FlowID']))
if(len(str(redisData['TrafficSourceID'])) > 0 and int(redisData['TrafficSourceID']) > 0):
TrafficSourceIds.append(int(redisData['TrafficSourceID']))
if(len(str(redisData['AffiliateNetworkID'])) > 0 and int(redisData['AffiliateNetworkID']) > 0):
AffiliateNetworkIds.append(int(redisData['AffiliateNetworkID']))
if(len(str(redisData['Country'])) > 0):
CountryIds.append(redisData['Country'])
        # Handle records with an empty offerId (logic ported over from the Node.js version)
if (len(getOfferIdsByFlow) > 0):
print ("getOfferIdsByFlow:",getOfferIdsByFlow)
sql = "SELECT f.`id` AS flowId,p.`id` AS parentId, l.`id`,l.`name`,p2.`weight` ,f.`userId`\
FROM Flow f \
INNER JOIN `Rule2Flow` f2 ON f2.`flowId` = f.`id`\
INNER JOIN `Rule` r ON r.`id` = f2.`ruleId` \
INNER JOIN `Path2Rule` r2 ON r2.`ruleId`= r.`id`\
INNER JOIN `Path` p ON p.`id` = r2.`pathId`\
INNER JOIN `Offer2Path` p2 ON p2.`pathId` = p.`id` \
INNER JOIN `Offer` l ON l.`id`= p2.`offerId`\
WHERE f2.`deleted`= 0 AND r.`deleted` = 0 \
AND r2.`deleted`= 0 AND p.`deleted` = 0 \
AND p2.`deleted` = 0 AND l.`deleted` = 0 \
AND f.`id` in (%s) AND p2.`weight` > 0 ORDER BY p2.`order` ASC" %(','.join(getOfferIdsByFlow))
print ("sql----------",sql)
getOfferIdsList = TrackingCampaign.execute_sql(sql)
print("getOfferIdsList:",getOfferIdsList)
sys.exit()
TrackingCampaignList = dict()
if (len(CampaignIds) > 0):
CampaignIds = list(set(CampaignIds))
data = TrackingCampaign.select().where(TrackingCampaign.id << CampaignIds)
#print ("TrackingCampaign data",data,"CampaignIds:",CampaignIds,"list",CampaignMapList)
for value in data:
if (int(value.id) not in CampaignMapList.keys()):
print("CampaignMapList id error",value.id,"keys:",CampaignMapList.keys(),"list:",CampaignMapList,"ids:",CampaignIds)
continue
TrackingCampaignList[value.id] = value
                # Update the TrackingCampaign table's campaign-name field (TheirCampName) from the traffic-source campaign name
TheirCampNameList = []
TheirCampName = CampaignMapList[value.id]['name']
if (value.TheirCampName):
TheirCampNameList = value.TheirCampName.split(",")
if (len(str(TheirCampName)) > 0 and TheirCampName not in TheirCampNameList):
TheirCampNameList.append(TheirCampName)
updateTsCampaignName = ','.join(TheirCampNameList)
res = TrackingCampaign.update(TheirCampName = updateTsCampaignName).where(TrackingCampaign.id == value.id).execute()
else:
print("update id error:",value.id,"TheirCampName:",TheirCampName,"TheirCampNameList:",TheirCampNameList)
#print("CampaignIds",CampaignIds)
#sys.exit()
FlowList = dict()
if (len(FlowIds) > 0):
FlowIds = list(set(FlowIds))
data = Flow.select().where(Flow.id << FlowIds)
for value in data:
FlowList[value.id] = value
LanderList = dict()
if (len(LanderIds) > 0):
LanderIds = list(set(LanderIds))
data = Lander.select().where(Lander.id << LanderIds)
for value in data:
LanderList[value.id] = value
OfferList = dict()
if (len(OfferIds) > 0):
OfferIds = list(set(OfferIds))
data = Offer.select().where(Offer.id << OfferIds)
for value in data:
OfferList[value.id] = value
TrafficSourceList = dict()
if (len(TrafficSourceIds) > 0):
TrafficSourceIds = list(set(TrafficSourceIds))
data = TrafficSource.select().where(TrafficSource.id << TrafficSourceIds)
for value in data:
TrafficSourceList[value.id] = value
AffiliateNetworkList = dict()
if (len(AffiliateNetworkIds) > 0):
AffiliateNetworkIds = list(set(AffiliateNetworkIds))
data = AffiliateNetwork.select().where(AffiliateNetwork.id << AffiliateNetworkIds)
for value in data:
AffiliateNetworkList[value.id] = value
CountryList = dict()
if (len(CountryIds) > 0):
CountryIds = list(set(CountryIds))
data = Country.select().where(Country.name << CountryIds)
for value in data:
CountryList[value.name] = value
if (len(CampaignMapList) > 0):
insertData = []
for key,data in CampaignMapList.items():
if (len(str(data['id'])) > 0):
value = {'OurCampId':key,\
'TheirCampId':data['id']
}
#print ("CampaignMapList =======================",value)
insertData.append(value)
res = CampaignMap.insert_many(insertData).upsert(upsert=True).execute()
#print ("userIdText",userIdText)
        # Look up the user records
userKeyList = dict()
userIdText = list(set(userIdText))
resUsers = User.select().where(User.idText << userIdText)
if(resUsers):
userIds = []
for user in resUsers:
#idText = bytes.decode(idText)
userKeyList[user.idText] = dict()
userKeyList[user.idText]['id'] = user.id
userIds.append(user.id)
eventNum = userEventNum[user.idText]
                # Increment totalEvents and billedEvents
res = UserBilling.update(totalEvents=UserBilling.totalEvents + eventNum,billedEvents = UserBilling.billedEvents + eventNum).where(UserBilling.expired == 0,UserBilling.userId == user.id).execute()
        # Save the successful conversion data
#print ("clickIds",clickIds)
if (len(clickIds) > 0):
self.saveAdConversionsStatis(clickIds,Conversions,userKeyList,TrackingCampaignList,FlowList,LanderList,OfferList,TrafficSourceList,AffiliateNetworkList,CountryList)
#print ("-------------")
#sys.exit()
        # Split the data into per-shard buckets
for index in range(len(resAdStat)):
idText = resAdStat[index]['UserID']
#print ("idText",idText)
userId = userKeyList[idText]['id']
print ("userId:",userId)
modUserId = int(userId)%10
usersList[modUserId][idText] = dict()
usersList[modUserId][idText]['id'] = userId
#valueList[modUserId] = []
valueList[modUserId].append(resAdStat[index])
md5Key[modUserId].append(key)
param = []
tables = []
for i in range(num):
data = [str(i),valueList[i],usersList[i],md5Key[i],OfferList]
param.append(data)
tables.append("/tmp/adstatis_new_"+str(i)+".tbl")
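        # Bulk-load the shards in parallel, one worker thread per target table.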
pool = ThreadPool(num)
pool.map(self.save, param)
pool.close()
pool.join()
#print("success")
#sys.exit()
        # Remove the processed entries from Redis
self.RedisClass.delAdStatList(self.index,statisNum)
return True
    # Save the conversion records
def saveAdConversionsStatis(self,clickIds,Conversions,userKeyList,TrackingCampaignList,FlowList,LanderList,OfferList,TrafficSourceList,AffiliateNetworkList,CountryList):
print ("saveAdConversionsStatis")
clickIdsData = AdConversionsStatis.select().where(AdConversionsStatis.ClickID << clickIds)
clickIdsList = dict()
if (clickIdsData):
for data in clickIdsData:
clickIdsList[data.ClickID] = dict()
clickIdsList[data.ClickID]['ClickID'] = data.ClickID
        # Build and insert the new conversion records
insertData = []
for ClickID,data in Conversions.items():
if (ClickID in clickIdsList.keys()):
continue
idText = data['UserID']
UserID = userKeyList[idText]['id']
CampaignName = ''
if ('CampaignID' in data.keys() and len(str(data['CampaignID'])) > 0 and int(data['CampaignID']) in TrackingCampaignList):
CampaignName = TrackingCampaignList[int(data['CampaignID'])].name
FlowName = ''
if ('FlowID' in data.keys() and len(str(data['FlowID'])) > 0 and int(data['FlowID']) in FlowList):
FlowName = FlowList[int(data['FlowID'])].name
LanderName = ''
if ('LanderID' in data.keys() and len(str(data['LanderID'])) > 0 and int(data['LanderID']) in LanderList):
LanderName = LanderList[int(data['LanderID'])].name
OfferName = ''
if ('OfferID' in data.keys() and len(str(data['OfferID'])) > 0 and int(data['OfferID']) in OfferList):
OfferName = OfferList[int(data['OfferID'])].name
TrafficSourceName = ''
if ('TrafficSourceID' in data.keys() and len(str(data['TrafficSourceID'])) > 0 and int(data['TrafficSourceID']) in TrafficSourceList):
TrafficSourceName = TrafficSourceList[int(data['TrafficSourceID'])].name
AffiliateNetworkName = ''
if ('AffiliateNetworkID' in data.keys() and len(str(data['AffiliateNetworkID'])) > 0 and int(data['AffiliateNetworkID']) in AffiliateNetworkList):
AffiliateNetworkName = AffiliateNetworkList[int(data['AffiliateNetworkID'])].name
CountryCode = ''
if ('Country' in data.keys() and len(str(data['Country'])) > 0 and str(data['Country']) in CountryList):
CountryCode = CountryList[str(data['Country'])].alpha3Code
            # When payoutMode is Manual, use the offer's payoutValue as the revenue
if (int(data['OfferID']) in OfferList and int(OfferList[int(data['OfferID'])].payoutMode) == 1):
data['Revenue'] = int(OfferList[int(data['OfferID'])].payoutValue * 1000000)
#print ("CampaignName",CampaignName,"FlowName",FlowName,"LanderName",LanderName,"OfferName",OfferName,"TrafficSourceName",TrafficSourceName,"AffiliateNetworkName",AffiliateNetworkName)
#sys.exit()
value = {'UserID':UserID,\
'PostbackTimestamp':data['PostbackTimestamp'],\
'VisitTimestamp':data['VisitTimestamp'],\
'ExternalID': data['ExternalID'],\
'ClickID':data['ClickID'],\
'TransactionID':data['TransactionID'],\
'Revenue':data['Revenue'],\
'Cost':data['Cost'],\
'CampaignName':CampaignName,\
'CampaignID':data['CampaignID'],\
'LanderName':LanderName,\
'LanderID':data['LanderID'],\
'OfferName':OfferName,\
'OfferID':data['OfferID'],\
'Country':data['Country'],\
'CountryCode':CountryCode,\
'TrafficSourceName':TrafficSourceName,\
'TrafficSourceID':data['TrafficSourceID'],\
'AffiliateNetworkName':AffiliateNetworkName,\
'AffiliateNetworkID':data['AffiliateNetworkID'],\
'Device':data['DeviceType'],\
'OS':data['OS'],\
'OSVersion':data['OSVersion'],\
'Brand':data['Brand'],\
'Model':data['Model'],\
'Browser':data['Browser'],\
'BrowserVersion':data['BrowserVersion'],\
'ISP':data['ISP'],\
'MobileCarrier':data['MobileCarrier'],\
'ConnectionType': data['ConnectionType'],\
'VisitorIP':data['VisitorIP'],\
'VisitorReferrer':data['VisitorReferrer'],\
'V1':data['V1'],\
'V2':data['V2'],\
'V3':data['V3'],\
'V4':data['V4'],\
'V5':data['V5'],\
'V6':data['V6'],\
'V7':data['V7'],\
'V8':data['V8'],\
'V9':data['V9'],\
'V10':data['V10']
}
insertData.append(value)
if (len(insertData) > 0):
res = AdConversionsStatis.insert_many(insertData).execute()
return True
    # Perform the save for one shard
def save(self,param):
table = "adstatis_new_"+str(param[0])
valueList = param[1]
usersList = param[2]
md5Key = param[3]
OfferList = param[4]
print ("save")
resAdStatList = dict()
        # Write the data out for bulk import
fileName = "/tmp/"+table
#print ("table",table)
f=open(fileName+'.tbl','w')
stringData = ''
print ("valueList",len(valueList))
num = len(valueList)
if (num) <= 0:
return False
for index in range(num):
md5Key = valueList[index]['KeysMD5']
data = copy.deepcopy(self.adStatisList[md5Key])#self.RedisClass.getAdStatValue(md5Key)
#data['KeysMD5'] = md5Key
UserID = 0
#print ("data================================",data,"________________")
#print ("________________",md5Key,data)
UserIDText = data['UserID']
#print(UserIDText)
if UserIDText in usersList:
UserID = usersList[UserIDText]['id']
cost = 0
Revenue = 0
#print (md5Key,"Visits",data['Visits'],"VisitsFlag",data['VisitsFlag'],'Conversions',data['Conversions'],'ConversionsFlag',data['ConversionsFlag'],'Clicks',data['Clicks'],"ClicksFlag",data['ClicksFlag'])
if (data['Visits'] and str(data['VisitsFlag']) != '1'):
data['Clicks'] = 0
data['Revenue'] = 0
data['VisitsFlag'] = 1
data['Conversions'] = 0
self.adStatisList[md5Key]['VisitsFlag'] = 1
if (data['Cost']):
cost = int(float(data['Cost'])*1000000)
elif (data['Conversions'] and int(data['Conversions']) >= 1 and str(data['ConversionsFlag']) != '1'):
data['Visits'] = 0
data['Clicks'] = 0
data['Impressions'] = 0
data['ConversionsFlag'] = 1
self.adStatisList[md5Key]['ConversionsFlag'] = 1
if (data['Revenue']):
Revenue = int(float(data['Revenue']))
#print(OfferList[int(data['OfferID'])])
                    # When payoutMode is Manual, use the offer's payoutValue as the revenue
if (int(data['OfferID']) in OfferList and int(OfferList[int(data['OfferID'])].payoutMode) == 1):
Revenue = int(OfferList[int(data['OfferID'])].payoutValue * 1000000)
#print('OfferID',data['OfferID'])
#sys.exit()
elif (data['Clicks'] and int(data['Clicks']) >= 1):
data['Visits'] = 0
data['Revenue'] = 0
data['Impressions'] = 0
#data['ClicksFlag'] = 1
data['Conversions'] = 0
#self.adStatisList[md5Key]['ClicksFlag'] = 1
else:
continue
#print("Revenue",data['Revenue'])
self.RedisClass.setAdStatValue(md5Key,self.adStatisList[md5Key])
stringData += str(UserID)+"|"+\
str(data['CampaignID'])+"|"+\
str(data['CampaignName'])+"|"+\
str(data['FlowID'])+"|"+\
str(data['FlowName'])+"|"+\
str(data['LanderID'])+"|"+\
str(data['LanderName'])+"|"+\
str(data['OfferID'])+"|"+\
str(data['OfferName'])+"|"+\
str(data['OfferUrl'])+"|"+\
str(data['OfferCountry'])+"|"+\
str(data['AffiliateNetworkID'])+"|"+\
str(data['AffilliateNetworkName'])+"|"+\
str(data['TrafficSourceID'])+"|"+\
str(data['TrafficSourceName'])+"|"+\
str(data['Language'])+"|"+\
str(data['Model'])+"|"+\
str(data['Country'])+"|"+\
str(data['City'])+"|"+\
str(data['Region'])+"|"+\
str(data['ISP'])+"|"+\
str(data['MobileCarrier'])+"|"+\
str(data['Domain'])+"|"+\
str(data['DeviceType'])+"|"+\
str(data['Brand'])+"|"+\
str(data['OS'])+"|"+\
str(data['OSVersion'])+"|"+\
str(data['Browser'])+"|"+\
str(data['BrowserVersion'])+"|"+\
str(data['ConnectionType'])+"|"+\
str(data['Timestamp'])+"|"+\
str(data['Visits'])+"|"+\
str(data['Clicks'])+"|"+\
str(data['Conversions'])+"|"+\
str(cost)+"|"+\
str(Revenue)+"|"+\
str(data['Impressions'])+"|"+\
str(md5Key)+"|"+\
str(data['VisitorIP'])+"|"+\
str(data['V1'])+"|"+\
str(data['V2'])+"|"+\
str(data['V3'])+"|"+\
str(data['V4'])+"|"+\
str(data['V5'])+"|"+\
str(data['V6'])+"|"+\
str(data['V7'])+"|"+\
str(data['V8'])+"|"+\
str(data['V9'])+"|"+\
str(data['V10'])+"|"+\
str(data['tsCampaignId'])+"|"+\
str(data['tsWebsiteId'])+"|"+\
"|"+\
"|"+\
str(data['ClickID'])
stringData += '\n'
f.write(stringData)
f.close()
#print ("stringData===============",stringData)
#sys.exit()
if (len(stringData) <= 0):
return False
        # Clear the buffer
stringData = ''
        # Avoid table locks
#log = eval("AdStatisLog"+str(param[0])+"()")
#res = log.raw("UNLOCK TABLES;");
#command = "mysqlimport -h "+mysql_report['host']+" -u"+mysql_report['user']+" -p'"+mysql_report['passwd']+"' --use-threads=10 --fields-terminated-by='|' -f "+mysql_report['name']+" "+table+" --local '/tmp/"+table+".tbl'"
command = "mysqlimport -h "+mysql_report['host']+" -u"+mysql_report['user']+" -p'"+mysql_report['passwd']+"' --use-threads=10 --fields-terminated-by='|' -f "+mysql_report['name']+" --local /tmp/"+table+".tbl"
print ("command",command)
try:
return_code = subprocess.call(command, shell=True)
#res = log.raw("UNLOCK TABLES;");
print ("return_code:",return_code)
gc.collect()
return True
except Exception as e:
print (traceback.print_exc())
#res = log.raw("UNLOCK TABLES;");
gc.collect()
return False
def main(index):
print ("AutoRes is starting")
print ("Respawning")
try:
AdStatisClass = AdStatis(index)
AdStatisClass.doAdStatis()
except Exception as e:
print (traceback.print_exc())
#main(index)
finally:
print ('success')
if __name__ == "__main__":
index = sys.argv[1]
main(index)
| 2.15625 | 2 |
init.py | shanisma/plant-keeper | 1 | 12768747 | import os
import sys
import django
import psycopg2
from django.core.management import call_command
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
sys.path.insert(0, os.path.abspath("."))
django.setup()
from water.models import Device as WaterDevice
from light.models import ConfigType
from django.contrib.auth.models import User
POSTGRES_DB = os.getenv("POSTGRES_DB", "postgres")
POSTGRES_USER = os.getenv("POSTGRES_USER", "postgres")
POSTGRES_PASSWORD = os.getenv("POSTGRES_PASSWORD", "<PASSWORD>")
POSTGRES_HOST = os.getenv("POSTGRES_HOST", "localhost")
POSTGRES_PORT = os.getenv("POSTGRES_PORT", "5432")
DJANGO_ADMIN_USERNAME = os.getenv("DJANGO_ADMIN_USERNAME")
DJANGO_ADMIN_PASSWORD = os.getenv("DJANGO_ADMIN_PASSWORD")
con = psycopg2.connect(
host=POSTGRES_HOST,
port=POSTGRES_PORT,
database="postgres",
user=POSTGRES_USER,
password=<PASSWORD>,
)
con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
cursor = con.cursor()
try:
cursor.execute(f"create database {POSTGRES_DB};")
print(f"[OK] Database {POSTGRES_DB} created")
except psycopg2.errors.DuplicateDatabase:
print(f"[OK] Database {POSTGRES_DB} already exist")
print("[INFO] Executing django db migrations ...")
call_command("makemigrations", interactive=False, verbosity=2)
call_command("makemigrations", "glbl", interactive=False, verbosity=2)
call_command("makemigrations", "sprinkler", interactive=False, verbosity=2)
call_command("makemigrations", "water", interactive=False, verbosity=2)
call_command("makemigrations", "light", interactive=False, verbosity=2)
call_command("migrate", interactive=False, verbosity=2)
# Water devices
# Creating default device
try:
WaterDevice(tag="tap-water").save()
except django.db.IntegrityError:
pass
# Light default configuration creation
try:
ConfigType(name="daily").save()
except django.db.IntegrityError:
pass
try:
ConfigType(name="planner").save()
except django.db.IntegrityError:
pass
# Admin user creation
try:
User.objects.create_superuser(
DJANGO_ADMIN_USERNAME, "<EMAIL>", DJANGO_ADMIN_PASSWORD
)
except django.db.IntegrityError:
pass
| 2.359375 | 2 |
hostlock/views/views_ajax.py | davidslusser/padlock | 0 | 12768748 | <filename>hostlock/views/views_ajax.py
"""
Views used specifically for handling AJAX Requests
"""
# import system modules
import json
# import django modules
from django.http import HttpResponse
from django.template import Context, loader
from django.views.decorators.http import require_GET, require_POST
# import models
from auditlog.models import LogEntry
from hostlock.models import Lock
# import serializers
from hostlock.serializers import (HostSerializer, LockSerializer)
@require_GET
def get_host_auditlog(request):
"""
Description:
get AuditLog for a given Host.
Args:
request: AJAX request object.
Returns:
HttpResponse: JSON formatted response.
"""
if (request.is_ajax()) and (request.method == 'GET'):
if 'client_response' in request.GET:
hostname = request.GET['client_response']
queryset = LogEntry.objects.filter(content_type__model="host",
object_repr__icontains=hostname)
template = loader.get_template('ajax/show_audit_log.htm')
return HttpResponse(json.dumps({"server_response": template.render({'queryset': queryset})}),
content_type='application/javascript')
else:
return HttpResponse("Invalid request inputs", status=400)
else:
return HttpResponse("Invalid request", status=400)
@require_GET
def get_lock_auditlog(request):
"""
Description:
get AuditLog for a given Lock.
Args:
request: AJAX request object.
Returns:
HttpResponse: JSON formatted response.
"""
if (request.is_ajax()) and (request.method == 'GET'):
if 'client_response' in request.GET:
lock = request.GET['client_response']
queryset = LogEntry.objects.filter(content_type__model="lock",
object_repr__icontains=lock)
template = loader.get_template('ajax/show_audit_log.htm')
return HttpResponse(json.dumps({"server_response": template.render({'queryset': queryset})}),
content_type='application/javascript')
else:
return HttpResponse("Invalid request inputs", status=400)
else:
return HttpResponse("Invalid request", status=400)
@require_POST
def release_host_lock(request):
"""
Manually release a lock on a host
:param request:
ajax request object
:return:
JSON formatted response
"""
    if (request.is_ajax()) and (request.method == 'POST'):
        if 'client_response' in request.POST:
            lock_id = request.POST['client_response']
            lock = Lock.objects.get(id=lock_id)
            if lock.release_lock(user=request.user, manual=True):
                return HttpResponse(json.dumps({'msg': 'Failed to release lock on {}'.format(lock.host.hostname)}), status=400)
            else:
                return HttpResponse(json.dumps({'msg': 'Lock on {} successfully released'.format(lock.host.hostname)}))
        else:
            return HttpResponse("Invalid request inputs", status=400)
    else:
        return HttpResponse("Invalid request", status=400)
# @require_GET
# def get_lock_details(request):
# """
# Description:
# Get all fields for a given lock id.
# Args:
# request: AJAX request object.
# Returns:
# HttpResponse: JSON formatted response.
# """
# if (request.is_ajax()) and (request.method == 'GET'):
# if 'client_response' in request.GET:
# obj_id = request.GET['client_response']
# obj = Lock.objects.get(id=obj_id)
# serialized_obj = LockSerializer(obj)
# template = loader.get_template('ajax/show_object_details.htm')
# # return HttpResponse(json.dumps({"server_response": template.render({'object': obj})}),
# # content_type='application/javascript')
# return HttpResponse(json.dumps({"server_response": template.render({'object': serialized_obj.data})}),
# content_type='application/javascript')
# else:
# return HttpResponse("Invalid request inputs", status=400)
# else:
# return HttpResponse("Invalid request", status=400)
@require_GET
def get_lock_details(request):
"""
Description:
Get all fields for a given lock id.
Args:
request: AJAX request object.
Returns:
HttpResponse: JSON formatted response.
"""
if (request.is_ajax()) and (request.method == 'GET'):
if 'client_response' in request.GET:
obj_id = request.GET['client_response']
obj = Lock.objects.get(id=obj_id)
template = loader.get_template('ajax/detail_hostlock.htm')
return HttpResponse(json.dumps({"server_response": template.render({'object': obj})}),
content_type='application/javascript')
else:
return HttpResponse("Invalid request inputs", status=400)
else:
return HttpResponse("Invalid request", status=400)
| 2.4375 | 2 |
mskpy/photometry/outbursts.py | mkelley/mskpy | 10 | 12768749 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
outbursts --- Lightcurve and outburst analysis
==============================================
"""
__all__ = [
'CometaryTrends'
]
from collections import namedtuple
import logging
import numpy as np
from scipy.cluster import hierarchy
from scipy.optimize import leastsq
import astropy.units as u
from astropy.time import Time
from astropy.stats import sigma_clip
from ..util import linefit
dmdtFit = namedtuple(
'dmdtFit', ['m0', 'dmdt', 'm0_unc', 'dmdt_unc', 'rms', 'rchisq']
)
ExpFit = namedtuple(
'ExpFit', ['dm', 'tau', 'dm_unc', 'tau_unc', 'rms', 'rchisq']
)
Color = namedtuple(
'Color', ['t', 'clusters', 'm_filter', 'm',
'm_unc', 'c', 'c_unc', 'avg', 'avg_unc']
)
Color.__doc__ = 'Color estimate.'
Color.t.__doc__ = 'Average observation date for each color estimate. [astropy Time]'
Color.clusters.__doc__ = 'Observation clusters used to define color; 0 for unused.'
Color.m_filter.__doc__ = 'Filter for m.'
Color.m.__doc__ = 'Apparent mag for each date in given filter. [mag]'
Color.m_unc.__doc__ = 'Uncertainty on m. [mag]'
Color.c.__doc__ = 'Individual colors. [mag]'
Color.c_unc.__doc__ = 'Uncertainty on c. [mag]'
Color.avg.__doc__ = 'Weighted average color. [mag]'
Color.avg_unc.__doc__ = 'Uncertainty on avg. [mag]'
class CometaryTrends:
"""Define lightcurve trends designed for identifying cometary outbursts.
Parameters
----------
eph : sbpy Ephem
Ephemeris of the target. Field requirements depend on the trend
fitting methods to be used. Generally provide date, rh, delta, phase.
m, m_unc : Quantity
Photometry and uncertainty in magnitudes.
filt : array, optional
Filters for each ``m``.
fit_mask : array, optional
``True`` for elements to ignore when fitting (e.g., outbursts).
logger : Logger, optional
Use this logger for messaging.
**kwargs
Any ``CometaryTrends`` property.
Properties
----------
m_original : Quantity
Unmodified (input) photometry.
m : Quantity
Apparent magnitude, possibly limited to one filter (see ``fit_filter``)
or filter transformed (see ``color_transform``).
colors : dict of Quantity
Use these colors when transforming between filters. Key by filter
tuple in wavelength order, e.g., to set g-r use:
`{('g', 'r'): 0.5 * u.mag}`
``colors`` is also set when ``self.color`` is used.
fit_filter : str or None
Set to a filter in ``self.filt`` to limit fitting to this filter.
color_transform : bool
Set to ``True`` to transform observations to that specified in
``fit_filter`` via ``colors``.
"""
def __init__(self, eph, m, m_unc, filt=None, fit_mask=None, logger=None,
**kwargs):
# store parameters and properties
self.eph = eph
self.m = m
self.m_unc = m_unc
self.filt = np.array(filt)
self.fit_mask = (
np.zeros(len(m), bool) if fit_mask is None
else np.array(fit_mask)
)
self.colors = kwargs.get('colors', {})
self.fit_filter = kwargs.get('fit_filter')
self.color_transform = kwargs.get('color_transform', False)
if logger is None:
self.logger = logging.getLogger('CometaryTrends')
else:
self.logger = logger
# parameter check
if not all((isinstance(m, u.Quantity), isinstance(m_unc, u.Quantity))):
raise ValueError(
'm, m_unc must be Quantity in units of magnitude.')
n = [len(x) for x in (eph, m, m_unc, self.fit_mask)]
if filt is not None:
n += [len(filt)]
if len(np.unique(n)) != 1:
raise ValueError('all arrays must have the same length')
@property
def m_original(self):
return self._m
@property
def m(self):
"""Apparent magnitude.
Possibly limited to one filter (see ``fit_filter``) or filter
transformed (see ``color_transform``).
"""
m = np.ma.MaskedArray(self._m.copy(),
mask=np.zeros(len(self._m), bool))
if (self.filt is not None) and (self.fit_filter is not None):
for i in range(len(m)):
if self.filt[i] != self.fit_filter:
if self.color_transform:
# try to color transform
color = (self.filt[i], self.fit_filter)
if color in self.colors:
m[i] -= self.colors[color]
elif color[::-1] in self.colors:
m[i] += self.colors[color[::-1]]
else:
# not possible
m.mask[i] = True
else:
# not color transforming this filter
m.mask[i] = True
return m
@m.setter
def m(self, _m):
self._m = _m
@property
def fit_m(self):
"""Magnitude array masked for fitting."""
m = self.m
m.mask += self.fit_mask
return m
@property
def fit_filter(self):
"""Filter to fit.
        Set to ``None`` to fit all data (without color transformations).
"""
return self._fit_filter
@fit_filter.setter
def fit_filter(self, filt):
if not isinstance(filt, (str, type(None))):
raise ValueError('fit filter must be a string or ``None``')
self._fit_filter = filt
@property
def color_transform(self):
"""Color transformation flag.
If fitting only one filter, set to ``True`` to allow
color transformations via ``self.color``.
"""
return self._color_transform
@color_transform.setter
def color_transform(self, flag):
self._color_transform = bool(flag)
def color(self, blue, red, max_dt=16 / 24, max_unc=0.25 * u.mag,
m_filter=None):
"""Estimate the color, blue - red, using weighted averages.
``eph`` requires ``'date'``.
Masked data is excluded.
Data is not nucleus subtracted.
Parameters
----------
blue: string
The name of the bluer filter.
red: string
The name of the redder filter.
max_dt: float, optional
Maximum time difference to consider when clustering observations.
max_unc: Quantity, optional
Ignore results with uncertainty > ``max_unc``.
m_filter : string, optional
Report mean apparent magnitude in this filter. Default is the
redder filter.
Returns
-------
color: Color
The color results or ``None`` if it cannot be calculated.
"""
if len(self.filt) < 2:
self.logger.info('Not enough filters.')
return None
b = self.filt == blue
r = self.filt == red
if m_filter is None:
m_filter = red
elif m_filter not in [blue, red]:
raise ValueError("m_filter must be one of blue or red")
clusters = hierarchy.fclusterdata(
self.eph['date'].mjd[:, np.newaxis],
max_dt, criterion='distance'
)
self.logger.info(f'{clusters.max()} clusters found.')
mjd = []
m_mean = []
m_mean_unc = []
bmr = []
bmr_unc = []
for cluster in np.unique(clusters):
i = (clusters == cluster) * ~self.fit_mask
# require both filters in this cluster
if (not np.any(b[i])) or (not np.any(r[i])):
clusters[i] = 0
continue
# estimate weighted averages and compute color
wb, sw = np.average(self.m_original[b * i],
weights=self.m_unc[b * i]**-2,
returned=True)
wb_unc = sw**-0.5
wr, sw = np.average(self.m_original[r * i],
weights=self.m_unc[r * i]**-2,
returned=True)
wr_unc = sw**-0.5
if np.hypot(wb_unc, wr_unc) > max_unc:
continue
mjd.append(self.eph['date'].mjd[i].mean())
            if m_filter == blue:
m_mean.append(wb)
m_mean_unc.append(wb_unc)
else:
m_mean.append(wr)
m_mean_unc.append(wr_unc)
bmr.append(wb - wr)
bmr_unc.append(np.hypot(wb_unc, wr_unc))
if len(bmr) == 0:
self.logger.info('No colors measured.')
return None
m_mean = u.Quantity(m_mean)
m_mean_unc = u.Quantity(m_mean_unc)
bmr = u.Quantity(bmr)
bmr_unc = u.Quantity(bmr_unc)
avg, sw = np.average(bmr, weights=bmr_unc**-2, returned=True)
avg_unc = sw**-0.5
self.colors[(blue, red)] = avg
return Color(Time(mjd, format='mjd'), clusters, m_filter,
m_mean, m_mean_unc, bmr, bmr_unc, avg, avg_unc)
@staticmethod
def linear_add(a, b):
"""The sum a+b computed in linear space."""
return -np.log(np.exp(-a.value) + np.exp(-b.to_value(a.unit))) * a.unit
@staticmethod
def linear_subtract(a, b):
"""The difference a-b computed in linear space."""
return -np.log(np.exp(-a.value) - np.exp(-b.to_value(a.unit))) * a.unit
def H(self, fixed_angular_size=False, Phi=None, nucleus=None):
"""Absolute magnitude.
Parameters
----------
fixed_angular_size: bool
``True`` if the photometric aperture is measured with a fixed
            angular size. If so, the target-observer distance (Δ) correction
will be Δ**-1.
Phi: function, optional
Phase function.
nucleus : Quantity
Subtract this nucleus before scaling.
"""
m = self.m.copy()
unit = m.data.unit
if nucleus is not None:
m = np.ma.MaskedArray(self.linear_subtract(m.data, nucleus),
mask=m.mask)
d = 2.5 if fixed_angular_size else 5
H = (m - 5 * np.log10(self.eph['rh'].to_value('au')) * unit
- d * np.log10(self.eph['delta'].to_value('au')) * unit)
if Phi is not None:
H += 2.5 * np.log10(Phi(self.eph['phase'])) * unit
return H
def ostat(self, k=4, dt=14, sigma=2, **kwargs):
"""Compute the outburst statistic for each photometry point.
ostat is calculated for each masked point, but the masked points are
not included in the photometric baseline calculation.
Parameters
----------
k : float, optional
Heliocentric distance slope on apparent magnitude for the baseline
estimate.
dt : float, optional
Number of days of history to use for the baseline estimate.
sigma : float, optional
Number of sigmas to clip the data.
**kwargs
Additional keyword arguments are passed to ``H()``.
Returns
-------
o : array
The outburst statistic.
"""
Hy = (
self.H(**kwargs)
- 2.5 * (k - 2) * np.log10(self.eph['rh'].to_value('au')) * u.mag
)
o = np.ma.zeros(len(Hy))
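        # For each point, build a baseline from the previous `dt` days of data
        # (sigma-clipped, weighted mean) and express the point's offset from that
        # baseline in units of the combined uncertainty.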
for i in range(len(Hy)):
j = (
(self.eph['date'] < self.eph['date'][i])
* (self.eph['date'] > (self.eph['date'][i] - dt * u.day))
)
if j.sum() < 1:
o[i] = np.ma.masked
continue
# reject outliers, calculate weighted mean
good = j * ~Hy.mask * np.isfinite(Hy.data)
if np.sum(good) > 2:
m = sigma_clip(Hy[good].data, sigma=sigma)
else:
m = Hy[good]
m -= Hy[i] # normalize to data point being tested
m_unc = self.m_unc[good]
baseline, sw = np.ma.average(m, weights=m_unc**-2,
returned=True)
baseline_unc = sw**-0.5
unc = max(np.sqrt(baseline_unc**2 + self.m_unc[i]**2).value, 0.1)
o[i] = np.round(baseline.value / unc, 1)
return o
def _fit_setup(self, nucleus=None, absolute=False, **kwargs):
dt = self.eph['date'].mjd * u.day
dt -= dt.min()
if absolute:
m = self.H(nucleus=nucleus, **kwargs)
m.mask = self.fit_m.mask
else:
m = self.fit_m
if nucleus is not None:
m = np.ma.MaskedArray(
self.linear_subtract(m.data, nucleus),
mask=m.mask
)
# subtraction may introduce nans
m.mask += ~np.isfinite(m)
return dt, m
def dmdt(self, nucleus=None, guess=None, k=1, absolute=False, **kwargs):
"""Fit magnitude versus time as a function of ``t**k``.
``eph`` requires ``'date'``.
``absolute`` requires ``'rh'``, ``'delta'``, and ``'phase'`` in
``eph``.
Parameters
----------
nucleus : Quantity
Subtract this nucleus before fitting, assumed to be in the same
filter as ``self.m``.
        guess : tuple of floats
            Initial fit guess: (slope, m0).
k : float, optional
Scale time by ``t^k``.
        absolute : bool, optional
            If ``True``, fit absolute magnitudes computed via ``self.H()``.
        **kwargs
            Additional keyword arguments are passed to ``self.H()``.
Returns
-------
        dt : Quantity
            Time from the first observation.
        trend : Quantity
            Fitted trend, including the nucleus if one was given.
        fit_mask : np.ndarray
            Data points used in the fit.
        fit : dmdtFit
            Fit results.
"""
dt, m = self._fit_setup(nucleus=nucleus, absolute=absolute, **kwargs)
unit = m.data.unit
mask = m.mask
guess = (0.05, 15) if guess is None else guess
r = linefit(dt.value[~mask]**k, m.data.value[~mask],
self.m_unc.value[~mask], guess)
trend = (r[0][1] + r[0][0] * dt.value**k) * unit
fit_unc = r[1] if r[1] is not None else (0, 0)
# restore nucleus?
if nucleus is not None:
trend = self.linear_add(trend, nucleus)
residuals = m - trend
fit = dmdtFit(r[0][1] * unit, r[0][0] * unit / u.day**k,
fit_unc[1] * unit, fit_unc[0] * unit / u.day**k,
np.std(residuals[~mask].data),
np.sum((residuals[~mask].data / self.m_unc[~mask])**2)
/ np.sum(~mask))
return dt, trend, ~mask, fit
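    # Usage sketch (hypothetical values, not part of this module):
    #
    #     dt, trend, used, fit = self.dmdt(nucleus=22.5 * u.mag, k=1,
    #                                      absolute=True)
    #
    # ``trend`` is the fitted light curve with the nucleus restored, and
    # ``used`` marks which points entered the fit.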
def exp(self, baseline, absolute=False, **kwargs):
"""Fit magnitude versus time as a function of ``e**(k*t)``.
``eph`` requires ``'date'``.
``absolute`` requires ``'rh'``, ``'delta'``, and ``'phase'`` in
``eph``.
Parameters
----------
baseline : Quantity
Fit the exponential with respect to this baseline trend (may
include the nucleus). Must be absolute magnitude if ``absolute``
is true.
        absolute : bool, optional
            If ``True``, fit absolute magnitudes computed via ``self.H()``.
        **kwargs
            Additional keyword arguments are passed to ``self.H()``.
Returns
-------
        dt : Quantity
            Time from the first observation.
        trend : Quantity
            Fitted trend, including the baseline.
        fit_mask : np.ndarray
            Data points used in the fit.
        fit : ExpFit
            Fit results.
"""
dt, m = self._fit_setup(absolute=absolute, **kwargs)
dm = m - baseline
unit = m.data.unit
mask = m.mask
def model(dt, peak, tau):
lc = peak * np.exp(-dt / tau)
lc[dt < 0] = 0
return lc
def chi(p, dt, dm, m_unc):
m = model(dt, *p)
return (dm - m) / m_unc
args = (dt.value[~mask], dm.data.value[~mask], self.m_unc.value[~mask])
guess = (dm.compressed().min().value, 10)
r = leastsq(chi, guess, args=args, full_output=True)
        fit_unc = np.sqrt(np.diag(r[1])) if r[1] is not None else (0, 0)
trend = model(dt.value, *r[0]) * unit
# restore baseline
trend = trend + baseline
residuals = m - trend
fit = ExpFit(r[0][0] * unit, r[0][1] * u.day,
fit_unc[0] * unit, fit_unc[1] * u.day,
np.std(residuals[~mask].data),
np.sum((residuals[~mask].data / self.m_unc[~mask])**2)
/ np.sum(~mask))
return dt, trend, ~mask, fit
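    # Usage sketch (hypothetical, assuming a linear baseline from ``dmdt``):
    #
    #     dt, baseline, used, lin = self.dmdt(absolute=True)
    #     dt, trend, used, out = self.exp(baseline, absolute=True)
    #
    # i.e., the exponential (outburst) component is fit on top of a
    # previously determined baseline trend.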
# def mrh(self, fixed_angular_size, filt=None, color_transform=True,
# Phi=phase_HalleyMarcus):
# """Fit magnitude as a function of rh.
# ``eph`` requires rh, delta, phase.
# m = M - k log10(rh) - d log10(Delta) + 2.5 log10(Phi(phase))
# d = 2.5 for fixed_angular_size == True, 5 otherwise.
# Parameters
# ----------
# fixed_angular_size: bool
# Aperture is fixed in angular size.
# filt: str, optional
# Fit only this filter.
# color_transformation: bool, optional
# If fitting only one filter, set to ``True`` to allow
# color transformations via ``self.color``.
# Phi: function, optional
# Use this phase function.
# Returns
# -------
# trend: np.array
# fit_mask: np.array
# Data points used in the fit.
# fit: mrhFit
# """
# m = self.coma(filt)
# if filt is not None and not color_transform:
# m[self.filt != filt] = np.nan
# if fixed_angular_size:
# d = 2.5
# else:
# d = 5
# dm = (-d * np.log10(self.eph['delta'].to_value('au'))
# + 2.5 * np.log10(Phi(self.eph['phase']))) * u.mag
# i = ~self.fit_mask * np.isfinite(m)
# r = linefit(self.eph['rh'][i].value, (m - dm)[i].value,
# self.m_unc[i].value, (0.05, 15))
# trend = (r[0][1] + r[0][0] * self.eph['rh'].value) * m.unit + dm
# residuals = m - trend
# # restore nucleus?
# if self.nucleus is not None:
# trend = -np.log(np.exp(-trend.value) +
# np.exp(-self.nucleus.value)) * u.mag
# fit = mrhFit(r[0][1] * m.unit, r[0][0] * m.unit / u.day,
# r[1][1] * m.unit, r[1][0] * m.unit / u.day,
# np.std(residuals[i]),
# np.sum((residuals[i] / self.m_unc[i])**2) / np.sum(i))
# return trend, i, fit
| 1.632813 | 2 |
api/users/me/__init__.py | ezchat/backend | 3 | 12768750 | <filename>api/users/me/__init__.py
# pylint: disable=no-name-in-module
from api.users.me.channels import UserMeChannels # noqa: F401
from api.users.me.index import UserMe # noqa: F401
| 1.132813 | 1 |