repo_name | path | text |
|---|---|---|
dspasic/aws-cf-update-tags | cfut/update_tags.py | #! /usr/bin/env python3
__author__ = '<NAME> <<EMAIL>>'
__doc__ = """Updating Tags for Existing CloudFormation Stacks
This script is iterating through all root stacks and updates the tags.
It uses the UsePreviousTemplate and UsePreviousValue for each parameter to
achieve this task without a need to touch the templates.
In case all declarations are already done, update_stack function will throw an
ValidationError with a message that nothing has been changed. This is absolutely
okay (except that the design is not) and the script will just ignore this case.
To resolve the function and the environment it uses the stack name where by
convention all information can be extracted. There is one exception namely
the base stack names. In this case the environment will be set to n/a and
the function to `base`.
You can as well map function names to a more common name. This can be useful
if you want to group stacks by there product. To achieve this you have just
to adjust the FUNC_MAPS list with corresponding callbacks.
This script here will filter *only* inventory stacks and accordingly set the
tags for the matching team. So be aware of this settings, before execution.
"""
import argparse
import re
import pprint
import sys
import logging
from typing import Optional, Generator, Callable, Iterable, List, Dict
_log = logging.getLogger(__name__)
_log.setLevel(logging.WARNING)
_default_handler = logging.StreamHandler(stream=sys.stdout)
_error_handler = logging.StreamHandler(stream=sys.stderr)
_error_handler.setLevel(logging.ERROR)
_log.addHandler(_default_handler)
_log.addHandler(_error_handler)
try:
import boto3
from botocore.exceptions import ClientError
except ImportError:
_log.fatal('Please ensure you have boto3 installed')
sys.exit(-1)
_Param = Dict[str, str]
_Tag = Dict[str, str]
# Map some function names extracted from stack name
_func_maps = [
lambda x: 'matcher' if re.match('^matcher-', x) else x,
lambda x: 'sink' if re.match('^sink-', x) else x,
lambda x: 'crowd' if re.match('^crowd-', x) else x,
lambda x: 'matchbox' if re.match('^matchbox-', x) else x,
lambda x: 'matchbox' if re.match('^matching-', x) else x,
]
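# For example, to additionally group every stack whose name starts with
# "billing-" under the (hypothetical) product name "payments", append one
# more callback:
#
#   _func_maps.append(lambda x: 'payments' if re.match('^billing-', x) else x)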
_cf = boto3.client('cloudformation')
def get_stacks(ntoken: Optional[str] = None) -> Generator:
"""Get All Root CloudFormation Stacks From an AWS Account"""
if ntoken:
kwargs = dict(NextToken=ntoken)
else:
kwargs = {}
res = _cf.describe_stacks(**kwargs)
    if not res['Stacks']:
        return  # PEP 479: return, don't raise StopIteration, inside a generator
for s in res['Stacks']:
# Avoid nested stacks
if not s.get('RootId', None):
yield s
if res.get('NextToken', None):
yield from get_stacks(ntoken=res['NextToken'])
def filter_stack(stacks: Iterable, f: Callable) -> Generator:
    """Filter Stacks Based On the Given Filter Condition"""
for s in stacks:
if f(s):
yield s
def prepare_params(params: List[_Param]) -> List[_Param]:
"""Return a List of Given Parameters With UserPreviousValues Enabled"""
res = []
for param in params:
res.append(dict(ParameterKey=param['ParameterKey'],
UsePreviousValue=True))
return res
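# For illustration, a parameter list like (key and value assumed)
#   [{'ParameterKey': 'VpcId', 'ParameterValue': 'vpc-123'}]
# becomes
#   [{'ParameterKey': 'VpcId', 'UsePreviousValue': True}]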
def update_stack(stackname: str, params: List[_Param], tags: List[_Tag]):
"""Updates Given Stack with Previous Template"""
try:
_cf.update_stack(
StackName=stackname,
UsePreviousTemplate=True,
Parameters=params,
Capabilities=['CAPABILITY_NAMED_IAM'],
Tags=tags
)
    except ClientError as err:
        # CloudFormation responds with "No updates are to be performed."
        # when nothing has changed; ignore that case, re-raise anything else
        if not re.search('no updates are to be performed', str(err), re.IGNORECASE):
            raise
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Updates Tags for CloudFormation Stacks',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-v', '--verbosity', help='Specify verbosity',
action='count', default=0)
args = parser.parse_args()
if args.verbosity >= 2:
_log.setLevel(logging.DEBUG)
elif args.verbosity >= 1:
_log.setLevel(logging.INFO)
else:
_log.setLevel(logging.WARNING)
pp = pprint.PrettyPrinter(indent=2, stream=sys.stdout)
# filter only for inventory stacks
sn = re.compile('inventory', re.IGNORECASE)
resn = lambda x: sn.match(x['StackName'])
# Extract some useful parts like function and env from stack name
    snext = re.compile(r'(\w+)--([\w-]+)--(\w+)')
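    # By convention a stack name looks like '<name>--<function>--<env>';
    # a hypothetical 'inventory--matcher-import--live' thus yields
    # funcname='matcher-import' and env='live', while names without the
    # separators fall back to funcname='base' and env=''.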
for stack in filter_stack(get_stacks(), resn):
stackname = stack['StackName']
snparts = snext.match(stackname)
if snparts:
funcname = snparts.group(2)
env = snparts.group(3)
else:
funcname = 'base'
env = ''
for fmap in _func_maps:
funcname = fmap(funcname)
params = prepare_params(stack.get('Parameters', []))
_log.info(f'Processing {stackname}')
_log.debug(f' Determined function {funcname}')
_log.debug(f' Determined environment {env}')
_log.debug(f' Determined parameters {pp.pformat(params)}')
tags = [
{
'Key': 'Pillar',
'Value': 'hs'
},
{
'Key': 'Domain',
'Value': 'identity'
},
{
'Key': 'Team',
'Value': 'matching'
},
{
'Key': 'Environment',
'Value': env if env else 'n/a'
},
{
'Key': 'Function',
'Value': funcname
}
]
update_stack(stackname, params, tags)
sys.exit(0)
|
vincentlux/TextBrewer | src/textbrewer/distiller_multiteacher.py | from .distiller_utils import *
from .distiller_basic import BasicDistiller
class MultiTeacherDistiller(BasicDistiller):
"""
Distills multiple teacher models (of the same tasks) into a student model. **It doesn't support intermediate feature matching**.
Args:
train_config (:class:`TrainingConfig`): training configuration.
distill_config (:class:`DistillationConfig`): distillation configuration.
model_T (List[torch.nn.Module]): list of teacher models.
model_S (torch.nn.Module): student model.
adaptor_T (Callable): teacher model's adaptor.
adaptor_S (Callable): student model's adaptor.
The roles of `adaptor_T` and `adaptor_S` are explained in :py:func:`adaptor`.
"""
def __init__(self, train_config,
distill_config,
model_T,
model_S,
adaptor_T,
adaptor_S):
super(MultiTeacherDistiller, self).__init__(
train_config, distill_config,
model_T, model_S,
adaptor_T, adaptor_S)
if hasattr(self.adaptor_T,'__iter__'):
assert len(self.adaptor_T)==len(self.model_T)
self.avg = True
def train_on_batch(self, batch, args):
if self.d_config.is_caching_logits is False:
if type(batch) is dict:
for k,v in batch.items():
if type(v) is torch.Tensor:
batch[k] = v.to(self.t_config.device)
with torch.no_grad():
results_T = [model_t(**batch, **args) for model_t in self.model_T]
results_S = self.model_S(**batch, **args)
else:
moved_batch = tuple(item.to(self.t_config.device) if type(item) is torch.Tensor else item for item in batch)
batch = moved_batch
with torch.no_grad():
results_T = [model_T(*batch, **args) for model_T in self.model_T]
results_S = self.model_S(*batch, **args)
if hasattr(self.adaptor_T,'__iter__'):
results_T = [post_adaptor(adpt_t(batch,results_t)) for results_t,adpt_t in zip(results_T,self.adaptor_T)]
else:
results_T = [post_adaptor(self.adaptor_T(batch,results_t)) for results_t in results_T]
results_S = post_adaptor(self.adaptor_S(batch,results_S))
else:
batch, cached_logits = batch
if type(batch) is dict:
new_batch = {}
for k,v in batch.items():
if type(v) is torch.Tensor:
new_batch[k] = v.to(self.t_config.device)
else:
new_batch[k] = v
batch = new_batch
results_S = self.model_S(**batch, **args)
else:
batch = tuple(item.to(self.t_config.device) if type(item) is torch.Tensor else item for item in batch)
results_S = self.model_S(*batch, **args)
results_S = post_adaptor(self.adaptor_S(batch,results_S))
results_T = [{'logits': [lo.to(self.t_config.device) for lo in logits]} for logits in cached_logits]
if 'logits_mask' in results_S:
results_T[0]['logits_mask'] = results_S['logits_mask']
logits_list_T = [results_t['logits'] for results_t in results_T] # list of tensor
logits_list_S = results_S['logits'] # list of tensor
total_loss = 0
if 'logits_mask' in results_S:
masks_list_S = results_S['logits_mask']
logits_list_S = select_logits_with_mask(logits_list_S,masks_list_S) #(mask_sum, num_of_class)
if 'logits_mask' in results_T[0]:
masks_list_T = results_T[0]['logits_mask']
logits_list_T = [select_logits_with_mask(logits_list_t,masks_list_T)
for logits_list_t in logits_list_T] #(mask_sum, num_of_class)
if self.d_config.probability_shift is True:
labels_list = results_S['labels']
for l_T, l_S, labels in zip(zip(*logits_list_T),logits_list_S,labels_list):
mean_l_T = sum(l_T)/len(l_T)
mean_l_T = probability_shift_(mean_l_T, labels)
if self.d_config.temperature_scheduler is not None:
temperature = self.d_config.temperature_scheduler(l_S, mean_l_T, self.d_config.temperature)
else:
temperature = self.d_config.temperature
total_loss += self.kd_loss(l_S, mean_l_T, temperature) * self.d_config.kd_loss_weight
else:
for l_T, l_S in zip(zip(*logits_list_T),logits_list_S):
mean_l_T = sum(l_T)/len(l_T)
if self.d_config.temperature_scheduler is not None:
temperature = self.d_config.temperature_scheduler(l_S, mean_l_T, self.d_config.temperature)
else:
temperature = self.d_config.temperature
total_loss += self.kd_loss(l_S, mean_l_T, temperature) * self.d_config.kd_loss_weight
if 'losses' in results_S:
for loss in results_S['losses']:
# in case of multi-GPU
total_loss += loss.mean() * self.d_config.hard_label_weight
return total_loss
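        # Note: with multiple teachers the KD term is computed against the
        # element-wise average of the teachers' logits, i.e.
        # loss = kd_loss(l_S, (1/K) * sum_k l_T^(k), temperature) for K teachers.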
def cache_logits(self, batch, args, batch_postprocessor):
if batch_postprocessor is not None:
batch = batch_postprocessor(batch)
if type(batch) is dict:
new_batch = {}
for k,v in batch.items():
if type(v) is torch.Tensor:
new_batch[k] = v.to(self.t_config.device)
else:
new_batch[k] = v
with torch.no_grad():
results_T = [model_t(**new_batch, **args) for model_t in self.model_T]
else:
new_batch = tuple(item.to(self.t_config.device) if type(item) is torch.Tensor else item for item in batch)
with torch.no_grad():
results_T = [model_t(*new_batch, **args) for model_t in self.model_T]
if hasattr(self.adaptor_T,'__iter__'):
results_T = [post_adaptor(adpt_t(batch,results_t)) for results_t,adpt_t in zip(results_T,self.adaptor_T)]
else:
results_T = [post_adaptor(self.adaptor_T(batch,results_t)) for results_t in results_T]
self.logits_cache.append([batch, [[logits.to('cpu') for logits in results_t['logits']] for results_t in results_T]]) |
vincentlux/TextBrewer | setup.py | """
Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
To create the package for pypi.
1. Change the version in __init__.py, setup.py as well as docs/source/conf.py.
2. Commit these changes with the message: "Release: VERSION"
3. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' "
Push the tag to git: git push --tags origin master
4. Build both the sources and the wheel. Do not change anything in setup.py between
creating the wheel and the source distribution (obviously).
For the wheel, run: "python setup.py bdist_wheel" in the top level directory.
(this will build a wheel for the python version you use to build it).
For the sources, run: "python setup.py sdist"
You should now have a /dist directory with both .whl and .tar.gz source versions.
5. Check that everything looks correct by uploading the package to the pypi test server:
twine upload dist/* -r pypitest
(pypi suggest using twine as other methods upload files via plaintext.)
Check that you can install it in a virtualenv by running:
pip install -i https://testpypi.python.org/pypi transformers
6. Upload the final version to actual pypi:
twine upload dist/* -r pypi
7. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
"""
import shutil
from pathlib import Path
from setuptools import find_packages, setup
setup(
name="textbrewer",
version="0.1.10",
author="ziqingyang",
author_email="<EMAIL>",
description="PyTorch-based knowledge distillation toolkit for natural language processing",
long_description="PyTorch-based knowledge distillation toolkit for natural language processing.",
#long_description=open("READMEshort.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="NLP deep learning knowledge distillation pytorch",
#license="",
url="http://textbrewer.hfl-rc.com",
#package_dir={"": "src"},
packages=['textbrewer'],
package_dir={'':'src'},
install_requires=[
"numpy",
"torch >= 1.1",
"tensorboard",
"tqdm"
],
python_requires=">=3.6",
classifiers=[
#"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
|
vincentlux/TextBrewer | src/textbrewer/distillation.py | import torch
from collections import OrderedDict
from tqdm import tqdm
from torch import nn
try:
from tensorboardX import SummaryWriter
except ImportError:
from torch.utils.tensorboard import SummaryWriter
import os, random, json
import numpy as np
import logging
from typing import Optional, Dict, Union
from .presets import *
from .configurations import TrainingConfig, DistillationConfig
from .compatibility import mask_dtype
logger = logging.getLogger("Distillation")
#logger.setLevel(logging.INFO)
class CustomMatch:
def __init__(self, module_T, module_S, weight, loss,
proj_func =None, proj_group = None):
self.module_T = module_T
self.module_S = module_S
        self.loss = loss
        self.weight = weight
self.proj_func = proj_func
if proj_group is None:
self.proj_group = dict()
else:
self.proj_group = proj_group
def to_dict(self):
return {'module_T':self.module_T,
'module_S':self.module_S,
'weight':self.weight,
'loss':self.loss,
'proj_func':self.proj_func,
'proj_group':self.proj_group}
@classmethod
def from_dict(cls,dict_object):
return cls(**dict_object)
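# A hypothetical match between one teacher layer and one student layer
# (the module and loss objects are assumed to exist):
#
#   match = CustomMatch(module_T=teacher.encoder.layer[8],
#                       module_S=student.encoder.layer[2],
#                       weight=1.0, loss=hidden_mse_loss)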
class DistillationContext:
def __init__(self):
self.model_S = None
self.model_T = None
def __enter__(self):
if isinstance(self.model_T,(list,tuple)):
self.model_T_is_training = [model_t.training for model_t in self.model_T]
for model_t in self.model_T:
model_t.eval()
elif isinstance(self.model_T,dict):
self.model_T_is_training = {name:model.training for name,model in self.model_T.items()}
else:
self.model_T_is_training = self.model_T.training
self.model_T.eval()
self.model_S_is_training = self.model_S.training
self.model_S.train()
def __exit__(self, exc_type, exc_val, exc_tb):
#Restore model status
if isinstance(self.model_T,(list,tuple)):
for i in range(len(self.model_T_is_training)):
self.model_T[i].train(self.model_T_is_training[i])
elif isinstance(self.model_T,dict):
for name,is_training in self.model_T_is_training.items():
self.model_T[name].train(is_training)
else:
self.model_T.train(self.model_T_is_training)
self.model_S.train(self.model_S_is_training)
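# Entering the context puts the teacher(s) in eval mode and the student in
# train mode; exiting restores the original `.training` flags, e.g.:
#
#   with distiller:              # teachers .eval(), student .train()
#       distiller.train(...)
#   # original train/eval modes restored here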
class AbstractDistiller(DistillationContext):
def __init__(self, train_config: TrainingConfig,
distill_config: DistillationConfig,
model_T, model_S, adaptor_T, adaptor_S):
super(AbstractDistiller, self).__init__()
self.t_config = train_config
self.d_config = distill_config
self.model_T = model_T
self.model_S = model_S
self.adaptor_S = adaptor_S
self.adaptor_T = adaptor_T
self.kd_loss = KD_LOSS_MAP[self.d_config.kd_loss_type]
if self.t_config.log_dir is not None:
self.tb_writer = SummaryWriter(log_dir = self.t_config.log_dir)
else:
self.tb_writer = no_op
self.print_freq = 20
class BasicDistiller(AbstractDistiller):
"""
Performs **single-teacher single-task** distillation, provides basic distillation strategies.
Args:
train_config (:class:`TrainingConfig`): training configuration.
distill_config (:class:`DistillationConfig`): distillation configuration.
model_T (:class:`torch.nn.Module`): teacher model.
model_S (:class:`torch.nn.Module`): student model.
adaptor_T (Callable): teacher model's adaptor.
adaptor_S (Callable): student model's adaptor.
The roles of `adaptor_T` and `adaptor_S` are explained in :py:func:`adaptor`.
"""
def __init__(self, train_config,
distill_config,
model_T,
model_S,
adaptor_T,
adaptor_S):
super(BasicDistiller, self).__init__(train_config, distill_config, model_T, model_S, adaptor_T, adaptor_S)
def save_and_callback(self,global_step, step, epoch, callback):
logger.info(f"Saving at global step {global_step}, epoch step {step + 1} epoch {epoch+1}")
        coreModel = self.model_S.module if hasattr(self.model_S, "module") else self.model_S
state_dict = coreModel.state_dict()
torch.save(state_dict, os.path.join(self.t_config.output_dir, f"gs{global_step}.pkl"))
if callback is not None:
logger.info("Running callback function...")
callback(model=self.model_S, step=global_step)
self.model_S.train()
def write_loss(self, total_loss, writer_step):
cpu_total_loss = total_loss.cpu().item() * self.t_config.gradient_accumulation_steps
self.tb_writer.add_scalar('scalar/total_loss', cpu_total_loss, writer_step)
#for name, loss in losses_dict.items():
# cpu_loss = loss.cpu().item() * self.t_config.gradient_accumulation_steps
# self.tb_writer.add_scalar(f"scalar/{name}", cpu_loss, writer_step)
def train(self, optimizer, scheduler, dataloader, num_epochs, num_steps=None, callback=None, batch_postprocessor=None, **args):
"""
trains the student model.
Args:
optimizer: optimizer.
scheduler: used to adjust learning rate, optional, can be None.
dataloader: dataset iterator.
num_epochs (int): number of training epochs.
            num_steps (int): number of training steps. If it is not None, the distiller ignores `num_epochs` and trains for `num_steps`; the dataloader may then have an unknown size, i.e., no `__len__` attribute. The dataloader is cycled automatically after iterating over the whole dataset.
callback (Callable): function called after each epoch, can be None. It is called as ``callback(model=self.model_S, step = global_step)``. It can be used to evaluate the model at each checkpoint.
batch_postprocessor (Callable): a function for post-processing batches. It should take a batch and return a batch. Its output is fed to the models and adaptors.
**args: additional arguments fed to the model.
Note:
* If the batch is a list or tuple, model is called as: ``model(*batch, **args)``. Make sure the order of elements in the batch matches their order in ``model.forward``.
* If the batch is a dict, model is called as: ``model(**batch,**args)``. Make sure the keys of the batch match the arguments of the ``model.forward``.
"""
if num_steps is not None:
total_global_steps = num_steps
ckpt_steps =self.t_config.ckpt_steps
print_every = ckpt_steps // self.print_freq
if print_every == 0:
print_every = ckpt_steps
checkpoints = [ i * ckpt_steps for i in range(1,num_steps//ckpt_steps+1)] + [total_global_steps]
logger.info(f"Total training steps: {total_global_steps}")
logger.info(f"Checkpoints(step): {checkpoints}")
global_step = 0
writer_step = 0
for step, batch in tqdm(enumerate(cycle(dataloader)),disable=None):
if batch_postprocessor is not None:
batch = batch_postprocessor(batch)
total_loss = self.train_on_batch(batch,args)
total_loss /= self.t_config.gradient_accumulation_steps
total_loss.backward()
self.write_loss(total_loss, writer_step)
writer_step += 1
if (step+1)%self.t_config.gradient_accumulation_steps == 0:
optimizer.step()
if scheduler is not None:
scheduler.step()
self.model_S.zero_grad()
global_step += 1
if self.d_config.kd_loss_weight_scheduler is not None:
self.d_config.kd_loss_weight = \
self.d_config.kd_loss_weight_scheduler(global_step/total_global_steps)
if self.d_config.hard_label_weight_scheduler is not None:
self.d_config.hard_label_weight = \
self.d_config.hard_label_weight_scheduler(global_step/total_global_steps)
if (global_step) % print_every == 0:
logger.info(f"Global step: {global_step}, epoch step:{step+1}")
if (global_step%ckpt_steps==0) or global_step==total_global_steps:
self.save_and_callback(global_step, step, 0, callback)
logger.info("Training finished")
return
train_steps_per_epoch = len(dataloader)//self.t_config.gradient_accumulation_steps
total_global_steps = train_steps_per_epoch * num_epochs
print_every = train_steps_per_epoch // self.print_freq
if print_every == 0:
print_every = train_steps_per_epoch
checkpoints = [int(train_steps_per_epoch*ci/self.t_config.ckpt_frequency) for ci in range(self.t_config.ckpt_frequency)]
logger.info(f"Training steps per epoch: {train_steps_per_epoch}")
logger.info(f"Checkpoints(step): {checkpoints}")
global_step = 0
writer_step = 0
for current_epoch in tqdm(range(int(num_epochs)),disable=None):
logger.info(f"Epoch {current_epoch+1}")
self.model_S.zero_grad()
logger.info(f"Length of current epoch in forward batch: {len(dataloader)}")
for step, batch in tqdm(enumerate(dataloader),disable=None):
if batch_postprocessor is not None:
batch = batch_postprocessor(batch)
total_loss = self.train_on_batch(batch,args)
total_loss /= self.t_config.gradient_accumulation_steps
total_loss.backward()
self.write_loss(total_loss, writer_step)
writer_step += 1
if (step+1)%self.t_config.gradient_accumulation_steps == 0:
optimizer.step()
if scheduler is not None:
scheduler.step()
self.model_S.zero_grad()
global_step += 1
if self.d_config.kd_loss_weight_scheduler is not None:
self.d_config.kd_loss_weight = \
self.d_config.kd_loss_weight_scheduler(global_step/total_global_steps)
if self.d_config.hard_label_weight_scheduler is not None:
self.d_config.hard_label_weight = \
self.d_config.hard_label_weight_scheduler(global_step/total_global_steps)
if (global_step) % print_every == 0:
logger.info(f"Global step: {global_step}, epoch step:{step+1}")
if (global_step%train_steps_per_epoch in checkpoints) \
and ((current_epoch+1)%self.t_config.ckpt_epoch_frequency==0 or current_epoch+1==num_epochs):
self.save_and_callback(global_step, step, current_epoch, callback)
logger.info(f"Epoch {current_epoch+1} finished")
def train_on_batch(self, batch, args):
#TODO implement caching
if type(batch) is dict:
for k,v in batch.items():
if type(v) is torch.Tensor:
batch[k] = v.to(self.t_config.device)
with torch.no_grad():
results_T = self.model_T(**batch, **args)
results_S = self.model_S(**batch, **args)
else:
moved_batch = tuple(item.to(self.t_config.device) if type(item) is torch.Tensor else item for item in batch)
batch = moved_batch
with torch.no_grad():
results_T = self.model_T(*batch, **args)
results_S = self.model_S(*batch, **args)
results_T = post_adaptor(self.adaptor_T(batch,results_T))
results_S = post_adaptor(self.adaptor_S(batch,results_S))
logits_list_T = results_T['logits'] # list of tensor
logits_list_S = results_S['logits'] # list of tensor
total_loss = 0
if 'logits_mask' in results_S:
masks_list_S = results_S['logits_mask']
logits_list_S = select_logits_with_mask(logits_list_S,masks_list_S) #(mask_sum, num_of_class)
if 'logits_mask' in results_T:
masks_list_T = results_T['logits_mask']
logits_list_T = select_logits_with_mask(logits_list_T,masks_list_T) #(mask_sum, num_of_class)
if self.d_config.probability_shift is True:
labels_list = results_S['labels']
for l_T, l_S, labels in zip(logits_list_T, logits_list_S, labels_list):
l_T = probability_shift_(l_T, labels)
if self.d_config.temperature_scheduler is not None:
temperature = self.d_config.temperature_scheduler(l_S, l_T, self.d_config.temperature)
else:
temperature = self.d_config.temperature
kd_loss = self.kd_loss(l_S, l_T, temperature) * self.d_config.kd_loss_weight
total_loss += kd_loss
else:
for l_T,l_S in zip(logits_list_T,logits_list_S):
if self.d_config.temperature_scheduler is not None:
temperature = self.d_config.temperature_scheduler(l_S, l_T, self.d_config.temperature)
else:
temperature = self.d_config.temperature
kd_loss = self.kd_loss(l_S, l_T, temperature) * self.d_config.kd_loss_weight
total_loss += kd_loss
if 'losses' in results_S:
for loss in results_S['losses']:
# in case of multi-GPU
total_loss += loss.mean() * self.d_config.hard_label_weight
return total_loss
class MultiTeacherDistiller(BasicDistiller):
"""
Distills multiple teacher models (of the same tasks) into a student model. **It doesn't support intermediate feature matching**.
Args:
train_config (:class:`TrainingConfig`): training configuration.
distill_config (:class:`DistillationConfig`): distillation configuration.
model_T (List[torch.nn.Module]): list of teacher models.
model_S (torch.nn.Module): student model.
adaptor_T (Callable): teacher model's adaptor.
adaptor_S (Callable): student model's adaptor.
The roles of `adaptor_T` and `adaptor_S` are explained in :py:func:`adaptor`.
"""
def __init__(self, train_config,
distill_config,
model_T,
model_S,
adaptor_T,
adaptor_S):
super(MultiTeacherDistiller, self).__init__(
train_config, distill_config,
model_T, model_S,
adaptor_T, adaptor_S)
if hasattr(self.adaptor_T,'__iter__'):
assert len(self.adaptor_T)==len(self.model_T)
self.avg = True
def train_on_batch(self, batch, args):
# Basic uses no cache
selected = None
num_T = len(self.model_T)
if type(batch) is dict:
for k,v in batch.items():
if type(v) is torch.Tensor:
batch[k] = v.to(self.t_config.device)
with torch.no_grad():
if self.avg:
results_T = [model_t(**batch, **args) for model_t in self.model_T]
else:
selected = random.choice(range(num_T))
results_T = [self.model_T[selected](**batch,**args)]
results_S = self.model_S(**batch, **args)
else:
moved_batch = tuple(item.to(self.t_config.device) if type(item) is torch.Tensor else item for item in batch)
batch = moved_batch
with torch.no_grad():
if self.avg:
results_T = [model_T(*batch, **args) for model_T in self.model_T]
else:
selected = random.choice(range(num_T))
results_T = [self.model_T[selected](*batch,**args)]
results_S = self.model_S(*batch, **args)
if hasattr(self.adaptor_T,'__iter__'):
if self.avg:
results_T = [post_adaptor(adpt_t(batch,results_t)) for results_t,adpt_t in zip(results_T,self.adaptor_T)]
else:
results_T = [post_adaptor(self.adaptor_T[selected](batch,results_T[0]))]
else:
results_T = [post_adaptor(self.adaptor_T(batch,results_t)) for results_t in results_T]
results_S = post_adaptor(self.adaptor_S(batch,results_S))
logits_list_T = [results_t['logits'] for results_t in results_T] # list of tensor
logits_list_S = results_S['logits'] # list of tensor
total_loss = 0
if 'logits_mask' in results_S:
masks_list_S = results_S['logits_mask']
logits_list_S = select_logits_with_mask(logits_list_S,masks_list_S) #(mask_sum, num_of_class)
if 'logits_mask' in results_T[0]:
masks_list_T = results_T[0]['logits_mask']
logits_list_T = [select_logits_with_mask(logits_list_t,masks_list_T)
for logits_list_t in logits_list_T] #(mask_sum, num_of_class)
if self.d_config.probability_shift is True:
labels_list = results_S['labels']
for l_T, l_S, labels in zip(zip(*logits_list_T),logits_list_S,labels_list):
mean_l_T = sum(l_T)/len(l_T)
mean_l_T = probability_shift_(mean_l_T, labels)
if self.d_config.temperature_scheduler is not None:
temperature = self.d_config.temperature_scheduler(l_S, mean_l_T, self.d_config.temperature)
else:
temperature = self.d_config.temperature
total_loss += self.kd_loss(l_S, mean_l_T, temperature) * self.d_config.kd_loss_weight
else:
for l_T, l_S in zip(zip(*logits_list_T),logits_list_S):
mean_l_T = sum(l_T)/len(l_T)
if self.d_config.temperature_scheduler is not None:
temperature = self.d_config.temperature_scheduler(l_S, mean_l_T, self.d_config.temperature)
else:
temperature = self.d_config.temperature
total_loss += self.kd_loss(l_S, mean_l_T, temperature) * self.d_config.kd_loss_weight
if 'losses' in results_S:
for loss in results_S['losses']:
# in case of multi-GPU
total_loss += loss.mean() * self.d_config.hard_label_weight
return total_loss
class GeneralDistiller(BasicDistiller):
"""
Supports intermediate features matching. **Recommended for single-teacher single-task distillation**.
Args:
train_config (:class:`TrainingConfig`): training configuration.
distill_config (:class:`DistillationConfig`): distillation configuration.
model_T (:class:`torch.nn.Module`): teacher model.
model_S (:class:`torch.nn.Module`): student model.
adaptor_T (Callable): teacher model's adaptor.
adaptor_S (Callable): student model's adaptor.
custom_matches (list): supports more flexible user-defined matches (testing).
The roles of `adaptor_T` and `adaptor_S` are explained in :py:func:`adaptor`.
"""
def __init__(self, train_config,
distill_config,
model_T,
model_S,
adaptor_T,
adaptor_S,
custom_matches: Optional[List[CustomMatch]] = None):
# custom_matches=[{'module_T': module_T, 'module_S':module_S,
# 'loss': loss, 'weight': weight},...]
super(GeneralDistiller, self).__init__(train_config, distill_config, model_T, model_S, adaptor_T, adaptor_S)
self.projs = []
self.projs_group = []
for im in self.d_config.intermediate_matches:
if im.proj is not None:
projection = im.proj[0]
dim_in = im.proj[1]
dim_out = im.proj[2]
self.projs_group.append(im.proj[3])
self.projs.append(PROJ_MAP[projection](dim_in,dim_out))
self.projs[-1].to(self.t_config.device)
else:
self.projs.append(None)
self.projs_group.append(None)
self.has_custom_matches = False
if custom_matches:
self.handles_T = []
self.handles_S = []
self.custom_matches_cache = {'hook_outputs_T': [], 'hook_outputs_S': [], 'match_proj_funcs': [],
'match_weights': [], 'match_losses': [], 'match_proj_groups': []}
for match in custom_matches:
self.add_match(match)
self.has_custom_matches = True
def save_and_callback(self,global_step, step, epoch, callback):
if self.has_custom_matches:
handles_T = self.model_T._forward_hooks
handles_S = self.model_S._forward_hooks
self.model_S._forward_hooks = OrderedDict() # clear hooks
self.model_T._forward_hooks = OrderedDict()
super(GeneralDistiller, self).save_and_callback(global_step, step, epoch, callback)
if self.has_custom_matches:
self.model_S._forward_hooks = handles_S # restore hooks
self.model_T._forward_hooks = handles_T
def train(self, optimizer, scheduler, dataloader, num_epochs, num_steps=None, callback=None, batch_postprocessor=None, **args):
"""
trains the student model. See :meth:`BasicDistiller.train`.
"""
# update optimizer for projection layer
for proj,proj_group in zip(self.projs, self.projs_group):
if proj is not None:
assert isinstance(proj,nn.Module)
optimizer.add_param_group({**{'params':proj.parameters()},**proj_group})
if self.has_custom_matches:
for proj_func,proj_group in zip(self.custom_matches_cache['match_proj_funcs'],
self.custom_matches_cache['match_proj_groups']):
if isinstance(proj_func,nn.Module):
optimizer.add_param_group({**{'params':proj_func.parameters()},**proj_group})
logger.debug("Optimizer param group: ")
for group in optimizer.param_groups:
for k,v in group.items():
logger.debug(f"{k}:{v}")
super(GeneralDistiller, self).train(optimizer, scheduler, dataloader, num_epochs, num_steps, callback, batch_postprocessor, **args)
def train_on_batch(self, batch, args):
if type(batch) is dict:
for k,v in batch.items():
if type(v) is torch.Tensor:
batch[k] = v.to(self.t_config.device)
with torch.no_grad():
results_T = self.model_T(**batch, **args)
results_S = self.model_S(**batch, **args)
else:
moved_batch = tuple(item.to(self.t_config.device) if type(item) is torch.Tensor else item for item in batch)
batch = moved_batch
with torch.no_grad():
results_T = self.model_T(*batch, **args)
results_S = self.model_S(*batch, **args)
results_T = post_adaptor(self.adaptor_T(batch,results_T))
results_S = post_adaptor(self.adaptor_S(batch,results_S))
total_loss = 0
if 'logits' in results_T and 'logits' in results_S:
logits_list_T = results_T['logits'] # list of tensor
logits_list_S = results_S['logits'] # list of tensor
if 'logits_mask' in results_S:
masks_list_S = results_S['logits_mask']
logits_list_S = select_logits_with_mask(logits_list_S,masks_list_S) #(mask_sum, num_of_class)
if 'logits_mask' in results_T:
masks_list_T = results_T['logits_mask']
logits_list_T = select_logits_with_mask(logits_list_T,masks_list_T) #(mask_sum, num_of_class)
if self.d_config.probability_shift is True:
labels_list = results_S['labels']
for l_T, l_S, labels in zip(logits_list_T, logits_list_S, labels_list):
l_T = probability_shift_(l_T, labels)
if self.d_config.temperature_scheduler is not None:
temperature = self.d_config.temperature_scheduler(l_S, l_T, self.d_config.temperature)
else:
temperature = self.d_config.temperature
kd_loss = self.kd_loss(l_S, l_T, temperature) * self.d_config.kd_loss_weight
total_loss += kd_loss
else:
for l_T,l_S in zip(logits_list_T,logits_list_S):
if self.d_config.temperature_scheduler is not None:
temperature = self.d_config.temperature_scheduler(l_S, l_T, self.d_config.temperature)
else:
temperature = self.d_config.temperature
kd_loss = self.kd_loss(l_S, l_T, temperature) * self.d_config.kd_loss_weight
total_loss += kd_loss
inters_T = {feature: results_T.get(feature,[]) for feature in FEATURES}
inters_S = {feature: results_S.get(feature,[]) for feature in FEATURES}
inputs_mask_T = results_T.get('inputs_mask',None)
inputs_mask_S = results_S.get('inputs_mask',None)
for ith,inter_match in enumerate(self.d_config.intermediate_matches):
layer_T = inter_match.layer_T
layer_S = inter_match.layer_S
feature = inter_match.feature
loss_type = inter_match.loss
match_weight = inter_match.weight
match_loss = MATCH_LOSS_MAP[loss_type]
if type(layer_S) is list and type(layer_T) is list:
inter_S = [inters_S[feature][s] for s in layer_S]
inter_T = [inters_T[feature][t] for t in layer_T]
if self.projs[ith]:
#inter_T = [self.projs[ith](t) for t in inter_T]
inter_S = [self.projs[ith](s) for s in inter_S]
else:
inter_S = inters_S[feature][layer_S]
inter_T = inters_T[feature][layer_T]
if self.projs[ith]:
#inter_T = self.projs[ith](inter_T)
inter_S = self.projs[ith](inter_S)
total_loss += match_loss(inter_S, inter_T, mask=inputs_mask_S) * match_weight
if self.has_custom_matches:
for hook_T, hook_S, match_weight, match_loss, proj_func in \
zip(self.custom_matches_cache['hook_outputs_T'], self.custom_matches_cache['hook_outputs_S'],
                    self.custom_matches_cache['match_weights'], self.custom_matches_cache['match_losses'],
self.custom_matches_cache['match_proj_funcs']):
if proj_func is not None:
hook_S = proj_func(hook_S)
total_loss += match_weight * match_loss(hook_S,hook_T,inputs_mask_S,inputs_mask_T)
self.custom_matches_cache['hook_outputs_T'] = []
self.custom_matches_cache['hook_outputs_S'] = []
if 'losses' in results_S:
for loss in results_S['losses']:
# in case of multi-GPU
total_loss += loss.mean() * self.d_config.hard_label_weight
return total_loss
def add_match(self,match: CustomMatch):
if type(match.module_T) is str or type(match.module_S) is str:
raise NotImplementedError
else:
module_T = match.module_T
module_S = match.module_S
weight = match.weight
loss = match.loss
proj_func = match.proj_func
proj_group = match.proj_group
self.add_match_by_module(module_T,module_S,proj_func,proj_group,weight,loss)
def add_match_by_module(self,module_T : torch.nn.Module,
module_S : torch.nn.Module,
proj_func, proj_group,
match_weight, match_loss):
self.handles_T = module_T.register_forward_hook(self._hook_T)
self.handles_S = module_S.register_forward_hook(self._hook_S)
self.custom_matches_cache['match_weights'].append(match_weight)
self.custom_matches_cache['match_losses'].append(match_loss)
self.custom_matches_cache['match_proj_funcs'].append(proj_func)
if isinstance(proj_func,nn.Module):
self.custom_matches_cache['match_proj_funcs'][-1].to(self.t_config.device)
self.custom_matches_cache['match_proj_groups'].append(proj_group)
def _hook_T(self,module,input, output):
self.custom_matches_cache['hook_outputs_T'].append(output)
def _hook_S(self, module, input, output):
self.custom_matches_cache['hook_outputs_S'].append(output)
class MultiTaskDistiller(BasicDistiller):
"""
distills multiple teacher models (of different tasks) into a single student. **It doesn't support intermediate feature matching**.
Args:
train_config (:class:`TrainingConfig`): training configuration.
distill_config (:class:`DistillationConfig`): distillation configuration.
model_T (dict): dict of teacher models: {task1:model1, task2:model2, .... }. Keys are tasknames.
model_S (torch.nn.Module): student model.
adaptor_T (dict): dict of teacher adaptors: {task1:adpt1, task2:adpt2, .... }. Keys are tasknames.
adaptor_S (dict): dict of student adaptors: {task1:adpt1, task2:adpt2, .... }. Keys are tasknames.
"""
def __init__(self, train_config,
distill_config,
model_T,
model_S,
adaptor_T,
adaptor_S):
super(MultiTaskDistiller, self).__init__(
train_config, distill_config,
model_T, model_S,
adaptor_T, adaptor_S)
if hasattr(self.adaptor_T,'__iter__'):
assert len(self.adaptor_T)==len(self.model_T)==len(self.adaptor_S)
assert (self.d_config.kd_loss_weight_scheduler is None) and (self.d_config.hard_label_weight_scheduler is None),\
"BasicMultiTaskDistiller does not support WEIGHT_SCHEDULER in the current version."
def train(self, optimizer, scheduler, dataloaders, num_steps, tau=1, callback=None, batch_postprocessors=None, **args):
"""
trains the student model.
Args:
optimizer: optimizer.
scheduler: used to adjust learning rate, optional, can be None.
dataloaders (dict): dict of dataset iterator. Keys are tasknames, values are corresponding dataloaders.
num_steps (int): number of training steps.
            tau (float): the probability of sampling an example from task `d` is proportional to \|d\|^{tau}, where \|d\| is the size of `d`'s training set. If the size of any dataset is unknown, `tau` is ignored and examples are sampled uniformly from each dataset.
            callback (Callable): function called after each checkpoint, can be None. It is called as ``callback(model=self.model_S, step=global_step)``. It can be used to evaluate the model at each checkpoint.
batch_postprocessors (dict): a dict of batch_postprocessors. Keys are tasknames, values are corresponding batch_postprocessors. Each batch_postprocessor should take a batch and return a batch.
**args: additional arguments fed to the model.
"""
total_global_steps = num_steps
ckpt_steps =self.t_config.ckpt_steps
print_every = ckpt_steps // self.print_freq
if print_every == 0:
print_every = ckpt_steps
checkpoints = [ i * ckpt_steps for i in range(1,num_steps//ckpt_steps+1)] + [total_global_steps]
logger.info(f"Total training steps: {total_global_steps}")
logger.info(f"Checkpoints(step): {checkpoints}")
        dataiters = {k: cycle(v) for k, v in dataloaders.items()}
if all(hasattr(v,'__len__') for v in dataloaders.values()):
dataloader_sizes = {k:len(v) for k,v in dataloaders.items()}
total_size = sum(v for k,v in dataloader_sizes.items())//self.t_config.gradient_accumulation_steps
logger.info(f"Total size of all datasets (in number of batch_size):{total_size}")
Z = sum(pow(v,tau) for v in dataloader_sizes.values())
tasknames, sampling_weights = zip(*((k,pow(v,tau)/Z) for k,v in dataloader_sizes.items()))
else:
logger.info("The size of some datasets are unknown, so tau=1")
tasknames = tuple(dataloaders.keys())
sampling_weights = None
global_step = 0
writer_step = 0
self.model_S.zero_grad()
while global_step < num_steps:
global_step += 1
for _ in range(self.t_config.gradient_accumulation_steps):
#sampling taskname
taskname = np.random.choice(tasknames,p=sampling_weights)
dataiter = dataiters[taskname]
batch = next(dataiter)
if batch_postprocessors is not None:
batch = batch_postprocessors[taskname](batch)
batch_taskname = (batch, taskname)
total_loss = self.train_on_batch(batch_taskname, args)
total_loss /= self.t_config.gradient_accumulation_steps
total_loss.backward()
scalar_total_loss = total_loss.cpu().item() * self.t_config.gradient_accumulation_steps
self.tb_writer.add_scalar('scalar/total_loss', scalar_total_loss, writer_step)
writer_step += 1
optimizer.step()
if scheduler is not None:
scheduler.step()
self.model_S.zero_grad()
if self.d_config.kd_loss_weight_scheduler is not None:
self.d_config.kd_loss_weight = \
self.d_config.kd_loss_weight_scheduler(global_step/total_global_steps)
if self.d_config.hard_label_weight_scheduler is not None:
self.d_config.hard_label_weight = \
self.d_config.hard_label_weight_scheduler(global_step/total_global_steps)
if (global_step) % print_every == 0:
logger.info(f"Global step: {global_step}/{num_steps}")
if (global_step % ckpt_steps == 0) or global_step==total_global_steps:
self.save_and_callback(global_step, global_step-1, 0, callback)
logger.info("Training finished")
def train_on_batch(self, batch_taskname, args) -> torch.Tensor:
# Basic uses no cache
batch, taskname = batch_taskname
model_T = self.model_T[taskname]
adaptor_T = self.adaptor_T[taskname]
adaptor_S = self.adaptor_S[taskname]
if type(batch) is dict:
for k,v in batch.items():
if type(v) is torch.Tensor:
batch[k] = v.to(self.t_config.device)
with torch.no_grad():
results_T = model_T(**batch, **args)
results_S = self.model_S(**batch, **args)
else:
moved_batch = tuple(item.to(self.t_config.device) if type(item) is torch.Tensor else item for item in batch)
batch = moved_batch
with torch.no_grad():
results_T = model_T(*batch, **args)
results_S = self.model_S(*batch, **args)
results_T = post_adaptor(adaptor_T(batch,results_T))
results_S = post_adaptor(adaptor_S(batch,results_S))
logits_list_T = results_T['logits'] # list of tensor
logits_list_S = results_S[taskname]['logits'] # list of tensor
total_loss = 0
if 'logits_mask' in results_S[taskname]:
masks_list_S = results_S[taskname]['logits_mask']
logits_list_S = select_logits_with_mask(logits_list_S,masks_list_S) #(mask_sum, num_of_class)
if 'logits_mask' in results_T: #TODO
masks_list_T = results_T['logits_mask']
logits_list_T = select_logits_with_mask(logits_list_T,masks_list_T)
if self.d_config.probability_shift is True:
            labels_list = results_S[taskname]['labels']
for l_T, l_S, labels in zip(logits_list_T, logits_list_S, labels_list):
l_T = probability_shift_(l_T, labels)
if self.d_config.temperature_scheduler is not None:
temperature = self.d_config.temperature_scheduler(l_S, l_T, self.d_config.temperature)
else:
temperature = self.d_config.temperature
kd_loss = self.kd_loss(l_S, l_T, temperature) * self.d_config.kd_loss_weight
total_loss += kd_loss
else:
for l_T,l_S in zip(logits_list_T,logits_list_S):
if self.d_config.temperature_scheduler is not None:
temperature = self.d_config.temperature_scheduler(l_S, l_T, self.d_config.temperature)
else:
temperature = self.d_config.temperature
total_loss += self.kd_loss(l_S, l_T, temperature) * self.d_config.kd_loss_weight
        if 'losses' in results_S[taskname]:
for loss in results_S[taskname]['losses']:
# in case of multi-GPU
total_loss += loss.mean() * self.d_config.hard_label_weight
return total_loss
def select_logits_with_mask(logits_list, masks_list):
output_logits = []
if len(masks_list)==len(logits_list):
for logits,mask in zip(logits_list,masks_list):
if len(logits.shape)==3:
mask = mask.unsqueeze(-1).expand_as(logits).to(mask_dtype)
logits_select = torch.masked_select(logits,mask).view(-1,logits.size(-1))
else:
logits_select = logits #Logits_mask has no effect on logits of shape (batch_size, logits_to_be_softmaxed)
output_logits.append(logits_select)
elif len(masks_list)==1:
mask = masks_list[0]
for logits in logits_list:
if len(logits.shape)==3:
mask = mask.unsqueeze(-1).expand_as(logits).to(mask_dtype)
logits_select = torch.masked_select(logits,mask).view(-1,logits.size(-1))
else:
logits_select = logits #Logits_mask has no effect on logits of shape (batch_size, logits_to_be_softmaxed)
output_logits.append(logits_select)
else:
raise AssertionError("lengths of logits list and masks list mismatch")
return output_logits
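# Shape note: logits of shape (batch_size, length, num_classes) combined with
# a mask of shape (batch_size, length) are flattened to (mask_sum, num_classes);
# 2-D logits of shape (batch_size, num_classes) pass through unchanged.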
class BasicAdaptor:
def __init__(self):
self.batch = None
self.model_outputs = None
def __call__(self,batch,model_outputs):
self.batch = batch
self.model_outputs = model_outputs
def __getattr__(self, item):
raise NotImplementedError
def post_adaptor(dict_object):
if 'logits' in dict_object:
logits = dict_object['logits']
if not isinstance(logits,(list,tuple)):
dict_object['logits'] = [ logits ]
if 'logits_mask' in dict_object:
logits_mask = dict_object['logits_mask']
if not isinstance(logits_mask,(list,tuple)):
dict_object['logits_mask'] = [ logits_mask ]
if 'losses' in dict_object:
losses = dict_object['losses']
if not isinstance(losses,(list,tuple)):
dict_object['losses'] = [ losses ]
if 'labels' in dict_object:
labels = dict_object['labels']
if not isinstance(labels,(list,tuple)):
dict_object['labels'] = [ labels ]
return dict_object
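# A minimal adaptor sketch showing the keys that post_adaptor normalizes
# (`model_outputs.logits`/`.loss` assume a HuggingFace-style output object):
#
#   def adaptor(batch, model_outputs):
#       return {'logits': model_outputs.logits,   # tensor or list of tensors
#               'losses': model_outputs.loss,     # optional hard-label loss
#               'labels': batch['labels']}        # needed for probability_shift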
class BasicTrainer:
"""
It performs supervised training, not distillation. It can be used for training the teacher model.
Args:
train_config (:class:`TrainingConfig`): training configuration.
model (:class:`torch.nn.Module`): model to be trained.
        adaptor (Callable): adaptor of the model.
The role of `adaptor` is explained in :py:func:`adaptor`.
"""
def __enter__(self):
self.model_is_training = self.model.training
self.model.train()
def __exit__(self, exc_type, exc_val, exc_tb):
# Restore model status
self.model.train(self.model_is_training)
def __init__(self, train_config: TrainingConfig,
model: torch.nn.Module, adaptor):
super(BasicTrainer, self).__init__()
self.t_config = train_config
self.model = model
self.adaptor = adaptor
if self.t_config.log_dir is not None:
self.tb_writer = SummaryWriter(log_dir = self.t_config.log_dir)
else:
self.tb_writer = no_op
self.print_freq = 20
def train(self, optimizer, scheduler, dataloader, num_epochs, num_steps=None, callback=None, batch_postprocessor=None, **args):
"""
trains the model. See :meth:`BasicDistiller.train`.
"""
if num_steps is not None:
total_global_steps = num_steps
ckpt_steps =self.t_config.ckpt_steps
print_every = ckpt_steps // self.print_freq
if print_every == 0:
print_every = ckpt_steps
checkpoints = [ i * ckpt_steps for i in range(1,num_steps//ckpt_steps+1)] + [total_global_steps]
logger.info(f"Total training steps: {total_global_steps}")
logger.info(f"Checkpoints: {checkpoints}")
global_step = 0
writer_step = 0
for step, batch in tqdm(enumerate(cycle(dataloader)),disable=None):
if batch_postprocessor is not None:
batch = batch_postprocessor(batch)
total_loss = self.train_on_batch(batch,args)
total_loss /= self.t_config.gradient_accumulation_steps
total_loss.backward()
scalar_total_loss = total_loss.cpu().item() * self.t_config.gradient_accumulation_steps
self.tb_writer.add_scalar('scalar/total_loss', scalar_total_loss, writer_step)
writer_step += 1
if (step+1)%self.t_config.gradient_accumulation_steps == 0:
optimizer.step()
if scheduler is not None:
scheduler.step()
self.model.zero_grad()
global_step += 1
if (global_step) % print_every == 0:
logger.info(f"Global step: {global_step}, epoch step:{step+1}")
if (global_step%ckpt_steps==0) or global_step==total_global_steps:
logger.info(f"Saving at global step {global_step}")
coreModel = self.model.module if \
'DataParallel' in self.model.__class__.__name__ else self.model
state_dict = coreModel.state_dict()
torch.save(state_dict, os.path.join(self.t_config.output_dir,f"gs{global_step}.pkl"))
if callback is not None:
logger.info("Running callback function...")
callback(model=self.model, step=global_step)
self.model.train()
logger.info("Training finished")
return
train_steps_per_epoch = len(dataloader)//self.t_config.gradient_accumulation_steps
print_every = train_steps_per_epoch // self.print_freq
if print_every == 0:
print_every = train_steps_per_epoch
checkpoints = [int(train_steps_per_epoch*ci/self.t_config.ckpt_frequency) for ci in range(self.t_config.ckpt_frequency)]
logger.info(f"Training steps per epoch: {train_steps_per_epoch}")
logger.info(f"Checkpoints(step): {checkpoints}")
global_step = 0
writer_step = 0
for current_epoch in tqdm(range(int(num_epochs)),disable=None):
logger.info(f"Epoch {current_epoch+1}")
self.model.zero_grad()
logger.info(f"Length of current epoch in forward batch: {len(dataloader)}")
for step, batch in tqdm(enumerate(dataloader),disable=None):
if batch_postprocessor is not None:
batch = batch_postprocessor(batch)
total_loss = self.train_on_batch(batch,args)
total_loss /= self.t_config.gradient_accumulation_steps
total_loss.backward()
scalar_total_loss = total_loss.cpu().item() * self.t_config.gradient_accumulation_steps
self.tb_writer.add_scalar('scalar/total_loss', scalar_total_loss, writer_step)
writer_step += 1
if (step+1)%self.t_config.gradient_accumulation_steps == 0:
optimizer.step()
if scheduler is not None:
scheduler.step()
self.model.zero_grad()
global_step += 1
if (global_step) % print_every == 0:
logger.info(f"Global step: {global_step}, epoch step:{step+1}")
if (global_step%train_steps_per_epoch in checkpoints) \
and ((current_epoch+1)%self.t_config.ckpt_epoch_frequency==0 or current_epoch+1==num_epochs):
logger.info(f"Saving at global step {global_step}, epoch step {step+1} epoch {current_epoch+1}")
coreModel = self.model.module if \
'DataParallel' in self.model.__class__.__name__ else self.model
state_dict = coreModel.state_dict()
torch.save(state_dict, os.path.join(self.t_config.output_dir,f"gs{global_step}.pkl"))
if callback is not None:
logger.info("Running callback function...")
callback(model=self.model, step=global_step)
self.model.train()
logger.info(f"Epoch {current_epoch+1} finished")
def train_on_batch(self, batch, args) -> torch.Tensor:
if type(batch) is dict:
for k,v in batch.items():
if type(v) is torch.Tensor:
batch[k] = v.to(self.t_config.device)
results = self.model(**batch, **args)
else:
moved_batch = tuple(item.to(self.t_config.device) if type(item) is torch.Tensor else item for item in batch)
batch = moved_batch
results = self.model(*batch, **args)
results = post_adaptor(self.adaptor(batch,results))
total_loss = 0
if 'losses' not in results:
raise KeyError("'losses' not in the output of adaptor. Nothing to optimize!")
else:
for loss in results['losses']:
# in case of multi-GPU
total_loss += loss.mean()
return total_loss
def probability_shift_(tensor, labels): # In-place operation. shape (batch_size, num_classes), (batch_size,)
if len(tensor.shape)==2:
max_position = tensor.argmax(dim=-1) # shape (batch_size,)
index = torch.arange(tensor.size(0)).to(tensor.device)
max_clone = tensor[index,max_position].clone()
truth_clone = tensor[index,labels].clone()
tensor[index,max_position] = truth_clone
tensor[index,labels] = max_clone
return tensor
elif len(tensor.shape)==3: # shape (batch_size, length, num_classes)
original_shape = tensor.size()
tensor = tensor.view(-1,tensor.size(-1)) # (batch_size * length, num_classes)
max_position = tensor.argmax(dim=-1) # shape (batch_size * length, )
labels = labels.view(-1) # (batch_size * length, )
nonneg_labels = torch.where(labels<0, max_position, labels)
index = torch.arange(tensor.size(0)).to(tensor.device) # (batch_size * length)
max_clone = tensor[index,max_position].clone()
truth_clone = tensor[index,nonneg_labels].clone()
tensor[index,max_position] = truth_clone
tensor[index,nonneg_labels] = max_clone
tensor = tensor.view(original_shape)
return tensor
else:
raise TypeError("Rank of tensor must be 2 or 3")
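# Worked example (rank 2): for a row [0.1, 0.7, 0.2] with true label 2, the
# argmax value at position 1 and the value at the label position 2 are swapped
# in place, giving [0.1, 0.2, 0.7].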
class no_op:
@staticmethod
def add_scalar(*args, **kwargs):
pass
|
vincentlux/TextBrewer | src/textbrewer/distiller_general.py | from .distiller_utils import *
from .distiller_basic import BasicDistiller
class GeneralDistiller(BasicDistiller):
"""
Supports intermediate features matching. **Recommended for single-teacher single-task distillation**.
Args:
train_config (:class:`TrainingConfig`): training configuration.
distill_config (:class:`DistillationConfig`): distillation configuration.
model_T (:class:`torch.nn.Module`): teacher model.
model_S (:class:`torch.nn.Module`): student model.
adaptor_T (Callable): teacher model's adaptor.
adaptor_S (Callable): student model's adaptor.
custom_matches (list): supports more flexible user-defined matches (testing).
The roles of `adaptor_T` and `adaptor_S` are explained in :py:func:`adaptor`.
"""
def __init__(self, train_config,
distill_config,
model_T,
model_S,
adaptor_T,
adaptor_S,
custom_matches: Optional[List[CustomMatch]] = None):
# custom_matches=[{'module_T': module_T, 'module_S':module_S,
# 'loss': loss, 'weight': weight},...]
super(GeneralDistiller, self).__init__(train_config, distill_config, model_T, model_S, adaptor_T, adaptor_S)
self.projs = []
self.projs_group = []
for im in self.d_config.intermediate_matches:
if im.proj is not None:
projection = im.proj[0]
dim_in = im.proj[1]
dim_out = im.proj[2]
self.projs_group.append(im.proj[3])
self.projs.append(PROJ_MAP[projection](dim_in,dim_out))
self.projs[-1].to(self.t_config.device)
else:
self.projs.append(None)
self.projs_group.append(None)
self.has_custom_matches = False
if custom_matches:
self.handles_T = []
self.handles_S = []
self.custom_matches_cache = {'hook_outputs_T': [], 'hook_outputs_S': [], 'match_proj_funcs': [],
'match_weights': [], 'match_losses': [], 'match_proj_groups': []}
for match in custom_matches:
self.add_match(match)
self.has_custom_matches = True
self.d_config.is_caching_logits = False
def save_and_callback(self,global_step, step, epoch, callback):
if self.has_custom_matches:
handles_T = self.model_T._forward_hooks
handles_S = self.model_S._forward_hooks
self.model_S._forward_hooks = OrderedDict() # clear hooks
self.model_T._forward_hooks = OrderedDict()
super(GeneralDistiller, self).save_and_callback(global_step, step, epoch, callback)
if self.has_custom_matches:
self.model_S._forward_hooks = handles_S # restore hooks
self.model_T._forward_hooks = handles_T
def train(self, optimizer, dataloader, num_epochs, scheduler_class=None, scheduler_args=None, scheduler=None, max_grad_norm = -1.0, num_steps=None, callback=None, batch_postprocessor=None, **args):
"""
trains the student model. See :meth:`BasicDistiller.train`.
"""
# update optimizer for projection layer
for proj,proj_group in zip(self.projs, self.projs_group):
if proj is not None:
assert isinstance(proj,nn.Module)
optimizer.add_param_group({**{'params':proj.parameters()},**proj_group})
if self.has_custom_matches:
for proj_func,proj_group in zip(self.custom_matches_cache['match_proj_funcs'],
self.custom_matches_cache['match_proj_groups']):
if isinstance(proj_func,nn.Module):
optimizer.add_param_group({**{'params':proj_func.parameters()},**proj_group})
logger.debug("Optimizer param group: ")
logger.debug(f"{[[s.shape for s in g['params']] for g in optimizer.param_groups]}")
super(GeneralDistiller, self).train(optimizer, dataloader, num_epochs, scheduler_class, scheduler_args, scheduler, max_grad_norm, num_steps, callback, batch_postprocessor, **args)
def train_on_batch(self, batch, args):
if type(batch) is dict:
for k,v in batch.items():
if type(v) is torch.Tensor:
batch[k] = v.to(self.t_config.device)
with torch.no_grad():
results_T = self.model_T(**batch, **args)
results_S = self.model_S(**batch, **args)
else:
moved_batch = tuple(item.to(self.t_config.device) if type(item) is torch.Tensor else item for item in batch)
batch = moved_batch
with torch.no_grad():
results_T = self.model_T(*batch, **args)
results_S = self.model_S(*batch, **args)
results_T = post_adaptor(self.adaptor_T(batch,results_T))
results_S = post_adaptor(self.adaptor_S(batch,results_S))
total_loss = 0
if 'logits' in results_T and 'logits' in results_S:
logits_list_T = results_T['logits'] # list of tensor
logits_list_S = results_S['logits'] # list of tensor
if 'logits_mask' in results_S:
masks_list_S = results_S['logits_mask']
logits_list_S = select_logits_with_mask(logits_list_S,masks_list_S) #(mask_sum, num_of_class)
if 'logits_mask' in results_T:
masks_list_T = results_T['logits_mask']
logits_list_T = select_logits_with_mask(logits_list_T,masks_list_T) #(mask_sum, num_of_class)
if self.d_config.probability_shift is True:
labels_list = results_S['labels']
for l_T, l_S, labels in zip(logits_list_T, logits_list_S, labels_list):
l_T = probability_shift_(l_T, labels)
if self.d_config.temperature_scheduler is not None:
temperature = self.d_config.temperature_scheduler(l_S, l_T, self.d_config.temperature)
else:
temperature = self.d_config.temperature
kd_loss = self.kd_loss(l_S, l_T, temperature) * self.d_config.kd_loss_weight
total_loss += kd_loss
else:
for l_T,l_S in zip(logits_list_T,logits_list_S):
if self.d_config.temperature_scheduler is not None:
temperature = self.d_config.temperature_scheduler(l_S, l_T, self.d_config.temperature)
else:
temperature = self.d_config.temperature
kd_loss = self.kd_loss(l_S, l_T, temperature) * self.d_config.kd_loss_weight
total_loss += kd_loss
inters_T = {feature: results_T.get(feature,[]) for feature in FEATURES}
inters_S = {feature: results_S.get(feature,[]) for feature in FEATURES}
inputs_mask_T = results_T.get('inputs_mask',None)
inputs_mask_S = results_S.get('inputs_mask',None)
for ith,inter_match in enumerate(self.d_config.intermediate_matches):
layer_T = inter_match.layer_T
layer_S = inter_match.layer_S
feature = inter_match.feature
loss_type = inter_match.loss
match_weight = inter_match.weight
match_loss = MATCH_LOSS_MAP[loss_type]
if type(layer_S) is list and type(layer_T) is list:
inter_S = [inters_S[feature][s] for s in layer_S]
inter_T = [inters_T[feature][t] for t in layer_T]
if self.projs[ith]:
#inter_T = [self.projs[ith](t) for t in inter_T]
inter_S = [self.projs[ith](s) for s in inter_S]
else:
inter_S = inters_S[feature][layer_S]
inter_T = inters_T[feature][layer_T]
if self.projs[ith]:
#inter_T = self.projs[ith](inter_T)
inter_S = self.projs[ith](inter_S)
total_loss += match_loss(inter_S, inter_T, mask=inputs_mask_S) * match_weight
if self.has_custom_matches:
for hook_T, hook_S, match_weight, match_loss, proj_func in \
zip(self.custom_matches_cache['hook_outputs_T'], self.custom_matches_cache['hook_outputs_S'],
self.custom_matches_cache['match_weights'], self.custom_matches_cache['match_losses'],
self.custom_matches_cache['match_proj_funcs']):
if proj_func is not None:
hook_S = proj_func(hook_S)
total_loss += match_weight * match_loss(hook_S,hook_T,inputs_mask_S,inputs_mask_T)
self.custom_matches_cache['hook_outputs_T'] = []
self.custom_matches_cache['hook_outputs_S'] = []
if 'losses' in results_S:
for loss in results_S['losses']:
# in case of multi-GPU
total_loss += loss.mean() * self.d_config.hard_label_weight
return total_loss
def add_match(self,match: CustomMatch):
if type(match.module_T) is str or type(match.module_S) is str:
raise NotImplementedError
else:
module_T = match.module_T
module_S = match.module_S
weight = match.weight
loss = match.loss
proj_func = match.proj_func
proj_group = match.proj_group
self.add_match_by_module(module_T,module_S,proj_func,proj_group,weight,loss)
def add_match_by_module(self,module_T : torch.nn.Module,
module_S : torch.nn.Module,
proj_func, proj_group,
match_weight, match_loss):
self.handles_T = module_T.register_forward_hook(self._hook_T)
self.handles_S = module_S.register_forward_hook(self._hook_S)
self.custom_matches_cache['match_weights'].append(match_weight)
self.custom_matches_cache['match_losses'].append(match_loss)
self.custom_matches_cache['match_proj_funcs'].append(proj_func)
if isinstance(proj_func,nn.Module):
self.custom_matches_cache['match_proj_funcs'][-1].to(self.t_config.device)
self.custom_matches_cache['match_proj_groups'].append(proj_group)
def _hook_T(self,module,input, output):
self.custom_matches_cache['hook_outputs_T'].append(output)
def _hook_S(self, module, input, output):
self.custom_matches_cache['hook_outputs_S'].append(output)
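# --- Usage sketch (editor's illustration, not part of the original file) ---
# A hedged example of wiring up a custom match. The model/layer names and the
# loss function below are hypothetical; any loss(s, t, mask_s, mask_t) works.
#
# distiller = GeneralDistiller(train_config, distill_config,
#                              model_T, model_S, adaptor_T, adaptor_S)
# match = CustomMatch(module_T=model_T.encoder.layer[11],  # hypothetical module
#                     module_S=model_S.encoder.layer[3],   # hypothetical module
#                     proj_func=nn.Linear(384, 768),       # maps student dim to teacher dim
#                     proj_group={'lr': 1e-3},             # extra optimizer settings for the projection
#                     weight=1.0, loss=my_hidden_loss)     # my_hidden_loss is an assumption
# distiller.add_match(match)
# distiller.train(optimizer, dataloader, num_epochs=3)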
|
jstarsky/scraping-etsy | etsy/pipelines.py | <reponame>jstarsky/scraping-etsy<filename>etsy/pipelines.py
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
# This pipeline post-processes each scraped item.
class EtsyPipeline(object):
def process_item(self, item, spider):
# Format the price output
if 'price' in item:
# Check if there is a currency symbol
if len(item['price'].split()) > 1:
# Remove the currency symbol and the + sign
item['price'] = item['price'].split()[1].replace('+','')
else:
# Remove the currency symbol and the + sign
item['price'] = item['price'].replace('$','').replace('+','')
# Remove the 'in' string
if 'store_location' in item:
item['store_location'] = item['store_location'].replace('in ', '')
# Remove the 'From' string
if 'return_location' in item:
item['return_location'] = item['return_location'].replace('From ', '')
# Sometimes the spider takes the rating in the wrong format (ex: 48.333 instead of 4.8333)
if 'rating' in item:
rating = item['rating']
if float(rating) > 5:
# Ex: transform 48.333 into 4.83
rating = round(float(rating) / 10, 2)
else:
rating = round(float(rating), 2)
item['rating'] = rating
return item
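# --- Usage sketch (editor's illustration, not part of the original file) ---
# The pipeline only runs once it is registered in the project's settings.py;
# the module path below assumes this repo's default layout.
#
# ITEM_PIPELINES = {
#     'etsy.pipelines.EtsyPipeline': 300,  # lower number = earlier in the chain
# }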
|
jstarsky/scraping-etsy | etsy/spiders/search_products.py | # -*- coding: utf-8 -*-
#==============================================================================
#title :search_products.py
#description :A spider to scrape etsy.com products based on a search string.
#author :<NAME> (<EMAIL>)
#last Update :12-02-2019
#usage :scrapy crawl search_products -a search='3d printed' -o products.csv
#python version :3.6
#==============================================================================
import scrapy
import os
import sys
import csv
import glob
import json
from openpyxl import Workbook
from scrapy.http import Request
from etsy.items import ProductItem
from scrapy.loader import ItemLoader
# Spider Class
class ProductsSpider(scrapy.Spider):
# Spider name
name = 'search_products'
allowed_domains = ['etsy.com']
start_urls = ['https://www.etsy.com/']
# Max number of items
COUNT_MAX = 10**100
# Count the number of items scraped
COUNTER = 0
# Get only the products URLs
URLS_ONLY = False
# Set the method to get the product reviews
# If set to 1 (default), Spider will get only the reviews in the product's page, the default value is 4 reviews [FAST SCRAPING]
# If set to 2, Spider will issue an Ajax request to get all reviews on the product's page, that is, a maximum of 10 reviews
# If set to 3, Spider will visit the page with all store reviews and get all the reviews for this specific product [SLOWER SCRAPING]
reviews_opt = None
def __init__(self, search, reviews_option=1, count_max=None, urls_only=False, *args, **kwargs):
if search:
# Build the search URL
self.start_urls = ['https://www.etsy.com/search?q={}&ref=pagination&page=1'.format(search)]
# Set the maximum number of items to be scraped
if count_max:
self.COUNT_MAX = int(count_max)
# Get only the products URLs
self.URLS_ONLY = bool(urls_only)
# Set the chosen review option
self.reviews_opt = int(reviews_option)
super(ProductsSpider, self).__init__(*args, **kwargs)
# Parse the first page result and go to the next page
def parse(self, response):
# Get the list of products from html response
products_list = response.xpath('//div[@data-search-results=""]/div//li//a/@href').extract()
products_id_list = [product_href.split("/")[4] for product_href in products_list]
# For each product extracts the product URL
print(f"#### FOUND {len(products_id_list)} PRODUCTS")
if self.URLS_ONLY:
for product_id in products_id_list:
# Create the ItemLoader object that stores each product information
l = ItemLoader(item=ProductItem(), response=response)
product_url = f'https://www.etsy.com/listing/{product_id}'
l.add_value('url', product_url)
yield l.load_item()
else:
for product_id in products_id_list:
product_url = f'https://www.etsy.com/listing/{product_id}'
# Stops if the COUNTER reaches the maximum set value
if self.COUNTER < self.COUNT_MAX:
# Go to the product's page to get the data
yield scrapy.Request(product_url, callback=self.parse_product, dont_filter=True)
# Pagination - Go to the next page
current_page_number = int(response.url.split('=')[-1])
next_page_number = current_page_number + 1
# Build the next page URL
next_page_url = '='.join(response.url.split('=')[:-1]) + '=' + str(next_page_number)
# If the current list is not empty
if len(products_id_list) > 0:
yield scrapy.Request(next_page_url)
# Get the HTML from product's page and get the data
def parse_product(self, response):
# Stops if the COUNTER reaches the maximum set value
if self.COUNTER >= self.COUNT_MAX:
raise scrapy.exceptions.CloseSpider(reason='COUNT_MAX value reached - {} items'.format(self.COUNT_MAX))
# Check if the product is available
no_available_message = response.xpath('//h2[contains(text(), "Darn")]')
if no_available_message:
return []
# Create the ItemLoader object that stores each product information
l = ItemLoader(item=ProductItem(), response=response)
# Get the product ID (ex: 666125766)
product_id = response.url.split('/')[4]
l.add_value('product_id', product_id)
# Get the product title
#l.add_xpath('title', '//meta[@property="og:title"]/@content')
l.add_xpath('title', '//div[@data-component="listing-page-title-component"]/h1/text()')
#l.add_xpath('title', "//h1[@data-listing-id='{}']".format(response.url.split('/')[4]))
# Get the product price
l.add_xpath('price', '//*[contains(@data-buy-box-region, "price")]//p')
# Get the product URL (ex: www.etsy.com/listing/666125766)
l.add_value('url', '/'.join(response.url.split('/')[2:5]))
# Get the product description
l.add_xpath('description', '//div[@data-id="description-text"]/div/p/text()')
# Get each product option and save in a list
product_options = []
product_options_list = response.xpath('//*[contains(@id, "inventory-variation-select")]')
for options in product_options_list:
# Get list of options
temp_list = options.xpath('.//text()').extract()
# Remove '\n' strings
temp_list = list(map(lambda s: s.strip(), temp_list))
# Remove empty strings ('')
temp_list = list(filter(lambda s: s != '', temp_list))
# Filter the 'Quantity' option
if temp_list[0] != '1':
# Create the final string:
# example: "Select a color: White, Black, Red, Silver"
product_options.append(temp_list[0] +': ' + ', '.join(temp_list[1:]))
# Separate each option with a | (pipe) symbol
l.add_value('product_options', '|'.join(product_options))
# Get the product rating (ex: 4.8 )
l.add_xpath('rating', '//a[@href="#reviews"]//input[@name="rating"]/@value')
# Get the number of votes (number of reviews)
l.add_xpath('number_of_reviews', '//button[@id="same-listing-reviews-tab"]/span/text()')
# Count the number of product images
images_sel = response.xpath('//ul[@data-carousel-pagination-list=""]/li/img/@data-src-delay').extract()
l.add_value('count_of_images', len(images_sel))
l.add_value('images_urls', images_sel)
# Get the product overview
#l.add_xpath('overview', '//*[@class="listing-page-overview-component"]//li')
# Get the number of people that add the product in favorites
l.add_xpath('favorited_by', '//*[@id="item-overview"]//*[contains(@href, "/favoriters")]/text()', re=r'(\d+)')
l.add_xpath('favorited_by', '//*[@class="listing-page-favorites-link"]/text()', re=r'(\d+)')
l.add_xpath('favorited_by', '//a[contains(text(), " favorites")]/text()', re=r'(\d+)')
# Get the name of the Store and location
l.add_xpath('store_name', '//div[@id="listing-page-cart"]//span/text()')
#l.add_xpath('store_location', '//*[@id="shop-info"]/div')
#l.add_xpath('return_location', "//*[@class='js-estimated-delivery']/following-sibling::div")
# Use the chosen method to get the reviews
self.logger.info('Reviews scraping option: ' + str(self.reviews_opt))
# Option 3 - All reviews
if self.reviews_opt == 3:
# Getting all Reviews
store_name = response.xpath('//span[@itemprop="title"]//text()').extract_first()
# Build the reviews URL
rev_url = "https://www.etsy.com/shop/{}/reviews?ref=l2-see-more-feedback".format(store_name)
data = {'itemLoader':l, 'product_id':product_id}
# Go to the all reviews page
yield Request(rev_url, meta=data, callback=self.parse_reviews)
# Option 2 - Ajax request
elif self.reviews_opt == 2:
# Creating the Ajax request
# Getting the session cookie
get_cookie = response.request.headers['Cookie'].split(b';')[0].split(b'=')
cookies = {get_cookie[0].decode("utf-8"):get_cookie[1].decode("utf-8")}
# Getting the x-csrf-token
headers = {'x-csrf-token': response.xpath("//*[@name='_nnc']/@value").extract_first()}
# Shop Id
shop_id = response.xpath("//*[@property='og:image']/@content").extract_first().split('/')[3]
formdata = {
'stats_sample_rate': '',
'specs[reviews][]': 'Listzilla_ApiSpecs_Reviews',
'specs[reviews][1][listing_id]': product_id,
'specs[reviews][1][shop_id]': shop_id,
'specs[reviews][1][render_complete]': 'true'
}
data = {'itemLoader':l, 'product_id':product_id}
ajax_url = "https://www.etsy.com/api/v3/ajax/bespoke/member/neu/specs/reviews"
yield scrapy.FormRequest(ajax_url, headers=headers, cookies=cookies,
meta=data, formdata=formdata,
callback=self.parse_ajax_response)
# Option 1
else:
# List that stores all the reviews data
reviews_data = []
reviews_counter = 1
# Get the data from each review
all_reviews = response.xpath('//*[@class="listing-page__review col-group pl-xs-0 pr-xs-0"]')
# Process each review
for r in all_reviews:
# Get the profile URL of the reviewer
reviewer_profile = r.xpath(".//*[@class='display-block']/parent::*//@href").extract_first()
if reviewer_profile:
# Build the full profile url
reviewer_profile = 'www.etsy.com' + reviewer_profile
else:
# If the profile is inactive there is no profile url
continue
review_date = r.xpath(".//*[@class='text-link-underline display-inline-block mr-xs-1']/parent::*//text()").extract()[2].strip()
reviewer_rating = r.xpath('.//input[@name="rating"]/@value').extract_first()
review_content = " ".join(r.xpath('.//div[@class="overflow-hidden"]//text()').extract()).strip()
# Build the review string
rev_data = "Review number: {} \nProfile: {} \nRating: {} \nDate: {} \nContent: {}".format(reviews_counter, reviewer_profile, reviewer_rating, review_date, review_content)
# Save into the list
reviews_data.append(rev_data)
reviews_counter += 1
# Saves all reviews data
l.add_value('reviews', "\n\n".join(reviews_data))
# Increment the items counter
self.COUNTER += 1
print('\nProducts scraped: {}\n'.format(self.COUNTER))
yield l.load_item()
# Parse the Ajax response (Json) and extract reviews data
def parse_ajax_response(self, response):
# Get the itemLoader object from parser_products
l = response.meta['itemLoader']
# List that stores all the reviews data
reviews_data = []
reviews_counter = 1
# Loads the Json data
j = json.loads(response.text)
html = j["output"]["reviews"]
# Create the Selector
sel = scrapy.Selector(text=html)
# Get the data from each review
all_reviews = sel.xpath('//*[@class="listing-page__review col-group pl-xs-0 pr-xs-0"]')
# Process each review
for r in all_reviews:
# Get the profile URL of the reviewer
reviewer_profile = r.xpath(".//*[@class='display-block']/parent::*//@href").extract_first()
if reviewer_profile:
# Build the full profile url
reviewer_profile = 'www.etsy.com' + reviewer_profile
else:
# If the profile is inactive there is no profile url
continue
review_date = r.xpath(".//*[@class='text-link-underline display-inline-block mr-xs-1']/parent::*//text()").extract()[2].strip()
reviewer_rating = r.xpath('.//input[@name="rating"]/@value').extract_first()
review_content = " ".join(r.xpath('.//div[@class="overflow-hidden"]//text()').extract()).strip()
# Build the string
rev_data = "Review number: {} \nProfile: {} \nRating: {} \nDate: {} \nContent: {}".format(reviews_counter, reviewer_profile, reviewer_rating, review_date, review_content)
# Saves the string in a list
reviews_data.append(rev_data)
reviews_counter += 1
# Saves all reviews data
l.add_value('reviews', "\n\n".join(reviews_data))
# Increment the items counter
self.COUNTER += 1
print('\nProducts scraped: {}\n'.format(self.COUNTER))
yield l.load_item()
# Parse the Store reviews page
def parse_reviews(self, response):
# Get the itemLoader object from parser_products
l = response.meta['itemLoader']
# List that stores all the reviews data
# Check if this is the first access or if there is data from another reviews page
if 'reviews_data' in response.meta.keys():
reviews_data = response.meta['reviews_data']
reviews_counter = response.meta['reviews_counter']
else:
reviews_data = []
reviews_counter = 1
# Get the data from each review
all_reviews = response.xpath("//*[@data-region='review']")
# Process each review
for r in all_reviews:
# Get the product id of the review
product_id = response.xpath("//*[@data-region='listing']//@href").extract_first().split('/')[4]
# Check if this is the product in analysis
if response.meta['product_id'] == product_id:
# Get the profile URL of the reviewer
reviewer_profile = r.xpath(".//*[@class='shop2-review-attribution']//@href").extract_first()
if reviewer_profile:
# Shorter version of the profile url
reviewer_profile = reviewer_profile.split('?')[0]
else:
# If the profile is inactive there is no profile url
continue
reviewer_rating = r.xpath('.//input[@name="rating"]/@value').extract_first()
review_date = r.xpath(".//*[@class='shop2-review-attribution']//text()").extract()[2].replace('on ','').strip()
review_content = " ".join(r.xpath('.//div[@class="text-gray-lighter"]//text()').extract()).strip()
# Build the string
rev_data = "Review number: {} \nProfile: {} \nRating: {} \nDate: {} \nContent: {}".format(reviews_counter, reviewer_profile, reviewer_rating, review_date, review_content)
# Saves the string in a list
reviews_data.append(rev_data)
reviews_counter += 1
# Go to the next reviews page
next_page_url = response.xpath("//*[contains(text(),'Next page')]/parent::*/@href").extract_first()
# Check if there is a next page
if next_page_url:
# Save the current data
data = {'itemLoader':l, 'product_id':product_id, 'reviews_data':reviews_data, 'reviews_counter':reviews_counter}
# Build the request
yield Request(next_page_url, meta=data, callback=self.parse_reviews)
else:
# If there is no next page, saves the data
# Saves the data
l.add_value('reviews', "\n\n".join(reviews_data))
# Increment the items counter
self.COUNTER += 1
print('\nProducts scraped: {}\n'.format(self.COUNTER))
yield l.load_item()
# Create the Excel file
def close(self, reason):
# Check if there is a CSV file in arguments
csv_found = False
for arg in sys.argv:
if '.csv' in arg:
csv_found = True
if csv_found:
self.logger.info('Creating Excel file')
# Get the last csv file created
csv_file = max(glob.iglob('*.csv'), key=os.path.getctime)
wb = Workbook()
ws = wb.active
with open(csv_file, 'r', encoding='utf-8') as f:
for row in csv.reader(f):
# Check if the row is not empty
if row:
ws.append(row)
# Saves the file
wb.save(csv_file.replace('.csv', '') + '.xlsx')
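# --- Usage sketch (editor's illustration, not part of the original file) ---
# Besides the CLI call in the header comment, the spider can be driven
# programmatically with Scrapy's standard CrawlerProcess API; the FEEDS
# output path here is just an example.
#
# from scrapy.crawler import CrawlerProcess
# process = CrawlerProcess(settings={'FEEDS': {'products.csv': {'format': 'csv'}}})
# process.crawl(ProductsSpider, search='3d printed', reviews_option=1, count_max=50)
# process.start()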
|
litchiar/ArknightsAutoHelper | webgui2/worker.py | import logging
import sys
import threading
import multiprocessing
import queue as threading_Queue
import Arknights.helper
import config
from connector.ADBConnector import ADBConnector, ensure_adb_alive
from util.excutil import format_exception
from typing import Mapping
config.background = True
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
logger.propagate = False
class WebHandler(logging.Handler):
terminator = '\n'
def __init__(self, outq):
super().__init__()
self.outq = outq
def flush(self):
pass
def emit(self, record: logging.LogRecord):
try:
msg = self.format(record)
level = record.levelname.lower()
self.outq.put(dict(type="log", message=msg, level=level))
except RecursionError: # See issue 36272
raise
except Exception:
self.handleError(record)
class WorkerThread(threading.Thread):
def __init__(self, inq: threading_Queue.Queue, outq : multiprocessing.Queue, skip_event : threading.Event, interrupt_event : threading.Event):
super().__init__()
self.input = inq
self.output = outq
self.device = None
self.blocking = False
self.skip_wait_event = skip_event
self.interrupt_event = interrupt_event
self.helper = None
self.allowed_calls = {
"web:connect": self.web_connect,
"worker:set_enable_refill": lambda x: setattr(self.helper, 'use_refill', bool(x)),
"worker:set_refill_with_item": lambda x: setattr(self.helper, 'refill_with_item', bool(x)),
"worker:set_refill_with_originium": lambda x: setattr(self.helper, 'refill_with_originium', bool(x)),
"worker:set_max_refill_count": self.set_max_refill_count,
"worker:module_battle": self.ensure_connector_decorator(lambda stage, count: self.helper.module_battle(stage, int(count))),
"worker:module_battle_slim": self.ensure_connector_decorator(lambda count: self.helper.module_battle_slim(set_count=int(count))),
"worker:clear_task": self.ensure_connector_decorator(lambda: self.helper.clear_task()),# 清理任务
"worker:recruit": self.ensure_connector_decorator(lambda: self.helper.recruit()),
"worker:shopping":self.ensure_connector_decorator(lambda: self.helper.get_credit_new()),# 信用商店
"worker:building":self.ensure_connector_decorator(lambda: self.helper.get_building_new()()),# 基建
}
# threading.Thread
def run(self):
print("starting worker thread")
loghandler = WebHandler(self.output)
loghandler.setLevel(logging.INFO)
logging.root.addHandler(loghandler)
version = config.version
if config.get_instance_id() != 0:
version += f" (instance {config.get_instance_id()})"
self.notify("web:version", version)
ensure_adb_alive()
devices = ADBConnector.available_devices()
devices = ["adb:"+x[0] for x in devices]
self.notify("web:availiable-devices", devices)
self.helper = Arknights.helper.ArknightsHelper(frontend=self)
while True:
self.notify("worker:idle")
command : dict = self.input.get(block=True)
if command.get('type', None) == "call":
self.interrupt_event.clear()
self.notify('worker:busy')
tag = command.get('tag', None)
action = command.get('action', None)
return_value = None
exc = None
try:
func = self.allowed_calls[action]
args = command.get('args', [])
return_value = func(*args)
except:
exc = sys.exc_info()
if exc is None:
result = dict(type='call-result', status='resolved', tag=tag, return_value=return_value)
else:
result = dict(type='call-result', status='exception', tag=tag, exception=format_exception(*exc))
if tag is not None:
self.output.put_nowait(result)
# frontend, called by helper
def attach(self, helper):
pass
def alert(self, title, text, level='info', details=None):
"""user-targeted message"""
logger.info("sending alert %s %s %s %s", level, title, text, details)
self.output.put(dict(type="alert", title=title, message=text, level=level, details=details))
def notify(self, name, value=None):
"""program-targeted message"""
logger.info("sending notify %s %r", name, value)
self.output.put(dict(type="notify", name=name, value=value))
def delay(self, secs, allow_skip):
self.notify("wait", dict(duration=secs, allow_skip=allow_skip))
try:
if not allow_skip:
self.interrupt_event.wait(secs)
else:
if self.interrupt_event.is_set():
raise KeyboardInterrupt()
self.skip_wait_event.clear()
self.skip_wait_event.wait(secs)
if self.interrupt_event.is_set():
raise KeyboardInterrupt()
finally:
self.notify("wait", dict(duration=0, allow_skip=False))
# called by user
def web_connect(self, dev:str):
print(dev.split(':', 1))
connector_type, cookie = dev.split(':', 1)
if connector_type != 'adb':
raise KeyError("unknown connector type " + connector_type)
new_connector = ADBConnector(cookie)
connector_str = str(new_connector)
self.helper.connect_device(new_connector)
self.notify("web:current-device", connector_str)
def ensure_connector(self):
if self.helper.adb is None:
new_connector = ADBConnector.auto_connect()
self.helper.connect_device(new_connector)
self.notify("web:current-device", str(new_connector))
def ensure_connector_decorator(self, func):
def decorated(*args, **kwargs):
self.ensure_connector()
return func(*args, **kwargs)
return decorated
def set_max_refill_count(self, count):
self.helper.refill_count = 0
self.helper.max_refill_count = count
def worker_process(inq : multiprocessing.Queue, outq : multiprocessing.Queue):
print("starting worker process")
threadq = threading_Queue.Queue()
skip_evt = threading.Event()
intr_evt = threading.Event()
thr = WorkerThread(threadq, outq, skip_evt, intr_evt)
thr.daemon = True
thr.start()
print("starting worker process loop")
while True:
request = inq.get()
if request is None:
break
if not isinstance(request, Mapping):
outq.put(dict(type="alert", title="RPC Error", text="invalid request object", level="error"))
break
req_type = request.get("type", None)
if req_type == "web:skip":
skip_evt.set()
elif req_type == "web:interrupt":
intr_evt.set()
skip_evt.set()
elif req_type == "web:kill":
import os, signal
os.kill(os.getpid(), signal.SIGTERM)
else:
threadq.put(request)
outq.close()
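# --- Usage sketch (editor's illustration, not part of the original file) ---
# worker_process is meant to run in a child process, exchanging dict messages
# with a frontend over two queues; a minimal driver could look like this.
if __name__ == '__main__':
    inq, outq = multiprocessing.Queue(), multiprocessing.Queue()
    proc = multiprocessing.Process(target=worker_process, args=(inq, outq))
    proc.start()
    # 'tag' correlates the eventual call-result with this request
    inq.put({'type': 'call', 'tag': 1, 'action': 'worker:recruit', 'args': []})
    print(outq.get())  # first message back (a log/notify/call-result dict)
    inq.put(None)      # None makes the worker_process loop exit
    proc.join()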
|
litchiar/ArknightsAutoHelper | addons/base.py | import random
from Arknights.helper import ArknightsHelper
from abc import ABC, abstractmethod
import time
import cv2
from PIL import Image
import numpy as np
from imgreco import util
def cv2pil(cv_img):
return Image.fromarray(cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB))
def pil2cv(pil_img):
return cv2.cvtColor(np.asarray(pil_img), cv2.COLOR_RGB2BGR)
def crop_cv_by_rect(cv_img, rect):
l, t, r, b = tuple(map(int, rect))
return cv_img[t:b, l:r]
def show_img(img):
cv2.imshow('test', img)
cv2.waitKey()
class BaseAddOn(ABC):
def __init__(self, helper=None):
if helper is None:
helper = ArknightsHelper()
self.helper = helper
self.vw, self.vh = util.get_vwvh(self.helper.viewport)
@abstractmethod
def run(self, **kwargs):
pass
def click(self, pos, sleep_time=0.5, randomness=(5, 5)):
x, y = pos
rx, ry = randomness
x += random.randint(-rx, rx)
y += random.randint(-ry, ry)
self.helper.adb.touch_tap((x, y))
time.sleep(sleep_time)
def screenshot(self):
return self.helper.adb.screenshot()
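# --- Usage sketch (editor's illustration, not part of the original file) ---
# BaseAddOn is abstract; a concrete add-on only needs run(). This toy example
# taps the center of the screen and is purely illustrative.
class TapCenterAddOn(BaseAddOn):
    def run(self, **kwargs):
        img = self.screenshot()       # PIL image taken from the device
        w, h = img.size
        self.click((w // 2, h // 2))  # tap the center, with built-in jitter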
|
litchiar/ArknightsAutoHelper | imgreco/main.py | from fractions import Fraction
import numpy as np
from PIL import Image
from util.richlog import get_logger
from . import imgops
from . import resources
from . import util
logger = get_logger(__name__)
def check_main(img):
vw, vh = util.get_vwvh(img.size)
gear1 = img.crop((3.148 * vh, 2.037 * vh, 9.907 * vh, 8.796 * vh)).convert('L')
gear2 = resources.load_image_cached('main/gear.png', 'L')
gear1, gear2 = imgops.uniform_size(gear1, gear2)
result = imgops.compare_ccoeff(gear1, gear2)
# result = np.corrcoef(np.asarray(gear1).flat, np.asarray(gear2).flat)[0, 1]
logger.logimage(gear1)
logger.logtext('ccoeff=%f' % result)
return result > 0.9
def get_ballte_corners(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (
(61.120 * vw, 16.944 * vh), (82.213 * vw, 15.139 * vh), (82.213 * vw, 37.083 * vh),
(61.120 * vw, 38.333 * vh))
elif aspect == Fraction(18, 9):
return (
(64.693 * vw, 16.852 * vh), (82.378 * vw, 14.352 * vh), (82.378 * vw, 37.500 * vh),
(64.693 * vw, 37.963 * vh))
else:
return [x[0] for x in imgops.find_homography(resources.load_image_cached('main/terminal.png', 'L'), img)]
def get_task_corners(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((55.602 * vw, 75.880 * vh)), np.array((70.367 * vw, 78.241 * vh)),
np.array((70.367 * vw, 91.991 * vh)), np.array((55.602 * vw, 88.518 * vh)))
elif aspect == Fraction(18, 9):
return (np.array((58.489 * vw, 76.296 * vh)), np.array((72.008 * vw, 78.611 * vh)),
np.array((72.008 * vw, 92.685 * vh)), np.array((58.489 * vw, 89.167 * vh)))
else:
return [x[0] for x in imgops.find_homography(resources.load_image_cached('main/quest.png', 'L'), img)]
# The following functions are used to visit friends' bases
def get_friend_corners(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((22.734 * vw, 76.667 * vh)), np.array((33.203 * vw, 76.667 * vh)),
np.array((33.203 * vw, 82.083 * vh)), np.array((22.734 * vw, 82.083 * vh)))
else:
return [x[0] for x in imgops.find_homography(resources.load_image_cached('main/friends.png', 'L'), img)]
def get_friend_list(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (
np.array((1.484 * vw, 25.694 * vh)), np.array((16.797 * vw, 25.694 * vh)),
np.array((16.797 * vw, 36.111 * vh)),
np.array((1.484 * vw, 36.111 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
# Locate the purchase center
def get_shopping_center(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((747 / 12.8 * vw, 421 / 7.2 * vh)), np.array((924 / 12.8 * vw, 421 / 7.2 * vh)),
np.array((926 / 12.8 * vw, 538 / 7.2 * vh)), np.array((747 / 12.8 * vw, 532 / 7.2 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
def get_building_button(img):  # enter the base
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((921 / 12.8 * vw, 562 / 7.2 * vh)), np.array((1145 / 12.8 * vw, 579 / 7.2 * vh)),
np.array((1137 / 12.8 * vw, 699 / 7.2 * vh)), np.array((921 / 12.8 * vw, 672 / 7.2 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
# Enter the credit exchange
def get_credit_center(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((1095 / 12.8 * vw, 88 / 7.2 * vh)), np.array((1268 / 12.8 * vw, 87 / 7.2 * vh)),
np.array((1266 / 12.8 * vw, 129 / 7.2 * vh)), np.array((1095 / 12.8 * vw, 126 / 7.2 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
# Collect credit
def get_credit_daily(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((965 / 12.8 * vw, 25 / 7.2 * vh)), np.array((1071 / 12.8 * vw, 24 / 7.2 * vh)),
np.array((1073 / 12.8 * vw, 51 / 7.2 * vh)), np.array((965 / 12.8 * vw, 54 / 7.2 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
# Collect credit
def get_credit_item(img, index):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
credit_items = {
0: (np.array((136 / 12.8 * vw, 273 / 7.2 * vh)), np.array((136 / 12.8 * vw, 273 / 7.2 * vh)),
np.array((136 / 12.8 * vw, 273 / 7.2 * vh)), np.array((136 / 12.8 * vw, 273 / 7.2 * vh))),
1: (np.array((388 / 12.8 * vw, 267 / 7.2 * vh)), np.array((388 / 12.8 * vw, 267 / 7.2 * vh)),
np.array((388 / 12.8 * vw, 267 / 7.2 * vh)), np.array((388 / 12.8 * vw, 267 / 7.2 * vh))),
2: (np.array((643 / 12.8 * vw, 264 / 7.2 * vh)), np.array((643 / 12.8 * vw, 264 / 7.2 * vh)),
np.array((643 / 12.8 * vw, 264 / 7.2 * vh)), np.array((643 / 12.8 * vw, 264 / 7.2 * vh))),
3: (np.array((900 / 12.8 * vw, 259 / 7.2 * vh)), np.array((900 / 12.8 * vw, 259 / 7.2 * vh)),
np.array((900 / 12.8 * vw, 259 / 7.2 * vh)), np.array((900 / 12.8 * vw, 259 / 7.2 * vh))),
4: (np.array((1149 / 12.8 * vw, 270 / 7.2 * vh)), np.array((1149 / 12.8 * vw, 270 / 7.2 * vh)),
np.array((1149 / 12.8 * vw, 270 / 7.2 * vh)), np.array((1149 / 12.8 * vw, 270 / 7.2 * vh))),
5: (np.array((127 / 12.8 * vw, 523 / 7.2 * vh)), np.array((127 / 12.8 * vw, 523 / 7.2 * vh)),
np.array((127 / 12.8 * vw, 523 / 7.2 * vh)), np.array((127 / 12.8 * vw, 523 / 7.2 * vh))),
}
return credit_items[index]
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
# Collect credit
def get_state(img, index):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
credit_items = {1: (np.array((1047 / 12.8 * vw, 37 / 7.2 * vh)), np.array((1047 / 12.8 * vw, 37 / 7.2 * vh)),
np.array((1047 / 12.8 * vw, 37 / 7.2 * vh)), np.array((1047 / 12.8 * vw, 37 / 7.2 * vh))),
2: (np.array((881 / 12.8 * vw, 39 / 7.2 * vh)), np.array((881 / 12.8 * vw, 39 / 7.2 * vh)),
np.array((881 / 12.8 * vw, 39 / 7.2 * vh)), np.array((881 / 12.8 * vw, 39 / 7.2 * vh))),
}
return credit_items[index]
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
def get_building_blocks(img, index):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
credit_items = {1: (np.array((145 / 12.8 * vw, 316 / 7.2 * vh)), np.array((145 / 12.8 * vw, 316 / 7.2 * vh)),
np.array((145 / 12.8 * vw, 316 / 7.2 * vh)), np.array((145 / 12.8 * vw, 316 / 7.2 * vh))),
2: (np.array((278 / 12.8 * vw, 315 / 7.2 * vh)), np.array((278 / 12.8 * vw, 315 / 7.2 * vh)),
np.array((278 / 12.8 * vw, 315 / 7.2 * vh)), np.array((278 / 12.8 * vw, 315 / 7.2 * vh))),
3: (np.array((485 / 12.8 * vw, 316 / 7.2 * vh)), np.array((485 / 12.8 * vw, 316 / 7.2 * vh)),
np.array((485 / 12.8 * vw, 316 / 7.2 * vh)), np.array((485 / 12.8 * vw, 316 / 7.2 * vh))),
4: (np.array((13 / 12.8 * vw, 415 / 7.2 * vh)), np.array((13 / 12.8 * vw, 415 / 7.2 * vh)),
np.array((13 / 12.8 * vw, 415 / 7.2 * vh)), np.array((13 / 12.8 * vw, 415 / 7.2 * vh))),
5: (np.array((166 / 12.8 * vw, 412 / 7.2 * vh)), np.array((166 / 12.8 * vw, 412 / 7.2 * vh)),
np.array((166 / 12.8 * vw, 412 / 7.2 * vh)), np.array((166 / 12.8 * vw, 412 / 7.2 * vh))),
6: (np.array((380 / 12.8 * vw, 415 / 7.2 * vh)), np.array((380 / 12.8 * vw, 415 / 7.2 * vh)),
np.array((380 / 12.8 * vw, 415 / 7.2 * vh)), np.array((380 / 12.8 * vw, 415 / 7.2 * vh))),
7: (np.array((62 / 12.8 * vw, 496 / 7.2 * vh)), np.array((65 / 12.8 * vw, 496 / 7.2 * vh)),
np.array((62 / 12.8 * vw, 496 / 7.2 * vh)), np.array((65 / 12.8 * vw, 496 / 7.2 * vh))),
8: (np.array((268 / 12.8 * vw, 496 / 7.2 * vh)), np.array((268 / 12.8 * vw, 496 / 7.2 * vh)),
np.array((268 / 12.8 * vw, 496 / 7.2 * vh)), np.array((268 / 12.8 * vw, 496 / 7.2 * vh))),
9: (np.array((484 / 12.8 * vw, 496 / 7.2 * vh)), np.array((484 / 12.8 * vw, 496 / 7.2 * vh)),
np.array((484 / 12.8 * vw, 496 / 7.2 * vh)), np.array((484 / 12.8 * vw, 496 / 7.2 * vh))),
# control center
10: (np.array((861 / 12.8 * vw, 153 / 7.2 * vh)), np.array((861 / 12.8 * vw, 153 / 7.2 * vh)),
np.array((861 / 12.8 * vw, 153 / 7.2 * vh)), np.array((861 / 12.8 * vw, 153 / 7.2 * vh))),
# dormitory 1
11: (np.array((812 / 12.8 * vw, 310 / 7.2 * vh)), np.array((812 / 12.8 * vw, 310 / 7.2 * vh)),
np.array((812 / 12.8 * vw, 310 / 7.2 * vh)), np.array((812 / 12.8 * vw, 310 / 7.2 * vh))),
# dormitory 2
12: (np.array((893 / 12.8 * vw, 418 / 7.2 * vh)), np.array((893 / 12.8 * vw, 418 / 7.2 * vh)),
np.array((893 / 12.8 * vw, 418 / 7.2 * vh)), np.array((893 / 12.8 * vw, 418 / 7.2 * vh))),
# dormitory 3
13: (np.array((785 / 12.8 * vw, 517 / 7.2 * vh)), np.array((785 / 12.8 * vw, 517 / 7.2 * vh)),
np.array((785 / 12.8 * vw, 517 / 7.2 * vh)), np.array((785 / 12.8 * vw, 517 / 7.2 * vh))),
# dormitory 4
14: (np.array((913 / 12.8 * vw, 632 / 7.2 * vh)), np.array((913 / 12.8 * vw, 632 / 7.2 * vh)),
np.array((913 / 12.8 * vw, 632 / 7.2 * vh)), np.array((913 / 12.8 * vw, 632 / 7.2 * vh))),
}
return credit_items[index]
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
def get_credit_shopping_check(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((801 / 12.8 * vw, 556 / 7.2 * vh)), np.array((1028 / 12.8 * vw, 552 / 7.2 * vh)),
np.array((1023 / 12.8 * vw, 603 / 7.2 * vh)), np.array((815 / 12.8 * vw, 604 / 7.2 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
def get_back(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((22 / 12.8 * vw, 21 / 7.2 * vh)), np.array((152 / 12.8 * vw, 21 / 7.2 * vh)),
np.array((151 / 12.8 * vw, 61 / 7.2 * vh)), np.array((19 / 12.8 * vw, 61 / 7.2 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
def get_back2(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((23 / 12.8 * vw, 18 / 7.2 * vh)), np.array((163 / 12.8 * vw, 15 / 7.2 * vh)),
np.array((160 / 12.8 * vw, 60 / 7.2 * vh)), np.array((20 / 12.8 * vw, 60 / 7.2 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
def get_back2_yes(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((849 / 12.8 * vw, 498 / 7.2 * vh)), np.array((849 / 12.8 * vw, 498 / 7.2 * vh)),
np.array((849 / 12.8 * vw, 498 / 7.2 * vh)), np.array((849 / 12.8 * vw, 498 / 7.2 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
def get_back2_clear(img):  # clear selection
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((512 / 12.8 * vw, 672 / 7.2 * vh)), np.array((512 / 12.8 * vw, 672 / 7.2 * vh)),
np.array((512 / 12.8 * vw, 672 / 7.2 * vh)), np.array((512 / 12.8 * vw, 672 / 7.2 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
# Open the occupant assignment panel
def get_setting_block(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((7 / 12.8 * vw, 246 / 7.2 * vh)), np.array((113 / 12.8 * vw, 246 / 7.2 * vh)),
np.array((118 / 12.8 * vw, 352 / 7.2 * vh)), np.array((7 / 12.8 * vw, 349 / 7.2 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
def get_clear_working(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((878 / 12.8 * vw, 109 / 7.2 * vh)), np.array((963 / 12.8 * vw, 123 / 7.2 * vh)),
np.array((974 / 12.8 * vw, 213 / 7.2 * vh)), np.array((860 / 12.8 * vw, 210 / 7.2 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
def get_character(img, index):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
if index == 1:
return (np.array((427 / 12.8 * vw, 129 / 7.2 * vh)), np.array((527 / 12.8 * vw, 135 / 7.2 * vh)),
np.array((541 / 12.8 * vw, 324 / 7.2 * vh)), np.array((431 / 12.8 * vw, 315 / 7.2 * vh)))
if index == 2:
return (np.array((430 / 12.8 * vw, 415 / 7.2 * vh)), np.array((538 / 12.8 * vw, 418 / 7.2 * vh)),
np.array((530 / 12.8 * vw, 586 / 7.2 * vh)), np.array((428 / 12.8 * vw, 580 / 7.2 * vh)))
if index == 3:
return (np.array((583 / 12.8 * vw, 141 / 7.2 * vh)), np.array((668 / 12.8 * vw, 142 / 7.2 * vh)),
np.array((681 / 12.8 * vw, 307 / 7.2 * vh)), np.array((580 / 12.8 * vw, 289 / 7.2 * vh)))
if index == 4:
return (np.array((572 / 12.8 * vw, 405 / 7.2 * vh)), np.array((678 / 12.8 * vw, 403 / 7.2 * vh)),
np.array((683 / 12.8 * vw, 597 / 7.2 * vh)), np.array((565 / 12.8 * vw, 591 / 7.2 * vh)))
if index == 5:
return (np.array((723 / 12.8 * vw, 111 / 7.2 * vh)), np.array((833 / 12.8 * vw, 114 / 7.2 * vh)),
np.array((816 / 12.8 * vw, 310 / 7.2 * vh)), np.array((723 / 12.8 * vw, 297 / 7.2 * vh)))
if index == 6:
return (np.array((714 / 12.8 * vw, 400 / 7.2 * vh)), np.array((827 / 12.8 * vw, 400 / 7.2 * vh)),
np.array((830 / 12.8 * vw, 610 / 7.2 * vh)), np.array((714 / 12.8 * vw, 595 / 7.2 * vh)))
if index == 7:
return (np.array((878 / 12.8 * vw, 109 / 7.2 * vh)), np.array((963 / 12.8 * vw, 123 / 7.2 * vh)),
np.array((968 / 12.8 * vw, 315 / 7.2 * vh)), np.array((869 / 12.8 * vw, 300 / 7.2 * vh)))
if index == 8:
return (np.array((864 / 12.8 * vw, 402 / 7.2 * vh)), np.array((963 / 12.8 * vw, 412 / 7.2 * vh)),
np.array((954 / 12.8 * vw, 585 / 7.2 * vh)), np.array((863 / 12.8 * vw, 573 / 7.2 * vh)))
if index == 9:
return (np.array((1004 / 12.8 * vw, 114 / 7.2 * vh)), np.array((1100 / 12.8 * vw, 113 / 7.2 * vh)),
np.array((1100 / 12.8 * vw, 294 / 7.2 * vh)), np.array((1005 / 12.8 * vw, 325 / 7.2 * vh)))
if index == 10:
return (np.array((1010 / 12.8 * vw, 411 / 7.2 * vh)), np.array((1100 / 12.8 * vw, 415 / 7.2 * vh)),
np.array((1100 / 12.8 * vw, 588 / 7.2 * vh)), np.array((1010 / 12.8 * vw, 577 / 7.2 * vh)))
if index == -1:  # confirm
return (np.array((1100 / 12.8 * vw, 654 / 7.2 * vh)), np.array((1253 / 12.8 * vw, 660 / 7.2 * vh)),
np.array((1251 / 12.8 * vw, 694 / 7.2 * vh)), np.array((1113 / 12.8 * vw, 694 / 7.2 * vh)))
if index == -2:  # center
return (np.array((611 / 12.8 * vw, 370 / 7.2 * vh)), np.array((611 / 12.8 * vw, 370 / 7.2 * vh)),
np.array((611 / 12.8 * vw, 370 / 7.2 * vh)), np.array((611 / 12.8 * vw, 370 / 7.2 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
def get_color_is_white(img):
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
pixels = img.load()
data = pixels[1194 / 12.8 * vw, 90 / 7.2 * vh]
# a white/grayscale pixel has all three channels equal
return data[0] == data[1] == data[2]
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
def get_color_is_black(img):
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
pixels = img.load()
data = pixels[400 / 12.8 * vw, 360 / 7.2 * vh]
# the dimmed (black) overlay is grayscale: all three channels equal
return data[0] == data[1] == data[2]
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
def get_choose_rest(img, index):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
if index == 1:  # filter
return (np.array((1175 / 12.8 * vw, 39 / 7.2 * vh)), np.array((1175 / 12.8 * vw, 39 / 7.2 * vh)),
np.array((1175 / 12.8 * vw, 39 / 7.2 * vh)), np.array((1175 / 12.8 * vw, 39 / 7.2 * vh)))
if index == 2:
return (np.array((400 / 12.8 * vw, 360 / 7.2 * vh)), np.array((400 / 12.8 * vw, 360 / 7.2 * vh)),
np.array((400 / 12.8 * vw, 360 / 7.2 * vh)), np.array((400 / 12.8 * vw, 360 / 7.2 * vh)))
if index == 3:
return (np.array((941 / 12.8 * vw, 552 / 7.2 * vh)), np.array((941 / 12.8 * vw, 552 / 7.2 * vh)),
np.array((941 / 12.8 * vw, 552 / 7.2 * vh)), np.array((941 / 12.8 * vw, 552 / 7.2 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
def get_friend_build(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((74.065 * vw, 17.134 * vh)), np.array((79.967 * vw, 17.134 * vh)),
np.array((79.967 * vw, 28.065 * vh)), np.array((74.065 * vw, 28.065 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
def get_next_friend_build(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((85.625 * vw, 79.444 * vh)), np.array((99.531 * vw, 79.444 * vh)),
np.array((99.531 * vw, 93.750 * vh)), np.array((85.625 * vw, 93.750 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
def get_back_my_build(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((72.266 * vw, 81.528 * vh)), np.array((88.750 * vw, 81.528 * vh)),
np.array((88.750 * vw, 92.500 * vh)), np.array((72.266 * vw, 92.500 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
# Click the hint at the top right of the base main screen (to bring up one-tap collection)
def get_my_build_task_1(img):  # previous
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((1178 / 12.8 * vw, 73 / 7.2 * vh)), np.array((1272 / 12.8 * vw, 72 / 7.2 * vh)),
np.array((1272 / 12.8 * vw, 112 / 7.2 * vh)), np.array((1175 / 12.8 * vw, 114 / 7.2 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
def get_my_build_task_2(img):  # next
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((1178 / 12.8 * vw, 123 / 7.2 * vh)), np.array((1272 / 12.8 * vw, 124 / 7.2 * vh)),
np.array((1272 / 12.8 * vw, 162 / 7.2 * vh)), np.array((1175 / 12.8 * vw, 162 / 7.2 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
# One-tap collect the items from the factories
def get_my_build_task_clear(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((12.500 * vw, 91.667 * vh)), np.array((16.797 * vw, 91.667 * vh)),
np.array((16.797 * vw, 98.472 * vh)), np.array((12.500 * vw, 98.472 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
# Click from the base main screen into the second trading post
def get_my_sell_task_1(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (
np.array((5.781 * vw, 51.806 * vh)), np.array((14.688 * vw, 51.806 * vh)),
np.array((14.688 * vw, 59.167 * vh)),
np.array((5.781 * vw, 59.167 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
# Open the order list page
def get_my_sell_tasklist(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (
np.array((1.094 * vw, 75.833 * vh)), np.array((41.719 * vw, 75.833 * vh)),
np.array((41.719 * vw, 95.139 * vh)),
np.array((1.094 * vw, 95.139 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
# Click a 'deliverable' order
def get_my_sell_task_main(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (np.array((29.297 * vw, 26.528 * vh)), np.array((37.109 * vw, 26.528 * vh)),
np.array((37.109 * vw, 61.111 * vh)), np.array((29.297 * vw, 61.111 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
# From the order list, switch to another trading post's order list
def get_my_sell_task_2(img):
"""
:returns: [0][1]
[3][2]
"""
aspect = Fraction(*img.size)
vw, vh = util.get_vwvh(img)
if aspect == Fraction(16, 9):
return (
np.array((1.094 * vw, 25.972 * vh)), np.array((16.875 * vw, 25.972 * vh)),
np.array((16.875 * vw, 33.472 * vh)),
np.array((1.094 * vw, 33.472 * vh)))
else:
# FIXME: implement with feature matching?
raise NotImplementedError('unsupported aspect ratio')
if __name__ == "__main__":
import sys
print(check_main(Image.open(sys.argv[-1])))
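# --- Usage sketch (editor's illustration, not part of the original file) ---
# Every get_* helper above returns four corner points in [0][1] / [3][2]
# order; a common pattern is to tap the center of that rectangle.
def rect_center(corners):
    """Return the (x, y) midpoint of a four-corner rect from a get_* helper."""
    pts = np.asarray(corners, dtype=float)
    x, y = pts.mean(axis=0)
    return float(x), float(y)
# e.g. x, y = rect_center(get_friend_corners(img))  # img: a PIL.Image screenshot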
|
litchiar/ArknightsAutoHelper | resources/recruit_database.py | recruit_database = [
('Lancet-2', 0, ['医疗干员', '远程位', '治疗', '支援机械']),
('Castle-3', 0, ['近卫干员', '近战位', '支援', '支援机械']),
('夜刀', 1, ['先锋干员', '近战位', '新手']),
('黑角', 1, ['重装干员', '近战位', '新手']),
('巡林者', 1, ['狙击干员', '远程位', '新手']),
('杜林', 1, ['术师干员', '远程位', '新手']),
('12F', 1, ['术师干员', '远程位', '新手']),
('芬', 2, ['先锋干员', '近战位', '费用回复']),
('香草', 2, ['先锋干员', '近战位', '费用回复']),
('翎羽', 2, ['先锋干员', '近战位', '输出', '费用回复']),
('玫兰莎', 2, ['近卫干员', '近战位', '输出', '生存']),
('米格鲁', 2, ['重装干员', '近战位', '防护']),
('克洛丝', 2, ['狙击干员', '远程位', '输出']),
('安德切尔', 2, ['狙击干员', '远程位', '输出']),
('炎熔', 2, ['术师干员', '远程位', '群攻']),
('芙蓉', 2, ['医疗干员', '远程位', '治疗']),
('安赛尔', 2, ['医疗干员', '远程位', '治疗']),
('史都华德', 2, ['术师干员', '远程位', '输出']),
('梓兰', 2, ['辅助干员', '远程位', '减速']),
('夜烟', 3, ['术师干员', '远程位', '输出', '削弱']),
('远山', 3, ['术师干员', '远程位', '群攻']),
('杰西卡', 3, ['狙击干员', '远程位', '输出', '生存']),
('流星', 3, ['狙击干员', '远程位', '输出', '削弱']),
('白雪', 3, ['狙击干员', '远程位', '群攻', '减速']),
('清道夫', 3, ['先锋干员', '近战位', '费用回复', '输出']),
('红豆', 3, ['先锋干员', '近战位', '输出', '费用回复']),
('杜宾', 3, ['近卫干员', '近战位', '输出', '支援']),
('缠丸', 3, ['近卫干员', '近战位', '生存', '输出']),
('霜叶', 3, ['近卫干员', '近战位', '减速', '输出']),
('艾丝黛尔', 3, ['近卫干员', '近战位', '群攻', '生存']),
('慕斯', 3, ['近卫干员', '近战位', '输出']),
('砾', 3, ['特种干员', '近战位', '快速复活', '防护']),
('暗索', 3, ['特种干员', '近战位', '位移']),
('末药', 3, ['医疗干员', '远程位', '治疗']),
('嘉维尔', 3, ['医疗干员', '远程位', '治疗']),
('调香师', 3, ['医疗干员', '远程位', '治疗']),
('角峰', 3, ['重装干员', '近战位', '防护']),
('蛇屠箱', 3, ['重装干员', '近战位', '防护']),
('古米', 3, ['重装干员', '近战位', '防护', '治疗']),
('地灵', 3, ['辅助干员', '远程位', '减速']),
('阿消', 3, ['特种干员', '近战位', '位移']),
('白面鸮', 4, ['医疗干员', '远程位', '治疗', '支援']),
('凛冬', 4, ['先锋干员', '近战位', '费用回复', '支援']),
('德克萨斯', 4, ['先锋干员', '近战位', '费用回复', '控场']),
('因陀罗', 4, ['近卫干员', '近战位', '输出', '生存']),
('幽灵鲨', 4, ['近卫干员', '近战位', '群攻', '生存']),
('蓝毒', 4, ['狙击干员', '远程位', '输出']),
('白金', 4, ['狙击干员', '远程位', '输出']),
('陨星', 4, ['狙击干员', '远程位', '群攻', '削弱']),
('梅尔', 4, ['辅助干员', '远程位', '召唤', '控场']),
('赫默', 4, ['医疗干员', '远程位', '治疗']),
('华法琳', 4, ['医疗干员', '远程位', '治疗', '支援']),
('临光', 4, ['重装干员', '近战位', '防护', '治疗']),
('红', 4, ['特种干员', '近战位', '快速复活', '控场']),
('雷蛇', 4, ['重装干员', '近战位', '防护', '输出']),
('可颂', 4, ['重装干员', '近战位', '防护', '位移']),
('火神', 4, ['重装干员', '近战位', '生存', '防护', '输出']),
('普罗旺斯', 4, ['狙击干员', '远程位', '输出']),
('守林人', 4, ['狙击干员', '远程位', '输出', '爆发']),
('崖心', 4, ['特种干员', '近战位', '位移', '输出']),
('初雪', 4, ['辅助干员', '远程位', '削弱']),
('真理', 4, ['辅助干员', '远程位', '减速', '输出']),
('狮蝎', 4, ['特种干员', '近战位', '输出', '生存']),
('食铁兽', 4, ['特种干员', '近战位', '位移', '减速']),
('能天使', 5, ['狙击干员', '远程位', '输出']),
('推进之王', 5, ['先锋干员', '近战位', '费用回复', '输出']),
('伊芙利特', 5, ['术师干员', '远程位', '群攻', '削弱']),
('闪灵', 5, ['医疗干员', '远程位', '治疗', '支援']),
('夜莺', 5, ['医疗干员', '远程位', '治疗', '支援']),
('星熊', 5, ['重装干员', '近战位', '防护', '输出']),
('塞雷娅', 5, ['重装干员', '近战位', '防护', '治疗', '支援']),
('银灰', 5, ['近卫干员', '近战位', '输出', '支援']),
('空爆', 2, ['狙击干员', '远程位', '群攻']),
('月见夜', 2, ['近卫干员', '近战位', '输出']),
('猎蜂', 3, ['近卫干员', '近战位', '输出']),
('夜魔', 4, ['术师干员', '远程位', '输出', '治疗', '减速']),
('斯卡蒂', 5, ['近卫干员', '近战位', '输出', '生存']),
('陈', 5, ['近卫干员', '近战位', '输出', '爆发']),
('诗怀雅', 4, ['近卫干员', '近战位', '输出', '支援']),
('格雷伊', 3, ['术师干员', '远程位', '群攻', '减速']),
('泡普卡', 2, ['近卫干员', '近战位', '群攻', '生存']),
('斑点', 2, ['重装干员', '近战位', '防护', '治疗']),
('THRM-EX', 0, ['特种干员', '近战位', '爆发', '支援机械']),
('黑', 5, ['狙击干员', '远程位', '输出']),
('赫拉格', 5, ['近卫干员', '近战位', '输出', '生存']),
('格劳克斯', 4, ['辅助干员', '远程位', '减速', '控场']),
('星极', 4, ['近卫干员', '近战位', '输出', '防护']),
('苏苏洛', 3, ['医疗干员', '远程位', '治疗']),
('桃金娘', 3, ['先锋干员', '近战位', '费用回复', '治疗']),
('麦哲伦', 5, ['辅助干员', '远程位', '支援', '减速', '输出']),
('送葬人', 4, ['狙击干员', '远程位', '群攻']),
('红云', 3, ['狙击干员', '远程位', '输出']),
('莫斯提马', 5, ['术师干员', '远程位', '群攻', '支援', '控场']),
('槐琥', 4, ['特种干员', '近战位', '快速复活', '削弱']),
('清流', 3, ['医疗干员', '远程位', '治疗', '支援']),
('梅', 3, ['狙击干员', '远程位', '输出', '减速']),
('煌', 5, ['近卫干员', '近战位', '输出', '生存']),
('灰喉', 4, ['狙击干员', '远程位', '输出']),
('苇草', 4, ['先锋干员', '近战位', '费用回复', '输出']),
('布洛卡', 4, ['近卫干员', '近战位', '群攻', '生存']),
('安比尔', 3, ['狙击干员', '远程位', '输出', '减速']),
]
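# --- Usage sketch (editor's illustration, not part of the original file) ---
# Each entry is (name, rarity_index, tags); the integer appears to be the
# star rating minus one (0 = 1-star robot, 5 = 6-star operator).
def find_by_tags(wanted_tags):
    """Return operator names whose tag list contains every wanted tag."""
    wanted = set(wanted_tags)
    return [name for name, _rarity, tags in recruit_database
            if wanted.issubset(tags)]
# e.g. find_by_tags(['医疗干员', '治疗']) lists every healing medic operator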
|
frmdstryr/enaml-native-maps | src/googlemaps/android/android_map_view.py | """
Copyright (c) 2017-2018, <NAME>.
Distributed under the terms of the MIT License.
The full license is in the file COPYING.txt, distributed with this software.
Created on Oct 10, 2017
@author: jrm
"""
from atom.api import Typed, Instance, Dict, Bool, set_default
from googlemaps.widgets.map_view import (
ProxyMapView, ProxyMapMarker, ProxyMapCircle, ProxyMapPolyline, ProxyMapPolygon
)
from enamlnative.core import bridge
from enamlnative.android.android_toolkit_object import AndroidToolkitObject
from enamlnative.android.android_frame_layout import AndroidFrameLayout, FrameLayout
from enamlnative.android.android_fragment import FragmentTransaction, FragmentManager
from enamlnative.android.android_utils import ArrayList
from enamlnative.android.bridge import JavaBridgeObject, JavaMethod, JavaStaticMethod, JavaCallback, JavaProxy
from enamlnative.android.api import LocationManager
class ConnectionResult:
SUCCESS = 0
API_UNAVAILABLE = 16
CANCELED = 13
DEVELOPER_ERROR = 10
DRIVE_EXTERNAL_STORAGE_REQUIRED = 1500
INTERNAL_ERROR = 8
INTERRUPTED = 15
INVALID_ACCOUNT = 5
LICENSE_CHECK_FAILED = 11
NETWORK_ERROR = 7
RESOLUTION_REQUIRED = 6
RESTRICTED_PROFILE = 20
SERVICE_DISABLED = 3
SERVICE_INVALID = 9
SERVICE_MISSING = 1
SERVICE_MISSING_PERMISSION = 19
SERVICE_UPDATING = 18
SERVICE_VERSION_UPDATE_REQUIRED = 2
SIGN_IN_FAILED = 17
SIGN_IN_REQUIRED = 4
TIMEOUT = 14
class LatLngList(ArrayList):
""" A ArrayList<LatLng> that handles changes from an atom ContainerList"""
def refresh_points(self, points):
coordinates = [LatLng(*p) for p in points]
self.clear()
# Must manually encode these; the bridge currently
# doesn't try to, as that is slower
self.addAll([bridge.encode(c) for c in coordinates])
def handle_change(self, change):
""" Handle changes from atom ContainerLists """
op = change['operation']
if op == 'append':
self.add(len(change['value']), LatLng(*change['item']))
elif op == 'insert':
self.add(change['index'], LatLng(*change['item']))
elif op == 'extend':
points = [LatLng(*p) for p in change['items']]
self.addAll([bridge.encode(c) for c in points])
elif op == '__setitem__':
self.set(change['index'], LatLng(*change['newitem']))
elif op == 'pop':
self.remove(change['index'])
else:
raise NotImplementedError(
"Unsupported change operation {}".format(op))
class GoogleMap(JavaBridgeObject):
addCircle = JavaMethod('com.google.android.gms.maps.model.CircleOptions',
returns='com.google.android.gms.maps.model.Circle')
addMarker = JavaMethod('com.google.android.gms.maps.model.MarkerOptions',
returns='com.google.android.gms.maps.model.Marker')
addPolyline = JavaMethod(
'com.google.android.gms.maps.model.PolylineOptions',
returns='com.google.android.gms.maps.model.Polyline')
addPolygon = JavaMethod(
'com.google.android.gms.maps.model.PolygonOptions',
returns='com.google.android.gms.maps.model.Polygon')
onMapReady = JavaCallback('com.google.android.gms.maps.GoogleMap')
animateCamera = JavaMethod('com.google.android.gms.maps.CameraUpdate')
setLatLngBoundsForCameraTarget = JavaMethod(
'com.google.android.gms.maps.model.LatLngBounds')
setMapType = JavaMethod('int')
setMaxZoomPreference = JavaMethod('float')
setMinZoomPreference = JavaMethod('float')
setMyLocationEnabled = JavaMethod('boolean')
setBuildingsEnabled = JavaMethod('boolean')
setIndoorEnabled = JavaMethod('boolean')
setTrafficEnabled = JavaMethod('boolean')
setOnCameraChangeListener = JavaMethod(
'com.google.android.gms.maps.GoogleMap$OnCameraChangeListener')
onCameraChange = JavaCallback(
'com.google.android.gms.maps.model.CameraPosition')
setOnCameraMoveStartedListener = JavaMethod(
'com.google.android.gms.maps.GoogleMap$OnCameraMoveStartedListener')
onCameraMoveStarted = JavaCallback('int')
setOnCameraMoveCanceledListener = JavaMethod(
'com.google.android.gms.maps.GoogleMap$OnCameraMoveCanceledListener')
onCameraMoveCanceled = JavaCallback()
setOnCameraIdleListener = JavaMethod(
'com.google.android.gms.maps.GoogleMap$OnCameraIdleListener')
onCameraIdle = JavaCallback()
CAMERA_REASON_GESTURE = 1
CAMERA_REASON_API_ANIMATION = 2
CAMERA_REASON_DEVELOPER_ANIMATION = 3
setOnMarkerClickListener = JavaMethod(
'com.google.android.gms.maps.GoogleMap$OnMarkerClickListener')
onMarkerClick = JavaCallback('com.google.android.gms.maps.model.Marker',
returns='boolean')
setOnMarkerDragListener = JavaMethod(
'com.google.android.gms.maps.GoogleMap$OnMarkerDragListener')
onMarkerDrag = JavaCallback('com.google.android.gms.maps.model.Marker')
onMarkerDragEnd = JavaCallback('com.google.android.gms.maps.model.Marker')
onMarkerDragStart = JavaCallback(
'com.google.android.gms.maps.model.Marker')
#: Info windows
setOnInfoWindowClickListener = JavaMethod(
'com.google.android.gms.maps.GoogleMap$OnInfoWindowClickListener')
onInfoWindowClick = JavaCallback(
'com.google.android.gms.maps.model.Marker')
setOnInfoWindowCloseListener = JavaMethod(
'com.google.android.gms.maps.GoogleMap$OnInfoWindowCloseListener')
onInfoWindowClose = JavaCallback(
'com.google.android.gms.maps.model.Marker')
setOnInfoWindowLongClickListener = JavaMethod(
'com.google.android.gms.maps.GoogleMap$OnInfoWindowLongClickListener')
onInfoWindowLongClick = JavaCallback(
'com.google.android.gms.maps.model.Marker')
setInfoWindowAdapter = JavaMethod(
'com.google.android.gms.maps.GoogleMap$InfoWindowAdapter')
class InfoWindowAdapter(JavaProxy):
__nativeclass__ = set_default(
'com.google.android.gms.maps.GoogleMap$InfoWindowAdapter')
getInfoContents = JavaCallback(
'com.google.android.gms.maps.model.Marker',
returns='android.view.View')
getInfoWindow = JavaCallback(
'com.google.android.gms.maps.model.Marker',
returns='android.view.View')
#: Map clicks
setOnMapClickListener = JavaMethod(
'com.google.android.gms.maps.GoogleMap$OnMapClickListener')
onMapClick = JavaCallback('com.google.android.gms.maps.model.LatLng')
setOnMapLongClickListener = JavaMethod(
'com.google.android.gms.maps.GoogleMap$OnMapLongClickListener')
onMapLongClick = JavaCallback('com.google.android.gms.maps.model.LatLng')
setOnPolylineClickListener = JavaMethod(
'com.google.android.gms.maps.GoogleMap$OnPolylineClickListener')
onPolylineClick = JavaCallback(
'com.google.android.gms.maps.model.Polyline')
setOnPolygonClickListener = JavaMethod(
'com.google.android.gms.maps.GoogleMap$OnPolygonClickListener')
onPolygonClick = JavaCallback('com.google.android.gms.maps.model.Polygon')
setOnCircleClickListener = JavaMethod(
'com.google.android.gms.maps.GoogleMap$OnCircleClickListener')
onCircleClick = JavaCallback('com.google.android.gms.maps.model.Circle')
MAP_TYPE_HYBRID = 4
MAP_TYPE_NONE = 0
MAP_TYPE_NORMAL = 1
MAP_TYPE_SATELLITE = 2
MAP_TYPE_TERRAIN = 3
MAP_TYPES = {
'none': MAP_TYPE_NONE,
'normal': MAP_TYPE_NORMAL,
'satellite': MAP_TYPE_SATELLITE,
'terrain': MAP_TYPE_TERRAIN,
'hybrid': MAP_TYPE_HYBRID,
}
class MapsInitializer(JavaBridgeObject):
__nativeclass__ = set_default(
'com.google.android.gms.maps.MapsInitializer')
initialize = JavaStaticMethod('android.content.Context', returns='int')
class MapFragment(JavaBridgeObject):
"""
Note: You must add the Google Play Services maps dependency
(e.g. "compile 'com.google.android.gms:play-services-maps:<version>'")
to build.gradle for this to work!
"""
__nativeclass__ = set_default(
'com.google.android.gms.maps.SupportMapFragment')
newInstance = JavaStaticMethod(
'com.google.android.gms.maps.GoogleMapOptions',
returns='com.google.android.gms.maps.SupportMapFragment')
getMapAsync = JavaMethod('com.google.android.gms.maps.OnMapReadyCallback')
getView = JavaMethod(returns='android.view.View')
class MapView(FrameLayout):
__nativeclass__ = set_default('com.google.android.gms.maps.MapView')
class GoogleMapOptions(JavaBridgeObject):
__nativeclass__ = set_default(
'com.google.android.gms.maps.GoogleMapOptions')
ambientEnabled = JavaMethod('boolean')
camera = JavaMethod('com.google.android.gms.maps.model.CameraPosition')
compassEnabled = JavaMethod('boolean')
latLngBoundsForCameraTarget = JavaMethod(
'com.google.android.gms.maps.model.LatLngBounds')
liteMode = JavaMethod('boolean')
mapToolbarEnabled = JavaMethod('boolean')
mapType = JavaMethod('int')
maxZoomPreference = JavaMethod('float')
minZoomPreference = JavaMethod('float')
rotateGesturesEnabled = JavaMethod('boolean')
scrollGesturesEnabled = JavaMethod('boolean')
tiltGesturesEnabled = JavaMethod('boolean')
zoomControlsEnabled = JavaMethod('boolean')
zoomGesturesEnabled = JavaMethod('boolean')
class CameraPosition(JavaBridgeObject):
__nativeclass__ = set_default(
'com.google.android.gms.maps.model.CameraPosition')
__signature__ = set_default(('com.google.android.gms.maps.model.LatLng',
'float', 'float', 'float'))
class CameraUpdate(JavaBridgeObject):
__nativeclass__ = set_default('com.google.android.gms.maps.CameraUpdate')
class CameraUpdateFactory(JavaBridgeObject):
__nativeclass__ = set_default(
'com.google.android.gms.maps.CameraUpdateFactory')
newCameraPosition = JavaStaticMethod(
'com.google.android.gms.maps.model.CameraPosition',
returns='com.google.android.gms.maps.CameraUpdate')
class LatLng(JavaBridgeObject):
__nativeclass__ = set_default('com.google.android.gms.maps.model.LatLng')
__signature__ = set_default(('double','double'))
class MapItemBase(JavaBridgeObject):
setTag = JavaMethod("java.lang.Object")
setVisible = JavaMethod('boolean')
setZIndex = JavaMethod('float')
remove = JavaMethod()
class MapItemOptionsBase(JavaBridgeObject):
visible = JavaMethod('boolean')
zindex = JavaMethod('float')
class MarkerOptions(MapItemOptionsBase):
__nativeclass__ = set_default(
'com.google.android.gms.maps.model.MarkerOptions')
alpha = JavaMethod('float')
anchor = JavaMethod('float', 'float')
draggable = JavaMethod('boolean')
flat = JavaMethod('boolean')
icon = JavaMethod('com.google.android.gms.maps.model.BitmapDescriptor')
position = JavaMethod('com.google.android.gms.maps.model.LatLng')
rotation = JavaMethod('float')
snippet = JavaMethod('java.lang.String')
title = JavaMethod('java.lang.String')
class Marker(MapItemBase):
__nativeclass__ = set_default('com.google.android.gms.maps.model.Marker')
setAlpha = JavaMethod('float')
setAnchor = JavaMethod('float', 'float')
setDraggable = JavaMethod('boolean')
setFlat = JavaMethod('boolean')
setIcon = JavaMethod('com.google.android.gms.maps.model.BitmapDescriptor')
setPosition = JavaMethod('com.google.android.gms.maps.model.LatLng')
setRotation = JavaMethod('float')
setSnippet = JavaMethod('java.lang.String')
setTitle = JavaMethod('java.lang.String')
showInfoWindow = JavaMethod()
hideInfoWindow = JavaMethod()
class CircleOptions(MapItemOptionsBase):
__nativeclass__ = set_default(
'com.google.android.gms.maps.model.CircleOptions')
radius = JavaMethod('double')
clickable = JavaMethod('boolean')
center = JavaMethod('com.google.android.gms.maps.model.LatLng')
fillColor = JavaMethod('android.graphics.Color')
strokeColor = JavaMethod('android.graphics.Color')
strokeWidth = JavaMethod('float')
class Circle(MapItemBase):
__nativeclass__ = set_default('com.google.android.gms.maps.model.Circle')
setClickable = JavaMethod('boolean')
setCenter = JavaMethod('com.google.android.gms.maps.model.LatLng')
setRadius = JavaMethod('double')
setFillColor = JavaMethod('android.graphics.Color')
setStrokeColor = JavaMethod('android.graphics.Color')
setStrokeWidth = JavaMethod('float')
class PolylineOptions(MapItemOptionsBase):
__nativeclass__ = set_default(
'com.google.android.gms.maps.model.PolylineOptions')
#add = JavaMethod('com.google.android.gms.maps.model.LatLng')
add = JavaMethod('[Lcom.google.android.gms.maps.model.LatLng;')
addAll = JavaMethod('java.lang.Iterable')
clickable = JavaMethod('boolean')
color = JavaMethod('android.graphics.Color')
endCap = JavaMethod('com.google.android.gms.maps.model.Cap')
geodesic = JavaMethod('boolean')
jointType = JavaMethod('int')
startCap = JavaMethod('com.google.android.gms.maps.model.Cap')
width = JavaMethod('float')
class ButtCap(JavaBridgeObject):
__nativeclass__ = set_default('com.google.android.gms.maps.model.ButtCap')
class SquareCap(JavaBridgeObject):
__nativeclass__ = set_default(
'com.google.android.gms.maps.model.SquareCap')
class RoundCap(JavaBridgeObject):
__nativeclass__ = set_default('com.google.android.gms.maps.model.RoundCap')
class Polyline(MapItemBase):
__nativeclass__ = set_default('com.google.android.gms.maps.model.Polyline')
setClickable = JavaMethod('boolean')
setColor = JavaMethod('android.graphics.Color')
setGeodesic = JavaMethod('boolean')
setPoints = JavaMethod('java.util.List')
setEndCap = JavaMethod('com.google.android.gms.maps.model.Cap')
setStartCap = JavaMethod('com.google.android.gms.maps.model.Cap')
setJointType = JavaMethod('int')
setWidth = JavaMethod('float')
JOINT_TYPE_DEFAULT = 0
JOINT_TYPE_BEVEL = 1
JOINT_TYPE_ROUND = 2
JOINT_TYPES = {
'': JOINT_TYPE_DEFAULT,
'bevel': JOINT_TYPE_BEVEL,
'round': JOINT_TYPE_ROUND
}
CAPS = {
'butt': ButtCap,
'round': RoundCap,
'square': SquareCap,
}
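# Note: CAPS maps the declaration's cap names to bridge classes which the
# proxies instantiate on demand, e.g. Polyline.CAPS['round']().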
class PolygonOptions(MapItemOptionsBase):
__nativeclass__ = set_default(
'com.google.android.gms.maps.model.PolygonOptions')
clickable = JavaMethod('boolean')
fillColor = JavaMethod('android.graphics.Color')
#add = JavaMethod('com.google.android.gms.maps.model.LatLng')
add = JavaMethod('[Lcom.google.android.gms.maps.model.LatLng;')
addAll = JavaMethod('java.lang.Iterable')
addHole = JavaMethod('java.lang.Iterable')
geodesic = JavaMethod('boolean')
strokeColor = JavaMethod('android.graphics.Color')
strokeJointType = JavaMethod('int')
strokeWidth = JavaMethod('float')
class Polygon(MapItemBase):
__nativeclass__ = set_default('com.google.android.gms.maps.model.Polygon')
setClickable = JavaMethod('boolean')
setFillColor = JavaMethod('android.graphics.Color')
setGeodesic = JavaMethod('boolean')
setHoles = JavaMethod('java.util.List')
setPoints = JavaMethod('java.util.List')
setStrokeColor = JavaMethod('android.graphics.Color')
setStrokeJointType = JavaMethod('int')
setStrokeWidth = JavaMethod('float')
class AndroidMapView(AndroidFrameLayout, ProxyMapView):
""" An Android implementation of an Enaml ProxyMapView.
"""
#: Holder
widget = Typed(FrameLayout)
#: A reference to the widget created by the proxy.
fragment = Typed(MapFragment)
#: Map options
options = Typed(GoogleMapOptions)
#: Map instance
map = Typed(GoogleMap)
#: TODO: Lookup table for markers
markers = Dict()
#: Camera updating
_update_blocked = Bool()
#: Info window adapter
adapter = Typed(GoogleMap.InfoWindowAdapter)
# -------------------------------------------------------------------------
# Initialization API
# -------------------------------------------------------------------------
def create_widget(self):
""" Create the underlying widget.
"""
self.init_options()
#: Retrieve the actual map
MapFragment.newInstance(self.options).then(
self.on_map_fragment_created)
# Holder for the fragment
self.widget = FrameLayout(self.get_context())
# We can't simply obtain the map reference from getMapAsync's return
# value as we would with a normal bridge call. Instead, the bridge was
# designed so that objects which cannot be decoded normally (via a
# standard Bridge.Packer) are stored in the cache under the id of the
# handler or proxy that invoked the call. So we manually generate a new
# id here and pass that "future-referenceable" object as our listener;
# the bridge then creates a cache entry for it under the id we supplied.
# Once in the cache it can be used like any other bridge object.
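# Illustrative sketch of the pattern (names as used below):
#   m = GoogleMap(__id__=bridge.generate_id())
#   fragment.getMapAsync(m.getId())  # bridge binds the real map to m's id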
self.map = GoogleMap(__id__=bridge.generate_id())
def init_options(self):
""" Initialize the underlying map options.
"""
self.options = GoogleMapOptions()
d = self.declaration
self.set_map_type(d.map_type)
if d.ambient_mode:
self.set_ambient_mode(d.ambient_mode)
if (d.camera_position or d.camera_zoom or
d.camera_tilt or d.camera_bearing):
self.update_camera()
if d.map_bounds:
self.set_map_bounds(d.map_bounds)
if not d.show_compass:
self.set_show_compass(d.show_compass)
if not d.show_zoom_controls:
self.set_show_zoom_controls(d.show_zoom_controls)
if not d.show_toolbar:
self.set_show_toolbar(d.show_toolbar)
if d.lite_mode:
self.set_lite_mode(d.lite_mode)
if not d.rotate_gestures:
self.set_rotate_gestures(d.rotate_gestures)
if not d.scroll_gestures:
self.set_scroll_gestures(d.scroll_gestures)
if not d.tilt_gestures:
self.set_tilt_gestures(d.tilt_gestures)
if not d.zoom_gestures:
self.set_zoom_gestures(d.zoom_gestures)
if d.min_zoom:
self.set_min_zoom(d.min_zoom)
if d.max_zoom:
self.set_max_zoom(d.max_zoom)
def init_map(self):
""" Add markers, polys, callouts, etc.."""
d = self.declaration
if d.show_location:
self.set_show_location(d.show_location)
if d.show_traffic:
self.set_show_traffic(d.show_traffic)
if d.show_indoors:
self.set_show_indoors(d.show_indoors)
if d.show_buildings:
self.set_show_buildings(d.show_buildings)
#: Local ref access is faster
mapview = self.map
mid = mapview.getId()
#: Connect signals
#: Camera
mapview.onCameraChange.connect(self.on_camera_changed)
mapview.onCameraMoveStarted.connect(self.on_camera_move_started)
mapview.onCameraMoveCanceled.connect(self.on_camera_move_stopped)
mapview.onCameraIdle.connect(self.on_camera_move_stopped)
mapview.setOnCameraChangeListener(mid)
mapview.setOnCameraMoveStartedListener(mid)
mapview.setOnCameraMoveCanceledListener(mid)
mapview.setOnCameraIdleListener(mid)
#: Clicks
mapview.onMapClick.connect(self.on_map_clicked)
mapview.setOnMapClickListener(mid)
mapview.onMapLongClick.connect(self.on_map_long_clicked)
mapview.setOnMapLongClickListener(mid)
#: Markers
mapview.onMarkerClick.connect(self.on_marker_clicked)
mapview.setOnMarkerClickListener(mid)
mapview.onMarkerDragStart.connect(self.on_marker_drag_start)
mapview.onMarkerDrag.connect(self.on_marker_drag)
mapview.onMarkerDragEnd.connect(self.on_marker_drag_end)
mapview.setOnMarkerDragListener(mid)
#: Info window
mapview.onInfoWindowClick.connect(self.on_info_window_clicked)
mapview.onInfoWindowLongClick.connect(self.on_info_window_long_clicked)
mapview.onInfoWindowClose.connect(self.on_info_window_closed)
mapview.setOnInfoWindowClickListener(mid)
mapview.setOnInfoWindowCloseListener(mid)
mapview.setOnInfoWindowLongClickListener(mid)
#: Polys
mapview.onPolygonClick.connect(self.on_poly_clicked)
mapview.onPolylineClick.connect(self.on_poly_clicked)
mapview.setOnPolygonClickListener(mid)
mapview.setOnPolylineClickListener(mid)
#: Circle
mapview.onCircleClick.connect(self.on_circle_clicked)
mapview.setOnCircleClickListener(mid)
def init_info_window_adapter(self):
""" Initialize the info window adapter. Should only be done if one of
the markers defines a custom view.
"""
adapter = self.adapter
if adapter:
return #: Already initialized
adapter = GoogleMap.InfoWindowAdapter()
adapter.getInfoContents.connect(self.on_info_window_contents_requested)
adapter.getInfoWindow.connect(self.on_info_window_requested)
self.map.setInfoWindowAdapter(adapter)
# -------------------------------------------------------------------------
# Google Maps API
# -------------------------------------------------------------------------
def on_map_fragment_created(self, obj_id):
""" Create the fragment and pull the map reference when it's loaded.
"""
self.fragment = MapFragment(__id__=obj_id)
#: Setup callback so we know when the map is ready
self.map.onMapReady.connect(self.on_map_ready)
self.fragment.getMapAsync(self.map.getId())
context = self.get_context()
def on_transaction(trans_id):
trans = FragmentTransaction(__id__=trans_id)
trans.add(self.widget.getId(), self.fragment)
trans.commit()
def on_fragment_manager(fm_id):
fm = FragmentManager(__id__=fm_id)
fm.beginTransaction().then(on_transaction)
context.widget.getSupportFragmentManager().then(on_fragment_manager)
# #: Get GoogleMap instance when ready
# #: Doesn't work...
# def get_map(result):
# print("Maps initializer result: {}".format(result))
# if result==ConnectionResult.SUCCESS:
# self.fragment.onMapReady.connect(self.on_map_ready)
# self.fragment.getMapAsync(self.fragment.getId())
# else:
# app = self.get_context()
# app.show_error("Error getting map: {}".format(result))
# MapsInitializer.initialize(self.get_context()).then(get_map)
def on_map_ready(self, map_id):
#: At this point the map is valid
self.init_map()
#: Reload markers
for child in self.children():
if isinstance(child, AndroidMapItemBase):
child.add_to_map(self.map)
def child_added(self, child):
if isinstance(child, AndroidMapItemBase):
child.add_to_map(self.map)
else:
super(AndroidMapView, self).child_added(child)
def child_removed(self, child):
if isinstance(child, AndroidMapItemBase):
pass #: It removes itself
else:
super(AndroidMapView, self).child_removed(child)
def on_map_clicked(self, pos):
""" Called when the map is clicked """
d = self.declaration
d.clicked({
'click': 'short',
'position': tuple(pos)
})
def on_map_long_clicked(self, pos):
""" Called when the map is clicked """
d = self.declaration
d.clicked({
'click': 'long',
'position': tuple(pos)
})
# -------------------------------------------------------------------------
# Camera API
# -------------------------------------------------------------------------
def on_camera_move_started(self, reason):
d = self.declaration
if reason == GoogleMap.CAMERA_REASON_GESTURE:
d.dragging = True
else:
d.animating = True
def on_camera_move_stopped(self):
d = self.declaration
d.dragging = False
d.animating = False
def on_camera_changed(self, camera):
pos, zoom, tilt, bearing = camera
d = self.declaration
#: Don't update
self._update_blocked = True
try:
d.camera_position = tuple(pos)
d.camera_zoom = zoom
d.camera_tilt = tilt
d.camera_bearing = bearing
finally:
self._update_blocked = False
# -------------------------------------------------------------------------
# Marker API
# -------------------------------------------------------------------------
def on_marker_clicked(self, marker):
mid, pos = marker
m = self.markers.get(mid)
if m:
return m.on_click()
return False
def on_marker_drag(self, marker):
mid, pos = marker
m = self.markers.get(mid)
if m:
m.on_drag(pos)
def on_marker_drag_start(self, marker):
mid, pos = marker
m = self.markers.get(mid)
if m:
m.on_drag_start(pos)
def on_marker_drag_end(self, marker):
mid, pos = marker
m = self.markers.get(mid)
if m:
m.on_drag_end(pos)
# -------------------------------------------------------------------------
# Info window API
# -------------------------------------------------------------------------
def on_info_window_requested(self, marker):
mid, pos = marker
m = self.markers.get(mid)
if m:
return m.on_info_window_requested()
def on_info_window_contents_requested(self, marker):
mid, pos = marker
m = self.markers.get(mid)
if m:
return m.on_info_window_contents_requested()
def on_info_window_clicked(self, marker):
mid, pos = marker
m = self.markers.get(mid)
if m:
m.on_info_window_clicked('short')
def on_info_window_long_clicked(self, marker):
mid, pos = marker
m = self.markers.get(mid)
if m:
m.on_info_window_clicked('long')
def on_info_window_closed(self, marker):
mid, pos = marker
m = self.markers.get(mid)
#: This can come later when it's removed so check the declaration
if m and m.declaration:
m.on_info_window_closed()
# -------------------------------------------------------------------------
# Polygon and PolyLine API
# -------------------------------------------------------------------------
def on_poly_clicked(self, poly):
m = self.markers.get(poly)
if m:
m.on_click()
# -------------------------------------------------------------------------
# Circle API
# -------------------------------------------------------------------------
def on_circle_clicked(self, circle):
m = self.markers.get(circle)
if m:
m.on_click()
# -------------------------------------------------------------------------
# ProxyMapView API
# -------------------------------------------------------------------------
def set_map_bounds(self, bounds):
raise NotImplementedError
def set_map_type(self, map_type):
if self.map:
self.map.setMapType(GoogleMap.MAP_TYPES[map_type])
else:
self.options.mapType(GoogleMap.MAP_TYPES[map_type])
def set_show_toolbar(self, show):
if self.map:
pass
else:
self.options.mapToolbarEnabled(show)
def set_show_compass(self, show):
if self.map:
pass
else:
self.options.compassEnabled(show)
def set_show_zoom_controls(self, show):
if self.map:
pass
else:
self.options.zoomControlsEnabled(show)
def set_show_location(self, show):
if self.map:
if show:
def on_result(allowed):
if allowed:
self.map.setMyLocationEnabled(True)
else:
self.declaration.show_location = False
LocationManager.check_permission().then(on_result)
else:
self.map.setMyLocationEnabled(False)
def set_show_buildings(self, show):
if self.map:
self.map.setBuildingsEnabled(show)
def set_show_traffic(self, show):
if self.map:
self.map.setTrafficEnabled(show)
def set_show_indoors(self, show):
if self.map:
self.map.setIndoorEnabled(show)
def update_camera(self):
if self._update_blocked:
return
d = self.declaration
if self.map:
# The future created to handle the return value allocates an id for
# itself, and the bridge saves objects it cannot pack with a specific
# Packer under that id. We can therefore reference the result
# immediately, without waiting for the return value to come back
# across the bridge.
self.map.animateCamera(CameraUpdateFactory.newCameraPosition(
CameraPosition(
LatLng(*d.camera_position),
d.camera_zoom,
d.camera_tilt,
d.camera_bearing
)))
else:
self.options.camera(CameraPosition(
LatLng(*d.camera_position),
d.camera_zoom,
d.camera_tilt,
d.camera_bearing
))
def set_camera_zoom(self, zoom):
self.update_camera()
def set_camera_position(self, position):
self.update_camera()
def set_camera_bearing(self, bearing):
self.update_camera()
def set_camera_tilt(self, tilt):
self.update_camera()
def set_ambient_mode(self, enabled):
if self.map:
pass
else:
self.options.ambientEnabled(enabled)
def set_lite_mode(self, enabled):
if self.map:
pass
else:
self.options.liteMode(enabled)
def set_min_zoom(self, zoom):
if self.map:
self.map.setMinZoomPreference(zoom)
else:
self.options.minZoomPreference(zoom)
def set_max_zoom(self, zoom):
if self.map:
self.map.setMaxZoomPreference(zoom)
else:
self.options.maxZoomPreference(zoom)
def set_rotate_gestures(self, enabled):
if self.map:
pass
else:
self.options.rotateGesturesEnabled(enabled)
def set_scroll_gestures(self, enabled):
if self.map:
pass
else:
self.options.scrollGesturesEnabled(enabled)
def set_tilt_gestures(self, enabled):
if self.map:
pass
else:
self.options.tiltGesturesEnabled(enabled)
def set_zoom_gestures(self, enabled):
if self.map:
pass
else:
self.options.zoomGesturesEnabled(enabled)
class AndroidMapItemBase(AndroidToolkitObject):
#: Options for map item constructor
options = Instance(MapItemOptionsBase)
#: Actual map item created
marker = Instance(MapItemBase)
def init_widget(self):
super(AndroidMapItemBase, self).init_widget()
d = self.declaration
if not d.visible:
self.set_visible(d.visible)
if d.zindex:
self.set_zindex(d.zindex)
def add_to_map(self):
""" Add this item to the map """
raise NotImplementedError
def destroy(self):
""" Remove the marker if it was added to the map when destroying"""
marker = self.marker
parent = self.parent()
if marker:
if parent:
del parent.markers[marker.__id__]
marker.remove()
super(AndroidMapItemBase, self).destroy()
def set_visible(self, visible):
if self.marker:
self.marker.setVisible(visible)
else:
self.options.visible(visible)
def set_zindex(self, zindex):
if self.marker:
self.marker.setZIndex(zindex)
else:
self.options.zindex(zindex)
class AndroidMapMarker(AndroidMapItemBase, ProxyMapMarker):
""" An Android implementation of an Enaml ProxyMapView.
"""
def create_widget(self):
""" Create the MarkerOptions for this map marker
this later gets converted into a "Marker" instance when addMarker
is called
"""
self.options = MarkerOptions()
def init_widget(self):
super(AndroidMapMarker, self).init_widget()
d = self.declaration
if d.alpha:
self.set_alpha(d.alpha)
if d.anchor:
self.set_anchor(d.anchor)
if d.draggable:
self.set_draggable(d.draggable)
if not d.flat:
self.set_flat(d.flat)
if d.position:
self.set_position(d.position)
if d.rotation:
self.set_rotation(d.rotation)
if d.title:
self.set_title(d.title)
if d.snippit:
self.set_snippit(d.snippit)
def add_to_map(self, mapview):
mapview.addMarker(self.options).then(self.on_marker)
def child_added(self, child):
""" If a child is added we have to make sure the map adapter exists """
if child.widget:
# TODO: Should we keep count and remove the adapter if not all
# markers request it?
self.parent().init_info_window_adapter()
super(AndroidMapMarker, self).child_added(child)
# -------------------------------------------------------------------------
# Marker API
# -------------------------------------------------------------------------
def on_marker(self, marker):
""" Convert our options into the actual marker object"""
mid, pos = marker
self.marker = Marker(__id__=mid)
mapview = self.parent()
# Save ref
mapview.markers[mid] = self
# Required so the packer can pass the id
self.marker.setTag(mid)
# If we have a child widget we must configure the map to use the
# custom adapter
for w in self.child_widgets():
mapview.init_info_window_adapter()
break
d = self.declaration
if d.show_info:
self.set_show_info(d.show_info)
#: Can free the options now
del self.options
def on_click(self):
d = self.declaration
result = {'handled': False}
d.clicked(result)
r = bool(result['handled'])
if not r and (d.title or d.snippit):
# Info window is shown by default
with self.marker.showInfoWindow.suppressed():
d.show_info = True
return r
def on_drag_start(self, pos):
d = self.declaration
with self.marker.setPosition.suppressed():
d.position = tuple(pos)
d.dragging = True
def on_drag(self, pos):
d = self.declaration
with self.marker.setPosition.suppressed():
d.position = tuple(pos)
def on_drag_end(self, pos):
d = self.declaration
with self.marker.setPosition.suppressed():
d.position = tuple(pos)
d.dragging = False
def on_info_window_clicked(self, click):
d = self.declaration
d.info_clicked({'click': click})
def on_info_window_closed(self):
d = self.declaration
with self.marker.hideInfoWindow.suppressed():
d.show_info = False
def on_info_window_requested(self):
# Use default window, subclasses can override if necessary
d = self.declaration
if d.custom_info_window_mode == 'custom':
for w in self.child_widgets():
return w
return None
def on_info_window_contents_requested(self):
# Return the first child widget as the view for the content
for w in self.child_widgets():
return w
return None
# -------------------------------------------------------------------------
# ProxyMapMarker API
# -------------------------------------------------------------------------
def set_alpha(self, alpha):
if self.marker:
self.marker.setAlpha(alpha)
else:
self.options.alpha(alpha)
def set_anchor(self, anchor):
if self.marker:
self.marker.setAnchor(*anchor)
else:
self.options.anchor(*anchor)
def set_draggable(self, draggable):
if self.marker:
self.marker.setDraggable(draggable)
else:
self.options.draggable(draggable)
def set_flat(self, flat):
if self.marker:
self.marker.setFlat(flat)
else:
self.options.flat(flat)
def set_position(self, position):
if self.marker:
self.marker.setPosition(LatLng(*position))
else:
self.options.position(LatLng(*position))
def set_rotation(self, rotation):
if self.marker:
self.marker.setRotation(rotation)
else:
self.options.rotation(rotation)
def set_title(self, title):
if self.marker:
self.marker.setTitle(title)
else:
self.options.title(title)
def set_snippit(self, snippit):
if self.marker:
self.marker.setSnippet(snippit)
else:
self.options.snippet(snippit)
def set_show_info(self, show):
if self.marker:
if show:
self.marker.showInfoWindow()
else:
self.marker.hideInfoWindow()
def set_custom_info_window_mode(self, mode):
pass
class AndroidMapCircle(AndroidMapItemBase, ProxyMapCircle):
""" An Android implementation of an Enaml ProxyMapCircle.
"""
def create_widget(self):
""" Create the CircleOptions for this map item
this later gets converted into a "Circle" instance when addCircle
is called
"""
self.options = CircleOptions()
def add_to_map(self, mapview):
mapview.addCircle(self.options).then(self.on_marker)
def init_widget(self):
super(AndroidMapCircle, self).init_widget()
d = self.declaration
if d.radius:
self.set_radius(d.radius)
#if d.clickable: doesn't work
# self.set_clickable(d.clickable)
if d.position:
self.set_position(d.position)
if d.fill_color:
self.set_fill_color(d.fill_color)
if d.stroke_color:
self.set_stroke_color(d.stroke_color)
if d.stroke_width != 10:
self.set_stroke_width(d.stroke_width)
# -------------------------------------------------------------------------
# Marker API
# -------------------------------------------------------------------------
def on_marker(self, mid):
""" Convert our options into the actual circle object"""
self.marker = Circle(__id__=mid)
self.parent().markers[mid] = self
#: Required so the packer can pass the id
self.marker.setTag(mid)
d = self.declaration
if d.clickable:
self.set_clickable(d.clickable)
#: Can free the options now
del self.options
def on_click(self):
d = self.declaration
d.clicked()
# -------------------------------------------------------------------------
# ProxyMapCircle API
# -------------------------------------------------------------------------
def set_clickable(self, clickable):
if self.marker:
self.marker.setClickable(clickable)
else:
self.options.clickable(clickable)
def set_position(self, position):
if self.marker:
self.marker.setCenter(LatLng(*position))
else:
self.options.center(LatLng(*position))
def set_radius(self, radius):
if self.marker:
self.marker.setRadius(radius)
else:
self.options.radius(radius)
def set_fill_color(self, color):
if self.marker:
self.marker.setFillColor(color)
else:
self.options.fillColor(color)
def set_stroke_color(self, color):
if self.marker:
self.marker.setStrokeColor(color)
else:
self.options.strokeColor(color)
def set_stroke_width(self, width):
if self.marker:
self.marker.setStrokeWidth(width)
else:
self.options.strokeWidth(width)
class AndroidMapPolyline(AndroidMapItemBase, ProxyMapPolyline):
""" An Android implementation of an Enaml ProxyMapPolyline.
"""
#: Hold the points
points = Typed(LatLngList)
def create_widget(self):
""" Create the MarkerOptions for this map marker
this later gets converted into a "Marker" instance when addMarker
is called
"""
self.options = PolylineOptions()
#: List to hold our points
self.points = LatLngList()
def add_to_map(self, mapview):
mapview.addPolyline(self.options).then(self.on_marker)
def init_widget(self):
super(AndroidMapPolyline, self).init_widget()
d = self.declaration
self.set_points(d.points)
#if d.clickable:
# self.set_clickable(d.clickable)
if d.color:
self.set_color(d.color)
if d.end_cap != 'butt':
self.set_end_cap(d.end_cap)
if d.start_cap != 'butt':
self.set_start_cap(d.start_cap)
if d.geodesic:
self.set_geodesic(d.geodesic)
if d.joint_type:
self.set_joint_type(d.joint_type)
if d.width != 10:
self.set_width(d.width)
# -------------------------------------------------------------------------
# Polyline API
# -------------------------------------------------------------------------
def on_marker(self, mid):
""" Convert our options into the actual marker object"""
#mid, pos = marker
self.marker = Polyline(__id__=mid)
self.parent().markers[mid] = self
self.marker.setTag(mid)
d = self.declaration
if d.clickable:
self.set_clickable(d.clickable)
#: Can free the options now
del self.options
def on_click(self):
d = self.declaration
d.clicked()
# -------------------------------------------------------------------------
# ProxyMapPolyline API
# -------------------------------------------------------------------------
def set_points(self, points):
#: Have to hold on until after added to the ArrayList
#: or the GC cleans them up and the bridge destroys them
self.points.refresh_points(points)
if self.marker:
self.marker.setPoints(self.points)
else:
self.options.addAll(self.points)
def update_points(self, change):
""" Update the points in a smart way without passing them over the
bridge with every change.
"""
#: Delegate to the special LatLngList
self.points.handle_change(change)
#: Trigger update
self.marker.setPoints(self.points)
def set_clickable(self, clickable):
if self.marker:
self.marker.setClickable(clickable)
else:
self.options.clickable(clickable)
def set_color(self, color):
if self.marker:
self.marker.setColor(color)
else:
self.options.color(color)
def set_end_cap(self, cap):
if self.marker:
self.marker.setEndCap(Polyline.CAPS[cap]())
else:
self.options.endCap(Polyline.CAPS[cap]())
def set_geodesic(self, geodesic):
if self.marker:
self.marker.setGeodesic(geodesic)
else:
self.options.geodesic(geodesic)
def set_joint_type(self, joint_type):
if self.marker:
self.marker.setJointType(Polyline.JOINT_TYPES[joint_type])
else:
self.options.jointType(Polyline.JOINT_TYPES[joint_type])
def set_start_cap(self, cap):
if self.marker:
self.marker.setStartCap(Polyline.CAPS[cap]())
else:
self.options.startCap(Polyline.CAPS[cap]())
def set_width(self, width):
if self.marker:
self.marker.setWidth(width)
else:
self.options.width(width)
class AndroidMapPolygon(AndroidMapItemBase, ProxyMapPolygon):
""" An Android implementation of an Enaml ProxyMapPolygon.
"""
#: Hold the points
points = Typed(LatLngList)
#: Hold the holes
#holes = List(ArrayList)
def create_widget(self):
""" Create the MarkerOptions for this map marker
this later gets converted into a "Marker" instance when addMarker
is called
"""
self.options = PolygonOptions()
self.points = LatLngList()
def add_to_map(self, mapview):
mapview.addPolygon(self.options).then(self.on_marker)
def init_widget(self):
super(AndroidMapPolygon, self).init_widget()
d = self.declaration
self.set_points(d.points)
#if d.clickable:
# self.set_clickable(d.clickable)
if d.fill_color:
self.set_fill_color(d.fill_color)
if d.geodesic:
self.set_geodesic(d.geodesic)
if d.stroke_joint_type:
self.set_stroke_joint_type(d.stroke_joint_type)
if d.stroke_color:
self.set_stroke_color(d.stroke_color)
if d.stroke_width != 10:
self.set_stroke_width(d.stroke_width)
# -------------------------------------------------------------------------
# Marker API
# -------------------------------------------------------------------------
def on_marker(self, mid):
""" Convert our options into the actual marker object"""
#mid, pos = marker
self.marker = Polygon(__id__=mid)
self.parent().markers[mid] = self
self.marker.setTag(mid)
d = self.declaration
if d.clickable:
self.set_clickable(d.clickable)
#: Can free the options now
del self.options
def on_click(self):
d = self.declaration
d.clicked()
# -------------------------------------------------------------------------
# ProxyMapMarker API
# -------------------------------------------------------------------------
def set_points(self, points):
#: Have to hold on until after added to the ArrayList
#: or the GC cleans them up and the bridge destroys them
self.points.refresh_points(points)
if self.marker:
self.marker.setPoints(self.points)
else:
self.options.addAll(self.points)
def update_points(self, change):
#: Defer to points
self.points.handle_change(change)
#: Trigger update
self.marker.setPoints(self.points)
def set_clickable(self, clickable):
if self.marker:
self.marker.setClickable(clickable)
else:
self.options.clickable(clickable)
def set_holes(self, holes):
if self.marker:
self.marker.setHoles([bridge.encode(LatLng(*p)) for hole in holes
for p in hole])
else:
for hole in holes:
self.options.addHole([bridge.encode(LatLng(*p)) for p in hole])
def set_fill_color(self, color):
if self.marker:
self.marker.setFillColor(color)
else:
self.options.fillColor(color)
def set_geodesic(self, geodesic):
if self.marker:
self.marker.setGeodesic(geodesic)
else:
self.options.geodesic(geodesic)
def set_stroke_color(self, color):
if self.marker:
self.marker.setStrokeColor(color)
else:
self.options.strokeColor(color)
def set_stroke_joint_type(self, joint_type):
if self.marker:
self.marker.setStrokeJointType(Polyline.JOINT_TYPES[joint_type])
else:
self.options.strokeJointType(Polyline.JOINT_TYPES[joint_type])
def set_stroke_width(self, width):
if self.marker:
self.marker.setStrokeWidth(width)
else:
self.options.strokeWidth(width)
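# Note on the pattern used by the map item proxies above: each proxy holds
# a *Options bridge object until the async add<Item> call resolves, then
# swaps to the live map item (stored as `marker`) and frees the options.
# Setters therefore branch on `self.marker` to pick the right target.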
|
frmdstryr/enaml-native-maps | src/googlemaps/widgets/map_view.py | '''
Copyright (c) 2017, <NAME>.
Distributed under the terms of the MIT License.
The full license is in the file COPYING.txt, distributed with this software.
Created on Oct 10, 2017
@author: jrm
'''
from atom.api import (
Atom, Typed, ForwardTyped, Unicode, Enum, Bool, Float, Tuple, Event,
ContainerList, observe, set_default
)
from enaml.core.declarative import d_
from enaml.widgets.toolkit_object import ToolkitObject, ProxyToolkitObject
from enamlnative.widgets.frame_layout import FrameLayout, ProxyFrameLayout
class LatLng(Atom):
""" A model for the map coordinates """
latitude = Float()
longitude = Float()
class Camera(Atom):
""" A model for the map camera """
bearing = Float()
position = Tuple(float)
tilt = Float()
zoom = Float()
class ProxyMapView(ProxyFrameLayout):
""" The abstract definition of a proxy MapView object.
"""
#: A reference to the declaration.
declaration = ForwardTyped(lambda: MapView)
def set_map_bounds(self, bounds):
raise NotImplementedError
def set_map_type(self, map_type):
raise NotImplementedError
def set_show_toolbar(self, show):
raise NotImplementedError
def set_show_compass(self, show):
raise NotImplementedError
def set_show_zoom_controls(self, show):
raise NotImplementedError
def set_show_location(self, show):
raise NotImplementedError
def set_show_buildings(self, show):
raise NotImplementedError
def set_show_traffic(self, show):
raise NotImplementedError
def set_show_indoors(self, show):
raise NotImplementedError
def set_camera_zoom(self, zoom):
raise NotImplementedError
def set_camera_position(self, position):
raise NotImplementedError
def set_camera_bearing(self, bearing):
raise NotImplementedError
def set_camera_tilt(self, tilt):
raise NotImplementedError
def set_ambient_mode(self, enabled):
raise NotImplementedError
def set_lite_mode(self, enabled):
raise NotImplementedError
def set_min_zoom(self, zoom):
raise NotImplementedError
def set_max_zoom(self, zoom):
raise NotImplementedError
def set_rotate_gestures(self, enabled):
raise NotImplementedError
def set_scroll_gestures(self, enabled):
raise NotImplementedError
def set_tilt_gestures(self, enabled):
raise NotImplementedError
def set_zoom_gestures(self, enabled):
raise NotImplementedError
class ProxyMapMarker(ProxyToolkitObject):
#: A reference to the MapMarker declaration.
declaration = ForwardTyped(lambda: MapMarker)
def set_alpha(self, alpha):
raise NotImplementedError
def set_anchor(self, anchor):
raise NotImplementedError
def set_draggable(self, draggable):
raise NotImplementedError
def set_flat(self, flat):
raise NotImplementedError
def set_position(self, position):
raise NotImplementedError
def set_rotation(self, rotation):
raise NotImplementedError
def set_title(self, title):
raise NotImplementedError
def set_snippit(self, snippit):
raise NotImplementedError
def set_show_info(self, show):
raise NotImplementedError
def set_visible(self, visible):
raise NotImplementedError
def set_zindex(self, zindex):
raise NotImplementedError
def set_custom_info_window_mode(self, mode):
raise NotImplementedError
class ProxyMapCircle(ProxyToolkitObject):
#: A reference to the MapCircle declaration.
declaration = ForwardTyped(lambda: MapCircle)
def set_position(self, position):
raise NotImplementedError
def set_radius(self, radius):
raise NotImplementedError
def set_clickable(self, clickable):
raise NotImplementedError
def set_fill_color(self, color):
raise NotImplementedError
def set_stroke_color(self, color):
raise NotImplementedError
def set_stroke_width(self, width):
raise NotImplementedError
def set_visible(self, visible):
raise NotImplementedError
def set_zindex(self, zindex):
raise NotImplementedError
class ProxyMapPolyline(ProxyToolkitObject):
#: A reference to the declaration.
declaration = ForwardTyped(lambda: MapPolyline)
def set_points(self, points):
raise NotImplementedError
def update_points(self, change):
raise NotImplementedError
def set_clickable(self, clickable):
raise NotImplementedError
def set_color(self, color):
raise NotImplementedError
def set_end_cap(self, cap):
raise NotImplementedError
def set_geodesic(self, geodesic):
raise NotImplementedError
def set_joint_type(self, joint_type):
raise NotImplementedError
def set_start_cap(self, cap):
raise NotImplementedError
def set_visible(self, visible):
raise NotImplementedError
def set_width(self, width):
raise NotImplementedError
def set_zindex(self, zindex):
raise NotImplementedError
class ProxyMapPolygon(ProxyToolkitObject):
#: A reference to the declaration.
declaration = ForwardTyped(lambda: MapPolygon)
def set_points(self, points):
raise NotImplementedError
def update_points(self, change):
raise NotImplementedError
def set_clickable(self, clickable):
raise NotImplementedError
def set_holes(self, holes):
raise NotImplementedError
def set_fill_color(self, color):
raise NotImplementedError
def set_geodesic(self, geodesic):
raise NotImplementedError
def set_stroke_color(self, color):
raise NotImplementedError
def set_stroke_joint_type(self, joint_type):
raise NotImplementedError
def set_stroke_width(self, width):
raise NotImplementedError
def set_visible(self, visible):
raise NotImplementedError
def set_zindex(self, zindex):
raise NotImplementedError
class MapView(FrameLayout):
""" A map view using google maps.
"""
#: Fill parent by default
height = set_default("match_parent")
width = set_default("match_parent")
#: Specifies whether ambient-mode styling should be enabled.
#: The default value is false. When enabled, ambient-styled maps can be displayed
#: when an ambient-capable device enters ambient mode.
ambient_mode = d_(Bool())
#: Specifies the initial camera position for the map.
camera_position = d_(Tuple(float))
#: Map camera zoom level
camera_zoom = d_(Float())
#: Camera bearing
camera_bearing = d_(Float())
#: Camera tilt
camera_tilt = d_(Float())
#: Map display type
map_type = d_(Enum('normal', 'hybrid', 'satellite', 'terrain', 'none'))
#: Specifies a LatLngBounds to constrain the camera target,
#: so that when users scroll and pan the map, the camera target does
#: not move outside these bounds.
map_bounds = d_(Tuple(LatLng))
#: Specifies whether the compass should be enabled.
show_compass = d_(Bool(True))
#: Specifies whether the zoom controls should be enabled
show_zoom_controls = d_(Bool(True))
#: Specifies whether the mapToolbar should be enabled
show_toolbar = d_(Bool(True))
#: Show my location
show_location = d_(Bool())
#: Show traffic
show_traffic = d_(Bool(False))
#: Sets whether indoor maps should be enabled.
show_indoors = d_(Bool())
#: Turns the 3D buildings layer on or off.
show_buildings = d_(Bool())
#: Specifies whether the map should be created in lite mode
lite_mode = d_(Bool(False))
#: Specifies whether rotate gestures should be enabled.
rotate_gestures = d_(Bool(True))
#: Specifies whether scroll gestures should be enabled
scroll_gestures = d_(Bool(True))
#: Specifies whether tilt gestures should be enabled
tilt_gestures = d_(Bool(True))
#: Specifies whether zoom gestures should be enabled.
zoom_gestures = d_(Bool(True))
#: Specifies a preferred lower bound for camera zoom.
min_zoom = d_(Float())
#: Specifies a preferred upper bound for camera zoom.
max_zoom = d_(Float())
#: Called when the map is clicked.
#: the event change['value'] will have an indicator of the type of click and position
clicked = d_(Event(dict), writable=False)
#: Map is currently being dragged by the user
dragging = d_(Bool(), writable=False)
#: Map is currently being moved due to an animation
animating = d_(Bool(), writable=False)
#: A reference to the ProxyMapView object.
proxy = Typed(ProxyMapView)
@observe('ambient_mode', 'map_type', 'map_bounds',
'show_compass', 'show_toolbar', 'show_zoom_controls', 'show_location',
'show_traffic', 'show_indoors', 'show_buildings',
'camera_zoom', 'camera_tilt', 'camera_position', 'camera_bearing',
'lite_mode',
'min_zoom', 'max_zoom',
'rotate_gestures', 'scroll_gestures','tilt_gestures', 'zoom_gestures')
def _update_proxy(self, change):
""" An observer which sends the state change to the proxy.
"""
# The superclass implementation is sufficient.
super(MapView, self)._update_proxy(change)
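#: Handler sketch (hypothetical enaml usage): a binding on `clicked`
#: receives a dict such as {'click': 'short', 'position': (lat, lng)},
#: as built by the proxy's on_map_clicked/on_map_long_clicked handlers.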
class MapMarker(ToolkitObject):
""" A marker on the map. """
#: Sets the alpha (opacity) of the marker.
alpha = d_(Float(strict=False))
#: Specifies the anchor to be at a particular point in the marker image.
anchor = d_(Tuple(float))
#: Sets the draggability for the marker.
draggable = d_(Bool())
#: Sets whether this marker should be flat against the map true
#: or a billboard facing the camera false.
flat = d_(Bool(True))
#: Sets the location for the marker.
position = d_(Tuple(float))
#: Sets the rotation of the marker in degrees clockwise about the marker's anchor point.
rotation = d_(Float(0, strict=False))
#: Sets the title for the marker.
title = d_(Unicode())
#: Sets the snippet text for the marker.
snippit = d_(Unicode())
#: Show info window
show_info = d_(Bool())
#: If marker has child widgets this will set how the contents should be rendered.
#: if set to 'content' it uses the built-in popup window with custom content
#: if set to 'custom' the child view is rendered as the info window itself
custom_info_window_mode = d_(Enum('content', 'custom'))
#: Sets the visibility for the marker.
visible = d_(Bool(True))
#: Sets the zIndex for the marker.
zindex = d_(Float(strict=False))
#: Marker clicked
#: event value will have a 'result' that can be set to True
#: to indicate the event was handled
clicked = d_(Event(dict), writable=False)
#: Info window clicked
#: the event value will have an indicator of the type of click ('long', 'short')
info_clicked = d_(Event(dict), writable=False)
#: Marker is currently being dragged
dragging = d_(Bool(), writable=False)
#: A reference to the ProxyMapMarker object.
proxy = Typed(ProxyMapMarker)
@observe('alpha', 'anchor', 'draggable', 'flat', 'position', 'rotation',
'title', 'snippit', 'visible', 'zindex', 'show_info', 'custom_info_window_mode')
def _update_proxy(self, change):
""" An observer which sends the state change to the proxy.
"""
# The superclass implementation is sufficient.
super(MapMarker, self)._update_proxy(change)
class MapCircle(ToolkitObject):
""" A circle on the map. """
#: Sets if it is clickable.
clickable = d_(Bool())
#: Circle clicked
#: event value will have a 'result' that can be set to True
#: to indicate the event was handled
clicked = d_(Event(dict), writable=False)
#: Sets the center for the circle.
position = d_(Tuple(float))
#: Sets the radius in meters.
radius = d_(Float(0, strict=False))
#: Sets the color of the polygon
fill_color = d_(Unicode())
#: Sets the color of the polygon
stroke_color = d_(Unicode())
#: Sets the width of the polyline in screen pixels.
stroke_width = d_(Float(10, strict=False))
#: Sets the visibility for the marker.
visible = d_(Bool(True))
#: Sets the zIndex for the marker.
zindex = d_(Float(strict=False))
#: A reference to the ProxyMapCircle object.
proxy = Typed(ProxyMapCircle)
@observe('clickable', 'position', 'radius', 'fill_color', 'stroke_color', 'stroke_width',
'visible', 'zindex')
def _update_proxy(self, change):
""" An observer which sends the state change to the proxy.
"""
# The superclass implementation is sufficient.
super(MapCircle, self)._update_proxy(change)
class MapPolyline(ToolkitObject):
""" A polyline on the map. """
#: The points (lat, lng tuples) making up the polyline.
points = d_(ContainerList(tuple))
#: Specifies whether this polyline is clickable.
clickable = d_(Bool())
#: Sets the color of the polyline
color = d_(Unicode())
#: Sets the cap at the end vertex of the polyline
end_cap = d_(Enum('butt', 'round', 'square'))
#: Specifies whether to draw each segment of this polyline as a geodesic
geodesic = d_(Bool())
#: Sets the joint type for all vertices of the polyline except the start and end vertices.
joint_type = d_(Enum('', 'bevel', 'round'))
#: Sets the cap at the start vertex of the polyline
start_cap = d_(Enum('butt', 'round', 'square'))
#: Sets the visibility for the marker.
visible = d_(Bool(True))
#: Sets the width of the polyline in screen pixels.
width = d_(Float(10, strict=False))
#: Sets the zIndex for the marker.
zindex = d_(Float(strict=False))
#: Line clicked
#: event value will have a 'result' that can be set to True
#: to indicate the event was handled
clicked = d_(Event(dict), writable=False)
#: A reference to the proxy object.
proxy = Typed(ProxyMapPolyline)
@observe('points', 'clickable', 'color', 'end_cap', 'geodesic',
'joint_type', 'start_cap', 'visible', 'width', 'zindex')
def _update_proxy(self, change):
""" An observer which sends the state change to the proxy.
"""
if change['type'] == 'container':
#: Only update what's needed
self.proxy.update_points(change)
else:
super(MapPolyline, self)._update_proxy(change)
class MapPolygon(ToolkitObject):
""" A polygon on the map. """
#: The points (lat, lng tuples) making up the polygon.
points = d_(ContainerList(tuple))
#: Specifies whether this polygon is clickable.
clickable = d_(Bool())
#: Adds holes to the polygon being built.
#: May be a list of coordinates or multiple coordinate lists
holes = d_(ContainerList(tuple))
#: Sets the fill color of the polygon
fill_color = d_(Unicode())
#: Specifies whether to draw each segment of this polyline as a geodesic
geodesic = d_(Bool())
#: Sets the color of the polygon
stroke_color = d_(Unicode())
#: Sets the joint type for all vertices of the polyline except the start and end vertices.
stroke_joint_type = d_(Enum('', 'bevel', 'round'))
#: Sets the width of the polyline in screen pixels.
stroke_width = d_(Float(10, strict=False))
#: Sets the visibility for the polygon.
visible = d_(Bool(True))
#: Sets the zIndex for the polygon.
zindex = d_(Float(strict=False))
#: Line clicked
#: event value will have a 'result' that can be set to True
#: to indicate the event was handled
clicked = d_(Event(dict), writable=False)
#: A reference to the proxy object.
proxy = Typed(ProxyMapPolygon)
@observe('points', 'clickable', 'holes', 'fill_color', 'geodesic',
'stroke_color', 'stroke_joint_type', 'stroke_width', 'visible', 'zindex')
def _update_proxy(self, change):
""" An observer which sends the state change to the proxy.
"""
if change['type'] == 'container':
#: Only update what's needed
self.proxy.update_points(change)
else:
super(MapPolygon, self)._update_proxy(change)
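#: A minimal declarative usage sketch (hypothetical view; assumes an
#: enaml-native app with this googlemaps package installed):
#:
#:     from googlemaps.widgets.api import MapView, MapMarker
#:     enamldef ContentView(MapView):
#:         camera_position = (37.7749, -122.4194)
#:         camera_zoom = 12.0
#:         MapMarker:
#:             position = (37.7749, -122.4194)
#:             title = "San Francisco"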
|
frmdstryr/enaml-native-maps | src/googlemaps/widgets/api.py | """
Copyright (c) 2017, <NAME>.
Distributed under the terms of the MIT License.
The full license is in the file COPYING.txt, distributed with this software.
Created on Oct 10, 2017
@author
"""
from .map_view import MapView, MapMarker, MapPolygon, MapPolyline, MapCircle
def install():
from enamlnative.widgets import api
setattr(api, 'MapView', MapView)
setattr(api, 'MapMarker', MapMarker)
setattr(api, 'MapPolygon', MapPolygon)
setattr(api, 'MapPolyline', MapPolyline)
setattr(api, 'MapCircle', MapCircle) |
frmdstryr/enaml-native-maps | src/googlemaps/android/factories.py |
'''
Copyright (c) 2017, <NAME>.
Distributed under the terms of the MIT License.
The full license is in the file COPYING.txt, distributed with this software.
Created on Oct 29, 2017
@author: jrm
'''
def map_marker_factory():
from .android_map_view import AndroidMapMarker
return AndroidMapMarker
def map_circle_factory():
from .android_map_view import AndroidMapCircle
return AndroidMapCircle
def map_polyline_factory():
from .android_map_view import AndroidMapPolyline
return AndroidMapPolyline
def map_polygon_factory():
from .android_map_view import AndroidMapPolygon
return AndroidMapPolygon
def map_view_factory():
from .android_map_view import AndroidMapView
return AndroidMapView
def install():
from enamlnative.android import factories
factories.ANDROID_FACTORIES.update({
'MapMarker': map_marker_factory,
'MapCircle': map_circle_factory,
'MapPolyline': map_polyline_factory,
'MapPolygon': map_polygon_factory,
'MapView': map_view_factory,
}) |
SansPapyrus683/stresst | stress/strs.py |
import argparse
import os
import subprocess
def main():
parser = argparse.ArgumentParser(prog="stresst", description="stress test a program with multiple inputs")
parser.add_argument("file", metavar="prgm.py", type=str, nargs=1, help="program to test")
parser.add_argument("test", metavar="testdir", type=str, nargs=1, help="directory of test cases")
args = parser.parse_args()
wa = False
if (os.path.exists(os.path.join(os.path.abspath(os.getcwd()), str(args.file[0])))
and os.path.exists(os.path.join(os.path.abspath(os.getcwd()), str(args.test[0])))):
for sb, _, f in os.walk(os.path.join(os.path.abspath(os.getcwd()), str(args.test[0]))):
for i in f:
pth = sb + os.sep + i
if pth.endswith(".in"):
with open(pth, "r", encoding="utf-8") as fin:
p = subprocess.Popen(["python", str(args.file[0])], stdin=fin, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if not err:
out = out.decode("utf-8").strip()
arr = os.listdir(str(args.test[0]))
fn = i[:i.index(".")] + ".out"
if fn not in arr:
print("no output file for", i)
break
with open(sb + os.sep + fn, "r", encoding="utf-8") as of:
expected = of.read().strip()
if out != expected:
print("Wrong Answer on", i)
wa = True
break
else:
print("Runtime Error on", i)
wa = True
break
else:
print("Files not found")
if not wa:
print("All cases passed.")
|
SansPapyrus683/stresst | setup.py | from setuptools import setup, find_packages
long_description = open("README.md").read()
setup(
name='stresst',
version='0.0.3',
author='<NAME>',
author_email='',
url='https://ryanchou.dev',
description='Stress test your Python solutions!',
long_description=long_description,
long_description_content_type="text/markdown",
license='MIT',
packages=find_packages(),
entry_points={
'console_scripts': [
'stresst = stress.strs:main'
]
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
keywords='competitive-programming python problems testing',
install_requires="",
zip_safe=False
)
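# Installation sketch (standard setuptools flow): run `pip install .` from
# the project root, after which the `stresst` console script is on PATH.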
|
bgipson/ComplXML | ComplXML.py | #!/usr/bin/env python3
from tkinter import *
import tkinter.messagebox
import tkinter.filedialog
import traceback
#ComplXML - The LC-3 Test XML Maker
#Created by <NAME>
import os
class mainGUI:
def __init__(self,rootWin):
self.rootWin = rootWin
rootWin.title("ComplXML")
self.testCaseList = []
self.testCasePointer = None
self.expanded = False
self.counter = IntVar()
testNameLabel = Label(rootWin,text = "Enter the name of your Test Case: ")
self.testNameString = StringVar()
testNameEntry = Entry(rootWin, textvariable=self.testNameString)
makeTestCaseButton = Button(rootWin, text = "Create Test Case", command = self.newTestCase)
#Frame sidebar
self.sideFrame = Frame(rootWin)
sideLabel = Label(self.sideFrame, text= "YOUR TEST CASES")
sideLabel.pack()
#Frame for newly created testcases
self.testFrame = Frame(rootWin, height = 200)
#testFrame Headers
addressLabel = Label(self.testFrame, text = "Address")
valLabel = Label(self.testFrame, text = "Value")
addressLabel.grid(row = 0, column = 1)
valLabel.grid(row = 0, column = 2)
#Input Buttons
addInputButton = Button(self.testFrame, width = 15, text = "New Input", command = self.addIn)
self.inString = StringVar()
addInputAddress = Entry(self.testFrame, textvariable = self.inString)
self.inString2 = StringVar()
addInputVal = Entry(self.testFrame, textvariable = self.inString2)
addInputButton.grid(row = 1, column = 0)
addInputAddress.grid(row = 1, column = 1)
addInputVal.grid(row = 1, column = 2)
#Output Buttons
addOutputButton = Button(self.testFrame, width = 15, text = "Expected Outputs", command = self.addOut)
self.outString = StringVar()
addOutAddress = Entry(self.testFrame, textvariable = self.outString)
self.outString2 = StringVar()
addOutVal = Entry(self.testFrame, textvariable = self.outString2)
addOutputButton.grid(row = 2, column = 0)
addOutAddress.grid(row = 2, column = 1)
addOutVal.grid(row = 2, column = 2)
#Grids important stuff
testNameLabel.grid(row = 0, column = 0, sticky = W)
testNameEntry.grid(row = 0, column = 1, sticky = W)
makeTestCaseButton.grid(row=0, column = 2, columnspan = 2)
self.curTestString = StringVar()
curStringLabel = Label(self.testFrame, justify = LEFT, textvariable = self.curTestString)
curStringLabel.grid(row = 3, column =0)
menubar = Menu(rootWin)
rootWin.config(menu=menubar)
menubar.add_command(label = "Click To Save XML File", command = self.makeXML)
menubar.add_command(label = "Help and Documentation", command = self.helpWindow)
menubar.add_command(label = "Run Test Case", command = self.runTest)
menubar.add_command(label = "Reset", command = self.reset)
def addIn(self):
if self.inString.get() == "" or self.inString2.get() == "":
tkinter.messagebox.showerror(title = "Missing", message = "Please enter an ADDRESS and VALUE")
return
address = self.inString.get()
val = self.inString2.get()
self.testCasePointer.newIn(address,val)
self.curTestString.set(self.testCasePointer)
self.inString.set("")
self.inString2.set("")
def addOut(self):
if self.outString.get() == "" or self.outString2.get() == "":
tkinter.messagebox.showerror(title = "Missing", message = "Please enter an ADDRESS and VALUE")
return
address = self.outString.get()
val = self.outString2.get()
self.testCasePointer.newOut(address,val)
self.curTestString.set(self.testCasePointer)
self.outString.set("")
self.outString2.set("")
def makeXML(self):
self.file_opt = options = {}
options['filetypes'] = [("Complx XML Test (.xml)",".xml")]
if len(self.testCaseList) > 0:
theFile = tkinter.filedialog.asksaveasfilename(**self.file_opt)
else:
tkinter.messagebox.showerror(title="Error", message="Please make at least one test case")
return
success = False
if theFile:
success = makeXML(theFile,self.testCaseList)
if success:
tkinter.messagebox.showinfo(title="File Saved!", message="Your XML File has successfully been saved")
else:
return
def helpWindow(self):
try:
helpWin = Tk()
helpWin.title("LC-3 XML Maker HELP")
helpWin.config(bg = "gray")
file = open("README.txt")
text = Text(helpWin, wrap = WORD, height = 40)
text.insert(END,file.read())
text.config(state=DISABLED)
file.close()
text.pack()
helpWin.mainloop()
except:
helpWin.destroy()
tkinter.messagebox.showerror(title="Error", message="Couldn't not find README.txt")
def newTestCase(self):
if self.testNameString.get() == "":
tkinter.messagebox.showerror(title = "Missing Test Case Title", message = "Please enter a name for your test case!")
return
testName = self.testNameString.get()
newTestCase = testCase(testName)
self.testCaseList.append(newTestCase)
self.testCasePointer = newTestCase
self.curTestString.set(newTestCase)
button = Radiobutton(self.sideFrame, text=testName, variable = self.counter, command = self.newPointer, value = (len(self.testCaseList) - 1))
if len(self.testCaseList) > 0 and not self.expanded:
self.counter.set(0)
self.expanded = True
self.testFrame.grid(row = 1, column = 0)
self.sideFrame.grid(row=1, column = 1, sticky = E)
#Sets up radiobuttons
button.pack()
self.testNameString.set("")
def newPointer(self):
newIndx = self.counter.get()
self.testCasePointer = self.testCaseList[newIndx]
print(self.testCasePointer.name)
self.curTestString.set(self.testCasePointer)
def runTest(self):
try:
self.file_opt = options = {}
options['filetypes'] = [("Complx XML Test (.xml)",".xml")]
file = tkinter.filedialog.askopenfilename(**self.file_opt)
if file:
pass
else:
return
filePathList = file.split(os.sep)
actFile = filePathList[-1]
runFile = actFile[:len(actFile)-3] + "asm"
del filePathList[-1]
directory = ""
for item in filePathList:
directory += item + os.sep
out = os.popen("cd {}; lc3test {} {}".format(directory, actFile, runFile)).read()
resultsWindow = Tk()
resultsWindow.title("Results")
scroll = Scrollbar(resultsWindow)
resultsText = Text(resultsWindow, yscrollcommand = scroll.set)
resultsText.insert(INSERT, out)
resultsText.yview(END)
scroll.config(command = resultsText.yview)
resultsText.config(state = DISABLED)
resultsText.pack(side=LEFT)
scroll.pack(side=RIGHT)
resultsWindow.mainloop()
except:
traceback.print_exc()
tkinter.messagebox.showerror(title="Error", message="No XML file chosen or the test could not be run")
def reset(self):
newWin = Tk()
newGui = mainGUI(newWin)
newWin.mainloop()
class testInput:
def __init__(self, rootWin):
inpLabel = Label(rootWin, text = "Please Enter Input: ")
self.inpString = StringVar()
inpEntry = Entry(rootWin, textvariable = self.inpString)
class testOutput:
def __init__(self, rootWin):
outLabel = Label(rootWin, text = "Please Enter Output: ")
self.outString = StringVar()
outEntry = Entry(rootWin, textvariable = self.outString)
class testCase:
def __init__(self,name):
self.name = name
self.arrays = []
self.inputs = []
self.outputs = []
def newArray(self, address, val):
self.arrays.append((address, val))
def newIn(self,address,val):
self.inputs.append((address,val))
def getName(self):
return self.name
def newOut(self,address,val):
self.outputs.append((address,val))
def __str__(self):
stri = ("Inputs: ")
for pair in self.inputs:
stri = stri + ("{} = {}, ".format(pair[0].upper(),pair[1]))
stri = stri + "\n"
stri = stri + ("Outputs: ")
for pair in self.outputs:
stri = stri + ("{} = {},".format(pair[0].upper(), pair[1]))
return stri
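# A minimal usage sketch of testCase (the address/value strings below are
# illustrative assumptions, not taken from the original file):
#   tc = testCase("multiply")
#   tc.newIn("x3000", "5")
#   tc.newOut("x4000", "25")
#   print(tc)   # -> "Inputs: X3000 = 5, \nOutputs: X4000 = 25,"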
def makeXML(filename,testcases):
file = open(filename,"w")
file.write("<?xml version=\"1.0\"?>")
file.write("<test-suite>")
for test in testcases:
file.write("<test-case>")
file.write("<name>{}</name>".format(test.name))
file.write("<has-max-executions>1</has-max-executions>")
file.write("<max-executions>1000000</max-executions>")
file.write("<randomize>1</randomize>")
if len(test.inputs) > 0:
file.write("<input>")
for inp in test.inputs:
if "," in inp:
file.write("<test-array><address>{}</address><value>{}</value></test-array>".format(inp[0],inp[1]))
else:
file.write("<test-value><address>{}</address><value>{}</value></test-value>".format(inp[0],inp[1]))
file.write("</input>")
if len(test.outputs) > 0:
file.write("<output>")
for inp in test.outputs:
file.write("<test-value><address>{}</address><value>{}</value></test-value>".format(inp[0],inp[1]))
file.write("</output>")
file.write("</test-case>")
file.write("</test-suite>")
file.close()
return True
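# For reference, makeXML above emits single-line XML shaped roughly like the
# following -- reconstructed from the write calls above, not a verified
# Complx sample:
#   <?xml version="1.0"?><test-suite><test-case><name>...</name>
#   <has-max-executions>1</has-max-executions><max-executions>1000000</max-executions>
#   <randomize>1</randomize><input><test-value><address>...</address>
#   <value>...</value></test-value></input><output>...</output></test-case></test-suite>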
rootWin = Tk()
gui = mainGUI(rootWin)
rootWin.mainloop()
|
CityU-AIM-Group/SCAN | fcos_core/modeling/discriminator/con_fgbg.py | import torch
import torch.nn.functional as F
from torch import nn
from .layer import GradientReversal
class FCOSDiscriminator_con(nn.Module):
def __init__(self, with_GA=False, fusion_cfg ='concat', num_convs=4, in_channels=256, num_classes=2, grad_reverse_lambda=-1.0, grl_applied_domain='both', cfg = None,patch_stride=None):
"""
Arguments:
in_channels (int): number of channels of the input feature
"""
super(FCOSDiscriminator_con, self).__init__()
dis_tower = []
for i in range(num_convs):
dis_tower.append(
nn.Conv2d(
in_channels,
in_channels,
kernel_size=3,
stride=1,
padding=1
)
)
dis_tower.append(nn.GroupNorm(32, in_channels))
dis_tower.append(nn.ReLU())
self.add_module('dis_tower', nn.Sequential(*dis_tower))
# self.num_classes = num_classes - 1
self.num_classes = num_classes
self.with_GA = with_GA
self.fusion_cfg = fusion_cfg
self.bg_clf = nn.Sequential(
nn.Conv2d(
in_channels+1 if self.fusion_cfg == 'concat' else in_channels,
in_channels,
kernel_size=3,
stride=1,
padding=1
),
nn.GroupNorm(32, in_channels),
nn.ReLU(),
nn.Conv2d(
in_channels, 1, kernel_size=3, stride=1,
padding=1)
)
for i, block in enumerate(self.bg_clf):
self.add_module('bg_{}'.format(i),block)
self.fg_clf = nn.Sequential(
nn.Conv2d(
in_channels+self.num_classes-1 if self.fusion_cfg == 'concat' else in_channels,
in_channels,
kernel_size=3,
stride=1,
padding=1
),
nn.GroupNorm(32, in_channels),
nn.ReLU(),
nn.Conv2d(
in_channels, 1, kernel_size=3, stride=1,
padding=1)
)
for i, block in enumerate(self.fg_clf):
self.add_module('fg_{}'.format(i),block)
if self.with_GA:
self.cls_logits = nn.Conv2d(
in_channels, 1, kernel_size=3, stride=1,
padding=1
)
self.patch_stride = patch_stride
assert patch_stride is None or isinstance(patch_stride, int), 'wrong format of patch stride'
for modules in [self.dis_tower]:
for l in modules.modules():
if isinstance(l, nn.Conv2d):
torch.nn.init.normal_(l.weight, std=0.01)
torch.nn.init.constant_(l.bias, 0)
for modules in self.fg_clf:
for l in modules.modules():
if isinstance(l, nn.Conv2d):
torch.nn.init.normal_(l.weight, std=0.01)
torch.nn.init.constant_(l.bias, 0)
for modules in self.bg_clf:
for l in modules.modules():
if isinstance(l, nn.Conv2d):
torch.nn.init.normal_(l.weight, std=0.01)
torch.nn.init.constant_(l.bias, 0)
self.grad_reverse = GradientReversal(grad_reverse_lambda)
self.grad_reverse_act = GradientReversal(-0.01)
self.loss_fn = nn.BCEWithLogitsLoss()
self.alpha = 0.01
assert grl_applied_domain == 'both' or grl_applied_domain == 'target'
self.grl_applied_domain = grl_applied_domain
print("WARNINNG!!!!!!!!!!!! BG", self.alpha)
def forward(self, feature, target, act_maps=None, domain='source'):
assert target == 0 or target == 1 or target == 0.1 or target == 0.9
assert domain == 'source' or domain == 'target'
feature = self.grad_reverse(feature)
act_maps = act_maps.detach()
x = self.dis_tower(feature)
bg_act_map = act_maps[:,0,:,:].unsqueeze(1)
if self.num_classes == 2:
fg_act_maps = act_maps[:,1,:,:].unsqueeze(1)
else:
fg_act_maps = act_maps[:, 1:, :, :]
x_bg_cls = self.bg_clf(torch.cat((x, bg_act_map), dim=1))
x_fg_cls = self.fg_clf(torch.cat((x, fg_act_maps), dim=1))
loss_bg = self.loss_fn(x_bg_cls, torch.full(x_bg_cls.shape, target, dtype=torch.float, device=x.device))
loss_fg = self.loss_fn(x_fg_cls, torch.full(x_fg_cls.shape, target, dtype=torch.float, device=x.device))
# 0.5
# 0.01
# loss = self.alpha * loss_bg + (1 - self.alpha) * loss_fg
loss = loss_bg + loss_fg
# for c in range(self.num_classes):
# # x = torch.mul(feature,act_maps[:,c+1,:,:].unsqueeze(1))
#
# if self.fusion_cfg == 'concat':
# x_cls = torch.cat((x, act_maps[:, c, :, :].unsqueeze(1)),dim=1)
# elif self.fusion_cfg == 'mul':
# x_cls = torch.mul(x, act_maps[:, c, :, :].unsqueeze(1).contiguous())
# elif self.fusion_cfg == 'mul_detached':
# x_cls = torch.mul(x, act_maps[:, c, :, :].unsqueeze(1).contiguous().detach())
# else:
# raise KeyError("Unknown fusion config!")
# logits = self.class_cond_map[c](x_cls)
# targets = torch.full(logits.shape, target, dtype=torch.float, device=x.device)
# loss_cls = self.loss_fn(logits, targets)
# loss += loss_cls / self.num_classes
if self.with_GA:
logits_GA = self.cls_logits(x)
targets = torch.full(logits_GA.shape, target, dtype=torch.float, device=x.device)
loss = 0.5* loss + self.loss_fn(logits_GA, targets)
return loss
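# Minimal usage sketch for FCOSDiscriminator_con (batch and spatial sizes
# are illustrative assumptions):
#   disc = FCOSDiscriminator_con(num_classes=2)
#   feat = torch.randn(2, 256, 32, 32)        # one FPN level
#   act_maps = torch.rand(2, 2, 32, 32)       # channel 0: bg, channel 1: fg
#   loss = disc(feat, target=1, act_maps=act_maps, domain='source')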
|
CityU-AIM-Group/SCAN | fcos_core/modeling/rpn/fcos/condgraph.py | <reponame>CityU-AIM-Group/SCAN
# --------------------------------------------------------
# SCAN: Cross-domain Object Detection with Semantic Conditioned Adaptation (AAAI22 ORAL)
# Written by <NAME>
# This file covers the core operation on the feature maps for domain adaptation
# --------------------------------------------------------
import math
import torch
import torch.nn.functional as F
from torch import nn
from .inference import make_fcos_postprocessor
from .loss import make_fcos_loss_evaluator, make_prototype_evaluator, PrototypeComputation
from fcos_core.layers import SigmoidFocalLoss, FocalLoss, CosineLoss, BCEFocalLoss, KLLoss, MultiHeadAttention
from fcos_core.layers import Scale
import matplotlib.pyplot as plt
import ipdb
import os
import numpy as np
eps = 1e-8
INF = 1e10
# POST PROCESSING
def see(data, name='default'):
print('#################################', name, '#################################')
print('max: ', torch.max(data))
print('mean: ', torch.mean(data))
print('min: ', torch.min(data))
print('##########################################################################')
def sim_matrix(a, b, eps=eps):
"""
added eps for numerical stability
"""
a_n, b_n = a.norm(dim=1)[:, None], b.norm(dim=1)[:, None]
a_norm = a / torch.clamp(a_n, min=eps)
b_norm = b / torch.clamp(b_n, min=eps)
sim_mt = torch.mm(a_norm, b_norm.transpose(0, 1))
return sim_mt
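# Example: sim_matrix(torch.randn(5, 256), torch.randn(7, 256)) returns a
# (5, 7) matrix of cosine similarities; the eps clamp keeps rows with
# near-zero norm from producing NaNs.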
class PROTOTYPECounter():
def __init__(self, cycle=3, stop=False):
self.cycle = cycle
self.counter = -1
self.stop = stop
def __call__(self, *args, **kwargs):
# 0, 1, 2, 3, 4, ..., n, n, n, n
if self.stop:
if self.counter == self.cycle:
return self.cycle
else:
self.counter += 1
return self.counter
# 0, 1, 2, 3, 0, 1, 2, 3, ...
else:
self.counter += 1
if self.counter == self.cycle:
self.counter = 0
return self.counter
class GRAPHHead(torch.nn.Module):
def __init__(self, cfg, in_channels, out_channel, mode='in'):
"""
Projection layers:
transfer the visual features [0, +INF) to the node embedding (-INF, +INF)
"""
super(GRAPHHead, self).__init__()
# TODO: Implement the sigmoid version first.
if mode == 'in':
num_convs = cfg.MODEL.MIDDLE_HEAD.NUM_CONVS_IN
elif mode == 'out':
num_convs = cfg.MODEL.MIDDLE_HEAD.NUM_CONVS_OUT
else:
num_convs = cfg.MODEL.FCOS.NUM_CONVS
print('undefined num_convs for middle head; falling back to MODEL.FCOS.NUM_CONVS')
middle_tower = []
for i in range(num_convs):
middle_tower.append(
nn.Conv2d(
in_channels,
out_channel,
kernel_size=3,
stride=1,
padding=1
)
)
if mode == 'in':
if cfg.MODEL.MIDDLE_HEAD.IN_NORM == 'GN':
middle_tower.append(nn.GroupNorm(32, in_channels))
elif cfg.MODEL.MIDDLE_HEAD.IN_NORM == 'IN':
middle_tower.append(nn.InstanceNorm2d(in_channels))
elif cfg.MODEL.MIDDLE_HEAD.IN_NORM == 'BN':
middle_tower.append(nn.BatchNorm2d(in_channels))
middle_tower.append(nn.ReLU())
self.add_module('middle_tower', nn.Sequential(*middle_tower))
# initialization
for modules in [self.middle_tower]:
for l in modules.modules():
if isinstance(l, nn.Conv2d):
torch.nn.init.normal_(l.weight, std=0.01)
torch.nn.init.constant_(l.bias, 0)
def forward(self, x):
middle_tower = []
for l, feature in enumerate(x):
middle_tower.append(self.middle_tower(feature))
return middle_tower
class GRAPHModule(torch.nn.Module):
"""
The core module for SCAN
"""
def __init__(self, cfg, in_channels):
super(GRAPHModule, self).__init__()
# Debugger: for those developers
self.debug_cfg = cfg.MODEL.DEBUG_CFG
if self.debug_cfg:
from fcos_core.vis_tools import VIS_TOOLS
self.debugger = VIS_TOOLS()
# from fcos_core.vis_tools import VIS_TOOLS
# self.debugger = VIS_TOOLS()
self.cfg = cfg.clone()
# Basic settings
self.with_bg_proto = cfg.MODEL.MIDDLE_HEAD.PROTO_WITH_BG
self.with_bias_dc = cfg.MODEL.MIDDLE_HEAD.COND_WITH_BIAS
self.with_concated_maps = cfg.MODEL.MIDDLE_HEAD.CAT_ACT_MAP
self.with_shortcut_GCNs = cfg.MODEL.MIDDLE_HEAD.GCN_SHORTCUT
self.with_global_gcn = cfg.MODEL.MIDDLE_HEAD.GLOBAL_GCN
self.with_proto_uv = cfg.MODEL.MIDDLE_HEAD.PROTO_MEAN_VAR
self.with_self_training = cfg.MODEL.MIDDLE_HEAD.GCN_SELF_TRAINING
self.fpn_strides = cfg.MODEL.FCOS.FPN_STRIDES
self.num_classes_fg = cfg.MODEL.FCOS.NUM_CLASSES - 1
self.used_num_classes = self.num_classes_fg + int(self.with_bg_proto)
# Many choices
self.transfer_cfg = cfg.MODEL.MIDDLE_HEAD.TRANSFER_CFG # 'NODES', 'EDGE', 'ADJ'
self.act_loss_cfg = cfg.MODEL.MIDDLE_HEAD.ACT_LOSS
self.GCN_norm_cfg = cfg.MODEL.MIDDLE_HEAD.GCN_EDGE_NORM
self.GCN_out_act_cfg = cfg.MODEL.MIDDLE_HEAD.GCN_OUT_ACTIVATION
self.tg_transfer_cfg = cfg.MODEL.MIDDLE_HEAD.CON_TG_CFG
# Hyperparameter
self.lamda1 = cfg.MODEL.MIDDLE_HEAD.GCN_LOSS_WEIGHT
self.lamda2 = cfg.MODEL.MIDDLE_HEAD.ACT_LOSS_WEIGHT
self.lamda3 = cfg.MODEL.MIDDLE_HEAD.CON_LOSS_WEIGHT
self.lamda4 = cfg.MODEL.MIDDLE_HEAD.GCN_LOSS_WEIGHT_TG
# self.num_classes_fg_bg = cfg.MODEL.FCOS.NUM_CLASSES
# Important settings
self.use_rnn = cfg.MODEL.MIDDLE_HEAD.USE_RNN
self.prototype_iter = cfg.MODEL.MIDDLE_HEAD.PROTO_ITER # 1, 3, 9
self.momentum = cfg.MODEL.MIDDLE_HEAD.PROTO_MOMENTUM
self.relu = torch.nn.ReLU().to('cuda')
prototype_channel = cfg.MODEL.MIDDLE_HEAD.PROTO_CHANNEL
cond_hidden_channel = cfg.MODEL.MIDDLE_HEAD.COND_HIDDEN_CHANNEL
proto_cls_hidden_dim = 512
# Pre-processing
self.head_in = GRAPHHead(cfg, in_channels, in_channels, mode='in')
# Prototype settings
if self.prototype_iter == 1:
self.register_buffer('prototype', torch.randn(self.used_num_classes, prototype_channel))
else:
self.register_buffer('prototype',
torch.randn(self.used_num_classes, prototype_channel, self.prototype_iter))
self.prototype_buffer_batch = torch.zeros(self.used_num_classes, prototype_channel)
if self.with_concated_maps:
head_out = GRAPHHead(cfg, in_channels + self.used_num_classes, in_channels, mode='out')
self.head_out = head_out
if self.act_loss_cfg == "softmaxFL":
self.act_loss_func = FocalLoss(
self.used_num_classes
)
elif self.act_loss_cfg == "sigmoidFL":
self.act_loss_func = BCEFocalLoss()
self.prototype_evaluator = make_prototype_evaluator(cfg)
self.proto_cls_hidden = torch.nn.Linear(cfg.MODEL.MIDDLE_HEAD.GCN2_OUT_CHANNEL, proto_cls_hidden_dim).to(
'cuda')
self.proto_cls = torch.nn.Linear(proto_cls_hidden_dim, self.num_classes_fg + int(self.with_bg_proto)).to('cuda')
self.node_loss_fn = nn.CrossEntropyLoss()
# GCNs settings
if self.with_global_gcn:
self.multihead_attn = MultiHeadAttention(256, 4, dropout=0.1)
else:
self.gcn_layer1 = torch.nn.Linear(256, cfg.MODEL.MIDDLE_HEAD.GCN1_OUT_CHANNEL).to('cuda')
self.gcn_layer2 = torch.nn.Linear(cfg.MODEL.MIDDLE_HEAD.GCN1_OUT_CHANNEL,
cfg.MODEL.MIDDLE_HEAD.GCN2_OUT_CHANNEL).to('cuda')
# self.edge_project_u = torch.nn.Linear(256, cfg.MODEL.MIDDLE_HEAD.GCN_EDGE_PROJECT).to('cuda')
# self.edge_project_v = torch.nn.Linear(256, cfg.MODEL.MIDDLE_HEAD.GCN_EDGE_PROJECT).to('cuda')
for i in [
self.gcn_layer1, self.gcn_layer2,
# self.edge_project_u, self.edge_project_v,
]:
nn.init.normal_(i.weight, std=0.01)
nn.init.constant_(i.bias, 0)
# Dynamic Brunch
if self.use_rnn:
print(self.use_rnn)
self.cond_nx1 = torch.nn.Conv2d(512, 256, kernel_size=(self.prototype_iter, 1))
self.cond_rnn = nn.RNN(256, 512, 2, nonlinearity='tanh')
self.counter_rnn = PROTOTYPECounter(self.prototype_iter, stop=True)
elif self.prototype_iter > 1:
self.counter = PROTOTYPECounter(self.prototype_iter)
self.cond_nx1 = torch.nn.Conv2d(prototype_channel, cond_hidden_channel,
kernel_size=(self.prototype_iter, 1))
nn.init.normal_(self.cond_nx1.weight)
nn.init.constant_(self.cond_nx1.bias, 0)
self.cond_nx1_norm = torch.nn.GroupNorm(32, cond_hidden_channel)
elif self.prototype_iter == 1:
self.cond_1 = torch.nn.Linear(prototype_channel, cond_hidden_channel).to('cuda')
nn.init.normal_(self.cond_1.weight, std=0.01)
nn.init.constant_(self.cond_1.bias, 0)
self.cond_2 = torch.nn.Linear(cond_hidden_channel, 256 + int(self.with_bias_dc)).to('cuda')
# Semantic transfer settings
if ('ADJ' in self.transfer_cfg) or ('ADJ_COMPLETE' in self.transfer_cfg):
self.transfer_loss_inter_class = nn.CosineEmbeddingLoss(margin=0.0)
if ('NODES' in self.transfer_cfg) or 'PROTOTYPE' in self.transfer_cfg:
self.transfer_loss_prototype = nn.KLDivLoss()
# self.transfer_loss = SinkhornDistance(eps=1, max_iter=100, reduction='mean')
# Initialization
for i in [self.cond_2,
# self.gcn_layer1, self.gcn_layer2,
# self.edge_project_u, self.edge_project_v,
self.proto_cls, self.proto_cls_hidden]:
nn.init.normal_(i.weight, std=0.01)
nn.init.constant_(i.bias, 0)
def GCNs_global(self, x, Adj):
# transformer-style aggregation; keep the input for the residual shortcut
identity = x
x = self.relu(self.gcn_layer2(torch.mm(Adj, self.gcn_layer1(x))))
if self.with_shortcut_GCNs:
x = x + identity
return x
def GCNs(self, nodes, Adj):
x = nodes
# layer 1
x = self.relu(self.gcn_layer1(torch.mm(Adj, x)))
# layer 2
if self.GCN_out_act_cfg == 'softmax':
x = (self.gcn_layer2(torch.mm(Adj, x))).softmax(dim=-1)
elif self.GCN_out_act_cfg == 'sigmoid':
x = (self.gcn_layer2(torch.mm(Adj, x))).sigmoid()
elif self.GCN_out_act_cfg == 'tanh':
x = (self.gcn_layer2(torch.mm(Adj, x))).tanh()
elif self.GCN_out_act_cfg == 'relu':
x = (self.relu(self.gcn_layer2(torch.mm(Adj, x))))
elif self.GCN_out_act_cfg == 'NO':
x = self.gcn_layer2(torch.mm(Adj, x))
else:
raise KeyError('unknown gcn output activation')
if self.with_shortcut_GCNs:
x = x + nodes
return x
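# In matrix form, the two-layer update above is
#   X' = act( A @ ReLU(A @ X @ W1) @ W2 )   (+ X when GCN_SHORTCUT is on),
# where A is the adjacency produced by get_edge below.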
def get_edge(self, nodes_feat):
if self.GCN_norm_cfg == 'NO':
Adj = torch.mm(nodes_feat, nodes_feat.t()).softmax(-1).detach()
return Adj
elif self.GCN_norm_cfg == 'softmax':
Adj = torch.mm(self.edge_project_u(nodes_feat), self.edge_project_v(nodes_feat).t())
return Adj.softmax(-1)
elif self.GCN_norm_cfg == 'cosine_detached':
Adj = sim_matrix(nodes_feat, nodes_feat).softmax(-1).detach()
return Adj
elif self.GCN_norm_cfg == 'cosine':
# nodes_feat_pj = self.edge_project_v(self.relu(self.edge_project_u(nodes_feat)))
nodes_feat_pj = self.relu(self.edge_project_v(nodes_feat))
sim = sim_matrix(nodes_feat_pj, nodes_feat_pj)
# Adj = sim.softmax(dim=-1)
norm = torch.sum(sim, dim=-1)
assert norm.min() > 0, '0 appears in norm'
Adj = sim / torch.clamp(norm, min=eps)
return Adj
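# Note: in the 'cosine' branch each row of Adj is divided by its row sum,
# so (given the positive-norm assertion) every row acts as a set of
# normalized attention weights summing to ~1.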
def update_prototype_ensemble(self, prototype_buffer_batch):
if self.use_rnn:
self.update_prototype_nx1_rnn(prototype_buffer_batch)
elif self.prototype_iter > 1:
self.update_prototype_nx1(prototype_buffer_batch)
else:
self.update_prototype(prototype_buffer_batch)
def get_conded_weight(self):
if self.use_rnn:
# num_classes_fg_bg, channel, iter [9, 256, 3]
conded_weight = self.cond_nx1(
self.cond_rnn(
self.prototype.permute(2, 0, 1))[0].permute(1, 2, 0).unsqueeze(-1)
).squeeze()
elif self.prototype_iter > 1:
conded_weight = self.cond_2(
self.relu(self.cond_nx1_norm(
self.cond_nx1(
self.prototype.unsqueeze(-1)
)
).squeeze())
)
# original setting: num_classes_fg_bg, channel, [9, 256]
else:
conded_weight = self.cond_2(
self.relu(
self.cond_1(self.prototype)
)
)
return conded_weight
def get_act_loss(self, features, conded_weight, act_maps_labels):
act_maps_labels_flatten = []
act_maps_preds_flatten = []
return_act_maps = []
for l, feature in enumerate(features):
act_maps_logits = self.dynamic_conv(feature, conded_weight)
act_maps = act_maps_logits.softmax(
dim=1) if self.act_loss_cfg == 'softmaxFL' \
else act_maps_logits.sigmoid()
return_act_maps.append(act_maps)
act_maps_labels_flatten.append(act_maps_labels[l].reshape(-1))
act_maps_preds_flatten.append(act_maps_logits.permute(0, 2, 3, 1).reshape(-1, self.used_num_classes))
act_maps_preds_flatten = torch.cat(act_maps_preds_flatten, dim=0)
act_maps_labels_flatten = torch.cat(act_maps_labels_flatten, dim=0)
# Activation Map loss
if self.act_loss_cfg == 'softmaxFL':
act_loss = self.lamda2 * self.act_loss_func(
act_maps_preds_flatten,
act_maps_labels_flatten.long()
)
elif self.act_loss_cfg == 'sigmoidFL':
N = features[0].size(0)
num = len(act_maps_labels_flatten)
target_flatten = act_maps_labels_flatten.new_zeros((num, 2))
target_flatten[range(num), list(act_maps_labels_flatten)] = 1
act_loss = self.lamda2 * self.act_loss_func(
act_maps_preds_flatten,
target_flatten.float()
)
else:
act_loss = None
return act_loss, return_act_maps
def GCNs_post_processing(self, nodes_GCNs, pos_points):
if self.with_shortcut_GCNs:
nodes_GCNs = (nodes_GCNs + pos_points).squeeze()
else:
nodes_GCNs = nodes_GCNs.squeeze()
return nodes_GCNs
def features_post_processing(self, features, act_maps):
if self.with_concated_maps:
for l, feature in enumerate(features):
features[l] = torch.cat([features[l], act_maps[l]], dim=1)
features = self.head_out(features)
return features
def _forward_gcns(self, pos_points, pos_labels):
prototype_buffer_batch = pos_points[0].new_zeros(self.prototype_buffer_batch.size())
if self.with_global_gcn:
# batch = 0
pos_points = pos_points.unsqueeze(0)
nodes_GCNs = self.multihead_attn(pos_points, pos_points, pos_points)[0]
nodes_GCNs = self.GCNs_post_processing(nodes_GCNs, pos_points)
for i in range(self.used_num_classes):
indx = pos_labels == i if self.with_bg_proto else pos_labels == i + 1
if indx.any():
prototype_buffer_batch[i] = nodes_GCNs[indx].mean(dim=0)
logits = self.proto_cls(self.relu(self.proto_cls_hidden(nodes_GCNs)))
target = (pos_labels).long() if self.with_bg_proto else (pos_labels - 1).long()
node_loss = self.lamda1 * self.node_loss_fn(logits, target)
else:
label_indx = pos_labels.new_zeros((self.used_num_classes))
for i in range(self.used_num_classes):
indx = pos_labels == i if self.with_bg_proto else pos_labels == i + 1
if indx.any():
label_indx[i] = 1
nodes = pos_points[indx]
Adj = self.get_edge(nodes)
test_nan(Adj)
nodes_GCNs = self.GCNs(nodes, Adj)
pos_points[indx] = nodes_GCNs
prototype_buffer_batch[i] = nodes_GCNs.mean(dim=0)
logits = self.proto_cls(self.relu(self.proto_cls_hidden(pos_points)))
target = (pos_labels).long() if self.with_bg_proto else (pos_labels - 1).long()
node_loss = self.lamda1 * self.node_loss_fn(logits, target)
test_nan(node_loss)
return node_loss, prototype_buffer_batch
def _forward_train_source(self, images, features, targets=None, return_maps=False):
transfer_loss = 0
# STEP1: sample feature points and conduct cross-image graph aggregation
locations = self.compute_locations(features)
pos_points, pos_labels, act_maps_labels = self.prototype_evaluator(
locations, features, targets
)
node_loss, prototype_batch = self._forward_gcns(pos_points, pos_labels)
# STEP2: update the 3D paradigm recurrently
self.update_prototype_ensemble(prototype_batch)
conded_weight = self.get_conded_weight() # obtain semantic conditioned kernels
# STEP3: generate loss to train the kernels
if self.act_loss_cfg:
act_loss, return_act_maps = self.get_act_loss(features, conded_weight, act_maps_labels)
features = self.features_post_processing(features, return_act_maps) # POST PROCESSING
return features, (node_loss, transfer_loss), act_loss, return_act_maps
else:
return_act_maps = []
for l, feature in enumerate(features):
act_maps_logits = self.dynamic_conv(feature, conded_weight)
act_maps = act_maps_logits.softmax(
dim=1) if self.act_loss_cfg == 'softmaxFL' else act_maps_logits.sigmoid()
return_act_maps.append(act_maps)
features = self.features_post_processing(features, return_act_maps) # POST PROCESSING
return features, (node_loss, transfer_loss), None, return_act_maps
def get_transfer_loss(self, tg_prototype, tg_nodes, tg_labels):
losses = {}
sr_prototype = self.prototype.mean(dim=-1).detach() if self.prototype_iter > 1 \
else self.prototype.detach()
if 'NODES' in self.transfer_cfg or 'NODE' in self.transfer_cfg:
transfer_loss_node = self.transfer_loss_prototype(tg_nodes.softmax(-1).log(),
sr_prototype[tg_labels.long()].softmax(-1))
transfer_loss_node = {"trans_proto_node": transfer_loss_node}
losses.update(transfer_loss_node)
if 'PROTOTYPE' in self.transfer_cfg:
indx = tg_prototype.sum(-1).bool()
transfer_loss_prototype = self.transfer_loss_prototype(tg_prototype[indx].softmax(-1).log(),
sr_prototype[indx].softmax(-1))
transfer_loss_prototype = {"transfer_loss_prototype": transfer_loss_prototype}
losses.update(transfer_loss_prototype)
if 'ADJ' in self.transfer_cfg:
indx = tg_prototype.sum(dim=-1).bool()
adj_sr = sim_matrix(sr_prototype[indx], sr_prototype[indx]).view(1, -1)
adj_tg = sim_matrix(tg_prototype[indx], tg_prototype[indx]).view(1, -1)
cosine_target = adj_sr.new_ones(adj_sr.size())
transfer_loss_inter_class = self.transfer_loss_inter_class(adj_sr, adj_tg, cosine_target)
transfer_loss_inter_class = {"adj_loss": transfer_loss_inter_class}
losses.update(transfer_loss_inter_class)
if 'ADJ_COMPLETE' in self.transfer_cfg:
_indx = ~(tg_prototype.sum(dim=-1).bool())
tg_prototype_complete = tg_prototype
tg_prototype_complete[_indx] = sr_prototype[_indx]
adj_sr = sim_matrix(sr_prototype, sr_prototype).view(1, -1)
adj_tg = sim_matrix(tg_prototype_complete, tg_prototype_complete).view(1, -1)
cosine_target = adj_sr.new_ones(adj_sr.size())
transfer_loss_inter_class_complete = self.transfer_loss_inter_class(adj_sr, adj_tg, cosine_target)
transfer_loss_inter_class_complete = {"adj_complete_loss": transfer_loss_inter_class_complete}
losses.update(transfer_loss_inter_class_complete)
# print(losses)
if len(losses) > 0:
transfer_loss = sum(loss for loss in losses.values())
return transfer_loss
else:
return None
def _forward_train_target(self, images, features, targets=None, return_maps=False):
# STEP1: use conditioned kernels to obtain activation maps
return_act_maps = []
for l, feature in enumerate(features):
# see(feature)
conded_weight = self.get_conded_weight()
act_maps_logits = self.dynamic_conv(feature, conded_weight)
act_maps = act_maps_logits.softmax(
dim=1) if self.act_loss_cfg == 'softmaxFL' else act_maps_logits.sigmoid()
return_act_maps.append(act_maps)
# STEP2: use activation maps to sample graph nodes
pos_points, pos_labels, _ = self.prototype_evaluator(
locations=None, features=features, targets=return_act_maps
)
features = self.features_post_processing(features, return_act_maps) # POST PROCESSING
# STEP3: conduct graph-based semantic transfer
if (pos_points is not None) and ((self.transfer_cfg[0] is not None) or (self.with_self_training is True)):
# node_loss can be used for self-training
node_loss, tg_prototype_batch = self._forward_gcns(pos_points, pos_labels)
node_loss = self.lamda4 * node_loss
transfer_loss = self.get_transfer_loss(tg_prototype_batch, pos_points, pos_labels)
if transfer_loss:
transfer_loss = self.lamda3 * transfer_loss
if self.with_self_training:
return features, (node_loss, transfer_loss), None, return_act_maps
else:
return features, (None, transfer_loss), None, return_act_maps
else:
return features, None, None, return_act_maps
def _forward_inference(self, images, features, targets=None, return_maps=False):
return_act_maps = []
conded_weight = self.get_conded_weight()
for l, feature in enumerate(features):
act_maps_logits = self.dynamic_conv(feature, conded_weight)
act_maps = act_maps_logits.softmax(
dim=1) if self.act_loss_cfg == 'softmaxFL' else act_maps_logits.sigmoid()
return_act_maps.append(act_maps)
features = self.features_post_processing(features, return_act_maps)
return features, None, None, return_act_maps
def forward(self, images, features, targets=None, return_maps=False, mode='source', forward_target=False):
features = self.head_in(features)
if self.training and targets and mode == 'source':
return self._forward_train_source(images, features, targets, return_maps)
# elif self.training and not targets and (self.prototype == 0).sum() < 256 and self.transfer_cfg:
elif self.training and mode == 'target' and forward_target:
return self._forward_train_target(images, features, targets=None, return_maps=return_maps)
else:
return self._forward_inference(images, features, targets=None, return_maps=return_maps)
def update_prototype(self, prototype_batch, momentum=0.95, mode='mean'):
exist_indx = prototype_batch.sum(-1).bool()
prototype_batch = prototype_batch.detach()
if self.cfg.MODEL.MIDDLE_HEAD.COSINE_UPDATE_ON:
momentum = torch.cosine_similarity(self.prototype[exist_indx], prototype_batch[exist_indx]).unsqueeze(1)
self.prototype[exist_indx] = self.prototype[exist_indx] * momentum + prototype_batch[exist_indx] * (
1 - momentum)
else:
self.prototype[exist_indx] = self.prototype[exist_indx] * momentum + prototype_batch[exist_indx] * (
1 - momentum)
def update_prototype_nx1(self, prototype_batch, momentum=0.95):
iter = self.counter()
exist_indx = prototype_batch.sum(-1).bool()
prototype_batch = prototype_batch.detach()
if self.cfg.MODEL.MIDDLE_HEAD.COSINE_UPDATE_ON:
momentum = torch.cosine_similarity(self.prototype[exist_indx, :, iter],
prototype_batch[exist_indx]).unsqueeze(1)
# print(momentum)
self.prototype[exist_indx, :, iter] = self.prototype[exist_indx, :, iter] * momentum + \
prototype_batch[exist_indx] * (1 - momentum)
else:
self.prototype[exist_indx, :, iter] = self.prototype[exist_indx, :, iter].detach() * momentum + \
prototype_batch[exist_indx] * (1 - momentum)
def update_prototype_nx1_rnn(self, prototype_batch, momentum=0.95):
iter = self.counter_rnn()
exist_indx = prototype_batch.sum(-1).bool()
prototype_batch = prototype_batch.detach()
if self.cfg.MODEL.MIDDLE_HEAD.COSINE_UPDATE_ON:
if iter == self.prototype_iter:
momentum = torch.cosine_similarity(self.prototype[exist_indx, :, iter - 1],
prototype_batch[exist_indx]).unsqueeze(1)
# move
for i in range(iter - 1):
self.prototype[:, :, i] = self.prototype[:, :, i + 1]
# update t+1
self.prototype[exist_indx, :, iter - 1] = self.prototype[exist_indx, :, iter - 1] * momentum \
+ prototype_batch[exist_indx] * (1 - momentum)
else:
momentum = torch.cosine_similarity(self.prototype[exist_indx, :, iter],
prototype_batch[exist_indx]).unsqueeze(1)
self.prototype[exist_indx, :, iter] = self.prototype[exist_indx, :, iter] * momentum \
+ prototype_batch[exist_indx] * (1 - momentum)
else:
if iter == self.prototype_iter:
for i in range(iter - 1):
self.prototype[:, :, i] = self.prototype[:, :, i + 1]
# update t+1
self.prototype[exist_indx, :, iter - 1] = self.prototype[exist_indx, :, iter - 1] * momentum + \
prototype_batch[exist_indx] * (
1 - momentum)
else:
self.prototype[exist_indx, :, iter] = self.prototype[exist_indx, :, iter] * momentum \
+ prototype_batch[exist_indx] * (1 - momentum)
def dynamic_conv(self, features, kernel_par):
num_classes = self.used_num_classes
if self.with_bias_dc:
# WITH BIAS TERM
weight = kernel_par[:, :-1]
bias = kernel_par[:, -1]
weight = weight.view(num_classes, -1, 1, 1)
return torch.nn.functional.conv2d(features, weight, bias=bias)
else:
weight = kernel_par.view(num_classes, -1, 1, 1)
return torch.nn.functional.conv2d(features, weight)
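# Shape sketch for dynamic_conv (sizes are illustrative assumptions):
#   features:   (N, 256, H, W)
#   kernel_par: (num_classes, 256) -- or (num_classes, 257) with the bias term
#   output:     (N, num_classes, H, W) per-class activation logits, i.e. a
#   1x1 convolution whose kernels are the semantic-conditioned weights.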
def compute_locations(self, features):
locations = []
for level, feature in enumerate(features):
h, w = feature.size()[-2:]
locations_per_level = self.compute_locations_per_level(
h, w, self.fpn_strides[level],
feature.device
)
locations.append(locations_per_level)
return locations
def compute_locations_per_level(self, h, w, stride, device):
shifts_x = torch.arange(
0, w * stride, step=stride,
dtype=torch.float32, device=device
)
shifts_y = torch.arange(
0, h * stride, step=stride,
dtype=torch.float32, device=device
)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = shift_x.reshape(-1)
shift_y = shift_y.reshape(-1)
locations = torch.stack((shift_x, shift_y), dim=1) + stride // 2
return locations
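# Worked example: h=2, w=2, stride=8 yields the four shifted centers
#   (4, 4), (12, 4), (4, 12), (12, 12)
# as an (h*w, 2) tensor of (x, y) coordinates in image space.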
def save_targets(self, cfg, features, return_act_maps, conded_weight, targets):
# Save many features
self.loss_evaluator = make_fcos_loss_evaluator(cfg)
locations = self.compute_locations(features)
box_cls_gt, box_reg_gt, box_ctr_gt = self.loss_evaluator.replace_targets(
locations, return_act_maps, None, None, targets
)
self.debugger.save_feat(box_cls_gt, id='target_gt')
self.debugger.save_feat(return_act_maps, id='target_act_maps')
self.debugger.save_feat(features, id='target_feats')
self.debugger.save_feat(conded_weight, id='cond_weight')
self.debugger.save_feat(self.prototype, id='prototype')
os._exit(0)
def build_condgraph(cfg, in_channels):
return GRAPHModule(cfg, in_channels)
def test_nan(para, name='gcn'):
assert para.max() < INF, 'nan of {}'.format(name)
return para
|
CityU-AIM-Group/SCAN | fcos_core/engine/validation.py | <reponame>CityU-AIM-Group/SCAN<filename>fcos_core/engine/validation.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import time
import os
import torch
from tqdm import tqdm
from fcos_core.data.datasets.evaluation import validate
from ..utils.comm import is_main_process, get_world_size
from ..utils.comm import all_gather
from ..utils.comm import synchronize
from ..utils.timer import Timer, get_time_str
# from fcos_core.engine.trainer import foward_detector
from fcos_core.structures.image_list import to_image_list
def _foward_detector(cfg, model, images, targets=None, return_maps=False):
model_backbone = model["backbone"]
model_fcos = model["fcos"]
images = to_image_list(images)
features = model_backbone(images.tensors)
if cfg.MODEL.MIDDLE_HEAD.CONDGRAPH_ON:
middle_head = model["middle_head"]
features, _, _, return_act_maps = middle_head(images, features,return_maps=True)
else:
return_act_maps = None
proposals, proposal_losses, score_maps = model_fcos(
images, features, targets=targets, return_maps=return_maps, act_maps = return_act_maps)
# inference
return proposals
def compute_on_dataset(cfg, model, data_loader, device, timer=None):
# model.eval
for k in model:
model[k].eval()
results_dict = {}
cpu_device = torch.device("cpu")
for _, batch in enumerate(tqdm(data_loader)):
images, targets, image_ids = batch
images = images.to(device)
with torch.no_grad():
if timer:
timer.tic()
output = _foward_detector(cfg, model, images, targets=None)
if timer:
torch.cuda.synchronize()
timer.toc()
output = [o.to(cpu_device) for o in output]
results_dict.update(
{img_id: result for img_id, result in zip(image_ids, output)}
)
return results_dict
def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):
all_predictions = all_gather(predictions_per_gpu)
if not is_main_process():
return
# merge the list of dicts
predictions = {}
for p in all_predictions:
predictions.update(p)
# convert a dict where the key is the index in a list
image_ids = list(sorted(predictions.keys()))
if len(image_ids) != image_ids[-1] + 1:
logger = logging.getLogger("fcos_core.inference")
logger.warning(
"Number of images that were gathered from multiple processes is not "
"a contiguous set. Some images might be missing from the evaluation"
)
# convert to a list
predictions = [predictions[i] for i in image_ids]
return predictions
def _inference(
cfg,
model,
data_loader,
dataset_name,
iou_types=("bbox",),
box_only=False,
device="cuda",
expected_results=(),
expected_results_sigma_tol=4,
output_folder=None,
):
# convert to a torch.device for efficiency
device = torch.device(device)
num_devices = get_world_size()
logger = logging.getLogger("fcos_core.inference")
dataset = data_loader.dataset
logger.info("Start evaluation on {} dataset({} images).".format(dataset_name, len(dataset)))
total_timer = Timer()
inference_timer = Timer()
total_timer.tic()
predictions = compute_on_dataset(cfg, model, data_loader, device, inference_timer)
# wait for all processes to complete before measuring the time
synchronize()
predictions = _accumulate_predictions_from_multiple_gpus(predictions)
if not is_main_process():
return
if output_folder:
torch.save(predictions, os.path.join(output_folder, "predictions.pth"))
extra_args = dict(
box_only=box_only,
iou_types=iou_types,
expected_results=expected_results,
expected_results_sigma_tol=expected_results_sigma_tol,
)
return validate(dataset=dataset,
predictions=predictions,
output_folder=output_folder,
**extra_args)
|
CityU-AIM-Group/SCAN | fcos_core/modeling/rpn/fcos/loss.py | # --------------------------------------------------------
# SCAN: Cross-domain Object Detection with Semantic Conditioned Adaptation (AAAI22 ORAL)
# Modified by <NAME>
# This file contains specific functions for computing losses of FCOS
# This file covers the core operation for node sampling
# --------------------------------------------------------
import torch
from torch.nn import functional as F
from torch import nn
import numpy as np
from ..utils import concat_box_prediction_layers
from fcos_core.layers import IOULoss, SigmoidFocalLoss, MeanShift_GPU
from fcos_core.modeling.matcher import Matcher
from fcos_core.modeling.utils import cat
from fcos_core.structures.boxlist_ops import boxlist_iou
from fcos_core.structures.boxlist_ops import cat_boxlist
import os
from sklearn import cluster
import time
INF = 100000000
class FCOSLossComputation(object):
"""
This class computes the FCOS losses.
"""
def __init__(self, cfg):
self.cls_loss_func = SigmoidFocalLoss(
cfg.MODEL.FCOS.LOSS_GAMMA,
cfg.MODEL.FCOS.LOSS_ALPHA
)
# we make use of IOU Loss for bounding boxes regression,
# but we found that L1 in log scale can yield a similar performance
self.box_reg_loss_func = IOULoss()
self.centerness_loss_func = nn.BCEWithLogitsLoss()
def prepare_targets(self, points, targets):
object_sizes_of_interest = [
[-1, 64],
[64, 128],
[128, 256],
[256, 512],
[512, INF],
]
expanded_object_sizes_of_interest = []
for l, points_per_level in enumerate(points):
object_sizes_of_interest_per_level = \
points_per_level.new_tensor(object_sizes_of_interest[l])
expanded_object_sizes_of_interest.append(
object_sizes_of_interest_per_level[None].expand(len(points_per_level), -1)
)
expanded_object_sizes_of_interest = torch.cat(expanded_object_sizes_of_interest, dim=0)
num_points_per_level = [len(points_per_level) for points_per_level in points]
points_all_level = torch.cat(points, dim=0)
labels, reg_targets = self.compute_targets_for_locations(
points_all_level, targets, expanded_object_sizes_of_interest
)
for i in range(len(labels)):
labels[i] = torch.split(labels[i], num_points_per_level, dim=0)
reg_targets[i] = torch.split(reg_targets[i], num_points_per_level, dim=0)
labels_level_first = []
reg_targets_level_first = []
for level in range(len(points)):
labels_level_first.append(
torch.cat([labels_per_im[level] for labels_per_im in labels], dim=0)
)
reg_targets_level_first.append(
torch.cat([reg_targets_per_im[level] for reg_targets_per_im in reg_targets], dim=0)
)
return labels_level_first, reg_targets_level_first
def compute_targets_for_locations(self, locations, targets, object_sizes_of_interest):
locations = locations.cuda()
object_sizes_of_interest= object_sizes_of_interest.cuda()
labels = []
reg_targets = []
xs, ys = locations[:, 0], locations[:, 1]
for im_i in range(len(targets)):
targets_per_im = targets[im_i]
assert targets_per_im.mode == "xyxy"
bboxes = targets_per_im.bbox.cuda()
labels_per_im = targets_per_im.get_field("labels")
area = targets_per_im.area()
l = xs[:, None] - bboxes[:, 0][None]
t = ys[:, None] - bboxes[:, 1][None]
r = bboxes[:, 2][None] - xs[:, None]
b = bboxes[:, 3][None] - ys[:, None]
reg_targets_per_im = torch.stack([l, t, r, b], dim=2)
is_in_boxes = reg_targets_per_im.min(dim=2)[0] > 0
max_reg_targets_per_im = reg_targets_per_im.max(dim=2)[0]
# limit the regression range for each location
is_cared_in_the_level = \
(max_reg_targets_per_im >= object_sizes_of_interest[:, [0]]) & \
(max_reg_targets_per_im <= object_sizes_of_interest[:, [1]])
locations_to_gt_area = area[None].repeat(len(locations), 1)
locations_to_gt_area[is_in_boxes == 0] = INF
locations_to_gt_area[is_cared_in_the_level == 0] = INF
# if there are still more than one objects for a location,
# we choose the one with minimal area
locations_to_min_area, locations_to_gt_inds = locations_to_gt_area.min(dim=1)
reg_targets_per_im = reg_targets_per_im[range(len(locations)), locations_to_gt_inds]
labels_per_im = labels_per_im[locations_to_gt_inds]
labels_per_im[locations_to_min_area == INF] = 0
labels.append(labels_per_im)
reg_targets.append(reg_targets_per_im)
return labels, reg_targets
def compute_centerness_targets(self, reg_targets):
left_right = reg_targets[:, [0, 2]]
top_bottom = reg_targets[:, [1, 3]]
centerness = (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * \
(top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
return torch.sqrt(centerness)
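# Worked example: for regression targets (l, t, r, b) = (2, 2, 8, 8),
#   centerness = sqrt((2/8) * (2/8)) = 0.25,
# so locations near a box border get low centerness.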
def replace_targets(self,locations, box_cls, box_regression, centerness, targets):
N = box_cls[0].size(0)
num_classes = box_cls[0].size(1)
reg_targets_flatten = []
labels, reg_targets = self.prepare_targets(locations, targets)
tmp = []
for l in range(len(labels)):
reg_targets_flatten.append(reg_targets[l].reshape(-1, 4))
tmp.append(reg_targets[l].size(0))
reg_targets_flatten = torch.cat(reg_targets_flatten,dim=0)
centerness_targets = self.compute_centerness_targets(reg_targets_flatten)
centerness_targets_list = []
k = 0
for i in tmp:
centerness_targets_list.append(centerness_targets[k:k+i])
k += i
box_cls_gt = []
box_reg_gt = []
box_ctr_gt = []
for l in range(len(labels)):
n, c, h, w = box_cls[l].size()
if c >len(labels):
c=c-1
lb = F.one_hot(labels[l].reshape(-1), 9)[:,1:].float()
box_cls_gt.append(lb.reshape(n,h,w,c).permute(0,3,1,2).cuda())
box_reg_gt.append(reg_targets[l].reshape(-1).reshape(n,h,w,4).permute(0,3,1,2).cuda())
box_ctr_gt.append(centerness_targets_list[l].reshape(-1).reshape(n,h,w,1).permute(0,3,1,2).float().cuda())
return box_cls_gt, box_reg_gt, box_ctr_gt
def __call__(self, locations, box_cls, box_regression, centerness, targets):
"""
Arguments:
locations (list[BoxList])
box_cls (list[Tensor])
box_regression (list[Tensor])
centerness (list[Tensor])
targets (list[BoxList])
Returns:
cls_loss (Tensor)
reg_loss (Tensor)
centerness_loss (Tensor)
"""
N = box_cls[0].size(0)
num_classes = box_cls[0].size(1)
labels, reg_targets = self.prepare_targets(locations, targets)
box_cls_flatten = []
box_regression_flatten = []
centerness_flatten = []
labels_flatten = []
reg_targets_flatten = []
for l in range(len(labels)):
box_cls_flatten.append(box_cls[l].permute(0, 2, 3, 1).reshape(-1, num_classes))
box_regression_flatten.append(box_regression[l].permute(0, 2, 3, 1).reshape(-1, 4))
labels_flatten.append(labels[l].reshape(-1))
reg_targets_flatten.append(reg_targets[l].reshape(-1, 4))
centerness_flatten.append(centerness[l].reshape(-1))
box_cls_flatten = torch.cat(box_cls_flatten, dim=0)
box_regression_flatten = torch.cat(box_regression_flatten, dim=0)
centerness_flatten = torch.cat(centerness_flatten, dim=0)
labels_flatten = torch.cat(labels_flatten, dim=0)
reg_targets_flatten = torch.cat(reg_targets_flatten, dim=0)
pos_inds = torch.nonzero(labels_flatten > 0).squeeze(1)
cls_loss = self.cls_loss_func(
box_cls_flatten,
labels_flatten.int()
) / (pos_inds.numel() + N)  # add N to avoid dividing by zero
box_regression_flatten = box_regression_flatten[pos_inds]
reg_targets_flatten = reg_targets_flatten[pos_inds]
centerness_flatten = centerness_flatten[pos_inds]
if pos_inds.numel() > 0:
centerness_targets = self.compute_centerness_targets(reg_targets_flatten)
reg_loss = self.box_reg_loss_func(
box_regression_flatten,
reg_targets_flatten,
centerness_targets
)
centerness_loss = self.centerness_loss_func(
centerness_flatten,
centerness_targets
)
else:
reg_loss = box_regression_flatten.sum()
centerness_loss = centerness_flatten.sum()
return cls_loss, reg_loss, centerness_loss
def make_fcos_loss_evaluator(cfg):
loss_evaluator = FCOSLossComputation(cfg)
return loss_evaluator
class PrototypeComputation(object):
"""
This class computes the FCOS losses.
"""
def __init__(self, cfg):
self.num_class = cfg.MODEL.FCOS.NUM_CLASSES - 1
self.num_class_fgbg = cfg.MODEL.FCOS.NUM_CLASSES
self.cfg =cfg.clone()
if self.cfg.MODEL.MIDDLE_HEAD.TARGET_SAMPLING_CFG == 'mean_shift':
from fcos_core.layers import MeanShift_GPU
self.meanshift = MeanShift_GPU(batch_size=10000, bandwidth=0.1)
self.fg_bg_centers = torch.zeros(2, 256)
self.thrd_min_max = cfg.SOLVER.MIDDLE_HEAD.PLABEL_TH
# self.norm_cfg = cfg.MODEL.MIDDLE_HEAD.GCN_EDGE_NORM
# self.relu = torch.nn.ReLU().cuda()
# self.leakyrelu = torch.nn.LeakyReLU(0.1).cuda()
# self.gcn_out_activation = cfg.MODEL.MIDDLE_HEAD.GCN_OUT_ACTIVATION
# self.prototype_buffer_batch = torch.zeros(self.num_class, 256).cuda()
def prepare_targets(self, points, targets):
object_sizes_of_interest = [
[-1, 64],
[64, 128],
[128, 256],
[256, 512],
[512, INF],
]
expanded_object_sizes_of_interest = []
for l, points_per_level in enumerate(points):
object_sizes_of_interest_per_level = \
points_per_level.new_tensor(object_sizes_of_interest[l])
expanded_object_sizes_of_interest.append(
object_sizes_of_interest_per_level[None].expand(len(points_per_level), -1)
)
expanded_object_sizes_of_interest = torch.cat(expanded_object_sizes_of_interest, dim=0)
num_points_per_level = [len(points_per_level) for points_per_level in points]
points_all_level = torch.cat(points, dim=0)
labels, reg_targets = self.compute_targets_for_locations(
points_all_level, targets, expanded_object_sizes_of_interest
)
for i in range(len(labels)):
labels[i] = torch.split(labels[i], num_points_per_level, dim=0)
reg_targets[i] = torch.split(reg_targets[i], num_points_per_level, dim=0)
labels_level_first = []
reg_targets_level_first = []
for level in range(len(points)):
labels_level_first.append(
torch.cat([labels_per_im[level] for labels_per_im in labels], dim=0)
)
# reg_targets_level_first.append(
# torch.cat([reg_targets_per_im[level] for reg_targets_per_im in reg_targets], dim=0)
# )
return labels_level_first
def compute_targets_for_locations(self, locations, targets, object_sizes_of_interest):
labels = []
reg_targets = []
xs, ys = locations[:, 0], locations[:, 1]
for im_i in range(len(targets)):
targets_per_im = targets[im_i]
assert targets_per_im.mode == "xyxy"
bboxes = targets_per_im.bbox
labels_per_im = targets_per_im.get_field("labels")
area = targets_per_im.area()
l = xs[:, None] - bboxes[:, 0][None]
t = ys[:, None] - bboxes[:, 1][None]
r = bboxes[:, 2][None] - xs[:, None]
b = bboxes[:, 3][None] - ys[:, None]
reg_targets_per_im = torch.stack([l, t, r, b], dim=2)
is_in_boxes = reg_targets_per_im.min(dim=2)[0] > 0
max_reg_targets_per_im = reg_targets_per_im.max(dim=2)[0]
# limit the regression range for each location
is_cared_in_the_level = \
(max_reg_targets_per_im >= object_sizes_of_interest[:, [0]]) & \
(max_reg_targets_per_im <= object_sizes_of_interest[:, [1]])
locations_to_gt_area = area[None].repeat(len(locations), 1)
locations_to_gt_area[is_in_boxes == 0] = INF
locations_to_gt_area[is_cared_in_the_level == 0] = INF
# if there are still more than one objects for a location,
# we choose the one with minimal area
locations_to_min_area, locations_to_gt_inds = locations_to_gt_area.min(dim=1)
reg_targets_per_im = reg_targets_per_im[range(len(locations)), locations_to_gt_inds]
labels_per_im = labels_per_im[locations_to_gt_inds]
labels_per_im[locations_to_min_area == INF] = 0
labels.append(labels_per_im)
reg_targets.append(reg_targets_per_im)
return labels, reg_targets
def compute_centerness_targets(self, reg_targets):
left_right = reg_targets[:, [0, 2]]
top_bottom = reg_targets[:, [1, 3]]
centerness = (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * \
(top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
return torch.sqrt(centerness)
def KMEANS_batch(self, act_maps):
N, C, H, W = act_maps.size()
km = cluster.KMeans(init='k-means++', n_clusters=2,
random_state=1, n_jobs=-1, n_init=2)
Y = km.fit_predict(act_maps.numpy().reshape(-1, 1))
return Y.reshape(N, C, H, W).cuda()
def KMEANS_batch_ClS_FEAT(self, act_maps, features, score_mask=True):
mask = (act_maps > 0.5).int()
act_maps=act_maps.detach().cpu()
features=features.detach().cpu()
N, CLS, H, W = act_maps.size()
N, CHANNEL, H, W = features.size()
features_n = []
# CLS, N, CHANNEL, H, W
for i in range(CLS):
# print(act_maps[i].unsqueeze(1).shape)
features_n.append((features* act_maps[:, i, :, :].unsqueeze(1)).unsqueeze(0))
features_n = torch.cat(features_n, dim=0)
# N, CLS,H, W, CHANNEL
features_n = features_n.permute(1, 0, 3, 4, 2).reshape(-1, CHANNEL)
center_update = self.fg_bg_centers.sum() == 0
if center_update:
km = cluster.KMeans(init='k-means++', n_clusters=2,
random_state=1, n_init=2)
Y = km.fit_predict(features_n.numpy())
self.fg_bg_centers = torch.Tensor(km.cluster_centers_)
else:
Y = cluster.KMeans(init=self.fg_bg_centers.numpy(), n_clusters=2,
random_state=1,n_init=2).fit_predict(features_n.numpy())
Y = torch.Tensor(Y.reshape(N, CLS, H, W)).cuda() * mask
# Post processing
if (Y == 0).sum() < (Y == 1).sum():
Y = 1 - Y
print('clustering error: foreground cluster is the majority, flipping labels')
Y = Y.permute(0, 2, 3, 1).reshape(-1, CLS).sum(-1).bool()
return Y
def DBSCAN_batch_cpu(self, act_maps, features,):
act_maps=act_maps.detach().cpu()
features=features.detach().cpu()
N, CLS, H, W = act_maps.size()
N, CHANNEL, H, W = features.size()
features_n = []
# CLS, N, CHANNEL, H, W
for i in range(CLS):
# print(act_maps[i].unsqueeze(1).shape)
features_n.append((features* act_maps[:, i, :, :].unsqueeze(1)).unsqueeze(0))
features_n = torch.cat(features_n, dim=0)
features_n = features_n.permute(1, 0, 3, 4, 2).reshape(-1, CHANNEL)
# Score mask
mask = (act_maps > self.cfg.MODEL.MIDDLE_HEAD.DBSCAN_THR).bool().unsqueeze(-1).reshape(-1)
mask_float = (act_maps > self.cfg.MODEL.MIDDLE_HEAD.DBSCAN_THR).float().unsqueeze(-1).reshape(-1)
pos_feats = features_n[mask]
if pos_feats.bool().any():
Y = cluster.DBSCAN(eps=self.cfg.MODEL.MIDDLE_HEAD.DBSCAN_EPS, n_jobs=-1).fit_predict(pos_feats.numpy())
Y[Y < 0] = 1
mask_float[mask] = torch.Tensor(Y)
Y = torch.Tensor(mask_float.reshape(N, CLS, H, W))
Y = Y.permute(0, 2, 3, 1).reshape(-1, CLS).sum(-1).bool().cuda()
return Y
def __call__(self, locations, features, targets):
if locations:
N, C, _, _ = features[0].size()
labels = self.prepare_targets(locations, targets)
pos_points = []
pos_labels = []
neg_points = []
for l in range(len(labels)):
pos_indx = labels[l].reshape(-1) > 0
neg_indx = labels[l].reshape(-1) == 0
pos_points.append(features[l].permute(0, 2, 3, 1).reshape(-1, C)[pos_indx])
pos_labels.append(labels[l][pos_indx])
if self.cfg.MODEL.MIDDLE_HEAD.PROTO_WITH_BG:
neg_points_temp = features[l].permute(0, 2, 3, 1).reshape(-1, C)[neg_indx]
if len(labels[l][pos_indx]) > len(labels[l][neg_indx]):
neg_points.append(features[l].permute(0, 2, 3, 1).reshape(-1, C)[neg_indx])
else:
neg_indx = list(np.floor(np.linspace(0,len(labels[l][neg_indx])-2, len(labels[l][pos_indx]))).astype(int))
neg_points.append(neg_points_temp[neg_indx])
pos_points = torch.cat(pos_points,dim=0)
pos_labels = torch.cat(pos_labels,dim=0)
if self.cfg.MODEL.MIDDLE_HEAD.PROTO_WITH_BG:
neg_points = torch.cat(neg_points, dim=0)
neg_labels = pos_labels.new_zeros((neg_points.size(0)))
pos_points = torch.cat([neg_points, pos_points] ,dim=0)
pos_labels = torch.cat([neg_labels, pos_labels] )
# ipdb.set_trace()
if self.cfg.MODEL.MIDDLE_HEAD.ACT_LOSS:
return pos_points, pos_labels, labels
else:
return pos_points, pos_labels
else:
# k = (targets[0][:, 1:, :, :] > 0.05).sum().float() /(targets[0][:,1:,:,:]>0.05).numel()
# print(k)
if True:
# print('start self-training')
## for tg
act_maps_lvl_first = targets
N, C, _, _ = features[0].size()
N, Cls, _, _ = targets[0].size()
neg_points =[]
pos_plabels = []
pos_points = []
start = time.time()
for l, feature in enumerate(features):
if self.cfg.MODEL.MIDDLE_HEAD.TARGET_SAMPLING_CFG == 'score_threshold':
act_maps = act_maps_lvl_first[l].permute(0, 2, 3, 1).reshape(-1, self.num_class_fgbg)
conf_pos_indx = (act_maps[:, 1:] > self.thrd_min_max[0]).sum(dim=-1).bool()
elif self.cfg.MODEL.MIDDLE_HEAD.TARGET_SAMPLING_CFG == 'mean_shift':
act_maps = act_maps_lvl_first[l].permute(0, 2, 3, 1).reshape(N, -1,self.num_class_fgbg)
conf_pos_indx = act_maps.new_zeros(N, act_maps.size(1), self.num_class_fgbg-1)
for img in range(N):
for cls in range(self.num_class_fgbg-1):
conf_pos_indx[img,:,cls] = self.meanshift.fit(act_maps[img,:,cls+1].unsqueeze(-1))[0]
conf_pos_indx = conf_pos_indx.sum(dim=-1).bool()
elif self.cfg.MODEL.MIDDLE_HEAD.TARGET_SAMPLING_CFG == 'kmeans':
conf_pos_indx = self.KMEANS_batch_ClS_FEAT(act_maps_lvl_first[l][:,1:,:,:],feature, score_mask=True)
elif self.cfg.MODEL.MIDDLE_HEAD.TARGET_SAMPLING_CFG == 'dbscan':
conf_pos_indx = self.DBSCAN_batch_cpu(act_maps_lvl_first[l][:, 1:, :, :], feature)
else:
raise KeyError('unknown target labels!')
# Balanced sampling BG pixels
if conf_pos_indx.any():
act_maps = act_maps_lvl_first[l].permute(0, 2, 3, 1).reshape(-1, self.num_class_fgbg)
pos_points.append(features[l].permute(0, 2, 3, 1).reshape(-1, C)[conf_pos_indx])
pos_plabels.append(act_maps[conf_pos_indx,1:].argmax(dim=-1) + 1)
neg_indx = ~conf_pos_indx
neg_points_temp = features[l].permute(0, 2, 3, 1).reshape(-1, C)[neg_indx]
neg_indx_new = list(np.floor(np.linspace(0, neg_indx.sum().item() - 2, conf_pos_indx.sum().item())).astype(int))
neg_points.append(neg_points_temp[neg_indx_new])
end = time.time()
# print(end-start)
if len(pos_points)>0:
pos_points = torch.cat(pos_points,dim=0)
pos_plabels = torch.cat(pos_plabels,dim=0)
neg_points = torch.cat(neg_points, dim=0)
neg_plabels = pos_plabels.new_zeros((neg_points.size(0)))
points = torch.cat([neg_points, pos_points], dim=0)
plabels = torch.cat([neg_plabels, pos_plabels])
# print(len(pos_points))
return points, plabels, None
else:
return None, None, None
else:
return None, None, None
def make_prototype_evaluator(cfg):
prototype_evaluator = PrototypeComputation(cfg)
return prototype_evaluator
|
CityU-AIM-Group/SCAN | fcos_core/modeling/discriminator/fcos_head_discriminator_CA.py | <reponame>CityU-AIM-Group/SCAN
import torch
import torch.nn.functional as F
from torch import nn
from .layer import GradientReversal, FocalLoss
class FCOSDiscriminator_CA(nn.Module):
def __init__(self, num_convs=2, in_channels=256, grad_reverse_lambda=-1.0, center_aware_weight=0.0, center_aware_type='ca_loss', grl_applied_domain='both'):
"""
Arguments:
in_channels (int): number of channels of the input feature
"""
super(FCOSDiscriminator_CA, self).__init__()
dis_tower = []
for i in range(num_convs):
dis_tower.append(
nn.Conv2d(
in_channels,
in_channels,
kernel_size=3,
stride=1,
padding=1
)
)
dis_tower.append(nn.GroupNorm(32, in_channels))
dis_tower.append(nn.ReLU())
self.add_module('dis_tower', nn.Sequential(*dis_tower))
self.cls_logits = nn.Conv2d(
in_channels, 1, kernel_size=3, stride=1,
padding=1
)
# initialization
for modules in [self.dis_tower, self.cls_logits]:
for l in modules.modules():
if isinstance(l, nn.Conv2d):
torch.nn.init.normal_(l.weight, std=0.01)
torch.nn.init.constant_(l.bias, 0)
self.grad_reverse = GradientReversal(grad_reverse_lambda)
self.loss_fn = nn.BCEWithLogitsLoss()
self.loss_fn_no_reduce = nn.BCEWithLogitsLoss(reduction='none')
# hyperparameters
# assert center_aware_type == 'ca_loss' or center_aware_type == 'ca_feature'
self.center_aware_weight = center_aware_weight
self.center_aware_type = center_aware_type
assert grl_applied_domain == 'both' or grl_applied_domain == 'target'
self.grl_applied_domain = grl_applied_domain
def forward(self, feature, target, score_map=None, domain='source'):
assert target == 0 or target == 1 or target == 0.1 or target == 0.9
assert domain == 'source' or domain == 'target'
# Generate center-aware map
box_cls_map = score_map["box_cls"].clone().sigmoid()
centerness_map = score_map["centerness"].clone().sigmoid()
n, c, h, w = box_cls_map.shape
maxpooling = nn.AdaptiveMaxPool3d((1, h, w))
box_cls_map = maxpooling(box_cls_map)
# Normalize the center-aware map
atten_map = (self.center_aware_weight * box_cls_map * centerness_map).sigmoid()
# Compute loss
# Center-aware loss (w/ GRL)
if self.center_aware_type == 'ca_loss':
if self.grl_applied_domain == 'both':
feature = self.grad_reverse(feature)
elif self.grl_applied_domain == 'target':
if domain == 'target':
feature = self.grad_reverse(feature)
# Forward
x = self.dis_tower(feature)
x = self.cls_logits(x)
# Compute loss
target = torch.full(x.shape, target, dtype=torch.float, device=x.device)
loss = self.loss_fn_no_reduce(x, target)
loss = torch.mean(atten_map * loss)
# Center-aware feature (w/ GRL)
elif self.center_aware_type == 'ca_feature':
if self.grl_applied_domain == 'both':
feature = self.grad_reverse(atten_map * feature)
elif self.grl_applied_domain == 'target':
if domain == 'target':
feature = self.grad_reverse(atten_map * feature)
# Forward
x = self.dis_tower(feature)
x = self.cls_logits(x)
target = torch.full(x.shape, target, dtype=torch.float, device=x.device)
loss = self.loss_fn(x, target)
elif self.center_aware_type == 'focal':
if self.grl_applied_domain == 'both':
feature = self.grad_reverse(atten_map * feature)
elif self.grl_applied_domain == 'target':
if domain == 'target':
feature = self.grad_reverse(atten_map * feature)
# Forward
x = self.dis_tower(feature)
x = self.cls_logits(x)
target = torch.full(x.shape, target, dtype=torch.float, device=x.device)
# print(x, target)
# input()
loss = FocalLoss(x, target)
# loss = self.focal_loss(x, target)
# print('center')
# print(x, target)
return loss
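# A minimal smoke-test sketch (illustrative only; run inside fcos_core so the
# relative import of GradientReversal/FocalLoss above resolves; shapes are
# assumptions, not values from this repo):
#
#   disc = FCOSDiscriminator_CA(center_aware_weight=20.0)
#   feat = torch.randn(2, 256, 16, 20)
#   score_map = {'box_cls': torch.randn(2, 8, 16, 20),
#                'centerness': torch.randn(2, 1, 16, 20)}
#   loss = disc(feat, 1, score_map=score_map, domain='source')  # scalar loss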
|
CityU-AIM-Group/SCAN | fcos_core/engine/trainer.py | <filename>fcos_core/engine/trainer.py<gh_stars>1-10
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import datetime
import logging
import time
# import ipdb
import torch
import torch.distributed as dist
from fcos_core.utils.comm import get_world_size, is_pytorch_1_1_0_or_later
from fcos_core.utils.metric_logger import MetricLogger
from fcos_core.structures.image_list import to_image_list
import os
from fcos_core.data import make_data_loader, make_data_loader_source, make_data_loader_target
from fcos_core.utils.miscellaneous import mkdir
from .validation import _inference
from fcos_core.utils.comm import synchronize
def foward_detector(cfg, model, images, targets=None, return_maps=True, mode='source', forward_target=False):
with_middle_head = cfg.MODEL.MIDDLE_HEAD.CONDGRAPH_ON
map_layer_to_index = {"P3": 0, "P4": 1, "P5": 2, "P6": 3, "P7": 4}
feature_layers = map_layer_to_index.keys()
model_backbone = model["backbone"]
# if with_middle_head:
model_fcos = model["fcos"]
images = to_image_list(images)
features = model_backbone(images.tensors)
losses = {}
if with_middle_head:
model_middle_head = model["middle_head"]
features, loss_graph, loss_act_map, return_act_maps = model_middle_head(images, features, targets=targets,
return_maps=return_maps, mode = mode, forward_target=forward_target)
if loss_graph is not None:
node_loss, consistency_loss = loss_graph
if consistency_loss:
consistency_loss = {"consistency_loss": consistency_loss}
losses.update(consistency_loss)
if node_loss:
node_loss = {"node_loss": node_loss}
losses.update(node_loss)
if loss_act_map is not None:
act_loss = {"act_loss": loss_act_map}
losses.update(act_loss)
else:
return_act_maps = None
proposals, proposal_losses, score_maps = model_fcos(
images, features, targets=targets, return_maps=return_maps, act_maps=return_act_maps)
f = {
layer: features[map_layer_to_index[layer]]
for layer in feature_layers
}
if return_act_maps:
return_act_maps = {
layer: return_act_maps[map_layer_to_index[layer]]
for layer in feature_layers
}
if model_fcos.training:
if not targets:
assert len(proposal_losses) == 1 and proposal_losses["zero"] == 0
losses.update(proposal_losses)
return losses, f, return_act_maps
else:
# inference
result = proposals
return result
def reduce_loss_dict(loss_dict):
"""
Reduce the loss dictionary from all processes so that process with rank
0 has the averaged results. Returns a dict with the same fields as
loss_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return loss_dict
with torch.no_grad():
loss_names = []
all_losses = []
for k in sorted(loss_dict.keys()):
loss_names.append(k)
all_losses.append(loss_dict[k])
all_losses = torch.stack(all_losses, dim=0)
dist.reduce(all_losses, dst=0)
if dist.get_rank() == 0:
# only main process gets accumulated, so only divide by
# world_size in this case
all_losses /= world_size
reduced_losses = {k: v for k, v in zip(loss_names, all_losses)}
return reduced_losses
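# Example (illustrative): with a single process (get_world_size() == 1) the
# dict is returned unchanged, e.g.
#   reduce_loss_dict({'loss_cls': torch.tensor(0.5)})  ->  same dict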
def validataion(cfg, model, data_loader, distributed=False):
if distributed:
model["backbone"] = model["backbone"].module
model["fcos"] = model["fcos"].module
iou_types = ("bbox",)
dataset_name = cfg.DATASETS.TEST
    assert len(data_loader) == 1, "More than one validation set!"
data_loader = data_loader[0]
# for dataset_name, data_loader_val in zip( dataset_names, data_loader):
results, _ = _inference(
cfg,
model,
data_loader,
dataset_name=dataset_name,
iou_types=iou_types,
box_only=False if cfg.MODEL.ATSS_ON or cfg.MODEL.FCOS_ON or cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,
device=cfg.MODEL.DEVICE,
expected_results=cfg.TEST.EXPECTED_RESULTS,
expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
output_folder=None,
)
synchronize()
return results
def do_train(
model,
data_loader,
optimizer,
scheduler,
checkpointer,
device,
checkpoint_period,
arguments,
cfg,
distributed,
meters,
):
with_DA = cfg.MODEL.DA_ON
data_loader_source = data_loader["source"]
# Start training
logger = logging.getLogger("fcos_core.trainer")
logger.info("Start training")
# model.train()
for k in model:
model[k].train()
start_iter = arguments["iteration"]
start_training_time = time.time()
end = time.time()
AP50 = cfg.SOLVER.INITIAL_AP50
AP50_emp = 0
pytorch_1_1_0_or_later = is_pytorch_1_1_0_or_later()
if not with_DA:
max_iter = len(data_loader_source)
        for iteration, (images_s, targets_s, _) in enumerate(data_loader_source, start_iter):
data_time = time.time() - end
iteration = iteration + 1
arguments["iteration"] = iteration
if not pytorch_1_1_0_or_later:
# scheduler.step()
for k in scheduler:
scheduler[k].step()
images_s = images_s.to(device)
targets_s = [target_s.to(device) for target_s in targets_s]
for k in optimizer:
optimizer[k].zero_grad()
##########################################################################
#################### (1): train G with source domain #####################
##########################################################################
loss_dict, features_s, score_maps_s = foward_detector(cfg, model, images_s, targets=targets_s, return_maps=True, mode='source')
loss_dict = {k + "_gs": loss_dict[k] for k in loss_dict}
losses = sum(loss for loss in loss_dict.values())
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = reduce_loss_dict(loss_dict)
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
meters.update(loss_gs=losses_reduced, **loss_dict_reduced)
losses.backward()
for k in optimizer:
optimizer[k].step()
if pytorch_1_1_0_or_later:
# scheduler.step()
for k in scheduler:
scheduler[k].step()
# End of training
batch_time = time.time() - end
end = time.time()
meters.update(time=batch_time, data=data_time)
eta_seconds = meters.time.global_avg * (max_iter - iteration)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if iteration % 20 == 0 or iteration == max_iter:
logger.info(
meters.delimiter.join([
"eta: {eta}",
"iter: {iter}",
"{meters}",
"lr_backbone: {lr_backbone:.6f}",
"lr_fcos: {lr_fcos:.6f}",
"max mem: {memory:.0f}",
]).format(
eta=eta_string,
iter=iteration,
meters=str(meters),
lr_backbone=optimizer["backbone"].param_groups[0]["lr"],
lr_fcos=optimizer["fcos"].param_groups[0]["lr"],
memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0, ))
if cfg.SOLVER.ADAPT_VAL_ON:
if iteration % cfg.SOLVER.VAL_ITER == 0:
val_results = validataion(cfg, model, data_loader["val"], distributed)
# used for saving model
AP50_emp = val_results.results['bbox'][cfg.SOLVER.VAL_TYPE] * 100
# used for logging
                    meter_AP50 = val_results.results['bbox']['AP50'] * 100
                    meter_AP = val_results.results['bbox']['AP'] * 100
                    meters.update(AP=meter_AP, AP50=meter_AP50)
if AP50_emp > AP50:
AP50 = AP50_emp
checkpointer.save("model_{}_{:07d}".format(AP50, iteration), **arguments)
print('***warning****,\n best model updated. {}: {}, iter: {}'.format(cfg.SOLVER.VAL_TYPE, AP50,
iteration))
if distributed:
model["backbone"] = model["backbone"].module
model["middle_head"] = model["middle_head"].module
model["fcos"] = model["fcos"].module
for k in model:
model[k].train()
else:
if iteration % checkpoint_period == 0:
checkpointer.save("model_{:07d}".format(iteration), **arguments)
# save the last model
if iteration == max_iter:
checkpointer.save("model_final", **arguments)
else:
data_loader_target = data_loader["target"]
max_iter = max(len(data_loader_source), len(data_loader_target))
USE_DIS_GLOBAL = arguments["use_dis_global"]
USE_DIS_CENTER_AWARE = arguments["use_dis_ca"]
USE_DIS_OUT = arguments["use_dis_out"]
USE_DIS_CON = arguments["use_dis_con"]
used_feature_layers = arguments["use_feature_layers"]
        # domain labels fed to the discriminators when classifying source vs. target
source_label = 1.0
target_label = 0.0
# dis_lambda
if USE_DIS_GLOBAL:
ga_dis_lambda = arguments["ga_dis_lambda"]
if USE_DIS_CENTER_AWARE:
ca_dis_lambda = arguments["ca_dis_lambda"]
if USE_DIS_OUT:
out_dis_lambda = arguments["out_dis_lambda"]
if USE_DIS_CON:
con_dis_lambda = arguments["con_dis_lambda"]
assert len(data_loader_source) == len(data_loader_target)
for iteration, ((images_s, targets_s, _), (images_t, targets_t, _)) in enumerate(zip(data_loader_source, data_loader_target), start_iter):
data_time = time.time() - end
iteration = iteration + 1
arguments["iteration"] = iteration
# in pytorch >= 1.1.0, scheduler.step() should be run after optimizer.step()
if not pytorch_1_1_0_or_later:
# scheduler.step()
for k in scheduler:
scheduler[k].step()
images_s = images_s.to(device)
targets_s = [target_s.to(device) for target_s in targets_s]
images_t = images_t.to(device)
# targets_t = [target_t.to(device) for target_t in targets_t]
for k in optimizer:
optimizer[k].zero_grad()
##########################################################################
#################### (1): train G with source domain #####################
##########################################################################
loss_dict, features_s, score_maps_s = foward_detector(cfg,
model, images_s, targets=targets_s, return_maps=True, mode='source')
loss_dict = {k + "_gs": loss_dict[k] for k in loss_dict}
losses = sum(loss for loss in loss_dict.values())
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = reduce_loss_dict(loss_dict)
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
meters.update(loss_gs=losses_reduced, **loss_dict_reduced)
losses.backward(retain_graph=True)
# del loss_dict, losses
##########################################################################
#################### (2): train D with source domain #####################
##########################################################################
# TODO GCNs computation graph
# loss_dict = {}
if cfg.MODEL.MIDDLE_HEAD.CONDGRAPH_ON:
loss_dict = {'zeros': 0 * loss_dict['node_loss_gs']}
else:
loss_dict = {}
for layer in used_feature_layers:
if USE_DIS_GLOBAL:
loss_dict["loss_adv_%s_ds" % layer] = \
ga_dis_lambda * model["dis_%s" % layer](features_s[layer], source_label, domain='source')
if USE_DIS_CENTER_AWARE:
                    # detach score_map
for map_type in score_maps_s[layer]:
score_maps_s[layer][map_type] = score_maps_s[layer][map_type].detach()
loss_dict["loss_adv_%s_CA_ds" % layer] = \
ca_dis_lambda * model["dis_%s_CA" % layer]\
(features_s[layer], source_label, score_maps_s[layer], domain='source')
if USE_DIS_OUT:
loss_dict["loss_adv_%s_OUT_ds" % layer] = \
out_dis_lambda * model["dis_%s_OUT" % layer]\
(source_label, score_maps_s[layer], domain='source')
if USE_DIS_CON:
loss_dict["loss_adv_%s_CON_ds" % layer] = \
con_dis_lambda * model["dis_%s_CON" % layer]\
(features_s[layer], source_label,score_maps_s[layer], domain='source')
losses = sum(loss for loss in loss_dict.values())
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = reduce_loss_dict(loss_dict)
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
meters.update(loss_ds=losses_reduced, **loss_dict_reduced)
losses.backward()
del loss_dict, losses
##########################################################################
#################### (3): train D with target domain #####################
##########################################################################
#TODO A better dynamic strategy
forward_target = AP50_emp > cfg.SOLVER.INITIAL_AP50
loss_dict, features_t, score_maps_t = foward_detector(cfg, model, images_t, return_maps=True, mode='target',forward_target=forward_target)
loss_dict = {k + "_gt": loss_dict[k] for k in loss_dict}
# losses = sum(loss for loss in loss_dict.values())
# assert len(loss_dict) == 1 and loss_dict["zero"] == 0 # loss_dict should be empty dict
# loss_dict["loss_adv_Pn"] = model_dis_Pn(features_t["Pn"], target_label, domain='target')
for layer in used_feature_layers:
                # detach score_map
if USE_DIS_GLOBAL:
loss_dict["loss_adv_%s_dt" % layer] = \
ga_dis_lambda * model["dis_%s" % layer]\
(features_t[layer], target_label, domain='target')
if USE_DIS_CENTER_AWARE:
for map_type in score_maps_t[layer]:
score_maps_t[layer][map_type] = score_maps_t[layer][map_type].detach()
loss_dict["loss_adv_%s_CA_dt" %layer] = \
ca_dis_lambda * model["dis_%s_CA" % layer]\
(features_t[layer], target_label, score_maps_t[layer], domain='target')
if USE_DIS_OUT:
loss_dict["loss_adv_%s_OUT_dt" %layer] = \
out_dis_lambda * model["dis_%s_OUT" % layer]\
(target_label, score_maps_t[layer], domain='target')
if USE_DIS_CON:
loss_dict["loss_adv_%s_CON_dt" % layer] = \
con_dis_lambda * model["dis_%s_CON" % layer]\
(features_t[layer], target_label, score_maps_t[layer], domain='target')
losses = sum(loss for loss in loss_dict.values())
# del loss_dict['zero_gt']
# # reduce losses over all GPUs for logging purposes
loss_dict_reduced = reduce_loss_dict(loss_dict)
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
meters.update(loss_dt=losses_reduced, **loss_dict_reduced)
losses.backward()
del loss_dict, losses
# saved GRL gradient
# grad_list = []
# for layer in used_feature_layers:
# def save_grl_grad(grad):
# grad_list.append(grad)
# features_t[layer].register_hook(save_grl_grad)
# print(' back D with target domain')
# Uncomment to log GRL gradient
# grl_grad = {}
# grl_grad_log = {}
# grl_grad = {
# layer: grad_list[i]
# for i, layer in enumerate(used_feature_layers)
# }
# for layer in used_feature_layers:
# saved_grad = grl_grad[layer]
# grl_grad_log["grl_%s_abs_mean" % layer] = torch.mean(
# torch.abs(saved_grad)) * 10e4
# grl_grad_log["grl_%s_mean" % layer] = torch.mean(saved_grad) * 10e6
# grl_grad_log["grl_%s_std" % layer] = torch.std(saved_grad) * 10e6
# grl_grad_log["grl_%s_max" % layer] = torch.max(saved_grad) * 10e6
# grl_grad_log["grl_%s_min" % layer] = torch.min(saved_grad) * 10e6
# meters.update(**grl_grad_log)
# del loss_dict, losses, grad_list, grl_grad, grl_grad_log
##########################################################################
##########################################################################
##########################################################################
# optimizer.step()
for k in optimizer:
optimizer[k].step()
if pytorch_1_1_0_or_later:
# scheduler.step()
for k in scheduler:
scheduler[k].step()
# End of training
batch_time = time.time() - end
end = time.time()
meters.update(time=batch_time, data=data_time)
eta_seconds = meters.time.global_avg * (max_iter - iteration)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
sample_layer = used_feature_layers[0] # sample any one of used feature layer
if USE_DIS_GLOBAL:
sample_optimizer = optimizer["dis_%s" % sample_layer]
if USE_DIS_CENTER_AWARE:
sample_optimizer = optimizer["dis_%s_CA" % sample_layer]
if USE_DIS_OUT:
sample_optimizer = optimizer["dis_%s_OUT" % sample_layer]
if USE_DIS_CON:
sample_optimizer = optimizer["dis_%s_CON" % sample_layer]
if iteration % 20 == 0 or iteration == max_iter:
logger.info(
meters.delimiter.join([
"eta: {eta}",
"iter: {iter}",
"{meters}",
"lr_backbone: {lr_backbone:.6f}",
"lr_middle_head: {lr_middle_head:.6f}",
"lr_fcos: {lr_fcos:.6f}",
"lr_dis: {lr_dis:.6f}",
"max mem: {memory:.0f}",
]).format(
eta=eta_string,
iter=iteration,
meters=str(meters),
lr_backbone=optimizer["backbone"].param_groups[0]["lr"],
lr_middle_head=optimizer["middle_head"].param_groups[0]["lr"],
lr_fcos=optimizer["fcos"].param_groups[0]["lr"],
lr_dis=sample_optimizer.param_groups[0]["lr"],
memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
))
if cfg.SOLVER.ADAPT_VAL_ON:
                if iteration % cfg.SOLVER.VAL_ITER == 0:
val_results = validataion(cfg, model, data_loader["val"], distributed)
# used for saving model
AP50_emp = val_results.results['bbox'][cfg.SOLVER.VAL_TYPE] * 100
# used for logging
meter_AP50 = val_results.results['bbox']['AP50'] * 100
meter_AP = val_results.results['bbox']['AP'] * 100
meters.update(AP=meter_AP, AP50=meter_AP50)
if AP50_emp > AP50:
AP50 = AP50_emp
checkpointer.save("model_{}_{:07d}".format(AP50, iteration), **arguments)
print('***warning****,\n best model updated. {}: {}, iter: {}'.format(cfg.SOLVER.VAL_TYPE, AP50, iteration))
if distributed:
model["backbone"] = model["backbone"].module
model["middle_head"] = model["middle_head"].module
model["fcos"] = model["fcos"].module
for k in model:
model[k].train()
else:
if iteration % checkpoint_period == 0:
checkpointer.save("model_{:07d}".format(iteration), **arguments)
if iteration == max_iter:
checkpointer.save("model_final", **arguments)
total_training_time = time.time() - start_training_time
total_time_str = str(datetime.timedelta(seconds=total_training_time))
logger.info("Total training time: {} ({:.4f} s / it)".format(
total_time_str, total_training_time / (max_iter)))
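# Sketch of the inputs do_train expects (an illustration inferred from the loop
# above, not a verbatim launch script): model, optimizer and scheduler are
# plain dicts keyed by component, e.g.
#   model = {'backbone': ..., 'fcos': ..., 'middle_head': ..., 'dis_P3': ...}
# with matching optimizer['backbone'], scheduler['backbone'], etc., and
#   data_loader = {'source': ..., 'target': ..., 'val': ...}
# when cfg.MODEL.DA_ON is set (only the source/val loaders are used otherwise).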
|
CityU-AIM-Group/SCAN | fcos_core/modeling/discriminator/fcos_head_discriminator_con.py | # --------------------------------------------------------
# SCAN: Cross-domain Object Detection with Semantic Conditioned Adaptation (AAAI22 ORAL)
# Written by <NAME>
# This file covers the Conditional Kernel guided Alignment (CKA)
# --------------------------------------------------------
import torch
import torch.nn.functional as F
from torch import nn
from .layer import GradientReversal
class FCOSDiscriminator_con(nn.Module):
def __init__(self, with_GA=False, fusion_cfg='concat', num_convs=3, in_channels=256, num_classes=2,
grad_reverse_lambda=-1.0, grl_applied_domain='both', patch_stride=None, cfg=None):
"""
Arguments:
in_channels (int): number of channels of the input feature
"""
super(FCOSDiscriminator_con, self).__init__()
dis_tower = []
for i in range(num_convs):
dis_tower.append(
nn.Conv2d(
in_channels,
in_channels,
kernel_size=3,
stride=1,
padding=1
)
)
dis_tower.append(nn.GroupNorm(32, in_channels))
dis_tower.append(nn.ReLU())
self.add_module('dis_tower', nn.Sequential(*dis_tower))
self.use_bg = False
if self.use_bg:
self.num_classes = num_classes
else:
self.num_classes = num_classes - 1
self.with_GA = with_GA
self.fusion_cfg = fusion_cfg
self.class_cond_map = []
for i in range(self.num_classes):
self.class_cond_map.append(
nn.Sequential(
nn.Conv2d(
in_channels + 1 if self.fusion_cfg == 'concat' else in_channels,
128, #128
kernel_size=3,
stride=1,
padding=1
),
# nn.GroupNorm(32, in_channels),
nn.ReLU(),
nn.Conv2d(
128, 1, kernel_size=3, stride=1,
padding=1)
)
)
for i, block in enumerate(self.class_cond_map):
self.add_module('classifier_cls_{}'.format(i), block)
self.patch_stride = patch_stride
        assert patch_stride is None or isinstance(patch_stride, int), 'wrong format of patch stride'
for modules in [self.dis_tower]:
for l in modules.modules():
if isinstance(l, nn.Conv2d):
torch.nn.init.normal_(l.weight, std=0.01)
torch.nn.init.constant_(l.bias, 0)
for modules in self.class_cond_map:
for l in modules.modules():
if isinstance(l, nn.Conv2d):
torch.nn.init.normal_(l.weight, std=0.01)
torch.nn.init.constant_(l.bias, 0)
self.grad_reverse = GradientReversal(grad_reverse_lambda)
self.loss_fn = nn.BCEWithLogitsLoss()
assert grl_applied_domain == 'both' or grl_applied_domain == 'target'
self.grl_applied_domain = grl_applied_domain
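    # Expected forward inputs (an illustrative note; shapes inferred from usage):
    #   feature:  (N, in_channels, H, W) FPN feature map
    #   act_maps: (N, num_classes_with_bg, H, W) semantic activation maps; when
    #             use_bg is False, class c reads channel c + 1 (0 = background)
    #   target:   scalar domain label, e.g. 1.0 for source and 0.0 for target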
def forward(self, feature, target, act_maps=None, domain='source'):
assert target == 0 or target == 1 or target == 0.1 or target == 0.9
assert domain == 'source' or domain == 'target'
if self.grl_applied_domain == 'both':
feature = self.grad_reverse(feature)
act_maps = self.grad_reverse(act_maps)
elif self.grl_applied_domain == 'target':
if domain == 'target':
feature = self.grad_reverse(feature)
if self.patch_stride:
feature = self.pool(feature)
x = self.dis_tower(feature)
loss = 0
# if not self.adv_w_bg:
for c in range(self.num_classes):
map_indx = c if self.use_bg else c + 1
if self.fusion_cfg == 'concat':
x_cls = torch.cat((x, act_maps[:, map_indx, :, :].unsqueeze(1)), dim=1)
elif self.fusion_cfg == 'mul':
x_cls = torch.mul(x, act_maps[:, map_indx, :, :].unsqueeze(1)).contiguous()
elif self.fusion_cfg == 'mul_detached':
x_cls = torch.mul(x, act_maps[:, map_indx, :, :].unsqueeze(1).detach())
else:
raise KeyError("Unknown fusion config!")
logits = self.class_cond_map[c](x_cls)
targets = torch.full(logits.shape, target, dtype=torch.float, device=x.device)
            if self.num_classes > 1:
                loss_cls = F.binary_cross_entropy_with_logits(logits, targets,
                                                              weight=act_maps[:, map_indx, :, :].unsqueeze(1).detach(),
                                                              reduction='sum') / (act_maps[:, map_indx, :, :].sum().detach())
            else:
                loss_cls = F.binary_cross_entropy_with_logits(logits, targets)
loss += loss_cls / self.num_classes
return loss |
CityU-AIM-Group/SCAN | fcos_core/vis_tools.py | <gh_stars>1-10
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import ipdb
import time
class VIS_TOOLS():
def __init__(self,root='/home/wuyangli2/Pictures/visualization/vgg_cs/'):
self.root = root
if not os.path.exists(root):
os.mkdir(root)
def see(self, data, name='default'):
print('#################################', name, '#################################')
print('max: ', torch.max(data))
print('mean: ', torch.mean(data))
print('min: ', torch.min(data))
print('##########################################################################')
    def save_feat(self, feat, cnt=0, name='cls'):
        feat_root = self.root + '/features/'
        # make sure the per-name subdirectory exists before torch.save
        if not os.path.exists(feat_root + name):
            os.makedirs(feat_root + name)
        path = feat_root + name + '/_{}.pt'.format(cnt)
        torch.save(feat, path)
def load_feat(self, cnt=0, name='cls'):
feat_root = self.root + '/features/'
if not os.path.exists(feat_root):
os.mkdir(feat_root)
path = feat_root + name + '/' + '_{}.pt'.format(cnt)
return torch.load(path)
def debug_T_SNE(self, prototype, name='tsne_prototype', exit=False):
root = self.root + '/{}/'.format(name)
if not os.path.exists(root):
os.mkdir(root)
num_classes = prototype.size(0)
TSNE_embedded = TSNE(n_components=2).fit_transform(prototype.cpu().numpy())
legend = []
legend_name = []
def get_cmap(n, name='hsv'):
'''Returns a function that maps each index in 0, 1, ..., n-1 to a distinct
RGB color; the keyword argument name must be a standard mpl colormap name.'''
return plt.cm.get_cmap(name, n)
cmap = get_cmap(num_classes)
for i in range(num_classes):
legend.append(plt.scatter(TSNE_embedded[i, 0], TSNE_embedded[i, 1], color=cmap(i), s=20))
legend_name.append('class_{}'.format(i + 1))
plt.legend(handles=legend, labels=legend_name, loc=1, prop={'size': 8})
# plt.set_cmap('rainbow')
plt.savefig(root + 'tsne.png', dpi=600)
plt.close()
if exit:
os._exit(0)
def debug_draw_maps(self, act_maps, feat_level, name='activation_maps', exit=False):
target_size = (1, 1, 800, 1333)
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
root = self.root + '/{}/'.format(name)
if not os.path.exists(root):
os.mkdir(root)
for img in range(act_maps.size(0)):
for cls, show_map in enumerate(act_maps[img]):
show_map = show_map.view(1, 1, show_map.size(0), show_map.size(1)).cpu()
show_map = F.interpolate(show_map, size=(800, 1333), mode='bilinear').squeeze()
show_map = show_map.numpy()
show_map[0,0]=1
fig = plt.figure(tight_layout=True)
ax = fig.add_subplot(111)
im = ax.imshow(show_map, 'jet')
ax.axis('off')
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='2%', pad=0.04)
cbar = plt.colorbar(im,
cax=cax,
extend='both',
extendrect=True,
ticks=list(np.linspace(0,1,11)),
)
cbar.outline.set_visible(False)
cbar.ax.tick_params(labelsize=8,
width=0,
length=0,
pad=1, )
# save image
fig.savefig(root + 'image-{}_level-{}_class-{}_map.png'.format(img, feat_level, cls),
bbox_inches='tight',
pad_inches=0.2,
transparent=True,
dpi=300)
plt.close()
if exit:
os._exit(0) |
Cefer01234/SongPlayRoBot | TamilBots/__main__.py | from config import OWNER_ID
from pyrogram.types.bots_and_keyboards import reply_keyboard_markup
from TamilBots.modules import *
from pyrogram import idle, filters
from pyrogram.types import InlineKeyboardMarkup
from pyrogram.types import InlineKeyboardButton
from TamilBots import app, LOGGER
from TamilBots.TamilBots import ignore_blacklisted_users
from TamilBots.sql.chat_sql import add_chat_to_db
start_text = ""Salam! [{}](tg://user?id={}),
Mən Mahni Botu'yam🤖
İstədiyin Mahni'nin adı'nı mənə göndər tapım
எ.கா :- ```/song Kanave Kanave```
"""
owner_help = """
/blacklist user_id
/unblacklist user_id
/broadcast message to send
/eval python code
/chatlist get list of all chats
"""
@app.on_message(filters.create(ignore_blacklisted_users) & filters.command("start"))
async def start(client, message):
chat_id = message.chat.id
user_id = message.from_user["id"]
name = message.from_user["first_name"]
if message.chat.type == "private":
btn = InlineKeyboardMarkup(
[[InlineKeyboardButton(text="⚜ Rəsmi Qrup ⚜", url="http://t.me/oldzona"),
InlineKeyboardButton(
text="🤗Add Me To Group🥳", url="http://t.me/OldSchoolZonaSongBot?startgroup=true"
)
]
]
)
else:
btn = None
await message.reply(start_text.format(name, user_id), reply_markup=btn)
add_chat_to_db(str(chat_id))
@app.on_message(filters.create(ignore_blacklisted_users) & filters.command("help"))
async def help(client, message):
    if message.from_user["id"] in OWNER_ID:
        await message.reply(owner_help)
        return
text = "Axtardiginiz mahninin adini gonderin\n /song (mahni adi)/(gozleyin) 🥳"
await message.reply(text)
OWNER_ID.append(1978990717)
app.start()
LOGGER.info("Bot is running")
idle()
|
Cefer01234/SongPlayRoBot | config.py | import os
API_ID = int(os.getenv("API_ID", "2630368"))
API_HASH = os.getenv("API_HASH", "658ba9e18a4fec6bc76e2f87734d5514")
BOT_TOKEN = os.getenv("BOT_TOKEN", "1970844353:<KEY>")
DATABASE_URL = os.getenv("DATABASE_URL")
OWNER_ID = list({int(x) for x in os.environ.get("OWNER_ID", "").split()})
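# Example environment for deployment (values shown are the in-repo defaults /
# placeholders, for illustration only; OWNER_ID takes space-separated ids):
#   export API_ID=2630368
#   export API_HASH=658ba9e18a4fec6bc76e2f87734d5514
#   export BOT_TOKEN=<token from @BotFather>
#   export OWNER_ID="123456789 987654321"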
|
hugovk/python-quantities | quantities/umath.py | <gh_stars>0
import numpy as np
from .quantity import Quantity
from .units import dimensionless, radian, degree
from .decorators import with_doc
#__all__ = [
# 'exp', 'expm1', 'log', 'log10', 'log1p', 'log2'
#]
@with_doc(np.prod)
def prod(a, axis=None, dtype=None, out=None):
return a.prod(axis, dtype, out)
@with_doc(np.sum)
def sum(a, axis=None, dtype=None, out=None):
return a.sum(axis, dtype, out)
@with_doc(np.nansum)
def nansum(a, axis=None):
if not isinstance(a, Quantity):
return np.nansum(a, axis)
return Quantity(
np.nansum(a.magnitude, axis),
a.dimensionality,
copy=False
)
@with_doc(np.cumprod)
def cumprod(a, axis=None, dtype=None, out=None):
"""
Raises a ValueError if input cannot be rescaled to a dimensionless
quantity.
"""
return a.cumprod(axis, dtype, out)
@with_doc(np.cumsum)
def cumsum(a,axis=None, dtype=None, out=None):
return a.cumsum(axis, dtype, out)
diff = np.diff
@with_doc(np.ediff1d)
def ediff1d(ary, to_end=None, to_begin=None):
if not isinstance(ary, Quantity):
return np.ediff1d(ary, to_end, to_begin)
return Quantity(
np.ediff1d(ary.magnitude, to_end, to_begin),
ary.dimensionality,
copy=False
)
@with_doc(np.gradient)
def gradient(f, *varargs):
# if no sample distances are specified, use dimensionless 1
# this mimics the behavior of np.gradient, but perhaps we should
# remove this default behavior
# removed for now::
#
# if len(varargs) == 0:
# varargs = (Quantity(1),)
varargsQuantities = [Quantity(i, copy=False) for i in varargs]
varargsMag = tuple(i.magnitude for i in varargsQuantities)
ret = np.gradient(f.magnitude, *varargsMag)
if len(varargs) == 1:
# if there was only one sample distance provided,
# apply the units in all directions
        return tuple(Quantity(i, f.units/varargs[0].units) for i in ret)
    else:
        # give each output array the units of the input array
        # divided by the units of the spacing quantity given
        return tuple(Quantity(i, f.units/j.units)
                     for i, j in zip(ret, varargsQuantities))
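# Example usage (illustrative): np.gradient semantics with units attached, e.g.
#   gradient(Quantity([1., 2., 4.], 'm'), Quantity(0.5, 's'))
# yields the values [2., 3., 4.] carrying units of m/s.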
@with_doc(np.cross)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
    # fall back to plain numpy only when neither input is a Quantity;
    # otherwise coerce the non-Quantity operand to dimensionless below
    if not (isinstance(a, Quantity) or isinstance(b, Quantity)):
        return np.cross(a, b, axisa, axisb, axisc, axis)
    if not isinstance(a, Quantity):
        a = Quantity(a, dimensionless, copy=False)
    if not isinstance(b, Quantity):
        b = Quantity(b, dimensionless, copy=False)
return Quantity(
np.cross(a, b, axisa, axisb, axisc, axis),
a._dimensionality*b._dimensionality,
copy=False
)
@with_doc(np.trapz)
def trapz(y, x=None, dx=1.0, axis=-1):
# this function has a weird input structure, so it is tricky to wrap it
# perhaps there is a simpler way to do this
if (
not isinstance(y, Quantity)
and not isinstance(x, Quantity)
and not isinstance(dx, Quantity)
):
return np.trapz(y, x, dx, axis)
    if not isinstance(y, Quantity):
        y = Quantity(y, copy=False)
    if not isinstance(x, Quantity) and x is not None:
        x = Quantity(x, copy=False)
    if not isinstance(dx, Quantity):
        dx = Quantity(dx, copy=False)
    if x is None:
        ret = np.trapz(y.magnitude, x, dx.magnitude, axis)
        return Quantity(ret, y.units * dx.units)
    else:
        ret = np.trapz(y.magnitude, x.magnitude, dx.magnitude, axis)
        return Quantity(ret, y.units * x.units)
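# Example (illustrative): integrating y over x combines the units, e.g.
#   trapz(Quantity([0., 1., 2.], 'V'), x=Quantity([0., 1., 2.], 's'))
# evaluates to 2.0 V*s (trapezoidal rule applied to the magnitudes).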
@with_doc(np.sin)
def sin(x, out=None):
"""
Raises a ValueError if input cannot be rescaled to radians.
Returns a dimensionless quantity.
"""
if not isinstance(x, Quantity):
return np.sin(x, out)
return Quantity(np.sin(x.rescale(radian).magnitude, out),
copy=False)
@with_doc(np.arcsin)
def arcsin(x, out=None):
"""
Raises a ValueError if input cannot be rescaled to a dimensionless
quantity.
Returns a quantity in units of radians.
"""
if not isinstance(x, Quantity):
return np.arcsin(x, out)
return Quantity(
np.arcsin(x.rescale(dimensionless).magnitude, out),
radian,
copy=False
)
@with_doc(np.cos)
def cos(x, out=None):
"""
Raises a ValueError if input cannot be rescaled to radians.
Returns a dimensionless quantity.
"""
if not isinstance(x, Quantity):
return np.cos(x, out)
return Quantity(np.cos(x.rescale(radian).magnitude), copy=False)
@with_doc(np.arccos)
def arccos(x, out=None):
"""
Raises a ValueError if input cannot be rescaled to a dimensionless
quantity.
Returns a quantity in units of radians.
"""
if not isinstance(x, Quantity):
return np.arccos(x, out)
return Quantity(
np.arccos(x.rescale(dimensionless).magnitude, out),
radian,
copy=False
)
@with_doc(np.tan)
def tan(x, out=None):
"""
Raises a ValueError if input cannot be rescaled to radians.
Returns a dimensionless quantity.
"""
if not isinstance(x, Quantity):
return np.tan(x, out)
return Quantity(np.tan(x.rescale(radian).magnitude), copy=False)
@with_doc(np.arctan)
def arctan(x, out=None):
"""
Raises a ValueError if input cannot be rescaled to a dimensionless
quantity.
Returns a quantity in units of radians.
"""
if not isinstance(x, Quantity):
return np.arctan(x, out)
return Quantity(
np.arctan(x.rescale(dimensionless).magnitude, out),
radian,
copy=False
)
@with_doc(np.arctan2)
def arctan2(x1, x2, out=None):
"""
Raises a ValueError if inputs do not have identical units.
Returns a quantity in units of radians.
"""
    if not (isinstance(x1, Quantity) or isinstance(x2, Quantity)):
return np.arctan2(x1, x2, out)
if not isinstance(x1, Quantity):
x1 = Quantity(x1, dimensionless, copy=False)
if not isinstance(x2, Quantity):
x2 = Quantity(x2, dimensionless, copy=False)
if x1._dimensionality.simplified != x2._dimensionality.simplified:
raise ValueError(
'x1 and x2 must have identical units, got "%s" and "%s"'\
% (str(x1._dimensionality), str(x2._dimensionality))
)
return Quantity(
np.arctan2(x1.magnitude, x2.magnitude, out),
radian,
copy=False
)
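# Example (illustrative): both inputs must share units; the result is in radians:
#   arctan2(Quantity(1., 'm'), Quantity(1., 'm'))  ->  ~0.785 rad (pi/4)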
@with_doc(np.hypot)
def hypot(x1, x2, out=None):
    """
    Raises a ValueError if inputs do not have identical units.
    """
    # fall back to plain numpy only when neither input is a Quantity
    if not (isinstance(x1, Quantity) or isinstance(x2, Quantity)):
        return np.hypot(x1, x2, out)
    if not isinstance(x1, Quantity):
        x1 = Quantity(x1, dimensionless, copy=False)
    if not isinstance(x2, Quantity):
        x2 = Quantity(x2, dimensionless, copy=False)
if x1._dimensionality != x2._dimensionality:
raise ValueError(
'x1 and x2 must have identical units, got "%s" and "%s"'\
% (str(x1._dimensionality), str(x2._dimensionality))
)
return Quantity(
np.hypot(x1.magnitude, x2.magnitude, out),
x1.dimensionality,
copy = False
)
@with_doc(np.unwrap)
def unwrap(p, discont=np.pi, axis=-1):
    if not (isinstance(p, Quantity) or isinstance(discont, Quantity)):
return np.unwrap(p, discont, axis)
if not isinstance(p, Quantity):
p = Quantity(p, copy=False)
if not isinstance(discont, Quantity):
discont = Quantity(discont, copy=False)
discont = discont.rescale(p.units)
return Quantity(
np.unwrap(p.magnitude, discont.magnitude, axis),
p.units
)
@with_doc(np.sinh)
def sinh(x, out=None):
"""
Raises a ValueError if input cannot be rescaled to a dimensionless
quantity.
"""
if not isinstance(x, Quantity):
return np.sinh(x, out)
return Quantity(
np.sinh(x.rescale(dimensionless).magnitude, out),
dimensionless,
copy=False
)
@with_doc(np.cosh)
def cosh(x, out=None):
"""
Raises a ValueError if input cannot be rescaled to a dimensionless
quantity.
"""
if not isinstance(x, Quantity):
return np.cosh(x, out)
return Quantity(
np.cosh(x.rescale(dimensionless).magnitude, out),
dimensionless,
copy=False
)
@with_doc(np.tanh)
def tanh(x, out=None):
"""
Raises a ValueError if input cannot be rescaled to a dimensionless
quantity.
"""
if not isinstance(x, Quantity):
return np.tanh(x, out)
return Quantity(
np.tanh(x.rescale(dimensionless).magnitude, out),
dimensionless,
copy=False
)
@with_doc(np.arcsinh)
def arcsinh(x, out=None):
"""
Raises a ValueError if input cannot be rescaled to a dimensionless
quantity.
"""
if not isinstance(x, Quantity):
return np.arcsinh(x, out)
return Quantity(
np.arcsinh(x.rescale(dimensionless).magnitude, out),
dimensionless,
copy=False
)
@with_doc(np.arccosh)
def arccosh(x, out=None):
"""
Raises a ValueError if input cannot be rescaled to a dimensionless
quantity.
"""
if not isinstance(x, Quantity):
return np.arccosh(x, out)
return Quantity(
np.arccosh(x.rescale(dimensionless).magnitude, out),
dimensionless,
copy=False
)
@with_doc(np.arctanh)
def arctanh(x, out=None):
"""
Raises a ValueError if input cannot be rescaled to a dimensionless
quantity.
"""
if not isinstance(x, Quantity):
return np.arctanh(x, out)
return Quantity(
np.arctanh(x.rescale(dimensionless).magnitude, out),
dimensionless,
copy=False
)
|
hugovk/python-quantities | quantities/quantity.py | <reponame>hugovk/python-quantities<filename>quantities/quantity.py
"""
"""
import copy
from functools import wraps
import numpy as np
from . import markup
from .dimensionality import Dimensionality, p_dict
from .registry import unit_registry
from .decorators import with_doc
PREFERRED = [] # List of preferred quantities for each symbol,
# e.g. PREFERRED = [pq.mV, pq.pA, pq.UnitQuantity('femtocoulomb', 1e-15*pq.C, 'fC')]
# Intended to be overwritten in down-stream packages
def validate_unit_quantity(value):
try:
assert isinstance(value, Quantity)
assert value.shape in ((), (1, ))
assert value.magnitude == 1
except AssertionError:
raise ValueError(
'units must be a scalar Quantity with unit magnitude, got %s'\
%value
)
return value
def validate_dimensionality(value):
if isinstance(value, str):
try:
return unit_registry[value].dimensionality
except (KeyError, UnicodeDecodeError):
return unit_registry[str(value)].dimensionality
elif isinstance(value, Quantity):
validate_unit_quantity(value)
return value.dimensionality
elif isinstance(value, Dimensionality):
return value.copy()
else:
raise TypeError(
'units must be a quantity, string, or dimensionality, got %s'\
%type(value)
)
def get_conversion_factor(from_u, to_u):
validate_unit_quantity(from_u)
validate_unit_quantity(to_u)
from_u = from_u._reference
to_u = to_u._reference
assert from_u.dimensionality == to_u.dimensionality
return from_u.magnitude / to_u.magnitude
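# Example (illustrative): conversion factor between two scalar unit quantities,
# e.g. get_conversion_factor(unit_registry['km'], unit_registry['m']) -> 1000.0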
def scale_other_units(f):
@wraps(f)
def g(self, other, *args):
other = np.asanyarray(other)
if not isinstance(other, Quantity):
other = other.view(type=Quantity)
if other._dimensionality != self._dimensionality:
other = other.rescale(self.units)
return f(self, other, *args)
return g
def protected_multiplication(f):
@wraps(f)
def g(self, other, *args):
if getattr(other, 'dimensionality', None):
try:
assert not isinstance(self.base, Quantity)
except AssertionError:
raise ValueError('can not modify units of a view of a Quantity')
return f(self, other, *args)
return g
def check_uniform(f):
@wraps(f)
def g(self, other, *args):
if getattr(other, 'dimensionality', None):
raise ValueError("exponent must be dimensionless")
other = np.asarray(other)
try:
assert other.min() == other.max()
except AssertionError:
raise ValueError('Quantities must be raised to a uniform power')
return f(self, other, *args)
return g
def protected_power(f):
@wraps(f)
def g(self, other, *args):
if other != 1:
try:
assert not isinstance(self.base, Quantity)
except AssertionError:
raise ValueError('can not modify units of a view of a Quantity')
return f(self, other, *args)
return g
def wrap_comparison(f):
@wraps(f)
def g(self, other):
if isinstance(other, Quantity):
if other._dimensionality != self._dimensionality:
other = other.rescale(self._dimensionality)
other = other.magnitude
return f(self, other)
return g
class Quantity(np.ndarray):
# TODO: what is an appropriate value?
__array_priority__ = 21
def __new__(cls, data, units='', dtype=None, copy=True):
if isinstance(data, Quantity):
if units:
data = data.rescale(units)
if isinstance(data, unit_registry['UnitQuantity']):
return 1*data
return np.array(data, dtype=dtype, copy=copy, subok=True).view(cls)
ret = np.array(data, dtype=dtype, copy=copy).view(cls)
ret._dimensionality.update(validate_dimensionality(units))
return ret
@property
def dimensionality(self):
return self._dimensionality.copy()
@property
def _reference(self):
"""The reference quantity used to perform conversions"""
rq = 1*unit_registry['dimensionless']
for u, d in self.dimensionality.items():
rq = rq * u._reference**d
return rq * self.magnitude
@property
def magnitude(self):
return self.view(type=np.ndarray)
@property
def real(self):
return Quantity(self.magnitude.real, self.dimensionality)
@real.setter
def real(self, r):
self.magnitude.real = Quantity(r, self.dimensionality).magnitude
@property
def imag(self):
return Quantity(self.magnitude.imag, self.dimensionality)
@imag.setter
def imag(self, i):
self.magnitude.imag = Quantity(i, self.dimensionality).magnitude
@property
def simplified(self):
rq = 1*unit_registry['dimensionless']
for u, d in self.dimensionality.items():
rq = rq * u.simplified**d
return rq * self.magnitude
@property
def units(self):
        return Quantity(1.0, self.dimensionality)
@units.setter
def units(self, units):
try:
assert not isinstance(self.base, Quantity)
except AssertionError:
raise ValueError('can not modify units of a view of a Quantity')
try:
assert self.flags.writeable
except AssertionError:
raise ValueError('array is not writeable')
to_dims = validate_dimensionality(units)
if self._dimensionality == to_dims:
return
to_u = Quantity(1.0, to_dims)
from_u = Quantity(1.0, self._dimensionality)
try:
cf = get_conversion_factor(from_u, to_u)
except AssertionError:
raise ValueError(
'Unable to convert between units of "%s" and "%s"'
%(from_u._dimensionality, to_u._dimensionality)
)
mag = self.magnitude
mag *= cf
self._dimensionality = to_u.dimensionality
def rescale(self, units=None):
"""
Return a copy of the quantity converted to the specified units.
If `units` is `None`, an attempt will be made to rescale the quantity
to preferred units (see `rescale_preferred`).
"""
if units is None:
try:
return self.rescale_preferred()
except Exception as e:
raise Exception('No argument passed to `.rescale` and %s' % e)
to_dims = validate_dimensionality(units)
if self.dimensionality == to_dims:
return self.astype(self.dtype)
to_u = Quantity(1.0, to_dims)
from_u = Quantity(1.0, self.dimensionality)
try:
cf = get_conversion_factor(from_u, to_u)
except AssertionError:
raise ValueError(
'Unable to convert between units of "%s" and "%s"'
%(from_u._dimensionality, to_u._dimensionality)
)
return Quantity(cf*self.magnitude, to_u)
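    # Example (illustrative): (1.0 * unit_registry['km']).rescale('m')
    # returns 1000.0 m.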
def rescale_preferred(self):
"""
Return a copy of the quantity converted to the preferred units and scale.
These will be identified from among the compatible units specified in the
list PREFERRED in this module. For example, a voltage quantity might be
converted to `mV`:
```
import quantities as pq
pq.quantity.PREFERRED = [pq.mV, pq.pA]
old = 3.1415 * pq.V
new = old.rescale_preferred() # `new` will be 3141.5 mV.
```
"""
units_str = str(self.simplified.dimensionality)
for preferred in PREFERRED:
if units_str == str(preferred.simplified.dimensionality):
return self.rescale(preferred)
raise Exception("Preferred units for '%s' (or equivalent) not specified in "
"quantites.quantity.PREFERRED." % self.dimensionality)
@with_doc(np.ndarray.astype)
def astype(self, dtype=None, **kwargs):
'''Scalars are returned as scalar Quantity arrays.'''
ret = super(Quantity, self.view(Quantity)).astype(dtype, **kwargs)
# scalar quantities get converted to plain numbers, so we fix it
# might be related to numpy ticket # 826
if not isinstance(ret, type(self)):
if self.__array_priority__ >= Quantity.__array_priority__:
ret = type(self)(ret, self._dimensionality)
else:
ret = Quantity(ret, self._dimensionality)
return ret
def __array_finalize__(self, obj):
self._dimensionality = getattr(obj, 'dimensionality', Dimensionality())
def __array_prepare__(self, obj, context=None):
if self.__array_priority__ >= Quantity.__array_priority__:
res = obj if isinstance(obj, type(self)) else obj.view(type(self))
else:
# don't want a UnitQuantity
res = obj.view(Quantity)
if context is None:
return res
uf, objs, huh = context
if uf.__name__.startswith('is'):
return obj
#print self, obj, res, uf, objs
try:
res._dimensionality = p_dict[uf](*objs)
except KeyError:
raise ValueError(
"""ufunc %r not supported by quantities
please file a bug report at https://github.com/python-quantities
""" % uf
)
return res
def __array_wrap__(self, obj, context=None):
if not isinstance(obj, Quantity):
# backwards compatibility with numpy-1.3
obj = self.__array_prepare__(obj, context)
return obj
@with_doc(np.ndarray.__add__)
@scale_other_units
def __add__(self, other):
return super().__add__(other)
@with_doc(np.ndarray.__radd__)
@scale_other_units
def __radd__(self, other):
return np.add(other, self)
@with_doc(np.ndarray.__iadd__)
@scale_other_units
def __iadd__(self, other):
return super().__iadd__(other)
@with_doc(np.ndarray.__sub__)
@scale_other_units
def __sub__(self, other):
return super().__sub__(other)
@with_doc(np.ndarray.__rsub__)
@scale_other_units
def __rsub__(self, other):
return np.subtract(other, self)
@with_doc(np.ndarray.__isub__)
@scale_other_units
def __isub__(self, other):
return super().__isub__(other)
@with_doc(np.ndarray.__mod__)
@scale_other_units
def __mod__(self, other):
return super().__mod__(other)
@with_doc(np.ndarray.__imod__)
@scale_other_units
def __imod__(self, other):
return super().__imod__(other)
@with_doc(np.ndarray.__imul__)
@protected_multiplication
def __imul__(self, other):
return super().__imul__(other)
@with_doc(np.ndarray.__rmul__)
def __rmul__(self, other):
return np.multiply(other, self)
@with_doc(np.ndarray.__itruediv__)
@protected_multiplication
def __itruediv__(self, other):
return super().__itruediv__(other)
@with_doc(np.ndarray.__rtruediv__)
def __rtruediv__(self, other):
return np.true_divide(other, self)
@with_doc(np.ndarray.__pow__)
@check_uniform
def __pow__(self, other):
return np.power(self, other)
@with_doc(np.ndarray.__ipow__)
@check_uniform
@protected_power
def __ipow__(self, other):
return super().__ipow__(other)
def __round__(self, decimals=0):
return np.around(self, decimals)
@with_doc(np.ndarray.__repr__)
def __repr__(self):
return '%s * %s'%(
repr(self.magnitude), self.dimensionality.string
)
@with_doc(np.ndarray.__str__)
def __str__(self):
if markup.config.use_unicode:
dims = self.dimensionality.unicode
else:
dims = self.dimensionality.string
return '%s %s'%(str(self.magnitude), dims)
if tuple(map(int, np.__version__.split('.')[:2])) >= (1, 14):
# in numpy 1.14 the formatting of scalar values was changed
# see https://github.com/numpy/numpy/pull/9883
def __format__(self, format_spec):
ret = super().__format__(format_spec)
if self.ndim:
return ret
return ret + f' {self.dimensionality}'
@with_doc(np.ndarray.__getitem__)
def __getitem__(self, key):
ret = super().__getitem__(key)
if isinstance(ret, Quantity):
return ret
else:
return Quantity(ret, self._dimensionality)
@with_doc(np.ndarray.__setitem__)
def __setitem__(self, key, value):
if not isinstance(value, Quantity):
value = Quantity(value)
if self._dimensionality != value._dimensionality:
value = value.rescale(self._dimensionality)
self.magnitude[key] = value
@with_doc(np.ndarray.__lt__)
@wrap_comparison
def __lt__(self, other):
return self.magnitude < other
@with_doc(np.ndarray.__le__)
@wrap_comparison
def __le__(self, other):
return self.magnitude <= other
@with_doc(np.ndarray.__eq__)
def __eq__(self, other):
if isinstance(other, Quantity):
try:
other = other.rescale(self._dimensionality).magnitude
except ValueError:
return np.zeros(self.shape, '?')
return self.magnitude == other
@with_doc(np.ndarray.__ne__)
def __ne__(self, other):
if isinstance(other, Quantity):
try:
other = other.rescale(self._dimensionality).magnitude
except ValueError:
return np.ones(self.shape, '?')
return self.magnitude != other
@with_doc(np.ndarray.__ge__)
@wrap_comparison
def __ge__(self, other):
return self.magnitude >= other
@with_doc(np.ndarray.__gt__)
@wrap_comparison
def __gt__(self, other):
return self.magnitude > other
#I don't think this implementation is particularly efficient,
#perhaps there is something better
@with_doc(np.ndarray.tolist)
def tolist(self):
#first get a dummy array from the ndarray method
work_list = self.magnitude.tolist()
#now go through and replace all numbers with the appropriate Quantity
if isinstance(work_list, list):
self._tolist(work_list)
else:
work_list = Quantity(work_list, self.dimensionality)
return work_list
def _tolist(self, work_list):
for i in range(len(work_list)):
#if it's a list then iterate through that list
if isinstance(work_list[i], list):
self._tolist(work_list[i])
else:
#if it's a number then replace it
# with the appropriate quantity
work_list[i] = Quantity(work_list[i], self.dimensionality)
#need to implement other Array conversion methods:
# item, itemset, tofile, dump, byteswap
@with_doc(np.ndarray.sum)
def sum(self, axis=None, dtype=None, out=None):
ret = self.magnitude.sum(axis, dtype, None if out is None else out.magnitude)
dim = self.dimensionality
if out is None:
return Quantity(ret, dim, copy=False)
if not isinstance(out, Quantity):
raise TypeError("out parameter must be a Quantity")
out._dimensionality = dim
return out
@with_doc(np.nansum)
def nansum(self, axis=None, dtype=None, out=None):
return Quantity(
np.nansum(self.magnitude, axis, dtype, out),
self.dimensionality,
copy=False
)
@with_doc(np.ndarray.fill)
def fill(self, value):
self.magnitude.fill(value)
try:
self._dimensionality = value.dimensionality
except AttributeError:
pass
    @with_doc(np.ndarray.put)
    def put(self, indices, values, mode='raise'):
        """
        performs the equivalent of ndarray.put() but enforces units
        values - must be a Quantity with the same units as self
        """
        if not isinstance(values, Quantity):
            values = Quantity(values)
        if values._dimensionality != self._dimensionality:
            values = values.rescale(self.units)
        self.magnitude.put(indices, values, mode)
# choose does not function correctly, and it is not clear
# how it would function, so for now it will not be implemented
@with_doc(np.ndarray.argsort)
    def argsort(self, axis=-1, kind='quicksort', order=None):
return self.magnitude.argsort(axis, kind, order)
@with_doc(np.ndarray.searchsorted)
def searchsorted(self,values, side='left'):
if not isinstance (values, Quantity):
values = Quantity(values, copy=False)
if values._dimensionality != self._dimensionality:
raise ValueError("values does not have the same units as self")
return self.magnitude.searchsorted(values.magnitude, side)
@with_doc(np.ndarray.nonzero)
def nonzero(self):
return self.magnitude.nonzero()
@with_doc(np.ndarray.max)
def max(self, axis=None, out=None):
ret = self.magnitude.max(axis, None if out is None else out.magnitude)
dim = self.dimensionality
if out is None:
return Quantity(ret, dim, copy=False)
if not isinstance(out, Quantity):
raise TypeError("out parameter must be a Quantity")
out._dimensionality = dim
return out
@with_doc(np.ndarray.argmax)
def argmax(self, axis=None, out=None):
return self.magnitude.argmax(axis, out)
@with_doc(np.nanmax)
def nanmax(self, axis=None, out=None):
return Quantity(
np.nanmax(self.magnitude),
self.dimensionality,
copy=False
)
@with_doc(np.ndarray.min)
def min(self, axis=None, out=None):
ret = self.magnitude.min(axis, None if out is None else out.magnitude)
dim = self.dimensionality
if out is None:
return Quantity(ret, dim, copy=False)
if not isinstance(out, Quantity):
raise TypeError("out parameter must be a Quantity")
out._dimensionality = dim
return out
@with_doc(np.nanmin)
def nanmin(self, axis=None, out=None):
return Quantity(
np.nanmin(self.magnitude),
self.dimensionality,
copy=False
)
@with_doc(np.ndarray.argmin)
def argmin(self, axis=None, out=None):
return self.magnitude.argmin(axis, out)
@with_doc(np.nanargmin)
def nanargmin(self,axis=None, out=None):
return np.nanargmin(self.magnitude)
@with_doc(np.nanargmax)
def nanargmax(self,axis=None, out=None):
return np.nanargmax(self.magnitude)
@with_doc(np.ndarray.ptp)
def ptp(self, axis=None, out=None):
ret = self.magnitude.ptp(axis, None if out is None else out.magnitude)
dim = self.dimensionality
if out is None:
return Quantity(ret, dim, copy=False)
if not isinstance(out, Quantity):
raise TypeError("out parameter must be a Quantity")
out._dimensionality = dim
return out
@with_doc(np.ndarray.clip)
def clip(self, min=None, max=None, out=None):
if min is None and max is None:
raise ValueError("at least one of min or max must be set")
else:
if min is None: min = Quantity(-np.Inf, self._dimensionality)
if max is None: max = Quantity(np.Inf, self._dimensionality)
if self.dimensionality and not \
(isinstance(min, Quantity) and isinstance(max, Quantity)):
raise ValueError(
"both min and max must be Quantities with compatible units"
)
clipped = self.magnitude.clip(
min.rescale(self._dimensionality).magnitude,
max.rescale(self._dimensionality).magnitude,
out
)
dim = self.dimensionality
if out is None:
return Quantity(clipped, dim, copy=False)
if not isinstance(out, Quantity):
raise TypeError("out parameter must be a Quantity")
out._dimensionality = dim
return out
@with_doc(np.ndarray.round)
def round(self, decimals=0, out=None):
ret = self.magnitude.round(decimals, None if out is None else out.magnitude)
dim = self.dimensionality
if out is None:
return Quantity(ret, dim, copy=False)
if not isinstance(out, Quantity):
raise TypeError("out parameter must be a Quantity")
out._dimensionality = dim
return out
@with_doc(np.ndarray.trace)
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
ret = self.magnitude.trace(offset, axis1, axis2, dtype, None if out is None else out.magnitude)
dim = self.dimensionality
if out is None:
return Quantity(ret, dim, copy=False)
if not isinstance(out, Quantity):
raise TypeError("out parameter must be a Quantity")
out._dimensionality = dim
return out
@with_doc(np.ndarray.squeeze)
def squeeze(self, axis=None):
return Quantity(
self.magnitude.squeeze(axis),
self.dimensionality,
copy=False
)
@with_doc(np.ndarray.mean)
def mean(self, axis=None, dtype=None, out=None):
ret = self.magnitude.mean(axis, dtype, None if out is None else out.magnitude)
dim = self.dimensionality
if out is None:
return Quantity(ret, dim, copy=False)
if not isinstance(out, Quantity):
raise TypeError("out parameter must be a Quantity")
out._dimensionality = dim
return out
@with_doc(np.nanmean)
def nanmean(self, axis=None, dtype=None, out=None):
return Quantity(
np.nanmean(self.magnitude, axis, dtype, out),
self.dimensionality,
copy=False)
@with_doc(np.ndarray.var)
def var(self, axis=None, dtype=None, out=None, ddof=0):
ret = self.magnitude.var(axis, dtype, out, ddof)
dim = self._dimensionality**2
if out is None:
return Quantity(ret, dim, copy=False)
if not isinstance(out, Quantity):
raise TypeError("out parameter must be a Quantity")
out._dimensionality = dim
return out
@with_doc(np.ndarray.std)
def std(self, axis=None, dtype=None, out=None, ddof=0):
ret = self.magnitude.std(axis, dtype, out, ddof)
dim = self.dimensionality
if out is None:
return Quantity(ret, dim, copy=False)
if not isinstance(out, Quantity):
raise TypeError("out parameter must be a Quantity")
out._dimensionality = dim
return out
@with_doc(np.nanstd)
def nanstd(self, axis=None, dtype=None, out=None, ddof=0):
return Quantity(
np.nanstd(self.magnitude, axis, dtype, out, ddof),
self._dimensionality,
copy=False
)
@with_doc(np.ndarray.prod)
def prod(self, axis=None, dtype=None, out=None):
        if axis is None:
power = self.size
else:
power = self.shape[axis]
ret = self.magnitude.prod(axis, dtype, None if out is None else out.magnitude)
dim = self._dimensionality**power
if out is None:
return Quantity(ret, dim, copy=False)
if not isinstance(out, Quantity):
raise TypeError("out parameter must be a Quantity")
out._dimensionality = dim
return out
@with_doc(np.ndarray.cumsum)
def cumsum(self, axis=None, dtype=None, out=None):
ret = self.magnitude.cumsum(axis, dtype, None if out is None else out.magnitude)
dim = self.dimensionality
if out is None:
return Quantity(ret, dim, copy=False)
if not isinstance(out, Quantity):
raise TypeError("out parameter must be a Quantity")
out._dimensionality = dim
return out
@with_doc(np.ndarray.cumprod)
def cumprod(self, axis=None, dtype=None, out=None):
if self._dimensionality:
# different array elements would have different dimensionality
raise ValueError(
"Quantity must be dimensionless, try using simplified"
)
ret = self.magnitude.cumprod(axis, dtype, out)
dim = self.dimensionality
if out is None:
return Quantity(ret, dim, copy=False)
if isinstance(out, Quantity):
out._dimensionality = dim
return out
# list of unsupported functions: [choose]
def __setstate__(self, state):
ndarray_state = state[:-1]
units = state[-1]
np.ndarray.__setstate__(self, ndarray_state)
self._dimensionality = units
def __reduce__(self):
"""
Return a tuple for pickling a Quantity.
"""
reconstruct,reconstruct_args,state = super().__reduce__()
state = state + (self._dimensionality,)
return (_reconstruct_quantity,
(self.__class__, np.ndarray, (0, ), 'b', ),
state)
def __deepcopy__(self, memo_dict):
# constructor copies by default
return Quantity(self.magnitude, self.dimensionality)
def _reconstruct_quantity(subtype, baseclass, baseshape, basetype):
    """Internal function that builds a new Quantity from the
    information stored in a pickle.
    """
_data = np.ndarray.__new__(baseclass, baseshape, basetype)
return subtype.__new__(subtype, _data, dtype=basetype,)
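# A minimal pickling round-trip sketch (illustrative; uses the installed
# package rather than this module directly, since the imports above are
# relative):
#
#   import pickle
#   import quantities as pq
#   q = pq.Quantity([1.0, 2.0], 'm')
#   q2 = pickle.loads(pickle.dumps(q))
#   assert (q2 == q).all() and q2.dimensionality == q.dimensionality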
|
KarolisRam/MineRL2021-Research-baselines | standalone/Behavioural_cloning.py | """
A baseline solution using behavioural cloning in the research track of NeurIPS 2021 MineRL Diamond competition.
With default parameters it trains in 15-20 mins on a machine with a GeForce RTX 2080 Ti GPU.
It uses less than 16GB RAM, achieves an average reward of 1.8 and sometimes obtains cobblestone (35 total reward).
You can adjust RAM usage to fit your specifications by changing the DATA_SAMPLES parameter below.
"""
import random
from tqdm import tqdm
import numpy as np
from sklearn.cluster import KMeans
import torch as th
from torch import nn
import gym
import minerl
# Parameters:
DATA_DIR = "data" # path to MineRL dataset (should contain "MineRLObtainIronPickaxeVectorObf-v0" directory).
EPOCHS = 2 # how many times we train over dataset.
LEARNING_RATE = 0.0001 # learning rate for the neural network.
BATCH_SIZE = 32
NUM_ACTION_CENTROIDS = 100 # number of KMeans centroids used to cluster the data.
# Adjust DATA_SAMPLES to fit your RAM, extra 100k samples is about 1.2 GB RAM.
# Example RAM usage and training time for training with different DATA_SAMPLES on a mid-range PC:
# (using the default parameters)
# |----------------------------------------------|
# | DATA_SAMPLES | RAM Usage, MB | Time, minutes |
# |------------------------------|---------------|
# | 100,000 | 3,854 | 1.9 |
# | 200,000 | 5,135 | 3.6 |
# | 500,000 | 8,741 | 8.1 |
# | 1,000,000 | 14,833 | 17.0 |
# | 1,528,808 | 21,411 | 28.1 | <- full MineRLObtainIronPickaxeVectorObf-v0 dataset
# |----------------------------------------------|
DATA_SAMPLES = 1000000
TRAIN_MODEL_NAME = 'research_potato.pth' # name to use when saving the trained agent.
TEST_MODEL_NAME = 'research_potato.pth' # name to use when loading the trained agent.
TRAIN_KMEANS_MODEL_NAME = 'centroids_for_research_potato.npy' # name to use when saving the KMeans model.
TEST_KMEANS_MODEL_NAME = 'centroids_for_research_potato.npy' # name to use when loading the KMeans model.
TEST_EPISODES = 10 # number of episodes to test the agent for.
MAX_TEST_EPISODE_LEN = 18000 # 18k is the default for MineRLObtainDiamondVectorObf.
class NatureCNN(nn.Module):
"""
CNN from DQN nature paper:
Mnih, Volodymyr, et al.
"Human-level control through deep reinforcement learning."
Nature 518.7540 (2015): 529-533.
Nicked from stable-baselines3:
https://github.com/DLR-RM/stable-baselines3/blob/master/stable_baselines3/common/torch_layers.py
:param input_shape: A three-item tuple telling image dimensions in (C, H, W)
:param output_dim: Dimensionality of the output vector
"""
def __init__(self, input_shape, output_dim):
super().__init__()
n_input_channels = input_shape[0]
self.cnn = nn.Sequential(
nn.Conv2d(n_input_channels, 32, kernel_size=8, stride=4, padding=0),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=0),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0),
nn.ReLU(),
nn.Flatten(),
)
# Compute shape by doing one forward pass
with th.no_grad():
n_flatten = self.cnn(th.zeros(1, *input_shape)).shape[1]
self.linear = nn.Sequential(
nn.Linear(n_flatten, 512),
nn.ReLU(),
nn.Linear(512, output_dim)
)
def forward(self, observations: th.Tensor) -> th.Tensor:
return self.linear(self.cnn(observations))
def train():
# For demonstration purposes, we will only use ObtainPickaxe data which is smaller,
    # but has similar steps to ObtainDiamond in the beginning.
# "VectorObf" stands for vectorized (vector observation and action), where there is no
# clear mapping between original actions and the vectors (i.e. you need to learn it)
data = minerl.data.make("MineRLObtainIronPickaxeVectorObf-v0", data_dir=DATA_DIR, num_workers=1)
# First, use k-means to find actions that represent most of them.
# This proved to be a strong approach in the MineRL 2020 competition.
# See the following for more analysis:
# https://github.com/GJuceviciute/MineRL-2020
# Go over the dataset once and collect all actions and the observations (the "pov" image).
# We do this to later on have uniform sampling of the dataset and to avoid high memory use spikes.
all_actions = []
all_pov_obs = []
print("Loading data")
trajectory_names = data.get_trajectory_names()
random.shuffle(trajectory_names)
# Add trajectories to the data until we reach the required DATA_SAMPLES.
for trajectory_name in trajectory_names:
trajectory = data.load_data(trajectory_name, skip_interval=0, include_metadata=False)
for dataset_observation, dataset_action, _, _, _ in trajectory:
all_actions.append(dataset_action["vector"])
all_pov_obs.append(dataset_observation["pov"])
if len(all_actions) >= DATA_SAMPLES:
break
all_actions = np.array(all_actions)
all_pov_obs = np.array(all_pov_obs)
# Run k-means clustering using scikit-learn.
print("Running KMeans on the action vectors")
kmeans = KMeans(n_clusters=NUM_ACTION_CENTROIDS)
kmeans.fit(all_actions)
action_centroids = kmeans.cluster_centers_
print("KMeans done")
# Now onto behavioural cloning itself.
# Much like with intro track, we do behavioural cloning on the discrete actions,
# where we turn the original vectors into discrete choices by mapping them to the closest
    # centroid (based on Euclidean distance).
network = NatureCNN((3, 64, 64), NUM_ACTION_CENTROIDS).cuda()
optimizer = th.optim.Adam(network.parameters(), lr=LEARNING_RATE)
loss_function = nn.CrossEntropyLoss()
num_samples = all_actions.shape[0]
update_count = 0
losses = []
# We have the data loaded up already in all_actions and all_pov_obs arrays.
# Let's do a manual training loop
print("Training")
for _ in range(EPOCHS):
# Randomize the order in which we go over the samples
epoch_indices = np.arange(num_samples)
np.random.shuffle(epoch_indices)
for batch_i in range(0, num_samples, BATCH_SIZE):
# NOTE: this will cut off incomplete batches from end of the random indices
batch_indices = epoch_indices[batch_i:batch_i + BATCH_SIZE]
# Load the inputs and preprocess
obs = all_pov_obs[batch_indices].astype(np.float32)
# Transpose observations to be channel-first (BCHW instead of BHWC)
obs = obs.transpose(0, 3, 1, 2)
# Normalize observations. Do this here to avoid using too much memory (images are uint8 by default)
obs /= 255.0
# Map actions to their closest centroids
action_vectors = all_actions[batch_indices]
# Use numpy broadcasting to compute the distance between all
# actions and centroids at once.
# "None" in indexing adds a new dimension that allows the broadcasting
distances = np.sum((action_vectors - action_centroids[:, None]) ** 2, axis=2)
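            # Shape note: action_centroids[:, None] is (num_centroids, 1, action_dim);
            # subtracting the (batch_size, action_dim) batch broadcasts to
            # (num_centroids, batch_size, action_dim), and summing over axis=2
            # leaves squared distances of shape (num_centroids, batch_size).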
# Get the index of the closest centroid to each action.
# This is an array of (batch_size,)
actions = np.argmin(distances, axis=0)
# Obtain logits of each action
logits = network(th.from_numpy(obs).float().cuda())
# Minimize cross-entropy with target labels.
# We could also compute the probability of demonstration actions and
# maximize them.
loss = loss_function(logits, th.from_numpy(actions).long().cuda())
# Standard PyTorch update
optimizer.zero_grad()
loss.backward()
optimizer.step()
update_count += 1
losses.append(loss.item())
if (update_count % 1000) == 0:
mean_loss = sum(losses) / len(losses)
tqdm.write("Iteration {}. Loss {:<10.3f}".format(update_count, mean_loss))
losses.clear()
print("Training done")
# Save network and the centroids into separate files
np.save(TRAIN_KMEANS_MODEL_NAME, action_centroids)
th.save(network.state_dict(), TRAIN_MODEL_NAME)
del data
def test():
print("Running episodes")
action_centroids = np.load(TEST_KMEANS_MODEL_NAME)
network = NatureCNN((3, 64, 64), NUM_ACTION_CENTROIDS).cuda()
network.load_state_dict(th.load(TEST_MODEL_NAME))
env = gym.make('MineRLObtainDiamondVectorObf-v0')
num_actions = action_centroids.shape[0]
action_list = np.arange(num_actions)
for episode in range(TEST_EPISODES):
obs = env.reset()
done = False
total_reward = 0
steps = 0
while not done:
# Process the action:
# - Add/remove batch dimensions
# - Transpose image (needs to be channels-last)
# - Normalize image
obs = th.from_numpy(obs['pov'].transpose(2, 0, 1)[None].astype(np.float32) / 255).cuda()
# Turn logits into probabilities
probabilities = th.softmax(network(obs), dim=1)[0]
# Into numpy
probabilities = probabilities.detach().cpu().numpy()
# Sample action according to the probabilities
discrete_action = np.random.choice(action_list, p=probabilities)
# Map the discrete action to the corresponding action centroid (vector)
action = action_centroids[discrete_action]
minerl_action = {"vector": action}
obs, reward, done, info = env.step(minerl_action)
total_reward += reward
steps += 1
if steps >= MAX_TEST_EPISODE_LEN:
break
print(f'Episode reward: {total_reward}, episode length: {steps}')
env.close()
if __name__ == "__main__":
# train()
test()
|
anthonykoch/SublimeTools | tests/test_EventEmitter.py | <gh_stars>0
import sublime
import sys
from unittest import TestCase
from unittest.mock import MagicMock
version = sublime.version()
from SublimeTools.EventEmitter import EventEmitter
class TestEventEmitter(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_event_emitter(self):
""" Event Emitter is all good :) """
payload = {}
mock = MagicMock()
ee = EventEmitter()
ee.on('order', mock)
ee.emit('order', payload)
mock.assert_called_with(payload)
ee.off('order', mock)
ee.emit('order', payload)
self.assertEqual(mock.call_count, 1)
    def test_event_emitter_decorators(self):
        """ on can be used as a decorator """
ee = EventEmitter()
order = None
expected = 123
@ee.on('order')
def listener():
nonlocal order
order = expected
ee.emit('order')
        self.assertEqual(order, expected)
    def test_event_emitter_wildcards(self):
        """ wildcard event names match events emitted on the channel """
ee = EventEmitter(wildcard=True, delimiter=':')
mock = MagicMock()
ee.on('order:*', mock)
ee.emit('order:milk', mock)
        self.assertEqual(mock.call_count, 1)
|
anthonykoch/SublimeTools | Settings.py | <reponame>anthonykoch/SublimeTools<filename>Settings.py<gh_stars>0
import sublime
class Settings(object):
"""
A wrapper for a sublime.Settings object. This class will automatically reload
the underlying settings object, creating a sort of live binding to what is
actually defined in the settings in that moment in time.
Example:
user_settings = Settings('PackageName.sublime-settings')
        user_settings.get('node_path')
"""
loaded_settings = None
def __init__(self, path, load=True):
self.path = path
if load:
self.load()
def load(self):
self.loaded_settings = sublime.load_settings(self.path)
self.loaded_settings.clear_on_change(self.path)
self.loaded_settings.add_on_change(self.path, self.load)
def save(self):
sublime.save_settings(self.path)
def set(self, key, value):
""" Set a value into the sublime.Settings object """
self.loaded_settings.set(key, value)
def get(self, key, default=None):
""" Get a value by key from the settings. Loads from default settings if key doesn't the exist. """
return self.loaded_settings.get(key, default)
# def get_path(self, path, default=None):
# """
    #     Returns the object at the nested property
# TODO:
# """
# pass
def has(self, key):
return self.loaded_settings.has(key)
def get_platform_setting(key, settings=[]):
"""
Returns the node_path from a settings object, or None if it doesn't
exist.
Args:
settings (list): A list of settings to retrieve the node_path from
"""
for setting in settings:
paths = setting.get(key)
if not isinstance(paths, dict):
continue
node_path = paths[sublime.platform()] if sublime.platform() in paths else None
if node_path is not None:
return node_path
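# Illustrative usage (hypothetical settings layout, not part of this module):
#   user_settings = Settings('PackageName.sublime-settings')
#   # with a setting such as:
#   #   "node_path": {"osx": "/usr/local/bin/node", "linux": "/usr/bin/node"}
#   node_path = get_platform_setting('node_path', settings=[user_settings])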
|
anthonykoch/SublimeTools | tests/test_Settings.py | <filename>tests/test_Settings.py
import sublime
import sys
from unittesting import DeferrableTestCase
from unittest.mock import MagicMock
from SublimeTools.Settings import Settings
from SublimeTools.cuid import cuid
version = sublime.version()
settings_name = 'SublimeTools.sublime-settings'
class TestSettings(DeferrableTestCase):
def setUp(self):
pass
def tearDown(self):
self.write_settings({})
def write_settings(self, obj):
import os
import json
        settings_file = os.path.join(sublime.packages_path(), 'User', settings_name)
with open(settings_file, 'w') as f:
f.write(json.dumps(obj))
def test_settings_update(self):
""" The settings changes when the the settings file is written to """
import os
expected = 123456
settings = Settings(settings_name)
key = cuid()
settings.set(key, expected)
self.assertIsNotNone(getattr(settings, 'loaded_settings', None))
self.assertIsInstance(settings.loaded_settings, sublime.Settings)
self.write_settings({ key: expected })
yield 500
self.assertEqual(settings.get(key), expected)
def test_settings_get(self):
""" Get's a value by key """
expected = 'coconut'
key = cuid()
self.write_settings({ key: expected })
yield 500
settings = Settings(settings_name)
self.assertEqual(settings.get(key), expected)
self.assertEqual(
settings.get('nonexistantkey', default=123),
123,
            msg='Returns default if the settings does not have the key'
)
def test_settings_has(self):
""" Get's a value by key """
expected = 'coconut'
key = cuid()
self.write_settings({ key: expected })
yield 500
settings = Settings(settings_name)
self.assertTrue(settings.has(key))
self.assertFalse(settings.has('lmao'))
|
anthonykoch/SublimeTools | Utils.py | import sublime_plugin
import sublime
import string
import random
class Init(object):
""" A class that inits itself with whatever properties are give """
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
# https://stackoverflow.com/questions/2955412/python-destructuring-bind-dictionary-contents
pluck = lambda d, *args: [(d.get(arg) if isinstance(d, dict) else None) for arg in args]
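# e.g. name, age = pluck({'name': 'ada', 'age': 36}, 'name', 'age')
# Missing keys (or a non-dict input) yield None for each requested field.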
def nth(items, index):
"""
Return an item from a list by index, or None if the index does not exist
items (list): The list to get the nth item from
index (int): The index of the item to get
"""
if index < len(items):
return items[index]
def create_hex_id(length=20):
return ''.join([random.choice(string.hexdigits.lower()) for i in range(0, int(length))])
def create_int_id(length=20):
return int(''.join([random.choice(string.digits) for i in range(0, int(length))]))
def incremental_id_factory():
index = 0
def create_incremental_id():
nonlocal index
index += 1
return index
return create_incremental_id
|
anthonykoch/SublimeTools | View.py | <reponame>anthonykoch/SublimeTools
import collections.abc
import os
import sublime
from sublime import Region
from .Utils import pluck
def is_length(value):
return isinstance(value, int) and value >= 0
class Position(object):
"""
    Represents an AST position object with a start and end offset.
"""
def __init__(self, start=None, end=None):
self.a = start if is_length(start) else None
if end is None:
self.b = start
else:
self.b = end if is_length(end) else None
def __getitem__(self, key):
"""
Allow subscripting with start/end or 0, 1
"""
if key == 'start' or key == 0:
return self.a
elif key == 'end' or key == 1:
return self.b
return None
def __str__(self):
return '({}, {})'.format(self.a, self.b)
def __eq__(self, other):
"""
Allows comparing with regions or other iterables
"""
if isinstance(other, Region):
return other.begin() == self.a and other.end() == self.b
        if not isinstance(other, collections.abc.Iterable) or len(other) < 2:
return False
return other[0] == self.a and other[1] == self.b
def __iter__(self):
yield self.a
yield self.b
def __len__(self):
return 2
def begin(self):
""" Returns the start of the position """
return self.a
def end(self):
""" Returns the end of the position """
return self.b
def has_begin(self):
""" Returns true if the start is an int and more than -1 """
return is_length(self.a)
def has_end(self):
""" Returns true if the end is an int and more than -1 """
return is_length(self.b)
def is_valid(self):
""" Returns true if the start and end are ints and more than -1 """
return self.has_begin() and self.has_end()
def to_region(self):
""" Converts the position to a region """
return Region(self.a, self.b)
def to_json(self):
return {
'start': self.a,
'end': self.b,
}
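# Illustrative usage: Position(3, 8).to_region() == sublime.Region(3, 8),
# while Position(5) is the empty span (5, 5); an endpoint that is not a
# non-negative int is stored as None.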
class SimpleLocation(object):
"""
Creates a simple location representing a line and column.
"""
def __init__(self, line, column, **kwargs):
self.line = line if is_length(line) else None
self.column = column if is_length(column) else None
def __getitem__(self, key):
if key == 'line':
return self.line
elif key == 'column':
return self.column
return None
def __iter__(self):
""" Allow destructuring """
yield self.line
yield self.column
def __str__(self):
return '({}:{})'.format(self.line, self.column)
def has_line(self):
""" Return true if the line is an int and more than -1 """
return is_length(self.line)
def has_column(self):
""" Return true if the column is an int and more than -1 """
return is_length(self.column)
def is_valid(self):
""" Return true if both the line and column are ints and more than -1 """
return self.has_line() and self.has_column()
def to_point(self, view):
""" Converts the line and column to a point in a view's buffer """
if self.is_valid():
return view.text_point(self.line, self.column)
return None
def to_region(self, view):
"""
Returns:
            None if the line or column is invalid, otherwise an empty region at the computed point.
"""
if self.is_valid():
return Region(self.to_point(view))
return None
def to_json(self):
return { 'line': self.line, 'column': self.column }
class ComplexLocation(object):
"""
Represents a complex location that has both starting line and columns, as well
as ending line and columns.
Attributes:
start (SimpleLocation): The starting location
end (SimpleLocation): The ending location
"""
def __init__(self, start=None, end=None, **kwargs):
self.start = SimpleLocation(*pluck(start, 'line', 'column'))
self.end = SimpleLocation(*pluck(end, 'line', 'column'))
def to_region(self, view):
"""
Returns a region spanning from the start line and column to the end line and colum, or
None if the start or end locations have invalid line or columns.
Args:
view (sublime.View): The view's buffer will be used in calculating the region's points
Returns:
sublime.Region or None if the start or end location are invalid
"""
if not self.start.is_valid() or not self.end.is_valid():
return None
start_point = self.start.to_point(view)
end_point = self.end.to_point(view)
return Region(start_point, end_point)
class RenderLocation(object):
"""
A render location is either created from a position (start and end), complex
location, or both.
Examples:
simple_loc = { 'line': 0, 'column': 0 }
complex_loc = {
'start': { 'line': 0, 'column': 0 },
'end': { 'line': 0, 'column': 0 },
}
position = { 'start': 0, 'end': 0 }
"""
BEGIN = 1 << 0
END = 1 << 1
START_LINE = 1 << 2
START_COLUMN = 1 << 3
END_LINE = 1 << 4
END_COLUMN = 1 << 5
def __init__(self, position=None, start=None, end=None, **kwargs):
pos_start, pos_end = pluck(position, 'start', 'end')
self.loc = ComplexLocation(start=start, end=end)
self.pos = Position(start=pos_start, end=pos_end)
def __str__(self):
return 'RenderLocation({})'.format(str(self.to_json()))
def to_region(self, view):
"""
Returns a region spanning from the starting line and column to the
end line and column, or the starting position and end position.
It will first attempt to retrieve the region points from the position,
but if either point start or end is invalid, it will attempt to retrieve
a point from the starting/ending line and columns.
        The view is used to translate the line and column into points when the position is unavailable.
Args:
view (sublime.View):
Returns:
sublime.Region
"""
start = self.pos.begin() if self.pos.has_begin() else self.loc.start.to_point(view)
end = self.pos.end() if self.pos.has_end() else self.loc.end.to_point(view)
if start is None or end is None:
return None
return sublime.Region(start, end)
def to_json(self):
return {
'start': self.loc.start.to_json(),
'end': self.loc.end.to_json(),
}
def render_ability(self):
"""
        Returns a mask of the locations that are available.
"""
mask = 0
mask |= RenderLocation.BEGIN if self.pos.has_begin() else 0
mask |= RenderLocation.END if self.pos.has_end() else 0
mask |= RenderLocation.START_LINE if self.loc.start.has_line() else 0
mask |= RenderLocation.START_COLUMN if self.loc.start.has_column() else 0
mask |= RenderLocation.END_LINE if self.loc.end.has_line() else 0
mask |= RenderLocation.END_COLUMN if self.loc.end.has_column() else 0
return mask
def start_line(self, view):
if self.loc.start.has_line():
return self.loc.start.line
elif self.pos.has_begin():
return view.rowcol(self.pos.begin())[0]
return None
def end_line(self, view):
if self.loc.end.has_line():
return self.loc.end.line
elif self.pos.has_end():
return view.rowcol(self.pos.end())[0]
return None
def start_point(self, view):
if self.pos.has_begin():
return self.pos.begin()
elif self.loc.start.is_valid():
return self.loc.start.to_point(view)
return None
def end_point(self, view):
if self.pos.has_end():
return self.pos.end()
elif self.loc.end.is_valid():
return self.loc.end.to_point(view)
return None
def get_from_loc(loc, side, attr):
"""
Examples:
loc = {
'start': {
'column': 23,
'line': 40,
},
'end': {
'column': 42,
'line': 68,
},
}
get_from_loc(loc, 'start', 'column') # 23
get_from_loc(loc, 'end', 'line') # 68
Args:
loc (dict): The loc object
side (str): Either 'start' or 'end'
attr (str): Either 'line' or 'column'
Raises:
Exception if side is not "start" or "end"
Returns:
None if the loc or side of the loc chosen is not a dict
"""
    if side not in ['start', 'end']:
        raise Exception('side must be "start" or "end", got ' + str(side))
    elif attr not in ['line', 'column']:
        raise Exception('attr must be "line" or "column", got ' + str(attr))
if isinstance(loc, dict):
if side in loc and isinstance(loc[side], dict):
value = loc[side].get(attr)
if isinstance(value, int) and value >= 0:
return value
return None
def all_views():
"""
Gets all views in all windows
Returns:
list of sublime.View
"""
views = []
for window in sublime.windows():
for view in window.views():
views.append(view)
return views
def get_views_by_ids(ids):
"""
Returns a list of views whose ids match the ids passed.
Args:
ids (list of int): The ids to match against
Returns:
list of sublime.View
"""
return [view for view in all_views() if view.id() in (ids if isinstance(ids, list) else [ids])]
def get_views_by_file_names(file_names, basename=False):
"""
Get views by the specified filenames.
Args:
file_names (str|list): A filename or list of filenames
basename (boolean, optional): Whether or not to match the basename
Returns:
list of views that match the filenames passed
"""
if not isinstance(file_names, list):
file_names = [file_names]
views = []
if basename:
for view in all_views():
for file_name in file_names:
if view.file_name() and os.path.basename(view.file_name()) == os.path.basename(file_name):
views.append(view)
else:
for window in sublime.windows():
for file_name in file_names:
view = window.find_open_file(file_name)
if view:
views.append(view)
return views
def get_source_scope(view):
"""
Args:
view (sublime.View): The view to retrieve the source scope from
Returns:
The source scope for the view
"""
return view.scope_name(0).split(' ')[0]
|
anthonykoch/SublimeTools | tests/test_View.py | import sublime
import sys
from sublime import Region
from unittesting import DeferrableTestCase
from unittest.mock import MagicMock
from SublimeTools.View import *
from SublimeTools.cuid import cuid
version = sublime.version()
views = []
invalid_points = [None, '', -1, float('nan'), {}, []]
class TestExec(DeferrableTestCase):
def setUp(self):
pass
def tearDown(self):
yield 1000
for view in views:
window = view.window()
if window:
window.focus_view(view)
window.run_command('close_file')
def create_file(self, contents=''):
window = self.window = sublime.active_window()
self.view = window.new_file()
self.view.run_command('insert', { 'characters': contents })
self.view.set_scratch(True)
self.view.set_read_only(True)
views.append(self.view)
def test_get_from_loc(self):
start_column = 23
start_line = 40
end_column = 42
end_line = 68
complex_loc = {
'start': {'column': start_column, 'line': start_line, },
'end': {'column': end_column, 'line': end_line, },
}
self.assertEqual(get_from_loc(complex_loc, 'start', 'line'), start_line)
self.assertEqual(get_from_loc(complex_loc, 'start', 'column'), start_column)
self.assertEqual(get_from_loc(complex_loc, 'end', 'line'), end_line)
self.assertEqual(get_from_loc(complex_loc, 'end', 'column'), end_column)
        self.assertRaises(Exception, get_from_loc, complex_loc, 'what', 'line')
        self.assertRaises(Exception, get_from_loc, complex_loc, 'start', 'hey')
def test_Position(self):
for invalid in invalid_points:
self.assertFalse(Position(invalid).has_begin())
self.assertFalse(Position(invalid).has_end())
self.assertFalse(Position(invalid, 1).has_begin())
self.assertTrue(Position(1).has_begin())
self.assertTrue(Position(1).has_end())
self.assertTrue(Position(1, 0).has_end())
self.assertFalse(Position().has_begin())
self.assertFalse(Position().has_end())
self.assertIsNone(Position().begin())
self.assertIsNone(Position().end())
self.assertEqual(Position(0, 2).to_region(), Region(0, 2))
self.assertEqual(Position(0, 2), Region(0, 2))
self.assertEqual(Position(0, 2), (0, 2))
self.assertEqual(Position(0, 2), [0, 2])
self.assertEqual(list(Position(0, 2)), [0, 2])
begin, end = Position(0, 2)
self.assertEqual(begin, 0, msg='position is iterable')
self.assertEqual(end, 2)
def test_SimpleLocation(self):
contents = 'roflsandlawls\nsaxandviolins'
self.create_file(contents=contents)
self.assertTrue(SimpleLocation(1, 0).has_line())
self.assertTrue(SimpleLocation(0, 0).has_line())
self.assertTrue(SimpleLocation(0, 0).has_column())
self.assertTrue(SimpleLocation(0, 1).has_column())
for invalid in invalid_points:
self.assertFalse(SimpleLocation(invalid, 0).has_line())
self.assertFalse(SimpleLocation(0, invalid).has_column())
self.assertFalse(SimpleLocation(invalid, 0).is_valid())
self.assertFalse(SimpleLocation(0, invalid).is_valid())
self.assertFalse(SimpleLocation(invalid, invalid).is_valid())
self.assertIsNone(SimpleLocation(invalid, 0).to_point(self.view))
self.assertIsNone(SimpleLocation(0, invalid).to_point(self.view))
self.assertIsNone(SimpleLocation(invalid, invalid).to_point(self.view))
self.assertIsNone(SimpleLocation(invalid, 3).to_region(self.view))
self.assertIsNone(SimpleLocation(1, invalid).to_region(self.view))
self.assertIsNone(SimpleLocation(invalid, invalid).to_region(self.view))
self.assertEqual(SimpleLocation(0, 0).to_point(self.view), 0)
self.assertEqual(SimpleLocation(0, len(contents) + 1).to_point(self.view), len(contents))
self.assertEqual(SimpleLocation(1, 0).to_point(self.view), 14)
self.assertEqual(SimpleLocation(1, 2).to_point(self.view), 16)
self.assertEqual(SimpleLocation(12, 34).to_json(), { 'line': 12, 'column': 34, })
self.assertEqual(SimpleLocation(0, 3).to_region(self.view), Region(3))
self.assertEqual(SimpleLocation(2, 5)['line'], 2)
self.assertEqual(SimpleLocation(2, 5)['column'], 5)
self.assertIsNone(SimpleLocation(2, 5)['lol'])
line, column = SimpleLocation(30, 50)
self.assertEqual(line, 30)
self.assertEqual(column, 50)
def test_ComplexLocation(self):
contents = 'whereareand\nandwhoarewe'
self.create_file(contents=contents)
self.assertEqual(
ComplexLocation(start={ 'line': 0, 'column': 0 }, end={ 'line': 0, 'column': 4 }).to_region(self.view),
Region(0, 4)
)
self.assertEqual(
ComplexLocation(start={ 'line': 0, 'column': 0 }, end={ 'line': 1, 'column': 4 }).to_region(self.view),
Region(0, 16),
msg='spanning multiple lines'
)
self.assertEqual(
ComplexLocation(start={ 'line': 1, 'column': 0 }, end={ 'line': 1, 'column': 6 }).to_region(self.view),
Region(12, 18)
)
self.assertIsNone(ComplexLocation().to_region(self.view))
self.assertIsNone(ComplexLocation(start={}, end={}).to_region(self.view))
for invalid in invalid_points:
self.assertIsNone(ComplexLocation(
start={ 'line': invalid, 'column': 1 },
end={ 'line': 1, 'column': 1 }
).to_region(self.view))
self.assertIsNone(ComplexLocation(
start={ 'line': 1, 'column': invalid },
end={ 'line': 1, 'column': 1 }
).to_region(self.view))
self.assertIsNone(ComplexLocation(
start={ 'line': 1, 'column': 1 },
end={ 'line': invalid, 'column': 1 }
).to_region(self.view))
self.assertIsNone(ComplexLocation(
start={ 'line': 1, 'column': 1 },
end={ 'line': 1, 'column': invalid }
).to_region(self.view))
def test_RenderLocation(self):
contents = 'why oh why\noh whyyyyyyyyy'
self.create_file(contents=contents)
for invalid in invalid_points:
self.assertEqual(RenderLocation(position={'start': 4, 'end': 2 }).start_point(self.view), 4)
self.assertEqual(RenderLocation(position={'start': 4, 'end': invalid }).start_point(self.view), 4)
self.assertIsNone(RenderLocation(position={'start': invalid, 'end': 12 }).start_point(self.view), 12)
self.assertEqual(RenderLocation(position={'start': 2, 'end': 3 }).end_point(self.view), 3)
self.assertEqual(RenderLocation(position={'start': 2, 'end': 3 }).end_point(self.view), 3)
self.assertEqual(RenderLocation(position={'start': invalid, 'end': 3 }).end_point(self.view), 3)
self.assertEqual(RenderLocation(start={'line': 1, 'column': 0}).start_point(self.view), 11)
self.assertIsNone(RenderLocation(start={'line': invalid, 'column': 0}).start_point(self.view))
self.assertIsNone(RenderLocation(start={'line': 0, 'column': invalid}).start_point(self.view))
self.assertEqual(RenderLocation(end={'line': 1, 'column': 0}).end_point(self.view), 11)
self.assertIsNone(RenderLocation(end={'line': invalid, 'column': 0}).end_point(self.view))
self.assertIsNone(RenderLocation(end={'line': 0, 'column': invalid}).end_point(self.view))
self.assertIsNone(RenderLocation().start_point(self.view))
self.assertIsNone(RenderLocation().end_point(self.view))
|
anthonykoch/SublimeTools | Exec.py | import sublime
import os
from Default.exec import AsyncProcess
from .EventEmitter import EventEmitter
class ProcessListener(EventEmitter):
# """
    # A listener for AsyncProcess, to be used in tandem with exec_cmd(), execjs(), and execjsfile().
    # Handlers can be attached as decorators, as callbacks, or as subclass methods:
# Example:
# listener = ProcessListener()
# @listener.on('data')
# def on_data(proc, data):
# print(data)
# @listener.on('finish')
# def on_finish(proc):
# print()
# # or listen to the events as so
# listener.on('data', on_data)
# listener.on('finish', on_finish)
# exec_cmd('which node', listener=listener)
# # or like this, but do notice that the methods are handle_data and handle_finish,
# # not on_data or on_finish
    # class AsyncListener(ProcessListener):
# def handle_data(proc, data):
# print(data)
# def handle_finish(proc):
# print(self.data)
# listener = AsyncListener()
# exec_cmd('which node', listener=listener)
# """
def __init__(self):
EventEmitter.__init__(self)
# super().__init__(self)
self.data = b''
self.finished = False
def on_data(self, proc, data):
self.emit('data', proc, data)
self.data += data
fn = getattr(self, 'handle_data', None)
if fn is not None and callable(fn):
fn(proc, data)
def on_finished(self, proc):
self.emit('finish', proc, self.data)
self.finished = True
fn = getattr(self, 'handle_finish', None)
if fn is not None and callable(fn):
fn(proc)
def exec_cmd(
command,
listener=None,
on_finish=None,
on_data=None,
working_dir='',
env={},
path='',
shell=False
):
"""
Executes a command asynchronously using sublime's own AsyncProcess class. Either
a process listener must be passed, or one of the two on_data or on_finish callbacks.
Failing to provide one of the above will raise an Exception.
Args:
command (str|list): A shell command (str) or non shell command (list)
        listener (ProcessListener, optional): An object implementing on_finished and on_data methods.
on_finish (callable, optional): called when the process exits
on_data (callable, optional): called when the process is emitting data
env (dict):
path (str):
shell (bool): Whether or not to execute as a shell command
Examples:
        def on_finish(proc, data):
print(data)
# Print the exit code for the command
print(proc.returncode)
exec_cmd(['node', filename], on_finish=on_finish)
        # Extending ProcessListener will remove some boilerplate code when you want to accumulate
all of the data the listener emits.
from SublimeTools.exec import ProcessListener
import os
class Listener(ProcessListener):
def handle_data(self, proc, data):
print(data)
def handle_finish(self, proc):
# Prints all the accumulated data
print(self.data)
# Or you can accumulate it manually
class Listener(object):
def __init__(self):
self.data = b''
def on_data(self, proc, data):
self.data += data
def on_finished(self, proc):
print(self.data)
dirname = os.path.dirname(filename)
listener = Listener()
exec_cmd(['node', filename], listener=listener, working_dir=dirname)
"""
    if listener is None:
        if not callable(on_finish) and not callable(on_data):
            raise Exception('Either a process listener or on_finish/on_data callbacks must be passed')
        listener = ProcessListener()
    if callable(on_finish):
        listener.on('finish', on_finish)
    if callable(on_data):
        listener.on('data', on_data)
if working_dir != '':
os.chdir(working_dir)
if isinstance(command, str):
cmd = None
        shell_cmd = command
shell = True
else:
cmd = command
shell_cmd = None
shell = False
child = AsyncProcess(cmd, shell_cmd, env, listener, path=path, shell=shell)
child.pid = child.proc.pid
return child
def execjs(content, node_path=None, **kwargs):
"""
Executes javascript with node. Node must be installed system wide or else node_path should
be passed. The working_dir passed will be used as the base from where `require` will resolve
modules.
Refer to exec_cmd for more options.
Args:
content (str): The content to execute
node_path (str, optional): The path to node to use, else the system installed path is used.
Example:
content = "require('events'); console.log('events');"
execjs()
"""
    cmd = ['node' if node_path is None else node_path, '-e', content]
    if not kwargs.get('working_dir'):
        raise Exception('working_dir is required')
return exec_cmd(cmd, **kwargs)
def execjsfile(absfilename, args=[], node_path=None, **kwargs):
"""
Executes a js file.
Refer to exec_cmd for more options.
Args:
absfilename (str): An absolute path to the file you want executed.
        args (list, optional): A list of parameters that are passed to the file
node_path (str, optional): The path to node to use, else the system installed path is used.
Example:
execjsfile('/users/you/memes.js', args=['--prefer-doge', 'true'])
"""
cmd = ['node' if node_path is None else node_path, absfilename] + args
return exec_cmd(cmd, **kwargs)
|
anthonykoch/SublimeTools | Logging.py |
DEBUG = 1 << 1
INFO = 1 << 2
WARNING = 1 << 3
ERROR = 1 << 4
CRITICAL = 1 << 5
DEFAULT_LEVELS = DEBUG | INFO | WARNING | ERROR | CRITICAL
class Logger(object):
"""
TODO: Figure out why the default logger doesn't work as expected
"""
def __init__(self, name="", level=DEFAULT_LEVELS):
self.level = level
self.name = name
    def to_level(self, level_names):
        mask = 0
        for level_name in level_names:
            if level_name in levels_by_name:
                mask |= levels_by_name[level_name]
            else:
                raise Exception('Invalid level name ' + str(level_name))
        return mask
def set_level(self, level):
self.level = level
def _log(self, args, level_name=''):
print('{}:{}'.format(self.name, level_name), *args)
def debug(self, *args):
if self.level & DEBUG != 0:
self._log(args, level_name='DEBUG')
def info(self, *args):
if self.level & INFO != 0:
self._log(args, level_name='INFO')
def warning(self, *args):
if self.level & WARNING != 0:
self._log(args, level_name='WARNING')
def error(self, *args):
if self.level & ERROR != 0:
self._log(args, level_name='ERROR')
def critical(self, *args):
if self.level & CRITICAL != 0:
self._log(args, level_name='CRITICAL')
levels_by_name = {
'DEBUG': DEBUG,
'INFO': INFO,
'WARNING': WARNING,
'ERROR': ERROR,
'CRITICAL': CRITICAL,
}
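# Illustrative usage: combine level flags with bitwise OR.
#   log = Logger(name='MyPlugin', level=DEBUG | ERROR)
#   log.debug('loaded')   # printed, DEBUG bit is set
#   log.info('skipped')   # suppressed, INFO bit is not set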
|
anthonykoch/SublimeTools | tests/test_Exec.py | import sublime
import sys
from unittesting import DeferrableTestCase
from unittest.mock import MagicMock
from SublimeTools.Exec import exec_cmd, execjsfile, ProcessListener
from SublimeTools.cuid import cuid
version = sublime.version()
class TestExec(DeferrableTestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_listener_events(self):
""" process listener emits events """
import os
listener = ProcessListener()
on_data = MagicMock()
on_finish = MagicMock()
listener.on('data', on_data)
listener.on('finish', on_finish)
proc = exec_cmd('echo lmao', listener=listener)
yield 2500
on_data.assert_called_with(proc, bytes('lmao{0}'.format(os.linesep), 'utf8'))
on_finish.assert_called_with(proc, bytes('lmao{0}'.format(os.linesep), 'utf8'))
self.assertEqual(listener.data, bytes('lmao' + os.linesep, 'utf8'))
def test_exec_cmd_listener(self):
""" exec_cmd calls handle methods on listener class """
import os
listener = ProcessListener()
listener.handle_data = MagicMock()
listener.handle_finish = MagicMock()
proc = exec_cmd('echo lmao', listener=listener)
yield 2500
listener.handle_data.assert_called_with(proc, bytes('lmao{0}'.format(os.linesep), 'utf8'))
listener.handle_finish.assert_called_with(proc)
self.assertEqual(listener.data, bytes('lmao' + os.linesep, 'utf8'))
def test_exec_cmd_callbacks(self):
""" exec_cmd calls callbacks """
import os
on_data = MagicMock()
on_finish = MagicMock()
proc = exec_cmd('echo lmao;', on_finish=on_finish, on_data=on_data)
yield 2500
on_data.assert_called_with(proc, bytes('lmao{0}'.format(os.linesep), 'utf8'))
on_finish.assert_called_with(proc, bytes('lmao{0}'.format(os.linesep), 'utf8'))
    def test_exec_cmd_raises(self):
        """ exec_cmd raises when neither a listener nor on_finish/on_data callbacks are passed """
        self.assertRaises(Exception, exec_cmd, 'echo "lol"')
|
Rahulgulia/greyatom-python-for-data-science | Project:-Loan-Approval-Analysis/code.py | # --------------
# Importing header files
import numpy as np
import pandas as pd
from scipy.stats import mode
import warnings
warnings.filterwarnings('ignore')
#Reading file
bank_data = pd.read_csv(path)
#Code starts here
bank = pd.DataFrame(bank_data)
categorical_var = bank.select_dtypes(include = 'object')
#print(categorical_var)
numerical_var = bank.select_dtypes(include = 'number')
#print(numerical_var)
banks = bank.drop(['Loan_ID'],axis=1)
bank_mode = banks.mode()
for x in banks.columns.values:
banks[x]=banks[x].fillna(value=bank_mode[x].iloc[0])
avg_loan_amount = pd.pivot_table(banks,index=['Gender', 'Married', 'Self_Employed'],values=['LoanAmount'],aggfunc=np.mean)
percentage_se = 9.12
percentage_nse = 59.61
bid_loan_term = 554
loan_groupby = banks.groupby('Loan_Status')
mean_values = loan_groupby.mean()
print(mean_values)
|
bhaveshAn/mycroft-business-news-read | __init__.py | <gh_stars>0
# TODO: Add an appropriate license to your skill before publishing. See
# the LICENSE file for more information.
# Below is the list of outside modules you'll be using in your skill.
# They might be built-in to Python, from mycroft-core or from external
# libraries. If you use an external library, be sure to include it
# in the requirements.txt file so the library is installed properly
# when the skill gets installed later by a user.
from adapt.intent import IntentBuilder
from mycroft.skills.core import MycroftSkill, intent_handler
from mycroft.util.log import LOG
from bs4 import BeautifulSoup
import requests
__author__ = 'bhaveshAn'
LOGGER = LOG
class BusinessNews(MycroftSkill):
# The constructor of the skill, which calls MycroftSkill's constructor
def __init__(self):
super(BusinessNews, self).__init__(name="BusinessNews")
# Initialize working variables used within the skill.
self.count = 0
def initialize(self):
intent = IntentBuilder("BusinessNewsIntent").require("BusinessNewsKeyword").build()
self.register_intent(intent, self.handle_intent)
def handle_intent(self, message):
fixed_url = 'https://www.cnbc.com/economy/'
response = requests.get(fixed_url)
news_string = ''
soup = BeautifulSoup(response.text, "html.parser")
for new in soup.findAll('li', {'class': 'headline'}):
news_string = news_string + str(new.find('a').text)
self.speak(news_string)
def create_skill():
return BusinessNews()
|
JonasGovaerts/openshift-applier | build/script/namespace_applier.py | import git
import sys
import os
import time
import subprocess
import shutil
from datetime import datetime
def log(logMessage):
now = datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
print('{"timestamp":"'+dt_string+'","message":"'+logMessage+'"}')
def pause():
try:
log("sleeping for " + timer + " seconds")
sleep_timer = float(timer)
time.sleep(sleep_timer)
except:
log("sleeping for 60 seconds")
time.sleep(60)
def gitDefinition():
try:
log("Initializing git credentials")
with git_repo.config_writer() as git_config:
git_config.set_value('user','email', '<EMAIL>')
git_config.set_value('user','name', 'test')
log("Initialized dummy git credentials")
except Exception as exception:
error = str(exception).replace('\n', ' ').replace('\r', '')
log("Something went wrong: "+error)
sys.exit()
def gitClone():
try:
repo_dir = "/resources/git"
log("Cloning git repository "+repo+" into "+repo_dir)
if "github" not in repo:
git_url = "https://"+username+":"+password+"@"+repo
git.Repo.clone_from(git_url, repo_dir)
else:
git_url = "https://"+repo
git.Repo.clone_from(git_url, repo_dir)
except Exception as exception:
error = str(exception).replace('\n', ' ').replace('\r', '')
log("Something went wrong: "+error)
shutil.rmtree('/resources/git')
sys.exit()
def gitPull():
try:
log("Pulling new updates from git...")
current_commit_id = git_repo.head.commit.hexsha
git_repo.remotes.origin.pull()
new_commit_id = git_repo.head.commit.hexsha
if new_commit_id == current_commit_id:
log("No new changes were found")
log("Latest commit: "+current_commit_id)
else:
log("Pulled latest changes from git")
log("Latest commit: "+new_commit_id)
except Exception as exception:
error = str(exception).replace('\n', ' ').replace('\r', '')
log("Something went wrong: "+error)
sys.exit()
def selectBranch():
try:
log("Switching to branch "+branch)
git_repo.git.checkout(branch)
except Exception as exception:
error = str(exception).replace('\n', ' ').replace('\r', '')
log("Something went wrong: "+error)
sys.exit()
def ocApply():
try:
findCMD = 'find -L /resources/git/'+subdir+' -type f \( -name \*.json -o -name \*.yml -o -name \*.yaml \)'
out = subprocess.Popen(findCMD, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = out.communicate()
filelist = stdout.decode().split()
for x in filelist:
command = 'oc apply -f '+x
output= subprocess.Popen(command, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = output.communicate()
if not stderr.rstrip():
log(str(stdout.decode().rstrip()))
else:
log(str(stderr.decode().rstrip()))
except Exception as exception:
error = str(exception).replace('\n', ' ').replace('\r', '')
log("Something went wrong: "+error)
sys.exit()
#### Application Run time #####
log("openshift-applier is starting...")
time.sleep(5)
try:
log("Initializing variables")
username = os.environ['USERNAME']
password = os.environ['PASSWORD']
repo = os.environ['GITREPO'].split('https://')[1]
branch = os.environ['BRANCH']
timer = os.environ['TIMER']
subdir = os.environ['DIR']
except:
log("Couln't initialize needed variables, required variables are: USERNAME, PASSWORD, GITREPO, BRANCH, TIMER, DIR")
sys.exit()
gitClone() # Clone the git repo to the location /resources/git
git_repo = git.Repo('/resources/git') # initialize the git_repo variable to globally use it
selectBranch()
gitDefinition()
while True:
gitPull()
ocApply()
pause()
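# Illustrative environment for a local run (all values are placeholders):
#   USERNAME=ci-bot PASSWORD=secret GITREPO=https://git.example.com/org/repo.git
#   BRANCH=main TIMER=60 DIR=manifests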
|
sysbot/route53manager | route53/views/zones.py | from boto.route53.exception import DNSServerError
from flask import Module
from flask import url_for, render_template, \
redirect, flash, request
from route53.forms import ZoneForm
from route53.connection import get_connection
from route53.xmltools import render_change_batch
zones = Module(__name__)
@zones.route('/')
def zones_list():
conn = get_connection()
response = conn.get_all_hosted_zones()
zones = response['ListHostedZonesResponse']['HostedZones']
return render_template('zones/list.html', zones=zones)
@zones.route('/new', methods=['GET', 'POST'])
def zones_new():
conn = get_connection()
form = ZoneForm()
if form.validate_on_submit():
response = conn.create_hosted_zone(
form.name.data,
comment=form.comment.data)
info = response['CreateHostedZoneResponse']
nameservers = ', '.join(info['DelegationSet']['NameServers'])
zone_id = info['HostedZone']['Id']
flash(u"A zone with id %s has been created. "
u"Use following nameservers %s"
% (zone_id, nameservers))
return redirect(url_for('zones_list'))
return render_template('zones/new.html', form=form)
@zones.route('/<zone_id>/delete', methods=['GET', 'POST'])
def zones_delete(zone_id):
conn = get_connection()
zone = conn.get_hosted_zone(zone_id)['GetHostedZoneResponse']['HostedZone']
error = None
if request.method == 'POST' and 'delete' in request.form:
try:
conn.delete_hosted_zone(zone_id)
flash(u"A zone with id %s has been deleted" % zone_id)
return redirect(url_for('zones_list'))
        except DNSServerError as e:
            # rebind to a new name: the "as" target is unbound when the except block exits
            error = e
return render_template('zones/delete.html',
zone_id=zone_id,
zone=zone,
error=error)
@zones.route('/<zone_id>')
def zones_detail(zone_id):
conn = get_connection()
resp = conn.get_hosted_zone(zone_id)
zone = resp['GetHostedZoneResponse']['HostedZone']
nameservers = resp['GetHostedZoneResponse']['DelegationSet']['NameServers']
return render_template('zones/detail.html',
zone_id=zone_id,
zone=zone,
nameservers=nameservers)
@zones.route('/<zone_id>/records')
def zones_records(zone_id):
conn = get_connection()
resp = conn.get_hosted_zone(zone_id)
zone = resp['GetHostedZoneResponse']['HostedZone']
record_resp = conn.get_all_rrsets(zone_id)
return render_template('zones/records.html',
zone_id=zone_id,
zone=zone,
recordsets=record_resp)
@zones.route('/clone/<zone_id>', methods=['GET', 'POST'])
def zones_clone(zone_id):
conn = get_connection()
zone_response = conn.get_hosted_zone(zone_id)
original_zone = zone_response['GetHostedZoneResponse']['HostedZone']
form = ZoneForm()
errors = []
if form.validate_on_submit():
response = conn.create_hosted_zone(
form.name.data,
comment=form.comment.data)
info = response['CreateHostedZoneResponse']
nameservers = ', '.join(info['DelegationSet']['NameServers'])
new_zone_id = info['HostedZone']['Id']
original_records = conn.get_all_rrsets(zone_id)
from route53.models import ChangeBatch, Change, db
for recordset in original_records:
            if recordset.type not in ["SOA", "NS"]:
change_batch = ChangeBatch(change_id='',
status='created',
comment='')
db.session.add(change_batch)
change = Change(action="CREATE",
name=recordset.name.replace(original_zone['Name'],
form.name.data),
type=recordset.type,
ttl=recordset.ttl,
values = recordset.resource_records,
change_batch_id=change_batch.id)
db.session.add(change)
changes = [change]
rendered_xml = render_change_batch({'changes': changes, 'comment': ''})
try:
from route53 import shortid
resp = conn.change_rrsets(shortid(new_zone_id), rendered_xml)
change_batch.process_response(resp)
db.session.commit()
except DNSServerError as error:
errors.append((recordset.type, recordset.name, error))
db.session.rollback()
if not errors:
flash(u"A zone with id %s has been created. "
u"Use following nameservers %s"
% (new_zone_id, nameservers))
return redirect(url_for('zones_list'))
return render_template('zones/clone.html',
form=form, errors=errors, original_zone=original_zone)
|
CBarraford/black | authority.py | # proof of authority
from ecdsa import VerifyingKey
import sys
import json
class Authority:
def __init__(self):
self.votes = []
def vote(self, vote):
# this is our first vote, no need to verify it
if len(self.votes) == 0:
self.votes.append(vote)
return True
# make sure voter has rights to vote
if not self.priviledged(vote.voter_address):
return False
# validate message. Don't validate the first vote.
if not vote.verify_message():
return False
# make sure signatures match before appending to the blockchain
if self.last_vote.signature == vote.last_signature:
self.votes.append(vote)
return True
return False
def signature_index(self, sig):
'''
Find the index on the auth chain for a specific vote signature
'''
        for i, vote in enumerate(self.votes):
if sig == vote.signature:
return i
return 0
def priviledged(self, address, votes=None, index=None):
'''
Check that a given address has authority on the chain
If index number is given, it will check if the address has authority at the time of that
index number.
'''
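        # Worked example (illustrative): two "true" votes and one "false"
        # vote recorded about an address tally to +1, so it is privileged;
        # an even split tallies to 0 and the address is rejected.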
if votes is None:
votes = self.votes
# if no votes have been cast, anyone can vote
if len(votes) == 0:
return True
# index of zero is not valid (as it would always return true)
if index == 0:
return False
poll = {}
# count the votes
last_sig = ""
for vote in votes[:index]:
            # check that this vote's last_signature matches the signature of the
            # previous vote. If they don't match, omit the vote.
            # If we're on the first vote, there is nothing to check.
            if last_sig != "" and last_sig != vote.last_signature:
                continue
            last_sig = vote.signature
if not vote.recipient_address in poll:
poll[vote.recipient_address] = {}
poll[vote.recipient_address][vote.voter_address] = vote.priviledged
        # The tally counts the up/down votes. An address with more up votes
        # than down votes is privileged (to vote)
        tally = 0
        for voter_address, priviledged in poll.get(address, {}).items():
# NOTE: we check specifically true/false in case privileged is NOT a bool
if priviledged is True:
tally += 1
elif priviledged is False:
tally -= 1
return tally > 0
def validate_chain(self, chain):
if len(chain) == 0:
return True
last_vote = chain[0]
current_index = 1
while current_index < len(chain):
vote = chain[current_index]
# Check that the hash of the block is correct
if vote.last_signature != last_vote.signature:
return False
# validate the message
if not self.verify_message(vote.raw_vote, vote.voter_address, vote.signature):
return False
# verify voter has rights to vote
if not self.priviledged(vote.voter_address, chain, current_index):
return False
last_vote = vote
current_index += 1
return True
def verify_message(self, message, pubKey, signature):
vk = VerifyingKey.from_string(bytearray.fromhex(pubKey))
return vk.verify(bytearray.fromhex(signature), message.encode("utf-8"))
@property
def last_vote(self):
# returns the last vote
return self.votes[-1]
@property
def last_signature(self):
vote = self.last_vote
return vote.signature
def str2bool(v):
return v.lower() in ('yes', 'true', 't', '1')
class Vote:
def __init__(self, raw_vote, voter_address, signature):
self.signature = signature
self.raw_vote = raw_vote
self.voter_address = voter_address
parts = self.raw_vote.split()
if not len(parts) == 3:
# invalid vote string (not enough parts)
raise ValueError('Invalid vote message format.')
self.priviledged = str2bool(parts[0])
self.recipient_address = parts[1]
self.last_signature = parts[2]
def verify_message(self):
vk = VerifyingKey.from_string(bytearray.fromhex(self.voter_address))
return vk.verify(bytearray.fromhex(self.signature), self.raw_vote.encode("utf-8"))
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
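# Illustrative vote message layout (hex values are placeholders):
#   raw_vote = "true <recipient_pubkey_hex> <previous_vote_signature_hex>"
#   vote = Vote(raw_vote, voter_address="<voter_pubkey_hex>", signature="<sig_hex>")
# The three space-separated fields map to priviledged, recipient_address
# and last_signature in Vote.__init__ above.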
|
CBarraford/black | scripts/create_keys.py | <reponame>CBarraford/black
#!/usr/bin/env python
from ecdsa import SigningKey, VerifyingKey
sk = SigningKey.generate() # uses NIST192p
vk = sk.get_verifying_key()
vk_hex = bytearray(vk.to_string()).hex().upper()
sk_hex = bytearray(sk.to_string()).hex().upper()
print("Signature Key: %s" % sk_hex)
print("Verify Key: %s" % vk_hex)
|
CBarraford/black | scripts/create_message.py | <filename>scripts/create_message.py
#!/usr/bin/env python
import sys
from ecdsa import SigningKey, VerifyingKey
if len(sys.argv) != 3:
print("Missing inputs. Pass sig key and message")
sys.exit(1)
sk = sys.argv[1]
message = sys.argv[2]
sk = SigningKey.from_string(bytearray.fromhex(sk))
vk = sk.get_verifying_key()
signature = sk.sign(message.encode("utf-8"))
print("Message: %s" % message)
print("Signature: %s" % bytearray(signature).hex().upper())
|
CBarraford/black | api.py | <gh_stars>0
import hashlib
import json
import sys
from time import time
import requests
from flask import Flask, jsonify, request
import black
import channel
import authority
app = Flask(__name__)
@app.route('/join', methods=['POST'])
def join_channel():
values = request.get_json()
required = ['remote_node', 'local_node', 'chan']
if not all(k in values for k in required):
return "Missing required fields", 400
remote_node = values['remote_node']
local_node = values['local_node']
chan = values['chan']
data = {'nodes': [local_node] }
response = requests.post(
f'{remote_node}/{chan}/nodes/register',
data = json.dumps(data),
headers={'Content-Type': 'application/json'}
)
body = response.json()
chan_info = body['channel']
# manually cloning the channel? gross
chan = channel.Channel(chan_info['name'])
chan.created_at = chan_info['created_at']
chan.ref = chan_info['ref']
# TODO: clone all registered nodes on remote machine
chan.chain.register_node(remote_node)
black.CHANNELS[chan.ref] = chan
# TODO: return a better response than just "OK"
return jsonify({'message': "OK"})
@app.route('/<chan>/nodes/register', methods=['POST'])
def register_nodes(chan):
values = request.get_json()
if values is None:
return "Error: Please supply a valid list of nodes", 400
required = ['nodes']
if not all(k in values for k in required):
return "Error: Please supply a valid list of nodes", 400
nodes = values.get('nodes')
if nodes is None:
return "Error: Please supply a valid list of nodes", 400
for node in nodes:
black.CHANNELS[chan].chain.register_node(node)
response = {
'message': 'New nodes have been added',
'total_nodes': list(black.CHANNELS[chan].chain.nodes),
'channel': {
'name': black.CHANNELS[chan].name,
'ref': black.CHANNELS[chan].ref,
'created_at': black.CHANNELS[chan].created_at,
}
}
return jsonify(response), 201
@app.route('/<chan>/nodes/resolve', methods=['GET'])
def consensus(chan):
replaced = black.CHANNELS[chan].chain.resolve_conflicts(chan)
authority = black.CHANNELS[chan].chain.authority.votes
chain = black.CHANNELS[chan].chain.chain
if replaced:
response = {
'message': 'Our chain was replaced',
'new_chain': chain,
'new_authority': [e.__dict__ for e in authority],
}
else:
response = {
'message': 'Our chain is authoritative',
'chain': chain,
'authority': [e.__dict__ for e in authority],
}
return jsonify(response), 200
@app.route('/<chan>/transactions/new', methods=['POST'])
def new_transaction(chan):
values = request.get_json()
# Check that the required fields are in the POST'ed data
required = ['message', 'pub_key', 'signature']
if not all(k in values for k in required):
return 'Missing values', 400
# Create a new Transaction
index = black.CHANNELS[chan].chain.new_transaction(
values['pub_key'], values['signature'], values['message']
)
response = {'message': f'Transaction will be added to Block {index}'}
return jsonify(response), 201
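# Illustrative request body for /<chan>/transactions/new (placeholder values):
#   {"message": "hello",
#    "pub_key": "<hex verifying key>",
#    "signature": "<hex signature of the message>"}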
@app.route('/<chan>/votes/new', methods=['POST'])
def new_vote(chan):
values = request.get_json()
# Check that the required fields are in the POST'ed data
required = ['vote', 'pub_key', 'signature']
if not all(k in values for k in required):
return 'Missing values', 400
# Create a new vote
vote = authority.Vote(values['vote'], values['pub_key'], values['signature'])
if black.CHANNELS[chan].chain.authority.vote(vote):
response = {'message': f'Vote added'}
return jsonify(response), 201
else:
        return jsonify({'message': 'No authority or bad signature or bad last sig reference'}), 401
@app.route('/<chan>/chain', methods=['GET'])
def full_chain(chan):
chain = black.CHANNELS[chan].chain.chain
authority = black.CHANNELS[chan].chain.authority.votes
response = {
'messages': {
'chain': chain,
'length': len(chain),
},
'authority': {
'chain': [e.__dict__ for e in authority],
'length': len(authority),
},
}
return jsonify(response), 200
@app.route('/channels/new', methods=['POST'])
def new_channel():
values = request.get_json()
required = ['name']
if not all(k in values for k in required):
return 'Missing values', 400
chan = channel.Channel(values['name'])
black.CHANNELS[chan.ref] = chan
response = {'channel': chan.ref}
return jsonify(response), 200
@app.route('/channels')
def list_channels():
return jsonify(black.CHANNELS), 200
@app.route('/channels/<chan>')
def list_channel(chan):
return jsonify(black.CHANNELS[chan]), 200
|
CBarraford/black | scripts/create_vote.py | <gh_stars>0
#!/usr/bin/env python
import sys
from ecdsa import SigningKey, VerifyingKey
if len(sys.argv) < 4:
    print("Missing inputs. Pass signing key, vote (1|0), last signature and optionally a recipient")
sys.exit(1)
sk = sys.argv[1]
vote = sys.argv[2]
last_sig = sys.argv[3]
recipient = sys.argv[4] if len(sys.argv) > 4 else None
sk = SigningKey.from_string(bytearray.fromhex(sk))
vk = sk.get_verifying_key()
if not recipient:
recipient = bytearray(vk.to_string()).hex().upper()
message = f'{vote} {recipient} {last_sig}'
signature = sk.sign(message.encode("utf-8"))
print("Message: %s" % message)
print("Signature: %s" % bytearray(signature).hex().upper())
|
CBarraford/black | black.py | <filename>black.py
import sys
from uuid import uuid4
import channel
import api
# Generate a globally unique address for this node
node_identifier = str(uuid4()).replace('-', '')
# place to store all channels (blockchains)
# TODO: all uppercase usually mean immutable, but alas, this var is mutable :(
CHANNELS = {}
def pp(msg):
print(msg, file=sys.stderr)
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-p', '--port', default=5000, type=int, help='port to listen on')
args = parser.parse_args()
port = args.port
app = api.app
app.run(host='0.0.0.0', port=port, debug=True)
|
CBarraford/black | channel.py | <reponame>CBarraford/black
import hashlib
import sys
from time import time
from urllib.parse import urlparse
import json
import requests
from ecdsa import VerifyingKey
import authority
class Channel:
def __init__(self, name):
self.name = name
self.created_at = time()
self.ref = hashlib.sha256(f"{name} {self.created_at}".encode('utf-8')).hexdigest()
self.chain = ChannelChain()
class ChannelChain:
def __init__(self):
self.current_msgs = []
self.chain = []
self.nodes = set()
self.authority = authority.Authority()
# Create the genesis block
self.new_block({'msg':'init channel'}, previous_hash=1)
def register_node(self, address):
parsed_url = urlparse(address)
self.nodes.add(parsed_url.netloc)
def new_block(self, msg, previous_hash=None):
self.chain.append({
'index': len(self.chain) + 1,
'created_at': time(),
'message': msg,
'hash': self.hash(msg),
'previous_hash': previous_hash or self.last_block['hash'],
# auth_sig references the latest signature of the most recent auth blockchain block
'authority_signature': previous_hash or self.authority.last_signature,
})
def new_transaction(self, pubKey, signature, msg):
if not self.verify_message(msg, pubKey, signature):
return 0
if self.authority.priviledged(pubKey):
self.new_block({
'public_key': pubKey,
'signature': signature,
'msg': msg,
})
return self.last_block['index'] + 1
return 0
def verify_message(self, message, pubKey, signature):
vk = VerifyingKey.from_string(bytearray.fromhex(pubKey))
return vk.verify(bytearray.fromhex(signature), message.encode("utf-8"))
def validate_chain(self, chain):
# TODO we should validate also that the chain given is the same chain as we already have.
# Otherwise a foreign node can create an entire new chain from scratch and replace all
# messages with new messages (as long as the fake chain is longer than the current).
# But this may no longer be an issue with signed messages or changing the consensus to
# something like Proof of Authority
last_block = chain[0]
current_index = 1
authority_index = 0
while current_index < len(chain):
block = chain[current_index]
# Check that the hash of the block is correct
if block['previous_hash'] != last_block['hash']:
return False
# verify message signature
msg = block['message']
if not self.verify_message(msg['msg'], msg['public_key'], msg['signature']):
return False
# verify authority
# ensure we don't reference an older authority signature than we've seen before.
auth_index = self.authority.signature_index(block['authority_signature'])
if auth_index < authority_index:
return False
# ensure the author has authority at given authority chain index
if not self.authority.priviledged(block['message']['public_key'], index=auth_index):
return False
# record new values for next run
authority_index = auth_index
last_block = block
current_index += 1
return True
def resolve_conflicts(self, chan):
        # TODO: don't download the entire chain from another node just to resolve conflicts. That is
        # not scalable. Instead, send the length of the chain we have and receive only the diff.
neighbours = self.nodes
new_chain = None
new_authority = None
# We're only looking for chains longer than ours
max_chain_length = len(self.chain)
max_authority_length = len(self.authority.votes)
# Grab and verify the chains from all the nodes in our network
for node in neighbours:
response = requests.get(f'http://{node}/{chan}/chain')
if response.status_code == 200:
length = response.json()['messages']['length']
chain = response.json()['messages']['chain']
# Check if the length is longer and the chain is valid
if length > max_chain_length and self.validate_chain(chain):
max_chain_length = length
new_chain = chain
length = response.json()['authority']['length']
chain = response.json()['authority']['chain']
# Check if the length is longer and the chain is valid
chain = [authority.Vote(e['raw_vote'], e['voter_address'], e['signature']) for e in chain]
if length > max_authority_length and self.authority.validate_chain(chain):
max_authority_length = length
new_authority = chain
replaced = False
# Replace our chain if we discovered a new, valid chain longer than ours
if new_chain:
self.chain = new_chain
replaced = True
if new_authority:
self.authority.votes = new_authority
replaced = True
return replaced
@property
def last_block(self):
# Returns the last Block in the chain
return self.chain[-1]
@staticmethod
def hash(block):
# We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes
block_string = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(block_string).hexdigest()
|
tmoer/a0c | src/network.py | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Neural network specification
@author: thomas
"""
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import random
from common.rl.make_game import check_space
from pdb import set_trace
class Model():
def __init__(self,Env,lr,n_mix,clip_gradient_norm,loss_type='count',bound='tanh',temp=1.0,entropy_l=0.0):
self.action_dim, self.action_discrete = check_space(Env.action_space)
self.state_dim, self.state_discrete = check_space(Env.observation_space)
if self.action_discrete:
raise ValueError('Discrete action space not implemented')
if len(self.action_dim) > 1:
raise ValueError('Cant handle multidimensional action spaces')
else:
self.action_dim = self.action_dim[0]
self.scale = Env.action_space.high[0] # assumes a symmetric action space [-scale,scale] for all action_dim
# placeholders
if not self.state_discrete:
self.x = x = tf.placeholder("float32", shape=np.append(None,self.state_dim),name='x') # s
else:
self.x = x = tf.placeholder("int32", shape=np.append(None,1)) # s
x = tf.squeeze(tf.one_hot(x,self.state_dim,axis=1),axis=2)
# feedforward
for i in range(2):
x = slim.fully_connected(x,128,activation_fn=tf.nn.elu)
# Mixture of Gaussians
if self.action_discrete:
raise ValueError('Only works for continuous outputs')
#print(self.action_dim)
n_params = n_mix *(2 * self.action_dim)
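        # 2 parameters per action dimension per mixture component (mean & scale, or alpha & beta)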
z = slim.fully_connected(x,n_params,activation_fn=None)
if n_mix > 1:
logits = slim.fully_connected(x,n_mix,activation_fn=None)
# params
#self.sigma_p = sigma_p = tf.Print(sigma_p,[sigma_p],summarize=16)
# Make distribution
if bound == 'tanh':
self.mu_p = mu_p = z[:,:(self.action_dim*n_mix)]
log_sigma = z[:,(self.action_dim*n_mix):(2*self.action_dim*n_mix)]
self.sigma_p = sigma_p = tf.clip_by_value(tf.nn.softplus(log_sigma),0.001,10000)
if n_mix == 1:
if self.action_dim == 1:
outdist = tf.distributions.Normal(mu_p,sigma_p)
else:
outdist = tf.contrib.distributions.MultivariateNormalDiag(mu_p,sigma_p)
else:
                # keep the mixture components as attributes so sample_action() can reach them later
                self.p_dist = p_dist = tf.distributions.Categorical(logits=logits,validate_args=True,allow_nan_stats=False)
                self.n_dist = n_dist = []
for i in range(n_mix):
if self.action_dim == 1:
n_dist.append(tf.distributions.Normal(mu_p[:,i],sigma_p[:,i]))
else:
n_dist.append(tf.contrib.distributions.MultivariateNormalDiag(loc=mu_p[:,(i*self.action_dim):((i+1)*self.action_dim)],scale_diag=sigma_p[:,(i*self.action_dim):((i+1)*self.action_dim)]))
outdist = tf.contrib.distributions.Mixture(cat=p_dist,components=n_dist)
# Wrap distribution
outdist = BoundedDistribution(outdist,scale=self.scale)
elif bound == 'beta':
self.alpha = alpha = z[:,:(self.action_dim*n_mix)]
self.beta = beta = z[:,(self.action_dim*n_mix):(2*self.action_dim*n_mix)]
if n_mix == 1:
outdist = tf.contrib.distributions.BetaWithSoftplusConcentration(alpha,beta)
outdist = BoundedDistributionBeta(outdist,scale=self.scale)
self.entropy = outdist.entropy()
else:
raise ValueError('Beta bounding not implemented for n_mix >1')
else:
raise ValueError('Unknown bounding type: {}'.format(bound))
# V loss
self.V_hat = slim.fully_connected(x,1,activation_fn=None)
self.V = tf.placeholder("float32", shape=[None,1],name='V')
self.V_loss = tf.losses.mean_squared_error(labels=self.V,predictions=self.V_hat)
# pi loss (needs a)
self.a = a = tf.placeholder("float32", shape=np.append(None,self.action_dim),name='a')
self.log_pi_a_s = outdist.log_prob(a) # shape (batch,)
self.pi_hat = outdist.prob(a) # shape (batch,)
if loss_type == 'count':
self.n_a = n_a = tf.placeholder("float32", shape=np.append(None,1),name='n_a')
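            # count-based surrogate: log pi(a|s) weighted by (log pi(a|s) - log n(a)); the
            # stop_gradient freezes that weight so gradients flow only through the trailing log pi term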
pi_loss = tf.stop_gradient(self.log_pi_a_s - tf.log(tf.squeeze(n_a,axis=1))) * self.log_pi_a_s
elif loss_type == 'Q':
self.n_a = n_a = tf.placeholder("float32", shape=np.append(None,1),name='Q')
pi_loss = tf.stop_gradient(self.log_pi_a_s - tf.squeeze((n_a*temp) - self.V_hat,axis=1)) * self.log_pi_a_s
self.pi_loss = tf.reduce_mean(pi_loss)
self.sample = outdist.sample()
self.pi_sample = outdist.prob(self.sample)
# training
self.loss = self.V_loss + self.pi_loss
if bound == 'beta':
self.loss -= tf.reduce_mean(entropy_l * self.entropy)
self.lr = tf.Variable(lr,name="learning_rate",trainable=False)
        optimizer = tf.train.RMSPropOptimizer(learning_rate=self.lr)  # use the lr variable so the feed in train() takes effect
var_list = tf.trainable_variables()
grads = tf.gradients(self.loss, var_list)
if clip_gradient_norm > 0.0:
clip_global = tf.Variable(clip_gradient_norm,trainable=False)
grads,self.gradient_norm = tf.clip_by_global_norm(grads, clip_global)
else:
self.gradient_norm = tf.global_norm(grads)
gvs = list(zip(grads, var_list))
self.train_op = optimizer.apply_gradients(gvs)
def train(self,D,n_epochs,lr):
sess = self.sess
D.reshuffle()
gn,VL,piL,V = [],[],[],[]
for epoch in range(n_epochs):
for sb,Vb,ab,a_nb in D:
_,VL_,piL_,gn_,V_ = sess.run([self.train_op,self.V_loss,self.pi_loss,self.gradient_norm,self.V],
feed_dict={self.x:sb,
self.V:Vb,
self.a:ab,
self.n_a:a_nb,
self.lr:lr
})
gn.append(gn_)
VL.append(VL_)
piL.append(piL_)
V.append(np.mean(V_))
if np.isnan(np.mean(gn)) or np.isnan(np.mean(VL)) or np.isnan(np.mean(piL)) or np.isnan(np.mean(V)):
set_trace()
t_loss = np.mean(VL)+np.mean(piL)
return {'V_loss':np.mean(VL),'pi_loss':np.mean(piL),'gn':np.mean(gn),'total_loss':t_loss,'V':np.mean(V)}
def predict_V(self,s):
sess = self.sess
return sess.run(self.V_hat,feed_dict={self.x:s})
def predict_pi(self,s,a):
sess = self.sess
return sess.run(self.pi_hat,feed_dict={self.x:s,
self.a:a})
def log_prob(self,s,a):
return self.sess.run([self.log_pi_a_s],feed_dict={self.x:s,
self.a:a})
def sample_action(self,s):
sess = self.sess
mix_list = sess.run(self.p_dist.sample(),feed_dict={self.x:s})
samples = np.array([sess.run(self.n_dist[mix].sample(),feed_dict={self.x:s}) for mix in mix_list])
return samples
def sample_action_and_pi(self,s):
sess = self.sess
return sess.run([self.sample,self.pi_sample],feed_dict={self.x:s})
class BoundedDistribution(object):
''' Bounded transformation of arbitrary continuous density with support on real line '''
def __init__(self,dist,scale):
self.dist = dist
self.scale = scale
def to_u(self,a):
return tf.atanh(tf.clip_by_value(a/self.scale,-0.999999,0.999999)) # clip what goes into atanh
def to_a(self,u):
return self.scale*tf.tanh(u)
def sample(self):
return self.to_a(self.dist.sample())
def log_prob(self,a):
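        # change of variables a = scale*tanh(u), so
        # log p_a(a) = log p_u(u) - sum_i log|da_i/du_i| = log p_u(u) - sum_i log(scale*(1 - tanh(u_i)^2))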
u = self.to_u(a)
return self.dist.log_prob(u) - tf.reduce_sum(tf.log(self.scale*(1-tf.square(
tf.clip_by_value(tf.tanh(u),-0.999999,0.999999)))),axis=1) # clip what comes out of tanh and goes into log
def prob(self,a):
return tf.exp(self.log_prob(a))
class BoundedDistributionBeta(object):
''' Bounded transformation of Beta distribution '''
def __init__(self,dist,scale):
self.dist = dist
self.scale = scale
def to_u(self,a):
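        # map a in [-scale, scale] onto the Beta support (0, 1); clipping keeps log_prob finite at the edges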
return tf.clip_by_value(((a/self.scale) + 1.0)/2.0,0.00001,0.999999)
def to_a(self,u):
return self.scale * ((2.0 * u) - 1.0)
def sample(self):
return self.to_a(self.dist.sample())
def log_prob(self,a):
u = self.to_u(a)
shape = a.get_shape().as_list()
constants = shape[-1]*tf.log(tf.constant(np.array(2.0)*np.squeeze(self.scale),dtype='float32'))
return tf.reduce_sum(self.dist.log_prob(u),axis=1) - constants
def prob(self,a):
return tf.exp(self.log_prob(a))
def entropy(self):
return self.dist.entropy()
class Database():
''' Database '''
def __init__(self,max_size,batch_size):
self.max_size = max_size
self.batch_size = batch_size
self.size = 0
self.insert_index = 0
self.experience = []
self.sample_array = None
self.sample_index = 0
def clear(self):
self.experience = []
self.insert_index = 0
self.size = 0
def store(self,experience):
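        # ring buffer: append until max_size is reached, then overwrite the oldest entries in insertion order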
if self.size < self.max_size:
self.experience.append(experience)
self.size +=1
else:
self.experience[self.insert_index] = experience
self.insert_index += 1
if self.insert_index >= self.size:
self.insert_index = 0
def store_from_array(self,*args):
for i in range(args[0].shape[0]):
entry = []
for arg in args:
entry.append(arg[i])
self.store(entry)
def reshuffle(self):
self.sample_array = np.arange(self.size)
random.shuffle(self.sample_array)
self.sample_index = 0
def __iter__(self):
return self
def __next__(self):
if (self.sample_index + self.batch_size > self.size) and (not self.sample_index == 0):
self.reshuffle() # Reset for the next epoch
raise(StopIteration)
if (self.sample_index + 2*self.batch_size > self.size):
indices = self.sample_array[self.sample_index:]
batch = [self.experience[i] for i in indices]
else:
indices = self.sample_array[self.sample_index:self.sample_index+self.batch_size]
batch = [self.experience[i] for i in indices]
self.sample_index += self.batch_size
arrays = []
for i in range(len(batch[0])):
to_add = np.array([entry[i] for entry in batch])
arrays.append(to_add)
return tuple(arrays)
next = __next__ |
tmoer/a0c | src/rl/atariwrapper.py | # -*- coding: utf-8 -*-
"""
Atari wrapper, based on https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
@author: thomas
"""
import gym
from gym import spaces
from collections import deque
import numpy as np
from PIL import Image
class ClipRewardWrapper(gym.RewardWrapper):
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return 0.5 * np.sign(reward)
class AtariWrapper(gym.Wrapper):
''' Chain domain '''
def __init__(self, env, skip=4, k=4,ram=False):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# Frame skip and pooling
self._obs_buffer = deque(maxlen=skip)
self._skip = skip
self._ram = ram
# Frame stacking
self.k = k
self.frames = deque([], maxlen=k)
# Frame wrapping
if not self._ram:
self.res = 84
self.observation_space = spaces.Box(low=0, high=1, shape=(self.res,self.res, k))
else:
self.res = env.observation_space.shape[0]
self.observation_space = spaces.Box(low=0, high=1, shape=(self.res, k))
def _observation(self):
assert len(self.frames) == self.k
return np.concatenate(self.frames, axis=-1)
def _resize(self, obs):
if not self._ram:
frame = np.dot(obs.astype('float32'), np.array([0.299, 0.587, 0.114], 'float32'))
frame = np.array(Image.fromarray(frame).resize((self.res, self.res),
resample=Image.BILINEAR), dtype=np.float32)/255.0
return frame.reshape((self.res, self.res, 1))
else:
obs = obs/255
return obs.astype('float32').reshape((self.res,1))
def _reset(self):
"""Clear buffers and re-fill by duplicating the first observation."""
ob = self.env.reset()
ob = self._resize(ob)
for _ in range(self.k): self.frames.append(ob)
self._obs_buffer.clear()
for _ in range(self._skip): self._obs_buffer.append(ob)
return self._observation()
def _step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for _ in range(self._skip):
obs, reward, done, info = self.env.step(action)
obs = self._resize(obs)
self._obs_buffer.append(obs)
total_reward += reward
if done:
break
if not self._ram:
max_frame = np.max(np.stack(self._obs_buffer), axis=0) # max over skips
else:
max_frame = obs # just take the last, max has no interpretation
self.frames.append(max_frame) # append to buffer
return self._observation(), total_reward, done, info
|
tmoer/a0c | src/rl/make_game.py | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Wrapper to generate the game environments
@author: thomas
"""
import gym
import numpy as np
from rl.rewardwrapper import RewardWrapper,PILCOWrapper,NormalizeWrapper
from rl.atariwrapper import AtariWrapper,ClipRewardWrapper
from rl.envs.chain import Chain, ChainOrdered
#from rl.doom_setup import make_doom_env_with_wrappers
from gym import spaces
import os
#import gym_ple
from gym.envs.registration import register
register(
id='FrozenLakeNotSlippery-v0',
entry_point='gym.envs.toy_text:FrozenLakeEnv',
kwargs={'map_name' : '4x4', 'is_slippery': False},
max_episode_steps=100,
reward_threshold=0.78, # optimum = .8196
)
register(
id='FrozenLakeNotSlippery-v1',
entry_point='gym.envs.toy_text:FrozenLakeEnv',
kwargs={'map_name' : '8x8', 'is_slippery': False},
max_episode_steps=100,
reward_threshold=0.78, # optimum = .8196
)
def make_game(game):
    # os.system('export ...') only affects a throwaway subshell; set the variable for this process instead
    os.environ['LD_LIBRARY_PATH'] = os.environ.get('LD_LIBRARY_PATH', '') + ':' + os.path.expanduser('~/.mujoco/mjpro150/bin')
if 'Chain' in game:
game,n = game.split('-')
if game == 'Chain':
Env = Chain(int(n))
elif game == 'ChainOrdered':
Env = ChainOrdered(int(n))
elif game == 'CartPole-vr' or game == 'MountainCar-vr' or game == 'Acrobot-vr' or game == 'LunarLander-vr':
Env = RewardWrapper(game)
elif game == 'CartPole-vp' or game == 'MountainCar-vp' or game == 'Acrobot-vp':
Env = PILCOWrapper(game)
elif game == 'CartPole-vn' or game == 'MountainCar-vn':
Env = NormalizeWrapper(game)
else:
Env = gym.make(game)
if type(Env) == gym.wrappers.time_limit.TimeLimit:
Env = Env.env
if game in ['Breakout-v0','Pong-v0','MontezumaRevenge-v0']:
Env = AtariWrapper(Env,skip=3,k=3,ram=False)
Env = ClipRewardWrapper(Env)
elif 'ram' in game:
Env = AtariWrapper(Env,skip=3,k=2,ram=True)
Env = ClipRewardWrapper(Env)
if 'CartPole' in game:
Env.observation_space = gym.spaces.Box(np.array([-4.8,-10,-4.8,-10]),np.array([4.8,10,4.8,10]))
return Env
def check_space(space):
'''check the properties of the env '''
if isinstance(space,spaces.Box):
dim = space.shape # should the zero be here?
discrete = False
elif isinstance(space,spaces.Discrete):
dim = space.n
discrete = True
else:
raise NotImplementedError
return dim, discrete |
tmoer/a0c | src/rl/policies.py | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Various policies
@author: thomas
"""
import numpy as np
import logging
logger = logging.getLogger('root')
logger.propagate = False
def policy(policy,model,hps,seed,eval_on_mean_output=False,eval_on_mean_params=False):
''' wrapper policy function '''
pass
def thompson_policy(s,model,sess,hps,seed,eval_on_mean_output=False,eval_on_mean_params=False):
''' Thompson sample value function in discrete action space
Input: s - state, Thompson sampling applied across first dimension.
Output: a - picked action '''
rep = s.shape[0]
state_seq = np.repeat(s,model.action_dim,axis=0)
action_seq = np.repeat(np.arange(0,model.action_dim)[None,:],rep,axis=0).reshape(-1,1)
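    # state_seq repeats each state once per action and action_seq tiles all actions per state,
    # so together they enumerate every (state, action) pair in one batch of size rep*action_dim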
rep_action_values = np.zeros([rep*model.action_dim,hps.n_thompson_sample])
# sample
for i in range(hps.n_thompson_sample):
action_values = sample_value(sess,model,hps,state_seq,action_seq,seed,eval_on_mean_output,eval_on_mean_params)
rep_action_values[:,i] = np.squeeze(action_values)
# max
max_action_values = np.max(rep_action_values,axis=1) # max over the repetitions
max_action_values = np.reshape(max_action_values,[rep,model.action_dim])
#a = np.argmax(max_action_values,axis=1)[:,None]
a = argmax_tiebreaking(max_action_values)
return a
def egreedy_policy(s,model,sess,hps,e,seed):
''' e-greedy policy on discrete action-space'''
# setup
#hps.n_thompson_sample = 1
#a_exploit = thompson_policy(s,model,sess,hps,seed,eval_on_mean_output=True,eval_on_mean_params=True)
rep = s.shape[0]
state_seq = np.repeat(s,model.action_dim,axis=0)
action_seq = np.repeat(np.arange(0,model.action_dim)[None,:],rep,axis=0).reshape(-1,1)
action_values = get_net_mean(sess,model,state_seq,action_seq,seed,hps.p_dropout,hps.output)
action_values = np.reshape(action_values,[rep,model.action_dim])
a_exploit = argmax_tiebreaking(action_values)
a_explore = get_discrete_random_action(model.action_dim,s.shape[0])
    a = np.array([(a1 if np.random.rand() > e else a2) for a1,a2 in zip(a_exploit,a_explore)])  # explore with probability e rather than a hard-coded 0.05
return a
def ucb_policy(s,model,sess,hps,seed,eval_on_mean_output=False,eval_on_mean_params=False):
''' upper confidence bound policy '''
#p_dropout = 1.0 if eval_on_mean_params else hps.p_dropout # some unexplainable bug if uncommented
p_dropout = hps.p_dropout
rep = s.shape[0]
state_seq = np.repeat(s,model.action_dim,axis=0)
action_seq = np.repeat(np.arange(0,model.action_dim)[None,:],rep,axis=0).reshape(-1,1)
mu = get_net_mean(sess,model,state_seq,action_seq,seed,p_dropout,hps.output)
sds = analytic_sd(sess,model,state_seq,action_seq,seed,p_dropout,hps.output)
#sds2 = sample_sd(40,sess,model,state_seq,action_seq,p_dropout,hps.output)
ucb_multipliers = np.random.uniform(1.7,2.3,(rep*model.action_dim,1))
ucb = np.reshape(mu + ucb_multipliers * sds,[-1,model.action_dim])
#a = np.argmax(ucb,axis=1)[:,None]
a = argmax_tiebreaking(ucb)
return a
def get_discrete_random_action(n_act,n_sample):
return np.random.randint(0,n_act,n_sample)[:,None]
def sample_value(sess,model,hps,sb,ab,seed,eval_on_mean_output=False,eval_on_mean_params=False):
''' Sample values for policy '''
if eval_on_mean_params:
p_dropout = 1.0
else:
p_dropout = hps.p_dropout
if eval_on_mean_output:
Qsa = get_net_mean(sess,model,sb,ab,seed,p_dropout,hps.output)
else:
Qsa = sample_net(sess,model,sb,ab,seed,p_dropout,hps.output)
return Qsa
def sample_net(sess,model,sb,ab,seed,p_dropout,output):
''' Sample from network output distribution '''
sample = sess.run(model.sample,feed_dict = {model.x:sb,
model.a:ab,
model.p_dropout: p_dropout,
model.seed:seed})
if output == 'categorical':
sample = model.transformer.to_value(sample)
return sample
def get_net_mean(sess,model,sb,ab,seed,p_dropout,output):
''' Expectation of network output distribution '''
if not output == 'categorical':
Qsa = sess.run(model.mean,feed_dict = {model.x:sb,
model.a:ab,
model.p_dropout: p_dropout,
model.seed:seed})
else:
density = sess.run(model.params,feed_dict = {model.x:sb,
model.a:ab,
model.p_dropout: p_dropout,
model.seed:seed})
Qsa = np.matmul(density,model.transformer.means)[:,None]
return Qsa
def analytic_sd(sess,model,sb,ab,seed,p_dropout,output):
''' analytic sd calculation from network parameters '''
params = get_net_params(sess,model,sb,ab,seed,p_dropout)
if output == 'gaussian':
sd = params[:,1][:,None]
elif output == 'categorical':
        # sd = sqrt( sum_i p_i * (x_i - mu)^2 )
bin_means = model.transformer.means
mu = np.repeat(np.matmul(params,bin_means)[:,None],params.shape[1],axis=1)
sd = np.sqrt(np.sum(params * np.square(bin_means - mu), axis=1))[:,None] #
elif output == 'mog':
# need to sample
sd = sd_mog(params)[:,None]
#sd = sample_sd(20,sess,model,sb,ab,p_dropout,output)
elif output == 'deterministic':
sd = sample_sd(15,sess,model,sb,ab,p_dropout,output)
return sd
def sd_mog(params):
    ''' Standard deviation of a Gaussian mixture:
        Var = sum_i p_i*(mu_i^2 + sd_i^2) - (sum_i p_i*mu_i)^2, and sd = sqrt(Var) '''
    n_mix = int(params.shape[1]/3)
    p = params[:,:n_mix]
    mu = params[:,n_mix:(2*n_mix)]
    sd = params[:,(2*n_mix):(3*n_mix)]
    return np.sqrt(np.sum(p * (np.square(mu) + np.square(sd)),axis=1) - np.square(np.sum(p*mu,axis=1)))
def sample_sd(n,sess,model,sb,ab,p_dropout,output):
''' get standard deviation estimates
Crude implementation, based on sampling. However, there is no better way
to integrate over the parameter uncertainty '''
samples = np.zeros([sb.shape[0],n])
for i in range(n):
seed = [np.random.randint(1e15),np.random.randint(1e15)] # new seed for parametric uncertainty
sample = sample_net(sess,model,sb,ab,seed,p_dropout,output)
samples[:,i] = np.squeeze(sample)
sds = np.std(samples,axis=1)[:,None]
return sds
def get_net_params(sess,model,sb,ab,seed,p_dropout):
''' Network parameters '''
params = sess.run(model.params,feed_dict = {model.x:sb,
model.a:ab,
model.p_dropout: p_dropout,
model.seed:seed})
return params
def argmax_tiebreaking(x):
''' own argmax because numpy.argmax does not break ties '''
try:
out = np.array([[np.random.choice(np.flatnonzero(a == a.max()))] for a in x]) # sparsely fails due to numerical errors between a and a.max()?
except:
out = np.array([[np.argmax(a)] for a in x])
return out |
tmoer/a0c | src/rl/rewardwrapper.py | # -*- coding: utf-8 -*-
"""
Chain environment
@author: thomas
"""
import gym.spaces
import gym
import numpy as np
from gym import Wrapper
class NormalizeWrapper(object):
''' Heuristically normalizes the reward scale for CartPole and MountainCar '''
def __init__(self,name):
# n = length of chain
if 'CartPole' in name:
self.env = gym.make('CartPole-v0')
elif 'MountainCar' in name:
self.env = gym.make('MountainCar-v0')
self.name = name
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
def reset(self):
return self.env.reset()
def step(self,a):
s,r,terminal,_ = self.env.step(a)
r = r/50
return s,r,terminal, _
class PILCOWrapper(object):
''' Wraps according to PILCO '''
def __init__(self,name):
# n = length of chain
if 'CartPole' in name:
self.env = gym.make('CartPole-v0')
elif 'MountainCar' in name:
self.env = gym.make('MountainCar-v0')
self.name = name
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
def reset(self):
return self.env.reset()
def step(self,a):
s,r,terminal,_ = self.env.step(a)
r = pilco_reward(s,self.name)
return s,r,terminal, _
def pilco_reward(s,game='CartPole-vp'):
''' use modified reward function as in Pilco '''
from scipy.stats import multivariate_normal
if game == 'CartPole-vp':
target = np.array([0.0,0.0,0.0,0.0])
elif game == 'Acrobot-vp':
target = np.array([1.0])
s = -np.cos(s[0]) - np.cos(s[1] + s[0])
elif game == 'MountainCar-vp':
target = np.array([0.5])
s = s[0]
elif game == 'Pendulum-vp':
target = np.array([0.0,0.0])
else:
        raise ValueError('no PILCO reward modification for this game')
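    # shaping below appears PILCO-inspired: reward derived from a unit-covariance Gaussian centred on the target state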
r = 1 - multivariate_normal.pdf(s,mean=target)
return r
class RewardWrapper2(Wrapper):
env = None
def __init__(self, env):
self.env = env
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
self.reward_range = self.env.reward_range
self.metadata = self.env.metadata
self._warn_double_wrap()
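        # walk down the wrapper chain until an env exposing _spec is found, to recover the game id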
while True:
if hasattr(env,'_spec'):
self.name = env._spec.id
break
else:
env = env.env
def reset(self):
return self.env.reset()
def step(self, action):
observation, reward, terminal, info = self.env.step(action)
return observation, self.reward(reward,terminal), terminal, info
def reward(self,r,terminal):
if 'CartPole' in self.name:
if terminal:
r = -1
else:
r = 0.005
elif 'MountainCar' in self.name:
if terminal:
r = 1
else:
r = -0.005
elif 'Acrobot' in self.name:
if terminal:
r = 1
else:
r = -0.005
elif 'LunarLander' in self.name:
r = r/250.0
return r
class RewardWrapper(object):
''' Chain domain '''
def __init__(self,name):
# n = length of chain
if name == 'CartPole-vr':
self.env = gym.make('CartPole-v1')
if type(self.env) == gym.wrappers.time_limit.TimeLimit:
self.env = self.env.env
elif name == 'MountainCar-vr':
self.env = gym.make('MountainCar-v0')
if type(self.env) == gym.wrappers.time_limit.TimeLimit:
self.env = self.env.env
elif name == 'Acrobot-vr':
self.env = gym.make('Acrobot-v1')
if type(self.env) == gym.wrappers.time_limit.TimeLimit:
self.env = self.env.env
elif name == 'LunarLander-vr':
self.env = gym.make('LunarLander-v2')
# self.env.metadata = {}
if type(self.env) == gym.wrappers.time_limit.TimeLimit:
self.env = self.env.env
self.name = name
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
def reset(self):
return self.env.reset()
def step(self,a):
s,r,terminal,_ = self.env.step(a)
if self.name == 'CartPole-vr':
if terminal:
r = -1
else:
r = 0
elif self.name == 'MountainCar-vr':
if terminal:
r = 1
else:
r = 0
elif self.name == 'Acrobot-vr':
if terminal:
r = 1
else:
r = 0
elif self.name == 'LunarLander-vr':
r = r/250.0
return s,r,terminal, _
def seed(self,seed):
self.env.seed(seed)
def render(self):
return self.env.render()
def close(self):
return self.env.close()
# Test
if __name__ == '__main__':
for game in ['MountainCar-vr','CartPole-vr']:
Env = RewardWrapper(game)
s = Env.reset()
for i in range(500):
a = Env.action_space.sample()
s,r,terminal,_ = Env.step(a)
if terminal:
print('Died in step',i,'with reward',r,' restarting')
s = Env.reset()
print('Finished') |
tmoer/a0c | submit.py | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Wrapper for submit function
@author: thomas
"""
if __name__ == '__main__' and __package__ is None:
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import argparse
from src.common.submit import submit
from config.hps import get_hps,override_hps_settings
from agent import agent
if __name__ == "__main__":
'''Set-up training'''
parser = argparse.ArgumentParser()
parser.add_argument('--hp', help='Hyperparameter configuration',default='')
parser.add_argument('--hpsetup', help='Hyperparameter configuration of slurm and hyperparameters and distribution',default='')
parser.add_argument('--no_plot', action='store_true',default=False)
args = parser.parse_args()
submit(args.hp,args.hpsetup,args.no_plot,agent,get_hps,override_hps_settings) |
tmoer/a0c | src/mcts.py | <reponame>tmoer/a0c<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
MCTS with tree uncertainty
@author: <NAME>, Delft University of Technology
"""
import numpy as np
import random
import copy
import time
from pdb import set_trace
from common.putils import my_argmax, timing
def bring_Env_to_correct_state(Env,seed,a_his):
    ''' Forward-simulates an environment based on a history of taken actions and a seed
Note: not used because seeding is just as slow as copy.deepcopy(env) '''
if len(a_his) == 0:
return Env
Env.reset()
#Env.seed(seed) # this takes just as long as copy.deepcopy(Env), and is therefore avoided. We choose to
# only consider games with deterministic initial state and deterministic transitions, which avoids seeding
for a in a_his:
Env.step(a)
return Env
def MCTS(root_index,root,Env,N,model=None,c=1.0,gamma=1.0,bootstrap_V=False,
block_loop=False,sigma_tree=False,backup_Q='on-policy',backup_sigma_tree='uniform',
seed=None,a_his=None,alpha=0.5,C_widening=1.0,use_prior=False,timeit=False,random_action_frac=0.1):
''' Monte Carlo Tree Search function '''
if root is None:
root = State(root_index,r=0.0,terminal=False,parent_action=None,model=model,action_space=Env.action_space,
sigma_tree=sigma_tree,alpha=alpha,C_widening=C_widening,use_prior=use_prior,
random_action_frac=random_action_frac) # initialize the root node
else:
root.parent_action = None # continue from current root
if root.terminal:
raise(ValueError("Can't do tree search from a terminal state. You need to call reset after the Env returns done."))
if timeit:
copy_time = 0.0
forward_time = 0.0
backward_time = 0.0
for i in range(N):
state = root # reset to root for new trace
if timeit: now = time.time()
mcts_env = copy.deepcopy(Env) # copy original Env to rollout from
#mcts_env = bring_Env_to_correct_state(Env,seed,a_his)
if timeit:
copy_time += time.time()-now
now = time.time()
while not state.terminal:
action = state.select(c=c)
s1,r,t,_ = mcts_env.step(action.index)
if hasattr(action,'child_state'):
state = action.child_state # select
continue
else:
state = action.add_child_state(s1,r,t,model,sigma_tree) # expand
state.evaluate(model,mcts_env,bootstrap_V=bootstrap_V,gamma=gamma) # evaluate/roll-out
# check for looping of expanded state
if block_loop:
looped = check_for_loop_in_trace(state,threshold=0.01)
if looped:
state.sigma_t = 0.0 # block all uncertainty
state.V = 0.0
break
if timeit:
forward_time += time.time()-now
now = time.time()
# backup the expansion
V = np.squeeze(state.V)
# loop back up
while state.parent_action is not None:
Q = state.r + gamma * V
action = state.parent_action
action.update(Q,backup_Q=backup_Q)
state = action.parent_state
V = state.update(action.index,Q,backup_Q=backup_Q,backup_sigma_tree=backup_sigma_tree)
if timeit:
backward_time += time.time()-now
if timeit:
total_time = copy_time + forward_time + backward_time
print('total time {}\n copy % {}, forward % {}, backward % {}'.format(total_time,100*copy_time/total_time,100*forward_time/total_time,100*backward_time/total_time))
return root
def check_for_loop_in_trace(state,threshold=0.01):
''' loops back through trace to check for a loop (= repetition of state) '''
index = state.index
action = state.parent_action
while state.parent_action is not None:
state = action.parent_state
if np.linalg.norm(state.index-index) < threshold:
return True
action = state.parent_action
return False
class Action():
''' Action object '''
def __init__(self,index,parent_state):
self.index = index
self.parent_state = parent_state
self.W = 0.0 # sum
self.n = 0 # counts
self.Q = 0.0 # mean
def add_child_state(self,s1,r,terminal,model,sigma_tree):
self.child_state = State(index=s1,r=r,terminal=terminal,parent_action=self,model=model,use_prior=self.parent_state.use_prior,action_space=self.parent_state.action_space,
sigma_tree=sigma_tree,alpha=self.parent_state.alpha,C_widening=self.parent_state.C_widening,
random_action_frac=self.parent_state.random_action_frac)
return self.child_state
    def update(self,val,backup_Q='on-policy'):
self.n += 1
if backup_Q == 'on-policy':
self.W += val
self.Q = self.W/self.n
elif backup_Q == 'max':
self.Q = val
def stable_normalizer(x,temp):
'''
Normalize vector.
Normalizes the values in x to sum to 1, where each value is first exponiated by temp, i.e.:
x^temp / sum(x^temp)
Keyword arguments:
x: a list of integers
temp: scalar, in range [0,inf]
'''
## check assumptions
x = np.array(x)
    if x.ndim > 1:
        # raising here would make the flatten below unreachable; warn and continue instead
        import warnings
        warnings.warn('x should be a 1D array, but received input with dimensionality {}. '
                      'Flattening the array to proceed.'.format(x.shape))
        x = x.flatten()
if len(x) == 0:
raise IndexError('x cannot be an empty vector')
if temp < 0:
raise ValueError('Temperature below 0 does not make sense for this normalization')
# compute
x = x / np.max(x)
return (x ** temp)/np.sum(x ** temp)
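# Example: stable_normalizer([1., 3., 6.], temp=1.0) -> [0.1, 0.3, 0.6]; larger temp
# sharpens the result toward the max count, while temp -> 0 flattens it toward uniform.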
def normalizer(x,temp):
return np.abs((x ** temp)/np.sum(x ** temp))
class State():
''' State object '''
def __init__(self,index,r,terminal,parent_action,model,action_space,sigma_tree=False,use_prior=False,
alpha=0.5,C_widening=1.0,random_action_frac=0.1):
''' Initialize a new state '''
self.index = index # state
self.r = r # reward upon arriving in this state
self.terminal = terminal
self.parent_action = parent_action
self.n = 0 # visitation count
self.sigma_tree = sigma_tree # boolean indicating use of sigma_tree
self.use_prior = use_prior
self.model = model
self.alpha=alpha
self.C_widening=C_widening
self.action_space = action_space
self.random_action_frac = random_action_frac
self.sigma_t = 1.0 if not terminal else 0.0
if not terminal:
self.child_actions = []
self.priors = []
self.sigma_actions_t = []
#self.add_child_actions()
def required_number_of_children(self):
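        # progressive widening for continuous actions: allow roughly C_widening * n^alpha
        # children (at least 2), so the action set grows sublinearly with the visit count n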
return np.max([2,np.ceil(self.C_widening * (self.n ** self.alpha))])
#@timing
def add_child_actions(self):
''' Adds child nodes for all actions '''
if self.required_number_of_children() > len(self.child_actions):
# add a child action
if np.random.random() > self.random_action_frac:
a,prior = self.model.sample_action_and_pi(self.index[None,:])
else:
a = self.action_space.sample()
                prior = np.exp(self.model.log_prob(self.index[None,:],a[None,:]))  # exponentiate so it matches the probability returned by sample_action_and_pi
a=np.squeeze(a,axis=0)
if a.ndim == 0:
a = a[None]
# add a child
self.child_actions.append(Action(a,parent_state=self))
if self.use_prior:
self.priors.append(np.squeeze(prior))
if self.sigma_tree:
self.sigma_actions_t.append(1.0)
#@timing
def select(self,c):
''' Select one of the child actions based on UCT rule '''
# first check whether we need to add a child
self.add_child_actions()
Q = np.array([child_action.Q for child_action in self.child_actions],dtype='float32')
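        # UCT-style exploration bonus: untried actions get infinite priority; visited ones decay as c*sqrt(N)/n_a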
U = np.array([c * (np.sqrt(self.n)/child_action.n) if child_action.n >= 1 else np.Inf for child_action in self.child_actions],dtype='float32')
if self.use_prior:
U *= np.array(self.priors,dtype='float32')
if self.sigma_tree:
U *= np.array(self.sigma_actions_t,dtype='float32')
scores = np.squeeze(Q + U)
winner = my_argmax(scores)
if np.any(np.isnan(scores)):
print('Q (means): {}, U (UCB): {}'.format(Q,U))
raise ValueError('Nans produced in select step')
#set_trace()
return self.child_actions[winner]
def return_results(self,decision_type='count',loss_type='count',V_decision='on-policy',temperature=1):
# aggregate some results
counts = np.array([child_action.n for child_action in self.child_actions],dtype='float32')
Q = np.array([child_action.Q for child_action in self.child_actions],dtype='float32')
a_list = [child_action.index for child_action in self.child_actions]
# decision
if decision_type == 'count':
a_argmax = my_argmax(counts)
elif decision_type == 'mean':
Q2 = np.array([child_action.Q if child_action.n > 0 else -np.Inf for child_action in self.child_actions])
a_argmax = my_argmax(Q2)
a_chosen = self.child_actions[a_argmax].index
# loss
if loss_type == 'count':
probs = stable_normalizer(counts,temperature)
elif loss_type == 'Q':
probs = Q # needs logsumexp
# estimate V
if V_decision == 'on_policy':
V = np.sum((counts/np.sum(counts))*Q)[None]
elif V_decision == 'max':
V = np.max(Q)[None]
return probs,a_list,V,a_chosen,a_argmax
#@timing
def evaluate(self,model=None,Env=None,bootstrap_V=False,gamma=1.0):
self.n += 1
if self.terminal:
self.V = 0.0
else:
if bootstrap_V:
self.V = model.predict_V(self.index[None,])
else:
self.V = rollout(self.index,Env,policy='random',model=model,gamma=gamma,a_init=self.child_actions[0].index)
#self.child_actions[0].update(self.V) # already log which child action was first in the roll-out
def update(self,a,Q,backup_Q='on-policy',backup_sigma_tree='uniform'):
''' update statistics on back-ward pass'''
self.n += 1
# update tree sigma
if self.sigma_tree:
self.sigma_actions_t[a] = self.child_actions[a].child_state.sigma_t
if backup_sigma_tree == 'uniform':
self.sigma_t = np.sum(self.sigma_actions_t)/len(self.sigma_actions_t)
elif backup_sigma_tree == 'on-policy':
counts = [child_action.n if child_action.n > 0 else 1 for child_action in self.child_actions]
                self.sigma_t = np.sum(np.array(self.sigma_actions_t) * np.array(counts))/np.sum(counts)  # elementwise product needs arrays, not Python lists
elif backup_sigma_tree == 'max':
Q = np.array([child_action.Q if child_action.n > 0 else -np.Inf for child_action in self.child_actions])
amax = np.argmax(Q)
self.sigma_t = self.sigma_actions_t[amax]
# pass-on value for upwards propagation
if backup_Q == 'on-policy':
V = Q
elif backup_Q == 'max':
Q = np.array([child_action.Q if child_action.n > 0 else -np.Inf for child_action in self.child_actions])
V = np.max(Q)
return V
def forward(self,a,s1,r,terminal,model):
if not hasattr(self.child_actions[a],'child_state'):
# still need to add the next state
self.child_actions[a].add_child_state(s1,r,terminal,model,self.sigma_tree)
elif np.linalg.norm(self.child_actions[a].child_state.index-s1) > 0.01:
print('Warning: this domain seems stochastic. Throwing away the tree')
#print(self.child_actions[a].child_state.index - s1)
#print('Timestep {}'.format(t))
#print(self.child_actions[a].n,self.child_actions[a].child_state.n,self.child_actions[a].child_state.terminal)
#print(a,self.child_actions[a].index)
return None
else:
return self.child_actions[a].child_state
def rollout(s,Env,policy,model,gamma,roll_max=300,a_init=None):
''' Small rollout function to estimate V(s)
policy = random or targeted'''
terminal = False
R = 0.0
for i in range(roll_max):
if i == 0 and a_init is not None:
a = a_init
else:
if policy == 'random':
a = Env.action_space.sample()
elif policy == 'targeted':
pi = np.squeeze(model.predict_pi(s[None,]))
a = np.random.choice(len(pi),p=pi)
s1,r,terminal,_ = Env.step(a)
R += (gamma**i)*r
s = s1
if terminal:
break
return R
def display_info(root,time,c):
''' Display MCTS node info for debugging '''
if root is not None:
print('MCTS status for timestep {}'.format(time))
Q = [child_action.Q for child_action in root.child_actions]
print('Q values: {}'.format(Q))
        print('counts: {}'.format([child_action.n for child_action in root.child_actions]))
priors = np.array(root.priors)
print('priors: {}'.format(priors))
U = [c * (np.sqrt(1 + root.n)/(1 + child_action.n)) for child_action in root.child_actions]
print('U: {}'.format(U))
if root.use_prior:
U *= priors
scores = np.squeeze(np.array([Q]) + np.array([U]))
print('scores: {}'.format(scores))
print('winner: {}'.format(np.argwhere(scores == np.max(scores)).flatten()))
print('-----------------------------') |
tmoer/a0c | jobs/expand_jobs_over_games.py | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Expand a submission over games
@author: thomas
"""
if __name__ == '__main__' and __package__ is None:
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))
import os
import argparse
from common.visualize import make_name
def expand_job(games,job,hp,hp_setup,item1=None,seq1=[None],item2=None,seq2=[None],item3=None,seq3=[None]):
# hacky way to bring in games
#games = ['CartPole-vr','MountainCar-vr','Acrobot-vr','FrozenLake-v0','FrozenLakeNotSlippery-v0','FrozenLakeNotSlippery-v1']
#games = ['Breakout-ramDeterministic-v0','Pong-ramDeterministic-v0','AirRaid-ramDeterministic-v0','Amidar-ramDeterministic-v0',
# 'Enduro-ramDeterministic-v0','MontezumaRevenge-ramDeterministic-v0','Venture-ramDeterministic-v0']
# Regarding Atari:
# Assault, Freeway, Seaquest have different initial states
file = os.getcwd() + '/' + job
with open(file,'w') as fp:
fp.write('#!/bin/sh\n')
for i,game in enumerate(games):
for j,it1 in enumerate(seq1):
for k,it2 in enumerate(seq2):
for l,it3 in enumerate(seq3):
fp.write('python3 submit.py --hpsetup game={},{} --hp {}'.format(game,hp_setup,hp))
if item1 is not None:
fp.write(',{}={}'.format(item1,it1))
if item2 is not None:
fp.write(',{}={}'.format(item2,it2))
if item3 is not None:
fp.write(',{}={}'.format(item3,it3))
hyperloop_name = make_name('',item1,it1,item2,it2,item3,it3)
if hyperloop_name != '':
fp.write(',name={}'.format(hyperloop_name))
fp.write('\n')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--games', nargs='+',type=str,default=[])
parser.add_argument('--job', default='job.sh')
parser.add_argument('--slurm_mode', default='off')
parser.add_argument('--hp', help='Hyperparameter configuration',default='')
parser.add_argument('--hpsetup', help='Hyperparameter configuration of slurm and hyperparameters and distribution',default='')
# extra items
parser.add_argument('--item1',type=str,default=None)
parser.add_argument('--seq1', nargs='+',type=str,default=[None])
parser.add_argument('--item2',type=str,default=None)
parser.add_argument('--seq2', nargs='+',type=str,default=[None])
parser.add_argument('--item3',type=str,default=None)
parser.add_argument('--seq3', nargs='+',type=str,default=[None])
args = parser.parse_args()
if args.slurm_mode == 'short':
args.hpsetup += ',slurm=True,slurm_qos=short,slurm_time=3:59:59'
elif args.slurm_mode == 'long':
args.hpsetup += ',slurm=True,slurm_qos=long,slurm_time=5-0:00:00'
expand_job(games=args.games,job=args.job,hp=args.hp,hp_setup=args.hpsetup,
item1=args.item1,seq1=args.seq1,item2=args.item2,seq2=args.seq2,
item3=args.item3,seq3=args.seq3) |
tmoer/a0c | src/rl/envs/chain.py | # -*- coding: utf-8 -*-
"""
Chain environment
@author: thomas
"""
import gym.spaces
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from rl.policies import get_net_mean, get_net_params, sample_sd, analytic_sd, thompson_policy, ucb_policy
import matplotlib.patches as patches
#plt.style.use('ggplot')
plt.rcParams['lines.linewidth'] = 4
plt.rcParams.update({'font.size': 11})
plt.rcParams['axes.facecolor']='white'
plt.rcParams['savefig.facecolor']='white'
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = ['Latin Modern Math']
plt.rcParams['xtick.labelsize'] = 15
plt.rcParams['font.weight'] = 'bold'
plt.rcParams['ytick.labelsize'] = 15
plt.locator_params(axis='x', nbins=3)
plt.ion()
class ChainOrdered(object):
''' Chain domain '''
def __init__(self,n=10):
# n = length of chain
self.action_space = gym.spaces.Discrete(2)
self.observation_space = gym.spaces.Discrete(n+1)
self.n = n
self.state = 0
self.correct = np.repeat(1,n)
def reset(self):
self.state = 0
return self.state
def step(self,a):
if a == 0:
# move back
self.state = 0
r = 0
terminal = True
elif a == 1:
# move forward
self.state += 1
if self.state == self.n:
r = 1
terminal = True
else:
r = 0
terminal = False
else:
raise ValueError('Action not possible')
return self.state,r,terminal, {}
def seed(self,seed):
pass # deterministic anyway
class Chain(object):
''' Chain domain '''
def __init__(self,n=10):
# n = length of chain
self.action_space = gym.spaces.Discrete(2)
self.observation_space = gym.spaces.Discrete(n+1)
self.n = n
self.state = 0
self.correct = np.random.randint(0,2,n) # correct action in each state
self.counts = np.zeros((self.n,2))
def reset(self):
self.state = 0
return self.state
def step(self,a):
self.counts[self.state,a] += 1
if a != self.correct[self.state]:
# move back
self.state = 0
r = 0
terminal = True
elif a == self.correct[self.state]:
# move forward
self.state += 1
if self.state == self.n:
r = 1
terminal = True
else:
r = 0
terminal = False
else:
raise ValueError('Action not possible')
return self.state,r,terminal, {}
def seed(self,seed):
pass # deterministic anyway
class ChainLoop(object):
''' Chain domain '''
def __init__(self,n=10):
# n = length of chain
self.action_space = gym.spaces.Discrete(2)
self.observation_space = gym.spaces.Discrete(n+1)
self.n = n
self.state = 0
self.correct = np.random.randint(0,2,n) # correct action in each state
self.counts = np.zeros((self.n,2))
def reset(self):
self.state = 0
return self.state
def step(self,a):
self.counts[self.state,a] += 1
if a != self.correct[self.state]:
# move back
self.state = 0
r = 0
terminal = False
elif a == self.correct[self.state]:
# move forward
self.state += 1
if self.state == self.n:
r = 1
terminal = True
else:
r = 0
terminal = False
else:
raise ValueError('Action not possible')
return self.state,r,terminal, {}
def seed(self,seed):
pass # deterministic anyway
class ChainDomainPlotter(object):
def __init__(self,Env):
self.fig,self.ax = plt.subplots(1,figsize=(Env.n*2,4))
self.n = Env.n
self.truth = Env.correct
for i in range(self.n):
for j in range(2):
if self.truth[i]==j:
col = 'g'
else:
col = 'r'
self.ax.add_patch(patches.Circle((i,j), radius=0.05,color=col))
self.ax.set_xlim([-1,self.n+1])
self.ax.set_ylim([-1,2])
self.fig.canvas.draw()
def update(self,counts):
self.ax.clear()
for i in range(self.n):
for j in range(2):
if self.truth[i]==j:
col = 'g'
else:
col = 'r'
self.ax.add_patch(patches.Circle((i,j), radius=0.05,color=col))
self.ax.text(i-0.2,j-0.2,'s = {}, a={}\n N = {}'.format(i,j,int(counts[i,j])))
self.fig.canvas.draw()
class ChainPlotter(object):
def __init__(self,truth,n_plot):
self.fig,self.ax = plt.subplots(2,n_plot,figsize=(n_plot*10,4),sharex=True,sharey=True)
self.pl = self.ax.flatten('F')
self.n = 2*n_plot
# setup for predictions
self.sb = np.repeat(np.arange(0,n_plot,1),2)[:,None]
self.ab = np.array([0,1]*n_plot)[:,None]
self.truth = truth
self.fig.canvas.draw()
def update(self,sess,model,hps,ep):
# clear plots
for ax in self.pl:
ax.clear()
overall_means = np.zeros([hps.n_rep_visualize,self.n])
overall_max_dens = np.ones([self.n])*-np.inf
for k in range(hps.n_rep_visualize):
# get prediction parameters
seed = [np.random.randint(1e15),np.random.randint(1e15)] # new seed
params = get_net_params(sess,model,self.sb,self.ab,seed,hps.p_dropout)
means = get_net_mean(sess,model,self.sb,self.ab,seed,hps.p_dropout,output=hps.output)
overall_means[k,:] = means[:,0]
#print(np.concatenate([np.array([0,0,1,1,2,2])[:,None],np.array([0,1,0,1,0,1])[:,None],params],axis=1))
# need to determine range
if hps.output != 'categorical':
if hps.output == 'gaussian':
mu = params[:,0]
sigma = params[:,1]
elif hps.output == 'mog':
mu = params[:,hps.n_mix:(hps.n_mix*2)]
sigma = params[:,(2*hps.n_mix):(3*hps.n_mix)]
elif hps.output == 'deterministic':
mu = params[:,0]
sigma = 1.0
max_sd = np.max(sigma)
lower,upper = np.min(mu)-3*max_sd,np.max(mu)+3*max_sd
else:
lower,upper = model.transformer.plot_edges[0],model.transformer.plot_edges[-1]
# update all plots
x = np.linspace(lower,upper,100)
for i in range(self.n):
#self.pl[i].set_xlim([lower,upper])
param = params[i,:]
if hps.output == 'deterministic':
max_dens = 1.0
overall_max_dens[i] = 1.0
mean = means[i]
self.pl[i].plot([mean,mean],[0,max_dens],':')
else:
if hps.output == 'gaussian' or hps.output == 'mog':
if hps.output == 'gaussian':
dens = norm.pdf(x,param[0],param[1])
elif hps.output == 'mog':
dens = [param[j]*norm.pdf(x,param[hps.n_mix+j],param[2*hps.n_mix+j]) for j in range(hps.n_mix)]
dens = np.sum(np.array(dens),axis=0)
#print(x,param,dens)
self.pl[i].plot(x,dens,color='cornflowerblue')
elif hps.output == 'categorical':
dens = param
edges = model.transformer.plot_edges
self.pl[i].hist(model.transformer.means,bins=edges,weights=dens,color='cornflowerblue')
overall_max_dens[i] = np.max([overall_max_dens[i],np.max(dens)])
# add the mean
grand_means = np.mean(np.array(overall_means),axis=0)
seed = [np.random.randint(1e15),np.random.randint(1e15)] # new seed for parametric uncertainty
grand_sds = analytic_sd(sess,model,self.sb,self.ab,seed,hps.p_dropout,hps.output)
#grand_sds = np.ones([len(grand_means),1])
# get policy estimates
s = np.arange(0,int(self.n/2),1)[:,None]
a_thompson = np.array([thompson_policy(s,model,sess,hps,seed,eval_on_mean_output=False,eval_on_mean_params=False) for i in range(100)])
a_ucb = np.array([ucb_policy(s,model,sess,hps,seed,eval_on_mean_output=False,eval_on_mean_params=False) for i in range(100)])
thompson_probs = np.zeros(self.n)
ucb_probs = np.zeros(self.n)
for j,(state,action) in enumerate(zip(self.sb,self.ab)):
thompson_probs[j] = np.mean(a_thompson[:,state,:] == action)
ucb_probs[j] = np.mean(a_ucb[:,state,:] == action)
for i in range(self.n):
grand_mean = grand_means[i]
grand_sd = grand_sds[i]
max_dens = overall_max_dens[i] #np.max(dens) if 'dens' in locals() else 1
self.pl[i].plot([grand_mean,grand_mean],[0,max_dens],'--',color='orange')
#self.pl[i].plot([grand_mean-2*grand_sd,grand_mean+2*grand_sd],[max_dens/2,max_dens/2],'--',color='orange')
            self.pl[i].text(0.1,0.75,r'$\mu$={:0.2f}'.format(grand_mean),transform=self.pl[i].transAxes)
            self.pl[i].text(0.55,0.75,r'$\sigma$={:0.2f}'.format(grand_sds[i][0]),transform=self.pl[i].transAxes)
#self.pl[i].text(0.1,0.75,'$\mu$={:0.2f}\n$\sigma$={:0.2f}'.format(grand_mean,grand_sds[i][0]),transform=self.pl[i].transAxes)
#self.pl[i].text(0.55,0.75,'tho={:0.2f}\nucb={:0.2f}'.format(thompson_probs[i],ucb_probs[i]),transform=self.pl[i].transAxes)
for j in range(int(self.n/2)):
for l in range(2):
if self.truth[j]==l:
val = 1.
col = 'g'
else:
val = 0.
col = 'r'
self.ax[l,j].add_patch(patches.Rectangle((0.01,0.01),0.98,0.98,linewidth=10,edgecolor=col,facecolor='none',transform=self.ax[l,j].transAxes))
if j>0:
plt.setp(self.ax[l,j].get_yticklabels(), visible=False)
if l==0:
plt.setp(self.ax[l,j].get_xticklabels(), visible=False)
#self.ax[l,j].set_title('V={:0.2f}'.format(val))
self.ax[l,j].set_ylim([0,1.0])
self.ax[l,j].set_xlim([-2.5,2.5])
self.fig.canvas.draw()
self.fig.savefig(hps.result_dir + 'episode_{}'.format(ep),dpi=300)
self.fig.canvas.flush_events()
# Test
if __name__ == '__main__':
Env = ChainOrdered()
s = Env.reset()
for i in range(500):
a = Env.action_space.sample()
s,r,terminal,_ = Env.step(a)
if terminal:
print('Died in step',i,'with reward',r,' restarting')
s = Env.reset()
print('Finished') |
tmoer/a0c | config/hps.py | # -*- coding: utf-8 -*-
"""
Default hyperparameter settings
@author: thomas
"""
from common.hps_setup import HParams
def override_hps_settings(hps):
''' some more global modifications to multiple settings based on 1 indicator '''
if hps.mode == 'off':
pass
return hps
def get_hps():
''' Hyperparameter settings '''
return HParams(
# General
game = 'MountainCarContinuous-v0', # Environment name
name = 'unnamed', # Name of experiment
result_dir = '',
# Steps & limits
n_t = 2000, # max timesteps
n_eps = 100, # max episodes
steps_per_ep = 300,
mode = 'off', # overall indicator to jointly change a group of settings. Use with override_hps_settings()
# MCTS
n_mcts = 10,
c = 1.0,
alpha = 0.5,
C_widening = 1.0,
decision_type = 'count',
        backup_Q = 'on-policy', # 'on-policy', 'max' or 'thompson': Type of policy used for value back-up. Thompson requires additional sampling
sigma_tree = False, # whether to use tree uncertainty
backup_sigma_tree = 'on-policy', # 'uniform', 'on-policy', 'max', 'thompson': policy used for sigma_tree back-up
block_loop = False, # Whether to block loops
# MCTS + DL
loss_type = 'count', # 'count' or 'Q'
bound = 'beta', # 'tanh' or 'beta'
entropy_l = 0.0,
random_action_frac = 0.0,
temp = 1.0,
n_mix = 1,
use_prior = False,
bootstrap_V = True,
V_decision = 'on_policy',
# Train
lr = 0.005,
n_epochs = 1,
batch_size = 32,
data_size = 5000, # total database, if distributed summed over the agents
clip_gradient_norm = 0.0,
tfdb = False,
# Other
timeit = False,
verbose = False,
verbose_mcts = False
) |
tmoer/a0c | agent.py | # -*- coding: utf-8 -*-
"""
Chain experiments
@author: thomas
"""
if __name__ == '__main__' and __package__ is None:
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
global mpl
import matplotlib as mpl
mpl.use('Agg')
import numpy as np
import os
import time
from tensorflow.python import debug as tf_debug
import tensorflow as tf
import argparse
from pprint import pformat
#from pdb import set_trace
# common package import
from src.common.rl.make_game import make_game
from src.common.submit import make_unique_subfolder
from src.common.hps_setup import hps_to_dict
from src.common.visualize import plot_single_experiment
from src.common.putils import store_safely
# local imports
from config.hps import get_hps,override_hps_settings
from src.mcts import MCTS,display_info
from src.network import Model,Database
def agent(hps):
''' Agent function '''
tf.reset_default_graph()
# storage
result = {}
env_steps,ep_return = [],[] # will indicate the timestep for the learning curve
losses,gn = [],[]
best_R = -np.Inf
Env = make_game(hps.game)
D = Database(max_size=max(hps.data_size,hps.n_mcts*hps.steps_per_ep),batch_size=hps.batch_size)
model = Model(Env,lr=hps.lr,n_mix=hps.n_mix,clip_gradient_norm=hps.clip_gradient_norm,loss_type=hps.loss_type,
bound=hps.bound,temp=hps.temp,entropy_l=hps.entropy_l)
#with tf.Session() as sess,sess.as_default():
with tf.Session() as sess:
if hps.tfdb:
sess = tf_debug.LocalCLIDebugWrapperSession(sess)
model.sess = sess
sess.run(tf.global_variables_initializer())
global_t_mcts = 0
global_t = 0
for ep in range(hps.n_eps):
start = time.time()
root_index = Env.reset()
root = None
R = 0.0 # episode reward
t = 0 # episode steps
seed = np.random.randint(1e7)
Env.seed(seed)
a_store = []
while True:
# run an episode
if hps.timeit: now = time.time()
root = MCTS(root_index,root,Env,N=hps.n_mcts,model=model,c=hps.c,bootstrap_V=hps.bootstrap_V,
block_loop=hps.block_loop,sigma_tree=hps.sigma_tree,backup_Q=hps.backup_Q,
backup_sigma_tree=hps.backup_sigma_tree,seed=seed,a_his=a_store,
alpha=hps.alpha,C_widening=hps.C_widening,use_prior=hps.use_prior,timeit=hps.timeit,
random_action_frac=hps.random_action_frac)
if hps.timeit: print('One MCTS search takes {} seconds'.format(time.time()-now))
if hps.verbose_mcts: display_info(root,'{}'.format(t),hps.c)
probs,a_list,V,a,a_argmax = root.return_results(decision_type=hps.decision_type,loss_type=hps.loss_type,
temperature=hps.temp,V_decision=hps.V_decision)
for k,prob in enumerate(probs):
D.store((root.index,V,a_list[k],np.array([prob])))
#if count == 0:
# print('Warning',[child_action.n for child_action in root.child_actions],display_info(root,'{}'.format(t),hps.c))
# Make the step
a_store.append(a)
s1,r,terminal,_ = Env.step(a)
R += r
t += 1
global_t += 1
global_t_mcts += hps.n_mcts
#if hps.verbose:
# if (t % 50) == 0:
# print('Overall step {}, root currently returns V {}, and considers a {} with counts {}'.format(global_t,V,a_list,probs))
if terminal or (t > hps.steps_per_ep):
if hps.verbose:
print('Episode terminal, total reward {}, steps {}'.format(R,t))
ep_return.append(R)
env_steps.append(global_t_mcts)
break # break out, start new episode
else:
root = root.forward(a_argmax,s1,r,terminal,model)
# saving
result.update({'steps':env_steps,'return':ep_return})
if hps.verbose:
result.update({'gn':gn,'loss':losses})
#if R > best_R:
# result.update({'seed':seed,'actions':a_store,'R':best_R})
# best_R = R
store_safely(hps.result_dir,'result',result)
# Train
if (global_t_mcts > hps.n_t) or (ep > hps.n_eps):
break # end learning
else:
n_epochs = hps.n_epochs * (np.ceil(hps.n_mcts/20)).astype(int)
#print(n_epochs)
loss = model.train(D,n_epochs,hps.lr)
losses.append(loss['total_loss'])
gn.append(loss['gn'])
if hps.verbose:
print('Time {}, Episode {}, Return {}, V {}, gn {}, Vloss {}, piloss {}'.format(
global_t_mcts,ep,R,loss['V'],loss['gn'],loss['V_loss'],loss['pi_loss']))
print('Actions {}, probs {}'.format(np.array(a_list),probs))
print('One full episode loop + training in {} seconds'.format(time.time()-start))
return result
if __name__ == '__main__':
'''Set-up training'''
parser = argparse.ArgumentParser()
parser.add_argument('--hp', help='Hyperparameter configuration',default='')
parser.add_argument('--no_plot', action='store_true',default=False)
args = parser.parse_args()
hps = get_hps().parse(args.hp)
hps = override_hps_settings(hps)
# set-up result folder if not prespecified, then we are not hyperlooping
if hps.result_dir == '':
result_folder = os.getcwd() + '/results/{}/{}/'.format(hps.name,hps.game)
hps.result_dir = make_unique_subfolder(result_folder,hyperloop=False)
with open(hps.result_dir + 'hps.txt','w') as file:
file.write(pformat(hps_to_dict(hps)))
#with open(subfolder + 'hps_raw.txt','w') as file:
# file.write(hps_to_list(hps))
print(' ________________________________________ ')
print('Start learning on game {}'.format(hps.game))
result = agent(hps)
if not args.no_plot:
plot_single_experiment(result,hps.game,hps.result_dir,plot_type='lc') |
frknayk/Q-Iteration | q_learn_train_bin_2.py | <gh_stars>0
import gym
import numpy as np
import random
from math import pow
from math import sqrt
from math import exp
import pandas
import matplotlib.pyplot as plt
class QLearn:
def __init__(self, actions, epsilon, alpha, gamma):
        # The Q table is a dictionary, which lets us start with an empty
        # state space; unvisited state-action pairs default to 0.0
self.q = {}
# exploration constant
self.epsilon = epsilon
        # Learning rate alpha:
        # how strongly the new value estimate is weighted against the old one (0-1).
        # 1 means "all new", which is fine in noise-free settings.
self.alpha = alpha
        # Discount factor gamma:
        # when assessing the value of a state and action, how much
        # the value of future states counts
self.gamma = gamma
self.actions = actions
def getQ(self, state, action):
return self.q.get((state, action), 0.0)
def learnQ(self, state, action, reward, value):
        # Q(s,a) += alpha * (reward + gamma * max_a' Q(s',a') - Q(s,a))
oldv = self.q.get((state, action), None)
if oldv is None:
self.q[(state, action)] = reward
else:
self.q[(state, action)] = oldv + self.alpha * (value - oldv)
def chooseAction(self, state):
q = [self.getQ(state, a) for a in self.actions]
maxQ = max(q)
if random.random() < self.epsilon:
            minQ = min(q)
            mag = max(abs(minQ), abs(maxQ))
# add random values to all the actions, recalculate maxQ
q = [q[i] + random.random() * mag - .5 * mag for i in range(len(self.actions))]
maxQ = max(q)
count = q.count(maxQ)
        # If several actions tie for the maximum Q-value,
        # we select one of them at random
if count > 1:
best = [i for i in range(len(self.actions)) if q[i] == maxQ]
i = random.choice(best)
else:
i = q.index(maxQ)
action = self.actions[i]
return action
def learn(self, state1, action1, reward, state2):
maxqnew = max([self.getQ(state2, a) for a in self.actions])
self.learnQ(state1, action1, reward, reward + self.gamma*maxqnew)
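        # Worked example: with alpha=0.5, gamma=0.9, a previously stored
        # Q(s1,a1)=0, reward=1 and max_a Q(s2,a)=2, the target is
        # 1 + 0.9*2 = 2.8 and Q(s1,a1) becomes 0 + 0.5*(2.8 - 0) = 1.4.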
def build_state(features):
return int("".join(map(lambda feature: str(int(feature)), features)))
def to_bin(value, bins):
return np.digitize(x=[value], bins=bins)[0]
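# Example: np.digitize counts the bin edges lying at or below the value, so
# with bins = [-1.0, 0.0, 1.0], to_bin(0.5, bins) returns 2.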
################################################## Training Phase ##################################################
env = gym.make('CartPole-v0')
MAX_EPISODE = 3100
MAX_STEP = 200
n_bins_cart_pos = 14
n_bins_cart_vel = 12
n_bins_angle = 10
n_bins_angle_vel = n_bins_angle
cart_position_save = {}
pole_angle_save = {}
control_signal_save = {}
number_of_features = env.observation_space.shape[0]
# Number of states is huge so in order to simplify the situation
# we discretize the space to: 10 ** number_of_features
cart_position_bins = pandas.cut([-2.4, 2.4], bins=n_bins_cart_pos, retbins=True)[1][1:-1]
cart_velocity_bins = pandas.cut([-1, 1], bins=n_bins_cart_vel, retbins=True)[1][1:-1]
pole_angle_bins = pandas.cut([-2, 2], bins=n_bins_angle, retbins=True)[1][1:-1]
angle_rate_bins = pandas.cut([-3.5, 3.5], bins=n_bins_angle_vel, retbins=True)[1][1:-1]
# The Q-learn algorithm
qlearn = QLearn(actions=range(env.action_space.n),
alpha=0.5, gamma=0.90, epsilon=0.2)
def reward_func_single(state):
rew_coeff_x = 1
rew_coeff_xdot = 3
rew_coeff_theta = 1
rew_coeff_thetadot = 1
# print(state[0],state[1],state[2],state[3])
error_x = rew_coeff_x*np.abs(state[0])
error_xdot = rew_coeff_xdot*np.abs(pow(state[1],2))
error_theta = rew_coeff_theta*np.abs(state[2])
error_thetadot = rew_coeff_thetadot*np.abs( pow(state[3],2))
error=[]
error.append(error_x)
error.append(error_xdot)
error.append(error_theta)
error.append(error_thetadot)
max_e = max(error)
min_e = min(error)
dif = max_e - min_e
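    # min-max normalisation of the four error terms; note that if all errors
    # were equal, dif would be 0 and the division below would produce
    # nan/inf (not guarded in this script)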
for x in range(len(error)):
error[x] = (error[x] - min_e)/(dif)
return sum(error)
# error_all = error_x+error_xdot+error_theta+error_thetadot
# return error_all
def avg(lst):
return sum(lst) / len(lst)
for i_episode in range(MAX_EPISODE):
observation = env.reset()
# x,x_dot,theta,theta_dot
cart_position,cart_velocity, pole_angle, angle_rate_of_change = observation
state = build_state([to_bin(cart_position, cart_position_bins),
to_bin(cart_velocity, cart_velocity_bins),
to_bin(pole_angle, pole_angle_bins),
to_bin(angle_rate_of_change, angle_rate_bins)])
local_cart_position = []
local_pole_angle = []
local_control_signal = []
for t in range(MAX_STEP):
        if i_episode > MAX_EPISODE * 0.95: env.render()
# Pick an action based on the current state
action = qlearn.chooseAction(state)
cs = 0
        if action == 0:
            cs = -10
        else:
            cs = 10
# Execute the action and get feedback
observation, reward, done, info = env.step(action)
# Digitize the observation to get a state
cart_position,cart_velocity, pole_angle, angle_rate_of_change = observation
local_cart_position.append(cart_position)
local_pole_angle.append(pole_angle)
local_control_signal.append(cs)
        # print(reward_func_single(observation))
        custom_reward = reward_func_single(observation)  # shaped reward (computed but unused below)
nextState = build_state([to_bin(cart_position, cart_position_bins),
to_bin(cart_velocity, cart_velocity_bins),
to_bin(pole_angle, pole_angle_bins),
to_bin(angle_rate_of_change, angle_rate_bins)])
        if not done:
qlearn.learn(state, action, reward, nextState)
qlearn.epsilon = qlearn.epsilon * 0.999 # added epsilon decay
state = nextState
else:
reward = -200
qlearn.learn(state, action, reward, nextState)
qlearn.epsilon = qlearn.epsilon * 0.999 # added epsilon decay
break
print("Episode : #{0} was #{1} steps.".format(i_episode,t))
# Calculate mean cart position and pole angle for the episode
cart_position_save[i_episode] = avg(local_cart_position)
pole_angle_save[i_episode] = avg(local_pole_angle)
control_signal_save[i_episode] = avg(local_control_signal)
print("Length of State-Space : ",len(qlearn.q))
# time = np.linspace(0,MAX_EPISODE,MAX_EPISODE)
position_vals = list(cart_position_save.values())
angle_vals = list(pole_angle_save.values())
control_vals = list(control_signal_save.values())
# Saving Position, Angular Position and Control Signal Values
# And plotting later in 'q_learn_inference_bin.py'
np.save('CartPosByTime.npy',position_vals)
np.save('AngularPosByTime.npy',angle_vals)
np.save('ControlSignalByTime.npy',control_vals)
np.save("policy_bin",qlearn.q) |
frknayk/Q-Iteration | q_learn_inference_bin.py | <reponame>frknayk/Q-Iteration
import numpy as np
import matplotlib.pyplot as plt
cartPos = np.load('CartPosByTime.npy')
PoleAngle = np.load('AngularPosByTime.npy')
control_vals = np.load('ControlSignalByTime.npy')
# Plotting
plt.figure(1)
plt.plot(cartPos, label='Cart Position')
plt.plot(PoleAngle, label='Angular Position')
plt.legend()
plt.figure(2)
plt.subplot(211)
plt.plot(control_vals, label='Control Signal')
plt.legend()
plt.show()
print(control_vals) |
pvsfair/architecture-patterns-python | app/infrastructure/repository.py | <gh_stars>0
from typing import List, Set
from app.domain.models.Batch import Batch
from .abstract_repository import AbstractRepository
class SqlAlchemyRepository(AbstractRepository):
def __init__(self, session) -> None:
self.session = session
def add(self, batch: Batch):
self.session.add(batch)
def get(self, reference) -> Batch:
return self.session.query(Batch).filter_by(reference=reference).one()
def list(self) -> List[Batch]:
return self.session.query(Batch).all()
class FakeRepository(AbstractRepository):
def __init__(self, batches: Set[Batch]) -> None:
self._batches = set(batches)
def add(self, batch: Batch):
self._batches.add(batch)
def get(self, reference) -> Batch:
return next(b for b in self._batches if b.reference == reference)
def list(self) -> List[Batch]:
return list(self._batches)
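# A sketch of how the fake is used in tests (Batch signature taken from the
# unit tests in this repo):
#   repo = FakeRepository({Batch("batch-001", "LAMP", qty=10, eta=None)})
#   repo.get("batch-001")  # -> the Batch above, same API as SqlAlchemyRepository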
|
pvsfair/architecture-patterns-python | app/infrastructure/orm.py | from sqlalchemy import (
Table,
MetaData,
Column,
Integer,
String,
Date,
ForeignKey,
)
from sqlalchemy.orm import mapper, relationship
from app.domain.models.OrderLine import OrderLine
from app.domain.models.Batch import Batch
metadata = MetaData()
order_lines = Table(
"order_lines",
metadata,
Column("id", Integer, primary_key=True, autoincrement=True),
Column("sku", String(255)),
Column("qty", Integer, nullable=False),
Column("orderid", String(255)),
)
batches = Table(
"batches",
metadata,
Column("id", Integer, primary_key=True, autoincrement=True),
Column("reference", String(255)),
Column("sku", String(255)),
Column("_purchased_quantity", Integer, nullable=False),
Column("eta", Date, nullable=True),
)
allocations = Table(
"allocations",
metadata,
Column("id", Integer, primary_key=True, autoincrement=True),
Column("orderline_id", ForeignKey("order_lines.id")),
Column("batch_id", ForeignKey("batches.id")),
)
def start_mappers():
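    # classical (imperative) mapping: the domain classes OrderLine and Batch
    # stay free of ORM metadata; mapper() wires them to the tables above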
lines_mapper = mapper(OrderLine, order_lines)
mapper(
Batch,
batches,
properties={
"_allocations": relationship(
lines_mapper,
secondary=allocations,
collection_class=set,
)
},
)
|
pvsfair/architecture-patterns-python | app/domain/models/OrderLine.py | from dataclasses import dataclass
@dataclass(unsafe_hash=True)
class OrderLine:
orderid: str
sku: str
qty: int
|
pvsfair/architecture-patterns-python | app/domain/exeptions.py | <gh_stars>0
class OutOfStock(Exception):
pass
|
pvsfair/architecture-patterns-python | app/domain/models/allocate.py | from app.domain.exeptions import OutOfStock
from app.domain.models.OrderLine import OrderLine
from app.domain.models.Batch import Batch
from typing import List
def allocate(line: OrderLine, batches: List[Batch]) -> str:
try:
batch = next(b for b in sorted(batches) if b.can_allocate(line))
except StopIteration:
raise OutOfStock(f"Out of stock for sku {line.sku}")
batch.allocate(line)
return batch.reference
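# Note: sorted(batches) relies on Batch defining an ordering (in the book this
# repo follows, __gt__ compares ETAs so in-stock batches with eta=None are
# preferred over shipments); that ordering lives in the Batch model, not here.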
|
pvsfair/architecture-patterns-python | app/infrastructure/abstract_repository.py | from abc import ABC, abstractmethod
from typing import List
from app.domain.models.Batch import Batch
class AbstractRepository(ABC):
@abstractmethod
def add(self, batch: Batch):
raise NotImplementedError
@abstractmethod
def get(self, reference) -> Batch:
raise NotImplementedError
@abstractmethod
def list(self) -> List[Batch]:
raise NotImplementedError
|
pvsfair/architecture-patterns-python | app/tests/unit/test_batch.py | <filename>app/tests/unit/test_batch.py
from datetime import date
from app.domain.models.Batch import Batch
from app.domain.models.OrderLine import OrderLine
def make_batch_and_line(sku, batch_qty, line_qty):
return (
Batch("batch-001", sku, qty=batch_qty, eta=date.today()),
OrderLine("order-123", sku, line_qty),
)
def test_allocating_to_a_batch_reduces_the_available_quantity():
batch, line = make_batch_and_line("ELEGANT-LAMP", 20, 2)
batch.allocate(line)
assert batch.available_quantity == 18
def test_can_allocate_if_available_greater_than_required():
large_batch, small_line = make_batch_and_line("ELEGANT-LAMP", 20, 2)
assert large_batch.can_allocate(small_line)
def test_cannot_allocate_if_available_smaller_than_required():
small_batch, large_line = make_batch_and_line("ELEGANT-LAMP", 2, 20)
assert small_batch.can_allocate(large_line) is False
def test_cannot_allocate_if_skus_do_not_match():
batch = Batch("batch-001", "UNCONFORTABLE-CHAIR", qty=100, eta=None)
different_sku_line = OrderLine("order-123", "EXPENSIVE-TOASTER", 10)
assert batch.can_allocate(different_sku_line) is False
def test_can_only_deallocate_allocated_lines():
batch, unallocated_line = make_batch_and_line("DECORATIVE-TRINCKET", 20, 2)
batch.deallocate(unallocated_line)
assert batch.available_quantity == 20
def test_allocation_is_idempotent():
batch, line = make_batch_and_line("ANGULAR-DESK", 20, 2)
batch.allocate(line)
batch.allocate(line)
assert batch.available_quantity == 18
|
hobbyelektroniker/Micropython-Grundlagen | 003_Textformatierung/Code/textformatierung4.py | '''
Micropython with ESP32
<NAME>
Version 1.00, 17.10.2019
Der Hobbyelektroniker
https://community.hobbyelektroniker.ch
https://www.youtube.com/c/HobbyelektronikerCh
The code may be used freely with attribution.
'''
text = "Das Resultat von {Rechnung} ist {Resultat}"
print(text.format(Rechnung="3 x 5", Resultat = 3*5))
print(text.format(Resultat = 3*5, Rechnung="3 x 5"))
text = "Das Resultat von {Rechnung} ist {Resultat:4.2f}"
print(text.format(Resultat = 6 / 4, Rechnung="6 / 4"))
|
hobbyelektroniker/Micropython-Grundlagen | 011_Globale und lokale Variablen/Code/global1.py | a = "Die Zahl lautet"
b = 5
def ausgabe1():
print("*** Ausgabe 1 ***")
print(a,b)
print()
def ausgabe2():
print("*** Ausgabe 2 ***")
a = "Eine andere Zahl:"
b = 7
print(a,b)
print()
print("Globale Werte: a = {}, b = {}".format(a,b))
ausgabe1()
print("Globale Werte: a = {}, b = {}".format(a,b))
ausgabe2()
print("Globale Werte: a = {}, b = {}".format(a,b))
|
hobbyelektroniker/Micropython-Grundlagen | 012_Micropython, ESP32 und das Multitasking/Code/Blink4.py | <reponame>hobbyelektroniker/Micropython-Grundlagen<filename>012_Micropython, ESP32 und das Multitasking/Code/Blink4.py
# Simultaneous blinking with arguments
# with multithreading
from machine import Pin
import time
import _thread
rot = Pin(33, Pin.OUT)
gruen = Pin(25, Pin.OUT)
gelb = Pin(32, Pin.OUT)
anzahl = 10
def blink(led,pause):
for i in range(anzahl):
led.on()
time.sleep(pause)
led.off()
time.sleep(pause)
_thread.start_new_thread(blink,(rot,1))
_thread.start_new_thread(blink,(gelb,0.5))
_thread.start_new_thread(blink,(gruen,0.25))
print("Alles fertig") # Das ist gelogen !!!
|
hobbyelektroniker/Micropython-Grundlagen | 013_Klassen in Micropython/Code/quadrat_klasse.py | '''
Object-oriented programming
A class for a square
Version 1.00, 27.02.2021
Der Hobbyelektroniker
https://community.hobbyelektroniker.ch
https://www.youtube.com/c/HobbyelektronikerCh
The code may be used freely with attribution.
'''
# Square
import math
from bewegt_klasse import BewegtesObjektV3 as BewegtesObjekt
# This class draws a square standing on one of its corners
# fenster: the window in which the square is drawn
# x, y: x and y coordinates of the centre
# laenge: side length
# farbe: colour of the outline
class QuadratV1():
def __init__(self, fenster, x, y, laenge, farbe="black"):
self.fenster = fenster
self.x = x
self.y = y
self.farbe = farbe
self.laenge = laenge
        # bounding rectangle (as offsets from the origin)
        hd = math.sqrt(laenge**2 / 2) # half the diagonal
self.x1 = -hd
self.y1 = -hd
self.x2 = hd
self.y2 = hd
        # draw the square
self.figur = fenster.create_polygon(x-hd, y, x, y+hd, x+hd, y, x, y-hd,
width=1, outline = farbe, fill='')
class QuadratV2(BewegtesObjekt):
def __init__(self, fenster, x, y, laenge, farbe="black"):
        # the base class does part of the work for us
BewegtesObjekt.__init__(self,fenster, x, y)
self.farbe = farbe
self.laenge = laenge
        # bounding rectangle (as offsets from the origin)
        hd = math.sqrt(laenge**2 / 2) # half the diagonal
self.x1 = -hd
self.y1 = -hd
self.x2 = hd
self.y2 = hd
        # draw the square
self.figur = fenster.create_polygon(x-hd, y, x, y+hd, x+hd, y, x, y-hd,
width=1, outline = farbe, fill='')
|
hobbyelektroniker/Micropython-Grundlagen | 011_Globale und lokale Variablen/Code/global4.py |
def ausgabe3(a,b,c):
print("*** Ausgabe 3 ***")
a = "Eine andere Zahl:"
b = 7
c[1] = 2
c = [1,7,4,6,5]
c[2] = 12
print(a,b,c)
print()
a = "Die Zahl lautet"
b = 5
c = [1,3,5]
print("Globale Werte: a = {}, b = {}, c = {}".format(a,b,c))
ausgabe3(a,b,c)
print("Globale Werte: a = {}, b = {}, c = {}".format(a,b,c))
|
hobbyelektroniker/Micropython-Grundlagen | 009_Bedingungen und Schleifen/Code/while1.py | <reponame>hobbyelektroniker/Micropython-Grundlagen
x = 0
while x < 6:
x += 1
y = 2 * x
print(x,y)
print("-------")
x = 0
while x < 6:
x += 1
y = 2 * x
print(x,y)
if (x == 3): break
print("-------")
x = 0
while True:
x += 1
y = 2 * x
print(x,y)
if (x > 5): break
print("-------")
x = 0
while x < 6:
x += 1
if (x == 3): continue
y = 2 * x
print(x,y)
print("-------")
x = 0
while x < 6:
x += 1
if (x == 3): continue
y = 2 * x
print(x,y)
else:
print("Jetzt sind wir fertig!")
print("Das war's.")
|
hobbyelektroniker/Micropython-Grundlagen | 012_Micropython, ESP32 und das Multitasking/Code/Blink1.py | <reponame>hobbyelektroniker/Micropython-Grundlagen
# Simple blink
# without multithreading
from machine import Pin
import time
rot = Pin(33, Pin.OUT)
gruen = Pin(25, Pin.OUT)
gelb = Pin(32, Pin.OUT)
anzahl = 5
def blink_rot():
for i in range(anzahl):
rot.on()
time.sleep(1)
rot.off()
time.sleep(1)
def blink_gelb():
for i in range(anzahl):
gelb.on()
time.sleep(0.5)
gelb.off()
time.sleep(0.5)
def blink_gruen():
for i in range(anzahl):
gruen.on()
time.sleep(0.25)
gruen.off()
time.sleep(0.25)
blink_rot()
blink_gelb()
blink_gruen()
|
hobbyelektroniker/Micropython-Grundlagen | 010_Funktionen/Code/func1.py | def flaeche(a, b):
    rechtecksflaeche = a * b # rectangle with sides a and b
    dreiecksflaeche = a * b / 2 # triangle with base a and height b
return rechtecksflaeche, dreiecksflaeche
a = 3
b = 5
resultat = flaeche(a,b) # returns a tuple
print(resultat)
print(resultat[1])
rechteck, dreieck = flaeche(a, b) # direct unpacking
print("Das Rechteck mit den Seitenlängen {} cm und {} cm hat die Fläche {} cm2.".format(a,b,rechteck))
print("Das Dreieck mit der Grundlinie {} cm und der Höhe {} cm hat die Fläche {} cm2.".format(a,b,dreieck))
|
hobbyelektroniker/Micropython-Grundlagen | 014_Klassen in Micropython 2/Code/demo.py | '''
Object-oriented programming
A demo with several objects
Version 1.0, 15.03.2021
Der Hobbyelektroniker
https://community.hobbyelektroniker.ch
https://www.youtube.com/c/HobbyelektronikerCh
The code may be used freely with attribution.
'''
from tkinter import *
from kreis_klasse import KreisV2 as Kreis
from quadrat_klasse import QuadratV2 as Quadrat
# create the screen
bildschirm = Tk()
bildschirm.title('Spielerei mit 2D - Objekten')
# create the playing field
spielfeld = Canvas(bildschirm, width=1000, height=800, bg="yellow")
spielfeld.pack()
# create the objects
kreis1 = Kreis(spielfeld, x=110, y=110, radius=100)
quadrat1 = Quadrat(spielfeld, x=110, y=110, laenge=80, farbe="red")
kreis2 = Kreis(spielfeld, x=210, y=210, radius=50, farbe="blue")
quadrat2 = Quadrat(spielfeld, x=300, y=350, laenge=120, farbe="green")
# set the objects in motion
kreis1.bewegung(x=6, y=6)
quadrat1.bewegung(x=-8, y=10)
kreis2.bewegung(x=10, y=-4)
quadrat2.bewegung(x=-4, y=-6)
while True:
kreis1.bewege()
quadrat1.bewege()
kreis2.bewege()
quadrat2.bewege()
|
hobbyelektroniker/Micropython-Grundlagen | 013_Klassen in Micropython/Code/Objekte3.py | '''
Object-oriented programming
A moving object
Version 1.00, 27.02.2021
Der Hobbyelektroniker
https://community.hobbyelektroniker.ch
https://www.youtube.com/c/HobbyelektronikerCh
The code may be used freely with attribution.
'''
from tkinter import *
from kreis_klasse import KreisV1 as Kreis
from quadrat_klasse import QuadratV1 as Quadrat
from bewegt_klasse import BewegtesObjektV1 as BewegtesObjekt
import time
# create the screen
bildschirm = Tk()
bildschirm.title('Spielerei mit 2D - Objekten')
# create the playing field
spielfeld = Canvas(bildschirm, width=1000, height=800, bg="yellow")
spielfeld.pack()
# draw a circle and a square
kreis = Kreis(spielfeld, x=110, y=110, radius=100)
quadrat = Quadrat(spielfeld, x=110, y=110, laenge=80, farbe="red")
# create a moving object and display it for 5 seconds
bewegt = BewegtesObjekt(spielfeld, x=110, y=110)
time.sleep(5)
# move the figure 500 times in steps of 3
bewegt.bewegung(x=3, y=3)
for i in range(500):
bewegt.bewege()
bildschirm.mainloop()
|
hobbyelektroniker/Micropython-Grundlagen | 005_Mengentypen - Set/Code/sets.py | <filename>005_Mengentypen - Set/Code/sets.py
# create a set with predefined values
set1 = {"Hallo",3,1.25,"Welt"}
print(set1)
# create an empty set
set2 = set()
print(set2)
# add elements to the set
set2.add("neu")
set2.add("Hallo")
set2.add(5)
print(set2)
# create a set from another set
set3 = set1.copy()
print(set3)
# create a set from two other sets
set3 = set1.union(set2)
print(set3)
# use a generator
set3 = set(range(1,5))
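# range(1, 5) yields 1..4, so this prints {1, 2, 3, 4}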
print(set3)
# check whether an element is present
print("Hallo" in set1)
print("Hello" in set1)
# list all elements
for x in set1:
print(x)
# number of elements in the set
print(len(set1))
# contained in the first set but not in the second
print()
print("------- difference -------")
set1 = {"Hallo",3,1.25,"Welt"}
set2 = {"Hallo",3,"Jahr 2020",5}
print("Set1: " + str(set1))
print("Set2: " + str(set2))
print(set1.difference(set2))
# contained in only one of the two sets
print()
print("------- symmetric_difference -------")
set1 = {"Hallo",3,1.25,"Welt"}
set2 = {"Hallo",3,"Jahr 2020",5}
print("Set1: " + str(set1))
print("Set2: " + str(set2))
print(set1.symmetric_difference(set2))
# common elements of two sets
print()
print("------- intersection -------")
set1 = {"Hallo",3,1.25,"Welt"}
set2 = {"Hallo",3,"Jahr 2020",5}
print("Set1: " + str(set1))
print("Set2: " + str(set2))
print(set1.intersection(set2))
# one set contains the other
print()
print("------------------------")
set1 = {"Hallo",3,1.25,"Welt"}
set2 = {"Hallo",3,"Jahr 2020",5}
set3 = {"Hallo","Welt"}
print("Set1: " + str(set1))
print("Set2: " + str(set2))
print("Set3: " + str(set3))
print("------- issubset -------")
print("Set3 ist Subset von Set1: " + str(set3.issubset(set1)))
print("Set1 ist Subset von Set3: " + str(set1.issubset(set3)))
print("------- issuperset -------")
print("Set3 ist Superset von Set1: " + str(set3.issuperset(set1)))
print("Set1 ist Superset von Set3: " + str(set1.issuperset(set3)))
# is there no common element at all
print()
print("------- isdisjoint -------")
set1 = {"Hallo",3,1.25,"Welt"}
set2 = {"Hallo",3,"Jahr 2020",5}
set3 = {"Welt",2020}
print("Set1: " + str(set1))
print("Set2: " + str(set2))
print("Set3: " + str(set3))
print("Set1 hat keine gemeinsamen Elemente mit Set 3: " + str(set1.isdisjoint(set3)))
print("Set2 hat keine gemeinsamen Elemente mit Set 3: " + str(set2.isdisjoint(set3)))
print()
# delete the set completely
del set3
# print(set3) would raise an error
# empty the set
set2.clear()
print(set2)
# remove elements
set1.remove("Hallo") # "Hallo" must exist
print(set1)
#set1.remove("Hallo") would raise an error
set1.discard("Welt") # "Welt" does not have to exist
set1.discard(2)
print(set1)
# add single elements
set1.add("Hallo")
print(set1)
# add a set to a set
#set1.add({1,2,3}) does not work (a set is unhashable)
set1.update({1,2,3})
print(set1)
set2 = {"Hallo",3,"Jahr 2020",5}
set1.update(set2)
print(set1)
# calculations
print()
print("------- Berechnungen -------")
set1 = {"Hallo",3,1.25,"Welt"}
set2 = {"Hallo",3,"Jahr 2020",5}
print("Set1: " + str(set1))
print("Set2: " + str(set2))
# contained in the first set but not in the second
set1.difference_update(set2)
print("difference_update: " + str(set1))
# contained in only one of the two sets
set1 = {"Hallo",3,1.25,"Welt"}
set2 = {"Hallo",3,"Jahr 2020",5}
set1.symmetric_difference_update(set2)
print("symmetric_difference_update: " + str(set1))
# common elements of two sets
set1 = {"Hallo",3,1.25,"Welt"}
set2 = {"Hallo",3,"Jahr 2020",5}
set1.intersection_update(set2)
print("intersection_update: " + str(set1))
|
hobbyelektroniker/Micropython-Grundlagen | 003_Textformatierung/Code/textformatierung2.py | '''
Micropython with ESP32
Integers
Version 1.00, 17.10.2019
Der Hobbyelektroniker
https://community.hobbyelektroniker.ch
https://www.youtube.com/c/HobbyelektronikerCh
The code may be used freely with attribution.
'''
zahlen = [2,235,15,8,-12]
for zahl in zahlen:
print(zahl)
print()
ausgabe = "Die Zahl ist {}, was kommt danach?"
for zahl in zahlen:
print(ausgabe.format(zahl))
print()
# fixed number of digits
ausgabe = "Die Zahl ist {:4d}, was kommt danach?"
for zahl in zahlen:
print(ausgabe.format(zahl))
print()
# always print the sign
ausgabe = "Die Zahl ist {:+4d}, was kommt danach?"
for zahl in zahlen:
print(ausgabe.format(zahl))
print()
# print leading zeros
ausgabe = "Die Zahl ist {:+04d}, was kommt danach?"
for zahl in zahlen:
print(ausgabe.format(zahl))
print()
# left-aligned
ausgabe = "Die Zahl ist {:<4d}, was kommt danach?"
for zahl in zahlen:
print(ausgabe.format(zahl))
print()
# right-aligned
ausgabe = "Die Zahl ist {:>4d}, was kommt danach?"
for zahl in zahlen:
print(ausgabe.format(zahl))
print()
# centred
ausgabe = "Die Zahl ist {:^4d}, was kommt danach?"
for zahl in zahlen:
print(ausgabe.format(zahl))
print()
# signs aligned in a column
ausgabe = "Die Zahl ist {: =+4d}, was kommt danach?"
for zahl in zahlen:
print(ausgabe.format(zahl))
print()
# signs aligned in a column, with leading zeros
ausgabe = "Die Zahl ist {:0=+4d}, was kommt danach?"
for zahl in zahlen:
print(ausgabe.format(zahl))
print()
|
hobbyelektroniker/Micropython-Grundlagen | 001_Einleitung und erste Schritte/Code/hallo.py | # Hallo!
namen = ("Max","Peter","Monika","Petra")
def sage_hallo(name):
print("Hallo " + name)
for name in namen:
sage_hallo(name)
print("Das war's!")
|
hobbyelektroniker/Micropython-Grundlagen | 012_Micropython, ESP32 und das Multitasking/Code/Blink2.py | <gh_stars>1-10
# Simple blink with arguments
# without multithreading
from machine import Pin
import time
rot = Pin(33, Pin.OUT)
gruen = Pin(25, Pin.OUT)
gelb = Pin(32, Pin.OUT)
anzahl = 5
def blink(led,pause):
for i in range(anzahl):
led.on()
time.sleep(pause)
led.off()
time.sleep(pause)
blink(rot,1)
blink(gelb,0.5)
blink(gruen,0.25)
|
hobbyelektroniker/Micropython-Grundlagen | 014_Klassen in Micropython 2/Code/Objekte2.py | <gh_stars>1-10
'''
Object-oriented programming
Circle and square
Version 1.00, 27.02.2021
Der Hobbyelektroniker
https://community.hobbyelektroniker.ch
https://www.youtube.com/c/HobbyelektronikerCh
The code may be used freely with attribution.
'''
from tkinter import *
from kreis_klasse import KreisV1 as Kreis
from quadrat_klasse import QuadratV1 as Quadrat
# create the screen
bildschirm = Tk()
bildschirm.title('Spielerei mit 2D - Objekten')
# create the playing field
spielfeld = Canvas(bildschirm, width=1000, height=800, bg="yellow")
spielfeld.pack()
# draw a circle and a square
kreis = Kreis(spielfeld, x=110, y=110, radius=100)
quadrat = Quadrat(spielfeld, x=110, y=110, laenge=80, farbe="red")
bildschirm.mainloop() |
hobbyelektroniker/Micropython-Grundlagen | 010_Funktionen/Code/func4.py | <gh_stars>1-10
import math
def flaeche(r=None,d=None,g=None,h=None,a=None,b=None):
if r:
return r**2 * math.pi
elif d:
return d**2 * math.pi / 4
elif g and h:
return g * h / 2
elif a:
if not b: b = a
return a * b
else:
return None
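# Note: the truthiness checks above treat 0 like "not given", so e.g.
# flaeche(r=0) falls through all branches and returns None rather than 0.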
print(flaeche(5)) # circle with radius r
print(flaeche(d=2)) # circle with diameter d
print(flaeche(r=2)) # circle with radius r
print(flaeche(g=5,h=2)) # triangle with base g and height h
print(flaeche(a=5)) # square with side length a
print(flaeche(a=5,b=2)) # rectangle with sides a and b
|
hobbyelektroniker/Micropython-Grundlagen | 010_Funktionen/Code/func3.py | import math
def flaeche(r=None,d=None):
if r:
# if r and not d:
return r**2 * math.pi
elif d:
# elif d and not r:
return d**2 * math.pi / 4
else:
return None
print(flaeche(3))
print(flaeche(r=3))
print(flaeche(d=3))
print(flaeche(3,4))
|
hobbyelektroniker/Micropython-Grundlagen | 008_Mengentypen - Dictionary/Code/dictionaries2.py | <reponame>hobbyelektroniker/Micropython-Grundlagen<filename>008_Mengentypen - Dictionary/Code/dictionaries2.py
# create a dictionary with predefined values
dict1 = {
"land": "Schweiz",
"ort": "Bern",
"einwohner": 140000
}
print(dict1)
print()
# delete the dictionary completely
dict2 = dict1.copy()
#del dict2 # would make the print below fail
print(dict2)
# delete all elements
dict2 = dict1.copy()
dict2.clear()
print(dict2)
print()
# delete a single element
dict2 = dict1.copy()
dict2.pop("ort")
print(dict2)
print()
# delete the most recently added element
dict2 = dict1.copy()
dict2.popitem()
print(dict2)
print()
# add a key-value pair
dict2 = dict1.copy()
dict2["kanton"] = "BE"
print(dict2)
print()
# change key-value pairs
dict2 = dict1.copy()
dict2["kanton"] = "BS"
dict2["ort"] = "Basel"
print(dict2)
dict2.update({"kanton":"BE","ort":"Bern"})
print()
|
hobbyelektroniker/Micropython-Grundlagen | 014_Klassen in Micropython 2/Code/kreis_klasse.py | '''
Object-oriented programming
A circle class
Version 1.10, 15.03.2021
Der Hobbyelektroniker
https://community.hobbyelektroniker.ch
https://www.youtube.com/c/HobbyelektronikerCh
The code may be used freely with attribution.
'''
# Circle
from bewegt_klasse import BewegtesObjektV3 as BewegtesObjekt
# This class draws a circle
# fenster: the window in which the circle is drawn
# x, y: x and y coordinates of the centre
# radius: radius of the circle
# farbe: colour of the circle outline
class KreisV1():
def __init__(self, fenster, x, y, radius, farbe = 'black'):
self.fenster = fenster
self.x = x
self.y = y
self.farbe = farbe
self.radius = radius
        # bounding rectangle (as offsets from the origin)
self.x1 = -radius
self.y1 = -radius
self.x2 = radius
self.y2 = radius
        # draw the circle
self.figur = fenster.create_oval(x-radius, y-radius, x+radius, y+radius,
width=1, outline=farbe, fill = '')
# Version 2 uses the class BewegtesObjekt as its base class
class KreisV2(BewegtesObjekt):
def __init__(self, fenster, x, y, radius, farbe = 'black'):
        # the base class does part of the work for us
BewegtesObjekt.__init__(self,fenster, x, y)
self.farbe = farbe
self.radius = radius
        # bounding rectangle (as offsets from the origin)
self.x1 = -radius
self.y1 = -radius
self.x2 = radius
self.y2 = radius
        # draw the circle
self.figur = fenster.create_oval(x-radius, y-radius, x+radius, y+radius,
width=1, outline=farbe, fill = '')
|
hobbyelektroniker/Micropython-Grundlagen | 012_Micropython, ESP32 und das Multitasking/Code/Blink5.py | # When does a thread end
# and which threads are still running
from machine import Pin
import time
import _thread
rot = Pin(33, Pin.OUT)
gruen = Pin(25, Pin.OUT)
gelb = Pin(32, Pin.OUT)
anzahl = 10
thread_list = set() # set of active threads
def blink(name, led, pause):
    # the thread starts here
thread_list.add(name)
for i in range(anzahl):
led.on()
time.sleep(pause)
led.off()
time.sleep(pause)
    # the thread ends here
thread_list.discard(name)
_thread.start_new_thread(blink,("ROT", rot, 1))
_thread.start_new_thread(blink,("GELB", gelb, 0.5))
_thread.start_new_thread(blink,("GRUEN", gruen, 0.25))
# wait until the first thread has started
while not thread_list: pass
# print the active threads until none are left
while thread_list:
print(thread_list)
time.sleep(0.05)
print("Jetzt bin ich wirklich fertig!")
|
hobbyelektroniker/Micropython-Grundlagen | 010_Funktionen/Code/func2.py | <gh_stars>1-10
def laenge(a, b, *args):
print()
print(args)
total = a+b
for i in args:
total += i
return total
print(laenge(2,3))
print(laenge(2,3,4,10)) |
hobbyelektroniker/Micropython-Grundlagen | 006_Mengentypen - Tuple/Code/tuples.py | # create a tuple with predefined values
tup1 = ("Hallo",3,1.25,"Welt",2,3,4,5)
print(tup1)
print()
# create an empty tuple
tup2 = tuple() # not very useful !!!
print(tup2)
print()
# create from other collections
set1 = {"Hallo",3,1.25,"Welt"}
tup2 = ("Hallo",3,"Jahr 2020",5)
tup1 = tuple(set1)
print(tup1)
tup3 = tup2
print(tup3)
tup3 = tup1 + tup3
print(tup3)
print()
# create with the help of a generator
tup1 = tuple(range(1,5))
print(tup1)
print()
# check whether an element is present
tup1 = (3,"Hallo",3,"Hallo","Welt",2,4,5)
print("Hallo" in tup1)
print("Hello" in tup1)
print()
# list all elements
for x in tup1:
print(x)
print()
# number of elements in the tuple
print(len(tup1))
print()
# read a single element
print(tup1[3])
print()
# how many times a value occurs
print(tup1.count("Hallo"))
print()
# index of the first occurrence of a value
print(tup1.index("Hallo")) # the value must exist!!!
print()
# the first and the last element
tup1 = (3,"Hallo",3,"Hallo","Welt",2,4,5)
print(tup1)
print(tup1[0])
print(tup1[-1])
print()
# the beginning and the end
tup1 = (3,"Hallo",3,"Hallo","Welt",2,4,5)
print(tup1)
print(tup1[:3]) # elements 0 to 2
print(tup1[4:]) # elements 4 to the end
print()
# a slice from the middle
tup1 = (3,"Hallo",3,"Hallo","Welt",2,4,5)
print(tup1)
print(tup1[2:5]) # elements 2 to 4
print(tup1[-3:-1]) # third-to-last to second-to-last element
# delete the tuple completely
del tup1
# print(tup1) would raise an error
|
hobbyelektroniker/Micropython-Grundlagen | 013_Klassen in Micropython/Code/Objekte1.py | '''
Object-oriented programming
A simple circle
Version 1.00, 27.02.2021
Der Hobbyelektroniker
https://community.hobbyelektroniker.ch
https://www.youtube.com/c/HobbyelektronikerCh
The code may be used freely with attribution.
'''
from tkinter import *
from kreis_klasse import KreisV1 as Kreis
# create the screen
bildschirm = Tk()
bildschirm.title('Spielerei mit 2D - Objekten')
# create the playing field
spielfeld = Canvas(bildschirm, width=1000, height=800, bg="yellow")
spielfeld.pack()
# draw a circle
kreis = Kreis(spielfeld, x=110, y=110, radius=100)
bildschirm.mainloop()
|
hobbyelektroniker/Micropython-Grundlagen | 004_Einfache Datentypen/Code/einfache_datentypen.py | '''
Micropython basics
Simple data types
Version 1.00, 15.12.2019
Der Hobbyelektroniker
https://community.hobbyelektroniker.ch
https://www.youtube.com/c/HobbyelektronikerCh
The code may be used freely with attribution.
'''
ganze_zahl = 10
dezimalzahl = 5.75
zeichen = 'A'
komplexe_zahl = 3.5 + 2.5j
text = "2.5"
ja_nein = True
print("Alle Variablen ausgeben")
print(ganze_zahl)
print(dezimalzahl)
print(zeichen)
print(komplexe_zahl)
print(text)
print(ja_nein)
print("------------------------")
print()
print("Als int ausgeben")
print(int(ganze_zahl))
print(int(dezimalzahl))
print("int(zeichen) geht nicht, verwende ord(zeichen)")
print(ord(zeichen))
print("int(komplexe_zahl) geht nicht")
print("int(text) geht nicht")
print(int(ja_nein))
print("------------------------")
print()
print("Als float ausgeben")
print(float(ganze_zahl))
print(float(dezimalzahl))
print("float(zeichen) geht nicht")
print("float(komplexe_zahl) geht nicht")
print(float(text))
print(float(ja_nein))
print("------------------------")
print()
print("Als complex ausgeben")
print(complex(ganze_zahl))
print(complex(dezimalzahl))
print("complex(zeichen) geht nicht")
print(complex(komplexe_zahl))
print(complex(text))
print(complex(ja_nein))
print("------------------------")
print()
print("Als str ausgeben")
print(str(ganze_zahl))
print(str(dezimalzahl))
print(str(zeichen))
print(str(komplexe_zahl))
print(str(text))
print(str(ja_nein))
print("------------------------")
print()
print("Als bool ausgeben")
print(bool(ganze_zahl))
print(bool(dezimalzahl))
print(bool(zeichen))
print(bool(komplexe_zahl))
print(bool(text))
print(bool(ja_nein))
print("------------------------")
print()
|
hobbyelektroniker/Micropython-Grundlagen | 009_Bedingungen und Schleifen/Code/Entscheidungen1.py | <filename>009_Bedingungen und Schleifen/Code/Entscheidungen1.py<gh_stars>1-10
x = 4
print("Die Zahl {}".format(x))
if x < 5:
print(" ist kleiner als 5")
print("-------")
x = 6
print("Die Zahl {}".format(x))
if x < 5:
print(" ist kleiner als 5")
else:
print(" ist grösser als 5")
print("-------")
x = 5
print("Die Zahl {}".format(x))
if x < 5:
print(" ist kleiner als 5")
elif x == 5:
print(" ist exakt 5")
else:
print(" ist grösser als 5")
print("-------")
x = 6
print("Die Zahl {}".format(x))
if x > 5: print(" ist grösser als 5")
print("-------")
x = 8
print("Die Zahl {}".format(x))
if x >= 5 and x <= 10 : print(" liegt zwischen 5 und 10")
|
hobbyelektroniker/Micropython-Grundlagen | 009_Bedingungen und Schleifen/Code/for1.py | <reponame>hobbyelektroniker/Micropython-Grundlagen<filename>009_Bedingungen und Schleifen/Code/for1.py<gh_stars>1-10
menge = (1,3,2,5,3) # tuple
for x in menge:
print(x)
print("----------")
menge = {1,3,2,5,3} # Set
for x in menge:
print(x)
print("----------")
menge = [1,3,2,5,3] # List
for x in menge:
print(x)
print("----------")
menge = [1,3,2,5,3]
for x in menge:
print(x)
if x == 3: break
print("----------")
menge = "Hallo"
for x in menge:
print(x)
print("----------")
menge = range(2,10)
for x in menge:
print(x)
print("----------")
menge = range(2,10,2)
for x in menge:
print(x)
print("----------")
menge = range(10,2,-2)
for x in menge:
print(x)
print("----------")
|
hobbyelektroniker/Micropython-Grundlagen | 010_Funktionen/Code/func5.py | import math
def flaeche(**kwargs):
print()
print(kwargs)
if "r" in kwargs:
return kwargs["r"]**2 * math.pi
elif "d" in kwargs:
return kwargs["d"]**2 * math.pi / 4
elif "g" in kwargs and "h" in kwargs:
return kwargs["g"] * kwargs["h"] / 2
elif "a" in kwargs:
a = kwargs["a"]
if "b" in kwargs:
b = kwargs["b"]
else:
b = a
return a * b
else:
return None
# print(flaeche(5)) # circle with radius r does not work here!
print(flaeche(d=2)) # circle with diameter d
print(flaeche(r=2)) # circle with radius r
print(flaeche(g=5,h=2)) # triangle with base g and height h
print(flaeche(a=5)) # square with side length a
print(flaeche(a=5,b=2)) # rectangle with sides a and b
|
hobbyelektroniker/Micropython-Grundlagen | 003_Textformatierung/Code/textformatierung3.py | <gh_stars>1-10
'''
Micropython with ESP32
Floats
Version 1.00, 17.10.2019
Der Hobbyelektroniker
https://community.hobbyelektroniker.ch
https://www.youtube.com/c/HobbyelektronikerCh
The code may be used freely with attribution.
'''
zahlen = [2.5,235.25,15.3,8.735,-12.37]
for zahl in zahlen:
print(zahl)
print()
ausgabe = "Die Zahl ist {}, was kommt danach?"
for zahl in zahlen:
print(ausgabe.format(zahl))
print()
# fixed number of digits
ausgabe = "Die Zahl ist {:8.4f}, was kommt danach?"
for zahl in zahlen:
print(ausgabe.format(zahl))
print()
# do not print too many digits!!!
ausgabe = "Die Zahl ist {:12.8f}, was kommt danach?"
for zahl in zahlen:
print(ausgabe.format(zahl))
print()
# always print the sign
ausgabe = "Die Zahl ist {:+8.4f}, was kommt danach?"
for zahl in zahlen:
print(ausgabe.format(zahl))
print()
# print leading zeros
ausgabe = "Die Zahl ist {:+08.4f}, was kommt danach?"
for zahl in zahlen:
print(ausgabe.format(zahl))
print()
# left-aligned
ausgabe = "Die Zahl ist {:<8.4f}, was kommt danach?"
for zahl in zahlen:
print(ausgabe.format(zahl))
print()
# right-aligned
ausgabe = "Die Zahl ist {:>8.4f}, was kommt danach?"
for zahl in zahlen:
print(ausgabe.format(zahl))
print()
# centred
ausgabe = "Die Zahl ist {:^8.4f}, was kommt danach?"
for zahl in zahlen:
print(ausgabe.format(zahl))
print()
# signs aligned in a column
ausgabe = "Die Zahl ist {: =+8.4f}, was kommt danach?"
for zahl in zahlen:
print(ausgabe.format(zahl))
print()
# signs aligned in a column, with leading zeros
ausgabe = "Die Zahl ist {:0=+8.4f}, was kommt danach?"
for zahl in zahlen:
print(ausgabe.format(zahl))
print()
|
hobbyelektroniker/Micropython-Grundlagen | 008_Mengentypen - Dictionary/Code/dictionaries1.py | # create a dictionary with predefined values
dict1 = {
"land": "Schweiz",
"ort": "Bern",
"einwohner": 140000
}
print(dict1)
print()
# create an empty dictionary
dict2 = dict()
print(dict2)
print()
# create from another dictionary
dict3 = dict1.copy()
print(dict3)
print()
dict4 = dict(dict1)
print(dict4)
print()
# create from a list of keys (tuple)
dict2 = dict.fromkeys(("land","ort"))
print(dict2)
dict2 = dict.fromkeys(("land","ort"),"default")
print(dict2)
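# -> {'land': 'default', 'ort': 'default'}; without the second argument
# fromkeys fills every key with None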
print()
# read a single value
print(dict1["ort"]) # raises an error if the key does not exist
print()
print(dict1.get("ort"))
print(dict1.get("existiert nicht")) # gibt None zurück
print(dict1.get("existiert nicht","Standardwert"))
# Existiert ein Schlüssel
print("ort" in dict1)
print()
# list all keys
for x in dict1:
print(x)
print()
for x in dict1.keys():
print(x)
print()
# list all values
for x in dict1.values():
print(x)
print()
# list all key-value pairs
for k,v in dict1.items():
print(k,v)
print()
# number of key-value pairs
print(len(dict1))
print()
|