def ficha(n='<desconhecido>', g=0):
return f'O jogador {n} fez {g} gol(s) no campeonato.'
print('\033[30m' + '-' * 35)
nome = input('Nome do Jogador: ').strip().capitalize()
gol = input('Número de Gols: ').strip()
if gol == '' and nome == '':
print(ficha())
elif nome == '' and gol.isnumeric():
    print(ficha(g=int(gol)))
elif gol.isnumeric():
    print(ficha(nome, int(gol)))
else:
    print(ficha(nome))
|
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import render, redirect
from django.template.loader import get_template
from django.urls import reverse
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.views.generic import View, TemplateView, ListView, DetailView, FormView, CreateView, UpdateView
from filetransfers.api import serve_file, public_download_url
from django.views.static import serve
from PIAs.forms import NewPIAForm
from PIAs.fusioncharts import FusionCharts
from PIAs.models import Profile, RiesgoInherente, Results, Risk, Project, PIA, LastAccessMixin, LastEditMixin, Organization
from PIAs.scripts.writer import TSV
from shared.decorators import active_only
import os
User = get_user_model()
@method_decorator(active_only, name='dispatch')
class ProjectsView(LastAccessMixin,ListView):
model = Project
template_name = 'projects.html'
def get_queryset(self):
return self.model.objects.filter(organization=Profile.objects.get(user=self.request.user).organization)
@method_decorator(active_only, name='dispatch')
class ProjectView(DetailView):
model = Project
template_name = 'project.html'
@method_decorator(active_only, name='dispatch')
class PIAsView(DetailView):
model = Project
template_name = 'project.html'
def get(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.deny_if_not_owner(request.user)
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['pias'] = PIA.objects.filter(project=self.get_object())
return context
@method_decorator(active_only, name='dispatch')
class PIAView(TemplateView):
template_name = 'pia1.html'
pia = Profile.objects.get
def get(self, request, *args, **kwargs):
self.object = PIA.objects.get(id=self.kwargs['pk'])
if self.object.project.organization != Profile.objects.get(user=self.request.user).organization:
self.object.deny_if_not_owner(request.user)
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['riesgosinh'] = RiesgoInherente.objects.filter(PIA=self.kwargs['pk'])
context['pia'] = PIA.objects.get(id=self.kwargs['pk'])
return context
@method_decorator(active_only, name='dispatch')
class createPIAView(CreateView):
model = PIA
fields = ['name',]
template_name = 'pia_form.html'
def dispatch(self, *args, **kwargs):
"""
Overridden so we can make sure the instance exists
before going any further.
"""
self.project = get_object_or_404(Project, id=kwargs['project_pk'])
self.p_id = self.project.id
return super().dispatch(*args, **kwargs)
def form_valid(self, form):
# This method is called when valid form data has been POSTed.
# It should return an HttpResponse.
form.instance.project = self.project
return super().form_valid(form)
@method_decorator(active_only, name='dispatch')
class createProjectView(CreateView):
model = Project
fields = ['title',]
template_name = 'project_form.html'
def dispatch(self, *args, **kwargs):
"""
Overridden so we can make sure the instance exists
before going any further.
"""
self.organization = get_object_or_404(Organization, orgName=Profile.objects.get(user=self.request.user).organization)
return super().dispatch(*args, **kwargs)
def form_valid(self, form):
# This method is called when valid form data has been POSTed.
# It should return an HttpResponse.
form.instance.organization = self.organization
return super().form_valid(form)
@method_decorator(active_only, name='dispatch')
class PIAsDetailedView(TemplateView):
template_name = 'pia.html'
prof = Profile.objects.get
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['profile'] = Profile.objects.get(user=self.request.user).organization
return context
@method_decorator(active_only, name='dispatch')
class createRIView(CreateView):
model = RiesgoInherente
fields = ['risk','probability']
template_name = 'risk_form.html'
def get(self, request, *args, **kwargs):
self.object = PIA.objects.get(id=self.kwargs['pk'])
if self.object.project.organization != Profile.objects.get(user=self.request.user).organization:
self.object.deny_if_not_owner(request.user)
self.object.last_edited = timezone.now()
self.object.save(update_fields=['last_edited'])
return super().get(request, *args, **kwargs)
def dispatch(self, *args, **kwargs):
"""
        Overridden so we can make sure the related PIA and Project
        instances exist before going any further.
"""
self.pia = get_object_or_404(PIA, id=kwargs['pk'])
self.pia_id = self.pia.id
self.project = get_object_or_404(Project, id=kwargs['project_pk'])
self.p_id = self.project.id
return super().dispatch(*args, **kwargs)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def form_valid(self, form):
# This method is called when valid form data has been POSTed.
# It should return an HttpResponse.
form.instance.PIA = self.pia
        risk = form.instance.risk
        form.instance.confModImpact = risk.confImpact
        form.instance.integModImpact = risk.integImpact
        form.instance.disModImpact = risk.disImpact
        form.instance.noRepInhRisk = risk.noRepImpact
return super().form_valid(form)
def get_success_url(self, **kwargs):
return reverse("pia", kwargs={'pk': self.pia_id,'project_pk':self.p_id})
@method_decorator(active_only, name='dispatch')
class updateRIView(LastEditMixin,UpdateView):
model = RiesgoInherente
fields = ['risk','probability','confModImpact','integModImpact','disModImpact','noRepInhRisk']
template_name = 'editrisk_form.html'
def dispatch(self, *args, **kwargs):
self.pia = get_object_or_404(PIA, id=kwargs['pia_pk'])
self.pia_id = self.pia.id
self.project = get_object_or_404(Project, id=kwargs['project_pk'])
self.p_id = self.project.id
return super().dispatch(*args, **kwargs)
def get_success_url(self, **kwargs):
return reverse("pia", kwargs={'pk': self.pia_id, 'project_pk': self.p_id})
#@method_decorator(active_only, name='dispatch')
def deleteRIView(request, **kwargs):
model = RiesgoInherente
instance = get_object_or_404(RiesgoInherente, id=kwargs['pk'])
instance.deny_if_not_owner(request.user)
instance.delete()
return redirect("pia", pk = kwargs['pia_pk'], project_pk = kwargs['project_pk'])
#@method_decorator(active_only, name='dispatch')
def download(request, **kwargs):#id
    pia = PIA.objects.get(id=kwargs['pk'])
    if pia.project.organization != Profile.objects.get(user=request.user).organization:
        pia.deny_if_not_owner(request.user)
RI = []
ri = RiesgoInherente.objects.filter(PIA=kwargs['pk'])
if ri:
for r in ri:
RI.append({'Family':"essential",'Thread':str(r.risk.thread), 'likely':r.probability,
'A': r.disModImpact/100, 'I':r.integModImpact/100, 'C': r.confModImpact/100,
'Auth':r.noRepInhRisk/100})
tsv = TSV(kwargs['pk']).file(RI)
result_obj,created = Results.objects.update_or_create(PIA = PIA.objects.get(id=kwargs['pk']),
defaults={'tsv': tsv})
    return serve_file(request, result_obj.tsv, save_as="tsv_pia" + result_obj.date.strftime('%m-%d-%Y-%H%M%S'))
def download_catalog(request, **kwargs):
filepath = 'TSV_FILES/ext_threats_pia_en.xml'
fsock = open(filepath, 'r',encoding='utf-8')
response = HttpResponse(fsock, content_type='application/vnd.ms-excel')
response['Content-Disposition'] = "attachment; filename=ext_threats_pia_en.xml"
return response
class NewPIAView(FormView):
template_name = 'pia_form.html'
form_class = NewPIAForm
def form_valid(self, form):
# This method is called when valid form data has been POSTed.
# It should return an HttpResponse.
        p = get_object_or_404(Project, id=self.kwargs.get('project_pk'))
return super().form_valid(form)
class GeneratePdf(View):
def get(self, request, *args, **kwargs):
template = get_template('report.html')
context = {
"pia": PIA.objects.get(id=kwargs['pk']),
'riesgosinh' : RiesgoInherente.objects.filter(PIA=self.kwargs['pk']),
"customer_name": "John Cooper",
"amount": 1399.99,
"today": "Today",
}
        html = template.render(context)
        # NOTE: render_to_pdf is assumed to come from a project-level PDF helper
        # (e.g. an xhtml2pdf wrapper); it is not imported in this module.
        pdf = render_to_pdf('report.html', context)
if pdf:
response = HttpResponse(pdf, content_type='application/pdf')
filename = "PIA_%s.pdf" % ("12341231")
content = "inline; filename='%s'" % (filename)
download = request.GET.get("download")
if download:
content = "attachment; filename='%s'" % (filename)
response['Content-Disposition'] = content
return response
return HttpResponse("Not found")
class GeneratePdf2(View):
def get(self, request, *args, **kwargs):
ri = RiesgoInherente.objects.filter(PIA=self.kwargs['pk'])
datas = []
rows = []
for r in ri:
datas.append({
"rowid": str(r.risk),
"columnid": "confidentiality",
"value": r.confModImpact,
"tllabel": str(r.risk),
})
datas.append({
"rowid": str(r.risk),
"columnid": "integrity",
"value": r.integModImpact,
"tllabel": str(r.risk),
})
datas.append({
"rowid": str(r.risk),
"columnid": "availability",
"value": r.disModImpact,
"tllabel": str(r.risk),
})
datas.append({
"rowid": str(r.risk),
"columnid": "authenticity",
"value": r.noRepInhRisk,
"tllabel": str(r.risk),
})
rows.append({
"id": str(r.risk),
"label": str(r.risk)
})
        # NOTE: draw_data below is leftover FusionCharts sample configuration
        # (phone comparison demo); only draw_data2 is rendered further down.
        draw_data = {
"chart": {
"caption": "Privacy Risk Impact",
"subcaption": "By Security Domain",
"xAxisName": "Security Domain",
"yAxisName": "Privacy Risk",
"showplotborder": "1",
"showValues": "1",
"xAxisLabelsOnTop": "1",
"plottooltext": "<div id='nameDiv' style='font-size: 12px; border-bottom: 1px dashed #666666; font-weight:bold; padding-bottom: 3px; margin-bottom: 5px; display: inline-block; color: #888888;' >$rowLabel :</div>{br}Rating : <b>$dataValue</b>{br}$columnLabel : <b>$tlLabel</b>{br}<b>$trLabel</b>",
"baseFontColor": "#333333",
"baseFont": "Helvetica Neue,Arial",
"toolTipBorderRadius": "2",
"toolTipPadding": "5",
"theme": "fusion"
},
"rows": {
"row": [
{
"id": "SGS5",
"label": "Samsung Galaxy S5"
},
{
"id": "HTC1M8",
"label": "HTC One (M8)"
},
{
"id": "IPHONES5",
"label": "Apple iPhone 5S"
},
{
"id": "LUMIA",
"label": "Nokia Lumia 1520"
}
]
},
"columns": {
"column": [
{
"id": "processor",
"label": "Processor"
},
{
"id": "screen",
"label": "Screen Size"
},
{
"id": "price",
"label": "Price"
},
{
"id": "backup",
"label": "Battery Backup"
},
{
"id": "cam",
"label": "Camera"
}
]
},
"dataset": [
{
"data": []
}
],
"colorrange": {
"gradient": "100",
"minvalue": "0",
"code": "E24B1A",
"startlabel": "Poor",
"endlabel": "Good",
"color": [
{
"code": "E24B1A",
"minvalue": "1",
"maxvalue": "5",
"label": "Bad"
},
{
"code": "F6BC33",
"minvalue": "5",
"maxvalue": "8.5",
"label": "Average"
},
{
"code": "6DA81E",
"minvalue": "8.5",
"maxvalue": "10",
"label": "Good"
}
]
}
}
draw_data2 = {
"chart": {
"caption": "Privacy Risk Impact",
"subcaption": "By Security Domain",
"xAxisName": "Security Domain",
"yAxisName": "Privacy Risk",
"showplotborder": "1",
"xAxisLabelsOnTop": "1",
"plottooltext": "<div id='nameDiv' style='font-size: 12px; border-bottom: 1px dashed #666666; font-weight:bold; padding-bottom: 3px; margin-bottom: 5px; display: inline-block; color: #888888;' >$rowLabel :</div>{br}Rating : <b>$dataValue</b>{br}$columnLabel : <b>$tlLabel</b>{br}<b>$trLabel</b>",
"baseFontColor": "#333333",
"baseFont": "Helvetica Neue,Arial",
"captionFontSize": "14",
"subcaptionFontSize": "14",
"subcaptionFontBold": "0",
"showBorder": "0",
"bgColor": "#ffffff",
"showShadow": "0",
"canvasBgColor": "#ffffff",
"canvasBorderAlpha": "0",
"legendBgAlpha": "0",
"legendBorderAlpha": "0",
"legendShadow": "0",
"legendItemFontSize": "10",
"legendItemFontColor": "#666666",
"toolTipColor": "#ffffff",
"toolTipBorderThickness": "0",
"toolTipBgColor": "#000000",
"toolTipBgAlpha": "80",
"toolTipBorderRadius": "2",
"toolTipPadding": "5"
},
"rows": {
"row": [
]
},
"columns": {
"column": [
{
"id": "confidentiality",
"label": "Confidentiality"
},
{
"id": "integrity",
"label": "Integrity"
},
{
"id": "availability",
"label": "Availability"
},
{
"id": "authenticity",
"label": "Authenticity"
}
]
},
"dataset": [
{
"data": [
]
}
],
"colorrange": {
"gradient": "80",
"minvalue": "0",
"code": "6DA81E",
"startlabel": "Negligible",
"endlabel": "Maximum",
"color": [
{
"code": "CC0000",
"minvalue": "75",
"maxvalue": "100",
"label": "Maximum"
},
{
"code": "FF8000",
"minvalue": "40",
"maxvalue": "75",
"label": "Significant"
},
{
"code": "F6BC33",
"minvalue": "0",
"maxvalue": "40",
"label": "Limited"
}
]
}
}
        draw_data2["rows"]["row"].extend(rows)
        for d in datas:
            draw_data2["dataset"][0]["data"].insert(0, d)
        # Create an object for the heatmap chart using the FusionCharts class constructor
        scatter = FusionCharts("heatmap", "ex1", "1000", "400", "chart-1", "json", str(draw_data2))
context = {
"pia": PIA.objects.get(id=kwargs['pk']),
'riesgosinh': RiesgoInherente.objects.filter(PIA=self.kwargs['pk']),
"customer_name": "John Cooper",
"amount": 1399.99,
"today": "Today",
'output': scatter.render(),
}
# returning complete JavaScript and HTML code,
# which is used to generate chart in the browsers.
return render(request, 'fusion.html', context)
|
import numpy as np
def flipLabels(Y, perc):
"""Flips randomly selected labels of a binary classification problem with labels +1,-1
Arguments:
Y: array of labels
perc: percentage of labels to be flipped
    Returns:
        Y_noisy: copy of Y with perc percent of the labels flipped
        (-1 is returned on invalid input)
    """
if perc < 1 or perc > 100:
        print("perc should be a percentage value between 1 and 100.")
return -1
if any(np.abs(Y) != 1):
        print("The values of Y should be +1 or -1.")
return -1
Y_noisy = np.copy(np.squeeze(Y))
if Y_noisy.ndim > 1:
print("Please supply a label array with only one dimension")
return -1
n = Y_noisy.size
n_flips = int(np.floor(n * perc / 100))
idx_to_flip = np.random.choice(n, size=n_flips, replace=False)
Y_noisy[idx_to_flip] = -Y_noisy[idx_to_flip]
return Y_noisy
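# A minimal usage sketch (illustrative only): flip 10% of the labels of a
# synthetic +1/-1 label vector and count how many entries actually changed.
if __name__ == "__main__":
    labels = np.sign(np.random.randn(100))
    labels[labels == 0] = 1  # np.sign may return 0; map it to +1
    noisy = flipLabels(labels, 10)
    print("flipped labels:", int(np.sum(noisy != labels)))  # expected: 10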
|
class AbstractLstmModelBuilder(object):
"""The model combines all the components
and provides interfaces for users to train the
model and generate sequences with the model.
"""
    def _build_generating_model(self, tensors):
        """Unroll and compile tensors in self.lstm_generating
        together with self.tensors, self.mixture_density_builder
        etc. The unrolled and compiled tensors are used for
        generating.
        """
raise NotImplementedError
    def _build_training_model(self, tensors):
        """Unroll and compile tensors in self.lstm_training
        together with self.tensors, self.mixture_density_builder
        etc. The unrolled and compiled tensors are used for
        training.
        """
raise NotImplementedError
def train(self,
input_values_dict,
epochs):
"""Train the lstm model with the unrolled and compiled
tensors in self.lstm_training. The input and output
sequences used in training are stored in input_values_dict.
Args:
input_values_dict(dict:(str -> np.array)): A dictionary
of data with the keys being name as strings and
values being the data as np.array.
epochs(int): number of epochs that the model should
be trained.
"""
raise NotImplementedError
    def generate_complete_sequences(self, input_values_dict):
        """Generate the complete sequences from the input sequences
        given in input_values_dict.
Args:
input_values_dict(dict:(str -> np.array)): A dictionary
of data with the keys being name as strings and
values being the data as np.array.
"""
raise NotImplementedError
    def generate_partial_sequences(self, input_values_dict):
        """Generate partially observed sequences from the input
        sequences given in input_values_dict.
Args:
input_values_dict(dict:(str -> np.array)): A dictionary
of data with the keys being name as strings and
values being the data as np.array.
"""
raise NotImplementedError
    def _save_model(self, tensorflow_session):
        """Save all weights on the graph that is related to the
        given tensorflow_session.
        Args:
            tensorflow_session: TensorFlow session that owns the graph weights.
        """
raise NotImplementedError
    def _load_model(self, tensorflow_session):
        """Load previously saved weights into the graph related to the
        given tensorflow_session.
        """
        raise NotImplementedError
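# A hypothetical usage sketch (names are illustrative; a concrete subclass must
# implement the abstract methods above before any of these calls will work):
#
#     builder = ConcreteLstmModelBuilder(...)
#     builder.train({"inputs": x_train, "targets": y_train}, epochs=50)
#     full_seqs = builder.generate_complete_sequences({"inputs": x_seed})
#     partial_seqs = builder.generate_partial_sequences({"inputs": x_partial})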
|
from scoring_engine.engine.basic_check import BasicCheck
class MSSQLCheck(BasicCheck):
required_properties = ['database', 'command']
CMD = "/opt/mssql-tools/bin/sqlcmd -S {0},{1} -U {2} -P {3} -d {4} -Q {5}"
def command_format(self, properties):
account = self.get_random_account()
return (self.host, self.port, account.username, account.password, properties['database'], properties['command'])
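# Illustrative example (made-up values): with host 10.0.0.5, port 1433, a random
# account sa/Passw0rd and properties {'database': 'scoring', 'command': '"SELECT 1"'},
# formatting CMD with the tuple returned by command_format() yields:
#   /opt/mssql-tools/bin/sqlcmd -S 10.0.0.5,1433 -U sa -P Passw0rd -d scoring -Q "SELECT 1"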
|
'''
Extract tables from HTML/XBRL files in an input directory,
and put the resulting files in an output directory -
one table per file.
'''
import os
import re
import glob
import html
from bs4 import BeautifulSoup
from utils.environ import data_dir, extracted_tables_dir
from utils.html import replace_html_tags
TABLES_EXTRACTED_DIR_SUFFIX = os.path.split(extracted_tables_dir())[1]
TABLES_EXTRACTED_FILE_SUFFIX = 'table-extracted'
HTML_FILE_TYPE = 0
TEXT_FILE_TYPE = 1
XBRL_FILE_TYPE = 2
MIN_TABLE_SIZE = 10240 # 10KB
# Get everything including the table tags.
regex_table_for_text = re.compile(r'(<TABLE.*?>.*?<\/TABLE>)',
flags=re.DOTALL |
re.IGNORECASE)
# Get everything including the table tags.
regex_table_for_html = re.compile(r'(<TABLE.*?>.*?<\/TABLE>)',
flags=re.DOTALL | re.IGNORECASE)
regex_xbrl_file = re.compile(r'<XBRL>',
flags=re.DOTALL | re.IGNORECASE)
def tables_extracted_dirname(f_name):
prefix, date_range = os.path.split(f_name)
prefix, filing_type = os.path.split(prefix)
_, company_name = os.path.split(prefix)
dir_name = os.path.join(os.path.split(data_dir())[0],
TABLES_EXTRACTED_DIR_SUFFIX,
company_name,
filing_type,
date_range)
return dir_name
def extracted_tables_filename(dir_name, id):
return os.path.join(dir_name,
TABLES_EXTRACTED_DIR_SUFFIX,
f'{id}.{TABLES_EXTRACTED_FILE_SUFFIX}')
def write_extracted_table_file(filename, filedata):
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'w') as f:
f.write(filedata)
def save_files(dir_name, file_type, matches):
id = 0
    # findall() with a single capture group already returns just the table markup
    # for every table, so iterate over all matches.
    for match in matches:
# Ignore small tables - many times they're just formatting.
if len(match) < MIN_TABLE_SIZE:
continue
if file_type == TEXT_FILE_TYPE:
match = replace_html_tags(match)
if file_type == HTML_FILE_TYPE:
match = prettify_html(match)
filename = extracted_tables_filename(dir_name, id)
write_extracted_table_file(filename, match)
id += 1
def save_tables(matches, filename, file_type):
if len(matches) == 0:
return False
else:
save_files(tables_extracted_dirname(filename), file_type, matches)
return True
def is_xbrl_file(filedata):
upper_case_xbrl_tags = \
all([s in filedata for s in ['<XBRL', '</XBRL']])
lower_case_xbrl_tags = \
all([s in filedata for s in ['<xbrl', '</xbrl']])
return upper_case_xbrl_tags or lower_case_xbrl_tags
def is_html_file(filedata):
upper_case_body_tags = \
all([s in filedata for s in ['<BODY', '/BODY']])
lower_case_body_tags = \
all([s in filedata for s in ['<body', '/body']])
return upper_case_body_tags or lower_case_body_tags
def prettify_html(data):
result = data
try:
soup = BeautifulSoup(data, 'html.parser')
result = soup.prettify()
    except RecursionError:
        print("Recursion error occurred - don't prettify HTML")
return result
def get_tables_from_single_file(top_input_dirname,
filename, top_output_dirname,
num_files_of_type):
with open(filename, 'r') as f:
filedata = html.unescape(f.read())
if is_xbrl_file(filedata):
# Although it says XBRL, it can be processed as HTML
print(' xbrl file')
matches = regex_table_for_html.findall(filedata)
tables_saved = save_tables(matches, filename, HTML_FILE_TYPE)
num_files_of_type[XBRL_FILE_TYPE] += 1
else:
if is_html_file(filedata):
print(' html file')
matches = regex_table_for_html.findall(filedata)
tables_saved = save_tables(matches, filename, HTML_FILE_TYPE)
num_files_of_type[HTML_FILE_TYPE] += 1
else:
num_files_of_type[TEXT_FILE_TYPE] += 1
# We don't have the code to deal with text files yet.
return
# print(' text file')
# matches = regex_table_for_text.findall(filedata)
# tables_saved = save_tables(matches, filename, TEXT_FILE_TYPE)
if tables_saved is False:
print(f' >>> Error extracting file: {filename}')
def extract_all_tables():
top_input_dirname = data_dir()
search_path = os.path.join(data_dir(), '00*', '10-k', '*')
output_dirname = extracted_tables_dir()
i = 0
num_files_of_type = [0, 0, 0]
print(f'search_path: {search_path}')
for filename in glob.iglob(search_path):
# if i > 20:
# break
print(f'Extracting[{i}]: {filename}', end='')
get_tables_from_single_file(top_input_dirname, filename,
output_dirname, num_files_of_type)
print(f'num_files_of_type: {num_files_of_type}')
i += 1
if __name__ == '__main__':
extract_all_tables()
|
GRID = ['XXXXXXXXX',
'X X',
'X X X X X',
'X X',
'XXXXXXXXX']
GRID_SQ_SIZE = 64
HALF_GRID_SQ_SIZE = GRID_SQ_SIZE // 2
SPEED = 2
# Convert pixel coordinates to grid coordinates
def get_grid_pos(x, y):
return x // GRID_SQ_SIZE, y // GRID_SQ_SIZE
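# Quick illustrative check of the conversion above: with 64-pixel squares,
# pixel (130, 70) lands in grid column 2, row 1.
assert get_grid_pos(130, 70) == (2, 1)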
|
from decisionai_plugin.common.plugin_service import PluginService
from decisionai_plugin.common.util.constant import STATUS_SUCCESS, STATUS_FAIL
from decisionai_plugin.common.util.timeutil import get_time_offset, str_to_dt, dt_to_str
from telemetry import log
import copy
class DemoService(PluginService):
def __init__(self):
super().__init__(False)
def do_verify(self, parameters, context):
# Check series set permission
for data in parameters['seriesSets']:
meta = self.tsanaclient.get_metric_meta(parameters['apiEndpoint'], parameters['apiKey'], data['metricId'])
if meta is None:
return STATUS_FAIL, 'You have no permission to read Metric {}'.format(data['metricId'])
return STATUS_SUCCESS, ''
def do_inference(self, model_dir, parameters, context):
log.info('Start to inference {}'.format('Demo'))
try:
amplifier = parameters['instance']['params']['amplifier']
end_time = str_to_dt(parameters['endTime'])
if 'startTime' in parameters:
start_time = str_to_dt(parameters['startTime'])
else:
start_time = end_time
series = self.tsanaclient.get_timeseries(parameters['apiEndpoint'], parameters['apiKey'], parameters['seriesSets'], start_time, end_time)
res = []
for data in series or []:
for value in data.value or []:
v = {
'dim': data.dim,
'metric_id': data.metric_id,
'series_id': data.series_id,
'value': value['value'] * amplifier,
'timestamp': value['timestamp']
}
res.append(v)
self.tsanaclient.save_inference_result(parameters, res)
return STATUS_SUCCESS, ''
except Exception as e:
log.error('Exception thrown by inference: ' + repr(e))
return STATUS_FAIL, 'Exception thrown by inference: ' + repr(e)
|
import pytest
@pytest.mark.django_db
def test_challenge_set_fixture(ChallengeSet):
assert ChallengeSet.challenge.is_admin(ChallengeSet.creator)
assert not ChallengeSet.challenge.is_participant(ChallengeSet.creator)
assert ChallengeSet.challenge.is_admin(ChallengeSet.admin)
assert not ChallengeSet.challenge.is_participant(ChallengeSet.admin)
assert not ChallengeSet.challenge.is_admin(ChallengeSet.participant)
assert ChallengeSet.challenge.is_participant(ChallengeSet.participant)
assert not ChallengeSet.challenge.is_admin(ChallengeSet.participant1)
assert ChallengeSet.challenge.is_participant(ChallengeSet.participant1)
assert ChallengeSet.participant != ChallengeSet.participant1
assert not ChallengeSet.challenge.is_admin(ChallengeSet.non_participant)
assert not ChallengeSet.challenge.is_participant(
ChallengeSet.non_participant
)
@pytest.mark.django_db
def test_two_challenge_sets_fixture(TwoChallengeSets):
assert TwoChallengeSets.ChallengeSet1.challenge.is_admin(
TwoChallengeSets.admin12
)
assert TwoChallengeSets.ChallengeSet2.challenge.is_admin(
TwoChallengeSets.admin12
)
assert not TwoChallengeSets.ChallengeSet1.challenge.is_participant(
TwoChallengeSets.admin12
)
assert not TwoChallengeSets.ChallengeSet2.challenge.is_participant(
TwoChallengeSets.admin12
)
assert TwoChallengeSets.ChallengeSet1.challenge.is_participant(
TwoChallengeSets.participant12
)
assert TwoChallengeSets.ChallengeSet2.challenge.is_participant(
TwoChallengeSets.participant12
)
assert not TwoChallengeSets.ChallengeSet1.challenge.is_admin(
TwoChallengeSets.participant12
)
assert not TwoChallengeSets.ChallengeSet2.challenge.is_admin(
TwoChallengeSets.participant12
)
assert not TwoChallengeSets.ChallengeSet1.challenge.is_participant(
TwoChallengeSets.admin1participant2
)
assert TwoChallengeSets.ChallengeSet2.challenge.is_participant(
TwoChallengeSets.admin1participant2
)
assert TwoChallengeSets.ChallengeSet1.challenge.is_admin(
TwoChallengeSets.admin1participant2
)
assert not TwoChallengeSets.ChallengeSet2.challenge.is_admin(
TwoChallengeSets.admin1participant2
)
@pytest.mark.django_db
def test_eval_challenge_set_fixture(EvalChallengeSet):
assert EvalChallengeSet.ChallengeSet.challenge.use_evaluation
assert (
EvalChallengeSet.ChallengeSet.challenge
== EvalChallengeSet.method.challenge
)
|
# -*- coding: utf-8 -*-
from flair.parser.modules.dropout import SharedDropout
import torch.nn as nn
class MLP(nn.Module):
def __init__(self, n_in, n_hidden, dropout=0, identity = False):
super(MLP, self).__init__()
self.linear = nn.Linear(n_in, n_hidden)
self.identity = identity
if not self.identity:
self.activation = nn.LeakyReLU(negative_slope=0.1)
self.dropout = SharedDropout(p=dropout)
self.reset_parameters()
def reset_parameters(self):
nn.init.orthogonal_(self.linear.weight)
nn.init.zeros_(self.linear.bias)
def forward(self, x):
x = self.linear(x)
if not self.identity:
x = self.activation(x)
x = self.dropout(x)
return x
|
# Generated by Django 3.0.4 on 2020-03-24 12:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('customer', '0003_auto_20200313_0738'),
('eadmin', '0009_salesreport'),
]
operations = [
migrations.AddField(
model_name='salesreport',
name='day',
field=models.CharField(default=24, editable=False, max_length=5, verbose_name='Day'),
),
migrations.AlterField(
model_name='salesreport',
name='customer_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='customer.Customer', to_field='card_number'),
),
migrations.AlterUniqueTogether(
name='salesreport',
unique_together=set(),
),
]
|
# -*- encoding: utf-8 -*-
import pytest
from bs4 import BeautifulSoup as BSoup
from slack_emoji_uploader import beautiful_soup_helper
@pytest.mark.parametrize('html, name, expected', [
# A single <input/> tag with a value
('<input name="alexander" value="armstrong" />', 'alexander', 'armstrong'),
# A single <input/> tag with no value
('<input name="brutus" />', 'brutus', None),
# No <input/> tags
('<body><p>Not an input</p></body>', 'caesar', None),
# Multiple matching <input/> tags
('<input name="daedalus" value="diggle" /><input name=daedalus value=duck />', 'daedalus', 'diggle'),
# A single non-matching <input/> tag
('<input name="ernie" value="els" />', 'eric', None),
# Multiple <input/> tags of which the second matches
('<input name="fiona" value="finnegan" /><input name="fred" value="forsyth" />', 'fred', 'forsyth'),
# Multiple <input/> tags of which none match
('<input name="godfrey" value="goodwood" /><input name="grayson" value="gray" />', 'george', None),
])
def test_get_input_value(html, name, expected):
parsed_html = BSoup(html, 'html.parser')
assert beautiful_soup_helper.get_input_value(parsed_html, name) == expected
@pytest.mark.parametrize('html, expected', [
# A single page error
('<p class="alert_error"><i>Awful!</i></p>', 'Awful!'),
# No errors
('<p class="alert_warn"><i>Bad!</i></p>', None),
# Multiple errors
('<p class="alert_error">Contemptible!</p><p class="alert_error">Curses!</p>', 'Contemptible!'),
])
def test_get_page_error(html, expected):
parsed_html = BSoup(html, 'html.parser')
assert beautiful_soup_helper.get_page_error(parsed_html) == expected
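# For readers unfamiliar with the helper under test, a minimal sketch consistent
# with the cases above might look like this (NOT the actual slack_emoji_uploader
# implementation, just an illustration of the expected contract):
#
#     def get_input_value(soup, name):
#         tag = soup.find('input', attrs={'name': name})
#         return tag.get('value') if tag else None
#
#     def get_page_error(soup):
#         tag = soup.find('p', class_='alert_error')
#         return tag.get_text() if tag else None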
|
'''
http://code.djangoproject.com/ticket/7198
'''
|
import maml_rl.envs
import gym
import torch
import numpy as np
# from tqdm import trange
import yaml
from maml_rl.baseline import LinearFeatureBaseline
from maml_rl.samplers import MultiTaskSampler
from maml_rl.utils.helpers import get_policy_for_env, get_input_size
from maml_rl.utils.reinforcement_learning import get_returns
def main(args):
with open(args.config, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
if args.seed is not None:
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
if args.output_folder is not None:
if not os.path.exists(args.output_folder):
os.makedirs(args.output_folder, exist_ok=True)
logs_filename = os.path.join(args.output_folder, 'logs_eval')
env = gym.make(config['env-name'], **config['env-kwargs'])
env.close()
# Policy
policy = get_policy_for_env(env,
hidden_sizes=config['hidden-sizes'],
nonlinearity=config['nonlinearity'])
with open(args.policy, 'rb') as f:
state_dict = torch.load(f, map_location=torch.device(args.device))
policy.load_state_dict(state_dict)
policy.share_memory()
# Baseline
baseline = LinearFeatureBaseline(get_input_size(env))
# Sampler
sampler = MultiTaskSampler(config['env-name'],
env_kwargs=config['env-kwargs'],
batch_size=args.fast_batch_size,
policy=policy,
baseline=baseline,
env=env,
seed=args.seed,
num_workers=args.num_workers)
logs = {'tasks': []}
train_returns, valid_returns = [], []
for i in range(args.num_steps):
exec("after_%s_gradient_step=[]" % i)
for batch in range(args.num_batches):
tasks = sampler.sample_tasks(num_tasks=args.meta_batch_size)
'''
train_episodes is a list whose length is the number of gradient steps,
and each element is also a list of length meta_batch_size containing the different episodes.
For example, train_episodes[0] contains the episodes before any gradient update,
train_episodes[1] the episodes after 1 gradient update (if the number of steps of adaptation is > 1), and so on.
valid_episodes is a list containing the episodes after all the steps of adaptation.
MultiTaskSampler, which is responsible for sampling the trajectories, is doing adaptation locally in each worker.
from line 270 to line 275 in multi_task_sampler.py:
with self.policy_lock:
loss = reinforce_loss(self.policy, train_episodes, params=params)
params = self.policy.update_params(loss,
params=params,
step_size=fast_lr,
first_order=True)
So in test.py, you do get both trajectories before and after adaptation with the simple call to MultiTaskSampler.
And with a few changes to test.py you can even use different number of gradient steps for adaptation by changing
num_steps in your call to sampler.sample().
'''
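        # Illustrative indexing, following the description above (assuming
        # meta_batch_size tasks and num_steps inner-loop gradient steps):
        #   train_episodes[k][t] -> episodes for task t after k gradient updates
        #   valid_episodes[t]    -> episodes for task t after all adaptation steps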
train_episodes, valid_episodes = sampler.sample(tasks,
num_steps=args.num_steps,
fast_lr=config['fast-lr'],
gamma=config['gamma'],
gae_lambda=config['gae-lambda'],
device=args.device)
logs['tasks'].extend(tasks)
train_returns.append(get_returns(train_episodes[0]))
valid_returns.append(get_returns(valid_episodes))
for i in range(args.num_steps):
exec("after_%s_gradient_step.append(get_returns(train_episodes[%i]))" % (i,i))
logs['train_returns'] = np.concatenate(train_returns, axis=0)
logs['valid_returns'] = np.concatenate(valid_returns, axis=0)
for i in range(args.num_steps):
exec("logs['after_%s_gradient_step'] = np.concatenate(after_%s_gradient_step, axis=0)" % (i,i))
with open(logs_filename, 'wb') as f:
np.savez(f, **logs)
print('batch: ', batch)
if __name__ == '__main__':
import argparse
import os
import multiprocessing as mp
parser = argparse.ArgumentParser(description='Reinforcement learning with '
'Model-Agnostic Meta-Learning (MAML) - Test')
parser.add_argument('--config', type=str, required=True,
help='path to the configuration file')
parser.add_argument('--policy', type=str, required=True,
help='path to the policy checkpoint')
# Evaluation
evaluation = parser.add_argument_group('Evaluation')
evaluation.add_argument('--num-batches', type=int, default=1,
help='number of batches (default: 1)')
evaluation.add_argument('--meta-batch-size', type=int, default=100,
help='number of tasks per batch (default: 100)')
evaluation.add_argument('--fast-batch-size', type=int, default=1,
help='Number of trajectories to sample for each task (default: 1)')
evaluation.add_argument('--num-steps', type=int, default=10,
help='Number of gradient steps in the inner loop / fast adaptation (default: 10)')
'''
--num-batches: total evaluation tasks batches
--meta-batch-size: number of tasks per batch
--fast-batch-size: Number of trajectories to sample for each task
--num-steps: Number of gradient steps in the inner loop / fast adaptation
'''
# Miscellaneous
misc = parser.add_argument_group('Miscellaneous')
misc.add_argument('--output-folder', type=str, required=True,
help='name of the output folder (default: maml)')
misc.add_argument('--seed', type=int, default=1,
help='random seed (default: 1)')
misc.add_argument('--num-workers', type=int, default=mp.cpu_count() - 1,
help='number of workers for trajectories sampling (default: '
'{0})'.format(mp.cpu_count() - 1))
misc.add_argument('--use-cuda', action='store_true',
                      help='use cuda (default: false, use cpu). WARNING: Full support for cuda '
'is not guaranteed. Using CPU is encouraged.')
args = parser.parse_args()
args.device = ('cuda' if (torch.cuda.is_available()
and args.use_cuda) else 'cpu')
main(args)
|
# -*- coding: utf-8 -*-
#
# Copyright 2017-2021 AVSystem <avsystem@avsystem.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binascii
import enum
import inspect
import os
import re
import shutil
import subprocess
import sys
import threading
import time
import unittest
import logging
from typing import TypeVar
from .asserts import Lwm2mAsserts
from .lwm2m_test import *
from framework.lwm2m.coap.transport import Transport
try:
import dpkt
_DPKT_AVAILABLE = True
except ImportError:
_DPKT_AVAILABLE = False
T = TypeVar('T')
class LogType(enum.Enum):
Console = 'console'
Valgrind = 'valgrind'
Pcap = 'pcap'
def extension(self):
if self == LogType.Pcap:
return '.pcapng'
else:
return '.log'
def read_some_with_timeout(fd, timeout_s):
import select
deadline = time.time() + timeout_s
while True:
if timeout_s < 0:
return b''
r, w, x = select.select([fd], [], [fd], timeout_s)
if len(r) > 0 or len(x) > 0:
buf = fd.read(65536)
if buf is not None and len(buf) > 0:
return buf
timeout_s = deadline - time.time()
def ensure_dir(dir_path):
try:
os.makedirs(dir_path)
except OSError:
if not os.path.isdir(dir_path):
raise
class CleanupList(list):
def __call__(self):
def merge_exceptions(old, new):
"""
Adds the "old" exception as a context of the "new" one and returns
the "new" one.
If the "new" exception already has a context, the "old" one is added
at the end of the chain, as the context of the innermost exception
that does not have a context.
When the returned exception is rethrown, it will be logged by the
standard Python exception formatter as something like:
Exception: old exception
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
...
Exception: new exception
:param old: "old" exception
:param new: "new" exception
:return: "new" exception with updated context information
"""
tmp = new
while tmp.__context__ is not None:
if tmp.__context__ is old:
return new
tmp = tmp.__context__
tmp.__context__ = old
return new
exc = None
for cleanup_func in self:
try:
cleanup_func()
except Exception as e:
exc = merge_exceptions(exc, e)
if exc is not None:
raise exc
def __enter__(self):
return self
def __exit__(self, _type, value, _traceback):
return self()
class Lwm2mDmOperations(Lwm2mAsserts):
DEFAULT_OPERATION_TIMEOUT_S = 5
def _perform_action(self, server, request, expected_response, timeout_s=None):
server.send(request)
if timeout_s is None:
timeout_s = self.DEFAULT_OPERATION_TIMEOUT_S
res = server.recv(timeout_s=timeout_s)
self.assertMsgEqual(expected_response, res)
return res
def _make_expected_res(self, req, success_res_cls, expect_error_code):
req.fill_placeholders()
if expect_error_code is None:
return success_res_cls.matching(req)()
else:
return Lwm2mErrorResponse.matching(req)(code=expect_error_code)
def create_instance_with_arbitrary_payload(self, server, oid,
format=coap.ContentFormat.APPLICATION_LWM2M_TLV,
iid=None, payload=b'', expect_error_code=None,
**kwargs):
if iid is None:
raise ValueError("IID cannot be None")
req = Lwm2mCreate(path='/%d' % oid, content=payload, format=format)
expected_res = self._make_expected_res(
req, Lwm2mCreated, expect_error_code)
return self._perform_action(server, req, expected_res, **kwargs)
def create_instance_with_payload(self, server, oid, iid=None, payload=b'',
expect_error_code=None, **kwargs):
if iid is None:
raise ValueError("IID cannot be None")
instance_tlv = TLV.make_instance(
instance_id=iid, content=payload).serialize()
return self.create_instance_with_arbitrary_payload(server=server, oid=oid, iid=iid,
payload=instance_tlv,
expect_error_code=expect_error_code,
**kwargs)
def create_instance(self, server, oid, iid=None, expect_error_code=None, **kwargs):
instance_tlv = None if iid is None else TLV.make_instance(
instance_id=iid).serialize()
req = Lwm2mCreate('/%d' % oid, instance_tlv)
expected_res = self._make_expected_res(
req, Lwm2mCreated, expect_error_code)
return self._perform_action(server, req, expected_res, **kwargs)
def create(self, server, path, expect_error_code=None, **kwargs):
req = Lwm2mCreate(Lwm2mPath(path), None)
expected_res = self._make_expected_res(
req, Lwm2mCreated, expect_error_code)
return self._perform_action(server, req, expected_res, **kwargs)
def delete_instance(self, server, oid, iid, expect_error_code=None, **kwargs):
req = Lwm2mDelete('/%d/%d' % (oid, iid))
expected_res = self._make_expected_res(
req, Lwm2mDeleted, expect_error_code)
return self._perform_action(server, req, expected_res, **kwargs)
def read_path(self, server, path, expect_error_code=None, accept=None, **kwargs):
req = Lwm2mRead(path, accept=accept)
expected_res = self._make_expected_res(
req, Lwm2mContent, expect_error_code)
return self._perform_action(server, req, expected_res, **kwargs)
def read_resource(self, server, oid, iid, rid, expect_error_code=None, accept=None, **kwargs):
return self.read_path(server, '/%d/%d/%d' % (oid, iid, rid), expect_error_code,
accept=accept, **kwargs)
def read_instance(self, server, oid, iid, expect_error_code=None, accept=None, **kwargs):
return self.read_path(server, '/%d/%d' % (oid, iid), expect_error_code,
accept=accept, **kwargs)
def read_object(self, server, oid, expect_error_code=None, accept=None, **kwargs):
return self.read_path(server, '/%d' % oid, expect_error_code, accept=accept, **kwargs)
def write_object(self, server, oid, content=b'', expect_error_code=None,
format=coap.ContentFormat.APPLICATION_LWM2M_TLV, **kwargs):
req = Lwm2mWrite('/%d' % (oid,), content, format=format)
expected_res = self._make_expected_res(
req, Lwm2mChanged, expect_error_code)
return self._perform_action(server, req, expected_res, **kwargs)
def write_instance(self, server, oid, iid, content=b'', partial=False, expect_error_code=None,
format=coap.ContentFormat.APPLICATION_LWM2M_TLV, **kwargs):
req = Lwm2mWrite('/%d/%d' % (oid, iid), content,
format=format,
update=partial)
expected_res = self._make_expected_res(
req, Lwm2mChanged, expect_error_code)
return self._perform_action(server, req, expected_res, **kwargs)
def write_resource(self, server, oid, iid, rid, content=b'', partial=False,
format=coap.ContentFormat.TEXT_PLAIN,
expect_error_code=None, **kwargs):
req = Lwm2mWrite('/%d/%d/%d' % (oid, iid, rid), content, format=format,
update=partial)
expected_res = self._make_expected_res(
req, Lwm2mChanged, expect_error_code)
return self._perform_action(server, req, expected_res, **kwargs)
def execute_resource(self, server, oid, iid, rid, content=b'', expect_error_code=None,
**kwargs):
req = Lwm2mExecute('/%d/%d/%d' % (oid, iid, rid), content=content)
expected_res = self._make_expected_res(
req, Lwm2mChanged, expect_error_code)
return self._perform_action(server, req, expected_res, **kwargs)
@staticmethod
def make_path(*args):
def ensure_valid_path(args):
import itertools
valid_args = list(itertools.takewhile(
lambda x: x is not None, list(args)))
if not all(x is None for x in args[len(valid_args):]):
raise AttributeError
return valid_args
return '/' + '/'.join(map(lambda arg: '%d' % arg, ensure_valid_path(list(args))))
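    # Illustrative behaviour of make_path(): trailing Nones are dropped, e.g.
    # make_path(3, 0, 1) -> '/3/0/1' and make_path(3, None, None) -> '/3', while a
    # non-trailing None such as make_path(3, None, 1) raises AttributeError.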
def discover(self, server, oid=None, iid=None, rid=None, expect_error_code=None, **kwargs):
req = Lwm2mDiscover(self.make_path(oid, iid, rid))
expected_res = self._make_expected_res(
req, Lwm2mContent, expect_error_code)
return self._perform_action(server, req, expected_res, **kwargs)
def observe(self, server, oid=None, iid=None, rid=None, riid=None, expect_error_code=None,
**kwargs):
req = Lwm2mObserve(
Lwm2mDmOperations.make_path(oid, iid, rid, riid), **kwargs)
expected_res = self._make_expected_res(
req, Lwm2mContent, expect_error_code)
return self._perform_action(server, req, expected_res)
def write_attributes(self, server, oid=None, iid=None, rid=None, query=[],
expect_error_code=None, **kwargs):
req = Lwm2mWriteAttributes(
Lwm2mDmOperations.make_path(oid, iid, rid), query=query)
expected_res = self._make_expected_res(
req, Lwm2mChanged, expect_error_code)
return self._perform_action(server, req, expected_res, **kwargs)
class Lwm2mTest(unittest.TestCase, Lwm2mAsserts):
DEFAULT_MSG_TIMEOUT = 9000.0
DEFAULT_COMM_TIMEOUT = 9000.0
def __init__(self, test_method_name):
super().__init__(test_method_name)
self.servers = []
self.bootstrap_server = None
def setUp(self, *args, **kwargs):
self.setup_demo_with_servers(*args, **kwargs)
@unittest.skip
def runTest(self):
raise NotImplementedError('runTest not implemented')
def tearDown(self, *args, **kwargs):
self.teardown_demo_with_servers(*args, **kwargs)
def set_config(self, config):
self.config = config
def log_filename(self, extension='.log'):
return os.path.join(self.suite_name(), self.test_name() + extension)
def test_name(self):
return self.__class__.__name__
def suite_name(self):
test_root = self.config.suite_root_path or os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))
name = os.path.abspath(inspect.getfile(type(self)))
if name.endswith('.py'):
name = name[:-len('.py')]
name = name[len(test_root):] if name.startswith(test_root) else name
name = name.lstrip('/')
return name.replace('/', '.')
def make_demo_args(self,
endpoint_name,
servers,
fw_updated_marker_path,
ciphersuites=(0xC030, 0xC0A8, 0xC0AE)):
"""
Helper method for easy generation of demo executable arguments.
"""
# 0xC030 = TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - used by TLS (over TCP, including HTTPS) in tests
# Default ciphersuites mandated by LwM2M:
# 0xC0A8 = TLS_PSK_WITH_AES_128_CCM_8
# 0xC0AE = TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8
security_modes = set(serv.security_mode() for serv in servers)
self.assertLessEqual(len(security_modes), 1,
'Attempted to mix security modes')
security_mode = next(iter(security_modes), 'nosec')
if security_mode == 'nosec':
protocol = 'coap'
else:
protocol = 'coaps'
args = ['--endpoint-name', endpoint_name,
'--security-mode', security_mode]
if fw_updated_marker_path is not None:
args += ['--fw-updated-marker-path', fw_updated_marker_path]
if ciphersuites is not None:
args += ['--ciphersuites', ','.join(map(hex, ciphersuites))]
for serv in servers:
args += ['--server-uri', '%s://127.0.0.1:%d' %
(protocol, serv.get_listen_port(),)]
return args
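    # Illustrative result (made-up values): for one nosec server listening on port
    # 5683, make_demo_args('demo-client', [server], '/tmp/fw-marker') returns
    # ['--endpoint-name', 'demo-client', '--security-mode', 'nosec',
    #  '--fw-updated-marker-path', '/tmp/fw-marker',
    #  '--ciphersuites', '0xc030,0xc0a8,0xc0ae',
    #  '--server-uri', 'coap://127.0.0.1:5683']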
def logs_path(self, log_type, log_root=None, **kwargs):
assert type(log_type) == LogType
dir_path = os.path.join(
log_root or self.config.logs_path, log_type.value)
log_path = os.path.join(dir_path, self.log_filename(
**kwargs, extension=log_type.extension()))
ensure_dir(os.path.dirname(log_path))
return log_path
def read_log_until_match(self, regex, timeout_s):
deadline = time.time() + timeout_s
out = bytearray()
while True:
# Retain only the last two lines - two, because the regexes sometimes check for the end-of-line
last_lf = out.rfind(b'\n')
if last_lf >= 0:
second_to_last_lf = out.rfind(b'\n', 0, last_lf)
if second_to_last_lf >= 0:
del out[0:second_to_last_lf + 1]
if self.demo_process.poll() is not None:
partial_timeout = 0
else:
partial_timeout = min(max(deadline - time.time(), 0.0), 1.0)
out += read_some_with_timeout(self.demo_process.log_file, partial_timeout)
match = re.search(regex, out)
if match:
return match
elif partial_timeout <= 0.0:
return None
def _get_valgrind_args(self):
import shlex
valgrind_list = []
if 'VALGRIND' in os.environ and os.environ['VALGRIND']:
valgrind_list = shlex.split(os.environ['VALGRIND'])
valgrind_list += ['--log-file=' + self.logs_path(LogType.Valgrind)]
return valgrind_list
def _get_demo_executable(self):
demo_executable = os.path.join(
self.config.demo_path, self.config.demo_cmd)
def is_file_executable(file_path):
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
if not is_file_executable(demo_executable):
print('ERROR: %s is NOT executable' % (demo_executable,), file=sys.stderr)
sys.exit(-1)
return demo_executable
def _start_demo(self, cmdline_args, timeout_s=30, prepend_args=None):
"""
Starts the demo executable with given CMDLINE_ARGS.
"""
demo_executable = self._get_demo_executable()
if (os.environ.get('RR')
or ('RRR' in os.environ
and test_or_suite_matches_query_regex(self, os.environ['RRR']))):
logging.info('*** rr-recording enabled ***')
# ignore valgrind if rr was requested
args_prefix = ['rr', 'record']
else:
args_prefix = self._get_valgrind_args()
demo_args = (prepend_args or []) + args_prefix + [demo_executable] + cmdline_args
import shlex
console_log_path = self.logs_path(LogType.Console)
console = open(console_log_path, 'ab')
console.write((' '.join(map(shlex.quote, demo_args)) + '\n\n').encode('utf-8'))
console.flush()
log_file_pos = console.tell()
logging.debug('starting demo: %s', ' '.join(
'"%s"' % arg for arg in demo_args))
import subprocess
self.demo_process = subprocess.Popen(demo_args,
stdin=subprocess.PIPE,
stdout=console,
stderr=console,
bufsize=0)
self.demo_process.log_file_write = console
self.demo_process.log_file_path = console_log_path
self.demo_process.log_file = open(
console_log_path, mode='rb', buffering=0)
self.demo_process.log_file.seek(log_file_pos)
if timeout_s is not None:
# wait until demo process starts
if self.read_log_until_match(regex=re.escape(b'*** ANJAY DEMO STARTUP FINISHED ***'),
timeout_s=timeout_s) is None:
raise self.failureException(
'demo executable did not start in time')
DUMPCAP_COMMAND = 'dumpcap'
@staticmethod
def dumpcap_available():
return not os.getenv('NO_DUMPCAP') and shutil.which(Lwm2mTest.DUMPCAP_COMMAND) is not None
def _start_dumpcap(self, udp_ports):
self.dumpcap_process = None
if not self.dumpcap_available():
return
udp_ports = list(udp_ports)
def _filter_expr():
"""
Generates a pcap_compile()-compatible filter program so that dumpcap will only capture packets that are
actually relevant to the current tests.
Captured packets will include:
- UDP datagrams sent or received on any of the udp_ports
- ICMP Port Unreachable messages generated in response to a UDP datagram sent or received on any of the
udp_ports
"""
if len(udp_ports) == 0:
return ''
# filter expression for "source or destination UDP port is any of udp_ports"
udp_filter = ' or '.join('(udp port %s)' % (port,)
for port in udp_ports)
# below is the generation of filter expression for the ICMP messages
#
# note that icmp[N] syntax accesses Nth byte since the beginning of ICMP header
# and icmp[N:M] syntax accesses M-byte value starting at icmp[N]
# - icmp[0] - ICMP types; 3 ~ Destination Unreachable
# - icmp[1] - ICMP code; for Destination Unreachable: 3 ~ Destination port unreachable
# - icmp[8] is the first byte of the IP header of copy of the packet that caused the error
# - icmp[17] is the IP protocol number; 17 ~ UDP
# - IPv4 header is normally 20 bytes long (we don't anticipate options), so UDP header starts at icmp[28]
# - icmp[28:2] is the source UDP port of the original packet
# - icmp[30:2] is the destination UDP port of the original packet
icmp_pu_filter = ' or '.join(
'(icmp[28:2] = 0x%04x) or (icmp[30:2] = 0x%04x)' % (port, port) for port in
udp_ports)
return '%s or ((icmp[0] = 3) and (icmp[1] = 3) and (icmp[17] = 17) and (%s))' % (
udp_filter, icmp_pu_filter)
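        # Worked example (illustrative): for udp_ports == [5683], _filter_expr() yields
        #   (udp port 5683) or ((icmp[0] = 3) and (icmp[1] = 3) and (icmp[17] = 17)
        #    and ((icmp[28:2] = 0x1633) or (icmp[30:2] = 0x1633)))
        # i.e. capture traffic on UDP port 5683 plus ICMP Port Unreachable replies to it.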
self.dumpcap_file_path = self.logs_path(LogType.Pcap)
dumpcap_command = [self.DUMPCAP_COMMAND, '-w',
self.dumpcap_file_path, '-i', 'lo', '-f', _filter_expr()]
self.dumpcap_process = subprocess.Popen(dumpcap_command,
stdin=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE,
bufsize=0)
# It takes a little while (around 0.5-0.6 seconds on a normal PC) for dumpcap to initialize and actually start
# capturing packets. We want all relevant packets captured, so we need to wait until dumpcap reports it's ready.
# Also, if we haven't done this, there would be a possibility that _terminate_dumpcap() would be called before
# full initialization of dumpcap - it would then essentially ignore the SIGTERM and our test would hang waiting
# for dumpcap's termination that would never come.
dumpcap_stderr = bytearray(b'')
        MAX_DUMPCAP_STARTUP_WAIT_S = 30
        deadline = time.time() + MAX_DUMPCAP_STARTUP_WAIT_S
while time.time() < deadline:
dumpcap_stderr += read_some_with_timeout(
self.dumpcap_process.stderr, 1)
if b'File:' in dumpcap_stderr:
break
if self.dumpcap_process.poll() is not None:
raise ChildProcessError(
'Could not start %r\n' % (dumpcap_command,))
else:
raise ChildProcessError(
'Could not start %r\n' % (dumpcap_command,))
def _reader_func():
try:
while True:
data = self.dumpcap_process.stderr.read()
if len(data) == 0: # EOF
break
except:
pass
self.dumpcap_stderr_reader_thread = threading.Thread(
target=_reader_func)
self.dumpcap_stderr_reader_thread.start()
def setup_demo_with_servers(self,
servers=1,
num_servers_passed=None,
bootstrap_server=False,
legacy_server_initiated_bootstrap_allowed=True,
extra_cmdline_args=[],
auto_register=True,
endpoint_name=DEMO_ENDPOINT_NAME,
lifetime=None,
binding=None,
fw_updated_marker_path=None,
**kwargs):
"""
Starts the demo process and creates any required auxiliary objects (such as Lwm2mServer objects) or processes.
:param servers:
Lwm2mServer objects that shall be accessible to the test - they will be accessible through the self.servers
list. May be either an iterable of Lwm2mServer objects, or an integer - in the latter case, an appropriate
number of Lwm2mServer objects will be created.
:param num_servers_passed:
If passed, it shall be an integer that controls how many of the servers configured through the servers argument,
will be passed to demo's command line. All of them are passed by default. This option may be useful if some
servers are meant to be later configured e.g. via the Bootstrap Interface.
:param bootstrap_server:
Boolean value that controls whether to create a Bootstrap Server Lwm2mServer object. If true, it will be stored
in self.bootstrap_server. The bootstrap server is not included in anything related to the servers and
num_servers_passed arguments.
:param extra_cmdline_args:
List of command line arguments to pass to the demo process in addition to the ones generated from other
arguments.
:param auto_register:
If true (default), self.assertDemoRegisters() will be called for each server provisioned via the command line.
:param version:
Passed down to self.assertDemoRegisters() if auto_register is true
:param lifetime:
Passed down to self.assertDemoRegisters() if auto_register is true
:param binding:
Passed down to self.assertDemoRegisters() if auto_register is true
:return: None
"""
demo_args = []
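        # Typical calls (illustrative): setup_demo_with_servers(servers=2) starts the
        # demo with two regular LwM2M servers, while setup_demo_with_servers(servers=1,
        # bootstrap_server=True, auto_register=False) additionally creates a bootstrap
        # server and skips the automatic register assertions.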
if isinstance(servers, int):
self.servers = [Lwm2mServer() for _ in range(servers)]
else:
self.servers = list(servers)
servers_passed = self.servers
if num_servers_passed is not None:
servers_passed = servers_passed[:num_servers_passed]
if bootstrap_server is True:
self.bootstrap_server = Lwm2mServer()
elif bootstrap_server:
self.bootstrap_server = bootstrap_server
else:
self.bootstrap_server = None
if self.bootstrap_server is not None:
demo_args += [
'--bootstrap' if legacy_server_initiated_bootstrap_allowed else '--bootstrap=client-initiated-only']
all_servers = [self.bootstrap_server] + self.servers
all_servers_passed = [self.bootstrap_server] + servers_passed
else:
all_servers = self.servers
all_servers_passed = servers_passed
if fw_updated_marker_path is None:
fw_updated_marker_path = generate_temp_filename(
dir='/tmp', prefix='anjay-fw-updated-')
demo_args += self.make_demo_args(
endpoint_name, all_servers_passed,
fw_updated_marker_path, **kwargs)
demo_args += extra_cmdline_args
if lifetime is not None:
demo_args += ['--lifetime', str(lifetime)]
try:
self._start_dumpcap(server.get_listen_port()
for server in all_servers)
self._start_demo(demo_args)
if auto_register:
for serv in servers_passed:
if serv.security_mode() != 'nosec':
serv.listen()
for serv in servers_passed:
self.assertDemoRegisters(serv,
lifetime=lifetime,
binding=binding)
except Exception:
try:
self.teardown_demo_with_servers(auto_deregister=False)
finally:
raise
def teardown_demo_with_servers(self,
auto_deregister=True,
shutdown_timeout_s=5.0,
force_kill=False,
*args,
**kwargs):
"""
Shuts down the demo process, either by:
- closing its standard input ("Ctrl+D" on its command line)
- sending SIGTERM to it
- sending SIGKILL to it
Each of the above methods is tried one after another.
:param auto_deregister:
If true (default), self.assertDemoDeregisters() is called before shutting down for each server in the
self.servers list (unless overridden by the deregister_servers argument).
:param shutdown_timeout_s:
Number of seconds to wait after each attempted method of shutting down the demo process before moving to the
next one (close input -> SIGTERM -> SIGKILL).
:param force_kill:
If set to True, demo will be forcefully terminated, and its exit code will be ignored.
:param deregister_servers:
If auto_deregister is true, specifies the list of servers to call self.assertDemoDeregisters() on, overriding
the default self.servers.
:param args:
Any other positional arguments to this function are passed down to self.assertDemoDeregisters().
:param kwargs:
Any other keyword arguments to this function are passed down to self.assertDemoDeregisters().
:return: None
"""
        if auto_deregister and 'deregister_servers' not in kwargs:
kwargs = kwargs.copy()
kwargs['deregister_servers'] = self.servers
with CleanupList() as cleanup_funcs:
if not force_kill:
cleanup_funcs.append(
lambda: self.request_demo_shutdown(*args, **kwargs))
cleanup_funcs.append(lambda: self._terminate_demo(
timeout_s=shutdown_timeout_s, force_kill=force_kill))
for serv in self.servers:
cleanup_funcs.append(serv.close)
if self.bootstrap_server:
cleanup_funcs.append(self.bootstrap_server.close)
cleanup_funcs.append(self._terminate_dumpcap)
def seek_demo_log_to_end(self):
self.demo_process.log_file.seek(
os.fstat(self.demo_process.log_file.fileno()).st_size)
def communicate(self, cmd, timeout=-1, match_regex=re.escape('(DEMO)>')):
"""
Writes CMD to the demo process stdin. If MATCH_REGEX is not None,
blocks until given regex is found on demo process stdout.
"""
if timeout < 0:
timeout = self.DEFAULT_COMM_TIMEOUT
self.seek_demo_log_to_end()
self.demo_process.stdin.write((cmd.strip('\n') + '\n').encode())
self.demo_process.stdin.flush()
if match_regex:
result = self.read_log_until_match(match_regex.encode(), timeout_s=timeout)
if result is not None:
# we need to convert bytes-based match object to string-based one...
return re.search(match_regex, result.group(0).decode(errors='replace'))
return None
def _terminate_demo_impl(self, demo, timeout_s, force_kill):
if force_kill:
demo.kill()
demo.wait(timeout_s)
return 0
cleanup_actions = [
(timeout_s, lambda _: None), # check if the demo already stopped
(timeout_s, lambda demo: demo.terminate()),
(None, lambda demo: demo.kill())
]
for timeout, action in cleanup_actions:
action(demo)
try:
return demo.wait(timeout)
except subprocess.TimeoutExpired:
pass
else:
break
return -1
def _terminate_demo(self, timeout_s=5.0, force_kill=False):
if self.demo_process is None:
return
exc = sys.exc_info()
try:
return_value = self._terminate_demo_impl(
self.demo_process, timeout_s, force_kill)
self.assertEqual(
return_value, 0, 'demo terminated with nonzero exit code')
except:
if not exc[1]:
raise
finally:
self.demo_process.log_file.close()
self.demo_process.log_file_write.close()
def _terminate_dumpcap(self):
if self.dumpcap_process is None:
logging.debug('dumpcap not started, skipping')
return
# wait until dumpcap finishes writing all packets, i.e. the capture file stops growing
last_size = -1
size = os.stat(self.dumpcap_file_path).st_size
MAX_DUMPCAP_SHUTDOWN_WAIT_S = 30
deadline = time.time() + MAX_DUMPCAP_SHUTDOWN_WAIT_S
while time.time() < deadline:
if size == last_size:
break
time.sleep(0.1)
last_size = size
size = os.stat(self.dumpcap_file_path).st_size
else:
logging.warning(
'dumpcap did not shut down on time, terminating anyway')
self.dumpcap_process.terminate()
self.dumpcap_process.wait()
self.dumpcap_stderr_reader_thread.join()
logging.debug('dumpcap terminated')
def coap_ping(self, server=None, timeout_s=-1):
serv = server or self.serv
req = Lwm2mEmpty(type=coap.Type.CONFIRMABLE)
serv.send(req)
self.assertMsgEqual(Lwm2mReset.matching(req)(), serv.recv(timeout_s=timeout_s))
def request_demo_shutdown(self, deregister_servers=[], timeout_s=-1, *args, **kwargs):
"""
Attempts to cleanly terminate demo by closing its STDIN.
If DEREGISTER_SERVERS is a non-empty list, the function waits until
demo deregisters from each server from the list.
"""
for serv in deregister_servers:
# send a CoAP ping to each of the connections
# to make sure that all data has been processed by the client
self.coap_ping(serv, timeout_s=timeout_s)
logging.debug('requesting clean demo shutdown')
if self.demo_process is None:
logging.debug('demo not started, skipping')
return
self.demo_process.stdin.close()
for serv in deregister_servers:
self.assertDemoDeregisters(serv, reset=False, timeout_s=timeout_s, *args, **kwargs)
logging.debug('demo terminated')
def get_socket_count(self):
return int(
self.communicate('socket-count', match_regex='SOCKET_COUNT==([0-9]+)\n').group(1))
def wait_until_socket_count(self, expected, timeout_s):
deadline = time.time() + timeout_s
while self.get_socket_count() != expected:
if time.time() > deadline:
raise TimeoutError('Desired socket count not reached')
time.sleep(0.1)
def get_non_lwm2m_socket_count(self):
return int(self.communicate('non-lwm2m-socket-count',
match_regex='NON_LWM2M_SOCKET_COUNT==([0-9]+)\n').group(1))
def get_demo_port(self, server_index=None):
if server_index is None:
server_index = -1
return int(
self.communicate('get-port %s' % (server_index,), match_regex='PORT==([0-9]+)\n').group(
1))
def get_transport(self, socket_index=-1):
return self.communicate('get-transport %s' % (socket_index,),
match_regex='TRANSPORT==([0-9a-zA-Z]+)\n').group(1)
def get_all_connections_failed(self):
return bool(int(self.communicate('get-all-connections-failed',
match_regex='ALL_CONNECTIONS_FAILED==([0-9])\n').group(1)))
def ongoing_registration_exists(self):
result = self.communicate('ongoing-registration-exists',
match_regex='ONGOING_REGISTRATION==(true|false)\n').group(1)
if result == "true":
return True
elif result == "false":
return False
raise ValueError("invalid value")
class SingleServerAccessor:
@property
def serv(self) -> Lwm2mServer:
return self.servers[0]
@serv.setter
def serv(self, new_serv: Lwm2mServer):
self.servers[0] = new_serv
@serv.deleter
def serv(self):
del self.servers[0]
class Lwm2mSingleServerTest(Lwm2mTest, SingleServerAccessor):
def runTest(self):
pass
def setUp(self, extra_cmdline_args=None, psk_identity=None, psk_key=None, client_ca_path=None,
client_ca_file=None, server_crt_file=None, server_key_file=None, binding=None,
*args, **kwargs):
assert ((psk_identity is None) == (psk_key is None))
extra_args = []
dtls_server_kwargs = {}
if 'ciphersuites' in kwargs:
dtls_server_kwargs['ciphersuites'] = kwargs['ciphersuites']
if psk_identity:
extra_args += ['--identity', str(binascii.hexlify(psk_identity), 'ascii'),
'--key', str(binascii.hexlify(psk_key), 'ascii')]
coap_server = coap.DtlsServer(psk_identity=psk_identity, psk_key=psk_key,
**dtls_server_kwargs)
elif server_crt_file:
coap_server = coap.DtlsServer(ca_path=client_ca_path, ca_file=client_ca_file,
crt_file=server_crt_file, key_file=server_key_file,
**dtls_server_kwargs)
else:
coap_server = coap.Server()
if extra_cmdline_args is not None:
extra_args += extra_cmdline_args
if 'servers' not in kwargs:
kwargs['servers'] = [Lwm2mServer(coap_server)]
self.setup_demo_with_servers(extra_cmdline_args=extra_args,
binding=binding,
*args,
**kwargs)
def tearDown(self, *args, **kwargs):
self.teardown_demo_with_servers(*args, **kwargs)
class Lwm2mDtlsSingleServerTest(Lwm2mSingleServerTest):
PSK_IDENTITY = b'test-identity'
PSK_KEY = b'test-key'
def setUp(self, *args, **kwargs):
super().setUp(psk_identity=self.PSK_IDENTITY, psk_key=self.PSK_KEY, *args, **kwargs)
# This class **MUST** be specified as the first in superclass list, due to Python's method resolution order
# (see https://www.python-course.eu/python3_multiple_inheritance.php) and the fact that not all setUp() methods
# call super().setUp(). Failure to fulfill this requirement may lead to "make check" failing on systems
# without dpkt or dumpcap available.
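# For example, a hypothetical PCAP-based test (names below are illustrative
# only, not part of this framework) would be declared as:
#
#     class MyDtlsSniffingTest(PcapEnabledTest, Lwm2mDtlsSingleServerTest):
#         def runTest(self):
#             # e.g. assert that exactly one DTLS Client Hello was captured
#             self.assertEqual(1, self.count_dtls_client_hello_packets())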
class PcapEnabledTest(Lwm2mTest):
def setUp(self, *args, **kwargs):
if not (_DPKT_AVAILABLE and Lwm2mTest.dumpcap_available()):
raise unittest.SkipTest('This test involves parsing PCAP file')
return super().setUp(*args, **kwargs)
def read_pcap(self):
def decode_packet(data):
# dumpcap captures contain Ethernet frames on Linux and
# loopback ones on BSD
for frame_type in [dpkt.ethernet.Ethernet, dpkt.loopback.Loopback]:
pkt = frame_type(data)
if isinstance(pkt.data, dpkt.ip.IP):
return pkt
raise ValueError('Could not decode frame: %s' % data.hex())
with open(self.dumpcap_file_path, 'rb') as f:
r = dpkt.pcapng.Reader(f)
for pkt in iter(r):
yield decode_packet(pkt[1]).data
def _wait_until_condition(self, timeout_s, step_s, condition: lambda pkts: True):
if timeout_s is None:
timeout_s = self.DEFAULT_MSG_TIMEOUT
deadline = time.time() + timeout_s
while True:
if condition(self.read_pcap()):
return
if time.time() >= deadline:
raise TimeoutError(
'Condition was not true in specified time interval')
time.sleep(step_s)
def _count_packets(self, condition: lambda pkts: True):
result = 0
for pkt in self.read_pcap():
if condition(pkt):
result += 1
return result
@staticmethod
def is_icmp_unreachable(pkt):
return isinstance(pkt, dpkt.ip.IP) \
and isinstance(pkt.data, dpkt.icmp.ICMP) \
and isinstance(pkt.data.data, dpkt.icmp.ICMP.Unreach)
@staticmethod
def is_dtls_client_hello(pkt):
header = b'\x16' # Content Type: Handshake
header += b'\xfe\xfd' # Version: DTLS 1.2
header += b'\x00\x00' # Epoch: 0
if isinstance(pkt, dpkt.ip.IP) and isinstance(pkt.data, dpkt.udp.UDP):
return pkt.udp.data[:len(header)] == header
else:
return False
@staticmethod
def is_nosec_register(pkt):
try:
# If it successfully parses as Lwm2mRegister it is a register
Lwm2mRegister.from_packet(coap.Packet.parse(pkt.data.data))
return True
except:
return False
def count_nosec_register_packets(self):
return self._count_packets(PcapEnabledTest.is_nosec_register)
def count_icmp_unreachable_packets(self):
return self._count_packets(PcapEnabledTest.is_icmp_unreachable)
def count_dtls_client_hello_packets(self):
return self._count_packets(PcapEnabledTest.is_dtls_client_hello)
def wait_until_icmp_unreachable_count(self, value, timeout_s=None, step_s=0.1):
def count_of_icmps_is_expected(pkts):
return self.count_icmp_unreachable_packets() >= value
try:
self._wait_until_condition(
timeout_s=timeout_s, step_s=step_s, condition=count_of_icmps_is_expected)
except TimeoutError:
raise TimeoutError('ICMP Unreachable packet not generated')
def get_test_name(test):
if isinstance(test, Lwm2mTest):
return test.test_name()
return test.id()
def get_full_test_name(test):
if isinstance(test, Lwm2mTest):
return test.suite_name() + '.' + test.test_name()
return test.id()
def get_suite_name(suite):
suite_names = []
for test in suite:
if isinstance(test, Lwm2mTest):
suite_names.append(test.suite_name())
elif isinstance(test, unittest.TestSuite):
suite_names.append(get_suite_name(test))
else:
suite_names.append(test.id())
suite_names = set(suite_names)
assert len(suite_names) == 1
return next(iter(suite_names)).replace('/', '.')
def test_or_suite_matches_query_regex(test_or_suite, query_regex):
"""
Test or test suite matches regex query when at least one of following
matches the regex:
* test name,
* suite name,
* "suite_name.test_name" string.
Substring matches are allowed unless the regex is anchored using ^ or $.
"""
if isinstance(test_or_suite, unittest.TestCase):
return (re.search(query_regex, get_test_name(test_or_suite))
or re.search(query_regex, get_full_test_name(test_or_suite)))
elif isinstance(test_or_suite, unittest.TestSuite):
return re.search(query_regex, get_suite_name(test_or_suite))
else:
raise TypeError('Neither a test nor suite: %r' % test_or_suite)
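# Illustration of the matching rules above (all names are hypothetical):
#   query 'test_register'               matches a test named 'test_register_udp'
#                                       (substring match)
#   query 'RegisterTests.test_register' matches via the "suite_name.test_name" form
#   query '^test_register$'             matches only a test named exactly 'test_register'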
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="lightning-talk-pipelines",
version="0.0.8",
author="Rory Murdock",
author_email="rory@itmatic.com.au",
description="A sample package for testing",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/rorymurdock/Lightning-Talk-Pipelines-2",
packages=setuptools.find_packages(),
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
]
)
|
'''
Tests for the OligoAssembly design class.
'''
from nose.tools import assert_equal
from coral import design, DNA
def test_oligo_assembly():
'''
Tests output of OligoAssembly class.
'''
# Expected outputs
olig1 = 'ATGCGTAAAGGAGAAGAACTTTTCACTGGAGTTGTCCCAATTCTTGTTGAATTAGATGGTG' + \
'ATGTTAATGGGCACAAATTTTCTGTCAGTGGAGAGGGTGAA'
olig2 = 'TGGCCATGGAACAGGTAGTTTTCCAGTAGTGCAAATAAATTTAAGGGTAAGTTTTCCGTAT' + \
'GTTGCATCACCTTCACCCTCTCCACTGACAGAAAATTTGTG'
olig3 = 'TGGAAAACTACCTGTTCCATGGCCAACACTTGTCACTACTTTCGGTTATGGTGTTCAATGC' + \
'TTTGCGAGATACCCAGATCATATGAAACAGCATGACTTTTTCAA'
olig4 = 'CGTGTCTTGTAGTTCCCGTCATCTTTGAAAAATATAGTTCTTTCCTGTACATAACCTTCGG' + \
'GCATGGCACTCTTGAAAAAGTCATGCTGTTTCATATGATCTGGG'
olig5 = 'TTCAAAGATGACGGGAACTACAAGACACGTGCTGAAGTCAAGTTTGAAGGTGATACCCTTG' + \
'TTAATAGAATCGAGTTAAAAGGTATTGATTTTAAAGAAGATGGAAACA'
olig6 = 'TTTGTCTGCCATGATGTATACATTGTGTGAGTTATAGTTGTATTCCAATTTGTGTCCAAGA' + \
'ATGTTTCCATCTTCTTTAAAATCAATACCTTTTAACTCGATTCTATT'
olig7 = 'AACTATAACTCACACAATGTATACATCATGGCAGACAAACAAAAGAATGGAATCAAAGTTA' + \
'ACTTCAAAATTAGACACAACATTGAAGATGGAAGCGTTCAACTAGCA'
olig8 = 'TTGTGTGGACAGGTAATGGTTGTCTGGTAAAAGGACAGGGCCATCGCCAATTGGAGTATTT' + \
'TGTTGATAATGGTCTGCTAGTTGAACGCTTCCATCTTCAATGT'
olig9 = 'CCAGACAACCATTACCTGTCCACACAATCTGCCCTTTCGAAAGATCCCAACGAAAAGAGAG' + \
'ACCACATGGTCCTTCTTGAGTTTGTAACAGCTGCTGGGA'
olig10 = 'TTAAGCTACTAAAGCGTAGTTTTCGTCGTTTGCAGCAGGCCTTTTGTATAGTTCATCCAT' + \
'GCCATGTGTAATCCCAGCAGCTGTTACAAACTCAAGAAGG'
reference_oligos = [olig1, olig2, olig3, olig4, olig5, olig6, olig7, olig8,
olig9, olig10]
reference_tms = [73.513413945987, 72.73367624289534, 73.73563193690484,
72.70706564878299, 72.72193323127533, 72.23050918438184,
72.07546311550101, 72.27046461560099, 73.67230272019759]
# Run oligo synthesis on BBa_K082003
seq = 'atgcgtaaaggagaagaacttttcactggagttgtcccaattcttgttgaattagatggtgat' + \
'gttaatgggcacaaattttctgtcagtggagagggtgaaggtgatgcaacatacggaaaactt' + \
'acccttaaatttatttgcactactggaaaactacctgttccatggccaacacttgtcactact' + \
'ttcggttatggtgttcaatgctttgcgagatacccagatcatatgaaacagcatgactttttc' + \
'aagagtgccatgcccgaaggttatgtacaggaaagaactatatttttcaaagatgacgggaac' + \
'tacaagacacgtgctgaagtcaagtttgaaggtgatacccttgttaatagaatcgagttaaaa' + \
'ggtattgattttaaagaagatggaaacattcttggacacaaattggaatacaactataactca' + \
'cacaatgtatacatcatggcagacaaacaaaagaatggaatcaaagttaacttcaaaattaga' + \
'cacaacattgaagatggaagcgttcaactagcagaccattatcaacaaaatactccaattggc' + \
'gatggccctgtccttttaccagacaaccattacctgtccacacaatctgccctttcgaaagat' + \
'cccaacgaaaagagagaccacatggtccttcttgagtttgtaacagctgctgggattacacat' + \
'ggcatggatgaactatacaaaaggcctgctgcaaacgacgaaaactacgctttagtagcttaa'
dna_seq = DNA(seq)
assembly = design.OligoAssembly(dna_seq,
tm=72,
length_range=(120, 120),
require_even=True,
start_5=True)
assembly.design_assembly()
# Prepare outputs vs reference
output_oligos = [str(oligo).lower() for oligo in assembly.oligos]
reference_oligos = [oligo.lower() for oligo in reference_oligos]
assert_equal(output_oligos, reference_oligos)
assert_equal(assembly.overlap_tms, reference_tms)
# Test too short of oligo input
too_short = DNA(seq[0:100])
too_short_assembly = design.OligoAssembly(too_short,
tm=72,
length_range=(120, 120),
require_even=True,
start_5=True)
too_short_assembly.design_assembly()
assert_equal(str(too_short_assembly.oligos[0]), str(too_short))
|
__product__ = None
__copyright__ = None
__version__ = None
__date__ = None
try:
import bhamon_orchestra_master.__metadata__
__product__ = bhamon_orchestra_master.__metadata__.__product__
__copyright__ = bhamon_orchestra_master.__metadata__.__copyright__
__version__ = bhamon_orchestra_master.__metadata__.__version__
__date__ = bhamon_orchestra_master.__metadata__.__date__
except ImportError:
pass
|
import sys
import datetime
import asyncio
import traceback
from aiohttp_json_rpc import JsonRpcClient
class WorkerClient(JsonRpcClient):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_methods(
("", self.start_job),
)
self.current_job = None
self._id = None
def set_id(self, worker_id):
self._id = worker_id
async def start_job(self, job_data):
if self.current_job is not None:
return False
print("Got new job {}".format(str(job_data)))
self.current_job = job_data
return True
def finish_job(self, success, message, data):
asyncio.ensure_future(
self._finish_job(success, message, data),
loop=self._loop
)
async def _finish_job(self, success, message, data):
print("Current job", self.current_job)
job_id = self.current_job["job_id"]
self.current_job = None
return await self.call(
"job_done", [self._id, job_id, success, message, data]
)
class WorkerJobsConnection:
"""WS connection to Job server.
Helper class to create a connection to process jobs from job server.
To be able receive jobs is needed to create a connection and then register
as worker for specific host.
"""
retry_time_seconds = 5
def __init__(self, server_url, host_name, loop=None):
self.client = None
self._loop = loop
self._host_name = host_name
self._server_url = server_url
self._is_running = False
self._connecting = False
self._connected = False
self._stopped = False
def stop(self):
print("Stopping worker")
self._stopped = True
@property
def is_running(self):
return self._is_running
@property
def current_job(self):
if self.client is not None:
return self.client.current_job
return None
def finish_job(self, success=True, message=None, data=None):
"""Worker finished job and sets the result which is send to server."""
if self.client is None:
print((
"Couldn't send job status to the server because"
" the client is not connected."
))
else:
self.client.finish_job(success, message, data)
async def main_loop(self, register_worker=True):
"""Main loop of connection which keep connection to server alive."""
self._is_running = True
while not self._stopped:
start_time = datetime.datetime.now()
await self._connection_loop(register_worker)
delta = datetime.datetime.now() - start_time
print("Connection loop took {}s".format(str(delta)))
# Check whether the connection was stopped and break out of the loop in that case
if self._stopped:
break
if delta.seconds < 60:
print((
"Can't connect to the server, will retry in {} seconds."
).format(self.retry_time_seconds))
await asyncio.sleep(self.retry_time_seconds)
self._is_running = False
async def _connect(self):
self.client = WorkerClient()
print("Connecting to {}".format(self._server_url))
try:
await self.client.connect_url(self._server_url)
except KeyboardInterrupt:
raise
except Exception:
traceback.print_exception(*sys.exc_info())
async def _connection_loop(self, register_worker):
self._connecting = True
future = asyncio.run_coroutine_threadsafe(
self._connect(), loop=self._loop
)
while self._connecting:
if not future.done():
await asyncio.sleep(0.07)
continue
session = getattr(self.client, "_session", None)
ws = getattr(self.client, "_ws", None)
if session is not None:
if session.closed:
self._connecting = False
self._connected = False
break
elif ws is not None:
self._connecting = False
self._connected = True
if self._stopped:
break
await asyncio.sleep(0.07)
if not self._connected:
self.client = None
return
print("Connected to job queue server")
if register_worker:
self.register_as_worker()
while self._connected and self._loop.is_running():
if self._stopped or ws.closed:
break
await asyncio.sleep(0.3)
await self._stop_cleanup()
def register_as_worker(self):
"""Register as worker ready to work on server side."""
asyncio.ensure_future(self._register_as_worker(), loop=self._loop)
async def _register_as_worker(self):
worker_id = await self.client.call(
"register_worker", [self._host_name]
)
self.client.set_id(worker_id)
print(
"Registered as worker with id {}".format(worker_id)
)
async def disconnect(self):
await self._stop_cleanup()
async def _stop_cleanup(self):
print("Cleanup after stop")
if self.client is not None and hasattr(self.client, "_ws"):
await self.client.disconnect()
self.client = None
self._connecting = False
self._connected = False
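# Minimal usage sketch of the flow described in the WorkerJobsConnection
# docstring. The server URL and host name below are placeholders, not values
# defined by this module.
if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    connection = WorkerJobsConnection(
        "ws://localhost:8079", "example-host", loop=loop
    )
    try:
        # Keeps reconnecting and re-registering as a worker until stop() is
        # called or the process is interrupted.
        loop.run_until_complete(connection.main_loop(register_worker=True))
    except KeyboardInterrupt:
        connection.stop()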
|
import re
import json
from sqlalchemy.orm import Session
from .. import schemas, models
from fastapi import HTTPException, status
from sqlalchemy.sql import text
from sqlalchemy import desc
# from fastapi_pagination import Page, pagination_params, page_size
# Show All Incomes ##.order_by(desc('date'))
def get_all(db: Session, limit: int = 10, offset: int = 0 ):
income = db.query(models.Income).offset(offset).limit(limit).all()
# income = db.query(models.Income).filter(models.Income.date >= "2021-09-01", models.Income.date <= "2021-09-31").all()
return income
# Show a Specific Income by date year: str, month: str,
def show_by_date(year:str, month:str, db: Session, limit: int = 10, offset: int = 0 ):
# print(" TEST.....show_by_date22")
income = db.query(models.Income).filter(models.Income.date >= f'{year}-{month}-01', models.Income.date <= f'{year}-{month}-31').all()
return income
# Show a Specific Income by id
def show(id: int, db: Session):
income = db.query(models.Income).filter(models.Income.id == id).first()
if not income:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'The Income with ID {id} is not found')
return income
# Create and Post a new Income
def create(request: schemas.Income, db: Session):
# print("HALLO TEST..............")
new_income = models.Income(
date=request.date,
service_income_1=request.service_income_1,
service_income_2=request.service_income_2,
bar_income_1=request.bar_income_1,
bar_income_2=request.bar_income_2,
pos=request.pos,
z_count=request.z_count,
vat=request.vat,
waitress_1=request.waitress_1,
waitress_2=request.waitress_2,
barman_1=request.barman_1,
barman_2=request.barman_2,
notes=request.notes,
shift_id=request.shift_id,
)
db.add(new_income)
db.commit()
db.refresh(new_income)
# print("HALLO TEST..............222222222")
return new_income
# Update an Income
def update(id: int, request: schemas.Income, db: Session):
query = text("""UPDATE income SET date=:date, service_income_1=:service_income_1, service_income_2=:service_income_2, bar_income_1=:bar_income_1, bar_income_2=:bar_income_2, pos=:pos, z_count=:z_count, vat=:vat, waitress_1=:waitress_1, waitress_2=:waitress_2, barman_1=:barman_1, barman_2=:barman_2, notes=:notes, shift_id=:shift_id WHERE id = :id""").params( date=request.date, service_income_1 = request.service_income_1, service_income_2 = request.service_income_2, bar_income_1=request.bar_income_1, bar_income_2=request.bar_income_2, pos=request.pos, z_count=request.z_count, vat=request.vat, waitress_1=request.waitress_1, waitress_2=request.waitress_2, barman_1=request.barman_1, barman_2=request.barman_2, notes=request.notes, shift_id=request.shift_id , id=id)
result = db.execute(query)
if result.rowcount == 0:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'The income with id {id} is not found')
db.commit()
return request
# Delete an Income
def destroy(id: int, db: Session):
income = db.query(models.Income).filter(models.Income.id == id)
if not income.first():
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"The income with id {id} is not found")
income.delete(synchronize_session=False)
db.commit()
return "deleted!"
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Problem 24
# Lexicographic permutations
from itertools import permutations
import numpy as np
a = [int(''.join(k)) for k in
list(permutations([str(i) for i in np.arange(10)], 10))]
a.sort()
print(a[999999])
# 2783915460
# 3.190 s
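# Note: since the input to permutations() is sorted, the permutations are
# already generated in lexicographic order, so building and sorting the full
# list is not strictly needed; the millionth permutation can be read directly:
#
#     from itertools import islice
#     print(int(''.join(next(islice(permutations('0123456789'), 999999, None)))))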
|
from django.utils import simplejson as json
from jsonate.json_encoder import JsonateEncoder
def jsonate(obj, *args, **kwargs):
kwargs['cls'] = JsonateEncoder
return json.dumps(obj, *args, **kwargs)
|
# Generated by Django 3.2.5 on 2021-07-28 07:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('LabNet', '0013_ip_ip_to_interger_ip'),
]
operations = [
migrations.RemoveField(
model_name='ip',
name='comment',
),
migrations.AddField(
model_name='reservation',
name='comment',
field=models.TextField(blank=True, default='', null=True),
),
]
|
# -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
""" This module is a collection of methods commonly used in this project. """
import collections
import functools
import json
import logging
import os
import os.path
import re
import shlex
import subprocess
import sys
import pprint
from typing import List, Any, Dict, Callable # noqa: ignore=F401
ENVIRONMENT_KEY = 'INTERCEPT_BUILD'
Execution = collections.namedtuple('Execution', ['pid', 'cwd', 'cmd'])
def shell_split(string):
# type: (str) -> List[str]
""" Takes a command string and returns as a list. """
def unescape(arg):
# type: (str) -> str
""" Gets rid of the escaping characters. """
if len(arg) >= 2 and arg[0] == arg[-1] and arg[0] == '"':
return re.sub(r'\\(["\\])', r'\1', arg[1:-1])
return re.sub(r'\\([\\ $%&\(\)\[\]\{\}\*|<>@?!])', r'\1', arg)
return [unescape(token) for token in shlex.split(string)]
def run_build(command, *args, **kwargs):
# type: (...) -> int
""" Run and report build command execution
:param command: list of tokens
:return: exit code of the process
"""
environment = kwargs.get('env', os.environ)
logging.debug('run build %s, in environment:\n%s',
command,
pprint.pformat(environment, indent=1, width=79))
exit_code = subprocess.call(command, *args, **kwargs)
logging.debug('build finished with exit code: %d', exit_code)
return exit_code
def run_command(command, cwd=None):
# type: (List[str], str) -> List[str]
""" Run a given command and report the execution.
:param command: array of tokens
:param cwd: the working directory where the command will be executed
:return: output of the command
"""
def decode_when_needed(result):
# type: (Any) -> str
""" check_output returns bytes or string depend on python version """
if not isinstance(result, str):
return result.decode('utf-8')
return result
try:
directory = os.path.abspath(cwd) if cwd else os.getcwd()
logging.debug('exec command %s in %s', command, directory)
output = subprocess.check_output(command,
cwd=directory,
stderr=subprocess.STDOUT)
return decode_when_needed(output).splitlines()
except subprocess.CalledProcessError as ex:
ex.output = decode_when_needed(ex.output).splitlines()
raise ex
def reconfigure_logging(verbose_level):
""" Reconfigure logging level and format based on the verbose flag.
:param verbose_level: number of `-v` flags received by the command
:return: no return value
"""
# exit when nothing to do
if verbose_level == 0:
return
root = logging.getLogger()
# tune level
level = logging.WARNING - min(logging.WARNING, (10 * verbose_level))
root.setLevel(level)
# be verbose with messages
if verbose_level <= 3:
fmt_string = '%(name)s: %(levelname)s: %(message)s'
else:
fmt_string = '%(name)s: %(levelname)s: %(funcName)s: %(message)s'
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(fmt=fmt_string))
root.handlers = [handler]
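# For reference, with logging.WARNING == 30 the mapping above works out to:
#   -v    -> logging.INFO  (30 - 10)
#   -vv   -> logging.DEBUG (30 - 20)
#   -vvv  -> logging.NOTSET, still using the short message format
#   -vvvv -> logging.NOTSET with the %(funcName)s format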
def command_entry_point(function):
# type: (Callable[[], int]) -> Callable[[], int]
""" Decorator for command entry methods.
The decorator initialize/shutdown logging and guard on programming
errors (catch exceptions).
The decorated method can have arbitrary parameters, the return value will
be the exit code of the process. """
@functools.wraps(function)
def wrapper():
# type: () -> int
""" Do housekeeping tasks and execute the wrapped method. """
try:
logging.basicConfig(format='%(name)s: %(message)s',
level=logging.WARNING,
stream=sys.stdout)
# this hack gets the executable name into %(name)
logging.getLogger().name = os.path.basename(sys.argv[0])
return function()
except KeyboardInterrupt:
logging.warning('Keyboard interrupt')
return 130 # signal received exit code for bash
except (OSError, subprocess.CalledProcessError):
logging.exception('Internal error.')
if logging.getLogger().isEnabledFor(logging.DEBUG):
logging.error("Please report this bug and attach the output "
"to the bug report")
else:
logging.error("Please run this command again and turn on "
"verbose mode (add '-vvvv' as argument).")
return 64  # an otherwise unused exit code for internal errors
finally:
logging.shutdown()
return wrapper
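# Typical use of the decorator above (hypothetical entry point, not part of
# this module):
#
#     @command_entry_point
#     def main():
#         # parse arguments, do the actual work...
#         return 0
#
#     if __name__ == '__main__':
#         sys.exit(main())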
def wrapper_entry_point(function):
# type: (Callable[[int, Execution], None]) -> Callable[[], int]
""" Implements compiler wrapper base functionality.
A compiler wrapper executes the real compiler, then implement some
functionality, then returns with the real compiler exit code.
:param function: the extra functionality what the wrapper want to
do on top of the compiler call. If it throws exception, it will be
caught and logged.
:return: the exit code of the real compiler.
The :param function: will receive the following arguments:
:result: the exit code of the compilation.
:execution: the command executed by the wrapper. """
def is_cxx_wrapper():
# type: () -> bool
""" Find out was it a C++ compiler call. Compiler wrapper names
contain the compiler type. C++ compiler wrappers ends with `c++`,
but might have `.exe` extension on windows. """
wrapper_command = os.path.basename(sys.argv[0])
return True if re.match(r'(.+)c\+\+(.*)', wrapper_command) else False
def run_compiler(executable):
# type: (List[str]) -> int
""" Execute compilation with the real compiler. """
command = executable + sys.argv[1:]
logging.debug('compilation: %s', command)
result = subprocess.call(command)
logging.debug('compilation exit code: %d', result)
return result
@functools.wraps(function)
def wrapper():
# type: () -> int
""" It executes the compilation and calls the wrapped method. """
# get relevant parameters from environment
parameters = json.loads(os.environ[ENVIRONMENT_KEY])
reconfigure_logging(parameters['verbose'])
# execute the requested compilation and crash if anything goes wrong
cxx = is_cxx_wrapper()
compiler = parameters['cxx'] if cxx else parameters['cc']
result = run_compiler(compiler)
# call the wrapped method and ignore its return value
try:
call = Execution(
pid=os.getpid(),
cwd=os.getcwd(),
cmd=['c++' if cxx else 'cc'] + sys.argv[1:])
function(result, call)
except (OSError, subprocess.CalledProcessError):
logging.exception('Compiler wrapper failed to complete.')
# always return the real compiler exit code
return result
return wrapper
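# Sketch of how a compiler wrapper script might use the decorator above
# (hypothetical wrapper, not part of this module):
#
#     @wrapper_entry_point
#     def intercept_wrapper(result, execution):
#         # e.g. record the Execution tuple somewhere once compilation succeeded
#         if result == 0:
#             logging.info('captured %s (pid %d)', execution.cmd, execution.pid)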
def wrapper_environment(args):
# type: (...) -> Dict[str, str]
""" Set up environment for interpose compiler wrapper."""
return {
ENVIRONMENT_KEY: json.dumps({
'verbose': args.verbose,
'cc': shell_split(args.cc),
'cxx': shell_split(args.cxx)
})
}
|
from typing import (
get_args,
Type,
ClassVar,
TypeVar,
)
T = TypeVar("T")
def add_on(data_class: Type) -> Type:
class AddOn(data_class):
size: ClassVar[int] = sum(
get_args(field_type)[0].size
for field_type in data_class.__annotations__.values()
)
is_add_on: ClassVar[bool] = True
__qualname__ = data_class.__name__
@classmethod
def from_bytes(cls: Type[T], data: bytes) -> "T":
dic = {}
pointer = 0
for field_name, field_type in data_class.__annotations__.items():
typings = get_args(field_type)[0]
reading_size = typings.size
read_bytes = data[pointer : pointer + reading_size]
pointer += reading_size
dic[field_name] = typings.from_bytes(read_bytes)
return cls(**dic)
def __bytes__(self) -> bytes:
result = b"".join(
bytes(getattr(self, field_name))
for field_name in data_class.__annotations__.keys()
)
return result
AddOn.__name__ = data_class.__name__
return AddOn
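# Rough usage sketch (every name below is hypothetical, not part of this
# module). The decorator expects each annotation to be a one-argument generic
# whose argument exposes a `size` class attribute, a `from_bytes()` constructor
# and `__bytes__()`, e.g.:
#
#     @add_on
#     @dataclass
#     class Header:
#         version: List[UInt8]   # get_args(List[UInt8])[0] is UInt8, UInt8.size == 1
#         flags: List[UInt8]
#
#     Header.size                             # -> 2, summed over the field types
#     Header.from_bytes(b'\x01\x02')          # -> Header(version=..., flags=...)
#     bytes(Header.from_bytes(b'\x01\x02'))   # -> b'\x01\x02'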
|
import requests
from API.library.support.data import tr
def get_token(user_name, password):
body = {
'userName': user_name,
'password': password
}
url = tr
token = requests.post(
url,
json=body
)
return 'Bearer ' + token.text
|
###############################################################################
# PyDial: Multi-domain Statistical Spoken Dialogue System Software
###############################################################################
#
# Copyright 2015 - 2017
# Cambridge University Engineering Department Dialogue Systems Group
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
'''
LSPIPolicy.py - Least-Squares Policy Iteration (LSPI) policy
============================================
Copyright TSI-TUC 2017
**Relevant Config variables** [Default values]::
[lspipolicy]
phitype = block
.. seealso:: CUED Imports/Dependencies:
import :mod:`policy.LSPILib` |.|
import :mod:`policy.Policy` |.|
import :mod:`ontology.Ontology` |.|
import :mod:`utils.Settings` |.|
import :mod:`utils.ContextLogger`
************************
'''
__author__ = "tsi_tuc_group"
import math
import copy
import numpy as np
import json
import os
import sys
import time
import pickle
import PolicyUtils
from itertools import starmap, izip, combinations, product
from operator import mul # ,sub
from scipy.stats import entropy
from collections import OrderedDict
from Policy import Policy, Action, State, TerminalAction, TerminalState
from policy import PolicyCommittee, SummaryUtils
from LSPILib2 import BlockBasis
from ontology import Ontology
from utils import Settings, ContextLogger
# Fotis
# Modifications for autoencoder
from policy.flatten_state import flatten_belief
import ontology.FlatOntologyManager as FlatOnt
# End of modifications
logger = ContextLogger.getLogger('')
class LSPIPolicy(Policy, PolicyCommittee.CommitteeMember):
'''
An implementation of the dialogue policy based on the LSPI algorithm to optimise actions.
The class implements the public interfaces from :class:`~Policy.Policy` and :class:`~PolicyCommittee.CommitteeMember`.
'''
def __init__(self, domainString, learning, sharedParams=None):
super(LSPIPolicy, self).__init__(domainString, learning)
# DEFAULTS:
self.discount = 0.99
self.inpolicyfile = ''
self.outpolicyfile = ''
self.phitype = 'block'
self.pcafile = ''
self.episodesperbatch = 50
self.trainingepisodes = {}
self.trainingepisodes_count = 0
self.doForceSave = False
self.delta = 0.001 # Precondition value
self._byeAction = None
self.replace = {}
self.slot_abstraction_file = os.path.join(Settings.root,
'policy/slot_abstractions/' + domainString + '.json') # default mappings
self.abstract_slots = False
self.unabstract_slots = False
# CONFIG:
if Settings.config.has_option('policy', 'inpolicyfile'):
self.inpolicyfile = Settings.config.get('policy', 'inpolicyfile')
if Settings.config.has_option('policy', 'outpolicyfile'):
self.outpolicyfile = Settings.config.get('policy', 'outpolicyfile')
if Settings.config.has_option('policy_' + domainString, 'inpolicyfile'):
self.inpolicyfile = Settings.config.get('policy_' + domainString, 'inpolicyfile')
if Settings.config.has_option('policy_' + domainString, 'outpolicyfile'):
self.outpolicyfile = Settings.config.get('policy_' + domainString, 'outpolicyfile')
if Settings.config.has_option("lspi_" + domainString, "discount"):
self.discount = Settings.config.getfloat("lspi_" + domainString, "discount")
if Settings.config.has_option('policy_' + domainString, 'inpolicyfile'):
self.inpolicyfile = Settings.config.get('policy_' + domainString, 'inpolicyfile')
self.basefilename = '.'.join(self.inpolicyfile.split('.')[:-1])
self.inpolicyfile = self.inpolicyfile + '.' + str(os.getpid())
self.basefilename = self.basefilename + '.' + str(os.getpid())
# if Settings.config.has_option('policy_'+domainString, 'outpolicyfile'):
# self.outpolicyfile = Settings.config.get('policy_'+domainString, 'outpolicyfile')
# self.outpolicyfile = self.outpolicyfile + '.' + str(os.getpid())
if Settings.config.has_option('lspipolicy_' + domainString, 'phitype'):
self.phitype = Settings.config.get('lspipolicy_' + domainString, 'phitype')
if Settings.config.has_option('lspipolicy_' + domainString, 'pcafile'):
self.pcafile = Settings.config.get('lspipolicy_' + domainString, 'pcafile')
if Settings.config.has_option('exec_config', 'traindialogsperbatch'):
self.episodesperbatch = int(Settings.config.get('exec_config', 'traindialogsperbatch'))
policyType = 'all'
if Settings.config.has_option('policy_' + domainString, 'policytype'):
policyType = Settings.config.get('policy_' + domainString, 'policytype')
# Fotis
# Modifications for autoencoder
self.save_step = 100
if Settings.config.has_option('policy_' + domainString, 'save_step'):
self.save_step = Settings.config.getint('policy_' + domainString, 'save_step')
self.episodecount = 0
self.learning = learning
# LSPI stuff
if os.path.isfile(self.inpolicyfile):
self.loadLSPIParameters()
self.setBasisFunctions()
self.isInitialized = True
else:
self.isInitialized = False
"""Needed for AE | Fotis"""
# Start
self.domainUtil = FlatOnt.FlatDomainOntology(domainString)
self.state_buffer = []
self.is_transfer = False
if Settings.config.has_option('exec_config', 'transfer'):
self.is_transfer = Settings.config.getboolean('exec_config', 'transfer')
self.is_Single = False
if Settings.config.has_option('exec_config', 'single_autoencoder_type'):
self.is_Single = True
# self.batch_size = 256
# if Settings.config.has_option('exec_config', 'autoencoder_minibatch_size'):
# self.batch_size = Settings.config.getint('exec_config', 'autoencoder_minibatch_size')
# fotis
self.save_step = 100
if Settings.config.has_option('exec_config', 'save_step'):
self.save_step = Settings.config.getint('exec_config', 'save_step')
self.save_episodes = False
if Settings.config.has_option('exec_config', 'save_episodes'):
self.save_episodes = Settings.config.getboolean('exec_config', 'save_episodes')
self.episodecount = 0
# Modifications for autoencoders | fotis
self.dae = None
self.transfer_autoencoder = None
# Modifications for Adversarial autoencoder | gdiale
self.aae = False
if Settings.config.has_option('exec_config', 'autoencoder') and Settings.config.getboolean(
'exec_config', 'autoencoder'):
autoencoder_type = Settings.config.get('exec_config', 'single_autoencoder_type')
if autoencoder_type == 'adversarial':
self.aae = True
self.dae = self.initSingleAutoEncoder(domainString, autoencoder_type)
self.isAE = False
if Settings.config.has_option('exec_config', 'autoencoder'):
if Settings.config.getboolean('exec_config', 'autoencoder'):
self.isAE = True
# End
#########################################################
# Fotis | Initialisation method for Autoencoders
#########################################################
def initSingleAutoEncoder(self, domainString, autoencoder_type=None):
if autoencoder_type == 'dense':
from autoencoder.src.model import Autoencoder
elif autoencoder_type == 'dae_transfer':
from autoencoder.dae_transfer.model import Autoencoder
elif autoencoder_type == 'variational_dense_denoising':
from autoencoder.variational_dense_denoising.model import Autoencoder
elif autoencoder_type == 'dense_denoising':
from autoencoder.dense_denoising.model import Autoencoder
elif autoencoder_type == 'variational':
from autoencoder.variational.model import Autoencoder
elif autoencoder_type == 'dense_multi':
from autoencoder.dense_multi.model import Autoencoder
elif autoencoder_type == 'sparse':
from autoencoder.dense_sparse.model import Autoencoder
elif autoencoder_type == 'adversarial':
from autoencoder.adversarial_autoencoder.model import Autoencoder
else:
from autoencoder.dense_multi.model import Autoencoder
single_autoencoder = Autoencoder(domainString=domainString, policyType="lspi",
variable_scope_name=domainString)
return single_autoencoder
def setBasisFunctions(self):
# Basis functions:
if self.phitype == 'block':
self.basis = BlockBasis(self.domainString, self.numActions, self.stateDim)
else:
self.basis = None
def initializeLSPIparameters(self, stateDim):
self.stateDim = stateDim
self.setBasisFunctions()
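# delta-regularised initialisation: A starts out as delta * I, so its inverse
# A_inv is I / delta (self.delta is the "precondition value" set in __init__).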
self.A_inv = np.eye(self.basis.size())
np.fill_diagonal(self.A_inv, 1.0 / self.delta)
self.b = np.zeros((self.basis.size(), 1))
self.w = np.random.uniform(-0.1, 0.1, self.basis.size())
#########################################################
# overridden methods from Policy
#########################################################
def nextAction(self, belief):
'''
Selects next action to take based on the current belief and a list of non executable actions
NOT Called by BCM
:param belief:
:type belief:
:param hyps:
:type hyps:
:returns:
'''
nonExecutableActions = self.actions.getNonExecutable(belief, self.lastSystemAction)
goalMethod = belief["beliefs"]["method"]
if "finished" in goalMethod:
if goalMethod["finished"] > 0.85 and self._byeAction is not None:
return self._byeAction
if self._byeAction is not None:
nonExecutableActions.append(self._byeAction)
currentstate = self.get_State(belief)
executable = self._createExecutable(nonExecutableActions)
if len(executable) < 1:
logger.error("No executable actions")
if not self.isInitialized:
self.initializeLSPIparameters(len(currentstate.getStateVector()))
self.isInitialized = True
best_action, best_Qvalue = self.policy(belief=currentstate, executable=executable)
summaryAct = self._actionString(best_action.act) # best_action[0].act
if self.learning:
best_action.Qvalue = best_Qvalue
self.actToBeRecorded = best_action # summaryAct
# Finally convert action to MASTER ACTION
masterAct = self.actions.Convert(belief, summaryAct, self.lastSystemAction)
return masterAct
def savePolicy(self, FORCE_SAVE=False):
'''
Saves the LSPI policy.
:param belief:
:type belief:
'''
pass
# if self.learning or (FORCE_SAVE and self.doForceSave):
# self.saveLSPIParameters() #learner.savePolicy()
def savePolicyInc(self, FORCE_SAVE=False):
"""
save model and replay buffer
"""
if self.episodecount % self.save_step == 0:
if self.learning or (FORCE_SAVE and self.doForceSave):
self.saveLSPIParameters()
# Fotis
if self.dae is not None:
self.dae.save_variables()
print('savePolicyInc')
# print "episode", self.episodecount
# save_path = self.saver.save(self.sess, self.out_policy_file+'.ckpt')
'''self.dqn.save_network(self.out_policy_file + '.dqn.ckpt')
f = open(self.out_policy_file + '.episode', 'wb')
for obj in [self.samplecount, self.episodes[self.domainString]]:
pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)
f.close()
'''
# logger.info("Saving model to %s and replay buffer..." % save_path)
def saveLSPIParameters(self):
'''
Saves the LSPI policy.
'''
print("Saving LSPI parameters to: " + self.outpolicyfile)
PolicyUtils.checkDirExistsAndMake(self.outpolicyfile)
with open(self.outpolicyfile, 'wb') as f:
pickle.dump(self.stateDim, f)
pickle.dump(self.A_inv, f)
pickle.dump(self.b, f)
pickle.dump(self.w, f)
return
def loadLSPIParameters(self):
'''
Loads the LSPI policy.
'''
print("Loading LSPI parameters from:", self.inpolicyfile)
with open(self.inpolicyfile, 'rb') as f:
self.stateDim = pickle.load(f)
self.A_inv = pickle.load(f)
self.b = pickle.load(f)
self.w = pickle.load(f)
return
def train(self):
'''
At the end of learning episode calls LearningStep for accumulated states and actions and rewards
'''
# SOMEWHAT TEMPORARY THING FOR HYPERPARAM PLAY
# if self.collect_data:
# if self.episode.rtrace[-1] == 20: # check success
# self.data_for_hp.add_data(blist=self.episode.strace, alist=self.episode.atrace)
# if self.data_for_hp.met_length():
# self.data_for_hp.write_data()
# raw_input('ENOUGH DATA COLLECTED')
# return
#
if self.USE_STACK:
self.episode_stack.add_episode(copy.deepcopy(self.episodes))
if self.episode_stack.get_stack_size() == self.PROCESS_EPISODE_STACK:
self._process_episode_stack(self.episode_stack)
self.savePolicyInc()
return
# process single episode
else:
for dstring in self.episodes:
if self.episodes[dstring] is not None:
if len(self.episodes[dstring].atrace): # domain may have just been part of committee but
# never in control - and whenever policy is booted an Episode() is created for its own domain ...
episode = self.episodes[dstring]
self._process_single_episode(episode)
self.savePolicyInc()
return
def convertStateAction(self, state, action):
'''
:param belief:
:type belief:
:param belief:
:type belief:
'''
cState = state
cAction = action
if not isinstance(state, LSPIState):
if isinstance(state, TerminalState):
cState = TerminalLSPIState()
else:
cState = self.get_State(state)
if not isinstance(action, LSPIAction):
if isinstance(action, TerminalAction):
cAction = TerminalLSPIAction()
else:
cAction = self.get_Action(action)
return cState, cAction
#########################################################
# overridden methods from CommitteeMember
#########################################################
def get_State(self, beliefstate, keep_none=False):
'''
Called by BCM
:param beliefstate:
:type beliefstate:
:param keep_none:
:type keep_none:
'''
# Fotis
return LSPIState(beliefstate, autoencoder=self.dae, keep_none=keep_none, replace=self.replace,
domainString=self.domainString, domainUtil=self.domainUtil)
def get_Action(self, action):
'''
Called by BCM
:param action:
:type action:
'''
actionIndex = self.actions.action_names.index(action.act)
return LSPIAction(action.act, actionIndex, self.numActions, replace=self.replace)
def abstract_actions(self, actions):
'''
convert a list of domain acts to their abstract form based on self.replace
:param actions:
:type actions:
'''
if len(self.replace) > 0:
abstract = []
for act in actions:
if '_' in act:
[prefix, slot] = act.split('_')
if slot in self.replace:
abstract.append(prefix + '_' + self.replace[slot])
else:
abstract.append(act)
else:
abstract.append(act)
return abstract
else:
logger.error('No slot abstraction mapping has been given - check config')
def unabstract_action(self, actions):
'''
action is a string
:param actions:
:type actions:
'''
if len(actions.split("_")) != 2: # handle not abstracted acts like 'inform' or 'repeat'
return actions
[prefix, slot] = actions.split("_")
if prefix == 'inform': # handle not abstracted acts like 'inform_byname' or 'inform_requested'
return actions
else: # handle abstracted acts like 'request_slot00' or 'confirm_slot03'
matching_actions = []
for abs_slot in self.abstraction_mapping['abstract2real'].keys():
if abs_slot == slot:
match = prefix + '_' + self.abstraction_mapping['abstract2real'][abs_slot]
matching_actions.append(match)
Settings.random.shuffle(matching_actions)
return Settings.random.choice(matching_actions)
logger.error('{} - no real slot found for this abstract slot'.format(actions))
#########################################################
# public methods
#########################################################
def getPolicyFileName(self):
'''
Returns the policy file name
'''
return self.policy_file
#########################################################
# private methods
#########################################################
def _createExecutable(self, nonExecutableActions):
'''
Produce a list of executable actions from non executable actions
:param nonExecutableActions:
:type nonExecutableActions:
'''
executable_actions = []
for act_i in self.actions.action_names:
act_i_index = self.actions.action_names.index(act_i)
if act_i in nonExecutableActions:
continue
elif len(self.replace) > 0: # with abstraction (ie BCM)
# check if possibly abstract act act_i is in nonExecutableActions
if '_' in act_i:
[prefix, slot] = act_i.split('_')
if slot in self.replace.keys():
if prefix + '_' + self.replace[
slot] not in nonExecutableActions: # assumes nonExecutable is abstract
executable_actions.append(
LSPIAction(act_i, act_i_index, self.numActions, replace=self.replace))
else:
pass # dont add in this case
else: # some actions like 'inform_byname' have '_' in name but are not abstracted
executable_actions.append(LSPIAction(act_i, act_i_index, self.numActions, replace=self.replace))
else: # only abstract actions with '_' in them like request_area --> request_slot1 etc
executable_actions.append(LSPIAction(act_i, act_i_index, self.numActions, replace=self.replace))
else: # no abstraction
executable_actions.append(
LSPIAction(act_i, act_i_index, self.numActions)) # replace not needed here - no abstraction
return executable_actions
def _actionString(self, act):
'''
Produce a string representation from an action - checking as well that the act coming in is valid
Should only be called with non abstract action. Use _unabstract_action() otherwise
:param act:
:type act:
'''
if act in self.actions.action_names:
return act
logger.error('Failed to find action %s' % act)
def _process_episode_stack(self, episode_stack):
'''With BCM - items on the stack are now dictionaries (keys=domain names, values=Episode() instances)
'''
# copy original policy to observe how far we deviate from it as we sequentially move through our batch of episodes, updating
# self.orig_learner = copy.deepcopy(self.learner) # nb: deepcopy is slow
# process episodes - since adding BCM - now have domain_episodes --
for episode_key in episode_stack.episode_keys():
domain_episodes = episode_stack.retrieve_episode(episode_key)
for dstring in domain_episodes:
if domain_episodes[dstring] is not None:
if len(domain_episodes[dstring].atrace): # domain may have just been part of committee but
# never in control - and whenever policy is booted an Episode() is created for its own domain ...
self._process_single_episode(domain_episodes[dstring], USE_STACK=True)
return
def _process_single_episode(self, episode, USE_STACK=False):
if len(episode.strace) == 0:
logger.warning("Empty episode")
return
if not self.learning:
logger.warning("Policy not learning")
return
episode.check() # just checks that traces match up.
# Fotis
# Modifications for autoencoder
# Transfered the state buffer in the autoencoder
# transfer AE exists in PolicyManager.py
if self.isAE:
if self.aae:
self.check_n_train_aae(episode)
else:
self.check_n_train_ae(episode)
self.episodecount += 1
# End of modifications
i = 1
r = 0
is_ratios = []
while i < len(episode.strace) and self.learning:
# FIXME how are state/action-pairs recorded? generic or specific objects, ie, State or LSPIState?
# pLSPIState = self.get_State(episode.strace[i-1])
# pLSPIAction = self.get_Action(episode.atrace[i-1])
# cLSPIState = self.get_State(episode.strace[i])
# cLSPIAction = self.get_Action(episode.atrace[i])
pLSPIState = episode.strace[i - 1]
pLSPIAction = episode.atrace[i - 1]
cLSPIState = episode.strace[i]
cLSPIAction = episode.atrace[i]
self.isInitial = False
self.isTerminal = False
if i == 1:
self.isInitial = True
if i + 1 == len(episode.strace) or isinstance(episode.strace[i], TerminalLSPIState):
self.isTerminal = True
r = episode.getWeightedReward()
self.learningStep(pLSPIState, pLSPIAction, r, cLSPIState, cLSPIAction)
i += 1
if (self.isTerminal and i < len(episode.strace)):
logger.warning(
"There are {} entries in episode after terminal state for domain {} with episode of domain {}".format(
len(episode.strace) - i, self.domainString, episode.learning_from_domain))
break
# self.saveLSPIParameters()
return
# Fotis
def check_n_train_ae(self, episode):
if self.learning:
# if not (type(episode).__module__ == np.__name__):
for i in range(len(episode.strace)):
if episode.atrace[i].toString() != 'TerminalLSPIAction':
if self.is_Single:
self.dae.saveToStateBuffer(episode.strace[i].flatBeliefVec,
episode.strace_clean[i].flatBeliefVec)
if self.dae.checkReadyToTrain():
state_batch, state_clean_batch = self.dae.getTrainBatch()
self.dae.train(state_batch, state_clean_batch)
# self.autoencoder.saveEpisodesToFile(self.save_episodes)
self.dae.resetStateBuffer()
try:
path = self.dae.save_variables()
# print("Variables Saved at: ", path)
except:
print("Variables Save Failed!")
pass
if self.is_transfer:
# check if we use the AE in PolicyManager
self.transfer_autoencoder.saveToStateBuffer(episode.strace[i].flatBeliefVec,
episode.strace_clean[i].flatBeliefVec)
if self.transfer_autoencoder.checkReadyToTrain():
state_batch, state_clean_batch = self.transfer_autoencoder.getTrainBatch()
self.transfer_autoencoder.train(state_batch, state_clean_batch)
# self.autoencoder.saveEpisodesToFile(self.save_episodes)
self.transfer_autoencoder.resetStateBuffer()
try:
path = self.transfer_autoencoder.save_variables()
# print("Variables Saved at: ", path)
except:
print("Variables Save Failed!")
pass
# gdiale
def check_n_train_aae(self, episode):
if self.learning:
# if not (type(episode).__module__ == np.__name__):
for i in range(len(episode.strace)):
if episode.atrace[i].toString() != 'TerminalLSPIAction':
if self.is_Single:
self.dae.saveToStateBuffer(episode.strace[i].flatBeliefVec)
if self.dae.checkReadyToTrain():
state_batch = self.dae.getTrainBatch()
self.dae.train(state_batch)
# self.autoencoder.saveEpisodesToFile(self.save_episodes)
self.dae.resetStateBuffer()
try:
path = self.dae.save_variables()
# print("Variables Saved at: ", path)
except:
print("Variables Save Failed!")
pass
if self.is_transfer:
# check if we use the AE in PolicyManager
self.transfer_autoencoder.saveToStateBuffer(episode.strace[i].flatBeliefVec)
if self.transfer_autoencoder.checkReadyToTrain():
state_batch = self.transfer_autoencoder.getTrainBatch()
self.transfer_autoencoder.train(state_batch)
# self.autoencoder.saveEpisodesToFile(self.save_episodes)
self.transfer_autoencoder.resetStateBuffer()
try:
path = self.transfer_autoencoder.save_variables()
# print("Variables Saved at: ", path)
except:
print("Variables Save Failed!")
pass
def Qvalue(self, belief, action):
"""
:returns: Q value for a given state, action and the basis function
"""
phi = self.basis.evaluate(belief, action)
qvalue = self.w.dot(phi)
return qvalue
def policy(self, belief, executable):
"""
:returns: best action according to Q values
"""
if len(executable) == 0:
logger.error("No executable actions")
if not self.isInitialized:
# Settings.random.shuffle(executable) -- can be random.choose()
# print "Choosing randomly ", executable[0].act
action = Settings.random.choice(executable)
cAction = self.get_Action(action)
return [cAction, 0.0]
Q = []
for action in executable:
cAction = self.get_Action(action)
value = self.Qvalue(belief, cAction)
Q.append((cAction, value))
Q = sorted(Q, key=lambda val: val[1], reverse=True)
best_action, best_Qvalue = Q[0][0], Q[0][1]
return best_action, best_Qvalue
def learningStep(self, pLSPIState, pLSPIAction, r, cLSPIState, cLSPIAction):
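# Recursive least-squares (LSTD-Q style) update: with g = (phi(s,a) - gamma * phi(s',a'))^T,
# the inverse matrix is refreshed via the Sherman-Morrison identity
#     A_inv <- A_inv - (A_inv phi)(g A_inv) / (1 + g A_inv phi)
# then b <- b + phi * r, and the weights are re-solved as w = A_inv b.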
k = self.basis.size()
phi_sa = self.basis.evaluate(pLSPIState, pLSPIAction).reshape((-1, 1))
if not isinstance(pLSPIState, TerminalLSPIState):
# best_action = self.best_action(cLSPIState)
phi_sprime = self.basis.evaluate(cLSPIState, cLSPIAction).reshape((-1, 1))
else:
phi_sprime = np.zeros((k, 1))
A1 = np.dot(self.A_inv, phi_sa)
g = (phi_sa - self.discount * phi_sprime).T
self.A_inv += - np.dot(A1, np.dot(g, self.A_inv)) / (1 + np.dot(g, A1))
self.b += phi_sa * r
self.w = np.dot(self.A_inv, self.b).reshape((-1,))
class LSPIAction(Action):
'''
Definition of summary action used for LSPI.
'''
def __init__(self, action, actionIndex, numActions, replace={}):
self.numActions = numActions
self.act = action
self.actid = actionIndex
self.is_abstract = True if len(replace) else False # record whether this state has been abstracted -
# append to the action the Q value from when we chose it --> for access in batch calculations later
self.Qvalue = 0
if len(replace) > 0:
self.act = self.replaceAction(action, replace)
def replaceAction(self, action, replace):
'''
Used for making abstraction of an action
'''
if "_" in action:
slot = action.split("_")[1]
if slot in replace:
replacement = replace[slot]
return action.replace(slot, replacement) # .replace() is a str operation
return action
def __eq__(self, a):
"""
Action are the same if their strings match
:rtype : bool
"""
if self.numActions != a.numActions:
return False
if self.act != a.act:
return False
return True
def __ne__(self, a):
return not self.__eq__(a)
def show(self):
'''
Prints out action and total number of actions
'''
print str(self.act), " ", str(self.numActions)
def toString(self):
'''
Prints action
'''
return self.act
def __repr__(self):
return self.toString()
class LSPIState(State):
'''
Definition of state representation needed for LSPI algorithm
Main requirement for the ability to compute kernel function over two states
'''
def __init__(self, belief, autoencoder=None, keep_none=False, replace={}, domainString=None, domainUtil=None):
self.domainString = domainString
self.autoencoder = autoencoder
self._bstate = {}
self.keep_none = keep_none
self.is_abstract = True if len(replace) else False # record whether this state has been abstracted -
# self.is_abstract = False
self.beliefStateVec = None
self.flatBeliefVec = None
self.isSummary = None
if Settings.config.has_option('policy', 'summary'):
self.isSummary = Settings.config.get('policy', 'summary')
# self.extractBelief(b, replace)
# Fotis
if belief is not None:
if autoencoder is None: # Modifications for autoencoder
if isinstance(belief, LSPIState):
self._convertState(belief, replace)
else:
if self.isSummary:
self.extractBelief(belief, replace)
else:
self.extractSimpleBelief(belief, replace)
else:
# Modifications for AE
# Fotis
self.flatBeliefVec = np.array(flatten_belief(belief, domainUtil), dtype=np.float32)
self.beliefStateVec = autoencoder.encode(self.flatBeliefVec.reshape((1, -1))).reshape((-1,))
#print('Encoder output: ')
#print(self.beliefStateVec)
self.hello = True
# End of modifications
def extractBeliefWithOther(self, belief, sort=True):
'''
Copies a belief vector, computes the remaining belief, appends it and returns its sorted value
:return: the sorted belief state value vector
'''
bel = copy.deepcopy(belief)
res = []
if '**NONE**' not in belief:
res.append(1.0 - sum(belief.values())) # append the none probability
else:
res.append(bel['**NONE**'])
del bel['**NONE**']
# ensure that all goal slots have dontcare entry for GP belief representation
if 'dontcare' not in belief:
bel['dontcare'] = 0.0
if sort:
# sorting all possible slot values including dontcare
res.extend(sorted(bel.values(), reverse=True))
else:
res.extend(bel.values())
return res
def extractSingleValue(self, val):
'''
for a probability p returns a list [p,1-p]
'''
return [val, 1 - val]
def extractSimpleBelief(self, b, replace={}):
'''
From the belief state b extracts discourseAct, method, requested slots, name, goal for each slot,
history whether the offer happened, whether last action was inform none, and history features.
Sets self._bstate
'''
with_other = 0
without_other = 0
self.isFullBelief = True
for elem in b['beliefs'].keys():
if elem == 'discourseAct':
self._bstate["goal_discourseAct"] = b['beliefs'][elem].values()
without_other += 1
elif elem == 'method':
self._bstate["goal_method"] = b['beliefs'][elem].values()
without_other += 1
elif elem == 'requested':
for slot in b['beliefs'][elem]:
cur_slot = slot
if len(replace) > 0:
cur_slot = replace[cur_slot]
self._bstate['hist_' + cur_slot] = self.extractSingleValue(b['beliefs']['requested'][slot])
without_other += 1
else:
if elem == 'name':
self._bstate[elem] = self.extractBeliefWithOther(b['beliefs']['name'])
with_other += 1
else:
cur_slot = elem
if len(replace) > 0:
cur_slot = replace[elem]
self._bstate['goal_' + cur_slot] = self.extractBeliefWithOther(b['beliefs'][elem])
with_other += 1
additionalSlots = 2
# if elem not in Ontology.global_ontology.get_system_requestable_slots(self.domainString):
# additionalSlots = 1
if len(self._bstate['goal_' + cur_slot]) != \
Ontology.global_ontology.get_len_informable_slot(self.domainString,
slot=elem) + additionalSlots:
print self._bstate['goal_' + cur_slot]
logger.error("Different number of values for slot " + cur_slot + " " + str(
len(self._bstate['goal_' + cur_slot])) + \
" in ontology " + str(
Ontology.global_ontology.get_len_informable_slot(self.domainString, slot=elem) + 2))
self._bstate["hist_offerHappened"] = self.extractSingleValue(1.0 if b['features']['offerHappened'] else 0.0)
without_other += 1
self._bstate["hist_lastActionInformNone"] = self.extractSingleValue(
1.0 if len(b['features']['informedVenueSinceNone']) > 0 else 0.0)
without_other += 1
for i, inform_elem in enumerate(b['features']['inform_info']):
self._bstate["hist_info_" + str(i)] = self.extractSingleValue(1.0 if inform_elem else 0.0)
without_other += 1
# Tom's speedup: convert belief dict to numpy vector
self.beliefStateVec = self.slowToFastBelief(self._bstate)
return
def extractBelief(self, b, replace={}):
'''NB - untested function since __init__ makes choice to use extractSimpleBelief() instead
'''
self.isFullBelief = True
self._bstate["hist_lastActionInformNone"] = self.extractSingleValue(b["features"]["lastActionInformNone"])
self._bstate["hist_offerHappened"] = self.extractSingleValue(b["features"]["offerHappened"])
self._bstate["goal_name"] = self.extractBeliefWithOther(b["beliefs"]["name"])
self._bstate["goal_discourseAct"] = b["beliefs"]["discourseAct"].values()
self._bstate["goal_method"] = b["beliefs"]["method"].values()
'''
for i in xrange(len(b["goal"])):
curSlotName = b["slotAndName"][i]
if len(replace) > 0:
curSlotName = replace[curSlotName]
self._bstate["goal_" + curSlotName] = self.extractBeliefWithOther(b["goal"][i])
for i in range(min(len(b["slotAndName"]), len(b["goal_grounding"]))):
histName = b["slotAndName"][i]
if len(replace) > 0:
histName = replace[histName]
self._bstate["hist_" + histName] = b["goal_grounding"][i].values()
for i in range(min(len(b["infoSlots"]), len(b["info_grounding"]))):
infoName = b["infoSlots"][i]
if len(replace) > 0:
infoName = replace[infoName]
self._bstate["hist_" + infoName] = b["info_grounding"][i].values()
'''
self.state_size = len(self._bstate)
# Tom's speedup: convert belief dict to numpy vector
self.beliefStateVec = self.slowToFastBelief(self._bstate)
def slowToFastBelief(self, bdic):
'''Converts dictionary format to numpy vector format
'''
values = np.array([])
for slot in sorted(bdic.keys()):
if slot == "hist_location":
continue
# if "goal" in slot and slot != "goal_discourseAct" and slot != "goal_method":
# toadd = np.array(bdic[slot])
# values = np.concatenate((values, np.sort(toadd)[::-1]))
# else :
# values = np.concatenate((values, np.array(bdic[slot])))
# su259 sorting already done before
values = np.concatenate((values, np.array(bdic[slot])))
return values
def _convertState(self, b, replace={}):
'''
converts GPState to GPState of shape of current domain by padding/truncating slots/values
assumes that non-slot information is the same for both
'''
# 1. take care of non-slot information
self._bstate["goal_discourseAct"] = copy.deepcopy(b._bstate['goal_discourseAct'])
self._bstate["goal_method"] = copy.deepcopy(b._bstate['goal_method'])
self._bstate['hist_offerHappened'] = copy.deepcopy(b._bstate['hist_offerHappened'])
self._bstate['hist_lastActionInformNone'] = copy.deepcopy(b._bstate['hist_lastActionInformNone'])
# copy remaining hist information:
for elem in b._bstate:
if 'hist_info_' in elem:
self._bstate[elem] = copy.deepcopy(b._bstate[elem])
# requestable slots
origRequestSlots = Ontology.global_ontology.get_requestable_slots(self.domainString)
if len(replace) > 0:
requestSlots = map(lambda x: replace[x], origRequestSlots)
else:
requestSlots = origRequestSlots
for slot in requestSlots:
if 'hist_' + slot in b._bstate:
self._bstate['hist_' + slot] = copy.deepcopy(b._bstate['hist_' + slot])
else:
self._bstate['hist_' + slot] = self.extractSingleValue(0.0)
# informable slots
origInformSlots = Ontology.global_ontology.get_informable_slots(self.domainString)
informSlots = {}
for slot in origInformSlots:
curr_slot = slot
if len(replace) > 0:
curr_slot = replace[curr_slot]
informSlots[curr_slot] = Ontology.global_ontology.get_len_informable_slot(self.domainString,
slot) + 2 # dontcare + none
slot = 'name'
self._bstate[slot] = b._bstate[slot]
if len(self._bstate[slot]) > informSlots[slot]:
# truncate
self._bstate[slot] = self._bstate[slot][0:informSlots[slot]]
elif len(self._bstate[slot]) < informSlots[slot]: # 3 < 5 => 5 - 3
# pad with 0
self._bstate[slot].extend([0] * (informSlots[slot] - len(self._bstate[slot])))
del informSlots[slot]
for curr_slot in informSlots:
slot = 'goal_' + curr_slot
if slot in b._bstate:
self._bstate[slot] = b._bstate[slot]
if len(self._bstate[slot]) > informSlots[curr_slot]:
# truncate
self._bstate[slot] = self._bstate[slot][0:informSlots[curr_slot]]
elif len(self._bstate[slot]) < informSlots[curr_slot]: # 3 < 5 => 5 - 3
# pad with 0
self._bstate[slot].extend([0] * (informSlots[curr_slot] - len(self._bstate[slot])))
else:
# create empty entry
self._bstate[slot] = [0] * informSlots[curr_slot]
self._bstate[slot][0] = 1.0 # the none value set to 1.0
# Tom's speedup: convert belief dict to numpy vector
self.beliefStateVec = self.slowToFastBelief(self._bstate)
return
def getStateVector(self):
return self.beliefStateVec
def toString(self):
'''
String representation of the belief
'''
res = ""
if len(self._bstate) > 0:
res += str(len(self._bstate)) + " "
for slot in self._bstate:
res += slot + " "
for elem in self._bstate[slot]:
                res += str(elem) + " "
return res
def __repr__(self):
return self.toString()
class TerminalLSPIAction(TerminalAction, LSPIAction):
'''
Class representing the action object recorded in the (b,a) pair along with the final reward.
'''
def __init__(self):
self.act = 'TerminalLSPIAction'
self.actid = -1
self.is_abstract = None
self.numActions = None
class TerminalLSPIState(LSPIState, TerminalState):
'''
Basic object to explicitly denote the terminal state. Always transition into this state at dialogues completion.
'''
def __init__(self):
super(TerminalLSPIState, self).__init__(None)
# END OF FILE
|
import grpc
import account_pb2
import account_pb2_grpc
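# account_pb2 / account_pb2_grpc are assumed to be the Python modules generated
# from the service's account.proto definition (e.g. via grpcio-tools).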
with grpc.insecure_channel('0.0.0.0:8000') as channel:
# ------- ----
# 1 2
    # 1. When running inside Docker, use 0.0.0.0 instead of localhost
    # 2. Port number
stub = account_pb2_grpc.UserControllerStub(channel)
for user in stub.List(account_pb2.UserListRequest()):
print(user, end='')
|
class Condor(object):
def __init__(self, slottable):
self.slottable = slottable
def host_score(self, pnode, vnode_capacity):
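        # Score how well the virtual node's requested capacity fits on physical
        # node `pnode`: 0 if any single resource (CPU, memory, network in/out,
        # disk) is insufficient, otherwise the average over the five resources of
        # 1 - (spare capacity / total capacity), so tighter fits score closer to 1.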
vnode_cpu = vnode_capacity.get_by_type("CPU")
vnode_memory = vnode_capacity.get_by_type("Memory")
vnode_net_in = vnode_capacity.get_by_type("Net-in")
vnode_net_out = vnode_capacity.get_by_type("Net-out")
vnode_disk = vnode_capacity.get_by_type("Disk")
pnode_capacity = self.slottable.nodes[pnode].capacity
pnode_cpu = pnode_capacity.get_by_type("CPU")
pnode_memory = pnode_capacity.get_by_type("Memory")
pnode_net_in = pnode_capacity.get_by_type("Net-in")
pnode_net_out = pnode_capacity.get_by_type("Net-out")
pnode_disk = pnode_capacity.get_by_type("Disk")
if pnode_disk < vnode_disk:
return 0
elif pnode_memory < vnode_memory:
return 0
elif pnode_cpu < vnode_cpu:
return 0
elif pnode_net_in < vnode_net_in:
return 0
elif pnode_net_out < vnode_net_out:
return 0
score = 0.0
if pnode_disk == vnode_disk:
score += 1
else:
score += 1 - (pnode_disk - vnode_disk) / pnode_disk
if pnode_memory == vnode_memory:
score += 1
else:
score += 1 - (pnode_memory - vnode_memory) / pnode_memory
if pnode_cpu == vnode_cpu:
score += 1
else:
score += 1 - (pnode_cpu - vnode_cpu) / pnode_cpu
if pnode_net_in == vnode_net_in:
score += 1
else:
score += 1 - (pnode_net_in - vnode_net_in) / pnode_net_in
if pnode_net_out == vnode_net_out:
score += 1
else:
score += 1 - (pnode_net_out - vnode_net_out) / pnode_net_out
return score / 5.0
def lease_score(self):
return 1
def get_pnodes(self, vnode_capacity, pnode_ids,lease):
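        # Rank the candidate physical nodes by the average of host score and
        # lease score, dropping nodes that cannot satisfy the requested capacity,
        # and return their ids best-first.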
result = []
for pnode in pnode_ids:
host_score = self.host_score(pnode, vnode_capacity)
if host_score == 0.0:
continue
lease_score = self.lease_score()
if lease_score == 0.0:
continue
avg_score = (host_score + lease_score) / 2.0
result.append((pnode, avg_score))
result.sort(key=lambda tup:tup[1], reverse=True)
return [e[0] for e in result]
|
def return_json_temprate(MODALITY: str) -> dict:
"""テンプレート辞書を返す
Args:
MODALITY (str): MODALITY
Returns:
[dict]: temprate
"""
header = {
'PRIMARY KEY':' ',
'Identified Modality':"",
"SOPInstanceUID":" ",
"StudyID":" ",
"ManufacturerModelName": " ",
"PatientID": " ",
"StudyDate": " ",
"PatientName": " ",
"StudyDescription": " ",
"PatientBirthDate": " ",
"PatientSex": " ",
"PatientAge": " ",
"PatientSize": " ",
"PatientWeight": " ",
'AccessionNumber': ' '
}
if MODALITY == "CT":
# ct_temp
temp = {
"MeanCTDIvol": " ",
"DLP": " ",
"Comment": " ",
"XRayModulationType": " ",
"CTDIwPhantomType": " ",
"AcquisitionProtocol": " ",
"TargetRegion": " ",
"CTAcquisitionType": " ",
"ProcedureContext": " ",
"ExposureTime": " ",
"ScanningLength": " ",
"ExposedRange": " ",
"NominalSingleCollimationWidth": " ",
"NominalTotalCollimationWidth": " ",
"PitchFactor": " ",
"IdentificationoftheXRaySource": " ",
"KVP": " ",
"MaximumXRayTubeCurrent": " ",
"MeanXRayTubeCurrent": " ",
"ExposureTimeperRotation": " ",
"DeviceManufacturer": " ",
"DeviceSerialNumber": " ",
"DLPNotificationValue": " ",
"CTDIvolNotificationValue": " ",
"ReasonforProceeding": " ",
"CTDoseLengthProductTotal": " "
}
elif MODALITY == "ANGIO":
# angio_temp
temp = {
'Projection X-Ray': ' ',
'Irradiation Event X-Ray Data': ' ',
'Acquisition Plane': ' ',
'Irradiation Event Type': ' ',
'Acquisition Protocol': ' ',
'Reference Point Definition': ' ',
'Dose Area Product': ' ',
'Dose (RP)': ' ',
'Collimated Field Area': ' ',
'Positioner Primary Angle': ' ',
'Positioner Secondary Angle': ' ',
'Distance Source to Detector': ' ',
'Table Longitudinal Position': ' ',
'Table Lateral Position': ' ',
'Table Height Position': ' ',
'KVP': ' ',
'X-Ray Tube Current': ' ',
'Focal Spot Size': ' '
}
elif MODALITY in ["PT", "NM"]:
# pet_temp
temp = {
"MeanCTDIvol": " ",
"DLP": " ",
"Comment": " ",
"XRayModulationType": " ",
"CTDIwPhantomType": " ",
"AcquisitionProtocol": " ",
"TargetRegion": " ",
"CTAcquisitionType": " ",
"ProcedureContext": " ",
"ExposureTime": " ",
"ScanningLength": " ",
"ExposedRange": " ",
"NominalSingleCollimationWidth": " ",
"NominalTotalCollimationWidth": " ",
"PitchFactor": " ",
"IdentificationoftheXRaySource": " ",
"KVP": " ",
"MaximumXRayTubeCurrent": " ",
"MeanXRayTubeCurrent": " ",
"ExposureTimeperRotation": " ",
"DeviceManufacturer": " ",
"DeviceSerialNumber": " ",
"DLPNotificationValue": " ",
"CTDIvolNotificationValue": " ",
"ReasonforProceeding": " ",
"CTDoseLengthProductTotal": " ",
"RadionuclideTotalDose": " "
}
elif MODALITY == 'Auto':
temp = {
'Identified Modality':"",
'Projection X-Ray': ' ',
'Irradiation Event X-Ray Data': ' ',
'Acquisition Plane': ' ',
'Irradiation Event Type': ' ',
'Acquisition Protocol': ' ',
'Reference Point Definition': ' ',
'Dose Area Product': ' ',
'Dose (RP)': ' ',
'Collimated Field Area': ' ',
'Positioner Primary Angle': ' ',
'Positioner Secondary Angle': ' ',
'Distance Source to Detector': ' ',
'Table Longitudinal Position': ' ',
'Table Lateral Position': ' ',
'Table Height Position': ' ',
'KVP': ' ',
'X-Ray Tube Current': ' ',
'Focal Spot Size': ' ',
"MeanCTDIvol": " ",
"DLP": " ",
"Comment": " ",
"XRayModulationType": " ",
"CTDIwPhantomType": " ",
"AcquisitionProtocol": " ",
"TargetRegion": " ",
"CTAcquisitionType": " ",
"ProcedureContext": " ",
"ExposureTime": " ",
"ScanningLength": " ",
"ExposedRange": " ",
"NominalSingleCollimationWidth": " ",
"NominalTotalCollimationWidth": " ",
"PitchFactor": " ",
"IdentificationoftheXRaySource": " ",
"KVP": " ",
"MaximumXRayTubeCurrent": " ",
"MeanXRayTubeCurrent": " ",
"ExposureTimeperRotation": " ",
"DeviceManufacturer": " ",
"DeviceSerialNumber": " ",
"DLPNotificationValue": " ",
"CTDIvolNotificationValue": " ",
"ReasonforProceeding": " ",
"CTDoseLengthProductTotal": " ",
"RadionuclideTotalDose": " "
}
    else:
        # Unknown modality: warn and fall back to a header-only template
        print("Invalid MODALITY: " + MODALITY)
        temp = {}
header.update(temp)
return header
|
import math
def newton(function,function1,startingInt): #function is the f(x) and function1 is the f'(x)
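    # Newton-Raphson iteration: repeatedly apply x_{n+1} = x_n - f(x_n)/f'(x_n)
    # and stop once two successive iterates differ by less than 1e-5.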
x_n=startingInt
while True:
x_n1=x_n-function(x_n)/function1(x_n)
if abs(x_n-x_n1)<0.00001:
return x_n1
x_n=x_n1
def f(x):
return math.pow(x,3)-2*x-5
def f1(x):
return 3*math.pow(x,2)-2
print(newton(f,f1,3))
|
#!/usr/bin/env python
import sys
from matplotlib import pyplot as plt
import TraceView
import TraceModel
import scipy
import argparse
import glob
import os.path
__version__="01.00.00"
__author__ ="Robert Shelansky"
DEFAULT_LENGTH =2246
DEFAULT_THRESHOLD=4
DEFAULT_SMOOTH =10
parser = argparse.ArgumentParser(description='Analyze Chromatin Ring Trace Files.')
parser.add_argument('-v','--version', action='version', version='%(prog)s '+__version__)
parser.add_argument('files',
nargs='+',
type =str,
help ='The list of trace files you wished to be analyzed.')
parser.add_argument('-t','--threshold',
default=DEFAULT_THRESHOLD,
type =float,
help='The threshold distance required for two points on opposing strands to be considered as part of a linker.')
parser.add_argument('-l','--length',
default=DEFAULT_LENGTH,
type =int,
help='The expected length in basepairs for the DNA molecule.')
parser.add_argument('-s','--smooth',
default=DEFAULT_SMOOTH,
type =int,
help='The # of coordinates to include in a sliding window of the average distance between points on opposing strands.')
parser.add_argument('-u','--user',
default=None,
type =str,
help='The name of the person who completed the trace and is using this software: for record keeping.')
parser.add_argument('-o','--out_path',
default=None,
type =str,
help='The name of the folder you wish to save the output to.')
parser.add_argument('-p', '--plotless',
action='store_true')
parser.add_argument('-i','--imgres',
default = scipy.NAN,
type = scipy.float16,
help ='The image resolution of the raw image used to make the trace.')
parser.add_argument('-d','--directory',
action='store_false',
                    default=True)
args = parser.parse_args()
args.files=[f for g in args.files for f in glob.glob(g)]
if len(args.files) == 0:
sys.stderr.write('No Trace Files Found. Check the file path.')
sys.exit()
params = vars(args)
params['version'] = __version__
params['title'] = os.path.basename(sys.argv[0]).split('.')[0]
model = TraceModel.TraceModel(**params)
if args.plotless:
for i in range(len(args.files)):
model.seek(i)
model.find_midpoint()
model.analyze()
if model.molecule['molecule'] is None:
print(i, model.context['path'])
else:
model.save()
else:
view = TraceView.TraceView (plt, model)
view.show()
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Module for testing constant regex expressions."""
import re
from lib.constants import regex
def test_url_to_widget_info_regex():
"""Test regex for parsing the source object name, source object id,
widget name, mapped object name, mapped object id from URL."""
urls = [
("https://grc-test.appspot.com/dashboard/",
"dashboard", "", "", "", ""),
("https://grc-test.appspot.com/dashboard#data_asset_widget/",
"dashboard", "", "data_asset_widget", "", ""),
("https://grc-test.appspot.com/data_assets/90#/clause/90/",
"data_assets", 90, "info_widget", "clause", 90),
("https://grc-test.appspot.com/data_assets/90#/",
"data_assets", 90, "info_widget", "", ""),
("https://grc-test.appspot.com/data_assets/90/",
"data_assets", 90, "info_widget", "", ""),
("https://grc-test.appspot.com/data_assets/90#data_asset_widget/",
"data_assets", 90, "data_asset_widget", "", ""),
("https://grc-test.appspot.com/data_assets/90#info_widget/",
"data_assets", 90, "info_widget", "", ""),
("https://grc-test.appspot.com/data_assets/107/",
"data_assets", 107, "info_widget", "", ""),
("https://grc-test.appspot.com/data_assets/107#task_group_widget/",
"data_assets", 107, "task_group_widget", "", ""),
(("https://grc-test.appspot.com/"
"data_assets/107#info_widget/workflow/107/"),
"data_assets", 107, "info_widget", "workflow", 107),
("https://grc-test.appspot.com/data_assets/107#/data_asset/107/",
"data_assets", 107, "info_widget", "data_asset", 107),
]
for (url, expected_source_object_name, expected_source_object_id,
expected_widget_name, expected_mapped_object_name,
expected_mapped_object_id) in urls:
(source_object_name, source_object_id, widget_name, mapped_object_name,
mapped_object_id) = re.search(regex.URL_WIDGET_INFO, url).groups()
if source_object_id:
source_object_id = int(source_object_id)
if mapped_object_id:
mapped_object_id = int(mapped_object_id)
if widget_name == "" and source_object_name != "dashboard":
widget_name = "info_widget" # if '#' in URL without name
assert (
expected_source_object_name, expected_source_object_id,
expected_widget_name, expected_mapped_object_name,
expected_mapped_object_id) == (
source_object_name, source_object_id, widget_name,
mapped_object_name, mapped_object_id)
|
from distutils.version import StrictVersion
VERSION = StrictVersion('1.1.14')
|
"""
Git commands module
Module for interacting with Git command line interface
author: Ryan Long <ryan.long@noaa.gov>
"""
import os
import subprocess
import logging
logger = logging.getLogger(__name__)
def _command_safe(cmd, cwd=os.getcwd()) -> subprocess.CompletedProcess:
"""_command_safe ensures commands are run safely and raise exceptions
on error
https://stackoverflow.com/questions/4917871/does-git-return-specific-return-error-codes
"""
try:
logger.debug("running '%s' in '%s'", cmd, cwd)
return subprocess.run(
cmd,
cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
encoding="utf-8",
)
except subprocess.CalledProcessError as error:
logger.info(error.stdout)
if error.stderr:
logger.error(error.stderr)
        raise
def add(_file_path=None, repopath=os.getcwd()):
"""git_add
Args:
_path (str): path of assets to add
repopath (str, optional): local repository path if not cwd. Defaults to os.getcwd().
Returns:
CompletedProcess:
"""
cmd = ["git", "add", "--all"]
if _file_path is not None:
cmd = ["git", "add", _file_path]
return _command_safe(cmd, repopath)
def checkout(branch_name, target=None, destination=None, repopath=os.getcwd()):
"""git_checkout
Args:
branch_name (str): name of the branch being checked out
repopath (str, optional): local repository path if not cwd. Defaults to os.getcwd().
Returns:
CompletedProcess:
"""
cmd = ["git", "checkout"]
if target is None:
cmd.append(branch_name)
else:
cmd.append(f"{target}/{branch_name}")
cmd.append(f"{destination}/{branch_name}")
return _command_safe(cmd, repopath)
def commit(message, repopath=os.getcwd()):
"""git_commit
Args:
username (str):
name (str): name of report to commit
repopath (str, optional): local repository path if not cwd. Defaults to os.getcwd().
Returns:
CompletedProcess:
"""
cmd = ["git", "commit", "-m", f"'{message}'"]
return _command_safe(cmd, repopath)
def status(repopath=os.getcwd()):
"""status returns the output from git status
Args:
repopath (str, optional): The root path of the repo. Defaults to os.getcwd().
Returns:
CompletedProcess
"""
return _command_safe(["git", "status"], repopath)
def pull(destination="origin", branch=None, repopath=os.getcwd()):
"""git_pull
Args:
destination (str, optional): Defaults to "origin".
branch (str, optional): Defaults to current branch.
repopath (str, optional): Defaults to os.getcwd().
Returns:
CompletedProcess
"""
cmd = ["git", "pull", destination]
if branch:
cmd.append(branch)
return _command_safe(cmd, repopath)
def push(destination="origin", branch=None, repopath=os.getcwd()):
"""git_push
Args:
destination (str, optional): Defaults to "origin".
branch (str, optional): Defaults to current branch.
repopath (str, optional): Defaults to os.getcwd().
Returns:
CompletedProcess
"""
cmd = ["git", "push", destination]
if branch:
cmd.append(branch)
return _command_safe(cmd, repopath)
def clone(url, target_path):
"""git_clone
Args:
url (str): remote url
target_path (str): local target path
Returns:
CompletedProcess
"""
cmd = ["git", "clone", url, target_path]
return _command_safe(cmd, target_path)
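# Usage sketch (hypothetical URL and paths, for illustration only):
#   clone("https://github.com/example/repo.git", "/tmp/repo")
#   add("README.md", repopath="/tmp/repo")
#   commit("update readme", repopath="/tmp/repo")
#   push("origin", "main", repopath="/tmp/repo")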
|
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from __future__ import unicode_literals
from django.db import models
import django.utils.timezone
import datetime
# Backend administrator
class TbAdminManager(models.Model):
loginName = models.CharField(max_length=100, db_column="loginName", unique=True, verbose_name="登录名称")
password = models.CharField(max_length=100, db_column="password", verbose_name="密码", default="")
userName = models.CharField(max_length=100, db_column="userName", verbose_name="用户名")
email = models.CharField(max_length=50, verbose_name="用户邮箱", default="")
superManager = models.IntegerField(default=0, verbose_name="状态 0不是 1是")
state = models.IntegerField(default=1, verbose_name="状态 0删除 1有效")
addBy = models.CharField(max_length=25, db_column='addBy', null=True, blank=True, verbose_name="创建者登录名")
modBy = models.CharField(max_length=25, db_column='modBy', null=True, blank=True, verbose_name="修改者登录名")
addTime = models.DateTimeField(db_column='addTime', auto_now_add=True, verbose_name="创建时间")
modTime = models.DateTimeField(db_column='modTime', auto_now=True, verbose_name="修改时间")
class Meta:
db_table = "tb_admin_manager"
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : data_pack.py
@Time : 2022/03/08 22:18:18
@Author : felix
@Version : 1.0
@Contact : laijia2008@126.com
@License : (C)Copyright 2021-2025, felix&lai
@Desc    :   Utilities for packing and unpacking message data
'''
# here put the import lib
import struct
from tool_define import *
from client.reload import *
from google.protobuf import reflection
from google.protobuf import json_format
from proto_xml import *
import threading
@Singleton
class DataPack(object):
def __init__(self, parent=None):
self.reload = ReLoadFiles()
self.protoXml = ToolProtoXml()
pass
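    # Packet layout for dataPack: {4-byte length (4 + content size), 4-byte message id, content};
    # dataPack2 below differs only in that the length field covers the content alone.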
def dataPack(self, msg_id, msg_proto):
msg_pack = None
msg_content = msg_proto.SerializeToString()
msg_len = 4 + len(msg_content)
msg_pack = struct.pack('i', msg_len)
msg_pack = msg_pack + struct.pack('i', msg_id)
msg_pack = msg_pack+msg_content
return msg_pack
pass
def dataUnpack(self, recv_data, msg_proto):
msg_len = struct.unpack('i', recv_data[:4])[0]
msg_id = struct.unpack('i', recv_data[4:8])[0]
        # ParseFromString fills msg_proto in place, so return the parsed message itself
        msg_proto.ParseFromString(recv_data[8:])
        return msg_id, msg_proto
pass
    # Packet = {4-byte data length, 4-byte message id, data}
def dataPack2(self, msg_id, msg_proto):
msg_pack = None
msg_content = msg_proto.SerializeToString()
msg_len = len(msg_content)
msg_pack = struct.pack('i', msg_len)
msg_pack = msg_pack + struct.pack('i', msg_id)
msg_pack = msg_pack + msg_content
return msg_pack
pass
def dataUnpack2(self, recv_data):
msg_len = struct.unpack('i', recv_data[:4])[0]
msg_id = struct.unpack('i', recv_data[4:8])[0]
dynamicData = self.protoXml.getDynamicMsg(str(msg_id))
if not dynamicData:
print('dynamicData is null, msgid==', msg_id)
            return None, None
msg_proto = self.getMsgProto(dynamicData.msgClass, dynamicData.msgName)
if not msg_proto:
return None, None
msg_proto.ParseFromString(recv_data[8:])
return msg_id, json_format.MessageToJson(msg_proto)
pass
def dataLen(self, recv_data):
msg_len = struct.unpack('i', recv_data[:4])[0]
return msg_len
pass
def getMsgProto(self, msgClass, msgName): # msgClass=login msgName=C2SLoginMsg
self.reload.readLoadModule()
module = self.reload.getModule(msgClass+"_pb2")
if not module: return None
if msgName not in module.DESCRIPTOR.message_types_by_name.keys():
return None
descriptor = module.DESCRIPTOR.message_types_by_name[msgName]
protoMsgType = reflection.MakeClass(descriptor)
msgProto = protoMsgType()
if not msgProto:
return None
return msgProto
pass
|
from typing import List
from .attributed_lines import *
from .attributed_lines_widget import *
from .attributed_text_widget import *
from .config import *
from .cursor_rendering import *
from .cursor_tree_widget import *
from .element import *
from .element_supply import *
from .exceptions import *
from .markup import *
from .rendered_element_cache import *
from .utils import *
__all__: List[str] = []
__all__ += attributed_lines.__all__
__all__ += attributed_lines_widget.__all__
__all__ += attributed_text_widget.__all__
__all__ += config.__all__
__all__ += cursor_rendering.__all__
__all__ += cursor_tree_widget.__all__
__all__ += element.__all__
__all__ += element_supply.__all__
__all__ += exceptions.__all__
__all__ += markup.__all__
__all__ += rendered_element_cache.__all__
__all__ += utils.__all__
|
#!/usr/bin/env python
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test project data."""
FAKE_PROJECTS = [
{
'projects': [
{
'name': 'project1',
'parent': {
'type': 'organization',
'id': '888888888888'
},
'projectId': 'project1',
'projectNumber': '25621943694',
'lifecycleState': 'ACTIVE',
'createTime': '2016-10-22T16:57:36.096Z'
},
{
'name': 'project2',
'parent': {
'type': 'organization',
'id': '888888888888'
},
'projectId': 'project2',
'projectNumber': '94226340476',
'lifecycleState': 'ACTIVE',
'createTime': '2016-11-13T05:32:10.930Z'
},
{
'name': 'project3',
'parent': {
'type': 'organization',
'id': '888888888888'
},
'projectId': 'project3',
'projectNumber': '133851422272',
'lifecycleState': 'ACTIVE',
'createTime': '2016-11-13T05:32:49.377Z'
},
{
'name': 'project4',
'projectId': 'project4',
'projectNumber': '133851422244',
'lifecycleState': 'ACTIVE',
'createTime': '2016-11-13T05:32:49.377Z'
},
{
'name': 'project5',
'parent': {
'type': 'organization',
'id': '888888888888'
},
'projectId': 'project5',
'projectNumber': '133851422255',
'lifecycleState': 'ACTIVE'
}
]
},
{
'projects': [
{
'name': 'project6',
'parent': {
'type': 'organization',
'id': '888888888888'
},
'projectId': 'project6',
'projectNumber': '25621943666',
'lifecycleState': 'ACTIVE',
'createTime': '2016-10-22T16:57:36.066Z'
},
{
'name': 'project7',
'parent': {
'type': 'organization',
'id': '888888888888'
},
'projectId': 'project7',
'projectNumber': '94226340477',
'lifecycleState': 'ACTIVE',
'createTime': '2016-11-13T05:32:10.977Z'
}
]
}
]
EXPECTED_LOADABLE_PROJECTS = [
{
'project_name': 'project1',
'create_time': '2016-10-22 16:57:36',
'parent_id': '888888888888',
'project_number': '25621943694',
'parent_type': 'organization',
'project_id': 'project1',
'lifecycle_state': 'ACTIVE',
'raw_project': '{"name": "project1", "parent": {"type": "organization", "id": "888888888888"}, "projectId": "project1", "projectNumber": "25621943694", "lifecycleState": "ACTIVE", "createTime": "2016-10-22T16:57:36.096Z"}',
},
{
'project_name': 'project2',
'create_time': '2016-11-13 05:32:10',
'parent_id': '888888888888',
'project_number': '94226340476',
'parent_type': 'organization',
'project_id': 'project2',
'lifecycle_state': 'ACTIVE',
'raw_project': '{"name": "project2", "parent": {"type": "organization", "id": "888888888888"}, "projectId": "project2", "projectNumber": "94226340476", "lifecycleState": "ACTIVE", "createTime": "2016-11-13T05:32:10.930Z"}',
},
{
'project_name': 'project3',
'create_time': '2016-11-13 05:32:49',
'parent_id': '888888888888',
'project_number': '133851422272',
'parent_type': 'organization',
'project_id': 'project3',
'lifecycle_state': 'ACTIVE',
'raw_project': '{"name": "project3", "parent": {"type": "organization", "id": "888888888888"}, "projectId": "project3", "projectNumber": "133851422272", "lifecycleState": "ACTIVE", "createTime": "2016-11-13T05:32:49.377Z"}',
},
{
'project_name': 'project4',
'create_time': '2016-11-13 05:32:49',
'parent_id':None,
'project_number': '133851422244',
'parent_type':None,
'project_id': 'project4',
'lifecycle_state': 'ACTIVE',
'raw_project': '{"projectId": "project4", "lifecycleState": "ACTIVE", "name": "project4", "createTime": "2016-11-13T05:32:49.377Z", "projectNumber": "133851422244"}',
},
{
'project_name': 'project5',
'create_time': None,
'parent_id': '888888888888',
'project_number': '133851422255',
'parent_type': 'organization',
'project_id': 'project5',
'lifecycle_state': 'ACTIVE',
'raw_project': '{"projectId": "project5", "lifecycleState": "ACTIVE", "name": "project5", "parent": {"type": "organization", "id": "888888888888"}, "projectNumber": "133851422255"}',
},
{
'project_name': 'project6',
'create_time': '2016-10-22 16:57:36',
'parent_id': '888888888888',
'project_number': '25621943666',
'parent_type': 'organization',
'project_id': 'project6',
'lifecycle_state': 'ACTIVE',
'raw_project': '{"name": "project6", "parent": {"type": "organization", "id": "888888888888"}, "projectId": "project6", "projectNumber": "25621943666", "lifecycleState": "ACTIVE", "createTime": "2016-10-22T16:57:36.066Z"}',
},
{
'project_name': 'project7',
'create_time': '2016-11-13 05:32:10',
'parent_id': '888888888888',
'project_number': '94226340477',
'parent_type': 'organization',
'project_id': 'project7',
'lifecycle_state': 'ACTIVE',
'raw_project': '{"name": "project7", "parent": {"type": "organization", "id": "888888888888"}, "projectId": "project7", "projectNumber": "94226340477", "lifecycleState": "ACTIVE", "createTime": "2016-11-13T05:32:10.977Z"}',
}
]
|
#!/usr/bin/python
'''
Generates splunk configurations from manifest files under the security_content repo.
'''
import glob
import yaml
import argparse
from os import path
import sys
import datetime
from jinja2 import Environment, FileSystemLoader
import re
from attackcti import attack_client
import csv
import shutil
# Global variable
global_product = 'ESCU'
def load_objects(file_path, VERBOSE, REPO_PATH):
files = []
manifest_files = path.join(path.expanduser(REPO_PATH), file_path)
for file in sorted(glob.glob(manifest_files)):
if VERBOSE:
print("processing manifest: {0}".format(file))
files.append(load_file(file))
return files
def process_deprecated(file,file_path):
DESCRIPTION_ANNOTATION = "WARNING, this detection has been marked deprecated by the Splunk Threat Research team, this means that it will no longer be maintained or supported. If you have any questions feel free to email us at: research@splunk.com. "
if 'deprecated' in file_path:
file['deprecated'] = True
file['description'] = DESCRIPTION_ANNOTATION + file['description']
return file
def load_file(file_path):
with open(file_path, 'r', encoding="utf-8") as stream:
try:
file = list(yaml.safe_load_all(stream))[0]
# mark any files that have been deprecated
file = process_deprecated(file,file_path)
except yaml.YAMLError as exc:
print(exc)
sys.exit("ERROR: reading {0}".format(file_path))
return file
def generate_lookup_files(lookups, TEMPLATE_PATH, OUTPUT_PATH,REPO_PATH):
sorted_lookups = sorted(lookups, key=lambda i: i['name'])
utc_time = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
for i in sorted_lookups:
for k,v in i.items():
if k == 'filename':
lookup_file = REPO_PATH +'/lookups/'+ v
dist_lookup_dir = OUTPUT_PATH +'/lookups'
shutil.copy(lookup_file,dist_lookup_dir)
return sorted_lookups
def generate_transforms_conf(lookups, TEMPLATE_PATH, OUTPUT_PATH):
sorted_lookups = sorted(lookups, key=lambda i: i['name'])
utc_time = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
j2_env = Environment(loader=FileSystemLoader(TEMPLATE_PATH), # nosemgrep
trim_blocks=True)
template = j2_env.get_template('transforms.j2')
output_path = path.join(OUTPUT_PATH, 'default/transforms.conf')
output = template.render(lookups=sorted_lookups, time=utc_time)
with open(output_path, 'w', encoding="utf-8") as f:
f.write(output)
return output_path
def generate_collections_conf(lookups, TEMPLATE_PATH, OUTPUT_PATH):
filtered_lookups = list(filter(lambda i: 'collection' in i, lookups))
sorted_lookups = sorted(filtered_lookups, key=lambda i: i['name'])
utc_time = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
j2_env = Environment(loader=FileSystemLoader(TEMPLATE_PATH), # nosemgrep
trim_blocks=True)
template = j2_env.get_template('collections.j2')
output_path = path.join(OUTPUT_PATH, 'default/collections.conf')
output = template.render(lookups=sorted_lookups, time=utc_time)
with open(output_path, 'w', encoding="utf-8") as f:
f.write(output)
return output_path
def generate_ssa_yaml(detections, TEMPLATE_PATH, OUTPUT_PATH):
'''
@param detections: input list of individual YAML detections in detections/ directory
@return: the enhanced yaml file located in /detections directory
'''
# disable yaml pointers https://stackoverflow.com/questions/51272814/python-yaml-dumping-pointer-references
yaml.Dumper.ignore_aliases = lambda *args : True
for d in detections:
manifest_file = OUTPUT_PATH + '/detections/' + d['name'].lower().replace(" ", "_") + '.yml'
# remove unused fields
del d['risk']
del d['deployment']
del d['mappings']
del d['savedsearch_annotations']
with open(manifest_file, 'w') as file:
documents = yaml.dump(d, file, sort_keys=True)
return OUTPUT_PATH + '/detections/'
def generate_savedsearches_conf(detections, deployments, TEMPLATE_PATH, OUTPUT_PATH):
'''
@param detections: input list of individual YAML detections in detections/ directory
@param deployments:
@return: the savedsearches.conf file located in package/default/
'''
utc_time = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
j2_env = Environment(loader=FileSystemLoader(TEMPLATE_PATH), # nosemgrep
trim_blocks=True)
j2_env.filters['custom_jinja2_enrichment_filter'] = custom_jinja2_enrichment_filter
template = j2_env.get_template('savedsearches.j2')
output_path = path.join(OUTPUT_PATH, 'default/savedsearches.conf')
output = template.render(detections=detections, time=utc_time)
with open(output_path, 'w') as f:
output = output.encode('ascii', 'ignore').decode('ascii')
f.write(output)
return output_path
def generate_use_case_library_conf(stories, detections, TEMPLATE_PATH, OUTPUT_PATH):
utc_time = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
j2_env = Environment(loader=FileSystemLoader(TEMPLATE_PATH), # nosemgrep
trim_blocks=True)
template = j2_env.get_template('analyticstories.j2')
output_path = path.join(OUTPUT_PATH, 'default/analyticstories.conf')
output = template.render(stories=stories, detections=detections,
time=utc_time)
with open(output_path, 'w', encoding="utf-8") as f:
f.write(output)
return output_path
def generate_macros_conf(macros, detections, TEMPLATE_PATH, OUTPUT_PATH):
filter_macros = []
for detection in detections:
new_dict = {}
new_dict['definition'] = 'search *'
new_dict['description'] = 'Update this macro to limit the output results to filter out false positives. '
new_dict['name'] = detection['name']. \
replace(' ', '_').replace('-', '_').replace('.', '_').replace('/', '_').lower() + '_filter'
filter_macros.append(new_dict)
all_macros = macros + filter_macros
utc_time = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
j2_env = Environment(loader=FileSystemLoader(TEMPLATE_PATH), # nosemgrep
trim_blocks=True)
template = j2_env.get_template('macros.j2')
output_path = path.join(OUTPUT_PATH, 'default/macros.conf')
output = template.render(macros=all_macros, time=utc_time)
with open(output_path, 'w', encoding="utf-8") as f:
f.write(output)
return output_path
def generate_workbench_panels(response_tasks, stories, TEMPLATE_PATH, OUTPUT_PATH):
workbench_panel_objects = []
for response_task in response_tasks:
if response_task['type'] == 'Investigation':
if 'search' in response_task:
if 'inputs' in response_task:
response_file_name = response_task['name'].replace(' ', '_').replace('-','_').replace('.','_').replace('/','_').lower()
response_file_name_xml = response_file_name + "___response_task.xml"
response_task['lowercase_name'] = response_file_name
workbench_panel_objects.append(response_task)
j2_env = Environment(loader=FileSystemLoader(TEMPLATE_PATH), # nosemgrep
trim_blocks=True)
template = j2_env.get_template('panel.j2')
file_path = "default/data/ui/panels/workbench_panel_" + response_file_name_xml
output_path = path.join(OUTPUT_PATH, file_path)
                    response_task['search'] = response_task['search'].replace(">", "&gt;")
                    response_task['search'] = response_task['search'].replace("<", "&lt;")
output = template.render(search=response_task['search'])
with open(output_path, 'w') as f:
f.write(output)
j2_env = Environment(loader=FileSystemLoader(TEMPLATE_PATH), # nosemgrep
trim_blocks=True)
template = j2_env.get_template('es_investigations.j2')
output_path = path.join(OUTPUT_PATH, 'default/es_investigations.conf')
output = template.render(response_tasks=workbench_panel_objects, stories=stories)
with open(output_path, 'w', encoding="utf-8") as f:
f.write(output)
j2_env = Environment(loader=FileSystemLoader(TEMPLATE_PATH), # nosemgrep
trim_blocks=True)
template = j2_env.get_template('workflow_actions.j2')
output_path = path.join(OUTPUT_PATH, 'default/workflow_actions.conf')
output = template.render(response_tasks=workbench_panel_objects)
with open(output_path, 'w', encoding="utf-8") as f:
f.write(output)
return workbench_panel_objects
def parse_data_models_from_search(search):
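    # Extract the datamodel name from searches of the form "... from datamodel=Name.Object ...";
    # returns False when the search does not reference a datamodel.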
match = re.search(r'from\sdatamodel\s?=\s?([^\s.]*)', search)
if match is not None:
return match.group(1)
return False
def parse_author_company(story):
match_author = re.search(r'^([^,]+)', story['author'])
if match_author is None:
match_author = 'no'
else:
match_author = match_author.group(1)
match_company = re.search(r',\s?(.*)$', story['author'])
if match_company is None:
match_company = 'no'
else:
match_company = match_company.group(1)
return match_author, match_company
def get_deployments(object, deployments):
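    # Pick the deployment whose tags overlap the object's tags; if none match,
    # fall back to the deployment tagged with analytic_story: all.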
matched_deployments = []
for deployment in deployments:
for tag in object['tags'].keys():
if tag in deployment['tags'].keys():
if type(object['tags'][tag]) is str:
tag_array = [object['tags'][tag]]
else:
tag_array = object['tags'][tag]
for tag_value in tag_array:
if type(deployment['tags'][tag]) is str:
tag_array_deployment = [deployment['tags'][tag]]
else:
tag_array_deployment = deployment['tags'][tag]
for tag_value_deployment in tag_array_deployment:
if tag_value == tag_value_deployment:
matched_deployments.append(deployment)
continue
# grab default for all stories if deployment not set
if len(matched_deployments) == 0:
for deployment in deployments:
if 'analytic_story' in deployment['tags']:
if deployment['tags']['analytic_story'] == 'all':
last_deployment = deployment
else:
last_deployment = matched_deployments[-1]
return last_deployment
def get_nes_fields(search, deployment):
nes_fields_matches = []
if 'alert_action' in deployment:
if 'notable' in deployment['alert_action']:
if 'nes_fields' in deployment['alert_action']['notable']:
for field in deployment['alert_action']['notable']['nes_fields']:
if (search.find(field + ' ') != -1):
nes_fields_matches.append(field)
return nes_fields_matches
def map_response_tasks_to_stories(response_tasks):
sto_res = {}
for response_task in response_tasks:
if response_task['type'] == 'Investigation':
if 'tags' in response_task:
if 'analytic_story' in response_task['tags']:
for story in response_task['tags']['analytic_story']:
if 'type' in response_task.keys():
if response_task['type'] == 'Investigation':
task_name = str('ESCU - ' + response_task['name'] + ' - Response Task')
else:
task_name = str('ESCU - ' + response_task['name'] + ' - Response Task')
if not (story in sto_res):
sto_res[story] = {task_name}
else:
sto_res[story].add(task_name)
return sto_res
def map_baselines_to_stories(baselines):
sto_bas = {}
for baseline in baselines:
if 'tags' in baseline:
if 'analytic_story' in baseline['tags']:
for story in baseline['tags']['analytic_story']:
if 'Splunk Behavioral Analytics' in baseline['tags']['product']:
continue
baseline_name = str('ESCU - ' + baseline['name'])
if not (story in sto_bas):
sto_bas[story] = {baseline_name}
else:
sto_bas[story].add(baseline_name)
return sto_bas
def custom_jinja2_enrichment_filter(string, object):
customized_string = string
for key in object.keys():
[key.encode('utf-8') for key in object]
customized_string = customized_string.replace("%" + key + "%", str(object[key]))
for key in object['tags'].keys():
customized_string = customized_string.replace("%" + key + "%", str(object['tags'][key]))
return customized_string
def add_annotations(detection):
# used for upstream processing of risk scoring annotations in ECSU
# this is not currently compatible with newer instances of ESCU (6.3.0+)
# we are duplicating the code block above for now and just changing variable names to make future
# changes to this data structure separate from the mappings generation
# @todo expose the JSON data structure for newer risk type
annotation_keys = ['mitre_attack', 'kill_chain_phases', 'cis20', 'nist', 'analytic_story', 'observable', 'context', 'impact', 'confidence', 'cve']
savedsearch_annotations = {}
for key in annotation_keys:
if key == 'mitre_attack':
if 'mitre_attack_id' in detection['tags']:
savedsearch_annotations[key] = detection['tags']['mitre_attack_id']
else:
if key in detection['tags']:
savedsearch_annotations[key] = detection['tags'][key]
detection['savedsearch_annotations'] = savedsearch_annotations
# add SSA risk_severity
if 'risk_score' in detection['tags']:
if detection['tags']['risk_score'] >= 80:
detection['tags']['risk_severity'] = 'high'
        elif 50 <= detection['tags']['risk_score'] <= 79:
detection['tags']['risk_severity'] = 'medium'
else:
detection['tags']['risk_severity'] = 'low'
return detection
def add_rba(detection):
# grab risk message
if 'message' in detection['tags']:
detection['risk_message'] = detection['tags']['message']
risk_objects = []
risk_object_user_types = {'user', 'username', 'email address'}
risk_object_system_types = {'device', 'endpoint', 'hostname', 'ip address'}
if 'observable' in detection['tags'] and 'risk_score' in detection['tags']:
# go through each obervable
for entity in detection['tags']['observable']:
risk_object = dict()
# determine if is a user type, create risk
if entity['type'].lower() in risk_object_user_types:
for r in entity['role']:
if 'attacker' == r.lower() or 'victim' ==r.lower():
risk_object['risk_object_type'] = 'user'
risk_object['risk_object_field'] = entity['name']
risk_object['risk_score'] = detection['tags']['risk_score']
risk_objects.append(risk_object)
# determine if is a system type, create risk
elif entity['type'].lower() in risk_object_system_types:
for r in entity['role']:
if 'attacker' == r.lower() or 'victim' ==r.lower():
risk_object['risk_object_type'] = 'system'
risk_object['risk_object_field'] = entity['name']
risk_object['risk_score'] = detection['tags']['risk_score']
risk_objects.append(risk_object)
# if is not a system or user, it is a threat object
else:
risk_object['threat_object_field'] = entity['name']
risk_object['threat_object_type'] = entity['type'].lower()
risk_objects.append(risk_object)
continue
detection['risk'] = risk_objects
return detection
def add_playbook(detection, playbooks):
preface = " The following Splunk SOAR playbook can be used to respond to this detection: "
for playbook in playbooks:
try:
if detection['name'] in playbook['tags']['detections']:
detection['how_to_implement'] = detection['how_to_implement'] + preface + playbook['name']
except KeyError:
print("playbook " + playbook['name'] + " has no detections, passing....")
pass
return detection
def map_playbooks_to_stories(playbooks):
sto_play = {}
for playbook in playbooks:
if 'tags' in playbook:
if 'analytic_story' in playbook['tags']:
for story in playbook['tags']['analytic_story']:
if not (story in sto_play):
sto_play[story] = {playbook['name']}
else:
sto_play[story].add(playbook['name'])
return sto_play
def prepare_detections(detections, deployments, playbooks, OUTPUT_PATH):
for detection in detections:
# only for DevSecOps
if global_product == 'DevSecOps':
if detection['tags']['risk_score']:
detection['search'] = detection['search'] + ' | eval risk_score=' + str(detection['tags']['risk_score'])
if detection['tags']['mitre_attack_id']:
detection['search'] = detection['search'] + ' | eval mitre_attack_id=' + detection['tags']['mitre_attack_id'][0]
if detection['type'] == 'Anomaly':
detection['search'] = detection['search'] + ' | collect index=signals'
elif detection['type'] == 'TTP':
detection['search'] = detection['search'] + ' | collect index=alerts'
elif detection['type'] == 'Correlation':
detection['search'] = detection['search'] + ' | collect index=alerts'
# parse out data_models
data_model = parse_data_models_from_search(detection['search'])
if data_model:
detection['data_model'] = data_model
if detection['type'] != 'Investigation':
matched_deployment = get_deployments(detection, deployments)
detection['deployment'] = matched_deployment
nes_fields = get_nes_fields(detection['search'], detection['deployment'])
if len(nes_fields) > 0:
detection['nes_fields'] = nes_fields
keys = ['mitre_attack', 'kill_chain_phases', 'cis20', 'nist']
mappings = {}
for key in keys:
if key == 'mitre_attack':
if 'mitre_attack_id' in detection['tags']:
mappings[key] = detection['tags']['mitre_attack_id']
else:
if key in detection['tags']:
mappings[key] = detection['tags'][key]
detection['mappings'] = mappings
detection = add_annotations(detection)
detection = add_rba(detection)
detection = add_playbook(detection, playbooks)
# add additional metadata
if 'product' in detection['tags']:
detection['product'] = detection['tags']['product']
# enable all SAAWS detections
if (OUTPUT_PATH) == 'dist/saaws':
detection['disabled'] = 'false'
return detections
def prepare_stories(stories, detections, playbooks):
# enrich stories with information from detections: data_models, mitre_ids, kill_chain_phases, nists
sto_to_data_models = {}
sto_to_mitre_attack_ids = {}
sto_to_kill_chain_phases = {}
sto_to_ciss = {}
sto_to_nists = {}
sto_to_det = {}
preface = " /n**SOAR:** The following Splunk SOAR playbooks can be used in the response to this story's analytics: "
baselines = [object for object in detections if 'Baseline' in object['type']]
for detection in detections:
if detection['type'] == 'Baseline':
rule_name = str('ESCU - ' + detection['name'])
continue
if 'analytic_story' in detection['tags']:
for story in detection['tags']['analytic_story']:
if detection['type'] != "Investigation":
rule_name = str('ESCU - ' + detection['name'] + ' - Rule')
if story in sto_to_det.keys():
sto_to_det[story].add(rule_name)
else:
sto_to_det[story] = {rule_name}
data_model = parse_data_models_from_search(detection['search'])
if data_model:
if story in sto_to_data_models.keys():
sto_to_data_models[story].add(data_model)
else:
sto_to_data_models[story] = {data_model}
if 'mitre_attack_id' in detection['tags']:
if story in sto_to_mitre_attack_ids.keys():
for mitre_attack_id in detection['tags']['mitre_attack_id']:
sto_to_mitre_attack_ids[story].add(mitre_attack_id)
else:
sto_to_mitre_attack_ids[story] = set(detection['tags']['mitre_attack_id'])
if 'kill_chain_phases' in detection['tags']:
if story in sto_to_kill_chain_phases.keys():
for kill_chain in detection['tags']['kill_chain_phases']:
sto_to_kill_chain_phases[story].add(kill_chain)
else:
sto_to_kill_chain_phases[story] = set(detection['tags']['kill_chain_phases'])
if 'cis20' in detection['tags']:
if story in sto_to_ciss.keys():
for cis in detection['tags']['cis20']:
sto_to_ciss[story].add(cis)
else:
sto_to_ciss[story] = set(detection['tags']['cis20'])
if 'nist' in detection['tags']:
if story in sto_to_nists.keys():
for nist in detection['tags']['nist']:
sto_to_nists[story].add(nist)
else:
sto_to_nists[story] = set(detection['tags']['nist'])
sto_res = map_response_tasks_to_stories(detections)
sto_bas = map_baselines_to_stories(baselines)
sto_play = map_playbooks_to_stories(playbooks)
for story in stories:
story['author_name'], story['author_company'] = parse_author_company(story)
story['lowercase_name'] = story['name'].replace(' ', '_').replace('-','_').replace('.','_').replace('/','_').lower()
story['detections'] = sorted(sto_to_det[story['name']])
story['searches'] = story['detections']
if story['name'] in sto_to_data_models:
story['data_models'] = sorted(sto_to_data_models[story['name']])
if story['name'] in sto_play:
story['description'] = str(story['description']) + preface + str(sto_play[story['name']])
story['description'] = story['description'].replace('{', ' ').replace('}', ' ')
if story['name'] in sto_to_mitre_attack_ids:
story['mitre_attack'] = sorted(sto_to_mitre_attack_ids[story['name']])
if story['name'] in sto_to_kill_chain_phases:
story['kill_chain_phases'] = sorted(sto_to_kill_chain_phases[story['name']])
if story['name'] in sto_to_ciss:
story['cis20'] = sorted(sto_to_ciss[story['name']])
if story['name'] in sto_to_nists:
story['nist'] = sorted(sto_to_nists[story['name']])
if story['name'] in sto_res:
story['response_tasks'] = sorted(list(sto_res[story['name']]))
story['searches'] = story['searches'] + story['response_tasks']
story['workbench_panels'] = []
for response_task_name in story['response_tasks']:
s = 'panel://workbench_panel_' + response_task_name[7:].replace(' ', '_').replace('-','_').replace('.','_').replace('/','_').lower()
story['workbench_panels'].append(s)
if story['name'] in sto_bas:
story['baselines'] = sorted(list(sto_bas[story['name']]))
keys = ['mitre_attack', 'kill_chain_phases', 'cis20', 'nist']
mappings = {}
for key in keys:
if key in story:
mappings[key] = story[key]
story['mappings'] = mappings
return stories
def generate_mitre_lookup(OUTPUT_PATH):
csv_mitre_rows = [["mitre_id", "technique", "tactics", "groups"]]
lift = attack_client()
all_enterprise = lift.get_enterprise(stix_format=False)
enterprise_relationships = lift.get_enterprise_relationships()
enterprise_groups = lift.get_enterprise_groups()
for technique in all_enterprise['techniques']:
apt_groups = []
for relationship in enterprise_relationships:
if (relationship['target_ref'] == technique['id']) and relationship['source_ref'].startswith('intrusion-set'):
for group in enterprise_groups:
if relationship['source_ref'] == group['id']:
apt_groups.append(group['name'])
if not ('revoked' in technique):
if len(apt_groups) == 0:
apt_groups.append('no')
csv_mitre_rows.append([technique['technique_id'], technique['technique'], '|'.join(technique['tactic']).replace('-',' ').title(), '|'.join(apt_groups)])
with open(path.join(OUTPUT_PATH, 'lookups/mitre_enrichment.csv'), 'w', newline='', encoding="utf-8") as file:
writer = csv.writer(file,quoting=csv.QUOTE_ALL)
writer.writerows(csv_mitre_rows)
def import_objects(VERBOSE, REPO_PATH):
objects = {
"stories": load_objects("stories/*.yml", VERBOSE, REPO_PATH),
"macros": load_objects("macros/*.yml", VERBOSE, REPO_PATH),
"lookups": load_objects("lookups/*.yml", VERBOSE, REPO_PATH),
"responses": load_objects("responses/*.yml", VERBOSE, REPO_PATH),
"deployments": load_objects("deployments/*.yml", VERBOSE, REPO_PATH),
"detections": load_objects("detections/*/*.yml", VERBOSE, REPO_PATH),
"playbooks": load_objects("playbooks/*.yml", VERBOSE, REPO_PATH),
}
objects["detections"].extend(load_objects("detections/*/*/*.yml", VERBOSE, REPO_PATH))
return objects
def compute_objects(objects, PRODUCT, OUTPUT_PATH):
if PRODUCT == "SAAWS":
objects["detections"] = [object for object in objects["detections"] if 'Splunk Security Analytics for AWS' in object['tags']['product']]
objects["stories"] = [object for object in objects["stories"] if 'Splunk Security Analytics for AWS' in object['tags']['product']]
if PRODUCT == "DevSecOps":
objects["detections"] = [object for object in objects["detections"] if 'Dev Sec Ops Analytics' in object['tags']['product']]
objects["stories"] = [object for object in objects["stories"] if 'Dev Sec Ops Analytics' in object['tags']['product']]
if PRODUCT == "ESCU":
# only use ESCU detections to the configurations
objects["detections"] = sorted(filter(lambda d: not 'Splunk Behavioral Analytics' in d['tags']['product'], objects["detections"]), key=lambda d: d['name'])
objects["stories"] = sorted(filter(lambda s: not 'Splunk Behavioral Analytics' in s['tags']['product'], objects["stories"]), key=lambda s: s['name'])
if PRODUCT == "SSA":
# only SSA detections, also no need to calculate stories
objects["detections"] = sorted(filter(lambda d: 'Splunk Behavioral Analytics' in d['tags']['product'], objects["detections"]), key=lambda d: d['name'])
objects["stories"] = sorted(filter(lambda s: 'Splunk Behavioral Analytics' in s['tags']['product'], objects["stories"]), key=lambda s: s['name'])
objects["macros"] = sorted(objects["macros"], key=lambda m: m['name'])
objects["detections"] = prepare_detections(objects["detections"], objects["deployments"], objects["playbooks"], OUTPUT_PATH)
objects["stories"] = prepare_stories(objects["stories"], objects["detections"], objects["playbooks"])
return objects
def get_objects(REPO_PATH, OUTPUT_PATH, PRODUCT, VERBOSE):
objects = import_objects(VERBOSE, REPO_PATH)
objects = compute_objects(objects, PRODUCT, OUTPUT_PATH)
return objects
def main(REPO_PATH, OUTPUT_PATH, PRODUCT, VERBOSE):
global global_product
global_product = PRODUCT
TEMPLATE_PATH = path.join(REPO_PATH, 'bin/jinja2_templates')
objects = get_objects(REPO_PATH, OUTPUT_PATH, PRODUCT, VERBOSE)
try:
if VERBOSE:
print("generating Mitre lookups")
# generate_mitre_lookup(OUTPUT_PATH)
except Exception as e:
print('Error: ' + str(e))
print("WARNING: Generation of Mitre lookup failed.")
# calculate deprecation totals
deprecated = []
for d in objects['detections']:
if 'deprecated' in d:
deprecated.append(d)
detection_path = ''
lookups_path = ''
lookups_files= ''
use_case_lib_path = ''
macros_path = ''
workbench_panels_objects = ''
if global_product == 'SSA':
detection_path = generate_ssa_yaml(objects["detections"], TEMPLATE_PATH, OUTPUT_PATH)
objects["macros"] = []
else:
detection_path = generate_savedsearches_conf(objects["detections"], objects["deployments"], TEMPLATE_PATH, OUTPUT_PATH)
lookups_path = generate_transforms_conf(objects["lookups"], TEMPLATE_PATH, OUTPUT_PATH)
lookups_path = generate_collections_conf(objects["lookups"], TEMPLATE_PATH, OUTPUT_PATH)
lookups_files = generate_lookup_files(objects["lookups"], TEMPLATE_PATH, OUTPUT_PATH,REPO_PATH)
use_case_lib_path = generate_use_case_library_conf(objects["stories"], objects["detections"], TEMPLATE_PATH, OUTPUT_PATH)
macros_path = generate_macros_conf(objects["macros"], objects["detections"], TEMPLATE_PATH, OUTPUT_PATH)
workbench_panels_objects = generate_workbench_panels(objects["detections"], objects["stories"], TEMPLATE_PATH, OUTPUT_PATH)
if VERBOSE:
print("{0} stories have been successfully written to {1}".format(len(objects["stories"]), use_case_lib_path))
print("{0} detections have been successfully written to {1}".format(len(objects["detections"]), detection_path))
print("{0} detections have been marked deprecated on {1}".format(len(deprecated), detection_path))
print("{0} macros have been successfully written to {1}".format(len(objects["macros"]), macros_path))
print("{0} workbench panels have been successfully written to {1}, {2} and {3}".format(len(workbench_panels_objects), OUTPUT_PATH + "/default/es_investigations.conf", OUTPUT_PATH + "/default/workflow_actions.conf", OUTPUT_PATH + "/default/data/ui/panels/*"))
print("security content generation completed..")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="generates splunk conf files out of security_content manifests", epilog="""
This tool converts manifests to the source files to be used by products like Splunk Enterprise.
    It generates the savedsearches.conf and analyticstories.conf files for ES.""")
parser.add_argument("-p", "--path", required=True, help="path to security_content repo")
parser.add_argument("-o", "--output", required=True, help="path to the output directory")
parser.add_argument("-v", "--verbose", required=False, default=False, action='store_true', help="prints verbose output")
parser.add_argument("--product", required=True, default="ESCU", help="package type")
# parse them
args = parser.parse_args()
REPO_PATH = args.path
OUTPUT_PATH = args.output
VERBOSE = args.verbose
PRODUCT = args.product
main(REPO_PATH, OUTPUT_PATH, PRODUCT, VERBOSE)
|
###############################
#
# Created by Patrik Valkovic
# 4/3/2021
#
###############################
import torch as t
import numpy as np
import time
REPEATS = 1000
PARENTS = 10000
TO_PICK = 1
CHILDREN = 5000
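# Benchmark: compare several ways of drawing TO_PICK parent indices for each of
# CHILDREN offspring out of PARENTS candidates (randint, randperm, scaled uniform,
# multinomial), first on CPU and then on GPU. Every variant runs REPEATS times and
# the mean process time, perf_counter time and total wall-clock time are printed.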
# init
t.set_num_threads(4)
d = t.device('cuda:0')
t.rand(1,device=d)
print("=== CPU ===")
# randint
bufferp, bufferf = [], []
s = time.time()
for _ in range(REPEATS):
startp = time.process_time()
startf = time.perf_counter()
x = t.randint(PARENTS, (CHILDREN, TO_PICK), dtype=t.long)
float(x.max())
endp = time.process_time()
endf = time.perf_counter()
bufferp.append(endp - startp)
bufferf.append(endf - startf)
e = time.time()
##############################
print(f"randint p:{np.mean(bufferp):.6f} f:{np.mean(bufferf):.6f} in reality {e-s:.6f}")
# rand permutation
bufferp, bufferf = [], []
s = time.time()
for _ in range(REPEATS):
startp = time.process_time()
startf = time.perf_counter()
x = t.randperm(PARENTS, dtype=t.long)[:CHILDREN]
float(x.max())
endp = time.process_time()
endf = time.perf_counter()
bufferp.append(endp - startp)
bufferf.append(endf - startf)
e = time.time()
##############################
print(f"randperm p:{np.mean(bufferp):.6f} f:{np.mean(bufferf):.6f} in reality {e-s:.6f}")
# uniform
bufferp, bufferf = [], []
s = time.time()
for _ in range(REPEATS):
startp = time.process_time()
startf = time.perf_counter()
x = t.rand((CHILDREN, TO_PICK))
x.multiply_(PARENTS)
x = x.to(t.long)
float(x.max())
endp = time.process_time()
endf = time.perf_counter()
bufferp.append(endp - startp)
bufferf.append(endf - startf)
e = time.time()
##############################
print(f"uniform p:{np.mean(bufferp):.6f} f:{np.mean(bufferf):.6f} in reality {e-s:.6f}")
# multinomial
bufferp, bufferf = [], []
probs = t.tensor(1 / PARENTS).as_strided_((CHILDREN,PARENTS),(0,0))
s = time.time()
for _ in range(REPEATS):
startp = time.process_time()
startf = time.perf_counter()
    x = t.multinomial(probs, TO_PICK, replacement=True)
float(x.max())
endp = time.process_time()
endf = time.perf_counter()
bufferp.append(endp - startp)
bufferf.append(endf - startf)
e = time.time()
##############################
print(f"multinomial repeat p:{np.mean(bufferp):.6f} f:{np.mean(bufferf):.6f} in reality {e-s:.6f}")
bufferp, bufferf = [], []
probs = t.tensor(1 / PARENTS).as_strided_((CHILDREN,PARENTS),(0,0))
s = time.time()
for _ in range(REPEATS):
startp = time.process_time()
startf = time.perf_counter()
x = t.multinomial(probs, TO_PICK, replacement=False)
float(x.max())
endp = time.process_time()
endf = time.perf_counter()
bufferp.append(endp - startp)
bufferf.append(endf - startf)
e = time.time()
##############################
print(f"multinomial no repeat p:{np.mean(bufferp):.6f} f:{np.mean(bufferf):.6f} in reality {e-s:.6f}")
print("=== GPU ===")
# randint
bufferp, bufferf = [], []
s = time.time()
for _ in range(REPEATS):
startp = time.process_time()
startf = time.perf_counter()
x = t.randint(PARENTS, (CHILDREN, TO_PICK), dtype=t.long, device=d)
float(x.max())
endp = time.process_time()
endf = time.perf_counter()
bufferp.append(endp - startp)
bufferf.append(endf - startf)
e = time.time()
##############################
print(f"randint p:{np.mean(bufferp):.6f} f:{np.mean(bufferf):.6f} in reality {e-s:.6f}")
# rand permutation
bufferp, bufferf = [], []
s = time.time()
for _ in range(REPEATS):
startp = time.process_time()
startf = time.perf_counter()
x = t.randperm(PARENTS, dtype=t.long, device=d)[:CHILDREN]
float(x.max())
endp = time.process_time()
endf = time.perf_counter()
bufferp.append(endp - startp)
bufferf.append(endf - startf)
e = time.time()
##############################
print(f"randperm p:{np.mean(bufferp):.6f} f:{np.mean(bufferf):.6f} in reality {e-s:.6f}")
# uniform
bufferp, bufferf = [], []
s = time.time()
for _ in range(REPEATS):
startp = time.process_time()
startf = time.perf_counter()
x = t.rand((CHILDREN, TO_PICK), device=d)
x.multiply_(PARENTS)
x = x.to(t.long)
float(x.max())
endp = time.process_time()
endf = time.perf_counter()
bufferp.append(endp - startp)
bufferf.append(endf - startf)
e = time.time()
##############################
print(f"uniform p:{np.mean(bufferp):.6f} f:{np.mean(bufferf):.6f} in reality {e-s:.6f}")
# multinomial
bufferp, bufferf = [], []
probs = t.tensor(1 / PARENTS, device=d).as_strided_((CHILDREN,PARENTS),(0,0))
s = time.time()
for _ in range(REPEATS):
startp = time.process_time()
startf = time.perf_counter()
    x = t.multinomial(probs, TO_PICK, replacement=True)
float(x.max())
endp = time.process_time()
endf = time.perf_counter()
bufferp.append(endp - startp)
bufferf.append(endf - startf)
e = time.time()
##############################
print(f"multinomial repeat p:{np.mean(bufferp):.6f} f:{np.mean(bufferf):.6f} in reality {e-s:.6f}")
bufferp, bufferf = [], []
probs = t.tensor(1 / PARENTS, device=d).as_strided_((CHILDREN,PARENTS),(0,0))
s = time.time()
for _ in range(REPEATS):
startp = time.process_time()
startf = time.perf_counter()
x = t.multinomial(probs, TO_PICK, replacement=False)
float(x.max())
endp = time.process_time()
endf = time.perf_counter()
bufferp.append(endp - startp)
bufferf.append(endf - startf)
e = time.time()
##############################
print(f"multinomial no repeat p:{np.mean(bufferp):.6f} f:{np.mean(bufferf):.6f} in reality {e-s:.6f}")
|
class Graph:
def __init__(self, art_installations, weighted_connect):
self.installations = art_installations
self.adjacency_mtx = weighted_connect
self.artwork_to_index = {}
for i, artwork in enumerate(self.installations):
self.artwork_to_index[artwork] = i
def __str__(self):
return (str(self.installations) + '\n' + str(self.adjacency_mtx))
class Installation:
def __init__(self, name, ward, position, indoor):
self.name = name
self.ward = ward
self.position = position
self.indoor = indoor
def __str__(self):
return "Installation " + self.name + " in Ward " + str(self.ward)
|
#!/usr/bin/env python
from flattery.cext import *
|
# -*- coding: utf-8 -*-
import os
import sys
import numpy as np
from keras.layers import (
Activation, TimeDistributed, Dense, RepeatVector, Embedding
)
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from preprocessing import one_hot_encoding
from utils import load_test_data, plot_history
def create_model(x_vocab_len, x_max_len, y_vocab_len, y_max_len, hidden_size,
num_layers):
model = Sequential()
# Encoder
model.add(Embedding(
x_vocab_len, 1000, input_length=x_max_len, mask_zero=True)
)
model.add(LSTM(hidden_size))
model.add(RepeatVector(y_max_len))
# Decoder
for _ in range(num_layers):
model.add(LSTM(hidden_size, return_sequences=True))
model.add(TimeDistributed(Dense(y_vocab_len)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
def train(X, Y, y_word_to_ix, y_max_len, saved_weights, model, conf):
k_start = 1
# If any trained weight was found, then load them into the model
if len(saved_weights) > 0:
print('[INFO] Saved weights found, loading...')
epoch = saved_weights[
saved_weights.rfind('_') + 1:saved_weights.rfind('.')]
model.load_weights(saved_weights)
k_start = int(epoch) + 1
for k in range(k_start, conf['EPOCHS'] + 1):
# Shuffling the training data every epoch to avoid local minima
indices = np.arange(len(X))
np.random.shuffle(indices)
X = X[indices]
Y = Y[indices]
# Training 1000 sequences at a time
for i in range(0, len(X), 1000):
if i + 1000 >= len(X):
i_end = len(X)
else:
i_end = i + 1000
y_sequences = one_hot_encoding(Y[i:i_end], y_max_len,
y_word_to_ix)
print(f'[INFO] Training model: epoch {k}th {i}/{len(X)} samples')
history = model.fit(X[i:i_end], y_sequences, batch_size=conf[
'BATCH_SIZE'],
epochs=1, verbose=2)
# actions on epoch finalization
model.save_weights('checkpoint_epoch_{}.hdf5'.format(k))
def run_test(saved_weights, model, conf, x_word_to_idx, x_max_len,
y_idx_to_word, num=None):
# Only performing test if there is any saved weights
if len(saved_weights) == 0:
print("The network hasn't been trained! Program will exit...")
sys.exit()
else:
print(" - loading test data")
x_test = load_test_data('test', x_word_to_idx, conf['MAX_LEN'])
if num:
x_test = x_test[0:num]
x_test = pad_sequences(x_test, maxlen=x_max_len, dtype='int32')
print(" - loading model")
model.load_weights(saved_weights)
print(" - calculating predictions")
predictions = np.argmax(model.predict(x_test), axis=2)
sequences = []
print(" - processing")
for prediction in predictions:
sequence = ' '.join(
[y_idx_to_word[index] for index in prediction if index > 0])
print(sequence)
sequences.append(sequence)
np.savetxt('test_result', sequences, fmt='%s')
def find_checkpoint_file(folder):
checkpoint_file = [f for f in os.listdir(folder) if
'checkpoint' in f]
if len(checkpoint_file) == 0:
return []
    modified_time = [os.path.getmtime(os.path.join(folder, f)) for f in checkpoint_file]
print("found checkpoint(s):")
print(modified_time)
return checkpoint_file[int(np.argmax(modified_time))]
|
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import zipfile
import tarfile
import tempfile
import shutil
import unittest
import getfile
class DownloadTest(unittest.TestCase):
def create_sample_text_file(self):
"""Create a basic text file for use in archives."""
path = os.path.join(self.temp_dir, self.sample_name)
with open(path, 'w') as f:
f.write("sample data")
return path
def setUp(self):
# test data
self.remote_zip = r'http://www.naturalearthdata.com/http//www.naturalearthdata.com/download/110m/cultural/ne_110m_admin_0_tiny_countries.zip'
# temporary workspace
self.temp_dir = tempfile.mkdtemp()
self.sample_name = 'sample.txt'
self.sample_path = os.path.join(self.temp_dir, self.sample_name)
# create a bunch of compressed files for testing decompression
sample_file = self.create_sample_text_file()
with tarfile.TarFile(os.path.join(self.temp_dir, 'sample.tar'), 'w') as tar:
tar.add(sample_file, arcname=self.sample_name)
with zipfile.ZipFile(os.path.join(self.temp_dir, 'sample.zip'), 'w') as z:
z.write(sample_file, self.sample_name)
# remove the sample text file to properly test extraction
os.unlink(sample_file)
def tearDown(self):
"""Delete all the test data."""
shutil.rmtree(self.temp_dir, True)
def test_download(self):
"""Attempt to download a remote file."""
test_file = getfile.download_file(self.remote_zip, self.temp_dir)
self.assertTrue(os.path.exists(test_file))
def test_decompress_zip(self):
archive = os.path.join(self.temp_dir,'sample.zip')
self.assertTrue(getfile.decompress(archive, self.temp_dir))
self.assertTrue(os.path.exists(self.sample_path))
def test_decompress_tar(self):
archive = os.path.join(self.temp_dir,'sample.tar')
self.assertTrue(getfile.decompress(archive, self.temp_dir))
self.assertTrue(os.path.exists(self.sample_path))
if __name__ == '__main__':
unittest.main()
|
import json
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from wagtail.admin.edit_handlers import FieldPanel
from .models import TaxonomyTerms
class TaxonomyPanel(FieldPanel):
object_template = "wagtailadmin/edit_handlers/taxonomy_panel.html"
field_template = "wagtailadmin/edit_handlers/taxonomy_panel.html"
def render_as_object(self):
return mark_safe(render_to_string(self.object_template, {
'self': self,
self.TEMPLATE_VAR: self,
'field': self.bound_field,
'taxonomy_terms_json': self.load_taxonomy_terms(),
'taxonomy_terms_error_message': self.taxonomy_terms_error_message,
}))
def load_taxonomy_terms(self):
taxonomy_terms_json = None
try:
data = TaxonomyTerms.objects.get(taxonomy_id=self.taxonomy_terms_id)
try:
json.loads(data.terms_json)
except json.decoder.JSONDecodeError:
self.taxonomy_terms_error_message = '"Taxonomy Terms" json wrong format'
taxonomy_terms_json = data.terms_json
except TaxonomyTerms.DoesNotExist:
self.taxonomy_terms_error_message = 'No "Taxonomy Terms" for this id: "{}"'.format(
self.taxonomy_terms_id
)
return taxonomy_terms_json
def __init__(self, field_name, taxonomy_terms_id, *args, **kwargs):
super().__init__(field_name, *args, **kwargs)
self.taxonomy_terms_id = taxonomy_terms_id
self.taxonomy_terms_error_message = None
def clone_kwargs(self):
kwargs = super().clone_kwargs()
kwargs.update(
field_name=self.field_name,
taxonomy_terms_id=self.taxonomy_terms_id,
widget=self.widget if hasattr(self, 'widget') else None,
)
return kwargs
class PermissionsPanel(FieldPanel):
object_template = "wagtailadmin/edit_handlers/permissions_panel.html"
field_template = "wagtailadmin/edit_handlers/permissions_panel.html"
def render_as_object(self):
return mark_safe(render_to_string(self.object_template, {
'self': self,
self.TEMPLATE_VAR: self,
'field': self.bound_field,
'permission_terms_json': self.load_permission_terms(),
'permission_terms_error_message': self.permission_terms_error_message,
'permission_actions': json.dumps(self.permission_actions),
'permission_type': self.permission_type,
}))
def load_permission_terms(self):
permission_terms_json = None
try:
data = TaxonomyTerms.objects.get(taxonomy_id=self.permission_terms_id)
try:
json.loads(data.terms_json)
except json.decoder.JSONDecodeError:
self.permission_terms_error_message = '"Permission Terms" json wrong format'
permission_terms_json = data.terms_json
except TaxonomyTerms.DoesNotExist:
self.permission_terms_error_message = 'No "Permission Terms" for this id: "{}"'.format(
self.permission_terms_id
)
return permission_terms_json
def __init__(self, field_name, permission_terms_id, permission_actions, permission_type, *args, **kwargs):
super().__init__(field_name, *args, **kwargs)
self.permission_terms_id = permission_terms_id
self.permission_actions = permission_actions
self.permission_type = permission_type
self.permission_terms_error_message = None
def clone_kwargs(self):
kwargs = super().clone_kwargs()
kwargs.update(
field_name=self.field_name,
permission_terms_id=self.permission_terms_id,
permission_actions=self.permission_actions,
permission_type=self.permission_type,
widget=self.widget if hasattr(self, 'widget') else None,
)
return kwargs
|
import os
from django.db import models
# from django.core.exceptions import ValidationError
#
# def validate_only_one_instance(obj):
# model = obj.__class__
# if (model.objects.count() > 0 and
# obj.id != model.objects.get().id):
# raise ValidationError("Can only create 1 %s instance. Delete previous or modify it." % model.__name__)
def get_image_path(self, file):
return os.path.join("chemdeptsite", "static", "images", type(self).__name__, file)
class HeadsDesk(models.Model):
name = models.CharField(max_length=100, blank=False) # need for alt tag of image
picture = models.ImageField(upload_to=get_image_path, blank=True, null=True)
message = models.TextField()
def save(self, *args, **kwargs):
# object is possibly being updated, if so, clean up.
self.remove_on_image_update()
return super(HeadsDesk, self).save(*args, **kwargs)
def remove_on_image_update(self):
try:
# is the object in the database yet?
obj = HeadsDesk.objects.get(pk=self.pk)
except HeadsDesk.DoesNotExist:
# object is not in db, nothing to worry about
return
# is the save due to an update of the actual image file?
if obj.picture and self.picture and obj.picture != self.picture:
# delete the old image file from the storage in favor of the new file
obj.picture.delete()
def get_image_url(self):
return str(self.picture.url)[19:]
def __str__(self):
return "Entry : " + str(self.id)
# def clean(self):
# validate_only_one_instance(self)
class Meta:
verbose_name_plural = "HeadsDesk"
class NoticeBoard(models.Model):
title = models.CharField(max_length=300, blank=False)
body = models.CharField(max_length=300, blank=True, null=True)
date = models.DateTimeField()
def __str__(self):
return self.title
class Meta:
verbose_name_plural = "NoticeBoard"
class News(models.Model):
title = models.CharField(max_length=300, blank=False)
date = models.DateTimeField()
more_info = models.TextField()
def __str__(self):
return self.title
class Meta:
verbose_name_plural = "News"
class QuickLinks(models.Model):
title = models.CharField(max_length=300, blank=False)
link = models.URLField()
date = models.DateTimeField()
def __str__(self):
return self.title
class Meta:
verbose_name_plural = "QuickLinks"
class Facilities(models.Model):
name = models.CharField(max_length=500, blank=False)
text = models.TextField(max_length=2000, blank=True)
picture = models.ImageField(upload_to=get_image_path, blank=True, null=True)
def save(self, *args, **kwargs):
# object is possibly being updated, if so, clean up.
self.remove_on_image_update()
return super(Facilities, self).save(*args, **kwargs)
def remove_on_image_update(self):
try:
# is the object in the database yet?
obj = Facilities.objects.get(pk=self.pk)
except Facilities.DoesNotExist:
# object is not in db, nothing to worry about
return
# is the save due to an update of the actual image file?
if obj.picture and self.picture and obj.picture != self.picture:
# delete the old image file from the storage in favor of the new file
obj.picture.delete()
def get_image_url(self):
return str(self.picture.url)[19:]
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "Facilities"
|
"""
Provide access to clipboard from rc.conf
USE: eval fm.get_clipboard([<p/b/s>])
SEE: http://kendriu.com/how-to-use-pipes-in-python-subprocesspopen-objects
"""
import os
from subprocess import PIPE, CalledProcessError, run
import ranger.api
from ranger.ext.shell_escape import shell_quote
old_hook_init = ranger.api.hook_init
def hook_init(fm):
old_hook_init(fm)
def _xc(**kw):
kw.setdefault("check", True)
kw.setdefault("stdout", PIPE)
try:
rc = run(**kw)
if kw["stdout"]:
return rc.stdout.decode().rstrip(os.linesep)
except CalledProcessError as exc:
fm.notify(exc, bad=True)
return ""
fm.get_clipboard = lambda: _xc(args=["xco"])
fm.get_clipboard_q = lambda: shell_quote(_xc(args=["xco"]))
fm.set_clipboard = lambda x: _xc(args=["xci"], input=x, stdout=None)
ranger.api.hook_init = hook_init
|
from deap import gp
import operator
import math
import random
def logabs(x):
return math.log(math.fabs(x)) if x != 0 else 0
def safediv(x, y):
return x/y if y != 0 else 1
def safesqrt(x):
return math.sqrt(abs(x))
def inverse(x):
return 1/x if x != 0 else 1
def sqr(x):
return x**2
def cube(x):
return x**3
def v_f1(x, y):
return abs(x)**y if x != 0 else 1
def v_f2(x, y):
return x+y
def id(x):
return x
def cos(x):
return math.cos(x) if abs(x) < float('inf') else 0
def sin(x):
return math.sin(x) if abs(x) < float('inf') else 0
def tan(x):
try:
return math.tan(x)
except ValueError:
return 0
def get_primitive_set_for_benchmark(benchmark_name: str, num_variables: int):
""" Creates and returns the primitive sets for given benchmark based on its name
:param benchmark_name: the name of the benchmark
:param num_variables: number of variables in this benchmark
:return: the primitive set for the benchmark
"""
if benchmark_name.startswith('keijzer'):
pset = gp.PrimitiveSet('MAIN', num_variables)
pset.addPrimitive(operator.add, 2)
pset.addPrimitive(operator.mul, 2)
pset.addPrimitive(inverse, 1)
pset.addPrimitive(operator.neg, 1)
pset.addPrimitive(safesqrt, 1)
pset.addEphemeralConstant('keijzer_const', lambda: random.gauss(0, 5))
return pset
# in fact, all the numbers in the following are float, the int is used only to ensure that the constants are used
# only inside the functions
if benchmark_name.startswith('vladislavleva-4'):
pset = gp.PrimitiveSetTyped('MAIN', [float]*num_variables, float)
pset.addPrimitive(operator.add, [float, float], float)
pset.addPrimitive(operator.sub, [float, float], float)
pset.addPrimitive(operator.mul, [float, float], float)
pset.addPrimitive(safediv, [float, float], float)
pset.addPrimitive(sqr, [float], float, name='sqr')
pset.addPrimitive(v_f1, [float, int], float, name='V_F1')
pset.addPrimitive(v_f2, [float, int], float, name='V_F2')
pset.addPrimitive(operator.mul, [float, int], float, name='V_F3')
pset.addEphemeralConstant('vf_const', lambda: random.uniform(-5, 5), int)
pset.addPrimitive(id, [int], int, name='id')
return pset
if benchmark_name.startswith('nguyen') or benchmark_name.startswith('pagie'):
pset = gp.PrimitiveSet('MAIN', num_variables)
pset.addPrimitive(operator.add, 2)
pset.addPrimitive(operator.sub, 2)
pset.addPrimitive(operator.mul, 2)
pset.addPrimitive(safediv, 2)
pset.addPrimitive(math.exp, 1)
pset.addPrimitive(logabs, 1)
pset.addPrimitive(cos, 1)
pset.addPrimitive(sin, 1)
return pset
if benchmark_name.startswith('korns'):
pset = gp.PrimitiveSet('MAIN', num_variables)
pset.addPrimitive(operator.add, 2)
pset.addPrimitive(operator.sub, 2)
pset.addPrimitive(operator.mul, 2)
pset.addPrimitive(safediv, 2)
pset.addPrimitive(math.exp, 1)
pset.addPrimitive(logabs, 1)
pset.addPrimitive(cos, 1)
pset.addPrimitive(sin, 1)
pset.addPrimitive(sqr, 1, name='square')
pset.addPrimitive(cube, 1, name='cube')
pset.addPrimitive(inverse, 1, name='inverse')
pset.addPrimitive(tan, 1)
pset.addPrimitive(math.tanh, 1)
pset.addEphemeralConstant('korns_const', lambda: random.uniform(-1e10, 1e10))
return pset
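# Usage sketch (assumes DEAP is installed; the benchmark name and arity are illustrative):
if __name__ == "__main__":
    pset = get_primitive_set_for_benchmark('nguyen-7', 1)
    expr = gp.genHalfAndHalf(pset, min_=1, max_=3)
    tree = gp.PrimitiveTree(expr)
    func = gp.compile(tree, pset)
    print(tree)       # a random expression such as add(sin(ARG0), ARG0)
    print(func(0.5))  # evaluate it at x = 0.5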
|
from covid_model_seiir_pipeline.pipeline.regression.task.beta_regression import (
beta_regression,
)
from covid_model_seiir_pipeline.pipeline.regression.task.hospital_correction_factors import (
hospital_correction_factors,
)
|
num = cont = soma = 0
while True:
    num = int(input('Digite um numero (999 para parar): '))
if num !=999:
soma = soma + num
cont +=1
else:
break
print(f'A soma dos {cont} valores foi {soma}')
print("FIM")
|
import numpy as np
from matplotlib import pylab as plt
from tqdm import tqdm
def K(x):
left = 1 / np.sqrt(2 * np.pi)
right = x * x / 2
return left * np.exp(- right)
def hat_f(x, D, h):
N = D.shape[0]
k = [K((x - w) / h) for w in D]
return sum(k) / (N * h)
def hat_f_k(x, D, k):
_ = np.fabs(D - x)
_.sort()
h = _[k]
N = D.shape[0]
k = [K((x - w) / h) for w in D]
return 1 / (N * h) * sum(k)
D = [list(map(float, x.strip().split())) for x in open("a1882_25.dat").readlines()]
D = np.array(D)
D = D[:, 2]
D = D[D <= 0.2]
x = np.linspace(D.min(), D.max(), D.shape[0])
plt.plot(x, [hat_f_k(_, D, 100) for _ in tqdm(x)])
plt.xlabel("x")
plt.ylabel("Estimación de f")
plt.savefig("estimador_knn.png")
|
# -*- coding: utf-8 -*-
"""
Balanced Error Rate error functions
"""
__author__ = """Giovanni Colavizza"""
import numpy as np
def BER(yn, ynhat):
"""
Implementation of Balanced Error Rate
:param yn: ground truth
:param ynhat: predicted values
:return: error score
"""
y = list()
for z in yn:
y.extend(z)
yhat = list()
for z in ynhat:
yhat.extend(z)
yn = np.array(y)
ynhat = np.array(yhat)
c = set(list(yn) + list(ynhat)) # set of unique classes
error = 0.0
numClasses = 0
for C in c:
        if np.sum(np.array(yn == C)) != 0:
error += np.sum(np.array(yn == C) * np.array(yn != ynhat))/float(np.sum(np.array(yn == C)))
numClasses += 1
if numClasses == 0: return 1.0
error = error/numClasses
return error
def BER_vector(yn, ynhat):
"""
Implementation of Balanced Error Rate, returns a vector with errors for each class
:param yn: ground truth
:param ynhat: predicted values
:return: error score vector, scores for each class
"""
y = list()
for z in yn:
y.extend(z)
yhat = list()
for z in ynhat:
yhat.extend(z)
yn = np.array(y)
ynhat = np.array(yhat)
c = set(list(yn) + list(ynhat)) # set of unique classes
error = list()
classes = list()
for C in c:
if(np.sum(np.array(yn == C)) != 0):
error.append(np.sum(np.array(yn == C) * np.array(yn != ynhat))/float(np.sum(np.array(yn == C))))
classes.append(C)
return error, classes
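# Usage sketch with toy data (each argument is a list of per-sequence label lists,
# which is the shape both functions flatten internally):
if __name__ == "__main__":
    y_true = [[0, 0, 1, 1]]
    y_pred = [[0, 1, 1, 1]]
    print(BER(y_true, y_pred))         # -> 0.25 (mean of the per-class error rates)
    print(BER_vector(y_true, y_pred))  # -> per-class errors and the matching class labels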
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import scrapy
from scrapy.exporters import JsonItemExporter
from scrapy.exporters import CsvItemExporter
class FundaspiderPipeline(object):
def process_item(self, item, spider):
return item
class JaapNLSpiderPipeline(object):
def open_spider(self, spider):
self.file = open('items.jl', 'wb')
def close_spider(self, spider):
self.file.close()
def process_item(self, item, spider):
print('++++++++'*75)
# pause
# print(item)
JsonItemExporter('items.jl')
# line = json.dumps(dict(item)) + "\n"
# self.file.write(line)
return item
class JsonPipeline(object):
def __init__(self):
self.file = open("run_dennis_goeie.json", 'wb')
self.exporter = JsonItemExporter(self.file, encoding='utf-8', ensure_ascii=False)
self.exporter.start_exporting()
def close_spider(self, spider):
self.exporter.finish_exporting()
self.file.close()
def process_item(self, item, spider):
self.exporter.export_item(item)
return item
class CsvPipeline(object):
def __init__(self):
self.file = open("booksdata.csv", 'wb')
        self.exporter = CsvItemExporter(self.file)
self.exporter.start_exporting()
def close_spider(self, spider):
self.exporter.finish_exporting()
self.file.close()
def process_item(self, item, spider):
self.exporter.export_item(item)
return item
# def process_item(self, item, spider):
# print('-'*50)
# scrapy.exporters.JsonItemExporter('test.json').export_item(item)
# return item
|
def evaluate(pos_df):
"""
position.csv
------------
X,Y,BLOCK,TABLE,PARTICIPANT,GENRE_CODE
0,0,C,27,2,Red
1,0,C,26,1,Red
2,0,C,25,37,Red
"""
    # evaluation score
value = 0
# block_dict[block][genre_code] = value
block_dict = {}
for _index, row in pos_df.iterrows():
# x = row["X"]
# y = row["Y"]
block = row["BLOCK"]
# table_id = row["TABLE"]
# participant_id = row["PARTICIPANT"]
genre_code = row["GENRE_CODE"]
if not(block in block_dict):
block_dict[block] = {}
if not(genre_code in block_dict[block]):
block_dict[block][genre_code] = 0
block_dict[block][genre_code] += 1
    # Tally: the more pieces of the same color are grouped in a block, the higher the score.
for _block_name, genre_code_dict in block_dict.items():
for _genre_code_name, count in genre_code_dict.items():
value += count ** 2
break
    # Tally: order by table number; when the same color appears consecutively, add points equal to the length of the run.
    # A run does not continue across block boundaries.
continue_bonus = 0
sorted_pos_df = pos_df.sort_values(by=["TABLE"], ascending=True)
# print(sorted_pos_df.head(5))
table_ordered_list = sorted_pos_df[[
"TABLE", "BLOCK", "GENRE_CODE"]].values.tolist()
# print("table_ordered_list: {}".format(table_ordered_list))
prev_block = None
prev_genre_code = None
for entry in table_ordered_list:
if prev_genre_code == entry[2] and prev_block == entry[1]:
continue_bonus += 1
value += continue_bonus
# print("prev_genre_code: {}, entry[2]: {}, value: {}".format(
# prev_genre_code, entry[2], value))
else:
prev_genre_code = entry[2]
continue_bonus = 0
prev_block = entry[1]
return value
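# Usage sketch (hypothetical rows in the position.csv layout shown in the docstring):
if __name__ == "__main__":
    import pandas as pd
    rows = [
        {"X": 0, "Y": 0, "BLOCK": "C", "TABLE": 27, "PARTICIPANT": 2, "GENRE_CODE": "Red"},
        {"X": 1, "Y": 0, "BLOCK": "C", "TABLE": 26, "PARTICIPANT": 1, "GENRE_CODE": "Red"},
        {"X": 2, "Y": 0, "BLOCK": "C", "TABLE": 25, "PARTICIPANT": 37, "GENRE_CODE": "Blue"},
    ]
    # 2 Reds + 1 Blue in block C gives 2**2 + 1**2 = 5, plus 1 for the Red run on tables 26-27
    print(evaluate(pd.DataFrame(rows)))  # -> 6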
|
class Node:
def __init__(self,data):
self.data = data
self.next = None
class Stack:
def __init__(self):
self.head = None
def isempty(self):
if self.head == None:
return True
else:
return False
def push(self,data):
if self.head == None:
self.head=Node(data)
else:
new_node = Node(data)
new_node.next = self.head
self.head = new_node
def pop(self):
if self.isempty():
print("Stack Underflow!")
return None
else:
pop_node = self.head
self.head = self.head.next
pop_node.next = None
return pop_node.data
def peek(self):
if self.isempty():
return None
else:
return self.head.data
def display(self):
iter_node = self.head
if self.isempty():
print("Stack Underflow!")
else:
while(iter_node != None):
print(iter_node.data,"->",end = " ")
iter_node = iter_node.next
return
# Driver code
MyStack = Stack()
MyStack.push(1)
MyStack.push(2)
MyStack.push(3)
MyStack.push(4)
# Display stack elements
MyStack.display()
# Print top element of stack
print("\nStack Top: ",MyStack.peek())
# Delete top elements of stack
MyStack.pop()
MyStack.pop()
# Display stack elements
MyStack.display()
print("\nStack Top: ", MyStack.peek())
# Output:
# 4 -> 3 -> 2 -> 1 ->
# Stack Top: 4
# 2 -> 1 ->
# Stack Top: 2
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods for Mentor Settings."""
__author__ = 'Thejesh GN (tgn@google.com)'
from common import tags
from common.schema_fields import FieldArray
from common.schema_fields import FieldRegistry
from common.schema_fields import SchemaField
from models import courses
from modules.dashboard import tabs
from modules.mentor import base
def mentor_key(key):
return '%s:%s' % (base.MentorBase.MENTOR_SECTION, key)
class MentorSettings(base.MentorBase):
enable_mentor_support = SchemaField(
mentor_key(base.MentorBase.ENABLE_KEY),'Enable Mentor Support', 'boolean')
@classmethod
def get_fields(cls):
mentor_fields = set()
mentor_fields.add(lambda c: cls.enable_mentor_support)
return mentor_fields
@classmethod
def register(cls):
courses.DEFAULT_COURSE_YAML_DICT[cls.MENTOR_SECTION] = dict()
courses.DEFAULT_COURSE_YAML_DICT[cls.MENTOR_SECTION]['enable_mentor_support'] = False
courses.DEFAULT_EXISTING_COURSE_YAML_DICT[cls.MENTOR_SECTION] = {
'enable_mentor_support': False }
courses.Course.OPTIONS_SCHEMA_PROVIDERS[
cls.MENTOR_SECTION] += cls.get_fields()
tabs.Registry.register('settings', 'mentor', 'Mentor', cls.MENTOR_SECTION)
@classmethod
def unregister(cls):
for field in cls.get_fields():
courses.Course.OPTIONS_SCHEMA_PROVIDERS[
cls.MENTOR_SECTION].remove(field)
|
""" An example message family definition.
"""
from typing import Dict, Any
from message import Message
from router import Router
from serializer import JSONSerializer as Serializer
class MESSAGE_TYPES:
SEND_MESSAGE = 'urn:ssi:message:sovrin.org/testing/1.0/send_message_command'
def is_valid_send_message(msg: Message):
""" Validate that a given message has the correct structure for a "send_message_command."
"""
expected_attributes = [
'type',
'to',
'content'
]
for attribute in expected_attributes:
if attribute not in msg:
return False
return True
# -- Handlers --
# These handlers are used exclusively in the included agent,
# not the test-suite.
async def handle_send_message(msg: Message, **kwargs):
""" Message handler for send_message_command.
"""
transport = kwargs['transport']
if is_valid_send_message(msg):
await transport.send(msg.to, Serializer.pack(msg.content))
return
print('invalid send message command dropped')
# -- Routes --
# These routes are used exclusively in the included agent, not the test-suite.
async def register_routes(router: Router):
""" Route registration for send_message_command.
"""
await router.register(MESSAGE_TYPES.SEND_MESSAGE, handle_send_message)
|
import sys
def sum_triangular_numbers(n):
if n <= 0:
return 0
else:
t = [int((i + 1) * (i + 2) / 2) for i in range(n)]
return sum(t)
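# Illustrative cross-check (not part of the original script): the sum of the first n
# triangular numbers has the closed form n*(n+1)*(n+2)/6, e.g. n=4 gives 1+3+6+10 = 20.
def sum_triangular_numbers_closed_form(n):
    return n * (n + 1) * (n + 2) // 6 if n > 0 else 0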
if __name__ == "__main__":
if len(sys.argv) == 2:
print(sum_triangular_numbers(n=int(sys.argv[1])))
else:
sys.exit(1)
|
#!/usr/bin/env python3
import re
from collections import defaultdict, deque
class Compy:
def __init__(self, mem):
self.memory = list(mem)
self.reg = defaultdict(int)
self.pc = 0
self.n_mulls = 0
def get_value(self, value):
if type(value) == int:
return value
else:
return self.reg[value]
def still_in_mem(self):
        return 0 <= self.pc < len(self.memory)
def step(self):
if not self.still_in_mem():
return
        instruction = self.memory[self.pc]
inst = instruction[0]
arg1 = instruction[1]
arg2 = instruction[2]
if inst == 'set':
self.reg[arg1] = self.get_value(arg2)
self.pc += 1
elif inst == 'add':
self.reg[arg1] += self.get_value(arg2)
self.pc += 1
elif inst == 'sub':
self.reg[arg1] -= self.get_value(arg2)
self.pc += 1
elif inst == 'mul':
self.reg[arg1] *= self.get_value(arg2)
self.pc += 1
self.n_mulls += 1
elif inst == 'jnz':
if self.get_value(arg1) != 0:
self.pc += self.get_value(arg2)
else:
self.pc += 1
else:
print("Unknown instruction", inst)
assert False
def run(self):
while self.still_in_mem():
self.step()
memory = []
with open('input.txt', 'r') as f:
inst_re = re.compile(r'(\w{3})\s+(\w)\s+(.*)')
for line in f:
m = inst_re.match(line)
if m[2].startswith('-') or m[2].isnumeric():
m_2 = int(m[2])
else:
m_2 = m[2]
if m[3].startswith('-') or m[3].isnumeric():
m_3 = int(m[3])
else:
m_3 = m[3]
memory.append((m[1], m_2, m_3))
c = Compy(memory)
c.run()
print(c.n_mulls)
|
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paasta_tools.autoscaling import load_boost
from paasta_tools.cli.utils import execute_paasta_cluster_boost_on_remote_master
from paasta_tools.cli.utils import lazy_choices_completer
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import list_clusters
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import paasta_print
def add_subparser(subparsers):
boost_parser = subparsers.add_parser(
"boost",
help="Set, print the status, or clear a capacity boost for a given region in a PaaSTA cluster",
description=(
"'paasta boost' is used to temporary provision more capacity in a given cluster "
"It operates by ssh'ing to a Mesos master of a remote cluster, and "
"interacting with the boost in the local zookeeper cluster. If you set or clear "
"a boost, you may want to run the cluster autoscaler manually afterward."
),
epilog=(
"The boost command may time out during heavy load. When that happens "
"users may execute the ssh command directly, in order to bypass the timeout."
),
)
boost_parser.add_argument(
"-v",
"--verbose",
action="count",
dest="verbose",
default=0,
help="""Print out more output regarding the state of the cluster.
Multiple v options increase verbosity. Maximum is 3.""",
)
boost_parser.add_argument(
"-c",
"--cluster",
type=str,
required=True,
help="""Paasta cluster(s) to boost. This option can take comma separated values.
If auto-completion doesn't work, you can get a list of cluster with `paasta list-clusters'""",
).completer = lazy_choices_completer(list_clusters)
boost_parser.add_argument(
"--soa-dir",
dest="soa_dir",
metavar="SOA_DIR",
default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
boost_parser.add_argument(
"-p",
"--pool",
type=str,
default="default",
help="Name of the pool you want to increase the capacity. Default is 'default' pool.",
)
boost_parser.add_argument(
"-b",
"--boost",
type=float,
default=load_boost.DEFAULT_BOOST_FACTOR,
help="Boost factor to apply. Default is 1.5. A big failover should be 2, 3 is the max.",
)
boost_parser.add_argument(
"-d",
"--duration",
type=int,
default=load_boost.DEFAULT_BOOST_DURATION,
help="Duration of the capacity boost in minutes. Default is 40",
)
boost_parser.add_argument(
"-f",
"--force",
action="store_true",
dest="override",
help="Replace an existing boost. Default is false",
)
boost_parser.add_argument(
"action",
choices=["set", "status", "clear"],
help="You can view the status, set or clear a boost.",
)
boost_parser.set_defaults(command=paasta_boost)
def paasta_boost(args):
soa_dir = args.soa_dir
system_paasta_config = load_system_paasta_config()
all_clusters = list_clusters(soa_dir=soa_dir)
clusters = args.cluster.split(",")
for cluster in clusters:
if cluster not in all_clusters:
paasta_print(
f"Error: {cluster} doesn't look like a valid cluster. "
+ "Here is a list of valid paasta clusters:\n"
+ "\n".join(all_clusters)
)
return 1
return_code, output = execute_paasta_cluster_boost_on_remote_master(
clusters=clusters,
system_paasta_config=system_paasta_config,
action=args.action,
pool=args.pool,
duration=args.duration if args.action == "set" else None,
override=args.override if args.action == "set" else None,
boost=args.boost if args.action == "set" else None,
verbose=args.verbose,
)
paasta_print(output)
return return_code
|
# creating a client for connecting to the Python server
import socket
host = '127.0.0.1' # same address as the server
porta = 8800 # same port as the server
soquete = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
envio = (host,porta)
soquete.connect(envio)
print('Digite: S e pressione ENTER para encerrar...')
print('DIGITE A MENSAGEM: ')
mensagem = input()
while mensagem not in ('s','S'):
soquete.send(str(mensagem).encode())
mensagem = input()
soquete.close()
|
from typing import List
import fastapi
from models.instrument import Instrument
router = fastapi.APIRouter()
@router.get('/instruments')
def get_instruments() -> List[Instrument]:
pass
@router.get('/instrument/{beamline}')
def read_beamline(beamline: str):
# TODO: Validate beamline string is valid
return {"name": "TST"}
@router.get('/instrument/{beamline}/{endstation}')
async def instrument() -> List[Instrument]:
pass
|
# -*- coding: utf-8 -*-
# This file is part of the Ingram Micro Cloud Blue Connect connect-cli.
# Copyright (c) 2019-2021 Ingram Micro. All Rights Reserved.
ITEMS_COLS_HEADERS = {
'A': 'ID',
'B': 'MPN',
'C': 'Action',
'D': 'Name',
'E': 'Description',
'F': 'Type',
'G': 'Precision',
'H': 'Unit',
'I': 'Billing Period',
'J': 'Commitment',
'K': 'Status',
'L': 'Created',
'M': 'Modified',
}
PARAMS_COLS_HEADERS = {
'A': 'Verbose ID',
'B': 'ID',
'C': 'Action',
'D': 'Title',
'E': 'Description',
'F': 'Phase',
'G': 'Scope',
'H': 'Type',
'I': 'Required',
'J': 'Unique',
'K': 'Hidden',
'L': 'JSON Properties',
'M': 'Created',
'N': 'Modified',
}
MEDIA_COLS_HEADERS = {
'A': 'Position',
'B': 'ID',
'C': 'Action',
'D': 'Type',
'E': 'Image File',
'F': 'Video URL Location',
}
CAPABILITIES_COLS_HEADERS = {
'A': 'Capability',
'B': 'Action',
'C': 'Value',
}
STATIC_LINK_HEADERS = {
'A': 'Type',
'B': 'Title',
'C': 'Action',
'D': 'Url',
}
TEMPLATES_HEADERS = {
'A': 'ID',
'B': 'Title',
'C': 'Action',
'D': 'Scope',
'E': 'Type',
'F': 'Content',
'G': 'Created',
'H': 'Modified',
}
CONFIGURATION_HEADERS = {
'A': 'ID',
'B': 'Parameter',
'C': 'Scope',
'D': 'Action',
'E': 'Item ID',
'F': 'Item Name',
'G': 'Marketplace ID',
'H': 'Marketplace Name',
'I': 'Value',
}
ACTIONS_HEADERS = {
'A': 'Verbose ID',
'B': 'ID',
'C': 'Action',
'D': 'Name',
'E': 'Title',
'F': 'Description',
'G': 'Scope',
'H': 'Created',
'I': 'Modified',
}
PARAM_TYPES = [
'email',
'address',
'checkbox',
'choice',
'domain',
'subdomain',
'url',
'dropdown',
'object',
'password',
'phone',
'text',
]
PRECISIONS = ('integer', 'decimal(1)', 'decimal(2)', 'decimal(4)', 'decimal(8)')
COMMITMENT = ('-', '1 year', '2 years', '3 years', '4 years', '5 years')
BILLING_PERIOD = (
'onetime',
'monthly',
'yearly',
'2 years',
'3 years',
'4 years',
'5 years',
)
CAPABILITIES = (
'Pay-as-you-go support and schema',
'Pay-as-you-go dynamic items support',
'Pay-as-you-go future charges support',
'Consumption reporting for Reservation Items',
'Dynamic Validation of the Draft Requests',
'Dynamic Validation of the Inquiring Form',
'Reseller Authorization Level',
'Tier Accounts Sync',
'Administrative Hold',
)
|
from typing import List, Optional
import crud
import models
import schemas
from core.database import get_db
from core.logger import get_logger
from exceptions.core import APIException
from exceptions.error_messages import ErrorMessage
from fastapi import APIRouter, Depends
from schemas.core import FilterQueryIn, PagingQueryIn
from sqlalchemy import func
from sqlalchemy.orm import Session
logger = get_logger(__name__)
router = APIRouter()
@router.get("/count")
def get_count(db: Session = Depends(get_db)) -> int:
    return db.query(func.count(models.Job.id)).scalar()
@router.get("/{id}", response_model=schemas.JobResponse)
def get_job(
id: str, include_deleted: bool = False, db: Session = Depends(get_db)
) -> models.Job:
job = crud.job.get(db, id=id, include_deleted=include_deleted)
if not job:
raise APIException(ErrorMessage.ID_NOT_FOUND)
return job
@router.get("", response_model=schemas.JobsPagedResponse)
def get_jobs(
q: Optional[str] = None,
paging: PagingQueryIn = Depends(),
filter_params: FilterQueryIn = Depends(),
db: Session = Depends(get_db),
) -> schemas.JobsPagedResponse:
ALLOWED_COLUMNS = ["title", "created_at", "updated_at"]
if not filter_params.validate_allowed_sort_column(ALLOWED_COLUMNS):
raise APIException(ErrorMessage.COLUMN_NOT_ALLOWED)
if q:
query = db.query(models.Job).filter(models.Job.title.like(f"%{q}%"))
else:
query = db.query(models.Job)
# if filter and filter.sort and (filter.start or filter.end):
# filter_dict = [
# {"model": "Job", "field": filter.sort, "op": ">=", "value": filter.start},
# {"model": "Job", "field": filter.sort, "op": "<=", "value": filter.end}
# ]
# query = apply_filters(query, filter_dict, do_auto_join=False)
# if filter and filter.sort:
# sort_dict = [{
# "model": "Job", "field": filter.sort, "direction": filter.direction
    # }]
# query = apply_sort(query, sort_dict)
return crud.job.get_paged_list(
db, paging=paging, filtered_query=query, filter_params=filter_params
)
@router.post("", response_model=schemas.JobResponse)
def create_job(data_in: schemas.JobCreate, db: Session = Depends(get_db)) -> models.Job:
return crud.job.create(db, data_in)
@router.put("/{id}", response_model=schemas.JobResponse)
def update(
id: str,
data_in: schemas.JobUpdate,
db: Session = Depends(get_db),
) -> models.Job:
job = db.query(models.Job).filter_by(id=id).first()
if not job:
raise APIException(ErrorMessage.ID_NOT_FOUND)
return crud.job.update(db, db_obj=job, obj_in=data_in)
@router.delete("/{id}", response_model=schemas.JobResponse)
def delete(
id: str,
db: Session = Depends(get_db),
) -> models.Job:
job = db.query(models.Job).filter_by(id=id).first()
if not job:
raise APIException(ErrorMessage.ID_NOT_FOUND)
return crud.job.delete(db, db_obj=job)
|
from __future__ import unicode_literals
from moto.core import BaseBackend, BaseModel
from moto.compat import OrderedDict
from .exceptions import DatabaseAlreadyExistsException, TableAlreadyExistsException
class GlueBackend(BaseBackend):
def __init__(self):
self.databases = OrderedDict()
def create_database(self, database_name):
if database_name in self.databases:
raise DatabaseAlreadyExistsException()
database = FakeDatabase(database_name)
self.databases[database_name] = database
return database
def get_database(self, database_name):
return self.databases[database_name]
def create_table(self, database_name, table_name, table_input):
database = self.get_database(database_name)
if table_name in database.tables:
raise TableAlreadyExistsException()
table = FakeTable(database_name, table_name, table_input)
database.tables[table_name] = table
return table
def get_table(self, database_name, table_name):
database = self.get_database(database_name)
return database.tables[table_name]
def get_tables(self, database_name):
database = self.get_database(database_name)
return [table for table_name, table in database.tables.items()]
class FakeDatabase(BaseModel):
def __init__(self, database_name):
self.name = database_name
self.tables = OrderedDict()
class FakeTable(BaseModel):
def __init__(self, database_name, table_name, table_input):
self.database_name = database_name
self.name = table_name
self.table_input = table_input
self.storage_descriptor = self.table_input.get('StorageDescriptor', {})
self.partition_keys = self.table_input.get('PartitionKeys', [])
glue_backend = GlueBackend()
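if __name__ == "__main__":
    # Illustrative sketch: exercising the in-memory backend directly; the table_input dict
    # mirrors the small subset of the Glue API shape that FakeTable reads.
    backend = GlueBackend()
    backend.create_database("analytics")
    backend.create_table("analytics", "events",
                         {"StorageDescriptor": {"Columns": []}, "PartitionKeys": []})
    print([tbl.name for tbl in backend.get_tables("analytics")])  # -> ['events']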
|
import torch.nn as nn
from collections import OrderedDict
class VGGFace(nn.Module):
def __init__(self):
super(VGGFace, self).__init__()
self.features = nn.ModuleDict(OrderedDict(
{
# === Block 1 ===
'conv_1_1': nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, padding=1),
'relu_1_1': nn.ReLU(inplace=True),
'conv_1_2': nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1),
'relu_1_2': nn.ReLU(inplace=True),
'maxp_1_2': nn.MaxPool2d(kernel_size=2, stride=2),
# === Block 2 ===
'conv_2_1': nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
'relu_2_1': nn.ReLU(inplace=True),
'conv_2_2': nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
'relu_2_2': nn.ReLU(inplace=True),
'maxp_2_2': nn.MaxPool2d(kernel_size=2, stride=2),
# === Block 3 ===
'conv_3_1': nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
'relu_3_1': nn.ReLU(inplace=True),
'conv_3_2': nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
'relu_3_2': nn.ReLU(inplace=True),
'conv_3_3': nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
'relu_3_3': nn.ReLU(inplace=True),
'maxp_3_3': nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
# === Block 4 ===
'conv_4_1': nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, padding=1),
'relu_4_1': nn.ReLU(inplace=True),
'conv_4_2': nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),
'relu_4_2': nn.ReLU(inplace=True),
'conv_4_3': nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),
'relu_4_3': nn.ReLU(inplace=True),
'maxp_4_3': nn.MaxPool2d(kernel_size=2, stride=2),
# === Block 5 ===
'conv_5_1': nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),
'relu_5_1': nn.ReLU(inplace=True),
'conv_5_2': nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),
'relu_5_2': nn.ReLU(inplace=True),
'conv_5_3': nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),
'relu_5_3': nn.ReLU(inplace=True),
'maxp_5_3': nn.MaxPool2d(kernel_size=2, stride=2)
}))
self.fc = nn.ModuleDict(OrderedDict(
{
'fc6': nn.Linear(in_features=512 * 7 * 7, out_features=4096),
'fc6-relu': nn.ReLU(inplace=True),
'fc6-dropout': nn.Dropout(p=0.5),
'fc7': nn.Linear(in_features=4096, out_features=4096),
'fc7-relu': nn.ReLU(inplace=True),
'fc7-dropout': nn.Dropout(p=0.5),
'fc8': nn.Linear(in_features=4096, out_features=2622),
}))
def forward(self, x):
# Forward through feature layers
for k, layer in self.features.items():
x = layer(x)
# Flatten convolution outputs
x = x.view(x.size(0), -1)
# Forward through FC layers
for k, layer in self.fc.items():
x = layer(x)
return x
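if __name__ == "__main__":
    # Illustrative sketch: fc6 expects 512*7*7 features, which corresponds to a 3x224x224
    # input after the five max-pooling stages.
    import torch
    model = VGGFace()
    out = model(torch.randn(1, 3, 224, 224))
    print(out.shape)  # -> torch.Size([1, 2622])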
|
from .detecttrend import detecttrend
from .vizplot import *
from .maxtrend import getmaxtrend
from . import version
__version__ = version.__version__
|
import flask
from flask import Flask
from flask.globals import request
from flask.json import JSONEncoder
from statscontroller import StatsController
import hero
from flask.helpers import url_for
class CustomEncoder(JSONEncoder):
def default(self, obj):
encoded = {}
for attr in dir(obj):
if not attr.startswith("_"):
encoded[attr] = getattr(obj, attr)
return encoded
# Production build
# This should be uncommented in production
application = Flask(__name__)
# Development build
# This should be uncommented in develop
# application = Flask(__name__, static_url_path='')
# application.static_folder = "web"
application.json_encoder = CustomEncoder
@application.route('/')
def landing_page():
return flask.render_template('index.html', request=request.args)
@application.route('/redirect', methods=['GET'])
def redirect_picks():
# Inform user if id is wrong
return flask.redirect(url_for('get_suggestions',
player_id=request.values['id'],
sample=request.values['sample'],
allies=request.values['allies'],
mode=request.values['mode'],
sortOrder=request.values['sort'],
hero_id=request.values.get('heroPick'),
query=request.values.get('query')))
@application.route('/<player_id>/suggestions')
def get_suggestions(player_id):
try:
controller = StatsController(player_id, request.args)
result = controller.get_suggestions(request.args.get('sortOrder'), request.args.get('query'), request.args.get('hero_id'))
except TypeError:
flask.abort(422)
if request.headers.get('Content-Type') == 'application/json':
return flask.jsonify(result)
elif request.accept_mimetypes.accept_html:
return flask.render_template('suggestions.html', result=result, id=player_id, mode=request.args.get('mode'),
query=request.query_string.decode('UTF-8'))
else:
flask.abort(415)
@application.route('/suggestions')
def empty_suggestions():
return flask.redirect(url_for('landing_page'))
@application.route('/<player_id>/synergies/<hero_id>')
def get_synergies(player_id, hero_id):
try:
controller = StatsController(player_id, request.args)
with_result = controller.get_suggestions(request.args.get('sortOrder'), '&with_hero_id=', hero_id)
against_result = controller.get_suggestions(request.args.get('sortOrder'), '&against_hero_id=', hero_id)
except (TypeError):
flask.abort(422)
if request.accept_mimetypes.accept_html:
return flask.render_template('synergies.html', with_result=with_result, against_result=against_result,
id=player_id, synergy_id=hero_id, mode=request.args.get('mode'),
query=request.query_string.decode('UTF-8'),
heroes=sorted(hero.HERO_INFORMATION.values(), key=lambda x: x['localized_name']))
else:
flask.abort(415)
@application.route('/<player_id>/synergies/redirect')
def redirect_synergies(player_id):
return flask.redirect(url_for('get_synergies',
player_id=player_id,
hero_id=request.values['synergy_id']) + '?' + request.values['query'])
@application.route('/heroes')
def get_heroes():
heroes = sorted(hero.HERO_INFORMATION.values(), key=lambda h: h['localized_name'])
return flask.jsonify(heroes)
if __name__ == '__main__':
application.run()
|
# Importing Libraries:
import pandas as pd
import numpy as np
import pickle
# for displaying all feature from dataset:
pd.pandas.set_option('display.max_columns', None)
# Reading Dataset:
dataset = pd.read_csv("Liver_data.csv")
# Filling NaN Values of "Albumin_and_Globulin_Ratio" feature with Median:
dataset['Albumin_and_Globulin_Ratio'] = dataset['Albumin_and_Globulin_Ratio'].fillna(dataset['Albumin_and_Globulin_Ratio'].median())
# Label Encoding:
dataset['Gender'] = np.where(dataset['Gender']=='Male', 1,0)
# Droping 'Direct_Bilirubin' feature:
dataset = dataset.drop('Direct_Bilirubin', axis=1)
# Independent and Dependent Feature:
X = dataset.iloc[:, :-1]
y = dataset.iloc[:, -1]
# Train Test Split:
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y, test_size=0.3, random_state=33)
# RandomForestClassifier:
from sklearn.ensemble import RandomForestClassifier
RandomForest = RandomForestClassifier()
RandomForest = RandomForest.fit(X_train,y_train)
# Creating a pickle file for the classifier
filename = 'Liver.pkl'
pickle.dump(RandomForest, open(filename, 'wb'))
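# Illustrative check (not part of the original script): reload the pickled model and score
# it on the held-out split.
loaded_model = pickle.load(open(filename, 'rb'))
print("Hold-out accuracy:", loaded_model.score(X_test, y_test))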
|
# -*- coding: utf-8 -*-
from typing import List, Any
class TokenizedDataPage:
"""
Data transfer object that is used to pass results of paginated queries.
It contains items of retrieved page and optional total number of items.
Most often this object type is used to send responses to paginated queries.
Pagination parameters are defined by
:class:`TokenizedPagingParams <pip_services3_commons.data.TokenizedPagingParams.TokenizedPagingParams>` object.
    The `token` parameter in the TokenizedPagingParams defines where to start the search.
    The `take` parameter sets the number of items to return in the page.
And the optional `total` parameter tells to return total number of items in the query.
The data page returns a token that shall be passed to the next search as a starting point.
Remember: not all implementations support the `total` parameter
because its generation may lead to severe performance implications.
See :class:`PagingParams <pip_services3_commons.data.PagingParams.PagingParams>`
Example:
.. code-block:: python
page = my_data_client.get_data_by_filter(
"123",
FilterParams.from_tuples("completed", True),
TokenizedPagingParams(None, 100, True)
)
"""
def __init__(self, data: List[Any], token: str = None, total: int = None):
"""
Creates a new instance of data page and assigns its values.
:param data: a list of items from the retrieved page.
        :param token: (optional) a token to define a starting point for the next search.
:param total: (optional) a total number of objects in the result.
"""
# The total amount of items in a request.
self.total: int = total
# The starting point for the next search.
self.token: str = token
# The items of the retrieved page.
self.data: List[Any] = data
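if __name__ == "__main__":
    # Illustrative usage with hypothetical values:
    page = TokenizedDataPage(["item-1", "item-2"], token="next-page-token", total=42)
    print(page.data, page.token, page.total)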
|
from .RC4 import *
|
from .errors import *
from .plot import *
from .util import *
from . import exec
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
#
# CDS-ILS is free software; you can redistribute it and/or modify it under
# the terms of the MIT License; see LICENSE file for more details.
"""CDS-ILS migrator module."""
from cds_dojson.overdo import OverdoBase
serial_marc21 = OverdoBase(entry_point_models="cds_ils.migrator.serial_model")
journal_marc21 = OverdoBase(
entry_point_models="cds_ils.migrator.journal_model"
)
multipart_marc21 = OverdoBase(
entry_point_models="cds_ils.migrator.multipart_model"
)
|
import os
import json
import torch
import pickle
import numpy as np
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torcheras
from dataset import QANetDataset
from constants import device
from qanet.qanet import QANet
from utils import convert_tokens, evaluate
def variable_data(sample_batched, device):
x = sample_batched[0]
y = sample_batched[1]
if type(x) is list or type(x) is tuple:
for i, _x in enumerate(x):
x[i] = x[i].to(device)
else:
x = x.to(device)
if type(y) is list or type(y) is tuple:
for i, _y in enumerate(y):
y[i] = y[i].to(device)
else:
y = y.to(device)
return x, y
def evaluate_scores(y_true, y_pred, test_eval):
qa_id = y_true[1]
c_mask, q_mask = y_pred[2:]
y_p1 = F.softmax(y_pred[0], dim=-1)
y_p2 = F.softmax(y_pred[1], dim=-1)
p1 = []
p2 = []
p_matrix = torch.bmm(y_p1.unsqueeze(2), y_p2.unsqueeze(1))
for i in range(p_matrix.shape[0]):
p = torch.triu(p_matrix[i])
indexes = torch.argmax(p).item()
p1.append(indexes // p.shape[0])
p2.append(indexes % p.shape[0])
answer_dict, _ = convert_tokens(
test_eval, qa_id.tolist(), p1, p2)
metrics = evaluate(test_eval, answer_dict)
return metrics
def evaluate_model(params, dtype='test', model_folder='', model_epoch=''):
test_dataset = QANetDataset('data', dtype)
test_dataloader = DataLoader(test_dataset, batch_size=32, shuffle=True)
test_eval = pickle.load(open('data/' + dtype + '_eval.pkl', 'rb'))
word_emb_mat = np.array(pickle.load(open(os.path.join(params['target_dir'], 'word_emb_mat.pkl'), 'rb')),
dtype=np.float32)
char_emb_mat = np.array(pickle.load(open(os.path.join(params['target_dir'], 'char_emb_mat.pkl'), 'rb')),
dtype=np.float32)
qanet = QANet(params, word_emb_mat, char_emb_mat).to(device)
qanet = torcheras.Model(qanet, 'log/qanet')
qanet.load_model(model_folder, epoch=model_epoch, ema=True)
qanet = qanet.model
qanet.eval()
all_scores = {'em': 0, 'f1': 0}
with torch.no_grad():
for i_batch, sample_batched in enumerate(test_dataloader):
x, y_true = variable_data(sample_batched, device)
y_pred = qanet(x)
metrics = evaluate_scores(y_true, y_pred, test_eval)
print(metrics)
all_scores['em'] += metrics['exact_match']
all_scores['f1'] += metrics['f1']
        print('em', all_scores['em'] / (i_batch + 1), 'f1', all_scores['f1'] / (i_batch + 1))
if __name__ == '__main__':
params = json.load(open('params.json', 'r'))
model_folder = '2018_7_24_13_45_8_514568'
model_epoch = 25
evaluate_model(params, dtype='test', model_folder=model_folder, model_epoch=model_epoch)
|
import re
from copy import copy
from unidecode import unidecode
from django.db import models
from datagrowth.datatypes import DocumentBase
PRIVATE_PROPERTIES = ["pipeline", "from_youtube", "lowest_educational_level"]
class DocumentManager(models.Manager):
def build_from_seed(self, seed, collection=None, metadata_pipeline_key=None):
properties = copy(seed) # TODO: use setters that update the pipeline?
properties["id"] = seed["external_id"]
properties["language"] = {
"metadata": seed.get("language", None)
}
metadata_pipeline = properties.pop(metadata_pipeline_key, None)
document = Document(properties=properties, collection=collection, pipeline={"metadata": metadata_pipeline})
if collection:
document.dataset_version = collection.dataset_version
document.clean()
return document
class Document(DocumentBase):
objects = DocumentManager()
dataset_version = models.ForeignKey("DatasetVersion", blank=True, null=True, on_delete=models.CASCADE)
pipeline = models.JSONField(default=dict, blank=True)
extension = models.ForeignKey("core.Extension", null=True, on_delete=models.SET_NULL)
# NB: Collection foreign key is added by the base class
def get_language(self):
language = self.properties.get('language', None)
if language is None:
return
return language.get("metadata", "unk")
def get_search_document_extras(self, reference_id, title, text, video, material_types):
suggest_completion = []
if title:
suggest_completion += title.split(" ")
if text:
suggest_completion += text.split(" ")[:1000]
alpha_pattern = re.compile("[^a-zA-Z]+")
suggest_completion = [ # removes reading signs and acutes for autocomplete suggestions
alpha_pattern.sub("", unidecode(word))
for word in suggest_completion
]
extras = {
'_id': reference_id,
"language": self.get_language(),
'suggest_completion': suggest_completion,
'harvest_source': self.collection.name,
'text': text,
'suggest_phrase': text,
'video': video,
'material_types': material_types
}
return extras
def get_extension_extras(self):
extension_data = copy(self.extension.properties)
if "keywords" in extension_data:
extension_data["keywords"] = [entry["label"] for entry in extension_data["keywords"]]
themes = extension_data.pop("themes", None)
if themes:
extension_data["research_themes"] = [entry["label"] for entry in themes]
parents = extension_data.pop("parents", None)
if parents:
is_part_of = self.properties.get("is_part_of", [])
is_part_of += parents
is_part_of = list(set(is_part_of))
extension_data["is_part_of"] = is_part_of
children = extension_data.pop("children", None)
if children:
has_parts = self.properties.get("has_parts", [])
has_parts += children
has_parts = list(set(has_parts))
extension_data["has_parts"] = has_parts
return extension_data
def to_search(self):
if self.properties["state"] != "active":
yield {
"_id": self.properties["external_id"],
"_op_type": "delete"
}
return
elastic_base = copy(self.properties)
elastic_base.pop("language")
text = elastic_base.pop("text", None)
if text and len(text) >= 1000000:
text = " ".join(text.split(" ")[:10000])
if self.extension:
extension_details = self.get_extension_extras()
elastic_base.update(extension_details)
for private_property in PRIVATE_PROPERTIES:
elastic_base.pop(private_property, False)
video = elastic_base.pop("video", None)
material_types = elastic_base.pop("material_types", None) or ["unknown"]
elastic_details = self.get_search_document_extras(
self.properties["external_id"],
self.properties["title"],
text,
video,
material_types=material_types
)
elastic_details.update(elastic_base)
yield elastic_details
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-24 03:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('transactions', '0003_auto_20161117_0224'),
]
operations = [
migrations.AlterModelOptions(
name='change',
options={'verbose_name': 'Change', 'verbose_name_plural': 'Changes'},
),
migrations.AlterModelOptions(
name='debitscredits',
options={'verbose_name': 'DebitsCredit', 'verbose_name_plural': 'DebitsCredits'},
),
migrations.AddField(
model_name='abstracttransaction',
name='last_edited',
field=models.DateTimeField(auto_now=True, verbose_name='Last edited'),
),
]
|
""" graph conversions
"""
import autoparse.pattern as app
import autoparse.find as apf
from automol.util import dict_
from automol.convert.inchi import standard_form
from automol.convert.inchi import object_to_hardcoded_inchi_by_key
from automol.convert import _molfile
from automol.convert import _rdkit
from automol.convert import _util
from automol.graph._graph_dep import atom_keys
from automol.graph._graph_dep import bond_keys
from automol.graph._graph_dep import atom_symbols
from automol.graph._graph_dep import bond_orders
# getters
from automol.graph._graph_dep import without_dummy_atoms
from automol.graph._graph_dep import atom_bond_valences
from automol.graph._graph_dep import explicit
from automol.graph._graph_dep import atom_unsaturated_valences
# stereo
from automol.graph._graph_dep import has_stereo
from automol.graph._graph_dep import dominant_resonance
# dep
from automol.graph._embed_dep import fake_stereo_geometry
from automol.graph._embed_dep import geometry as embed_geometry
from automol.graph._embed_dep import backbone_isomorphic
from automol.graph.geom import coordinates
# graph => inchi
def inchi(gra, stereo=True):
""" Generate an InChI string from a molecular graph.
:param gra: molecular graph
:type gra: automol graph data structure
:param stereo: parameter to include stereochemistry information
:type stereo: bool
:rtype: str
"""
ich = object_to_hardcoded_inchi_by_key(
'graph', gra, comp=_compare)
if ich is None:
if not stereo or not has_stereo(gra):
ich, _ = inchi_with_sort_from_geometry(gra)
ich = standard_form(ich, stereo=stereo)
else:
gra = explicit(gra)
geo, geo_idx_dct = fake_stereo_geometry(gra)
ich, _ = inchi_with_sort_from_geometry(
gra, geo=geo, geo_idx_dct=geo_idx_dct)
return ich
def _compare(gra1, gra2):
""" Compare the backbone structure of two moleculare graphs.
:param gra1: molecular graph 1
:type gra1: automol graph data structure
:param gra2: molecular graph 2
:type gra2: automol graph data structure
:rtype: bool
"""
gra1 = without_dummy_atoms(gra1)
gra2 = without_dummy_atoms(gra2)
return backbone_isomorphic(gra1, gra2)
def inchi_with_sort_from_geometry(gra, geo=None, geo_idx_dct=None):
""" Generate an InChI string from a molecular graph.
If coordinates are passed in, they are used to determine stereo.
:param gra: molecular graph
:type gra: automol graph data structure
:param geo: molecular geometry
:type geo: automol geometry data structure
    :param geo_idx_dct: mapping from graph atom keys to geometry atom indices
    :type geo_idx_dct: dict[int: int]
:rtype: (str, tuple(int))
"""
gra = without_dummy_atoms(gra)
gra = dominant_resonance(gra)
atm_keys = sorted(atom_keys(gra))
bnd_keys = list(bond_keys(gra))
atm_syms = dict_.values_by_key(atom_symbols(gra), atm_keys)
atm_bnd_vlcs = dict_.values_by_key(
atom_bond_valences(gra), atm_keys)
atm_rad_vlcs = dict_.values_by_key(
atom_unsaturated_valences(gra), atm_keys)
bnd_ords = dict_.values_by_key(bond_orders(gra), bnd_keys)
if geo is not None:
assert geo_idx_dct is not None
atm_xyzs = coordinates(geo)
atm_xyzs = [atm_xyzs[geo_idx_dct[atm_key]] if atm_key in geo_idx_dct
else (0., 0., 0.) for atm_key in atm_keys]
else:
atm_xyzs = None
mlf, key_map_inv = _molfile.from_data(
atm_keys, bnd_keys, atm_syms, atm_bnd_vlcs, atm_rad_vlcs, bnd_ords,
atm_xyzs=atm_xyzs)
rdm = _rdkit.from_molfile(mlf)
ich, aux_info = _rdkit.to_inchi(rdm, with_aux_info=True)
nums = _parse_sort_order_from_aux_info(aux_info)
nums = tuple(map(key_map_inv.__getitem__, nums))
return ich, nums
def _parse_sort_order_from_aux_info(aux_info):
ptt = app.escape('/N:') + app.capturing(
app.series(app.UNSIGNED_INTEGER, ','))
num_str = apf.first_capture(ptt, aux_info)
nums = tuple(map(int, num_str.split(',')))
return nums
def geometry(gra):
""" Convert a molecular graph to a molecular geometry.
:param gra: molecular graph
:type gra: automol graph data structure
:rtype: automol molecular geometry data structure
"""
symbs = atom_symbols(gra)
if len(symbs) != 1:
gra = explicit(gra)
geo = embed_geometry(gra)
else:
symb = list(symbs.values())[0]
# symb = list(symbs.keys())[0]
geo = ((symb, (0.00, 0.00, 0.00)),)
return geo
def formula(gra):
""" Generate a stoichiometric formula dictionary from a molecular graph.
:param gra: molecular graph
:type gra: automol graph data structure
    :rtype: dict[str: int]
"""
gra = explicit(gra)
syms = atom_symbols(gra).values()
fml = _util.formula(syms)
return fml
#
#
# if __name__ == '__main__':
# import automol
#
# for ICH in [
# 'InChI=1S/C4H7O2/c1-3-4(2)6-5/h3-5H,1-2H2/t4-/m0/s1',
# 'InChI=1S/C4H7O/c1-4(2)3-5/h3-4H,1H2,2H3/t4-/m1/s1',
# 'InChI=1S/C5H7/c1-3-5-4-2/h1,5H,4H2,2H3',
# 'InChI=1S/C5H9/c1-4-5(2)3/h4-5H,1-2H2,3H3/t5-/m1/s1',
# 'InChI=1S/C5H5O/c1-2-3-4-5-6/h1-5H/b4-3+',
# 'InChI=1S/C5H7O/c1-5-3-2-4-6-5/h2-5H,1H3/t5-/m0/s1',
# 'InChI=1S/C6H11/c1-5(2)6(3)4/h5H,1,3H2,2,4H3/t5-/m1/s1',
# 'InChI=1S/C6H6O/c7-6-4-2-1-3-5-6/h1-2,4-5H,3H2',
# 'InChI=1S/C8H15O2/c1-7(2)5-8(3,4)6-10-9/h5,9H,3,6H2,1-2,'
# '4H3/t8-/m0/s1', ]:
# GEO = automol.inchi.geometry(ICH)
# print(automol.geom.string(GEO))
# print()
# GRA = automol.geom.graph(GEO)
# RAD_GRP_DCT = automol.graph.radical_group_dct(GRA)
# for ATM, GRPS in RAD_GRP_DCT.items():
# print(len(GRPS))
# for GRP in GRPS:
# print(len(GRP))
# print('atom', ATM)
# print('group', GRP)
# ATM_KEYS = automol.graph.atom_keys(GRP)
# GRP_GEO = automol.geom.from_subset(GEO, ATM_KEYS)
# print(automol.geom.string(GRP_GEO))
# print()
#
# print(automol.graph.string(GRP, one_indexed=False))
# STE_ATM_KEYS = automol.graph.stereogenic_atom_keys(GRP)
# print(STE_ATM_KEYS)
# # GRP_ICH = automol.graph.inchi(GRP, stereo=False)
# # GRP_ICH = automol.inchi.add_stereo(GRP_ICH)
# # print(GRP_ICH)
# GRP_ICH = automol.graph.inchi(GRP, stereo=True)
|
# SPDX-License-Identifier: MIT
from bobber import __version__
from setuptools import setup
with open('README.md', 'r') as f:
long_description = f.read()
setup(
name='nvidia-bobber',
version=__version__,
description='Containerized testing of system components that impact AI workload performance',
long_description=long_description,
packages=['bobber',
'bobber/lib',
'bobber/lib/analysis',
'bobber/lib/docker',
'bobber/lib/system',
'bobber/lib/tests'],
include_package_data=True,
package_data={'': ['lib/docker/Dockerfile',
'test_scripts/call_dali_multi.sh',
'test_scripts/dali_multi.sh',
'test_scripts/fio_fill_single.sh',
'test_scripts/fio_multi.sh',
'test_scripts/mdtest_multi.sh',
'test_scripts/nccl_multi.sh',
'test_scripts/setup_fio.sh']},
license='MIT',
python_requires='>=3.6',
entry_points={
'console_scripts': ['bobber=bobber.bobber:main']
},
install_requires=[
'docker >= 4.3.1',
'numpy >= 1.9.5',
'pyyaml >= 5.4.0',
'tabulate >= 0.8.7',
'six>=1.15.0'
]
)
|
import re
from abc import ABC, abstractmethod
from typing import Dict, Any
from pydantic import ConstrainedStr
from ulid import ULID as _ULID, base32
from .abstract import JsonSerializable, Restoreable, Generatable, T
class PrimitiveBase(JsonSerializable, Restoreable, Generatable, ABC):
@classmethod
@abstractmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
...
@classmethod
def __get_validators__(cls):
# one or more validators may be yielded which will be called in the
# order to validate the input, each validator will receive as an input
# the value returned from the previous validator
yield cls.validate
@classmethod
@abstractmethod
def validate(cls, v):
...
class Primitive(PrimitiveBase):
coerce = True
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
# using string as the default type as it's the natural type when encoding to JSON
field_schema.update(type="string")
@classmethod
def validate(cls, v):
if isinstance(v, cls):
return v
if cls.coerce:
# noinspection PyArgumentList
return cls(v)
        raise TypeError(f"expected {repr(v)} to be an instance of {cls}")
def __repr__(self):
return f"{self.__class__.__name__}({super().__repr__()})"
def __json__(self):
return self
@classmethod
def __restore__(cls, value):
# noinspection PyArgumentList
return cls(value)
@classmethod
def __generate__(cls):
return cls()
_ulid_hash_obj = object()
class ULID(_ULID, ConstrainedStr, PrimitiveBase):
strip_whitespace = True
min_length = 26
max_length = 26
regex = re.compile(
r"^[0123456789abcdefghjkmnpqrstvwxyzABCDEFGHJKMNPQRSTVWXYZ]{26}$"
)
def __new__(cls, *args, **kwargs):
if not args:
args = (_ULID(),)
return super().__new__(cls, *args, **kwargs)
def __init__(self, *args, **kwargs):
super(ULID, self).__init__(base32.decode(self), *args[1:], **kwargs)
def __hash__(self):
return hash((_ulid_hash_obj, self.bytes))
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
# using string as the default type as it's the natural type when encoding to JSON
super().__modify_schema__(field_schema)
field_schema["format"] = "ulid"
@classmethod
def validate(cls, value):
return cls(super().validate(value))
@classmethod
def from_bytes(cls, bytes_):
return cls(_ULID.from_bytes(bytes_))
@classmethod
def from_str(cls, string):
return cls(string)
def __json__(self):
return self
@classmethod
def __restore__(cls, value):
return cls(value)
@classmethod
def __generate__(cls):
return cls()
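# Hedged usage sketch (assumes pydantic v1-style validation, which the validators above target;
# the model and field names are hypothetical):
#   from pydantic import BaseModel
#
#   class Order(BaseModel):
#       id: ULID
#
#   Order(id="01ARZ3NDEKTSV4RRFFQ69G5FAV").id  # coerced/validated into a ULID instance
#   ULID()                                      # a fresh random ULID (see __new__ above)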
|
from ossConfig import ossConfig
import Oss
access_key = 'XXXXXXXXX'
secret_key = 'XXXXXXXXXXXXXXXXXXX'
endpoint_url = 'http://XXXXXXXXXXXXXXXXX.com'
config = ossConfig(access_key, secret_key, endpoint_url)
bucket_name = 'test1'
object_name = 'mytestput'
URL = Oss.PresignedURLs(config, bucket_name, object_name)
print(URL)
|
#!/usr/bin/python
import ConfigParser
from shutil import copyfile
import sys
import os.path
if not os.path.exists("MobaXterm.ini") or not os.path.exists("WinSCP.ini"):
print "MobaXterm.ini and/or WinSCP.ini are missing. Please place them in the same directory as the script"
sys.exit()
moba_parser = ConfigParser.ConfigParser()
print "Reading MobaXterm.ini"
moba_parser.read('MobaXterm.ini')
winscp_parser = ConfigParser.ConfigParser()
print "Reading WinSCP.ini"
winscp_parser.read('WinSCP.ini')
tmp_parser = ConfigParser.ConfigParser()
tmp_parser.optionxform = str #keep options capitalization#
servers =[]
for section_name in moba_parser.sections():
if "Bookmarks" in section_name:
#print 'Section:', section_name
#print ' Options:', moba_parser.options(section_name)
for name, value in moba_parser.items(section_name):
if value.startswith("#109#"): #Only fetch ssh sessions - 109 "
split1 = value.split('#')
split2 = split1[2].split('%')
dir = moba_parser.get(section_name, 'subrep')
tmp = (dir.replace("\\","/")+'/'+name).replace(" ","%20")
session = 'Sessions\\' + tmp
if winscp_parser.has_section(session):
print "Session already present in WinSCP.ini...Will not add session [" + name + "]"
else:
print "Session not present in WinSCP.ini...Will add session [" + name + "]"
server=[session,split2[1], split2[2],split2[3]]
servers.append(server)
for srv in servers:
tmp_parser.add_section(srv[0])
tmp_parser.set(srv[0],"HostName",srv[1])
tmp_parser.set(srv[0],"UserName", srv[3])
tmp_parser.set(srv[0],"PortNumber", srv[2])
tmp_parser.set(srv[0],"FSProtocol", 0)
tmp_parser.set(srv[0],"Password", "password") #add a dummy password so you get prompt to change it and
#save after the first unsuccessful login in WinSCP)
copyfile('WinSCP.ini', 'WinSCP.ini.bkp') #backup before writing new file#
#winscp_parser.write(sys.stdout)
with open('WinSCP.ini', 'a') as f:  # append the new sessions to the existing WinSCP.ini
tmp_parser.write(f)
|
import os
import unittest
from os import path
import numpy as np
from numpy.testing import assert_allclose
from hazma.decay import charged_pion, muon, neutral_pion
class TestDecay(unittest.TestCase):
def setUp(self):
self.base_dir = path.dirname(__file__)
def load_data(self, data_dir):
"""Loads test data.
Arguments
---------
data_dir : str
Directory containing test data relative to this file.
Returns
-------
spectra : dict(float, (np.array, np.array))
Reference data. The keys are the decaying particle's energies and
the values are a tuple of photon energies and spectrum values,
assumed to be sorted by photon energy.
"""
# Count number of tests. There are two files for each test (one
# containing the particle's energy, the other with the spectrum values)
# and one file containing the photon energies.
data_dir = path.join(self.base_dir, data_dir)
n_tests = (len(os.listdir(data_dir)) - 1) // 2
# Load energies, spectrum values and photon energies
e_gams = np.load(path.join(data_dir, "e_gams.npy"))
spectra = {}
for i in range(1, n_tests + 1):
e = float(np.load(path.join(data_dir, "e_{}.npy".format(i))))
spectra[e] = e_gams, np.load(path.join(data_dir, "dnde_{}.npy".format(i)))
return spectra
def compare_spectra(self, data_dir, dnde_func):
"""Compares recomputed spectra with reference data.
"""
spectra = self.load_data(data_dir)
for e, (e_gams, dnde_ref) in spectra.items():
# Compute spectrum
dnde = dnde_func(e_gams, e)
# Compare
for e_gam, val, val_ref in zip(e_gams, dnde, dnde_ref):
assert_allclose(
val,
val_ref,
rtol=1e-5,
err_msg="reference spectrum from {} does not match recomputed value at e_gam = {}".format(
data_dir, e_gam
),
)
def test_dnde_muon(self):
self.compare_spectra("mu_data", muon)
def test_dnde_neutral_pion(self):
self.compare_spectra("pi0_data", neutral_pion)
def test_dnde_charged_pion(self):
self.compare_spectra("pi_data", charged_pion)
|
# Generated by Django 3.2.6 on 2021-08-20 12:24
from django.db import migrations, models
import gnd.fields
class Migration(migrations.Migration):
dependencies = [
('example', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Person',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('gnd_gnd_id', gnd.fields.GndField(max_length=250)),
('title', models.CharField(blank=True, max_length=250, null=True)),
],
options={
'abstract': False,
},
),
]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-07-19 11:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orgManager', '0003_auto_20180719_1431'),
]
operations = [
migrations.AddField(
model_name='organization',
name='depth',
field=models.CharField(max_length=140, null=True),
),
migrations.AddField(
model_name='organization',
name='numchild',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='organization',
name='path',
field=models.CharField(max_length=140, null=True),
),
]
|
# Copyright 2020 Open Climate Tech Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Library code to manage image archives
"""
from firecam.lib import goog_helper
import os
import logging
import urllib.request
import time, datetime, dateutil.parser
from html.parser import HTMLParser
import requests
import re
import pathlib
from PIL import Image, ImageMath, ImageStat
import numpy as np
import cv2
def isPTZ(cameraID):
return False
def fetchImageAndMeta(dbManager, cameraID, cameraUrl, imgDir):
"""Fetch the image file and metadata for given camera
Args:
cameraID (str): ID of camera
cameraUrl (str): URL with image and metadata
        imgDir (str): Output directory to store the image
Returns:
        Tuple containing filepath of the image, current heading, timestamp, and field of view
"""
fov = 110 # camera horizontal field of view is 110 for most Mobotix cameras
timestamp = int(time.time())
imgPath = getImgPath(imgDir, cameraID, timestamp)
urllib.request.urlretrieve(cameraUrl, imgPath)
heading = getHeading(cameraID)
# read EXIF header for original timestamp and rename file
img = Image.open(imgPath)
imgExif = ('exif' in img.info) and img.info['exif']
img.close()
timeMatch = imgExif and re.findall('(\d+) UTC', imgExif.decode('utf-8','ignore'))
if timeMatch and len(timeMatch):
newTimestamp = int(timeMatch[0])
if (newTimestamp > timestamp - 5*60) and (newTimestamp < timestamp + 5*60):
newImgPath = getImgPath(imgDir, cameraID, newTimestamp)
timestamp = newTimestamp
os.rename(imgPath, newImgPath)
imgPath = newImgPath
return (imgPath, heading, timestamp, fov)
def markImageProcessed(dbManager, cameraID, heading, timestamp):
return
def getImgPath(outputDir, cameraID, timestamp, cropCoords=None, diffMinutes=0):
"""Generate properly formatted image filename path following Firecam conventions
E.g.: lo-s-mobo-c__2018-06-06T11;12;23_Diff1_Crop_627x632x1279x931.jpg
Args:
outputDir (str): Output directory
cameraID (str): ID of camera
timestamp (int): timestamp
cropCoords (tuple): (x0, y0, x1, y1) coordinates of the crop rectangle
diffMinutes (int): number of minutes separating the images (for subtracted images)
Returns:
String to full path name
"""
timeStr = datetime.datetime.fromtimestamp(timestamp).isoformat()
timeStr = timeStr.replace(':', ';') # make windows happy
imgName = '__'.join([cameraID, timeStr])
if diffMinutes:
imgName += ('_Diff%d' % diffMinutes)
if cropCoords:
imgName += '_Crop_' + 'x'.join(list(map(lambda x: str(x), cropCoords)))
imgPath = os.path.join(outputDir, imgName + '.jpg')
return imgPath
def repackFileName(parsedName):
"""Generate properly formatted image filename following Firecam conventions
based on information from parsedName dictionary
E.g.: lo-s-mobo-c__2018-06-06T11;12;23_Diff1_Crop_627x632x1279x931.jpg
Args:
parsedName (dict): Dictionary containing various attributes of image
(likely result from earlier call to parseFilename())
Returns:
String to file name
"""
cropCoords = None
if 'minX' in parsedName:
cropCoords=(parsedName['minX'], parsedName['minY'], parsedName['maxX'], parsedName['maxY'])
return getImgPath('', parsedName['cameraID'], parsedName['unixTime'],
cropCoords=cropCoords,
diffMinutes=parsedName['diffMinutes'])
def parseFilename(fileName):
"""Parse the image source attributes given the properly formatted image filename
Args:
fileName (str):
Returns:
Dictionary with parsed out attributes
"""
# regex to match names like Axis-BaldCA_2018-05-29T16_02_30_129496.jpg
# and bm-n-mobo-c__2017-06-25z11;53;33.jpg
regexExpanded = '([A-Za-z0-9-_]+[^_])_+(\d{4}-\d\d-\d\d)T(\d\d)[_;](\d\d)[_;](\d\d)'
# regex to match diff minutes spec for subtracted images
regexDiff = '(_Diff(\d+))?'
# regex to match optional crop information e.g., Axis-Cowles_2019-02-19T16;23;49_Crop_270x521x569x820.jpg
regexOptionalCrop = '(_Crop_(-?\d+)x(-?\d+)x(\d+)x(\d+))?'
matchesExp = re.findall(regexExpanded + regexDiff + regexOptionalCrop, fileName)
# regex to match names like 1499546263.jpg
regexUnixTime = '(1\d{9})'
matchesUnix = re.findall(regexUnixTime + regexDiff + regexOptionalCrop, fileName)
cropInfo = None
if len(matchesExp) == 1:
match = matchesExp[0]
parsed = {
'cameraID': match[0],
'date': match[1],
'hours': match[2],
'minutes': match[3],
'seconds': match[4]
}
isoStr = '{date}T{hour}:{min}:{sec}'.format(date=parsed['date'],hour=parsed['hours'],min=parsed['minutes'],sec=parsed['seconds'])
dt = dateutil.parser.parse(isoStr)
unixTime = time.mktime(dt.timetuple())
parsed['diffMinutes'] = int(match[6] or 0)
cropInfo = match[-4:]
elif len(matchesUnix) == 1:
match = matchesUnix[0]
unixTime = int(match[0])
dt = datetime.datetime.fromtimestamp(unixTime)
isoStr = datetime.datetime.fromtimestamp(unixTime).isoformat()
parsed = {
'cameraID': 'UNKNOWN_' + fileName,
'date': dt.date().isoformat(),
'hours': str(dt.hour),
'minutes': str(dt.minute),
'seconds': str(dt.second)
}
parsed['diffMinutes'] = int(match[2] or 0)
cropInfo = match[-4:]
else:
logging.error('Failed to parse name %s', fileName)
return None
if cropInfo[0]:
parsed['minX'] = int(cropInfo[0])
parsed['minY'] = int(cropInfo[1])
parsed['maxX'] = int(cropInfo[2])
parsed['maxY'] = int(cropInfo[3])
parsed['isoStr'] = isoStr
parsed['unixTime'] = int(unixTime)
return parsed
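# Hedged usage sketch (illustrative only; the example file name follows the convention documented
# in getImgPath(), and the round trip assumes a consistent local timezone):
#   parsed = parseFilename('lo-s-mobo-c__2018-06-06T11;12;23_Diff1_Crop_627x632x1279x931.jpg')
#   parsed['cameraID']      -> 'lo-s-mobo-c'
#   parsed['diffMinutes']   -> 1
#   parsed['minX']          -> 627
#   repackFileName(parsed)  -> 'lo-s-mobo-c__2018-06-06T11;12;23_Diff1_Crop_627x632x1279x931.jpg'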
def getHeading(cameraID):
"""Return the heading (direction in degrees where 0 = North) of the given camera
Args:
cameraID: (string) camera ID (e.g. bh-w-mobo-c)
Returns:
Numerical heading or None
"""
cardinalHeadings = {
'n': 0,
'e': 90,
's': 180,
'w': 270,
'ne': 45,
'se': 135,
'sw': 225,
'nw': 315,
}
regexDirMobo = '-([ns]?[ew]?)-mobo-c'
matches = re.findall(regexDirMobo, cameraID)
if len(matches) == 1:
camDir = matches[0]
if camDir in cardinalHeadings:
return cardinalHeadings[camDir]
return None
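# Illustrative examples for getHeading() (camera IDs are hypothetical but follow the
# "-<dir>-mobo-c" convention handled above):
#   getHeading('bh-w-mobo-c')  -> 270
#   getHeading('ml-ne-mobo-c') -> 45
#   getHeading('Axis-BaldCA')  -> None (no direction suffix to parse)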
class HpwrenHTMLParser(HTMLParser):
"""Dervied class from HTMLParser to pull out file information from HTML directory listing pages
Allows caller to specify fileType (extension) the caller cares about
"""
def __init__(self, fileType):
self.table = []
self.filetype = fileType
super().__init__()
def handle_starttag(self, tag, attrs):
"""Handler for HTML starting tag (<).
If the tag type is <a> and it contains an href link pointing to file of specified type,
then save the name for extraction by getTable()
"""
if (tag == 'a') and len(attrs) > 0:
# print('Found <a> %s', len(attrs), attrs)
for attr in attrs:
# print('Found attr %s', len(attr), attr)
if len(attr) == 2 and attr[0]=='href' and attr[1][-4:] == self.filetype:
self.table.append(attr[1])
def getTable(self):
return self.table
def parseDirHtml(dirHtml, fileType):
"""Wrapper around HpwrenHTMLParser to pull out entries of given fileType
Args:
dirHtml (str): HTML page for directory listing
fileType (str): File extension (e.g.: '.jpg')
Returns:
List of file names matching extension
"""
parser = HpwrenHTMLParser(fileType)
parser.feed(dirHtml)
return parser.getTable()
def fetchImgOrDir(url, verboseLogs):
"""Read the given URL and return the data. Also note if data is an image
Args:
url (str): URL to read
verboseLogs (bool): Write verbose logs for debugging
Returns:
Tuple indicating image or directory and the data
"""
try:
resp = urllib.request.urlopen(url)
except Exception as e:
if verboseLogs:
logging.error('Result of fetch from %s: %s', url, str(e))
return (None, None)
if resp.getheader('content-type') == 'image/jpeg':
return ('img', resp)
else:
return ('dir', resp)
def readUrlDir(urlPartsQ, verboseLogs, fileType):
"""Get the files of given fileType from the given HPWREN Q directory URL
Args:
urlPartsQ (list): HPWREN Q directory URL as list of string parts
verboseLogs (bool): Write verbose logs for debugging
fileType (str): File extension (e.g.: '.jpg')
Returns:
List of file names matching extension
"""
# logging.warning('Dir URLparts %s', urlPartsQ)
url = '/'.join(urlPartsQ)
# logging.warning('Dir URL %s', url)
(imgOrDir, resp) = fetchImgOrDir(url, verboseLogs)
if not imgOrDir:
return None
assert imgOrDir == 'dir'
dirHtml = resp.read().decode('utf-8')
return parseDirHtml(dirHtml, fileType)
def listTimesinQ(urlPartsQ, verboseLogs):
"""Get the timestamps of images from the given HPWREN Q directory URL
Args:
urlPartsQ (list): HPWREN Q directory URL as list of string parts
verboseLogs (bool): Write verbose logs for debugging
Returns:
List of timestamps
"""
files = readUrlDir(urlPartsQ, verboseLogs, '.jpg')
if files:
return list(map(lambda x: {'time': int(x[:-4])}, files))
return None
def downloadHttpFileAtTime(outputDir, urlPartsQ, cameraID, closestTime, verboseLogs):
"""Download HPWREN image from given HPWREN Q directory URL at given time
Args:
outputDir (str): Output directory path
urlPartsQ (list): HPWREN Q directory URL as list of string parts
cameraID (str): ID of camera
closestTime (int): Desired timestamp
verboseLogs (bool): Write verbose logs for debugging
Returns:
Local filesystem path to downloaded image
"""
imgPath = getImgPath(outputDir, cameraID, closestTime)
if verboseLogs:
logging.warning('Local file %s', imgPath)
if os.path.isfile(imgPath):
logging.warning('File %s already downloaded', imgPath)
return imgPath
closestFile = str(closestTime) + '.jpg'
urlParts = urlPartsQ[:] # copy URL parts array
urlParts.append(closestFile)
# logging.warning('File URLparts %s', urlParts)
url = '/'.join(urlParts)
logging.warning('File URL %s', url)
# urllib.request.urlretrieve(url, imgPath)
resp = requests.get(url, stream=True)
with open(imgPath, 'wb') as f:
for chunk in resp.iter_content(chunk_size=8192):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
resp.close()
return imgPath
def downloadGCSFileAtTime(outputDir, closestEntry):
"""Download HPWREN image from GCS folder from ffmpeg Google Cloud Function
Args:
outputDir (str): Output directory path
closestEntry (dict): Desired timestamp and GCS file
Returns:
Local filesystem path to downloaded image
"""
imgPath = os.path.join(outputDir, closestEntry['name'])
logging.warning('Local file %s', imgPath)
if os.path.isfile(imgPath):
logging.warning('File %s already downloaded', imgPath)
return imgPath
parsedPath = goog_helper.parseGCSPath(closestEntry['id'])
goog_helper.downloadBucketFile(parsedPath['bucket'], parsedPath['name'], imgPath)
return imgPath
def getMp4Url(urlPartsDate, qNum, verboseLogs):
"""Get the URL for the MP4 video for given Q
Args:
urlPartsDate (list): HPWREN date directory URL as list of string parts
qNum (int): Q number (1-8) where each Q represents 3 hour period
verboseLogs (bool): Write verbose logs for debugging
Returns:
        URL to the MP4 video for the given Q, or None if not found
"""
urlPartsMp4 = urlPartsDate[:] # copy URL
urlPartsMp4.append('MP4')
files = readUrlDir(urlPartsMp4, verboseLogs, '.mp4')
if verboseLogs:
logging.warning('MP4s %s', files)
qMp4Name = 'Q' + str(qNum) + '.mp4'
if files and (qMp4Name in files):
urlPartsMp4.append(qMp4Name)
return '/'.join(urlPartsMp4)
return None
def gcfFfmpeg(gcfUrl, googleServices, hpwrenSource, qNum, folderID):
"""invoke the Google Cloud Function for ffpeg decompression with proper parameters and credentials
Args:
gcfUrl (str): URL for ffmpeg cloud function
googleServices (): Google services and credentials
hpwrenSource (dict): Dictionary containing various HPWREN source information
qNum (int): Q number (1-8) where each Q represents 3 hour period
folderID (str): google drive ID of folder where to extract images
Returns:
Cloud function result
"""
gcfParams = {
'hostName': hpwrenSource['server'],
'cameraID': hpwrenSource['cameraID'],
'archiveCamDir': hpwrenSource['urlParts'][1],
'yearDir': hpwrenSource['year'],
'dateDir': hpwrenSource['dateDirName'],
'qNum': qNum,
'uploadDir': folderID
}
maxRetries = 3
retriesLeft = maxRetries
while retriesLeft > 0:
token = goog_helper.getIdToken(googleServices, gcfUrl, retriesLeft != maxRetries)
headers = {'Authorization': 'bearer {}'.format(token)}
rawResponse = requests.post(gcfUrl, headers=headers, data=gcfParams)
response = rawResponse.content.decode()
if response == 'done':
return response
retriesLeft -= 1
logging.error('Error calling GCF. %d retries left. resp=%s raw=%s', retriesLeft, str(response), str(rawResponse))
time.sleep(5) # wait 5 seconds before retrying
return response
def getGCSMp4(googleServices, settings, hpwrenSource, qNum):
"""Extract images from Q MP4 video into GCS folder
Args:
googleServices (): Google services and credentials
settings (): settings module
hpwrenSource (dict): Dictionary containing various HPWREN source information
qNum (int): Q number (1-8) where each Q represents 3 hour period
Returns:
list of files in GCS bucket with metadata
"""
ffmpegParsedGCS = goog_helper.parseGCSPath(settings.ffmpegFolder)
folderName = hpwrenSource['cameraID'] + '__' + hpwrenSource['dateDirName'] + 'Q' + str(qNum)
folderPath = ffmpegParsedGCS['name'] + '/' + folderName
files = goog_helper.listBucketEntries(ffmpegParsedGCS['bucket'], prefix=(folderPath + '/'))
logging.warning('Found %d GCS files', len(files))
if not files:
logging.warning('Calling Cloud Function for folder %s', folderName)
uploadDir = goog_helper.repackGCSPath(ffmpegParsedGCS['bucket'],folderPath)
gcfRes = gcfFfmpeg(settings.ffmpegUrl, googleServices, hpwrenSource, qNum, uploadDir)
logging.warning('Cloud function result %s', gcfRes)
files = goog_helper.listBucketEntries(ffmpegParsedGCS['bucket'], prefix=(folderPath + '/'))
# logging.warning('GDM4: files %d %s', len(files), files)
imgTimes = []
for filePath in files:
fileName = filePath.split('/')[-1]
nameParsed = parseFilename(fileName)
imgTimes.append({
'time': nameParsed['unixTime'],
'id': goog_helper.repackGCSPath(ffmpegParsedGCS['bucket'], filePath),
'name': fileName
})
return imgTimes
outputDirCheckOnly = '/CHECK:WITHOUT:DOWNLOAD'
def downloadFilesForDate(googleServices, settings, outputDir, hpwrenSource, gapMinutes, verboseLogs):
"""Download HPWREN images from given given date time range with specified gaps
If outputDir is special value outputDirCheckOnly, then just check if files are retrievable
Args:
googleServices (): Google services and credentials
settings (): settings module
outputDir (str): Output directory path
hpwrenSource (dict): Dictionary containing various HPWREN source information
gapMinutes (int): Number of minutes of gap between images for downloading
verboseLogs (bool): Write verbose logs for debugging
Returns:
List of local filesystem paths to downloaded images
"""
startTimeDT = hpwrenSource['startTimeDT']
endTimeDT = hpwrenSource['endTimeDT']
dateDirName = '{year}{month:02d}{date:02d}'.format(year=startTimeDT.year, month=startTimeDT.month, date=startTimeDT.day)
hpwrenSource['dateDirName'] = dateDirName
urlPartsDate = hpwrenSource['urlParts'][:] # copy URL
urlPartsDate.append(dateDirName)
hpwrenSource['urlPartsDate'] = urlPartsDate
timeGapDelta = datetime.timedelta(seconds = 60*gapMinutes)
imgTimes = None
lastQNum = 0 # 0 never matches because Q numbers start with 1
curTimeDT = startTimeDT
downloaded_files = []
prevTime = None
while curTimeDT <= endTimeDT:
qNum = 1 + int(curTimeDT.hour/3)
urlPartsQ = urlPartsDate[:] # copy URL
urlPartsQ.append('Q' + str(qNum))
if qNum != lastQNum:
# List times of files in Q dir and cache
useHttp = True
imgTimes = listTimesinQ(urlPartsQ, verboseLogs)
if not imgTimes:
if verboseLogs:
logging.error('No images in Q dir %s', '/'.join(urlPartsQ))
mp4Url = getMp4Url(urlPartsDate, qNum, verboseLogs)
if not mp4Url:
return downloaded_files
if outputDir != outputDirCheckOnly:
imgTimes = getGCSMp4(googleServices, settings, hpwrenSource, qNum)
useHttp = False
# logging.warning('imgTimes %d %s', len(imgTimes), imgTimes)
lastQNum = qNum
if outputDir == outputDirCheckOnly:
downloaded_files.append(outputDirCheckOnly)
else:
desiredTime = time.mktime(curTimeDT.timetuple())
closestEntry = min(imgTimes, key=lambda x: abs(x['time']-desiredTime))
closestTime = closestEntry['time']
downloaded = None
if closestTime != prevTime: # skip if closest timestamp is still same as previous iteration
prevTime = closestTime
if useHttp:
downloaded = downloadHttpFileAtTime(outputDir, urlPartsQ, hpwrenSource['cameraID'], closestTime, verboseLogs)
else:
downloaded = downloadGCSFileAtTime(outputDir, closestEntry)
if downloaded and verboseLogs:
logging.warning('Successful download for time %s', str(datetime.datetime.fromtimestamp(closestTime)))
if downloaded:
downloaded_files.append(downloaded)
curTimeDT += timeGapDelta
return downloaded_files
def downloadFilesHpwren(googleServices, settings, outputDir, hpwrenSource, gapMinutes, verboseLogs):
"""Download HPWREN images from given given date time range with specified gaps
Calls downloadFilesForDate to do the heavy lifting, but first determines the hpwren server.
First tries without year directory in URL path, and if that fails, then retries with year dir
Args:
googleServices (): Google services and credentials
settings (): settings module
outputDir (str): Output directory path
hpwrenSource (dict): Dictionary containing various HPWREN source information
gapMinutes (int): Number of minutes of gap between images for downloading
verboseLogs (bool): Write verbose logs for debugging
Returns:
List of local filesystem paths to downloaded images
"""
regexDir = '(c[12])/([^/]+)/large/?'
matches = re.findall(regexDir, hpwrenSource['dirName'])
if len(matches) != 1:
logging.error('Could not parse dir: %s', hpwrenSource['dirName'])
return None
match = matches[0]
(server, subdir) = match
hpwrenBase = 'http://{server}.hpwren.ucsd.edu/archive'.format(server=server)
hpwrenSource['server'] = server
urlParts = [hpwrenBase, subdir, 'large']
hpwrenSource['urlParts'] = urlParts
# first try without year directory
hpwrenSource['year'] = ''
downloaded_files = downloadFilesForDate(googleServices, settings, outputDir, hpwrenSource, gapMinutes, verboseLogs)
if downloaded_files:
return downloaded_files
# retry with year directory
hpwrenSource['year'] = str(hpwrenSource['startTimeDT'].year)
urlParts.append(hpwrenSource['year'])
hpwrenSource['urlParts'] = urlParts
return downloadFilesForDate(googleServices, settings, outputDir, hpwrenSource, gapMinutes, verboseLogs)
def getHpwrenCameraArchives(hpwrenArchivesPath):
"""Get the HPWREN camera archive directories from given file
Args:
        hpwrenArchivesPath (str): path (local or GCS) to file with archive info
Returns:
List of archive directories
"""
archiveData = goog_helper.readFile(hpwrenArchivesPath)
camArchives = []
for line in archiveData.split('\n'):
camInfo = line.split(' ')
# logging.warning('info %d, %s', len(camInfo), camInfo)
if len(camInfo) != 2:
logging.warning('Ignoring archive entry without two columns %s', camInfo)
continue
dirInfo = camInfo[1].split('/')
if len(dirInfo) < 2:
logging.warning('Ignoring archive entry without proper ID %s', dirInfo)
continue
cameraID = dirInfo[1]
matchesID = list(filter(lambda x: cameraID == x['id'], camArchives))
if matchesID:
if camInfo[1] not in matchesID[0]['dirs']:
matchesID[0]['dirs'].append(camInfo[1])
# logging.warning('Merging duplicate ID dir %s, %s', camInfo[1], matchesID[0])
continue
preIndex = camInfo[0].find('pre')
if preIndex > 0:
searchName = camInfo[0][:(preIndex-1)]
matchesName = list(filter(lambda x: searchName in x['name'], camArchives))
for match in matchesName:
if camInfo[1] not in match['dirs']:
match['dirs'].append(camInfo[1])
# logging.warning('Mergig pre dir %s to %s', camInfo[1], match)
continue
camData = {'id': cameraID, 'name': camInfo[0], 'dirs': [camInfo[1]]}
# logging.warning('data %s', camData)
camArchives.append(camData)
logging.warning('Discovered total %d camera archive dirs', len(camArchives))
return camArchives
def findCameraInArchive(camArchives, cameraID):
"""Find the entries in the camera archive directories for the given camera
Args:
camArchives (list): Result of getHpwrenCameraArchives() above
cameraID (str): ID of camera to fetch images from
Returns:
        List of archive dirs that match the given camera
"""
matchingCams = list(filter(lambda x: cameraID == x['id'], camArchives))
# logging.warning('Found %d match(es): %s', len(matchingCams), matchingCams)
if matchingCams:
return matchingCams[0]['dirs']
else:
return []
def getHpwrenImages(googleServices, settings, outputDir, camArchives, cameraID, startTimeDT, endTimeDT, gapMinutes):
"""Download HPWREN images from given camera and date time range with specified gaps
Iterates over all directories for given camera in the archives and then downloads the images
by calling downloadFilesHpwren
Args:
googleServices (): Google services and credentials
settings (): settings module
outputDir (str): Output directory path or cache object
camArchives (list): Result of getHpwrenCameraArchives() above
cameraID (str): ID of camera to fetch images from
startTimeDT (datetime): starting time of time range
endTimeDT (datetime): ending time of time range
gapMinutes (int): Number of minutes of gap between images for downloading
Returns:
List of local filesystem paths to downloaded images
"""
# If outputDir is a cache object, fetch the real outputDir and set 'cache' variable
cache = None
if (not isinstance(outputDir, str)) and ('writeDir' in outputDir):
cache = outputDir
outputDir = cache['writeDir']
# In cache mode, check local cache for existing files before checking remote archive
if cache:
curTimeDT = startTimeDT
timeGapDelta = datetime.timedelta(seconds = 60*gapMinutes)
downloaded_files = []
while curTimeDT <= endTimeDT:
filePath = cacheFindEntry(cache, cameraID, time.mktime(curTimeDT.timetuple()))
if filePath:
downloaded_files.append(filePath)
else:
downloaded_files = []
break
curTimeDT += timeGapDelta
if len(downloaded_files) > 0:
# all files are in cache, return results
logging.warning('already downloaded: %s', downloaded_files)
return downloaded_files
matchingDirs = findCameraInArchive(camArchives, cameraID)
found = None
for matchingDir in matchingDirs:
hpwrenSource = {
'cameraID': cameraID,
'dirName': matchingDir,
'startTimeDT': startTimeDT,
'endTimeDT': endTimeDT
}
logging.warning('Searching for files in dir %s', hpwrenSource['dirName'])
found = downloadFilesHpwren(googleServices, settings, outputDir, hpwrenSource, gapMinutes, False)
if found:
break
# If new files were added to cache directory, update cache object
if cache and found and (cache['readDir'] == cache['writeDir']):
for filePath in found:
cacheInsert(cache, filePath)
return found
def getArchiveImages(googleServices, settings, dbManager, outputDir, camArchives, cameraID, heading, startTimeDT, endTimeDT, gapMinutes):
return getHpwrenImages(googleServices, settings, outputDir, camArchives, cameraID, startTimeDT, endTimeDT, gapMinutes)
def cacheInsert(cache, fileName):
"""Insert given file into given cache object
Args:
cache (dict): Cache object created by cacheDir()
fileName (str): name or path of file to insert
"""
nameParsed = parseFilename(fileName)
if nameParsed and nameParsed['cameraID']:
cameraID = nameParsed['cameraID']
unixTime = nameParsed['unixTime']
if not cameraID in cache:
cache[cameraID] = []
cameraTimes = cache[cameraID]
ppath = pathlib.PurePath(fileName)
cameraTimes.append({'time': unixTime, 'fileName': str(ppath.name)})
def cacheFindEntry(cache, cameraID, desiredTime):
"""Search given cache for image from given camera at given timestamp (within 30 seconds)
Args:
cache (dict): Cache object created by cacheDir()
cameraID (str): ID of camera to fetch images from
desiredTime (int): unix time of desired image
Returns:
File path of image or None
"""
if not cameraID in cache:
return None
cameraTimes = cache[cameraID]
closestEntry = min(cameraTimes, key=lambda x: abs(x['time'] - desiredTime))
if abs(closestEntry['time'] - desiredTime) < 30:
# logging.warning('close: %s', str(closestEntry))
return os.path.join(cache['readDir'], closestEntry['fileName'])
else:
# logging.warning('far: %s, %s', str(desiredTime), str(closestEntry))
return None
def cacheFetchRange(cache, cameraID, maxTime, desiredOffset, minOffset):
if not cameraID in cache:
return None
cameraTimes = cache[cameraID]
minTime = maxTime + minOffset
desiredTime = maxTime + desiredOffset
allowedEntries = list(filter(lambda x: (x['time'] > minTime) and (x['time'] < maxTime), cameraTimes))
if len(allowedEntries) == 0:
return None
sortedEntries = sorted(allowedEntries, key=lambda x: abs(x['time'] - desiredTime))
return list(map(lambda x: os.path.join(cache['readDir'], x['fileName']), sortedEntries))
def cacheDir(readDirPath, writeDirPath=None):
"""Create a cache of iamges in given directory and return the cache object
Args:
readDirPath (str): path to directory containing images
Returns:
Cache object
"""
imageFileNames = sorted(os.listdir(readDirPath))
cache = {
'readDir': readDirPath,
'writeDir': writeDirPath or readDirPath
}
for fileName in imageFileNames:
if fileName[-4:] != '.jpg':
continue
cacheInsert(cache, fileName)
return cache
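# Hedged usage sketch (directory path, camera ID, and file name are hypothetical):
#   cache = cacheDir('/data/archive_images')                     # index existing .jpg files
#   cacheFindEntry(cache, 'lo-s-mobo-c', desiredUnixTime)        # -> path if within 30 seconds, else None
#   cacheInsert(cache, 'lo-s-mobo-c__2018-06-06T11;12;23.jpg')   # register a newly downloaded file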
def findTranslationOffset(cvImgA, cvImgB, maxIterations, eps):
if cvImgA.shape[0] > 1000:
headerHeight = 250 # clouds, metadata, and watermark
footerHeight = 250 # nearby trees moving with wind and shadows, metadata, and watermark
elif cvImgA.shape[0] > 300:
headerHeight = 100 # clouds, metadata, and watermark
footerHeight = 100 # nearby trees moving with wind and shadows, metadata, and watermark
else:
headerHeight = 0 # too small for headers and footers
footerHeight = 0
footerPos = cvImgA.shape[0] - footerHeight
grayA = cv2.cvtColor(cvImgA[headerHeight:footerPos], cv2.COLOR_BGR2GRAY)
grayB = cv2.cvtColor(cvImgB[headerHeight:footerPos], cv2.COLOR_BGR2GRAY)
warp_matrix = np.eye(2, 3, dtype=np.float32)
try:
# find optimal shifts limited to given maxIterations
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, maxIterations, eps)
(cc0, warp_matrix0) = cv2.findTransformECC(grayA, grayB, warp_matrix, cv2.MOTION_TRANSLATION, criteria)
# check another 10 iterations to determine if findTransformECC has converged
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, eps)
(cc1, warp_matrix1) = cv2.findTransformECC(grayA, grayB, warp_matrix0, cv2.MOTION_TRANSLATION, criteria)
except Exception as e:
return (False, None, None) # alignment failed
    epsAllowance = 10 * eps # allow up to 10*eps change to mean convergence (truly unaligned images are > 1000*eps)
dx = warp_matrix1[0][2]
dy = warp_matrix1[1][2]
logging.warning('Translation: %s: %s, %s, %s , %s', cc0 >= cc1 - epsAllowance, round((cc1-cc0)/eps,1), round(cc1, 4), round(dx, 1), round(dy, 1))
if (cc0 < cc1 - epsAllowance) or (cc1 < 0.85) or (abs(dx) > 20) or (abs(dy) > 10):
return (False, None, None) # alignment failed
return (True, dx, dy)
def alignImageObj(imgFileName, baseImgFileName, noShift=False):
maxIterations = 40
terminationEps = 1e-6
imgCv = cv2.imread(imgFileName)
baseImgCv = cv2.imread(baseImgFileName)
(alignable, dx, dy) = findTranslationOffset(baseImgCv, imgCv, maxIterations, terminationEps)
if alignable:
if round(dx) == 0 and round(dy) == 0: # optimization for sub-pixel shifts
return Image.open(imgFileName)
if noShift:
return None
logging.warning('shifting image dx, dy: %s, %s', round(dx), round(dy))
img = Image.open(imgFileName)
shiftedImg = img.transform(img.size, Image.AFFINE, (1, 0, dx, 0, 1, dy))
img.close()
return shiftedImg
return None
def alignImage(imgFileName, baseImgFileName):
shiftedImg = alignImageObj(imgFileName, baseImgFileName)
if shiftedImg:
shiftedImg.load() # ensure file read before remove
os.remove(imgFileName)
shiftedImg.save(imgFileName, format='JPEG', quality=95)
shiftedImg.close()
return True
return False
def diffImages(imgA, imgB):
"""Subtract two images (r-r, g-g, b-b). Also add 128 to reduce negative values
If a pixel is exactly same in both images, then the result will be 128,128,128 gray
Out of range values (<0 and > 255) are moved to 0 and 255 by the convert('L') function
Args:
imgA: Pillow image object to subtract from
imgB: Pillow image object to subtract
Returns:
Pillow image object containing the results of the subtraction with 128 mean
"""
bandsImgA = imgA.split()
bandsImgB = imgB.split()
bandsImgOut = []
for bandNum in range(len(bandsImgA)):
out = ImageMath.eval("convert(128+a-b,'L')", a=bandsImgA[bandNum], b=bandsImgB[bandNum])
bandsImgOut.append(out)
return Image.merge('RGB', bandsImgOut)
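# Hedged, self-contained sketch (not part of the original pipeline) showing the 128-offset
# behaviour of diffImages() on two tiny solid-color images:
def _demo_diff_images():
    imgA = Image.new('RGB', (2, 2), (140, 150, 200))
    imgB = Image.new('RGB', (2, 2), (100, 150, 200))
    diff = diffImages(imgA, imgB)
    # identical channels map to 128; the +40 difference on red maps to 168
    return diff.getpixel((0, 0))  # expected (168, 128, 128)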
def smoothAndCache(imgPath, outputDir):
ppath = pathlib.PurePath(imgPath)
# add 's_' prefix to denote smoothed images
smoothImgPath = os.path.join(outputDir, 's_' + str(ppath.name))
if os.path.isfile(smoothImgPath): # smooth image already generated
return smoothImgPath
img = cv2.imread(imgPath)
smoothImg = cv2.fastNlMeansDenoisingColored(img, None, 10,10,7,21)
cv2.imwrite(smoothImgPath, smoothImg)
return smoothImgPath
def diffSmoothImageFiles(imgAFile, imgBFile, cachedSmoothDir='.'):
smoothImgAPath = smoothAndCache(imgAFile, cachedSmoothDir)
smoothImgAPillow = Image.open(smoothImgAPath)
smoothImgBPath = smoothAndCache(imgBFile, cachedSmoothDir)
smoothImgBPillow = Image.open(smoothImgBPath)
return diffImages(smoothImgAPillow, smoothImgBPillow)
def smoothImage(img):
"""Smooth the given image
Args:
img: Pillow image object
Returns:
Pillow image object after smoothing
"""
# Pillow uses RGB and cv2 uses GBR, so have to convert before and after smoothing
imgBGR = cv2.cvtColor(np.asarray(img), cv2.COLOR_BGR2RGB)
# smoothImgBGR = cv2.fastNlMeansDenoisingColored(imgBGR, None, 10,10,7,21)
smoothImgBGR = cv2.bilateralFilter(imgBGR, 9, 75, 75)
smoothImgRGB = cv2.cvtColor(smoothImgBGR, cv2.COLOR_BGR2RGB)
return Image.fromarray(smoothImgRGB)
def diffSmoothImages(imgA, imgB):
"""Subtract two images (r-r, g-g, b-b) after smoothing them first.
Args:
imgA: Pillow image object to subtract from
imgB: Pillow image object to subtract
Returns:
Pillow image object containing the results of the subtraction with 128 mean
"""
smoothImgA = smoothImage(imgA)
smoothImgB = smoothImage(imgB)
return diffImages(smoothImgA, smoothImgB)
def rescaleValues(img, ratios):
bandsImg = img.split()
evalCmds = [
"convert(float(a)*%s, 'L')" % ratios[0],
"convert(float(a)*%s, 'L')" % ratios[1],
"convert(float(a)*%s, 'L')" % ratios[2],
]
bandsImgOut = [
ImageMath.eval(evalCmds[0], a = bandsImg[0]),
ImageMath.eval(evalCmds[1], a = bandsImg[1]),
ImageMath.eval(evalCmds[2], a = bandsImg[2]),
]
return Image.merge('RGB', bandsImgOut)
def brightness(img):
medians = ImageStat.Stat(img).median
brightness = (medians[0] + medians[1] + medians[2]) / 3
return max(brightness, .01) # to avoid div by 0
def diffWithChecks(baseImg, earlierImg):
brightnessRatio = brightness(baseImg)/brightness(earlierImg)
if (brightnessRatio < 0.92) or (brightnessRatio > 1.08): # large diffs hide the smoke
        logging.warning('Skipping extreme brightness diff %s', brightnessRatio)
return None
diffImg = diffSmoothImages(baseImg, earlierImg)
extremas = diffImg.getextrema()
if (extremas[0][0] == 128 and extremas[0][1] == 128) or (extremas[1][0] == 128 and extremas[1][1] == 128) or (extremas[2][0] == 128 and extremas[2][1] == 128):
logging.warning('Skipping no diffs %s', str(extremas))
return None
return diffImg
|
# BOJ 17779
"""
선거구를 선정하는 방법
가장 윗 점인 x, y를 선정 (1 <= x <= N, 1 <= y <= N)
d1, d2를 이용하여 구획 쪼개기 (d1, d2는 브루트포스를 이용하여 모두 선정하기, d1, d2 >= 1)
x, y 정하기 1 <= x < x + d1 + d2 <= N
1. x, y의 위치를 정하기
"""
import sys
si = sys.stdin.readline
dy = [-1, 1, 0, 0]
dx = [0, 0, -1, 1]
def label_5(y, x, d1, d2):
i = 0
while i <= d1:
label[y + i][x - i] = 5
i += 1
i = 0
while i <= d2:
label[y + i][x + i] = 5
i += 1
i = 0
while i <= d2:
label[y + d1 + i][x - d1 + i] = 5
i += 1
i = 0
while i <= d1:
label[y + d2 + i][x + d2 - i] = 5
i += 1
# 5 labeling
flag = True
for i in range(1, n + 1):
for j in range(1, n + 1):
if i < y:
continue
if i == y and j < x:
continue
if i == y + d1 + d2 and j == x + d2 - d1:
break
if label[i][j] == 5:
flag = not flag
elif flag:
label[i][j] = 5
# 1, 2, 3, 4 labeling
def labeling(y, x, d1, d2):
for i in range(1, n + 1):
for j in range(1, n + 1):
if label[i][j] == 5:
continue
if 1 <= i < y + d1 and 1 <= j <= x:
label[i][j] = 1
elif 1 <= i <= y + d2 and x < j <= n:
label[i][j] = 2
elif y + d1 <= i <= n and 1 <= j < x - d1 + d2:
label[i][j] = 3
elif y + d2 < i <= n and x - d1 + d2 <= j <= n:
label[i][j] = 4
n = int(si())
graph = [[0 for _ in range(n + 1)] for _ in range(n + 1)]
for i in range(1, n + 1):
graph[i] = [0] + list(map(int, si().split()))
res = 987654321
boundary = []
for i in range(1, n + 1):
for j in range(1, n + 1):
for d1 in range(1, n + 1):
for d2 in range(1, n + 1):
if not (1 <= i <= i + d1 + d2 <= n):
continue
if not (1 <= j - d1 <= j <= j + d2 <= n):
continue
boundary.append((i, j, d1, d2))
for idx in range(len(boundary)):  # use 'idx' so the grid loops below do not shadow the loop variable
    label = [[0 for _ in range(n + 1)] for _ in range(n + 1)]
    visited = [[False for _ in range(n + 1)] for _ in range(n + 1)]
    s1, s2, s3, s4, s5 = 0, 0, 0, 0, 0
    max_val = 0
    min_val = 987654321
    r, c, d1, d2 = boundary[idx]
    label_5(r, c, d1, d2)
    labeling(r, c, d1, d2)
    for i in range(1, n + 1):
        for j in range(1, n + 1):
if label[i][j] == 1:
s1 += graph[i][j]
elif label[i][j] == 2:
s2 += graph[i][j]
elif label[i][j] == 3:
s3 += graph[i][j]
elif label[i][j] == 4:
s4 += graph[i][j]
elif label[i][j] == 5:
s5 += graph[i][j]
max_val = max(s1, s2, s3, s4, s5)
min_val = min(s1, s2, s3, s4, s5)
res = min(max_val - min_val, res)
print(res)
|
import math
import pandas as pd
import teller as tr
import numpy as np
from sklearn import datasets
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer, load_wine, load_iris, make_classification
breast_cancer = load_breast_cancer()
Z = breast_cancer.data
t = breast_cancer.target
np.random.seed(123)
X_train, X_test, y_train, y_test = train_test_split(Z, t, test_size=0.2)
print(X_train.shape)
print(X_test.shape)
clf1 = ExtraTreesClassifier(n_estimators=250,
max_features=int(math.sqrt(X_train.shape[1])),
random_state=24869)
clf1.fit(X_train, y_train)
# creating the explainer
expr1 = tr.Explainer(obj=clf1, y_class=1, normalize=False)
# fitting the explainer (for heterogeneity of effects only)
expr1.fit(X_test, y_test, X_names=breast_cancer.feature_names,
method="avg")
# summary of results for the model
print(expr1.summary())
# confidence intervals and tests on marginal effects (Jackknife)
expr1.fit(X_test, y_test, X_names=breast_cancer.feature_names,
method="ci")
# summary of results for the model
print(expr1.summary())
|
from skidl import *
# Create input & output voltages and ground reference.
vin, vout, gnd = Net("VI"), Net("VO"), Net("GND")
# Create two resistors.
r1, r2 = 2 * Part("Device", "R", TEMPLATE, footprint="Resistors_SMD:R_0805")
r1.value = "1K" # Set upper resistor value.
r2.value = "500" # Set lower resistor value.
# Connect the nets and resistors.
vin += r1[1] # Connect the input to the upper resistor.
gnd += r2[2] # Connect the lower resistor to ground.
vout += r1[2], r2[1] # Output comes from the connection of the two resistors.
print(generate_netlist())
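# For reference: with R1 = 1K on top and R2 = 500 on the bottom, the divider ratio is
# VO/VI = R2 / (R1 + R2) = 500 / 1500, i.e. about one third (a hypothetical 3.3 V input
# gives roughly 1.1 V out).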
|
'''
Created on Dec 16, 2018
@author: gsnyder
Delete a tag from a project
'''
from blackduck.HubRestApi import HubInstance
import argparse
parser = argparse.ArgumentParser("Delete a tag from a project")
parser.add_argument("project_name")
parser.add_argument("tag")
args = parser.parse_args()
hub = HubInstance()
project_list = hub.get_projects(parameters={"q":"name:{}".format(args.project_name)})
if 'totalCount' in project_list and project_list['totalCount'] > 0:
project_tags_url = hub.get_tags_url(project_list['items'][0])
delete_tag_url = project_tags_url + "/{}".format(args.tag)
print("Deleting tag {} from project {} using url: {}".format(args.tag, args.project_name, delete_tag_url))
response = hub.execute_delete(delete_tag_url)
if response.status_code == 204:
print("Successfully deleted tag {} from project {}".format(args.tag, args.project_name))
else:
print("Failed to delete tag {} from project {} due to unknown reason, response status code was {}".format(
args.tag, args.project_name, response.status_code))
else:
print("Count not find project {}".format(args.project_name))
|
"""Collection of Ivy loss functions."""
# local
import ivy
from typing import Optional, Union
from ivy.func_wrapper import to_native_arrays_and_back
# Extra #
# ------#
@to_native_arrays_and_back
def cross_entropy(
true: Union[ivy.Array, ivy.NativeArray],
pred: Union[ivy.Array, ivy.NativeArray],
axis: Optional[int] = -1,
epsilon: Optional[float] = 1e-7,
*,
out: Optional[ivy.Array] = None
) -> ivy.Array:
"""Computes cross-entropy between predicted and true discrete distributions.
Parameters
----------
true
input array containing true labels.
pred
input array containing the predicted labels.
axis
the axis along which to compute the cross-entropy. If axis is ``-1``,
the cross-entropy will be computed along the last dimension. Default: ``-1``.
epsilon
a float in [0.0, 1.0] specifying the amount of smoothing when calculating
the loss. If epsilon is ``0``, no smoothing will be applied. Default: ``1e-7``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The cross-entropy loss between the given distributions
Examples
--------
>>> x = ivy.array([0, 0, 1, 0])
>>> y = ivy.array([0.25, 0.25, 0.25, 0.25])
>>> print(ivy.cross_entropy(x, y))
ivy.array(1.3862944)
>>> z = ivy.array([0.1, 0.1, 0.7, 0.1])
>>> print(ivy.cross_entropy(x, z))
ivy.array(0.35667497)
"""
pred = ivy.clip(pred, epsilon, 1 - epsilon)
log_pred = ivy.log(pred)
return ivy.negative(ivy.sum(log_pred * true, axis, out=out), out=out)
@to_native_arrays_and_back
def binary_cross_entropy(
true: Union[ivy.Array, ivy.NativeArray],
pred: Union[ivy.Array, ivy.NativeArray],
epsilon: Optional[float] = 1e-7,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Computes the binary cross entropy loss.
Parameters
----------
true
input array containing true labels.
pred
input array containing Predicted labels.
epsilon
a float in [0.0, 1.0] specifying the amount of smoothing when calculating the
loss. If epsilon is ``0``, no smoothing will be applied. Default: ``1e-7``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The binary cross entropy between the given distributions.
Functional Examples
-------------------
With :code:`ivy.Array` input:
>>> x = ivy.array([0, 1, 0, 0])
>>> y = ivy.array([0.2, 0.8, 0.3, 0.8])
>>> z = ivy.binary_cross_entropy(x, y)
>>> print(z)
ivy.array([0.2231, 0.2231, 0.3567, 1.6094])
>>> x = ivy.array([[0, 1, 0, 0]])
>>> y = ivy.array([[0.6, 0.2, 0.7, 0.3]])
>>> z = ivy.binary_cross_entropy(x, y, epsilon=1e-3)
>>> print(z)
ivy.array([[0.9163, 1.6094, 1.2040, 0.3567]])
With :code:`ivy.NativeArray` input:
>>> x = ivy.native_array([0, 1, 0, 1])
>>> y = ivy.native_array([0.2, 0.7, 0.2, 0.6])
>>> z = ivy.binary_cross_entropy(x, y)
>>> print(z)
ivy.array([0.2231, 0.3567, 0.2231, 0.5108])
With a mix of :code:`ivy.Array` and :code:`ivy.NativeArray` inputs:
>>> x = ivy.array([0, 0, 1, 1])
>>> y = ivy.native_array([0.1, 0.2, 0.8, 0.6])
>>> z = ivy.binary_cross_entropy(x, y)
>>> print(z)
ivy.array([0.1054, 0.2231, 0.2231, 0.5108])
With :code:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1, 0, 0]),
>>> b=ivy.array([0, 0, 1]))
>>> y = ivy.Container(a=ivy.array([0.6, 0.2, 0.3]),
>>> b=ivy.array([0.8, 0.2, 0.2]))
>>> z = ivy.binary_cross_entropy(x, y)
>>> print(z)
{
a: ivy.array([0.5108, 0.2231, 0.3567]),
b: ivy.array([1.6094, 0.2231, 1.6094])
}
With a mix of :code:`ivy.Array` and :code:`ivy.Container` inputs:
>>> x = ivy.array([1 , 1, 0])
>>> y = ivy.Container(a=ivy.array([0.7, 0.8, 0.2]))
>>> z = ivy.binary_cross_entropy(x, y)
>>> print(z)
{
a: ivy.array([0.3567, 0.2231, 0.2231])
}
Instance Method Examples
------------------------
Using :code:`ivy.Array` instance method:
>>> x = ivy.array([1, 0, 0, 0])
>>> y = ivy.array([0.8, 0.2, 0.2, 0.2])
    >>> z = x.binary_cross_entropy(y)
>>> print(z)
ivy.array([0.2231, 0.2231, 0.2231, 0.2231])
"""
pred = ivy.clip(pred, epsilon, 1 - epsilon)
return ivy.negative(
ivy.add(ivy.log(pred) * true, ivy.log(1 - pred) * (1 - true), out=out), out=out
)
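# Sanity-check sketch (not part of the library): the element-wise loss is
# -(t*log(p) + (1 - t)*log(1 - p)); for the first docstring example:
#   import math
#   [-(t * math.log(p) + (1 - t) * math.log(1 - p))
#    for t, p in zip([0, 1, 0, 0], [0.2, 0.8, 0.3, 0.8])]
#   # -> approximately [0.2231, 0.2231, 0.3567, 1.6094]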
@to_native_arrays_and_back
def sparse_cross_entropy(
true: Union[ivy.Array, ivy.NativeArray],
pred: Union[ivy.Array, ivy.NativeArray],
axis: Optional[int] = -1,
epsilon: Optional[float] = 1e-7,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Computes sparse cross entropy between logits and labels.
Parameters
----------
true
input array containing the true labels as integer class indices.
pred
input array containing the predicted probabilities.
axis
the axis along which to compute the cross-entropy. If axis is ``-1``, the
cross-entropy will be computed along the last dimension. Default: ``-1``.
epsilon
a float in [0.0, 1.0] specifying the amount of smoothing when calculating the
loss. If epsilon is ``0``, no smoothing will be applied. Default: ``1e-7``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The sparse cross-entropy loss between the given distributions
Functional Examples
-------------------
With :code:`ivy.Array` input:
>>> x = ivy.array([2])
>>> y = ivy.array([0.1, 0.1, 0.7, 0.1])
>>> print(ivy.sparse_cross_entropy(x, y))
ivy.array([0.35667497])
>>> x = ivy.array([3])
>>> print(ivy.sparse_cross_entropy(x, y))
ivy.array([2.3025851])
>>> x = ivy.array([2, 3])
>>> print(ivy.sparse_cross_entropy(x, y))
ivy.array([0.35667497, 2.3025851])
With :code:`ivy.NativeArray` input:
>>> x = ivy.native_array([4])
>>> y = ivy.native_array([0.1, 0.2, 0.1, 0.1, 0.5])
>>> print(ivy.sparse_cross_entropy(x, y))
ivy.array([0.693])
With :code:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([4]))
>>> y = ivy.Container(a=ivy.array([0.1, 0.2, 0.1, 0.1, 0.5]))
>>> print(ivy.sparse_cross_entropy(x, y))
{
a: ivy.array([0.693])
}
With a mix of :code:`ivy.Array` and :code:`ivy.NativeArray` inputs:
>>> x = ivy.array([0])
>>> y = ivy.native_array([0.1, 0.2, 0.6, 0.1])
>>> print(ivy.sparse_cross_entropy(x,y))
ivy.array([2.3])
With a mix of :code:`ivy.Array` and :code:`ivy.Container` inputs:
>>> x = ivy.array([0])
>>> y = ivy.Container(a=ivy.array([0.1, 0.2, 0.6, 0.1]))
>>> print(ivy.sparse_cross_entropy(x,y))
{
a: ivy.array([2.3])
}
Instance Method Examples
------------------------
With :code:`ivy.Array` input:
>>> x = ivy.array([2])
>>> y = ivy.array([0.1, 0.1, 0.7, 0.1])
>>> print(x.sparse_cross_entropy(y))
ivy.array([0.357])
With :code:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([2]))
>>> y = ivy.Container(a=ivy.array([0.1, 0.1, 0.7, 0.1]))
>>> print(x.sparse_cross_entropy(y))
{
a: ivy.array([0.357])
}
"""
true = ivy.one_hot(true, pred.shape[axis])
return cross_entropy(true, pred, axis, epsilon, out=out)
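# Equivalence sketch (illustrative, assuming an ivy backend is set): a class
# index here matches the dense one-hot cross-entropy:
#   pred = ivy.array([0.1, 0.1, 0.7, 0.1])
#   ivy.sparse_cross_entropy(ivy.array([2]), pred)
#   # matches ivy.cross_entropy(ivy.array([0, 0, 1, 0]), pred)  # ~0.3567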
|
#!/usr/bin/python
"""
(C) Copyright 2020-2022 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import time
import random
import threading
import copy
from osa_utils import OSAUtils
from daos_utils import DaosCommand
from dmg_utils import check_system_query_status
from exception_utils import CommandFailure
from test_utils_pool import TestPool
from test_utils_base import LabelGenerator
from apricot import skipForTicket
import queue
class OSAOfflineParallelTest(OSAUtils):
# pylint: disable=too-many-ancestors
"""
Test Class Description: This test runs
daos_server offline drain, reintegration, and
extend test cases in parallel.
:avocado: recursive
"""
def setUp(self):
"""Set up for test case."""
super().setUp()
self.dmg_command = self.get_dmg_command()
self.daos_command = DaosCommand(self.bin)
self.ior_test_sequence = self.params.get("ior_test_sequence",
'/run/ior/iorflags/*')
# Start an additional server.
self.extra_servers = self.params.get("test_servers",
"/run/extra_servers/*")
self.test_oclass = self.params.get("oclass", '/run/test_obj_class/*')
self.out_queue = queue.Queue()
self.dmg_command.exit_status_exception = True
self.server_boot = None
def dmg_thread(self, action, action_args, results):
"""Generate different dmg command related to OSA.
Args:
action_args(dict) : {action: {"puuid":
pool[val].uuid,
"rank": rank,
"target": t_string,
"action": action,}
results (queue) : dmg command output queue.
"""
dmg = copy.copy(self.dmg_command)
try:
if action == "reintegrate":
text = "Waiting for rebuild to complete"
time.sleep(3)
self.print_and_assert_on_rebuild_failure(text)
# For each action, read the values from the
# dictionary.
# example {"exclude" : {"puuid": self.pool, "rank": rank
# "target": t_string, "action": exclude}}
# getattr is used to obtain the method in dmg object.
# eg: dmg -> pool_exclude method, then pass arguments like
# puuid, rank, target to the pool_exclude method.
if action == "exclude" and self.server_boot is True:
ranks = action_args[action][1]
getattr(dmg, "system stop --ranks={}".format(ranks))
output = "Stopping the rank : {}".format(ranks)
self.print_and_assert_on_rebuild_failure(output)
getattr(dmg, "system start --ranks={}".format(ranks))
self.print_and_assert_on_rebuild_failure(output)
else:
getattr(dmg, "pool_{}".format(action))(**action_args[action])
except CommandFailure as _error:
results.put("{} failed".format(action))
def run_offline_parallel_test(self, num_pool, data=False, oclass=None):
"""Run multiple OSA commands in parallel with or without data.
Args:
num_pool (int) : total pools to create for testing purposes.
data (bool) : whether to write data to the pool before
running the OSA commands. Defaults to False.
oclass (str) : DAOS object class (RP_2G1, etc.).
"""
# Create a pool
label_generator = LabelGenerator()
pool = {}
pool_uuid = []
target_list = []
if oclass is None:
oclass = self.ior_cmd.dfs_oclass.value
# Exclude target : random two targets (target idx : 0-7)
n = random.randint(0, 6) #nosec
target_list.append(n)
target_list.append(n+1)
t_string = "{},{}".format(target_list[0], target_list[1])
# Exclude rank 2.
rank = 2
test_seq = self.ior_test_sequence[0]
for val in range(0, num_pool):
pool[val] = TestPool(
context=self.context, dmg_command=self.get_dmg_command(),
label_generator=label_generator)
pool[val].get_params(self)
pool[val].create()
self.pool = pool[val]
pool_uuid.append(self.pool.uuid)
# Use only pool UUID while running the test.
self.pool.use_label = False
self.pool.set_property("reclaim", "disabled")
if data:
self.run_ior_thread("Write", oclass, test_seq)
if oclass != "S1":
self.run_mdtest_thread()
# if self.test_during_aggregation is set,
# Create another container and run the IOR
# command using the second container.
if self.test_during_aggregation is True:
self.run_ior_thread("Write", oclass, test_seq)
# Start the additional servers and extend the pool
self.log.info("Extra Servers = %s", self.extra_servers)
self.start_additional_servers(self.extra_servers)
# Give some time for the additional servers to come up.
for retry in range(0, 10):
scan_info = self.get_dmg_command().system_query()
if not check_system_query_status(scan_info):
if retry == 9:
self.fail("One or more servers not in expected status")
else:
break
# Exclude and reintegrate the pool_uuid, rank and targets
for val in range(0, num_pool):
self.pool = pool[val]
self.pool.display_pool_daos_space("Pool space: Beginning")
pver_begin = self.get_pool_version()
self.log.info("Pool Version at the beginning %s", pver_begin)
# If we need to trigger aggregation on pool 1, delete
# the second container which has IOR data.
if self.test_during_aggregation is True and val == 0:
self.delete_extra_container(self.pool)
# Create the threads here
threads = []
# Action dictionary with OSA dmg command parameters
action_args = {
"drain": {"pool": self.pool.uuid, "rank": rank,
"tgt_idx": None},
"exclude": {"pool": self.pool.uuid, "rank": (rank + 1),
"tgt_idx": t_string},
"reintegrate": {"pool": self.pool.uuid, "rank": (rank + 1),
"tgt_idx": t_string},
"extend": {"pool": self.pool.uuid, "ranks": (rank + 2),
"scm_size": self.pool.scm_size,
"nvme_size": self.pool.nvme_size}
}
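# Dispatch note: for each key above, dmg_thread ends up calling
# getattr(dmg, "pool_<action>")(**action_args[<action>]), e.g. the "drain"
# entry becomes dmg.pool_drain(pool=..., rank=..., tgt_idx=None).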
for action in sorted(action_args):
# Add a dmg thread
process = threading.Thread(target=self.dmg_thread,
kwargs={"action": action,
"action_args":
action_args,
"results":
self.out_queue})
process.start()
threads.append(process)
# Wait to finish the threads
for thrd in threads:
thrd.join()
time.sleep(5)
# Check the queue for any failure.
tmp_list = list(self.out_queue.queue)
for failure in tmp_list:
if "FAIL" in failure:
self.fail("Test failed : {0}".format(failure))
for val in range(0, num_pool):
self.pool = pool[val]
display_string = "Pool{} space at the End".format(val)
self.pool.display_pool_daos_space(display_string)
self.is_rebuild_done(3)
self.assert_on_rebuild_failure()
pver_end = self.get_pool_version()
self.log.info("Pool Version at the End %s", pver_end)
if self.server_boot is True:
self.assertTrue(pver_end >= 17,
"Pool Version Error: at the end")
else:
self.assertTrue(pver_end >= 25,
"Pool Version Error: at the end")
# Finally run IOR to read the data and perform daos_container_check
for val in range(0, num_pool):
self.pool = pool[val]
if data:
self.run_ior_thread("Read", oclass, test_seq)
if oclass != "S1":
self.run_mdtest_thread()
self.container = self.pool_cont_dict[self.pool][0]
kwargs = {"pool": self.pool.uuid,
"cont": self.container.uuid}
output = self.daos_command.container_check(**kwargs)
self.log.info(output)
def test_osa_offline_parallel_test(self):
"""
JIRA ID: DAOS-4752
Test Description: Runs multiple OSA commands in parallel.
:avocado: tags=all,daily_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa,checksum
:avocado: tags=offline_parallel,offline_parallel_basic_test
"""
self.log.info("Offline Parallel Test: Basic Test")
self.run_offline_parallel_test(1, data=True)
def test_osa_offline_parallel_test_without_csum(self):
"""
JIRA ID: DAOS-7161
Test Description: Runs multiple OSA commands in parallel
without enabling checksum.
:avocado: tags=all,full_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa
:avocado: tags=offline_parallel,offline_parallel_without_csum
"""
self.test_with_checksum = self.params.get("test_with_checksum",
'/run/checksum/*')
self.log.info("Offline Parallel Test: Without Checksum")
self.run_offline_parallel_test(1, data=True)
def test_osa_offline_parallel_test_rank_boot(self):
"""
JIRA ID: DAOS-7161
Test Description: Runs multiple OSA commands in parallel
with a rank rebooted using system stop/start.
:avocado: tags=all,full_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa
:avocado: tags=offline_parallel,offline_parallel_srv_rank_boot
"""
self.test_with_checksum = self.params.get("test_with_checksum",
'/run/checksum/*')
self.server_boot = self.params.get("flags",
'/run/system_stop_start/*')
self.log.info("Offline Parallel Test: Restart a rank")
self.run_offline_parallel_test(1, data=True)
@skipForTicket("DAOS-7195,DAOS-7247")
def test_osa_offline_parallel_test_with_aggregation(self):
"""
JIRA ID: DAOS-7161
Test Description: Runs multiple OSA commands in parallel
with aggregation turned on.
:avocado: tags=all,full_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa
:avocado: tags=offline_parallel,offline_parallel_with_aggregation
"""
self.test_during_aggregation = self.params.get("test_with_aggregation",
'/run/aggregation/*')
self.log.info("Offline Parallel Test : Aggregation")
self.run_offline_parallel_test(1, data=True)
def test_osa_offline_parallel_test_oclass(self):
"""
JIRA ID: DAOS-7161
Test Description: Runs multiple OSA commands in parallel
with different object class.
:avocado: tags=all,full_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa
:avocado: tags=offline_parallel,offline_parallel_oclass
"""
self.log.info("Offline Parallel Test : OClass")
# Presently, the script is limited and supports only one extra
# object class testing. We are testing S1 apart from RP_2G1.
self.run_offline_parallel_test(1, data=True,
oclass=self.test_oclass[0])
|
# Copyright International Business Machines Corp, 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
import json
import os
from os import path
import socket
import fnmatch
from nextgen_rc_config import NextGenTemplate, NextGenConfig, GetNextGenConfigs
class RCInstance:
def __init__(self):
self.machineId = ""
self.name = ""
self.result = ""
self.status = ""
self.privateIpAddress = ""
self.launchtime = 0
self.message = ""
self.reqId = ""
self.retId = ""
self.template = ""
self.rcAccount = ""
def copy(self, inst):
self.machineId = inst.machineId
self.name = inst.name
self.result = inst.result
self.status = inst.status
self.privateIpAddress = inst.privateIpAddress
self.launchtime = inst.launchtime
self.message = inst.message
self.reqId = inst.reqId
self.retId = inst.retId
self.template = inst.template
self.rcAccount = inst.rcAccount
def populate(self, data):
if 'machineId' in data:
self.machineId = data['machineId']
if 'name' in data:
self.name = data['name']
if 'result' in data:
self.result = data['result']
if 'status' in data:
self.status = data['status']
if 'privateIpAddress' in data:
self.privateIpAddress = data['privateIpAddress']
if 'launchtime' in data:
self.launchtime = data['launchtime']
if 'message' in data:
self.message = data['message']
if 'reqId' in data:
self.reqId = data['reqId']
if 'retId' in data:
self.retId = data['retId']
if 'template' in data:
self.template = data['template']
if 'rcAccount' in data:
self.rcAccount = data['rcAccount']
@property
def machineId(self):
return self._machineId
@machineId.setter
def machineId(self, value):
self._machineId = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def result(self):
return self._result
@result.setter
def result(self, value):
self._result = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def privateIpAddress(self):
return self._privateIpAddress
@privateIpAddress.setter
def privateIpAddress(self, value):
self._privateIpAddress = value
@property
def launchtime(self):
return self._launchtime
@launchtime.setter
def launchtime(self, value):
self._launchtime = value
@property
def message(self):
return self._message
@message.setter
def message(self, value):
self._message = value
@property
def reqId(self):
return self._reqId
@reqId.setter
def reqId(self, value):
self._reqId = value
@property
def retId(self):
return self._retId
@retId.setter
def retId(self, value):
self._retId = value
@property
def template(self):
return self._template
@template.setter
def template(self, value):
self._template = value
@property
def rcAccount(self):
return self._rcAccount
@rcAccount.setter
def rcAccount(self, value):
self._rcAccount = value
class RcInOut:
def __init__(self, dirname=""):
self.dirname = dirname
providerName = os.environ.get("PROVIDER_NAME")
if providerName is None or len(providerName) == 0:
providerName = 'ibmcloudgen2'
self.statusFile = providerName + "-db.json"
def getVmList(self, instanceList, reqId, retId, templateId):
data = []
for instance in instanceList:
data.append({
'name': instance.name,
'machineId': instance.machineId,
'result': instance.result,
'status': instance.status,
'privateIpAddress': instance.privateIpAddress,
'launchtime': instance.launchtime,
'message': instance.message,
'reqId': reqId if len(reqId) != 0 else instance.reqId,
'retId': retId if len(retId) != 0 else instance.retId,
'template': instance.template if len(instance.template) != 0 else templateId,
'rcAccount': instance.rcAccount,
})
return data
def getDictFromVmList(self, requestId, instanceList, templateId, retId):
data = {}
data['requestId'] = requestId
data['templateId'] = templateId
data['machines'] = self.getVmList(instanceList, requestId, retId, templateId)
return data
def getFullPath(self, filename):
if self.dirname == "":
return filename
else:
return (self.dirname + "/" + self.statusFile)
def dumpVmListToFile(self, requestId, rcInstanceList, templateId, retId):
full_path = self.getFullPath(requestId)
requestsObj = {}
requestList = []
data = self.getDictFromVmList(requestId, rcInstanceList, templateId, retId)
if not os.path.exists(full_path):
requestList.append(data)
requestsObj['requests'] = requestList
with open(full_path, 'w+') as outfile:
json.dump(requestsObj, outfile, indent=2)
outfile.close()
else:
isUpdate = False
fp = open(full_path, "r")
if fp.mode == 'r':
contents = fp.read()
theJson = json.loads(contents)
if "requests" in theJson:
for req in theJson['requests']:
if requestId == req['requestId']:
isUpdate = True
req['machines'] = self.getVmList(rcInstanceList, requestId, retId, templateId)
fp.close()
if not isUpdate:
if "requests" in theJson:
reqList = theJson['requests']
reqList.append(data)
else:
requestList.append(data)
theJson['requests'] = requestList
fp = open(full_path, "w")
json.dump(theJson, fp, indent=2)
fp.close()
def updateVmListToFile(self, requestId, rcInstanceList, retId):
full_path = self.getFullPath(requestId)
requestsObj = {}
requestList = []
machineIds = []
tmpInstList = []
for vm in rcInstanceList:
machineIds.append(vm.machineId)
if not os.path.exists(full_path):
logging.info("Error: "+ full_path + "do not exist")
return
else:
fp = open(full_path, "r")
if fp.mode == 'r':
contents = fp.read()
theJson = json.loads(contents)
if "requests" in theJson:
for req in theJson['requests']:
if requestId == req['requestId']:
if "machines" in req:
for vm in req['machines']:
machineId = vm['machineId']
machineName = vm['name']
if machineId not in machineIds:
tmpInstList.append(vm)
templateId = req['templateId']
rcInstList = self.getVmList(rcInstanceList, requestId, retId, templateId)
if tmpInstList:
rcInstList.extend(tmpInstList)
req['machines'] = rcInstList
fp.close()
fp = open(full_path, "w")
json.dump(theJson, fp, indent=2)
fp.close()
def getVmListFromFile(self, reqId):
full_path = self.getFullPath(reqId)
instanceList = []
templateId = ""
requestId = ""
if not os.path.exists(full_path):
return templateId, instanceList, requestId
fp = open(full_path, "r")
if fp.mode == 'r':
contents = fp.read()
theJson = json.loads(contents)
if "requests" in theJson:
for req in theJson['requests']:
requestId = req['requestId']
if fnmatch.fnmatch(requestId, reqId):
if "machines" in req:
for vm in req['machines']:
rcInstance = RCInstance()
rcInstance.populate(vm)
instanceList.append(rcInstance)
if "templateId" in req:
templateId = req['templateId']
break
fp.close()
return templateId, instanceList, requestId
def readAllRequests(self):
full_path = self.getFullPath("")
data = {}
if not os.path.exists(full_path):
return data
fp = open(full_path, "r")
if fp.mode == 'r':
contents = fp.read()
data = json.loads(contents)
fp.close()
return data
def writeAllRequests(self, data):
full_path = self.getFullPath("")
fp = open(full_path, "w")
json.dump(data, fp, indent=2)
fp.close()
def getVmListFromJson(self, filename):
full_path = self.getFullPath(filename)
instanceList = []
fp = open(full_path, "r")
if fp.mode == 'r':
contents = fp.read()
theJson = json.loads(contents)
if "machines" in theJson:
for vm in theJson['machines']:
rcInstance = RCInstance()
rcInstance.populate(vm)
instanceList.append(rcInstance)
fp.close()
return instanceList
def getMultiVmListFromFile(self, retId):
full_path = self.getFullPath(retId)
logging.info("looking up retId " + retId + "from db " + full_path)
instanceList = []
fp = open(full_path, "r")
if fp.mode == 'r':
contents = fp.read()
theJson = json.loads(contents)
if "requests" in theJson:
for req in theJson['requests']:
templateId = ""
if "machines" in req:
for vm in req['machines']:
if "retId" in vm:
if retId == vm['retId']:
rcInstance = RCInstance()
rcInstance.populate(vm)
instanceList.append(rcInstance)
fp.close()
return instanceList
def GetLocalHostnameAndIp():
host_name = ""
try:
host_name = socket.gethostname()
except Exception:
logging.error("Failed to get local hostname")
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
host_ip = ""
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
host_ip = s.getsockname()[0]
except Exception:
logging.error("Fail to get my local ip")
finally:
s.close()
return host_name, host_ip
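# Usage sketch (hypothetical request data; PROVIDER_NAME falls back to
# 'ibmcloudgen2' when unset):
#   inst = RCInstance()
#   inst.populate({"machineId": "m-1", "name": "host-1", "status": "running"})
#   db = RcInOut("/tmp")
#   db.dumpVmListToFile("req-1", [inst], "template-1", "ret-1")
#   template_id, instances, request_id = db.getVmListFromFile("req-1")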
|
from terial import models
from terial.database import session_scope
from terial.materials import loader
def main():
with session_scope() as sess:
materials = (sess.query(models.Material)
.filter_by(type=models.MaterialType.MDL)
.all())
for material in materials:
bmat = loader.material_to_brender(material)
print(bmat)
del bmat
if __name__ == '__main__':
main()
|
import os
import sys
from os3.fs.directory import DirList
from os3.fs.entry import get_path
from os3.fs.utils import get_node
from os3.utils.decorators import withrepr
@withrepr(lambda x: DirList().print_format())
def ls(path='', depth=None, **kwargs):
return DirList(path).ls(depth, **kwargs)
ls.filter = DirList().filter
def symlink(source, link_name):
return get_node(source).symlink(link_name)
def mkdir(path, mode=511, exists_ok=False):
if sys.version_info >= (3,0):
return os.makedirs(get_path(path), mode, exists_ok)
return os.makedirs(get_path(path), mode)
def cp(src, dst, symlinks=False, ignore=None):
return get_node(src).copy(dst, symlinks, ignore)
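# Usage sketch (illustrative paths; exact return values depend on the os3
# package's node implementation):
#   mkdir("/tmp/os3_demo", exists_ok=True)
#   cp("/tmp/os3_demo", "/tmp/os3_demo_copy")
#   print(ls("/tmp/os3_demo_copy"))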
|
# This program calculates a person's BMI from the figures provided.
# The inputs are the person's weight in kilograms and height in metres.
# The output is their weight divided by the square of their height in metres.
weight = float(input("Enter your weight in kg: "))
height = float(input("Enter your height in metres: "))
heightsquared = (height * height)
bmi = weight / heightsquared
bmi2 = (round(bmi, 2))
print("Your BMI is", bmi2, "based on the figures you have provided")
|
import bz2
import os
import pickle
from faker.providers import BaseProvider
with bz2.open(os.path.join(os.path.dirname(__file__), "substances.bz2"), "rb") as f:
SUBSTANCES = pickle.load(f)
class SubstanceFaker(BaseProvider):
substances = SUBSTANCES
def sid(self):
return f"DTXSID{self.random_int(2000000, 3000000)}"
def casrn(self):
return self.random_element(self.substances)
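# Usage sketch (assumes the faker package is installed and substances.bz2
# ships next to this module):
#   from faker import Faker
#   fake = Faker()
#   fake.add_provider(SubstanceFaker)
#   fake.sid()    # e.g. "DTXSID2471208" (random suffix in 2000000-3000000)
#   fake.casrn()  # a random entry from the pickled SUBSTANCES list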
|
"""
Input/output functions for the word_graphs API, including
functions for storing and retrieving word graphs and subgraphs.
Functions:
get_integer, get_word_graph_filename, get_word_subgraph_filename,
store_word_graph, retrieve_word_graph, retrieve_word_graph_statistics,
store_word_subgraphs, retrieve_word_subgraphs,
retrieve_subgraph_statistics, store_subgraph_statistics,
store_external_paths
"""
import os
import re
from itertools import chain
from word_explorer.objects import Word
from word_explorer.io import store_data, retrieve_data
OUTPUT_FOLDER = "word_graphs"
def get_integer(string):
integers = re.findall(r"\d+", string)
if not integers:
raise NotImplementedError
else:
return int(integers[-1])
def add_output_folder(file_name):
if not file_name.startswith(OUTPUT_FOLDER + os.sep):
file_name = os.path.join(OUTPUT_FOLDER, file_name)
return file_name
def get_word_graph_filename(ascending_order, size, name_base, name_suffix=""):
file_name = name_base + str(size) + "_" + name_suffix + ".txt"
if ascending_order:
file_name = "ao" + file_name
return file_name
def get_word_subgraph_filename(ascending_order, size, subgraph_type,
name_base="word_graph_size", name_suffix="",
sorted_=False):
file_name = get_word_graph_filename(
ascending_order, size, name_base, name_suffix)[:-4]
file_name += "_" + subgraph_type
if not subgraph_type.endswith("s"):
file_name += "s"
file_name = file_name + "_sorted" if sorted_ else file_name
return file_name + ".txt"
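# Naming example (traced from the helpers above): with ascending_order=True,
# size=4 and the default name_base/name_suffix,
# get_word_subgraph_filename(True, 4, "square") returns
# "aoword_graph_size4__squares.txt" (the double underscore comes from the
# empty name_suffix).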
def store_word_graph(word_graph, file_name=None):
from .word_graphs import WordGraph, expand_word_graph # For circular import
if file_name is None:
file_name = word_graph.file_name
file_name = add_output_folder(file_name)
if isinstance(word_graph, WordGraph):
vertex_count = word_graph.vertex_count
edge_count = word_graph.edge_count
words = list(word_graph.directed_neighborhoods.keys())
else:
vertex_count = len(expand_word_graph(word_graph))
edge_count = sum(len(neighbors) for neighbors in word_graph.values())
words = list(word_graph)
word_graph_data = ["Vertex count: " + str(vertex_count),
"Edge count: " + str(edge_count) + "\n\n"]
words.sort()
for word in words:
if (isinstance(word_graph, WordGraph)
and word_graph.directed_neighborhoods[word]):
neighborhood = word + ": " + str(
word_graph.directed_neighborhoods[word]).replace("\'", "")
elif type(word_graph) == dict and word_graph[word]:
neighborhood = word + ": " + str(word_graph[word]).replace("\'", "")
word_graph_data.append(neighborhood)
store_data(word_graph_data, file_name)
def retrieve_word_graph(ascending_order, size,
name_base="word_graph_size", name_suffix=""):
file_name = add_output_folder(get_word_graph_filename(
ascending_order, size, name_base, name_suffix))
word_graph = {}
for i, line in enumerate(retrieve_data(file_name)):
if i >= 4:
colon_index = line.find(":")
word_graph[Word(line[:colon_index])] = {
Word(word) for word
in line.strip()[colon_index+3:-2].split(", ")}
return word_graph
def retrieve_word_graph_statistics(ascending_order, size,
name_base="word_graph_size", name_suffix=""):
file_name = add_output_folder(get_word_graph_filename(
ascending_order, size, name_base, name_suffix))
for line in retrieve_data(file_name):
if line.startswith("Vertex count"):
vertex_count = get_integer(line)
elif line.startswith("Edge count"):
edge_count = get_integer(line)
return {"vertices": vertex_count, "edges": edge_count}
def store_word_subgraphs(subgraphs, subgraph_type, ascending_order,
size, name_base="word_graph_size", name_suffix=""):
sorted_ = True if type(subgraphs) == dict else False
subgraph_count = (len(subgraphs) if type(subgraphs) == list
else sum(len(subgraph_list) for subgraph_list
in subgraphs.values()))
subgraph_data = [subgraph_type.title() + " subgraph count: "
+ str(subgraph_count) + "\n"]
if (subgraph_type in ["triangle", "3-path", "4-path", "square"] and not sorted_):
subgraph_data += subgraphs
elif sorted_:
for subgraph_class, subgraph_list in subgraphs.items():
subgraph_data.append(str(subgraph_class) + " " + subgraph_type
+ " subgraph count: " + str(len(subgraph_list)))
subgraph_data.append("\n\n")
for subgraph_class, subgraph_list in subgraphs.items():
subgraph_data.append(str(subgraph_class) + ":\n")
subgraph_data += subgraph_list
subgraph_data.append("\n\n")
subgraph_file_name = add_output_folder(get_word_subgraph_filename(
ascending_order, size, subgraph_type,
name_base, name_suffix, sorted_))
store_data(subgraph_data, subgraph_file_name)
def retrieve_word_subgraphs(ascending_order, size, subgraph_type,
name_base="word_graph_size", name_suffix="",
sorted_=False):
subgraph_file_name = add_output_folder(get_word_subgraph_filename(
ascending_order, size, subgraph_type,
name_base, name_suffix, sorted_))
if sorted_:
subgraphs = {}
else:
subgraphs = []
for line in retrieve_data(subgraph_file_name):
line = line.strip()
if sorted_ and line.endswith(":"):
if line.startswith("((") or line.startswith("[("):
# Assumes it's a 'directed structure'
subgraph_class = tuple((int(edge[0]), int(edge[3]))
for edge in line[2:-3].split("), ("))
subgraphs[subgraph_class] = []
else:
subgraph_class = line[:-1]
if (line.startswith("(") or line.startswith("[")):
if sorted_:
square = tuple(Word(word, ascending_order=ascending_order,
optimize=ascending_order)
for word in line[2:-2].split("', '"))
subgraphs[subgraph_class].append(square)
else:
subgraph = tuple(Word(word, ascending_order=ascending_order,
optimize=ascending_order)
for word in line[2:-2].split("', '"))
subgraphs.append(subgraph)
return subgraphs
def retrieve_subgraph_statistics(ascending_order, sizes,
name_base="word_graph_size", name_suffix="",
sorted_squares=True):
from .subgraphs import SUBGRAPH_TYPES # For circular import
statistics = {}
for size in sizes:
statistics[size] = retrieve_word_graph_statistics(
ascending_order, size, name_base, name_suffix)
for subgraph_type in SUBGRAPH_TYPES:
sorted_ = True if subgraph_type == "square" and sorted_squares else False
file_name = add_output_folder(get_word_subgraph_filename(
ascending_order, size, subgraph_type,
name_base, name_suffix, sorted_))
statistics[size][subgraph_type] = {}
try:
for line in retrieve_data(file_name):
if "count" in line:
if line.startswith("((") or line.startswith("[("):
subgraph_class = line[:line.find("))")+2]
statistics[size][subgraph_type][subgraph_class] = (
get_integer(line))
else:
statistics[size][subgraph_type]["total"] = (
get_integer(line))
except FileNotFoundError:
pass
return statistics
def store_subgraph_statistics(subgraph_statistics, ascending_order,
name_base="word_graph", name_suffix=""):
file_name = name_base + "_" + name_suffix + "_subgraph_statistics.txt"
file_name = add_output_folder(file_name)
title = ((name_base + "_" + name_suffix).replace("_", " ").title()
+ " Subgraph Statistics")
if ascending_order:
title = "Ascending Order " + title
subgraph_data = ["\n" + title]
for size in subgraph_statistics:
subgraph_data.append("\n\n---------------- Words of size <= "
+ str(size) + " ----------------")
subgraph_data.append(
"\nVertices: " + str(subgraph_statistics[size]["vertices"]))
subgraph_data.append(
"Edges: " + str(subgraph_statistics[size]["edges"]))
for subgraph_type, statistics_dict in subgraph_statistics[size].items():
if isinstance(statistics_dict, dict):
for subgraph_class, value in statistics_dict.items():
if subgraph_class == "total":
subgraph_data.append("\n" + subgraph_type.title()
+ "s: " + str(value))
else:
subgraph_data.append(subgraph_class.title() + " "
+ subgraph_type + "s: " + str(value))
store_data(subgraph_data, file_name)
def store_external_paths(external_paths_container, ascending_order, name_base):
file_name = name_base + "_minimal_external_paths.txt"
if ascending_order:
file_name = "ao" + file_name
file_name = add_output_folder(file_name)
external_path_data = []
for size, minimal_external_paths in external_paths_container.items():
identifier = get_word_graph_filename(ascending_order, size, name_base)
external_path_data.append("\n\n" + identifier + "\n")
for vertex1, paths_by_vertex in minimal_external_paths.items():
for vertex2, paths in paths_by_vertex.items():
external_path_data.append("\n" + vertex1 + " --> " + vertex2 + ":")
for path in paths:
external_path_data.append(path)
store_data(external_path_data, file_name)
|
def solution(N):
# Binary gap: the longest run of zeros bounded by ones in N's binary form.
# Stripping leading/trailing zeros and splitting on '1' leaves only the zero
# runs, whose lexicographic maximum is also the longest.
return len(max(format(N, 'b').strip('0').split('1')))
if __name__ == "__main__":
X = solution(20)
print(X)
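# Worked example (traced by hand): solution(529) formats 529 as '1000010001';
# splitting on '1' gives ['', '0000', '000', ''], so the longest zero run
# (the binary gap) has length 4.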
|
"""
Quantiphyse - Base class for logging
Copyright (c) 2013-2020 University of Oxford
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
logging.basicConfig(format='%(asctime)s %(levelname)s %(name)s %(message)s', datefmt='%H:%M:%S')
def set_base_log_level(level):
"""
Set the base logging level
"""
logging.getLogger("quantiphyse").setLevel(level)
class LogSource(object):
"""
Base class for anything which wants to log messages
"""
def __init__(self):
logname = "%s.%s" % (self.__module__, self.__class__.__name__)
if not logname.startswith("quantiphyse."):
# Plugins do not come under the quantiphyse namespace but we want them
# to be ancestors of the generic logger
logname = "quantiphyse." + logname
self.logger = logging.getLogger(logname)
def debug(self, *args, **kwargs):
"""
Log a debug level message
"""
self.logger.debug(*args, **kwargs)
def warn(self, *args, **kwargs):
"""
Log a warning
"""
self.logger.warning(*args, **kwargs)
def debug_enabled(self):
return self.logger.getEffectiveLevel() <= logging.DEBUG
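# Usage sketch (hypothetical subclass, shown only to illustrate the logger
# naming convention):
#   class MyWidget(LogSource):
#       def run(self):
#           self.debug("starting")  # logged under quantiphyse.<module>.MyWidget
#   set_base_log_level(logging.DEBUG)
#   MyWidget().run()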
|