from dimagi.ext import jsonobject
from dimagi.utils.logging import notify_exception
from soil.progress import STATES, get_task_status
from soil.util import get_task
class TaskStatus(jsonobject.StrictJsonObject):
# takes on values of soil.progress.STATES
state = jsonobject.IntegerProperty()
progress = jsonobject.ObjectProperty(lambda: TaskStatusProgress)
result = jsonobject.ObjectProperty(lambda: TaskStatusResult)
def is_finished(self):
return self.state in (STATES.success, STATES.failed)
class TaskStatusProgress(jsonobject.StrictJsonObject):
percent = jsonobject.IntegerProperty()
class TaskStatusResult(jsonobject.StrictJsonObject):
match_count = jsonobject.IntegerProperty()
created_count = jsonobject.IntegerProperty()
num_chunks = jsonobject.IntegerProperty()
errors = jsonobject.ListProperty(lambda: TaskStatusResultError)
class TaskStatusResultError(jsonobject.StrictJsonObject):
title = jsonobject.StringProperty()
description = jsonobject.StringProperty()
column = jsonobject.StringProperty()
# usually an int, but field has been hijacked to include other debug info
# search 'row_number=' in tasks.py
# longer-term solution would be to have another field for debug info
rows = jsonobject.ListProperty()
def normalize_task_status_result(result):
if result:
return TaskStatusResult(
match_count=result['match_count'],
created_count=result['created_count'],
num_chunks=result['num_chunks'],
errors=normalize_task_status_result_errors(result),
)
else:
return None
def normalize_task_status_result_errors(result):
"""
result is the return value of do_import
it is important that when changes are made to the return value of do_import
this function remains backwards compatible,
i.e. compatible with old return values of do_import,
because those values are saved directly in the database,
and we need to be able to process them in the future
"""
result_errors = []
for _, columns_to_error_value in result['errors'].items():
for column_name, error_value in columns_to_error_value.items():
result_errors.append(TaskStatusResultError(
title=str(error_value['error']),
description=str(error_value['description']),
column=column_name,
rows=error_value['rows']
))
return result_errors
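# Illustrative sketch of the structure normalize_task_status_result_errors
# expects (hypothetical values, inferred from the loop above): result['errors']
# maps an error type to a mapping of column name -> error details, e.g.
#
#   {'errors': {'InvalidDate': {'dob': {'error': 'InvalidDate',
#                                       'description': 'Unparseable date',
#                                       'rows': [3, 7]}}}}
#
# Each (error type, column) pair becomes one TaskStatusResultError.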
def get_task_status_json(task_id):
try:
task_status = get_task_status(get_task(task_id))
except Exception:
# There was a period of time when the format of the metadata we were setting
# from the task would cause a celery-internal failure
notify_exception(None, "Error fetching task")
return TaskStatus(
state=STATES.failed,
progress=None,
result=TaskStatusResult(errors=[TaskStatusResultError(description='Unknown Failure')]),
)
if task_status.state == STATES.failed:
errors = (
task_status.error if isinstance(task_status.error, (list, tuple))
else [task_status.error]
)
return TaskStatus(
state=task_status.state,
progress=TaskStatusProgress(
percent=task_status.progress.percent,
),
result=TaskStatusResult(errors=[TaskStatusResultError(description=error)
for error in errors]),
)
else:
return TaskStatus(
state=task_status.state,
progress=TaskStatusProgress(
percent=task_status.progress.percent,
),
result=normalize_task_status_result(task_status.result),
)
def make_task_status_success(result):
return TaskStatus(
state=STATES.success,
progress=TaskStatusProgress(
percent=0,
),
result=normalize_task_status_result(result),
)
|
from typing import Callable, Dict
import json
from decimal import Decimal
from collections import namedtuple
import urllib.request
import urllib.parse
import urllib.error
import asyncio
import websockets
# Define urls needed for order book streams
URL_ORDER_BOOK = "https://api.binance.com/api/v1/depth"
URL_ORDER_BOOK_PERP = "https://dapi.binance.com/dapi/v1/depth"
# OrderBook object
OrderBook = namedtuple("OrderBook", "bids asks")
def on_order_book(symbol: str, limit: int = 1000) -> OrderBook:
"""Get the full order book of a pair on Binance through public endpoint.
Parameters
----------
symbol: str
Pair without underscore in between base/quote coin, e.g: BTCETH, ETHBRL.
limit: int
    Maximum depth of the order book; defaults to 1000.
Returns
-------
order_book: OrderBook
"""
bids, asks = list(), list()
attributes = {"symbol": symbol, "limit": limit}
if '_' in symbol:
url = f'{URL_ORDER_BOOK_PERP}?{urllib.parse.urlencode(attributes)}'
else:
url = f'{URL_ORDER_BOOK}?{urllib.parse.urlencode(attributes)}'
try:
req = urllib.request.Request(url, method='GET')
resp = urllib.request.urlopen(req)
res = json.loads(resp.read())
except urllib.error.HTTPError as e:
raise Exception(f'{e.read()}')
for bid in res['bids']:
bids.append((Decimal(bid[0]), Decimal(bid[1])))
for ask in res['asks']:
asks.append((Decimal(ask[0]), Decimal(ask[1])))
order_book = OrderBook(bids, asks)
return order_book
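# Minimal usage sketch (assumes network access to the public Binance endpoint;
# the symbol below is only an example):
#
#   book = on_order_book('BTCUSDT')
#   best_bid_price, best_bid_qty = book.bids[0]
#   best_ask_price, best_ask_qty = book.asks[0]
#   print(best_bid_price, best_ask_price)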
class OrderBookStream:
def __init__(self):
"""Handles order book updates every 1000ms by creating asynchronous
tasks for each streaming object. Map objects to keep track of each
streaming object with the symbol as key.
"""
self._order_books = dict()
self._order_books_perp = dict()
self._sockets = set()
self._tasks = dict()
def get_order_book(self, symbol: str) -> Dict:
return self._order_books[symbol]
def get_order_book_perp(self, symbol: str) -> Dict:
return self._order_books_perp[symbol]
def update_order_book(self, symbol: str, updates: Dict):
"""With incoming stream updates for the order book, update this object
by each depth. Overwrites existing depth level price with update's
quantity or remove the depth level with no remaining quantity.
Parameters
----------
symbol: str
Pair without underscore in between base/quote coin, e.g: BTCETH,
ETHBRL.
updates: Dict
Dict containing order book updates.
"""
# TODO: easiest way to differentiate spots from perpetuals
if '_' in symbol:
book = self._order_books_perp[symbol]
else:
book = self._order_books[symbol]
# initialize the order book.
if len(book['asks']) == 0 and len(book['bids']) == 0:
order_book = on_order_book(symbol)
for (p, q) in order_book.asks:
book['asks'][p] = q
for (p, q) in order_book.bids:
book['bids'][p] = q
# update order book with incoming updates across each depth.
asks, bids = updates['a'], updates['b']
for ask in asks:
p, q = Decimal(ask[0]), Decimal(ask[1])
if q > 0:
book['asks'][p] = q
elif p in book['asks']:
del book['asks'][p]
for bid in bids:
p, q = Decimal(bid[0]), Decimal(bid[1])
if q > 0:
book['bids'][p] = q
elif p in book['bids']:
del book['bids'][p]
def open_stream_order_book(self, symbol: str, callback: Callable):
"""Open order book stream for the given pair, provides callback function
for the asynchronous task for streaming order book object which is used
to process the updated depth levels of the order book.
From Binance's API doc:
The data in each event is the absolute quantity for a price level.
If the quantity is 0, remove the price level. Receiving an event
that removes a price level that is not in your local order book can
happen and is normal.
Parameters
----------
symbol: str
Pair without underscore in between base/quote coin, e.g: BTCETH,
ETHBRL.
callback: Callable
Callback to handle the processing of the stream data.
"""
self._order_books[symbol] = {'bids': {}, 'asks': {}}
url = f'wss://stream.binance.com:9443/ws/{symbol.lower()}@depth'
asyncio.Task(
self.run(url=url, id=f'depth_{symbol.lower()}', callback=callback))
def open_stream_order_book_perp(self, symbol: str, callback: Callable):
"""For perpetual only, open order book stream for the given pair,
provides callback function for the asynchronous task for streaming order
book object which is used to process the updated depth levels of the
order book.
From Binance's API doc:
The data in each event is the absolute quantity for a price level.
If the quantity is 0, remove the price level. Receiving an event
that removes a price level that is not in your local order book can
happen and is normal.
Parameters
----------
symbol: str
Pair without underscore in between base/quote coin, e.g: BTCETH,
ETHBRL.
callback: Callable
Callback to handle the processing of the stream data.
"""
self._order_books_perp[symbol] = {'bids': {}, 'asks': {}}
url = f'wss://dstream.binance.com/stream?streams={symbol.lower()}@depth'
asyncio.Task(self.run(url=url, id=f'depth_perp_{symbol.lower()}',
callback=callback))
async def run(self, url: str, id: str, callback: Callable):
"""Responsible for opening a stream for a given object, such as order
book. Able to handle multiple streams and update the stored objects
from incoming updates. Once the stored objects have been updated, the
provided callback for the stream object is used for process/handle the
updated object.
Parameters
----------
url: str
    Websocket URL of the stream to connect to.
id: str
    Identifier for the streamed object, e.g. 'depth_<symbol>'.
callback: Callable
Callback to handle the processing of the stream data.
"""
# keeping track of streams to avoid duplicates.
if id in self._sockets:
print(f'Warning: socket {id} already opened!')
return
print(f'Starting stream: {url}')
# keep track of opened sockets and async tasks.
# process the updates based on stream object's ID.
async with websockets.connect(url) as socket:
self._sockets.add(id)
while id in self._sockets:
recv_task = asyncio.Task(socket.recv())
self._tasks[id] = recv_task
data = await recv_task
data = json.loads(data)
del self._tasks[id]
if id.find('depth') == 0:
if id.find('depth_perp') == 0:
data = data['data']
symbol = data['s']
self.update_order_book(symbol=symbol, updates=data)
callback(data)
await asyncio.sleep(0.1)
def close(self):
"""Close all streams upon keyboard interruption for all async tasks used
for streaming, also doable by setting a timer for the duration of the
streams.
"""
print('closing all streams')
for key in self._tasks:
self._tasks[key].cancel()
self._sockets.clear()
print('closed all streams')
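# Usage sketch (hypothetical symbol and callback; open_stream_order_book
# creates an asyncio.Task, so it must be called while an event loop is running):
#
#   async def main():
#       stream = OrderBookStream()
#       stream.open_stream_order_book('BTCUSDT', lambda update: print(update['s']))
#       await asyncio.sleep(60)
#       stream.close()
#
#   asyncio.get_event_loop().run_until_complete(main())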
|
import json
import os
import random
import time
import requests
from bs4 import BeautifulSoup
from firebase import init_firebase, updateData
base_url = 'https://movie.naver.com/movie/point/af/list.nhn?&page={}'
init_firebase()
def main():
while True:
for idx in range(1, 1001):
print(f'Page #{idx}')
url = base_url.format(idx)
response = requests.get(url)
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'html.parser')
trs = soup.select('table.list_netizen tbody tr')
for tr in trs:
# review id number
id = tr.find(class_='ac num').text
title = tr.find(class_='title')
# review text
document = title.select_one('br').next_sibling.strip()
# rating score
score = title.select_one('div.list_netizen_score em').text
# if a review with this id has already been crawled, skip it.
if os.path.exists(f'./jsons/{id}.json'):
print(f'{id} is already crawled.')
continue
with open(f'./jsons/{id}.json', 'w', encoding='utf-8') as f:
print(f'{id} has been successfully crawled.')
data = {'document': document, 'score': int(score)}
updateData({ id: data })
json.dump(data, f, ensure_ascii=False, indent=2)
# sleep for a random 1-10 seconds; crawling too fast risks getting blocked.
time.sleep(random.uniform(1.0, 10.0))
else:
raise NotImplementedError
# wait about an hour (new reviews keep accumulating).
time.sleep(3600)
pass
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from PythonMiddleware.graphene import Graphene
from PythonMiddleware.instance import set_shared_graphene_instance
from pprint import pprint
import time
#nodeAddress = "wss://api.cocosbcx.net"
nodeAddress = "ws://test.cocosbcx.net"
#nodeAddress = "ws://127.0.0.1:8049"
gph = Graphene(node=nodeAddress)
set_shared_graphene_instance(gph)
while True:
print('>> info')
pprint(gph.info())
time.sleep(2)
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from rest_framework.parsers import JSONParser
from rest_framework.decorators import api_view
from .models import Camera
import json
# Create your views here.
@login_required
def index(request):
cameras = Camera.objects.all()[:15]
context= {
'cameras': cameras
}
return render(request, 'cameras/index.html', context)
@login_required
def create(request):
return render(request, 'cameras/create.html')
@login_required
def edit(request, id):
camera = get_object_or_404(Camera, pk=id)
return render(request, 'cameras/edit.html', { 'camera': camera })
@login_required
def search(request):
q = request.GET['q']
cameras = Camera.objects.filter(description__contains=q).values('description', 'url')
return JsonResponse({
'cameras': json.dumps(list(cameras))
}, safe=False)
@login_required
@api_view(['POST'])
def store(request):
try:
body = request.data
camera = Camera.objects.create(description=body['description'], url=body['url'])
return JsonResponse({
'message': 'Successfully created a camera.'
}, safe=False)
except Exception as e:
return JsonResponse({
'message': str(e)
}, safe=False, status=400)
@login_required
@api_view(['PATCH'])
def update(request, id):
try:
body = request.data
camera = Camera.objects.get(pk=id)
camera.url = body['url']
camera.description = body['description']
camera.save()
return JsonResponse({
'message': 'Successfully updated a camera.'
}, safe=False)
except Camera.DoesNotExist:
return JsonResponse({
'message': 'Camera does not exist.'
}, safe=False, status=400)
except Exception as e:
return JsonResponse({
'message': str(e)
}, safe=False, status=400)
@login_required
@api_view(['DELETE'])
def delete(request, id):
try:
camera = Camera.objects.get(pk=id)
camera.delete()
return JsonResponse({
'message': 'Successfully deleted a camera.'
})
except Camera.DoesNotExist:
return JsonResponse({
'message': 'Camera does not exist.'
}, safe=False, status=400)
except Exception as e:
return JsonResponse({
'message': str(e)
}, safe=False, status=400)
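# Routing sketch (hypothetical cameras/urls.py; path names are illustrative):
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('create/', views.create, name='create'),
#       path('<int:id>/edit/', views.edit, name='edit'),
#       path('search/', views.search, name='search'),
#       path('store/', views.store, name='store'),
#       path('<int:id>/update/', views.update, name='update'),
#       path('<int:id>/delete/', views.delete, name='delete'),
#   ]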
|
'''
What will the code fragment below print?
'''
counter = 0
for i in range(99, 102):
temp = i
while temp > 0:
counter += 1
temp //= 10
print(counter)
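# A worked answer (assuming print(counter) sits outside both loops): the inner
# while loop counts the decimal digits of i, so counter accumulates
# 2 (for 99) + 3 (for 100) + 3 (for 101) = 8, and the program prints 8.
# If the print were indented inside the for loop, it would print 2, 5, 8 instead.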
|
"""ordia.query.
Usage:
ordia.query iso639-to-q <iso639>
ordia.query get-wikidata-language-codes [options]
ordia.query form-to-iso639 <form>
Options:
--min-count=<min-count> Minimum count [default: 0]
Description:
Functions in this module query the Wikidata Query Service and
thus require Internet access.
Examples
--------
$ python -m ordia.query iso639-to-q en
Q1860
"""
from __future__ import absolute_import, division, print_function
from re import compile
from six import string_types
try:
from functools import lru_cache
except ImportError:
# For Python 2.
from functools32 import lru_cache
import requests
USER_AGENT = 'Ordia'
HEADERS = {'User-Agent': USER_AGENT}
FORM_PATTERN = compile(r'L[1-9]\d*-F[1-9]\d*')
def escape_string(string):
r"""Escape string to be used in SPARQL query.
Parameters
----------
string : str
String to be escaped.
Returns
-------
escaped_string : str
Escaped string.
Examples
--------
>>> escape_string('"hello"')
'\\"hello\\"'
>>> escape_string(r'\"hello"')
'\\\\\\"hello\\"'
"""
return string.replace('\\', '\\\\').replace('"', r'\"')
def form_to_representation_and_iso639(form):
"""Return representation and iso639 for a form.
Parameters
----------
form : str
String for the form identifier.
Returns
-------
representation_and_iso639 : tuple with str or None
    Tuple with two strings, or None if not found.
Raises
------
ValueError
If the `form` input argument does not match the
form identifier pattern.
Examples
--------
>>> result = form_to_representation_and_iso639('L33930-F1')
>>> result == ("fyr", "da")
True
"""
# Validate input
if not isinstance(form, string_types):
raise ValueError('`form` input should be a string')
if not FORM_PATTERN.match(form):
raise ValueError(('`form` input should be a form identifier, '
'e.g., "L33930-F1"'))
lexeme = form.split('-')[0]
url = "https://www.wikidata.org/wiki/Special:EntityData/{}.json".format(
lexeme)
response = requests.get(url, headers=HEADERS)
# Handle response
if not response.ok:
return None
data = response.json()
if 'entities' in data and lexeme in data['entities']:
entities = data['entities'][lexeme]
for entity_form in entities['forms']:
if form == entity_form['id']:
break
else:
return None
if 'representations' in entity_form:
representations = entity_form['representations']
if len(representations) > 0:
first_representation = next(iter(representations.values()))
representation = first_representation['value']
iso639 = first_representation['language']
return (representation, iso639)
return None
def get_wikidata_language_codes(min_count=0):
"""Get all Wikidata language codes.
Query the Wikidata Query Service to get language codes that
Wikidata uses for the lemmas.
Parameters
----------
min_count : int, optional
Minimum count of lexemes for a particular language. The default is 0,
meaning that all languages will be returned.
Returns
-------
codes : list of str
List of strings with language codes, e.g., ['ru', 'en', ...].
Examples
--------
>>> codes = get_wikidata_language_codes()
>>> 'da' in codes
True
"""
query = """
# tool: Ordia
SELECT (COUNT(?lexeme) AS ?count) ?language
{
?lexeme wikibase:lemma ?lemma .
BIND(LANG(?lemma) AS ?language) .
}
GROUP BY ?language
"""
if min_count:
    try:
        min_count_value = int(min_count)
    except ValueError:
        raise ValueError('min_count should be an integer.')
    if min_count_value < 0:
        raise ValueError('min_count should be non-negative')
    query += "\nHAVING (?count > {})".format(min_count_value)
query += "\nORDER BY DESC(?count)"
url = 'https://query.wikidata.org/sparql'
params = {'query': query, 'format': 'json'}
response = requests.get(url, params=params, headers=HEADERS)
data = response.json()
bindings = data['results']['bindings']
if bindings:
return [binding['language']['value'] for binding in bindings]
else:
return []
@lru_cache(maxsize=128)
def get_wikidata_language_codes_cached(*args, **kwargs):
"""Get unique language codes from Wikidata's lexemes.
Cached version of `get_wikidata_language_codes`.
Parameters
----------
min_count : int, optional
Minimum count of lexemes for a particular language. The default is 0,
meaning that all languages will be returned.
Returns
-------
codes : list of str
List of strings with language codes, e.g., ['ru', 'en', ...].
"""
return get_wikidata_language_codes(*args, **kwargs)
def iso639_to_q(iso639):
"""Convert ISO 639 to Wikidata ID.
Convert an ISO 639-1 or ISO 639-2 identifier to the associated Q
identifier by a lookup with the Wikidata Query Service.
Parameters
----------
iso639 : str
ISO 639 identifier as a string.
Returns
-------
q : str
String with Wikidata ID. It is empty if the code is not found.
Examples
--------
>>> iso639_to_q('en') == 'Q1860'
True
>>> iso639_to_q('xnx') == ''
True
>>> iso639_to_q('dan') == 'Q9035'
True
"""
if len(iso639) == 2:
property = "wdt:P218"
elif len(iso639) == 3:
property = "wdt:P219"
else:
raise ValueError('Wrong length of `iso639`')
query = 'SELECT ?code WHERE {{ ?code {property} "{iso639}" }}'.format(
property=property, iso639=escape_string(iso639))
url = 'https://query.wikidata.org/sparql'
params = {'query': query, 'format': 'json'}
response = requests.get(url, params=params, headers=HEADERS)
data = response.json()
bindings = data['results']['bindings']
if bindings:
return bindings[0]['code']['value'][31:]
else:
return ""
@lru_cache(maxsize=1048)
def spacy_token_to_lexemes(token):
"""Identify Wikidata lexeme from spaCy token.
Parameters
----------
token : spacy.tokens.token.Token
Returns
-------
lexemes : list of strings
Examples
--------
>>> class Token(object):
... pass
>>> token = Token()
>>> setattr(token, 'lang_', 'da')
>>> setattr(token, 'norm_', 'biler')
>>> setattr(token, 'pos_', 'NOUN')
>>> spacy_token_to_lexemes(token)
['L36385']
"""
POSTAG_TO_Q = {
"ADJ": "Q34698",
"ADV": "Q380057",
"INTJ": "Q83034",
"NOUN": "Q1084",
"PROPB": "Q147276",
"VERB": "Q24905",
"ADP": "Q134316",
"AUX": "Q24905",
"CCONJ": "Q36484",
"DET": "Q576271",
"NUM": "Q63116",
"PART": "Q184943",
"PRON": "Q36224",
"SCONJ": "Q36484",
}
if token.pos_ in ['PUNCT', 'SYM', 'X']:
return []
iso639 = token.lang_
language = iso639_to_q(iso639)
representation = token.norm_
if token.pos_ not in POSTAG_TO_Q:
return []
lexical_category = POSTAG_TO_Q[token.pos_]
query = '''
SELECT DISTINCT ?lexeme {{
?lexeme dct:language wd:{language} ;
wikibase:lexicalCategory / wdt:P279* wd:{lexical_category} ;
ontolex:lexicalForm / ontolex:representation
"{representation}"@{iso639} .
}}'''.format(language=language, lexical_category=lexical_category,
representation=representation, iso639=iso639)
url = 'https://query.wikidata.org/sparql'
params = {'query': query, 'format': 'json'}
response = requests.get(url, params=params, headers=HEADERS)
data = response.json()
bindings = data['results']['bindings']
if bindings:
lexemes = [binding['lexeme']['value'][31:] for binding in bindings]
return lexemes
else:
return []
def main():
"""Handle command-line interface."""
from docopt import docopt
arguments = docopt(__doc__)
if arguments['form-to-iso639']:
result = form_to_representation_and_iso639(arguments['<form>'])
if result is not None:
print(result[1])
elif arguments['iso639-to-q']:
q = iso639_to_q(arguments['<iso639>'])
print(q)
elif arguments['get-wikidata-language-codes']:
if arguments['--min-count']:
try:
min_count = int(arguments['--min-count'])
except ValueError:
raise ValueError('min-count parameter should be an integer')
language_codes = get_wikidata_language_codes(min_count=min_count)
else:
language_codes = get_wikidata_language_codes()
for language_code in language_codes:
print(language_code)
if __name__ == '__main__':
main()
|
# app/elephant_queries.py
import os
from dotenv import load_dotenv
import psycopg2
load_dotenv() #> loads contents of the .env file into the script's environment
DB_NAME = os.getenv("DB_NAME")
DB_USER = os.getenv("DB_USER")
DB_PASSWORD = os.getenv("DB_PASSWORD")
DB_HOST = os.getenv("DB_HOST")
connection = psycopg2.connect(dbname=DB_NAME, user=DB_USER, password=DB_PASSWORD, host=DB_HOST)
print("CONNECTION:", connection)
cursor = connection.cursor()
print("CURSOR:", cursor)
cursor.execute('SELECT * from test_table;')
result = cursor.fetchall()
print(result)
query = '''
CREATE TABLE if not exists titanic (
id Serial Primary Key,
Survived int,
Pclass int,
Name varchar,
Sex varchar,
Age int,
Siblings_Spouses_Aboard int,
Parents_Children_Aboard int,
Fare float
);
'''
cursor.execute(query)
# inserting records (single)
insertion_query = """
INSERT INTO titanic
(Survived, Pclass, Name, Sex, Age, Siblings_Spouses_Aboard,
Parents_Children_Aboard, Fare) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
"""
record_to_insert = (0, 3, 'Mr. Owen Harris Brown', 'male', 22, 1, 0, 7.25)
cursor.execute(insertion_query, record_to_insert)
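# Sketch of a multi-row insert reusing the same parameterized query
# (hypothetical records; executemany is part of the standard DB-API cursor):
#
#   records = [
#       (1, 1, 'Mrs. John Bradley Cumings', 'female', 38, 1, 0, 71.28),
#       (1, 3, 'Miss. Laina Heikkinen', 'female', 26, 0, 0, 7.92),
#   ]
#   cursor.executemany(insertion_query, records)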
# save the transactions
connection.commit()
cursor.close()
connection.close()
|
__version__ = "4.0.1"
# __version__ has to be defined on the first line
"""
pysal.lib: Python Spatial Analysis Library (core)
================================================
Documentation
-------------
PySAL documentation is available in two forms: python docstrings and an html \
webpage at http://pysal.org/
Available sub-packages
----------------------
cg
Basic data structures and tools for Computational Geometry
examples
Example data sets for testing and documentation
io
Basic functions used by several sub-packages
weights
Tools for creating and manipulating weights
"""
from . import cg
from . import io
from . import weights
from . import examples
|
from rest_framework import permissions
from rest_framework.viewsets import ReadOnlyModelViewSet
from ggongsul.agreement.models import Agreement
from ggongsul.agreement.serializers import (
AgreementFullSerializer,
AgreementShortSerializer,
)
class AgreementViewSet(ReadOnlyModelViewSet):
queryset = Agreement.objects.all()
permission_classes = [permissions.AllowAny]
def get_serializer_class(self):
if self.action == "list":
return AgreementShortSerializer
return AgreementFullSerializer
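# Routing sketch (hypothetical urls.py; the route prefix is illustrative):
#
#   from rest_framework.routers import DefaultRouter
#
#   router = DefaultRouter()
#   router.register("agreements", AgreementViewSet)
#   urlpatterns = router.urls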
|
"""
recursive parse
"""
from typing import List, Any, Union, Tuple
def parse(filename: str) -> List[Any]:
with open(filename, "r") as file:
lines = [line.rstrip() for line in file.readlines()] # remove newline character
lines = [line for line in lines if len(line) > 0] # remove empty lines
return parse_lines(lines)
def parse_lines(lines: List[str]) -> List[Any]:
"""Iterate over each line to parse it.
If it find a list it will capture the corresponding lines
and recursively call itself on the captured lines to parse the list"""
parsed:List[Any] = []
if len(lines) == 0:
return parsed
tabs = find_tabs(lines[0])
while len(lines) > 0:
line = lines.pop(0)
key, end = find_group(line, tabs)
if end == len(line) and (len(lines) == 0 or lines[0][tabs] != "\t"):
# On this line we found a list item
parsed.append(key)
elif end < len(line):
# On this line we found a key value pair
start = find_tabs(line, end)
value, _ = find_group(line, start)
parsed.append((key, value))
else:
# On this line we found the start of a list
next_level = []
while len(lines) > 0 and lines[0][tabs] == "\t":
next_level.append(lines.pop(0))
parsed.append((key, parse_lines(next_level)))
return parsed
def find_group(line: str, start: int) -> Tuple[str, int]:
"""Capture the group of non tabs character
return the capture group and the end position of the group"""
end = start
length = len(line)
while end < length and line[end].isalnum():
end += 1
return line[start:end], end
def find_tabs(line: str, start: int = 0) -> int:
"""return the position of the next non tabs character in the string"""
while line[start] == "\t":
start += 1
return start
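# Illustrative sketch of the format this parser accepts (hypothetical file
# contents, with \t standing for a literal tab character):
#
#   key\tvalue
#   fruits
#   \tapple
#   \tbanana
#   item
#
# parse_lines on these lines returns
#   [('key', 'value'), ('fruits', ['apple', 'banana']), 'item']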
|
import numpy as np
import torch
import os
from common import tensor
from pde2d_base import RegularPDE
from spinn2d import Plotter2D, App2D, SPINN2D
from mayavi import mlab
class SquareSlit(RegularPDE):
def __init__(self, n_nodes, ns, nb=None, nbs=None, sample_frac=1.0):
self.sample_frac = sample_frac
# Interior nodes
n = round(np.sqrt(n_nodes) + 0.49)
self.n = n
dxb2 = 1.0/(n + 1)
xl, xr = dxb2 - 1.0, 1.0 - dxb2
sl = slice(xl, xr, n*1j)
x, y = np.mgrid[sl, sl]
cond = ~((x >= 0) & (np.abs(y) < dxb2))
self.i_nodes = (x[cond].ravel(), y[cond].ravel())
# Fixed nodes
nb = n if nb is None else nb
self.nb = nb
dxb2 = 1.0/(nb + 1)
_x = np.linspace(dxb2 - 1.0, 1.0 - dxb2, nb)
_o = np.ones_like(_x)
nslit = int(nb//2 + 1)
x = np.hstack((_x, _o, _x, -1*_o, np.linspace(0, 1, nslit)))
y = np.hstack((_o*-1, _x, _o, _x, np.zeros(nslit)))
self.f_nodes = (x, y)
# Interior samples
self.ns = ns = round(np.sqrt(ns) + 0.49)
dxb2 = 1.0/(ns)
xl, xr = dxb2 - 1.0, 1.0 - dxb2
sl = slice(xl, xr, ns*1j)
x, y = np.mgrid[sl, sl]
cond = ~((x >= 0) & (np.abs(y) < dxb2))
xs, ys = (tensor(t.ravel(), requires_grad=True)
for t in (x[cond], y[cond]))
self.p_samples = (xs, ys)
self.n_interior = len(self.p_samples[0])
self.rng_interior = np.arange(self.n_interior)
self.sample_size = int(self.sample_frac*self.n_interior)
# Boundary samples
nbs = ns if nbs is None else nbs
self.nbs = nbs
sl = slice(-1.0, 1.0, nbs*1j)
x, y = np.mgrid[sl, sl]
cond = ((x < xl) | (x > xr)) | ((y < xl) | (y > xr))
x, y = x[cond].ravel(), y[cond].ravel()
nslit = int(nbs//2 + 1)
xb = np.hstack((x, np.linspace(0, 1.0, nslit)))
yb = np.hstack((y, np.zeros(nslit)))
xb, yb = (tensor(t, requires_grad=True) for t in (xb, yb))
self.b_samples = (xb, yb)
def plot_points(self):
n = self.ns*2
x, y = np.mgrid[-1:1:n*1j, -1:1:n*1j]
return x, y
def pde(self, x, y, u, ux, uy, uxx, uyy):
return uxx + uyy + 1.0
def has_exact(self):
return False
def boundary_loss(self, nn):
xb, yb = self.boundary()
u = nn(xb, yb)
ub = 0.0
bc = u - ub
return (bc**2).sum()
def _vtu2data(fname):
src = mlab.pipeline.open(fname, figure=False)
ug = src.reader.output
pts = ug.points.to_array()
scalar = ug.point_data.scalars.to_array()
return pts, scalar
def _get_errors(nn, fvtu):
pts, u_exact = _vtu2data(fvtu)
x = pts[:,0]
y = pts[:,1]
xt, yt = tensor(x.ravel()), tensor(y.ravel())
u = nn(xt, yt).detach().cpu().numpy()
u.shape = x.shape
du = u - u_exact
L1 = np.mean(np.abs(du))
L2 = np.sqrt(np.mean(du**2))
Linf = np.max(np.abs(du))
return L1, L2, Linf
class FEM(Plotter2D):
def save(self, dirname):
'''Save the model and results.
'''
fvtu = 'code/fem/poisson_solution000000.vtu'
modelfname = os.path.join(dirname, 'model.pt')
torch.save(self.nn.state_dict(), modelfname)
rfile = os.path.join(dirname, 'results.npz')
pts, u_exact = _vtu2data(fvtu)
x = pts[:,0]
y = pts[:,1]
xt, yt = tensor(x.ravel()), tensor(y.ravel())
u = self.nn(xt, yt).detach().cpu().numpy()
u.shape = x.shape
du = u - u_exact
L1 = np.mean(np.abs(du))
L2 = np.sqrt(np.mean(du**2))
Linf = np.max(np.abs(du))
np.savez(rfile, x=x, y=y, u=u, u_exact=u_exact,
L1=L1, L2=L2, Linf=Linf)
if __name__ == '__main__':
app = App2D(
pde_cls=SquareSlit,
nn_cls=SPINN2D,
plotter_cls=FEM
)
app.run(nodes=200, samples=600, n_train=10000, lr=1e-3, tol=1e-4)
# fvtu = 'fem/poisson_solution000000.vtu'
# L1, L2, Linf = _get_errors(app.nn, fvtu)
# print("L1 error = ", L1)
# print("L2 error = ", L2)
# print("Linf error = ", Linf)
|
# Copyright (C) 2020 TU Dresden
# Licensed under the ISC license (see LICENSE.txt)
#
# Authors: Robert Khasanov
import copy
from mocasin.representations import SimpleVectorRepresentation
from mocasin.tetris.schedule import (
Schedule,
MultiJobSegmentMapping,
SingleJobSegmentMapping,
)
class BaseVariantSelector:
"""Base Variant Selector class.
Finalize the schedule by selecting non-overlapped mapping variants in each
segment.
"""
def __init__(self, platform):
self.platform = platform
def finalize_schedule(self, schedule):
"""Finalize the schedule.
This method takes the schedule consisting of the canonical mappings.
The output returns the schedule, where no mappings overlap in the same
segment, and each transformed mapping is equivalent to the original one.
"""
raise NotImplementedError(
"This method needs to be overridden by a subclass"
)
def _update_job_segment_mapping(job, mapping):
"""Copy a job segment with the new mapping."""
return SingleJobSegmentMapping(
job.request,
mapping,
start_time=job.start_time,
start_cratio=job.start_cratio,
end_time=job.end_time,
)
class CounterVariantSelector(BaseVariantSelector):
"""Counter-based Variant Selector.
Finalize the schedule by selecting non-overlapped mapping variants in each
segment. In this version, the mapping variant is selected by choosing the
first available cores in the list. If the preselected operating point for
the application is the same during subsequent segments, the variant selector
selects a single variant on these segments.
"""
def __init__(self, platform):
super().__init__(platform)
self._processor_id = {}
for index, processor in enumerate(sorted(platform.processors())):
self._processor_id[processor] = index
def finalize_schedule(self, schedule):
"""Finalize the schedule.
This method takes the schedule consisting of the canonical mappings.
The output returns the schedule, where no mappings overlap in the same
segment, and each transformed mapping is equivalent to the original one.
"""
if not schedule:
return None
# TODO: Check that the transformed mapping is equivalent. (Check that
# SymmetryRepresentation does not create the whole orbit for that.)
rotated_schedule = Schedule(self.platform)
prev_segment = None
prev_rotated = None
for segment in schedule.segments():
rotated_segment = self._finalize_segment(
segment, prev=prev_segment, prev_rotated=prev_rotated
)
rotated_schedule.add_segment(rotated_segment)
prev_segment = segment
prev_rotated = rotated_segment
return rotated_schedule
def _finalize_segment(self, segment, prev=None, prev_rotated=None):
"""Rotate mappings in the segment.
This method is called during the schedule finalization.
Args:
segment (MultiJobSegmentMapping): a segment to be transformed
prev (MultiJobSegmentMapping): a previous segment in the original
schedule
prev_rotated (MultiJobSegmentMapping): a rotated segment variant of
`prev`.
"""
rotated_segment = MultiJobSegmentMapping(self.platform)
processors = set(self.platform.processors())
# Find jobs with equal mappings as in previous one
keep_mapping = self._find_keeping_mappings(segment, prev)
for job in keep_mapping:
prev_job = prev_rotated.find_job_segment(job.request)
rotated_job = _update_job_segment_mapping(job, prev_job.mapping)
rotated_segment.append_job(rotated_job)
# update available processors
processors = processors - prev_job.mapping.get_used_processors()
for job in segment.jobs():
if job in keep_mapping:
continue
rotated_mapping = self._adjust_mapping_to_processors(
job.mapping, processors
)
rotated_job = _update_job_segment_mapping(job, rotated_mapping)
rotated_segment.append_job(rotated_job)
# update available processors
processors = processors - rotated_mapping.get_used_processors()
return rotated_segment
def _adjust_mapping_to_processors(self, mapping, processors):
"""Adjust mapping to the given processors."""
mapping_pes = mapping.get_used_processors()
# If the current mapping fits the given processor list, use it.
if mapping_pes.issubset(processors):
return mapping
available_pe_dict = {}
for pe in sorted(processors):
if pe.type not in available_pe_dict:
available_pe_dict[pe.type] = []
available_pe_dict[pe.type].append(pe)
mapping_pe_dict = {}
for pe in mapping_pes:
if pe.type not in mapping_pe_dict:
mapping_pe_dict[pe.type] = []
mapping_pe_dict[pe.type].append(pe)
perm = {}
for ptype, cores in mapping_pe_dict.items():
available = available_pe_dict[ptype]
if len(cores) > len(available):
raise RuntimeError(
f"Not enough available processors of the type {ptype}"
)
for in_core, out_core in zip(cores, available):
perm[in_core] = out_core
return self._permutate_mapping(mapping, perm)
def _permutate_mapping(self, mapping, perm):
# This custom permutation is probably better to implement in one of
# the representations. TODO: Consider it
rep = SimpleVectorRepresentation(mapping.graph, self.platform)
mapping_list = rep.toRepresentation(mapping)
_perm = {}
for k, v in perm.items():
_perm[self._processor_id[k]] = self._processor_id[v]
rotated_list = list(map(lambda x: _perm[x], mapping_list))
rotated_mapping = rep.fromRepresentation(rotated_list)
rotated_mapping.metadata = copy.copy(mapping.metadata)
return rotated_mapping
def _find_keeping_mappings(self, segment, prev):
"""Find jobs which use the same mapping as in a previous segment."""
res = []
if not prev:
return res
for job in segment.jobs():
job_prev = prev.find_job_segment(job.request)
if not job_prev:
continue
if job.mapping == job_prev.mapping:
res.append(job)
return res
|
import numpy
import os
from setuptools import Extension, setup
### ---
# peakfinder8 Cython installation adapted from OnDA: https://github.com/ondateam/onda
DIFFRACTEM_USE_CYTHON = os.getenv("DIFFRACTEM_USE_CYTHON")
ext = ".pyx" if DIFFRACTEM_USE_CYTHON else ".c" # pylint: disable=invalid-name
peakfinder8_ext = Extension( # pylint: disable=invalid-name
name="diffractem.peakfinder8_extension",
include_dirs=[numpy.get_include()],
libraries=["stdc++"],
sources=[
"src/peakfinder8_extension/peakfinder8.cpp",
"src/peakfinder8_extension/peakfinder8_extension.pyx",
]
if DIFFRACTEM_USE_CYTHON
else [
"src/peakfinder8_extension/peakfinder8_extension.cpp",
"src/peakfinder8_extension/peakfinder8.cpp",
],
language="c++",
)
if DIFFRACTEM_USE_CYTHON:
from Cython.Build import cythonize
print('USING CYTHON')
extensions = cythonize(peakfinder8_ext) # pylint: disable=invalid-name
else:
extensions = [peakfinder8_ext] # pylint: disable=invalid-name
### ---
setup(
name='diffractem',
version='0.3.2',
packages=['diffractem'],
url='https://github.com/robertbuecker/diffractem',
license='',
scripts=['bin/nxs2tif.py', 'bin/edview.py'],
# scripts=['bin/nxs2tif.py', 'bin/edview.py', 'bin/quick_proc.py'],
entry_points={
'console_scripts': [
'pre_process = diffractem.quick_proc:main',
'stream2sol = diffractem.stream_convert:main'
],
},
author='Robert Buecker',
author_email='robert.buecker@mpsd.mpg.de',
description='Some tools for working with serial electron microscopy data.',
install_requires=['h5py', 'numpy', 'pandas', 'tables', 'hdf5plugin',
'dask[complete]', 'tifffile', 'scipy', 'astropy',
'matplotlib', 'numba', 'pyqtgraph', 'pyyaml', 'scikit-learn',
'scikit-image', 'ruamel.yaml', 'opencv-python-headless', 'PyQt5',
'cfelpyutils'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
],
ext_modules = extensions
)
|
# Using py.test framework
from service import Intro, MNIST
def test_example_message(client):
"""Example message should be returned"""
client.app.add_route('/mnist', Intro())
result = client.simulate_get('/mnist')
assert result.json == {
'message': 'This service verifies a model using the MNIST Test data set. '
'Invoke using the form /mnist/<index of test sample>. For example, /mnist/24'}, \
"The service test will fail until a trained model has been approved"
def test_classification_request(client):
"""Expected classification for Iris sample should be returned"""
client.app.add_route('/mnist/{index:int(min=0)}', MNIST())
result = client.simulate_get('/mnist/1')
assert result.status == "200 OK", "The service test will fail until a trained model has been approved"
assert all(k in result.json for k in (
"index", "predicted_label", "predicted")), "The service test will fail until a trained model has been approved"
|
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
source_window = 'Source image'
corners_window = 'Corners detected'
max_thresh = 255
def cornerHarris_demo(val):
thresh = val
# Detector parameters
blockSize = 2
apertureSize = 3
k = 0.04
# Detecting corners
dst = cv.cornerHarris(src_gray, blockSize, apertureSize, k)
# Normalizing
dst_norm = np.empty(dst.shape, dtype=np.float32)
cv.normalize(dst, dst_norm, alpha=0, beta=255, norm_type=cv.NORM_MINMAX)
dst_norm_scaled = cv.convertScaleAbs(dst_norm)
# Drawing a circle around corners
for i in range(dst_norm.shape[0]):
for j in range(dst_norm.shape[1]):
if int(dst_norm[i,j]) > thresh:
cv.circle(dst_norm_scaled, (j,i), 5, (0), 2)
# Showing the result
cv.namedWindow(corners_window)
cv.imshow(corners_window, dst_norm_scaled)
# Load source image and convert it to gray
parser = argparse.ArgumentParser(description='Code for Harris corner detector tutorial.')
parser.add_argument('--input', help='Path to input image.', default='data/demo/cows.jpg')
args = parser.parse_args()
src = cv.imread(cv.samples.findFile(args.input))
if src is None:
print('Could not open or find the image:', args.input)
exit(0)
src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
# Create a window and a trackbar
cv.namedWindow(source_window)
thresh = 200 # initial threshold
cv.createTrackbar('Threshold: ', source_window, thresh, max_thresh, cornerHarris_demo)
cv.imshow(source_window, src)
cornerHarris_demo(thresh)
cv.waitKey()
|
# incomplete_iteration.py
#
# LICENSE
#
# The MIT License
#
# Copyright (c) 2020 TileDB, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# DESCRIPTION
#
# Please refer to the TileDB and TileDB-Py documentation for more information:
# https://docs.tiledb.com/main/solutions/tiledb-embedded/api-usage
# https://tiledb-inc-tiledb.readthedocs-hosted.com/projects/tiledb-py/en/stable/python-api.html
#
# When run, this program will create a 1D sparse array, write some data
# to it, and read slices back by iteration over incomplete queries.
#
import numpy as np
import sys
import tiledb
# Name of the array to create.
array_name = "incomplete_iteration"
def create_array():
# The array will be 100 cells with dimension "x".
dom = tiledb.Domain(tiledb.Dim(name="x", domain=(0, 99), tile=100, dtype=np.int64))
# The array will be sparse with a single string-typed attribute "a"
schema = tiledb.ArraySchema(
domain=dom, sparse=True, attrs=[tiledb.Attr(name="a", dtype=str)]
)
# Create the (empty) array on disk.
tiledb.SparseArray.create(array_name, schema)
def write_array():
# Open the array and write to it.
with tiledb.open(array_name, mode="w") as A:
extent = A.schema.domain.dim("x").domain
ncells = extent[1] - extent[0] + 1
# Data is the Latin alphabet with varying repeat lengths
data = [chr(i % 26 + 97) * (i % 52) for i in range(ncells)]
# Coords are the dimension range
coords = np.arange(extent[0], extent[1] + 1)
A[coords] = data
def read_array_iterated():
# in order to force iteration, restrict the buffer sizes
# this setting gives 5 iterations for the example data
init_buffer_bytes = 800
cfg = tiledb.Config(
{
"py.init_buffer_bytes": init_buffer_bytes,
"py.exact_init_buffer_bytes": "true",
}
)
with tiledb.open(array_name, config=cfg) as A:
# iterate over results as a dataframe
iterable = A.query(return_incomplete=True).df[:]
for i, result in enumerate(iterable):
print(f"--- result {i} is a '{type(result)}' with size {len(result)}")
print(result)
print("---")
print(f"Query completed after {i} iterations")
create_array()
write_array()
read_array_iterated()
|
# -*- coding: utf-8 -*-
import itertools
from metrika.contender import Contender
from metrika.variable import *
__author__ = 'Javier Pimás'
class Suite:
def __init__(self, name=""):
self.name = name
self.variables = []
def add_variable_from_dict(self, name, values):
values = [NamedValue(name, value) for name, value in values.items()]
self.variables.append(Variable(name, values))
def add_variable_from_list(self, name, values):
values = [AnonValue(value) for value in values]
self.variables.append(Variable(name, values))
def restrict(self, arguments):
# for arg in arguments:
# if self.typical_parameters[arg] is None:
# args.append(arg)
# else:
# args.append(self.typical_parameters[arg])
if arguments.restrict is not None:
for restriction in arguments.restrict.split(','):
(var, value) = restriction.split('=')
variable = next(x for x in self.variables if x.name == var)
variable.restrict_to(value)
def instances(self):
names = [var.name for var in self.variables]
values = [var.values for var in self.variables]
tuples = itertools.product(*values)
return [Contender(names, variation) for variation in list(tuples)]
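# Usage sketch (hypothetical variable names and values):
#
#   suite = Suite('benchmarks')
#   suite.add_variable_from_list('vm', ['cpython', 'pypy'])
#   suite.add_variable_from_list('input', ['small', 'large'])
#   contenders = suite.instances()  # one Contender per (vm, input) combination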
|
import gtk
class DragTarget(object):
"""
A DragTarget supports a certain mime type and has methods to figure out if
a drag is possible as well as to retrieve the actual text-data representation
for the drag
"""
app = 0
widget = 0
actions = gtk.gdk.ACTION_COPY | gtk.gdk.ACTION_MOVE | gtk.gdk.ACTION_LINK
def get_data(self, widget, context):
"""
called when the destination requests the data
may also be called to find out if a particular widget state (selection)
supports this target as a drag source. In this case, context will be
None
"""
raise NotImplementedError
def delete_data(self, widget, context):
"""
called when the drag operation finished and the destination requests
to delete the original data
"""
pass
|
import unittest
from .. import util
class TestUtil(unittest.TestCase):
def test_state_dict_names(self):
state_dict = {
'conv1.weight': 0,
'conv1.bias': 0,
'fc1.weight': 0,
'fc2.weight': 0,
'fc2.bias': 0
}
layer_names = util.state_dict_layer_names(state_dict)
self.assertListEqual(sorted(layer_names), ['conv1', 'fc1', 'fc2'])
if __name__ == '__main__':
unittest.main()
|
"""
Name: thomas.py
Goal: Numerical solution of tridiagonal linear systems with the Thomas algorithm
Author: HOUNSI madouvi antoine-sebastien
Date: 08/03/2022
"""
from os.path import join, dirname
import sys
import numpy as np
from linearEq.utils.gaussForVal import gauss
class Thomas:
def __init__(self, file):
self.matrix = None
self.vect = None
self.file = join(dirname(__file__), file)
self.dim = self.countLine(self.file)
def getData(self, file):
    self.matrix = list()
    self.vect = list()
    # read the file directly instead of re-binding sys.stdin
    with open(file) as f:
        for _ in range(self.dim):
            line = f.readline().split("|")
            self.matrix.append([float(x) for x in line[0].split()])
            self.vect.append(float(line[1]))
def countLine(self, file):
"""
:param file:
:return: nbOfLine
"""
cpt = 0
with open(file) as f:
for line in f:
if not line.isspace():
cpt += 1
return cpt
@staticmethod
def isTridiagonal(matrix, dim):
for i in range(dim):
counter = 0
for j in range(dim):
if matrix[i][j] != 0: counter += 1
if (i == 0 or i == dim - 1) and counter > 2: return False
if counter > 3: return False
return True
def factorization(self, matrix, dim):
matL = np.identity(dim)
matU = np.zeros([dim, dim])
matU[0][0] = matrix[0][0]
for i in range(dim):
if i != dim - 1: matU[i][i + 1] = matrix[i][i + 1]
if i > 0:
matL[i][i - 1] = matrix[i][i - 1] / matU[i - 1][i - 1]
matU[i][i] = matrix[i][i] - matL[i][i - 1] * matU[i - 1][i]
return matL, matU
def solution(self):
"""
:return: solution
"""
try:
dim = self.dim
self.getData(self.file)
if not self.isTridiagonal(self.matrix, dim):
    return "Your matrix is not tridiagonal"
matL, matU = self.factorization(self.matrix, dim)
sY = list()
sY.append(self.vect[0])
for i in range(1, dim):
s1 = self.vect[i] - sY[i - 1] * matL[i][i - 1]
sY.append(s1)
# lTM => s
s = list()
s.append(sY[dim - 1] / matU[dim - 1][dim - 1])
for i in range(dim - 2, -1, -1):
try:
val = (sY[i] - matU[i][i + 1] * s[len(s) - 1]) / matU[i][i]
s.append(val)
except ZeroDivisionError:
return "Veuillez réessayer"
s = [round(i, 2) for i in s]
s.reverse()
return s
except ZeroDivisionError:
    return "Division by zero"
except RuntimeError:
    return "Error during execution"
except TypeError:
    return "Invalid data, please re-enter the data"
except IndexError:
    return "Indexing error, please re-enter the data"
except EOFError:
    return "EOF error"
except ValueError:
    return "Input error, please re-enter the data"
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from sqlalchemy import column
from superset.db_engine_specs.bigquery import BigQueryEngineSpec
from tests.db_engine_specs.base_tests import DbEngineSpecTestCase
class BigQueryTestCase(DbEngineSpecTestCase):
def test_bigquery_sqla_column_label(self):
label = BigQueryEngineSpec.make_label_compatible(column("Col").name)
label_expected = "Col"
self.assertEqual(label, label_expected)
label = BigQueryEngineSpec.make_label_compatible(column("SUM(x)").name)
label_expected = "SUM_x__5f110"
self.assertEqual(label, label_expected)
label = BigQueryEngineSpec.make_label_compatible(column("SUM[x]").name)
label_expected = "SUM_x__7ebe1"
self.assertEqual(label, label_expected)
label = BigQueryEngineSpec.make_label_compatible(column("12345_col").name)
label_expected = "_12345_col_8d390"
self.assertEqual(label, label_expected)
def test_convert_dttm(self):
dttm = self.get_dttm()
self.assertEqual(
BigQueryEngineSpec.convert_dttm("DATE", dttm), "CAST('2019-01-02' AS DATE)"
)
self.assertEqual(
BigQueryEngineSpec.convert_dttm("DATETIME", dttm),
"CAST('2019-01-02T03:04:05.678900' AS DATETIME)",
)
self.assertEqual(
BigQueryEngineSpec.convert_dttm("TIMESTAMP", dttm),
"CAST('2019-01-02T03:04:05.678900' AS TIMESTAMP)",
)
|
# Copyright (c) 2017 Intel Corporation. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
from tests.integration.utils.test_blockdevices.test_blockdevice import TestBlockDevice
class TestBlockDeviceLinux(TestBlockDevice):
_supported_device_types = ['linux']
def __init__(self, device_type, device_path):
super(TestBlockDeviceLinux, self).__init__(device_type, device_path)
@property
def preferred_fstype(self):
return 'ldiskfs'
@property
def device_path(self):
return self._device_path
@property
def destroy_commands(self):
return ['wipefs -a {}'.format(self.device_path)]
def __str__(self):
return '%s' % self.device_path
|
import unittest
import requests
from unittest import mock
from dataverk.connectors import NaisS3Connector
from tests.dataverk.connectors.storage.test_resources.mock_nais_s3_api import mock_requests_put, mock_requests_get
from tests.dataverk.connectors.storage.test_resources.nais_s3_storage_common import NAIS_S3_ENDPOINT, NAIS_S3_BLOB_NAME, \
NAIS_S3_RESOURCE_FMT, NAIS_S3_BUCKET_NAME, NAIS_S3_RESOURCE_CONTENT
class TestNaisS3Connector(unittest.TestCase):
def test_class_instantiation(self):
s3_conn = NaisS3Connector(NAIS_S3_BUCKET_NAME, NAIS_S3_ENDPOINT)
self.assertIsInstance(s3_conn, NaisS3Connector)
@mock.patch("requests.put", side_effect=mock_requests_put)
def test_write_valid(self, mock_put):
s3_conn = NaisS3Connector(NAIS_S3_BUCKET_NAME, NAIS_S3_ENDPOINT)
s3_conn.write(data=NAIS_S3_RESOURCE_CONTENT, destination_blob_name=NAIS_S3_BLOB_NAME, fmt=NAIS_S3_RESOURCE_FMT)
@mock.patch("requests.get", side_effect=mock_requests_get)
def test_read_valid(self, mock_get):
s3_conn = NaisS3Connector(NAIS_S3_BUCKET_NAME, NAIS_S3_ENDPOINT)
resource = s3_conn.read(blob_name=f"{NAIS_S3_BLOB_NAME}.{NAIS_S3_RESOURCE_FMT}")
self.assertEqual(resource, NAIS_S3_RESOURCE_CONTENT)
@mock.patch("requests.get", side_effect=mock_requests_get)
def test_read_invalid_resource_not_found(self, mock_get):
s3_conn = NaisS3Connector(NAIS_S3_BUCKET_NAME, NAIS_S3_ENDPOINT)
with self.assertRaises(requests.exceptions.HTTPError):
resource = s3_conn.read(blob_name=f"resource/not-found.{NAIS_S3_RESOURCE_FMT}")
|
'''
Core algorithm.
Key algorithm parameters:
- ga_priority (p) = probability that the next task issued is a GA evaluation
- population (n)
- workers (w) = number of parallel workers
- task_limit (l) = number of function evaluations to carry out
Describing the algorithm as (p, n, w, l):
- (0, 1, 1, L) = single sequential local search for L function evaluations
- (0, N, N, L) = N parallel local search runs for L total function evaluations
- (1, N, 1, N * G) = standard GA with G generations on population size N,
evaluated sequentially
- (1, N, W, L) = standard GA with G generations on population size N,
evaluated in parallel by W workers with opportunistic local
search
'''
import asyncio
import dataclasses
import heapq
import time
import typing
@dataclasses.dataclass(order=True)
class LSTask:
''' Priority queueable LS task. :solution is a neighbour of :base.
:base_fitness is the known fitness of :base. This allows a worker
to evaluate solution and submit a new neighbour of the better of
:base and :solution for subsequent LS ops. '''
priority: typing.Any=dataclasses.field()
base: typing.Any=dataclasses.field(compare=False)
base_fitness: typing.Any=dataclasses.field(compare=False)
solution: typing.Any=dataclasses.field(compare=False)
@dataclasses.dataclass
class GATask:
''' GA task simply requires :solution to be evaluated. '''
solution: typing.Any=dataclasses.field(compare=False)
@dataclasses.dataclass
class Queue:
''' Manage both queues and record statistics. '''
ga_queue: typing.List
ls_queue: typing.List
ga_results: typing.List
best_solution: typing.Any
ga_tasks_issued: int
ls_tasks_issued: int
generations: int
total_evals: int
record: typing.List
def print_state(self, elapsed):
print(
f"Elapsed: {elapsed} "
f"Evals: {self.total_evals} "
f"Fittest: {self.best_solution[1]} "
f"Generation: {self.generations} "
f"GA Issued: {self.ga_tasks_issued} "
f"LS Issued: {self.ls_tasks_issued} "
f"Pending: {len(self.ga_queue) + len(self.ls_queue)} "
f"Completed: {len(self.ga_results)}")
async def worker(queue, *, rstate, evaluate, neighbour, next_population,
task_limit, ga_priority, population_size):
''' Worker coroutine. Carries out atomic queue update operations GetTask,
LSResult, GAResult as outlined in the paper. Asynchronously evaluates
solutions. '''
while queue.ls_tasks_issued + queue.ga_tasks_issued < task_limit:
# Get task from queue according to the priority rules.
if (len(queue.ls_queue) == 0) or (len(queue.ga_queue) > 0 and queue.ga_tasks_issued <= ga_priority * (queue.ls_tasks_issued + queue.ga_tasks_issued)):
queue.ga_tasks_issued += 1
task = queue.ga_queue.pop()
else:
queue.ls_tasks_issued += 1
task = heapq.heappop(queue.ls_queue)
# Evaluate fitness asynchronously.
fitness = await evaluate(task.solution)
queue.total_evals += 1
# Update incumbent and statistics.
if queue.best_solution is None or fitness > queue.best_solution[1]:
if type(task) is GATask:
if queue.generations == 0:
source = 'initial'
else:
source = 'genetic'
else:
source = 'local'
queue.record.append({
'source': source, 'fitness': fitness,
'step': queue.ls_tasks_issued + queue.ga_tasks_issued})
queue.best_solution = task.solution, fitness
# Update the queues
if type(task) is GATask:
# Update GA results and create a new population if required.
queue.ga_results.append(dict(solution=task.solution, fitness=fitness))
if len(queue.ga_results) >= population_size:
queue.ga_queue = [
GATask(ind)
for ind in next_population(queue.ga_results)]
queue.ga_results = []
queue.generations += 1
# Submit a local search task for a neighbour of this solution.
heapq.heappush(queue.ls_queue, LSTask(
priority=-fitness, solution=neighbour(task.solution),
base=task.solution, base_fitness=fitness))
else:
# Submit a neighbour if this solution is better, otherwise backtrack
# (i.e. submit a neighbour of the previous solution instead).
if fitness > task.base_fitness:
heapq.heappush(queue.ls_queue, LSTask(
priority=-fitness, solution=neighbour(task.solution),
base=task.solution, base_fitness=fitness))
else:
heapq.heappush(queue.ls_queue, LSTask(
priority=-task.base_fitness, solution=neighbour(task.base),
base=task.base, base_fitness=task.base_fitness))
async def monitor(queue, start, log_seconds):
while True:
await asyncio.sleep(log_seconds)
queue.print_state(time.monotonic() - start)
async def run(*, population, workers, log_seconds, **kwargs):
'''
Main function to run the parallelised hybrid strategy.
See onemax.py for an example of use.
Create queue with initial GA population. Run workers until the evaluation
budget (max evaluated solutions) is exhausted. Report evaluation statistics
and return the best known solution.
population: List of initial candidate solutions.
rstate: Seeded random.Random object.
evaluate: Coroutine to asynchronously evaluate fitness.
neighbour: Function to generate a local neighbour from a solution.
next_population: Function to generate a new population given an
existing population with evaluated fitnesses.
workers: Number of parallel workers.
ga_priority: Fraction of evaluations put towards evaluating GA solutions
vs LS solutions.
task_limit: Maximum fitness evaluations before termination.
'''
loop = asyncio.get_event_loop()
queue = Queue(
ga_queue=[GATask(ind) for ind in population],
ls_queue=[], ga_results=[], best_solution=None,
ga_tasks_issued=0, ls_tasks_issued=0, generations=0,
record=[], total_evals=0)
start = time.monotonic()
asyncio.ensure_future(monitor(queue, start, log_seconds))
await asyncio.gather(*(
worker(queue, population_size=len(population), **kwargs)
for _ in range(workers)))
queue.print_state(time.monotonic() - start)
return queue
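# Minimal driver sketch (hypothetical problem: maximise the number of ones in a
# bit string; every name below is illustrative -- see onemax.py for the real
# example of use):
#
#   import random
#
#   async def evaluate(solution):
#       return sum(solution)
#
#   def neighbour(solution):
#       s = list(solution)
#       i = random.randrange(len(s))
#       s[i] ^= 1                      # flip one random bit
#       return s
#
#   def next_population(results):
#       results.sort(key=lambda r: r['fitness'], reverse=True)
#       return [neighbour(r['solution']) for r in results]   # naive mutation-only step
#
#   population = [[random.randint(0, 1) for _ in range(32)] for _ in range(20)]
#   queue = asyncio.get_event_loop().run_until_complete(run(
#       population=population, workers=4, log_seconds=5,
#       rstate=random.Random(0), evaluate=evaluate, neighbour=neighbour,
#       next_population=next_population, ga_priority=0.5, task_limit=2000))
#   print(queue.best_solution)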
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
from torch.autograd import Variable
from utils import *
from config import parameters
START_TAG = '<START>'
STOP_TAG = '<STOP>'
def to_scalar(var):
return var.view(-1).data.tolist()[0]
def argmax(vec):
_, idx = torch.max(vec, 1)
return to_scalar(idx)
def prepare_sequence(seq, to_ix):
idxs = [to_ix[w] for w in seq]
tensor = torch.LongTensor(idxs)
return Variable(tensor)
def log_sum_exp(vec):
# vec 2D: 1 * tagset_size
max_score = vec[0, argmax(vec)]
max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
return max_score + torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))
class Neural_CRF_AE(nn.Module):
def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim, char_lstm_dim=parameters['char_lstm_dim'],
char_to_ix=None, pre_word_embeds=None, char_embedding_dim=parameters['char_dim'], use_gpu=False,
n_cap=None, cap_embedding_dim=None, use_crf=True, char_mode='CNN',
features_dim = parameters["features_dim"], gazetter_dim = parameters["gazetter_dim"],
gazetteer_lambda = parameters["gazetteer_lambda"], pos_lambda = parameters["pos_lambda"],
wordshape_lambda = parameters["wordshape_lambda"]):
super(Neural_CRF_AE, self).__init__()
self.use_gpu = use_gpu
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.vocab_size = vocab_size
self.tag_to_ix = tag_to_ix
self.n_cap = n_cap
self.cap_embedding_dim = cap_embedding_dim
self.use_crf = use_crf
self.tagset_size = len(tag_to_ix)
self.out_channels = char_lstm_dim
self.char_mode = char_mode
self.hidden2gazetteer = nn.Linear(hidden_dim*2, gazetter_dim)
# self.hidden2deps = nn.Linear(hidden_dim*2, 45)
self.hidden2pos = nn.Linear(hidden_dim*2, 45)
self.hidden2shape = nn.Linear(hidden_dim*2, 151)
init_linear(self.hidden2gazetteer)
# init_linear(self.hidden2deps)
init_linear(self.hidden2pos)
init_linear(self.hidden2shape)
self.pos_lambda = pos_lambda
self.wordshape_lambda = wordshape_lambda
self.gazetteer_lambda = gazetteer_lambda
if char_embedding_dim is not None:
self.char_lstm_dim = char_lstm_dim
self.char_embeds = nn.Embedding(len(char_to_ix), char_embedding_dim)
init_embedding(self.char_embeds.weight)
self.char_cnn3 = nn.Conv2d(in_channels=1, out_channels=self.out_channels, kernel_size=(3, char_embedding_dim), padding=(2,0))
self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
if pre_word_embeds is not None:
self.pre_word_embeds = True
self.word_embeds.weight = nn.Parameter(torch.FloatTensor(pre_word_embeds))
else:
self.pre_word_embeds = False
self.dropout = nn.Dropout(0.5)
self.lstm = nn.LSTM(embedding_dim+self.out_channels+features_dim+gazetter_dim, hidden_dim, bidirectional=True)
init_lstm(self.lstm)
self.hw_trans = nn.Linear(self.out_channels, self.out_channels)
self.hw_gate = nn.Linear(self.out_channels, self.out_channels)
self.h2_h1 = nn.Linear(hidden_dim*2, hidden_dim)
self.tanh = nn.Tanh()
self.hidden2tag = nn.Linear(hidden_dim*2, self.tagset_size)
init_linear(self.h2_h1)
init_linear(self.hidden2tag)
init_linear(self.hw_gate)
init_linear(self.hw_trans)
self.transitions = nn.Parameter(torch.zeros(self.tagset_size, self.tagset_size))
self.transitions.data[tag_to_ix[START_TAG], :] = -10000
self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000
def _score_sentence(self, feats, tags):
# tags is ground_truth, a list of ints, length is len(sentence)
# feats is a 2D tensor, len(sentence) * tagset_size
r = torch.LongTensor(range(feats.size()[0]))
if self.use_gpu:
r = r.cuda()
pad_start_tags = torch.cat([torch.cuda.LongTensor([self.tag_to_ix[START_TAG]]), tags])
pad_stop_tags = torch.cat([tags, torch.cuda.LongTensor([self.tag_to_ix[STOP_TAG]])])
else:
pad_start_tags = torch.cat([torch.LongTensor([self.tag_to_ix[START_TAG]]), tags])
pad_stop_tags = torch.cat([tags, torch.LongTensor([self.tag_to_ix[STOP_TAG]])])
score = torch.sum(self.transitions[pad_stop_tags, pad_start_tags]) + torch.sum(feats[r, tags])
return score
def _get_lstm_features(self, sentence, chars2, caps, chars2_length, d, feature, gazetteer):
chars_embeds = self.char_embeds(chars2).unsqueeze(1)
chars_cnn_out3 = nn.functional.relu(self.char_cnn3(chars_embeds))
chars_embeds = nn.functional.max_pool2d(chars_cnn_out3,kernel_size=(chars_cnn_out3.size(2), 1)).view(chars_cnn_out3.size(0), self.out_channels)
embeds = self.word_embeds(sentence)
embeds = torch.cat((embeds, chars_embeds, feature, gazetteer), 1)
embeds = embeds.unsqueeze(1)
embeds = self.dropout(embeds)
lstm_out, _ = self.lstm(embeds)
lstm_out = lstm_out.view(len(sentence), self.hidden_dim*2)
lstm_out = self.dropout(lstm_out)
gaze_feat = self.hidden2gazetteer(lstm_out)
lstm_feats = self.hidden2tag(lstm_out)
# deps_feats = self.hidden2deps(lstm_out)
shape_feats = self.hidden2shape(lstm_out)
pos_feats = self.hidden2pos(lstm_out)
return lstm_feats, gaze_feat, shape_feats, pos_feats
def _forward_alg(self, feats):
# calculate in log domain
# feats is len(sentence) * tagset_size
# initialize alpha with a Tensor with values all equal to -10000.
init_alphas = torch.Tensor(1, self.tagset_size).fill_(-10000.)
init_alphas[0][self.tag_to_ix[START_TAG]] = 0.
forward_var = autograd.Variable(init_alphas)
if self.use_gpu:
forward_var = forward_var.cuda()
for feat in feats:
emit_score = feat.view(-1, 1)
tag_var = forward_var + self.transitions + emit_score
max_tag_var, _ = torch.max(tag_var, dim=1)
tag_var = tag_var - max_tag_var.view(-1, 1)
            forward_var = max_tag_var + torch.log(torch.sum(torch.exp(tag_var), dim=1)).view(1, -1)
terminal_var = (forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]).view(1, -1)
alpha = log_sum_exp(terminal_var)
# Z(x)
return alpha
def viterbi_decode(self, feats):
backpointers = []
# analogous to forward
init_vvars = torch.Tensor(1, self.tagset_size).fill_(-10000.)
init_vvars[0][self.tag_to_ix[START_TAG]] = 0
forward_var = Variable(init_vvars)
if self.use_gpu:
forward_var = forward_var.cuda()
for feat in feats:
next_tag_var = forward_var.view(1, -1).expand(self.tagset_size, self.tagset_size) + self.transitions
_, bptrs_t = torch.max(next_tag_var, dim=1)
bptrs_t = bptrs_t.squeeze().data.cpu().numpy()
next_tag_var = next_tag_var.data.cpu().numpy()
viterbivars_t = next_tag_var[range(len(bptrs_t)), bptrs_t]
viterbivars_t = Variable(torch.FloatTensor(viterbivars_t))
if self.use_gpu:
viterbivars_t = viterbivars_t.cuda()
forward_var = viterbivars_t + feat
backpointers.append(bptrs_t)
terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
terminal_var.data[self.tag_to_ix[STOP_TAG]] = -10000.
terminal_var.data[self.tag_to_ix[START_TAG]] = -10000.
best_tag_id = argmax(terminal_var.unsqueeze(0))
path_score = terminal_var[best_tag_id]
best_path = [best_tag_id]
for bptrs_t in reversed(backpointers):
best_tag_id = bptrs_t[best_tag_id]
best_path.append(best_tag_id)
start = best_path.pop()
assert start == self.tag_to_ix[START_TAG]
best_path.reverse()
return path_score, best_path
def neg_log_likelihood(self, sentence, tags, chars2, caps, chars2_length, d, feature, gazetteer, gaze_targets, shape_label, pos_label):
# sentence, tags is a list of ints
# features is a 2D tensor, len(sentence) * self.tagset_size
feats, gaze, shapes, pos = self._get_lstm_features(sentence, chars2, caps, chars2_length, d, feature, gazetteer)
        lst = [3, 0.5, 3]  # per-class weights for the gazetteer auxiliary loss (values appear hand-tuned)
if self.use_gpu:
cls_weights = torch.cuda.FloatTensor(lst)
else:
cls_weights = torch.FloatTensor(lst)
# hand_loss = self.feature_lambda * nn.functional.mse_loss(handcrafted, feature)
        # Weighted by gazetteer_lambda in the return, like the other auxiliary losses.
        gaze_loss = nn.functional.cross_entropy(gaze, gaze_targets, weight=cls_weights)
# dep_loss = nn.functional.cross_entropy(deps, deps_label)
shape_loss = nn.functional.cross_entropy(shapes, shape_label)
pos_loss = nn.functional.cross_entropy(pos, pos_label)
if self.use_crf:
forward_score = self._forward_alg(feats)
gold_score = self._score_sentence(feats, tags)
return forward_score - gold_score + self.gazetteer_lambda*gaze_loss + self.wordshape_lambda*shape_loss + self.pos_lambda*pos_loss
else:
tags = Variable(tags)
scores = nn.functional.cross_entropy(feats, tags)
return scores + self.gazetteer_lambda*gaze_loss + self.wordshape_lambda*shape_loss + self.pos_lambda*pos_loss
def forward(self, sentence, chars, caps, chars2_length, d, feature, gazetteer):
feats, _, _, _ = self._get_lstm_features(sentence, chars, caps, chars2_length, d, feature, gazetteer)
# viterbi to get tag_seq
if self.use_crf:
score, tag_seq = self.viterbi_decode(feats)
else:
score, tag_seq = torch.max(feats, 1)
tag_seq = list(tag_seq.cpu().data)
return score, tag_seq
|
import os, sys, inspect
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
|
#coding: UTF-8
'''Message is the carrier of a simple Pub/Sub system on top of ccnet'''
import datetime
import re
import uuid
import time
MESSAGE_PATTERN = re.compile(r'(?P<flags>[\d]+) (?P<from>[^ ]+) (?P<to>[^ ]+) (?P<id>[^ ]+) (?P<ctime>[^ ]+) (?P<rtime>[^ ]+) (?P<app>[^ ]+) (?P<body>.*)')
class Message(object):
def __init__(self, d):
self.flags = int(d['flags'])
self.from_ = d['from']
self.to = d['to']
self.id = d['id']
self.ctime = float(d['ctime'])
self.rtime = float(d['rtime'])
self.app = d['app']
self.body = d['body']
def message_from_string(s):
results = MESSAGE_PATTERN.match(s)
if results is None:
raise RuntimeError('Bad message: %s' % s)
d = results.groupdict()
return Message(d)
def gen_inner_message_string(self_id, app, content):
result = "%d %s %s %s %d %d %s %s\000" % (0, self_id, self_id, str(uuid.uuid1()),
int(time.time()), 0,
app, content)
return result
def message_to_string(msg):
f = '%(flags)s %(from_)s %(to)s %(id)s %(ctime)s %(rtime)s %(app)s %(body)s'
return f % dict(flags=msg.flags,
from_=msg.from_,
to=msg.to,
id=msg.id,
ctime=msg.ctime,
rtime=msg.rtime,
app=msg.app,
body=msg.body)
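# --- Illustrative round-trip example (not part of the original module) ---
# A minimal sketch showing that a string built by gen_inner_message_string can
# be parsed back with message_from_string. The peer id and app name are
# made-up values for illustration only.
if __name__ == '__main__':
    raw = gen_inner_message_string('local-peer-id', 'example-app', 'hello world')
    msg = message_from_string(raw.rstrip('\000'))
    print(message_to_string(msg))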
|
#!/usr/bin/env python
# coding: utf-8
# # Generate Long Audio Sample from trained model
# ## Boilerplate
# In[1]:
import os, sys
root_dir, _ = os.path.split(os.getcwd())
script_dir = os.path.join(root_dir, 'scripts')
sys.path.append(script_dir)
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# In[2]:
import tensorflow as tf
print("GPU Available: ", tf.test.is_gpu_available())
# In[3]:
tf.keras.backend.clear_session()
# tf.keras.backend.set_floatx('float16')
# tf.debugging.set_log_device_placement(True)
gpus = tf.config.experimental.list_physical_devices('GPU')
# In[4]:
from hparams import hparams
from waveglow_model import WaveGlow
import training_utils as utils
import random
import pathlib
import pandas as pd
import numpy as np
import IPython.display as ipd
# In[19]:
show_audio = False
save_audio = True
# ## Load Long Audio Dataset
# In[5]:
test_dataset = utils.load_single_file_tfrecords(
record_file=os.path.join(hparams['tfrecords_dir'], hparams['test_file']))
test_dataset = test_dataset.batch(
hparams['train_batch_size'])
# ## Load long samples
# In[6]:
data_root_orig = tf.keras.utils.get_file(origin='https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2',
fname='LJSpeech-1.1', untar=True, cache_dir=hparams['data_dir'])
data_root = pathlib.Path(data_root_orig)
# In[7]:
# data_root = pathlib.Path(hparams['data_dir'])
all_sound_paths = list(data_root.glob('*/*'))
all_sound_paths = [str(path) for path in all_sound_paths]
random.seed(a=1234)
random.shuffle(all_sound_paths)
# ## Load preprocessed long audio split mel spectrograms
# In[8]:
long_audio_record_file = os.path.join(hparams['tfrecords_dir'], hparams['long_audio_file'])
long_audio_dataset = utils.load_long_audio_tfrecords(long_audio_record_file).batch(hparams['train_batch_size'])
# ## Instantiate model
# In[9]:
myWaveGlow = WaveGlow(hparams=hparams, name='myWaveGlow')
optimizer = utils.get_optimizer(hparams=hparams)
# ## Model Checkpoints : Initialise or Restore
# In[10]:
checkpoint = tf.train.Checkpoint(step=tf.Variable(0),
optimizer=optimizer,
net=myWaveGlow)
manager_checkpoint = tf.train.CheckpointManager(
checkpoint,
directory=hparams['checkpoint_dir'],
max_to_keep=hparams['max_to_keep'])
checkpoint.restore(manager_checkpoint.latest_checkpoint)
if manager_checkpoint.latest_checkpoint:
tf.print('Restored from {checkpoint_dir}'.format(**hparams))
else:
raise ValueError('Fetch a valid checkpoint!')
# In[11]:
batched_long_audios = []
for x_train in long_audio_dataset:
batched_long_audios.append(myWaveGlow.infer(x_train['mel']))
# In[17]:
audios = dict()
originals = dict()
for x_train, samples in zip(long_audio_dataset, batched_long_audios):
splits = tf.unique_with_counts(x_train['path'])
long_audios = [audio for audio in tf.split(samples, splits.count)]
for index, path in enumerate(splits.y.numpy()):
if path.decode('utf-8') in audios.keys():
audios[path.decode('utf-8')] = tf.concat([audios[path.decode('utf-8')], tf.reshape(long_audios[index], [-1])], axis=0)
else:
audios[path.decode('utf-8')] = tf.reshape(long_audios[index], [-1])
signal = tf.io.read_file(path)
original = np.squeeze(tf.audio.decode_wav(signal).audio.numpy())
originals[path.decode('utf-8')] = original
# In[20]:
if show_audio:
for original, audio in zip(originals.values(), audios.values()):
print('original')
ipd.display(ipd.Audio(original[:audio.shape[0]], rate=hparams['sample_rate']))
print('generated')
ipd.display(ipd.Audio(audio, rate=hparams['sample_rate']))
# In[23]:
if save_audio:
for (path, original), audio in zip(originals.items(), audios.values()):
print(path)
_ , name = os.path.split(path)
original_wav = tf.audio.encode_wav(tf.expand_dims(original[:audio.shape[0]], axis=1), sample_rate=hparams['sample_rate'])
tf.io.write_file(filename=os.path.join(os.getcwd(), '..', 'data', 'audio_samples', 'original_' + name), contents=original_wav)
audio_wav = tf.audio.encode_wav(tf.expand_dims(audio, axis=1), sample_rate=hparams['sample_rate'])
tf.io.write_file(filename=os.path.join(os.getcwd(), '..', 'data', 'audio_samples', 'generated_' + name), contents=audio_wav)
# In[ ]:
|
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import resnet50, resnet18
class ReorderResNet(nn.Module):
def __init__(self, resnet_type='resnet50', pretrained=True):
super(ReorderResNet, self).__init__()
self.resnet_type = resnet_type
self.pretrained = pretrained
if self.resnet_type == 'resnet50':
resnet = resnet50(self.pretrained)
self.resnet_out_channels = 2048
else:
resnet = resnet18(self.pretrained)
self.resnet_out_channels = 512
self.res = nn.Sequential(*list(resnet.children())[:-1])
self.predictor = nn.Linear(self.resnet_out_channels * 3, 1)
def get_feature(self, x):
x = self.res(x)
x = x.view(-1, self.resnet_out_channels)
return x
def get_ordering(self, x):
x = self.predictor(x)
return x
def get_reorder_loss(self, images, device, args, labels):
images_shape = images.shape
batch_size, num_images = images.shape[:2]
# Change view so that we can put everything through the model at once.
images = images.view(batch_size * num_images, *images.shape[2:])
pose = self.get_feature(images)
pose = pose.view(batch_size, num_images, *pose.shape[1:])
# Get the ordering.
flattened = pose.view(batch_size, -1)
ordering = self.get_ordering(flattened).squeeze(-1)
loss_ordering = F.binary_cross_entropy_with_logits(ordering, labels)
predictions = torch.sigmoid(ordering) > 0.5
# DEBUG
# print('{:.3f}'.format(
# torch.sum(predictions.detach().cpu()).item() / len(predictions)))
accuracy = (predictions == labels).float().mean().item()
total_loss = sum([
args.lambda_ordering * loss_ordering,
# args.lambda_object_sim * loss_objects
])
stats = {
'accuracy': accuracy,
# 'objects_sim_loss': loss_objects.item(),
# 'presence_sparsity_loss': loss_sparsity.item(),
'ordering_loss': loss_ordering.item()
}
return total_loss, stats
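# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of calling get_reorder_loss on random data. The batch shape
# (batch of 2, 3 images each, 3x224x224) and the SimpleNamespace standing in
# for the real argparse `args` are assumptions for illustration only.
if __name__ == '__main__':
    from types import SimpleNamespace
    model = ReorderResNet(resnet_type='resnet18', pretrained=False)
    images = torch.randn(2, 3, 3, 224, 224)          # (batch, num_images, C, H, W)
    labels = torch.randint(0, 2, (2,)).float()       # binary "correct order" labels
    args = SimpleNamespace(lambda_ordering=1.0)
    loss, stats = model.get_reorder_loss(images, device='cpu', args=args, labels=labels)
    print(float(loss), stats)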
|
# -*- coding: utf-8 -*-
"""Shared test cases."""
import os
from dfvfs.file_io import tsk_file_io
from dfvfs.file_io import tsk_partition_file_io
from dfvfs.lib import definitions
from dfvfs.lib import errors
from dfvfs.path import factory as path_spec_factory
from dfvfs.path import tsk_path_spec
from dfvfs.path import tsk_partition_path_spec
from dfvfs.resolver import context
from dfvfs.resolver import resolver
from tests import test_lib as shared_test_lib
class Ext2ImageFileTestCase(shared_test_lib.BaseTestCase):
"""Shared functionality for storage media image with an ext2 file system."""
_INODE_ANOTHER_FILE = 15
_INODE_PASSWORDS_TXT = 14
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._resolver_context.Empty()
def _TestOpenCloseInode(self, parent_path_spec):
"""Test the open and close functionality using an inode.
Args:
parent_path_spec (PathSpec): parent path specification.
"""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.PREFERRED_EXT_BACK_END, inode=self._INODE_PASSWORDS_TXT,
parent=parent_path_spec)
file_object = resolver.Resolver.OpenFileObject(
path_spec, resolver_context=self._resolver_context)
self.assertEqual(file_object.get_size(), 116)
def _TestOpenCloseLocation(self, parent_path_spec):
"""Test the open and close functionality using a location.
Args:
parent_path_spec (PathSpec): parent path specification.
"""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.PREFERRED_EXT_BACK_END, location='/passwords.txt',
parent=parent_path_spec)
file_object = resolver.Resolver.OpenFileObject(
path_spec, resolver_context=self._resolver_context)
self.assertEqual(file_object.get_size(), 116)
def _TestSeek(self, parent_path_spec):
"""Test the seek functionality.
Args:
parent_path_spec (PathSpec): parent path specification.
"""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.PREFERRED_EXT_BACK_END, inode=self._INODE_ANOTHER_FILE,
location='/a_directory/another_file', parent=parent_path_spec)
file_object = resolver.Resolver.OpenFileObject(
path_spec, resolver_context=self._resolver_context)
self.assertEqual(file_object.get_size(), 22)
file_object.seek(10)
self.assertEqual(file_object.read(5), b'other')
self.assertEqual(file_object.get_offset(), 15)
file_object.seek(-10, os.SEEK_END)
self.assertEqual(file_object.read(5), b'her f')
file_object.seek(2, os.SEEK_CUR)
self.assertEqual(file_object.read(2), b'e.')
    # Conforming to POSIX seek semantics, the offset can exceed the file size,
    # but reading will result in no data being returned.
file_object.seek(300, os.SEEK_SET)
self.assertEqual(file_object.get_offset(), 300)
self.assertEqual(file_object.read(2), b'')
with self.assertRaises(IOError):
file_object.seek(-10, os.SEEK_SET)
# On error the offset should not change.
self.assertEqual(file_object.get_offset(), 300)
with self.assertRaises(IOError):
file_object.seek(10, 5)
# On error the offset should not change.
self.assertEqual(file_object.get_offset(), 300)
def _TestRead(self, parent_path_spec):
"""Test the read functionality.
Args:
parent_path_spec (PathSpec): parent path specification.
"""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.PREFERRED_EXT_BACK_END, inode=self._INODE_PASSWORDS_TXT,
location='/passwords.txt', parent=parent_path_spec)
file_object = resolver.Resolver.OpenFileObject(
path_spec, resolver_context=self._resolver_context)
read_buffer = file_object.read()
expected_buffer = (
b'place,user,password\n'
b'bank,joesmith,superrich\n'
b'alarm system,-,1234\n'
b'treasure chest,-,1111\n'
b'uber secret laire,admin,admin\n')
self.assertEqual(read_buffer, expected_buffer)
# TODO: add boundary scenarios.
class ImageFileTestCase(shared_test_lib.BaseTestCase):
"""The unit test case for storage media image based test data."""
_INODE_ANOTHER_FILE = 16
_INODE_PASSWORDS_TXT = 15
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._resolver_context.Empty()
def _TestOpenCloseInode(self, parent_path_spec):
"""Test the open and close functionality using an inode.
Args:
parent_path_spec (PathSpec): parent path specification.
"""
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_PASSWORDS_TXT, parent=parent_path_spec)
file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)
file_object.Open()
self.assertEqual(file_object.get_size(), 116)
def _TestOpenCloseLocation(self, parent_path_spec):
"""Test the open and close functionality using a location.
Args:
parent_path_spec (PathSpec): parent path specification.
"""
path_spec = tsk_path_spec.TSKPathSpec(
location='/passwords.txt', parent=parent_path_spec)
file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)
file_object.Open()
self.assertEqual(file_object.get_size(), 116)
def _TestSeek(self, parent_path_spec):
"""Test the seek functionality.
Args:
parent_path_spec (PathSpec): parent path specification.
"""
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_ANOTHER_FILE, location='/a_directory/another_file',
parent=parent_path_spec)
file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)
file_object.Open()
self.assertEqual(file_object.get_size(), 22)
file_object.seek(10)
self.assertEqual(file_object.read(5), b'other')
self.assertEqual(file_object.get_offset(), 15)
file_object.seek(-10, os.SEEK_END)
self.assertEqual(file_object.read(5), b'her f')
file_object.seek(2, os.SEEK_CUR)
self.assertEqual(file_object.read(2), b'e.')
    # Conforming to POSIX seek semantics, the offset can exceed the file size,
    # but reading will result in no data being returned.
file_object.seek(300, os.SEEK_SET)
self.assertEqual(file_object.get_offset(), 300)
self.assertEqual(file_object.read(2), b'')
with self.assertRaises(IOError):
file_object.seek(-10, os.SEEK_SET)
# On error the offset should not change.
self.assertEqual(file_object.get_offset(), 300)
with self.assertRaises(IOError):
file_object.seek(10, 5)
# On error the offset should not change.
self.assertEqual(file_object.get_offset(), 300)
def _TestRead(self, parent_path_spec):
"""Test the read functionality.
Args:
parent_path_spec (PathSpec): parent path specification.
"""
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_PASSWORDS_TXT, location='/passwords.txt',
parent=parent_path_spec)
file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)
file_object.Open()
read_buffer = file_object.read()
expected_buffer = (
b'place,user,password\n'
b'bank,joesmith,superrich\n'
b'alarm system,-,1234\n'
b'treasure chest,-,1111\n'
b'uber secret laire,admin,admin\n')
self.assertEqual(read_buffer, expected_buffer)
# TODO: add boundary scenarios.
class NTFSImageFileTestCase(shared_test_lib.BaseTestCase):
"""Shared functionality for storage media image with a NTFS file system."""
_MFT_ENTRY_ANOTHER_FILE = 67
_MFT_ENTRY_PASSWORDS_TXT = 66
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._resolver_context.Empty()
def _TestOpenCloseMFTEntry(self, file_object):
"""Test the open and close functionality using a MFT entry.
Args:
file_object (FileIO): file-like object.
"""
file_object.Open()
self.assertEqual(file_object.get_size(), 116)
# TODO: add a failing scenario.
def _TestOpenCloseLocation(self, file_object):
"""Test the open and close functionality using a location.
Args:
file_object (FileIO): file-like object.
"""
file_object.Open()
self.assertEqual(file_object.get_size(), 116)
def _TestSeek(self, file_object):
"""Test the seek functionality.
Args:
file_object (FileIO): file-like object.
"""
file_object.Open()
self.assertEqual(file_object.get_size(), 22)
file_object.seek(10)
self.assertEqual(file_object.read(5), b'other')
self.assertEqual(file_object.get_offset(), 15)
file_object.seek(-10, os.SEEK_END)
self.assertEqual(file_object.read(5), b'her f')
file_object.seek(2, os.SEEK_CUR)
self.assertEqual(file_object.read(2), b'e.')
    # Conforming to POSIX seek semantics, the offset can exceed the file size,
    # but reading will result in no data being returned.
file_object.seek(300, os.SEEK_SET)
self.assertEqual(file_object.get_offset(), 300)
self.assertEqual(file_object.read(2), b'')
with self.assertRaises(IOError):
file_object.seek(-10, os.SEEK_SET)
# On error the offset should not change.
self.assertEqual(file_object.get_offset(), 300)
with self.assertRaises(IOError):
file_object.seek(10, 5)
# On error the offset should not change.
self.assertEqual(file_object.get_offset(), 300)
def _TestRead(self, file_object):
"""Test the read functionality.
Args:
file_object (FileIO): file-like object.
"""
file_object.Open()
read_buffer = file_object.read()
expected_buffer = (
b'place,user,password\n'
b'bank,joesmith,superrich\n'
b'alarm system,-,1234\n'
b'treasure chest,-,1111\n'
b'uber secret laire,admin,admin\n')
self.assertEqual(read_buffer, expected_buffer)
# TODO: add boundary scenarios.
def _TestReadADS(self, file_object):
"""Test the read functionality on an alternate data stream (ADS).
Args:
file_object (FileIO): file-like object.
"""
file_object.Open()
expected_buffer = (
b'\xf0\x12\x03\xf8\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
read_buffer = file_object.read(size=16)
self.assertEqual(read_buffer, expected_buffer)
file_object.seek(-8, os.SEEK_END)
expected_buffer = b'\x20\x00\x00\x00\x20\x02\x00\x00'
read_buffer = file_object.read(size=16)
self.assertEqual(read_buffer, expected_buffer)
class MBRPartitionedImageFileTestCase(shared_test_lib.BaseTestCase):
"""Tests for MBR partitioned storage media image based test data."""
# mmls test_data/mbr.raw
# DOS Partition Table
# Offset Sector: 0
# Units are in 512-byte sectors
#
# Slot Start End Length Description
# 000: Meta 0000000000 0000000000 0000000001 Primary Table (#0)
# 001: ------- 0000000000 0000000000 0000000001 Unallocated
# 002: 000:000 0000000001 0000000129 0000000129 Linux (0x83)
# 003: Meta 0000000130 0000008191 0000008062 DOS Extended (0x05)
# 004: Meta 0000000130 0000000130 0000000001 Extended Table (#1)
# 005: ------- 0000000130 0000000130 0000000001 Unallocated
# 006: 001:000 0000000131 0000000259 0000000129 Linux (0x83)
# 007: ------- 0000000260 0000008191 0000007932 Unallocated
_BYTES_PER_SECTOR = 512
_OFFSET_P1 = 1 * _BYTES_PER_SECTOR
_SIZE_P1 = 129 * _BYTES_PER_SECTOR
_OFFSET_P2 = 131 * _BYTES_PER_SECTOR
_SIZE_P2 = 129 * _BYTES_PER_SECTOR
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._resolver_context.Empty()
def _TestOpenClose(self, parent_path_spec):
"""Test the open and close functionality.
Args:
parent_path_spec (PathSpec): parent path specification.
"""
path_spec = tsk_partition_path_spec.TSKPartitionPathSpec(
part_index=2, parent=parent_path_spec)
file_object = tsk_partition_file_io.TSKPartitionFile(
self._resolver_context, path_spec)
file_object.Open()
self.assertEqual(file_object.get_size(), self._SIZE_P1)
path_spec = tsk_partition_path_spec.TSKPartitionPathSpec(
part_index=13, parent=parent_path_spec)
file_object = tsk_partition_file_io.TSKPartitionFile(
self._resolver_context, path_spec)
with self.assertRaises(errors.PathSpecError):
file_object.Open()
path_spec = tsk_partition_path_spec.TSKPartitionPathSpec(
location='/p2', parent=parent_path_spec)
file_object = tsk_partition_file_io.TSKPartitionFile(
self._resolver_context, path_spec)
file_object.Open()
self.assertEqual(file_object.get_size(), self._SIZE_P2)
path_spec = tsk_partition_path_spec.TSKPartitionPathSpec(
location='/p0', parent=parent_path_spec)
file_object = tsk_partition_file_io.TSKPartitionFile(
self._resolver_context, path_spec)
with self.assertRaises(errors.PathSpecError):
file_object.Open()
path_spec = tsk_partition_path_spec.TSKPartitionPathSpec(
location='/p3', parent=parent_path_spec)
file_object = tsk_partition_file_io.TSKPartitionFile(
self._resolver_context, path_spec)
with self.assertRaises(errors.PathSpecError):
file_object.Open()
path_spec = tsk_partition_path_spec.TSKPartitionPathSpec(
start_offset=self._OFFSET_P2, parent=parent_path_spec)
file_object = tsk_partition_file_io.TSKPartitionFile(
self._resolver_context, path_spec)
file_object.Open()
self.assertEqual(file_object.get_size(), self._SIZE_P2)
path_spec = tsk_partition_path_spec.TSKPartitionPathSpec(
start_offset=self._SIZE_P1, parent=parent_path_spec)
file_object = tsk_partition_file_io.TSKPartitionFile(
self._resolver_context, path_spec)
with self.assertRaises(errors.PathSpecError):
file_object.Open()
def _TestSeek(self, parent_path_spec):
"""Test the seek functionality.
Args:
parent_path_spec (PathSpec): parent path specification.
"""
path_spec = tsk_partition_path_spec.TSKPartitionPathSpec(
part_index=6, parent=parent_path_spec)
file_object = tsk_partition_file_io.TSKPartitionFile(
self._resolver_context, path_spec)
file_object.Open()
self.assertEqual(file_object.get_size(), self._SIZE_P2)
file_object.seek(4128)
self.assertEqual(file_object.get_offset(), 0x11620 - self._OFFSET_P2)
self.assertEqual(
file_object.read(16), b'lost+found\x00\x00\x0c\x00\x00\x00')
self.assertEqual(file_object.get_offset(), 0x11630 - self._OFFSET_P2)
file_object.seek(-28156, os.SEEK_END)
self.assertEqual(file_object.get_offset(), 0x19a04 - self._OFFSET_P2)
data = file_object.read(8)
self.assertEqual(data, b' is a te')
self.assertEqual(file_object.get_offset(), 0x19a0c - self._OFFSET_P2)
file_object.seek(4, os.SEEK_CUR)
self.assertEqual(file_object.get_offset(), 0x19a10 - self._OFFSET_P2)
data = file_object.read(7)
self.assertEqual(data, b'ile.\n\nW')
self.assertEqual(file_object.get_offset(), 0x19a17 - self._OFFSET_P2)
    # Conforming to POSIX seek semantics, the offset can exceed the file size,
    # but reading will result in no data being returned.
expected_offset = self._SIZE_P2 + 100
file_object.seek(expected_offset, os.SEEK_SET)
self.assertEqual(file_object.get_offset(), expected_offset)
self.assertEqual(file_object.read(20), b'')
with self.assertRaises(IOError):
file_object.seek(-10, os.SEEK_SET)
# On error the offset should not change.
self.assertEqual(file_object.get_offset(), expected_offset)
with self.assertRaises(IOError):
file_object.seek(10, 5)
# On error the offset should not change.
self.assertEqual(file_object.get_offset(), expected_offset)
def _TestRead(self, parent_path_spec):
"""Test the read functionality.
Args:
parent_path_spec (PathSpec): parent path specification.
"""
path_spec = tsk_partition_path_spec.TSKPartitionPathSpec(
part_index=6, parent=parent_path_spec)
file_object = tsk_partition_file_io.TSKPartitionFile(
self._resolver_context, path_spec)
file_object.Open()
self.assertEqual(file_object.get_size(), self._SIZE_P2)
file_object.seek(0x19e00 - self._OFFSET_P2)
data = file_object.read(32)
self.assertEqual(data, b'place,user,password\nbank,joesmit')
class SylogTestCase(shared_test_lib.BaseTestCase):
"""The unit test case for the syslog test data."""
def _TestGetSizeFileObject(self, file_object):
"""Runs the get size tests on the file-like object.
Args:
file_object (file): file-like object with the test data.
"""
self.assertEqual(file_object.get_size(), 1247)
def _TestReadFileObject(self, file_object, base_offset=167):
"""Runs the read tests on the file-like object.
Args:
file_object (file): file-like object with the test data.
      base_offset (Optional[int]): base offset used in the tests.
"""
file_object.seek(base_offset, os.SEEK_SET)
self.assertEqual(file_object.get_offset(), base_offset)
expected_buffer = (
b'Jan 22 07:53:01 myhostname.myhost.com CRON[31051]: (root) CMD '
b'(touch /var/run/crond.somecheck)\n')
read_buffer = file_object.read(95)
self.assertEqual(read_buffer, expected_buffer)
expected_offset = base_offset + 95
self.assertEqual(file_object.get_offset(), expected_offset)
def _TestSeekFileObject(self, file_object, base_offset=167):
"""Runs the seek tests on the file-like object.
Args:
file_object (file): file-like object with the test data.
      base_offset (Optional[int]): base offset used in the tests.
"""
file_object.seek(base_offset + 10)
self.assertEqual(file_object.read(5), b'53:01')
expected_offset = base_offset + 15
self.assertEqual(file_object.get_offset(), expected_offset)
file_object.seek(-10, os.SEEK_END)
self.assertEqual(file_object.read(5), b'times')
file_object.seek(2, os.SEEK_CUR)
self.assertEqual(file_object.read(2), b'--')
    # Conforming to POSIX seek semantics, the offset can exceed the file size,
    # but reading will result in no data being returned.
file_object.seek(2000, os.SEEK_SET)
self.assertEqual(file_object.get_offset(), 2000)
self.assertEqual(file_object.read(2), b'')
# Test with an invalid offset.
with self.assertRaises(IOError):
file_object.seek(-10, os.SEEK_SET)
# On error the offset should not change.
self.assertEqual(file_object.get_offset(), 2000)
# Test with an invalid whence.
with self.assertRaises(IOError):
file_object.seek(10, 5)
# On error the offset should not change.
self.assertEqual(file_object.get_offset(), 2000)
class PaddedSyslogTestCase(SylogTestCase):
"""The unit test case for padded syslog test data.
The syslog test data is padded with '=' characters.
"""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self.padding_size = 0
def _TestGetSizeFileObject(self, file_object):
"""Runs the get size tests on the file-like object.
Args:
file_object (file): file-like object with the test data.
"""
self.assertEqual(file_object.get_size(), 1247 + self.padding_size)
def _TestSeekFileObject(self, file_object, base_offset=167):
"""Runs the seek tests on the file-like object.
Args:
file_object (file): file-like object with the test data.
      base_offset (Optional[int]): base offset used in the tests.
"""
file_object.seek(base_offset + 10)
self.assertEqual(file_object.read(5), b'53:01')
expected_offset = base_offset + 15
self.assertEqual(file_object.get_offset(), expected_offset)
file_object.seek(-10 - self.padding_size, os.SEEK_END)
self.assertEqual(file_object.read(5), b'times')
file_object.seek(2, os.SEEK_CUR)
self.assertEqual(file_object.read(2), b'--')
    # Conforming to POSIX seek semantics, the offset can exceed the file size,
    # but reading will result in no data being returned.
file_object.seek(2000, os.SEEK_SET)
self.assertEqual(file_object.get_offset(), 2000)
self.assertEqual(file_object.read(2), b'')
# Test with an invalid offset.
with self.assertRaises(IOError):
file_object.seek(-10, os.SEEK_SET)
# On error the offset should not change.
self.assertEqual(file_object.get_offset(), 2000)
# Test with an invalid whence.
with self.assertRaises(IOError):
file_object.seek(10, 5)
# On error the offset should not change.
self.assertEqual(file_object.get_offset(), 2000)
class WindowsFATImageFileTestCase(shared_test_lib.BaseTestCase):
"""Shared functionality for storage media image with a FAT file system."""
_INODE_ANOTHER_FILE = 615
_INODE_PASSWORDS_TXT = 10
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._resolver_context.Empty()
def _TestOpenCloseMFTEntry(self, parent_path_spec):
"""Test the open and close functionality using a MFT entry.
Args:
parent_path_spec (PathSpec): parent path specification.
"""
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_PASSWORDS_TXT, parent=parent_path_spec)
file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)
file_object.Open()
self.assertEqual(file_object.get_size(), 126)
# TODO: add a failing scenario.
def _TestOpenCloseLocation(self, parent_path_spec):
"""Test the open and close functionality using a location.
Args:
parent_path_spec (PathSpec): parent path specification.
"""
path_spec = tsk_path_spec.TSKPathSpec(
location='/passwords.txt', parent=parent_path_spec)
file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)
file_object.Open()
self.assertEqual(file_object.get_size(), 126)
def _TestSeek(self, parent_path_spec):
"""Test the seek functionality.
Args:
parent_path_spec (PathSpec): parent path specification.
"""
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_ANOTHER_FILE, location='/a_directory/another_file',
parent=parent_path_spec)
file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)
file_object.Open()
self.assertEqual(file_object.get_size(), 24)
file_object.seek(10)
self.assertEqual(file_object.read(5), b'other')
self.assertEqual(file_object.get_offset(), 15)
file_object.seek(-12, os.SEEK_END)
self.assertEqual(file_object.read(5), b'her f')
file_object.seek(2, os.SEEK_CUR)
self.assertEqual(file_object.read(2), b'e.')
    # Conforming to POSIX seek semantics, the offset can exceed the file size,
    # but reading will result in no data being returned.
file_object.seek(300, os.SEEK_SET)
self.assertEqual(file_object.get_offset(), 300)
self.assertEqual(file_object.read(2), b'')
with self.assertRaises(IOError):
file_object.seek(-10, os.SEEK_SET)
# On error the offset should not change.
self.assertEqual(file_object.get_offset(), 300)
with self.assertRaises(IOError):
file_object.seek(10, 5)
# On error the offset should not change.
self.assertEqual(file_object.get_offset(), 300)
def _TestRead(self, parent_path_spec):
"""Test the read functionality.
Args:
parent_path_spec (PathSpec): parent path specification.
"""
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_PASSWORDS_TXT, location='/passwords.txt',
parent=parent_path_spec)
file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)
file_object.Open()
read_buffer = file_object.read()
expected_buffer = (
b'place,user,password \r\n'
b'bank,joesmith,superrich \r\n'
b'alarm system,-,1234 \r\n'
b'treasure chest,-,1111 \r\n'
b'uber secret laire,admin,admin \r\n')
self.assertEqual(read_buffer, expected_buffer)
# TODO: add boundary scenarios.
class WindowsNTFSImageFileTestCase(shared_test_lib.BaseTestCase):
"""Shared functionality for storage media image with a NTFS file system."""
_MFT_ENTRY_ANOTHER_FILE = 36
_MFT_ENTRY_PASSWORDS_TXT = 35
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._resolver_context.Empty()
def _TestOpenCloseMFTEntry(self, parent_path_spec):
"""Test the open and close functionality using a MFT entry.
Args:
parent_path_spec (PathSpec): parent path specification.
"""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.PREFERRED_NTFS_BACK_END, mft_attribute=1,
mft_entry=self._MFT_ENTRY_PASSWORDS_TXT, parent=parent_path_spec)
file_object = resolver.Resolver.OpenFileObject(
path_spec, resolver_context=self._resolver_context)
self.assertEqual(file_object.get_size(), 126)
# TODO: add a failing scenario.
def _TestOpenCloseLocation(self, parent_path_spec):
"""Test the open and close functionality using a location.
Args:
parent_path_spec (PathSpec): parent path specification.
"""
if definitions.PREFERRED_NTFS_BACK_END == definitions.TYPE_INDICATOR_TSK:
location = '/passwords.txt'
else:
location = '\\passwords.txt'
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.PREFERRED_NTFS_BACK_END, location=location,
parent=parent_path_spec)
file_object = resolver.Resolver.OpenFileObject(
path_spec, resolver_context=self._resolver_context)
self.assertEqual(file_object.get_size(), 126)
# Try open with a path specification that has no parent.
path_spec.parent = None
with self.assertRaises(errors.PathSpecError):
resolver.Resolver.OpenFileObject(
path_spec, resolver_context=self._resolver_context)
def _TestSeek(self, parent_path_spec):
"""Test the seek functionality.
Args:
parent_path_spec (PathSpec): parent path specification.
"""
if definitions.PREFERRED_NTFS_BACK_END == definitions.TYPE_INDICATOR_TSK:
location = '/a_directory/another_file'
else:
location = '\\a_directory\\another_file'
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.PREFERRED_NTFS_BACK_END, location=location,
mft_attribute=2, mft_entry=self._MFT_ENTRY_ANOTHER_FILE,
parent=parent_path_spec)
file_object = resolver.Resolver.OpenFileObject(
path_spec, resolver_context=self._resolver_context)
self.assertEqual(file_object.get_size(), 24)
file_object.seek(10)
self.assertEqual(file_object.read(5), b'other')
self.assertEqual(file_object.get_offset(), 15)
file_object.seek(-12, os.SEEK_END)
self.assertEqual(file_object.read(5), b'her f')
file_object.seek(2, os.SEEK_CUR)
self.assertEqual(file_object.read(2), b'e.')
    # Conforming to POSIX seek semantics, the offset can exceed the file size,
    # but reading will result in no data being returned.
file_object.seek(300, os.SEEK_SET)
self.assertEqual(file_object.get_offset(), 300)
self.assertEqual(file_object.read(2), b'')
with self.assertRaises(IOError):
file_object.seek(-10, os.SEEK_SET)
# On error the offset should not change.
self.assertEqual(file_object.get_offset(), 300)
with self.assertRaises(IOError):
file_object.seek(10, 5)
# On error the offset should not change.
self.assertEqual(file_object.get_offset(), 300)
def _TestRead(self, parent_path_spec):
"""Test the read functionality.
Args:
parent_path_spec (PathSpec): parent path specification.
"""
if definitions.PREFERRED_NTFS_BACK_END == definitions.TYPE_INDICATOR_TSK:
location = '/passwords.txt'
else:
location = '\\passwords.txt'
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.PREFERRED_NTFS_BACK_END, location=location,
mft_attribute=2, mft_entry=self._MFT_ENTRY_PASSWORDS_TXT,
parent=parent_path_spec)
file_object = resolver.Resolver.OpenFileObject(
path_spec, resolver_context=self._resolver_context)
read_buffer = file_object.read()
expected_buffer = (
b'place,user,password \r\n'
b'bank,joesmith,superrich \r\n'
b'alarm system,-,1234 \r\n'
b'treasure chest,-,1111 \r\n'
b'uber secret laire,admin,admin \r\n')
self.assertEqual(read_buffer, expected_buffer)
# TODO: add boundary scenarios.
|
from flask import Flask, jsonify, request, Response
from functools import wraps
from werkzeug.routing import Rule
from optparse import OptionParser
from pprint import pprint
import time
VERBOSE = 'verbose'
BASIC_AUTH = 'basic_auth'
AUTH_USERNAME = 'auth_username'
AUTH_PASSWORD = 'auth_password'
config = {
BASIC_AUTH: False,
VERBOSE: False
}
app = Flask(__name__)
app.url_map.add(Rule('/', defaults={'path' : ''}, endpoint='index'))
app.url_map.add(Rule('/<path:path>', endpoint='index'))
def validate_status_code(status_code):
    # Accept only values in the standard HTTP status code range.
    return 100 <= status_code < 600
def extract(d):
return {key: value for (key, value) in d.items()}
def check_auth(username, password):
if AUTH_USERNAME not in config or AUTH_PASSWORD not in config:
return False
return username == config[AUTH_USERNAME] and password == config[AUTH_PASSWORD]
def authenticate():
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
if not config[BASIC_AUTH]:
return f(*args, **kwargs)
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
@app.endpoint('index')
@requires_auth
def echo(path):
status_code = request.args.get('status') or 200
status_code = int(status_code)
if not validate_status_code(status_code):
status_code = 200
data = {
'success' : True,
'status' : status_code,
'time' : time.time(),
'path' : request.path,
'script_root' : request.script_root,
'url' : request.url,
'base_url' : request.base_url,
'url_root' : request.url_root,
'method' : request.method,
'headers' : extract(request.headers),
'data' : request.data.decode(encoding='UTF-8'),
'host' : request.host,
'args' : extract(request.args),
'form' : extract(request.form),
'json' : request.json,
'cookies' : extract(request.cookies)
}
if config[VERBOSE]:
pprint(data)
response = jsonify(data)
response.status_code = status_code
print(extract(request.form))
return response
def main():
parser = OptionParser()
    parser.add_option('--port', dest='port', default=8888, help='port to run server on - default 8888')
parser.add_option('--auth', dest='auth', help='basic authentication credentials, should be passed in like "username:password"')
parser.add_option('-v', '--verbose', dest='verbose',
default=False, action='store_true', help='increased verbosity - outputs response to console')
parser.add_option('--debug', dest='debug',
default=False, action='store_true', help='enable debug mode in flask')
(options, args) = parser.parse_args()
config[VERBOSE] = options.verbose
if options.auth:
        try:
            username, password = options.auth.split(':', 1)
        except ValueError:
            parser.error('Invalid auth credentials {0}'.format(options.auth))
        if not username or not password:
            parser.error('Invalid auth credentials {0}'.format(options.auth))
config[BASIC_AUTH] = True
config[AUTH_USERNAME] = username
config[AUTH_PASSWORD] = password
app.debug = options.debug
app.run(port=int(options.port))
if __name__ == '__main__':
main()
|
'''
***************************************************************************
* (c) Andrew Robinson (andrew.robinson@latrobe.edu.au) 2013 *
* La Trobe University & *
* Life Sciences Computation Centre (LSCC, part of VLSCI) *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU Library General Public License (LGPL) *
* as published by the Free Software Foundation; either version 2 of *
* the License, or (at your option) any later version. *
* *
* ScienceScripts is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU Library General Public License for more details. *
* *
* You should have received a copy of the GNU Library General Public *
* License along with ScienceScripts; if not, write to the Free Software *
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
* USA *
* *
***************************************************************************
Created on 19/09/2013
@author: arobinson
'''
import sys, getopt
from Bio import SeqIO
from BCBio import GFF
def main(argv):
'''For each subject feature find the nearest neighbour sequence in each direction.
NOTE: it takes into account the strand of the subject so that its 3' and 5'
neighbours are reported correctly'''
neighbourGffFilename = None
subjectGffFilename = None
subjectFeatureNames = 'gene'
neighbourFeatureNames = 'region'
verbose = False
titles = False
delimiter = "\t"
naValue = 'N/a'
cmdmsg = '''nearestfeat.py [-h] -S <subject.feature.names> -N <neighbour.feature.names> -s <subject.gff> -n <neighbours.gff>'''
helpmsg = '''For each subject feature find the nearest neighbour sequence in each direction.
%s
-h Print this help msg
-v Print a summary (counts) at end of processing
-t Print titles in first row
-S <str> Subject feature names, a comma separated list (no spaces), (default: gene)
-N <str> Neighbour feature names, a comma separated list (no spaces), (default: region)
-s <str> GFF file containing subject information.
-n <str> GFF file containing the Neighbour features.
''' % cmdmsg
# parse arguments
try:
opts, args = getopt.getopt(argv,"htvS:N:s:n:",[])
except getopt.GetoptError:
print cmdmsg
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print helpmsg
sys.exit()
elif opt == "-S":
subjectFeatureNames = arg
elif opt == "-N":
neighbourFeatureNames = arg
elif opt == "-s":
subjectGffFilename = arg
elif opt == "-n":
neighbourGffFilename = arg
elif opt == "-v":
verbose = True
elif opt == "-t":
titles = True
# compute extra options
subjectFeatureNamesList = subjectFeatureNames.split(',')
neighbourFeatureNamesList = neighbourFeatureNames.split(',')
counter = SafeCounter(['+', '-', '.', '?', 1, -1, 'ne'])
### (1) parse the neighbour gff file ###
limitInfo = {'gff_type': neighbourFeatureNamesList}
neighbourHandle = open(neighbourGffFilename, 'rU')
neighbourTags = []
neighbourTagLists = {}
for sequence in GFF.parse(neighbourHandle, limit_info=limitInfo):
# make a list of neighbour locations
# Note: this performs gene consolidation via a delayed appending loop
neighbourTags = []
lastNeighbour = None
for feature in sequence.features:
subject = (int(feature.location.start), int(feature.location.end))
# delayed processing to check for overlaps
if lastNeighbour: # true on 2+ iteration
if lastNeighbour[1] >= subject[0]: # overlap?
lastNeighbour = (lastNeighbour[0], subject[1])
else:
neighbourTags.append((lastNeighbour[0], True))
neighbourTags.append((lastNeighbour[1], False))
lastNeighbour = subject
else:
lastNeighbour = subject
counter.inc('ne')
# ^ next feature ^
# finish off the last neighbour (if exists)
if lastNeighbour:
neighbourTags.append((lastNeighbour[0], True))
neighbourTags.append((lastNeighbour[1], False))
neighbourTags.sort()
neighbourTagLists[sequence.id] = neighbourTags
# ^ next sequence ^
neighbourHandle.close()
# print column titles if requested
if titles:
print str(delimiter).join(['Feature id',
'Label',
"5' Distance",
"3' Distance",
'Strand',
])
### (2) process the subjects (1-by-1) ###
limitInfo = {'gff_type': subjectFeatureNamesList}
subjectHandle = open(subjectGffFilename, 'rU')
for sequence in GFF.parse(subjectHandle, limit_info=limitInfo):
try:
neighbourTags = neighbourTagLists[sequence.id]
except:
neighbourTags = []
for subject in sequence.features:
(int(subject.location.start), int(subject.location.end))
i = -1
## find first tag after subject end ##
endDist = naValue
for i in xrange(len(neighbourTags)):
neighbourTag = neighbourTags[i]
if neighbourTag[0] >= subject.location.end:
if neighbourTag[1]: # is start neighbour?
endDist = neighbourTag[0] - subject.location.end
else:
endDist = 0
break
# find the first tag before subject start
startDist = naValue
for i in xrange(i,-1,-1): # backwards from where first loop ended
neighbourTag = neighbourTags[i]
if neighbourTag[0] <= subject.location.start:
if neighbourTag[1]: # is start neighbour?
startDist = 0
else:
startDist = subject.location.start - neighbourTag[0]
break
counter.inc(subject.strand)
label = "%s-%s-%s" % (sequence.id, subject.location.start, subject.location.end)
fid = label
if 'ID' in subject.qualifiers:
fid = " & ".join(subject.qualifiers['ID'])
if subject.strand in ('+', '.', '?', 1):
print str(delimiter).join([
fid,
label,
str(startDist),
str(endDist),
"+",
])
elif subject.strand in ('-', -1):
print str(delimiter).join([
fid,
label,
str(endDist),
str(startDist),
"-",
])
elif verbose:
print "Unknown strand: %s" % subject.strand
# ^ next subject ^
# ^ next sequence ^
subjectHandle.close()
if verbose:
print ''
print '[Feature counts]'
print 'Total: %s' % (counter.total - counter.counters['ne'])
print '+ve: %s' % (counter.counters['+'] + counter.counters[1])
print '-ve: %s' % (counter.counters['-'] + counter.counters[-1])
print 'non-stranded: %s' % counter.counters['.']
print 'unknown: %s' % counter.counters['?']
print 'others: %s' % counter.others
print ''
print '[Neighbour counts]'
print 'Total: %s' % counter.counters['ne']
class SafeCounter(object):
    '''Counter over a fixed set of keys; unexpected keys are tallied in self.others.'''
def __init__(self, lists):
self.counters = {}
for val in lists:
self.counters[val] = 0
self.total = 0
self.others = 0
def inc(self, key):
try:
self.counters[key] += 1
except KeyError:
            self.others += 1
self.total += 1
## end SafeCounter
if __name__ == "__main__":
main(sys.argv[1:])
## EOF ##
|
from django.contrib import admin
from .models import ExampleSlackModel
class ExampleSlackAdmin(admin.ModelAdmin):
"""Simple admin view for looking over data"""
admin.site.register(ExampleSlackModel, ExampleSlackAdmin)
|
# -*- coding: utf-8 -*-
from guillotina import configure
from guillotina.behaviors.instance import AnnotationBehavior
from guillotina.behaviors.properties import FunctionProperty
from guillotina.interfaces import IResource
from guillotina.utils import get_authenticated_user_id
from guillotina_cms.interfaces import IFollowing
from guillotina_cms.interfaces import IFollowingMarker
@configure.behavior(
title="Following",
provides=IFollowing,
marker=IFollowingMarker,
for_=IResource)
class Following(AnnotationBehavior):
__local__properties__ = ('favorite',)
def get_favorite(self):
user = get_authenticated_user_id()
return user in (self.favorites or [])
def set_favorite(self, value):
pass
favorite = FunctionProperty(
'favorite', get_favorite, set_favorite)
|
from collections import namedtuple
from functools import partial
from typing import Any, Dict, Union
from quilldelta import utils as _
__all__ = ['Insert', 'Retain', 'Delete', 'OperationType',
'is_retain', 'is_insert', 'is_delete',
'it_insert_text', 'load_operation']
def _sum_operation(instance, other):
type_op = type(instance)
type_other = type(other)
    if type_other != type_op:
        raise ValueError(f'Operations are not the same type '
                         f'{type_op.__name__} != {type_other.__name__}')
if hasattr(instance, 'attributes'):
instance_attr = instance.attributes if instance.attributes else None
other_attr = other.attributes if other.attributes else None
if instance_attr != other_attr:
raise ValueError("Can't sum operations with different attributes")
return type_op(instance.value + other.value, other_attr)
else:
return type_op(instance.value + other.value)
class Insert(namedtuple('Insert', 'value, attributes')):
__slots__ = ()
__str__ = _.instance_as_json
__add__ = _sum_operation
as_data = _.instance_as_dict
as_json = _.instance_as_json
@classmethod
def fromdict(cls, data):
data.setdefault('attributes', None)
return _.dict_to_class(cls, data)
@property
def length(self):
if isinstance(self.value, str):
return len(self.value)
return 1
class Retain(namedtuple('Retain', 'value, attributes')):
__slots__ = ()
__str__ = _.instance_as_json
__add__ = _sum_operation
as_data = _.instance_as_dict
as_json = _.instance_as_json
@classmethod
def fromdict(cls, data: dict):
data.setdefault('attributes', None)
return _.dict_to_class(cls, data)
@property
def length(self):
return self.value
@length.setter
def length(self, val: int):
assert isinstance(val, int)
self.value = val
class Delete(namedtuple('Delete', 'value')):
__slots__ = ()
__str__ = _.instance_as_json
__add__ = _sum_operation
as_data = _.instance_as_dict
as_json = _.instance_as_json
@classmethod
def fromdict(cls, data: dict):
return _.dict_to_class(cls, data)
@property
def length(self):
return self.value
@length.setter
def length(self, val: int):
assert isinstance(val, int)
self.value = val
OperationType = Union[Insert, Retain, Delete, Dict]
def load_operation(data: OperationType):
if isinstance(data, (Insert, Retain, Delete)):
return data
elif isinstance(data, Dict):
if 'insert' in data:
return Insert.fromdict(data)
elif 'retain' in data:
return Retain.fromdict(data)
elif 'delete' in data:
return Delete.fromdict(data)
raise ValueError('Unknown operation for %s' % data)
def _isinstance(op: Any, class_or_tuple):
return isinstance(op, class_or_tuple)
is_insert = partial(_isinstance, class_or_tuple=Insert)
is_retain = partial(_isinstance, class_or_tuple=Retain)
is_delete = partial(_isinstance, class_or_tuple=Delete)
def it_insert_text(op: Any):
return is_insert(op) and isinstance(op.value, str)
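# --- Illustrative examples (not part of the original module) ---
# A minimal sketch of the operation types defined above: summing compatible
# operations, reading lengths, and the type predicates.
if __name__ == '__main__':
    a = Insert('Hello ', None)
    b = Insert('world', None)
    combined = a + b                     # Insert('Hello world', None)
    print(combined.length)               # 11: string inserts measure text length
    print(Retain(5, None).length)        # 5: a retain's length is its value
    print(is_insert(combined), is_delete(Delete(2)))  # True True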
|
import binascii
import uuid
from base64 import b64decode
from datetime import datetime, timedelta
from urllib.parse import parse_qs, parse_qsl, urlencode, urlparse, urlunparse
from django.core.exceptions import ImproperlyConfigured
from django.db.models import F, Func, TextField
from rest_framework.pagination import BasePagination
from rest_framework.response import Response
from rest_framework.settings import api_settings
class ActivityCursorPagination(BasePagination):
"""
Cursor pagination for activities.
The activity stream service scrapes specified endpoints at regular intervals to get the
    activity feed from various services. It scrapes all of the pages, and scrapes the last
    page more frequently. If the last page has a "next" link, it scrapes that and updates the
    pointer to the last page.
    The default LIMIT/OFFSET pagination gets slower as you progress through the pages so we
decided to use cursor pagination here because we needed to render the last page quite
frequently.
The built-in Django Rest Framework Cursor pagination is not used, since it
- has a lot of code that isn't applicable to this use case, which makes it tricky to extend or
    debug, e.g. for performance issues
- uses an almost-unique value + offset cursor that isn't needed when we have a completely
unique compound cursor: (modified_on, id)
"""
page_size = api_settings.PAGE_SIZE
summary = None
def _get_summary(self):
if self.summary is None:
raise ImproperlyConfigured(
f'{self.__class__.__name__} requires definition of `summary` attribute '
'or a `_get_summary()` method',
)
return self.summary
def _replace_query_param(self, url, key, vals):
"""
Replaces all of the values of `key` of the query in `url` with `vals`
The DRF version of this function is not used, since it always replaces all of the values of
`key` with a single value.
"""
parsed = urlparse(url)
return urlunparse(parsed._replace(query=urlencode(tuple(
(_key, val) for (_key, val) in parse_qsl(parsed.query, keep_blank_values=True)
if _key != key
) + tuple((key, val) for val in vals))))
def paginate_queryset(self, queryset, request, view=None):
"""
Returns a page of results based on the cursor query string parameter. Designed to make the
last page empty
"""
        # Extract cursor from query string. Includes partial support for DRF's base64-encoded cursor
# of timestamp + offset, the previous pagination mechanism. This is so that at the time
# of deployment the Activity Stream can carry on from at most a few pages before where it
# was. Once this is live and working, the DRF support can be removed
try:
after_ts_str = parse_qs(b64decode(request.GET.getlist('cursor')[0]))[b'p'][0].decode()
after_id_str = '00000000-0000-0000-0000-000000000000'
except (IndexError, KeyError, binascii.Error):
after_ts_str, after_id_str = request.GET.getlist(
'cursor',
('0001-01-01 00:00:00.000000+00:00', '00000000-0000-0000-0000-000000000000'),
)
after_ts = datetime.fromisoformat(after_ts_str)
after_id = uuid.UUID(after_id_str)
# Filter queryset to be after cursor.
#
# A composite/row/tuple lexicographic comparison is used to have the biggest chance of
# fully using a multicolumn index. When tested on interactions in production, these queries
        # take ~50ms. If doing the comparison "manually", such queries take ~1.5s+
#
# To do this in the Django ORM requires 'annotate', which itself requires a small hack: the
# setting of an output_field, which can be anything since we don't access the value.
modified_on_id = Func(F('modified_on'), F('id'), function='ROW', output_field=TextField())
after_ts_id = Func(after_ts, after_id, function='ROW')
# Mitigate the risk of timestamps being committed slightly out of order, which could result
# in activities being missed when the last page is polled
one_second_ago = datetime.now() - timedelta(seconds=1)
page = list(queryset
.annotate(modified_on_id=modified_on_id)
.filter(modified_on_id__gt=after_ts_id, modified_on__lt=one_second_ago)
# Do not use ROW expressions in order_by: it seems to have an extremely
# negative performance impact
.order_by('modified_on', 'id')[:self.page_size])
# Build and store next link for all non-empty pages to be used in get_paginated_response
if not page:
self.next_link = None
else:
final_instance = page[-1]
next_after_ts_str = final_instance.modified_on.isoformat(timespec='microseconds')
next_after_id_str = str(final_instance.id)
self.next_link = self._replace_query_param(
request.build_absolute_uri(),
'cursor', (next_after_ts_str, next_after_id_str),
)
return page
def get_paginated_response(self, data):
"""
Overriding this function to re-format the response according to
activity stream spec.
"""
return Response(
{
'@context': 'https://www.w3.org/ns/activitystreams',
'summary': self._get_summary(),
'type': 'OrderedCollectionPage',
'orderedItems': data,
'next': self.next_link,
},
)
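# Hedged example (illustrative, not part of the original module): a concrete pagination class
# only needs to define `summary`; the class name and summary text below are assumptions.
class ExampleActivityPagination(ActivityCursorPagination):
    summary = 'Example activities'
# A DRF view would then set `pagination_class = ExampleActivityPagination` and expose a queryset
# with `modified_on` and `id` fields so the (modified_on, id) cursor filtering above applies.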
|
import gc
import torch
import numpy as np
import random
from transformers import AutoTokenizer, AutoModelForSequenceClassification
def cleanup():
gc.collect()
torch.cuda.empty_cache()
def turn_off_grad(model):
for param in model.parameters():
param.requires_grad = False
def turn_on_grad(model):
for param in model.parameters():
param.requires_grad = True
def load_model(
model_name=None,
model_class=AutoModelForSequenceClassification,
use_cuda=True
):
if model_name is None:
raise ValueError('model_name should be provided')
model = model_class.from_pretrained(model_name)
if torch.cuda.is_available() and use_cuda:
model.cuda()
tokenizer = AutoTokenizer.from_pretrained(model_name)
return model, tokenizer
def set_seed(seed: int):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
random.seed(seed)
def static_vars(**kwargs):
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
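# Hedged usage sketch for the helper above (the function and attribute names are illustrative):
# static_vars attaches attributes to a function so they behave like function-level statics.
@static_vars(counter=0)
def count_calls():
    count_calls.counter += 1  # attribute set by the decorator persists across calls
    return count_calls.counter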
|
# twitter/views_admin.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .controllers import delete_possible_twitter_handles, retrieve_possible_twitter_handles
from .models import TwitterLinkPossibility
from admin_tools.views import redirect_to_sign_in_page
from candidate.models import CandidateCampaign, CandidateCampaignManager
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from voter.models import voter_has_authority
from wevote_functions.functions import convert_to_int, positive_value_exists
import wevote_functions.admin
from wevote_settings.models import RemoteRequestHistory, RETRIEVE_POSSIBLE_TWITTER_HANDLES
logger = wevote_functions.admin.get_logger(__name__)
@login_required
def delete_possible_twitter_handles_view(request, candidate_campaign_we_vote_id):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
candidate_manager = CandidateCampaignManager()
results = candidate_manager.retrieve_candidate_campaign_from_we_vote_id(candidate_campaign_we_vote_id)
if not results['candidate_campaign_found']:
messages.add_message(request, messages.INFO, results['status'])
return HttpResponseRedirect(reverse('candidate:candidate_edit_we_vote_id',
args=(candidate_campaign_we_vote_id,)))
candidate_campaign = results['candidate_campaign']
results = delete_possible_twitter_handles(candidate_campaign)
messages.add_message(request, messages.INFO, 'Possibilities deleted.')
return HttpResponseRedirect(reverse('candidate:candidate_edit_we_vote_id', args=(candidate_campaign_we_vote_id,)))
@login_required
def retrieve_possible_twitter_handles_view(request, candidate_campaign_we_vote_id):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
candidate_manager = CandidateCampaignManager()
results = candidate_manager.retrieve_candidate_campaign_from_we_vote_id(candidate_campaign_we_vote_id)
if not results['candidate_campaign_found']:
messages.add_message(request, messages.INFO, results['status'])
return HttpResponseRedirect(reverse('candidate:candidate_edit_we_vote_id',
args=(candidate_campaign_we_vote_id,)))
candidate_campaign = results['candidate_campaign']
results = retrieve_possible_twitter_handles(candidate_campaign)
    messages.add_message(request, messages.INFO, 'Number of possibilities found: ' + str(results['num_of_possibilities']))
return HttpResponseRedirect(reverse('candidate:candidate_edit_we_vote_id', args=(candidate_campaign_we_vote_id,)))
@login_required
def bulk_retrieve_possible_twitter_handles_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
hide_candidate_tools = request.GET.get('hide_candidate_tools', False)
page = request.GET.get('page', 0)
state_code = request.GET.get('state_code', '')
limit = convert_to_int(request.GET.get('show_all', 0))
if not positive_value_exists(google_civic_election_id) and not positive_value_exists(state_code) \
and not positive_value_exists(limit):
messages.add_message(request, messages.ERROR,
'bulk_retrieve_possible_twitter_handles_view, LIMITING_VARIABLE_REQUIRED')
return HttpResponseRedirect(reverse('candidate:candidate_list', args=()) +
'?google_civic_election_id=' + str(google_civic_election_id) +
'&state_code=' + str(state_code) +
'&hide_candidate_tools=' + str(hide_candidate_tools) +
'&page=' + str(page)
)
try:
candidate_list = CandidateCampaign.objects.all()
if positive_value_exists(google_civic_election_id):
candidate_list = candidate_list.filter(google_civic_election_id=google_civic_election_id)
if positive_value_exists(state_code):
candidate_list = candidate_list.filter(state_code__iexact=state_code)
candidate_list = candidate_list.order_by('candidate_name')
if positive_value_exists(limit):
candidate_list = candidate_list[:limit]
candidate_list_count = candidate_list.count()
# Run Twitter account search and analysis on candidates without a linked or possible Twitter account
number_of_candidates_to_search = 25
current_candidate_index = 0
while positive_value_exists(number_of_candidates_to_search) \
and (current_candidate_index < candidate_list_count):
one_candidate = candidate_list[current_candidate_index]
if not positive_value_exists(one_candidate.candidate_twitter_handle):
# Candidate does not have a Twitter account linked
# Check to see if we have already tried to find their information from Twitter. We don't want to
# search Twitter more than once.
request_history_query = RemoteRequestHistory.objects.filter(
candidate_campaign_we_vote_id__iexact=one_candidate.we_vote_id,
kind_of_action=RETRIEVE_POSSIBLE_TWITTER_HANDLES)
request_history_list = list(request_history_query)
if not positive_value_exists(request_history_list):
# Twitter account search and analysis has not been run on this candidate yet
results = retrieve_possible_twitter_handles(one_candidate)
number_of_candidates_to_search -= 1
current_candidate_index += 1
except CandidateCampaign.DoesNotExist:
# This is fine, do nothing
pass
return HttpResponseRedirect(reverse('candidate:candidate_list', args=()) +
'?google_civic_election_id=' + str(google_civic_election_id) +
'&state_code=' + str(state_code) +
'&hide_candidate_tools=' + str(hide_candidate_tools) +
'&page=' + str(page)
)
|
import logging
from asreview.ascii import welcome_message
from asreview.config import DEFAULT_MODEL, DEFAULT_FEATURE_EXTRACTION
from asreview.config import DEFAULT_QUERY_STRATEGY
from asreview.config import DEFAULT_BALANCE_STRATEGY
from asreview.config import DEFAULT_N_INSTANCES
from asreview.config import DEFAULT_N_PRIOR_EXCLUDED
from asreview.config import DEFAULT_N_PRIOR_INCLUDED
from asreview.entry_points.base import BaseEntryPoint, _base_parser
from asreview.review import review_simulate
class SimulateEntryPoint(BaseEntryPoint):
description = "Simulate the performance of ASReview."
def execute(self, argv):
parser = _simulate_parser()
args = parser.parse_args(argv)
args_dict = vars(args)
path = args_dict.pop("dataset")
verbose = args_dict.get("verbose", 0)
if verbose == 0:
logging.getLogger().setLevel(logging.WARNING)
elif verbose == 1:
logging.getLogger().setLevel(logging.INFO)
elif verbose >= 2:
logging.getLogger().setLevel(logging.DEBUG)
print(welcome_message())
review_simulate(path, **args_dict)
DESCRIPTION_SIMULATE = """
Automated Systematic Review (ASReview) for simulation runs.
The simulation mode is used to measure the performance of our
software on existing systematic reviews. The software shows how many
papers you could have potentially skipped during the systematic
review."""
def _simulate_parser(prog="simulate", description=DESCRIPTION_SIMULATE):
parser = _base_parser(prog=prog, description=description)
# Active learning parameters
# File path to the data.
parser.add_argument(
"dataset",
type=str,
nargs="*",
help="File path to the dataset or one of the built-in datasets."
)
# Initial data (prior knowledge)
parser.add_argument(
"--n_prior_included",
default=DEFAULT_N_PRIOR_INCLUDED,
type=int,
help="Sample n prior included papers. "
"Only used when --prior_included is not given. "
f"Default {DEFAULT_N_PRIOR_INCLUDED}")
parser.add_argument(
"--n_prior_excluded",
default=DEFAULT_N_PRIOR_EXCLUDED,
type=int,
help="Sample n prior excluded papers. "
"Only used when --prior_excluded is not given. "
f"Default {DEFAULT_N_PRIOR_EXCLUDED}")
return parser
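# Hedged usage sketch (illustrative; the dataset path is an assumption): the entry point parses
# CLI-style arguments, pops `dataset`, and forwards the remaining options to review_simulate.
#
#   SimulateEntryPoint().execute(["example_dataset.csv", "--n_prior_included", "5"])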
|
# -*- coding: utf-8 -*-
# @Time : 2021/2/22 18:54
# @Author : duyu
# @Email : abelazady@foxmail.com
# @File : my_hook.py
# @Software: PyCharm
import torch
from mmcv.runner import HOOKS, Hook
from mmcv.runner.hooks.optimizer import OptimizerHook
@HOOKS.register_module()
class MyOptimizerHook(OptimizerHook):
def __init__(self):
super(MyOptimizerHook,self).__init__()
def after_train_iter(self, runner):
runner.optimizer.zero_grad()
runner.outputs['loss'].backward()
if self.grad_clip is not None:
grad_norm = self.clip_grads(runner.model.parameters())
if grad_norm is not None:
# Add grad norm to the logger
runner.log_buffer.update({'grad_norm': float(grad_norm)},
runner.outputs['num_samples'])
runner.optimizer.step()
@HOOKS.register_module()
class MyHook(Hook):
def __init__(self):
pass
def before_run(self, runner):
# print(dir(runner))
pass
def after_run(self, runner):
pass
def before_epoch(self, runner):
pass
def after_epoch(self, runner):
pass
def before_iter(self, runner):
pass
def after_iter(self, runner):
pass
def hook_fn_backward(module,grad_input,grad_output):
# print(module)
# print(module.weight)
# print(grad_output)
pass
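# Hedged usage note (illustrative, not part of the original file): once registered via
# @HOOKS.register_module(), these hooks are typically enabled from an mmcv-style config, e.g.
#
#   custom_hooks = [dict(type='MyHook')]
#   optimizer_config = dict(type='MyOptimizerHook')
#
# The exact config keys depend on the mmcv/mmdetection version, so treat this as an assumption.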
|
n = int(input('Which number do you want the multiplication table for? '))
for c in range(0, 11):
    print(n, '*', c, '=', n * c)
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.reachability_service import ReachabilityServiceClient
from .services.reachability_service import ReachabilityServiceAsyncClient
from .types.connectivity_test import ConnectivityTest
from .types.connectivity_test import Endpoint
from .types.connectivity_test import LatencyDistribution
from .types.connectivity_test import LatencyPercentile
from .types.connectivity_test import ProbingDetails
from .types.connectivity_test import ReachabilityDetails
from .types.reachability import CreateConnectivityTestRequest
from .types.reachability import DeleteConnectivityTestRequest
from .types.reachability import GetConnectivityTestRequest
from .types.reachability import ListConnectivityTestsRequest
from .types.reachability import ListConnectivityTestsResponse
from .types.reachability import OperationMetadata
from .types.reachability import RerunConnectivityTestRequest
from .types.reachability import UpdateConnectivityTestRequest
from .types.trace import AbortInfo
from .types.trace import CloudSQLInstanceInfo
from .types.trace import DeliverInfo
from .types.trace import DropInfo
from .types.trace import EndpointInfo
from .types.trace import FirewallInfo
from .types.trace import ForwardInfo
from .types.trace import ForwardingRuleInfo
from .types.trace import GKEMasterInfo
from .types.trace import InstanceInfo
from .types.trace import LoadBalancerBackend
from .types.trace import LoadBalancerInfo
from .types.trace import NetworkInfo
from .types.trace import RouteInfo
from .types.trace import Step
from .types.trace import Trace
from .types.trace import VpnGatewayInfo
from .types.trace import VpnTunnelInfo
__all__ = (
'ReachabilityServiceAsyncClient',
'AbortInfo',
'CloudSQLInstanceInfo',
'ConnectivityTest',
'CreateConnectivityTestRequest',
'DeleteConnectivityTestRequest',
'DeliverInfo',
'DropInfo',
'Endpoint',
'EndpointInfo',
'FirewallInfo',
'ForwardInfo',
'ForwardingRuleInfo',
'GKEMasterInfo',
'GetConnectivityTestRequest',
'InstanceInfo',
'LatencyDistribution',
'LatencyPercentile',
'ListConnectivityTestsRequest',
'ListConnectivityTestsResponse',
'LoadBalancerBackend',
'LoadBalancerInfo',
'NetworkInfo',
'OperationMetadata',
'ProbingDetails',
'ReachabilityDetails',
'ReachabilityServiceClient',
'RerunConnectivityTestRequest',
'RouteInfo',
'Step',
'Trace',
'UpdateConnectivityTestRequest',
'VpnGatewayInfo',
'VpnTunnelInfo',
)
|
# Packages up pygw so it's pip-installable
from setuptools import setup, find_packages
with open('README.md', 'r') as fh:
long_description = fh.read()
def get_version():
try:
from maven_version import get_maven_version
version = get_maven_version()
except ModuleNotFoundError:
# If maven version isn't found, it must be from the distribution
from pkg_resources import get_distribution
from pkg_resources import DistributionNotFound
version = get_distribution('pygw').version
return version
setup(
name='pygw',
author='GeoWave Contributors',
author_email='geowave.python@gmail.com',
description='GeoWave bindings for Python3',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://locationtech.github.io/geowave/',
project_urls={
'Documentation': 'https://locationtech.github.io/geowave/pydocs/',
'Source': 'https://github.com/locationtech/geowave/tree/master/python/src/main/python',
},
version=get_version(),
packages=find_packages(),
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
],
install_requires=['py4j==0.10.8.1','shapely==1.6'],
python_requires='>=3,<3.8' # py4j does not support python 3.8 yet
)
|
import pytest
from mars_profiling.profile_report import ProfileReport
def test_phases_empty():
profile = ProfileReport(None)
with pytest.raises(ValueError) as e:
profile.to_html()
assert (
e.value.args[0]
== "Can not describe a `lazy` ProfileReport without a DataFrame."
)
|
import random
from datetime import date, datetime, timedelta, timezone
from typing import List, Optional
from fastapi.exceptions import HTTPException
from starlette import status
from app.models.phq import AvgAndEstimatedPhqScore, Phq, SingleQuestionAvgScore
from app.models.user import User
from app.schema.phq import (
GraphEntry,
Question,
SingleQuestionResponse,
SingleQuestionResponseFloat,
)
QV1 = [
"I have little interest or pleasure in doing things",
"I feel down and depressed and hopeless",
"I have trouble with sleep",
"I have been feeling tired and have little energy",
"I have a poor appetite or am overeating",
"I feel guilty or bad about myself",
"I have trouble concentrating",
"I am moving slower or fidgeting more",
"I would be better off dead or hurting myself",
]
QV2 = [
"I have lots of interest or pleasure in doing things",
"I feel up and bright and hopeful",
"I have been sleeping well",
"I have been feeling active and have lots of enery",
"I am eating the right amount of food",
"I feel positive and good about myself",
"I can concentrate well",
"I am not fidgety or feel weighed down either",
"I do not want to hurt or kill myself",
]
def all_questions(user: User) -> List[Question]:
# returns the list of all questions
record = AvgAndEstimatedPhqScore.objects(user=user).first()
questions = []
for i in range(0, 9):
version = random.choice([1, 2])
question = ""
if version == 1:
question = QV1[i]
else:
question = QV2[i]
average_score = 0
if record:
average_score = record.average_scores.get(str(i + 1)).average
if version == 2:
average_score = 3 - average_score
average_score = int(average_score)
# questions.append([i+1, question, version, average_score])
questions.append(
{
"qno": i + 1,
"question": question,
"version": version,
"average_score": average_score,
}
)
return questions
def three_questions(user: User) -> List[Question]:
# if user has submitted his first record, then he won't get any more questions on same day
# record = AvgAndEstimatedPhqScore.objects(user=user).first()
record = (
Phq.objects(
user=user,
)
.order_by("+datetime")
.first()
)
if record:
if record.datetime.date() == datetime.now(tz=timezone.utc).date():
return []
    # get records for the current user that have datetime greater than or equal to today's date at midnight
# sort the records in desc order of datetime
todays_records = Phq.objects(
user=user,
datetime__gte=datetime.combine(datetime.now().date(), datetime.min.time()),
).order_by("-datetime")
all_ques = all_questions(user)
# if no records for that day, send 3 random questions
if len(todays_records) == 0:
return random.sample(list(all_ques), k=3)
if len(todays_records) < 3:
        # if the latest record is older than the cooldown window (intended 4 hours; the code currently uses 0.5 minutes), select 3 random questions
if (
datetime.now(tz=timezone.utc) - todays_records[0].datetime
).total_seconds() / 60 > 0.5:
return random.sample(list(all_ques), k=3)
return []
def get_score(response: SingleQuestionResponse) -> int:
# if question version was 1, return score as it is
# if question version was 2, return 3-score
if response:
if response.version == 1:
return response.score
elif response.version == 2:
return 3 - response.score
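# Worked example of the inversion above (illustrative; assumes SingleQuestionResponse accepts
# these fields as keyword arguments): a version-2 question is phrased positively, so a raw
# score of 1 is stored as 3 - 1 = 2 on the PHQ scale.
#
#   get_score(SingleQuestionResponse(qno=1, score=1, version=2))  # -> 2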
def add_answers_to_db(user: User, body: List[SingleQuestionResponse]):
answers = {}
for response in body:
answers.update({str(response.qno): get_score(response)})
phq = Phq(user=user, datetime=datetime.now(tz=timezone.utc), answers=answers)
phq.save()
def update_avg_and_estm_phq(
user: User,
body: list,
date: Optional[date] = None,
fixed=False,
):
# update the average of each question and the estimated phq after each response
if not date:
date = datetime.now(tz=timezone.utc).date()
if not body:
# if no records for that day, update fixed value only
# should only run when fix function is called
that_days_record = AvgAndEstimatedPhqScore.objects(user=user, date=date).first()
that_days_record.fixed = fixed
that_days_record.save()
return
# fetch all records in desc order of date so 1st record will be latest
all_records = AvgAndEstimatedPhqScore.objects(user=user).order_by("-date")
todays_record = None
if all_records:
if all_records[0].date == date:
todays_record = all_records[0]
# if record for that day exists, update that record
if todays_record:
for response in body:
# update values for all questions present in body
qno = str(response.qno)
score = get_score(response)
old_avg = todays_record.average_scores.get(qno).average
old_total_records = todays_record.average_scores.get(qno).total_records
todays_record.estimated_phq += score - old_avg
new_entry = SingleQuestionAvgScore(
average=(old_avg * old_total_records + score) / (old_total_records + 1),
total_records=old_total_records + 1,
)
todays_record.average_scores.update({qno: new_entry})
todays_record.fixed = fixed
# save the updated record
todays_record.save()
    # if a record for that day doesn't exist but there are previous records,
    # we take the latest record: if today's record doesn't exist, the latest record is yesterday's,
    # and create a new record for the current day from yesterday's record and the form values,
    # assuming the fix-records function has been run before this function on the POST request
elif all_records:
yesterdays_record = all_records[0]
estimated_phq = yesterdays_record.estimated_phq
average_scores = yesterdays_record.average_scores.copy()
for response in body:
qno = str(response.qno)
score = get_score(response)
yesterdays_avg = yesterdays_record.average_scores.get(qno).average
yesterdays_total_records = yesterdays_record.average_scores.get(
qno
).total_records
estimated_phq += score - yesterdays_avg
todays_entry = SingleQuestionAvgScore(
average=(yesterdays_avg * yesterdays_total_records + score)
/ (yesterdays_total_records + 1),
total_records=yesterdays_total_records + 1,
)
average_scores.update({qno: todays_entry})
new_record = AvgAndEstimatedPhqScore(
user=user,
date=date,
fixed=fixed,
average_scores=average_scores,
estimated_phq=estimated_phq,
)
# add this new record to db
new_record.save()
# if no record for user exists, first record is created
# create new record
else:
average_scores = {}
estimated_phq = 0
for response in body:
qno = str(response.qno)
score = get_score(response)
todays_entry = SingleQuestionAvgScore(average=score, total_records=1)
estimated_phq += score
average_scores.update({qno: todays_entry})
new_record = AvgAndEstimatedPhqScore(
user=user,
date=date,
fixed=fixed,
average_scores=average_scores,
estimated_phq=estimated_phq,
)
new_record.save()
def fix_missing_records(user, last_fix_date: date):
    # this function handles the user not submitting all three records in a day,
    # as well as a question not being asked a single time on a given day
def daterange(start_date, end_date):
# returns the range of dates to iterate on
for n in range(int((end_date - start_date).days)):
yield start_date + timedelta(n)
for day in daterange(last_fix_date, datetime.now(tz=timezone.utc).date()):
        # fetch the records of a particular day,
        # check for the questions that haven't been answered at all in that day's records,
        # and create entries for those missing questions using the previous day's average
records = Phq.objects(
user=user,
datetime__gte=datetime.combine(day, datetime.min.time()),
datetime__lt=datetime.combine(day + timedelta(days=1), datetime.min.time()),
)
estimated_phq_record = AvgAndEstimatedPhqScore.objects(
user=user, date=last_fix_date
).first()
entry = []
for i in range(1, 10):
entry_exists = False
for record in records:
if record.answers.get(str(i)) is not None:
entry_exists = True
if not entry_exists:
entry.append(
SingleQuestionResponseFloat(
qno=i,
score=estimated_phq_record.average_scores.get(str(i)).average,
version=1,
)
)
update_avg_and_estm_phq(user, entry, day, fixed=True)
def generate_graph_values(user: User) -> List[GraphEntry]:
graph_details = []
records = AvgAndEstimatedPhqScore.objects(user=user).order_by("+date")
# if no records, raise exception
    if not records:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="No data found 😞",
)
phq_records = list(Phq.objects(user=user).order_by("+date"))
for record in records:
sum_of_avg = 0
q9_sum = 0
q9_count = 0
for i in range(1, 10):
sum_of_avg += record.average_scores.get(str(i)).average
for i in range(3):
            # check the first three records of the phq table in ascending order of date;
            # if a record has the same date as the averages-table record, include it in that day's q9 average,
            # otherwise keep the average at 0
if phq_records:
if phq_records[0].datetime.date() == record.date:
if (score := phq_records[0].answers.get("9")) is not None:
q9_sum += score
q9_count += 1
# remove record that has been used so next record become 1st
phq_records.pop(0)
else:
break
graph_details.append(
GraphEntry(
date=record.date,
estimated_phq=record.estimated_phq,
sum_of_avg=sum_of_avg,
q9_avg=(q9_sum / q9_count) if q9_count != 0 else 0,
)
)
if len(graph_details) > 14:
graph_details = graph_details[-14:]
return graph_details
|
import StringIO
import json
import logging
import random
import urllib
import urllib2
import requests
# for sending images
from PIL import Image
import multipart
# standard app engine imports
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
import webapp2
TOKEN = '356407842:AAFndoaIMeEfF7tXCAEq0pU5Rlekm5k2asw'
BASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/'
# ================================
class EnableStatus(ndb.Model):
# key name: str(chat_id)
enabled = ndb.BooleanProperty(indexed=False, default=False)
# ================================
def setEnabled(chat_id, yes):
es = EnableStatus.get_or_insert(str(chat_id))
es.enabled = yes
es.put()
def getEnabled(chat_id):
es = EnableStatus.get_by_id(str(chat_id))
if es:
return es.enabled
return False
# ================================
class MeHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getMe'))))
class GetUpdatesHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getUpdates'))))
class SetWebhookHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
url = self.request.get('url')
if url:
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'setWebhook', urllib.urlencode({'url': url})))))
class WebhookHandler(webapp2.RequestHandler):
def post(self):
urlfetch.set_default_fetch_deadline(60)
body = json.loads(self.request.body)
logging.info('request body:')
logging.info(body)
self.response.write(json.dumps(body))
update_id = body['update_id']
try:
message = body['message']
        except KeyError:
message = body['edited_message']
message_id = message.get('message_id')
date = message.get('date')
text = message.get('text')
fr = message.get('from')
chat = message['chat']
chat_id = chat['id']
if not text:
logging.info('no text')
return
def reply(msg=None, img=None):
if msg:
resp = urllib2.urlopen(BASE_URL + 'sendMessage', urllib.urlencode({
'chat_id': str(chat_id),
'text': msg.encode('utf-8'),
'disable_web_page_preview': 'true',
'reply_to_message_id': str(message_id),
})).read()
elif img:
resp = multipart.post_multipart(BASE_URL + 'sendPhoto', [
('chat_id', str(chat_id)),
('reply_to_message_id', str(message_id)),
], [
('photo', 'image.jpg', img),
])
else:
logging.error('no msg or img specified')
resp = None
logging.info('send response:')
logging.info(resp)
if text.startswith('/'):
if text == '/start':
reply('Bot enabled')
setEnabled(chat_id, True)
elif text == '/stop':
reply('Bot disabled')
setEnabled(chat_id, False)
elif text == '/image':
img = Image.new('RGB', (512, 512))
base = random.randint(0, 16777216)
pixels = [base+i*j for i in range(512) for j in range(512)] # generate sample image
img.putdata(pixels)
output = StringIO.StringIO()
img.save(output, 'JPEG')
reply(img=output.getvalue())
else:
reply('What command?')
# CUSTOMIZE FROM HERE
elif 'who are you' in text:
reply('College Enquiry Chatbot created by George J Padayatti & Jose Thomas Dominic')
elif 'what time' in text:
reply('look at the corner of your screen!')
else:
if getEnabled(chat_id):
message_text = '+'.join(text.split(" "))
server_url = "http://ccbserver.herokuapp.com/api/msg/"
final_url = server_url+message_text
resp = requests.get(final_url)
msg = json.loads(resp.text)
msg = msg['response'][0]['output']
reply(msg)
else:
logging.info('not enabled for chat_id {}'.format(chat_id))
app = webapp2.WSGIApplication([
('/me', MeHandler),
('/updates', GetUpdatesHandler),
('/set_webhook', SetWebhookHandler),
('/webhook', WebhookHandler),
], debug=True)
|
import os
#Predicates = {}
class dataset:
current_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(current_path)
def get_resource(self):
# current_path= os.path.dirname(os.path.realpath(__file__))
# os.chdir(current_path)
Rfile = open(".\\Resources.txt", 'r')
Resources = Rfile.read().split('\n')
return Resources
def get_Predicates(self):
Rfile = open(".\\Property.txt", 'r')
Predicates = Rfile.read().split('\n')
return Predicates
# def get_Predicates(self):
# Pfile=open(".\\Property.txt",'r')
# lines=Pfile.read().splitlines()
# for line in lines:
# sline=line.split('\t')
# key=sline.pop(0)
# Predicates[key]=sline
# return Predicates
if __name__ == '__main__':
print(os.getcwd())
|
"""
Policy Network for training imitation learning model. For discrete case, we use classifier.
For continuous case, we use regressor.
"""
import numpy as np
import torch
import torch.nn as nn
from torchlib.common import move_tensor_to_gpu, convert_numpy_to_tensor, enable_cuda
from torchlib.deep_rl import BaseAgent
from torchlib.deep_rl.algorithm.model_based.utils import StateActionPairDataset
from tqdm.auto import tqdm
class ImitationPolicy(BaseAgent):
def __init__(self, model: nn.Module, optimizer):
self.model = model
self.optimizer = optimizer
self.state_mean = None
self.state_std = None
self.loss_fn = None
if enable_cuda:
self.model.cuda()
def train(self):
self.model.train()
def eval(self):
self.model.eval()
@property
def state_dict(self):
states = {
'model': self.model.state_dict(),
'state_mean': self.state_mean,
'state_std': self.state_std
}
return states
def load_state_dict(self, state_dict):
self.model.load_state_dict(state_dict['model'])
self.state_mean = state_dict['state_mean']
self.state_std = state_dict['state_std']
def set_state_stats(self, state_mean, state_std):
self.state_mean = convert_numpy_to_tensor(state_mean).unsqueeze(dim=0)
self.state_std = convert_numpy_to_tensor(state_std).unsqueeze(dim=0)
def predict(self, state):
"""
Args:
state: (ob_dim,)
Returns:
"""
raise NotImplementedError
def fit(self, dataset: StateActionPairDataset, epoch=10, batch_size=128, verbose=False):
t = range(epoch)
if verbose:
t = tqdm(t)
train_data_loader, val_data_loader = dataset.random_iterator(batch_size=batch_size)
for i in t:
losses = []
for state, action in train_data_loader:
self.optimizer.zero_grad()
state = move_tensor_to_gpu(state)
action = move_tensor_to_gpu(action)
state = (state - self.state_mean) / self.state_std
output = self.model.forward(state)
loss = self.loss_fn(output, action)
loss.backward()
self.optimizer.step()
losses.append(loss.item())
self.eval()
val_losses = []
with torch.no_grad():
for state, action in val_data_loader:
state = move_tensor_to_gpu(state)
action = move_tensor_to_gpu(action)
state = (state - self.state_mean) / self.state_std
output = self.model.forward(state)
loss = self.loss_fn(output, action)
val_losses.append(loss.item())
self.train()
if verbose:
t.set_description('Epoch {}/{} - Avg policy train loss: {:.4f} - Avg policy val loss: {:.4f}'.format(
i + 1, epoch, np.mean(losses), np.mean(val_losses)))
class DiscreteImitationPolicy(ImitationPolicy):
def __init__(self, model: nn.Module, optimizer):
super(DiscreteImitationPolicy, self).__init__(model=model, optimizer=optimizer)
self.loss_fn = nn.CrossEntropyLoss()
def predict(self, state):
state = np.expand_dims(state, axis=0)
with torch.no_grad():
state = convert_numpy_to_tensor(state)
state = (state - self.state_mean) / self.state_std
action = self.model.forward(state)
action = torch.argmax(action, dim=-1)
return action.cpu().numpy()[0]
class ContinuousImitationPolicy(ImitationPolicy):
"""
For continuous policy, we assume the action space is between -1 and 1.
So we use tanh as final activation layer.
"""
def __init__(self, model: nn.Module, optimizer):
super(ContinuousImitationPolicy, self).__init__(model=model, optimizer=optimizer)
self.loss_fn = nn.MSELoss()
def predict(self, state):
state = np.expand_dims(state, axis=0)
with torch.no_grad():
state = convert_numpy_to_tensor(state)
state = (state - self.state_mean) / self.state_std
action = self.model.forward(state)
return action.cpu().numpy()[0]
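# Hedged usage sketch (illustrative; the network architecture, dimensions and optimizer are
# assumptions, not part of the original module):
#
#   model = nn.Sequential(nn.Linear(ob_dim, 64), nn.ReLU(), nn.Linear(64, ac_dim), nn.Tanh())
#   policy = ContinuousImitationPolicy(model, torch.optim.Adam(model.parameters(), lr=1e-3))
#   policy.set_state_stats(state_mean, state_std)  # numpy arrays of shape (ob_dim,)
#   policy.fit(dataset, epoch=10, batch_size=128, verbose=True)
#   action = policy.predict(state)                 # state: numpy array of shape (ob_dim,)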
|
from marshmallow_jsonapi import Schema, fields
from marshmallow import validate
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.exc import SQLAlchemyError
db = SQLAlchemy(session_options={"autoflush": True})
class CRUD():
def add(self, resource):
db.session.add(resource)
return db.session.commit()
def update(self):
return db.session.commit()
def delete(self, resource):
db.session.delete(resource)
return db.session.commit()
class Representative(db.Model, CRUD):
__tablename__ = 'haulers_representative'
id = db.Column(db.Integer, primary_key=True)
HAULER_ID = db.Column(db.Integer, nullable=False)
email = db.Column(db.String(64), unique=True, nullable=False)
password = db.Column(db.String(64))
class RepresentativeSchema(Schema):
not_blank = validate.Length(min=1, error='Field cannot be blank')
id = fields.Integer(primary_key=True)
HAULER_ID = fields.Integer()
email = fields.String(validate=not_blank)
#self links
def get_top_level_links(self, data, many):
self_link = ''
if many:
self_link = "/representative/"
else:
if 'attributes' in data:
self_link = "/representative/{}".format(data['attributes']['id'])
return {'self': self_link}
class Meta:
type_ = 'representative'
|
__author__ = 'filipkulig'
import time
from board import Board
from draw import Draw
class Engine():
draw = None
board = None
def __init__(self):
self.draw = Draw()
self.board = Board()
self.draw.set_board(self.board)
def run(self):
info = self.draw.get_board_info()
self.board.set_cols(info['cols'])
self.board.set_lines(info['lines'])
self.draw.draw_board()
while True:
self.board.update()
self.draw.draw_board()
time.sleep(0.1)
|
#!/usr/bin/env python
# Copyright 2020 Stanford University, Los Alamos National Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import subprocess
def find_flexflow_header(ffhome_dir):
def try_prefix(prefix_dir):
flexflow_ch_path = os.path.join(prefix_dir, 'python', 'flexflow_c.h')
flexflow_cxxh_path = os.path.join(prefix_dir, 'include', 'model.h')
if os.path.exists(flexflow_ch_path) and os.path.exists(flexflow_cxxh_path):
flexflow_cxxh_dir = os.path.join(prefix_dir, 'include')
return flexflow_cxxh_dir, flexflow_ch_path
result = try_prefix(ffhome_dir)
if result:
return result
    raise Exception('Unable to locate the flexflow_c.h and model.h header files')
def build(output_dir, libname, ffhome_dir):
flexflow_cxxh_dir, flexflow_ch_path = find_flexflow_header(ffhome_dir)
header = subprocess.check_output(['gcc', '-I', flexflow_cxxh_dir, '-E', '-P', flexflow_ch_path]).decode('utf-8')
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'flexflow_cffi_header.py.in')) as f:
content = f.read()
content = content.format(header=repr(header), libname=repr(libname))
if output_dir is None:
output_dir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(output_dir, 'flexflow_cffi_header.py'), 'wb') as f:
f.write(content.encode('utf-8'))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--ffhome-dir', required=True)
parser.add_argument('--libname', required=True)
parser.add_argument('--output-dir', required=False)
args = parser.parse_args()
build(args.output_dir, args.libname, args.ffhome_dir)
|
#
# This file is part of Bakefile (http://bakefile.org)
#
# Copyright (C) 2009-2013 Vaclav Slavik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
Keep track of properties for extensions or model parts.
Also define standard, always available, properties.
"""
import expr, api, utils
from vartypes import IdType, EnumType, ListType, PathType, StringType, BoolType, TheAnyType
from api import Property
def _std_model_part_props():
return [
Property("_condition",
type=BoolType(),
default=True,
readonly=True,
inheritable=False,
doc="""
Whether to include this object in the build.
                The value is typically a more complicated boolean expression than a simple constant.
"""),
]
def std_file_props():
"""Creates list of all standard source file properties."""
return _std_model_part_props() + [
Property("_filename",
type=PathType(),
default=[],
readonly=True,
inheritable=False,
doc="Source file name."),
Property("compile-commands",
type=ListType(StringType()),
default=[],
inheritable=False,
doc="""
Command or commands to run to compile this source file,
i.e. to generate other file(s) from it. This can be used for
generating some files or for compiling custom file types.
Two placeholders can be used in the commands, ``%(in)`` and
``%(out)``. They are replaced with the name of the source file
and ``outputs`` respectively. Both placeholders are optional.
"""),
Property("compile-message",
type=StringType(),
default=expr.NullExpr(),
inheritable=False,
doc="""
Message shown to the user when running the command.
                The same placeholders as in *compile-commands* can be used.
"""),
Property("outputs",
type=ListType(PathType()),
default=lambda t: None if t["compile-commands"] else expr.NullExpr(),
inheritable=False,
doc="""
Output files created by the build step that compiles this file
Only applicable if *compile-commands* is set.
"""),
Property("dependencies",
type=ListType(PathType()),
default=[],
inheritable=False,
doc="""
List of additional files that the commands that compiles this
source file depend on.
Only applicable if *compile-commands* is set.
"""),
]
def std_target_props():
"""Creates list of all standard target properties."""
return _std_model_part_props() + [
Property("id",
type=IdType(),
default=lambda t: expr.LiteralExpr(t.name),
readonly=True,
inheritable=False,
doc="Target's unique name (ID)."),
Property("deps",
type=ListType(IdType()),
default=[],
inheritable=False,
doc="""
Dependencies of the target (list of IDs).
The dependencies are handled in target-specific ways.
At the very least, they are added to the list of
dependencies in generated makefiles or projects to ensure
correct build order. Some targets may be smart about some
kinds of the dependencies and do more.
In particular, compiled targets (executables, DLLs) will
automatically link against all libraries found in `deps`.
"""),
Property("pre-build-commands",
type=ListType(StringType()),
default=[],
inheritable=False,
doc="""
Custom commands to run before building the target.
The value is a list of shell commands to run. Notice that
the commands are platform-specific and so typically need
to be set conditionally depending on the value of
``toolset``.
Currently only implemented by Visual Studio.
"""),
Property("post-build-commands",
type=ListType(StringType()),
default=[],
inheritable=False,
doc="""
Custom commands to run after building the target.
The value is a list of shell commands to run. Notice that
the commands are platform-specific and so typically need
to be set conditionally depending on the value of
``toolset``.
Currently only implemented by Visual Studio.
"""),
Property("configurations",
type=ListType(StringType()), # FIXME: use a custom type that validates config names
default="Debug Release",
inheritable=True,
doc="""
List of configurations to use for this target.
See :ref:`configurations` for more information.
"""
),
]
def std_module_props():
"""Creates list of all standard module properties."""
toolsets_enum_type = EnumType("toolset", sorted(api.Toolset.all_names()))
return [
Property("toolsets",
type=ListType(toolsets_enum_type),
default=[],
inheritable=True,
doc="List of toolsets to generate makefiles/projects for."),
Property("_srcdir",
type=PathType(),
default=lambda x: x.srcdir_as_path(),
readonly=True,
inheritable=False,
doc="The value of @srcdir anchor for the module."),
]
def std_project_props():
"""Creates list of all standard project properties."""
toolsets_enum_type = EnumType("toolset", sorted(api.Toolset.all_names()))
return [
Property("toolset",
type=toolsets_enum_type,
default=expr.PlaceholderExpr("toolset"),
readonly=True,
inheritable=False,
doc="The toolset makefiles or projects are being generated for. "
"This property is set by Bakefile and can be used for performing "
"toolset-specific tasks or modifications."
),
Property("config",
type=StringType(),
default=expr.PlaceholderExpr("config"),
readonly=True,
inheritable=False,
doc="""
Current configuration.
This property is set by Bakefile and can be used for performing
per-configuration modifications. The value is one of the
*configurations* values specified for the target.
See :ref:`configurations` for more information.
"""
),
Property("arch",
type=StringType(),
default=expr.PlaceholderExpr("arch"),
readonly=True,
inheritable=False,
doc="""
Current architecture.
This property is set by Bakefile and can be used for
performing per-architecture modifications (if the toolset
supports it, which currently only Visual Studio does).
The value is one of the *archs* values specified for the
target.
"""
),
]
def std_setting_props():
"""Creates list of all standard Setting properties."""
return _std_model_part_props() + [
Property("help",
type=StringType(),
default=expr.NullExpr(),
inheritable=False,
doc="""
Documentation for the setting.
This will be used in the generated output to explain the setting to
the user, if supported by the toolset.
"""
),
Property("default",
type=TheAnyType,
default=expr.NullExpr(),
inheritable=False,
doc="Default value of the setting, if any."
),
]
class PropertiesDict(utils.OrderedDict):
"""
Dictionary of properties, keyed by their names.
"""
def __init__(self, scope):
super(PropertiesDict, self).__init__()
self.scope = scope
def add(self, prop, as_inherited=False):
if prop.name in self:
# The same property may be shared by different target types (e.g.
# "defines" for any native compiled target: programs, shared or
# static libraries, ...).
# That is OK, the property comes from a common base class then and
# is the same instance. Having two different properties with the
# same name is not OK, though.
if self[prop.name] is not prop:
raise RuntimeError("property \"%s\" defined more than once at the same scope (%s)" %
(prop.name, self.scope))
if as_inherited:
assert prop.scopes # must have assigned scope from elsewhere already
else:
prop._add_scope(self.scope)
self[prop.name] = prop
def _fill_prop_dict(props, scope):
d = PropertiesDict(scope)
for p in props:
d.add(p)
return d
def _propagate_inheritables(props, into):
"""
Add inheritable properties from *props* into *into* dictionary, which holds
properties for a higher-level scope.
"""
for p in props.itervalues():
if p.inheritable:
into.add(p, as_inherited=True)
def _collect_properties_from_others(variable_name):
"""
Yields properties from "external" source -- i.e. not defined on the model
part type (e.g. target type) itself, but in toolset.
"""
for toolset in api.Toolset.all():
for p in toolset.all_properties(variable_name):
p._add_toolset(toolset.name)
yield p
for step in api.CustomStep.all():
for p in step.all_properties(variable_name):
yield p
class PropertiesRegistry(object):
"""
Registry of existing properties.
"""
def __init__(self):
self._init_vars()
def _init_vars(self):
self._initialized = False
self.all_targets = None
self.all_files = None
self.modules = None
self.project = None
self.settings = None
self.target_types = {}
def get_project_prop(self, name):
"""
Returns property *name* on module level if such property exists, or
:const:`None` otherwise.
"""
if not self._initialized:
self._init_props()
return self.project.get(name, None)
def get_module_prop(self, name):
"""
Returns property *name* on module level if such property exists, or
:const:`None` otherwise.
"""
if not self._initialized:
self._init_props()
return self.modules.get(name, None)
def get_target_prop(self, target_type, name):
"""
Returns property *name* on target level for targets of type *target_type*
if such property exists, or :const:`None` otherwise.
"""
if not self._initialized:
self._init_props()
if name in self.all_targets:
return self.all_targets[name]
else:
return self.target_types[target_type].get(name, None)
def get_file_prop(self, name):
"""
Returns property *name* on source file level if such property exists, or
:const:`None` otherwise.
"""
if not self._initialized:
self._init_props()
return self.all_files.get(name, None)
def get_setting_prop(self, name):
"""
Returns property *name* of a Setting object if such property exists, or
:const:`None` otherwise.
"""
if not self._initialized:
self._init_props()
return self.settings.get(name, None)
def enum_project_props(self):
if not self._initialized:
self._init_props()
for p in self.project.itervalues():
yield p
def enum_module_props(self):
if not self._initialized:
self._init_props()
for p in self.modules.itervalues():
yield p
def enum_target_props(self, target_type):
if not self._initialized:
self._init_props()
for p in self.target_types[target_type].itervalues():
yield p
for p in self.all_targets.itervalues():
yield p
def enum_file_props(self):
if not self._initialized:
self._init_props()
for p in self.all_files.itervalues():
yield p
def enum_setting_props(self):
if not self._initialized:
self._init_props()
for p in self.settings.itervalues():
yield p
def _init_props(self):
assert not self._initialized
# Project:
self.project = _fill_prop_dict(std_project_props(), api.Property.SCOPE_PROJECT)
for p in _collect_properties_from_others("properties_project"):
self.project.add(p)
# Modules:
self.modules = _fill_prop_dict(std_module_props(), api.Property.SCOPE_MODULE)
for p in _collect_properties_from_others("properties_module"):
self.modules.add(p)
# All targets:
self.all_targets = _fill_prop_dict(std_target_props(), api.Property.SCOPE_TARGET)
for p in _collect_properties_from_others("properties_target"):
self.all_targets.add(p)
_propagate_inheritables(self.all_targets, self.modules)
# Specific target types:
for target_type in api.TargetType.all():
props = _fill_prop_dict(target_type.all_properties(), target_type.name)
for p in _collect_properties_from_others("properties_%s" % target_type):
props.add(p)
self.target_types[target_type] = props
_propagate_inheritables(props, self.modules)
# File types:
self.all_files = _fill_prop_dict(std_file_props(), api.Property.SCOPE_FILE)
for p in _collect_properties_from_others("properties_file"):
self.all_files.add(p)
_propagate_inheritables(self.all_files, self.all_targets)
_propagate_inheritables(self.all_files, self.modules)
# Settings:
self.settings = _fill_prop_dict(std_setting_props(), api.Property.SCOPE_SETTING)
for p in _collect_properties_from_others("properties_setting"):
self.settings.add(p)
self._initialized = True
def force_rescan(self):
"""Force re-scanning of properties"""
self._init_vars()
registry = PropertiesRegistry()
get_project_prop = registry.get_project_prop
get_module_prop = registry.get_module_prop
get_target_prop = registry.get_target_prop
get_file_prop = registry.get_file_prop
get_setting_prop = registry.get_setting_prop
enum_project_props = registry.enum_project_props
enum_module_props = registry.enum_module_props
enum_target_props = registry.enum_target_props
enum_file_props = registry.enum_file_props
enum_setting_props = registry.enum_setting_props
|
# %%
#######################################
def split_string(string: str, size: int):
"""Splits a string into smaller strings of the desired size value.
Examples:
>>> string = 'hey hey hiya'\n
>>> split_string(string, 4)\n
['hey ', 'hey ', 'hiya']
References:
https://youtu.be/pG3L2Ojh1UE?t=336
"""
created_step_points = range(0, len(string), size)
sublists_created = [string[i : i + size] for i in created_step_points]
return sublists_created
|
import torch
import torch.nn as nn
import torchvision.models as models
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
class DetrLoss(nn.Module):
def __init__(self, matcher, num_classes, eos_coef, losses):
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer("empty_weight", empty_weight)
def loss_labels(self, outputs, targets, indices, num_boxes):
"""
        Classification loss (NLL). Targets dicts must contain the key "labels" containing a tensor of dim
        [nb_target_boxes]
"""
assert "logits" in outputs, "No logits were found in the outputs"
src_logits = outputs["logits"]
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(
src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device
)
target_classes[idx] = target_classes_o
loss_ce = nn.functional.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {"loss_ce": loss_ce}
return losses
def loss_boxes(self, outputs, targets, indices, num_boxes):
"""
        Compute the loss related to the bounding boxes: the L1 regression loss.
        Targets dicts must contain the key "bboxes" containing a tensor of dim [nb_target_boxes, 4]. The target
        boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
"""
assert "bboxes" in outputs, "No predicted boxes found in outputs"
idx = self._get_src_permutation_idx(indices)
src_boxes = outputs["bboxes"][idx]
target_boxes = torch.cat([t["bboxes"][i] for t, (_, i) in zip(targets, indices)], dim=0)
loss_bbox = nn.functional.l1_loss(src_boxes, target_boxes, reduction="none")
losses = {}
losses["loss_bbox"] = loss_bbox.sum() / num_boxes
return losses
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def forward(self, outputs, targets):
outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs"}
indices = self.matcher(outputs_without_aux, targets)
num_boxes = sum(len(t["labels"]) for t in targets)
num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
num_boxes = torch.clamp(num_boxes, min=1).item()
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
return losses
def get_loss(self, loss, outputs, targets, indices, num_boxes):
loss_map = {
"labels": self.loss_labels,
"boxes": self.loss_boxes
}
assert loss in loss_map, f"Loss {loss} not supported"
return loss_map[loss](outputs, targets, indices, num_boxes)
class HungarianMatcher(nn.Module):
@torch.no_grad()
def forward(self, predictions, targets):
# prediction {"labels": [...], "bboxes": np.ndarray}
# targets: dict with "labels" and "bboxes" as lists
bs, num_queries = predictions["logits"].shape[:2]
output_probs = predictions['logits'].flatten(0, 1).softmax(-1)
output_bboxes = predictions['bboxes'].flatten(0, 1)
gt_labels = torch.cat([target['labels'] for target in targets])
gt_bboxes = torch.cat([target['bboxes'] for target in targets])
class_cost = -output_probs[:, gt_labels]
bbox_loss = torch.cdist(output_bboxes, gt_bboxes)
matrix_loss = bbox_loss + class_cost
matrix_loss = matrix_loss.view(bs, num_queries, -1)
sizes = [len(v["bboxes"]) for v in targets]
indxes = []
for i, cost_part in enumerate(matrix_loss.split(sizes, -1)):
i_match, j_match = linear_sum_assignment(cost_part[i])
indxes.append((torch.as_tensor(i_match, dtype=torch.int64), torch.as_tensor(j_match, dtype=torch.int64)))
return indxes
class PositionEncoder(nn.Module):
def __init__(self, max_hw=50, hidden_dim=256) -> None:
super().__init__()
self.row_encoding = nn.Embedding(max_hw, hidden_dim // 2)
self.col_encoding = nn.Embedding(max_hw, hidden_dim // 2)
def forward(self, x):
b, c, h, w = x.shape
rows_embs = self.row_encoding(torch.arange(0, h, device=x.device))
rows_embs = rows_embs.unsqueeze(1).repeat(1, 1, w, 1)
cols_embs = self.col_encoding(torch.arange(0, w, device=x.device))
cols_embs = cols_embs.unsqueeze(0).repeat(1, h, 1, 1)
embds = torch.cat((rows_embs, cols_embs), dim=-1).permute(0, 3, 1, 2)
return embds
class MLPBoxDecoder(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim) -> None:
super().__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
self.fc3 = nn.Linear(hidden_dim, output_dim)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
return self.fc3(x)
class DETR(nn.Module):
def __init__(
self,
num_classes,
num_queries,
hidden_dim,
n_head,
num_encoder_layers,
num_decoder_layers,
dropout,
) -> None:
super().__init__()
self.encoder = nn.Sequential(*models.resnet50(pretrained=True).children())[:-2]
self.encoder_downsampler = nn.Conv2d(
2048, hidden_dim, kernel_size=1
) # 2048 resnet
self.position_encoder = PositionEncoder(hidden_dim=hidden_dim)
self.object_queries = nn.Parameter(torch.randn(num_queries, hidden_dim))
self.decoder = nn.Transformer(
d_model=hidden_dim,
nhead=n_head,
dim_feedforward=hidden_dim * 4,
num_encoder_layers=num_encoder_layers,
num_decoder_layers=num_decoder_layers,
batch_first=True,
dropout=dropout,
)
self.final_ln = nn.LayerNorm(hidden_dim)
self.class_projection = nn.Linear(hidden_dim, num_classes + 1)
self.bbox_projection = MLPBoxDecoder(hidden_dim, hidden_dim, 4)
def forward(self, x):
b, c, h, w = x.shape
features = self.encoder(x)
features = self.encoder_downsampler(features)
pos_embds = self.position_encoder(features)
features += pos_embds
features = features.flatten(2).permute(0, 2, 1)
obj_queries = self.object_queries.repeat(b, 1, 1)
output = self.decoder(features, obj_queries)
output = self.final_ln(output)
classes = self.class_projection(output)
bboxes = self.bbox_projection(output)
out = {}
out['logits'] = classes
out['bboxes'] = torch.sigmoid(bboxes)
return out
if __name__ == "__main__":
    # Hyperparameter values below are illustrative defaults chosen to make the example runnable.
    detr = DETR(
        num_classes=30,
        num_queries=100,
        hidden_dim=256,
        n_head=8,
        num_encoder_layers=6,
        num_decoder_layers=6,
        dropout=0.1,
    )
example_input = torch.randn((4, 3, 224, 224))
output = detr(example_input)
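    # Expected output shapes for the arguments above (sanity check, not part of the original script):
    #   output['logits']: (4, 100, 31)  -> (batch, num_queries, num_classes + 1)
    #   output['bboxes']: (4, 100, 4), values in [0, 1] after the sigmoid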
|
# -*- coding: utf-8 -*-
# __author__ = "wynterwang"
# __date__ = "2020/9/24"
from __future__ import absolute_import
__all__ = ["ResourceIsolation", "ResourceIsolationByUser"]
class ResourceIsolation:
def isolation_filters(self, request):
"""
:param request: request object
:type request: restful_falcon.core.request.Request
:return: filter list
:rtype: list
"""
raise NotImplementedError()
class ResourceIsolationByUser(ResourceIsolation):
def isolation_filters(self, request):
if not request.user.is_admin:
return [("created_by", request.user.user_id)]
|
'''
This script is used to rerun previously run scenarios during the fuzzing process
'''
import sys
import os
sys.path.append('pymoo')
carla_root = '../carla_0994_no_rss'
sys.path.append(carla_root+'/PythonAPI/carla/dist/carla-0.9.9-py3.7-linux-x86_64.egg')
sys.path.append(carla_root+'/PythonAPI/carla')
sys.path.append(carla_root+'/PythonAPI')
sys.path.append('.')
sys.path.append('fuzzing_utils')
sys.path.append('carla_lbc')
sys.path.append('carla_lbc/leaderboard')
sys.path.append('carla_lbc/leaderboard/team_code')
sys.path.append('carla_lbc/scenario_runner')
sys.path.append('carla_lbc/carla_project')
sys.path.append('carla_lbc/carla_project/src')
sys.path.append('carla_lbc/carla_specific_utils')
# os.system('export PYTHONPATH=/home/zhongzzy9/anaconda3/envs/carla99/bin/python')
import random
import pickle
import atexit
import numpy as np
from datetime import datetime
import traceback
from distutils.dir_util import copy_tree
import json
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as Data
import torchvision.utils
from torchvision import models
import argparse
from carla_lbc.carla_specific_utils.carla_specific import run_carla_simulation, get_event_location_and_object_type, check_bug, get_unique_bugs, get_if_bug_list
from object_types import pedestrian_types, vehicle_types, static_types, vehicle_colors
from customized_utils import make_hierarchical_dir, exit_handler, process_X, inverse_process_X, get_sorted_subfolders, load_data, get_picklename
parser = argparse.ArgumentParser()
parser.add_argument('-p','--port', type=int, default=2045, help='TCP port(s) to listen to')
parser.add_argument('--ego_car_model', type=str, default='', help='model to rerun chosen scenarios. If not specified, the original one will be used.')
parser.add_argument('--rerun_mode', type=str, default='all', help="need to set to one of ['all', 'train', 'test']")
parser.add_argument('--data_category', type=str, default='bugs', help="need to set to one of ['bugs', 'non_bugs']")
parser.add_argument('--parent_folder', type=str, default='', help='the parent folder that consists of fuzzing data. It should include both bugs and non_bugs folder.')
parser.add_argument('--record_every_n_step', type=int, default=5, help='how many frames to save camera images')
parser.add_argument('--is_save', type=int, default=1, help='save rerun results')
parser.add_argument('--has_display', type=int, default=1, help='display the simulation during rerun.')
parser.add_argument("--debug", type=int, default=0, help="whether using the debug mode: planned paths will be visualized.")
arguments = parser.parse_args()
port = arguments.port
# ['lbc', 'lbc_augment', 'auto_pilot']
ego_car_model = arguments.ego_car_model
# ['train', 'test']
rerun_mode = arguments.rerun_mode
# ['bugs', 'non_bugs']
data_category = arguments.data_category
parent_folder = arguments.parent_folder
record_every_n_step = arguments.record_every_n_step
is_save = arguments.is_save
debug = arguments.debug
assert os.path.isdir(parent_folder), parent_folder+' does not exist locally'
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
torch.use_deterministic_algorithms(True)
torch.backends.cudnn.benchmark = False
os.environ['HAS_DISPLAY'] = str(arguments.has_display)
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
def rerun_simulation(pickle_filename, is_save, rerun_save_folder, ind, sub_folder_name, scenario_file, ego_car_model='', x=[], record_every_n_step=10):
is_bug = False
# parameters preparation
if ind == 0:
launch_server = True
else:
launch_server = False
counter = ind
with open(pickle_filename, 'rb') as f_in:
pf = pickle.load(f_in)
x = pf['x']
fuzzing_content = pf['fuzzing_content']
fuzzing_arguments = pf['fuzzing_arguments']
sim_specific_arguments = pf['sim_specific_arguments']
dt_arguments = pf['dt_arguments']
route_type = pf['route_type']
route_str = pf['route_str']
if not ego_car_model:
ego_car_model = pf['ego_car_model']
mask = pf['mask']
labels = pf['labels']
tmp_save_path = pf['tmp_save_path']
fuzzing_arguments.record_every_n_step = record_every_n_step
fuzzing_arguments.ego_car_model = ego_car_model
fuzzing_arguments.debug = debug
folder = '_'.join([route_type, route_str, ego_car_model])
parent_folder = make_hierarchical_dir([rerun_save_folder, folder])
fuzzing_arguments.parent_folder = parent_folder
fuzzing_arguments.mean_objectives_across_generations_path = os.path.join(parent_folder, 'mean_objectives_across_generations.txt')
# TBD: temporary fix to be compatible with earlier data
fuzzing_arguments.terminate_on_collision = True
objectives, run_info = run_carla_simulation(x, fuzzing_content, fuzzing_arguments, sim_specific_arguments, dt_arguments, launch_server, counter, port)
is_bug = int(check_bug(objectives))
# save data
if is_save:
print('sub_folder_name', sub_folder_name)
if is_bug:
rerun_folder = make_hierarchical_dir([parent_folder, 'rerun_bugs'])
print('\n'*3, 'rerun also causes a bug!!!', '\n'*3)
else:
rerun_folder = make_hierarchical_dir([parent_folder, 'rerun_non_bugs'])
try:
new_path = os.path.join(rerun_folder, sub_folder_name)
copy_tree(tmp_save_path, new_path)
        except Exception:
print('fail to copy from', tmp_save_path)
traceback.print_exc()
raise
        cur_info = {
            'x': x,
            'objectives': objectives,
            'labels': run_info['labels'],
            'mask': run_info['mask'],
            'is_bug': is_bug,
            'fuzzing_content': run_info['fuzzing_content'],
            'fuzzing_arguments': run_info['fuzzing_arguments'],
            'sim_specific_arguments': run_info['sim_specific_arguments'],
            'dt_arguments': run_info['dt_arguments'],
            'route_type': run_info['route_type'],
            'route_str': run_info['route_str'],
        }
with open(new_path+'/'+'cur_info.pickle', 'wb') as f_out:
pickle.dump(cur_info, f_out)
return is_bug, objectives
def rerun_list_of_scenarios(parent_folder, rerun_save_folder, scenario_file, data_category, mode, ego_car_model, record_every_n_step=10, is_save=True):
import re
subfolder_names = get_sorted_subfolders(parent_folder, data_category)
print('len(subfolder_names)', len(subfolder_names))
mid = len(subfolder_names) // 2
random.shuffle(subfolder_names)
train_subfolder_names = subfolder_names[:mid]
test_subfolder_names = subfolder_names[-mid:]
if mode == 'train':
chosen_subfolder_names = train_subfolder_names
elif mode == 'test':
chosen_subfolder_names = test_subfolder_names
elif mode == 'all':
chosen_subfolder_names = subfolder_names
bug_num = 0
objectives_avg = 0
for ind, sub_folder in enumerate(chosen_subfolder_names):
print('episode:', ind+1, '/', len(chosen_subfolder_names), 'bug num:', bug_num)
sub_folder_name = re.search(".*/([0-9]*)$", sub_folder).group(1)
print('sub_folder', sub_folder)
print('sub_folder_name', sub_folder_name)
if os.path.isdir(sub_folder):
pickle_filename = os.path.join(sub_folder, 'cur_info.pickle')
if os.path.exists(pickle_filename):
print('pickle_filename', pickle_filename)
is_bug, objectives = rerun_simulation(pickle_filename, is_save, rerun_save_folder, ind, sub_folder_name, scenario_file, ego_car_model=ego_car_model, record_every_n_step=record_every_n_step)
objectives_avg += np.array(objectives)
if is_bug:
bug_num += 1
print('bug_ratio :', bug_num / len(chosen_subfolder_names))
print('objectives_avg :', objectives_avg / len(chosen_subfolder_names))
if __name__ == '__main__':
time_str = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
scenario_folder = 'carla_lbc/scenario_files'
if not os.path.exists(scenario_folder):
os.mkdir(scenario_folder)
scenario_file = scenario_folder+'/'+'current_scenario_'+time_str+'.json'
atexit.register(exit_handler, [port])
print('ego_car_model', ego_car_model, 'data_category', data_category, 'mode', rerun_mode)
rerun_save_folder = make_hierarchical_dir(['carla_lbc', 'rerun', rerun_mode, time_str])
rerun_list_of_scenarios(parent_folder, rerun_save_folder, scenario_file, data_category, rerun_mode, ego_car_model, record_every_n_step=record_every_n_step)
|
import unittest
from pathlib import Path
import json
import pandas as pd
from filmweb_integrator.fwimdbmerge.filmweb import Filmweb
from pandas import json_normalize  # pandas.io.json.json_normalize is deprecated in newer pandas
DATA_STATIC = str(Path(__file__).parent.parent.parent.parent.absolute()) + '/data_static'
FILMWEB_EXAMPLE_JSON = DATA_STATIC + '/example_test_01_json.json'
FILMWEB_EXAMPLE_CSV = DATA_STATIC + '/example_test_01_json.csv'
FILMWEB_EXAMPLE_FILMWEB = DATA_STATIC + '/example_test_01_json.json'
"""
self = TestFilmweb()
self.setUp()
"""
class TestFilmweb(unittest.TestCase):
def setUp(self):
self.sut = Filmweb()
    def test_get_json_should_return_dataframe(self):
json_text = open(FILMWEB_EXAMPLE_JSON).read()
df = json_normalize(json.loads(json_text))
self.assertEqual(46, len(df))
def test_get_dataframe_should_return_simple_dataframe(self):
# given
df = pd.read_csv(FILMWEB_EXAMPLE_CSV)
# when
result = self.sut.get_dataframe(df, extended=False, use_saved_scraped=True)
# then
self.assertEqual(13, len(result.columns))
# def test_get_dataframe_should_return_extended_dataframe(self):
# # given
# df = pd.read_csv(FILMWEB_EXAMPLE_CSV)
# # when
# result = self.sut.get_dataframe(df, extended=True, use_saved_scraped=True)
# # then
# self.assertEqual(59, len(result.columns))
|
# V0
# V1
# http://bookshadow.com/weblog/2018/06/17/leetcode-exam-room/
# https://blog.csdn.net/fuxuemingzhu/article/details/83141523
# IDEA : bisect.insort : https://www.cnblogs.com/skydesign/archive/2011/09/02/2163592.html
import bisect
class ExamRoom(object):
def __init__(self, N):
"""
:type N: int
"""
self.N, self.L = N, list()
def seat(self):
"""
:rtype: int
"""
N, L = self.N, self.L
if not self.L: res = 0
else:
d, res = L[0], 0
# d means cur distance, res means cur pos
for a, b in zip(L, L[1:]):
                # use integer division so seat indices stay integers under Python 3
                if (b - a) // 2 > d:
                    d = (b - a) // 2
                    res = (b + a) // 2
if N - 1 - L[-1] > d:
res = N - 1
bisect.insort(L, res)
return res
def leave(self, p):
"""
:type p: int
:rtype: void
"""
self.L.remove(p)
# Your ExamRoom object will be instantiated and called as such:
# obj = ExamRoom(N)
# param_1 = obj.seat()
# obj.leave(p)
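# Worked example (the standard LeetCode 855 walk-through, assuming ExamRoom(10)):
#   seat() -> 0, seat() -> 9, seat() -> 4, seat() -> 2, leave(4), seat() -> 5
# Each seat() picks the position that maximises the distance to the closest occupied
# seat, preferring the lowest index on ties; bisect.insort keeps self.L sorted.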
# V2
# Time: seat: O(logn), amortized
# leave: O(logn)
# Space: O(n)
import heapq
class ExamRoom(object):
def __init__(self, N):
"""
:type N: int
"""
self.__num = N
self.__seats = {-1: [-1, self.__num], self.__num: [-1, self.__num]}
self.__max_heap = [(-self.__distance((-1, self.__num)), -1, self.__num)]
def seat(self):
"""
:rtype: int
"""
while self.__max_heap[0][1] not in self.__seats or \
self.__max_heap[0][2] not in self.__seats or \
self.__seats[self.__max_heap[0][1]][1] != self.__max_heap[0][2] or \
self.__seats[self.__max_heap[0][2]][0] != self.__max_heap[0][1]:
heapq.heappop(self.__max_heap) # lazy deletion
_, left, right = heapq.heappop(self.__max_heap)
mid = 0 if left == -1 \
else self.__num-1 if right == self.__num \
else (left+right) // 2
self.__seats[mid] = [left, right]
heapq.heappush(self.__max_heap, (-self.__distance((left, mid)), left, mid))
heapq.heappush(self.__max_heap, (-self.__distance((mid, right)), mid, right))
self.__seats[left][1] = mid
self.__seats[right][0] = mid
return mid
def leave(self, p):
"""
:type p: int
:rtype: void
"""
left, right = self.__seats[p]
self.__seats.pop(p)
self.__seats[left][1] = right
self.__seats[right][0] = left
heapq.heappush(self.__max_heap, (-self.__distance((left, right)), left, right))
def __distance(self, segment):
return segment[1]-segment[0]-1 if segment[0] == -1 or segment[1] == self.__num \
else (segment[1]-segment[0]) // 2
|
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import difflib
import filecmp
import os
import shutil
import tempfile
import unittest
from json5_generator import Json5File, Writer
@contextlib.contextmanager
def tmp_dir():
tmp = tempfile.mkdtemp()
try:
yield tmp
finally:
shutil.rmtree(tmp)
def path_to_test_file(*path):
return os.path.join(os.path.dirname(__file__), 'tests', *path)
def diff(filename1, filename2):
with open(filename1) as file1:
file1_lines = file1.readlines()
with open(filename2) as file2:
file2_lines = file2.readlines()
# Use Python's difflib module so that diffing works across platforms
return ''.join(difflib.context_diff(file1_lines, file2_lines))
def is_identical_file(reference_filename, output_filename):
reference_basename = os.path.basename(reference_filename)
if not os.path.isfile(reference_filename):
        print('Missing reference file!')
        print('(if adding new test, update reference files)')
        print(reference_basename)
        print()
return False
if not filecmp.cmp(reference_filename, output_filename):
# cmp is much faster than diff, and usual case is "no difference",
# so only run diff if cmp detects a difference
        print('FAIL: %s' % reference_basename)
        print(diff(reference_filename, output_filename))
return False
return True
def compare_output_dir(reference_dir, output_dir):
"""
Compares output files in both reference_dir and output_dir.
Note: this function ignores subdirectory content in both reference
dir and output_dir.
Note: reference_dir should have all ref files ending with .ref suffix.
'.ref' suffix is added to bypass code formatter on reference files.
:returns {bool}: Whether files in output dir matches files in ref dir
"""
ref_content = {
f[:-4]
for f in os.listdir(reference_dir) if f.endswith('.ref')
}
output_content = set(os.listdir(output_dir))
if ref_content != output_content:
        print('Output files do not match.')
        print('Following files are extra: {}'.format(output_content - ref_content))
        print('Following files are missing: {}'.format(ref_content - output_content))
return False
for file_name in ref_content:
ref_file = os.path.join(reference_dir, file_name) + '.ref'
output_file = os.path.join(output_dir, file_name)
if os.path.isdir(ref_file) and os.path.isdir(output_file):
continue
elif os.path.isdir(ref_file) or os.path.isdir(output_file):
return False
elif not is_identical_file(ref_file, output_file):
return False
return True
class WriterTest(unittest.TestCase):
def _test_writer(self, writer_class, json5_files, reference_dir):
"""
:param writer_class {Writer}: a subclass to Writer
:param json5_files {List[str]}: json5 test input files
:param reference_dir {str}: directory to expected output files
"""
with tmp_dir() as tmp:
writer = writer_class(json5_files, tmp)
writer.write_files(tmp)
writer.cleanup_files(tmp)
self.assertTrue(compare_output_dir(reference_dir, tmp))
|
from Classes.EdgeData import EdgeData
class Edges(object):
"""Class to store and process edge data.
Attributes
----------
    rec_edge_method: str
        Method used to determine the coefficient for the rec. edge ('Fixed' or 'Variable').
    vel_method: str
        Method used to compute the velocity ('MeasMag' or 'VectorProf').
left: EdgeData
Object of EdgeData for left edge.
right: EdgeData
Object of EdgeData for right edge.
"""
def __init__(self):
"""Initialize Edges.
"""
self.rec_edge_method = None
self.vel_method = None
self.left = EdgeData()
self.right = EdgeData()
def populate_data(self, rec_edge_method, vel_method):
"""Store the general methods used for edge data.
Parameters
----------
        rec_edge_method: str
            Method used to determine the coefficient for the rec. edge ('Fixed' or 'Variable').
        vel_method: str
            Method used to compute the velocity ('MeasMag' or 'VectorProf').
"""
self.rec_edge_method = rec_edge_method
self.vel_method = vel_method
def populate_from_qrev_mat(self, transect):
"""Populates the object using data from previously saved QRev Matlab file.
Parameters
----------
transect: mat_struct
Matlab data structure obtained from sio.loadmat
"""
if hasattr(transect, 'edges'):
if hasattr(transect.edges, 'left'):
self.left = EdgeData()
self.left.populate_from_qrev_mat(transect.edges.left)
if hasattr(transect.edges, 'right'):
self.right = EdgeData()
self.right.populate_from_qrev_mat(transect.edges.right)
self.rec_edge_method = transect.edges.recEdgeMethod
self.vel_method = transect.edges.velMethod
def change_property(self, prop, setting, edge=None):
"""Change edge property
Parameters
----------
prop: str
Name of property.
setting:
New property setting.
edge: str
Edge to change (left, right)
"""
if edge is None:
setattr(self, prop, setting)
else:
temp = getattr(self, edge)
temp.change_property(prop, setting)
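# Minimal usage sketch (hypothetical settings; assumes EdgeData() requires no arguments,
# as in the constructor above):
#   edges = Edges()
#   edges.populate_data(rec_edge_method='Fixed', vel_method='MeasMag')
#   edges.change_property('vel_method', 'VectorProf')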
|
# Created On: 2011-11-27
# Copyright 2013 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
from qtlib.column import Column
from ..base.results_model import ResultsModel as ResultsModelBase
class ResultsModel(ResultsModelBase):
COLUMNS = [
Column('marked', defaultWidth=30),
Column('name', defaultWidth=200),
Column('folder_path', defaultWidth=180),
Column('size', defaultWidth=60),
Column('extension', defaultWidth=40),
Column('dimensions', defaultWidth=100),
Column('exif_timestamp', defaultWidth=120),
Column('mtime', defaultWidth=120),
Column('percentage', defaultWidth=60),
Column('dupe_count', defaultWidth=80),
]
|
import requests
import json
RIDEEM_HOST = 'https://rideem.io'
def API(key = None, host = RIDEEM_HOST):
return Rideem(key, host)
class Rideem(object):
def __init__(self, key = None, host = RIDEEM_HOST):
self._key = key # app private key
self._host = host
def create_app(self, name, email = None):
""" Creates an app with name and email.
"""
params = { 'email' : email } if email else {}
return self._op('apps', name, method = 'post', data = params)
def update_app(self, app):
""" Updates an app.
"""
params = {}
self._add_key_param(params, app)
return self._op('apps', app, method = 'post', data = app,
params = params)
def delete_app(self, app):
""" Deletes an app.
"""
params = {}
self._add_key_param(params, app)
return self._op('apps', app, method = 'delete', params = params)
def get_app(self, name):
""" Gets an app by name.
"""
params = {}
self._add_key_param(params)
return self._op('apps', name, params = params)
def create_promo_for(self, app, name = None, codes = None, delay = 60,
private = False, start = None, end = None):
""" Creates a promo for the app.
"""
params = {}
self._add_key_param(params, app)
promo = {
'name' : name,
'code' : codes if codes else [],
'delay' : delay,
'private' : private,
'start' : start,
'end' : end,
}
return self._op('promos', app, method = 'post', data = promo,
params = params)
def update_promo_for(self, app, promo):
""" Updates a promo.
"""
params = {}
self._add_key_param(params, app)
p = promo.copy()
p['code'] = p['codes']
del p['key']
del p['codes']
return self._op('promos', app, method = 'post', data = p,
params = params)
def delete_promo_from(self, app, promo):
""" Deletes promo for the app.
"""
params = {}
self._add_key_param(params, app)
name = promo['name']
if name:
params.update({'name' : name})
return self._op('promos', app, method = 'delete', params = params)
def get_promo_from(self, app, name = ''):
""" Gets promo named name for an app.
"""
params = {}
self._add_key_param(params, app)
if name:
params.update({'name' : name})
return self._op('promos', app, params = params)
def code_from(self, app, promo = None, key = None):
""" Redeems a code from an app for promo using key.
Note: app and promo can be strings or dicts.
"""
params = {}
suffix = None
if type(app) is str:
self._add_key_param(params)
else:
self._add_key_param(params, app)
promo_name = None
if type(promo) is str:
promo_name = promo
elif promo:
promo_name = promo['name']
if key:
params.update({'key' : key})
if promo_name:
suffix = '/for/{}'.format(promo_name)
return self._op('from', app, suffix = suffix, params = params)
def request(self, app, get = False):
""" Request codes for an app.
Note: app can be a string or dict.
"""
method = 'post' if not get else 'get'
return self._op('request', app, method = method)
def verify(self, app, token):
""" Verifies an app using token.
"""
params = { 'token' : token } if token else {}
return self._op('verify', app, params = params)
def _op(self, endpoint, app, params = None, method = 'get',
data = None, suffix = None):
app_name = app if type(app) is str else app['name']
suffix = suffix if suffix else ''
endpoint = '/rideem/{}/{}{}'.format(endpoint, app_name, suffix)
if method == 'post':
r = self._post(endpoint, data, params = params)
else:
r = self._request(method, endpoint, params = params)
return r
def _request(self, method, ep, **kwargs):
url = self._host + ep
requester = getattr(requests, method)
response = requester(url, **kwargs)
r = response.json() if len(response.text) > 0 else {}
return r, response.status_code
def _post(self, url, o, params):
data = json.dumps(o)
headers = {'Content-type': 'application/json'}
return self._request('post', url, data = data, headers = headers,
params = params)
def _add_key_param(self, params, o = None):
key = None
key = o['key'] if o and 'key' in o else None
if not key: key = self._key
if key:
params.update({ 'key' : key })
@property
def key(self):
return self._key
@key.setter
def key(self, k):
self._key = k
@property
def host(self):
return self._host
@host.setter
def host(self, h):
self._host = h
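# Usage sketch (hypothetical key and app name; each call returns a
# (parsed_json, status_code) tuple from _request):
#   api = API(key='MY_PRIVATE_KEY')
#   app, status = api.create_app('my-app', email='dev@example.com')
#   code, status = api.code_from('my-app', promo='launch')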
|
#!/usr/bin/python3
"""
Given a non-negative integer, you could swap two digits at most once to get the
maximum valued number. Return the maximum valued number you could get.
Example 1:
Input: 2736
Output: 7236
Explanation: Swap the number 2 and the number 7.
Example 2:
Input: 9973
Output: 9973
Explanation: No swap.
Note:
The given number is in the range [0, 10^8]
"""
class Solution:
def maximumSwap(self, num: int) -> int:
"""
        stk maintains an increasing stack of (index, digit) pairs, built from right to left
"""
stk = []
nums = list(str(num))
n = len(nums)
for i in range(n-1, -1, -1):
if stk and stk[-1][1] >= nums[i]: # only keep the rightmost duplicate
continue
stk.append((i, nums[i]))
for i in range(n):
while stk and stk[-1][0] <= i:
stk.pop()
if stk and stk[-1][1] > nums[i]:
j = stk[-1][0]
nums[i], nums[j] = nums[j], nums[i]
break
return int("".join(nums))
if __name__ == "__main__":
assert Solution().maximumSwap(2736) == 7236
assert Solution().maximumSwap(9973) == 9973
|
# coding: utf-8
from django.conf import settings
from django.conf.urls import include, patterns, url
from django.contrib.auth.views import logout
from django.contrib import admin
from django.views.generic.base import TemplateView
urlpatterns = patterns(
'',
url(r'^', include('mapa_cidadao.core.urls')),
url(r'', include('social_auth.urls')),
url(r'^logout/$', logout, {"next_page": "/"}, name="logout"),
url(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG:
urlpatterns += patterns(
'',
(
r'^media/(?P<path>.*)$',
'django.views.static.serve',
{
'document_root': settings.MEDIA_ROOT
}
),
(r'^404/$', TemplateView.as_view(template_name='404.html')),
)
|
import io
import platform
import os
import sys
import setuptools
try:
from numpy import get_include
except ImportError:
import subprocess
errno = subprocess.call([sys.executable, '-m', 'pip', 'install', 'numpy'])
if errno:
print('Please install numpy')
raise SystemExit(errno)
else:
from numpy import get_include
try:
from Cython.Build import cythonize
except ImportError:
import subprocess
errno = subprocess.call([sys.executable, '-m', 'pip', 'install', 'Cython'])
if errno:
print('Please install Cython')
raise SystemExit(errno)
else:
from Cython.Build import cythonize
# Package meta-data.
NAME = 'river'
DESCRIPTION = 'Online machine learning in Python'
LONG_DESCRIPTION_CONTENT_TYPE = 'text/markdown'
URL = 'https://github.com/online-ml/river'
EMAIL = 'maxhalford25@gmail.com'
AUTHOR = 'Max Halford'
REQUIRES_PYTHON = '>=3.6.0'
# Package requirements.
base_packages = ['numpy>=1.18.1', 'scipy>=1.4.1', 'pandas>=1.0.1']
compat_packages = base_packages + [
'scikit-learn',
'scikit-surprise',
'sqlalchemy',
'torch',
'vaex'
]
dev_packages = base_packages + [
'asv',
'flake8>=3.7.9',
'graphviz>=0.10.1',
'matplotlib>=3.0.2',
'mypy>=0.761',
'pytest>=4.5.0',
'pytest-cov>=2.6.1',
'pytest-cython>=0.1.0',
'scikit-learn>=0.22.1',
'sqlalchemy>=1.3.15'
]
docs_packages = dev_packages + [
'flask',
'ipykernel',
'jupyter-client',
'mike==0.5.3',
'mkdocs',
'mkdocs-awesome-pages-plugin',
'mkdocs-material',
'nbconvert',
'numpydoc'
]
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
# Load the package's __version__.py module as a dictionary.
about = {}
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
# Where the magic happens:
setuptools.setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=setuptools.find_packages(exclude=('tests', 'scikit-multiflow')),
install_requires=base_packages,
extras_require={
'dev': dev_packages,
'compat': compat_packages,
'docs': docs_packages
},
include_package_data=True,
license='BSD-3',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
ext_modules=cythonize(
module_list=[
setuptools.Extension(
'*',
sources=['**/*.pyx'],
include_dirs=[get_include()],
libraries=[] if platform.system() == 'Windows' else ['m']
)
],
compiler_directives={
'language_level': 3,
'binding': True,
'embedsignature': True
}
) + [setuptools.Extension(
'river.neighbors.libNearestNeighbor',
sources=[os.path.join('river', 'neighbors', 'src',
'libNearestNeighbor', 'nearestNeighbor.cpp')],
include_dirs=[get_include()],
libraries=[] if platform.system() == 'Windows' else ['m'],
language='c++'
)]
)
|
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: iam_user
short_description: Manage AWS IAM users
description:
- Manage AWS IAM users
version_added: "2.5"
author: Josh Souza (@joshsouza)
options:
name:
description:
- The name of the user to create.
required: true
managed_policy:
description:
- A list of managed policy ARNs or friendly names to attach to the user. To embed an inline policy, use M(iam_policy).
required: false
state:
description:
- Create or remove the IAM user
required: true
choices: [ 'present', 'absent' ]
purge_policy:
description:
- Detach policies which are not included in managed_policy list
required: false
default: false
requirements: [ botocore, boto3 ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Note: This module does not allow management of groups that users belong to.
# Groups should manage their membership directly using `iam_group`,
# as users belong to them.
# Create a user
- iam_user:
name: testuser1
state: present
# Create a user and attach a managed policy using its ARN
- iam_user:
name: testuser1
managed_policy:
- arn:aws:iam::aws:policy/AmazonSNSFullAccess
state: present
# Remove all managed policies from an existing user with an empty list
- iam_user:
name: testuser1
state: present
purge_policy: true
# Delete the user
- iam_user:
name: testuser1
state: absent
'''
RETURN = '''
user:
description: dictionary containing all the user information
returned: success
type: complex
contains:
arn:
description: the Amazon Resource Name (ARN) specifying the user
type: string
sample: "arn:aws:iam::1234567890:user/testuser1"
create_date:
description: the date and time, in ISO 8601 date-time format, when the user was created
type: string
sample: "2017-02-08T04:36:28+00:00"
user_id:
description: the stable and unique string identifying the user
type: string
sample: AGPAIDBWE12NSFINE55TM
user_name:
description: the friendly name that identifies the user
type: string
sample: testuser1
path:
description: the path to the user
type: string
sample: /
'''
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info, boto3_conn
from ansible.module_utils.ec2 import HAS_BOTO3
import traceback
try:
from botocore.exceptions import ClientError, ParamValidationError
except ImportError:
pass # caught by imported HAS_BOTO3
def compare_attached_policies(current_attached_policies, new_attached_policies):
# If new_attached_policies is None it means we want to remove all policies
if len(current_attached_policies) > 0 and new_attached_policies is None:
return False
current_attached_policies_arn_list = []
for policy in current_attached_policies:
current_attached_policies_arn_list.append(policy['PolicyArn'])
if not set(current_attached_policies_arn_list).symmetric_difference(set(new_attached_policies)):
return True
else:
return False
def convert_friendly_names_to_arns(connection, module, policy_names):
# List comprehension that looks for any policy in the 'policy_names' list
# that does not begin with 'arn'. If there aren't any, short circuit.
# If there are, translate friendly name to the full arn
if not any([not policy.startswith('arn:') for policy in policy_names if policy is not None]):
return policy_names
allpolicies = {}
paginator = connection.get_paginator('list_policies')
policies = paginator.paginate().build_full_result()['Policies']
for policy in policies:
allpolicies[policy['PolicyName']] = policy['Arn']
allpolicies[policy['Arn']] = policy['Arn']
try:
return [allpolicies[policy] for policy in policy_names]
except KeyError as e:
module.fail_json(msg="Couldn't find policy: " + str(e))
def create_or_update_user(connection, module):
params = dict()
params['UserName'] = module.params.get('name')
managed_policies = module.params.get('managed_policy')
purge_policy = module.params.get('purge_policy')
changed = False
if managed_policies:
managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies)
# Get user
user = get_user(connection, module, params['UserName'])
# If user is None, create it
if user is None:
# Check mode means we would create the user
if module.check_mode:
module.exit_json(changed=True)
try:
connection.create_user(**params)
changed = True
except ClientError as e:
module.fail_json(msg="Unable to create user: {0}".format(to_native(e)), exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
except ParamValidationError as e:
module.fail_json(msg="Unable to create user: {0}".format(to_native(e)), exception=traceback.format_exc())
# Manage managed policies
current_attached_policies = get_attached_policy_list(connection, module, params['UserName'])
if not compare_attached_policies(current_attached_policies, managed_policies):
current_attached_policies_arn_list = []
for policy in current_attached_policies:
current_attached_policies_arn_list.append(policy['PolicyArn'])
# If managed_policies has a single empty element we want to remove all attached policies
if purge_policy:
# Detach policies not present
for policy_arn in list(set(current_attached_policies_arn_list) - set(managed_policies)):
changed = True
if not module.check_mode:
try:
connection.detach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn)
except ClientError as e:
module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
policy_arn, params['UserName'], to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except ParamValidationError as e:
module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
policy_arn, params['UserName'], to_native(e)),
exception=traceback.format_exc())
# If there are policies to adjust that aren't in the current list, then things have changed
# Otherwise the only changes were in purging above
if set(managed_policies).difference(set(current_attached_policies_arn_list)):
changed = True
# If there are policies in managed_policies attach each policy
if managed_policies != [None] and not module.check_mode:
for policy_arn in managed_policies:
try:
connection.attach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn)
except ClientError as e:
module.fail_json(msg="Unable to attach policy {0} to user {1}: {2}".format(
policy_arn, params['UserName'], to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except ParamValidationError as e:
module.fail_json(msg="Unable to attach policy {0} to user {1}: {2}".format(
policy_arn, params['UserName'], to_native(e)),
exception=traceback.format_exc())
if module.check_mode:
module.exit_json(changed=changed)
# Get the user again
user = get_user(connection, module, params['UserName'])
module.exit_json(changed=changed, iam_user=camel_dict_to_snake_dict(user))
def destroy_user(connection, module):
params = dict()
params['UserName'] = module.params.get('name')
if get_user(connection, module, params['UserName']):
# Check mode means we would remove this user
if module.check_mode:
module.exit_json(changed=True)
# Remove any attached policies otherwise deletion fails
try:
for policy in get_attached_policy_list(connection, module, params['UserName']):
connection.detach_user_policy(UserName=params['UserName'], PolicyArn=policy['PolicyArn'])
except ClientError as e:
module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
policy['PolicyArn'], params['UserName'], to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except ParamValidationError as e:
module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
policy['PolicyArn'], params['UserName'], to_native(e)),
exception=traceback.format_exc())
try:
connection.delete_user(**params)
except ClientError as e:
module.fail_json(msg="Unable to delete user {0}: {1}".format(params['UserName'], to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except ParamValidationError as e:
module.fail_json(msg="Unable to delete user {0}: {1}".format(params['UserName'], to_native(e)),
exception=traceback.format_exc())
else:
module.exit_json(changed=False)
module.exit_json(changed=True)
def get_user(connection, module, name):
params = dict()
params['UserName'] = name
try:
return connection.get_user(**params)
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchEntity':
return None
else:
module.fail_json(msg="Unable to get user {0}: {1}".format(name, to_native(e)),
**camel_dict_to_snake_dict(e.response))
def get_attached_policy_list(connection, module, name):
try:
return connection.list_attached_user_policies(UserName=name)['AttachedPolicies']
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchEntity':
return None
else:
module.fail_json(msg="Unable to get policies for user {0}: {1}".format(name, to_native(e)),
**camel_dict_to_snake_dict(e.response))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
managed_policy=dict(default=[], type='list'),
state=dict(choices=['present', 'absent'], required=True),
purge_policy=dict(default=False, type='bool')
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_params)
state = module.params.get("state")
if state == 'present':
create_or_update_user(connection, module)
else:
destroy_user(connection, module)
if __name__ == '__main__':
main()
|
import pytest
from libqtile.widget import BatteryIcon
from libqtile import images
import cairocffi
from .conftest import TEST_DIR
def test_images_fail():
"""Test BatteryIcon() with a bad theme_path
This theme path doesn't contain all of the required images.
"""
battery = BatteryIcon(theme_path=TEST_DIR)
with pytest.raises(images.LoadingError):
battery.setup_images()
def test_images_good(tmpdir, fake_bar, svg_img_as_pypath):
"""Test BatteryIcon() with a good theme_path
This theme path does contain all of the required images.
"""
for name in BatteryIcon.icon_names:
target = tmpdir.join(name + '.svg')
svg_img_as_pypath.copy(target)
batt = BatteryIcon(theme_path=str(tmpdir))
batt.fontsize = 12
batt.bar = fake_bar
batt.setup_images()
assert len(batt.surfaces) == len(BatteryIcon.icon_names)
for name, surfpat in batt.surfaces.items():
assert isinstance(surfpat, cairocffi.SurfacePattern)
def test_images_default(fake_bar):
"""Test BatteryIcon() with the default theme_path
Ensure that the default images are successfully loaded.
"""
batt = BatteryIcon()
batt.fontsize = 12
batt.bar = fake_bar
batt.setup_images()
assert len(batt.surfaces) == len(BatteryIcon.icon_names)
for name, surfpat in batt.surfaces.items():
assert isinstance(surfpat, cairocffi.SurfacePattern)
|
# views_counter plugin server
# Developed by Raven, 2021.12.28
# Copyright (c) RavenKiller
# This source code is licensed under the MIT License found in the
# LICENSE file in the root directory of this source tree.
import tornado.ioloop
import tornado.web
import hashlib
import sqlite3
from datetime import datetime as dt
import sys
SQL_CREATE_TABLE = '''CREATE TABLE IF NOT EXISTS views
(id INTEGER PRIMARY KEY,
site_hash TEXT NOT NULL,
page_hash TEXT NOT NULL,
user_hash TEXT NOT NULL,
time TEXT NOT NULL
);'''
# wildcard: (site_hash, page_hash, user_hash, time)
SQL_INSERT_RECORD = "INSERT INTO views VALUES (NULL, ?, ?, ?, ?);"
# wildcard: (page_hash)
SQL_PAGE_VIEWS = "SELECT COUNT(*) FROM views WHERE page_hash=?;"
# wildcard: (site_hash)
SQL_SITE_VIEWS = "SELECT COUNT(*) FROM views WHERE site_hash=?;"
SQL_PAGE_USERS = "SELECT DISTINCT page_hash, user_hash FROM views;"
SQL_SITE_USERS = "SELECT DISTINCT site_hash, user_hash FROM views;"
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render("demo.html")
class VCHandler(tornado.web.RequestHandler):
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Headers", "x-requested-with")
self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')
def post(self):
res = {"success":0, "vc_page_views":0, "vc_site_views":0, "vc_page_users":0, "vc_site_users":0}
# Extract information
src = self.get_argument("src", "")
ip = self.request.remote_ip
user_agent = self.request.headers.get("User-Agent", "")
origin = self.request.headers.get("Origin", "")
        if not src or not ip or not user_agent or not origin:
            self.finish(res)
            return
# Hash
page_hash = hashlib.md5(src.encode("utf-8")).hexdigest()
site_hash = hashlib.md5(origin.encode("utf-8")).hexdigest()
user_hash = hashlib.md5((ip+user_agent).encode("utf-8")).hexdigest()
# Open DB
now = dt.now()
conn = sqlite3.connect('views.db')
c = conn.cursor()
try:
# Insert record
c.execute(SQL_INSERT_RECORD, (site_hash, page_hash, user_hash, now.strftime("%Y-%m-%d %H:%M:%S")))
# Page views
c.execute(SQL_PAGE_VIEWS, (page_hash, ))
res["vc_page_views"] = c.fetchall()[0][0]
# Site views
c.execute(SQL_SITE_VIEWS, (site_hash, ))
res["vc_site_views"] = c.fetchall()[0][0]
# Page users
c.execute(SQL_PAGE_USERS)
res["vc_page_users"] = len(c.fetchall())
# Site users
c.execute(SQL_SITE_USERS)
res["vc_site_users"] = len(c.fetchall())
# Close and return
conn.commit()
conn.close()
res["success"] = 1
self.finish(res)
        except Exception:
conn.rollback()
conn.close()
res["success"] = 0
self.finish(res)
def make_app():
return tornado.web.Application([
(r"/", MainHandler),
(r"/views", VCHandler)
])
if __name__ == "__main__":
conn = sqlite3.connect('views.db')
c = conn.cursor()
c.execute(SQL_CREATE_TABLE)
conn.commit()
conn.close()
app = make_app()
app.listen(int(sys.argv[1]))
tornado.ioloop.IOLoop.current().start()
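# Example request (assumption: server started as `python server.py 8080`; the client must
# send Origin and User-Agent headers, otherwise the handler answers with success=0):
#   POST http://localhost:8080/views?src=https://example.com/post/1
# which responds with JSON such as
#   {"success": 1, "vc_page_views": 3, "vc_site_views": 10, "vc_page_users": 2, "vc_site_users": 5}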
|
import sys
def triple_trouble(one, two, three):
s = ''
for i in range(len(one)):
s += one[i]
s += two[i]
s += three[i]
return s
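# Example (assumes all three strings have the same length):
#   triple_trouble('aaa', 'bbb', 'ccc') == 'abcabcabc'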
if __name__ == "__main__":
if len(sys.argv) == 4:
print(triple_trouble(one=sys.argv[1], two=sys.argv[2], three=sys.argv[3]))
else:
sys.exit(1)
|
#!/usr/bin/python3
"""
Creating table investment
"""
import models
from datetime import datetime
import sqlalchemy
from sqlalchemy import Column, String, ForeignKey
from sqlalchemy import Integer, DateTime, Float
from models import investor
from models.base_model import BaseModel, Base
class Investment(BaseModel, Base):
"""Representation of Investment"""
__tablename__ = 'investment'
# id = Column(Integer, primary_key=True,
# nullable=False, autoincrement=True)
investor = Column(String(60), ForeignKey('investor.id'), nullable=False, autoincrement=False)
amount = Column(Float, nullable=False)
term_in_months = Column(Integer, nullable=False)
rentability = Column(Float, nullable=False)
status = Column(String(15), nullable=False)
investment_date = Column(DateTime, nullable=False)
return_date = Column(DateTime, nullable=False)
created_at = Column(DateTime, default=datetime.utcnow)
updated_at = Column(DateTime, default=datetime.utcnow)
def __init__(self, *args, **kwargs):
"""Initializes investment"""
super().__init__(*args, **kwargs)
|
import os
import torch
from torch import autograd
from ..infer.line_of_best_fit import line_of_best_fit
# A system for autonomous steering using an LSTM network to generate a steering angle based on the slope and intercept
# of the line of best fit calculated for the center line of the road
# Created by brendon-ai, November 2017
# Main class, instantiated with a trained model
class LSTMSteeringEngine:
# Trained model used for inference of steering angles
trained_model = None
# Externally accessible storage for the center line
center_line_of_best_fit = None
# Set global trained model provided as an argument
def __init__(self, trained_model_path):
# Get the global path of the model, not relative to the home folder
global_path = os.path.expanduser(trained_model_path)
# Load the model from disk and remap it onto the CPU
self.trained_model = torch.load(global_path, map_location=lambda storage, location: storage)
# Compute a steering angle, given points down the center of the road
def compute_steering_angle(self, center_points):
# Calculate the line of best fit of the provided points
self.center_line_of_best_fit = line_of_best_fit(center_points)
# Add two empty axes to the returned list to represent the batch and sequence
center_line_empty_axes = [[self.center_line_of_best_fit]]
# Convert the list to an Autograd Variable
center_line_variable = autograd.Variable(torch.FloatTensor(center_line_empty_axes))
# Run inference using the provided model to get a steering angle
steering_angle = self.trained_model(center_line_variable)
# Return the steering angle along with dummy values for the proportional error and line slope
return steering_angle, 0, 0
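# Usage sketch (hypothetical model path and centre-line points; the points are whatever
# line_of_best_fit expects, e.g. (x, y) road-centre coordinates):
#   engine = LSTMSteeringEngine('~/models/steering_lstm.pt')
#   steering_angle, _, _ = engine.compute_steering_angle([(12, 0), (14, 20), (16, 40)])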
|
##########################################################################
#
# Copyright (c) 2019, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import six
import imath
import IECore
import Gaffer
import GafferUITest
import GafferScene
import GafferSceneUI
class TransformToolTest( GafferUITest.TestCase ) :
def testSelectionEditability( self ) :
script = Gaffer.ScriptNode()
script["box"] = Gaffer.Box()
script["box"]["plane"] = GafferScene.Plane()
Gaffer.PlugAlgo.promote( script["box"]["plane"]["out"] )
# Box is editable, so all fields of the selection should be useable.
selection = GafferSceneUI.TransformTool.Selection( script["box"]["out"], "/plane", script.context(), None )
self.assertEqual( selection.scene(), script["box"]["out"] )
self.assertEqual( selection.path(), "/plane" )
self.assertEqual( selection.context(), script.context() )
self.assertEqual( selection.upstreamScene(), script["box"]["plane"]["out"] )
self.assertEqual( selection.upstreamPath(), "/plane" )
self.assertEqual( selection.upstreamContext()["scene:path"], IECore.InternedStringVectorData( [ "plane" ] ) )
self.assertTrue( selection.editable() )
self.assertEqual( selection.editTarget(), script["box"]["plane"]["transform"] )
self.assertEqual( selection.transformSpace(), imath.M44f() )
# Reference internals are not editable, so attempts to access invalid
# fields should throw.
referenceFileName = os.path.join( self.temporaryDirectory(), "test.grf" )
script["box"].exportForReference( referenceFileName )
script["reference"] = Gaffer.Reference()
script["reference"].load( referenceFileName )
selection = GafferSceneUI.TransformTool.Selection( script["reference"]["out"], "/plane", script.context(), None )
self.assertEqual( selection.scene(), script["reference"]["out"] )
self.assertEqual( selection.path(), "/plane" )
self.assertEqual( selection.context(), script.context() )
self.assertEqual( selection.upstreamScene(), script["reference"]["plane"]["out"] )
self.assertEqual( selection.upstreamPath(), "/plane" )
self.assertEqual( selection.upstreamContext()["scene:path"], IECore.InternedStringVectorData( [ "plane" ] ) )
self.assertFalse( selection.editable() )
with six.assertRaisesRegex( self, RuntimeError, "Selection is not editable" ) :
selection.editTarget()
with six.assertRaisesRegex( self, RuntimeError, "Selection is not editable" ) :
selection.transformSpace()
def testSelectionEditScopes( self ) :
# Start with an EditScope that isn't even connected.
plane = GafferScene.Plane()
editScope = Gaffer.EditScope()
editScope.setup( plane["out"] )
selection = GafferSceneUI.TransformTool.Selection( plane["out"], "/plane", Gaffer.Context(), None )
self.assertTrue( selection.editable() )
self.assertEqual( selection.editTarget(), plane["transform"] )
self.assertIsNone( selection.editScope() )
selection = GafferSceneUI.TransformTool.Selection( plane["out"], "/plane", Gaffer.Context(), editScope )
self.assertFalse( selection.editable() )
self.assertEqual( selection.warning(), "EditScope not in history" )
self.assertRaises( RuntimeError, selection.acquireTransformEdit )
self.assertRaises( RuntimeError, selection.transformSpace )
self.assertEqual( selection.editScope(), editScope )
# Connect it and it should start to work, even if there is a downstream
# Transform node.
editScope["in"].setInput( plane["out"] )
transform = GafferScene.Transform()
transform["in"].setInput( editScope["out"] )
selection = GafferSceneUI.TransformTool.Selection( transform["out"], "/plane", Gaffer.Context(), editScope )
self.assertTrue( selection.editable() )
self.assertEqual( selection.warning(), "" )
self.assertEqual( selection.upstreamScene(), editScope["out"] )
self.assertEqual( selection.editScope(), editScope )
# Disable the EditScope and the selection should become non-editable.
editScope["enabled"].setValue( False )
selection = GafferSceneUI.TransformTool.Selection( transform["out"], "/plane", Gaffer.Context(), editScope )
self.assertFalse( selection.editable() )
self.assertEqual( selection.warning(), "EditScope disabled" )
self.assertEqual( selection.editScope(), editScope )
editScope["enabled"].setValue( True )
selection = GafferSceneUI.TransformTool.Selection( transform["out"], "/plane", Gaffer.Context(), editScope )
self.assertTrue( selection.editable() )
self.assertEqual( selection.warning(), "" )
self.assertEqual( selection.editScope(), editScope )
# Make the downstream node author a transform that would override the
# EditScope. The selection should become non-editable again.
pathFilter = GafferScene.PathFilter()
pathFilter["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
transform["filter"].setInput( pathFilter["out"] )
selection = GafferSceneUI.TransformTool.Selection( transform["out"], "/plane", Gaffer.Context(), editScope )
self.assertFalse( selection.editable() )
self.assertEqual( selection.warning(), "EditScope overridden downstream" )
self.assertEqual( selection.upstreamScene(), transform["out"] )
# Disable the downstream node and we should be back in business.
transform["enabled"].setValue( False )
selection = GafferSceneUI.TransformTool.Selection( transform["out"], "/plane", Gaffer.Context(), editScope )
self.assertTrue( selection.editable() )
self.assertEqual( selection.warning(), "" )
self.assertEqual( selection.upstreamScene(), editScope["out"] )
def testSceneReaderSelectionEditability( self ) :
sceneReader = GafferScene.SceneReader()
sceneReader["fileName"].setValue( "${GAFFER_ROOT}/python/GafferSceneTest/alembicFiles/groupedPlane.abc" )
selection = GafferSceneUI.TransformTool.Selection( sceneReader["out"], "/group/plane", Gaffer.Context(), None )
self.assertTrue( selection.editable() )
self.assertEqual( selection.path(), "/group" )
self.assertEqual( selection.editTarget(), sceneReader["transform"] )
selection = GafferSceneUI.TransformTool.Selection( sceneReader["out"], "/group", Gaffer.Context(), None )
self.assertTrue( selection.editable() )
self.assertEqual( selection.path(), "/group" )
self.assertEqual( selection.editTarget(), sceneReader["transform"] )
def testInvalidSelection( self ) :
plane = GafferScene.Plane()
selection = GafferSceneUI.TransformTool.Selection( plane["out"], "/cube", Gaffer.Context(), None )
self.assertFalse( selection.editable() )
self.assertEqual( selection.warning(), "Location does not exist" )
def testAcquireTransformEdit( self ) :
plane = GafferScene.Plane()
selection = GafferSceneUI.TransformTool.Selection( plane["out"], "/plane", Gaffer.Context(), None )
edit = selection.acquireTransformEdit()
self.assertEqual( edit.translate, plane["transform"]["translate"] )
self.assertEqual( edit.rotate, plane["transform"]["rotate"] )
self.assertEqual( edit.scale, plane["transform"]["scale"] )
self.assertEqual( edit.pivot, plane["transform"]["pivot"] )
editScope = Gaffer.EditScope()
editScope.setup( plane["out"] )
editScope["in"].setInput( plane["out"] )
selection = GafferSceneUI.TransformTool.Selection( editScope["out"], "/plane", Gaffer.Context(), editScope )
self.assertIsNone( selection.acquireTransformEdit( createIfNecessary = False ) )
edit = selection.acquireTransformEdit()
self.assertTrue( editScope.isAncestorOf( edit.translate ) )
def testDontEditUpstreamOfReference( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["box"] = Gaffer.Box()
script["box"]["filter"] = GafferScene.PathFilter()
script["box"]["filter"]["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
script["box"]["transform"] = GafferScene.Transform()
script["box"]["transform"]["filter"].setInput( script["box"]["filter"]["out"] )
Gaffer.PlugAlgo.promote( script["box"]["transform"]["in"] )
Gaffer.PlugAlgo.promote( script["box"]["transform"]["out"] )
script["box"]["in"].setInput( script["plane"]["out"] )
# Box is editable
selection = GafferSceneUI.TransformTool.Selection( script["box"]["out"], "/plane", script.context(), None )
self.assertEqual( selection.upstreamScene(), script["box"]["transform"]["out"] )
self.assertEqual( selection.upstreamPath(), "/plane" )
self.assertEqual( selection.upstreamContext()["scene:path"], IECore.InternedStringVectorData( [ "plane" ] ) )
self.assertTrue( selection.editable() )
self.assertEqual( selection.editTarget(), script["box"]["transform"]["transform"] )
# Reference internals are not editable, so we can't edit the transform any more.
# Make sure we don't accidentally traverse through the reference and try to edit
# the Plane directly.
referenceFileName = os.path.join( self.temporaryDirectory(), "test.grf" )
script["box"].exportForReference( referenceFileName )
script["reference"] = Gaffer.Reference()
script["reference"].load( referenceFileName )
script["reference"]["in"].setInput( script["plane"]["out"] )
selection = GafferSceneUI.TransformTool.Selection( script["reference"]["out"], "/plane", script.context(), None )
self.assertEqual( selection.upstreamScene(), script["reference"]["transform"]["out"] )
self.assertEqual( selection.upstreamPath(), "/plane" )
self.assertEqual( selection.upstreamContext()["scene:path"], IECore.InternedStringVectorData( [ "plane" ] ) )
self.assertFalse( selection.editable() )
if __name__ == "__main__":
unittest.main()
|
import torch
import torch.nn as nn
from model import common
import math
def make_model(opt):
return LAUNet(opt)
class Evaluator(nn.Module):
def __init__(self, n_feats):
super(Evaluator, self).__init__()
self.conv1 = nn.Sequential(nn.Conv2d(3, n_feats, kernel_size=3, stride=2))
self.conv2 = nn.Sequential(nn.Conv2d(n_feats, n_feats, kernel_size=3, stride=2))
self.bn1 = nn.BatchNorm2d(n_feats)
self.relu1 = nn.ReLU(inplace=True)
self.avg_layer = torch.nn.AdaptiveAvgPool2d((1, 1))
self.linear_layer = nn.Conv2d(in_channels=n_feats, out_channels=2, kernel_size=1, stride=1)
        self.prob_layer = nn.Softmax(dim=-1)  # explicit dim avoids the implicit-dimension deprecation warning
# saved actions and rewards
self.saved_action = None
self.rewards = []
self.eva_threshold = 0.5
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.avg_layer(x)
x = self.linear_layer(x).squeeze()
softmax = self.prob_layer(x)
if self.training:
m = torch.distributions.Categorical(softmax)
action = m.sample()
self.saved_action = action
else:
action = softmax[1]
action = torch.where(action > self.eva_threshold, 1, 0)
self.saved_action = action
m = None
return action, m
class LAUNet(nn.Module):
def __init__(self, opt, conv=common.default_conv):
super(LAUNet, self).__init__()
self.opt = opt
self.scale = opt.scale[-1]
self.level = int(math.log(self.scale, 2))
self.saved_actions = []
self.softmaxs = []
n_blocks = opt.n_blocks
n_feats = 64
kernel_size = 3
n_height = 1024
# main SR network
self.upsample = [nn.Upsample(scale_factor=2**(i+1), mode='bicubic', align_corners=False) for i in range(self.level)]
self.upsample = nn.ModuleList(self.upsample)
rgb_mean = (0.4737, 0.4397, 0.4043)
rgb_std = (1.0, 1.0, 1.0)
# data preprocessing
self.sub_mean = common.MeanShift(opt.rgb_range, rgb_mean, rgb_std)
# head conv
self.head = conv(opt.n_colors, n_feats)
# CA Dense net
self.body = [common.CADensenet(conv, n_feats, n_CADenseBlocks=(self.level-i)*n_blocks) for i in range(self.level)]
self.body = nn.ModuleList(self.body)
# upsample blocks
self.up_blocks = [common.Upsampler(common.default_conv, 2, n_feats, act=False) for _ in range(2*self.level-1)]
self.up_blocks += [common.Upsampler(common.default_conv, 2**i, 3, act=False) for i in range(self.level-1,0,-1)]
self.up_blocks = nn.ModuleList(self.up_blocks)
# tail conv that output sr ODIs
self.tail = [conv(n_feats, opt.n_colors) for _ in range(self.level+1)]
self.tail = nn.ModuleList(self.tail)
# data postprocessing
self.add_mean = common.MeanShift(opt.rgb_range, rgb_mean, rgb_std, 1)
# evaluator subnet
self.evaluator = nn.ModuleList()
for p in range(opt.n_evaluator):
self.evaluator.append(Evaluator(n_feats))
def merge(self, imglist, radio):
if radio[0] == 0 and radio[-1] == 0:
return imglist[-1]
else:
result = [imglist[0]]
for i in range(1, len(imglist)):
north, middle, south = torch.split(result[-1], [radio[0]*i, result[-1].size(2)-radio[0]*i-radio[-1]*i, radio[-1]*i], dim=2)
result.append(torch.cat((north, imglist[i], south), dim=2))
return result[-1]
def forward(self, lr):
results = []
masks = []
gprobs = []
x = self.sub_mean(lr)
g1 = self.upsample[0](x)
g2 = self.upsample[1](x)
g3 = self.upsample[2](x)
x = self.head(x)
# 1st level
b1 = self.body[0](x)
f1 = self.up_blocks[2](b1)
f1 = self.tail[0](f1)
g1 = self.add_mean(f1 + g1)
eva_g1 = g1.detach()
patchlist = torch.chunk(eva_g1, self.opt.n_evaluator, dim=2)
for i in range(len(patchlist)):
action, gprob = self.evaluator[i](patchlist[i])
threshold = action.size(0) if self.training else 1
mask = 1 if int(action.sum()) == threshold else 0
self.saved_actions.append(action)
self.softmaxs.append(gprob)
masks.append(mask)
gprobs.append(gprob)
crop_n, remain, crop_s = 0, 0, 0
for i in range(self.opt.n_evaluator//(2**self.level)):
if masks[i] == 1:
crop_n += b1.size(2)//self.opt.n_evaluator
else:
break
for j in range(self.opt.n_evaluator-1, self.opt.n_evaluator*((2**self.level-1)//(2**self.level)), -1):
if masks[j] == 1:
crop_s += b1.size(2)//self.opt.n_evaluator
else:
break
remain = b1.size(2)-crop_n-crop_s
if crop_n or crop_s:
_, b1re, _ = torch.split(b1, [crop_n, remain, crop_s], dim=2)
_, g2, _ = torch.split(g2, [crop_n*4, remain*4, crop_s*4], dim=2)
else:
b1re = b1
# 2nd level
b2 = self.up_blocks[0](b1re)
b2 = self.body[1](b2)
f2 = self.up_blocks[3](b2)
f2 = self.tail[1](f2)
g2 = self.add_mean(f2 + g2)
# 3rd level
if crop_n or crop_s:
_, b2re, _ = torch.split(b2, [crop_n * 2, b2.size(2)-crop_n * 2-crop_s * 2, crop_s * 2], dim=2)
_, g3, _ = torch.split(g3, [crop_n * 16, g3.size(2)-crop_n * 16 - crop_s * 16, crop_s * 16], dim=2)
else:
b2re = b2
b3 = self.up_blocks[1](b2re)
b3 = self.body[2](b3)
f3 = self.up_blocks[4](b3)
f3 = self.tail[2](f3)
g3 = self.add_mean(f3 + g3)
g1up = self.up_blocks[5](g1)
g2up = self.up_blocks[6](g2)
g4 = self.merge([g1up, g2up, g3], [crop_n*8, remain*8, crop_s*8])
results = [g1up, g2up, g3, g4]
return results
|
import unittest
import src.Models.AircraftDynamics as ad
import numpy as np
class AircraftDynamicsTest(unittest.TestCase):
def test_straight_line(self):
dynamics = ad.AircraftDynamics(step_time=0.001, cruise_speed=20.0)
start_state = np.array([10, 10, np.pi/4.0])
end_state = dynamics.update(start_state, 0, 10.0)
dx = 200.0/np.sqrt(2)
dy = dx
correct_end_state = np.array([10+dx, 10+dy, np.pi/4.0])
self.assertTrue(np.allclose(correct_end_state, end_state, atol=0.1))
def test_arc(self):
dynamics = ad.AircraftDynamics(step_time=0.001, cruise_speed=20.0, fast_update=False)
start_state = np.array([10, 10, np.pi / 2.0])
end_state = dynamics.update(start_state, -20, np.pi)
correct_end_state = np.array([50, 10, 3.0/2.0 * np.pi])
start_state2 = np.array([10, 10, np.pi / 2.0])
end_state2 = dynamics.update(start_state2, 20, np.pi)
correct_end_state2 = np.array([-30, 10, 3.0 / 2.0 * np.pi])
start_state3 = np.array([10, 10, np.pi / 2.0])
end_state3 = dynamics.update(start_state3, 20, np.pi / 2.0)
correct_end_state3 = np.array([-10, 30, np.pi])
start_state4 = np.array([10, 10, np.pi / 2.0])
end_state4 = dynamics.update(start_state4, -20, np.pi / 2.0)
correct_end_state4 = np.array([30, 30, 0])
self.assertTrue(np.allclose(correct_end_state, end_state, atol=0.1))
self.assertTrue(np.allclose(correct_end_state2, end_state2, atol=0.1))
self.assertTrue(np.allclose(correct_end_state3, end_state3, atol=0.1))
self.assertTrue(np.allclose(correct_end_state4, end_state4, atol=0.1))
def test_arc_fast(self):
dynamics = ad.AircraftDynamics(step_time=0.001, cruise_speed=20.0, fast_update=True)
start_state = np.array([10, 10, np.pi / 2.0])
end_state = dynamics.update(start_state, -20, np.pi)
correct_end_state = np.array([50, 10, 3.0/2.0 * np.pi])
start_state2 = np.array([10, 10, np.pi / 2.0])
end_state2 = dynamics.update(start_state2, 20, np.pi)
correct_end_state2 = np.array([-30, 10, 3.0 / 2.0 * np.pi])
start_state3 = np.array([10, 10, np.pi / 2.0])
end_state3 = dynamics.update(start_state3, 20, np.pi/2.0)
correct_end_state3 = np.array([-10, 30, np.pi])
start_state4 = np.array([10, 10, np.pi / 2.0])
end_state4 = dynamics.update(start_state4, -20, np.pi / 2.0)
correct_end_state4 = np.array([30, 30, 0])
self.assertTrue(np.allclose(correct_end_state, end_state, atol=0.1))
self.assertTrue(np.allclose(correct_end_state2, end_state2, atol=0.1))
self.assertTrue(np.allclose(correct_end_state3, end_state3, atol=0.1))
self.assertTrue(np.allclose(correct_end_state4, end_state4, atol=0.1))
if __name__ == '__main__':
unittest.main()
|
from django.http import JsonResponse
from django.utils.deprecation import MiddlewareMixin
class FirstMiddleware(MiddlewareMixin):
def process_request(self, request):
print('FirstMiddleware: process_request')
# return JsonResponse({'Hello': 'Django BBS'})
def process_view(self, request, view_func, view_args, view_kwargs):
print('FirstMiddleware: process_view')
# return JsonResponse({'Hello': 'Django BBS'})
def process_response(self, request, response):
print('FirstMiddleware: process_response')
return response
def process_exception(self, request, exception):
print('FirstMiddleware: process_exception')
return JsonResponse({'exception': str(exception)})
def process_template_response(self, request, response):
print('FirstMiddleware: process_template_response')
return response
class SecondMiddleware(MiddlewareMixin):
def process_request(self, request):
print('SecondMiddleware: process_request')
def process_view(self, request, view_func, view_args, view_kwargs):
print('SecondMiddleware: process_view')
def process_response(self, request, response):
print('SecondMiddleware: process_response')
return response
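# A minimal sketch of how these classes could be registered (the module path
# "myproject.middleware" is an assumption, not taken from this repository):
#
# MIDDLEWARE = [
#     # ... Django's default middleware ...
#     'myproject.middleware.FirstMiddleware',
#     'myproject.middleware.SecondMiddleware',
# ]
#
# With this ordering, process_request/process_view run First -> Second, while
# process_response runs in reverse order, Second -> First.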
|
import yt
import os
from glob import glob
import pandas as pd
from tqdm import tqdm
import pyrats
sims=[]
init=1
for s in sims:
cx=[]; cy=[]; cz=[]; t=[]; x=[]; y=[]; z=[]
os.chdir('/home/pfister/scratch/'+s)
files=glob('output_*')
#for i in tqdm(range(3)):
for i in tqdm(range(len(files)-init+1)):
ds=pyrats.load(i+init, dm=True, bh=True, stars=True)
d=ds.all_data()
t+=[float(ds.current_time.in_units('Gyr'))]
ccx=float(d['dm','particle_position_x'].mean())
ccy=float(d['dm','particle_position_y'].mean())
ccz=float(d['dm','particle_position_z'].mean())
for r in [3000,2500,2000,1500,1000,750,500,300]:
sp=ds.sphere([ccx,ccy,ccz], (r,'pc'))
ccx=float(sp['dm','particle_position_x'].mean())
ccy=float(sp['dm','particle_position_y'].mean())
ccz=float(sp['dm','particle_position_z'].mean())
#arg=d['deposit','stars_cic'].argmax()
#cx+=[float(d['index','x'][arg])]
#cy+=[float(d['index','y'][arg])]
#cz+=[float(d['index','z'][arg])]
cx+=[ccx]
cy+=[ccy]
cz+=[ccz]
x+=[float(d['sink','particle_position_x'].mean())]
y+=[float(d['sink','particle_position_y'].mean())]
z+=[float(d['sink','particle_position_z'].mean())]
gal={'cx':cx, 'cy':cy, 'cz':cz, 't':t, 'x':x, 'y':y, 'z':z}
gal=pd.DataFrame(data=gal)
gal.to_csv('GalCenter.csv', index=False)
|
from typing import cast
import gym
from gym.spaces import Discrete
def get_action_size_from_env(env: gym.Env) -> int:
if isinstance(env.action_space, Discrete):
return cast(int, env.action_space.n)
return cast(int, env.action_space.shape[0])
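# A minimal usage sketch; the environment ids below are standard Gym
# registrations, though the exact version suffixes may vary across Gym releases.
if __name__ == "__main__":
    discrete_env = gym.make("CartPole-v1")  # Discrete(2) action space
    print(get_action_size_from_env(discrete_env))  # -> 2
    box_env = gym.make("Pendulum-v1")  # Box(1,) continuous action space
    print(get_action_size_from_env(box_env))  # -> 1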
|
#!/usr/bin/env python
import os
import subprocess
import os.path as p
import sys
DIR_OF_THIS_SCRIPT = p.dirname( p.abspath( __file__ ) )
DIR_OF_THIRD_PARTY = p.join( DIR_OF_THIS_SCRIPT, 'third_party' )
DIR_OF_YCMD_THIRD_PARTY = p.join( DIR_OF_THIRD_PARTY, 'ycmd', 'third_party' )
python_path = []
for folder in os.listdir( DIR_OF_THIRD_PARTY ):
python_path.append( p.abspath( p.join( DIR_OF_THIRD_PARTY, folder ) ) )
for folder in os.listdir( DIR_OF_YCMD_THIRD_PARTY ):
# We skip python-future because it needs to be inserted in sys.path AFTER
# the standard library imports but we can't do that with PYTHONPATH because
# the std lib paths are always appended to PYTHONPATH. We do it correctly in
# prod in ycmd/utils.py because we have access to the right sys.path.
# So for dev, we rely on python-future being installed correctly with
# pip install -r test_requirements.txt
#
# Pip knows how to install this correctly so that it doesn't matter where in
# sys.path the path is.
if folder == 'python-future':
continue
python_path.append( p.abspath( p.join( DIR_OF_YCMD_THIRD_PARTY, folder ) ) )
if os.environ.get( 'PYTHONPATH' ):
python_path.append( os.environ[ 'PYTHONPATH' ] )
os.environ[ 'PYTHONPATH' ] = os.pathsep.join( python_path )
sys.path.insert( 1, p.abspath( p.join( DIR_OF_YCMD_THIRD_PARTY,
'argparse' ) ) )
import argparse
def RunFlake8():
print( 'Running flake8' )
subprocess.check_call( [
'flake8',
p.join( DIR_OF_THIS_SCRIPT, 'python' )
] )
def ParseArguments():
parser = argparse.ArgumentParser()
parser.add_argument( '--skip-build', action = 'store_true',
help = 'Do not build ycmd before testing.' )
parser.add_argument( '--no-flake8', action = 'store_true',
help = 'Do not run flake8' )
return parser.parse_known_args()
def BuildYcmdLibs( args ):
if not args.skip_build:
subprocess.check_call( [
sys.executable,
p.join( DIR_OF_THIS_SCRIPT, 'third_party', 'ycmd', 'build.py' )
] )
def NoseTests( extra_args ):
subprocess.check_call( [
'nosetests',
'-v',
'-w',
p.join( DIR_OF_THIS_SCRIPT, 'python' )
] + extra_args )
def Main():
( parsed_args, extra_args ) = ParseArguments()
if not parsed_args.no_flake8:
RunFlake8()
BuildYcmdLibs( parsed_args )
NoseTests( extra_args )
if __name__ == "__main__":
Main()
|
import dataiku
INPUT_DATASET = "mydataset"
COLUMN_TO_PARTITION_BY = "mypartitioningcolumn"
dataset = dataiku.Dataset(INPUT_DATASET)
df = dataset.get_dataframe(columns = [COLUMN_TO_PARTITION_BY])
combinations = df[COLUMN_TO_PARTITION_BY].unique()
combinations_str = "/".join(combinations)
client = dataiku.api_client()
project = client.get_project(dataiku.default_project_key())
variables = project.get_variables()
variables["standard"]["myPartitionList"] = combinations_str
project.set_variables(variables)
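# Illustrative example (values are hypothetical, not from this project): if the
# partitioning column contains 2020, 2021 and 2022, combinations_str becomes
# "2020/2021/2022", which downstream scenario steps can read back from the
# "myPartitionList" project variable.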
|
#! /home/alessap/miniconda3/bin/python
import subprocess
print("Setting pointers properties")
# xinput_output = str(subprocess.check_output(["xinput"])).split("\n").split("\n")
# print([line for line in xinput_output if "Master" in line])
ps = subprocess.Popen(("xinput"), stdout=subprocess.PIPE)
output = str(subprocess.check_output(("grep", "Logitech MX Master"), stdin=ps.stdout))
ps.wait()
print(output.split("id=")[1].split("\\t")[0])
mouse_name = "Logitech MX Master"
touch_pad_name = "SynPS/2 Synaptics TouchPad"
# # set natural scrolling
# xinput set-prop 'SynPS/2 Synaptics TouchPad' 'libinput Natural Scrolling Enabled' 1
#
# # xinput set-prop "SynPS/2 Synaptics TouchPad" "Tapping Enabled" 1
# #
#
# # natural scrolling touchpad and MX Master mouse
# xinput set-prop 'pointer:Logitech MX Master' 'libinput Natural Scrolling Enabled' 0
# xinput set-prop 11 306 0
# xinput set-prop 11 303 1
#
# # enable tapping touchpad
# xinput set-prop 'SynPS/2 Synaptics TouchPad' 306 1
# xinput set-prop 11 306 1
|
"""
PEP8+ code style for better code
=============================================================================
PEP8 is the de facto coding style standard for Python. Code conforming to
that is fairly readable, but some cases could be better. From experience,
I have learned additional guidelines to improve code readability and
robustness, hence PEP8+.
To ensure conformity to PEP8, flake8 can be used to check. The additional
guidelines would need to be checked manually.
This non-working program demonstrates many PEP8 styles along with the
additional guidelines.
References
-----------------------------------------------------------------------------
PEP8: https://www.python.org/dev/peps/pep-0008/
flake8: http://flake8.pycqa.org/en/latest/
"""
# Standard libraries. Imports should be sorted in alphabetical order.
import logging
import random
# 3rd-party libraries
from mysql import FakeDBConnection
# Local libraries
from mycompany.models import DataTable
log = logging.getLogger(__name__)
CONSTANTS = 'can be listed here too'
def main():
# GUIDELINE #1: Use new string format
# -----------------------------------------------------------------------
# String formatting should use f-string or .format() instead of %
# as % substitute is not as flexible and may become deprecated in the
# future. See https://www.python.org/dev/peps/pep-3101/
hello = 'hello {}'.format('world')
greeting = f'{hello}, how are you'
# The only place where %s is used would be methods that accept them
# natively, which may have performance improvements. E.g. logging
# module would skip string formatting if the log level is higher.
log.debug('This %s formatting does not happen for INFO level', greeting)
# GUIDELINE #2: Separate/group logic blocks
# -----------------------------------------------------------------------
# For `if` blocks, add an extra blank line to group each block to improve
# readability.
if len(hello) < 1:
print('nothing')
print('was')
print('said')
elif len(hello) < 5:
print('quick hello')
else:
print('long')
print('greeting')
# Same for `try/except` blocks.
try:
1/0
except Exception:
log.info('Yep, that happened')
# GUIDELINE #3: Use bare except with care
# -----------------------------------------------------------------------
# While we are talking about exceptions, never use bare except without
# re-throwing. Generally, it should not be used unless there is a need to
# handle system exceptions, such as SystemExit, to rollback a database
# transaction. See https://docs.python.org/2/howto/doanddont.html#except
db = FakeDBConnection()
try:
db.insert(DataTable, [])
db.update(DataTable, column1='new value')
db.commit()
except: # noqa
db.rollback()
raise # Always re-raise bare except so system exceptions are handled
# GUIDELINE #4: Separate/group related code
# -----------------------------------------------------------------------
# Related/similar stuff should be separated by blank line for readability
hello = 1
world = 2
hello_world = hello / world
print(f'{hello} + {world} = ', 3)
print(f'{hello} / {world} = {hello_world}')
# GUIDELINE #5: Use parentheses instead of backslashes
# -----------------------------------------------------------------------
# Parentheses are more readable than a backslash appearing at the end of
# each line.
if (hello > world and
world > hello_world):
print('This is not so '
'bad, right?')
else:
print('No backward slash used')
class Superhero:
""" A hero with superpowers. """
#: This is a class level constant
CLASS_LEVEL_CONSTANT = True
def __init__(self, powers):
"""
Initialize the superhero with powers.
:param list[str] powers: List of superpowers. It's possible to
use type hints in Python 3, but the
readability isn't great for optional
args with default values, so *I* prefer
to continue documenting them via
docstrings.
"""
#: List of superpowers.
self.powers = powers
# Hidden power that no one knows about
self._hidden_power = 'glow in the dark'
def fight(self):
""" Fight villian using one of the superpowers """
print(random.choice(self.powers))
if random.random() > 0.99:
print(self._hidden_power)
if __name__ == "__main__":
main()
|
"""Imports Tapis CLI settings to a Jinja environment
Prefixes TAPIS_PY and TAPIS_CLI are stripped and key names are lowercased.
"""
import re
from tapis_cli import settings
BLACKLIST = ['TAPIS_CLI_ALLOW_EMPTY_VARS']
__all__ = ['key_values']
def key_values():
key_vals = dict()
cli_settings = settings.all_settings()
for k, v in cli_settings.items():
if k not in BLACKLIST and not k.startswith('_'):
k = k.lower()
k = re.sub('^tapis_py_', '', k)
k = re.sub('^tapis_cli_', '', k)
key_vals[k] = v
return key_vals
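# Illustrative example of the key transformation (setting names and values are
# hypothetical): {'TAPIS_PY_BASE_URL': 'https://api.example.org',
# 'TAPIS_CLI_PAGE_SIZE': 100} would come back as
# {'base_url': 'https://api.example.org', 'page_size': 100}.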
|
from django.apps import AppConfig
from django.db.models.signals import pre_save
class NodeConfig(AppConfig):
name = 'node'
def ready(self):
from node.models import IncomingNode
from node.signals.node.pre_save import node_pre_save
pre_save.connect(node_pre_save, sender=IncomingNode)
|
import numpy as np
import csv
import time
np.random.seed(1234)  # fix the random seed for reproducibility
def randomize(): np.random.seed(int(time.time()))  # reseed with the current time (seed must be an int)
RND_MEAN = 0
RND_STD = 0.0030
LEARNING_RATE = 0.001
def abalone_exec(epoch_count=10, mb_size=10, report=1):
load_abalone_dataset()
init_model()
train_and_test(epoch_count, mb_size, report)
def load_abalone_dataset():
with open('./data/abalone.csv', 'r') as csvfile:
csvreader = csv.reader(csvfile)
# next() retrieves the next item by calling the iterator's __next__(); used here to skip the first line, which is the header row.
next(csvreader, None)
rows = []
for row in csvreader:
rows.append(row)
global data, input_cnt, output_cnt
input_cnt, output_cnt = 10, 1
data = np.zeros(shape=(len(rows), input_cnt+output_cnt))
for n, row in enumerate(rows):
if row[0] == 'I': data[n, 0] = 1
if row[0] == 'M': data[n, 1] = 1
if row[0] == 'F': data[n, 2] = 1
data[n, 3:] = row[1:]
def init_model():
"""
initialize a model weight and bias
"""
global weight, bias, input_cnt, output_cnt
weight = np.random.normal(RND_MEAN, RND_STD, [input_cnt, output_cnt])
bias = np.zeros(output_cnt)
def train_and_test(epoch_count, mb_size, report):
step_count = arrange_data(mb_size)
test_x, test_y = get_test_data()
for epoch in range(epoch_count):
losses, accs = [], []
for n in range(step_count):
train_x, train_y = get_train_data(mb_size, n)
loss, acc = run_train(train_x, train_y)
losses.append(loss)
accs.append(acc)
if report > 0 and (epoch+1) % report == 0:
acc = run_test(test_x, test_y)
print('Epoch {0}: loss={1:5.3f}, accuracy={2:5.3f}/{3:5.3f}'.\
format(epoch+1, np.mean(losses), np.mean(accs), acc))
final_acc = run_test(test_x, test_y)
print('\nFinal Test: final accuracy = {:5.3f}'.format(final_acc))
def arrange_data(mb_size):
global data, shuffle_map, test_begin_idx
shuffle_map = np.arange(data.shape[0])
np.random.shuffle(shuffle_map)
step_count = int(data.shape[0] *0.8) // mb_size
test_begin_idx = step_count * mb_size
return step_count
def get_train_data(mb_size, nth):
global data, shuffle_map, test_begin_idx, output_cnt
if nth == 0:
np.random.shuffle(shuffle_map[:test_begin_idx])
train_data = data[shuffle_map[mb_size*nth:mb_size*(nth+1)]]
return train_data[:,:-output_cnt], train_data[:,-output_cnt:]
def get_test_data():
global data, shuffle_map, test_begin_idx, output_cnt
test_data = data[shuffle_map[test_begin_idx:]]
return test_data[:, :-output_cnt], test_data[:, -output_cnt:]
def run_train(x, y):
output, aux_nn = forward_neuralnet(x)
loss, aux_pp = forward_postproc(output, y)
accuracy = eval_accuracy(output, y)
G_loss = 1.0
G_output = backprop_postproc(G_loss, aux_pp)
backprop_neuralnet(G_output, aux_nn)
return loss, accuracy
def run_test(x, y):
output, _ = forward_neuralnet(x)
accuracy = eval_accuracy(output, y)
return accuracy
def forward_neuralnet(x):
global weight, bias
output = np.matmul(x, weight) + bias
return output, x
def backprop_neuralnet(G_output, x):
global weight, bias
g_output_w = x.transpose()
G_w = np.matmul(g_output_w, G_output)
G_b = np.sum(G_output, axis=0)
weight -= LEARNING_RATE * G_w
bias -= LEARNING_RATE * G_b
def forward_postproc(output, y):
diff = output - y
square = np.square(diff)
loss = np.mean(square)
return loss, diff
def backprop_postproc(G_loss, diff):
shape = diff.shape
g_loss_square = np.ones(shape) / np.prod(shape)
g_square_diff = 2 * diff
g_diff_output = 1
G_square = g_loss_square * G_loss
G_diff = g_square_diff * G_square
G_output = g_diff_output * G_diff
return G_output
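# In effect backprop_postproc() computes the gradient of the MSE loss with
# respect to the network output:
#   loss = mean((output - y)^2)  =>  d(loss)/d(output) = 2 * (output - y) / N
# scaled by the incoming gradient G_loss (1.0 here), where N is the total
# number of elements in diff.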
def eval_accuracy(output, y):
mdiff = np.mean(np.abs((output - y) / y))
return 1 - mdiff
if __name__ == "__main__":
"""
* Looking at the output, the loss keeps decreasing while the accuracy stays
  roughly the same, so the task itself (predicting the number of rings from an
  abalone's external measurements) may be the limiting factor.
* The learning rate and batch size are hyperparameters that strongly affect
  training, so it is worth training with several settings and comparing them.
"""
print("Epoch: 10, batch size: 10, Learning rate: {}".format(LEARNING_RATE))
abalone_exec()
print("="*100)
LEARNING_RATE = 0.01
print("Epoch: 40, batch size: 40, Learning rate: {}".format(LEARNING_RATE))
abalone_exec(40, 40, 4)
|
import numpy as np
import pytest
import unittest
from mvc.models.metrics.base_metric import BaseMetric
class BaseMetricTest(unittest.TestCase):
def setUp(self):
self.metric = BaseMetric()
def test_add(self):
with pytest.raises(NotImplementedError):
self.metric.add(np.random.random())
def test_get(self):
with pytest.raises(NotImplementedError):
self.metric.get()
def test_reset(self):
with pytest.raises(NotImplementedError):
self.metric.reset()
|
# coding: utf-8
import lxml.html
from .abstract import get_strategy
class TestTags:
def test_adding_tags_for_page(self):
# Given
strategy = get_strategy({
'start_urls': [{
'url': 'http://foo.bar/api',
'tags': ["test"]
}]
})
strategy.dom = lxml.html.fromstring("""
<html><body>
<h1>Foo</h1>
</body></html>
""")
# When
actual = strategy.get_records_from_dom("http://foo.bar/api")
# Then
assert actual[0]['tags'] == ["test"]
def test_adding_tags_for_subpage(self):
# Given
strategy = get_strategy({
'start_urls': [{
'url': 'http://foo.bar/api',
'tags': ["test"]
}]
})
strategy.dom = lxml.html.fromstring("""
<html><body>
<h1>Foo</h1>
</body></html>
""")
# When
actual = strategy.get_records_from_dom("http://foo.bar/api/test")
# Then
assert actual[0]['tags'] == ["test"]
def test_regex_start_urls(self):
# Given
# Stub ENV variables read by ConfigLoader
strategy = get_strategy({
'start_urls': [
{
'url': 'http://foo.bar/.*',
'tags': ["test"]
}
]
})
strategy.dom = lxml.html.fromstring("""
<html><body>
<h1>Foo</h1>
</body></html>
""")
# When
actual = strategy.get_records_from_dom("http://foo.bar/api/test")
# Then
assert actual[0]['tags'] == ["test"]
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
##################################################################################
# Module: args
# Purpose: Module defining all switches and arguments used by Audit UI
#
# Notes:
#
##################################################################################
import sys
import logging
import data_pipeline.logger.logging_loader
import argparse
logger = logging.getLogger(__name__)
def parse_args(arg_list):
logger.info("Parsing command line arguments: {}".format(arg_list))
args_parser = argparse.ArgumentParser()
args_parser.add_argument("--quiet", action="store_true", help="quiet mode")
args_parser.add_argument("--verbose", action="store_true", help="verbose mode")
args_parser.add_argument("--veryverbose", action="store_true", help="very verbose mode")
args_parser.add_argument("--audituser", nargs='?', help="process audit user credentials requried for logging processing metrics")
args_parser.add_argument("--httphost", nargs='?', default = '0.0.0.0', help="process audit web server http host")
args_parser.add_argument("--httpport", nargs='?', default = '5000', help="process audit web server http port")
parsed_args = args_parser.parse_args(arg_list)
return parsed_args
def get_program_args():
return parse_args(sys.argv[1:])
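# A minimal usage sketch (argument values are illustrative only):
#   parsed = parse_args(["--verbose", "--httpport", "8080"])
#   parsed.verbose   -> True
#   parsed.httpport  -> "8080"  (values are kept as strings)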
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import argparse
from azureml.core import Datastore
import aml_utils
def main(datastore, data_path):
# Get snapshot of your data and save it in datastore
os.makedirs(data_path, exist_ok=True)
with open(os.path.join(data_path, 'data.csv'), 'w') as f:
f.write('column1,column2,column3\n1,2,3\n4,5,6\n7,8,9\n')
ws = aml_utils.retrieve_workspace()
datastore = Datastore(ws, name=datastore)
datastore.upload(
src_dir=data_path,
target_path=data_path,
overwrite=False
)
print(f'Snapshot saved in datastore {datastore}, path {data_path}')
def parse_args(args_list=None):
parser = argparse.ArgumentParser()
parser.add_argument('--datastore', type=str, required=True)
parser.add_argument('--path', type=str, required=True)
args_parsed = parser.parse_args(args_list)
return args_parsed
if __name__ == "__main__":
args = parse_args()
main(
datastore=args.datastore,
data_path=args.path
)
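# Example command-line invocation (script and resource names are placeholders,
# not values taken from this repository):
#   python create_data_snapshot.py --datastore workspaceblobstore --path data/snapshot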
|
"""The package with different tools serving as helpers in other modules."""
from .xmath import Xmath as xmath
__all__ = ["xmath", ]
|
import pickle
from conftest import Benchmark, random_rat
from donuts import RationalFunction
def test_rat_to_string(benchmark: Benchmark) -> None:
r = random_rat(nterms=1000)
result = benchmark(str, r)
assert result
def test_rat_from_string(benchmark: Benchmark) -> None:
r = random_rat(nterms=1000)
s = str(r)
result = benchmark(RationalFunction, s)
assert result
def test_rat_dumps(benchmark: Benchmark) -> None:
r = random_rat(nterms=1000)
result = benchmark(pickle.dumps, r)
assert result
def test_rat_loads(benchmark: Benchmark) -> None:
r = random_rat(nterms=1000)
s = pickle.dumps(r)
result = benchmark(pickle.loads, s)
assert result
def test_rat_add(benchmark: Benchmark) -> None:
r1 = random_rat(nterms=100, seed=1)
r2 = random_rat(nterms=100, seed=2)
result = benchmark(lambda a, b: a + b, r1, r2)
assert result
def test_rat_mul(benchmark: Benchmark) -> None:
r1 = random_rat(nterms=100, seed=1)
r2 = random_rat(nterms=100, seed=2)
result = benchmark(lambda a, b: a * b, r1, r2)
assert result
|
from datetime import datetime, timedelta
from random import randint
from zeeguu_core_test.rules.article_rule import ArticleRule
from zeeguu_core_test.rules.base_rule import BaseRule
from zeeguu_core_test.rules.language_rule import LanguageRule
from zeeguu_core_test.rules.rss_feed_rule import RSSFeedRule
from zeeguu_core_test.rules.url_rule import UrlRule
from zeeguu_core_test.rules.user_rule import UserRule
from zeeguu_core.model import Article
from zeeguu_core.model.user_article import UserArticle
class UserArticleRule(BaseRule):
"""
Creates a User Article object with random data and saves it to the database.
"""
def __init__(self):
super().__init__()
self.user_article = self._create_model_object()
self.save(self.user_article)
def _create_model_object(self):
user = UserRule().user
article = ArticleRule().article
user_article = UserArticle(user, article)
if self._exists_in_db(user_article):
return self._create_model_object()
return user_article
@staticmethod
def _exists_in_db(obj):
return UserArticle.exists(obj)
|
from Blueprint3DJSBPY.bp3dpy.model.half_edge import HalfEdge;
from mathutils import Vector;
class Room():
def __init__(self, name, floorplan, corners):
self.__name = name;
self.__floorplan = floorplan;
self.__corners = corners;
self.__walls = [];
self.__interiorCorners = [];
self.__interiorCorners3D = [];
self.__edgePointer = None;
self.__floorRectangleSize = None;
self.updateWalls();
self.updateInteriorCorners();
for corner in self.__corners:
corner.attachRoom(self);
def updateWalls(self):
prevEdge = None;
firstEdge = None;
self.__walls = [];
for i, firstCorner in enumerate(self.__corners):
secondCorner = self.__corners[(i+1) % len(self.__corners)];
wallTo = firstCorner.wallTo(secondCorner);
wallFrom = firstCorner.wallFrom(secondCorner);
edge = None;
if(wallTo):
edge = HalfEdge(self, wallTo, True);
elif(wallFrom):
edge = HalfEdge(self, wallFrom, False);
else:
print("corners aren't connected by a wall, uh oh");
if(wallTo and not wallTo in self.__walls):
self.__walls.append(wallTo);
wallTo.addRoom(self);
if(wallFrom and not wallFrom in self.__walls):
self.__walls.append(wallFrom);
wallFrom.addRoom(self);
if(i == 0):
firstEdge = edge;
else:
edge.prev = prevEdge;
prevEdge.next = edge;
if(i + 1 == len(self.__corners)):
firstEdge.prev = edge;
edge.next = firstEdge;
prevEdge = edge;
self.__edgePointer = firstEdge;
def updateInteriorCorners(self):
minB, maxB = Vector((1e10, 1e10)), Vector((-1e10, -1e10));
self.__interiorCorners = [];
edge = self.__edgePointer;
iterateWhile = True;
while(iterateWhile):
iStart = edge.interiorStart();
cStart = edge.getStart();
minB.x = min(iStart.x, minB.x);
minB.y = min(iStart.y, minB.y);
maxB.x = max(maxB.x, iStart.x);
maxB.y = max(maxB.y, iStart.y);
self.__interiorCorners.append(iStart);
self.__interiorCorners3D.append(Vector((iStart.x, iStart.y, cStart.elevation)));
if(edge.next == self.__edgePointer):
break;
else:
edge = edge.next;
self.__floorRectangleSize = maxB - minB;
def getUuid(self):
cornerIds = [corner.id for corner in self.__corners];
cornerIds = sorted(cornerIds);
return ','.join(cornerIds);
def getTexture(self):
uuid = self.getUuid();
tex = self.__floorplan.getFloorTexture(uuid);
return tex;
@property
def floorRectangleSize(self):
return self.__floorRectangleSize;
@property
def edgePointer(self):
return self.__edgePointer;
@property
def interiorCorners(self):
return self.__interiorCorners;
@property
def interiorCorners3D(self):
return self.__interiorCorners3D;
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-26 10:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('premises', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='premise',
name='key_indirect_object',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='key_indirect_object', to='premises.Noun'),
),
migrations.AlterField(
model_name='premise',
name='key_predicate',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='premises.Verb'),
),
migrations.AlterField(
model_name='premise',
name='premise_type',
field=models.IntegerField(choices=[(1, 'Categorization'), (3, 'Comparison'), (4, 'Deduction'), (5, 'Diagnosis'), (6, 'Proposal')], default=1),
),
migrations.AlterField(
model_name='premisevote',
name='value',
field=models.IntegerField(),
),
migrations.AlterUniqueTogether(
name='premise',
unique_together=set([('premise_type', 'key_subject', 'key_predicate', 'key_object', 'key_complement', 'key_indirect_object')]),
),
]
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Example showing how to disable commitment.
Note: This configuration should only be used as part of a migration from version 1.x to 2.x, or for advanced users
with specialized requirements. We recommend that AWS Encryption SDK users enable commitment whenever possible.
"""
import aws_encryption_sdk
from aws_encryption_sdk import CommitmentPolicy
def encrypt_decrypt(key_arn, source_plaintext, botocore_session=None):
"""Encrypts and then decrypts a string under one KMS customer master key (CMK).
:param str key_arn: Amazon Resource Name (ARN) of the KMS CMK
:param bytes source_plaintext: Data to encrypt
:param botocore_session: existing botocore session instance
:type botocore_session: botocore.session.Session
"""
kwargs = dict(key_ids=[key_arn])
if botocore_session is not None:
kwargs["botocore_session"] = botocore_session
# Set up an encryption client with an explicit commitment policy disallowing encryption with algorithms that
# provide commitment
client = aws_encryption_sdk.EncryptionSDKClient(commitment_policy=CommitmentPolicy.FORBID_ENCRYPT_ALLOW_DECRYPT)
# Create master key provider using the ARN of the key and the session (botocore_session)
kms_key_provider = aws_encryption_sdk.StrictAwsKmsMasterKeyProvider(**kwargs)
# Encrypt the plaintext using the AWS Encryption SDK. It returns the encrypted message and the header. Note: in
# order for decrypt to succeed, the key_ids value must be the key ARN of the CMK.
ciphertext, encrypted_message_header = client.encrypt(source=source_plaintext, key_provider=kms_key_provider)
# Decrypt the encrypted message using the AWS Encryption SDK. It returns the decrypted message and the header
plaintext, decrypted_message_header = client.decrypt(source=ciphertext, key_provider=kms_key_provider)
# Verify that the original message and the decrypted message are the same
assert source_plaintext == plaintext
# Verify that the encryption context of the encrypted message and decrypted message match
assert all(
pair in encrypted_message_header.encryption_context.items()
for pair in decrypted_message_header.encryption_context.items()
)
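# A minimal invocation sketch. The key ARN below is a placeholder; running this
# requires a real KMS CMK ARN and valid AWS credentials.
if __name__ == "__main__":
    encrypt_decrypt(
        key_arn="arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID",
        source_plaintext=b"Hello, legacy commitment policy!",
    )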
|
#!/usr/bin/env python
import copy
from importlib import import_module
import logging
import numpy as np
from openquake.hazardlib.gsim.base import GMPE
from openquake.hazardlib.gsim.boore_2014 import BooreEtAl2014
from openquake.hazardlib.gsim.campbell_bozorgnia_2014 import CampbellBozorgnia2014
from openquake.hazardlib.imt import PGA, PGV, SA
from openquake.hazardlib import const
from openquake.hazardlib.valid import gsim
from openquake.hazardlib.contexts import RuptureContext
from shakelib.conversions.imt.abrahamson_bhasin_2020 import AbrahamsonBhasin2020
from shakelib.conversions.imc.boore_kishida_2017 import BooreKishida2017
from shakelib.sites import Sites
# Special case GMPEs:
from shakelib.gmpe.nga_east import NGAEast
def set_sites_depth_parameters(sites, gmpe):
"""
Need to select the appropriate z1pt0 value for different GMPEs.
Note that these are required site parameters, so even though
OQ has these equations built into the class in most cases.
I have submitted an issue to OQ requesting subclasses of these
methods that do not require the depth parameters in the
SitesContext to make this easier.
Args:
sites: An OQ sites context.
gmpe: An OQ GMPE instance.
Returns:
An OQ sites context with the depth parameters set for the
requested GMPE.
"""
if gmpe == "[MultiGMPE]":
return sites
Sites._addDepthParameters(sites)
if (
gmpe == "[AbrahamsonEtAl2014]"
or gmpe == "[AbrahamsonEtAl2014]\nregion = 'TWN'"
or gmpe == "[AbrahamsonEtAl2014]\nregion = 'CHN'"
):
sites.z1pt0 = sites.z1pt0_ask14_cal
if gmpe == "[AbrahamsonEtAl2014]\nregion = 'JPN'":
sites.z1pt0 = sites.z1pt0_ask14_jpn
if gmpe == "[ChiouYoungs2014]" or isinstance(gmpe, BooreEtAl2014):
sites.z1pt0 = sites.z1pt0_cy14_cal
if isinstance(gmpe, CampbellBozorgnia2014):
if (
gmpe == "[CampbellBozorgnia2014JapanSite]"
or gmpe == "[CampbellBozorgnia2014HighQJapanSite]"
or gmpe == "[CampbellBozorgnia2014LowQJapanSite]"
):
sites.z2pt5 = sites.z2pt5_cb14_jpn
else:
sites.z2pt5 = sites.z2pt5_cb14_cal
if (
gmpe == "[ChiouYoungs2008]"
or gmpe == "[Bradley2013]"
or gmpe == "[Bradley2013Volc]"
):
sites.z1pt0 = sites.z1pt0_cy08
if gmpe == "[CampbellBozorgnia2008]":
sites.z2pt5 = sites.z2pt5_cb07
if gmpe == "[AbrahamsonSilva2008]":
sites.z1pt0 = gmpe._compute_median_z1pt0(sites.vs30)
return sites
def stuff_context(sites, rup, dists):
"""
Function to fill a rupture context with the contents of all of the
other contexts.
Args:
sites (SiteCollection): A SiteCollection object.
rup (RuptureContext): A RuptureContext object.
dists (DistanceContext): A DistanceContext object.
Returns:
RuptureContext: A new RuptureContext whose attributes are all of
the elements of the three inputs.
"""
ctx = RuptureContext()
for name in [name for name in vars(sites) if not name.startswith("__")]:
setattr(ctx, name, getattr(sites, name))
for name in [name for name in vars(rup) if not name.startswith("__")]:
setattr(ctx, name, getattr(rup, name))
for name in [name for name in vars(dists) if not name.startswith("__")]:
setattr(ctx, name, getattr(dists, name))
return ctx
def get_gmpe_from_name(name, conf):
# Only import the NullGMPE when we're testing
# We'll want to import any other GMPEs we add at the top of this module
# so that gsim() picks them up; anything in OQ is already included
if name == "NullGMPE":
mod = import_module(conf["gmpe_modules"][name][1])
return gsim(name)
class MultiGMPE(GMPE):
"""
Implements a GMPE that is the combination of multiple GMPEs.
"""
DEFINED_FOR_TECTONIC_REGION_TYPE = None
DEFINED_FOR_INTENSITY_MEASURE_TYPES = None
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = None
DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([const.StdDev.TOTAL])
REQUIRES_SITES_PARAMETERS = None
REQUIRES_RUPTURE_PARAMETERS = None
REQUIRES_DISTANCES = None
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See superclass `method <http://docs.openquake.org/oq-hazardlib/master/gsim/index.html#openquake.hazardlib.gsim.base.GroundShakingIntensityModel.get_mean_and_stddevs>`__.
Unlike the superclass method, the stddev list returned by this
function will have twice as many arrays as are requested in
stddev_types: The first set will include the standard deviation
inflation due to the point-source to finite fault conversion (if
any), and the second set will not include this inflation. In the
case where a finite rupture is provided (and, thus, no point-source
to finite rupture adjustments are made) the two sets of stddev
arrays will be identical. Thus, if::
stddev_types = [const.StdDev.TOTAL, const.StdDev.INTRA_EVENT,
const.StdDev.INTER_EVENT]
the returned stddev list will contain six arrays: the first three
will include the point-source inflation, and the second three will
not.
""" # noqa
# ---------------------------------------------------------------------
# Sort out shapes of the sites and dists elements
# Need to turn all 2D arrays into 1D arrays because of
# inconsistencies in how arrays are handled in OpenQuake.
# ---------------------------------------------------------------------
shapes = []
for k, v in sites.__dict__.items():
if k == "_slots_":
continue
if (k != "lons") and (k != "lats"):
shapes.append(v.shape)
sites.__dict__[k] = np.reshape(sites.__dict__[k], (-1,))
for k, v in dists.__dict__.items():
if k == "_slots_":
continue
if (k != "lons") and (k != "lats") and v is not None:
shapes.append(v.shape)
dists.__dict__[k] = np.reshape(dists.__dict__[k], (-1,))
shapeset = set(shapes)
if len(shapeset) != 1:
raise Exception("All dists and sites elements must have same shape.")
else:
orig_shape = list(shapeset)[0]
sd_avail = self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
if not sd_avail.issuperset(set(stddev_types)):
raise Exception("Requested an unavailable stddev_type.")
# Evaluate MultiGMPE:
lnmu, lnsd = self.__get_mean_and_stddevs__(sites, rup, dists, imt, stddev_types)
# Check for large-distance cutoff/weights
if hasattr(self, "CUTOFF_DISTANCE"):
lnmu_large, lnsd_large = self.__get_mean_and_stddevs__(
sites, rup, dists, imt, stddev_types, large_dist=True
)
# Stomp on lnmu and lnsd at large distances
dist_cutoff = self.CUTOFF_DISTANCE
lnmu[dists.rjb > dist_cutoff] = lnmu_large[dists.rjb > dist_cutoff]
for i in range(len(lnsd)):
lnsd[i][dists.rjb > dist_cutoff] = lnsd_large[i][
dists.rjb > dist_cutoff
]
# Undo reshapes of inputs
for k, v in dists.__dict__.items():
if k == "_slots_":
continue
if (k != "lons") and (k != "lats") and v is not None:
dists.__dict__[k] = np.reshape(dists.__dict__[k], orig_shape)
for k, v in sites.__dict__.items():
if k == "_slots_":
continue
if (k != "lons") and (k != "lats"):
sites.__dict__[k] = np.reshape(sites.__dict__[k], orig_shape)
# Reshape output
lnmu = np.reshape(lnmu, orig_shape)
for i in range(len(lnsd)):
lnsd[i] = np.reshape(lnsd[i], orig_shape)
return lnmu, lnsd
def __get_mean_and_stddevs__(
self, sites, rup, dists, imt, stddev_types, large_dist=False
):
# ---------------------------------------------------------------------
# Sort out which set of weights to use
# ---------------------------------------------------------------------
if large_dist is False:
wts = self.WEIGHTS
else:
wts = self.WEIGHTS_LARGE_DISTANCE
# ---------------------------------------------------------------------
# This is the array to hold the weighted combination of the GMPEs
# ---------------------------------------------------------------------
lnmu = np.zeros_like(sites.vs30)
# ---------------------------------------------------------------------
# Hold on to the individual means and stddevs so we can compute the
# combined stddev
# ---------------------------------------------------------------------
lnmu_list = []
lnsd_list = []
for i, gmpe in enumerate(self.GMPES):
# -----------------------------------------------------------------
# Loop over GMPE list
# -----------------------------------------------------------------
set_sites_depth_parameters(sites, gmpe)
# -----------------------------------------------------------------
# Select the IMT
# -----------------------------------------------------------------
gmpe_imts = [
imt.__name__ for imt in list(gmpe.DEFINED_FOR_INTENSITY_MEASURE_TYPES)
]
if (
not isinstance(gmpe, MultiGMPE)
and (imt.string == "PGV")
and ("PGV" not in gmpe_imts)
):
ab2020 = AbrahamsonBhasin2020(rup.mag)
timt = SA(ab2020.getTref())
else:
timt = imt
# -----------------------------------------------------------------
# Grab GMPE_LIMITS in gmpe instance for later as the multigmpe
# nests downward.
# -----------------------------------------------------------------
if hasattr(self, "GMPE_LIMITS"):
# Remember that GMPE_LIMITS is only present if it is getting
# loaded from a config... we could change this eventually.
gmpe.GMPE_LIMITS = self.GMPE_LIMITS
# -----------------------------------------------------------------
# Apply GMPE_LIMITS if applicable
# -----------------------------------------------------------------
if hasattr(gmpe, "GMPE_LIMITS"):
gmpes_with_limits = list(gmpe.GMPE_LIMITS.keys())
gmpe_class_str = str(gmpe).replace("[", "").replace("]", "")
if gmpe_class_str in gmpes_with_limits:
limit_dict = gmpe.GMPE_LIMITS[gmpe_class_str]
for k, v in limit_dict.items():
if k == "vs30":
vs30min = float(v[0])
vs30max = float(v[1])
sites.vs30 = np.clip(sites.vs30, vs30min, vs30max)
Sites._addDepthParameters(sites)
# -----------------------------------------------------------------
# Evaluate
# -----------------------------------------------------------------
if not isinstance(gmpe, MultiGMPE):
ctx = stuff_context(sites, rup, dists)
lmean, lsd = gmpe.get_mean_and_stddevs(
ctx, ctx, ctx, timt, stddev_types
)
else:
lmean, lsd = gmpe.get_mean_and_stddevs(
sites, rup, dists, timt, stddev_types
)
if not isinstance(gmpe, MultiGMPE):
# -------------------------------------------------------------
# We may need to inflate the standard deviations to account for
# the point-source to finite rupture conversion.
# -------------------------------------------------------------
lsd_new = self.__inflatePSSigma__(
gmpe, lmean, lsd, sites, rup, dists, timt, stddev_types
)
for sd in lsd:
lsd_new.append(sd)
lsd = lsd_new
# -------------------------------------------------------------
# If IMT is PGV and PGV is not given by the GMPE, then
# convert from the appropriate PSA
# -------------------------------------------------------------
if (imt.string == "PGV") and ("PGV" not in gmpe_imts):
lmean, lsd = ab2020.getPGVandSTDDEVS(
lmean, lsd, stddev_types, ctx.rrup, ctx.vs30
)
# -------------------------------------------------------------
# -------------------------------------------------------------
if self.HAS_SITE[i] is False:
lamps = self.__get_site_factors__(
sites, rup, dists, timt, default=True
)
lmean = lmean + lamps
# -------------------------------------------------------------
# Conversions due to component definition
# -------------------------------------------------------------
imc_in = gmpe.DEFINED_FOR_INTENSITY_MEASURE_COMPONENT
imc_out = self.DEFINED_FOR_INTENSITY_MEASURE_COMPONENT
if imc_in != imc_out:
bk17 = BooreKishida2017(imc_in, imc_out)
lmean = bk17.convertAmps(imt, lmean, dists.rrup, rup.mag)
#
# The extra sigma from the component conversion appears to
# apply to the total sigma, so the question arises as to
# how to apportion it between the intra- and inter-event
# sigma. Here we assume it all enters as intra-event sigma.
#
for j, stddev_type in enumerate(stddev_types):
if stddev_type == const.StdDev.INTER_EVENT:
continue
lsd[j] = bk17.convertSigmas(imt, lsd[j])
# End: if GMPE is not MultiGMPE
#
# At this point lsd will have 2 * len(stddev_types) entries, the
# first group will have the point-source to finite rupture
# inflation (if any), and the second set will not; in cases where
# a finite rupture is used, the two sets will be identical
#
# -----------------------------------------------------------------
# Compute weighted mean and collect the elements to compute sd
# -----------------------------------------------------------------
lnmu = lnmu + wts[i] * lmean
lnmu_list.append(lmean)
lnsd_list = lnsd_list + lsd
# -----------------------------------------------------------------
# The mean is a weighted sum of random variables, so the stddev
# is the weighted sum of their covariances (effectively). See:
# https://en.wikipedia.org/wiki/Variance#Weighted_sum_of_variables
# for an explanation. Also see:
# http://usgs.github.io/shakemap/manual4_0/tg_processing.html#ground-motion-prediction
# for a discussion on the way this is implemented here.
# -------------------------------------------------------------- # noqa
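# Concretely, the identity being applied per site is
#   Var(sum_i w_i * X_i) = sum_i sum_j w_i * w_j * rho_ij * sd_i * sd_j
# where rho_ij is the correlation between GMPE i and GMPE j; the code below
# folds w_i * w_j * rho_ij into `cc` and then sums the weighted covariances.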
nwts = len(wts)
npwts = np.array(wts).reshape((1, -1))
nsites = len(lnmu)
# Find the correlation coefficients among the gmpes; if there are
# fewer than 10 points, just use an approximation (noting that the
# correlation among GMPEs tends to be quite high).
if nsites < 10:
cc = np.full((nwts, nwts), 0.95)
np.fill_diagonal(cc, 1.0)
else:
np.seterr(divide="ignore", invalid="ignore")
cc = np.reshape(np.corrcoef(lnmu_list), (nwts, nwts))
np.seterr(divide="warn", invalid="warn")
cc[np.isnan(cc)] = 1.0
# Multiply the correlation coefficients by the weights matrix
# (this is cheaper than multiplying all of the elements of each
# stddev array by their weights since we have to multiply
# everything by the correlation coefficient matrix anyway)
cc = ((npwts * npwts.T) * cc).reshape((nwts, nwts, 1))
nstds = len(stddev_types)
lnsd_new = []
for i in range(nstds * 2):
sdlist = []
for j in range(nwts):
sdlist.append(lnsd_list[j * nstds * 2 + i].reshape((1, 1, -1)))
sdstack = np.hstack(sdlist)
wcov = (sdstack * np.transpose(sdstack, axes=(1, 0, 2))) * cc
# This sums the weighted covariance as each point in the output
lnsd_new.append(np.sqrt(wcov.sum((0, 1))))
return lnmu, lnsd_new
@classmethod
def __from_config__(cls, conf, filter_imt=None):
"""
Construct a MultiGMPE from a config file.
Args:
conf (dict): Dictionary of config options.
filter_imt (IMT): An optional IMT to filter/reweight the GMPE list.
Returns:
MultiGMPE object.
"""
IMC = getattr(const.IMC, conf["interp"]["component"])
selected_gmpe = conf["modeling"]["gmpe"]
logging.debug(f"selected_gmpe: {selected_gmpe}")
logging.debug(f"IMC: {IMC}")
# ---------------------------------------------------------------------
# Allow for selected_gmpe to be found in either conf['gmpe_sets'] or
# conf['gmpe_modules'], if it is a GMPE set, then all entries must be
# either a GMPE or a GMPE set (cannot have a GMPE set that is a mix of
# GMPEs and GMPE sets).
# ---------------------------------------------------------------------
if selected_gmpe in conf["gmpe_sets"].keys():
selected_gmpe_sets = conf["gmpe_sets"][selected_gmpe]["gmpes"]
gmpe_set_weights = [
float(w) for w in conf["gmpe_sets"][selected_gmpe]["weights"]
]
logging.debug(f"selected_gmpe_sets: {selected_gmpe_sets}")
logging.debug(f"gmpe_set_weights: {gmpe_set_weights}")
# -----------------------------------------------------------------
# If it is a GMPE set, does it contain GMPEs or GMPE sets?
# -----------------------------------------------------------------
set_of_gmpes = all([s in conf["gmpe_modules"] for s in selected_gmpe_sets])
set_of_sets = all([s in conf["gmpe_sets"] for s in selected_gmpe_sets])
if set_of_sets is True:
mgmpes = []
for s in selected_gmpe_sets:
mgmpes.append(
cls.__multigmpe_from_gmpe_set__(conf, s, filter_imt=filter_imt)
)
out = MultiGMPE.__from_list__(mgmpes, gmpe_set_weights, imc=IMC)
elif set_of_gmpes is True:
out = cls.__multigmpe_from_gmpe_set__(
conf, selected_gmpe, filter_imt=filter_imt
)
else:
raise TypeError(
"%s must consist exclusively of keys in "
"conf['gmpe_modules'] or conf['gmpe_sets']" % selected_gmpe
)
elif selected_gmpe in conf["gmpe_modules"].keys():
modinfo = conf["gmpe_modules"][selected_gmpe]
# mod = import_module(modinfo[1])
# tmpclass = getattr(mod, modinfo[0])
# out = MultiGMPE.__from_list__([tmpclass()], [1.0], imc=IMC)
out = MultiGMPE.__from_list__(
[get_gmpe_from_name(modinfo[0], conf)], [1.0], imc=IMC
)
else:
raise TypeError(
"conf['modeling']['gmpe'] must be a key in "
"conf['gmpe_modules'] or conf['gmpe_sets']"
)
out.DESCRIPTION = selected_gmpe
# ---------------------------------------------------------------------
# Deal with GMPE limits
# ---------------------------------------------------------------------
gmpe_lims = conf["gmpe_limits"]
# We need to replace the short name in the dictionary key with module
# name here since the conf is not available within the MultiGMPE class.
mods = conf["gmpe_modules"]
mod_keys = mods.keys()
new_gmpe_lims = {}
for k, v in gmpe_lims.items():
if k in mod_keys:
new_gmpe_lims[mods[k][0]] = v
else:
new_gmpe_lims[k] = v
out.GMPE_LIMITS = new_gmpe_lims
return out
def __multigmpe_from_gmpe_set__(conf, set_name, filter_imt=None):
"""
Private method for constructing a MultiGMPE from a set_name.
Args:
conf (ConfigObj): A ShakeMap config object.
filter_imt (IMT): An optional IMT to filter/reweight the GMPE list.
set_name (str): Set name; must correspond to a key in
conf['set_name'].
Returns:
MultiGMPE.
"""
IMC = getattr(const.IMC, conf["interp"]["component"])
selected_gmpes = conf["gmpe_sets"][set_name]["gmpes"]
selected_gmpe_weights = [
float(w) for w in conf["gmpe_sets"][set_name]["weights"]
]
# Check for large distance GMPEs
if "weights_large_dist" in conf["gmpe_sets"][set_name].keys():
if not conf["gmpe_sets"][set_name]["weights_large_dist"]:
selected_weights_large_dist = None
else:
selected_weights_large_dist = [
float(w) for w in conf["gmpe_sets"][set_name]["weights_large_dist"]
]
else:
selected_weights_large_dist = None
if "dist_cutoff" in conf["gmpe_sets"][set_name].keys():
if np.isnan(conf["gmpe_sets"][set_name]["dist_cutoff"]):
selected_dist_cutoff = None
else:
selected_dist_cutoff = float(conf["gmpe_sets"][set_name]["dist_cutoff"])
else:
selected_dist_cutoff = None
if "site_gmpes" in conf["gmpe_sets"][set_name].keys():
if not conf["gmpe_sets"][set_name]["site_gmpes"]:
selected_site_gmpes = None
else:
selected_site_gmpes = conf["gmpe_sets"][set_name]["site_gmpes"]
else:
selected_site_gmpes = None
if "weights_site_gmpes" in conf["gmpe_sets"][set_name].keys():
if not conf["gmpe_sets"][set_name]["weights_site_gmpes"]:
selected_weights_site_gmpes = None
else:
selected_weights_site_gmpes = conf["gmpe_sets"][set_name][
"weights_site_gmpes"
]
else:
selected_weights_site_gmpes = None
# ---------------------------------------------------------------------
# Import GMPE modules and initialize classes into list
# ---------------------------------------------------------------------
gmpes = []
for g in selected_gmpes:
# This is the old school way of importing the modules; I'm
# leaving it in here temporarily just for documentation.
# mod = import_module(conf['gmpe_modules'][g][1])
# tmpclass = getattr(mod, conf['gmpe_modules'][g][0])
# gmpes.append(tmpclass())
gmpe_name = conf["gmpe_modules"][g][0]
gmpes.append(get_gmpe_from_name(gmpe_name, conf))
# ---------------------------------------------------------------------
# Filter out GMPEs not applicable to this period
# ---------------------------------------------------------------------
if filter_imt is not None:
filtered_gmpes, filtered_wts = filter_gmpe_list(
gmpes, selected_gmpe_weights, filter_imt
)
else:
filtered_gmpes, filtered_wts = gmpes, selected_gmpe_weights
# ---------------------------------------------------------------------
# Import site GMPEs
# ---------------------------------------------------------------------
if selected_site_gmpes is not None:
if isinstance(selected_site_gmpes, str):
selected_site_gmpes = [selected_site_gmpes]
site_gmpes = []
for g in selected_site_gmpes:
# This is the old school way of importing the modules; I'm
# leaving it in here temporarily just for documentation.
# mod = import_module(conf['gmpe_modules'][g][1])
# tmpclass = getattr(mod, conf['gmpe_modules'][g][0])
# site_gmpes.append(tmpclass())
gmpe_name = conf["gmpe_modules"][g][0]
site_gmpes.append(get_gmpe_from_name(gmpe_name, conf))
else:
site_gmpes = None
# ---------------------------------------------------------------------
# Filter out site GMPEs not applicable to this period
# ---------------------------------------------------------------------
if site_gmpes is not None:
if filter_imt is not None:
filtered_site_gmpes, filtered_site_wts = filter_gmpe_list(
site_gmpes, selected_weights_site_gmpes, filter_imt
)
else:
filtered_site_gmpes = copy.copy(site_gmpes)
filtered_site_wts = copy.copy(selected_weights_site_gmpes)
else:
filtered_site_gmpes = None
filtered_site_wts = None
# ---------------------------------------------------------------------
# Construct MultiGMPE
# ---------------------------------------------------------------------
logging.debug(f" filtered_gmpes: {filtered_gmpes}")
logging.debug(f" filtered_wts: {filtered_wts}")
mgmpe = MultiGMPE.__from_list__(
filtered_gmpes,
filtered_wts,
default_gmpes_for_site=filtered_site_gmpes,
default_gmpes_for_site_weights=filtered_site_wts,
imc=IMC,
)
# ---------------------------------------------------------------------
# Append large-distance info if specified
# ---------------------------------------------------------------------
if selected_dist_cutoff is not None:
if filter_imt is not None:
filtered_gmpes_ld, filtered_wts_ld = filter_gmpe_list(
gmpes, selected_weights_large_dist, filter_imt
)
else:
filtered_wts_ld = copy.copy(selected_weights_large_dist)
mgmpe.CUTOFF_DISTANCE = copy.copy(selected_dist_cutoff)
mgmpe.WEIGHTS_LARGE_DISTANCE = copy.copy(filtered_wts_ld)
mgmpe.DESCRIPTION = set_name
return mgmpe
@classmethod
def __from_list__(
cls,
gmpes,
weights,
imc=const.IMC.GREATER_OF_TWO_HORIZONTAL,
default_gmpes_for_site=None,
default_gmpes_for_site_weights=None,
reference_vs30=760,
):
"""
Construct a MultiGMPE instance from lists of GMPEs and weights.
Args:
gmpes (list): List of OpenQuake
`GMPE <http://docs.openquake.org/oq-hazardlib/master/gsim/index.html#built-in-gsims>`__
instances.
weights (list): List of weights; must sum to 1.0.
imc: Requested intensity measure component. Must be one listed
`here <http://docs.openquake.org/oq-hazardlib/master/const.html?highlight=imc#openquake.hazardlib.const.IMC>`__.
The amplitudes returned by the GMPEs will be converted to this
IMT. Default is 'GREATER_OF_TWO_HORIZONTAL', which is used by
ShakeMap. See discussion in
`this section <http://usgs.github.io/shakemap/tg_choice_of_parameters.html#use-of-peak-values-rather-than-mean>`__
of the ShakeMap manual.
default_gmpes_for_site (list):
Optional list of OpenQuake GMPE instance to use as a site term
for any of the GMPEs that do not have a site term.
Notes:
* We do not check for consistency in the reference rock
  definition, so the user needs to be aware of this issue and
  holds responsibility for ensuring compatibility.
* We check whether or not a GMPE has a site term by checking
  the REQUIRES_SITES_PARAMETERS slot for vs30.
default_gmpes_for_site_weights: Weights for default_gmpes_for_site.
Must sum to one and be same length as default_gmpes_for_site.
If None, then weights are set to be equal.
reference_vs30:
Reference rock Vs30 in m/s. We do not check that this matches
the reference rock in the GMPEs so this is the responsibility
of the user.
""" # noqa
# ---------------------------------------------------------------------
# Check that GMPE weights sum to 1.0:
# ---------------------------------------------------------------------
if np.abs(np.sum(weights) - 1.0) > 1e-7:
raise Exception("Weights must sum to one.")
# ---------------------------------------------------------------------
# Check that length of GMPE weights equals length of gmpe list
# ---------------------------------------------------------------------
if len(weights) != len(gmpes):
raise Exception("Length of weights must match length of GMPE list.")
# ---------------------------------------------------------------------
# Check that gmpes is a list of OQ GMPE instances
# ---------------------------------------------------------------------
for g in gmpes:
if not isinstance(g, GMPE):
raise Exception(f'"{g}" is a {type(g)} not a GMPE instance.')
self = cls()
self.GMPES = gmpes
self.WEIGHTS = weights
# ---------------------------------------------------------------------
# Combine the intensity measure types. This is problematic:
# - Logically, we should only include the intersection of the sets
# of imts for the different GMPEs.
# - In practice, this is not feasible because most GMPEs in CEUS and
# subduction zones do not have PGV.
# - So instead we will use the union of the imts and then convert
# to get the missing imts later in get_mean_and_stddevs.
# ---------------------------------------------------------------------
imts = [set(g.DEFINED_FOR_INTENSITY_MEASURE_TYPES) for g in gmpes]
self.DEFINED_FOR_INTENSITY_MEASURE_TYPES = set.union(*imts)
# ---------------------------------------------------------------------
# For VirtualIPE class, we also want to know if ALL of the GMPEs are
# defined for PGV, in which case we will convert from PGV to MI,
# otherwise use PGA or Sa.
# ---------------------------------------------------------------------
haspgv = [PGV in set(g.DEFINED_FOR_INTENSITY_MEASURE_TYPES) for g in gmpes]
self.ALL_GMPES_HAVE_PGV = all(haspgv)
# ---------------------------------------------------------------------
# Store intensity measure types for conversion in get_mean_and_stddevs.
# ---------------------------------------------------------------------
self.IMCs = [g.DEFINED_FOR_INTENSITY_MEASURE_COMPONENT for g in gmpes]
# ---------------------------------------------------------------------
# Store the component
# ---------------------------------------------------------------------
self.DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = imc
# ---------------------------------------------------------------------
# Intersection of GMPE standard deviation types
# ---------------------------------------------------------------------
stdlist = [set(g.DEFINED_FOR_STANDARD_DEVIATION_TYPES) for g in gmpes]
self.DEFINED_FOR_STANDARD_DEVIATION_TYPES = set.intersection(*stdlist)
# ---------------------------------------------------------------------
# Need union of site parameters, but it is complicated by the
# different depth parameter flavors.
# ---------------------------------------------------------------------
sitepars = [set(g.REQUIRES_SITES_PARAMETERS) for g in gmpes]
self.REQUIRES_SITES_PARAMETERS = set.union(*sitepars)
# ---------------------------------------------------------------------
# Construct a list of whether or not each GMPE has a site term
# ---------------------------------------------------------------------
self.HAS_SITE = ["vs30" in g.REQUIRES_SITES_PARAMETERS for g in gmpes]
# ---------------------------------------------------------------------
# Checks and sort out defaults
# ---------------------------------------------------------------------
# things to check if default_gmpes_for_site is provided
if default_gmpes_for_site is not None:
# check that default_gmpe_for_site are OQ GMPEs or None
for g in default_gmpes_for_site:
if not isinstance(g, GMPE):
raise Exception(f'"{g}" is not a GMPE instance.')
# apply default weights if necessary
if default_gmpes_for_site_weights is None:
n = len(default_gmpes_for_site)
default_gmpes_for_site_weights = [1 / n] * n
# Things to check if one or more GMPE does not have a site term
if not all(self.HAS_SITE):
# Raise an exception if no default site is provided
if default_gmpes_for_site is None:
raise Exception(
"Must provide default_gmpes_for_site if one or"
" more GMPE does not have a site term."
)
# If weights are unspecified, use equal weight
if default_gmpes_for_site_weights is None:
default_gmpes_for_site_weights = [
1 / len(default_gmpes_for_site)
] * len(default_gmpes_for_site)
# check that length of default_gmpe_for_site matches length of
# default_gmpe_for_site_weights
if len(default_gmpes_for_site_weights) != len(default_gmpes_for_site):
raise Exception(
"Length of default_gmpes_for_site_weights "
"must match length of default_gmpes_for_site "
"list."
)
# check weights sum to one if needed
if not all(self.HAS_SITE):
if np.sum(default_gmpes_for_site_weights) != 1.0:
raise Exception(
"default_gmpes_for_site_weights must sum" " to one."
)
# Note: if ALL of the GMPEs do not have a site term (requiring Vs30),
# then REQUIRES_SITES_PARAMETERS for the MultiGMPE will not
# include Vs30 even though it will be needed to compute the
# default site term. So if the site checks have passed to this
# point, we should add Vs30 to the set of required site pars:
self.REQUIRES_SITES_PARAMETERS = set.union(
set(self.REQUIRES_SITES_PARAMETERS), set(["vs30"])
)
self.DEFAULT_GMPES_FOR_SITE = default_gmpes_for_site
self.DEFAULT_GMPES_FOR_SITE_WEIGHTS = default_gmpes_for_site_weights
self.REFERENCE_VS30 = reference_vs30
# ---------------------------------------------------------------------
# Union of rupture parameters
# ---------------------------------------------------------------------
ruppars = [set(g.REQUIRES_RUPTURE_PARAMETERS) for g in gmpes]
self.REQUIRES_RUPTURE_PARAMETERS = set.union(*ruppars)
# ---------------------------------------------------------------------
# Union of distance parameters
# ---------------------------------------------------------------------
distpars = [set(g.REQUIRES_DISTANCES) for g in gmpes]
self.REQUIRES_DISTANCES = set.union(*distpars)
return self
def __get_site_factors__(self, sites, rup, dists, imt, default=False):
"""
Method for computing site amplification factors from the default GMPE
to be applied to GMPEs which do not have a site term.
**NOTE** Amps are calculated in natural log units and so the ln(amp)
is returned.
Args:
sites (SitesContext): Instance of SitesContext.
rup (RuptureContext): Instance of RuptureContext.
dists (DistancesContext): Instance of DistancesContext.
imt: An instance openquake.hazardlib.imt.
default (bool): Boolean of whether or not to return the
amplification factors for the gmpes or default_gmpes_for_site.
This argument is primarily intended to be used internally,
when we just need to access the default amplifications to
apply to those GMPEs that do not have site terms.
Returns:
Site amplifications in natural log units.
"""
# ---------------------------------------------------------------------
# Make reference sites context
# ---------------------------------------------------------------------
ref_sites = copy.deepcopy(sites)
ref_sites.vs30 = np.full_like(sites.vs30, self.REFERENCE_VS30)
# TODO: Should we reset the Sites depth parameters here? Probably.
# ---------------------------------------------------------------------
# If default True, construct new MultiGMPE with default GMPE/weights
# ---------------------------------------------------------------------
if default is True:
tmp = MultiGMPE.__from_list__(
self.DEFAULT_GMPES_FOR_SITE,
self.DEFAULT_GMPES_FOR_SITE_WEIGHTS,
self.DEFINED_FOR_INTENSITY_MEASURE_COMPONENT,
)
# ---------------------------------------------------------------------
# If default False, just use self
# ---------------------------------------------------------------------
else:
tmp = self
lmean, lsd = tmp.get_mean_and_stddevs(
sites, rup, dists, imt, list(tmp.DEFINED_FOR_STANDARD_DEVIATION_TYPES)
)
lmean_ref, lsd = tmp.get_mean_and_stddevs(
ref_sites, rup, dists, imt, list(tmp.DEFINED_FOR_STANDARD_DEVIATION_TYPES)
)
lamps = lmean - lmean_ref
return lamps
def __describe__(self):
"""
Construct a dictionary that describes the MultiGMPE.
Note: For simplicity, this method ignores issues related to
GMPEs used for the site term and changes in the GMPE with
distance. For this level of detail, please see the config files.
Returns:
A dictionary representation of the MultiGMPE.
"""
gmpe_dict = {"gmpes": [], "weights": [], "name": self.DESCRIPTION}
for i in range(len(self.GMPES)):
gmpe_dict["weights"].append(self.WEIGHTS[i])
if isinstance(self.GMPES[i], MultiGMPE):
gmpe_dict["gmpes"].append(self.GMPES[i].__describe__())
else:
gmpe_dict["gmpes"].append(str(self.GMPES[i]))
return gmpe_dict
def __inflatePSSigma__(
self, gmpe, lmean, lsd, sites, rup, dists, imt, stddev_types
):
"""
If the point-source to finite-fault factors are used, we need to
inflate the intra-event and total standard deviations. We do this
with standard propagation-of-error techniques: we take the squared
(numerical) derivative of the GMPE with respect to distance, multiply
it by the additional variance from the conversion, add that to the
variance of the GMPE, and then take the square root. We do
this separately for each of Rrup and Rjb and sum the results.
If Rrup and Rjb are calculated from a finite rupture model, their
variance arrays will be "None" and lsd will remain unchanged.
Otherwise the error inflation will be applied. Normally one or the
other of Rrup/Rjb will not be used and so that term will be zero; in
some cases both may be used and both may result in non-zero
derivatives.
Args:
gmpe:
The GMPE to use for the calculations. Must be a base GMPE and
not a GMPE set, otherwise no action is taken.
lmean:
The mean values returned by the "normal" evaluation of the
GMPE.
lsd:
The standard deviations returned by the "normal" evaluation
of the GMPE.
sites:
The sites context required by the GMPE.
rup:
The rupture context required by the GMPE.
dists:
The distance context required by the GMPE.
imt:
The intensity measure type being evaluated.
stddev_types:
The list of stddev types found in lsd.
Returns:
list: A list of arrays of inflated standard deviations
corresponding to the elements of lsd.
"""
new_sd = []
delta_distance = 0.01
delta_var = [0, 0]
for i, dtype in enumerate(("rrup", "rjb")):
# Skip dtype if the gmpe does not require it
if dtype not in gmpe.REQUIRES_DISTANCES:
continue
# Skip dtype if it has not been subject to a point-source to
# finite rupture conversion
dvar = getattr(dists, dtype + "_var", None)
if dvar is None:
continue
# Add a small amount to the rupture distance (rrup or rjb)
# and re-evaluate the GMPE
rup_dist = getattr(dists, dtype)
rup_dist += delta_distance
ctx = stuff_context(sites, rup, dists)
tmean, tsd = gmpe.get_mean_and_stddevs(ctx, ctx, ctx, imt, stddev_types)
# Find the derivative w.r.t. the rupture distance
dm_dr = (lmean - tmean) / delta_distance
# The additional variance is (dm/dr)^2 * dvar
delta_var[i] = dm_dr ** 2 * dvar
# Put the rupture distance back to what it was
rup_dist -= delta_distance
for i, stdtype in enumerate(stddev_types):
if stdtype == const.StdDev.INTER_EVENT:
new_sd.append(lsd[i].copy())
continue
new_sd.append(np.sqrt(lsd[i] ** 2 + delta_var[0] + delta_var[1]))
return new_sd
def filter_gmpe_list(gmpes, wts, imt):
"""
Method to remove GMPEs from the GMPE list that are not applicable
to a specific IMT. Rescales the weights to sum to one.
Args:
gmpes (list): List of GMPE instances.
wts (list): List of floats indicating the weight of the GMPEs.
imt (IMT): OQ IMT to filter GMPE list for.
Returns:
tuple: List of GMPE instances and list of weights.
"""
if wts is None:
n = len(gmpes)
wts = [1 / n] * n
per_max = [np.max(get_gmpe_sa_periods(g)) for g in gmpes]
per_min = [np.min(get_gmpe_sa_periods(g)) for g in gmpes]
if imt == PGA():
sgmpe = [g for g in gmpes if PGA in g.DEFINED_FOR_INTENSITY_MEASURE_TYPES]
swts = [
w
for g, w in zip(gmpes, wts)
if PGA in g.DEFINED_FOR_INTENSITY_MEASURE_TYPES
]
elif imt == PGV():
sgmpe = []
swts = []
for i in range(len(gmpes)):
if (PGV in gmpes[i].DEFINED_FOR_INTENSITY_MEASURE_TYPES) or (
per_max[i] >= 1.0 and per_min[i] <= 1.0
):
sgmpe.append(gmpes[i])
swts.append(wts[i])
else:
per = imt.period
sgmpe = []
swts = []
for i in range(len(gmpes)):
if per_max[i] >= per and per_min[i] <= per:
sgmpe.append(gmpes[i])
swts.append(wts[i])
if len(sgmpe) == 0:
raise KeyError(f"No applicable GMPEs from GMPE list for {str(imt)}")
# Scale weights to sum to one
swts = np.array(swts)
swts = swts / np.sum(swts)
return sgmpe, swts
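# Added usage sketch (hedged, not from the original source). Assuming two
# OpenQuake GMPE instances are available, filtering for a 2.0 s spectral
# acceleration might look like:
#
#     from openquake.hazardlib.imt import SA
#     sub_gmpes, sub_wts = filter_gmpe_list(gmpes, [0.5, 0.5], SA(2.0))
#
# Any GMPE whose coefficient table does not span the 2.0 s period is dropped,
# and the remaining weights are renormalized to sum to one.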
def get_gmpe_sa_periods(gmpe):
"""
Method to extract the SA periods defined by a GMPE.
Args:
gmpe (GMPE): A GMPE instance.
Returns:
list: List of periods.
"""
if gmpe == "[NGAEast]":
per = gmpe.per_array
else:
ctab = get_gmpe_coef_table(gmpe).sa_coeffs
ilist = list(ctab.keys())
per = [i.period for i in ilist]
return per
def get_gmpe_coef_table(gmpe):
"""
Method for finding the (or, if there are several, a) GMPE coefficient table.
Notes:
* The reason for the complexity here is that there can be multiple
coefficient tables, and some of them may not have the sa_coeffs
attribute, which is the main reason for getting the table.
* We are also assuming that if there is more than one coefficient
table, the range of periods will be the same across all of the
tables.
Args:
gmpe (GMPE): An OQ GMPE instance.
Returns:
The associated coefficient table.
"""
stuff = gmpe.__dir__()
coef_list = [s for s in stuff if "COEFFS" in s]
for coef_sel in coef_list:
cobj = getattr(gmpe, coef_sel)
if "sa_coeffs" in cobj.__dir__():
return cobj
raise Exception(f"GMPE {gmpe} does not contain sa_coeffs attribute.")
|
# Author: Mathieu Blondel, 2019
# License: BSD
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
from sklearn.base import BaseEstimator
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import LabelEncoder
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.preprocessing import add_dummy_feature
from sklearn.metrics.pairwise import pairwise_kernels
from polytopes import UnitCube
from polytopes import ProbabilitySimplex
from polytopes import Knapsack
from polytopes import CartesianProduct
from polytopes import Birkhoff
from polytopes import Permutahedron
from polytopes import OrderSimplex
class Reals(object):
def Euclidean_project(self, theta):
# Identity function.
return theta
def MAP(self, theta):
# For ordinal regression only.
return np.round(theta).ravel()
def Shannon_negentropy(u):
mask = u > 0
return np.sum(u[mask] * np.log(u[mask]))
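# Added note (hedged): Shannon_negentropy computes the negative Shannon
# entropy, sum_i u_i * log(u_i), with the usual convention 0 * log 0 = 0
# (enforced by the positivity mask above). It serves as the regularizer
# Omega in the KL branch of the loss in Estimator._solve_lbfgs below,
# i.e. loss = <theta, u> - Omega(u) + Omega(Y) - <Y, theta>, before the
# ridge penalty and the division by n_samples.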
class Estimator(BaseEstimator):
def __init__(self, projection_type="Euclidean", projection_set="unit-cube",
map_set=None, min_labels=0, max_labels=None,
alpha=1.0, fit_intercept=True,
kernel=None, degree=3, coef0=1, gamma=1,
max_iter=500, tol=1e-5,
random_state=None, verbose=0):
self.projection_type = projection_type
self.projection_set = projection_set
self.map_set = map_set
self.min_labels = min_labels
self.max_labels = max_labels
self.alpha = alpha
self.fit_intercept = fit_intercept
self.kernel = kernel
self.degree = degree
self.coef0 = coef0
self.gamma = gamma
self.max_iter = max_iter
self.tol = tol
self.random_state = random_state
self.verbose = verbose
def _get_set(self, name):
d = {
"reals": Reals(),
"unit-cube": UnitCube(),
"simplex": ProbabilitySimplex(),
"Cartesian-cube": CartesianProduct(UnitCube()),
"Cartesian-simplex": CartesianProduct(ProbabilitySimplex()),
"knapsack": Knapsack(min_labels=self.min_labels,
max_labels=self.max_labels),
"Birkhoff": Birkhoff(),
"permutahedron": Permutahedron(),
"order-simplex": OrderSimplex(),
}
if name not in d:
raise ValueError("Invalid polytope / set name.")
return d[name]
def _get_projection_set(self):
return self._get_set(self.projection_set)
def _get_map_set(self):
if self.map_set is None:
map_set = self.projection_set
else:
map_set = self.map_set
return self._get_set(map_set)
def _solve_lbfgs(self, X, Y):
n_samples, n_features = X.shape
# If Y.shape = n_samples x n_classes, then d = n_classes
# If Y.shape = n_samples x n_classes x n_classes, then d = n_classes^2
d = np.prod(Y.shape[1:])
polytope = self._get_projection_set()
Y_flat = Y.reshape(n_samples, -1)
def _func(coef):
coef = coef.reshape(d, n_features)
# n_samples x d
theta = safe_sparse_dot(X, coef.T)
if self.projection_type == "Euclidean":
u = polytope.Euclidean_project(theta)
loss = np.sum(theta * u)
loss -= 0.5 * np.sum(u ** 2)
loss += 0.5 * np.sum(Y ** 2)
elif self.projection_type == "KL":
u = polytope.KL_project(theta)
loss = np.sum(theta * u)
loss -= Shannon_negentropy(u)
loss += Shannon_negentropy(Y)
else:
raise ValueError("Invalid projection type.")
loss -= np.sum(Y_flat * theta)
loss /= n_samples
# d x n_features
grad = safe_sparse_dot(u.T, X)
grad -= safe_sparse_dot(Y_flat.T, X)
grad /= n_samples
# Regularization term
loss += 0.5 * self.alpha * np.sum(coef ** 2)
grad += self.alpha * coef
return loss, grad.ravel()
coef0 = np.zeros(d * n_features, dtype=np.float64)
coef, funcval, infodic = fmin_l_bfgs_b(_func, coef0,
maxiter=self.max_iter)
if infodic["warnflag"] != 0:
print("NOT CONVERGED: ", infodic["task"])
return coef.reshape(d, n_features)
def _kernel(self, X):
return pairwise_kernels(X, self.X_tr_, metric=self.kernel,
degree=self.degree, coef0=self.coef0,
gamma=self.gamma, filter_params=True)
def fit(self, X, Y):
if self.kernel is not None:
self.X_tr_ = X.copy()
X = self._kernel(X)
if self.fit_intercept:
X = add_dummy_feature(X)
if hasattr(Y, "toarray"):
raise ValueError("scipy sparse matrices not supported for Y")
Y = np.array(Y)
self.coef_ = self._solve_lbfgs(X, Y)
return self
def decision_function(self, X):
if self.kernel is not None:
X = self._kernel(X)
if self.fit_intercept:
X = add_dummy_feature(X)
return safe_sparse_dot(X, self.coef_.T)
def predict(self, X, V=None, b=None):
"""
V and/or b can be passed to do calibrated decoding (see paper).
"""
df = self.decision_function(X)
polytope = self._get_projection_set()
if self.projection_type == "Euclidean":
df = polytope.Euclidean_project(df)
elif self.projection_type == "KL":
df = polytope.KL_project(df)
else:
raise ValueError("Projection type not implemented")
if V is not None:
if hasattr(V, "mvec"):
df = -V.mvec(df)
else:
df = df.dot(-V)
if b is not None:
df -= b
return self._get_map_set().MAP(df)
class RegressionEstimator(Estimator):
def __init__(self, *args, **kw):
super(RegressionEstimator, self).__init__(*args, **kw)
self.projection_set = "reals"
def fit(self, X, y):
Y = y.reshape(-1, 1)
return super(RegressionEstimator, self).fit(X, Y)
def predict(self, X):
ret = super(RegressionEstimator, self).predict(X)
return ret.ravel()
class MulticlassEstimator(Estimator):
def __init__(self, *args, **kw):
super(MulticlassEstimator, self).__init__(*args, **kw)
self.projection_set = "simplex"
def fit(self, X, y):
self.label_encoder_ = LabelEncoder().fit(y)
y = self.label_encoder_.transform(y)
lb = LabelBinarizer(neg_label=0)
Y = lb.fit_transform(y)
return super(MulticlassEstimator, self).fit(X, Y)
def predict(self, X):
ret = super(MulticlassEstimator, self).predict(X)
return self.label_encoder_.inverse_transform(ret)
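# Added usage sketch (hedged, not part of the original file): fit the
# multiclass estimator on a tiny toy problem. Assumes scikit-learn, scipy,
# and the local `polytopes` module (whose ProbabilitySimplex provides the
# Euclidean_project and MAP methods used above) are importable; the
# hyperparameter values are arbitrary.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_toy = rng.randn(30, 4)
    y_toy = rng.randint(0, 3, size=30)
    clf = MulticlassEstimator(projection_type="Euclidean", alpha=0.1)
    clf.fit(X_toy, y_toy)
    print("train accuracy:", np.mean(clf.predict(X_toy) == y_toy))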
|
################################################################################
# (c) 2006, The Honeynet Project
# Author: Jed Haile jed.haile@thelogangroup.biz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
################################################################################
# $Id$
class Base(object):
"""
This should be a base class that provides commonly used functions.
I decided to add this late in the game. There are replicated functions
in the classes below that should be moved in here.
"""
def __init__(self):
self.p = None
self.file = None
self.filter = None
self.out = lambda x: str(x)
self.listeners = []
#self.out = None
def setOutput(self, obj):
"""
obj needs to be callable, taking 1 arg
#TODO: check that obj isinstance of Output??
"""
self.out = obj
def doOutput(self, m):
self.out(m)
def setFilter(self, filter):
self.filter = filter
self.p.setfilter(filter)
class Output(object):
"""
This class will provide a generic output interface so we can output
in text, html, whatever.
"""
def write(self, m):
pass
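# Added usage sketch (hedged, not in the original file): a concrete Output
# subclass wired into Base. The write(self, m) signature assumes the message
# is passed as a single argument, mirroring Base.doOutput; the subclass name
# is hypothetical.
class PrintOutput(Output):
    def write(self, m):
        # Write the message to stdout.
        print(m)

if __name__ == "__main__":
    b = Base()
    # setOutput expects a callable taking one argument.
    b.setOutput(PrintOutput().write)
    b.doOutput("hello from Base.doOutput")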
|
from setuptools import setup, find_packages
setup(name='busypenguin',
version='0.0.0',
description='Publish slack notifications for tasks using context managers',
url='https://github.com/kyrias/busypenguin',
author='Johannes Löthberg',
author_email='johannes@kyriasis.com',
license='ISC',
packages=find_packages(),
install_requires=['slackclient'])
|
import argparse
import logging
import os
import sys
from src.utils import construct_new_filename
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=logging.INFO,
)
def main():
a_p = argparse.ArgumentParser(
description="Rename all macOS screenshot files to the pattern %Y-%m-%d-%H-%M-%S."
)
a_p.add_argument(
"-d",
"--directory",
required=True,
)
args = a_p.parse_args()
directory = args.directory
if not os.path.exists(directory):
logging.info(
"no directory exists at '%s'; aborting!",
directory,
)
sys.exit(1)
if not os.path.isdir(directory):
logging.info(
"'%s' exists but is not a directory; aborting!",
directory,
)
sys.exit(2)
logging.info(
"beginning to loop through the contents of directory '%s'",
directory,
)
)
for dir_entry in os.listdir(directory):
source = os.path.join(directory, dir_entry)
if not os.path.isfile(source):
logging.info("'%s' is not a file; skipping it", dir_entry)
continue
else:
new_filename = construct_new_filename(dir_entry)
target = os.path.join(directory, new_filename)
logging.info("'%s' is a file; renaming it to '%s'", dir_entry, new_filename)
os.rename(source, target)
if __name__ == "__main__":
main()
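# Added illustration (hedged): the real construct_new_filename lives in
# src.utils and its implementation is not shown here. The sketch below, with
# a deliberately different name, shows one plausible way to map a default
# macOS screenshot name such as "Screen Shot 2021-01-02 at 13.45.06.png" to
# "2021-01-02-13-45-06.png"; the filename pattern is an assumption.
def _example_construct_new_filename(old_name):
    import re

    match = re.match(
        r"Screen ?[Ss]hot (\d{4}-\d{2}-\d{2}) at (\d{2})\.(\d{2})\.(\d{2})(\.\w+)$",
        old_name,
    )
    if match is None:
        # Leave unrecognized names untouched.
        return old_name
    date, hh, mm, ss, ext = match.groups()
    return f"{date}-{hh}-{mm}-{ss}{ext}"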
|