"""AWS DynamoDB backed storage
In AWS, there will be one Machine executing in the current context, and others
executing elsewhere, as part of the same "session". There is one Controller per
session.
Data per session:
- futures (resolved, value, chain, continuations - machine, offset)
- machines (probe logs, state - ip, stopped flag, stacks, and bindings)
Data exchange points:
- machine forks (State of new machine set to point at the fork IP)
- machine waits on future (continuation added to that future)
- machine checks whether future is resolved
- future resolves (must refresh list of continuations)
- top level machine finishes (Controller sets session result)
- machine stops (upload the State)
- machine continues (download the State)
"""
import functools
import logging
import sys
import time
import warnings
from typing import List, Tuple
from ..machine import future as fut
from ..machine.controller import Controller, ControllerError
from . import ddb_model as db
from .ddb_model import (
AREC,
FUTURE,
META,
PEVENTS,
PLOGS,
STATE,
STDOUT,
PLUGINS_HASH_KEY,
)
try:
from ..machine.executable import Executable
from ..machine.probe import ProbeEvent, ProbeLog
from ..machine.state import State
from ..machine.stdout_item import StdoutItem
except ModuleNotFoundError:
warnings.warn(
"Could not import some components - controller won't be fully functional."
)
LOG = logging.getLogger(__name__)
class DataController(Controller):
supports_plugins = True
@classmethod
def with_new_session(cls):
"""Create a data controller for a new session"""
base_session = db.init_base_session()
this_session = db.new_session()
LOG.info("Created new session, %s", this_session.session_id)
return cls(this_session, base_session)
@classmethod
def with_session_id(cls, session_id: str, db_cls=db.SessionItem):
try:
base_session = db.init_base_session()
this_session = db_cls.get(session_id, META)
except db_cls.DoesNotExist as exc:
raise ControllerError("Session does not exist") from exc
LOG.info("Reloaded session %s", session_id)
return cls(this_session, base_session, db_cls=db_cls)
def __init__(self, this_session, base_session, db_cls=db.SessionItem):
self.SI = db_cls
self.session_id = this_session.session_id
if base_session.meta.exe:
self.executable = Executable.deserialise(base_session.meta.exe)
elif this_session.meta.exe:
self.executable = Executable.deserialise(this_session.meta.exe)
else:
self.executable = None
# It's allowed to initialise a controller with no executable, as
# long as the user calls set_executable before creating a machine.
def _qry(self, group, item_id=None):
"""Retrieve the specified group:item_id"""
_key = f"{group}:{item_id}" if item_id is not None else group
try:
return self.SI.get(self.session_id, _key, consistent_read=True)
except self.SI.DoesNotExist as exc:
raise ControllerError(
f"Item {_key} does not exist in {self.session_id}"
) from exc
@functools.lru_cache
def _lock_item(self, group: str, item_id=None) -> db.SessionLocker:
"""Get a context manager that locks the specified group:item_id"""
item = self._qry(group, item_id)
return db.SessionLocker(item)
def set_executable(self, exe):
self.executable = exe
s = self._qry(META)
s.meta.exe = exe.serialise()
s.save()
LOG.info("Updated session code")
def set_entrypoint(self, fn_name: str):
s = self._qry(META)
s.meta.entrypoint = fn_name
s.save()
## Threads
def new_thread(self) -> int:
"""Create a new thread, returning the thead ID"""
with self._lock_item(META):
s = self._qry(META)
vmid = s.meta.num_threads
s.meta.num_threads += 1
s.meta.stopped.append(False)
s.save()
db.new_session_item(self.session_id, f"{STATE}:{vmid}", state=State([])).save()
db.new_session_item(
self.session_id, f"{FUTURE}:{vmid}", future=fut.Future()
).save()
return vmid
def get_thread_ids(self) -> List[int]:
"""Get a list of thread IDs in this session"""
s = self._qry(META)
return list(range(s.meta.num_threads))
def get_top_level_future(self):
return self.get_future(0)
def is_top_level(self, vmid):
return vmid == 0
def all_stopped(self):
s = self._qry(META)
return all(s.meta.stopped)
def set_stopped(self, vmid, stopped: bool):
with self._lock_item(META):
s = self._qry(META)
s.meta.stopped[vmid] = stopped
s.save()
def set_state(self, vmid, state):
# NOTE: no locking required, no inter-thread state access allowed
s = self._qry(STATE, vmid)
s.state = state
s.save()
def get_state(self, vmid):
return self._qry(STATE, vmid).state
## controller properties
@property
def broken(self):
s = self._qry(META)
return s.meta.broken
@broken.setter
def broken(self, value):
try:
with self._lock_item(META):
s = self._qry(META)
s.meta.broken = value
s.save()
except db.LockTimeout:
# If a thread dies while updating META, this will timeout. However,
# in that case, broken is True and we *should* ignore the lock and
# carry on. If it's broken, the lock doesn't matter anyway.
# Hopefully this isn't a genuine race condition.
if value:
s = self._qry(META)
s.meta.broken = True
s.save()
else:
raise
@property
def result(self):
s = self._qry(META)
return s.meta.result
@result.setter
def result(self, value):
with self._lock_item(META):
s = self._qry(META)
s.meta.result = value
s.save()
## arecs
def new_arec(self):
with self._lock_item(META):
s = self._qry(META)
ptr = s.meta.num_arecs
s.meta.num_arecs += 1
s.save()
return ptr
def set_arec(self, ptr, rec):
try:
s = self._qry(AREC, ptr)
s.arec = rec
except ControllerError:
s = db.new_session_item(self.session_id, f"{AREC}:{ptr}", arec=rec)
s.save()
def get_arec(self, ptr):
return self._qry(AREC, ptr).arec
def increment_ref(self, ptr):
s = self._qry(AREC, ptr)
s.update(actions=[self.SI.arec.ref_count.set(self.SI.arec.ref_count + 1)])
def decrement_ref(self, ptr):
s = self._qry(AREC, ptr)
s.update(actions=[self.SI.arec.ref_count.set(self.SI.arec.ref_count - 1)])
return s.arec
def delete_arec(self, ptr):
s = self._qry(AREC, ptr)
s.arec.deleted = True
def lock_arec(self, ptr):
return self._lock_item(AREC, ptr)
## probes
def set_probe_data(self, vmid, probe):
# FIXME - 400k limit on item size is quite easy to break with events and
# logs.
events = [item.serialise() for item in probe.events]
s = self._qry(PEVENTS)
s.update(actions=[self.SI.pevents.set(self.SI.pevents.append(events))])
logs = [item.serialise() for item in probe.logs]
s = self._qry(PLOGS)
s.update(actions=[self.SI.plogs.set(self.SI.plogs.append(logs))])
def get_probe_logs(self):
s = self._qry(PLOGS)
return [ProbeLog.deserialise(item) for item in s.plogs]
def get_probe_events(self):
s = self._qry(PEVENTS)
return [ProbeEvent.deserialise(item) for item in s.pevents]
## futures
def get_future(self, vmid):
s = self._qry(FUTURE, vmid)
return s.future
def set_future(self, vmid, future: fut.Future):
s = self._qry(FUTURE, vmid)
s.future = future
s.save()
def add_continuation(self, fut_ptr, vmid):
s = self._qry(FUTURE, fut_ptr)
s.update(
actions=[
self.SI.future.continuations.set(
self.SI.future.continuations.append([vmid])
)
]
)
def set_future_chain(self, fut_ptr, chain):
s = self._qry(FUTURE, fut_ptr)
s.update(actions=[self.SI.future.chain.set(chain)])
def lock_future(self, ptr):
return self._lock_item(FUTURE, ptr)
## stdout
def get_stdout(self):
s = self._qry(STDOUT)
return [StdoutItem.deserialise(item) for item in s.stdout]
def write_stdout(self, item):
# Avoid empty strings (DynamoDB can't handle them)
if item.text:
sys.stdout.write(item.text)
s = self._qry(STDOUT)
s.update(
actions=[self.SI.stdout.set(self.SI.stdout.append([item.serialise()]))]
)
@property # Legacy. TODO: remove
def stdout(self):
return self.get_stdout()
## plugin API
def supports_plugin(self, name: str):
# TODO
return True
def add_plugin_future(self, plugin_name: str, plugin_value_id: str) -> str:
"""Add a special kind of future which is resolved by a plugin"""
future_id = f"{plugin_name}:{plugin_value_id}"
# Create a pointer to this session for the plugin to resume
db.new_session_item(
PLUGINS_HASH_KEY, future_id, plugin_future_session=self.session_id,
).save()
# Create the actual future
db.new_session_item(
self.session_id, f"{FUTURE}:{future_id}", future=fut.Future(),
).save()
return future_id
@classmethod
def find_plugin_future(
cls, plugin_name: str, plugin_value_id: str
) -> Tuple[str, str]:
future_id = f"{plugin_name}:{plugin_value_id}"
try:
s = cls.SI.get(PLUGINS_HASH_KEY, future_id)
except cls.SI.DoesNotExist as exc:
raise ControllerError("Future does not exist") from exc
return (s.plugin_future_session, future_id)
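# --- Added usage sketch (not part of the original module). It assumes the DynamoDB
# tables behind ddb_model already exist and that the optional machine imports above
# succeeded; `example_exe` is a hypothetical Executable instance.
def _example_session_flow(example_exe):
    controller = DataController.with_new_session()
    controller.set_executable(example_exe)      # stored in the session META item
    vmid = controller.new_thread()              # also creates STATE:<vmid> and FUTURE:<vmid>
    controller.set_state(vmid, State([]))       # "machine stops": upload the State
    state = controller.get_state(vmid)          # "machine continues": download the State
    controller.set_stopped(vmid, True)
    if controller.is_top_level(vmid) and controller.all_stopped():
        controller.result = "finished"          # top level machine finishes: set session result
    return state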
|
from systemtools.logger import *
from systemtools.duration import *
from systemtools.file import *
from systemtools.basics import *
from systemtools.location import *
from systemtools.system import *
from datastructuretools.basics import ListChunker
from collections import defaultdict
from sparktools.utils import *
from pyspark.sql.functions import lit, rand, randn, col, udf, desc
from pyspark.sql.types import *
from pyspark.ml.feature import HashingTF, IDF, CountVectorizer
from pyspark.ml.linalg import SparseVector, VectorUDT  # used below; may already be provided by the wildcard imports above
def addTermFrequencies(df, vocDir, inputCol="ngrams", targetCol="tf",
minDF=2, chunksSize=10000000,
logger=None, verbose=True, removeInputCol=False, pruneVoc=False, debug=False):
print("DEPRECATED")
exit()
"""
The purpose of this function is to replace CountVectorizer which throw either:
* Remote RPC client disassociated. Likely due to containers exceeding thresholds
* Java heap space
in cases the vocabulary is large (~1 000 000 000 for a Spark cluster of 30 nodes)
or your amount of available RAM is low.
For example when you want to use ngrams >= 2 as the vocabulary.
The default CountVectorizer will share the vocabulary across nodes.
Instead, this function will first split the voc and sum all frequencies for each
vocabulary chunks.
This function take a datframe, will add a "tf" column. You also have to give a directory where
the vocabulary will be stored in multiple files (0.pickle, 1.pickle...).
pruneVoc is very facultative and not mandatory in most cases.
"""
# First we delete the vocabulary which already exists in the vocDir:
for current in sortedGlob(vocDir + "/*.pickle"):
removeFile(current)
# We flatten all the voc and remove duplicates:
log("Starting getting the vocabulary", logger=logger, verbose=verbose)
tt = TicToc(logger=logger, verbose=verbose)
tt.tic(display=False)
vocRDD = df.select(inputCol).rdd.flatMap(lambda x: list(set(x[0])))
# We add a count to each row (each row is a ngram):
vocRDD = vocRDD.map(lambda x: (x, 1))
# Now we count document frequencies for each term:
vocRDD = vocRDD.reduceByKey(lambda v1, v2: v1 + v2)
# We keep only voc elements whose document frequency is >= minDF:
whiteVocRDD = vocRDD.filter(lambda o: o[1] >= minDF)
if pruneVoc:
blackVocRDD = vocRDD.filter(lambda o: o[1] < minDF)
# We collect and chunk the voc so that we do not share the entire voc across Spark nodes:
if chunksSize is None:
whiteVocChunks = [list(whiteVocRDD.keys().collect())]
if pruneVoc:
blackVocChunks = [list(blackVocRDD.keys().collect())]
whiteVocSize = len(whiteVocChunks[0])
if pruneVoc:
blackVocSize = len(blackVocChunks[0])
else:
# ListChunker will serialize in batches (chunks) so the whole content does not need to be kept in memory
# We use rddStreamCollect because the `collect` method of DataFrame loads the entire voc into memory
whiteVocChunks = ListChunker(chunksSize, rddStreamCollect(whiteVocRDD.keys(), chunksSize, logger=logger, verbose=verbose), logger=logger, verbose=verbose)
if pruneVoc:
blackVocChunks = ListChunker(chunksSize, rddStreamCollect(blackVocRDD.keys(), chunksSize, logger=logger, verbose=verbose), logger=logger, verbose=verbose)
whiteVocSize = whiteVocChunks.getTotalSize()
if pruneVoc:
blackVocSize = blackVocChunks.getTotalSize()
# We delete all ngrams which are not in whiteVocChunks:
if pruneVoc:
for blackVocChunk in pb(blackVocChunks, message="Pruning vocabulary black list", logger=logger, verbose=verbose):
blackVocChunk = set(blackVocChunk)
theUdf = udf(lambda ngrams: [token for token in ngrams if token not in blackVocChunk], ArrayType(StringType()))
df = df.withColumn(inputCol, theUdf(df[inputCol]))
# We fill the tf column with zeros:
theUdf = udf(lambda: SparseVector(whiteVocSize, {}), VectorUDT())
df = df.withColumn(targetCol, theUdf())
# We define the udf function:
def __sumTF(ngrams, vector, vocChunkDict, startIndex=0):
"""
This function take ngrams and a vector, it will add frequencies of these ngrams in
the vector at the right index according to the dictionnary of indexes vocChunkDict
and the startIndex.
"""
# We create a default dict of zero integers
values = defaultdict(int) # from collections import defaultdict
# For each ngram:
for ngram in ngrams:
# We check if the ngram exists in the voc:
if ngram in vocChunkDict:
# We find the right index from the entire vocabulary (not only this chunk, so we add startIndex):
index = vocChunkDict[ngram] + startIndex
# We add 1 frequency:
values[index] += 1
# We sum with the previous vector:
vector = sparseVectorAdd(vector, SparseVector(vector.size, dict(values)))
# We return the final vector:
return vector
# We create the start index of each chunk:
startIndex = 0
ngramCountCol = "ngramCount"
# For each white chunk:
for whiteVocChunk in pb(whiteVocChunks, message="Summing term frequencies", logger=logger, verbose=verbose): # We use `pb` to see a progress bar
# We construct the voc as a dict to have access to indexes in O(1):
whiteVocChunkDict = dict()
i = 0
for current in whiteVocChunk:
whiteVocChunkDict[current] = i
i += 1
# We create the udf and give whiteVocChunkDict and startIndex:
theUDF = udf(lambda col1, col2: __sumTF(col1, col2, whiteVocChunkDict, startIndex), VectorUDT())
# We add all frequencies for the current voc chunk:
df = df.withColumn(targetCol, theUDF(df[inputCol], df[targetCol]))
# Here we force Spark to execute the withColumn; otherwise it stays lazy,
# accumulates a lot of withColumn stages because of the `for` loop, and crashes:
df.count()
# df = df.where(col("tf").isNotNull()) # This line produces this error: Invalid PythonUDF <lambda>(), requires attributes from more than one child.
# TODO: find a better way to force execution, e.g. a sum over the tf column... # tmpUdf = udf(lambda x, y: len(x) + len(y), IntegerType())
# tmpUdf = udf(lambda x: len(x.indices), IntegerType())
# df = df.withColumn(ngramCountCol, tmpUdf(df[targetCol]))
# reduce = df.select(["authorialDomain", ngramCountCol]).rdd.reduceByKey(lambda x, y: x + y)
# theSum = 0
# for ad, count in reduce.collect():
# theSum += count
# log("theSum=" + str(theSum), logger=logger, verbose=False)
# And we continue to the next chunk:
startIndex += len(whiteVocChunk)
# We drop the ngramCount column:
try:
df = df.drop(ngramCountCol)
except:
pass
# We drop the ngrams column:
if removeInputCol:
df = df.drop(inputCol)
# We store and reset list chunkers:
mkdir(vocDir)
if isinstance(whiteVocChunks, ListChunker):
if pruneVoc:
blackVocChunks.reset()
whiteVocChunks.copyFiles(vocDir)
log("Voc size: " + str(whiteVocChunks.getTotalSize()), logger, verbose=verbose)
whiteVocChunks.reset()
else:
serialize(whiteVocChunks[0], vocDir + "/0.pickle")
log("Voc size: " + str(len(whiteVocChunks[0])), logger, verbose=verbose)
# We log the end:
tt.toc("We generated the voc and added term frequencies to the DF.")
# We print explain:
if debug:
df.explain()
log("df.storageLevel: " + str(df.storageLevel), logger, verbose=verbose)
# We return all data:
return df
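# Added illustration (a minimal sketch, assuming pyspark.ml.linalg is available): the
# per-chunk idea used by __sumTF above. Each vocabulary chunk maps its ngrams to local
# indexes offset by startIndex, so term frequencies can be accumulated chunk by chunk
# without ever sharing the full vocabulary across the cluster.
def _exampleChunkedTf():
    from pyspark.ml.linalg import SparseVector
    wholeVocSize = 6
    ngrams = ["aa bb", "bb cc", "aa bb"]
    chunkDict = {"aa bb": 0, "bb cc": 1}   # indexes local to this chunk
    startIndex = 2                         # offset of this chunk in the full vocabulary
    values = defaultdict(int)
    for ngram in ngrams:
        if ngram in chunkDict:
            values[chunkDict[ngram] + startIndex] += 1
    return SparseVector(wholeVocSize, dict(values))   # SparseVector(6, {2: 2.0, 3: 1.0})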
def toNgramsFrequency_old(df, inputColName="ngrams", targetColName="tf",
minDF=2, vocabSize=2000000000,
removeInputCol=False):
"""
This replaces the ngrams column by a CountVectorizer column (a sum of n sparse vectors)
"""
cv = CountVectorizer(inputCol=inputColName, outputCol=targetColName, minDF=minDF, vocabSize=vocabSize)
cvModel = cv.fit(df)
voc = cvModel.vocabulary
tfDF = cvModel.transform(df)
# We drop the ngrams column:
if removeInputCol:
try:
tfDF = tfDF.drop(inputColName)
except Exception as e:
logException(e, logger)
return (tfDF, voc)
|
import os
from argparse import Namespace, ArgumentParser
from logging import Logger
from consolebundle.ConsoleCommand import ConsoleCommand
from consolebundle.StrToBool import str2Bool
from databricks_cli.dbfs.api import DbfsApi
from databricks_cli.dbfs.dbfs_path import DbfsPath
class DbfsUploadCommand(ConsoleCommand):
def __init__(
self,
logger: Logger,
dbfsApi: DbfsApi,
):
self.__logger = logger
self.__dbfsApi = dbfsApi
def getCommand(self) -> str:
return 'dbx:dbfs:upload'
def getDescription(self):
return 'Uploads file to DBFS'
def configure(self, argumentParser: ArgumentParser):
argumentParser.add_argument(dest='sourceFilePath', help='Source file path')
argumentParser.add_argument(dest='targetFilePath', help='Target file path')
argumentParser.add_argument('--overwrite', dest='overwrite', type=str2Bool, nargs='?', const=True, default=False, help='Overwrite target file')
def run(self, inputArgs: Namespace):
if os.path.isabs(inputArgs.sourceFilePath):
sourceFilePath = inputArgs.sourceFilePath
else:
sourceFilePath = os.getcwd() + os.sep + inputArgs.sourceFilePath
self.__logger.info(f'Uploading {sourceFilePath} to {inputArgs.targetFilePath}')
self.__dbfsApi.put_file(
sourceFilePath,
DbfsPath(inputArgs.targetFilePath),
inputArgs.overwrite,
)
self.__logger.info('File successfully uploaded')
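# Added usage sketch (an illustration, not part of the bundle): invoking the command
# outside the console runner. `apiClient` is a hypothetical, already authenticated
# databricks_cli ApiClient; normally the console bundle parses the arguments itself.
def exampleUpload(logger: Logger, apiClient) -> None:
    command = DbfsUploadCommand(logger, DbfsApi(apiClient))
    args = Namespace(
        sourceFilePath='data/report.csv',       # resolved relative to os.getcwd() when not absolute
        targetFilePath='dbfs:/tmp/report.csv',
        overwrite=True,
    )
    command.run(args)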
|
from chunkflow.flow.flow import main
main()
|
import textwrap
import requests_mock
import transaction
from purl import URL
from onegov.form import FormCollection
from onegov.pay import PaymentProviderCollection
def test_setup_stripe(client):
client.login_admin()
assert client.app.default_payment_provider is None
with requests_mock.Mocker() as m:
m.post('https://oauth.example.org/register/foo', json={
'token': '0xdeadbeef'
})
client.get('/payment-provider').click("Stripe Connect")
url = URL(m.request_history[0].json()['url'])
url = url.query_param('oauth_redirect_secret', 'bar')
url = url.query_param('code', 'api_key')
m.post('https://connect.stripe.com/oauth/token', json={
'scope': 'read_write',
'stripe_publishable_key': 'stripe_publishable_key',
'stripe_user_id': 'stripe_user_id',
'refresh_token': 'refresh_token',
'access_token': 'access_token',
})
client.get(url.as_string())
provider = client.app.default_payment_provider
assert provider.title == 'Stripe Connect'
assert provider.publishable_key == 'stripe_publishable_key'
assert provider.user_id == 'stripe_user_id'
assert provider.refresh_token == 'refresh_token'
assert provider.access_token == 'access_token'
def test_stripe_form_payment(client):
collection = FormCollection(client.app.session())
collection.definitions.add('Donate', definition=textwrap.dedent("""
E-Mail *= @@@
Donation *=
(x) Small (10 CHF)
( ) Medium (100 CHF)
"""), type='custom', payment_method='free')
providers = PaymentProviderCollection(client.app.session())
providers.add(type='stripe_connect', default=True, meta={
'publishable_key': '0xdeadbeef',
'access_token': 'foobar'
})
transaction.commit()
page = client.get('/form/donate')
page.form['e_mail'] = 'info@example.org'
page = page.form.submit().follow()
assert "Totalbetrag" in page
assert "10.00 CHF" in page
assert "+ 0.59" not in page
assert "Online zahlen und abschliessen" in page
button = page.pyquery('.checkout-button')
assert button.attr('data-stripe-amount') == '1000'
assert button.attr('data-stripe-currency') == 'CHF'
assert button.attr('data-stripe-email') == 'info@example.org'
assert button.attr('data-stripe-description') == 'Donate'
assert button.attr('data-action') == 'submit'
assert button.attr('data-stripe-allowrememberme') == 'false'
assert button.attr('data-stripe-key') == '0xdeadbeef'
with requests_mock.Mocker() as m:
charge = {
'id': '123456'
}
m.post('https://api.stripe.com/v1/charges', json=charge)
m.get('https://api.stripe.com/v1/charges/123456', json=charge)
m.post('https://api.stripe.com/v1/charges/123456/capture', json=charge)
page.form['payment_token'] = 'foobar'
page.form.submit().follow()
with requests_mock.Mocker() as m:
m.get('https://api.stripe.com/v1/charges/123456', json={
'id': '123456',
'captured': True,
'refunded': False,
'paid': True,
'status': 'foobar'
})
client.login_admin()
ticket = client.get('/tickets/ALL/open').click('Annehmen').follow()
assert "Bezahlt" in ticket
payments = client.get('/payments')
assert "FRM-" in payments
assert "Stripe Connect" in payments
assert "info@example.org" in payments
assert "9.41 CHF" in payments
assert "0.59" in payments
def test_stripe_charge_fee_to_customer(client):
collection = FormCollection(client.app.session())
collection.definitions.add('Donate', definition=textwrap.dedent("""
E-Mail *= @@@
Donation *=
(x) Small (10 CHF)
( ) Medium (100 CHF)
"""), type='custom', payment_method='free')
providers = PaymentProviderCollection(client.app.session())
providers.add(type='stripe_connect', default=True, meta={
'publishable_key': '0xdeadbeef',
'access_token': 'foobar',
'user_id': 'foobar'
})
transaction.commit()
client.login_admin()
with requests_mock.Mocker() as m:
m.get('https://api.stripe.com/v1/accounts/foobar', json={
'business_name': 'Govikon',
'email': 'info@example.org'
})
page = client.get('/payment-provider').click("Einstellungen", index=1)
assert 'Govikon / info@example.org' in page
page.form['charge_fee_to_customer'] = True
page.form.submit()
page = client.get('/form/donate')
page.form['e_mail'] = 'info@example.org'
page = page.form.submit().follow()
assert "Totalbetrag" in page
assert "10.00 CHF" in page
assert "+ 0.61 CHF Kreditkarten-Gebühr" in page
assert "Online zahlen und abschliessen" in page
button = page.pyquery('.checkout-button')
assert button.attr('data-stripe-amount') == '1061'
with requests_mock.Mocker() as m:
charge = {
'id': '123456'
}
m.post('https://api.stripe.com/v1/charges', json=charge)
m.get('https://api.stripe.com/v1/charges/123456', json=charge)
m.post('https://api.stripe.com/v1/charges/123456/capture', json=charge)
page.form['payment_token'] = 'foobar'
page.form.submit().follow()
with requests_mock.Mocker() as m:
m.get('https://api.stripe.com/v1/charges/123456', json={
'id': '123456',
'captured': True,
'refunded': False,
'paid': True,
'status': 'foobar'
})
client.login_admin()
ticket = client.get('/tickets/ALL/open').click('Annehmen').follow()
assert "Bezahlt" in ticket
payments = client.get('/payments')
assert "FRM-" in payments
assert "Stripe Connect" in payments
assert "info@example.org" in payments
assert "10.00" in payments
assert "0.61" in payments
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""Microbenchmark benchmark example for TCP connectivity.
Commands to run:
python3 examples/benchmarks/tcp_connectivity.py
"""
from superbench.benchmarks import BenchmarkRegistry
from superbench.common.utils import logger
if __name__ == '__main__':
context = BenchmarkRegistry.create_benchmark_context(
'tcp-connectivity', parameters='--hostfile /tmp/superbench/hostfile.test --port 80 --parallel 1'
)
benchmark = BenchmarkRegistry.launch_benchmark(context)
if benchmark:
logger.info(
'benchmark: {}, return code: {}, result: {}'.format(
benchmark.name, benchmark.return_code, benchmark.result
)
)
|
# coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlUitlaatType(KeuzelijstField):
"""De verschillende types van uitlaat."""
naam = 'KlUitlaatType'
label = 'Uitlaat type'
objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlUitlaatType'
definition = 'De verschillende types van uitlaat.'
codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlUitlaatType'
options = {
'inlaat': KeuzelijstWaarde(invulwaarde='inlaat',
label='inlaat',
definitie='locatie waar water van een open profiel naar een inbuizing overgaat',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlUitlaatType/inlaat'),
'uitlaat': KeuzelijstWaarde(invulwaarde='uitlaat',
label='uitlaat',
definitie='locatie waar water van een inbuizing naar een open profiel overgaat',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlUitlaatType/uitlaat')
}
|
#!/usr/bin/python
"""Summarize the results of many RAPPOR analysis runs.
Takes a list of STATUS.txt files on stdin, and reads the corresponding spec.txt
and log.txt files. Writes a CSV to stdout. Row key is (metric, date).
"""
import collections
import csv
import json
import os
import re
import sys
# Parse bash 'time' output:
# real 0m11.578s
# TODO: Parse the time from metrics.json instead.
TIMING_RE = re.compile(
r'real \s+ (\d+) m ([\d.]+) s', re.VERBOSE)
# TODO: Could have decode-dist and decode-assoc output the PID?
PID_RE = re.compile(
r'write_pid.py: PID (\d+)') # not VERBOSE, spaces are literal
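# Added illustration of TIMING_RE on the sample "real    0m11.578s" from the comment
# above it: 0 minutes and 11.578 seconds, i.e. 11.578 s total.
_timing_example = TIMING_RE.search('real    0m11.578s')
assert _timing_example is not None
assert float(_timing_example.group(1)) * 60 + float(_timing_example.group(2)) == 11.578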
def ParseMemCsv(f):
"""Compute summary stats for memory.
vm5_peak_kib -> max(vm_peak_kib) # over 5 second intervals. Since it uses
the kernel, it's accurate except for tasks that spike in their last 4
seconds.
vm5_mean_kib -> mean(vm_size_kib) # over 5 second intervals
"""
peak_by_pid = collections.defaultdict(list)
size_by_pid = collections.defaultdict(list)
# Parse columns we care about, by PID
c = csv.reader(f)
for i, row in enumerate(c):
if i == 0:
continue # skip header
# looks like timestamp, pid, then (rss, peak, size)
_, pid, _, peak, size = row
if peak != '':
peak_by_pid[pid].append(int(peak))
if size != '':
size_by_pid[pid].append(int(size))
mem_by_pid = {}
# Now compute summaries
pids = peak_by_pid.keys()
for pid in pids:
peaks = peak_by_pid[pid]
vm5_peak_kib = max(peaks)
sizes = size_by_pid[pid]
vm5_mean_kib = sum(sizes) / len(sizes)
mem_by_pid[pid] = (vm5_peak_kib, vm5_mean_kib)
return mem_by_pid
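# Added illustration (csv.reader accepts any iterable of lines, so no file is needed):
# PID 100 peaks at 300 KiB and averages 200 KiB of vm_size over two 5-second samples.
def ExampleParseMem():
    rows = ['timestamp,pid,rss,peak,size',
            '1,100,50,200,150',
            '2,100,60,300,250']
    return ParseMemCsv(rows)  # {'100': (300, 200)}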
def CheckJobId(job_id, parts):
"""Sanity check for date or smoke test."""
if not job_id.startswith('201') and not job_id.startswith('smoke'):
raise RuntimeError(
"Expected job ID to start with '201' or 'smoke': got %r (%s)" %
(job_id, parts))
def ReadStatus(f):
status_line = f.readline().strip()
return status_line.split()[0] # OK, TIMEOUT, FAIL
def CombineDistTaskStatus(stdin, c_out, mem_by_pid):
"""Read status task paths from stdin, write CSV summary to c_out'."""
#util.log('%s', mem_by_pid)
# Parses:
# - input path for metric name and date
# - spec.txt for task params
# - STATUS.txt for task success/failure
# - metrics.json for output metrics
# - log.txt for timing, if it ran to completion
# - and for structured data
# - join with mem by PID
header = (
'job_id', 'params_file', 'map_file',
'metric', 'date',
'vm5_peak_kib', 'vm5_mean_kib', # set when not skipped
'seconds', 'status',
# only set when OK
'num_reports', 'num_rappor', 'allocated_mass',
# only set when failed
'fail_reason')
c_out.writerow(header)
for line in stdin:
#
# Receive a STATUS.txt path on each line of stdin, and parse it.
#
status_path = line.strip()
with open(status_path) as f:
status = ReadStatus(f)
# Path should look like this:
# ~/rappor/cron/2015-05-20__19-22-01/raw/Settings.NewTabPage/2015-05-19/STATUS.txt
parts = status_path.split('/')
job_id = parts[-5]
CheckJobId(job_id, parts)
#
# Parse the job spec
#
result_dir = os.path.dirname(status_path)
spec_file = os.path.join(result_dir, 'spec.txt')
with open(spec_file) as f:
spec_line = f.readline()
# See backfill.sh analyze-one for the order of these 7 fields.
# There are 3 job constants on the front.
(num_reports, metric_name, date, counts_path, params_path,
map_path, _) = spec_line.split()
# NOTE: These are all constant per metric. Could have another CSV and
# join. But denormalizing is OK for now.
params_file = os.path.basename(params_path)
map_file = os.path.basename(map_path)
# remove extension
params_file, _ = os.path.splitext(params_file)
map_file, _ = os.path.splitext(map_file)
#
# Read the log
#
log_file = os.path.join(result_dir, 'log.txt')
with open(log_file) as f:
lines = f.readlines()
# Search lines in reverse order for total time. It could have output from
# multiple 'time' statements, and we want the last one.
seconds = None # for skipped
for i in xrange(len(lines) - 1, -1, -1):
# TODO: Parse the R timing too. Could use LOG_RECORD_RE.
m = TIMING_RE.search(lines[i])
if m:
min_part, sec_part = m.groups()
seconds = float(min_part) * 60 + float(sec_part)
break
# Extract stack trace
if status == 'FAIL':
# Stack trace looks like: "Calls: main -> RunOne ..."
fail_reason = ''.join(line.strip() for line in lines if 'Calls' in line)
else:
fail_reason = None
# Extract PID and join with memory results
pid = None
vm5_peak_kib = None
vm5_mean_kib = None
if mem_by_pid:
for line in lines:
m = PID_RE.match(line)
if m:
pid = m.group(1)
# Could the PID not exist if the process was super short, i.e. ran for less
# than 5 seconds?
try:
vm5_peak_kib, vm5_mean_kib = mem_by_pid[pid]
except KeyError: # sometimes we don't add mem-track on the front
vm5_peak_kib, vm5_mean_kib = None, None
break
else:
pass # we weren't passed memory.csv
#
# Read the metrics
#
metrics = {}
metrics_file = os.path.join(result_dir, 'metrics.json')
if os.path.isfile(metrics_file):
with open(metrics_file) as f:
metrics = json.load(f)
num_rappor = metrics.get('num_detected')
allocated_mass = metrics.get('allocated_mass')
# Construct and write row
row = (
job_id, params_file, map_file,
metric_name, date,
vm5_peak_kib, vm5_mean_kib,
seconds, status,
num_reports, num_rappor, allocated_mass,
fail_reason)
c_out.writerow(row)
def CombineAssocTaskStatus(stdin, c_out):
"""Read status task paths from stdin, write CSV summary to c_out'."""
header = (
'job_id', 'metric', 'date', 'status', 'num_reports',
'total_elapsed_seconds', 'em_elapsed_seconds', 'var1', 'var2', 'd1',
'd2')
c_out.writerow(header)
for line in stdin:
status_path = line.strip()
with open(status_path) as f:
status = ReadStatus(f)
parts = status_path.split('/')
job_id = parts[-6]
CheckJobId(job_id, parts)
#
# Parse the job spec
#
result_dir = os.path.dirname(status_path)
spec_file = os.path.join(result_dir, 'assoc-spec.txt')
with open(spec_file) as f:
spec_line = f.readline()
# See backfill.sh analyze-one for the order of these 7 fields.
# There are 3 job constants on the front.
# 5 job params
(_, _, _, _, _,
dummy_num_reports, metric_name, date, reports, var1, var2, map1,
output_dir) = spec_line.split()
#
# Parse decode-assoc metrics
#
metrics = {}
metrics_file = os.path.join(result_dir, 'assoc-metrics.json')
if os.path.isfile(metrics_file):
with open(metrics_file) as f:
metrics = json.load(f)
# After we run it we have the actual number of reports
num_reports = metrics.get('num_reports')
total_elapsed_seconds = metrics.get('total_elapsed_time')
em_elapsed_seconds = metrics.get('em_elapsed_time')
estimate_dimensions = metrics.get('estimate_dimensions')
if estimate_dimensions:
d1, d2 = estimate_dimensions
else:
d1, d2 = (0, 0) # unknown
row = (
job_id, metric_name, date, status, num_reports, total_elapsed_seconds,
em_elapsed_seconds, var1, var2, d1, d2)
c_out.writerow(row)
def main(argv):
action = argv[1]
try:
mem_csv = argv[2]
except IndexError:
mem_by_pid = None
else:
with open(mem_csv) as f:
mem_by_pid = ParseMemCsv(f)
if action == 'dist':
c_out = csv.writer(sys.stdout)
CombineDistTaskStatus(sys.stdin, c_out, mem_by_pid)
elif action == 'assoc':
c_out = csv.writer(sys.stdout)
CombineAssocTaskStatus(sys.stdin, c_out)
else:
raise RuntimeError('Invalid action %r' % action)
if __name__ == '__main__':
try:
main(sys.argv)
except RuntimeError, e:
print >>sys.stderr, 'FATAL: %s' % e
sys.exit(1)
|
"""Django admin site settings for core models."""
from django.contrib import admin
from pykeg.core import models
from pykeg.core.util import CtoF
class UserAdmin(admin.ModelAdmin):
list_display = (
"username",
"email",
"date_joined",
"last_login",
"is_active",
"is_superuser",
"is_staff",
)
list_filter = ("is_active", "is_superuser", "is_staff")
admin.site.register(models.User, UserAdmin)
class KegbotSiteAdmin(admin.ModelAdmin):
list_display = ("name",)
admin.site.register(models.KegbotSite, KegbotSiteAdmin)
class KegTapAdmin(admin.ModelAdmin):
list_display = ("name", "current_keg", "sort_order")
admin.site.register(models.KegTap, KegTapAdmin)
class KegAdmin(admin.ModelAdmin):
list_display = ("id", "type")
list_filter = ("status",)
search_fields = ("id", "type__name")
admin.site.register(models.Keg, KegAdmin)
class DrinkAdmin(admin.ModelAdmin):
list_display = ("id", "user", "keg", "time")
list_filter = ("keg", "time")
search_fields = ("id", "user__username")
admin.site.register(models.Drink, DrinkAdmin)
class AuthenticationTokenAdmin(admin.ModelAdmin):
list_display = ("auth_device", "user", "token_value", "nice_name", "enabled", "IsActive")
list_filter = ("auth_device", "enabled")
search_fields = ("user__username", "token_value", "nice_name")
admin.site.register(models.AuthenticationToken, AuthenticationTokenAdmin)
class DrinkingSessionAdmin(admin.ModelAdmin):
list_display = ("id", "start_time", "end_time", "volume_ml", "GetTitle")
list_filter = ("start_time",)
search_fields = ("name",)
admin.site.register(models.DrinkingSession, DrinkingSessionAdmin)
class ThermoSensorAdmin(admin.ModelAdmin):
list_display = ("raw_name", "nice_name")
search_fields = list_display
admin.site.register(models.ThermoSensor, ThermoSensorAdmin)
def thermolog_deg_c(obj):
return "%.2f C" % (obj.temp,)
def thermolog_deg_f(obj):
return "%.2f F" % (CtoF(obj.temp),)
class ThermologAdmin(admin.ModelAdmin):
list_display = ("sensor", thermolog_deg_c, thermolog_deg_f, "time")
list_filter = ("sensor", "time")
admin.site.register(models.Thermolog, ThermologAdmin)
class SystemEventAdmin(admin.ModelAdmin):
list_display = ("id", "kind", "time", "user", "drink", "keg", "session")
list_filter = ("kind", "time")
admin.site.register(models.SystemEvent, SystemEventAdmin)
class PictureAdmin(admin.ModelAdmin):
list_display = ("id", "time", "user", "keg", "session", "caption")
list_filter = ("time",)
admin.site.register(models.Picture, PictureAdmin)
admin.site.register(models.Beverage)
admin.site.register(models.BeverageProducer)
admin.site.register(models.Controller)
admin.site.register(models.FlowMeter)
admin.site.register(models.FlowToggle)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 28 05:05:15 2019
@author: FC
"""
import confmap as cm
import numpy as np
im = cm.HyperbolicTiling('./Reflets.jpg',1,'',600,600)
im.transform(sommets=(6,4),nbit=20,backcolor=[255,255,255], delta=1e-3)
|
from interfacebuilder.misc import *
import spglib
class interactive_plot:
def plot_results(self, jitter=0.05):
""" Plots results interactively.
Generates a matplotlib interface that allows selecting the reconstructed stacks and saving them to a file.
Args:
jitter (float, optional): Jitters data points to make picking easier. Defaults to 0.05.
"""
from matplotlib.widgets import Button
import matplotlib.cm as cm
from matplotlib.colors import ListedColormap
def rand_jitter(arr, jitter):
stdev = jitter * (max(arr) - min(arr)) + 0.01
return arr + np.random.randn(len(arr)) * stdev
data = np.array([[i.stress, len(i.atoms)] for i in self.solved], dtype=float)
color = [i.angle for i in self.solved]
norm = matplotlib.colors.Normalize(
vmin=self.angle_limits[0], vmax=self.angle_limits[1], clip=True
)
cmap = matplotlib.colors.LinearSegmentedColormap.from_list(
"", ["darkgreen", "lightgreen", "lightblue", "royalblue"]
)
mapper = cm.ScalarMappable(norm=norm, cmap=cmap)
color = [mapper.to_rgba(v) for v in color]
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=([6.4 * 3, 6.4]))
ax[0].scatter(
rand_jitter(data[:, 0], jitter),
data[:, 1],
color=color,
alpha=0.75,
picker=3.5,
marker=".",
)
clb = plt.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap), ax=ax[0])
clb.set_label(r"$\theta$ in °", rotation=270, labelpad=8)
ax[0].set_ylim(np.min(data[:, 0]) - 0.01, np.max(data[:, 1]) + 0.01)
ax[0].set_ylim(0, ax[0].get_ylim()[1] + 10)
ax[0].set_xlabel(r"$\bar{\varepsilon}_A + \bar{\varepsilon}_B$ in %")
ax[0].set_ylabel("Number of atoms")
ax[0].set_title("Click a point to select structure.")
ax[0].grid(
axis="both", color="lightgray", linestyle="-", linewidth=1, alpha=0.2
)
ax[1].set_yticks([])
ax[1].set_xticks([])
ax[1].set_xlabel("")
ax[1].set_ylabel("")
ax[1].set_frame_on(False)
ax[2].set_yticks([])
ax[2].set_xticks([])
ax[2].set_xlabel("")
ax[2].set_ylabel("")
ax[2].set_frame_on(False)
axbutton = plt.axes([0.8, 0.05, 0.1, 0.05])
axbutton2 = plt.axes([0.65, 0.05, 0.1, 0.05])
fig.canvas.mpl_connect("pick_event", self.__onpick)
def __save(stack):
try:
name = "{}_M{}{}{}{}_N{}{}{}{}_a{:.2f}.xyz".format(
self.current_stack.get_chemical_formula(), *self.current_scdata[1:]
)
stack.write(name, vec_cell=True)
logging.info("Saved structure to {}".format(name))
except:
logging.error("You need to select a point first.")
def __standardize(atoms):
atoms = atoms.copy()
cell = (atoms.get_cell()).tolist()
pos = atoms.get_scaled_positions().tolist()
numbers = atoms.get_atomic_numbers()
cell, scaled_pos, numbers = spglib.standardize_cell(
(cell, pos, numbers),
to_primitive=True,
symprec=1e-4,
no_idealize=False,
)
atoms = ase.atoms.Atoms(
scaled_positions=scaled_pos, numbers=numbers, cell=cell, pbc=True
)
axes = [0, 1, 2]
lengths = atoms.cell.lengths()
order = [x for x, y in sorted(zip(axes, lengths), key=lambda pair: pair[1])]
if order != [0, 1, 2]:
atoms = ase.geometry.permute_axes(atoms, order)
self.current_stack = atoms
self.__plot_stack(atoms, fig, ax[2], self.current_scdata)
save = Button(axbutton, " Save this structure. ")
save.on_clicked(lambda x: __save(self.current_stack))
standard = Button(axbutton2, "spglib standardize")
standard.on_clicked(lambda x: __standardize(self.current_stack))
plt.show()
def __onpick(self, event):
point = event.artist
mouseevent = event.mouseevent
index = event.ind[0]
fig = point.properties()["figure"]
axes = fig.axes
stack = self.solved[index].atoms.copy()
M = self.solved[index].M
N = self.solved[index].N
angle = self.solved[index].angle
m1, m2, m3, m4 = M[0, 0], M[0, 1], M[1, 0], M[1, 1]
n1, n2, n3, n4 = N[0, 0], N[0, 1], N[1, 0], N[1, 1]
scdata = (
int(len(stack)),
int(m1),
int(m2),
int(m3),
int(m4),
int(n1),
int(n2),
int(n3),
int(n4),
float(angle),
index,
)
self.current_scdata = scdata
self.current_stack = stack
self.__plot_stack(stack, fig, axes[2], scdata)
basis1 = self.bottom.atoms.copy()
basis2 = self.top.atoms.copy()
basis2.rotate(angle, v="z", rotate_cell=True)
self.__plot_lattice_points(
fig, axes[1], basis1.cell, basis2.cell, scdata,
)
def __plot_stack(self, stack, fig, axes, scdata):
from ase.visualize.plot import plot_atoms
canvas = fig.canvas
axes.clear()
axes.set_yticks([])
axes.set_xticks([])
axes.set_xlabel("")
axes.set_ylabel("")
scdata = "#{:d}, {:d} atoms, twist angle of {:.2f}°".format(
scdata[-1], len(stack), scdata[-2]
)
axes.set_title(scdata)
plot_atoms(stack, axes, radii=0.3)
axes.set_frame_on(False)
canvas.draw()
def __plot_lattice_points(self, fig, axes, basis1, basis2, scdata):
import itertools
from matplotlib import path, patches
canvas = fig.canvas
axes.clear()
axes.set_yticks([])
axes.set_xticks([])
axes.set_xlabel("")
axes.set_ylabel("")
natoms, m1, m2, m3, m4, n1, n2, n3, n4, angle, _ = scdata
sc1 = np.array([[m1, m2], [m3, m4]])
sc2 = np.array([[n1, n2], [n3, n4]])
N = self.N_translations + 15
def plot_grid(fig, axes, basis, sc, color, fc, ec, alpha):
from matplotlib import path, patches
basis = basis[:2, :2].copy()
a1 = basis[0, :].copy()
a2 = basis[1, :].copy()
p = itertools.product(range(-N + 1, N), range(-N + 1, N))
points = np.array([n[0] * a1 + n[1] * a2 for n in p])
axes.scatter(points[:, 0], points[:, 1], color=color, alpha=alpha, s=3)
SC = sc @ basis
path1 = [(0, 0), (SC[0, :]), (SC[0, :] + SC[1, :]), (SC[1, :]), (0, 0)]
path1 = path.Path(path1)
patch = patches.PathPatch(
path1, facecolor=fc, edgecolor=ec, alpha=alpha, lw=2
)
axes.add_patch(patch)
# first cell
plot_grid(fig, axes, basis1, sc1, "darkred", "none", "darkred", 0.5)
# second cell
plot_grid(fig, axes, basis2, sc2, "darkblue", "none", "darkblue", 0.5)
# supercell
C = sc1 @ basis1[:2, :2] + self.weight * (
sc2 @ basis2[:2, :2] - sc1 @ basis1[:2, :2]
)
it = itertools.product(range(-N + 1, N), range(-N + 1, N))
a1 = C[0, :]
a2 = C[1, :]
points = np.array([n[0] * a1 + n[1] * a2 for n in it])
path3 = [(0, 0), (C[0, :]), (C[0, :] + C[1, :]), (C[1, :]), (0, 0)]
axes.scatter(
points[:, 0],
points[:, 1],
facecolor="none",
edgecolor="tab:green",
s=20,
linewidth=2,
)
p = path.Path(path3)
patch = patches.PathPatch(
p, facecolor="tab:purple", edgecolor="none", lw=2, alpha=0.5
)
axes.add_patch(patch)
path3 = np.array(path3)
xlim = (np.min(path3[:, 0]) - 4, np.max(path3[:, 0] + 4))
ylim = (np.min(path3[:, 1]) - 4, np.max(path3[:, 1] + 4))
axes.axis("equal")
axes.set_xlim(xlim)
axes.set_ylim(ylim)
axes.set_frame_on(False)
scdata = """M = ({: 2d}, {: 2d}, {: 2d}, {: 2d})\nN = ({: 2d}, {: 2d}, {: 2d}, {: 2d})""".format(
m1, m2, m3, m4, n1, n2, n3, n4
)
axes.set_title(scdata)
canvas.draw()
|
import datetime
from python_to_you.extensions.database import db
from sqlalchemy_serializer import SerializerMixin
class Groups(db.Model, SerializerMixin):
__tablename__ = 'groups'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.ForeignKey('users.id', ondelete="CASCADE"))
create_post = db.Column(db.Boolean(), default=False)
edit_post = db.Column(db.Boolean(), default=False)
update_post = db.Column(db.Boolean(), default=False)
read_post = db.Column(db.Boolean(), default=True)
add_post = db.Column(db.Boolean(), default=False)
remove_post = db.Column(db.Boolean(), default=False)
block_post = db.Column(db.Boolean(), default=False)
title = db.Column(db.String(255))
description = db.Column(db.Text())
created_at = db.Column(db.DateTime(), default=datetime.datetime.utcnow)
updated_at = db.Column(db.DateTime())
|
from lxml import html
from time import gmtime, strftime
import requests
import smtplib
import time
#from config import Config
#config = Config()
OTOMOTO_URL = 'https://www.otomoto.pl/osobowe/krakow/kia/ceed/i-2006-2012/-/kombi/?search%5Bfilter_float_price%3Ato%5D=35000&search%5Bfilter_float_year%3Afrom%5D=2010&search%5Bfilter_float_mileage%3Ato%5D=200000&search%5Bfilter_float_engine_power%3Afrom%5D=110&search%5Bfilter_enum_damaged%5D=0&search%5Bfilter_enum_country_origin%5D%5B0%5D=pl&search%5Bfilter_enum_no_accident%5D=1&search%5Bdist%5D=100&search%5Bcountry%5D='
MOBILEDE_URL = 'http://www.mobile.de/pl/samochod/kia/lokalizacja/hanower-niemcy/vhc:car,cnt:de,loc:hanower%2C%2C+niemcy,rng:100,srt:date,sro:desc,ms1:13200__,frn:2010,prx:8000,mlx:150000,pwn:74,dmg:false,vcg:estatecar'
OLX_URL = 'https://www.olx.pl/motoryzacja/samochody/kia/ceed/krakow/?search%5Bfilter_float_price%3Ato%5D=35000&search%5Bfilter_float_year%3Afrom%5D=2010&search%5Bfilter_float_enginesize%3Afrom%5D=1400&search%5Bfilter_float_enginesize%3Ato%5D=2000&search%5Bfilter_float_enginepower%3Afrom%5D=110&search%5Bfilter_float_milage%3Ato%5D=200000&search%5Bfilter_enum_car_body%5D%5B0%5D=estate-car&search%5Bfilter_enum_condition%5D%5B0%5D=notdamaged&search%5Bfilter_enum_country_origin%5D%5B0%5D=pl&search%5Border%5D=created_at%3Adesc&search%5Bdist%5D=100&view=list'
otomoto = 999
mobile = 999
olx = 999
def otomotoCars():
page = requests.get(OTOMOTO_URL)
tree = html.fromstring(page.content)
cars = tree.xpath('//span[@class="counter"]/text()')
print "OTOMOTO: " + cars[0][1:2]
return cars[0][1:2]
def mobileDeCars():
page = requests.get(MOBILEDE_URL)
tree = html.fromstring(page.content)
cars = tree.xpath('//h1[@class="h2 u-text-orange"]/text()')
print "MOBILEDE: " + cars[0][0:1]
return cars[0][0:1]
def olxCars():
page = requests.get(OLX_URL)
tree = html.fromstring(page.content)
cars = tree.xpath('//div[@class="dontHasPromoted section clr rel"]/h2/text()')
print "OLX: " + cars[0][11:12]
return cars[0][11:12]
def otomotoNewCar():
page = requests.get(OTOMOTO_URL)
tree = html.fromstring(page.content)
cars = tree.xpath('//a[@class="offer-title__link"]')
return cars[0].items()[2][1]
def mobileNewCar():
page = requests.get(MOBILEDE_URL)
tree = html.fromstring(page.content)
cars = tree.xpath('//a[@class="vehicle-data track-event u-block js-track-event js-track-dealer-ratings"]')
return "www.mobile.de" + cars[0].items()[6][1]
def olxNewCar():
page = requests.get(OLX_URL)
tree = html.fromstring(page.content)
cars = tree.xpath('//a[@class="marginright5 link linkWithHash detailsLink"]')
print cars[0].items()[0][1]
return cars[0].items()[2][1]
def sendMail(text):
gmail_user = 'xxxx'
gmail_pwd = 'xxxx'
FROM = 'xxxx'
TO = ['xxxx']
SUBJECT = 'Cars'
TEXT = text
# Prepare actual message
message = """From: %s\nTo: %s\nSubject: %s\n\n%s
""" % (FROM, ", ".join(TO), SUBJECT, TEXT)
print message
try:
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.login(gmail_user, gmail_pwd)
server.sendmail(FROM, TO, message)
server.close()
print 'Successfully sent the mail'
except:
print "Failed to send mail"
# MAIN PROGRAM
while 1:
print "Check at " + strftime("%Y-%m-%d %H:%M:%S", gmtime())
try:
newOtomoto = int(otomotoCars())
newMobilede = int(mobileDeCars())
newOlx = int(olxCars())
if otomoto < newOtomoto:
print "NEW CAR IN OTOMOTO!!!"
sendMail("New car in Otomoto\n" + otomotoNewCar())
if mobile < newMobilede:
print "NEW CAR IN MOBILEDE!!!"
sendMail("New car in Mobile.de\n" + mobileNewCar())
if olx < newOlx:
print "NEW CAR IN OLX!!!"
sendMail("New car in OLX.de\n" + olxNewCar())
otomoto = newOtomoto
mobile = newMobilede
olx = newOlx
print "----------------------------"
time.sleep(30)
except:
print "Connecting problems..."
print "Trying again in 30 seconds."
time.sleep(30)
|
import os
import logging
from pathlib import Path
logger = logging.getLogger(__name__)
# Default Directory Paths
# home_dir = os.path.expanduser("~") # Also acceptable
home_dir = str(Path.home())
PROJECT_DIR = os.path.join(home_dir, ".ex05", "ruppasur")
LOG_DIR = os.path.join(PROJECT_DIR, "logs")
DATA_DIR = os.path.join(PROJECT_DIR, "data")
os.makedirs(LOG_DIR, exist_ok=True)
os.makedirs(DATA_DIR, exist_ok=True)
# Logging Configuration
LOG_FILE_PATH = os.path.join(LOG_DIR, "Ram_log.log")
logging.basicConfig(filename=LOG_FILE_PATH,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
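# Added usage sketch: basicConfig above sets no explicit level, so the root logger
# stays at WARNING; the call below appends a line like
# '2024-01-01 12:00:00,000 - <module> - WARNING - Data directory ready: ...' to Ram_log.log.
def _example_log_usage():
    logger.warning("Data directory ready: %s", DATA_DIR)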
|
from django.contrib import admin
from .models import Info, Book, Student, Issue, Reservation, Class
# Register your models here.
admin.site.register(Info)
admin.site.register(Book)
admin.site.register(Class)
admin.site.register(Student)
admin.site.register(Issue)
admin.site.register(Reservation)
|
"""
Stack data structure implementation
"""
class Node:
def __init__(self, data):
self.data = data
self.next = None
class Stack:
def __init__(self):
self.head = None
def push(self, value):
new_head = Node(value)
new_head.next = self.head
self.head = new_head
def pop(self):
if self.is_empty():
return "Stack is empty!"
new_head = self.head.next
pop_data = self.head.data
self.head = new_head
return pop_data
def peek(self):
if self.is_empty():
return "Stack is empty!"
cur_node = self.head
return cur_node.data
def is_empty(self):
return self.head is None
stack = Stack()
stack.push(3)
print(stack.peek())
stack.push(0)
print(stack.peek())
print(stack.pop())
print(stack.peek())
print(stack.is_empty())
print(stack.pop())
print(stack.pop())
|
from django.conf import settings
from django.contrib import admin
from .models import ADGroupMapping, ADGroup, OIDCBackChannelLogoutEvent
@admin.register(ADGroupMapping)
class ADGroupMappingAdmin(admin.ModelAdmin):
pass
admin.site.register(ADGroup)
if getattr(settings, "HELUSERS_BACK_CHANNEL_LOGOUT_ENABLED", False):
@admin.register(OIDCBackChannelLogoutEvent)
class OIDCBackChannelLogoutEventAdmin(admin.ModelAdmin):
list_display = ["iss", "sub", "sid", "created_at"]
readonly_fields = ["created_at"]
search_fields = ["iss", "sub", "sid"]
|
#
# QAPI helper library
#
# Copyright IBM, Corp. 2011
# Copyright (c) 2013 Red Hat Inc.
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
# Markus Armbruster <armbru@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
import re
from ordereddict import OrderedDict
import os
import sys
builtin_types = [
'str', 'int', 'number', 'bool',
'int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64'
]
builtin_type_qtypes = {
'str': 'QTYPE_QSTRING',
'int': 'QTYPE_QINT',
'number': 'QTYPE_QFLOAT',
'bool': 'QTYPE_QBOOL',
'int8': 'QTYPE_QINT',
'int16': 'QTYPE_QINT',
'int32': 'QTYPE_QINT',
'int64': 'QTYPE_QINT',
'uint8': 'QTYPE_QINT',
'uint16': 'QTYPE_QINT',
'uint32': 'QTYPE_QINT',
'uint64': 'QTYPE_QINT',
}
def error_path(parent):
res = ""
while parent:
res = ("In file included from %s:%d:\n" % (parent['file'],
parent['line'])) + res
parent = parent['parent']
return res
class QAPISchemaError(Exception):
def __init__(self, schema, msg):
self.input_file = schema.input_file
self.msg = msg
self.col = 1
self.line = schema.line
for ch in schema.src[schema.line_pos:schema.pos]:
if ch == '\t':
self.col = (self.col + 7) % 8 + 1
else:
self.col += 1
self.info = schema.parent_info
def __str__(self):
return error_path(self.info) + \
"%s:%d:%d: %s" % (self.input_file, self.line, self.col, self.msg)
class QAPIExprError(Exception):
def __init__(self, expr_info, msg):
self.info = expr_info
self.msg = msg
def __str__(self):
return error_path(self.info['parent']) + \
"%s:%d: %s" % (self.info['file'], self.info['line'], self.msg)
class QAPISchema:
def __init__(self, fp, input_relname=None, include_hist=[], parent_info=None):
input_fname = os.path.abspath(fp.name)
if input_relname is None:
input_relname = fp.name
self.input_dir = os.path.dirname(input_fname)
self.input_file = input_relname
self.include_hist = include_hist + [(input_relname, input_fname)]
self.parent_info = parent_info
self.src = fp.read()
if self.src == '' or self.src[-1] != '\n':
self.src += '\n'
self.cursor = 0
self.line = 1
self.line_pos = 0
self.exprs = []
self.accept()
while self.tok != None:
expr_info = {'file': input_relname, 'line': self.line, 'parent': self.parent_info}
expr = self.get_expr(False)
if isinstance(expr, dict) and "include" in expr:
if len(expr) != 1:
raise QAPIExprError(expr_info, "Invalid 'include' directive")
include = expr["include"]
if not isinstance(include, str):
raise QAPIExprError(expr_info,
'Expected a file name (string), got: %s'
% include)
include_path = os.path.join(self.input_dir, include)
if any(include_path == elem[1]
for elem in self.include_hist):
raise QAPIExprError(expr_info, "Inclusion loop for %s"
% include)
try:
fobj = open(include_path, 'r')
except IOError as e:
raise QAPIExprError(expr_info,
'%s: %s' % (e.strerror, include))
exprs_include = QAPISchema(fobj, include,
self.include_hist, expr_info)
self.exprs.extend(exprs_include.exprs)
else:
expr_elem = {'expr': expr,
'info': expr_info}
self.exprs.append(expr_elem)
def accept(self):
while True:
self.tok = self.src[self.cursor]
self.pos = self.cursor
self.cursor += 1
self.val = None
if self.tok == '#':
self.cursor = self.src.find('\n', self.cursor)
elif self.tok in ['{', '}', ':', ',', '[', ']']:
return
elif self.tok == "'":
string = ''
esc = False
while True:
ch = self.src[self.cursor]
self.cursor += 1
if ch == '\n':
raise QAPISchemaError(self,
'Missing terminating "\'"')
if esc:
string += ch
esc = False
elif ch == "\\":
esc = True
elif ch == "'":
self.val = string
return
else:
string += ch
elif self.tok == '\n':
if self.cursor == len(self.src):
self.tok = None
return
self.line += 1
self.line_pos = self.cursor
elif not self.tok.isspace():
raise QAPISchemaError(self, 'Stray "%s"' % self.tok)
def get_members(self):
expr = OrderedDict()
if self.tok == '}':
self.accept()
return expr
if self.tok != "'":
raise QAPISchemaError(self, 'Expected string or "}"')
while True:
key = self.val
self.accept()
if self.tok != ':':
raise QAPISchemaError(self, 'Expected ":"')
self.accept()
if key in expr:
raise QAPISchemaError(self, 'Duplicate key "%s"' % key)
expr[key] = self.get_expr(True)
if self.tok == '}':
self.accept()
return expr
if self.tok != ',':
raise QAPISchemaError(self, 'Expected "," or "}"')
self.accept()
if self.tok != "'":
raise QAPISchemaError(self, 'Expected string')
def get_values(self):
expr = []
if self.tok == ']':
self.accept()
return expr
if not self.tok in [ '{', '[', "'" ]:
raise QAPISchemaError(self, 'Expected "{", "[", "]" or string')
while True:
expr.append(self.get_expr(True))
if self.tok == ']':
self.accept()
return expr
if self.tok != ',':
raise QAPISchemaError(self, 'Expected "," or "]"')
self.accept()
def get_expr(self, nested):
if self.tok != '{' and not nested:
raise QAPISchemaError(self, 'Expected "{"')
if self.tok == '{':
self.accept()
expr = self.get_members()
elif self.tok == '[':
self.accept()
expr = self.get_values()
elif self.tok == "'":
expr = self.val
self.accept()
else:
raise QAPISchemaError(self, 'Expected "{", "[" or string')
return expr
def find_base_fields(base):
base_struct_define = find_struct(base)
if not base_struct_define:
return None
return base_struct_define['data']
# Return the discriminator enum define if discriminator is specified as an
# enum type, otherwise return None.
def discriminator_find_enum_define(expr):
base = expr.get('base')
discriminator = expr.get('discriminator')
if not (discriminator and base):
return None
base_fields = find_base_fields(base)
if not base_fields:
return None
discriminator_type = base_fields.get(discriminator)
if not discriminator_type:
return None
return find_enum(discriminator_type)
def check_union(expr, expr_info):
name = expr['union']
base = expr.get('base')
discriminator = expr.get('discriminator')
members = expr['data']
# If the object has a member 'base', its value must name a complex type.
if base:
base_fields = find_base_fields(base)
if not base_fields:
raise QAPIExprError(expr_info,
"Base '%s' is not a valid type"
% base)
# If the union object has no member 'discriminator', it's an
# ordinary union.
if not discriminator:
enum_define = None
# Else if the value of member 'discriminator' is {}, it's an
# anonymous union.
elif discriminator == {}:
enum_define = None
# Else, it's a flat union.
else:
# The object must have a member 'base'.
if not base:
raise QAPIExprError(expr_info,
"Flat union '%s' must have a base field"
% name)
# The value of member 'discriminator' must name a member of the
# base type.
discriminator_type = base_fields.get(discriminator)
if not discriminator_type:
raise QAPIExprError(expr_info,
"Discriminator '%s' is not a member of base "
"type '%s'"
% (discriminator, base))
enum_define = find_enum(discriminator_type)
# Do not allow string discriminator
if not enum_define:
raise QAPIExprError(expr_info,
"Discriminator '%s' must be of enumeration "
"type" % discriminator)
# Check every branch
for (key, value) in members.items():
# If this named member's value names an enum type, then all members
# of 'data' must also be members of the enum type.
if enum_define and not key in enum_define['enum_values']:
raise QAPIExprError(expr_info,
"Discriminator value '%s' is not found in "
"enum '%s'" %
(key, enum_define["enum_name"]))
# TODO: add checking for values. The key is checked as above; the value could
# also be checked here, but we need more functions to handle the array case.
def check_exprs(schema):
for expr_elem in schema.exprs:
expr = expr_elem['expr']
if expr.has_key('union'):
check_union(expr, expr_elem['info'])
def parse_schema(input_file):
try:
schema = QAPISchema(open(input_file, "r"))
except (QAPISchemaError, QAPIExprError), e:
print >>sys.stderr, e
exit(1)
exprs = []
for expr_elem in schema.exprs:
expr = expr_elem['expr']
if expr.has_key('enum'):
add_enum(expr['enum'], expr['data'])
elif expr.has_key('union'):
add_union(expr)
elif expr.has_key('type'):
add_struct(expr)
exprs.append(expr)
# Try again for hidden UnionKind enum
for expr_elem in schema.exprs:
expr = expr_elem['expr']
if expr.has_key('union'):
if not discriminator_find_enum_define(expr):
add_enum('%sKind' % expr['union'])
try:
check_exprs(schema)
except QAPIExprError, e:
print >>sys.stderr, e
exit(1)
return exprs
def parse_args(typeinfo):
if isinstance(typeinfo, basestring):
struct = find_struct(typeinfo)
assert struct != None
typeinfo = struct['data']
for member in typeinfo:
argname = member
argentry = typeinfo[member]
optional = False
structured = False
if member.startswith('*'):
argname = member[1:]
optional = True
if isinstance(argentry, OrderedDict):
structured = True
yield (argname, argentry, optional, structured)
def de_camel_case(name):
new_name = ''
for ch in name:
if ch.isupper() and new_name:
new_name += '_'
if ch == '-':
new_name += '_'
else:
new_name += ch.lower()
return new_name
def camel_case(name):
new_name = ''
first = True
for ch in name:
if ch in ['_', '-']:
first = True
elif first:
new_name += ch.upper()
first = False
else:
new_name += ch.lower()
return new_name
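# Added illustration of the two name helpers above: de_camel_case() turns a CamelCase
# schema name into a C-style identifier, camel_case() goes the other way for
# dash/underscore-separated names.
assert de_camel_case('EnumName') == 'enum_name'
assert camel_case('enum-name') == 'EnumName'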
def c_var(name, protect=True):
# ANSI X3J11/88-090, 3.1.1
c89_words = set(['auto', 'break', 'case', 'char', 'const', 'continue',
'default', 'do', 'double', 'else', 'enum', 'extern', 'float',
'for', 'goto', 'if', 'int', 'long', 'register', 'return',
'short', 'signed', 'sizeof', 'static', 'struct', 'switch',
'typedef', 'union', 'unsigned', 'void', 'volatile', 'while'])
# ISO/IEC 9899:1999, 6.4.1
c99_words = set(['inline', 'restrict', '_Bool', '_Complex', '_Imaginary'])
# ISO/IEC 9899:2011, 6.4.1
c11_words = set(['_Alignas', '_Alignof', '_Atomic', '_Generic', '_Noreturn',
'_Static_assert', '_Thread_local'])
# GCC http://gcc.gnu.org/onlinedocs/gcc-4.7.1/gcc/C-Extensions.html
# excluding _.*
gcc_words = set(['asm', 'typeof'])
# C++ ISO/IEC 14882:2003 2.11
cpp_words = set(['bool', 'catch', 'class', 'const_cast', 'delete',
'dynamic_cast', 'explicit', 'false', 'friend', 'mutable',
'namespace', 'new', 'operator', 'private', 'protected',
'public', 'reinterpret_cast', 'static_cast', 'template',
'this', 'throw', 'true', 'try', 'typeid', 'typename',
'using', 'virtual', 'wchar_t',
# alternative representations
'and', 'and_eq', 'bitand', 'bitor', 'compl', 'not',
'not_eq', 'or', 'or_eq', 'xor', 'xor_eq'])
# namespace pollution:
polluted_words = set(['unix', 'errno'])
if protect and (name in c89_words | c99_words | c11_words | gcc_words | cpp_words | polluted_words):
return "q_" + name
return name.replace('-', '_').lstrip("*")
def c_fun(name, protect=True):
return c_var(name, protect).replace('.', '_')
def c_list_type(name):
return '%sList' % name
def type_name(name):
if type(name) == list:
return c_list_type(name[0])
return name
enum_types = []
struct_types = []
union_types = []
def add_struct(definition):
global struct_types
struct_types.append(definition)
def find_struct(name):
global struct_types
for struct in struct_types:
if struct['type'] == name:
return struct
return None
def add_union(definition):
global union_types
union_types.append(definition)
def find_union(name):
global union_types
for union in union_types:
if union['union'] == name:
return union
return None
def add_enum(name, enum_values = None):
global enum_types
enum_types.append({"enum_name": name, "enum_values": enum_values})
def find_enum(name):
global enum_types
for enum in enum_types:
if enum['enum_name'] == name:
return enum
return None
def is_enum(name):
return find_enum(name) != None
def c_type(name):
if name == 'str':
return 'char *'
elif name == 'int':
return 'int64_t'
elif (name == 'int8' or name == 'int16' or name == 'int32' or
name == 'int64' or name == 'uint8' or name == 'uint16' or
name == 'uint32' or name == 'uint64'):
return name + '_t'
elif name == 'size':
return 'uint64_t'
elif name == 'bool':
return 'bool'
elif name == 'number':
return 'double'
elif type(name) == list:
return '%s *' % c_list_type(name[0])
elif is_enum(name):
return name
elif name == None or len(name) == 0:
return 'void'
elif name == name.upper():
return '%sEvent *' % camel_case(name)
else:
return '%s *' % name
def genindent(count):
ret = ""
for i in range(count):
ret += " "
return ret
indent_level = 0
def push_indent(indent_amount=4):
global indent_level
indent_level += indent_amount
def pop_indent(indent_amount=4):
global indent_level
indent_level -= indent_amount
def cgen(code, **kwds):
indent = genindent(indent_level)
lines = code.split('\n')
lines = map(lambda x: indent + x, lines)
return '\n'.join(lines) % kwds + '\n'
def mcgen(code, **kwds):
return cgen('\n'.join(code.split('\n')[1:-1]), **kwds)
def basename(filename):
return filename.split("/")[-1]
def guardname(filename):
guard = basename(filename).rsplit(".", 1)[0]
for substr in [".", " ", "-"]:
guard = guard.replace(substr, "_")
return guard.upper() + '_H'
def guardstart(name):
return mcgen('''
#ifndef %(name)s
#define %(name)s
''',
name=guardname(name))
def guardend(name):
return mcgen('''
#endif /* %(name)s */
''',
name=guardname(name))
# ENUMName -> ENUM_NAME, EnumName1 -> ENUM_NAME1
# ENUM_NAME -> ENUM_NAME, ENUM_NAME1 -> ENUM_NAME1, ENUM_Name2 -> ENUM_NAME2
# ENUM24_Name -> ENUM24_NAME
def _generate_enum_string(value):
c_fun_str = c_fun(value, False)
if value.isupper():
return c_fun_str
new_name = ''
l = len(c_fun_str)
for i in range(l):
c = c_fun_str[i]
# When c is upper and no "_" appears before, do more checks
if c.isupper() and (i > 0) and c_fun_str[i - 1] != "_":
# Case 1: next string is lower
# Case 2: previous string is digit
if (i < (l - 1) and c_fun_str[i + 1].islower()) or \
c_fun_str[i - 1].isdigit():
new_name += '_'
new_name += c
return new_name.lstrip('_').upper()
def generate_enum_full_value(enum_name, enum_value):
abbrev_string = _generate_enum_string(enum_name)
value_string = _generate_enum_string(enum_value)
return "%s_%s" % (abbrev_string, value_string)
|
from audobject.core.testing import (
TestObject,
)
|
# Random walk on an orthogonal grid
import turtle as t
import random as r
import math
wn = t.Screen()
wn.setup(width = 800, height = 800)
wn.colormode(255)
wn.bgcolor(50, 50, 50)
wn.title("Random-Walk (1)")
SL = 3  # step length
def random_walk(x, y):
step = r.choice(["N", "S", "E", "W"])
if step == "N":
y += SL
elif step == "S":
y -= SL
elif step == "E":
x += SL
elif step == "W":
x -= SL
else:
print("Es ist was faul im Staate Dänemark")
return(x, y)
def distance(a, b):
return(math.sqrt(a**2 + b**2))/SL
colors = ["white", "yellow", "orange", "green", "blue", "purple", "red"]
alex = t.Turtle()
alex.speed(0)
x, y = 0, 0
for i in range(5000):
if distance(x, y) < 10:
color = 0
elif distance(x, y) < 20:
color = 1
elif distance(x, y) < 30:
color = 2
elif distance(x, y) < 40:
color = 3
elif distance(x, y) < 50:
color = 4
elif distance(x, y) < 75:
color = 5
else:
color = 6
alex.pencolor(colors[color])
alex.width(2)
x, y = random_walk(x, y)
alex.goto(x, y)
if i > 100 and distance(x, y) < 2:
print(i, distance(x, y))
print(distance(x, y))
wn.mainloop()
|
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 Jean-François Boismenu
#
# See LICENSE at the root of this project for more info.
from emnes.nes import NES
from emnes.cartridge_reader import CartridgeReader
from emnes.cpu import CPU
from emnes.memory_bus import MemoryBus
__all__ = ["NES", "CartridgeReader", "CPU", "MemoryBus"]
|
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.kubernetes.base_spec_check import BaseK8Check
class Tiller(BaseK8Check):
def __init__(self):
name = "Ensure that Tiller (Helm v2) is not deployed"
id = "CKV_K8S_34"
# Location: container .image
supported_kind = ['containers', 'initContainers']
categories = [CheckCategories.KUBERNETES]
super().__init__(name=name, id=id, categories=categories, supported_entities=supported_kind)
def get_resource_id(self, conf):
return f'{conf["parent"]} - {conf["name"]}'
def scan_spec_conf(self, conf):
return CheckResult.FAILED if self.is_tiller(conf) else CheckResult.PASSED
@staticmethod
def is_tiller(conf):
if "image" in conf:
conf_image = conf["image"]
if isinstance(conf_image,str) and "tiller" in conf_image:
return True
if "parent_metadata" in conf:
if "labels" in conf["parent_metadata"]:
if "app" in conf["parent_metadata"]["labels"]:
if conf["parent_metadata"]["labels"]["app"] == "helm":
return True
elif "name" in conf["parent_metadata"]["labels"]:
if conf["parent_metadata"]["labels"]["name"] == "tiller":
return True
return False
check = Tiller()
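# Minimal illustration (not part of the checkov sources; the container dict
# below is hypothetical): a container whose image references tiller is
# reported as FAILED, anything else as PASSED.
#
#   conf = {"name": "tiller", "parent": "Deployment.kube-system.tiller-deploy",
#           "image": "gcr.io/kubernetes-helm/tiller:v2.16.1"}
#   check.scan_spec_conf(conf)   # -> CheckResult.FAILED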
|
class ClassCollection(list):
def get(self, **kwargs):
return self.klass.get(**kwargs)
|
# The MIT License (MIT)
# Copyright (c) 2022 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import abc
from typing import Optional, Dict, Union, Sequence, List
from xcube.util.assertions import assert_given, assert_false
class Node(abc.ABC):
"""An abstract XML node."""
@abc.abstractmethod
def add(self, *elements: 'Element') -> 'Node':
"""
Adds child elements to this element.
:param elements: Child elements
"""
pass
@abc.abstractmethod
def to_xml(self, indent: int = 2) -> str:
"""
Converts this node into valid XML text using UTF-8 encoding.
:param indent: Indent in spaces. Defaults to 2.
"""
pass
def __str__(self):
"""Calls to_xml() with default arguments."""
return self.to_xml()
class Element(Node):
"""
An XML element node.
:param tag: Tag name
:param attrs: Attributes
:param text: Text
:param elements: Child elements
"""
def __init__(self,
tag: str,
attrs: Optional[Dict[str, str]] = None,
text: Optional[Union[str, Sequence[str]]] = None,
elements: Optional[Sequence['Element']] = None):
assert_given(tag, name='tag')
assert_false(text and elements,
message='text and elements are mutually exclusive')
self._tag = tag
self._text = text
self._attrs = dict(attrs) if attrs else {}
self._elements = list(elements) if elements else []
def add(self, *elements: 'Element') -> 'Element':
self._elements.extend(elements)
return self
def to_xml(self, indent: int = 2) -> str:
lines = []
self._to_xml(indent, 0, lines)
return '\n'.join(lines)
def _to_xml(self, indent: int, level: int, lines: List[str]):
tab = indent * ' '
tabs = level * tab
line = f'{tabs}<{self._tag}'
attrs = self._attrs
text = self._text
elements = self._elements
if attrs:
for k, v in attrs.items():
line += f' {k}="{v}"'
if text:
if isinstance(text, str):
lines.append(f'{line}>{text}</{self._tag}>')
else:
lines.append(line + '>')
for t in text:
lines.append(f'{tabs}{tab}{t}')
lines.append(f'{tabs}</{self._tag}>')
elif elements:
lines.append(line + '>')
for node in elements:
node._to_xml(indent, level + 1, lines)
lines.append(f'{tabs}</{self._tag}>')
else:
lines.append(line + '/>')
class Document(Node):
"""
An XML document.
:param root: The only root element.
"""
def __init__(self, root: Element):
self._root = root
def add(self, *elements: 'Element') -> 'Document':
self._root.add(*elements)
return self
def to_xml(self, indent: int = 2) -> str:
        xml = self._root.to_xml(indent)
return f'<?xml version="1.0" encoding="UTF-8"?>\n{xml}\n'
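# Minimal usage sketch (illustrative only, not part of the xcube sources):
# build a tiny document with the classes above and render it as XML.
if __name__ == '__main__':
    doc = Document(
        Element('Service',
                attrs={'version': '1.0'},
                elements=[
                    Element('Title', text='Example service'),
                    Element('Keywords', text=['xml', 'xcube']),
                ])
    )
    # Child elements and multi-line text are indented by two spaces per level.
    print(doc.to_xml())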
|
from api.tests.ver2.test_base import TestBase
from api.ver2.utils.strings import v2_url_prefix
from api.strings import status_key, data_key, error_key, status_404, \
status_400, status_200
from api.tests.ver2.test_data.register_test_data import *
from api.tests.ver2.test_data.office_test_data import correct_office
from api.tests.ver2.test_data.party_test_data import correct_party
from api.tests.ver2.test_data.signup_test_data\
import user_with_correct_signup_data, \
user_with_correct_signup_data_2
from api.strings import status_201
class TestRegister(TestBase):
def setUp(self):
""" setup objects required for these tests """
super().setUp()
self.client.post(
v2_url_prefix + '/auth/signup',
json=user_with_correct_signup_data
) # user
self.client.post(
v2_url_prefix + '/parties',
json=correct_party, headers=self.admin_headers)
self.client.post(
v2_url_prefix + '/offices',
json=correct_office, headers=self.admin_headers)
# clear all lists after tests
def tearDown(self):
super().tearDown()
# close the db connection
def test_register(self):
res = self.client.post(
v2_url_prefix + '/office/1/register',
json=correct_candidate_infor,
headers=self.admin_headers
)
data = res.get_json()
self.assertEqual(data[status_key], status_201)
self.assertEqual(
data[data_key][0][candidate_key],
correct_candidate_infor[candidate_key])
self.assertEqual(res.status_code, status_201)
def test_user_not_found(self):
res = self.client.post(
v2_url_prefix + '/office/1/register',
json=candidate_id_unexisting_infor,
headers=self.admin_headers
)
data = res.get_json()
self.assertEqual(data[status_key], status_404)
self.assertEqual(data[error_key], 'Selected User does not exist')
self.assertEqual(res.status_code, status_404)
def test_party_not_found(self):
res = self.client.post(
v2_url_prefix + '/office/1/register',
json=party_id_unexisting_info,
headers=self.admin_headers
)
data = res.get_json()
self.assertEqual(data[status_key], status_404)
self.assertEqual(data[error_key], 'Selected Party does not exist')
self.assertEqual(res.status_code, status_404)
def test_office_not_found(self):
res = self.client.post(
v2_url_prefix + '/office/100000/register',
json=correct_candidate_infor,
headers=self.admin_headers
)
data = res.get_json()
self.assertEqual(data[status_key], status_404)
self.assertEqual(data[error_key], 'Selected Office does not exist')
self.assertEqual(res.status_code, status_404)
def test_candidate_is_registered(self):
self.client.post(
v2_url_prefix + '/office/1/register',
json=correct_candidate_infor,
headers=self.admin_headers
)
res = self.client.post(
v2_url_prefix + '/office/1/register',
json=correct_candidate_infor,
headers=self.admin_headers
)
data = res.get_json()
self.assertEqual(data[status_key], status_400)
self.assertEqual(data[error_key], 'Candidate is already registered')
self.assertEqual(res.status_code, status_400)
def test_candidates_same_party_and_office(self):
self.client.post(
v2_url_prefix + '/auth/signup',
json=user_with_correct_signup_data_2,
headers=self.admin_headers
) # another user
self.client.post(
v2_url_prefix + '/office/1/register',
json=correct_candidate_infor,
headers=self.admin_headers
)
res = self.client.post(
v2_url_prefix + '/office/1/register',
json=correct_candidate_infor_2,
headers=self.admin_headers
)
data = res.get_json()
self.assertEqual(data[status_key], status_400)
self.assertEqual(
data[error_key],
'Two candidates from the same Party cannot be vie for one office')
self.assertEqual(res.status_code, status_400)
def test_get_all_candidates(self):
self.client.post(
v2_url_prefix + '/office/1/register',
json=correct_candidate_infor,
headers=self.admin_headers
)
res = self.client.get(
v2_url_prefix + '/candidates',
headers=self.admin_headers)
data = res.get_json()
self.assertEqual(data[status_key], status_200)
self.assertEqual(len(data[data_key]), 1)
self.assertEqual(res.status_code, status_200)
def test_get_all_candidates_no_data(self):
res = self.client.get(v2_url_prefix + '/candidates',
headers=self.admin_headers)
data = res.get_json()
self.assertEqual(data[status_key], status_200)
self.assertEqual(len(data[data_key]), 0)
self.assertEqual(res.status_code, status_200)
def test_get_candidates_by_office(self):
self.client.post(
v2_url_prefix + '/office/1/register',
json=correct_candidate_infor,
headers=self.admin_headers
)
res = self.client.get(v2_url_prefix + '/office/1/register',
headers=self.admin_headers)
data = res.get_json()
self.assertEqual(data[status_key], status_200)
self.assertEqual(len(data[data_key]), 1)
self.assertEqual(data[data_key][0]['id'], 1)
self.assertEqual(res.status_code, status_200)
def test_get_single_candidate_not_found(self):
res = self.client.get(
v2_url_prefix + '/candidates/1',
headers=self.admin_headers)
data = res.get_json()
self.assertEqual(data[status_key], status_404)
self.assertEqual(data['error'], 'Candidate not found')
self.assertEqual(res.status_code, status_404)
|
from src.model.charactors import Charactor
class Aggressive(Charactor):
""" Any person or entity that can fight.
"""
def __init__(self):
        super().__init__()
|
# To run this, download the BeautifulSoup zip file
# http://www.py4e.com/code3/bs4.zip
# and unzip it in the same directory as this file
from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl
import re
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
num_content=0
url = input('Enter - ')
html = urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, "html.parser")
# Retrieve all of the span tags and sum their integer contents
tags = soup('span')
for tag in tags:
    # Each span's first content item is the number to accumulate
    num_content = int(tag.contents[0]) + num_content
print(num_content)
|
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
def run_tf_example_A():
# EXAMPLE CODE
# https://www.tensorflow.org/probability/api_docs/python/tfp/mcmc/sample_annealed_importance_chain
tfd = tfp.distributions
# Run 100 AIS chains in parallel
num_chains = 100
dims = 20
dtype = np.float32
proposal = tfd.MultivariateNormalDiag(
loc=tf.zeros([dims], dtype=dtype))
target = tfd.TransformedDistribution(
distribution=tfd.Gamma(concentration=dtype(2),
rate=dtype(3)),
bijector=tfp.bijectors.Invert(tfp.bijectors.Exp()),
event_shape=[dims])
chains_state, ais_weights, kernels_results = (
tfp.mcmc.sample_annealed_importance_chain(
num_steps=1000,
proposal_log_prob_fn=proposal.log_prob,
target_log_prob_fn=target.log_prob,
current_state=proposal.sample(num_chains),
make_kernel_fn=lambda tlp_fn: tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=tlp_fn,
step_size=0.2,
num_leapfrog_steps=2)))
log_estimated_normalizer = (tf.reduce_logsumexp(ais_weights)
- np.log(num_chains))
log_true_normalizer = tf.math.lgamma(2.) - 2. * tf.math.log(3.)
print("True", log_true_normalizer)
print("Estimated", log_estimated_normalizer)
def run_test_ising_3spin(beta=2.0, nsteps=10, nchains=100):
# EXAMPLE CODE
# N = 3 spins
# Jij = [[0, 1, 1], [1, 0, 1], [1, 1, 0]] # should behave like ferromagnet
# Z for this case is computable exactly
# Run 100 AIS chains in parallel
num_chains = nchains
dims = 1 # use p-form continuous rep. for integral
dims_N = 3
dtype = tf.float32
# fix target model
#Jij = np.array([[0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 0.0]]) # TODO add diagonals compare
Jij = np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) # TODO add diagonals compare, will change ln Z by adding beta/2.0 * Tr(J)
WEIGHTS = np.array([[1.0, 1.0, 1.0]]).T
Jij_tf = tf.convert_to_tensor(Jij, dtype=dtype)
WEIGHTS_tf = tf.convert_to_tensor(WEIGHTS, dtype=dtype)
"""
def proposal_log_prob_fn(*states):
# given vector size N ints, return scalar for each chain
#fvals = [1.0] * len(states)
fvals = tf.ones(len(states), dtype=tf.float32)
return fvals
def target_log_prob_fn(*states):
# TODO 1: the state must be binary but appears as floats (when printed during the sim)
# maybe use Metropolis Hastings instead of HMC (requires continuous p(x))
# TODO 2: if the state must be continuous, maybe we switch to the p-dim hidden variable form and treat the integrand as e^S(h) and use S(h) as our log-prob
# given vector size N ints, return scalar for each chain
fvals = [0.0] * len(states)
for idx, state in enumerate(states):
print(Jij_tf)
print('BINARY?', state)
#print(tf.tensordot(Jij_tf, state, 1))
negative_energy = 0.5 * tf.tensordot(state, tf.tensordot(Jij_tf, state, 1), 1)
print(negative_energy)
fvals[idx] = beta * negative_energy
fvals = tf.convert_to_tensor(fvals, dtype=tf.float32)
return fvals
init_state = [0] * num_chains
for idx in range(num_chains):
sample_01_convention = np.random.binomial(1, 0.5, 3) # this should sample the uniform distribution on 3 spins
sample = sample_01_convention * 2 - 1
init_state[idx] = tf.convert_to_tensor(sample, dtype=dtype)
"""
tfd = tfp.distributions
proposal = tfd.MultivariateNormalDiag(loc=tf.zeros([dims], dtype=dtype))
proposal_log_prob_fn = proposal.log_prob
target_log_prob_const = dims_N * tf.math.log( 2.0 ) - (dims / 2.0) * tf.math.log( 2.0 * np.pi / beta)
print("target_log_prob_const", target_log_prob_const)
def target_log_prob_fn(hidden_states):
# TODO 1: the state must be binary but appears as floats (when printed during the sim)
# maybe use Metropolis Hastings instead of HMC (requires continuous p(x))
# TODO 2: if the state must be continuous, maybe we switch to the p-dim hidden variable form and treat the integrand as e^S(h) and use S(h) as our log-prob
# given vector size N ints, return scalar for each chain
fvals = [0.0] * len(hidden_states)
# TODO tensor speedup test with p > 1
for idx, hidden in enumerate(hidden_states):
term1 = tf.tensordot(hidden, hidden, 1)
cosh_arg = beta * tf.tensordot(WEIGHTS_tf, hidden, 1)
log_cosh_vec = tf.math.log( tf.math.cosh(cosh_arg) )
term2 = tf.math.reduce_sum(log_cosh_vec)
fvals[idx] = - (beta / 2.0) * term1 + term2
fvals = tf.convert_to_tensor(fvals, dtype=dtype) + target_log_prob_const
return fvals
# draw 100 samples from the proposal distribution
init_state = proposal.sample(num_chains)
#print(type(init_state))
#print(init_state)
#print('.........')
chains_state, ais_weights, kernels_results = (
tfp.mcmc.sample_annealed_importance_chain(
num_steps=nsteps,
proposal_log_prob_fn=proposal_log_prob_fn,
target_log_prob_fn=target_log_prob_fn,
current_state=init_state,
make_kernel_fn=lambda tlp_fn: tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=tlp_fn,
step_size=0.2,
num_leapfrog_steps=2)))
log_estimated_normalizer = (tf.reduce_logsumexp(ais_weights)
- np.log(num_chains))
# compute true analytically
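    # For N = 3 spins with J_ij = 1 for all i, j, the exact partition function is
    #   Z = sum_{s in {-1,+1}^3} exp((beta / 2) * s^T J s),
    # i.e. an explicit sum over the 2^3 = 8 configurations enumerated below.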
states = [np.array([-1, -1, -1]),
np.array([-1, -1, 1]),
np.array([-1, 1, -1]),
np.array([-1, 1, 1]),
np.array([ 1, -1, -1]),
np.array([ 1, -1, 1]),
np.array([ 1, 1, -1]),
np.array([ 1, 1, 1])]
beta = beta # TODO care
boltz_factors = [np.exp(0.5 * beta * np.dot(s.T, np.dot(Jij, s))) for s in states]
Z = np.sum(boltz_factors)
log_true_normalizer = np.log(Z)
print("True", log_true_normalizer)
print("Estimated", log_estimated_normalizer)
return log_estimated_normalizer
if __name__ == '__main__':
#print("Running example A...")
#run_tf_example_A()
print("Running example B...")
#log_estimated_normalizer = run_test_ising_3spin()
nn = 10
runs = [0] * nn
for idx in range(nn):
runs[idx] = run_test_ising_3spin(beta=2.0)
print(runs)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from roca.detect import RocaFingerprinter, flatten, drop_none, AutoJSONEncoder
import random
import base64
import unittest
import pkg_resources
__author__ = 'dusanklinec'
class FprintTest(unittest.TestCase):
"""Simple Fingerprint tests"""
def __init__(self, *args, **kwargs):
super(FprintTest, self).__init__(*args, **kwargs)
self.inputs = []
def setUp(self):
"""
Loads testing certs
:return:
"""
fls = pkg_resources.resource_listdir(__name__, 'data')
fls = [x for x in fls if
x.endswith('.pem') or
x.endswith('.txt') or
x.endswith('.pub') or
x.endswith('.pgp') or
x.endswith('.p7s')]
for fname in fls:
self.inputs.append((fname, self._get_res(fname)))
def tearDown(self):
"""
Cleanup
:return:
"""
def _get_res(self, name):
"""
Loads resource
:param name:
:return:
"""
resource_package = __name__
resource_path = '/'.join(('data', name))
return pkg_resources.resource_string(resource_package, resource_path)
def test_fprint(self):
"""
Test fingerprints
:return:
"""
positive_samples = ['mod01.txt', 'mod02.txt', 'mod03.txt', 'mod08.txt', 'mod09.txt', 'key04.pgp',
'cert04.pem', 'cert05.pem']
self.assertGreaterEqual(len(self.inputs), 19, 'Some inputs are missing')
fprinter = RocaFingerprinter()
for fname, data in self.inputs:
ret = drop_none(flatten(fprinter.process_file(data, fname)))
self.assertGreaterEqual(len(ret), 1, 'At least one result expected')
if fname.endswith('.txt'):
                self.assertEqual(len(ret), 1, 'Hex mod input expected result count is 1, not %s' % len(ret))
self.assertEqual('mod-hex', ret[0].type, 'File type detection failed')
for sub in ret:
self.assertIsNone(sub.error, 'Unexpected error with file %s : %s' % (fname, sub.error))
self.assertEqual(fname, sub.fname, 'Filename mismatch')
self.assertIsNotNone(sub.n, 'Modulus is empty')
self.assertGreaterEqual(len(sub.n), 10, 'Modulus is too short')
if fname in positive_samples:
self.assertTrue(sub.marked, 'False negative detection on fingerprinted modulus: %s' % fname)
else:
self.assertFalse(sub.marked, 'False positive detection on non-fingerprinted modulus %s' % fname)
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
from transitions.extensions import GraphMachine
from telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove)
class TocMachine(GraphMachine):
def __init__(self, **machine_configs):
self.machine = GraphMachine(
model = self,
**machine_configs
)
#print(self.machine.states)
self.states = self.machine.states
self.models = self.machine.models
self.topic = 'null'
self.num = 0
self.imgid=''
def going_to_test(self,update):
text = update.message.text
return text.lower() == 'test'
def on_enter_test(self,update):
update.message.reply_text('Okay, thanks')
self.go_back(update)
def is_going_to_getimage(self, update):
text = update.message.text
return text.lower() == 'guide dog'
def is_going_to_image_verify(self, update):
text = update.message.text
return text.lower() == 'yes' or text.lower() == 'no'
def on_enter_image_verify(self, update):
text = update.message.text
if text.lower()=='yes':
update.message.reply_text('Okay, thanks')
self.go_back(update)
else:
update.message.reply_text('Please tell me what you see, or what else did you see?')
#self.advance(update)
def on_enter_image_end(self, update):
text = update.message.text
if text.lower()!='':
self.go_back(update)
def is_going_to_news(self, update):
text = update.message.text
return text.lower() == 'instant news'
def is_going_to_state1(self, update):
text = update.message.text
self.topic = 'w'
return text.lower() == 'world'
def is_going_to_state2(self, update):
text = update.message.text
self.topic = 's'
return text.lower() == 'sports'
def is_going_to_state3(self, update):
text = update.message.text
self.topic = 'e'
return text.lower() == 'entertainment'
def is_going_to_search(self, update):
text = update.message.text
self.topic = 'q'
return text.lower() == 'search'
def is_going_to_lang_sel_EN(self, update):
text = update.message.text
self.lang = 'us'
return text.lower() == 'english'
def is_going_to_lang_sel_TW(self, update):
text = update.message.text
self.lang = 'tw'
return text.lower() == 'chinese'
def is_going_to_numOfPost(self, update):
text = update.message.text
#num = 0
try:
self.num = int(text)
return True
except ValueError:
return False
def is_going_to_fortune(self, update):
text = update.message.text
return text.lower() == 'inspire my day'
def is_going_to_fortune_lang(self, update):
text = update.message.text
if text.lower()=='english':
self.lang='en'
else:
self.lang='tw'
return text.lower() == 'english' or text.lower() == 'chinese'
def is_going_to_fortune_ACC(self, update):
text = update.message.text
return text.lower() == 'yes' or text.lower() == 'no'
def on_enter_fortune_ACC(self, update):
text = update.message.text
if text.lower()=='yes':
self.go_back(update)
else:
self.loop_back(update)
def on_enter_user(self, update):
user_name = update.message.chat.first_name +' '+ update.message.chat.last_name
text = "Hi! " + user_name +" I'm bot!!\nWhat would you like to see?"
reply_keyboard = [['Instant News'], ['Image Talks'], ['Fortune']]
#update.message.reply_text(text, reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))
def on_enter_state1(self, update):
self.query = False
update.message.reply_text("World news Selected")
#handle_language(update)
#reply_keyboard = [['English', 'Chinese']]
#text = 'What language would you prefer?'
#update.message.reply_text(text, reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))
#self.go_back(update)
def on_enter_state2(self, update):
self.query = False
update.message.reply_text("Sports selected")
#handle_language(update)
#self.go_back(update)
def on_enter_state3(self, update):
self.query = False
update.message.reply_text("Entertainment Selected")
#handle_language(update)
#self.go_back(update)
def on_enter_search_key(self, update):
self.query = True
self.topic = update.message.text
def on_enter_news_end(self, update):
self.go_back(update)
def on_exit_state1(self, update):
print('Leaving state1')
def on_exit_state2(self, update):
print('Leaving state2')
def on_exit_state3(self, update):
print('Leaving state3')
def handle_language(self, update):
reply_keyboard = [['English', 'Chinese']]
text = 'What language would you prefer?'
update.message.reply_text(text, reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))
|
from itertools import chain
import logging
import sys
from pyspark.sql import functions as f
from pyspark.sql.session import SparkSession
from pyspark.sql.window import Window
from pyspark.sql.types import ArrayType, DoubleType, StringType
from pyspark.ml.feature import Binarizer, Bucketizer, VectorAssembler
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.evaluation import BinaryClassificationEvaluator
import numpy as np
import pandas as pd
spark = SparkSession.builder.getOrCreate()
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
logger = logging.getLogger(__name__)
log_handler = logging.StreamHandler(sys.stdout)
log_handler.setFormatter(logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
log_handler.setLevel(logging.DEBUG)
logger.addHandler(log_handler)
logger.setLevel(logging.DEBUG)
input_path = '/data/measures/'
output_path = '/data/data_science/master_file/'
def load_datasets():
hba1c = spark.read.parquet(input_path + 'hba1c/hba1c_results_*.parquet')
unplanned = spark.read.parquet(input_path + 'unplanned_admission/unplanned_admission_results_*.parquet')
raf_hcc = spark.read.parquet('/data/data_science/raf/NW_diag_HCC_raf_new_V22.parquet')
pcw = spark.read.parquet(input_path + 'pcw/results.parquet')
pcp20 = spark.read.parquet('/data/attribution/attribution_2020.parquet')
return hba1c, unplanned, raf_hcc, pcw, pcp20
def write_output(df):
logger.info("CREATING MASTER DATASET")
logger.info("WRITING: {}".format(output_path + "master_v2.parquet"))
df.write.mode('overwrite').parquet(output_path + 'master_v2.parquet')
return df
def main():
hba1c, unplanned, raf_hcc, pcw, pcp20 = load_datasets()
# source_year_expr = f.substring(f.col('source_file'), 72, 4)
# print(master.show(3, truncate=False))
hba1c = hba1c.withColumnRenamed('numerator', 'numerator_hba1c')
unplanned = unplanned.withColumnRenamed('numerator','numerator_unp')
pcw = pcw.withColumnRenamed('numerator','numerator_pcw')
pcw = pcw.select('member_id', 'numerator_pcw')
hba1c = hba1c.select('member_id', 'numerator_hba1c')
unplanned = unplanned.select('member_id', 'numerator_unp')
chbp20 = chbp20.select('member_id', 'numerator_chbp')
pcp20 = pcp20.select('member_id', 'provider_npi', 'provider_specialty', 'count_of_visits', 'latest_visit_date')
raf_hcc = raf_hcc.select('BENE_MBI_ID', 'BENE_AGE', 'BENE_SEX_CD', 'concat_elig', 'oerc', 'source_year', 'claim_year', 'hcc_lst', 'risk_score')
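    # NOTE (assumption): the joins that assemble the member-level modelling
    # dataframe `df` (and the `chbp20`/`master` inputs referenced nearby) are
    # not shown in this script; the steps below assume `df` already holds the
    # joined features plus the `outcome` column.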
df = df.fillna(0)
df = df.withColumn('medicaid_flag', df['medicaid_flag'].cast('integer'))
df = df.withColumn('outcome', df['outcome'].cast(DoubleType()))
feature_cols = [col for col in df.columns if col not in ['member_id', 'outcome']]
remove_feature_cols = []
feature_cols = list(set(feature_cols) - set(remove_feature_cols))
'''
##########################################################################
Before SMOTE model
##########################################################################
'''
print('\n \n')
print('=============================================================== \n')
print('Before SMOTE Model results ')
print('=============================================================== \n')
# train, test = df.randomSplit([0.8, 0.2], seed=12345)
dataset_size = float(df.select("outcome").count())
numPositives = df.select("outcome").where('outcome == 1').count()
per_ones = (float(numPositives) / float(dataset_size)) * 100
print('The number of ones are {}'.format(numPositives))
print('Percentage of ones are {}'.format(per_ones))
bucketizer = Bucketizer(splits=[15, 30, 38, 55], inputCol='age', outputCol='age_groups')
df_buck = bucketizer.setHandleInvalid('keep').transform(df)
df_buck = df_buck.withColumn('age_group_31-38', f.when(f.col('age_groups') == 1.0, f.lit(1)).otherwise(f.lit(0)))
df_buck = df_buck.withColumn('age_group_38-55', f.when(f.col('age_groups') == 2.0, f.lit(1)).otherwise(f.lit(0)))
binarizer = Binarizer(threshold=0.5, inputCol='outcome', outputCol='label')
binarizedDF = binarizer.transform(df_buck)
binarizedDF = binarizedDF.drop('outcome', 'age', 'age_groups')
feature_cols1 = [col for col in binarizedDF.columns if col not in ['member_id', 'label']]
assembler = VectorAssembler(inputCols=feature_cols1, outputCol='features')
assembled = assembler.transform(binarizedDF)
print(assembled.describe().show(vertical=True))
(trainData, testData) = assembled.randomSplit([0.75, 0.25], seed=42)
print('Distribution of Ones and Zeros in trainData is: ', trainData.groupBy('label').count().take(3))
lr = LogisticRegression(labelCol='label', featuresCol='features', maxIter=100)
lrModel = lr.fit(trainData)
print("Intercept: " + str(lrModel.intercept))
modelcoefficients = np.array(lrModel.coefficients)
names = [x['name']
for x in sorted(trainData.schema["features"]
.metadata["ml_attr"]["attrs"]['numeric'], key=lambda x: x['idx'])]
matchcoefs = np.column_stack((modelcoefficients, np.array(names)))
matchcoefsdf = pd.DataFrame(matchcoefs)
matchcoefsdf.columns = ['Coefvalue', 'Feature']
print(matchcoefsdf)
predictions = lrModel.transform(testData)
results = predictions.select('probability', 'prediction', 'label')
print(results.show(10, truncate=False))
evaluator = BinaryClassificationEvaluator()
print('Test Data Area under ROC score is : ',
evaluator.evaluate(predictions))
accuracy = predictions.filter(
predictions.label == predictions.prediction).count() / float(predictions.count())
print('Accuracy : ', accuracy)
# compute TN, TP, FN, and FP
print(predictions.groupBy('label', 'prediction').count().show())
# Calculate the elements of the confusion matrix
TN = predictions.filter('prediction = 0 AND label = prediction').count()
TP = predictions.filter('prediction = 1 AND label = prediction').count()
FN = predictions.filter('prediction = 0 AND label <> prediction').count()
FP = predictions.filter('prediction = 1 AND label <> prediction').count()
# calculate accuracy, precision, recall, and F1-score
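    # NOTE: the +1 terms below act as crude zero-division guards; the standard
    # definitions are accuracy = (TP + TN) / (TP + TN + FP + FN),
    # precision = TP / (TP + FP), recall = TP / (TP + FN).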
accuracy = (TN + TP) / (TN + TP + FN + FP + 1)
precision = TP / (TP + FP + 1)
recall = TP / (TP + FN + 1)
F = 2 * (precision * recall) / (precision + recall + 1)
    print('\n precision: %0.3f' % precision)
    print('\n recall: %0.3f' % recall)
    print('\n accuracy: %0.3f' % accuracy)
    print('\n F1 score: %0.3f' % F)
print('\n \n')
write_output(master)
if __name__ == "__main__":
logger.info('START')
main()
logger.info('END')
|
from __future__ import absolute_import
import sys
import os
from datetime import date
from utilities.TestClass import TestClass
import swagger_client
from swagger_client.api.measure_evaluation_api import MeasureEvaluationApi
from swagger_client.rest import ApiException
# Set global variables in the file that can be accessed by all tests within the class
reqdata_path = "/bzt-configs/tests/src/main/resources/config/"
measurezip_path = "/bzt-configs/tests/src/main/resources/cql/measure-zip/"
class MeasureEvaluationAPITests(TestClass):
def test_evaluateMeasureWithSNOMEDCodes(self):
measureEvaluationApi = MeasureEvaluationApi(swagger_client.ApiClient(self.configuration))
version = date.today()
request_data = reqdata_path + "over60ColonoscopyFromCode-request-data.json"
measure = measurezip_path + "over60_and_had_colonoscopy_from_code_v1_1_1.zip"
resp = measureEvaluationApi.evaluate_measure(version, request_data, measure, _preload_content=False)
response = resp.read()
response = response.decode('utf-8')
print("response is : " + str(response))
status = resp.status
print("status from response is : " + str(status))
assert '200' in str(status), 'Should contain 200 and Measure report returned.'
expectedResp = '{"resourceType":"MeasureReport","extension":[{"url":"http://ibm.com/fhir/cdm/StructureDefinition/measure-parameter-value","valueParameterDefinition":{"extension":[{"url":"http://ibm.com/fhir/cdm/StructureDefinition/parameter-value","valuePeriod":{"start":"2019-03-18T00:00:00.000+00:00","end":"2020-09-18T23:59:59.999+00:00"}}],"name":"Measurement Period","use":"in","type":"Period"}},{"url":"http://ibm.com/fhir/cdm/StructureDefinition/measure-parameter-value","valueParameterDefinition":{"extension":[{"url":"http://ibm.com/fhir/cdm/StructureDefinition/parameter-value","valueString":"ProductLine"}],"name":"Product Line","use":"in","type":"string"}}],"status":"complete","type":"individual","measure":"Measure/Over60AndHadColonscopyFromCode-1.1.1","subject":{"reference":"Patient/00ce7acb-5daa-3509-2e9f-211976bc70e1"},"period":{"start":"2019-03-18T00:00:00.000+00:00","end":"2020-09-18T23:59:59.999+00:00"},"group":[{"population":[{"code":{"coding":[{"system":"http://terminology.hl7.org/CodeSystem/measure-population","code":"initial-population"}]},"count":1},{"code":{"coding":[{"system":"http://terminology.hl7.org/CodeSystem/measure-population","code":"numerator"}]},"count":1},{"code":{"coding":[{"system":"http://terminology.hl7.org/CodeSystem/measure-population","code":"denominator"}]},"count":1}],"measureScore":{"value":1.0}}]}'
assert expectedResp in response, 'Response should contain ' + expectedResp
def test_evaluateMeasureWithValueSets(self):
measureEvaluationApi = MeasureEvaluationApi(swagger_client.ApiClient(self.configuration))
version = date.today()
request_data = reqdata_path + "over60ColonoscopyFromVS-request-data.json"
measure = measurezip_path + "over60_and_had_colonoscopy_from_vs_v1_1_1.zip"
resp = measureEvaluationApi.evaluate_measure(version, request_data, measure, _preload_content=False)
response = resp.read()
response = response.decode('utf-8')
print("response is : " + str(response))
status = resp.status
print("status from response is : " + str(status))
assert '200' in str(status), 'Should contain 200 and Measure report returned'
expectedResp = '{"resourceType":"MeasureReport","extension":[{"url":"http://ibm.com/fhir/cdm/StructureDefinition/measure-parameter-value","valueParameterDefinition":{"extension":[{"url":"http://ibm.com/fhir/cdm/StructureDefinition/parameter-value","valuePeriod":{"start":"2019-03-18T00:00:00.000+00:00","end":"2020-09-18T23:59:59.999+00:00"}}],"name":"Measurement Period","use":"in","type":"Period"}},{"url":"http://ibm.com/fhir/cdm/StructureDefinition/measure-parameter-value","valueParameterDefinition":{"extension":[{"url":"http://ibm.com/fhir/cdm/StructureDefinition/parameter-value","valueString":"ProductLine"}],"name":"Product Line","use":"in","type":"string"}}],"status":"complete","type":"individual","measure":"Measure/Over60andHadColonoscopyFromVS-1.1.1","subject":{"reference":"Patient/00ce7acb-5daa-3509-2e9f-211976bc70e1"},"period":{"start":"2019-03-18T00:00:00.000+00:00","end":"2020-09-18T23:59:59.999+00:00"},"group":[{"population":[{"code":{"coding":[{"system":"http://terminology.hl7.org/CodeSystem/measure-population","code":"initial-population"}]},"count":1},{"code":{"coding":[{"system":"http://terminology.hl7.org/CodeSystem/measure-population","code":"numerator"}]},"count":1},{"code":{"coding":[{"system":"http://terminology.hl7.org/CodeSystem/measure-population","code":"denominator"}]},"count":1}],"measureScore":{"value":1.0}}]}'
assert expectedResp in response, 'Response should contain ' + expectedResp
def test_evaluateMeasureWithMissingValueSetOnTenant(self):
measureEvaluationApi = MeasureEvaluationApi(swagger_client.ApiClient(self.configuration))
version = date.today()
request_data = reqdata_path + "colColoRectalCancerScreening-request-data.json"
measure = measurezip_path + "col_colorectal_cancer_screening_1.0.0.zip"
try:
resp = measureEvaluationApi.evaluate_measure(version, request_data, measure, _preload_content=False)
result = resp.read()
except ApiException as e:
print("Exception when calling MeasureEvaluationApi->evaluate_measure: %s\n" % e)
result = str(e)
print("Exception in test_evaluateMeasureWithMissingValueSetOnTenant is : " + result)
assert '400' in result, 'Should contain 400 error due to IllegalArgumentException caught due to unresolved value set reference.'
def test_evaluateMeasureWithInvalidPatientIdValue(self):
measureEvaluationApi = MeasureEvaluationApi(swagger_client.ApiClient(self.configuration))
version = date.today()
request_data = reqdata_path + "over60ColonoscopyFromVS-invalid-patientId-request-data.json"
measure = measurezip_path + "over60_and_had_colonoscopy_from_vs_v1_1_1.zip"
try:
resp = measureEvaluationApi.evaluate_measure(version, request_data, measure, _preload_content=False)
result = resp.read()
except ApiException as e:
print("Exception when calling MeasureEvaluationApi->evaluate_measure: %s\n" % e)
result = str(e)
print("Exception in test_evaluateMeasureWithInvalidPatientIdValue is : " + result)
assert '400' in result, 'Should contain 400 error due to exception communicating with FHIR server as specified PatientId not found on server.'
def test_evaluateMeasureWithInvalidMeasureIdValue(self):
measureEvaluationApi = MeasureEvaluationApi(swagger_client.ApiClient(self.configuration))
version = date.today()
request_data = reqdata_path + "over60ColonoscopyFromVS-invalid-measureId-request-data.json"
measure = measurezip_path + "over60_and_had_colonoscopy_from_vs_v1_1_1.zip"
try:
resp = measureEvaluationApi.evaluate_measure(version, request_data, measure, _preload_content=False)
result = resp.read()
except ApiException as e:
print("Exception when calling MeasureEvaluationApi->evaluate_measure: %s\n" % e)
result = str(e)
print("Exception in test_evaluateMeasureWithInvalidMeasureIdValue is : " + result)
assert '400' in result, 'Should contain 400 error with message stating: Failed to resolve the specified Measure resource.'
def test_evaluateMeasureWithIncorrectMeasureFileFormat(self):
measureEvaluationApi = MeasureEvaluationApi(swagger_client.ApiClient(self.configuration))
version = date.today()
request_data = reqdata_path + "over60ColonoscopyFromVS-request-data.json"
measure = measurezip_path + "over60_and_had_colonoscopy_from_vs_v1_1_1.json"
try:
resp = measureEvaluationApi.evaluate_measure(version, request_data, measure, _preload_content=False)
result = resp.read()
except ApiException as e:
print("Exception when calling MeasureEvaluationApi->evaluate_measure: %s\n" % e)
result = str(e)
print("Exception in test_evaluateMeasureWithIncorrectMeasureFileFormat is : " + result)
assert '400' in result, 'Should contain 400 error with message stating: Failed to resolve the specified Measure resource.'
def test_evaluateMeasureWithMissingDependenciesZipFile(self):
measureEvaluationApi = MeasureEvaluationApi(swagger_client.ApiClient(self.configuration))
version = date.today()
request_data = reqdata_path + "over60ColonoscopyFromVS-request-data.json"
measure = measurezip_path + "over60_and_had_colonoscopy_from_vs_v1_1_1_incomplete.zip"
try:
resp = measureEvaluationApi.evaluate_measure(version, request_data, measure, _preload_content=False)
result = resp.read()
except ApiException as e:
print("Exception when calling MeasureEvaluationApi->evaluate_measure: %s\n" % e)
result = str(e)
print("Exception in test_evaluateMeasureWithMissingDependenciesZipFile is : " + result)
assert '400' in result, 'Should contain 400 error due to unexpected exception caught as a required library resource was not found.'
def test_evaluateMeasureWithWrongFHIREndpointPort(self):
measureEvaluationApi = MeasureEvaluationApi(swagger_client.ApiClient(self.configuration))
version = date.today()
request_data = reqdata_path + "over60ColonoscopyFromVS-wrong-fhirendpoint-port-request-data.json"
measure = measurezip_path + "over60_and_had_colonoscopy_from_vs_v1_1_1.zip"
try:
resp = measureEvaluationApi.evaluate_measure(version, request_data, measure, _preload_content=False)
result = resp.read()
except ApiException as e:
print("Exception when calling MeasureEvaluationApi->evaluate_measure: %s\n" % e)
result = str(e)
print("Exception in test_evaluateMeasureWithWrongFHIREndpointPort is : " + result)
assert '500' in result, 'Should contain 500 Error message stating: Connect to fhir-internal.dev.svc:9444 failed.'
def test_evaluateMeasureWithWrongFHIRUserPassword(self):
measureEvaluationApi = MeasureEvaluationApi(swagger_client.ApiClient(self.configuration))
version = date.today()
request_data = reqdata_path + "over60ColonoscopyFromVS-wrong-fhiruser-password-request-data.json"
measure = measurezip_path + "over60_and_had_colonoscopy_from_vs_v1_1_1.zip"
try:
resp = measureEvaluationApi.evaluate_measure(version, request_data, measure, _preload_content=False)
result = resp.read()
except ApiException as e:
print("Exception when calling MeasureEvaluationApi->evaluate_measure: %s\n" % e)
result = str(e)
print("Exception in test_evaluateMeasureWithWrongFHIRUserPassword is : " + result)
assert '400' in result, 'Should contain 400 error with message stating: HTTP 401 Unauthorized.'
def test_evaluateMeasureWithInvalidTenantIdValue(self):
measureEvaluationApi = MeasureEvaluationApi(swagger_client.ApiClient(self.configuration))
version = date.today()
request_data = reqdata_path + "over60ColonoscopyFromVS-invalid-tenantId-request-data.json"
measure = measurezip_path + "over60_and_had_colonoscopy_from_vs_v1_1_1.zip"
try:
resp = measureEvaluationApi.evaluate_measure(version, request_data, measure, _preload_content=False)
result = resp.read()
except ApiException as e:
print("Exception when calling MeasureEvaluationApi->evaluate_measure: %s\n" % e)
result = str(e)
print("Exception in test_evaluateMeasureWithInvalidTenantIdValue is : " + result)
assert '400' in result, 'Should contain 400 error with message stating: FHIRPersistenceException: Unexpected exception while creating JDBC persistence layer.'
def test_evaluateMeasureWithInvalidAPIVersion1(self):
measureEvaluationApi = MeasureEvaluationApi(swagger_client.ApiClient(self.configuration))
version = '2021/04/24'
request_data = reqdata_path + "over60ColonoscopyFromVS-request-data.json"
measure = measurezip_path + "over60_and_had_colonoscopy_from_vs_v1_1_1.zip"
try:
resp = measureEvaluationApi.evaluate_measure(version, request_data, measure, _preload_content=False)
result = resp.read()
except ApiException as e:
print("Exception when calling MeasureEvaluationApi->evaluate_measure: %s\n" % e)
result = str(e)
print("Exception in test_evaluateMeasureWithInvalidAPIVersion1 is : " + result)
assert '400' in result, 'Should contain 400 error with message stating: Invalid version parameter value.'
def test_evaluateMeasureWithInvalidAPIVersion2(self):
measureEvaluationApi = MeasureEvaluationApi(swagger_client.ApiClient(self.configuration))
version = '04-24-2021'
request_data = reqdata_path + "over60ColonoscopyFromVS-request-data.json"
measure = measurezip_path + "over60_and_had_colonoscopy_from_vs_v1_1_1.zip"
try:
resp = measureEvaluationApi.evaluate_measure(version, request_data, measure, _preload_content=False)
result = resp.read()
except ApiException as e:
print("Exception when calling MeasureEvaluationApi->evaluate_measure: %s\n" % e)
result = str(e)
print("Exception in test_evaluateMeasureWithInvalidAPIVersion2 is : " + result)
assert '400' in result, 'Should contain 400 error with message stating: Invalid version parameter value.'
|
import csv
import pandas as pd
from scipy.stats import pearsonr
label = {}
with open('dataset/label-result.csv', 'r') as file:
i = 0
for row in file:
if i == 0:
i += 1
continue
row = row.split(',')
last = row[len(row)-1].split('\n')
rest = ','.join(row[1:])
rest = rest.split('\n')[0]
label[row[0]] = rest
emotion = {}
header = ''
with open('dataset/emosent-result.csv', 'r') as file:
i = 0
for row in file:
if i == 0:
header = row.split('\n')[0]
header += ',Following,Followers,Retweet,Favorite,Extraversion,Agreeableness,Conscientiousness,Neuroticism,Openness,Label\n'
i += 1
continue
row = row.split(',')
last = row[len(row)-1].split('\n')
row[len(row)-1] = last[0]
emotion[row[0]] = row[1:]
count = 0
socials = dict()
with open('dataset/crawl-social.csv', 'r') as social:
for target_list in social:
if count == 0:
count += 1
continue
temp = target_list.split(',')
temp[len(temp) - 1] = temp[len(temp) - 1].split('\n')[0]
socials[temp[0]] = {
'Following': temp[1],
'Followers': temp[2],
'Retweet': temp[3],
'Favorite': temp[4]
}
matches = {}
count = 0
with open('dataset/pearson-data.csv', 'w') as file:
file.write(header)
for target_list in emotion:
if target_list in label:
if target_list in socials:
emotion[target_list].append(label[target_list])
emotion[target_list].insert(0, target_list)
                emotion[target_list][len(emotion[target_list])-1] = (
                    socials[target_list]['Following'] + ',' +
                    socials[target_list]['Followers'] + ',' +
                    socials[target_list]['Retweet'] + ',' +
                    socials[target_list]['Favorite'] + ',' +
                    emotion[target_list][len(emotion[target_list])-1] + '\n'
                )
file.write(','.join(emotion[target_list]))
count += 1
print("\n- writing to 'dataset/pearson-data.csv' complete.")
# ==========================================================
df = pd.read_csv('dataset/pearson-data.csv')
list1 = df['Trust']
list2 = df['Neuroticism']
corr, _ = pearsonr(list1, list2)
print('\nPearsons correlation: %.3f' % corr)
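# For reference: scipy.stats.pearsonr computes
#   r = cov(X, Y) / (std(X) * std(Y))
# and returns it together with a two-sided p-value, discarded above via `_`.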
|
# -*- coding: utf-8 -*-
from unittest.mock import MagicMock, patch
from chaoslib.exceptions import FailedActivity
from kubernetes import client, config
import pytest
from chaosgce.nodepool.actions import create_new_nodepool, delete_nodepool, \
swap_nodepool
import fixtures
@patch('chaosgce.nodepool.actions.wait_on_operation', autospec=False)
@patch('chaosgce.build', autospec=True)
@patch('chaosgce.Credentials', autospec=True)
def test_create_nodepool(Credentials, service_builder, wait_on_operation):
project_id = fixtures.configuration["gce_project_id"]
cluster_name = fixtures.configuration["gce_cluster_name"]
zone = fixtures.configuration["gce_zone"]
Credentials.from_service_account_file.return_value = MagicMock()
service = MagicMock()
service_builder.return_value = service
nodepool_svc = MagicMock()
service.projects().zones().clusters().nodePools.return_value = nodepool_svc
create_np = MagicMock()
nodepool_svc.create = create_np
create_np.return_value.execute.return_value = {
"name": "mynodepool"
}
ops_svc = MagicMock()
service.projects().zones().operations.return_value = ops_svc
response = create_new_nodepool(
body=fixtures.nodepool.body,
secrets=fixtures.secrets,
configuration=fixtures.configuration
)
create_np.assert_called_with(
projectId=project_id, zone=zone, clusterId=cluster_name,
body=fixtures.nodepool.body)
wait_on_operation.assert_called_with(ops_svc,
project_id=fixtures.configuration["gce_project_id"],
zone=fixtures.configuration["gce_zone"], operation_id="mynodepool")
@patch('chaosgce.nodepool.actions.wait_on_operation', autospec=False)
@patch('chaosgce.build', autospec=True)
@patch('chaosgce.Credentials', autospec=True)
def test_delete_nodepool(Credentials, service_builder, wait_on_operation):
project_id = fixtures.configuration["gce_project_id"]
cluster_name = fixtures.configuration["gce_cluster_name"]
zone = fixtures.configuration["gce_zone"]
Credentials.from_service_account_file.return_value = MagicMock()
service = MagicMock()
service_builder.return_value = service
nodepool_svc = MagicMock()
service.projects().zones().clusters().nodePools.return_value = nodepool_svc
delete_np = MagicMock()
nodepool_svc.delete = delete_np
delete_np.return_value.execute.return_value = {
"name": "mynodepool"
}
ops_svc = MagicMock()
service.projects().zones().operations.return_value = ops_svc
response = delete_nodepool(
node_pool_id="mynodepool",
secrets=fixtures.secrets,
configuration=fixtures.configuration
)
delete_np.assert_called_with(
projectId=project_id, zone=zone, clusterId=cluster_name,
nodePoolId="mynodepool")
wait_on_operation.assert_called_with(ops_svc,
project_id=fixtures.configuration["gce_project_id"],
zone=fixtures.configuration["gce_zone"], operation_id="mynodepool")
@patch('chaosgce.nodepool.actions.drain_nodes', autospec=False)
@patch('chaosgce.nodepool.actions.wait_on_operation', autospec=False)
@patch('chaosgce.build', autospec=True)
@patch('chaosgce.Credentials', autospec=True)
def test_swap_nodepool(Credentials, service_builder, wait_on_operation,
drain_nodes):
project_id = fixtures.configuration["gce_project_id"]
cluster_name = fixtures.configuration["gce_cluster_name"]
zone = fixtures.configuration["gce_zone"]
Credentials.from_service_account_file.return_value = MagicMock()
service = MagicMock()
service_builder.return_value = service
nodepool_svc = MagicMock()
service.projects().zones().clusters().nodePools.return_value = nodepool_svc
create_np = MagicMock()
nodepool_svc.create = create_np
create_np.return_value.execute.return_value = {
"name": "default-pool"
}
delete_np = MagicMock()
nodepool_svc.delete = delete_np
delete_np.return_value.execute.return_value = {
"name": "mynodepool"
}
ops_svc = MagicMock()
service.projects().zones().operations.return_value = ops_svc
response = swap_nodepool(
old_node_pool_id="mynodepool",
new_nodepool_body=fixtures.nodepool.body,
delete_old_node_pool=True,
secrets=fixtures.secrets,
configuration=fixtures.configuration
)
create_np.assert_called_with(
projectId=project_id, zone=zone, clusterId=cluster_name,
body=fixtures.nodepool.body)
delete_np.assert_called_with(
projectId=project_id, zone=zone, clusterId=cluster_name,
nodePoolId="mynodepool")
wait_on_operation.assert_called_with(ops_svc,
project_id=fixtures.configuration["gce_project_id"],
zone=fixtures.configuration["gce_zone"], operation_id="mynodepool")
@patch('chaosgce.nodepool.actions.drain_nodes', autospec=False)
@patch('chaosgce.nodepool.actions.wait_on_operation', autospec=False)
@patch('chaosgce.build', autospec=True)
@patch('chaosgce.Credentials', autospec=True)
def test_swap_nodepool_without_delete(Credentials, service_builder,
wait_on_operation, drain_nodes):
project_id = fixtures.configuration["gce_project_id"]
cluster_name = fixtures.configuration["gce_cluster_name"]
zone = fixtures.configuration["gce_zone"]
Credentials.from_service_account_file.return_value = MagicMock()
service = MagicMock()
service_builder.return_value = service
nodepool_svc = MagicMock()
service.projects().zones().clusters().nodePools.return_value = nodepool_svc
create_np = MagicMock()
nodepool_svc.create = create_np
create_np.return_value.execute.return_value = {
"name": "default-pool"
}
delete_np = MagicMock()
nodepool_svc.delete = delete_np
delete_np.return_value.execute.return_value = {
"name": "mynodepool"
}
ops_svc = MagicMock()
service.projects().zones().operations.return_value = ops_svc
response = swap_nodepool(
old_node_pool_id="mynodepool",
new_nodepool_body=fixtures.nodepool.body,
delete_old_node_pool=False,
secrets=fixtures.secrets,
configuration=fixtures.configuration
)
create_np.assert_called_with(
projectId=project_id, zone=zone, clusterId=cluster_name,
body=fixtures.nodepool.body)
delete_np.assert_not_called()
|
#!/usr/bin/env python
"""
flow_director_d8.py: provides the component FlowDirectorD8.
This component finds single-path (D8) steepest descent flow directions and
considers diagonal links between nodes on a raster grid. It is not
implemented for irregular grids. For a method that works for irregular
grids and does not consider diagonal links on rasters, use
FlowDirectorSteepest instead.
"""
import numpy
from landlab import LinkStatus
from landlab.components.flow_director import flow_direction_DN
from landlab.components.flow_director.flow_director_to_one import _FlowDirectorToOne
class FlowDirectorD8(_FlowDirectorToOne):
"""Single-path (steepest direction) flow direction with diagonals on
rasters.
Single-path (steepest direction) flow direction finding on raster grids
by the D8 method. This method considers flow on all eight links such that
flow is possible on orthogonal and on diagonal links.
The method that considers only orthogonal links (D4 method) for raster
grids is FlowDirectorSteepest.
    This method is not implemented for Voronoi grids; use
    FlowDirectorSteepest instead.
Stores as ModelGrid fields:
- Node array of receivers (nodes that receive flow), or ITS OWN ID if
there is no receiver: *'flow__receiver_node'*
- Node array of steepest downhill slopes:
*'topographic__steepest_slope'*
- Node array containing ID of link that leads from each node to its
receiver, or BAD_INDEX_VALUE if no link:
*'flow__link_to_receiver_node'*
- Boolean node array of all local lows: *'flow__sink_flag'*
The primary method of this class is :func:`run_one_step`.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.components import FlowDirectorD8
>>> mg = RasterModelGrid((3,3), xy_spacing=(1, 1))
>>> mg.set_closed_boundaries_at_grid_edges(True, True, True, False)
>>> _ = mg.add_field(
... "topographic__elevation",
... mg.node_x + mg.node_y,
... at="node",
... )
>>> fd = FlowDirectorD8(mg, 'topographic__elevation')
>>> fd.surface_values
array([ 0., 1., 2., 1., 2., 3., 2., 3., 4.])
>>> fd.run_one_step()
>>> mg.at_node['flow__receiver_node']
array([0, 1, 2, 3, 0, 5, 6, 7, 8])
>>> mg.at_node['topographic__steepest_slope']
array([ 0. , 0. , 0. , 0. , 1.41421356,
0. , 0. , 0. , 0. ])
>>> mg.at_node['flow__link_to_receiver_node']
array([-1, -1, -1, -1, 12, -1, -1, -1, -1])
>>> mg.at_node['flow__sink_flag'].astype(int)
array([1, 1, 1, 1, 0, 1, 1, 1, 1])
>>> mg_2 = RasterModelGrid((5, 4), xy_spacing=(1, 1))
>>> topographic__elevation = np.array([0., 0., 0., 0.,
... 0., 21., 10., 0.,
... 0., 31., 20., 0.,
... 0., 32., 30., 0.,
... 0., 0., 0., 0.])
>>> _ = mg_2.add_field(
... "topographic__elevation",
... topographic__elevation,
... at="node",
... )
>>> mg_2.set_closed_boundaries_at_grid_edges(True, True, True, False)
>>> fd_2 = FlowDirectorD8(mg_2)
>>> fd_2.run_one_step()
>>> mg_2.at_node['flow__receiver_node'] # doctest: +NORMALIZE_WHITESPACE
array([ 0, 1, 2, 3,
4, 1, 2, 7,
8, 6, 6, 11,
12, 10, 10, 15,
16, 17, 18, 19])
The flow directors also have the ability to return the flow receiver nodes
>>> receiver = fd.direct_flow()
>>> receiver
array([0, 1, 2,
3, 0, 5,
6, 7, 8])
"""
_name = "FlowDirectorD8"
_info = {
"flow__link_to_receiver_node": {
"dtype": int,
"intent": "out",
"optional": False,
"units": "-",
"mapping": "node",
"doc": "ID of link downstream of each node, which carries the discharge",
},
"flow__receiver_node": {
"dtype": int,
"intent": "out",
"optional": False,
"units": "-",
"mapping": "node",
"doc": "Node array of receivers (node that receives flow from current node)",
},
"flow__sink_flag": {
"dtype": bool,
"intent": "out",
"optional": False,
"units": "-",
"mapping": "node",
"doc": "Boolean array, True at local lows",
},
"topographic__elevation": {
"dtype": float,
"intent": "in",
"optional": True,
"units": "m",
"mapping": "node",
"doc": "Land surface topographic elevation",
},
"topographic__steepest_slope": {
"dtype": float,
"intent": "out",
"optional": False,
"units": "-",
"mapping": "node",
"doc": "The steepest *downhill* slope",
},
}
def __init__(self, grid, surface="topographic__elevation"):
"""
Parameters
----------
grid : ModelGrid
A grid of type RasterModelGrid.
surface : field name at node or array of length node, optional
The surface to direct flow across, default is field at node:
topographic__elevation.
"""
self._method = "D8"
super(FlowDirectorD8, self).__init__(grid, surface)
try:
self._grid.nodes_at_d8
except AttributeError:
self._is_Voroni = True
else:
self._is_Voroni = False
if self._is_Voroni:
raise NotImplementedError(
"FlowDirectorD8 not implemented for "
"irregular grids, use "
"FlowDirectorSteepest"
)
self.updated_boundary_conditions()
def updated_boundary_conditions(self):
"""Method to update FlowDirectorD8 when boundary conditions change.
Call this if boundary conditions on the grid are updated after
the component is instantiated.
"""
self._active_links = numpy.arange(self._grid.number_of_d8)
nodes_at_d8 = self._grid.nodes_at_d8[self._active_links]
self._activelink_tail = nodes_at_d8[:, 0]
self._activelink_head = nodes_at_d8[:, 1]
def run_one_step(self):
"""Find flow directions and save to the model grid.
run_one_step() checks for updated boundary conditions, calculates
slopes on links, finds baselevel nodes based on the status at node,
calculates flow directions, and saves results to the grid.
An alternative to run_one_step() is direct_flow(), which does the same
things but also returns the at-node array of receiver nodes.
"""
self.direct_flow()
def direct_flow(self):
"""Find flow directions, save to the model grid, and return receivers.
direct_flow() checks for updated boundary conditions, calculates
slopes on links, finds baselevel nodes based on the status at node,
calculates flow directions, saves results to the grid, and returns an
at-node array of receiver nodes. This array is stored in the grid at:
grid['node']['flow__receiver_node']
An alternative to direct_flow() is run_one_step(), which does the same
things but does not return the receiver node array.
"""
self._check_updated_bc()
# update the surface, if it was provided as a model grid field.
self._changed_surface()
# step 1. Calculate link slopes.
link_slope = -self._grid.calc_grad_at_d8(self._surface_values)
link_slope[self._grid.status_at_d8 != LinkStatus.ACTIVE] = 0
# Step 2. Find and save base level nodes.
(baselevel_nodes,) = numpy.where(
numpy.logical_or(
self._grid.status_at_node == self._grid.BC_NODE_IS_FIXED_VALUE,
self._grid.status_at_node == self._grid.BC_NODE_IS_FIXED_GRADIENT,
)
)
# Calculate flow directions by D8 method
receiver, steepest_slope, sink, recvr_link = flow_direction_DN.flow_directions(
self._surface_values,
self._active_links,
self._activelink_tail,
self._activelink_head,
link_slope,
grid=self._grid,
baselevel_nodes=baselevel_nodes,
)
# Save the four outputs of this component.
self._grid["node"]["flow__receiver_node"][:] = receiver
self._grid["node"]["topographic__steepest_slope"][:] = steepest_slope
self._grid["node"]["flow__link_to_receiver_node"][:] = recvr_link
self._grid["node"]["flow__sink_flag"][:] = numpy.zeros_like(
receiver, dtype=bool
)
self._grid["node"]["flow__sink_flag"][sink] = True
return receiver
if __name__ == "__main__": # pragma: no cover
import doctest
doctest.testmod()
|
from .signer import Signer
|
""" This module contains "throaway" code for pre-generating a list of stations.
TODO: Remove this module when the Fire Weather Index Calculator uses the correct API as source for data.
"""
import csv
import json
import re
import geopandas
from shapely.geometry import Point
def fetch_ecodivision_name(latitude: str, longitude: str, ecodivisions: geopandas.GeoDataFrame):
""" Returns the ecodivision name for a given lat/long coordinate """
station_coord = Point(float(longitude), float(latitude))
for _, ecodivision_row in ecodivisions.iterrows():
geom = ecodivision_row['geometry']
if station_coord.within(geom):
return ecodivision_row['CDVSNNM']
return None
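# Illustrative call (the coordinate and the returned name are hypothetical and depend
# entirely on the shapefile loaded below): with the ECODIVISIONS GeoDataFrame in hand,
#   fetch_ecodivision_name("49.25", "-123.10", ECODIVISIONS)
# runs a point-in-polygon test against each ecodivision geometry and returns the matching
# 'CDVSNNM' value, or None when the coordinate falls outside every polygon.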
with open('csv/Station_BC_June2020.csv', 'r') as csvfile:
rows = csv.reader(csvfile, dialect=csv.unix_dialect)
header = next(rows)
code = header.index('station_code')
name = header.index('station_name')
station_category = header.index('station_category')
lat = header.index('latitude')
long = header.index('longitude')
weather_stations = []
ECODIVISIONS = geopandas.read_file(
'data/ERC_ECODIV_polygon/ERC_ECODIV_polygon.shp')
with open('data/ecodivisions_core_seasons.json') as file_handle:
CORE_SEASONS = json.load(file_handle)
# Keep track of station count for debug purposes.
station_count = 0
for row in rows:
# We're only interested in permanent, active weather stations.
# Active stations are marked as 'active' in the station_category column.
if row[station_category] != 'active':
continue
# Some stations are incorrectly labeled 'active': station names that start
# with ZZ are not actually active and must be skipped.
# Quick deploy (temporary) stations are marked QD at the end
# Remove stations ending with SF and (WIND), which don't have valid fwi values
regex = re.compile(r"^(ZZ)|(.*QD)$|(.*SF)$|(.*\(WIND\))", re.I)
if regex.match(row[name]):
print('Skipping {}:{}'.format(row[code], row[name]))
continue
station_count = station_count + 1
# hacky fix for station 447 (WATSON LAKE FS), which is in the Yukon
# so ecodivision name has to be hard-coded
if row[code] == "447":
ecodivision_name = "SUB-ARCTIC HIGHLANDS"
else:
ecodivision_name = fetch_ecodivision_name(
row[lat], row[long], ECODIVISIONS)
if ecodivision_name is not None:
core_season = CORE_SEASONS[ecodivision_name]['core_season']
else:
core_season = {"start_month": "5", "start_day": "1",
"end_month": "8", "end_day": "31"}
weather_stations.append(
{
"code": row[code],
"name": row[name],
"lat": row[lat],
"long": row[long],
"ecodivision_name": ecodivision_name,
"core_season": core_season
}
)
# Order stations by name.
weather_stations.sort(key=lambda station: station['name'])
with open('app/data/weather_stations.json', 'w') as json_file:
# Dump json with an indent making it more human readable.
json.dump({'weather_stations': weather_stations}, json_file, indent=2)
print('Station export complete, {} stations exported.'.format(station_count))
|
import pytest
import rpy2.rlike.container as rlc
class TestOrdDict(object):
def test_new(self):
nl = rlc.OrdDict()
x = (('a', 123), ('b', 456), ('c', 789))
nl = rlc.OrdDict(x)
def test_new_invalid(self):
with pytest.raises(TypeError):
rlc.OrdDict({})
@pytest.mark.parametrize('methodname,args',
(('__cmp__', [None]),
('__eq__', [None]),
('__ne__', [None]),
('__reversed__', []),
('sort', [])))
def test_notimplemented(self, methodname, args):
nl = rlc.OrdDict()
with pytest.raises(NotImplementedError):
getattr(nl, methodname)(*args)
def test_repr(self):
x = (('a', 123), ('b', 456), ('c', 789))
nl = rlc.OrdDict(x)
assert isinstance(repr(nl), str)
def test_iter(self):
x = (('a', 123), ('b', 456), ('c', 789))
nl = rlc.OrdDict(x)
for a, b in zip(nl, x):
assert a == b[0]
def test_len(self):
x = rlc.OrdDict()
assert len(x) == 0
x['a'] = 2
x['b'] = 1
assert len(x) == 2
def test_getsetitem(self):
x = rlc.OrdDict()
x['a'] = 1
assert len(x) == 1
assert x['a'] == 1
assert x.index('a') == 0
x['a'] = 2
assert len(x) == 1
assert x['a'] == 2
assert x.index('a') == 0
x['b'] = 1
assert len(x) == 2
assert x['b'] == 1
assert x.index('b') == 1
def test_get(self):
x = rlc.OrdDict()
x['a'] = 1
assert x.get('a') == 1
assert x.get('b') is None
assert x.get('b', 2) == 2
def test_keys(self):
x = rlc.OrdDict()
word = 'abcdef'
for i,k in enumerate(word):
x[k] = i
for i,k in enumerate(x.keys()):
assert word[i] == k
def test_getsetitemwithnone(self):
x = rlc.OrdDict()
x['a'] = 1
x[None] = 2
assert len(x) == 2
x['b'] = 5
assert len(x) == 3
assert x['a'] == 1
assert x['b'] == 5
assert x.index('a') == 0
assert x.index('b') == 2
def test_reverse(self):
x = rlc.OrdDict()
x['a'] = 3
x['b'] = 2
x['c'] = 1
x.reverse()
assert x['c'] == 1
assert x.index('c') == 0
assert x['b'] == 2
assert x.index('b') == 1
assert x['a'] == 3
assert x.index('a') == 2
def test_items(self):
args = (('a', 5), ('b', 4), ('c', 3),
('d', 2), ('e', 1))
x = rlc.OrdDict(args)
it = x.items()
for ki, ko in zip(args, it):
assert ki[0] == ko[0]
assert ki[1] == ko[1]
class TestTaggedList(object):
def test__add__(self):
tl = rlc.TaggedList((1,2,3), tags=('a', 'b', 'c'))
tl = tl + tl
assert len(tl) == 6
assert tl.tags == ('a', 'b', 'c', 'a', 'b', 'c')
assert tuple(tl) == (1,2,3,1,2,3)
def test__delitem__(self):
tl = rlc.TaggedList((1,2,3), tags=('a', 'b', 'c'))
assert len(tl) == 3
del tl[1]
assert len(tl) == 2
assert tl.tags == ('a', 'c')
assert tuple(tl) == (1, 3)
def test__delslice__(self):
tl = rlc.TaggedList((1,2,3,4), tags=('a', 'b', 'c', 'd'))
del tl[1:3]
assert len(tl) == 2
assert tl.tags == ('a', 'd')
assert tuple(tl) == (1, 4)
def test__iadd__(self):
tl = rlc.TaggedList((1,2,3), tags=('a', 'b', 'c'))
tl += tl
assert len(tl) == 6
assert tl.tags == ('a', 'b', 'c', 'a', 'b', 'c')
assert tuple(tl) == (1,2,3,1,2,3)
def test__imul__(self):
tl = rlc.TaggedList((1,2), tags=('a', 'b'))
tl *= 3
assert len(tl) == 6
assert tl.tags == ('a', 'b', 'a', 'b', 'a', 'b')
assert tuple(tl) == (1,2,1,2,1,2)
def test__init__(self):
tl = rlc.TaggedList((1,2,3), tags=('a', 'b', 'c'))
with pytest.raises(ValueError):
rlc.TaggedList((1,2,3), tags = ('b', 'c'))
def test__setslice__(self):
tl = rlc.TaggedList((1,2,3,4), tags=('a', 'b', 'c', 'd'))
tl[1:3] = [5, 6]
assert len(tl) == 4
assert tl.tags == ('a', 'b', 'c', 'd')
assert tuple(tl) == (1, 5, 6, 4)
def test_append(self):
tl = rlc.TaggedList((1,2,3), tags=('a', 'b', 'c'))
assert len(tl) == 3
tl.append(4, tag='a')
assert len(tl) == 4
assert tl[3] == 4
assert tl.tags == ('a', 'b', 'c', 'a')
def test_extend(self):
tl = rlc.TaggedList((1,2,3), tags=('a', 'b', 'c'))
tl.extend([4, 5])
assert tuple(tl.itertags()) == ('a', 'b', 'c', None, None)
assert tuple(tl) == (1, 2, 3, 4, 5)
def test_insert(self):
tl = rlc.TaggedList((1,2,3), tags=('a', 'b', 'c'))
tl.insert(1, 4, tag = 'd')
assert tuple(tl.itertags()) == ('a', 'd', 'b', 'c')
assert tuple(tl) == (1, 4, 2, 3)
def test_items(self):
tl = rlc.TaggedList((1,2,3), tags=('a', 'b', 'c'))
assert tuple(tl.items()) == (('a', 1), ('b', 2), ('c', 3))
def test_iterontag(self):
tl = rlc.TaggedList((1,2,3), tags=('a', 'b', 'a'))
assert tuple(tl.iterontag('a')) == (1, 3)
def test_itertags(self):
tl = rlc.TaggedList((1,2,3), tags=('a', 'b', 'c'))
assert tuple(tl.itertags()) == ('a', 'b', 'c')
def test_pop(self):
tl = rlc.TaggedList((1,2,3), tags=('a', 'b', 'c'))
assert len(tl) == 3
elt = tl.pop()
assert elt == 3
assert len(tl) == 2
assert tl.tags == ('a', 'b')
assert tuple(tl) == (1, 2)
elt = tl.pop(0)
assert elt == 1
assert len(tl) == 1
assert tl.tags == ('b', )
def test_remove(self):
tl = rlc.TaggedList((1,2,3), tags=('a', 'b', 'c'))
assert len(tl) == 3
tl.remove(2)
assert len(tl) == 2
assert tl.tags == ('a', 'c')
assert tuple(tl) == (1, 3)
def test_reverse(self):
tn = ['a', 'b', 'c']
tv = [1,2,3]
tl = rlc.TaggedList(tv, tags = tn)
tl.reverse()
assert len(tl) == 3
assert tl.tags == ('c', 'b', 'a')
assert tuple(tl) == (3, 2, 1)
def test_sort(self):
tn = ['a', 'c', 'b']
tv = [1,3,2]
tl = rlc.TaggedList(tv, tags = tn)
tl.sort()
assert tl.tags == ('a', 'b', 'c')
assert tuple(tl) == (1, 2, 3)
def test_tags(self):
tn = ['a', 'b', 'c']
tv = [1,2,3]
tl = rlc.TaggedList(tv, tags = tn)
tags = tl.tags
assert isinstance(tags, tuple) is True
assert tags == ('a', 'b', 'c')
tn = ['d', 'e', 'f']
tl.tags = tn
assert isinstance(tags, tuple) is True
assert tl.tags == tuple(tn)
def test_settag(self):
tn = ['a', 'b', 'c']
tv = [1,2,3]
tl = rlc.TaggedList(tv, tags = tn)
tl.settag(1, 'z')
assert tl.tags == ('a', 'z', 'c')
def test_from_items(self):
od = rlc.OrdDict( (('a', 1), ('b', 2), ('c', 3)) )
tl = rlc.TaggedList.from_items(od)
assert tl.tags == ('a', 'b', 'c')
assert tuple(tl) == (1, 2, 3)
tl = rlc.TaggedList.from_items({'a':1, 'b':2, 'c':3})
assert set(tl.tags) == set(('a', 'b', 'c'))
assert set(tuple(tl)) == set((1, 2, 3))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import json
from Tools.Logger import logger
from Lib.Similarity.Similarity import Similarity
class Jaccard(Similarity):
def predict(self, doc):
results = []
for index in range(self.total):
x = self.X[index]
sum_max = 0.0
sum_min = 0.0
for xi, yi in zip(x, doc):
sum_min += min(xi, yi)
sum_max += max(xi, yi)
try:
results.append([self.y[index], sum_min / sum_max])
except ZeroDivisionError:
results.append([self.y[index], 0.0])
results.sort(reverse=True, key=lambda x: x[1])
return results
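# Worked illustration of the weighted (Ruzicka) Jaccard score computed above, using
# hypothetical count vectors: for a stored document x = [1, 2, 0] and a query
# doc = [2, 1, 1], sum_min = 1 + 1 + 0 = 2 and sum_max = 2 + 2 + 1 = 5, so the
# similarity recorded for that document is 2 / 5 = 0.4; results are then returned
# sorted by this score in descending order.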
|
import numpy as np
from tensorcomlib.tensor import tensor
from ..tools.tools import float2front
# Tensor times matrix (mode-n product)
def tensor_times_mat(ten,mat,mode):
shp = ten.shape
ndim = ten.ndims
order = float2front(ten.order,mode)
# `unfold` (mode-n matricisation) is assumed to be provided elsewhere in tensorcomlib.
newdata = np.dot(mat,unfold(ten,mode).data)
p = mat.shape[0]
newshp = [p]
newshp.extend(shp[0:mode])
newshp.extend(shp[mode+1:ndim])
T = newdata.reshape(newshp)
T = np.transpose(T,[order.index(i) for i in range(len(order))])
return tensor(T)
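# Shape sketch for the mode-n product above (illustrative numbers): multiplying a
# tensor of shape (3, 4, 2) by a matrix of shape (5, 4) along mode=1 replaces that
# mode's dimension, giving a result of shape (3, 5, 2); each mode-1 fibre of the
# result equals mat @ (the corresponding mode-1 fibre of the input).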
# Tensor times a sequence of matrices (one matrix per listed mode)
def tensor_multi_times_mat(X,matlist,modelist,transpose):
res = X
for mat,mode in zip(matlist,modelist):
if transpose:
res = tensor_times_mat(res,mat.T,mode)
else:
res = tensor_times_mat(res, mat, mode)
return res
def tensor_times_vec(X,vec,mode):
# Contract the tensor with `vec` along `mode` (mode-n tensor-vector product);
# the result drops that mode.
return tensor(np.tensordot(X.data,vec,axes=([mode],[0])))
def tensor_times_tensor(X1,X2):
pass
def tensor2vec(X):
return X.data.reshape(X.size(),order='F')
def vec2tensor():
pass
def khatri_rao():
pass
def kronecker(ten1,ten2):
res = np.kron(ten1.data,ten2.data)
return tensor(res,res.shape)
def einstein():
pass
def teninner(X1,X2):
if X1.shape != X2.shape:
raise ValueError("Both tensors must have the same shape!")
res = (X1.data) * (X2.data)
return tensor(res,X1.shape)
def tenouter(X1,X2):
return tensor(np.tensordot(X1.data, X2.data, axes=0))
def tennorm(X):
return np.linalg.norm(X.data)
def tensor_contraction(X1,X2):
return tensor(np.tensordot(X1.data,X2.data,axes=2))
# Tensor Addition and Subtraction
def add(X1,X2):
return tensor(X1.data+X2.data)
def sub(X1,X2):
return tensor(X1.data-X2.data)
|
#!/usr/bin/env python3
""" functions related to printing cards """
import sys
import os.path
def writedict():
""" return the dictionary """
col = ["♠", "♥", "♦", "♣"]
d = {}
for i in range(0, 4):
for j in range(1, 15):
if j <= 10:
d[i*14+j] = col[i] + str(j)
else:
a = ""
if j == 11:
a = "V"
if j == 12:
a = "C"
if j == 13:
a = "Q"
if j == 14:
a = "K"
d[i*14+j] = col[i] + a
for i in range(14*4 + 1, 14*4 + 2 + 21):
d[i] = "T" + str(i - 14*4)
d[78] = "JJ"
return d
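# Example of the resulting encoding, derived from the loops above (the deck appears to
# be a 78-card French tarot): d[1] == "♠1", d[14] == "♠K", d[15] == "♥1",
# d[57] == "T1" (first trump), d[77] == "T21", and d[78] == "JJ" (the excuse/joker).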
if __name__ == "__main__":
""" save the dictionary """
if len(sys.argv) != 2:
print("error: requires one argument (filename)")
sys.exit(1)
fname = sys.argv[1]
if os.path.isfile(fname):
print("Delete {}? y/n".format(fname))
a = input()
if a != "" and a == 'n':
sys.exit(0)
try:
with open(fname, 'w') as f:
d = writedict()
for i in range(1, 79):
f.write("d[{}] = \"{}\"\n".format(i, d[i]))
f.write("\n\n")
f.write("D = {\n")
for i in range(1, 79):
if i != 78:
f.write("\t\t{} : \"{}\",\n".format(i, d[i]))
else:
f.write("\t\t{} : \"{}\"\n".format(i, d[i]))
f.write("\t\t}\n")
print("Done, saved as {}".format(fname))
except Exception as e:
#print("error :", e.message, e.args)
print(e)
sys.exit(0)
|
import os
import shutil
import sqlite3
import pytz
from dateutil import parser
from django.conf import settings
from django.utils.html import escape
from .base import HoursDataSource
class DataSource(HoursDataSource):
def fetch(self):
if settings.VERBOSE:
print("Chrome history...")
if 'CHROME' not in settings.DS or 'HISTORY' not in settings.DS['CHROME']:
raise BaseException("Chrome history location not specified.")
start_time = self.start_date.strftime('%s')
end_time = self.end_date.strftime('%s')
copied_file = os.path.join(settings.BASE_DIR, 'data/chrome/History')
orig_file = settings.DS['CHROME']['HISTORY']
# Copy file if we haven't copied it before, or if it's outdated
if not os.path.exists(copied_file) or os.stat(orig_file).st_mtime - os.stat(copied_file).st_mtime > 0:
print("Copying history")
shutil.copy2(orig_file, copied_file)
# See http://linuxsleuthing.blogspot.ca/2011/06/decoding-google-chrome-timestamps-in.html
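# Chrome stores visit_time as microseconds since 1601-01-01 (the WebKit epoch); dividing
# by 1,000,000 and subtracting 11,644,473,600 s (the 1601-to-1970 offset) converts it to
# a Unix timestamp. For example, a visit_time of 13,285,000,000,000,000 microseconds
# works out to roughly 1,640,526,400 s, i.e. late December 2021 (illustrative value only).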
query = """SELECT DATETIME((visit_time/1000000)-11644473600, 'unixepoch') AS dt,
title, urls.url
FROM urls, visits WHERE urls.id = visits.url
AND (visit_time/1000000)-11644473600 > %s
AND (visit_time/1000000)-11644473600 < %s
ORDER BY visit_time ASC;""" % (start_time, end_time)
conn = sqlite3.connect(copied_file)
cursor = conn.cursor()
db_results = cursor.execute(query)
for date_string, url_title, url in db_results:
date = parser.parse(date_string).replace(tzinfo=pytz.UTC)
self.add_entry(date, escape(url_title[:80]), escape(url_title[:80]), escape(url[:80]), 'url')
conn.close()
|
import traceback
from typing import Dict, List, Optional, Union
import dateparser
import demistomock as demisto # noqa: F401
import requests
from CommonServerPython import * # noqa: F401
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
MAX_INCIDENTS_TO_FETCH = 50
HELLOWORLD_SEVERITIES = ['Low', 'Medium', 'High', 'Critical']
''' CLIENT CLASS '''
class Client(BaseClient):
def test_connect(self, api_key):
return self._http_request(
method='GET',
url_suffix='/info.php?',
params={
'indicator': 'pulsedive.com',
'key': api_key
}
)
def get_ip_reputation(self, ip: str, api_key) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix='/info.php?',
params={
'indicator': ip,
'pretty': '1',
'key': api_key
}
)
def get_domain_reputation(self, domain: str, api_key) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix='/info.php?',
params={
'indicator': domain,
'pretty': '1',
'key': api_key
}
)
def get_url_reputation(self, url: str, api_key) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix='/info.php?',
params={
'indicator': url,
'pretty': '1',
'key': api_key
}
)
def post_value_scan(self, value: str, probe: str, api_key) -> Dict[str, Any]:
return self._http_request(
method='POST',
url_suffix='/analyze.php',
params={
'value': value,
'probe': probe,
'pretty': '1',
'key': api_key
}
)
def get_value_scan(self, qid: str, api_key) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix='/analyze.php?',
params={
'qid': qid,
'pretty': '1',
'key': api_key
}
)
''' HELPER FUNCTIONS '''
def parse_domain_date(domain_date: Union[List[str], str], date_format: str = '%Y-%m-%dT%H:%M:%S.000Z') -> Optional[str]:
"""Converts whois date format to an ISO8601 string
Converts a domain WHOIS date (YYYY-mm-dd HH:MM:SS) string
into an ISO8601 string. If a list is given with multiple elements, only
the first one is parsed.
:type domain_date: ``Union[List[str],str]``
:param domain_date:
a string or list of strings with the format 'YYYY-mm-DD HH:MM:SS'
:return: Parsed time in ISO8601 format
:rtype: ``Optional[str]``
"""
if isinstance(domain_date, str):
# if str parse the value
if _date := dateparser.parse(domain_date).strftime(date_format): # type: ignore[union-attr]
return _date
return None
elif isinstance(domain_date, list) and len(domain_date) > 0 and isinstance(domain_date[0], str):
# if list with at least one element, parse the first element
return dateparser.parse(domain_date[0]).strftime(date_format) # type: ignore[union-attr]
# in any other case return nothing
return None
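# Illustrative behaviour with hypothetical inputs: parse_domain_date('2021-03-01 10:15:00')
# returns '2021-03-01T10:15:00.000Z', and parse_domain_date(['2021-03-01 10:15:00',
# '2022-01-01 00:00:00']) parses only the first element of the list.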
def convert_to_xsoar_severity(pulsedive_severity) -> int:
if (pulsedive_severity == 'unknown' or pulsedive_severity == 'none'):
xsoar_severity = Common.DBotScore.NONE # unknown
elif pulsedive_severity == 'high':
xsoar_severity = Common.DBotScore.SUSPICIOUS # suspicious
elif pulsedive_severity == 'critical':
xsoar_severity = Common.DBotScore.BAD # bad
else:
xsoar_severity = Common.DBotScore.GOOD # good
return xsoar_severity
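# Mapping summary of the branches above: 'unknown'/'none' -> Common.DBotScore.NONE (0),
# 'high' -> SUSPICIOUS (2), 'critical' -> BAD (3), and any other value (e.g. 'low',
# 'medium') -> GOOD (1).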
''' COMMAND FUNCTIONS '''
def test_module(client: Client, api_key) -> str:
"""Tests API connectivity and authentication"""
try:
client.test_connect(api_key)
except DemistoException:
return 'Could not connect to Pulsedive'
return 'ok'
def ip_reputation_command(client: Client, args: Dict[str, Any], api_key) -> List[CommandResults]:
ips = argToList(args.get('ip'))
if len(ips) == 0:
raise ValueError('IP(s) not specified')
command_results: List[CommandResults] = []
for ip in ips:
try:
ip_data = client.get_ip_reputation(ip, api_key)
indicator_ip = ip_data['indicator']
reputation = ip_data['risk']
score = convert_to_xsoar_severity(reputation)
# Create the DBotScore structure first using the Common.DBotScore class.
dbot_score = Common.DBotScore(
indicator=indicator_ip,
indicator_type=DBotScoreType.IP,
integration_name='Pulsedive',
score=score,
malicious_description=f'Pulsedive returned reputation {reputation}'
)
# Create the IP Standard Context structure using Common.IP and add
# dbot_score to it.
ip_standard_context = Common.IP(
ip=indicator_ip,
dbot_score=dbot_score
)
ip_data.pop('objects', None)
ip_data.pop('nir', None)
command_results.append(CommandResults(
readable_output=tableToMarkdown('IP Details:', ip_data),
outputs_prefix='Pulsedive.IP',
outputs_key_field='indicator',
outputs=ip_data,
indicator=ip_standard_context
))
except DemistoException:
# Create the DBotScore structure first using the Common.DBotScore class.
dbot_score = Common.DBotScore(
indicator=ip,
indicator_type=DBotScoreType.IP,
integration_name='Pulsedive',
score=Common.DBotScore.NONE,
malicious_description='Pulsedive returned reputation None'
)
# Create the IP Standard Context structure using Common.IP and add
# dbot_score to it.
ip_standard_context = Common.IP(
ip=ip,
dbot_score=dbot_score
)
command_results.append(CommandResults(
readable_output=str(ip) + ' not found in indicator data',
outputs_prefix='Pulsedive.IP',
outputs_key_field='indicator',
indicator=ip_standard_context
))
return command_results
def domain_reputation_command(client: Client, args: Dict[str, Any], api_key) -> List[CommandResults]:
domains = argToList(args.get('domain'))
if len(domains) == 0:
raise ValueError('domain(s) not specified')
command_results: List[CommandResults] = []
for domain in domains:
try:
domain_data = client.get_domain_reputation(domain, api_key)
indicator_domain = domain_data['indicator']
reputation = domain_data['risk']
score = convert_to_xsoar_severity(reputation)
if 'creation_date' in domain_data:
domain_data['creation_date'] = parse_domain_date(domain_data['creation_date'])
if 'expiration_date' in domain_data:
domain_data['expiration_date'] = parse_domain_date(domain_data['expiration_date'])
if 'updated_date' in domain_data:
domain_data['updated_date'] = parse_domain_date(domain_data['updated_date'])
dbot_score = Common.DBotScore(
indicator=indicator_domain,
integration_name='Pulsedive',
indicator_type=DBotScoreType.DOMAIN,
score=score,
malicious_description=f'Pulsedive returned reputation {reputation}'
)
domain_standard_context = Common.Domain(
domain=indicator_domain,
dbot_score=dbot_score
)
command_results.append(CommandResults(
readable_output=tableToMarkdown('Domain Details:', domain_data),
outputs_prefix='Pulsedive.Domain',
outputs_key_field='indicator',
outputs=domain_data,
indicator=domain_standard_context
))
except DemistoException:
# Create the DBotScore structure first using the Common.DBotScore class.
dbot_score = Common.DBotScore(
indicator=domain,
indicator_type=DBotScoreType.DOMAIN,
integration_name='Pulsedive',
score=Common.DBotScore.NONE,
malicious_description='Pulsedive returned reputation None'
)
domain_standard_context = Common.Domain(
domain=domain,
dbot_score=dbot_score
)
command_results.append(CommandResults(
readable_output=str(domain) + ' not found in indicator data',
outputs_prefix='Pulsedive.Domain',
outputs_key_field='indicator',
indicator=domain_standard_context
))
return command_results
def url_reputation_command(client: Client, args: Dict[str, Any], api_key) -> List[CommandResults]:
urls = argToList(args.get('url'))
if len(urls) == 0:
raise ValueError('URL(s) not specified')
command_results: List[CommandResults] = []
for url in urls:
try:
url_data = client.get_url_reputation(url, api_key)
indicator_url = url_data['indicator']
reputation = url_data['risk']
score = convert_to_xsoar_severity(reputation)
dbot_score = Common.DBotScore(
indicator=str(indicator_url),
indicator_type=DBotScoreType.URL,
integration_name='Pulsedive',
score=score,
malicious_description=f'Pulsedive returned reputation {reputation}'
)
url_standard_context = Common.URL(
url=indicator_url,
dbot_score=dbot_score
)
url_data.pop('objects', None)
url_data.pop('nir', None)
command_results.append(CommandResults(
readable_output=tableToMarkdown('URL Details:', url_data),
outputs_prefix='Pulsedive.URL',
outputs_key_field='indicator',
outputs=url_data,
indicator=url_standard_context
))
except DemistoException:
# Create the DBotScore structure first using the Common.DBotScore class.
dbot_score = Common.DBotScore(
indicator=str(url),
indicator_type=DBotScoreType.URL,
integration_name='Pulsedive',
score=Common.DBotScore.NONE,
malicious_description='Pulsedive returned reputation None'
)
url_standard_context = Common.URL(
url=str(url),
dbot_score=dbot_score
)
command_results.append(CommandResults(
readable_output=str(url) + ' not found in indicator data',
outputs_prefix='Pulsedive.URL',
outputs_key_field='indicator',
indicator=url_standard_context
))
return command_results
def scan_value_command(client: Client, args: Dict[str, Any], api_key) -> List[CommandResults]:
values = argToList(args.get('value'))
if len(values) == 0:
raise ValueError('Value(s) not specified')
if args.get('scan_type') == 'passiv':
scan_type_value = '0'
else:
scan_type_value = '1'
command_results: List[CommandResults] = []
for value in values:
try:
value_data = client.post_value_scan(value, scan_type_value, api_key)
value_data.update({'value': value})
command_results.append(CommandResults(
readable_output=tableToMarkdown('Value Details:', value_data),
outputs_prefix='Pulsedive.Scan',
outputs_key_field='value',
outputs=value_data
))
except DemistoException:
raise DemistoException(
f'Failed to execute {demisto.command()} command. Error: Problem submitting the data for scanning'
)
return command_results
def scan_result_command(client: Client, args: Dict[str, Any], api_key) -> List[CommandResults]:
"""
Scan result command
"""
qids = argToList(args.get('qid'))
if len(qids) == 0:
raise ValueError('QID(s) not specified')
command_results: List[CommandResults] = []
for qid in qids:
try:
qid_data = client.get_value_scan(qid, api_key)
if 'data' in qid_data and qid_data['data']:
qid_data.update({'qid': qid, 'indicator': qid_data['data']['indicator']})
if qid_data['data']['type'] == 'url' or qid_data['data']['type'] == 'domain':
try:
screenshot = requests.get(
qid_data['data']['properties']['dom']['screenshot']
)
screenshot_file = fileResult(
qid_data['data']['properties']['dom']['screenshot'],
screenshot.content,
file_type=EntryType.ENTRY_INFO_FILE
)
screenshot_file['Type'] = entryTypes['image']
demisto.results(screenshot_file)
except DemistoException:
raise DemistoException(
f'Failed to execute {demisto.command()} command. Error: Problem getting the screenshot'
)
reputation = qid_data['data']['risk']
score = convert_to_xsoar_severity(reputation)
if qid_data['data']['type'] == 'url':
dbot_score = Common.DBotScore(
indicator=qid_data['data']['indicator'],
indicator_type=DBotScoreType.URL,
integration_name='Pulsedive',
score=score
)
url_indicator = Common.URL(
url=qid_data['data']['indicator'],
dbot_score=dbot_score
)
command_results.append(CommandResults(
readable_output=tableToMarkdown(
'Value Details:',
qid_data,
headers=('indicator', 'qid', 'status', 'success')
),
outputs_prefix='Pulsedive.ScanResult',
outputs_key_field='qid',
outputs=qid_data['data'],
indicator=url_indicator
))
if qid_data['data']['type'] == 'ip':
dbot_score = Common.DBotScore(
indicator=qid_data['data']['indicator'],
indicator_type=DBotScoreType.IP,
integration_name='Pulsedive',
score=score
)
ip_indicator = Common.IP(
ip=qid_data['data']['indicator'],
asn=qid_data['data']['properties']['geo']['asn'],
geo_country=qid_data['data']['properties']['geo']['country'],
port=qid_data['data']['attributes']['port'],
dbot_score=dbot_score
)
command_results.append(CommandResults(
readable_output=tableToMarkdown(
'Value Details:',
qid_data,
headers=('indicator', 'qid', 'status', 'success')
),
outputs_prefix='Pulsedive.ScanResult',
outputs_key_field='qid',
outputs=qid_data['data'],
indicator=ip_indicator
))
if qid_data['data']['type'] == 'domain':
dbot_score = Common.DBotScore(
indicator=qid_data['data']['indicator'],
indicator_type=DBotScoreType.DOMAIN,
integration_name='Pulsedive',
score=score
)
domain_indicator = Common.Domain(
domain=qid_data['data']['indicator'],
domain_status=qid_data['data']['properties']['whois']['status'],
name_servers=qid_data['data']['properties']['whois']['nserver'],
dbot_score=dbot_score
)
command_results.append(CommandResults(
readable_output=tableToMarkdown(
'Value Details:',
qid_data,
headers=('indicator', 'qid', 'status', 'success')
),
outputs_prefix='Pulsedive.ScanResult',
outputs_key_field='qid',
outputs=qid_data['data'],
indicator=domain_indicator
))
else:
command_results.append(CommandResults(
readable_output=tableToMarkdown('Value Details:', qid_data),
outputs_prefix='Pulsedive.ScanResult',
outputs_key_field='qid',
outputs=qid_data
))
except DemistoException:
return_error(
f'Failed to execute {demisto.command()} command. Error: Problem with processing the scan results'
)
return command_results
''' MAIN FUNCTION '''
def main() -> None:
api_key = demisto.params().get('apikey')
base_url = 'https://www.pulsedive.com/api'
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
headers = {'User-Agent': 'XSOAR - Integration'}
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
headers=headers)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
return_results(test_module(client, api_key))
elif demisto.command() == 'ip':
return_results(ip_reputation_command(client, demisto.args(), api_key))
elif demisto.command() == 'domain':
return_results(domain_reputation_command(client, demisto.args(), api_key))
elif demisto.command() == 'url':
return_results(url_reputation_command(client, demisto.args(), api_key))
elif demisto.command() == 'pulsedive-scan':
return_results(scan_value_command(client, demisto.args(), api_key))
elif demisto.command() == 'pulsedive-scan-result':
return_results(scan_result_command(client, demisto.args(), api_key))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
#!/usr/bin/env python3
"""
Purpose
This script dumps comb table of ec curve. When you add a new ec curve, you
can use this script to generate codes to define `<curve>_T` in ecp_curves.c
"""
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import tempfile
HOW_TO_ADD_NEW_CURVE = """
If you are trying to add new curve, you can follow these steps:
1. Define curve parameters (<curve>_p, <curve>_gx, etc...) in ecp_curves.c.
2. Add a macro to define <curve>_T to NULL following these parameters.
3. Build mbedcrypto
4. Run this script with an argument of new curve
5. Copy the output of this script into ecp_curves.c and replace the macro added
in Step 2
6. Rebuild and test if everything is ok
Replace the <curve> in the above with the name of the curve you want to add."""
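# Example invocation (the script path and compiler are illustrative -- adjust to your tree):
#   CC=clang MBEDTLS_LIBRARY_PATH=build/library python3 path/to/this_script.py secp384r1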
CC = os.getenv('CC', 'cc')
MBEDTLS_LIBRARY_PATH = os.getenv('MBEDTLS_LIBRARY_PATH', "library")
SRC_DUMP_COMB_TABLE = r'''
#include <stdio.h>
#include <stdlib.h>
#include "mbedtls/ecp.h"
#include "mbedtls/error.h"
static void dump_mpi_initialize( const char *name, const mbedtls_mpi *d )
{
uint8_t buf[128] = {0};
size_t olen;
uint8_t *p;
olen = mbedtls_mpi_size( d );
mbedtls_mpi_write_binary_le( d, buf, olen );
printf("static const mbedtls_mpi_uint %s[] = {\n", name);
for (p = buf; p < buf + olen; p += 8) {
printf( " BYTES_TO_T_UINT_8( 0x%02X, 0x%02X, 0x%02X, 0x%02X, 0x%02X, 0x%02X, 0x%02X, 0x%02X ),\n",
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7] );
}
printf("};\n");
}
static void dump_T( const mbedtls_ecp_group *grp )
{
char name[128];
printf( "#if MBEDTLS_ECP_FIXED_POINT_OPTIM == 1\n" );
for (size_t i = 0; i < grp->T_size; ++i) {
snprintf( name, sizeof(name), "%s_T_%zu_X", CURVE_NAME, i );
dump_mpi_initialize( name, &grp->T[i].X );
snprintf( name, sizeof(name), "%s_T_%zu_Y", CURVE_NAME, i );
dump_mpi_initialize( name, &grp->T[i].Y );
}
printf( "static const mbedtls_ecp_point %s_T[%zu] = {\n", CURVE_NAME, grp->T_size );
size_t olen;
for (size_t i = 0; i < grp->T_size; ++i) {
int z;
if ( mbedtls_mpi_cmp_int(&grp->T[i].Z, 0) == 0 ) {
z = 0;
} else if ( mbedtls_mpi_cmp_int(&grp->T[i].Z, 1) == 0 ) {
z = 1;
} else {
fprintf( stderr, "Unexpected value of Z (i = %d)\n", (int)i );
exit( 1 );
}
printf( " ECP_POINT_INIT_XY_Z%d(%s_T_%zu_X, %s_T_%zu_Y),\n",
z,
CURVE_NAME, i,
CURVE_NAME, i
);
}
printf("};\n#endif\n\n");
}
int main()
{
int rc;
mbedtls_mpi m;
mbedtls_ecp_point R;
mbedtls_ecp_group grp;
mbedtls_ecp_group_init( &grp );
rc = mbedtls_ecp_group_load( &grp, CURVE_ID );
if (rc != 0) {
char buf[100];
mbedtls_strerror( rc, buf, sizeof(buf) );
fprintf( stderr, "mbedtls_ecp_group_load: %s (-0x%x)\n", buf, -rc );
return 1;
}
grp.T = NULL;
mbedtls_ecp_point_init( &R );
mbedtls_mpi_init( &m);
mbedtls_mpi_lset( &m, 1 );
rc = mbedtls_ecp_mul( &grp, &R, &m, &grp.G, NULL, NULL );
if ( rc != 0 ) {
char buf[100];
mbedtls_strerror( rc, buf, sizeof(buf) );
fprintf( stderr, "mbedtls_ecp_mul: %s (-0x%x)\n", buf, -rc );
return 1;
}
if ( grp.T == NULL ) {
fprintf( stderr, "grp.T is not generated. Please make sure"
"MBEDTLS_ECP_FIXED_POINT_OPTIM is enabled in mbedtls_config.h\n" );
return 1;
}
dump_T( &grp );
return 0;
}
'''
SRC_DUMP_KNOWN_CURVE = r'''
#include <stdio.h>
#include <stdlib.h>
#include "mbedtls/ecp.h"
int main() {
const mbedtls_ecp_curve_info *info = mbedtls_ecp_curve_list();
mbedtls_ecp_group grp;
mbedtls_ecp_group_init( &grp );
while ( info->name != NULL ) {
mbedtls_ecp_group_load( &grp, info->grp_id );
if ( mbedtls_ecp_get_type(&grp) == MBEDTLS_ECP_TYPE_SHORT_WEIERSTRASS ) {
printf( " %s", info->name );
}
info++;
}
printf( "\n" );
return 0;
}
'''
def join_src_path(*args):
return os.path.normpath(os.path.join(os.path.dirname(__file__), "..", *args))
def run_c_source(src, cflags):
"""
Compile and run C source code
:param src: the c language code to run
:param cflags: additional cflags passing to compiler
:return:
"""
binname = tempfile.mktemp(prefix="mbedtls")
fd, srcname = tempfile.mkstemp(prefix="mbedtls", suffix=".c")
srcfile = os.fdopen(fd, mode="w")
srcfile.write(src)
srcfile.close()
args = [CC,
*cflags,
'-I' + join_src_path("include"),
"-o", binname,
'-L' + MBEDTLS_LIBRARY_PATH,
srcname,
'-lmbedcrypto']
p = subprocess.run(args=args, check=False)
if p.returncode != 0:
return False
p = subprocess.run(args=[binname], check=False, env={
'LD_LIBRARY_PATH': MBEDTLS_LIBRARY_PATH
})
if p.returncode != 0:
return False
os.unlink(srcname)
os.unlink(binname)
return True
def compute_curve(curve):
"""compute comb table for curve"""
r = run_c_source(
SRC_DUMP_COMB_TABLE,
[
'-g',
'-DCURVE_ID=MBEDTLS_ECP_DP_%s' % curve.upper(),
'-DCURVE_NAME="%s"' % curve.lower(),
])
if not r:
print("""\
Unable to compile and run utility.""", file=sys.stderr)
sys.exit(1)
def usage():
print("""
Usage: python %s <curve>...
Arguments:
curve Specify one or more curve names (e.g secp256r1)
All possible curves: """ % sys.argv[0])
run_c_source(SRC_DUMP_KNOWN_CURVE, [])
print("""
Environment Variable:
CC Specify which C compiler to use to compile the utility.
MBEDTLS_LIBRARY_PATH
Specify the path to mbedcrypto library. (e.g. build/library/)
How to add a new curve: %s""" % HOW_TO_ADD_NEW_CURVE)
def run_main():
shared_lib_path = os.path.normpath(os.path.join(MBEDTLS_LIBRARY_PATH, "libmbedcrypto.so"))
static_lib_path = os.path.normpath(os.path.join(MBEDTLS_LIBRARY_PATH, "libmbedcrypto.a"))
if not os.path.exists(shared_lib_path) and not os.path.exists(static_lib_path):
print("Warning: neither '%s' nor '%s' exists. This script will use "
"the library from your system instead of the library compiled in "
"this source directory.\n"
"You can specify library path using environment variable "
"'MBEDTLS_LIBRARY_PATH'." % (shared_lib_path, static_lib_path),
file=sys.stderr)
if len(sys.argv) <= 1:
usage()
else:
for curve in sys.argv[1:]:
compute_curve(curve)
if __name__ == '__main__':
run_main()
|
"""
Ensures the functionality of PyMotifCounterBase.
:author: Athanasios Anastasiou
:date: Nov 2021
"""
import pytest
import re
from pymotifcounter.abstractcounter import (PyMotifCounterBase,
PyMotifCounterInputTransformerBase,
PyMotifCounterOutputTransformerBase)
from pymotifcounter.parameters import (PyMotifCounterParameterInt,
PyMotifCounterParameterStr,
PyMotifCounterParameterFilepath)
from pymotifcounter.exceptions import PyMotifCounterError
def test_init_binary_is_invalid():
"""
Tests the initialisation of a plain PyMotifCounter.
:raises: PyMotifCounterError if the counter is initialised without pointing to a valid binary
"""
with pytest.raises(PyMotifCounterError):
bn = PyMotifCounterBase(binary_location="/some/path",
input_parameter=PyMotifCounterParameterFilepath("i", alias="test_input", ),
output_parameter=PyMotifCounterParameterFilepath("o", alias="test_output", ),
input_transformer=PyMotifCounterInputTransformerBase(),
output_transformer=PyMotifCounterOutputTransformerBase(),
parameters=[])
def test_set_duplicate_parameter_error():
"""
Ensures that PyMotifCounterBase object does not allow overwriting of parameters
"""
p1 = PyMotifCounterParameterStr("s", alias="Size", default_value="ALPHA")
p2 = PyMotifCounterParameterInt("q", alias="Size", default_value=22)
p3 = PyMotifCounterParameterStr("m", alias="Method", default_value="Gaussian")
p4 = PyMotifCounterParameterInt("m", alias="MethaneLevel", default_value=22)
b = PyMotifCounterBase(binary_location="/bin/bash",
input_parameter=PyMotifCounterParameterFilepath("i", alias="test_input",),
output_parameter=PyMotifCounterParameterFilepath("o", alias="test_output",),
input_transformer=PyMotifCounterInputTransformerBase(),
output_transformer=PyMotifCounterOutputTransformerBase(),
parameters=[p1, p3])
# Test same alias, this should fail
with pytest.raises(PyMotifCounterError):
b.add_parameter(p2)
# Test same name, this should fail
with pytest.raises(PyMotifCounterError):
b.add_parameter(p4)
def test_get_unknown_parameter_error():
"""
Ensures that PyMotifCounterBase raises an error if an attempt is made to address an unknown parameter.
"""
p1 = PyMotifCounterParameterStr("s", alias="Size", default_value="ALPHA")
p3 = PyMotifCounterParameterStr("m", alias="Method", default_value="Gaussian")
b = PyMotifCounterBase(binary_location="/bin/bash",
input_parameter=PyMotifCounterParameterFilepath("i", alias="test_input", ),
output_parameter=PyMotifCounterParameterFilepath("o", alias="test_output", ),
input_transformer=PyMotifCounterInputTransformerBase(),
output_transformer=PyMotifCounterOutputTransformerBase(),
parameters=[p1, p3])
with pytest.raises(PyMotifCounterError):
b.get_parameter("l")
|
#!/usr/bin/env python
__author__ = "XXX"
__email__ = "XXX"
import logging
import os
import pandas as pd
from constants import *
from datasets.dataset_loader import DatasetLoader
from utils.pandas_utils import remap_columns_consecutive
logger = logging.getLogger(__name__)
logging.basicConfig(level=os.environ.get("LOGLEVEL"))
class Gowalla(
DatasetLoader,
name="Gowalla",
directory_name="gowalla",
url="http://snap.stanford.edu/data/loc-gowalla_totalCheckins.txt.gz",
url_archive_format=None,
expected_files=["loc-gowalla_totalCheckins.txt.gz"],
description="""Gowalla is a location-based social networking website where users
share their locations by checking-in. The friendship network is undirected and was
collected using their public API, and consists of 196,591 nodes and 950,327 edges.
We have collected a total of 6,442,890 check-ins of these users over the period of Feb. 2009 - Oct. 2010.
""",
source="https://snap.stanford.edu/data/loc-Gowalla.html",
):
pass
def load(self):
self.download()
data_path = [self._resolve_path(path) for path in self.expected_files]
df = pd.read_csv(
data_path[0],
sep="\t",
names=["userID", "check-in time", "latitude", "longitude", "itemID"],
)
# remove unused columns and drop duplicates
df = df.drop(["check-in time", "latitude", "longitude"], axis=1).drop_duplicates()
# remap users and artists idxs
remap_columns_consecutive(df, columns_names=["userID", "itemID"])
df = df.rename(columns={"userID": DEFAULT_USER_COL, "itemID": DEFAULT_ITEM_COL})
logging.info(f"dataset interactions: {len(df)}")
return df
|
from typing import Any, Union
import metagraph as mg
def range_equivalent(a: Union[range, Any], b: Union[range, Any]):
"""
:return: True if two range-like objects have the same start, stop, and step;
they need not be the same type.
"""
return a.start == b.start and a.stop == b.stop and a.step == b.step
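# For instance, range_equivalent(range(0, 3), range(0, 3, 1)) is True (both have
# start=0, stop=3, step=1), while range_equivalent(range(0, 3), range(0, 4)) is False;
# any object exposing start/stop/step attributes compares the same way.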
def test_num_nodes(kg_from_nx_di_8_12):
nodes_total = 0
for nid in kg_from_nx_di_8_12.value.nodes():
nodes_total += 1
assert kg_from_nx_di_8_12.value.num_nodes() == nodes_total
assert kg_from_nx_di_8_12.value.num_nodes() == 8
def test_num_edges(kg_from_nx_di_8_12):
edges_total = 0
for nid in kg_from_nx_di_8_12.value.nodes():
edges_total += len(kg_from_nx_di_8_12.value.out_edge_ids(nid))
assert kg_from_nx_di_8_12.value.num_edges() == edges_total
assert kg_from_nx_di_8_12.value.num_edges() == 12
def test_topology(kg_from_nx_di_8_12):
assert range_equivalent(kg_from_nx_di_8_12.value.out_edge_ids(0), range(0, 3))
assert range_equivalent(kg_from_nx_di_8_12.value.out_edge_ids(1), range(3, 5))
assert range_equivalent(kg_from_nx_di_8_12.value.out_edge_ids(2), range(5, 8))
assert range_equivalent(kg_from_nx_di_8_12.value.out_edge_ids(3), range(8, 9))
assert range_equivalent(kg_from_nx_di_8_12.value.out_edge_ids(4), range(9, 10))
assert range_equivalent(kg_from_nx_di_8_12.value.out_edge_ids(5), range(10, 12))
assert [kg_from_nx_di_8_12.value.get_edge_dst(i) for i in kg_from_nx_di_8_12.value.out_edge_ids(0)] == [1, 3, 4]
assert [kg_from_nx_di_8_12.value.get_edge_dst(i) for i in kg_from_nx_di_8_12.value.out_edge_ids(2)] == [4, 5, 6]
assert [kg_from_nx_di_8_12.value.get_edge_dst(i) for i in kg_from_nx_di_8_12.value.out_edge_ids(4)] == [7]
assert [kg_from_nx_di_8_12.value.get_edge_dst(i) for i in kg_from_nx_di_8_12.value.out_edge_ids(5)] == [6, 7]
def test_schema(kg_from_nx_di_8_12):
assert len(kg_from_nx_di_8_12.value.loaded_node_schema()) == 0
assert len(kg_from_nx_di_8_12.value.loaded_edge_schema()) == 1
def test_edge_property_directed(kg_from_nx_di_8_12):
assert kg_from_nx_di_8_12.value.loaded_edge_schema()[0].name == "value_from_translator"
assert [v.as_py() for v in kg_from_nx_di_8_12.value.get_edge_property("value_from_translator")] == [
4,
2,
7,
3,
5,
5,
2,
8,
1,
4,
4,
6,
]
def test_compare_node_count(nx_from_kg_di_8_12, katanagraph_cleaned_8_12_di):
nlist = [each_node[0] for each_node in list(nx_from_kg_di_8_12.value.nodes(data=True))]
num_no_edge_nodes = 0
for nid in katanagraph_cleaned_8_12_di.value.nodes():
if nid not in nlist:
assert katanagraph_cleaned_8_12_di.value.out_edge_ids(nid) == range(0, 0)
num_no_edge_nodes += 1
assert num_no_edge_nodes + len(nlist) == katanagraph_cleaned_8_12_di.value.num_nodes()
assert num_no_edge_nodes == 0
def test_compare_edge_count(nx_from_kg_di_8_12, katanagraph_cleaned_8_12_di):
edge_dict_count = {(each_e[0], each_e[1]): 0 for each_e in list(nx_from_kg_di_8_12.value.edges(data=True))}
for src in katanagraph_cleaned_8_12_di.value.nodes():
for dest in [
katanagraph_cleaned_8_12_di.value.get_edge_dst(e)
for e in katanagraph_cleaned_8_12_di.value.out_edge_ids(src)
]:
if (src, dest) in edge_dict_count:
edge_dict_count[(src, dest)] += 1
assert sum([edge_dict_count[i] for i in edge_dict_count]) == katanagraph_cleaned_8_12_di.value.num_edges()
assert len(list(nx_from_kg_di_8_12.value.edges(data=True))) == katanagraph_cleaned_8_12_di.value.num_edges()
|
"""empty message
Revision ID: 5f6cccd86344
Revises: 6ea519fd23ca
Create Date: 2021-12-20 14:48:35.949936
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import column, table
from sqlalchemy.sql.sqltypes import Boolean, String
# revision identifiers, used by Alembic.
revision = '5f6cccd86344'
down_revision = '6ea519fd23ca'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('NotificationTypes',
sa.Column('notificationtypeid', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('name', sa.String(length=100), nullable=False),
sa.Column('description', sa.String(length=255), nullable=True),
sa.Column('isactive', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('notificationtypeid')
)
notification_type_table = table('NotificationTypes',
column('name',String),
column('description',String),
column('isactive',Boolean),
)
op.bulk_insert(
notification_type_table,
[
{'name':'State','description':'State','isactive':True},
{'name':'Division','description':'Division','isactive':True},
{'name':'User Comments','description':'User Comments','isactive':True},
{'name':'Extension','description':'Extension','isactive':True},
{'name':'Assignment','description':'Assignment','isactive':True},
]
)
op.create_table('NotificationUserTypes',
sa.Column('notificationusertypeid', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('name', sa.String(length=100), nullable=False),
sa.Column('description', sa.String(length=255), nullable=True),
sa.Column('isactive', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('notificationusertypeid')
)
notification_usertype_table = table('NotificationUserTypes',
column('name',String),
column('description',String),
column('isactive',Boolean),
)
op.bulk_insert(
notification_usertype_table,
[
{'name':'Watcher','description':'Watcher','isactive':True},
{'name':'Assignee','description':'Assignee','isactive':True},
{'name':'Tagged User','description':'Tagged User','isactive':True}
]
)
op.create_table('FOIRequestNotifications',
sa.Column('notificationid', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('notification', sa.TEXT(), nullable=True),
sa.Column('notificationtypeid', sa.Integer(), nullable=False),
sa.Column('requestid', sa.Integer(), nullable=True),
sa.Column('version', sa.Integer(), nullable=True),
sa.Column('idnumber', sa.String(length=50), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('createdby', sa.String(length=120), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('updatedby', sa.String(length=120), nullable=True),
sa.ForeignKeyConstraint(['requestid', 'version'], ['FOIMinistryRequests.foiministryrequestid', 'FOIMinistryRequests.version'], ),
sa.ForeignKeyConstraint(['notificationtypeid'], ['NotificationTypes.notificationtypeid']),
sa.PrimaryKeyConstraint('notificationid')
)
op.create_table('FOIRequestNotificationUsers',
sa.Column('notificationuserid', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('notificationid', sa.Integer(), nullable=False),
sa.Column('userid', sa.String(length=100), nullable=False),
sa.Column('notificationusertypeid', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('createdby', sa.String(length=120), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('updatedby', sa.String(length=120), nullable=True),
sa.ForeignKeyConstraint(['notificationid'], ['FOIRequestNotifications.notificationid']),
sa.ForeignKeyConstraint(['notificationusertypeid'], ['NotificationUserTypes.notificationusertypeid']),
sa.PrimaryKeyConstraint('notificationuserid')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('FOIRequestNotificationUsers')
op.drop_table('FOIRequestNotifications')
op.drop_table('NotificationUserTypes')
op.drop_table('NotificationTypes')
# ### end Alembic commands ###
|
from typing import Any, Dict
import torch
from rllib.environment import SystemEnvironment
from rllib.environment.systems import InvertedPendulum
from exps.inverted_pendulum.util import PendulumReward
from curriculum_experiments.environment_parameter import ContinuousParameter
from curriculum_experiments.environment_wrapper import EnvironmentWrapper
import numpy as np
def large_state_termination(state, action, next_state=None):
"""Termination condition for environment."""
if not isinstance(state, torch.Tensor):
state = torch.tensor(state)
if not isinstance(action, torch.Tensor):
action = torch.tensor(action)
done = torch.any(torch.abs(state) > 200, dim=-1) | torch.any(
torch.abs(action) > 200, dim=-1
)
return (
torch.zeros(*done.shape, 2)
.scatter_(dim=-1, index=(~done).long().unsqueeze(-1), value=-float("inf"))
.squeeze(-1)
)
class PendulumContinuousWrapper(EnvironmentWrapper):
def __init__(self):
super().__init__()
self.name = "Pendulum-v2"
self.parameters = {
"mass": ContinuousParameter(0.1, 3),
"length": ContinuousParameter(0.1, 5),
"friction": ContinuousParameter(0.001, 0.1),
"gravity": ContinuousParameter(5, 20),
}
def create_env(self, parameter_values: Dict[str, Any]):
initial_distribution = torch.distributions.Uniform(
torch.tensor([np.pi, -0.0]), torch.tensor([np.pi, +0.0])
)
reward_model = PendulumReward(action_cost=0.1)
environment = SystemEnvironment(
InvertedPendulum(mass=parameter_values["mass"],
length=parameter_values["length"],
friction=parameter_values["friction"],
step_size=1 / 80,
gravity=parameter_values["gravity"]),
reward=reward_model,
initial_state=initial_distribution.sample,
termination_model=large_state_termination,
)
environment.reset()
return environment
|
import esphome.codegen as cg
from esphome import pins
import esphome.config_validation as cv
from esphome.const import (
CONF_DELAY,
CONF_ID,
)
AUTO_LOAD = ["sensor", "voltage_sampler"]
CODEOWNERS = ["@asoehlke"]
MULTI_CONF = True
cd74hc4067_ns = cg.esphome_ns.namespace("cd74hc4067")
CD74HC4067Component = cd74hc4067_ns.class_(
"CD74HC4067Component", cg.Component, cg.PollingComponent
)
CONF_PIN_S0 = "pin_s0"
CONF_PIN_S1 = "pin_s1"
CONF_PIN_S2 = "pin_s2"
CONF_PIN_S3 = "pin_s3"
DEFAULT_DELAY = "2ms"
CONFIG_SCHEMA = cv.Schema(
{
cv.GenerateID(): cv.declare_id(CD74HC4067Component),
cv.Required(CONF_PIN_S0): pins.internal_gpio_output_pin_schema,
cv.Required(CONF_PIN_S1): pins.internal_gpio_output_pin_schema,
cv.Required(CONF_PIN_S2): pins.internal_gpio_output_pin_schema,
cv.Required(CONF_PIN_S3): pins.internal_gpio_output_pin_schema,
cv.Optional(
CONF_DELAY, default=DEFAULT_DELAY
): cv.positive_time_period_milliseconds,
}
).extend(cv.COMPONENT_SCHEMA)
async def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
await cg.register_component(var, config)
pin_s0 = await cg.gpio_pin_expression(config[CONF_PIN_S0])
cg.add(var.set_pin_s0(pin_s0))
pin_s1 = await cg.gpio_pin_expression(config[CONF_PIN_S1])
cg.add(var.set_pin_s1(pin_s1))
pin_s2 = await cg.gpio_pin_expression(config[CONF_PIN_S2])
cg.add(var.set_pin_s2(pin_s2))
pin_s3 = await cg.gpio_pin_expression(config[CONF_PIN_S3])
cg.add(var.set_pin_s3(pin_s3))
cg.add(var.set_switch_delay(config[CONF_DELAY]))
|
# -*- coding: utf-8 -*-
import unittest
import numpy as np
from . import solve
class TestSolver(unittest.TestCase):
_FIELDS = [
[
[7, 0, 0, 3, 0, 0, 2, 0, 6],
[0, 0, 2, 0, 5, 8, 0, 0, 0],
[8, 3, 0, 0, 0, 7, 0, 4, 9],
[3, 9, 0, 0, 0, 0, 8, 5, 4],
[0, 0, 0, 7, 0, 3, 0, 0, 0],
[1, 2, 8, 0, 0, 0, 0, 6, 7],
[6, 8, 0, 5, 0, 0, 0, 2, 3],
[0, 0, 0, 8, 9, 0, 4, 0, 0],
[4, 0, 5, 0, 0, 1, 0, 0, 8],
],
[
[6, 3, 4, 0, 1, 5, 0, 0, 0],
[0, 0, 0, 6, 4, 0, 5, 0, 9],
[5, 0, 1, 2, 7, 8, 0, 0, 3],
[4, 0, 7, 3, 0, 9, 0, 8, 1],
[9, 8, 0, 4, 2, 1, 0, 5, 7],
[3, 0, 2, 8, 0, 7, 4, 9, 6],
[0, 2, 5, 0, 8, 0, 9, 0, 0],
[8, 6, 3, 0, 9, 0, 1, 7, 2],
[0, 4, 0, 0, 3, 2, 0, 6, 0],
],
[
[0, 3, 4, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 6, 0, 0, 5, 0, 9],
[5, 0, 1, 0, 7, 0, 0, 0, 3],
[4, 0, 7, 0, 0, 0, 0, 8, 1],
[9, 0, 0, 0, 2, 0, 0, 5, 7],
[3, 0, 2, 8, 0, 7, 0, 9, 6],
[0, 2, 5, 0, 8, 0, 0, 0, 0],
[8, 6, 3, 0, 9, 0, 1, 0, 2],
[0, 4, 0, 0, 3, 0, 0, 6, 0],
],
[
[0, 6, 7, 8, 0, 0, 5, 4, 0],
[2, 0, 0, 0, 3, 0, 0, 0, 7],
[0, 4, 9, 0, 7, 0, 8, 0, 0],
[0, 3, 0, 0, 0, 7, 9, 8, 4],
[0, 0, 0, 2, 0, 5, 0, 0, 0],
[7, 8, 6, 4, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 5, 0, 4, 2, 0],
[8, 0, 0, 0, 4, 0, 0, 0, 3],
[0, 9, 3, 0, 0, 2, 1, 5, 0],
],
[
[0, 4, 8, 3, 0, 6, 0, 5, 0],
[0, 0, 9, 0, 2, 0, 6, 0, 8],
[0, 0, 2, 0, 1, 0, 0, 0, 7],
[2, 0, 6, 0, 3, 0, 0, 0, 5],
[0, 0, 3, 0, 0, 9, 8, 0, 0],
[8, 0, 0, 0, 7, 4, 9, 0, 2],
[5, 0, 0, 0, 8, 0, 7, 0, 0],
[9, 0, 4, 0, 6, 0, 5, 0, 0],
[0, 8, 0, 5, 0, 2, 1, 6, 0],
],
[
[4, 0, 0, 2, 0, 0, 0, 3, 0],
[0, 0, 0, 0, 0, 3, 0, 0, 4],
[0, 6, 0, 7, 0, 0, 0, 0, 9],
[0, 0, 1, 8, 5, 0, 6, 0, 0],
[0, 0, 5, 4, 0, 0, 2, 0, 0],
[0, 0, 7, 0, 1, 0, 3, 0, 0],
[1, 0, 0, 0, 0, 9, 0, 5, 0],
[3, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 7, 0, 0, 0, 4, 0, 0, 3],
],
[
[0, 0, 0, 0, 0, 7, 5, 0, 0],
[7, 0, 0, 1, 0, 0, 0, 4, 0],
[5, 0, 0, 0, 0, 0, 2, 0, 0],
[0, 0, 1, 3, 9, 0, 0, 0, 8],
[3, 0, 0, 7, 8, 6, 0, 0, 4],
[8, 0, 0, 0, 4, 1, 7, 0, 0],
[0, 0, 8, 0, 0, 0, 0, 0, 9],
[0, 5, 0, 0, 0, 3, 0, 0, 1],
[0, 0, 4, 6, 0, 0, 0, 0, 0],
],
[
[8, 0, 0, 0, 1, 0, 0, 0, 9],
[0, 5, 0, 8, 0, 7, 0, 1, 0],
[0, 0, 4, 0, 9, 0, 7, 0, 0],
[0, 6, 0, 7, 0, 1, 0, 2, 0],
[5, 0, 8, 0, 6, 0, 1, 0, 7],
[0, 1, 0, 5, 0, 2, 0, 9, 0],
[0, 0, 7, 0, 4, 0, 6, 0, 0],
[0, 8, 0, 3, 0, 9, 0, 4, 0],
[3, 0, 0, 0, 5, 0, 0, 0, 8],
],
[
[0, 0, 0, 6, 0, 4, 7, 0, 0],
[7, 0, 6, 0, 0, 0, 0, 0, 9],
[0, 0, 0, 0, 0, 5, 0, 8, 0],
[0, 7, 0, 0, 2, 0, 0, 9, 3],
[8, 0, 0, 0, 0, 0, 0, 0, 5],
[4, 3, 0, 0, 1, 0, 0, 7, 0],
[0, 5, 0, 2, 0, 0, 0, 0, 0],
[3, 0, 0, 0, 0, 0, 2, 0, 8],
[0, 0, 2, 3, 0, 1, 0, 0, 0]
],
[
[0, 3, 9, 1, 0, 0, 0, 0, 0],
[4, 0, 8, 0, 6, 0, 0, 0, 2],
[2, 0, 0, 5, 8, 0, 7, 0, 0],
[8, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 2, 0, 0, 0, 9, 0, 0, 0],
[3, 0, 6, 0, 0, 0, 0, 4, 9],
[0, 0, 0, 0, 1, 0, 0, 3, 0],
[0, 4, 0, 3, 0, 0, 0, 0, 8],
[7, 0, 0, 0, 0, 0, 4, 0, 0]
]
]
def test_solvable(self):
for f in self._FIELDS:
with self.subTest(f=f):
array = np.array(f)
solution = solve(array)
self._check_solution(solution)
    def _check_solution(self, solution: np.ndarray):
expected = set(range(1, 10))
for i_row in range(9):
self.assertSetEqual(set(solution[i_row, :]), expected)
for i_col in range(9):
self.assertSetEqual(set(solution[:, i_col]), expected)
for box_row in range(3):
for box_col in range(3):
actual = set(solution[box_row * 3:(box_row + 1) * 3, box_col * 3:(box_col + 1) * 3].reshape(-1))
self.assertSetEqual(actual, expected)
|
import logging
import traceback
import pandas as pd
from morpher.jobs import MorpherJob
"""
Select Job makes it possible to access a range of feature selection techniques with a unified interface
Upon calling .execute() you can specify the feature selection method and how many features shall be returned.
"""
class Select(MorpherJob):
def do_execute(self):
filename = self.get_input("filename")
df = pd.read_csv(filepath_or_buffer=filename)
selection_method = self.get_input_variables("selection_method")
        df, selected_features = self.execute(df, selection_method=selection_method)
self.add_output("filename", self.save_to_file(df))
self.add_output("cohort_id", self.get_input("cohort_id"))
self.add_output("user_id", self.get_input("user_id"))
self.logger.info("Features selected successfully.")
"""
Selects features in a given dataset using a number of predefined feature selection techniques
Params:
data the data which from which features shall be selected (Pandas DataFrame)
selection_method method to use for selection, Mutual Information, F-Test, Boruta or ElasticNet
target target variable (binary outcome)
verbose tells us whether to print additional information (default=False)
kwargs with kwargs one can provide specific instructions to any of the classifiers
"""
def execute(
self, data, selection_method=None, target=None, top=None, reverse=False, verbose=False, **kwargs
):
try:
if not data.empty:
features, labels = data.drop(target, axis=1), data[target]
if selection_method:
""" here for compatibility purposes """
if not callable(selection_method):
selection_method = self.get_callable('morpher.selectors', selection_method)
if verbose:
print(
f"Performing feature selection with {selection_method.__name__} ..."
)
n_top = top or features.shape[1]
selector = selection_method(top=n_top, **kwargs)
if verbose:
print(f"Total features prior to selection: {features.shape[1]}")
                        for feat in features.columns:
                            print(f"{feat}")
selector.fit(features.to_numpy(), labels.to_numpy())
''' Empty selector will return empty list '''
if len(selector.get_indices()) > 0:
features = features.iloc[:, selector.get_indices(reverse=reverse)]
if verbose:
print(f"Total features after selection: {features.shape[1]}")
                            for feat in features.columns:
                                print(f"{feat}")
data = pd.concat(
[features, labels.to_frame()], axis=1
)
else:
print("No selection method provided, features remained untouched.")
else:
raise AttributeError("No data provided")
except Exception:
print(traceback.format_exc())
logging.error(traceback.format_exc())
return data, features.columns.tolist()
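# A minimal usage sketch (illustration only): the method name "mutual_info" and the
# column name "outcome" below are assumptions, not part of this module.
#
#     job = Select()
#     reduced_df, kept_columns = job.execute(
#         df, selection_method="mutual_info", target="outcome", top=10, verbose=True
#     )
#
# execute() returns the reduced DataFrame (selected features plus the target column)
# together with the list of selected feature names.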
|
from glob import glob
import argparse, json, re
WORD = re.compile(r'[\w\.]+')
def ttJson(name, directory=False):
if directory:
name = glob(name + "/*.txt")
else:
name = [name]
for ids, fname in enumerate(name):
        with open("".join(fname.split(".")[:-1]) + ".json", 'w') as writer:
            with open(fname, 'r') as reader:
                text = reader.read()
            text = " ".join(WORD.findall(text))
            writer.write(json.dumps({"id": str(ids + 100), "text": text}))
    print("Done with converting")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dir", type=str, help='please type the name of the directory which have your .txt file : \
python cnv.py -d "temp dir"')
parser.add_argument("-f", "--file", type=str, help='please type the name of the file which have want to convert into json : \
python cnv.py -f "tempfile.txt"')
parser.add_argument("-c", "--clear", type=int, choices=[0,1], default=0, help="You want to delete all of the \
.txt file , 0 for yes, 1 for no")
args = parser.parse_args()
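    # note: the --clear flag is parsed above but is not acted on anywhere in this script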
if args.dir:
ttJson(args.dir, directory=True)
if args.file:
ttJson(args.file)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from flask import Flask
from werkzeug.serving import run_simple
from blueprints.web import web_bp
def create_app(config=False):
# send global static files to a junk /nostatic path
    app = Flask(__name__, static_url_path='/nostatic')
# generate configuration from environmental variable
if not config:
config = os.environ.get('APP_SETTINGS', 'config.DevelopmentConfig')
app.config.from_object(config)
app.register_blueprint(web_bp)
return app
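# A usage note (the class name below is an assumption, not defined here): the
# configuration object can be selected via the environment, e.g.
#
#     APP_SETTINGS=config.ProductionConfig python <this_file>.py
#
# otherwise config.DevelopmentConfig is used as the default above.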
if __name__ == '__main__':
app = create_app()
run_simple('0.0.0.0', app.config.get('PORT', 8080), app,
use_reloader=app.config.get('RELOADER', True),
use_debugger=app.config.get('DEBUG', True))
|
# init file for stan fit functions
import xidplus.io as io
__author__ = 'pdh21'
import os
output_dir = os.getcwd()
full_path = os.path.realpath(__file__)
path, file = os.path.split(full_path)
stan_path=os.path.split(os.path.split(path)[0])[0]+'/stan_models/'
import pystan
import pickle
import inspect
def get_stancode(model_file):
"""
    Check if existing compiled stan code exists and matches the current git version; otherwise compile and save.
:param model_file: filename of stan model
:return: compiled stan code
"""
try:
with open(output_dir+model_file+'.pkl', 'rb') as f:
# using the same model as before
obj = pickle.load(f)
sm=obj['sm']
if obj['version'] != io.git_version(): raise IOError
print("%s found. Reusing" % model_file)
except IOError as e:
print("%s not found or wrong version. Compiling" % model_file)
sm = pystan.StanModel(file=stan_path+model_file+'.stan')
        # save the compiled model to '<model_file>.pkl' for later reuse
with open(output_dir+model_file+'.pkl', 'wb') as f:
pickle.dump({'sm': sm, 'version': io.git_version()}, f)
return sm
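# A minimal usage sketch (illustration only; 'my_model' is an assumed file name):
#
#     sm = get_stancode('my_model')   # compiles stan_models/my_model.stan or reuses the cached pickle
#     fit = sm.sampling(data=stan_data)
#
# Note that the pickle is looked up relative to the current working directory
# (output_dir above), so the cache is per working directory.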
|
# -*- coding: utf-8 -*-
# Author: github.com/madhavajay
"""Fixes pyo3 mixed modules for import in python"""
import importlib
import os
from typing import Dict, Any, List, Optional
# gets the name of the top level module / package
package_name = __name__.split(".")[0]
# convert the subdirs from "package_name" into a list of sub module names
def get_module_name_from_init_path(path: str) -> List[str]:
_, path_and_file = os.path.splitdrive(os.path.dirname(path))
module_path = path_and_file.split(package_name)[-1]
parts = module_path.split(os.path.sep)[1:]
return parts
# step through the main base module from rust at myproj.myproj and unpack each level
def unpack_module_from_parts(module: Any, module_parts: List[str]) -> Any:
for part in module_parts:
module = getattr(module, part)
return module
# take the local scope of the caller and populate it with the correct properties
def fix_imports(lcl: Dict[str, Any], init_file_path: str, debug: bool = False) -> None:
# rust library is available as package_name.package_name
import_string = f".{package_name}"
base_module = importlib.import_module(import_string, package=package_name)
module_parts = get_module_name_from_init_path(init_file_path)
submodule = unpack_module_from_parts(base_module, module_parts)
if debug:
module_path = ".".join(module_parts)
print(f"Parsed module_name: {module_path} from: {init_file_path}")
# re-export functions
keys = ["builtin_function_or_method", "module"]
for k in dir(submodule):
if type(getattr(submodule, k)).__name__ in keys:
if debug:
print(f"Loading: {submodule}.{k}")
lcl[k] = getattr(submodule, k)
# re-export a python module, class or function onto the current module level
def reexport(lcl: Dict[str, Any], obj: Any, alt_name: Optional[str] = None) -> None:
key = obj.__name__
if alt_name is not None:
key = alt_name
lcl[key] = obj
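# A minimal usage sketch (the package layout below is an assumption, not part of
# this helper): in a submodule __init__.py of a pyo3 mixed package one might write
#
#     from mypkg.fix_imports import fix_imports   # assumed import path
#     fix_imports(locals(), __file__)
#
# which copies the rust-backed functions and modules found at
# mypkg.mypkg.<submodule> into the pure-python mypkg.<submodule> namespace.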
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-12-09 20:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registration', '0020_auto_20171209_1510'),
]
operations = [
migrations.AlterField(
model_name='userinfo',
name='grad_year',
field=models.IntegerField(blank=True, default=None, null=True, verbose_name='graduation year'),
),
]
|
from django.urls import path
from . import views
app_name = 'modelzoo'
urlpatterns = [
path('', views.IndexView.as_view(), name='index'),
path('<slug:slug>/', views.DetailView.as_view(), name='detail'),
path('nb/<slug:slug>/', views.DetailView_nb.as_view(), name='detail_nb'),
path('api/hello/', views.hello, name='hello'),
]
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Find Nash equilibria for constant- or general-sum 2-player games.
Non-matrix games are handled by computing the normal (bimatrix) form.
The algorithms used are:
* direct computation of pure equilibria.
* linear programming to find equilibria for constant-sum games.
* iterated dominance to reduce the action space.
* reverse search vertex enumeration (if using lrsnash) to find all general-sum
equilibria.
* support enumeration (if using nashpy) to find all general-sum equilibria.
* Lemke-Howson enumeration (if using nashpy) to find one general-sum
equilibrium.
The general-sum mixed-equilibrium algorithms are likely to work well for tens of
actions, but less likely to scale beyond that.
"""
import fractions
import os
import subprocess
import tempfile
import warnings
import nashpy
import numpy as np
@np.vectorize
def to_fraction_str(x, lrsnash_max_denom):
return str(fractions.Fraction(x).limit_denominator(lrsnash_max_denom))
def lrs_solve(row_payoffs, col_payoffs, lrsnash_max_denom, lrsnash_path):
"""Find all Nash equilibria using the lrsnash solver.
`lrsnash` uses reverse search vertex enumeration on rational polytopes.
For more info, see: http://cgm.cs.mcgill.ca/~avis/C/lrslib/USERGUIDE.html#nash
Args:
row_payoffs: payoffs for row player
col_payoffs: payoffs for column player
lrsnash_max_denom: maximum denominator
lrsnash_path: path for temporary files
Yields:
(row_mixture, col_mixture), numpy vectors of float64s.
"""
num_rows, num_cols = row_payoffs.shape
game_file, game_file_path = tempfile.mkstemp()
try:
game_file = os.fdopen(game_file, "w")
# write dimensions
game_file.write("%d %d\n\n" % (num_rows, num_cols))
# write row-player payoff matrix as fractions
for row in range(num_rows):
game_file.write(
" ".join(to_fraction_str(row_payoffs[row], lrsnash_max_denom)) + "\n")
game_file.write("\n")
# write col-player payoff matrix as fractions
for row in range(num_rows):
game_file.write(
" ".join(to_fraction_str(col_payoffs[row], lrsnash_max_denom)) + "\n")
game_file.write("\n")
game_file.close()
lrs = subprocess.Popen([lrsnash_path or "lrsnash", "-s", game_file_path],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
col_mixtures = []
for line in lrs.stdout:
if len(line) <= 1 or line[:1] == b"*":
continue
line = np.asfarray([fractions.Fraction(x) for x in line.decode().split()])
if line[0] == 2: # col-player
col_mixtures.append(line[1:-1])
else: # row-player
row_mixture = line[1:-1]
# row-mixture forms a Nash with every col-mixture listed directly above
for col_mixture in col_mixtures:
yield (row_mixture, col_mixture)
col_mixtures = []
finally:
os.remove(game_file_path)
def lemke_howson_solve(row_payoffs, col_payoffs):
"""Find Nash equilibria using the Lemke-Howson algorithm.
The algorithm is not guaranteed to find all equilibria. Also it can yield
wrong answers if the game is degenerate (but raises warnings in that case).
Args:
row_payoffs: payoffs for row player
col_payoffs: payoffs for column player
Yields:
(row_mixture, col_mixture), numpy vectors of float64s.
"""
showwarning = warnings.showwarning
warned_degenerate = [False]
def showwarning_check_degenerate(message, *args, **kwargs):
if "Your game could be degenerate." in str(message):
warned_degenerate[0] = True
showwarning(message, *args, **kwargs)
try:
warnings.showwarning = showwarning_check_degenerate
for row_mixture, col_mixture in nashpy.Game(
row_payoffs, col_payoffs).lemke_howson_enumeration():
if warned_degenerate[0]:
# attempt to discard obviously-wrong results
if (row_mixture.shape != row_payoffs.shape[:1] or
col_mixture.shape != row_payoffs.shape[1:]):
warnings.warn("Discarding ill-shaped solution.")
continue
if (not np.isfinite(row_mixture).all() or
not np.isfinite(col_mixture).all()):
warnings.warn("Discarding non-finite solution.")
continue
yield row_mixture, col_mixture
finally:
warnings.showwarning = showwarning
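# A minimal, self-contained demo of lemke_howson_solve (not part of the original
# module): Matching Pennies, whose unique equilibrium mixes 50/50 for both players.
if __name__ == "__main__":
  row = np.array([[1.0, -1.0], [-1.0, 1.0]])
  col = -row
  for row_mixture, col_mixture in lemke_howson_solve(row, col):
    print("row:", row_mixture, "col:", col_mixture)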
|
"""tracking school
Revision ID: 9abbf14692f8
Revises: d287e378eca6
Create Date: 2022-05-16 08:00:28.857382
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9abbf14692f8'
down_revision = 'd287e378eca6'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('school',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=400), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('school')
# ### end Alembic commands ###
|
from PyQt5.QtCore import QPointF
from PyQt5.QtWidgets import QInputDialog, QMessageBox
from gui.main_window.node_editor.items.connection_item import ConnectionItem
from gui.main_window.node_editor.items.node_item import NodeItem
from gui.main_window.node_editor.node_editor_event_handler import NodeEditorEventHandler
from gui.main_window.node_editor.node_editor_view import NodeEditorView
from gui.main_window.node_editor.node_sort import NodeSort
class NodeEditor:
def __init__(self, parent=None):
self.__view = NodeEditorView(self, parent)
self.__scene = self.__view.scene()
self.__networkManager = None
self.__currentPhase = ""
self.__nodes = dict()
self.__hiddenConnections = list()
self.__eventHandler = NodeEditorEventHandler(self, self.__view, self.__scene)
self.__view.setEventHandler(self.__eventHandler)
self.__scene.setEventHandler(self.__eventHandler)
self.disable = False
def getNodes(self):
""" Returns the dict of all node items, the layer ID is the key """
return self.__nodes
def setNetworkManager(self, networkManager):
""" Sets the NetworkManager """
self.__networkManager = networkManager
self.__eventHandler.setNetworkManager(networkManager)
def getNetworkManager(self):
""" Returns the NetworkManager """
return self.__networkManager
def setCopySelection(self):
self.__eventHandler.setCopySelection()
def clearAll(self):
""" Removes all items from the scene and clears the internal dicts """
self.__nodes.clear()
self.__scene.clear()
def createLayerPositionDict(self):
""" Creates and returns a dict with the layer ID as key and the layers position in the scene as value """
# create a new dict
positions = dict()
# fill dict
        for layerID, nodeItem in self.__nodes.items():
pos = nodeItem.scenePos()
positions[layerID] = (pos.x(), pos.y())
return positions
def getPositionOfLayer(self, id):
""" Returns the position of a node item with a given layer ID """
pos = self.__nodes[id].scenePos()
return pos.x(), pos.y()
def setPositionOfLayer(self, id, pos):
""" Sets the position of a node item with the given layer ID """
self.__nodes[id].setPos(pos[0],pos[1])
def applyLayerPositionDict(self, positionsDict):
""" Sets the positions of all node items to the positions defined in positionsDict, layerID is the key """
        for layerID, pos in positionsDict.items():
self.__nodes[layerID].setPos(pos[0], pos[1])
def addLayer(self, layerID, layerData, scenePosX = 0.0, scenePosY = 0.0):
""" Creates a new NodeItem to represent the layer """
# create the new node Item
nodeItem = NodeItem(layerID, layerData, self)
# set the tooltip containing the parameters to the node item
parameters = self.__networkManager.getToolTip(layerID)
nodeItem.setToolTip(parameters)
# add top connectors to the node item
if "top" in layerData["parameters"]:
for top in layerData["parameters"]["top"]:
nodeItem.addTopConnector(top)
# add bottom connectors to the node item
if "bottom" in layerData["parameters"]:
for bottom in layerData["parameters"]["bottom"]:
nodeItem.addBottomConnector(bottom)
# add the node item to the scene and the internal dict
self.__scene.addItem(nodeItem)
self.__nodes[layerID] = nodeItem
nodeItem.setPos(QPointF(scenePosX, scenePosY))
def setKeyboardFocus(self):
self.__view.setFocus()
def focusLayer(self, layerID):
""" Focuses the viewport to the node item with the given layer ID """
position = self.__nodes[layerID].pos()
self.__view.centerOn(position)
def addBottomBlob(self, layerID, blobName):
""" Adds a bottom connector to the node item with the given layer ID """
nodeItem = self.__nodes[layerID]
nodeItem.addBottomConnector(blobName)
def removeBottomBlob(self, layerID, blobIndex):
""" Removes a bottom connector from the node item with the given layer ID """
self.__nodes[layerID].removeBottomConnector(blobIndex)
# Update the tooltip as well
self.updateTooltip(layerID)
def addTopBlob(self, layerID, blobName):
""" Adds a top connector to the node item with the given layer ID """
nodeItem = self.__nodes[layerID]
nodeItem.addTopConnector(blobName)
bottomConnectors = self.__nodes[layerID].getBottomConnectors()
for bottomConnector in bottomConnectors:
connections = bottomConnector.getConnections()
for connection in connections:
connection.updateData()
# Update the tooltip as well
self.updateTooltip(layerID)
def renameTopBlob(self, layerID, blobIndex, newName):
""" Renames a top connector and changes the name of all connected bottom connectors """
# update the top connectors name
connector = self.__nodes[layerID].getTopConnectors()[blobIndex]
connector.setBlobName(newName)
self.__nodes[layerID].updateLayerData()
# get all connected connectors and change their name
connections = connector.getConnections()
for connection in connections:
bottomConnector = connection.getBottomConnector()
self.__networkManager.renameBottomBlob(bottomConnector.getNodeItem().getLayerID(),
bottomConnector.getIndex(),
newName)
connection.updateData()
# update all connections
bottomConnectors = self.__nodes[layerID].getBottomConnectors()
for bottomConnector in bottomConnectors:
connections = bottomConnector.getConnections()
for connection in connections:
connection.updateData()
# Update the tooltip as well
self.updateTooltip(layerID)
def renameBottomBlob(self, layerID, blobIndex, newName):
""" Renames a bottom connector and removes all connections getting invalid by this operation """
# rename the bottom connector
bottomConnector = self.__nodes[layerID].getBottomConnectors()[blobIndex]
bottomConnector.setBlobName(newName)
self.__nodes[layerID].updateLayerData()
# get all top connectors and check if they have the same name. If not -> remove connection
connections = list(bottomConnector.getConnections())
for connection in connections:
topConnector = connection.getTopConnector()
if topConnector.getBlobName() != newName:
self.removeConnection(connection)
# Update the tooltip as well
self.updateTooltip(layerID)
def removeTopBlob(self, layerID, blobIndex):
""" Removes a top connector from the node item with the given layer ID """
self.__nodes[layerID].removeTopConnector(blobIndex)
# Update the tooltip as well
self.updateTooltip(layerID)
def disconnectBottomBlob(self, layerID, blobIndex):
""" Removes all connections connected to the bottom connector with the given index
of the node item with the given layer ID """
connector = self.__nodes[layerID].getBottomConnectors()[blobIndex]
self.__disconnectAllConnectorConnections(connector)
# Update the tooltip as well
self.updateTooltip(layerID)
def disconnectTopBlob(self, layerID, blobIndex):
""" Removes all connections connected to the top connector with the given index
of the node item with the given layer ID """
connector = self.__nodes[layerID].getTopConnectors()[blobIndex]
self.__disconnectAllConnectorConnections(connector)
# Update the tooltip as well
self.updateTooltip(layerID)
def createConnection(self, topLayerID, topBlobIndex, bottomLayerID, bottomBlobIndex):
""" Creates a new connection between the top connector at topBlobIndex of the node item with the topLayerID
and the bottom connector at bottomBlobIndex of the node item with the bottomLayerID """
# get the connectors
topConnector = self.__nodes[topLayerID].getTopConnectors()[topBlobIndex]
bottomConnector = self.__nodes[bottomLayerID].getBottomConnectors()[bottomBlobIndex]
# create a new connection item and add it to the scene
connection = ConnectionItem(self)
self.__scene.addItem(connection)
# set the connection/connectors in the connection/connectors
connection.setTopConnector(topConnector)
connection.setBottomConnector(bottomConnector)
bottomConnector.addConnection(connection)
topConnector.addConnection(connection)
def rearrangeNodes(self):
""" Automatically sorts the node items """
nodesort = NodeSort(self.getNodes(), self.__view)
self.__networkManager.checkLayersForMovement(self.getNodeIds())
def rearrangeNodesVertical(self):
""" Automatically sorts the node items Vertical """
nodesort = NodeSort(self.getNodes(), self.__view, True)
self.__networkManager.checkLayersForMovement(self.getNodeIds())
def clearSelection(self):
""" Clears the selection """
self.__scene.clearSelection()
def setSelectedLayer(self, layerID):
""" Sets a single layer as selected. Already selected layers get deselected. """
# remove all node items from the selection
self.clearSelection()
# get the node item with layer ID and set it as selected
for item in self.__scene.items():
if isinstance(item, NodeItem):
if item.getLayerID() == layerID:
item.setSelected(True)
break
def setSelectedLayers(self, layerIDs):
""" Sets a list of layers as selected. Alread selected layers get deselected. """
# remove all node items from the selection
self.clearSelection()
# get all items in the scene
layerList = list(layerIDs)
items = self.__scene.items()
        # check all items and set them as selected if they are in the given list
while(len(layerList) > 0):
for item in items:
if isinstance(item, NodeItem):
if item.getLayerID() == layerList[0]:
item.setSelected(True)
break
del layerList[0]
def addLayerToSelection(self, layerID):
""" Selects a node item without clearing other layers selection """
items = self.__scene.items()
for item in items:
if isinstance(item, NodeItem):
if item.getLayerID() == layerID:
item.setSelected(True)
break
def removeLayerFromSelection(self, layerID):
""" Deselects a node item without clearing other layers selection """
selectedItems = self.__scene.selectedItems()
for item in selectedItems:
if isinstance(item, NodeItem):
if item.getLayerID() == layerID:
item.setSelected(False)
break
def updateLayerData(self, layerID):
""" Notifies a node item to update the layer data to render correctly after change """
self.__nodes[layerID].updateLayerData()
# Update the tooltip as well
self.updateTooltip(layerID)
def disconnectConnectionsOfSelection(self):
""" Removes all connections from the selection and all connections connecting any selected node item """
# get all selected items
selectedItems = self.__scene.selectedItems()
# remove all selected connections
for item in selectedItems:
if isinstance(item, ConnectionItem):
self.removeConnection(item)
# remove all connections connected to selected layers
for item in selectedItems:
if isinstance(item, NodeItem):
self.__disconnectAllLayerConnections(item)
def __disconnectAllLayerConnections(self, nodeItem):
""" Removes all connections connected to any top or bottom connector of a given node item """
# remove connections of top connectors
for topConnector in nodeItem.getTopConnectors():
self.__disconnectAllConnectorConnections(topConnector)
# remove connections of bottom connectors
for bottomConnector in nodeItem.getBottomConnectors():
self.__disconnectAllConnectorConnections(bottomConnector)
def __disconnectAllConnectorConnections(self, connector):
""" Removes all connections connected to a connector """
connections = list(connector.getConnections())
for connection in connections:
self.removeConnection(connection)
def removeConnection(self, connection):
""" Removes a connection """
# get all selected items
selectedItems = self.__scene.selectedItems()
# remove connection from connectors
topConnector = connection.getTopConnector()
bottomConnector = connection.getBottomConnector()
topConnector.removeConnection(connection)
bottomConnector.removeConnection(connection)
# get layer ID and connector index to notify data change
bottomLayerID = bottomConnector.getNodeItem().getLayerID()
bottomBlobIndex = bottomConnector.getIndex()
        # if the bottom connector has no connections left and is not in the selection,
        # remove the underlying data relation of the top-bottom blob
if bottomConnector.getConnectionCount() == 0 and bottomConnector.getNodeItem() not in selectedItems:
self.__networkManager.disconnectLayer(bottomLayerID, bottomBlobIndex)
# remove the connection item from the scene
self.__scene.removeItem(connection)
def deleteItemsByID(self, list):
""" Removes node items from the scene based on the list of layer IDs """
for layerID in list:
items = self.__scene.items()
for item in items:
if isinstance(item, NodeItem):
if item.getLayerID() == layerID:
self.__scene.removeItem(item)
                        # dict views cannot be indexed in Python 3; find the key for this item
                        key = next(k for k, v in self.__nodes.items() if v is item)
                        del self.__nodes[key]
def updateLayerName(self, layerID, name):
""" Notify the node item to update the rendering. """
self.__nodes[layerID].updateLayerData()
# Update the tooltip as well
self.updateTooltip(layerID)
def updateTooltip(self, layerID):
""" Updates the tooltip of the node item with the given layer ID """
parameters = self.__networkManager.getToolTip(layerID)
self.__nodes[layerID].setToolTip(parameters)
def createListFromHiddenConnections(self):
""" Creates a list of tuples containing the hidden connections, the tuples have the form
(topLayerID, topBlobIndex, bottomLayerID, bottomBlobIndex) """
outList = list()
for connection in self.__hiddenConnections:
topConnector = connection.getTopConnector()
bottomConnector = connection.getBottomConnector()
outList.append((topConnector.getNodeItem().getLayerID(), topConnector.getIndex(),
bottomConnector.getNodeItem().getLayerID(), bottomConnector.getIndex()))
return outList
def setHiddenConnectionsFromList(self, hiddenList):
""" Sets all connections to show and then hides the connections specified in hiddenList,
hiddenList contains tuples (topLayerID, topBlobIndex, bottomLayerID, bottomBlobIndex) """
self.__hiddenConnections = []
for data in hiddenList:
connection = self.__getConnection(data[0], data[1], data[2], data[3])
if connection is not None:
connection.setHidden(True)
self.__hiddenConnections.append(connection)
def tryToAddHiddenConnection(self, connection):
""" Called by connections to notify a hidden state change. """
if connection not in self.__hiddenConnections:
self.__hiddenConnections.append(connection)
self.__networkManager.connectionsHiddenStateChanged(self.createListFromHiddenConnections())
def tryToRemoveHiddenConnection(self, connection):
""" Called by connections to notify a hidden state change. """
if connection in self.__hiddenConnections:
self.__hiddenConnections.remove(connection)
self.__networkManager.connectionsHiddenStateChanged(self.createListFromHiddenConnections())
def tryToCreateLayer(self, type, scenePosX, scenePosY):
""" Notifies the NetworkManager to create a new Layer. """
self.__networkManager.addLayer(type, scenePosX, scenePosY)
def tryToAddBottomBlob(self, layerID, blobName):
""" Notifies the NetworkManager to create a new bottom connector for the given layer. """
self.__networkManager.addBottomBlob(layerID, blobName)
def tryToRemoveBottomBlob(self, layerID, blobIndex):
""" Notifies the NetworkManager to try to remove the bottom connector
with the given index in the given layer. """
self.__networkManager.removeBottomBlob(layerID, blobIndex)
def getNode(self, layerID):
return self.__nodes[layerID]
def tryToAddTopBlob(self, layerID, blobName):
""" Notifies the NetworkManager to add a top connector to the layer with the given name. """
self.__networkManager.addTopBlob(layerID, blobName)
def tryToRenameTopBlob(self, connector):
""" Shows a input dialog to ask for a new blob name
and notifies the NetworkManager to change the blobs name. """
# show input dialog
name, ok = QInputDialog.getText(self.__view, "Change name of top blob", "Top blob name:")
name = name.strip()
# if the name is invalid ask again
duplicateFound = True
while ok:
if len(name) == 0:
QMessageBox.warning(self.__view, "Top blob name is empty",
"The name for the top blob can't be empty or all whitespace characters.")
name, ok = QInputDialog.getText(self.__view, "Change name of top blob", "Top blob name:")
name = name.strip()
continue
elif duplicateFound:
for blob in connector.getNodeItem().getTopConnectors():
if name == blob.getBlobName() and blob != connector:
QMessageBox.warning(self.__view, "Top blob name already exists",
"The name you chose already exists on this Layer, please choose another name.")
name, ok = QInputDialog.getText(self.__view, "Change name of top blob", "Top blob name:")
name = name.strip()
duplicateFound = True
break # stop for loop, continue while loop
else:
duplicateFound = False
else:
break # if string is valid
# the user provided a valid name
if ok:
# check if any bottom connector has multiple connections as a bottom connector can only have one name
bottomConnectorWithMultipleConnectionsFound = False
connections = connector.getConnections()
for connection in connections:
bottomConnector = connection.getBottomConnector()
if bottomConnector.getConnectionCount() > 1:
bottomConnectorWithMultipleConnectionsFound = True
            # ask the user whether they want to continue (break invalid connections) or cancel
shouldRename = True
if bottomConnectorWithMultipleConnectionsFound:
reply = QMessageBox.question(self.__view, "Removing connections",
"A bottom blob, connected to the top blob, has multiple connections. "
"By renaming the top blob, the other connections will get removed. "
"Continue?", QMessageBox.Yes, QMessageBox.No)
if reply != QMessageBox.Yes:
shouldRename = False
# if the user did not cancel -> notify the Networkmanager
if shouldRename:
self.__networkManager.renameTopBlob(connector.getNodeItem().getLayerID(), connector.getIndex(), name)
def tryToRemoveTopBlob(self, layerID, blobIndex):
""" Notifies the NetworkManager to remove the bottom connector with the given index of the given layer """
self.__networkManager.removeTopBlob(layerID, blobIndex)
def tryToClearSelection(self):
""" Notifies the NetworkManager to clear the selection """
self.__networkManager.clearSelectionWithoutSavingHistory()
def tryToSetSelectionList(self, layerIDs):
""" Notifies the NetworkManager to update the selected layer list """
self.__networkManager.setSelectionList(layerIDs)
def tryToDeleteSelection(self):
""" Notifies the NetworkManager to remove all selected layers """
self.__networkManager.deleteSelectedLayers()
def tryToConnect(self, topLayerID, topBlobIndex, bottomLayerID, bottomBlobIndex):
""" Checks wheter the new connection would be valid and
notifies the NetworkManager to create the new connection """
topConnector = self.__nodes[topLayerID].getTopConnectors()[topBlobIndex]
bottomConnector = self.__nodes[bottomLayerID].getBottomConnectors()[bottomBlobIndex]
alreadyConnectedBottomLayers = topConnector.getConnectedLayers()
        # check if the new connection is between two connectors of the same layer
if topConnector.getNodeItem() == bottomConnector.getNodeItem():
return False
# check if connection between both connectors exists
if bottomLayerID in alreadyConnectedBottomLayers:
return False
# check if both layers have the same phase (if both are not in all phases)
if topConnector.getPhase() != "" and bottomConnector.getPhase() != "":
if topConnector.getPhase() != bottomConnector.getPhase():
return False
# if the top layer is in all phases, the bottom layer is not allowed to have any connection at the connector
if topConnector.getPhase() == "":
if bottomConnector.getConnectionCount() > 0:
return False
# if the top layer is not in all phases, check if the
# bottom layer connector has another connection in the same phase
else:
if bottomConnector.hasPhaseConnection(topConnector.getPhase()):
return False
elif bottomConnector.getBlobName() != "":
if topConnector.getBlobName() != bottomConnector.getBlobName():
return False
if topConnector.isInPlace():
if topConnector.hasPhaseConnection(bottomConnector.getPhase()):
return False
# layers can get connected
self.__networkManager.connectLayers(topLayerID, topBlobIndex, bottomLayerID, bottomBlobIndex)
return True
def isCurrentPhase(self, phase):
""" Returns whether the phase is equals the current displayed phase. """
        return len(phase) == 0 or self.__currentPhase == phase
def __getConnection(self, topID, topIndex, bottomID, bottomIndex):
""" Returns the connection between the layers with the IDs topID and bottomID,
connected at the connectors with the indices topIndex and bottomIndex """
topConnector = self.__nodes[topID].getTopConnectors()[topIndex]
connections = topConnector.getConnections()
for connection in connections:
bottomConnector = connection.getBottomConnector()
if bottomConnector.getNodeItem().getLayerID() == bottomID and bottomConnector.getIndex() == bottomIndex:
return connection
return None
def calculateLayerOrder(self):
""" Returns a list of layer ID representing the order of connection """
order = list()
# get a list of all node items with no bottom connectors
# or with no connections connected to any bottom connector
currentNodes = self.__getStartNodes()
waitingNodes = dict()
# unused
touched = {}
# repeat until all nodes have been processed
while len(currentNodes) > 0:
currentNode = currentNodes[0]
touched[currentNode.getLayerID()] = currentNode.getName()
# add the current node to the order list
order.append(currentNode.getLayerID())
# get a list of all nodes connected to any top connector of the current node
followingNodes = currentNode.getNodesConnectedToTops()
for node in followingNodes:
touched[node.getLayerID()] = node.getName()
            # for all following nodes, check if they are waiting (not all node items connected to bottoms
            # have been processed) and decrease the waiting count. If all needed nodes have been processed,
            # add the node to the processable nodes
for following in followingNodes:
# the layer has already been processed
if following.getLayerID() in order:
continue
# layer has other unprocessed pre nodes
if following in waitingNodes:
if waitingNodes[following] > 1:
waitingNodes[following] -= 1
else:
currentNodes.append(following)
del waitingNodes[following]
else:
inputNodeCount = len(following.getNodesConnectedToBottoms())
if inputNodeCount > 1:
waitingNodes[following] = inputNodeCount - 1
else:
currentNodes.append(following)
currentNodes.remove(currentNode)
return order
def getView(self):
return self.__view
def getScene(self):
return self.__view.scene()
def __getStartNodes(self):
""" Returns a list of all node items with no bottom connectors
or no connections connected to bottom connectors. """
startNodes = list()
        for layerID, item in self.__nodes.items():
if item.getBottomConnectorCount() == 0:
startNodes.append(item)
else:
connected = False
for connector in item.getBottomConnectors():
if connector.getConnectionCount() > 0:
connected = True
break
if not connected:
startNodes.append(item)
return startNodes
def getNodeIds(self):
""" Returns a list of all layer IDs of nodes currently in the scene. """
        return list(self.__nodes.keys())
def disableEditing(self, disable):
""" Disable relevant changes of the net. """
self.disable = disable
self.__eventHandler.setDisabled(disable)
self.__view.scene().disableEditing(disable)
        for _, node in self.getNodes().items():
node.disableBlobs(disable)
|
from django.conf.urls import url
from . import views
app_name = 'campaigns'
urlpatterns = [
url( r'^$', views.JsonView.response, name='index' ),
url( r'^(?P<app_code>[a-zA-Z0-9]+)/info/$', views.info, name='info' ),
url( r'^(?P<app_code>[a-zA-Z0-9]+)/services/$', views.services, name='services' ),
]
|
from django.urls import path
from .views import home, CityDetailView, CityCreateView, CityUpdateView, CityDeleteView
urlpatterns = [
path("detail/<int:pk>/", CityDetailView.as_view(), name="detail"),
path("add/", CityCreateView.as_view(), name="add"),
path("update/<int:pk>/", CityUpdateView.as_view(), name="update"),
path("delete/<int:pk>/", CityDeleteView.as_view(), name="delete"),
path("", home, name="home"),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author : mh
import time
import os
def change_name(path):
if not os.path.isdir(path) and not os.path.isfile(path):
print("路径错误")
return
if os.path.isfile(path):
        # split into directory and file name
        file_path = os.path.split(path)
        # split the file name from its extension
        lists = file_path[1].split('.')
        # file name
        fileName = lists[0]
        # extension
        file_ext = lists[-1]
img_ext = ['jpeg', 'png', 'jpg']
print(lists[0])
# fileNmewName = fileName.replace('bz_2_2_o_ut', 'bz_3_3_p_u_t')
# os.rename(path, file_path[0] + '/' + fileNmewName + '.' + file_ext)
if file_ext in img_ext:
fileNmewName = fileName.replace('_b_z_n_2_89_io_yu_7_', '_b_z_3_n_34_sfo_p_x_')
os.rename(path, file_path[0] + '/' + fileNmewName + '.' + file_ext)
elif os.path.isdir(path):
for x in os.listdir(path):
            # os.path.join() is handy for building paths
change_name(os.path.join(path, x))
def main():
img_dir = '/Users/mahao/Desktop/images'
start = time.time()
change_name(img_dir)
c = time.time() - start
    print('Elapsed time: %0.2f s' % c)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
import time
import datetime
from openerp import models,fields,api
from openerp.tools.translate import _
from math import *
from openerp.exceptions import Warning
class sale_order(models.Model):
_inherit = "sale.order"
@api.model
def _get_default_location(self):
company_id = self.env.user.company_id.id
warehouse_obj = self.env['stock.warehouse']
warehouse_id = warehouse_obj.search([('company_id','=',company_id)])
location = warehouse_id.out_type_id and warehouse_id.out_type_id.default_location_src_id
        return location or False
@api.depends('order_line')
def _compute(self):
for obj in self:
obj.is_nb_lignes=len(obj.order_line)
is_type_commande = fields.Selection([
('standard', 'Ferme'),
('ouverte' , 'Ouverte'),
('cadence' , 'Cadencé'),
('ls' , 'Liste à servir'),
('proforma', 'PROFORMA'),
], "Type de commande")
is_article_commande_id = fields.Many2one('product.product', 'Article de la commande', help="Article pour les commandes ouvertes")
is_ref_client = fields.Char("Référence client", store=True, compute='_ref_client')
is_source_location_id = fields.Many2one('stock.location', 'Source Location', default=_get_default_location)
is_transporteur_id = fields.Many2one('res.partner', 'Transporteur')
is_liste_servir_id = fields.Many2one('is.liste.servir', 'Liste à servir')
is_info_client = fields.Text("Information client complèmentaire")
is_nb_lignes = fields.Integer("Nb lignes", store=True, compute='_compute')
is_date_envoi_mail = fields.Datetime("Mail envoyé le", readonly=False)
is_incoterm = fields.Many2one('stock.incoterms', 'Incoterm / Conditions de livraison', related='partner_id.is_incoterm', readonly=True)
is_lieu = fields.Char('Lieu', related='partner_id.is_lieu', readonly=True)
is_ar_commentaire = fields.Text("Commentaire AR de commande")
is_message = fields.Text("Message", compute='_compute_message')
is_ar_contact_id = fields.Many2many('res.partner', 'is_sale_ar_contact_id_rel', 'partner_id', 'contact_id', 'Destinataire AR de commande')
_defaults = {
'is_type_commande': 'standard',
}
@api.depends('partner_id')
def _compute_message(self):
for obj in self:
            #** Search for messages for this customer **************************
messages = ''
where=['|',('name','=',obj.partner_id.id),('name','=',False)]
for row in self.env['is.vente.message'].search(where):
messages += row.message + '\n'
#*******************************************************************
obj.is_message = messages
@api.multi
def onchange_client_order_ref(self, client_order_ref,partner_id):
warning = {}
if client_order_ref and partner_id:
orders = self.env['sale.order'].search([('client_order_ref','=',client_order_ref),('partner_id','=',partner_id)],limit=1)
if orders:
warning = {
'title': _('Warning!'),
'message' : u"La commande "+orders[0].name+u" a déjà ce même numéro de commande client !"
}
return {
'value' : {},
'warning': warning,
}
@api.model
def fields_view_get(self, view_id=None, view_type='form', toolbar=False,submenu=False):
res = super(sale_order, self).fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar,submenu=submenu)
        #** Remove the reports listed below ************************************
indexes=[]
for idx, print_submenu in enumerate(res.get('toolbar', {}).get('print', [])):
if print_submenu['display_name'] in ["Devis / Commande"]:
indexes.append(idx)
for idx in reversed(indexes):
res['toolbar']['print'].pop(idx)
#***********************************************************************
return res
@api.multi
def envoyer_ar_par_mail(self):
cr , uid, context = self.env.args
modele_mail = u"""
<html>
<head>
<meta content="text/html; charset=UTF-8" http-equiv="Content-Type">
</head>
<body>
<font>Bonjour, </font>
<br><br>
<font> Veuillez trouver ci-joint notre AR de commande.</font>
<br><br>
[commentaire]
Cordialement <br><br>
[from]<br>
</body>
</html>
"""
for obj in self:
mails=[]
if obj.is_ar_contact_id:
for c in obj.is_ar_contact_id:
mail = c.name + u' <' + c.email + u'>'
mails.append(mail)
else:
for c in obj.partner_id.child_ids:
if c.is_type_contact.name == 'Approvisionneur':
mail = c.name + u' <' + c.email + u'>'
mails.append(mail)
if not mails:
raise Warning(u"Aucun mail de type 'Approvisionneur' pour ce client !")
email_contact = ','.join(mails)
user = self.env['res.users'].browse(uid)
email = user.email
nom = user.name
if email==False:
raise Warning(u"Votre mail n'est pas renseigné !")
            #** Generate the PDF ***********************************************
name=u'ar_commande-' + obj.client_order_ref + u'.pdf'
pdf = self.env['report'].get_pdf(obj, 'is_plastigray.report_ar_commande')
#*******************************************************************
            # ** Check whether an attachment is already linked *****************
model=self._name
attachment_obj = self.env['ir.attachment']
attachments = attachment_obj.search([('res_model','=',model),('res_id','=',obj.id),('name','=',name)])
# ******************************************************************
            # ** Create or update the attachment *******************************
vals = {
'name': name,
'datas_fname': name,
'type': 'binary',
'res_model': model,
'res_id': obj.id,
'datas': pdf.encode('base64'),
}
attachment_id=False
if attachments:
for attachment in attachments:
attachment.write(vals)
attachment_id=attachment.id
else:
attachment = attachment_obj.create(vals)
attachment_id=attachment.id
#*******************************************************************
email_cc = nom + u' <'+email+u'>'
email_from = email_cc
subject = u'AR de commande Plastigray ' + obj.client_order_ref + ' pour ' + obj.partner_id.name
#subject = u'AR de commande Plastigray '+obj.client_order_ref+' pour '+obj.partner_id.name+u' (to='+email_contact+u')'
email_to = email_contact
#email_to = email_cc
body_html = modele_mail.replace('[from]', user.name)
if obj.is_ar_commentaire:
commentaire = obj.is_ar_commentaire.replace('\n', '<br>') + '<br><br>'
else:
commentaire = ''
if obj.is_message:
commentaire += obj.is_message.replace('\n', '<br>') + '<br><br>'
body_html = body_html.replace('[commentaire]', commentaire)
email_vals = {
'subject' : subject,
'email_to' : email_to,
'email_cc' : email_cc,
'email_from' : email_from,
'body_html' : body_html.encode('utf-8'),
'attachment_ids': [(6, 0, [attachment_id])]
}
email_id = self.env['mail.mail'].create(email_vals)
if email_id:
self.env['mail.mail'].send(email_id)
obj.message_post(u'Commande envoyée par mail à '+ email_contact)
obj.is_date_envoi_mail=datetime.datetime.now()
@api.multi
def actualiser_prix_commande(self):
for obj in self:
for line in obj.order_line:
res=line.onchange_date_livraison(
line.is_date_livraison,
line.product_id.id,
line.product_uom_qty,
line.product_uom.id,
obj.partner_id.id,
obj.pricelist_id.id,
obj.company_id.id,
obj.id)
price=res['value']['price_unit']
if price:
line.price_unit=price
@api.multi
def numeroter_lignes(self):
for obj in self:
lines = self.env['sale.order.line'].search([('order_id','=',obj.id)],order="is_date_expedition")
sequence=0
for line in lines:
sequence=sequence+1
line.sequence=sequence
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
res = super(sale_order, self).onchange_partner_id(cr, uid, ids, partner_id, context=context)
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
if partner.is_adr_facturation:
res['value'].update({'partner_invoice_id': partner.is_adr_facturation.id })
if partner.is_source_location_id:
res['value'].update({'is_source_location_id': partner.is_source_location_id.id })
if partner.is_transporteur_id:
res['value'].update({'is_transporteur_id': partner.is_transporteur_id.id })
return res
@api.depends('is_article_commande_id', 'is_article_commande_id.is_ref_client', 'is_article_commande_id.product_tmpl_id.is_ref_client')
def _ref_client(self):
for order in self:
if order.is_article_commande_id:
order.is_ref_client = order.is_article_commande_id.is_ref_client
def onchange_order_line(self, cr, uid, ids, type_commande, order_line, context=None):
value = {}
if len(order_line)>1:
value.update({'is_type_commande_ro': True})
else:
value.update({'is_type_commande_ro': False})
return {'value': value}
@api.multi
def action_acceder_client(self):
dummy, view_id = self.env['ir.model.data'].get_object_reference('base', 'view_partner_form')
for obj in self:
return {
'name': "Client",
'view_mode': 'form',
'view_id': view_id,
'view_type': 'form',
'res_model': 'res.partner',
'type': 'ir.actions.act_window',
'res_id': obj.partner_id.id,
'domain': '[]',
}
@api.multi
def action_acceder_commande(self):
for obj in self:
return {
'name': "Commande",
'view_mode': 'form',
'view_type': 'form',
'res_model': 'sale.order',
'type': 'ir.actions.act_window',
'res_id': obj.id,
'domain': '[]',
}
@api.multi
def _verif_tarif(self,vals):
if 'is_type_commande' in vals and 'is_article_commande_id' in vals and 'pricelist_id' in vals :
if vals['is_type_commande']=='ouverte':
product_id=vals['is_article_commande_id']
product = self.env['product.product'].browse([product_id])
partner = self.env['res.partner'].browse(vals['partner_id'])
pricelist_id=vals['pricelist_id']
context={}
if pricelist_id:
pricelist=self.env['product.pricelist'].browse(pricelist_id)
qty = self.env['product.template'].get_lot_livraison(product.product_tmpl_id, partner)
#date = time.strftime('%Y-%m-%d')
date = vals['date_order']
ctx = dict(
context,
uom=product.uom_id.id,
date=date,
)
price = self.pool.get('product.pricelist').price_get(self._cr, self._uid, pricelist_id,
product_id, qty, vals['partner_id'], ctx)[pricelist_id]
if price is False:
raise Warning("Il n'existe pas de tarif (liste de prix) pour l'article '"+str(product.is_code)+"' / qt="+str(qty)+ " / date="+str(date))
@api.multi
def _verif_existe(self,vals):
if 'is_article_commande_id' in vals:
r=self.env['sale.order'].search([
['partner_id' , '=', vals['partner_id']],
['is_article_commande_id', '=', vals['is_article_commande_id']],
['is_type_commande' , '=', vals['is_type_commande']],
['state' , '=', 'draft'],
['is_type_commande' , '=', 'ouverte'],
])
if len(r)>1 :
raise Warning(u"Il exite déjà une commande ouverte pour cet article et ce client")
@api.multi
def _client_order_ref(self, obj):
if obj.is_type_commande!='ls':
for line in obj.order_line:
line.is_client_order_ref=obj.client_order_ref
@api.multi
def _verif_article_livrable(self, obj):
for line in obj.order_line:
ok=False
for l in line.product_id.is_client_ids:
if l.client_id.id==obj.partner_id.id:
ok=True
if ok==False:
raise Warning(u"L'article "+line.product_id.is_code+u" n'est pas livrable à ce client (cf fiche article) !")
@api.model
def create(self, vals):
self._verif_tarif(vals)
obj = super(sale_order, self).create(vals)
self._verif_existe(vals)
self._client_order_ref(obj)
self._verif_article_livrable(obj)
return obj
@api.multi
def write(self,vals):
res=super(sale_order, self).write(vals)
for obj in self:
vals2={
'is_type_commande' : obj.is_type_commande,
'is_article_commande_id' : obj.is_article_commande_id.id,
'pricelist_id' : obj.pricelist_id.id,
'partner_id' : obj.partner_id.id,
'partner_invoice_id' : obj.partner_invoice_id.id,
'date_order' : obj.date_order,
}
self._verif_tarif(vals2)
self._verif_existe(vals2)
self._client_order_ref(obj)
self._verif_article_livrable(obj)
return res
class sale_order_line(models.Model):
_inherit = "sale.order.line"
_order = 'order_id desc, sequence, is_date_livraison, id'
    # TODO: this field had to be overridden to add an index (select=True)
product_id = fields.Many2one('product.product', 'Product', domain=[('sale_ok', '=', True)], change_default=True, readonly=True, states={'draft': [('readonly', False)]}, ondelete='restrict',select=True)
is_justification = fields.Char("Justif.", help="Ce champ est obligatoire si l'article n'est pas renseigné ou le prix à 0")
is_date_livraison = fields.Date("Date de liv.",select=True)
is_date_expedition = fields.Date("Date d'expé.", store=True, compute='_date_expedition',select=True)
is_type_commande = fields.Selection([('ferme', 'Ferme'),('previsionnel', 'Prév.')], "Type",select=True)
is_client_order_ref = fields.Char("Commande client")
is_ref_client = fields.Char('Réf. client', related='product_id.is_ref_client', readonly=True)
is_date_confirmation = fields.Date("Date de confirmation")
is_comment_confirm = fields.Char("Commentaire de confirmation")
is_ref_plan = fields.Char("Réf. plan", related='product_id.is_ref_plan', readonly=True)
is_ind_plan = fields.Char("Indice plan", related='product_id.is_ind_plan', readonly=True)
    # On 22/06/2020 at 12:02, Caroline CHEVALLIER wrote:
    # Delivery date - transport lead time = shipping date.
    # The shipping date must then be compared against the factory calendar.
    # If the shipping date falls on a day the company is closed, it must be moved back to the first working day.
    # There is also another request concerning the factory calendar: we need two factory calendars, one for
    # production (used for requirements planning) and one for shipments (used to compute the shipping date).
    # Most of the time these two calendars are identical, but during the summer period they differ,
    # because we need a logistics presence and must still be able to ship products.
    # Delivery date = 31/07/2020
    # The company calendar is closed on 29/30/31 July:
    # the transport lead time is unchanged (2 days); since 29/07 is closed,
    # the system should place the order on 28/07, the first open day in the calendar.
@api.depends('is_date_livraison')
def _date_expedition(self):
for order in self:
if order.is_date_livraison:
cr = self._cr
uid = self._uid
context = self._context
res_partner = self.env['res.partner']
delai_transport = order.order_id.partner_id.is_delai_transport
date_expedition = order.is_date_livraison
date_expedition = datetime.datetime.strptime(date_expedition, '%Y-%m-%d')
                # Transport lead time in working days (excluding Saturdays and Sundays)
if delai_transport:
while delai_transport>0:
date_expedition = date_expedition - datetime.timedelta(days=1)
weekday = date_expedition.weekday()
if weekday not in [5,6]:
delai_transport = delai_transport - 1
                # closing days of the company
jours_fermes = res_partner.num_closing_days(order.order_id.company_id.is_calendrier_expedition_id)
                # leave days of the company
leave_dates = res_partner.get_leave_dates(order.order_id.company_id.is_calendrier_expedition_id)
#date_expedition = date_expedition - datetime.timedelta(days=delai_transport)
new_date = date_expedition
while True:
date_txt=new_date.strftime('%Y-%m-%d')
num_day = int(time.strftime('%w', time.strptime( date_txt, '%Y-%m-%d')))
if (num_day in jours_fermes or date_txt in leave_dates):
new_date = new_date - datetime.timedelta(days=1)
else:
break
date_expedition = new_date.strftime('%Y-%m-%d')
order.is_date_expedition=date_expedition
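    # A standalone sketch of the rollback logic above (illustration only; it folds the
    # weekly closing days and the leave dates into a single closed_dates set):
    #
    #     def shipping_date(delivery, transit_days, closed_dates):
    #         d = delivery
    #         while transit_days > 0:
    #             d -= datetime.timedelta(days=1)
    #             if d.weekday() not in (5, 6):              # skip Saturday/Sunday
    #                 transit_days -= 1
    #         while d.strftime('%Y-%m-%d') in closed_dates:  # roll back over closures
    #             d -= datetime.timedelta(days=1)
    #         return d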
@api.multi
def action_acceder_commande(self):
dummy, view_id = self.env['ir.model.data'].get_object_reference('sale', 'view_order_form')
for obj in self:
return {
'name': "Commande",
'view_mode': 'form',
'view_id': view_id,
'view_type': 'form',
'res_model': 'sale.order',
'type': 'ir.actions.act_window',
'res_id': obj.order_id.id,
'domain': '[]',
}
@api.multi
def action_acceder_client(self):
dummy, view_id = self.env['ir.model.data'].get_object_reference('base', 'view_partner_form')
for obj in self:
return {
'name': "Client",
'view_mode': 'form',
'view_id': view_id,
'view_type': 'form',
'res_model': 'res.partner',
'type': 'ir.actions.act_window',
'res_id': obj.order_id.partner_id.id,
'domain': '[]',
}
@api.multi
def action_acceder_article(self):
dummy, view_id = self.env['ir.model.data'].get_object_reference('is_pg_product', 'is_product_template_only_form_view')
for obj in self:
return {
'name': "Article",
'view_mode': 'form',
'view_id': view_id,
'view_type': 'form',
'res_model': 'product.template',
'type': 'ir.actions.act_window',
'res_id': obj.product_id.product_tmpl_id.id,
'domain': '[]',
}
@api.multi
def check_date_livraison(self, date_livraison, partner_id, context=None):
res_partner = self.env['res.partner']
if partner_id:
partner = self.env['res.partner'].browse(partner_id)
            # closing days of the company
jours_fermes = res_partner.num_closing_days(partner)
            # leave days of the company
leave_dates = res_partner.get_leave_dates(partner,avec_jours_feries=True)
            # public holidays of the customer's country
#jours_feries=res_partner.get_jours_feries(partner)
            # day-of-week number of the delivery date
num_day = time.strftime('%w', time.strptime(date_livraison, '%Y-%m-%d'))
if int(num_day) in jours_fermes or date_livraison in leave_dates:
return False
return True
@api.multi
def onchange_date_livraison(self, date_livraison, product_id, qty, uom, partner_id, pricelist, company_id, order_id=False):
context=self._context
v = {}
warning = {}
if order_id:
order = self.env['sale.order'].browse(order_id)
if order:
partner_id=order.partner_id.id
company_id=order.company_id.id
if partner_id and date_livraison:
partner = self.env['res.partner'].browse(partner_id)
company = self.env['res.company'].browse(company_id)
res_partner = self.env['res.partner']
# # jours de fermeture de la société
# jours_fermes = res_partner.num_closing_days(company.partner_id)
# # Jours de congé de la société
# leave_dates = res_partner.get_leave_dates(company.partner_id)
# delai_transport = partner.is_delai_transport
# date_expedition = date_livraison
# if delai_transport:
# i = 0
# while i < delai_transport:
# date = datetime.datetime.strptime(date_expedition, '%Y-%m-%d') - datetime.timedelta(days=1)
# date = date.strftime('%Y-%m-%d')
# num_day = time.strftime('%w', time.strptime(date, '%Y-%m-%d'))
# date_expedition = res_partner.get_day_except_weekend(date, num_day)
# i += 1
#
# date_expedition = res_partner.get_working_day(date, num_day, jours_fermes, leave_dates)
#
# v['is_date_expedition'] = date_expedition
check_date = self.check_date_livraison(date_livraison, partner_id, context=context)
if not check_date:
warning = {
'title': _('Warning!'),
'message' : 'La date de livraison tombe pendant la fermeture du client.'
}
        #** Look up the price in the pricelist for this date and quantity **
if pricelist and product_id:
ctx = dict(
context,
uom=uom,
date=date_livraison,
)
price = self.pool.get('product.pricelist').price_get(self._cr, self._uid, [pricelist],
product_id, qty or 1.0, partner_id, ctx)[pricelist]
v['price_unit'] = price
        # update is_justification
if product_id is not False and pricelist is not False and date_livraison is not False:
SQL="SELECT get_pricelist_justif('sale', {}, {}, {}, '{}') FROM product_product WHERE id={}".format(pricelist, product_id, qty or 1.0, date_livraison, product_id)
self._cr.execute(SQL)
result = self._cr.fetchone()
            v['is_justification'] = result[0]
#*******************************************************************
return {'value': v,
'warning': warning}
# Round to the lot and to a multiple of the lot when entering order lines
#def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
@api.multi
def product_id_change(self, pricelist_id, product_id, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, context=None):
qty=self.env['product.template'].get_arrondi_lot_livraison(product_id, partner_id, qty)
vals = super(sale_order_line, self).product_id_change(pricelist_id, product_id, qty,
uom, qty_uos, uos, name, partner_id,
lang, update_tax, date_order, packaging,
fiscal_position, flag, context=context)
vals['value']['product_uom_qty'] = qty
if date_order is False:
if 'warning' in vals:
vals['warning']=False
return vals
#** Look up the price in the pricelist for the date and qty ***********
price=0
if date_order:
if len(date_order)==10:
if pricelist_id:
ctx = dict(
context,
uom=uom,
date=date_order,
)
price = self.pool.get('product.pricelist').price_get(self._cr, self._uid, [pricelist_id],
product_id, qty or 1.0, partner_id, ctx)[pricelist_id]
#*******************************************************************
vals['value']['price_unit'] = price
# update is_justification
if product_id is not False and pricelist_id is not False and date_order is not False:
SQL="SELECT get_pricelist_justif('sale', {}, {}, {}, '{}') FROM product_product WHERE id={}".format(pricelist_id, product_id, qty, date_order, product_id)
self._cr.execute(SQL)
result = self._cr.fetchone()
vals['value']['is_justification'] = result[0]
return vals
class is_vente_message(models.Model):
_name='is.vente.message'
_order='name'
_sql_constraints = [('name_uniq','UNIQUE(name)', 'Un message pour ce client existe déjà')]
name = fields.Many2one('res.partner', 'Client')
message = fields.Text('Message')
|
from functools import lru_cache
import luigi
import yaml
from servicecatalog_puppet import manifest_utils
from servicecatalog_puppet.workflow import tasks
class ManifestMixen(object):
manifest_file_path = luigi.Parameter()
@property
@lru_cache()
def manifest(self):
content = open(self.manifest_file_path, "r").read()
return manifest_utils.Manifest(yaml.safe_load(content))
class SectionTask(tasks.PuppetTask, ManifestMixen):
manifest_file_path = luigi.Parameter()
puppet_account_id = luigi.Parameter()
should_use_sns = luigi.BoolParameter()
should_use_product_plans = luigi.BoolParameter()
include_expanded_from = luigi.BoolParameter()
single_account = luigi.Parameter()
is_dry_run = luigi.BoolParameter()
execution_mode = luigi.Parameter()
cache_invalidator = luigi.Parameter()
def params_for_results_display(self):
return {
"puppet_account_id": self.puppet_account_id,
"manifest_file_path": self.manifest_file_path,
"cache_invalidator": self.cache_invalidator,
}
@property
@lru_cache()
def manifest(self):
content = open(self.manifest_file_path, "r").read()
return manifest_utils.Manifest(yaml.safe_load(content))
|
import re, os, csv
# A function that handles all the defaults and input for scanning information:
def scanningAndScannerInfo(f):
global captureDate, scannerMake, scannerModel, scannerUser, bitoneRes, contoneRes, scanningOrder, readingOrder, imageCompressionAgent, imageCompressionDate, imageCompressionTool, imageCompressionToolList
if DST.lower() == 'yes' or DST.lower() == 'y':
DSTOffset = '6'
else:
DSTOffset = '5'
captureDate = 'capture_date: ' + scanYearMonthDay + 'T' + scanTime + ':00-0' + DSTOffset + ':00\n'
# SPECIFIC TO NOTRE DAME
if scannerMakeInput.lower() == 'yes' or scannerMakeInput.lower() == 'y':
scannerMake = 'scanner_make: Kirtas\n'
else:
scannerMake = 'scanner_make: ' + scannerMakeInput + '\n'
if scannerModelInput.lower() == 'yes' or scannerModelInput.lower() == 'y':
scannerModel = 'scanner_model: APT 1200\n'
else:
scannerModel = 'scanner_model: ' + scannerModelInput + '\n'
# SPECIFIC TO NOTRE DAME
scannerUser = 'scanner_user: "Notre Dame Hesburgh Libraries: Digital Production Unit"\n'
if bitoneResInput != '0':
bitoneRes = 'bitonal_resolution_dpi: ' + bitoneResInput + '\n'
else:
bitoneRes = ''
if contoneResInput != '0':
contoneRes = 'contone_resolution_dpi: ' + contoneResInput + '\n'
else:
contoneRes = ''
if imageCompression.lower() == 'yes' or imageCompression.lower() == 'y':
# SPECIFIC TO NOTRE DAME
imageCompressionAgent = 'image_compression_agent: notredame\n'
if compressionDST.lower() == 'yes' or compressionDST.lower() == 'y':
compressionDSTOffset = '6'
else:
compressionDSTOffset = '5'
imageCompressionDate = 'image_compression_date: ' + imageCompressionYearMonthDay + 'T' + imageCompressionTime + ':00-0' + compressionDSTOffset + ':00\n'
if "," in imageCompressionToolList:
splitList = imageCompressionToolList.split(", ")
imageCompressionToolList = ''
for tool in splitList:
if tool == splitList[-1]:
imageCompressionToolList += '"' + tool + '"'
else:
imageCompressionToolList += '"' + tool + '", '
else:
imageCompressionToolList = '"' + imageCompressionToolList + '"'
imageCompressionTool = 'image_compression_tool: [' + imageCompressionToolList + ']\n'
if scanningOrderInput.lower() == 'yes' or scanningOrderInput.lower() == 'y':
scanningOrder = 'scanning_order: left-to-right\n'
elif scanningOrderInput.lower() == 'no' or scanningOrderInput.lower() == 'n':
scanningOrder = 'scanning_order: right-to-left\n'
else:
scanningOrder = 'scanning_order: left-to-right\n' #because let's be honest this is the most likely
if readingOrderInput.lower() == 'yes' or readingOrderInput.lower() == 'y':
readingOrder = 'reading_order: left-to-right\n'
elif readingOrderInput.lower() == 'no' or readingOrderInput.lower() == 'n':
readingOrder = 'reading_order: right-to-left\n'
else:
readingOrder = 'reading_order: left-to-right\n' #because let's be honest this is the most likely
f.write(captureDate)
f.write(scannerMake)
f.write(scannerModel)
f.write(scannerUser)
if bitoneRes != '':
f.write(bitoneRes)
if contoneRes != '':
f.write(contoneRes)
if imageCompression.lower() == 'yes' or imageCompression.lower() == 'y':
f.write(imageCompressionDate)
f.write(imageCompressionAgent)
f.write(imageCompressionTool)
f.write(scanningOrder)
f.write(readingOrder)
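# Example of the header lines the function above writes when the defaults are taken
# (the date and time values here are illustrative, not taken from any real record):
#   capture_date: 2014-01-01T12:00:00-05:00
#   scanner_make: Kirtas
#   scanner_model: APT 1200
#   scanner_user: "Notre Dame Hesburgh Libraries: Digital Production Unit"
#   scanning_order: left-to-right
#   reading_order: left-to-right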
# Determines the length of the 000s to ensure that the filename is 8 characters.
def determinePrefixLength(fileNum):
global prefixZeroes
if 0 < fileNum < 10:
prefixZeroes = '0000000'
elif 10 <= fileNum < 100:
prefixZeroes = '000000'
elif 100 <= fileNum < 1000:
prefixZeroes = '00000'
elif 1000 <= fileNum < 10000:
prefixZeroes = '0000'
else:
prefixZeroes = 'error'
# Creates the file's name. Combines the leading 0s, integer as string, and filetype, and outputs global variable fileName
def generateFileName(prefix, suffix, fileType):
global fileName
fileName = prefix + str(suffix) + '.' + fileType.lower()
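# A minimal one-step equivalent of the two helpers above (sketch only, not wired in):
# str.zfill pads the file number with leading zeroes to eight characters, matching
# what determinePrefixLength + generateFileName produce for numbers below 10000.
def zero_padded_file_name(file_num, file_type):
    return str(file_num).zfill(8) + '.' + file_type.lower()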
# Uses the reading start page number to determine where the reading order starts and creates the orderLabel variable used later.
# Handles the incrementation of orderNum and romanInt.
def generateOrderLabel(fileNum):
global readingStartNum, readingEndNum, orderNum, orderLabel, romanCap, romanInt, romanStart
if fileNum == int(readingStartNum):
orderNum = 1
if fileNum == int(romanStart):
romanInt = 1
orderLabel = ''
if int(romanCap) != 0:
if int(romanStart) <= fileNum <= int(romanCap):
orderLabel = 'orderlabel: "' + toRoman(romanInt) + '"'
romanInt += 1
elif int(romanCap) < romanInt:
orderLabel = ''
if int(readingStartNum) <= fileNum <= int(readingEndNum) and fileNum not in unpaginatedPages:
orderLabel = 'orderlabel: "' + str(orderNum) + '"'
orderNum += 1
# If this is a Multiwork item (note, does not function right if no multiwork boundary input), casts the numbers to start/end lists, then defines start/end numbers. Lots of globals because they'll need to be manipulated more elsewhere.
def defineMultiWorkLists():
global readingStartNum, readingEndNum, multiworkStartList, multiworkEndList, romanStartList, romanEndList, romanStart, romanCap
multiworkStartList = map(int, readingStartNum.split(", "))
multiworkEndList = map(int, readingEndNum.split(", "))
readingStartNum = multiworkStartList[0]
readingEndNum = multiworkEndList[0]
if ", " in romanStart:
romanStartList = map(int, romanStart.split(", "))
romanEndList = map(int, romanCap.split(", "))
romanStart = romanStartList[0]
romanCap = romanEndList[0]
# if type(romanStart).__name__ != 'int':
# romanStart = romanStart
# romanCap = romanCap
# This section seemed duplicative? But we'll see? The "if" clause is wrong for the CSV version though...so would have to make sure that's fixed if actually deploying.
# Handles Start/End lists, pops off the first (0) number in the list, then resets start/end numbers. Again using globals because they'll need to be manipulated elsewhere.
def defineMultiworkCycle(fileNum):
global readingStartNum, readingEndNum, multiworkStartList, multiworkEndList, orderNum, romanStartList, romanEndList, romanStart, romanCap, romanInt
if fileNum in multiworkEndList:
if fileNum != multiworkEndList[-1]:
multiworkStartList.pop(0)
readingStartNum = multiworkStartList[0]
multiworkEndList.pop(0)
readingEndNum = multiworkEndList[0]
if fileNum in romanEndList:
if fileNum != romanEndList[-1]:
romanStartList.pop(0)
romanStart = romanStartList[0]
romanEndList.pop(0)
romanCap = romanEndList[0]
# Adds conversion support to/from Roman numerals, taken from diveintopython.net examples
romanNumeralMap = (('m', 1000),
('cm', 900),
('d', 500),
('cd', 400),
('c', 100),
('xc', 90),
('l', 50),
('xl', 40),
('x', 10),
('ix', 9),
('v', 5),
('iv', 4),
('i', 1))
def toRoman(n):
result = ''
for numeral, integer in romanNumeralMap:
while n >= integer:
result += numeral
n -= integer
return result
def fromRoman(s):
result = 0
index = 0
for numeral, integer in romanNumeralMap:
while s[index:index+len(numeral)] == numeral:
result += integer
index += len(numeral)
return result
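# Quick illustrative round-trip check for the Roman numeral helpers above:
# toRoman(14) yields 'xiv' and fromRoman('xiv') yields 14.
assert toRoman(14) == 'xiv'
assert fromRoman('xiv') == 14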
# Processes inputs for various page numbers. Casts everything but covers, because there should only be one, into lists if they're not already lists. Could almost definitely be improved.
# When coming from CSV, the values are always 'str', so everything below is cast to lists of ints (a compact helper is sketched after this function).
def inputToLists():
global blankPages, chapterPages, chapterStart, copyrightPages, firstChapterStart, foldoutPages, imagePages, indexStart, multiworkBoundaries, prefacePages, referenceStartPages, tableOfContentsStarts, titlePages, halfTitlePages, unpaginatedPages
if ", " in blankPages:
blankPages = map(int, blankPages.split(", "))
else:
blankPages = [int(blankPages)]
if ", " in chapterPages:
chapterPages = map(int, chapterPages.split(", "))
else:
chapterPages = [int(chapterPages)]
if ", " in chapterStart:
chapterStart = map(int, chapterStart.split(", "))
else:
chapterStart = [int(chapterStart)]
if ", " in copyrightPages:
copyrightPages = map(int, copyrightPages.split(", "))
else:
copyrightPages = [int(copyrightPages)]
if ", " in firstChapterStart:
firstChapterStart = map(int, firstChapterStart.split(", "))
else:
firstChapterStart = [int(firstChapterStart)]
if ", " in foldoutPages:
foldoutPages = map(int, foldoutPages.split(", "))
else:
foldoutPages = [int(foldoutPages)]
if ", " in imagePages:
imagePages = map(int, imagePages.split(", "))
else:
imagePages = [int(imagePages)]
if ", " in indexStart:
indexStart = map(int, indexStart.split(", "))
else:
indexStart = [int(indexStart)]
if ", " in multiworkBoundaries:
multiworkBoundaries = map(int, multiworkBoundaries.split(", "))
else:
multiworkBoundaries = [int(multiworkBoundaries)]
if ", " in prefacePages:
prefacePages = map(int, prefacePages.split(", "))
else:
prefacePages = [int(prefacePages)]
if ", " in unpaginatedPages:
unpaginatedPages = map(int, unpaginatedPages.split(", "))
else:
unpaginatedPages = [int(unpaginatedPages)]
if ", " in referenceStartPages:
referenceStartPages = map(int, referenceStartPages.split(", "))
else:
referenceStartPages = [int(referenceStartPages)]
if ", " in tableOfContentsStarts:
tableOfContentsStarts = map(int, tableOfContentsStarts.split(", "))
else:
tableOfContentsStarts = [int(tableOfContentsStarts)]
if ", " in titlePages:
titlePages = map(int, titlePages.split(", "))
else:
titlePages = [int(titlePages)]
if ", " in halfTitlePages:
halfTitlePages = map(int, halfTitlePages.split(", "))
else:
halfTitlePages = [int(halfTitlePages)]
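# A compact helper capturing the repeated pattern above (sketch only, not wired in):
# it turns a comma-separated page-number string into a list of ints, mirroring the
# "split on ', ' or cast the single value" logic used for every field.
def pages_to_int_list(value):
    if ", " in value:
        return map(int, value.split(", "))
    return [int(value)]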
# Handles the reading labels. Uses list function which then gets split apart, so that multiple labels can apply to same page if relevant.
def generateLabel(fileNum):
global label
labelList = []
# Testing whether or not a page has a label
if fileNum == frontCover:
labelList.append('"FRONT_COVER"')
if fileNum == backCover:
labelList.append('"BACK_COVER"')
if fileNum in blankPages:
labelList.append('"BLANK"')
if fileNum in chapterPages:
labelList.append('"CHAPTER_PAGE"')
if fileNum in chapterStart:
labelList.append('"CHAPTER_START"')
if fileNum in copyrightPages:
labelList.append('"COPYRIGHT"')
if fileNum in firstChapterStart:
labelList.append('"FIRST_CONTENT_CHAPTER_START"')
if fileNum in foldoutPages:
labelList.append('"FOLDOUT"')
if fileNum in imagePages:
labelList.append('"IMAGE_ON_PAGE"')
if fileNum in indexStart:
labelList.append('"INDEX"')
if fileNum in multiworkBoundaries:
labelList.append('"MULTIWORK_BOUNDARY"')
if fileNum in prefacePages:
labelList.append('"PREFACE"')
if fileNum in referenceStartPages:
labelList.append('"REFERENCES"')
if fileNum in tableOfContentsStarts:
labelList.append('"TABLE_OF_CONTENTS"')
if fileNum in titlePages:
labelList.append('"TITLE"')
if fileNum in halfTitlePages:
labelList.append('"TITLE_PARTS"')
if not labelList:
label = ''
else:
label = 'label: ' + ', '.join(labelList)
# Combines all functions to write the file.
def writeFile():
global finalNumber, readingStartNum, readingEndNum, fileType, outputFile, romanCap, workingDir, orderNum, multiworkEndList, romanEndList, romanInt
originalDir = os.getcwd()
os.chdir(workingDir)
outputFile = outputFile + '.yml'
f = open(outputFile, 'w')
scanningAndScannerInfo(f)
f.write('pagedata:\n')
fileNum = 1
orderNum = 1
romanInt = 1
multiworkEndList = [0]
romanEndList = [0]
if multiworkBoundaries != 0:
defineMultiWorkLists()
inputToLists()
while fileNum <= finalNumber:
determinePrefixLength(fileNum)
generateFileName(prefixZeroes, fileNum, fileType)
generateOrderLabel(fileNum)
if multiworkBoundaries != 0:
defineMultiworkCycle(fileNum)
generateLabel(fileNum)
comma = ''
if orderLabel != '' and label !='':
comma = ', '
output = ' ' + fileName + ': { ' + orderLabel + comma + label + ' }\n'
f.write(output)
fileNum += 1
f.close()
print "File " + outputFile + " created in " + workingDir
os.chdir(originalDir)
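# Example of a pagedata line written by the loop above (file name and labels are
# illustrative, leading whitespace trimmed):
#   00000005.tif: { orderlabel: "1", label: "CHAPTER_START" }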
# Putting input into a function vs. having a huge list of inputs at the end.
def gatherInput():
global fileType, workingDir, finalNumber, readingStartNum, readingEndNum, frontCover, outputFile, backCover, blankPages, chapterPages, chapterStart, copyrightPages, firstChapterStart, foldoutPages, imagePages, indexStart, multiworkBoundaries, prefacePages, referenceStartPages, tableOfContentsStarts, titlePages, halfTitlePages, romanStart, romanCap, scanYearMonthDay, scanTime, DST, scannerModelInput, scannerMakeInput, bitoneResInput, contoneResInput, compressionDST, imageCompression, imageCompressionTime, imageCompressionTool, imageCompressionYearMonthDay, imageCompressionTime, imageCompressionAgent, imageCompressionToolList, scanningOrderInput, readingOrderInput, unpaginatedPages
pathToFile = raw_input("Provide a link to the CSV file: ")
workingDir = raw_input("Provide the directory in which the finished file should be placed: ")
hathi_file = open(pathToFile)
hathi_csv = csv.reader(hathi_file)
for row in hathi_csv:
if row[0] == '':
outputFile = 'no_barcode'
else:
outputFile = row[0]
if row[1] == '':
scanYearMonthDay = "0"
else:
scanYearMonthDay = row[1]
if row[2] == '':
scanTime = "0"
else:
scanTime = row[2]
if row[3] == '':
DST = "0"
else:
DST = row[3]
if row[6] == '':
bitoneResInput = "0"
else:
bitoneResInput = row[6]
if row[7] == '':
contoneResInput = "0"
else:
contoneResInput = row[7]
if row[12] == '':
scanningOrderInput = 'Y'
else:
scanningOrderInput = row[12]
if row[13] == '':
readingOrderInput = 'Y'
else:
readingOrderInput = row[13]
if row[15] == '':
finalNumber = 0
else:
finalNumber = int(row[15])
if row[16] == '':
frontCover = 0
else:
frontCover = int(row[16])
if row[17] == '':
halfTitlePages = "0"
else:
halfTitlePages = row[17]
if row[18] == '':
titlePages = "0"
else:
titlePages = row[18]
if row[19] == '':
copyrightPages = "0"
else:
copyrightPages = row[19]
if row[20] == '':
tableOfContentsStarts = "0"
else:
tableOfContentsStarts = row[20]
if row[21] == '':
romanStart = "0"
else:
romanStart = row[21]
if row[22] == '':
romanCap = "0"
else:
romanCap = row[22]
if row[23] == '':
prefacePages = "0"
else:
prefacePages = row[23]
if row[24] == '':
readingStartNum = "0"
else:
readingStartNum = row[24]
if row[25] == '':
firstChapterStart = "0"
else:
firstChapterStart = row[25]
if row[26] == '':
chapterPages = "0"
else:
chapterPages = row[26]
if row[27] == '':
chapterStart = "0"
else:
chapterStart = row[27]
if row[28] == '':
readingEndNum = "0"
else:
readingEndNum = row[28]
if row[29] == '':
blankPages = "0"
else:
blankPages = row[29]
if row[30] == '':
unpaginatedPages = "0"
else:
unpaginatedPages = row[30]
if row[31] == '':
imagePages = "0"
else:
imagePages = row[31]
if row[32] == '':
foldoutPages = "0"
else:
foldoutPages = row[32]
if row[33] == '':
indexStart = "0"
else:
indexStart = row[33]
if row[34] == '':
referenceStartPages = "0"
else:
referenceStartPages = row[34]
if row[35] == '':
multiworkBoundaries = "0"
else:
multiworkBoundaries = row[35]
if row[36] == '':
backCover = 0
else:
backCover = int(row[36])
if row[14] == '':
fileType = 'tif'
else:
fileType = row[14]
if row[4] == '':
scannerMakeInput = 'y'
else:
scannerMakeInput = row[4]
if row[5] == '':
scannerModelInput = 'y'
else:
scannerModelInput = row[5]
if row[8] == '':
imageCompression = 'n'
else:
imageCompression = 'y'
if row[8] == '':
imageCompressionYearMonthDay = "0"
else:
imageCompressionYearMonthDay = row[8]
if row[9] == '':
imageCompressionTime = "0"
else:
imageCompressionTime = row[9]
if row[10] == '':
compressionDST = "0"
else:
compressionDST = row[10]
if row[11] == '':
imageCompressionToolList = "0"
else:
imageCompressionToolList = row[11]
writeFile()
gatherInput()
|
__module_name__ = "smileclient-responder"
__module_version__ = "0.1"
__module_description__ = "Replies to all events in a channel with \":D\""
import hexchat
responding_channels = { "##:D" }
triggering_commands = { "PRIVMSG", "NOTICE", "JOIN", "PART", "QUIT", "TOPIC", "MODE" }
overly_happy_users = { "heddwch" }
rejoin_delay = 5000
rejoin_hooks = dict()
debug_enabled = False  # set True to print the raw server line that triggered each reply
def responder_hook(word, word_eol, userdata):
triggering_command = word[1]
triggering_channel = hexchat.get_info("channel")
responding_channel = False
for channel in responding_channels:
if(hexchat.nickcmp(triggering_channel, channel) == 0):
responding_channel = True
if responding_channel:
triggering_user = word[0][1:].split('!', 1)[0]
if(hexchat.nickcmp(triggering_command, "PRIVMSG") == 0 or hexchat.nickcmp(triggering_command, "NOTICE") == 0):
overly_happy = False
if(word[3] == ":D" or word[3] == "::D" or word[3] == ":+:D"):
for user in overly_happy_users:
if(hexchat.nickcmp(triggering_user, user) == 0):
overly_happy = True
break
if(overly_happy):
print("Ignoring message from overly happy user: {}".format(triggering_user))
return hexchat.EAT_NONE
command = "MSG"
if(hexchat.nickcmp(triggering_command, "NOTICE") == 0):
command = "NOTICE"
hexchat.command(command + " " + triggering_channel + " :D")
# Optional debug output, controlled by the module-level debug_enabled flag.
if debug_enabled:
    print("This :D brought to you by: " + word_eol[0])
return hexchat.EAT_NONE
def rejoin(userdata):
hook = rejoin_hooks[userdata]
del rejoin_hooks[userdata]
hexchat.unhook(hook)
print("Rejoining {} now…".format(userdata))
hexchat.command("join " + userdata)
return hexchat.EAT_NONE
def handle_kick(word, word_eol, userdata):
channel = hexchat.get_info("channel")
if(hexchat.nickcmp(channel, userdata) == 0):
if(hexchat.nickcmp(word[3], hexchat.get_info("nick")) == 0):
print("Kicked from {}. Rejoining in {}ms…".format(channel, rejoin_delay))
rejoin_hooks[channel] = hexchat.hook_timer(rejoin_delay, rejoin, channel)
return hexchat.EAT_NONE
for channel in responding_channels:
hexchat.command("join " + channel)
hexchat.hook_server("KICK", handle_kick, channel)
for command in triggering_commands:
hexchat.hook_server(command, responder_hook, channel)
|
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField, PasswordField
from wtforms.fields.html5 import DateField
from wtforms.validators import DataRequired, EqualTo, Email, Length, ValidationError
from book_manage.models import Admin
# Register forms
class RegistrationForm(FlaskForm):
username = StringField('Full name', validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email address', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
submit = SubmitField('Register')
def validate_username(self, username):
admin = Admin.query.filter_by(username=username.data).first()
if admin:
raise ValidationError('That username is taken. Please choose a different one')
def validate_email(self, email):
admin = Admin.query.filter_by(email=email.data).first()
if admin:
raise ValidationError('That email is taken. Please choose a different one')
# Login forms
class LoginForm(FlaskForm):
email = StringField('Email address', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
submit = SubmitField('Login')
# Upload forms
class Upload(FlaskForm):
title = StringField('Book title', validators=[DataRequired()])
author = StringField('Book author')
description = TextAreaField('Book description')
submit = SubmitField('Register the book')
# Borrow forms
class Borrow(FlaskForm):
customer = StringField('Customer name', validators=[DataRequired()])
title = StringField('Book title', validators=[DataRequired()])
author = StringField('Book author')
borrow_date = DateField('Borrow date')
return_date = DateField('Return date')
submit = SubmitField('Borrow')
|
# -*- coding: utf-8 -*-
from django.http import HttpResponseRedirect
from maintenance_mode.core import set_maintenance_mode
def maintenance_mode_off(request):
"""
Deactivate maintenance-mode and redirect to site root.
Only superusers are allowed to use this view.
"""
if request.user.is_superuser:
set_maintenance_mode(False)
redirect_to = request.META.get('SCRIPT_NAME', '/')
return HttpResponseRedirect('{}/'.format(redirect_to) if not redirect_to.endswith('/') else redirect_to)
def maintenance_mode_on(request):
"""
Activate maintenance-mode and redirect to site root.
Only superusers are allowed to use this view.
"""
if request.user.is_superuser:
set_maintenance_mode(True)
redirect_to = request.META.get('SCRIPT_NAME', '/')
return HttpResponseRedirect('{}/'.format(redirect_to) if not redirect_to.endswith('/') else redirect_to)
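# Illustrative URL wiring for the two views above (the paths and names are assumptions,
# not part of this module):
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('maintenance-mode/off/', views.maintenance_mode_off, name='maintenance_mode_off'),
#       path('maintenance-mode/on/', views.maintenance_mode_on, name='maintenance_mode_on'),
#   ]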
|
VERSION = '0.0.17'
SERVER_URL = 'https://api.weixin.qq.com'
COMPANY_URL = 'https://qyapi.weixin.qq.com'
SERVER_WAIT_TIME = 4.5
GREETING_WORDS = 'Greeting from itchatmp!'
try:
import itchatmphttp
COROUTINE = True
except ImportError:
COROUTINE = False
|
from core.advbase import *
from module.bleed import Bleed
def module():
return Ieyasu
class Ieyasu(Adv):
conf = {}
conf['slots.a'] = [
'Resounding_Rendition',
'Flash_of_Genius',
'Moonlight_Party',
'The_Plaguebringer',
'Dueling_Dancers'
]
conf['acl'] = """
##Use Gala Cat Sith only when out of Skillful Trickery
`dragon(c3-s-end), self.trickery <= 1
`s3, not buff(s3)
`s1, buff(s3)
`s2, x=5
`s4, fsc or not self.afflics.poison.get()
`fs, x=5 and buff(s3)
"""
conf['coabs'] = ['Wand','Delphi','Axe2']
conf['share.base'] = ['Rodrigo']
conf['share.poison'] = ['Curran']
def s2ifbleed(self):
if self.bleed_stack > 0:
return self.s2buff.get()
return 0
def prerun(self):
self.s2buff = Selfbuff('s2',0.20,15,'crit')
self.s2buff.modifier.get = self.s2ifbleed
# @staticmethod
# def prerun_skillshare(adv, dst):
# adv.bleed = Bleed('g_bleed',0).reset()
# def s2_proc(self, e):
# self.s2buff.on()
if __name__ == '__main__':
    import sys
    from core.simulate import test_with_argv
    test_with_argv(None, *sys.argv)
|
import pathlib
from dotenv import load_dotenv
load_dotenv()
from pippin.util.utils import Utils
load_dotenv(dotenv_path=Utils.get_project_root().joinpath(pathlib.PurePath('.env')))
import argparse
import asyncio
import getpass
from pippin.db.models.wallet import Wallet, WalletLocked, WalletNotFound
from pippin.db.tortoise_config import DBConfig
from tortoise import Tortoise
from tortoise.transactions import in_transaction
from pippin.util.crypt import AESCrypt, DecryptionError
from pippin.util.random import RandomUtil
from pippin.util.validators import Validators
from pippin.version import __version__
from pippin.config import Config
import os
# Set and patch nanopy
import nanopy
nanopy.account_prefix = 'ban_' if Config.instance().banano else 'nano_'
if Config.instance().banano:
nanopy.standard_exponent = 29
nanopy.work_difficulty = 'fffffe0000000000'
parser = argparse.ArgumentParser(description=f'Pippin v{__version__}')
subparsers = parser.add_subparsers(title='available commands', dest='command')
wallet_parser = subparsers.add_parser('wallet_list')
wallet_create_parser = subparsers.add_parser('wallet_create')
wallet_create_parser.add_argument('--seed', type=str, help='Seed for wallet (optional)', required=False)
wallet_change_seed_parser = subparsers.add_parser('wallet_change_seed')
wallet_change_seed_parser.add_argument('--wallet', type=str, help='ID of wallet to change seed for', required=True)
wallet_change_seed_parser.add_argument('--seed', type=str, help='New seed for wallet (optional)', required=False)
wallet_change_seed_parser.add_argument('--encrypt', action='store_true', help='If specified, will get prompted for a password to encrypt the wallet', default=False)
wallet_view_seed_parser = subparsers.add_parser('wallet_view_seed')
wallet_view_seed_parser.add_argument('--wallet', type=str, help='Wallet ID', required=True)
wallet_view_seed_parser.add_argument('--password', type=str, help='Password needed to decrypt wallet (if encrypted)', required=False)
wallet_view_seed_parser.add_argument('--all-keys', action='store_true', help='Also show all of the wallet address and keys', default=False)
account_create_parser = subparsers.add_parser('account_create')
account_create_parser.add_argument('--wallet', type=str, help='Wallet ID', required=True)
account_create_parser.add_argument('--key', type=str, help='AdHoc Account Key', required=False)
account_create_parser.add_argument('--count', type=int, help='Number of accounts to create (min: 1)', required=False)
wallet_destroy_parser = subparsers.add_parser('wallet_destroy')
wallet_destroy_parser.add_argument('--wallet', type=str, help='Wallet ID', required=True)
repget_parser = subparsers.add_parser('wallet_representative_get')
repget_parser.add_argument('--wallet', type=str, help='Wallet ID', required=True)
repset_parser = subparsers.add_parser('wallet_representative_set')
repset_parser.add_argument('--wallet', type=str, help='Wallet ID', required=True)
repset_parser.add_argument('--representative', type=str, help='New Wallet Representative', required=True)
repset_parser.add_argument('--update-existing', action='store_true', help='Update existing accounts', default=False)
options = parser.parse_args()
async def wallet_list():
wallets = await Wallet.all().prefetch_related('accounts', 'adhoc_accounts')
if len(wallets) == 0:
print("There aren't any wallets")
return
for w in wallets:
print(f"ID:{w.id}")
print("Accounts:")
for a in w.accounts:
print(a.address)
async def wallet_create(seed):
async with in_transaction() as conn:
wallet = Wallet(
seed=RandomUtil.generate_seed() if seed is None else seed
)
await wallet.save(using_db=conn)
new_acct = await wallet.account_create(using_db=conn)
print(f"Wallet created, ID: {wallet.id}\nFirst account: {new_acct}")
async def wallet_change_seed(wallet_id: str, seed: str, password: str) -> str:
encrypt = False
old_password = None
if len(password) > 0:
encrypt = True
# Retrieve wallet
try:
wallet = await Wallet.get_wallet(wallet_id)
except WalletNotFound:
print(f"No wallet found with ID: {wallet_id}")
exit(1)
except WalletLocked as wl:
wallet = wl.wallet
while True:
try:
npass = getpass.getpass(prompt='Enter current password:')
crypt = AESCrypt(npass)
try:
decrypted = crypt.decrypt(wallet.seed)
async with in_transaction() as conn:
wallet.seed = decrypted
wallet.encrypted = False
await wallet.save(using_db=conn, update_fields=['seed', 'encrypted'])
for a in await wallet.adhoc_accounts.all():
a.private_key = crypt.decrypt(a.private_key)
await a.save(using_db=conn, update_fields=['private_key'])
old_password = npass
break
except DecryptionError:
print("**Invalid password**")
except KeyboardInterrupt:
break
exit(0)
# Change key
await wallet.change_seed(seed)
# Encrypt if necessary
if encrypt:
await wallet.encrypt_wallet(password)
# Get newest account
newest = await wallet.get_newest_account()
print(f"Seed changed for wallet {wallet.id}\nFirst account: {newest.address}")
async def wallet_view_seed(wallet_id: str, password: str, all_keys: bool) -> str:
# Retrieve wallet
crypt = None
try:
wallet = await Wallet.get_wallet(wallet_id)
except WalletNotFound:
print(f"No wallet found with ID: {wallet_id}")
exit(1)
except WalletLocked as wl:
wallet = None
if password is not None:
crypt = AESCrypt(password)
try:
decrypted = crypt.decrypt(wl.wallet.seed)
wallet = wl.wallet
wallet.seed = decrypted
except DecryptionError:
pass
if wallet is None:
while True:
try:
npass = getpass.getpass(prompt='Enter current password:')
crypt = AESCrypt(npass)
try:
decrypted = crypt.decrypt(wl.wallet.seed)
wallet = wl.wallet
wallet.seed = decrypted
except DecryptionError:
print("**Invalid password**")
except KeyboardInterrupt:
break
exit(0)
print(f"Seed: {wallet.seed}")
if all_keys:
for a in await wallet.accounts.all():
print(f"Addr: {a.address} PrivKey: {nanopy.deterministic_key(wallet.seed, index=a.account_index)[0].upper()}")
else:
print(f"AdHoc accounts:")
for a in await wallet.adhoc_accounts.all():
if not wallet.encrypted:
print(f"Addr: {a.address} PrivKey: {a.private_key.upper()}")
else:
print(f"Addr: {a.address} PrivKey: {crypt.decrypt(a.private_key)}")
async def account_create(wallet_id: str, key: str, count: int = 1) -> str:
# Retrieve wallet
crypt = None
password=None
if count is None:
count = 1
try:
wallet = await Wallet.get_wallet(wallet_id)
except WalletNotFound:
print(f"No wallet found with ID: {wallet_id}")
exit(1)
except WalletLocked as wl:
wallet = wl.wallet
if key is not None:
while True:
try:
npass = getpass.getpass(prompt='Enter current password to encrypt ad-hoc key:')
crypt = AESCrypt(npass)
try:
decrypted = crypt.decrypt(wl.wallet.seed)
wallet = wl.wallet
wallet.seed = decrypted
password=npass
except DecryptionError:
print("**Invalid password**")
except KeyboardInterrupt:
break
exit(0)
if key is None:
if count == 1:
a = await wallet.account_create()
print(f"account: {a}")
else:
async with in_transaction() as conn:
ass = await wallet.accounts_create(count=count)
for a in ass:
print(f"account: {a}")
else:
a = await wallet.adhoc_account_create(key, password=password)
print(f"account: {a}")
async def wallet_destroy(wallet_id: str):
# Retrieve wallet
try:
wallet = await Wallet.get_wallet(wallet_id)
except WalletNotFound:
print(f"No wallet found with ID: {wallet_id}")
exit(1)
except WalletLocked as wl:
wallet = wl.wallet
await wallet.delete()
print("Wallet destroyed")
async def wallet_representative_get(wallet_id: str):
# Retrieve wallet
try:
wallet = await Wallet.get_wallet(wallet_id)
except WalletNotFound:
print(f"No wallet found with ID: {wallet_id}")
exit(1)
except WalletLocked as wl:
wallet = wl.wallet
if wallet.representative is None:
print("Representative not set")
else:
print(f"Wallet representative: {wallet.representative}")
async def wallet_representative_set(wallet_id: str, rep: str, update_existing: bool = False):
# Retrieve wallet
# Retrieve wallet
crypt = None
password=None
if not Validators.is_valid_address(rep):
print("Invalid representative")
exit(1)
try:
wallet = await Wallet.get_wallet(wallet_id)
except WalletNotFound:
print(f"No wallet found with ID: {wallet_id}")
exit(1)
except WalletLocked as wl:
wallet = wl.wallet
if update_existing:
while True:
try:
npass = getpass.getpass(prompt='Enter current password to decrypt wallet:')
crypt = AESCrypt(npass)
try:
decrypted = crypt.decrypt(wl.wallet.seed)
wallet = wl.wallet
wallet.seed = decrypted
password=npass
except DecryptionError:
print("**Invalid password**")
except KeyboardInterrupt:
break
exit(0)
wallet.representative = rep
await wallet.save(update_fields=['representative'])
await wallet.bulk_representative_update(rep)
print(f"Representative changed")
def main():
loop = asyncio.new_event_loop()
try:
loop.run_until_complete(DBConfig().init_db())
if options.command == 'wallet_list':
loop.run_until_complete(wallet_list())
elif options.command == 'wallet_create':
if options.seed is not None:
if not Validators.is_valid_block_hash(options.seed):
print("Invalid seed specified")
exit(1)
loop.run_until_complete(wallet_create(options.seed))
elif options.command == 'wallet_change_seed':
if options.seed is not None:
if not Validators.is_valid_block_hash(options.seed):
print("Invalid seed specified")
exit(1)
else:
while True:
try:
options.seed = getpass.getpass(prompt='Enter new wallet seed:')
if Validators.is_valid_block_hash(options.seed):
break
print("**Invalid seed**, should be a 64-character hex string")
except KeyboardInterrupt:
break
exit(0)
password = ''
if options.encrypt:
while True:
try:
password = getpass.getpass(prompt='Enter password to encrypt wallet:')
if password.strip() == '':
    print("**Bad password** - cannot be blank")
    continue
break
except KeyboardInterrupt:
break
exit(0)
loop.run_until_complete(wallet_change_seed(options.wallet, options.seed, password))
elif options.command == 'wallet_view_seed':
loop.run_until_complete(wallet_view_seed(options.wallet, options.password, options.all_keys))
elif options.command == 'account_create':
    # --key and --count are mutually exclusive, so reject that combination before validating either one.
    if options.key is not None and options.count is not None:
        print("You can only specify one: --key or --count")
        print("--count can only be used for deterministic accounts")
    elif options.key is not None:
        if not Validators.is_valid_block_hash(options.key):
            print("Invalid Private Key")
            exit(0)
    elif options.count is not None:
        if options.count < 1:
            print("Count needs to be at least 1...")
    loop.run_until_complete(account_create(options.wallet, options.key, options.count))
elif options.command == 'wallet_destroy':
loop.run_until_complete(wallet_destroy(options.wallet))
elif options.command == 'wallet_representative_get':
loop.run_until_complete(wallet_representative_get(options.wallet))
elif options.command == 'wallet_representative_set':
loop.run_until_complete(wallet_representative_set(options.wallet, options.representative, update_existing=options.update_existing))
else:
parser.print_help()
except Exception as e:
print(str(e))
raise e
finally:
loop.run_until_complete(Tortoise.close_connections())
loop.close()
if __name__ == "__main__":
main()
|
from aws_cdk import (
aws_iam as iam,
aws_rds as rds,
aws_sqs as sqs,
aws_sns as sns,
aws_ec2 as ec2,
aws_s3 as s3,
aws_logs as logs,
aws_kms as kms,
aws_cloudwatch as cloudwatch,
aws_cloudwatch_actions as cloudwatch_actions,
aws_secretsmanager as secretsmanager,
aws_s3_notifications as s3n,
aws_sns_subscriptions as subs,
aws_lambda as lfn,
Aspects, CfnOutput, Stack, SecretValue, Tags, Fn, Aws, CfnMapping, Duration, RemovalPolicy,
App
)
from constructs import Construct
import re
import os
from cdk_nag import ( AwsSolutionsChecks, NagSuppressions )
class Aurora(Stack):
def __init__(self, scope:Construct, id:str,
vpc_id:str, ## vpc id
subnet_ids:list, ## list of subnet ids
db_name:str, ## database name
instance_type = None, ## ec2.InstanceType
replica_instances:int = 2, ## At least 1. Default 2
aurora_cluster_username:str="clusteradmin",
backup_retention_days:int=14,
backup_window:str="00:15-01:15",
preferred_maintenance_window:str="Sun:23:45-Mon:00:15",
engine:str="postgresql", ## Aurora Database Engine: postgresql or mysql
enable_babelfish:bool=True, ## Support for MSSQL. (no extra cost)
ingress_sources:list=[], ## A security group object or a network subnet
## ec2.Peer.ipv4("0.0.0.0/0")
## ec2.SecurityGroup
**kwargs) -> None:
super().__init__(scope, id, **kwargs)
if engine not in ["postgresql", "mysql"]:
print("Unknown Engine")
exit(1)
##
## Enforce a minimum backup retention period
##
if backup_retention_days < 14:
backup_retention_days = 14
##
## Enforce a minimum number of read replicas
##
if replica_instances < 1:
replica_instances = 1
############################################
##
## CDK Nag - https://pypi.org/project/cdk-nag/
## https://github.com/cdklabs/cdk-nag
##
## CDK Nag Checks for AWS Engagement Solutions Security Rules:
## https://github.com/cdklabs/cdk-nag/blob/main/RULES.md#awssolutions
## Also checks for:
## HIPAA Security
## NIST 800-53 rev 4
## NIST 800-53 rev 5
##
############################################
Aspects.of(self).add(AwsSolutionsChecks())
##
## Suppressed Errors
##
NagSuppressions.add_stack_suppressions(self, [{"id":"AwsSolutions-IAM4", "reason":"TODO: Stop using AWS managed policies."}])
NagSuppressions.add_stack_suppressions(self, [{"id":"AwsSolutions-IAM5", "reason":"TODO: Remove Wildcards in IAM roles."}])
##
## Suppressed Warnings
##
NagSuppressions.add_stack_suppressions(self, [{"id":"AwsSolutions-RDS16", "reason":"parameter referencing an intrinsic function"}])
azs = Fn.get_azs()
vpc = ec2.Vpc.from_vpc_attributes(self, 'ExistingVPC', availability_zones=azs, vpc_id=vpc_id)
subnets = list()
for subnet_id in subnet_ids:
subnets.append(ec2.Subnet.from_subnet_attributes(self, subnet_id.replace("-", "").replace("_", "").replace(" ", ""), subnet_id=subnet_id))
vpc_subnets = ec2.SubnetSelection(subnets=subnets)
allAll = ec2.Port(protocol=ec2.Protocol("ALL"), string_representation="ALL")
tcp3306 = ec2.Port(protocol=ec2.Protocol("TCP"), from_port=3306, to_port=3306, string_representation="tcp3306 MySQL")
tcp5432 = ec2.Port(protocol=ec2.Protocol("TCP"), from_port=5432, to_port=5432, string_representation="tcp5432 PostgreSQL")
tcp1433 = ec2.Port(protocol=ec2.Protocol("TCP"), from_port=1433, to_port=1433, string_representation="tcp1433 MSSQL")
##
## Database Security Group
##
dbsg = ec2.SecurityGroup(self, "DatabaseSecurityGroup",
vpc = vpc,
allow_all_outbound = True,
description = id + " Database",
security_group_name = id + " Database",
)
dbsg.add_ingress_rule(
peer =dbsg,
connection =allAll,
description="all from self"
)
dbsg.add_egress_rule(
peer =ec2.Peer.ipv4("0.0.0.0/0"),
connection =allAll,
description="all out"
)
if engine == "mysql":
connection_port = tcp3306
connection_name = "tcp3306 MySQL"
else:
connection_port = tcp5432
connection_name = "tcp5432 PostgreSQL"
for ingress_source in ingress_sources:
dbsg.add_ingress_rule(
peer =ingress_source,
connection =connection_port,
description=connection_name
)
if engine == "postgresql":
dbsg.add_ingress_rule(
peer =ingress_source,
connection =tcp1433,
description="tcp1433 MSSQL"
)
db_subnet_group = rds.SubnetGroup(self,
id = "DatabaseSubnetGroup",
vpc = vpc,
description = id + " subnet group",
vpc_subnets = vpc_subnets,
subnet_group_name=id + "subnet group"
)
##
## use PostgreSQL by default
## https://docs.aws.amazon.com/cdk/api/v2/python/aws_cdk.aws_rds/AuroraPostgresEngineVersion.html#aws_cdk.aws_rds.AuroraPostgresEngineVersion
##
aurora_engine = rds.DatabaseClusterEngine.aurora_postgres(version=rds.AuroraPostgresEngineVersion.VER_13_4)
##
## include support for MySQL
## https://docs.aws.amazon.com/cdk/api/v2/python/aws_cdk.aws_rds/AuroraMysqlEngineVersion.html#aws_cdk.aws_rds.AuroraMysqlEngineVersion
##
if engine == "mysql":
aurora_engine = rds.DatabaseClusterEngine.aurora_mysql(version=rds.AuroraMysqlEngineVersion.VER_2_10_1)
aurora_parameters = {}
## If PostgreSQL, and enable_babelfish is True, turn on Babelfish support.
if enable_babelfish and engine=="postgresql":
aurora_parameters["rds.babelfish_status"] = "on"
aurora_parameter_group = rds.ParameterGroup(self, id="AuroraParameterGroup",
engine =aurora_engine,
description=id + " Parameter Group",
parameters =aurora_parameters)
##
## Secret username/password for the cluster.
##
aurora_cluster_secret = secretsmanager.Secret(self, "AuroraClusterCredentials",
secret_name =db_name + "AuroraClusterCredentials",
description =db_name + "Aurora Cluster Credentials",
generate_secret_string=secretsmanager.SecretStringGenerator(
exclude_characters ="\"@/\\ '",
generate_string_key ="password",
password_length =30,
secret_string_template='{"username":"'+aurora_cluster_username+'"}'),
)
aurora_cluster_credentials = rds.Credentials.from_secret(aurora_cluster_secret, aurora_cluster_username)
##
## Default Instance Type
##
if not instance_type:
instance_type = ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE4_GRAVITON, ec2.InstanceSize.MEDIUM)
kms_key = kms.Key(self, "AuroraDatabaseKey",
enable_key_rotation=True,
alias=db_name
)
##
## https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.CloudWatch.html
##
## If PostgreSQL, export the postgresql log.
cloudwatch_logs_exports=["postgresql"]
## If MySQL, export the slowquery log.
if engine == "mysql":
cloudwatch_logs_exports=["slowquery"]
aurora_cluster = rds.DatabaseCluster(self, "AuroraDatabase",
engine = aurora_engine,
credentials = aurora_cluster_credentials, # Optional - will default to 'admin' username and generated password
backup = rds.BackupProps(
retention =Duration.days(backup_retention_days),
preferred_window =backup_window
),
parameter_group = aurora_parameter_group,
instances = replica_instances,
iam_authentication = True,
storage_encrypted = True,
storage_encryption_key = kms_key,
deletion_protection=True,
removal_policy=RemovalPolicy.SNAPSHOT,
copy_tags_to_snapshot=True,
cloudwatch_logs_exports=cloudwatch_logs_exports,
cloudwatch_logs_retention=logs.RetentionDays.ONE_MONTH,
preferred_maintenance_window=preferred_maintenance_window, # Should be specified as a range ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC).
# Example: Sun:23:45-Mon:00:15
# Default: 30-minute window selected at random from an 8-hour block of time for each AWS Region,
# occurring on a random day of the week.
#cluster_identifier=db_name,
instance_identifier_base = db_name,
instance_props = {
"instance_type": instance_type,
"vpc_subnets": vpc_subnets,
"vpc": vpc,
"security_groups": [dbsg],
} ## instance_props
) ## rds.DatabaseCluster
aurora_cluster.apply_removal_policy(RemovalPolicy.RETAIN)
Tags.of(aurora_cluster).add("Name", db_name, priority=300)
aurora_cluster.add_rotation_single_user(
automatically_after=Duration.days(30),
exclude_characters ="\"@/\\ '",
vpc_subnets=vpc_subnets
)
##
## Cloudwatch Dashboard
##
dashboard = cloudwatch.Dashboard(self, "AuroraMonitoringDashboard",
dashboard_name=db_name)
db_connections = aurora_cluster.metric_database_connections()
cpu_utilization = aurora_cluster.metric_cpu_utilization()
deadlocks = aurora_cluster.metric_deadlocks()
free_local_storage = aurora_cluster.metric_free_local_storage()
freeable_memory = aurora_cluster.metric_freeable_memory()
network_receive_throughput = aurora_cluster.metric_network_receive_throughput()
network_throughput = aurora_cluster.metric_network_throughput()
network_transmit_throughput = aurora_cluster.metric_network_transmit_throughput()
snapshot_storage_used = aurora_cluster.metric_snapshot_storage_used()
total_backup_storage_billed = aurora_cluster.metric_total_backup_storage_billed()
volume_bytes_used = aurora_cluster.metric_volume_bytes_used()
volume_read_io_ps = aurora_cluster.metric_volume_read_io_ps()
volume_write_io_ps = aurora_cluster.metric_volume_write_io_ps()
# The average amount of time taken per disk I/O operation (average over 1 minute)
read_latency = aurora_cluster.metric("ReadLatency", statistic="Average", period=Duration.seconds(60))
percent90 = cloudwatch.HorizontalAnnotation(
value =85,
color =cloudwatch.Color.RED,
fill =cloudwatch.Shading('NONE'),
label ="🚨 DANGER")
percent80 = cloudwatch.HorizontalAnnotation(
value =75,
color =cloudwatch.Color.ORANGE,
fill =cloudwatch.Shading('NONE'),
label ="⚠️ WARNING")
widget_db_connections = cloudwatch.GraphWidget(
title="DB Connections",
# Metrics to display on left Y axis.
left =[db_connections],
#left_annotations = [percent90, percent80],
) ## GraphWidget
widget_cpu_utilization = cloudwatch.GraphWidget(
title="CPU Utilization",
# Metrics to display on left Y axis.
left =[cpu_utilization],
#left_annotations = [percent90, percent80],
) ## GraphWidget
widget_read_latency = cloudwatch.GraphWidget(
title="Read Latency",
# Metrics to display on left Y axis.
left =[read_latency],
#left_annotations = [percent90, percent80],
) ## GraphWidget
widget_deadlocks = cloudwatch.GraphWidget(title="Deadlocks", left=[deadlocks])
widget_free_local_storage = cloudwatch.GraphWidget(title="Free Local Storage", left=[free_local_storage])
widget_freeable_memory = cloudwatch.GraphWidget(title="Freeable Memory", left=[freeable_memory])
widget_network_receive_throughput = cloudwatch.GraphWidget(title="Network Throughput", left=[network_receive_throughput, network_throughput, network_transmit_throughput])
widget_total_backup_storage_billed = cloudwatch.GraphWidget(title="Backup Storage Billed", left=[total_backup_storage_billed])
widget_volume_bytes = cloudwatch.GraphWidget(title="Storage", left=[volume_bytes_used, snapshot_storage_used])
widget_volume_iops = cloudwatch.GraphWidget(title="Volume IOPs", left=[volume_read_io_ps, volume_write_io_ps])
##
## Each dashboard.add_widgets() call creates a single row in the dashboard.
##
dashboard.add_widgets(
widget_db_connections,
widget_cpu_utilization
)
dashboard.add_widgets(
widget_total_backup_storage_billed,
widget_free_local_storage
)
dashboard.add_widgets(
widget_freeable_memory,
widget_volume_bytes,
widget_volume_iops,
)
dashboard.add_widgets(
widget_network_receive_throughput,
widget_read_latency,
widget_deadlocks,
)
CfnOutput(self, "OutputSecretName", export_name=aurora_cluster.stack.stack_name+":SecretName", value=aurora_cluster.secret.secret_name) # isecret
CfnOutput(self, "OutputSecretArn", export_name=aurora_cluster.stack.stack_name+":SecretArn", value=aurora_cluster.secret.secret_arn) # isecret
CfnOutput(self, "OutputGetSecretValue", export_name=aurora_cluster.stack.stack_name+":GetSecretValue", value="aws secretsmanager get-secret-value --secret-id "+aurora_cluster.secret.secret_arn)
CfnOutput(self, "OutputIntanceIdentifiers", export_name=aurora_cluster.stack.stack_name+":IntanceIdentifiers", value=str(aurora_cluster.instance_identifiers)) # list
instance_endpoints = []
for ie in aurora_cluster.instance_endpoints:
instance_endpoints.append(ie.hostname)
CfnOutput(self, "OutputEndpoints", export_name=aurora_cluster.stack.stack_name+":Endpoints", value=str(instance_endpoints)) # list
CfnOutput(self, "OutputClusterEndpoint", export_name=aurora_cluster.stack.stack_name+":Endpoint", value=aurora_cluster.cluster_endpoint.socket_address) # list
CfnOutput(self, "OutputEngineFamily", export_name=aurora_cluster.stack.stack_name+":EngineFamily", value=aurora_cluster.engine.engine_family) # iclusterengine
CfnOutput(self, "OutputEngineType", export_name=aurora_cluster.stack.stack_name+":EngineType", value=aurora_cluster.engine.engine_type) # iclusterengine
CfnOutput(self, "OutputEngineFullVersion", export_name=aurora_cluster.stack.stack_name+":EngineFullVersion", value=aurora_cluster.engine.engine_version.full_version) # iclusterengine
CfnOutput(self, "OutputEngineMajorVersion", export_name=aurora_cluster.stack.stack_name+":EngineMajorVersion", value=aurora_cluster.engine.engine_version.major_version) # iclusterengine
CfnOutput(self, "OutputParameterGroupFamily", export_name=aurora_cluster.stack.stack_name+":ParameterGroupFamily", value=aurora_cluster.engine.parameter_group_family)
class IcePlainsOfHoth(Stack):
def __init__(self, scope:Construct, id:str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
vpc = ec2.Vpc(self, "IcePlainsVpc",
cidr = "10.99.0.0/16",
max_azs = 3,
enable_dns_hostnames = True,
enable_dns_support = True,
subnet_configuration = [
ec2.SubnetConfiguration(
cidr_mask = 24,
name = 'public1',
subnet_type = ec2.SubnetType.PUBLIC,
),
ec2.SubnetConfiguration(
cidr_mask = 24,
name = 'public2',
subnet_type = ec2.SubnetType.PUBLIC,
),
ec2.SubnetConfiguration(
cidr_mask = 24,
name = 'public3',
subnet_type = ec2.SubnetType.PUBLIC,
)
]
)
vpc_subnets = vpc.select_subnets(
subnet_type=ec2.SubnetType.PUBLIC,
one_per_az =True
)
subnet_ids = []
for subnet in vpc_subnets.subnets:
subnet_ids.append(subnet.subnet_id)
vpc_id = vpc.vpc_id
Aurora(self, "EchoBaseDb",
db_name="EchoBase",
ingress_sources=[ec2.Peer.ipv4("10.10.10.10/32")],
vpc_id=vpc_id,
subnet_ids=subnet_ids,
env={'region': 'us-east-1'},
description="Echo Base DB")
app = App()
# Call the stack on its own
Aurora(app, "Aurora", env={"region":"us-east-1"}, description="Aurora Cluster",
vpc_id = "vpc-aaaaaaaa",
subnet_ids=["subnet-xxxxxxxx", "subnet-yyyyyyyy", "subnet-zzzzzzzz"],
db_name="sampledb"
)
# Use the construct in a sample stack
IcePlainsOfHoth(app, "IcePlainsOfHoth", env={"region":"us-east-1"}, description="Ice Plains of Hoth")
app.synth()
|
import boto3
client = boto3.client('resourcegroupstaggingapi')
def get(tag_filters):
response = client.get_resources(
TagFilters=tag_filters
)
return response
def human_readable(raw_get_resources_response):
return [
{**split_resource_arn(e['ResourceARN']), **{'Tags': collapse_tags(e['Tags'])}}
for e in raw_get_resources_response['ResourceTagMappingList']]
def split_resource_arn(arn):
    _arn = arn.split(':')
    if len(_arn) > 6:
        # The resource portion can itself contain ':'; re-join it before splitting on '/'.
        resource = ':'.join(_arn[5:])
    else:
        resource = _arn[5]
    resource = resource.split('/')
return {
'service': _arn[2],
'region': _arn[3],
'account_id': _arn[4],
'name': resource[0],
'id': resource[1] if len(resource) > 1 else '',
'arn': arn
}
def collapse_tags(raw_tags):
return [(e['Key'], e['Value']) for e in raw_tags]
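# Illustrative usage of the helpers above; the tag key and value are assumptions, and
# the call needs valid AWS credentials plus permission to use the tagging API.
def example_list_tagged_resources():
    filters = [{'Key': 'Environment', 'Values': ['production']}]
    for resource in human_readable(get(filters)):
        print(resource['service'], resource['region'], resource['name'], resource['Tags'])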
|
# -*- coding: utf-8 -*-
"""
This module defines the steps related to the app.
"""
from behave import step
from flybirds.core.global_context import GlobalContext as g_Context
from flybirds.utils.dsl_helper import ele_wrap
@step("install app[{selector}]")
@ele_wrap
def install_app(context, selector=None):
g_Context.step.install_app(context, selector)
@step("delete app[{selector}]")
@ele_wrap
def uninstall_app(context, selector=None):
"""
uninstall app
"""
g_Context.step.uninstall_app(context, selector)
@step("start app[{selector}]")
@ele_wrap
def start_app(context, selector=None):
g_Context.step.start_app(context, selector)
@step("restart app")
def restart_app(context):
g_Context.step.restart_app(context)
@step("close app")
def stop_app(context):
g_Context.step.stop_app(context)
|
import os
import shutil
import random
from pathlib import Path
def main():
cdcp_path = os.path.dirname(os.getcwd()) + "/data/cdcp/original"
train_dir = cdcp_path + "/train"
dev_dir = cdcp_path + "/dev"
test_dir = cdcp_path + "/test"
train_file_prefix = {f.split(".")[0] for f in os.listdir(train_dir)}
random.seed(101)
dev_list = random.sample(list(train_file_prefix), len(train_file_prefix) // 10)
Path(dev_dir).mkdir(parents=True, exist_ok=True)
for f in os.listdir(train_dir):
if f.split(".")[0] in dev_list:
if '.ann' in f:
shutil.move(train_dir + "/" + f, dev_dir + "/" + f)
if '.txt' in f and '.pipe' not in f and '.json' not in f:
shutil.move(train_dir + "/" + f, dev_dir + "/" + f)
for f in os.listdir(train_dir):
if '.txt.json' in f or '.txt.pipe' in f:
os.remove(train_dir + "/" + f)
for f in os.listdir(test_dir):
if '.txt.json' in f or '.txt.pipe' in f:
os.remove(test_dir + "/" + f)
train_docs = len({f.split(".")[0] for f in os.listdir(train_dir)})
dev_docs = len({f.split(".")[0] for f in os.listdir(dev_dir)})
test_docs = len({f.split(".")[0] for f in os.listdir(test_dir)})
print("Train docs: {}".format(train_docs))
print("Dev docs: {}".format(dev_docs))
print("Test docs: {}".format(test_docs))
print("Total docs: {}".format(train_docs + dev_docs + test_docs))
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
# Copyright 2013-2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Forms for Launch Config
"""
import wtforms
from wtforms import validators
from ..i18n import _
from . import BaseSecureForm, ChoicesManager
class LaunchConfigDeleteForm(BaseSecureForm):
"""LaunchConfig deletion form.
Only need to initialize as a secure form to generate CSRF token
"""
pass
class CreateLaunchConfigForm(BaseSecureForm):
"""Create Launch Configuration form"""
image_id = wtforms.HiddenField(label=_(u'Image'))
name_error_msg = _(u'Name is required')
name = wtforms.TextField(
label=_(u'Name'),
validators=[validators.InputRequired(message=name_error_msg)],
)
instance_type_error_msg = _(u'Instance type is required')
instance_type = wtforms.SelectField(
label=_(u'Instance type'),
validators=[validators.InputRequired(message=instance_type_error_msg)],
)
keypair_error_msg = _(u'Key pair is required')
keypair = wtforms.SelectField(
label=_(u'Key name'),
validators=[validators.InputRequired(message=keypair_error_msg)],
)
securitygroup_error_msg = _(u'Security group is required')
securitygroup = wtforms.SelectField(
label=_(u'Security group'),
validators=[validators.InputRequired(message=securitygroup_error_msg)],
)
role = wtforms.SelectField()
userdata = wtforms.TextAreaField(label=_(u'User data'))
userdata_file_helptext = _(u'User data file may not exceed 16 KB')
userdata_file = wtforms.FileField(label='')
kernel_id = wtforms.SelectField(label=_(u'Kernel ID'))
ramdisk_id = wtforms.SelectField(label=_(u'RAM disk ID (RAMFS)'))
monitoring_enabled = wtforms.BooleanField(label=_(u'Enable monitoring'))
create_sg_from_lc = wtforms.BooleanField(label=_(u'Create scaling group using this launch configuration'))
def __init__(self, request, image=None, securitygroups=None, conn=None, iam_conn=None, **kwargs):
super(CreateLaunchConfigForm, self).__init__(request, **kwargs)
self.image = image
self.securitygroups = securitygroups
self.conn = conn
self.iam_conn = iam_conn
self.cloud_type = request.session.get('cloud_type', 'euca')
self.set_error_messages()
self.monitoring_enabled.data = True
self.create_sg_from_lc.data = True
self.choices_manager = ChoicesManager(conn=conn)
self.set_help_text()
self.set_choices()
if image is not None:
self.image_id.data = self.image.id
def set_help_text(self):
self.userdata_file.help_text = self.userdata_file_helptext
def set_choices(self):
self.instance_type.choices = self.choices_manager.instance_types(cloud_type=self.cloud_type)
self.keypair.choices = self.choices_manager.keypairs(add_blank=True, no_keypair_option=True)
self.securitygroup.choices = self.choices_manager.security_groups(
securitygroups=self.securitygroups, add_blank=False)
self.role.choices = ChoicesManager(self.iam_conn).roles(add_blank=True)
self.kernel_id.choices = self.choices_manager.kernels(image=self.image)
self.ramdisk_id.choices = self.choices_manager.ramdisks(image=self.image)
# Set default choices where applicable, defaulting to first non-blank choice
if len(self.securitygroup.choices) > 1:
self.securitygroup.data = 'default'
def set_error_messages(self):
self.name.error_msg = self.name_error_msg
self.instance_type.error_msg = self.instance_type_error_msg
self.securitygroup.error_msg = self.securitygroup_error_msg
class LaunchConfigsFiltersForm(BaseSecureForm):
"""Form class for filters on landing page"""
instance_type = wtforms.SelectMultipleField(label=_(u'Instance type'))
key_name = wtforms.SelectMultipleField(label=_(u'Key pair'))
security_groups = wtforms.SelectMultipleField(label=_(u'Security group'))
def __init__(self, request, cloud_type='euca', ec2_conn=None, **kwargs):
super(LaunchConfigsFiltersForm, self).__init__(request, **kwargs)
self.request = request
self.cloud_type = cloud_type
self.ec2_conn = ec2_conn
self.ec2_choices_manager = ChoicesManager(conn=ec2_conn)
self.instance_type.choices = self.ec2_choices_manager.instance_types(
add_blank=False, cloud_type=self.cloud_type, add_description=False)
self.key_name.choices = self.ec2_choices_manager.keypairs(add_blank=False)
self.security_groups.choices = self.ec2_choices_manager.security_groups(add_blank=False)
|
#!/usr/bin/env python
""" -*- coding: utf-8 -*-
By: "John Hazelwood" <jhazelwo@users.noreply.github.com>
Tested with Python 2.6.6 on CentOS 6.5
"""
import re
import sys
import pwd
import os
import glob
import socket
import struct
import json
from pprint import pprint
class QuietError(Exception):
# All who inherit me shall not traceback, but be spoken of cleanly
pass
class RegexError(QuietError):
# Invalid regex pattern
pass
class NetShow(object):
""" Object to hold data about network connections. """
def __init__(self):
""" . """
self.use_header = True # Show header in __str__ output
        self.as_json = False # Output as iterable of JSON objects.
        self.as_dict = False # Output as list of dicts.
self.results = [] # Store filter results, if any.
self.contents = [] # Store complete network stack as list of dicts.
self.contents.extend(self.proc_to_dict('tcp'))
self.contents.extend(self.proc_to_dict('udp'))
self.contents.extend(self.proc_to_dict('tcp6'))
self.contents.extend(self.proc_to_dict('udp6'))
def usage(self):
return """
netshow.py, version 1.0
usage: netshow.py [--json|--dict|-s|-h] ['PATTERN' | WORDS]
--json : Output as iterable of JSON objects.
--dict : Output as list of dicts.
-s : Hide header
-h|--help : Help
Wrap regex in single quotes.
Words can be any whole-string match.
examples:
netshow.py 80
netshow.py tcp6
netshow.py tcp 22
netshow.py 10.2.3.4 53 'tcp|udp'
netshow.py '.*sshd$'
"""
def search_dict_values(self, pattern, d):
""" . """
pattern = str(pattern)
is_regex = False
        special_characters = ['^', '*', '?', '[', '(', '|', '$']
        for has in special_characters:
if has in pattern:
is_regex = True
if is_regex:
for v in d.values():
try:
if re.match(pattern, v):
return d
except Exception as e:
raise RegexError(e)
else:
if pattern in d.values():
return d
return False
def match_all_needles(self, needle, haystack):
""" . """
for n in needle:
if n not in haystack and not self.search_dict_values(n, haystack):
return False
return haystack
def filter(self, params):
""" . """
if not params:
return True
for connection in self.contents:
match = self.match_all_needles(params, connection)
if match:
self.results.append(match)
if not self.results:
return False
return True
def line_to_dict(self, line, protocol):
""" Construct dict of elements in {line}. """
d = {}
connection_states = {
'01':'ESTABLISHED',
'02':'SYN_SENT',
'03':'SYN_RECV',
'04':'FIN_WAIT1',
'05':'FIN_WAIT2',
'06':'TIME_WAIT',
'07':'CLOSE',
'08':'CLOSE_WAIT',
'09':'LAST_ACK',
'0A':'LISTEN',
'0B':'CLOSING' }
line_array = self._remove_empty(line.split(' '))
d['protocol'] = protocol
d['local_ip'], d['local_port'] = self._convert_ip_port(line_array[1])
d['remote_ip'], d['remote_port'] = self._convert_ip_port(line_array[2])
if 'tcp' in protocol:
d['state'] = connection_states[line_array[3]]
else:
d['state'] = ''
d['pid'] = self.pid_of_inode(line_array[9])
d['program'] = self.name_of_pid(d['pid'])
return d
def proc_to_dict(self, protocol):
""" Return list of dicts of /proc/net/{protocol}. """
if protocol not in ['tcp', 'tcp6', 'udp', 'udp6']:
raise TypeError('Unknown protocol {0}'.format(protocol))
l = []
with open('/proc/net/{0}'.format(protocol), 'r') as handle:
for line in handle:
line = line.rstrip('\n').strip(' ')
if ':' in line:
l.append(self.line_to_dict(line, protocol))
return l
def _convert_ip(self, address):
"""
Convert and squash addresses to familiar format.
ipv6 Convert '000080FE00000000FF565002BD69B1FE'
To 'fe80::250:56ff:feb1:69bd'
ipv4 Convert '8A8E1CAC'
To '172.28.142.138'
"""
if len(address) > 16:
## http://stackoverflow.com/questions/41940483
try:
address = address.decode('hex') # Python2
except AttributeError:
address = bytes.fromhex(address) # Python3
address = struct.unpack('>IIII', address)
address = struct.pack('@IIII', *address)
address = socket.inet_ntop(socket.AF_INET6, address).lower()
else:
address = '{0}.{1}.{2}.{3}'.format(
(self._hex2dec(address[6:8])),
(self._hex2dec(address[4:6])),
(self._hex2dec(address[2:4])),
(self._hex2dec(address[0:2]))
)
return address
def _hex2dec(self, this):
""" . """
return str(int(this,16))
def _remove_empty(self, this):
""" . """
return [x for x in this if x]
def _convert_ip_port(self, array):
""" Convert ipaddress and port from hex to decimal."""
host,port = array.split(':')
_port = self._hex2dec(port)
if _port == '0':
_port = '*'
return self._convert_ip(host),_port
def pid_of_inode(self, inode):
""" Find PID of process bound to given inode. """
for item in glob.glob('/proc/[0-9]*/fd/[0-9]*'):
try:
if '[{0}]'.format(inode) in os.readlink(item):
return item.split('/')[2]
except:
pass
return '' # TIME_WAIT
def name_of_pid(self, pid):
""" Return /name/of/program if possible. """
if pid:
try:
return os.readlink('/proc/{0}/exe'.format(pid))
except:
pass
return '' # No permission to see cmd (not owner or root)
def __str__(self):
""" Return contents as multi-line string similar to netstat. """
template = '{protocol:<5} {local_ip:>39} {local_port:<5} ' + \
'{remote_ip:>39} {remote_port:<5} {state:<11} {pid:>5} {program}\n'
s = ''
subject = self.contents
        if self.results:
subject = self.results
if self.as_json:
return str(json.dumps(subject))
if self.as_dict:
            return str(subject)
if self.use_header:
s = template.format(
protocol = 'Proto',
local_ip = 'Local Address',
local_port = 'Port',
remote_ip = 'Foreign Address',
remote_port = 'Port',
state = 'State',
pid = 'PID',
program = 'Program name'
)
for c in subject:
s += template.format(
protocol = c['protocol'],
local_ip = c['local_ip'],
local_port = c['local_port'],
remote_ip = c['remote_ip'],
remote_port = c['remote_port'],
state = c['state'],
pid = c['pid'],
program = c['program']
)
return s.rstrip('\n')
def quiet_hook(kind, message, traceback):
if QuietError in kind.__bases__:
print('{0}: {1}'.format(kind.__name__, message)) # Only print Error Type and Message
else:
sys.__excepthook__(kind, message, traceback) # Print Error Type, Message and Traceback
sys.excepthook = quiet_hook
if __name__ == '__main__':
netstat = NetShow()
args = sys.argv[1:]
if '--help' in args or '-h' in args:
print(netstat.usage())
exit(0)
if '--json' in args and '--dict' in args:
print('--json and --dict are mutually exclusive')
exit(1)
if '--json' in args:
netstat.as_json = True
args.remove('--json')
if '--dict' in args:
netstat.as_dict = True
args.remove('--dict')
if '-s' in args:
netstat.use_header = False
args.remove('-s')
if args and not netstat.filter(args):
exit(1)
print(netstat)
|
# Copyright 2014 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.command_line.defaults import ConsoleDefaults
from ducktape.template import TemplateRenderer
from ducktape.errors import TimeoutError
from ducktape.cluster.remoteaccount import RemoteAccount
import os
import shutil
import tempfile
import time
class Service(TemplateRenderer):
"""Service classes know how to deploy a service onto a set of nodes and then clean up after themselves.
They request the necessary resources from the cluster,
configure each node, and bring up/tear down the service.
They also expose
information about the service so that other services or test scripts can
easily be configured to work with them. Finally, they may be able to collect
and check logs/output from the service, which can be helpful in writing tests
or benchmarks.
Services should generally be written to support an arbitrary number of nodes,
even if instances are independent of each other. They should be able to assume
    that there won't be resource conflicts: the cluster the tests are being run on
    should be large enough to use one instance per service instance.
"""
# Provides a mechanism for locating and collecting log files produced by the service on its nodes.
# logs is a dict with entries that look like log_name: {"path": log_path, "collect_default": boolean}
#
# For example, zookeeper service might have self.logs like this:
# self.logs = {
# "zk_log": {"path": "/mnt/zk.log",
# "collect_default": True}
# }
logs = {}
def __init__(self, context, num_nodes=None, node_spec=None, *args, **kwargs):
"""
:param context: An object which has at minimum 'cluster' and 'logger' attributes. In tests, this is always a
TestContext object.
:param num_nodes: An integer representing the number of Linux nodes to allocate. If node_spec is not None, it
will be used and ``num_nodes`` will be ignored.
:param node_spec: A dictionary where the key is an operating system (possible values are in
``ducktape.cluster.remoteaccount.RemoteAccount.SUPPORTED_OS_TYPES``) and the value is the number
of nodes to allocate for the associated operating system. Values must be integers. Node
allocation takes place when ``start()`` is called, or when ``allocate_nodes()`` is called, whichever
happens first.
"""
super(Service, self).__init__(*args, **kwargs)
# Keep track of significant events in the lifetime of this service
self._init_time = time.time()
self._start_time = -1
self._start_duration_seconds = -1
self._stop_time = -1
self._stop_duration_seconds = -1
self._clean_time = -1
self._initialized = False
self.node_spec = Service.setup_node_spec(num_nodes, node_spec)
self.context = context
self.nodes = []
self.allocate_nodes()
        # Keep track of which nodes were allocated to this service, even after nodes are freed
# Note: only keep references to representations of the nodes, not the actual node objects themselves
self._nodes_formerly_allocated = [str(node.account) for node in self.nodes]
# Every time a service instance is created, it registers itself with its
# context object. This makes it possible for external mechanisms to clean up
# after the service if something goes wrong.
#
# Note: Allocate nodes *before* registering self with the service registry
self.context.services.append(self)
# Each service instance has its own local scratch directory on the test driver
self._local_scratch_dir = None
self._initialized = True
@staticmethod
def setup_node_spec(num_nodes=None, node_spec=None):
if not num_nodes and not node_spec:
raise Exception("Either num_nodes or node_spec must not be None.")
# If node_spec is none, convert num_nodes to a node_spec dict and assume Linux machines.
if not node_spec:
return {RemoteAccount.LINUX: num_nodes}
else:
try:
for os_type, _ in node_spec.iteritems():
if os_type not in RemoteAccount.SUPPORTED_OS_TYPES:
raise Exception("When nodes is a dictionary, each key must be a " +
"supported OS. '%s' is unknown." % os_type)
return node_spec
except:
raise Exception("Each node_spec key must be a supported operating system: " +
"%s, node_spec: %s" % (RemoteAccount.SUPPORTED_OS_TYPES, str(node_spec)))
def __repr__(self):
return "<%s: %s>" % (self.who_am_i(), "num_nodes: %d, nodes: %s" %
(len(self.nodes), [n.account.hostname for n in self.nodes]))
@property
def local_scratch_dir(self):
"""This local scratch directory is created/destroyed on the test driver before/after each test is run."""
if not self._local_scratch_dir:
self._local_scratch_dir = tempfile.mkdtemp()
return self._local_scratch_dir
@property
def service_id(self):
"""Human-readable identifier (almost certainly) unique within a test run."""
return "%s-%d-%d" % (self.__class__.__name__, self._order, id(self))
@property
def _order(self):
"""Index of this service instance with respect to other services of the same type registered with self.context.
        When used with a test_context, this lets the user know which instance of this service type it is.
Example::
suppose the services registered with the same context looks like
context.services == [Zookeeper, Kafka, Zookeeper, Kafka, MirrorMaker]
then:
context.services[0]._order == 0 # "0th" Zookeeper instance
context.services[2]._order == 0 # "0th" Kafka instance
context.services[1]._order == 1 # "1st" Zookeeper instance
context.services[3]._order == 1 # "1st" Kafka instance
context.services[4]._order == 0 # "0th" MirrorMaker instance
"""
if hasattr(self.context, "services"):
same_services = [id(s) for s in self.context.services if type(s) == type(self)]
if self not in self.context.services and not self._initialized:
# It's possible that _order will be invoked in the constructor *before* self has been registered with
# the service registry (aka self.context.services).
return len(same_services)
# Note: index raises ValueError if the item is not in the list
index = same_services.index(id(self))
return index
else:
return 0
@property
def logger(self):
"""The logger instance for this service."""
return self.context.logger
@property
def cluster(self):
"""The cluster object from which this service instance gets its nodes."""
return self.context.cluster
@property
def allocated(self):
"""Return True iff nodes have been allocated to this service instance."""
return len(self.nodes) > 0
def who_am_i(self, node=None):
"""Human-readable identifier useful for log messages."""
if node is None:
return self.service_id
else:
return "%s node %d on %s" % (self.service_id, self.idx(node), node.account.hostname)
def allocate_nodes(self):
"""Request resources from the cluster."""
if self.allocated:
raise Exception("Requesting nodes for a service that has already been allocated nodes.")
self.logger.debug("Requesting nodes from the cluster: %s" % self.node_spec)
try:
self.nodes = self.cluster.alloc(self.node_spec)
except RuntimeError as e:
msg = str(e.message)
if hasattr(self.context, "services"):
msg += " Currently registered services: " + str(self.context.services)
raise RuntimeError(msg)
for idx, node in enumerate(self.nodes, 1):
# Remote accounts utilities should log where this service logs
if node.account._logger is not None:
                # This log message helps the test writer identify which test and/or service didn't clean up after itself
node.account.logger.critical(ConsoleDefaults.BAD_TEST_MESSAGE)
raise RuntimeError(
"logger was not None on service start. There may be a concurrency issue, " +
"or some service which isn't properly cleaning up after itself. " +
"Service: %s, node.account: %s" % (self.__class__.__name__, str(node.account)))
node.account.logger = self.logger
self.logger.debug("Successfully allocated %d nodes to %s" % (len(self.nodes), self.who_am_i()))
def start(self):
"""Start the service on all nodes."""
self.logger.info("%s: starting service" % self.who_am_i())
if self._start_time < 0:
# Set self._start_time only the first time self.start is invoked
self._start_time = time.time()
self.logger.debug(self.who_am_i() + ": killing processes and attempting to clean up before starting")
for node in self.nodes:
# Added precaution - kill running processes, clean persistent files
# try/except for each step, since each of these steps may fail if there are no processes
# to kill or no files to remove
try:
self.stop_node(node)
except:
pass
try:
self.clean_node(node)
except:
pass
for node in self.nodes:
self.logger.debug("%s: starting node" % self.who_am_i(node))
self.start_node(node)
if self._start_duration_seconds < 0:
self._start_duration_seconds = time.time() - self._start_time
def start_node(self, node):
"""Start service process(es) on the given node."""
raise NotImplementedError("%s: subclasses must implement start_node." % self.who_am_i())
def wait(self, timeout_sec=600):
"""Wait for the service to finish.
This only makes sense for tasks with a fixed amount of work to do. For services that generate
output, it is only guaranteed to be available after this call returns.
"""
unfinished_nodes = []
start = time.time()
end = start + timeout_sec
for node in self.nodes:
now = time.time()
if end > now:
self.logger.debug("%s: waiting for node", self.who_am_i(node))
if not self.wait_node(node, end - now):
unfinished_nodes.append(node)
else:
unfinished_nodes.append(node)
if unfinished_nodes:
raise TimeoutError("Timed out waiting %s seconds for service nodes to finish. " % str(timeout_sec) +
"These nodes are still alive: " + str(unfinished_nodes))
def wait_node(self, node, timeout_sec=None):
"""Wait for the service on the given node to finish.
Return True if the node finished shutdown, False otherwise.
"""
raise NotImplementedError("%s: subclasses must implement wait_node." % self.who_am_i())
def stop(self):
"""Stop service processes on each node in this service.
Subclasses must override stop_node.
"""
self._stop_time = time.time() # The last time stop is invoked
self.logger.info("%s: stopping service" % self.who_am_i())
for node in self.nodes:
self.logger.info("%s: stopping node" % self.who_am_i(node))
self.stop_node(node)
self._stop_duration_seconds = time.time() - self._stop_time
def stop_node(self, node):
"""Halt service process(es) on this node."""
raise NotImplementedError("%s: subclasses must implement stop_node." % self.who_am_i())
def clean(self):
"""Clean up persistent state on each node - e.g. logs, config files etc.
Subclasses must override clean_node.
"""
self._clean_time = time.time()
self.logger.info("%s: cleaning service" % self.who_am_i())
for node in self.nodes:
self.logger.info("%s: cleaning node" % self.who_am_i(node))
self.clean_node(node)
def clean_node(self, node):
"""Clean up persistent state on this node - e.g. service logs, configuration files etc."""
self.logger.warn("%s: clean_node has not been overriden. This may be fine if the service leaves no persistent state."
% self.who_am_i())
def free(self):
"""Free each node. This 'deallocates' the nodes so the cluster can assign them to other services."""
for node in self.nodes:
self.logger.info("%s: freeing node" % self.who_am_i(node))
node.account.logger = None
self.cluster.free(node)
self.nodes = []
def run(self):
"""Helper that executes run(), wait(), and stop() in sequence."""
self.start()
self.wait()
self.stop()
def get_node(self, idx):
"""ids presented externally are indexed from 1, so we provide a helper method to avoid confusion."""
return self.nodes[idx - 1]
def idx(self, node):
"""Return id of the given node. Return -1 if node does not belong to this service.
idx identifies the node within this service instance (not globally).
"""
for idx, n in enumerate(self.nodes, 1):
if self.get_node(idx) == node:
return idx
return -1
def close(self):
"""Release resources."""
# Remove local scratch directory
if self._local_scratch_dir and os.path.exists(self._local_scratch_dir):
shutil.rmtree(self._local_scratch_dir)
@staticmethod
def run_parallel(*args):
"""Helper to run a set of services in parallel. This is useful if you want
multiple services of different types to run concurrently, e.g. a
producer + consumer pair.
"""
for svc in args:
svc.start()
for svc in args:
svc.wait()
for svc in args:
svc.stop()
def to_json(self):
return {
"cls_name": self.__class__.__name__,
"module_name": self.__module__,
"lifecycle": {
"init_time": self._init_time,
"start_time": self._start_time,
"start_duration_seconds": self._start_duration_seconds,
"stop_time": self._stop_time,
"stop_duration_seconds": self._stop_duration_seconds,
"clean_time": self._clean_time
},
"service_id": self.service_id,
"nodes": self._nodes_formerly_allocated
}
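
# --- Illustrative sketch (not part of ducktape) ------------------------------
# The Service docstring above describes the contract subclasses fulfil:
# implement start_node/stop_node/clean_node (and wait_node for finite tasks)
# and let Service handle node allocation, lifecycle bookkeeping and log
# collection. The toy subclass below is a hypothetical example of that
# contract; the log path and the node.account.ssh() commands are assumptions
# for illustration only.
class EchoService(Service):
    logs = {
        "echo_log": {"path": "/mnt/echo.log", "collect_default": True},
    }

    def __init__(self, context, num_nodes=1):
        super(EchoService, self).__init__(context, num_nodes=num_nodes)

    def start_node(self, node):
        # A real service would launch a long-running process here.
        node.account.ssh("echo started > /mnt/echo.log")

    def stop_node(self, node):
        # Nothing long-running to kill for this toy service.
        node.account.ssh("true")

    def clean_node(self, node):
        node.account.ssh("rm -f /mnt/echo.log")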
|
from imgaug import augmenters as iaa
import numpy as np
class ImgAugTransform:
def __init__(self):
self.aug = iaa.Sequential([
iaa.Sometimes(0.25, iaa.GaussianBlur(sigma=(0, 3.0))),
iaa.Fliplr(0.5),
iaa.Affine(rotate=(-20, 20), mode='symmetric'),
])
def __call__(self, img):
img = np.array(img)
return self.aug.augment_image(img)
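# Illustrative usage sketch (an assumption, not part of the original module):
# ImgAugTransform accepts anything np.array() can wrap (e.g. a PIL image) and
# returns the augmented image as a numpy array, so it can slot into a
# torchvision-style transform pipeline.
if __name__ == '__main__':
    transform = ImgAugTransform()
    dummy = np.zeros((64, 64, 3), dtype=np.uint8)  # stand-in for a real image
    augmented = transform(dummy)
    print(augmented.shape)  # (64, 64, 3)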
|
from setuptools import setup, find_packages
import codecs
import os
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as fh:
long_description = "\n" + fh.read()
VERSION = '1.1.1'
DESCRIPTION = 'Read HTML data and convert it into python classes.'
# Setting up
setup(
name="HTML-Reader",
version=VERSION,
author="Monkvy",
author_email="moluecke@gmx.de",
description=DESCRIPTION,
long_description_content_type="text/markdown",
long_description=long_description,
packages=find_packages(),
install_requires=[],
keywords=['python', 'html', 'parser', 'reader'],
classifiers=[
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
]
)
|
# coding=utf-8
from __future__ import absolute_import, division, print_function
from click import command, pass_context
from .docker import docker_version_for_preset
from .golang import golang_version_for_preset
from ..util.common_options import ansible_output_options
from ..util.preset_option import Preset, preset_option
def install_dependencies_for_preset(context, _, value):
"""
Installs the full set of dependencies on the remote host.
Handles the special `--for` option, defaults to `origin/master` if
a preset is not provided by the user.
:param context: Click context
:param _: command-line parameter
:param value: version of OpenShift for which to install dependencies
"""
if not value or context.resilient_parsing:
return
prepare_all(context.obj, value)
context.exit()
_SHORT_HELP = 'Configure all dependencies on remote hosts.'
@command(
name='all',
short_help=_SHORT_HELP,
help=_SHORT_HELP + '''
If a preset is chosen, default values for the other options are used
and user-provided options are ignored.
\b
Examples:
Install dependencies for the default configuration
$ oct prepare all
\b
Install dependencies for a specific version of OpenShift
$ oct prepare all --for=ose/enterprise-3.3
''',
)
@preset_option(
help_action='Install dependencies',
callback=install_dependencies_for_preset,
)
@ansible_output_options
@pass_context
def all_command(context, preset=None):
"""
Installs the full set of dependencies on the remote host.
:param context: Click context
:param preset: version of OpenShift for which to install dependencies
"""
prepare_all(context.obj, preset)
def prepare_all(client, preset):
"""
Installs the full set of dependencies on the remote host.
:param client: Ansible client
:param preset: version of OpenShift for which to install dependencies
"""
    # we can't default on an eager option or it would always trigger,
# so we default here instead
if not preset:
preset = Preset.origin_master
playbook_variables = {
'origin_ci_docker_version': docker_version_for_preset(preset),
'origin_ci_golang_version': golang_version_for_preset(preset),
}
client.run_playbook(
playbook_relative_path='prepare/main',
playbook_variables=playbook_variables,
)
|
"""Tests of 0x.order_utils.signature_utils.*."""
from zero_ex.order_utils.signature_utils import ec_sign_order_hash
def test_ec_sign_order_hash():
"""Test the signing of order hashes."""
assert ec_sign_order_hash() == "stub return value"
|
import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from .info import __version__
from . import FASTPT
|
import numpy as np
from scipy.ndimage import convolve, sobel
from scipy.interpolate import RectBivariateSpline
from scipy.optimize import basinhopping
from geometry import calc_bbox, MeshEdgeDetector, Position
class Gradient(object):
SCHARR = 'Scharr'
SOBEL = 'Sobel'
SCHARR_KERNEL_X = np.array([[3, 0, -3], [10, 0, -10], [3, 0, -3]]) / 32.0
SCHARR_KERNEL_Y = np.array([[3, 10, 3], [0, 0, 0], [-3, -10, -3]]) / 32.0
def __init__(self, image, method=SOBEL, normalized=False):
if method not in (Gradient.SCHARR, Gradient.SOBEL):
raise ValueError(
'Invalid method (use Gradient.SCHARR or Gradient.SOBEL)'
)
image = np.flipud(image)
if method == Gradient.SCHARR:
x_deriv = convolve(image, Gradient.SCHARR_KERNEL_X)
y_deriv = convolve(image, Gradient.SCHARR_KERNEL_Y)
else:
x_deriv = sobel(image, axis=1) / 8.0
y_deriv = sobel(image, axis=0) / 8.0
if normalized:
magnitude = np.hypot(x_deriv, y_deriv)
min_magnitude = magnitude.min()
max_magnitude_diff = magnitude.max() - min_magnitude
if min_magnitude > 0:
x_deriv -= min_magnitude * x_deriv / magnitude
y_deriv -= min_magnitude * y_deriv / magnitude
x_deriv /= max_magnitude_diff
y_deriv /= max_magnitude_diff
x_indices = np.arange(image.shape[1])
y_indices = np.arange(image.shape[0])
self._x_deriv = RectBivariateSpline(y_indices, x_indices, x_deriv)
self._y_deriv = RectBivariateSpline(y_indices, x_indices, y_deriv)
def __getitem__(self, coordinates):
if coordinates.ndim > 1:
coordinates = np.hsplit(coordinates[:, ::-1], 2)
else:
coordinates = coordinates[::-1]
return np.hstack((
self._x_deriv(*coordinates, grid=False),
self._y_deriv(*coordinates, grid=False)
))
class IntegralCalculator(object):
def __init__(self, image, scene, points_per_pixel=0.3,
normalized_gradient=False):
self._image_size = image.shape
self._scene = scene
self._points_per_pixel = points_per_pixel
self._gradient = Gradient(image, normalized=normalized_gradient)
self._mesh_edge_detector = MeshEdgeDetector(scene.mesh)
def __call__(self, position):
_, _, gradients, normals = self.calc_gradients_and_normals(position)
integral = np.abs((normals * gradients).sum(axis=1)).sum()
integral /= normals.shape[0]
return integral
def calc_gradients_and_normals(self, position):
scene = self._scene._replace(model=position)
mesh_edges = self._mesh_edge_detector(
scene.proj.dot(scene.view.matrix.dot(scene.model.matrix)),
self._image_size
)
edges = np.vstack((mesh_edges.borders, mesh_edges.sharp_edges))
lines = mesh_edges.projected_vertices[edges]
points, line_indices = self._select_points(lines)
lines = lines[line_indices]
normals = IntegralCalculator._calc_normals(lines)
gradients = self._gradient[points]
return mesh_edges, points, gradients, normals
def _select_points(self, lines):
line_count = lines.shape[0]
begin_points = lines[:, 0]
directions = lines[:, 1] - begin_points
lengths = np.linalg.norm(directions, axis=1)
point_counts = np.rint(lengths * self._points_per_pixel)
point_counts = point_counts.astype(np.int64, copy=False)
point_counts[point_counts == 0] = 1
line_indices = np.repeat(np.arange(line_count), point_counts)
steps = np.ones(line_count) / point_counts
coefs = np.cumsum(np.repeat(steps, point_counts))
begin_indices = np.cumsum(point_counts) - point_counts
coefs -= np.repeat(coefs[begin_indices], point_counts)
coefs += np.repeat(steps / 2, point_counts)
coefs = np.repeat(coefs.reshape((-1, 1)), 2, axis=1)
points = begin_points[line_indices] + directions[line_indices] * coefs
return points, line_indices
def _clip(self, points, line_indices):
mask = (points >= (0, 0)) & (points < self._image_size[::-1])
mask = mask[:, 0] & mask[:, 1]
return points[mask], line_indices[mask]
@staticmethod
def _calc_normals(lines):
normals = lines[:, 1] - lines[:, 0]
norms = np.repeat(np.linalg.norm(normals, axis=1), 2).reshape(-1, 2)
normals /= norms
normals = np.dot(normals, np.array([[0, -1], [1, 0]]))
return normals
class Walker(object):
def __init__(self, step_vector, step_size):
self.step_vector = step_vector
self.stepsize = step_size
def __call__(self, vector):
random_vector = np.random.uniform(-1, 1, vector.shape)
return vector + self.step_vector * random_vector * self.stepsize
class Guard(object):
def __init__(self, initial_pose, t_limit, r_limit):
vector = initial_pose.vector6
diff = np.array([t_limit] * 3 + [r_limit] * 3)
self._min_pose = vector - diff
self._max_pose = vector + diff
def __call__(self, f_new, x_new, f_old, x_old):
ok = ((x_new >= self._min_pose) & (x_new <= self._max_pose)).all()
return bool(ok)
def optimize_model(model, integral_calculator, step_callback, echo):
bbox = calc_bbox(integral_calculator._scene.mesh)
    bbox_size = max(bbox[i + 1] - bbox[i] for i in xrange(0, 6, 2))
    t_limit = 0.1 * bbox_size
r_limit = 22.5
guard = Guard(model, t_limit, r_limit)
walker = Walker(
np.array([0.2 * t_limit] * 3 + [0.2 * r_limit] * 3),
0.5
)
minimizer_kwargs = {
'method': 'SLSQP',
'tol': 0.00001,
'options': {
'maxiter': 50,
'disp': echo
}
}
basinhopping_result = basinhopping(
lambda x: bbox_size * (1 - integral_calculator(Position(x))),
model.vector6,
niter=50,
T=0.1,
minimizer_kwargs=minimizer_kwargs,
take_step=walker,
accept_test=guard,
callback=step_callback,
interval=10,
disp=echo
)
if echo:
print
print basinhopping_result
return Position(basinhopping_result.x)
|
# coding: utf8
from __future__ import print_function, absolute_import
from pickle import Unpickler, BUILD
from spinoff.actor.ref import Ref
class IncomingMessageUnpickler(Unpickler):
"""Unpickler for attaching a `Hub` instance to all deserialized `Ref`s."""
def __init__(self, node, file):
Unpickler.__init__(self, file)
self.node = node
    # called by `Unpickler.load` before an uninitialized object is about to be filled with members;
def _load_build(self):
"""See `pickle.py` in Python's source code."""
# if the ctor. function (penultimate on the stack) is the `Ref` class...
if isinstance(self.stack[-2], Ref):
# Ref.__setstate__ will know it's a remote ref if the state is a tuple
self.stack[-1] = (self.stack[-1], self.node)
self.load_build() # continue with the default implementation
# detect our own refs sent back to us
ref = self.stack[-1]
if ref.uri.node == self.node.nid:
ref.is_local = True
ref._cell = self.node.guardian.lookup_cell(ref.uri)
# dbg(("dead " if not ref._cell else "") + "local ref detected")
del ref.node # local refs never need access to the node
else: # pragma: no cover
self.load_build()
dispatch = dict(Unpickler.dispatch) # make a copy of the original
dispatch[BUILD] = _load_build # override the handler of the `BUILD` instruction
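# Illustrative usage sketch (an assumption, not part of the original module):
# deserialising an incoming wire message so that any Refs get the node
# attached. `node` and `raw_bytes` are hypothetical; load() is the standard
# pickle.Unpickler entry point, which routes BUILD opcodes through _load_build.
#
#     from io import BytesIO
#     obj = IncomingMessageUnpickler(node, BytesIO(raw_bytes)).load()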
|
import random
no_of_tests = 1000000 #1 million tests
def montyhall():
correct_switchdoor = 0
correct_samedoor = 0
incorrect_switchdoor = 0
incorrect_samedoor = 0
switchdoor_actual = 0
samedoor_actual = 0
for test in range(no_of_tests):
        car = random.randint(0,2) #Door with car, chosen randomly
player_choice1 = random.randint(0,2) #Player makes the choice
#Monty hall reveals a door with a goat.
#Initially, if player chose the door with a goat, Monty Hall reveals the other door with the goat.
#To win, the player must switch to the unopened door, which has the car behind it.
if player_choice1 != car:
switchdoor_actual += 1 #Switch door to win
player_flip_choice = bool(random.getrandbits(1)) #Player decision on switching
if player_flip_choice:
correct_switchdoor += 1 #Player Switches and Wins
else:
incorrect_samedoor += 1 #Player Retains same door and Loses
        #Initially, if player chose the door with the car, Monty Hall reveals one of the other two doors at random, both of which have a goat behind them.
#To win the car, the player must retain the same door.
else:
samedoor_actual += 1 #Retain same door to win
player_flip_choice = bool(random.getrandbits(1)) #Player decision on switching
if player_flip_choice:
incorrect_switchdoor += 1 #Player Switches and Loses
else:
correct_samedoor += 1 #Player Retains same door and Wins
#Results for a random gameshow contestant
print("RESULTS FOR A RANDOM GAMESHOW CONTESTANT\n----------------------------------------")
print("Player Switches door and Wins: " + str(correct_switchdoor/no_of_tests))
print("Player Switches door and Loses: " + str(incorrect_switchdoor/no_of_tests))
print("Player Stays with same door and Wins: " + str(correct_samedoor/no_of_tests))
print("Player Stays with same door and Loses: " + str(incorrect_samedoor/no_of_tests))
#Success probabilities on making the switching decision
print("\nSUCCESS SCENARIOS FOR GAMESHOW CONTESTANTS\n------------------------------------------")
print("Switch Door to Win: " + str(switchdoor_actual/no_of_tests))
print("Same Door to Win: " + str(samedoor_actual/no_of_tests))
montyhall()
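# For reference, the exact probabilities the simulation above should approach:
#   Switch Door to Win = P(first pick is a goat) = 2/3 ~= 0.667
#   Same Door to Win   = P(first pick is the car) = 1/3 ~= 0.333
# and for the random contestant, who switches with probability 1/2:
#   switch & win = 2/3 * 1/2 = 1/3    switch & lose = 1/3 * 1/2 = 1/6
#   stay & win   = 1/3 * 1/2 = 1/6    stay & lose   = 2/3 * 1/2 = 1/3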
|
# -*- coding: utf-8 -*-
"""
:copyright: Copyright 2022 Sphinx Confluence Builder Contributors (AUTHORS)
:license: BSD-2-Clause (LICENSE)
"""
from tests.lib.testcase import ConfluenceTestCase
from tests.lib.testcase import setup_builder
from tests.lib import parse
import os
class TestConfluenceExpand(ConfluenceTestCase):
@classmethod
def setUpClass(cls):
super(TestConfluenceExpand, cls).setUpClass()
cls.dataset = os.path.join(cls.datasets, 'expand')
@setup_builder('confluence')
def test_storage_confluence_expand_directive_expected(self):
out_dir = self.build(self.dataset)
with parse('index', out_dir) as data:
expand_macros = data.find_all('ac:structured-macro',
{'ac:name': 'expand'})
self.assertIsNotNone(expand_macros)
self.assertEqual(len(expand_macros), 2)
# expand macro without a title
expand_macro = expand_macros.pop(0)
rich_body = expand_macro.find('ac:rich-text-body')
self.assertIsNotNone(rich_body)
title = expand_macro.find('ac:parameter', {'ac:name': 'title'})
self.assertIsNone(title)
contents = rich_body.text.strip()
self.assertIsNotNone(contents)
self.assertEqual(contents, 'no title content')
# expand macro with a title
expand_macro = expand_macros.pop(0)
rich_body = expand_macro.find('ac:rich-text-body')
self.assertIsNotNone(rich_body)
title = expand_macro.find('ac:parameter', {'ac:name': 'title'})
self.assertIsNotNone(title)
self.assertEqual(title.text, 'my title')
contents = rich_body.text.strip()
self.assertIsNotNone(contents)
self.assertEqual(contents, 'with title content')
|
import datetime
import logging
from typing import List, Tuple
import pyspark.sql as spark
from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.feature import VectorAssembler
from pyspark.mllib.evaluation import RegressionMetrics
from pyspark.sql.functions import exp
from pyspark.sql.types import DoubleType
from dbnd import log_metric, parameter, pipeline
from dbnd.utils import data_combine, period_dates
from dbnd_examples.data import demo_data_repo
from dbnd_spark.spark import spark_task
from targets import target
from targets.types import PathStr
logger = logging.getLogger(__name__)
def data_source(task_target_date, name):
return target(demo_data_repo.partner_to_file(name, task_target_date))
@spark_task(result=parameter.output.csv[spark.DataFrame])
def get_and_enrich_spark(raw_data: spark.DataFrame, column_name: str):
raw_data.show()
data_with_new_feature = raw_data.withColumn(column_name + "_exp", exp(column_name))
return data_with_new_feature
@spark_task(result=parameter.output.csv[spark.DataFrame])
def clean_data_spark(raw_data: spark.DataFrame):
return raw_data.na.fill(0)
@pipeline()
def ingest_partner_a(task_target_date):
raw_data = data_source(name="a", task_target_date=task_target_date)
clean = clean_data_spark(raw_data=raw_data)
return get_and_enrich_spark(raw_data=clean, column_name="1")
@pipeline
def ingest_partner_b(task_target_date):
raw_data = data_source(name="b", task_target_date=task_target_date)
return clean_data_spark(raw_data=raw_data)
@pipeline
def ingest_partner_c(task_target_date):
raw_data = data_source(name="c", task_target_date=task_target_date)
clean = clean_data_spark(raw_data=raw_data)
return get_and_enrich_spark(raw_data=clean, column_name="10")
@pipeline
def fetch_partner_data_spark(
task_target_date, selected_partners: List[str], period=datetime.timedelta(days=7)
) -> List[spark.DataFrame]:
partner_data = []
for partner in selected_partners:
all_data = []
for d in period_dates(task_target_date, period):
if partner == "a":
data = ingest_partner_a(task_target_date=d)
elif partner == "b":
data = ingest_partner_b(task_target_date=d)
elif partner == "c":
data = ingest_partner_c(task_target_date=d)
else:
raise Exception("Partner not found!")
all_data.append(data)
partner_data.append(data_combine(all_data, sort=True))
return partner_data
@spark_task
def calculate_features(
raw_data: List[spark.DataFrame], selected_features: List[str] = None
) -> spark.DataFrame:
result = raw_data.pop(0)
for d in raw_data:
result = result.join(d, ["id"], "outer")
if selected_features:
result = result.select(selected_features)
result.show()
return result
@spark_task(result="training_set, test_set, validation_set")
def split_data_spark(
raw_data: spark.DataFrame,
) -> Tuple[spark.DataFrame, spark.DataFrame, spark.DataFrame]:
columns_to_remove = set(["id", "0_norm", "10_norm"])
    if columns_to_remove.issubset(list(raw_data.schema.names)):
        raw_data = raw_data.drop(*columns_to_remove)
(train, test) = raw_data.randomSplit([0.8, 0.2])
    (test, validation) = test.randomSplit([0.5, 0.5])
target_stats = raw_data.describe(["target"])
log_metric(
"target.mean",
target_stats.filter(target_stats["summary"] == "mean")
.collect()[0]
.asDict()["target"],
)
log_metric(
"target.std",
target_stats.filter(target_stats["summary"] == "stddev")
.collect()[0]
.asDict()["target"],
)
return train, test, validation
@spark_task
def train_model_spark(
test_set: spark.DataFrame,
training_set: spark.DataFrame,
alpha: float = 1.0,
l1_ratio: float = 0.5,
saved_model=parameter.output.folder_data.with_flag(None)[PathStr],
) -> str:
transform = VectorAssembler(inputCols=["0", "1", "2"], outputCol="features")
lr = LogisticRegression(
featuresCol="features",
labelCol="target",
regParam=l1_ratio,
elasticNetParam=alpha,
family="multinomial",
maxIter=1,
)
ppl = Pipeline(stages=[transform, lr])
# Fit the pipeline to training documents.
model = ppl.fit(training_set)
prediction = model.transform(test_set)
evaluation = prediction.withColumn(
"label", prediction["target"].cast(DoubleType())
).select(["label", "prediction"])
evaluation.show()
metrics = RegressionMetrics(evaluation.rdd)
log_metric("r2", metrics.r2)
log_metric("alpha", alpha)
model.write().save(str(saved_model))
return "ok"
@pipeline(result=("model"))
def train_model_for_customer_spark(
task_target_date,
data: spark.DataFrame = None,
alpha: float = 1.0,
l1_ratio: float = 0.5,
period=datetime.timedelta(days=1),
selected_features: List[str] = None,
):
if data is None:
partners = fetch_partner_data_spark(
task_target_date=task_target_date, period=period
)
data = calculate_features(
selected_features=selected_features, raw_data=partners
)
training_set, test_set, validation_set = split_data_spark(raw_data=data)
model = train_model_spark(
test_set=test_set, training_set=training_set, alpha=alpha, l1_ratio=l1_ratio
)
return model.saved_model
|
"""Verifies that all providers of blockchain data are consistent with others."""
import unittest
try:
from boltzmann.utils.bitcoind_rpc_wrapper import BitcoindRPCWrapper
from boltzmann.utils.bci_wrapper import BlockchainInfoWrapper
except ImportError:
import sys
import os
# Adds boltzmann directory into path
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../../")
from boltzmann.utils.bitcoind_rpc_wrapper import BitcoindRPCWrapper
from boltzmann.utils.bci_wrapper import BlockchainInfoWrapper
class CompareTest(unittest.TestCase):
"""Compare the results of various providers for given transaction IDs."""
PROVIDERS = [BitcoindRPCWrapper, BlockchainInfoWrapper]
#a list of transactions with expected data
TEST_TXS = [
{'height': 100001,
'time': 1293624404,
'txid': '8131ffb0a2c945ecaf9b9063e59558784f9c3a74741ce6ae2a18d0571dac15bb',
'inputs': [{'n': 0,
'value': 5000000000,
'address': '1HYAekgNKqQiCadt3fnKdLQFFNLFHPPnCR',
'tx_idx': 239354,
'script': '4104cd31654088e472c60ab1c6ee7743deb186dce0b1ad5fc45691d37dad2620128e4b33c7c9c19ed01a5817e6e54c12fe1b83eafcb830440f23a2ce903cdb1df52fac'
},
{'n': 0,
'value': 5000000000,
'address': '16hwoJvz1xje8HBgoLZcxwo1CwE3cvkb17',
'tx_idx': 239356,
'script': '41041e1f1bdccf8cd5b9d3ffc0a14a36ad8a97663f14d16b94104d073abfc693d04178f263495cd6037aed097175297b39cfe5f5b9706fd425795bf7f61108145b53ac'
},
{'n': 0,
'value': 5000000000,
'address': '1KWGBfAsuBFzKQ7bhSJV5WbgVNvvQ5R1j2',
'tx_idx': 239322,
'script': '41043ea537ed214d2bcb07f56d2ecb047a4bd11d13fa160856f84e77e8d31ab2154cd2eb8cad37747b50b0b04d739186058d64212368202d1b41bc44fcb6decb90eaac'
},
{'n': 0,
'value': 5000000000,
'address': '15XgnazTwLj7sNPkbUo5vCSKBmR43X5vW4',
'tx_idx': 239205,
'script': '4104b2424b051a79a55b9f7970ceeecb25e81b876c53d9e46d6ee7e0ae656b94f8cf3a27a1b3f2465ac19409e2a08fb7d1f549adc70a5f90ff8418061688186064f4ac'
},
{'n': 0,
'value': 5001000000,
'address': '16HjHvF5umsgAzaX2ddosB81ttkrVHkvqo',
'tx_idx': 239162,
'script': '4104a7d578da4514a14b08d1e939924efaeacfde7d7d2897f2cef87248aaa4e3cd226f0660b9bf759448f9fb2f586f6027667b73d34a8114186265f9364193599c2cac'
}],
'outputs': [{'n': 0,
'value': 25000000000,
'address': '15xif4SjXiFi3NDEsmMZCfTdE9jvvVQrjU',
'tx_idx': 240051,
'script': '76a914366a27645806e817a6cd40bc869bdad92fe5509188ac'
},
{'n': 1,
'value': 1000000,
'address': '1NkKLMgbSjXrT7oHagnGmYFhXAWXjJsKCj',
'tx_idx': 240051,
'script': '76a914ee8bd501094a7d5ca318da2506de35e1cb025ddc88ac'
}]
},
{'height': 299173,
'time': 1399267359,
'txid': '8e56317360a548e8ef28ec475878ef70d1371bee3526c017ac22ad61ae5740b8',
'inputs': [{'n': 0,
'value': 10000000,
'address': '1FJNUgMPRyBx6ahPmsH6jiYZHDWBPEHfU7',
'tx_idx': 55795695,
'script': '76a9149cdac2b6a77e536f5f4ab6518fb078861f4dbf5188ac',
},
{'n': 1,
'value': 1380000,
'address': '1JDHTo412L9RCtuGbYw4MBeL1xn7ZTuzLH',
'tx_idx': 55462552,
'script': '76a914bcccdf22a567d2c30762c2c44edd3d4ff40e944c88ac'
}],
'outputs': [{'n': 0,
'value': 100000,
'address': '1JR3x2xNfeFicqJcvzz1gkEhHEewJBb5Zb',
'tx_idx': 55819527,
'script': '76a914bf06953ec3c533d040929dc82eb4845ec0d8171088ac'
},
{'n': 1,
'value': 9850000,
'address': '18JNSFk8eRZcM8RdqLDSgCiipgnfAYsFef',
'tx_idx': 55819527,
'script': '76a9145011d8607971901c1135c2e8ae3074c472af4bf188ac'
},
{'n': 2,
'value': 100000,
'address': '1ALKUqxRb2MeFqomLCqeYwDZK6FvLNnP3H',
'tx_idx': 55819527,
'script': '76a91466607632dc9e3c0ed2e24fe3c54ea488408e99f588ac'
},
{'n': 3,
'value': 1270000,
'address': '1PA1eHufj8axDWEbYfPtL8HXfA66gTFsFc',
'tx_idx': 55819527,
'script': '76a914f3070c305b4bca72aa4b57bcbad05de5a692f16a88ac'
}
]
}]
'''
    TODO: Both of these currently raise an exception for BCI, so they shouldn't work for RPC either
{ #genesis block coinbase tx
'height': 0,
'time': 1231006505,
'txid': '4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b',
'inputs': [{}], #TODO
'outputs': [{}] #TODO
},
{ #BIP30 duplicate tx, see:
#https://github.com/kristovatlas/interesting-bitcoin-data
'height': 91842, #also height 91812
'time': 1289757588,
'txid': 'd5d27987d2a3dfc724e359870c6644b40e497bdc0589a033220fe15429d88599',
'inputs': [{}], #TODO
'outputs': [{}] #TODO
}
'''
def test(self):
"""Verify that fields are present and as expected for each data provider."""
for test_idx, expected_tx in enumerate(self.TEST_TXS):
for provider in self.PROVIDERS:
prov = provider()
print("Starting test # {0}".format(test_idx+1))
txn = prov.get_tx(expected_tx['txid'])
_assertEq(expected_tx['txid'], txn.txid, test_idx+1)
_assertNoneOrEqual(txn.time, expected_tx['time'], test_idx+1)
_assertEq(
len(expected_tx['inputs']), len(txn.inputs), test_idx+1)
_assertEq(
len(expected_tx['outputs']), len(txn.outputs), test_idx+1)
for idx, tx_in in enumerate(expected_tx['inputs']):
_assertEq(tx_in['n'], txn.inputs[idx].n, test_idx+1)
_assertEq(tx_in['value'], txn.inputs[idx].value, test_idx+1)
_assertEq(
tx_in['address'], txn.inputs[idx].address, test_idx+1)
_assertNoneOrEqual(
txn.inputs[idx].tx_idx, tx_in['tx_idx'], test_idx+1)
for idx, tx_out in enumerate(expected_tx['outputs']):
_assertEq(tx_out['n'], txn.outputs[idx].n, test_idx+1)
_assertEq(
tx_out['value'], txn.outputs[idx].value, test_idx+1)
_assertEq(
tx_out['address'], txn.outputs[idx].address, test_idx+1)
_assertNoneOrEqual(
txn.outputs[idx].tx_idx, tx_out['tx_idx'], test_idx+1)
def _assertEq(a, b, test_num):
assert a == b, "Test {0}: {1} != {2}".format(test_num, a, b)
def _assertNoneOrEqual(a, b, test_num):
assert a is None or a == b, \
"Test {0}: {1} != None && != {2}".format(test_num, a, b)
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.contrib.auth.models import AbstractUser
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from .managers import CustomManager
@python_2_unicode_compatible
class User(AbstractUser):
# First Name and Last Name do not cover name patterns
# around the globe.
name = models.CharField(_('Name of User'), blank=True, max_length=255)
email = models.EmailField(_('email address'), unique=True)
username = models.CharField(
_('username'),
max_length=150,
blank=True,
default=''
)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['first_name', 'last_name']
objects = CustomManager()
def __str__(self):
return self.email
def get_absolute_url(self):
return reverse('users:detail', kwargs={'email': self.email})
|
import numpy as np
import sys
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
def parse_index_file(filename):
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def load_sparse_csr(filename):
loader = np.load(filename)
return sp.csr_matrix((loader['data'], loader['indices'], loader['indptr']),
shape=loader['shape'])
def load_regions(WORKING_PATH, YEAR, one_hot=False):
region_data = np.load(WORKING_PATH+str(YEAR)+'-'+str(YEAR+1)+'_region.npy', allow_pickle=True)
region_data = ["unknown" if region is np.nan else region for region in region_data]
region_data = LabelEncoder().fit_transform(region_data)
if one_hot:
return OneHotEncoder(sparse=False).fit_transform(np.array(region_data).reshape((-1, 1)))
return region_data
def load_graph(WORKING_PATH, YEAR):
loader = np.load(WORKING_PATH+"weighted_procurement_"+str(YEAR)+"-"+str(YEAR+1)+".npz")
return sp.csr_matrix((loader['data'], loader['indices'], loader['indptr']), shape=loader['shape'])
def load_disease_network():
return sp.csr_matrix(np.load('gae/data/diseasome/disease_network_adj.npy'))
def load_disease_network_types(one_hot=False):
data = np.load('gae/data/diseasome/disease_network_types.npy')
data = LabelEncoder().fit_transform(data)
if one_hot:
return OneHotEncoder(sparse=False).fit_transform(np.array(data).reshape((-1, 1)))
return data
|
# Generated by Django 2.2.9 on 2020-02-10 13:26
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Team",
fields=[
("id", models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
("name", models.CharField(default=None, max_length=60, unique=True)),
],
),
]
|
import argparse
import os
import PIL.Image as Image
import numpy as np
import tensorflow as tf
from utils.app_utils import FPS
from dataset.df import visualization_utils as vis_util
from object_detection.utils import label_map_util
CWD_PATH = os.getcwd()
detection_graph = tf.Graph()
IMAGE_SIZE = (12, 8)
# Path to frozen detection graph. This is the actual model that is used for the object detection.
# PATH_TO_CKPT = os.path.join(CWD_PATH, 'object_detection', MODEL_NAME, 'frozen_inference_graph.pb')
PATH_TO_CKPT = os.path.join(CWD_PATH, 'frozen_inference_graph.pb')
# List of the strings that is used to add correct label for each box.
# PATH_TO_LABELS = os.path.join(CWD_PATH, 'object_detection', 'data', 'mscoco_label_map.pbtxt')
PATH_TO_LABELS = os.path.join(CWD_PATH, 'label_map.pbtxt')
NUM_CLASSES = 89
# Loading label map
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
use_display_name=True)
category_index = label_map_util.create_category_index(categories)
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
def detect_objects(image_np, sess):
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
    # Each score represents the level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=2)
return image_np
def start_tf():
# detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
return sess
def stop_tf(sess):
sess.close()
def inference(sess, img_np):
fps = FPS().start()
fps.update()
output = detect_objects(img_np, sess)
display_PIL(output)
fps.stop()
def display_PIL(nparray):
image = Image.fromarray(nparray)
image.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-src', '--source', dest='video_source', type=int,
default=0, help='Device index of the camera.')
parser.add_argument('-wd', '--width', dest='width', type=int,
default=480, help='Width of the frames in the video stream.')
parser.add_argument('-ht', '--height', dest='height', type=int,
default=360, help='Height of the frames in the video stream.')
parser.add_argument('-num-w', '--num-workers', dest='num_workers', type=int,
default=2, help='Number of workers.')
parser.add_argument('-q-size', '--queue-size', dest='queue_size', type=int,
default=5, help='Size of the queue.')
args = parser.parse_args()
sess = start_tf()
for file in os.listdir("/Users/bok95/testimages"):
if file.endswith(".jpg"):
image = Image.open(os.path.join('/Users/bok95/testimages', file))
img_np = load_image_into_numpy_array(image)
inference(sess, img_np)
stop_tf(sess)
|
# -*- coding: utf-8 -*-
import sys, os
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
with open(path.join(here, 'version.txt')) as f:
version = f.read().strip()
setup(
name='spinalcordtoolbox',
version=version,
description='Library of analysis tools for MRI of the spinal cord',
long_description=long_description,
url='http://www.neuro.polymtl.ca/home',
author='NeuroPoly Lab, Polytechnique Montreal',
author_email='neuropoly@googlegroups.com',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Healthcare Industry',
'Intended Audience :: Education',
'Topic :: Scientific/Engineering :: Medical Science Apps.',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
keywords='Magnetic Resonance Imaging MRI spinal cord analysis template',
packages=[
"spinalcordtoolbox",
],
#package_data={},
data_files=[
# <hack>
("sct_scripts", [ os.path.join("scripts", x) for x in os.listdir("scripts") if x.endswith(".py") ]),
# </hack>
],
include_package_data=True,
extras_require={
'docs': [
'sphinx',
'sphinxcontrib-programoutput',
'sphinx_rtd_theme',
],
'mpi': [
'mpich==3.2',
'mpi4py==3.0.0',
],
'test': [
"pytest-runner",
"pytest",
],
},
entry_points=dict(
console_scripts=[
# <hack>
] + ['{}=spinalcordtoolbox.compat.launcher:main'.format(os.path.splitext(x)[0]) for x in os.listdir("scripts") if x.endswith(".py")] + [
# </hack>
# TODO add proper command-line entry points from refactored code
#'sct_deepseg_gm=spinalcordtoolbox.deepseg_gm.__main__:main',
],
),
)
|
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits
# ------------------------------------------------------------
# Input
output = os.path.join(sys.argv[1], "")
# ------------------------------------------------------------
stokes_files = output+'output/phase.dat'
norm_file = output+'output/normalization.dat'
plot_dir = output+'plot/'
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
phase, stokes_i, error_i, stokes_q, error_q, stokes_u, error_u, stokes_v, error_v = np.loadtxt(stokes_files, unpack=True)
# Change the sign of Stokes Q/U. The required sign for the phase curve convention depends
# on the polar direction angle to the detector, which is set to 90 deg for the phase curves.
stokes_q *= -1.
stokes_u *= -1.
if os.path.exists(norm_file):
# Reflected light normalization (W m-2 um-1)
_, _, norm = np.loadtxt(norm_file, unpack=True)
else:
# Thermal light
norm = None
stokes_pi = np.sqrt(stokes_q**2+stokes_u**2+stokes_v**2)
if norm is not None:
# Lambertian surface
ss_albedo = 1.
x = np.linspace(0, 180, 1000)
y = (2./3.)*ss_albedo*(np.sin(x*np.pi/180.)+(np.pi-(x*np.pi/180.))*np.cos(x*np.pi/180.))/np.pi
plt.plot(x,y, ls='--')
ss_albedo = 0.5
x = np.linspace(0, 180, 1000)
y = (2./3.)*ss_albedo*(np.sin(x*np.pi/180.)+(np.pi-(x*np.pi/180.))*np.cos(x*np.pi/180.))/np.pi
plt.plot(x,y, ls='--')
# Stokes I
plt.xlabel('Phase angle (deg)')
if norm is None:
plt.ylabel('Stokes I (W m$^{-2}$ µm$^{-1}$)')
plt.plot(phase, stokes_i, ls='-')
else:
plt.ylabel('Normalized Stokes I')
plt.plot(phase, stokes_i/norm, ls='-')
plt.ylim(0,1)
plt.xlim(0,180)
plt.savefig(plot_dir+'phase_i.pdf', bbox_inches='tight')
plt.clf()
# Stokes Q
plt.xlabel('Phase angle (deg)')
if norm is None:
plt.ylabel('Stokes Q')
plt.plot(phase, stokes_q, ls='-')
else:
plt.ylabel('Normalized Stokes Q')
plt.plot(phase, stokes_q/norm, ls='-')
plt.ylim(-1,1)
plt.xlim(0,180)
plt.savefig(plot_dir+'phase_q.pdf', bbox_inches='tight')
plt.clf()
# Stokes U
plt.xlabel('Phase angle (deg)')
if norm is None:
plt.ylabel('Stokes U')
plt.plot(phase, stokes_u, ls='-')
else:
plt.ylabel('Normalized Stokes U')
plt.plot(phase, stokes_u/norm, ls='-')
plt.ylim(-1,1)
plt.xlim(0,180)
plt.savefig(plot_dir+'phase_u.pdf', bbox_inches='tight')
plt.clf()
# Stokes V
plt.xlabel('Phase angle (deg)')
if norm is None:
    plt.ylabel('Stokes V')
    plt.plot(phase, stokes_v, ls='-')
else:
    plt.ylabel('Normalized Stokes V')
    plt.plot(phase, stokes_v/norm, ls='-')
plt.ylim(-1,1)
plt.xlim(0,180)
plt.savefig(plot_dir+'phase_v.pdf', bbox_inches='tight')
plt.clf()
# -Q/I
plt.xlabel('Phase angle (deg)')
plt.ylabel('-Q/I')
plt.plot(phase[stokes_i>0.], -stokes_q[stokes_i>0.]/stokes_i[stokes_i>0.], ls='-')
plt.xlim(0,180)
plt.ylim(-1,1)
plt.savefig(plot_dir+'phase_q_pol.pdf', bbox_inches='tight')
plt.clf()
# U/I
plt.xlabel('Phase angle (deg)')
plt.ylabel('U/I')
plt.plot(phase[stokes_i>0.], stokes_u[stokes_i>0.]/stokes_i[stokes_i>0.], ls='-')
plt.xlim(0,180)
plt.ylim(-1,1)
plt.savefig(plot_dir+'phase_u_pol.pdf', bbox_inches='tight')
plt.clf()
# V/I
plt.xlabel('Phase angle (deg)')
plt.ylabel('V/I')
plt.plot(phase[stokes_i>0.], stokes_v[stokes_i>0.]/stokes_i[stokes_i>0.], ls='-')
plt.xlim(0,180)
plt.ylim(-1,1)
plt.savefig(plot_dir+'phase_v_pol.pdf', bbox_inches='tight')
plt.clf()
# Degree of polarization
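# P = sqrt(Q^2 + U^2 + V^2) / I, plotted only where I > 0 to avoid division by zero.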
plt.xlabel('Phase angle (deg)')
plt.ylabel('Degree of polarization')
plt.plot(phase[stokes_i>0.], stokes_pi[stokes_i>0.]/stokes_i[stokes_i>0.], ls='-')
plt.xlim(0,180)
plt.ylim(0,1)
plt.savefig(plot_dir+'phase_polarization.pdf', bbox_inches='tight')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Invoke tasks."""
import os
import json
import shutil
from invoke import task
HERE = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(HERE, 'cookiecutter.json'), 'r') as fp:
COOKIECUTTER_SETTINGS = json.load(fp)
COOKIECUTTER_SETTINGS['repo_name'] = COOKIECUTTER_SETTINGS['app_title'].replace(' ','').lower()
COOKIE = os.path.join(HERE, COOKIECUTTER_SETTINGS['repo_name'])
@task
def build(ctx):
"""Build the cookiecutter."""
ctx.run('cookiecutter {0} --no-input'.format(HERE))
@task
def clean(ctx):
"""Clean out generated cookiecutter."""
print(COOKIE)
if os.path.exists(COOKIE):
shutil.rmtree(COOKIE)
print('Removed {0}'.format(COOKIE))
else:
print('App directory does not exist. Skipping.')
@task(pre=[clean, build])
def test(ctx):
"""Run lint commands and tests."""
os.chdir(COOKIE)
ctx.run('pip install -e ."[test]"', echo=True)
ctx.run('pytest', echo=True)
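# Typical workflow (assumed): run "invoke test" (or "inv test") from the template root;
# the pre=[clean, build] hooks regenerate the cookiecutter output before the tests run.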
|
import json
import os
import time
import argparse
import uuid
import subprocess32
import sys
from datetime import datetime
from tzlocal import get_localzone
import pytz
import logging
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),"../storage"))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),"../utils"))
from jobs_tensorboard import GenTensorboardMeta
import yaml
from jinja2 import Environment, FileSystemLoader, Template
from config import config, GetStoragePath
import base64
import re
import thread
import threading
import random
import pycurl
from StringIO import StringIO
logger = logging.getLogger(__name__)
def localize_time(date):
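    """Kubernetes reports timestamps as UTC strings of the form "%Y-%m-%dT%H:%M:%SZ";
    attach the UTC timezone and return an ISO 8601 string."""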
if type(date) == str:
date = datetime.strptime(date, "%Y-%m-%dT%H:%M:%SZ")
return pytz.utc.localize(date).isoformat()
def curl_get(url):
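    """HTTPS GET against the API server using the client certificate/key and CA
    configured in config; returns the raw response body as a string."""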
curl = pycurl.Curl()
curl.setopt(pycurl.URL, url)
curl.setopt(pycurl.SSL_VERIFYPEER, 1)
curl.setopt(pycurl.SSL_VERIFYHOST, 0)
curl.setopt(pycurl.CAINFO, config["certificate-authority"])
curl.setopt(pycurl.SSLKEYTYPE, "PEM")
curl.setopt(pycurl.SSLKEY, config["client-key"])
curl.setopt(pycurl.SSLCERTTYPE, "PEM")
curl.setopt(pycurl.SSLCERT, config["client-certificate"])
curl.setopt(curl.FOLLOWLOCATION, True)
buff = StringIO()
curl.setopt(pycurl.WRITEFUNCTION, buff.write)
curl.perform()
responseStr = buff.getvalue()
curl.close()
return responseStr
def kubectl_create(jobfile, EXEC=True):
if EXEC:
try:
output = subprocess32.check_output(["bash", "-c", config["kubelet-path"] + " create -f " + jobfile])
except Exception as e:
logger.exception("kubectl create")
output = ""
else:
output = "Job " + jobfile + " is not submitted to kubernetes cluster"
return output
def kubectl_delete(jobfile, EXEC=True):
if EXEC:
try:
cmd = "bash -c '" + config["kubelet-path"] + " delete -f " + jobfile + "'"
logger.info("executing %s", cmd)
output = os.system(cmd)
except Exception as e:
logger.exception("kubectl delete")
output = -1
else:
output = -1
return output
# timeout=None (the default) means the call never times out.
def kubectl_exec(params, timeout=None):
    """By default, never times out."""
try:
#print ("bash -c %s %s" % (config["kubelet-path"], params))
# TODO set the timeout
output = subprocess32.check_output(["bash", "-c", config["kubelet-path"] + " " + params], timeout=timeout)
except Exception as e:
logger.exception("kubectl exec")
output = ""
return output
def kubectl_exec_output_to_file(params, file):
os.system("%s %s 2>&1 | tee %s" % (config["kubelet-path"], params, file))
def Split(text, spliter):
return [x for x in text.split(spliter) if len(x.strip()) > 0]
def GetServiceAddress(jobId):
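    """Resolve the services labelled run=<jobId> into a list of
    {containerPort, hostPort, hostIP, hostName} mappings."""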
ret = []
outputYaml = kubectl_exec(" get svc -l run={0} -o=yaml".format(jobId))
    output = yaml.safe_load(outputYaml)
svcs = output["items"]
for svc in svcs:
        containerPort = svc["spec"]["ports"][0]["port"]
        hostPort = svc["spec"]["ports"][0]["nodePort"]
        hostIP = None
        hostName = None
        labelIndex = 0
        selector = ""
for label in svc["spec"]["selector"]:
if (labelIndex > 0):
selector += ","
selector += "{0}={1}".format(label, svc["spec"]["selector"][label])
labelIndex += 1
if selector is not None:
podInfo = GetPod(selector)
if podInfo is not None and "items" in podInfo:
for item in podInfo["items"]:
if "status" in item and "hostIP" in item["status"]:
hostIP = item["status"]["hostIP"]
if "spec" in item and "nodeName" in item["spec"]:
hostName = item["spec"]["nodeName"]
if containerPort is not None and hostIP is not None and hostPort is not None:
svcMapping = {}
svcMapping["containerPort"] = containerPort
svcMapping["hostPort"] = hostPort
if "." not in hostName and "domain" in config and (not config["domain"] is None) and len(config["domain"].strip()) > 0:
hostName += "."+config["domain"]
svcMapping["hostIP"] = hostIP
svcMapping["hostName"] = hostName
ret.append(svcMapping)
return ret
def GetPod(selector):
podInfo = {}
try:
output = kubectl_exec(" get pod -o yaml -l " + selector)
        podInfo = yaml.safe_load(output)
except Exception as e:
logger.exception("kubectl get pod")
podInfo = None
return podInfo
def GetLog(jobId, tail=None):
# assume our job only one container per pod.
selector = "run=" + jobId
podInfo = GetPod(selector)
logs = []
if podInfo is not None and "items" in podInfo:
for item in podInfo["items"]:
log = {}
if "metadata" in item and "name" in item["metadata"]:
log["podName"] = item["metadata"]["name"]
log["podMetadata"] = item["metadata"]
if "status" in item and "containerStatuses" in item["status"] and "containerID" in item["status"]["containerStatuses"][0]:
containerID = item["status"]["containerStatuses"][0]["containerID"].replace("docker://", "")
log["containerID"] = containerID
if tail is not None:
log["containerLog"] = kubectl_exec(" logs %s --tail=%s" % (log["podName"], str(tail)))
else:
log["containerLog"] = kubectl_exec(" logs " + log["podName"])
logs.append(log)
return logs
def check_pod_status(pod):
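    """Collapse a pod's raw status into one of:
    "Error", "Succeeded", "Unknown", "Failed", "Pending" or "Running"."""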
try:
if pod["status"]["containerStatuses"][0]["restartCount"] > 0:
return "Error"
except Exception as e:
pass
try:
if pod["status"]["phase"] == "Succeeded":
return "Succeeded"
except Exception as e:
pass
try:
if pod["status"]["phase"] == "Unknown":
return "Unknown" # host is dead/cannot be reached.
except Exception as e:
pass
try:
if pod["status"]["phase"] == "Failed":
return "Failed"
except Exception as e:
pass
try:
if pod["status"]["phase"] == "Pending":
return "Pending"
except Exception as e:
pass
try:
if pod["status"]["phase"] == "Running" and all("ready" in item and item["ready"] for item in pod["status"]["containerStatuses"]):
return "Running"
except Exception as e:
return "Pending"
return "Unknown"
def get_pod_pending_detail(pod):
description = kubectl_exec("describe pod %s" % pod["metadata"]["name"])
ret = []
for line in description.split("\n"):
if "fit failure summary on nodes" in line:
ret += [item.strip() for item in line.replace("fit failure summary on nodes : ", "").replace("(.*)", "").strip().split(",")]
return ret
def check_pending_reason(pod, reason):
reasons = get_pod_pending_detail(pod)
return any([reason in item for item in reasons])
def get_pod_events(podname):
url = "%s/api/v1/namespaces/default/events?fieldSelector=involvedObject.name=%s" % (config["apiserver"], podname)
responseStr = curl_get(url)
events = json.loads(responseStr)
return events
def get_pod_unscheduled_reason(podname):
events = get_pod_events(podname)
ret = ""
if "items" in events:
for event in events["items"]:
if "reason" in event and event["reason"] == "FailedScheduling":
ret = event["message"]
return ret
def get_pod_status(pod):
podstatus = {}
if "status" in pod and "conditions" in pod["status"]:
for condition in pod["status"]["conditions"]:
try:
if condition["type"] == "PodScheduled" and condition["status"] == "False" and "reason" in condition:
unscheduledReason = get_pod_unscheduled_reason(pod["metadata"]["name"])
podstatus["message"] = condition["reason"] + ":" + unscheduledReason
except Exception as e:
pass
if "status" in pod and "containerStatuses" in pod["status"]:
# assume we only have one container in every pod
containerStatus = pod["status"]["containerStatuses"][0]
if "state" in containerStatus and "waiting" in containerStatus["state"]:
ret = ""
if "reason" in containerStatus["state"]["waiting"]:
ret += containerStatus["state"]["waiting"]["reason"]
if "message" in containerStatus["state"]["waiting"]:
ret += ":\n" + containerStatus["state"]["waiting"]["message"]
podstatus["message"] = ret
elif "state" in containerStatus and "terminated" in containerStatus["state"]:
ret = ""
if "reason" in containerStatus["state"]["terminated"]:
ret += containerStatus["state"]["terminated"]["reason"]
if "message" in containerStatus["state"]["terminated"]:
ret += ":\n" + containerStatus["state"]["terminated"]["message"]
podstatus["message"] = ret
if "finishedAt" in containerStatus["state"]["terminated"] and containerStatus["state"]["terminated"]["finishedAt"] is not None:
podstatus["finishedAt"] = localize_time(containerStatus["state"]["terminated"]["finishedAt"])
if "startedAt" in containerStatus["state"]["terminated"] and containerStatus["state"]["terminated"]["startedAt"] is not None:
podstatus["startedAt"] = localize_time(containerStatus["state"]["terminated"]["startedAt"])
elif "state" in containerStatus and "running" in containerStatus["state"] and "startedAt" in containerStatus["state"]["running"]:
podstatus["message"] = "started at: " + localize_time(containerStatus["state"]["running"]["startedAt"])
if "startedAt" in containerStatus["state"]["running"]:
podstatus["startedAt"] = localize_time(containerStatus["state"]["running"]["startedAt"])
if "finishedAt" not in podstatus:
podstatus["finishedAt"] = datetime.now(get_localzone()).isoformat()
if "status" in pod and "podIP" in pod["status"]:
podstatus["podIP"] = pod["status"]["podIP"]
if "status" in pod and "hostIP" in pod["status"]:
podstatus["hostIP"] = pod["status"]["hostIP"]
return podstatus
def GetJobStatus(jobId):
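    """Aggregate the per-pod states of a job into a single status string,
    returned together with a per-pod detail list."""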
podInfo = GetPod("run=" + jobId)
output = "Unknown"
if podInfo is None:
output = "kubectlERR"
detail = []
elif "items" in podInfo:
podStatus = [check_pod_status(pod) for pod in podInfo["items"]]
#detail = "=====================\n=====================\n=====================\n".join([yaml.dump(pod["status"], default_flow_style=False) for pod in podInfo["items"] if "status" in podInfo["items"]])
######!!!!!!!!!!!!!!!!CAUTION!!!!!! since "any and all are used here, the order of if cause is IMPORTANT!!!!!, we need to deail with Faild,Error first, and then "Unknown" then "Pending", at last " Successed and Running"
if len(podStatus) == 0:
output = "Pending"
elif any([status == "Error" for status in podStatus]):
output = "Failed"
elif any([status == "Failed" for status in podStatus]):
output = "Failed"
elif any([status == "Unknown" for status in podStatus]):
output = "Unknown"
elif any([status == "Pending" for status in podStatus]):
if any([check_pending_reason(pod, "PodFitsHostPorts") for pod in podInfo["items"]]):
output = "PendingHostPort"
else:
output = "Pending"
        # Note: with an empty podStatus the all() below would be vacuously True;
        # the len(podStatus) == 0 check above guards against that.
elif all([status == "Succeeded" for status in podStatus]):
output = "Succeeded"
elif any([status == "Running" for status in podStatus]): # as long as there are no "Unknown", "Pending" nor "Error" pods, once we see a running pod, the job should be in running status.
output = "Running"
        detail = [get_pod_status(pod) for pod in podInfo["items"]]
return output, detail
def get_node_labels(key):
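    """Return the distinct values of the given node label key across the cluster,
    queried directly from the API server."""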
url = "%s/api/v1/nodes" % (config["apiserver"])
responseStr = curl_get(url)
nodes = json.loads(responseStr)
ret = []
if "items" in nodes:
for node in nodes["items"]:
if "metadata" in node and "labels" in node["metadata"]:
if key in node["metadata"]["labels"]:
v = node["metadata"]["labels"][key]
if not v in ret:
ret.append(v)
return ret
if __name__ == '__main__':
    print(get_node_labels("rack"))
|
import numpy as np
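# numpy's exponential(scale, size): "scale" is the mean (1/lambda), so the draws below
# have mean inter-arrival time 3 and mean execution time 1, three samples each.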
arr_time = np.random.exponential(3, 3)
print(arr_time)
Exec_Time = np.random.exponential(1, 3)
print(Exec_Time)
for i in range(3):
    print("A({}, {})".format(round(arr_time[i], 1), round(Exec_Time[i], 1)))
'''
Example output (values vary between runs since the samples are random):
A(0.3, 1.1)
A(2.5, 2.0)
A(0.0, 4.7)
'''
|