import json
import os
import shutil
from .cwltool_deps import ref_resolver
from .parser import (
JOB_JSON_FILE,
load_job_proxy,
)
def handle_outputs(job_directory=None):
# Relocate dynamically collected files to pre-determined locations
# registered with ToolOutput objects via from_work_dir handling.
if job_directory is None:
job_directory = os.path.join(os.getcwd(), os.path.pardir)
cwl_job_file = os.path.join(job_directory, JOB_JSON_FILE)
if not os.path.exists(cwl_job_file):
# Not a CWL job, just continue
return
# So we only need to do strict validation when the tool was loaded,
# no reason to do it again during job execution - so this shortcut
# allows us to not need Galaxy's full configuration on job nodes.
job_proxy = load_job_proxy(job_directory, strict_cwl_validation=False)
tool_working_directory = os.path.join(job_directory, "working")
outputs = job_proxy.collect_outputs(tool_working_directory)
for output_name, output in outputs.items():
target_path = job_proxy.output_path(output_name)
if isinstance(output, dict) and "location" in output:
output_path = ref_resolver.uri_file_path(output["location"])
if output["class"] != "File":
open("galaxy.json", "w").write(json.dump({
"dataset_id": job_proxy.output_id(output_name),
"type": "dataset",
"ext": "expression.json",
}))
shutil.move(output_path, target_path)
for secondary_file in output.get("secondaryFiles", []):
# TODO: handle nested files...
secondary_file_path = ref_resolver.uri_file_path(secondary_file["location"])
assert secondary_file_path.startswith(output_path)
secondary_file_name = secondary_file_path[len(output_path):]
secondary_files_dir = job_proxy.output_secondary_files_dir(
output_name, create=True
)
extra_target = os.path.join(secondary_files_dir, secondary_file_name)
shutil.move(
secondary_file_path,
extra_target,
)
else:
with open(target_path, "w") as f:
f.write(json.dumps(output))
__all__ = (
'handle_outputs',
)
|
from concurrent.futures import ThreadPoolExecutor
from time import sleep
from controlling.AsyncProcessor import AsyncProcessor
class DummyMovementEngine:
x_pos = 0
speed = 0
def __init__(self):
self.is_moving = False
self._executor = AsyncProcessor(ThreadPoolExecutor(max_workers=2))
def start(self, speed):
print("Started to move at speed", speed)
self.set_speed(speed)
self.is_moving = True
self._executor.enqueue(self._calc_x)
def stop(self):
print("stopped moving")
self.is_moving = False
def set_speed(self, speed):
self.speed = (speed*10) - 7
def _calc_x(self):
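        # Runs on the executor thread: integrate the position by adding `speed`
        # every 25 ms until stop() clears the is_moving flag.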
while self.is_moving:
self.x_pos += self.speed
sleep(0.025)
def get_x(self):
return self.x_pos
|
import torch
import kymatio.scattering3d.backend as backend
from kymatio import HarmonicScattering3D
class BenchmarkHarmonicScattering3D:
params = [
[
{ # Small. 32x32x32, 2 scales, 2 harmonics
"J": 2,
"shape": (32, 32, 32),
"L": 2,
},
{ # Large. 128x128x128, 2 scales, 2 harmonics
"J": 2,
"shape": (128, 128, 128),
"L": 2,
},
{ # A case with many scales (J=6) and few harmonics (L=1)
"J": 6,
"shape": (128, 128, 128),
"L": 1,
},
{ # A case with few scales (J=2) and many harmonics (L=6)
"J": 2,
"shape": (32, 32, 32),
"L": 4,
}
],
[
1,
]
]
param_names = ["sc_params", "batch_size"]
def setup(self, sc_params, batch_size):
scattering = HarmonicScattering3D(**sc_params)
scattering.cpu()
x = torch.randn(
batch_size,
sc_params["shape"][0], sc_params["shape"][1], sc_params["shape"][2],
dtype=torch.float32)
        x = x.cpu()
self.scattering = scattering
self.x = x
def time_constructor(self, sc_params, batch_size):
HarmonicScattering3D(**sc_params)
def time_forward(self, sc_params, batch_size):
(self.scattering).forward(self.x)
|
#!/usr/bin/python3
"""Surface Hopping Module.
.. moduleauthor:: Bartosz Błasiak <blasiak.bartosz@gmail.com>
"""
from abc import ABC, abstractmethod
import math
import psi4
import numpy
from .aggregate import Aggregate
from .trajectory import TimePoint, Trajectory
from .hamiltonian import Isolated_Hamiltonian
from ..psithon.util import rearrange_eigenpairs, _reorder, check_sim
class System:
def __init__(self, psi4_molecule, temperature=0.0, nstates=1, current_state=0):
# Molecular aggregate
self.aggregate = Aggregate(psi4_molecule)
# Physical temperature
self.temperature = temperature
# Number of all electronic states
self.nstates = nstates
# Current electronic state index
self.current_state = current_state
# Hamiltonian of entire system
self.hamiltonian = None
def update(self, xyz):
"Update geometry of all molecules in the system"
self.aggregate.update(xyz)
self.hamiltonian.computers[0].update(self.aggregate.qm.geometry())
for i, computer in enumerate(self.hamiltonian.computers[1:]):
computer.update(self.aggregate.bath[i].geometry())
def set_hamiltonian(self, method_high, method_low=None):
"Set the Hamiltonian of the system"
if method_low is not None: raise NotImplementedError("Now only isolated Hamiltonian is implemented")
self.hamiltonian = Isolated_Hamiltonian(method_high, self.aggregate, self.nstates)
class Units:
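    # 1 fs expressed in atomic units of time (~41.341 a.u.); au2fs is the inverse.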
fs2au = 4.1341373336493e+16 * 1.0e-15
au2fs = 1./fs2au
class DynamicalSystem(System, Units):
def __init__(self, aggregate, nstates=1, init_state=0, temperature=0.0,
qm_method='myCIS', gs_method=None, dt_class=0.5, dt_quant=None, seed=0):
System.__init__(self, aggregate, temperature, nstates, init_state)
Units.__init__(self)
numpy.random.seed(seed)
# Trajectory of the system
self.trajectory = Trajectory(self.aggregate)
# Initial electronic state index
self.init_state = init_state
# Current electronic state index
self.current_state = init_state
# Dimension of quantum adiabatic space
self.dim = nstates
# Time steps
self.dt_class = dt_class * self.fs2au
self.dt_quant = dt_class * self.fs2au / 1000.0 if dt_quant is None else dt_quant * self.fs2au
# Instantaneous total energy of the system
self.energy = None
# Instantaneous quantum amplitudes
self.c = None
# Instantaneous density matrix
self.d = None
# Set up the Hamiltonian
self.set_hamiltonian(qm_method)
def run(self, nt, out_xvf='traj.dat', out_xyz='traj.xyz', center_mode='qm'):
"Run the TSH dynamics"
outf = open(out_xvf, 'w')
outx = open(out_xyz, 'w')
print(" Initial Conditions")
self.set_initial_conditions()
self.trajectory.save(outf)
self.aggregate.save_xyz(outx, center_mode)
for i in range(nt):
t = i*self.dt_class*self.au2fs
print(" t = %13.3f [fs] s = %2d T = %6.1f [K]" % (t, self.current_state, self.temperature))
self.propagate()
self.trajectory.save(outf)
self.aggregate.save_xyz(outx, center_mode)
print(" Occupancies: ")
print("%8.3f"*len(self.p) % tuple(self.d.real.diagonal()))
print(" Transition Probabilities: ")
print("%8.3f"*len(self.p) % tuple(self.p))
outf.close()
outx.close()
def try_to_hop(self, g, e_curr, e_old):#TODO
"Try to hop from k to m"
raise NotImplementedError
def set_initial_conditions(self):#TODO
raise NotImplementedError
def propagate(self):#TODO
raise NotImplementedError
def _density_matrix(self, c):#TODO
raise NotImplementedError
def _update_density_matrix(self, c):#TODO
raise NotImplementedError
def _step_quantum(self, c, G):#TODO
raise NotImplementedError
def _decoherence_correction(self):#TODO
raise NotImplementedError
|
import cv2
import numpy as np
from bounding_box import BoundingBox
from utils import open_file
from tqdm import tqdm
from typing import List, TypeVar
SheetReader = TypeVar('SheetReader')
class Detector:
def __init__(self,
reader: SheetReader,
templates: List[np.ndarray],
detection_threshold: float,
is_staff: bool=False) -> None:
self._reader = reader
self._is_staff = is_staff
self._templates = templates
self._best_location_count = -1
self._best_locations = []
self._best_scale = 0
self._boxes = []
self._detection_threshold = detection_threshold
self._iou_threshold = 0.3
# Locate target position
def _locate(self) -> None:
for scale in np.arange(self._reader._start_size,
self._reader._stop_size + 0.01,
self._reader._search_step):
locations = []
location_count = 0
for template in self._templates:
if template is None:
continue
template = cv2.resize(template, None,
fx=scale, fy=scale,
interpolation=cv2.INTER_CUBIC)
result = cv2.matchTemplate(self._reader._img, template, cv2.TM_CCOEFF_NORMED)
result = np.where(result >= self._detection_threshold)
location_count += len(result[0])
locations += [result]
print('scale: {:1.2f}, matches: {:d}'.format(scale, location_count))
if (location_count > self._best_location_count):
self._best_location_count = location_count
self._best_locations = locations
self._best_scale = scale
# Generate bounding boxes
def _generate_boxes(self) -> None:
for i in tqdm(range(len(self._templates))):
h, w = np.array(self._templates[i].shape) * self._best_scale
if self._is_staff:
self._boxes.append([BoundingBox(0, pt[0], self._reader.img_width, h)
for pt in zip(*self._best_locations[i])])
else:
self._boxes.append([BoundingBox(pt[1], pt[0], w, h)
for pt in zip(*self._best_locations[i])])
self._boxes = [j for i in self._boxes for j in i]
# Merge bounding boxes
def _merge(self) -> None:
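        # Greedy merge: pop one box, sort the rest by distance to it, absorb every
        # box whose IoU exceeds the threshold, and stop scanning once the next box
        # is farther away than the sum of the two half-widths.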
self._filtered_boxes = []
while len(self._boxes) > 0:
box = self._boxes.pop(0)
# TODO: KD-Tree
self._boxes.sort(key=lambda bounding_box: bounding_box.distance(box))
merged = False
while(not merged):
merged = True
i = 0
for _ in range(len(self._boxes)):
if box.iou(self._boxes[i]) > self._iou_threshold or self._boxes[i].iou(box) > self._iou_threshold:
box = box.merge(self._boxes.pop(i))
merged = False
elif box.distance(self._boxes[i]) > box.w / 2 + self._boxes[i].w / 2:
break
else:
i += 1
self._filtered_boxes.append(box)
def detect(self) -> List[BoundingBox]:
print('Locating...')
self._locate()
print('Generating bounding boxes...')
self._generate_boxes()
print('Merging bounding boxes...')
self._merge()
print('Total number of boxes: {}'.format(len(self._filtered_boxes)))
return self._filtered_boxes
|
# Generated by Django 2.2 on 2022-03-16 08:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0016_auto_20220316_0643'),
]
operations = [
migrations.AddField(
model_name='myimage',
name='dishes_url',
field=models.CharField(default='http://127.0.0.1:8080', max_length=100),
preserve_default=False,
),
]
|
from collections import Counter, defaultdict
from functools import partial
import math, random
def entropy(class_probabilities):
"""given a list of class probabilities, compute the entropy"""
return sum(-p * math.log(p, 2) for p in class_probabilities if p)
def class_probabilities(labels):
total_count = len(labels)
return [count / total_count
for count in Counter(labels).values()]
def data_entropy(labeled_data):
labels = [label for _, label in labeled_data]
probabilities = class_probabilities(labels)
return entropy(probabilities)
def partition_entropy(subsets):
"""find the entropy from this partition of data into subsets"""
total_count = sum(len(subset) for subset in subsets)
return sum( data_entropy(subset) * len(subset) / total_count
for subset in subsets )
def group_by(items, key_fn):
"""returns a defaultdict(list), where each input item
is in the list whose key is key_fn(item)"""
groups = defaultdict(list)
for item in items:
key = key_fn(item)
groups[key].append(item)
return groups
def partition_by(inputs, attribute):
"""returns a dict of inputs partitioned by the attribute
each input is a pair (attribute_dict, label)"""
return group_by(inputs, lambda x: x[0][attribute])
def partition_entropy_by(inputs,attribute):
"""computes the entropy corresponding to the given partition"""
partitions = partition_by(inputs, attribute)
return partition_entropy(partitions.values())
def classify(tree, input):
"""classify the input using the given decision tree"""
# if this is a leaf node, return its value
if tree in [True, False]:
return tree
# otherwise find the correct subtree
attribute, subtree_dict = tree
subtree_key = input.get(attribute) # None if input is missing attribute
if subtree_key not in subtree_dict: # if no subtree for key,
subtree_key = None # we'll use the None subtree
subtree = subtree_dict[subtree_key] # choose the appropriate subtree
return classify(subtree, input) # and use it to classify the input
def build_tree_id3(inputs, split_candidates=None):
# if this is our first pass,
# all keys of the first input are split candidates
if split_candidates is None:
split_candidates = inputs[0][0].keys()
# count Trues and Falses in the inputs
num_inputs = len(inputs)
num_trues = len([label for item, label in inputs if label])
num_falses = num_inputs - num_trues
if num_trues == 0: # if only Falses are left
return False # return a "False" leaf
if num_falses == 0: # if only Trues are left
return True # return a "True" leaf
if not split_candidates: # if no split candidates left
return num_trues >= num_falses # return the majority leaf
# otherwise, split on the best attribute
best_attribute = min(split_candidates,
key=partial(partition_entropy_by, inputs))
partitions = partition_by(inputs, best_attribute)
new_candidates = [a for a in split_candidates
if a != best_attribute]
# recursively build the subtrees
subtrees = { attribute : build_tree_id3(subset, new_candidates)
for attribute, subset in partitions.items() }
subtrees[None] = num_trues > num_falses # default case
return (best_attribute, subtrees)
def forest_classify(trees, input):
votes = [classify(tree, input) for tree in trees]
vote_counts = Counter(votes)
return vote_counts.most_common(1)[0][0]
if __name__ == "__main__":
inputs = [
({'level':'Senior','lang':'Java','tweets':'no','phd':'no'}, False),
({'level':'Senior','lang':'Java','tweets':'no','phd':'yes'}, False),
({'level':'Mid','lang':'Python','tweets':'no','phd':'no'}, True),
({'level':'Junior','lang':'Python','tweets':'no','phd':'no'}, True),
({'level':'Junior','lang':'R','tweets':'yes','phd':'no'}, True),
({'level':'Junior','lang':'R','tweets':'yes','phd':'yes'}, False),
({'level':'Mid','lang':'R','tweets':'yes','phd':'yes'}, True),
({'level':'Senior','lang':'Python','tweets':'no','phd':'no'}, False),
({'level':'Senior','lang':'R','tweets':'yes','phd':'no'}, True),
({'level':'Junior','lang':'Python','tweets':'yes','phd':'no'}, True),
({'level':'Senior','lang':'Python','tweets':'yes','phd':'yes'},True),
({'level':'Mid','lang':'Python','tweets':'no','phd':'yes'}, True),
({'level':'Mid','lang':'Java','tweets':'yes','phd':'no'}, True),
({'level':'Junior','lang':'Python','tweets':'no','phd':'yes'},False)
]
for key in ['level','lang','tweets','phd']:
print(key, partition_entropy_by(inputs, key))
print()
senior_inputs = [(input, label)
for input, label in inputs if input["level"] == "Senior"]
for key in ['lang', 'tweets', 'phd']:
print(key, partition_entropy_by(senior_inputs, key))
print()
print("building the tree")
tree = build_tree_id3(inputs)
print(tree)
print("Junior / Java / tweets / no phd", classify(tree,
{ "level" : "Junior",
"lang" : "Java",
"tweets" : "yes",
"phd" : "no"} ))
print("Junior / Java / tweets / phd", classify(tree,
{ "level" : "Junior",
"lang" : "Java",
"tweets" : "yes",
"phd" : "yes"} ))
print("Intern", classify(tree, { "level" : "Intern" } ))
print("Senior", classify(tree, { "level" : "Senior" } ))
|
import stackless
from datetime import date, timedelta, datetime
from General.outsourcingTask import generateSol, stacklessTicker
from General.basicServer import CommunicationServer
from General.newProcess import StacklessProcess, getLock, getSharedObjects
def keepAlive():
while True:
stackless.schedule()
def spawnProcess(p):
p.start()
if __name__ == '__main__':
mgr, sharedDict, sharedList = getSharedObjects()
mainLock = getLock()
responseChannel = stackless.channel()
storage = dict()
initialTime = date(3564, 3, 5)
tickTime = 250000
timeChannel = stackless.channel()
#timeChannel = sharedDict
#timeChannel['date'] = initialTime
p = StacklessProcess(target=generateSol, lock=mainLock, args=(8, storage, initialTime),
listenChannel=timeChannel, responseChannel=responseChannel)
stackless.tasklet(spawnProcess)(p)
#stackless.tasklet(generateSol)(8, timeChannel, responseChannel, storage, initialTime)
stackless.tasklet(stacklessTicker)(timeChannel, tickTime, initialTime)
s = CommunicationServer(8000, responseChannel)
stackless.tasklet(s.run)()
stackless.tasklet(keepAlive)()
stackless.run()
|
import pandas as pd
from os import environ
from os.path import isfile, join, basename
from requests.exceptions import HTTPError as HTTPError1
from ..table_builder.cap_table_builder import CAPFileSource
TMP_DIR = environ.get('CAP2_TMP_DIR', '/tmp')
class PangeaFileSource(CAPFileSource):
def __init__(self, pangea_group):
self.grp = pangea_group
def sample_names(self):
for sample in self.grp.get_samples():
yield sample.name
def metadata(self):
tbl = {}
for sample in self.grp.get_samples():
tbl[sample.name] = sample.metadata
tbl = pd.DataFrame.from_dict(tbl, orient='index')
return tbl
def group_module_files(self, module_name, field_name):
ar = self.grp.analysis_result(module_name).get()
arf = ar.field(field_name).get()
local_path = join(TMP_DIR, basename(arf.get_referenced_filename()))
if not isfile(local_path):
local_path = arf.download_file(filename=local_path)
return local_path
def module_files(self, module_name, field_name):
"""Return an iterable 2-ples of (sample_name, local_path) for modules of specified type."""
for sample in self.grp.get_samples():
try:
ar = sample.analysis_result(module_name).get()
arf = ar.field(field_name).get()
local_path = join(TMP_DIR, basename(arf.get_referenced_filename()))
if not isfile(local_path):
local_path = arf.download_file(filename=local_path)
except HTTPError1:
continue
except Exception:
continue
yield sample.name, local_path
|
"""This module was designed to help making `WePay <https://wepay.com>`_ API calls.
.. moduleauthor:: lehins <lehins@yandex.ru>
:platform: independent
"""
from wepay.calls import *
from wepay.utils import Post, cached_property
__all__ = ['WePay']
class WePay(object):
"""A full client for the WePay API.
:keyword bool production: When ``False``, the ``stage.wepay.com`` API
server will be used instead of the default production.
:keyword str access_token: The access token associated with your
application.
:keyword str api_version: sets default version of API which will be
accepting calls. It is also possible to specify different version
per API call, since all calls accept a keyword argument
`api_version` as well. `More on API versioning
<https://stage.wepay.com/developer/tutorial/versioning>`_.
:keyword float timeout: time in seconds before HTTPS call request will timeout.
Also can be changed on per call basis.
:keyword bool silent: if set to `None` (default) will print
:exc:`WePayWarning<wepay.exceptions.WePayWarning>` if `production=True` and
raise them otherwise. Set it to `True` to stop parameter validation and
suppress all warnings, or `False` to raise all warnings.
:keyword bool use_requests: set to `False` in order to explicitly turn off
`requests <http://docs.python-requests.org/en/latest/>`_ library usage and
fallback to `urllib <https://docs.python.org/3/library/urllib.html#module-urllib>`_
    An instance of this class has attributes that correspond to WePay
    objects; use them to perform API calls. If a WePay object has a
    lookup call, the corresponding attribute is itself callable. Example:
>>> api = WePay(production=False, access_token=WEPAY_ACCESS_TOKEN)
>>> response = api.account.create('Test Account', 'Short Description')
>>> api.account(response['account_id'])
Each method that performs an API call accepts all required parameters as
positional arguments, optional parameters as keyword arguments, as well as
one or more keyword arguments that are used to control behavior of a
call. All these methods accept keyword arguments ``api_version``,
``timeout`` and if documented also possible keyword arguments
``batch_mode``, ``batch_reference_id`` and ``access_token``:
* ``api_version`` will make sure the call is made to a specified API version
(cannot be used together with ``batch_mode``)
* ``timeout`` specifies a connection timeout in seconds for the call
(cannot be used together with ``batch_mode``)
* ``access_token`` will make sure the call is made with this
access_token, also use it to set `authorization` param in
``batch_mode``.
* ``batch_mode`` instead of performing an actual call to WePay, a method
will return a dictionary that is ready to be added to `/batch/create`,
namely to calls list parameter. :meth:`batch.create<wepay.calls.batch.Batch.create>`
* ``batch_reference_id`` will set `reference_id` param in a batch call,
it is an error to use it without ``batch_mode`` set to ``True``
Batch mode usage example:
>>> api = WePay(production=False, access_token=WEPAY_ACCESS_TOKEN)
>>> calls = []
>>> calls.append(api.account.create('Test Account', 'Short Description', batch_mode=True, access_token='STAGE_...', batch_reference_id='c1'))
>>> calls.append(api.checkout(12345, batch_mode=True))
>>> api.batch.create(CLIENT_ID, CLIENT_SECRET, calls)
"""
def __init__(self, production=True, access_token=None, api_version=None,
timeout=30, silent=None, use_requests=None):
self.production = production
self.access_token = access_token
self.api_version = api_version
self.silent = silent
self._timeout = timeout
self._post = Post(use_requests=use_requests, silent=silent)
if production:
self.api_endpoint = "https://wepayapi.com/v2"
self.browser_uri = "https://www.wepay.com"
self.browser_js = self.browser_uri + "/min/js/wepay.v2.js"
self.browser_iframe_js = self.browser_uri + "/min/js/iframe.wepay.js"
else:
self.api_endpoint = "https://stage.wepayapi.com/v2"
self.browser_uri = "https://stage.wepay.com"
self.browser_js = self.browser_uri + "/js/wepay.v2.js"
self.browser_iframe_js = self.browser_uri + "/js/iframe.wepay.js"
self.browser_endpoint = self.browser_uri + "/v2"
@cached_property
def oauth2(self):
""":class:`OAuth2<wepay.calls.oauth2.OAuth2>` call instance."""
return OAuth2(self)
@cached_property
def app(self):
""":class:`App<wepay.calls.app.App>` call instance"""
return App(self)
@cached_property
def user(self):
""":class:`User<wepay.calls.user.User>` call instance"""
return User(self)
@cached_property
def account(self):
""":class:`Account<wepay.calls.account.Account>` call instance"""
return Account(self)
@cached_property
def checkout(self):
""":class:`Checkout<wepay.calls.checkout.Checkout>` call instance"""
return Checkout(self)
@cached_property
def preapproval(self):
""":class:`Preapproval<wepay.calls.preapproval.Preapproval>` call instance"""
return Preapproval(self)
@cached_property
def withdrawal(self):
""":class:`Withdrawal<wepay.calls.withdrawal.Withdrawal>` call instance"""
return Withdrawal(self)
@cached_property
def credit_card(self):
""":class:`CreditCard<wepay.calls.credit_card.CreditCard>` call instance"""
return CreditCard(self)
@cached_property
def subscription_plan(self):
""":class:`SubscriptionPlan<wepay.calls.subscription_plan.SubscriptionPlan>`
call instance
"""
return SubscriptionPlan(self)
@cached_property
def subscription(self):
""":class:`Subscription<wepay.calls.subscription.Subscription>` call instance"""
return Subscription(self)
@cached_property
def subscription_charge(self):
""":class:`SubscriptionCharge<wepay.calls.subscription_charge.SubscriptionCharge>`
call instance
"""
return SubscriptionCharge(self)
@cached_property
def batch(self):
""":class:`Batch<wepay.calls.batch.Batch>` call instance """
return Batch(self)
def call(self, uri, params=None, access_token=None, api_version=None, timeout=None):
"""Calls wepay.com/v2/``uri`` with ``params`` and returns the JSON
response as a python `dict`. The optional ``access_token`` parameter
takes precedence over instance's ``access_token`` if it is
set. Essentially this is the place for all api calls.
:param str uri: API uri to call
:keyword dict params: parameters to include in the call
:keyword str access_token: access_token to use for the call.
:keyword str api_version: allows to create a call to specific version of API
:keyword float timeout: a way to specify a call timeout in seconds. If `None`
will use `WePay.timeout`.
:return: WePay response as documented per call
:rtype: dict
:raises: :exc:`WePayClientError<wepay.exceptions.WePayClientError>`
:raises: :exc:`WePayServerError<wepay.exceptions.WePayServerError>`
:raises: :exc:`WePayConnectionError<wepay.exceptions.WePayConnectionError>`
"""
url = self.api_endpoint + uri
params = params or {}
headers = {
'Content-Type': 'application/json',
'User-Agent': 'Python WePay SDK (third party)'
}
access_token = access_token or self.access_token
headers['Authorization'] = 'Bearer %s' % access_token
api_version = api_version or self.api_version
        if api_version is not None:
headers['Api-Version'] = api_version
timeout = timeout or self._timeout
return self._post(url, params, headers, timeout)
|
from django.db import models
from csat.acquisition.models import DataCollectorConfig
from django.utils.translation import ugettext_lazy as _
from . import graphs
class ExamplesConfig(DataCollectorConfig):
GRAPH_CHOICES = [(g.key, g.description) for g in graphs.get_graphs()]
example = models.CharField(_('Example'), choices=GRAPH_CHOICES,
max_length=255)
|
import speech_recognition as sr
import sys
if sys.version_info[0] < 3:
    # Python 2 only: make UTF-8 the default string encoding.
    reload(sys)
    sys.setdefaultencoding('utf8')
class YLSpeecher:
pass
if __name__ == '__main__':
# obtain audio from the microphone
r = sr.Recognizer()
with sr.Microphone() as source:
print("Say something!")
audio = r.listen(source)
# recognize speech using Sphinx
try:
print('Sphinx thinks you said ' + r.recognize_sphinx(audio, 'zh-CN'))
except sr.UnknownValueError:
print('Sphinx could not understand audio')
except sr.RequestError as e:
print('Sphinx error; {0}'.format(e))
|
# -*- coding: utf-8 -*-
import math
import random
from matplotlib import pyplot as plt
from helper.ambiente import Pontos
from helper.utils import colidir, simulate_points, definir_angulo, dist_euclidiana
import numpy as np
class Vector2d():
def __init__(self, x, y):
self.deltaX = x
self.deltaY = y
self.length = -1
self.direction = [0, 0]
self.vector2d_share()
def vector2d_share(self):
if type(self.deltaX) == type(list()) and type(self.deltaY) == type(list()):
deltaX, deltaY = self.deltaX, self.deltaY
self.deltaX = deltaY[0] - deltaX[0]
self.deltaY = deltaY[1] - deltaX[1]
self.length = math.sqrt(self.deltaX ** 2 + self.deltaY ** 2) * 1.0
if self.length > 0:
self.direction = [self.deltaX / self.length, self.deltaY / self.length]
else:
self.direction = None
else:
self.length = math.sqrt(self.deltaX ** 2 + self.deltaY ** 2) * 1.0
if self.length > 0:
self.direction = [self.deltaX / self.length, self.deltaY / self.length]
else:
self.direction = None
def __add__(self, other):
vec = Vector2d(self.deltaX, self.deltaY)
vec.deltaX += other.deltaX
vec.deltaY += other.deltaY
vec.vector2d_share()
return vec
def __sub__(self, other):
vec = Vector2d(self.deltaX, self.deltaY)
vec.deltaX -= other.deltaX
vec.deltaY -= other.deltaY
vec.vector2d_share()
return vec
def __mul__(self, other):
vec = Vector2d(self.deltaX, self.deltaY)
vec.deltaX *= other
vec.deltaY *= other
vec.vector2d_share()
return vec
def __truediv__(self, other):
return self.__mul__(1.0 / other)
def __repr__(self):
return 'Vector deltaX:{}, deltaY:{}, length:{}, direction:{}'.format(self.deltaX, self.deltaY, self.length,
self.direction)
|
""" Module for defining user related models. """
from datetime import datetime
from sqlalchemy import Column, Integer, String, DateTime, Float
from sqlalchemy.orm import relationship
from utils import get_logger
from models import Base
LOGGER = get_logger(__name__)
class User(Base):
""" Table for tracking the users of the game and their login info. """
__tablename__ = "User"
id = Column(Integer, primary_key=True)
email = Column(String, unique=True, index=True, nullable=False)
name = Column(String, nullable=False)
salt = Column(String, nullable=False)
password = Column(String, nullable=False)
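    # default receives the callable itself, so the timestamp is evaluated at insert time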
last_seen = Column(DateTime, default=datetime.today, nullable=False)
money = Column(Float, default=100, nullable=False)
ship = relationship("Ship", cascade="all, delete-orphan", uselist=False)
def ping(self):
""" Update last seen time. """
self.last_seen = datetime.today()
LOGGER.debug(f"{self!r} ping at {self.last_seen.strftime('%H:%M:%S')}")
def __str__(self) -> str:
return f"{self.name}"
def __repr__(self) -> str:
return f"User(id={self.id}, name={self.name}, email={self.email})"
|
"""
This implements a class to control and present sensor data for a fishtank
"""
import socket
import time
import os
import random
try:
import network
import machine
import onewire
import ds18x20
import gc
import passwords
is_esp_device = True
except ImportError:
# not on esp device
import mock
is_esp_device = False
class FishtankSensor(object):
def __init__(self, temperature_pin):
if is_esp_device:
self.ds_pin = machine.Pin(temperature_pin)
self.ds_sensor = ds18x20.DS18X20(onewire.OneWire(self.ds_pin))
else:
self.ds_sensor = mock.Mock()
self.ds_sensor.scan.return_value = [self.ds_sensor]
self.ds_sensor.read_temp.return_value = 71.123
def get_temperature(self):
try:
# prep the sensor
self.ds_sensor.convert_temp()
time.sleep_ms(750) # wait for sensor to set up
roms = self.ds_sensor.scan()
print('Found DS devices: ', roms)
print('Temperatures: ')
for rom in roms:
temp = self.ds_sensor.read_temp(rom)
if isinstance(temp, float):
msg = round(temp, 2)
print(temp, end=' ')
print('Valid temperature')
return msg
except Exception as e:
print(str(e))
return random.uniform(15, 30)
class FishtankWebserver(object):
def __init__(self, temp_sensor, oled, mqtt_client=None,
port=80, refresh_secs=60, sensor_refresh_secs=30,
mqtt_publish_secs=60):
self.port = port
self.temp_sensor = temp_sensor
self.refresh_secs = refresh_secs
self.mqtt_publish_secs = mqtt_publish_secs
self.last_mqtt_publish = 0
self.oled = oled
self._temp = None
self._temp_last_updated = time.time()
self.sensor_refresh_secs = sensor_refresh_secs
self.network_info = None
self.mqtt_client = mqtt_client
    def celsius_to_fahrenheit(self, temp):
        # F = C * 9/5 + 32; the DS18X20 sensor reports Celsius.
        return round(temp * (9 / 5) + 32.0, 2)
@property
def temp(self):
now = time.time()
delta = now - self._temp_last_updated
if delta < 0:
# this shouldn't happen, did time wrap?
machine.reset()
if self._temp is None or delta > self.sensor_refresh_secs:
self._temp = self.temp_sensor.get_temperature()
return self._temp
def serve_web_page(self):
html = """<!DOCTYPE HTML><html><head>
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta http-equiv="refresh" content="%(refresh_secs)d">
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.7.2/css/all.css" integrity="sha384-fnmOCqbTlWIlj8LyTjo7mOUStjsKC4pOpQbqyi7RrhN7udi9RwhKkMHpvLbHG9Sr" crossorigin="anonymous">
<style> html { font-family: Arial; display: inline-block; margin: 0px auto; text-align: center; }
h2 { font-size: 3.0rem; } p { font-size: 3.0rem; } .units { font-size: 1.2rem; }
.ds-labels{ font-size: 1.5rem; vertical-align:middle; padding-bottom: 15px; }
</style></head><body><h2>ESP with DS18B20</h2>
<p><i class="fas fa-thermometer-half" style="color:#059e8a;"></i>
<span class="ds-labels">Temperature</span>
<span id="temperature">%(celsius)0.2f</span>
<sup class="units">°C</sup>
</p>
<p><i class="fas fa-thermometer-half" style="color:#059e8a;"></i>
<span class="ds-labels">Temperature</span>
<span id="temperature">%(fahrenheit)0.2f</span>
<sup class="units">°F</sup>
</p></body></html>""" % {
"celsius": self.temp,
"fahrenheit": self.fahrenheit_to_celsius(self.temp),
"refresh_secs": self.refresh_secs
}
return html
def update_display(self):
# fill all pixels with color 0
self.oled.fill(0)
self.oled.text("%0.2fC" % self.temp, 0, 0)
self.oled.text("%0.2fF" % self.fahrenheit_to_celsius(self.temp), 0, 20)
if self.network_info:
self.oled.text(self.network_info[0], 0, 40)
self.oled.show()
print("updated display")
def update_mqtt(self):
cur_time = time.time()
if cur_time - self.last_mqtt_publish > self.mqtt_publish_secs:
print("will update mqtt data")
if self.mqtt_client:
self.mqtt_client.publish_metric("temperature_F", self.fahrenheit_to_celsius(self.temp))
self.last_mqtt_publish = cur_time
def handle_requests(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.settimeout(5) # listen timeout
s.bind(('', self.port))
s.listen(5)
while True:
try:
if is_esp_device:
if gc.mem_free() < 102000:
gc.collect()
self.update_mqtt()
self.update_display()
try:
conn, addr = s.accept()
except Exception as e:
print(str(e))
continue
conn.settimeout(3.0)
print('Got a connection from %s' % str(addr))
request = conn.recv(1024)
conn.settimeout(None)
request = str(request)
if request:
print('Content = %s' % request)
response = self.serve_web_page()
conn.send('HTTP/1.1 200 OK\n')
conn.send('Content-Type: text/html\n')
conn.send('Connection: close\n\n')
conn.sendall(response.encode())
conn.close()
except OSError as e:
conn.close()
print(str(e))
print('Connection closed')
def connect_wifi(self):
sta_if = network.WLAN(network.STA_IF)
if not sta_if.isconnected():
sta_if.active(True)
sta_if.connect(passwords.wifi_ssid, passwords.wifi_psk)
while not sta_if.isconnected():
print("waiting to connect to wifi")
machine.idle()
self.network_info = sta_if.ifconfig()
return(self.network_info)
def start(self):
print(self.connect_wifi())
self.update_display()
self.handle_requests()
|
def x_pow(x):
def pow(n):
print(x ** n)
return pow
def fibonacci(nth):
# 1,1,2,3,5,8,13,21,34,55
if nth == 1 or nth == 2:
return 1
else:
return fibonacci(nth - 1) + fibonacci(nth - 2)
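# Example usage: x_pow returns a closure that prints x ** n (it does not return it).
#   square = x_pow(2)
#   square(10)       # prints 1024
#   fibonacci(10)    # returns 55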
|
# SPDX-FileCopyrightText: 2020 Foamyguy, written for Adafruit Industries
#
# SPDX-License-Identifier: Unlicense
"""
CircuitPython example for Monster M4sk.
Draws a basic eye dot on each screen. Looks at nose
when booped. Prints acceleration and light sensor
data when booped as well.
"""
import time
import board
import displayio
from adafruit_display_shapes.circle import Circle
import adafruit_monsterm4sk
# Account for slight screen difference if you want
LEFT_Y_OFFSET = 0 # 12 # my left screen is a tad higher
SCREEN_SIZE = 240
i2c_bus = board.I2C()
mask = adafruit_monsterm4sk.MonsterM4sk(i2c=i2c_bus)
left_group = displayio.Group(max_size=4)
mask.left_display.show(left_group)
right_group = displayio.Group(max_size=4)
mask.right_display.show(right_group)
right_circle = Circle(SCREEN_SIZE // 2, SCREEN_SIZE // 2, 40, fill=0x0000FF)
right_group.append(right_circle)
left_circle = Circle(SCREEN_SIZE // 2, SCREEN_SIZE // 2, 40, fill=0x00AA66)
left_group.append(left_circle)
while True:
# print(mask.boop)
if mask.boop:
left_circle.x = 0
right_circle.x = SCREEN_SIZE - 40 - 40 - 2
right_circle.y = SCREEN_SIZE // 4 - 40
left_circle.y = SCREEN_SIZE // 4 - 40 + LEFT_Y_OFFSET
print(mask.acceleration)
print(mask.light)
time.sleep(0.5)
else:
left_circle.x = SCREEN_SIZE // 2 - 40
right_circle.x = SCREEN_SIZE // 2 - 40
right_circle.y = SCREEN_SIZE // 2 - 40
left_circle.y = SCREEN_SIZE // 2 - 40 + LEFT_Y_OFFSET
|
# coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
class H264NalHrd(Enum):
NONE = "NONE"
VBR = "VBR"
CBR = "CBR"
|
from rest_framework import serializers
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from rest_framework.compat import authenticate
from rest_framework.validators import UniqueValidator
from django.contrib.humanize.templatetags.humanize import naturaltime
from accounts.models import UserProfile
class UserDetailSerializer(serializers.ModelSerializer):
bio = serializers.CharField(source='profile.bio')
avatar = serializers.URLField(source='profile.avatar')
status = serializers.URLField(source='profile.status')
name = serializers.CharField(source='profile.name')
threads = serializers.HyperlinkedRelatedField(
many=True,
read_only=True,
view_name='thread-detail',
lookup_field='pk'
)
posts = serializers.HyperlinkedRelatedField(
many=True,
read_only=True,
view_name='post-detail',
lookup_field='pk'
)
date_joined = serializers.SerializerMethodField()
class Meta:
model = User
fields = [
'username',
'name',
'bio',
'avatar',
'status',
'is_staff',
'date_joined',
'threads',
'posts'
]
lookup_field = 'username'
def get_date_joined(self, obj):
return naturaltime(obj.date_joined)
class UserListSerializer(serializers.ModelSerializer):
bio = serializers.CharField(source='profile.bio')
avatar = serializers.URLField(source='profile.avatar')
status = serializers.URLField(source='profile.status')
name = serializers.CharField(source='profile.name')
class Meta:
model = User
fields = [
'username',
'name',
'bio',
'avatar',
'status',
'is_staff',
'date_joined'
]
class UserUpdateSerializer(serializers.ModelSerializer):
# A field from the user's profile:
bio = serializers.CharField(source='profile.bio', allow_blank=True)
name = serializers.CharField(
source='profile.name',
max_length=32,
allow_blank=True
)
avatar = serializers.URLField(source='profile.avatar', allow_blank=True)
status = serializers.CharField(
source='profile.status',
allow_blank=True,
default='',
min_length=0,
max_length=16
)
current_password = serializers.CharField(
write_only=True,
allow_blank=True,
label=_("Current Password"),
help_text=_('Required'),
)
new_password = serializers.CharField(
allow_blank=True,
default='',
write_only=True,
min_length=4,
max_length=32,
label=_("New Password"),
)
email = serializers.EmailField(
allow_blank=True,
default='',
validators=[UniqueValidator(
queryset=User.objects.all(),
message='has already been taken by other user'
)]
)
class Meta:
model = User
fields = (
'username',
'name',
'email',
'current_password',
'new_password',
'bio',
'avatar',
'status'
)
read_only_fields = ('username',)
lookup_field = 'username'
def update(self, instance, validated_data):
# make sure requesting user provide his current password
# e.g if admin 'endiliey' is updating a user 'donaldtrump',
# currentPassword must be 'endiliey' password instead of 'donaldtrump' password
try:
username = self.context.get('request').user.username
except:
msg = _('Must be authenticated')
raise serializers.ValidationError(msg, code='authorization')
password = validated_data.get('current_password')
validated_data.pop('current_password', None)
if not password:
msg = _('Must provide current password')
raise serializers.ValidationError(msg, code='authorization')
user = authenticate(request=self.context.get('request'),
username=username, password=password)
if not user:
msg = _('Sorry, the password you entered is incorrect.')
raise serializers.ValidationError(msg, code='authorization')
# change password to a new one if it exists
new_password = validated_data.get('new_password') or None
if new_password:
instance.set_password(new_password)
validated_data.pop('new_password', None)
# Update user profile fields
profile_data = validated_data.pop('profile', None)
profile = instance.profile
for field, value in profile_data.items():
if value:
setattr(profile, field, value)
# Update user fields
for field, value in validated_data.items():
if value:
setattr(instance, field, value)
profile.save()
instance.save()
return instance
class UserCreateSerializer(serializers.ModelSerializer):
# A field from the user's profile:
username = serializers.SlugField(
min_length=4,
max_length=32,
help_text=_(
'Required. 4-32 characters. Letters, numbers, underscores or hyphens only.'
),
validators=[UniqueValidator(
queryset=User.objects.all(),
message='has already been taken by other user'
)],
required=True
)
password = serializers.CharField(
min_length=4,
max_length=32,
write_only=True,
help_text=_(
'Required. 4-32 characters.'
),
required=True
)
email = serializers.EmailField(
required=True,
validators=[UniqueValidator(
queryset=User.objects.all(),
message='has already been taken by other user'
)]
)
bio = serializers.CharField(source='profile.bio', allow_blank=True, default='')
name = serializers.CharField(
source='profile.name',
allow_blank=True,
default='',
max_length=32
)
avatar = serializers.URLField(source='profile.avatar', allow_blank=True, default='')
status = serializers.CharField(
source='profile.status',
allow_blank=True,
max_length=16,
min_length=0,
default=''
)
class Meta:
model = User
fields = (
'username',
'name',
'email',
'password',
'bio',
'avatar',
'status'
)
def create(self, validated_data):
profile_data = validated_data.pop('profile', None)
username = validated_data['username']
email = validated_data['email']
password = validated_data['password']
user = User(
username = username,
email = email
)
user.set_password(password)
user.save()
avatar = profile_data.get('avatar') or None
if not avatar:
avatar = 'http://api.adorable.io/avatar/200/' + username
profile = UserProfile(
user = user,
bio = profile_data.get('bio', ''),
avatar = avatar,
name = profile_data.get('name', ''),
status = profile_data.get('status', 'Member')
)
profile.save()
return user
class UserTokenSerializer(serializers.Serializer):
username = serializers.CharField(label=_("Username"))
password = serializers.CharField(
label=_("Password"),
style={'input_type': 'password'},
trim_whitespace=False
)
def validate(self, attrs):
username = attrs.get('username')
password = attrs.get('password')
if username and password:
user = authenticate(request=self.context.get('request'),
username=username, password=password)
# The authenticate call simply returns None for is_active=False
# users. (Assuming the default ModelBackend authentication
# backend.)
if not user:
msg = _('Unable to log in with provided credentials.')
raise serializers.ValidationError(msg, code='authorization')
else:
msg = _('Must include "username" and "password".')
raise serializers.ValidationError(msg, code='authorization')
attrs['user'] = user
return attrs
class UserLoginSerializer(serializers.ModelSerializer):
username = serializers.SlugField(
max_length=32,
help_text=_(
'Required. 32 characters or fewer. Letters, numbers, underscores or hyphens only.'
),
required=True
)
token = serializers.CharField(allow_blank=True, read_only=True)
name = serializers.CharField(source='profile.name', read_only=True)
class Meta:
model = User
fields = [
'username',
'name',
'password',
'token',
]
extra_kwargs = {"password": {"write_only": True} }
|
from flask import Flask, request, jsonify
import json
import logging
import os
import random
import string
import sys
import sentencepiece as spm
import torch
import torchaudio
import numpy as np
from fairseq import options, progress_bar, utils, tasks
from fairseq.meters import StopwatchMeter, TimeMeter
from fairseq.utils import import_user_module
from fastai.basic_train import load_learner
import utils as ourutils
app = Flask(__name__)
import argparse
# import utils
# manually preparing argument
"""
Complete list of arguments for fairseq is as follows:
```
Namespace(beam=40, bpe=None, cpu=False, criterion='cross_entropy', ctc=False, data='./data',
    dataset_impl=None, diverse_beam_groups=-1, diverse_beam_strength=0.5, force_anneal=None,
    fp16=False, fp16_init_scale=128, fp16_scale_tolerance=0.0, fp16_scale_window=None,
    gen_subset='test', kspmodel=None, lenpen=1, lm_weight=0.2, log_format=None, log_interval=1000,
    lr_scheduler='fixed', lr_shrink=0.1, match_source_len=False, max_len_a=0, max_len_b=200,
    max_sentences=None, max_tokens=10000000, memory_efficient_fp16=False, min_len=1,
    min_loss_scale=0.0001, model_overrides='{}', momentum=0.99, nbest=1, no_beamable_mm=False,
    no_early_stop=False, no_progress_bar=False, no_repeat_ngram_size=0, num_shards=1,
    num_workers=1, optimizer='nag', path='./data/checkpoint_avg_60_80.pt', prefix_size=0,
    print_alignment=False, quiet=False, remove_bpe=None, replace_unk=None,
    required_batch_size_multiple=8, results_path=None, rnnt=False, rnnt_decoding_type='greedy',
    rnnt_len_penalty=-0.5, sacrebleu=False, sampling=False, sampling_topk=-1, sampling_topp=-1.0,
    score_reference=False, seed=1, shard_id=0, skip_invalid_size_inputs_valid_test=False,
    task='speech_recognition', tbmf_wrapper=False, temperature=1.0, tensorboard_logdir='',
    threshold_loss_scale=None, tokenizer=None, unkpen=0, unnormalized=False,
    user_dir='../fairseq/examples/speech_recognition', warmup_updates=0, weight_decay=0.0,
    wfstlm=None)
```
"""
# fix torch for fastai
def gesv(b, A, out=None):
return torch.solve(b, A, out=out)
torch.gesv = gesv
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--max-tokens', type=int, default=10000000)
parser.add_argument('--nbest', type=int, default=1)
parser.add_argument('--path', default='./data/checkpoint_avg_60_80.pt')
parser.add_argument('--beam', type=int, default=40)
parser.add_argument('--user_dir', default='../fairseq/examples/speech_recognition')
parser.add_argument('--task', default='speech_recognition')
parser.add_argument('--data', default='./data')
parser.add_argument('--model_overrides', default='')
parser.add_argument('--no_beamable_mm', default=False)
parser.add_argument('--print_alignment', default=False)
args = parser.parse_args()
print("ARGS:", args)
# 1. Load the model
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def optimize_models_asr(args, models):
"""Optimize ensemble for generation
"""
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
model.to(dev)
def load_model_asr(args):
# Load ensemble
logger.info("| loading model(s) from {}".format(args.path))
models_asr, _model_args = utils.load_ensemble_for_inference(
args.path.split(":"),
task,
model_arg_overrides={}
)
optimize_models_asr(args, models_asr)
# Initialize generator
generator = task.build_generator(args)
sp = spm.SentencePieceProcessor()
sp.Load(os.path.join(args.data, 'spm.model'))
return models_asr, sp, generator
def process_predictions_asr(args, hypos, sp, tgt_dict):
res = []
for hypo in hypos[: min(len(hypos), args.nbest)]:
hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
hyp_words = sp.DecodePieces(hyp_pieces.split())
res.append(hyp_words)
return res
def calc_mean_invstddev(feature):
if len(feature.shape) != 2:
raise ValueError("We expect the input feature to be 2-D tensor")
mean = np.mean(feature, axis=0)
var = np.var(feature, axis=0)
# avoid division by ~zero
    if (var < sys.float_info.epsilon).any():
return mean, 1.0 / (np.sqrt(var) + sys.float_info.epsilon)
return mean, 1.0 / np.sqrt(var)
def calcMN(features):
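    # CMVN-style normalisation: subtract the per-dimension mean and scale by the
    # inverse standard deviation of the fbank features.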
mean, invstddev = calc_mean_invstddev(features)
res = (features - mean) * invstddev
return res
def transcribe_asr(waveform, args, task, generator, models, sp, tgt_dict):
r"""
CUDA_VISIBLE_DEVICES=0 python infer_asr.py /Users/jamarshon/Documents/downloads/ \
--task speech_recognition --max-tokens 10000000 --nbest 1 --path \
/Users/jamarshon/Downloads/checkpoint_avg_60_80.pt --beam 20
"""
num_features = 80
output = torchaudio.compliance.kaldi.fbank(waveform, num_mel_bins=num_features)
output_cmvn = calcMN(output.cpu().detach().numpy())
# size (m, n)
source = torch.tensor(output_cmvn)
source = source.to(dev)
frames_lengths = torch.LongTensor([source.size(0)])
# size (1, m, n). In general, if source is (x, m, n), then hypos is (x, ...)
source.unsqueeze_(0)
sample = {'net_input': {'src_tokens': source, 'src_lengths': frames_lengths}}
hypos = task.inference_step(generator, models, sample)
assert len(hypos) == 1
transcription = []
print(hypos)
for i in range(len(hypos)):
# Process top predictions
hyp_words = process_predictions_asr(args, hypos[i], sp, tgt_dict)
transcription.append(hyp_words)
print('transcription:', transcription)
return transcription
# 2. Write a function for inference - wav file
def infer_asr(wav_file):
waveform, sample_rate = torchaudio.load_wav(wav_file)
waveform = waveform.mean(0, True)
waveform = torchaudio.transforms.Resample(orig_freq=sample_rate,new_freq=16000)(waveform)
print("waveform", waveform.shape)
import time
print(sample_rate, waveform.shape)
start = time.time()
tgt_dict = task.target_dictionary
transcription = transcribe_asr(waveform, args, task, generator, models_asr, sp, tgt_dict)
end = time.time()
print(end - start)
return transcription
def infer_classes(png_fname):
"""
XXX TODO
predict classes (background audio type or speech)
"""
# 1 as speech
from fastai.vision.image import open_image
classes = model_classes.predict(open_image(png_fname))
return classes
CLASS_LABELS = ['air_conditioner',
'car_horn',
'children_playing',
'dog_bark',
'drilling',
'engine_idling',
'gun_shot',
'jackhammer',
'siren',
'speech',
'street_music']
def infer_classes_by_short_image(wav_fname, png_fname):
"""
"""
# decide pipeline
speech_or_classes = infer_classes(png_fname)
print("infer_classes_by_short_image", speech_or_classes)
# feed corresponding network
SPEECH_CLASS = 9
CHILDREN_CLASS = 2
speech_or_classes_item = speech_or_classes[1].item()
if speech_or_classes_item == SPEECH_CLASS or speech_or_classes_item == CHILDREN_CLASS:
print("speech")
result = {'cls': 'speech', 'transcription': infer_asr(wav_fname)}
else:
print("background", speech_or_classes)
result = {'cls': CLASS_LABELS[speech_or_classes_item], 'transcription': ''}
# convert the result
return result
def test_convert_audio_image(wav_fname):
"""
test code
"""
filename, file_extension = os.path.splitext(wav_fname)
TEST_NFILE = 4
fnames = []
for i in range(TEST_NFILE):
(_wav_fname, png_fname) = filename + "_"+str(i) + ".wav", filename + "_"+ str(i) + ".png"
fnames.append((_wav_fname, png_fname))
from shutil import copyfile
# dummy copy for testing
copyfile(wav_fname, _wav_fname)
return fnames
dev = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# Load dataset splits
task = tasks.setup_task(args)
# model for asr
models_asr, sp, generator = load_model_asr(args)
MODEL_CLASSES_PATH = '../../audio_classification/data/mixed/'
# model for classifying types, speech or classes
model_classes = load_learner(MODEL_CLASSES_PATH)
# 3. Define a route for inference, asr only
@app.route('/transcribe_asr', methods=['POST'])
def transcribe_asr_route():
# Get the file out from the request
print(request.files)
print('wav_file', request.files['audio'])
wav_file = request.files['audio']
wav_file_name = './tmp/' + wav_file.filename
wav_file.save(wav_file_name)
print("wav_file_name", wav_file_name)
transcription = infer_asr(wav_file_name)
print("translated transcript>>", transcription)
# Do the inference, get the result
# Return json
return jsonify({'success': True, 'transcription': transcription[0][0]})
# 4. Define a route for inference, mixed
@app.route('/transcribe', methods=['POST'])
def transcribe_route():
# Get the file out from the request
print(request.files)
print('wav_file', request.files['audio'])
wav_file = request.files['audio']
wav_file_name = './tmp/' + wav_file.filename
wav_file.save(wav_file_name)
#short_wav_png_fnames = test_convert_audio_image(wav_file_name)
short_wav_png_fnames = ourutils.convert_audio_image(wav_file_name)
print(short_wav_png_fnames)
    results = [infer_classes_by_short_image(wav_fname, png_fname) for (wav_fname, png_fname) in short_wav_png_fnames]
return jsonify({'success': True, 'result': results})
@app.route('/')
def hello_world():
return 'Hello, World!'
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8090)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 20 14:19:54 2021
@author: gregstacey
"""
import os
import pandas as pd
import numpy as np
import sys
from itertools import chain
from rdkit import Chem
from tqdm import tqdm
if os.path.isdir("~/git/bespoke-deepgen"):
git_dir = os.path.expanduser("~/git/bespoke-deepgen")
elif os.path.isdir("/Users/gregstacey/Academics/Foster/Metabolomics/bespoke-deepgen"):
git_dir = os.path.expanduser("~/Academics/Foster/Metabolomics/bespoke-deepgen")
elif os.path.isdir("/scratch/st-ljfoster-1/staceyri/bespoke-deepgen"):
git_dir = os.path.expanduser("/scratch/st-ljfoster-1/staceyri/bespoke-deepgen")
python_dir = git_dir + "/python"
os.chdir(python_dir)
sys.path.append(python_dir)
# import functions
from functions import clean_mols, remove_salts_solvents, read_smiles, \
NeutraliseCharges
# import Vocabulary
from datasets import Vocabulary
# read full hmdb
data = pd.read_csv('../data/hmdb/20200730_hmdb_classifications-canonical.csv.gz')
data = data.dropna(subset = ["canonical_smiles"]) # remove NA
# write smiles files
# full
sm = pd.DataFrame({'canonical_smiles': data["canonical_smiles"].unique()})
sm.to_csv("../data/hmdb/hmdb.smi", index = False, columns = ["canonical_smiles"], header = False)
# one SMILES file per value of each taxonomy level (kingdom, superklass, klass)
for level in ["kingdom", "superklass", "klass"]:
    unq = data[level].dropna().unique()
    out_dir = "../data/hmdb/" + level + "/"
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    for value in unq:
        ss = ''.join(e for e in value if e.isalnum())
        fn = out_dir + ss + '.smi'
        sm = data[data[level] == value]
        sm = pd.DataFrame({'canonical_smiles': sm["canonical_smiles"].unique()})
        sm.to_csv(fn, index=False, columns=["canonical_smiles"], header=False)
|
import numpy as np
from scipy import spatial
import matplotlib.pyplot as plt
class KMeansClustering():
'''this implimentation is going to find the distances between
each point and every other point, then get the average distance, and use the minimums
to cluster. As I'm writing this I'm realizing that this will just get the main center
not all the centers. I am however curious to see what this produces.'''
def __init__(self, k_means):
self.k = k_means # the number of clusters to build
self.data = None # will hold the raw data
self.distances = [] # will hold the distance matrix
def fit(self, X):
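        # Lloyd-style iteration with medoid updates: pick k random observations as
        # initial centers, assign every point to its nearest center, then replace
        # each center with the cluster member closest (on average) to its cluster.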
self.data = X.copy()
c_ids = np.random.randint(self.data.shape[0], size=self.k)
self.centers = self.data[c_ids]
previous = np.random.rand(self.k, 2)
        while not np.array_equal(previous, self.centers):
# set previous to know when to stop
previous = self.centers.copy()
# get distances from the centers
distances = spatial.distance_matrix(self.data, self.centers)
# assign all observations to a center
self.assignments = np.argmin(distances, axis=1)
# calulate means based on clusters
for i in range(self.k):
_, means = self.get_distances(self.data[self.assignments == i])
# update the center with the new mean
self.centers[i] = self.data[self.assignments == i][np.argmin(means)]
def predict(self, y):
        if self.data is None:
raise AttributeError('Please Call Fit before Predict')
dists = spatial.distance_matrix(self.centers, y)
cent = np.argmin(dists)
return cent
def get_distances(self, X):
'''this little helper function builds out the
distances matrix'''
distances = []
for x in X:
y = spatial.distance_matrix(X, x.reshape(1, 2))
distances.append(np.squeeze(y))
distances = np.array(distances)
return distances, distances.mean(0)
def plot(self):
x, y = self.data.T
cx, cy = self.centers.T
plt.scatter(x, y, c=self.assignments)
plt.scatter(cx, cy, c='red')
plt.savefig('graph.png')
if __name__ == '__main__':
X = np.random.rand(500, 2)*100
y = np.random.rand(1, 2) * 100
clusters = KMeansClustering(4)
clusters.fit(X)
print(clusters.predict(y))
clusters.plot()
|
import nose
from helpers import parse_file, parse_string, check_visitor_return
from src.Generic import Generic
def test_sanity():
parse_file('./assets/empty.vhd')
parse_string('', 'design_file')
nose.tools.ok_(True)
def test_entity_declaration_empty():
entity = 'entity Adder is end entity Adder;'
visitor = parse_string(entity, 'design_file', True)
nose.tools.eq_(visitor.entities[0].name, 'Adder')
def test_generic_declaration():
generic = 'kDepth : natural := 8'
check_visitor_return(generic, 'interface_constant_declaration',
[Generic('kDepth', 'natural', '8')])
def test_abstract_literal():
# ignoring BASE_LITERAL option for now
check_visitor_return('22', 'abstract_literal', '22')
check_visitor_return('22.33', 'abstract_literal', '22.33')
def test_numeric_literal():
# ignoring PHYSICAL_LITERAL option for now
check_visitor_return('52.3', 'numeric_literal', '52.3')
def test_literal():
check_visitor_return('nULl', 'literal', 'null')
check_visitor_return('b1010', 'literal', 'b1010')
check_visitor_return('B"1010"', 'literal', 'b"1010"')
check_visitor_return('1019', 'literal', '1019')
check_visitor_return('hC1A1F', 'literal', 'hc1a1f')
check_visitor_return('xAABB', 'literal', 'xaabb')
def test_enumeration_literal():
check_visitor_return('this', 'enumeration_literal', 'this')
check_visitor_return("'l'", 'enumeration_literal', "'l'")
def test_primary():
check_visitor_return('b1011', 'primary', 'b1011')
def test_direction():
check_visitor_return('to', 'direction', 'to')
check_visitor_return('DOWNTO', 'direction', 'downto')
def test_shift_operator():
operators = ['SLL', 'srl', 'SLa', 'sra', 'ROR', 'ROL']
results = ['sll', 'srl', 'sla', 'sra', 'ror', 'rol']
for (op, res) in zip(operators, results):
check_visitor_return(op, 'shift_operator', res)
def test_relational_operator():
operators = ['=', '/=', '<', '<=', '>', '>=']
results = ['=', '/=', '<', '<=', '>', '>=']
for (op, res) in zip(operators, results):
check_visitor_return(op, 'relational_operator', res)
def test_logical_operator():
operators = ['and', 'OR', 'nand', 'NoR', 'XOR', 'xnor']
results = ['and', 'or', 'nand', 'nor', 'xor', 'xnor']
for (op, res) in zip(operators, results):
check_visitor_return(op, 'logical_operator', res)
def test_adding_operator():
operators = ['+', '-', '&']
results = ['+', '-', '&']
for (op, res) in zip(operators, results):
check_visitor_return(op, 'adding_operator', res)
def test_multiplying_operator():
operators = ['*', '/', 'mod', 'REM']
results = ['*', '/', 'mod', 'rem']
for (op, res) in zip(operators, results):
check_visitor_return(op, 'multiplying_operator', res)
|
from qgis.core import QgsProcessingProvider
# from ndvi import Calculate_ndvi
from ndvi_raster_calculator import Calculate_ndvi
from split_band import Split_bands
class NDVIProvider(QgsProcessingProvider):
def loadAlgorithms(self, *args, **kwargs):
self.addAlgorithm(Calculate_ndvi())
self.addAlgorithm(Split_bands())
# self.addAlgorithm(MyOtherAlgorithm())
def id(self, *args, **kwargs):
"""The ID of your plugin, used for identifying the provider.
This string should be a unique, short, character only string,
eg "qgis" or "gdal". This string should not be localised.
"""
return 'uas'
def name(self, *args, **kwargs):
"""The human friendly name of your plugin in Processing.
This string should be as short as possible (e.g. "Lastools", not
"Lastools version 1.0.1 64-bit") and localised.
"""
return 'UAS Class 2019'
def icon(self):
"""Should return a QIcon which is used for your provider inside
the Processing toolbox.
"""
return QgsProcessingProvider.icon(self)
|
import gym
from d3rlpy.algos import SAC
from d3rlpy.envs import AsyncBatchEnv
from d3rlpy.online.buffers import BatchReplayBuffer
if __name__ == '__main__':
env = AsyncBatchEnv([lambda: gym.make('Pendulum-v0') for _ in range(10)])
eval_env = gym.make('Pendulum-v0')
# setup algorithm
sac = SAC(batch_size=100, use_gpu=False)
# replay buffer for experience replay
buffer = BatchReplayBuffer(maxlen=100000, env=env)
# start training
sac.fit_batch_online(env,
buffer,
n_epochs=100,
eval_interval=1,
eval_env=eval_env,
n_steps_per_epoch=1000,
n_updates_per_epoch=1000)
|
from brownie import accounts, web3, Wei, reverts
from brownie.network.transaction import TransactionReceipt
from brownie.convert import to_address
import pytest
from brownie import Contract
ZERO_ADDRESS = '0x0000000000000000000000000000000000000000'
# reset the chain after every test case
@pytest.fixture(autouse=True)
def isolation(fn_isolation):
pass
######################################
# Owned
######################################
def test_token_factory_owned(token_factory):
assert token_factory.owner({'from': accounts[0]}) == accounts[0]
def test_token_factory_transferOwnership(token_factory):
tx = token_factory.transferOwnership(accounts[1], {'from': accounts[0]})
tx = token_factory.acceptOwnership( {'from': accounts[1]})
assert 'OwnershipTransferred' in tx.events
assert tx.events['OwnershipTransferred'] == {'from': accounts[0], 'to': accounts[1]}
with reverts():
token_factory.transferOwnership(accounts[1], {'from': accounts[0]})
######################################
# Factory Tests
######################################
def test_token_factory_numberOfChildren(token_factory):
assert token_factory.numberOfChildren() == 4
def test_token_factory_deployFrameTokenContract(token_factory):
name = 'Test Frame Token'
symbol = 'TFT'
mintable = True
transferable = True
initial_supply = '10 ether'
tx = token_factory.deployFrameToken(accounts[0],symbol, name,
initial_supply,mintable,transferable,{'from': accounts[1]})
assert 'FrameTokenDeployed' in tx.events
assert token_factory.numberOfChildren() == 5
tx = token_factory.deployFrameToken(accounts[0],symbol, name,
initial_supply,mintable,transferable,
{'from': accounts[1], 'value':'0.1 ether'})
assert 'FrameTokenDeployed' in tx.events
assert token_factory.numberOfChildren() == 6
def test_token_factory_deploy_below_fee(token_factory):
name = 'Test Frame Token'
symbol = 'TFT'
mintable = True
transferable = True
initial_supply = '10 ether'
tx = token_factory.setMinimumFee('0.2 ether', {'from': accounts[0]})
with reverts():
token_factory.deployFrameToken( accounts[0],symbol, name,
initial_supply,mintable,transferable, {'from': accounts[1], 'value':'0.1 ether'})
|
#!/usr/bin/env python
'''
Kick off the napalm-logs engine.
'''
# Import python stdlib
import os
import sys
import time
import signal
import logging
import optparse
# Import third party libs
import yaml
# Import napalm-logs
import napalm_logs
import napalm_logs.utils
import napalm_logs.config as defaults
import napalm_logs.ext.six as six
log = logging.getLogger(__name__)
class CustomOption(optparse.Option, object):
def take_action(self, action, dest, *args, **kwargs):
# see https://github.com/python/cpython/blob/master/Lib/optparse.py#L786
self.explicit = True
return optparse.Option.take_action(self, action, dest, *args, **kwargs)
class OptionParser(optparse.OptionParser, object):
VERSION = napalm_logs.__version__
usage = 'napalm-logs [options]'
epilog = 'Full documentation coming soon.'
description = 'napalm-logs CLI script.'
def __init__(self, *args, **kwargs):
kwargs.setdefault('version', '%prog {0}'.format(self.VERSION))
kwargs.setdefault('usage', self.usage)
kwargs.setdefault('description', self.description)
kwargs.setdefault('epilog', self.epilog)
kwargs.setdefault('option_class', CustomOption)
optparse.OptionParser.__init__(self, *args, **kwargs)
self.add_option(
'-v',
action='store_true',
dest='version',
help='Show version number and exit.'
)
def add_option_group(self, *args, **kwargs):
option_group = optparse.OptionParser.add_option_group(self, *args, **kwargs)
option_group.option_class = CustomOption
return option_group
def parse_args(self, args=None, values=None):
options, args = optparse.OptionParser.parse_args(self, args, values)
if 'args_stdin' in options.__dict__ and options.args_stdin is True:
new_inargs = sys.stdin.readlines()
new_inargs = [arg.rstrip('\r\n') for arg in new_inargs]
new_options, new_args = optparse.OptionParser.parse_args(
self,
new_inargs)
options.__dict__.update(new_options.__dict__)
args.extend(new_args)
self.options, self.args = options, args
def print_version(self):
print('napalm-logs {0}'.format(self.VERSION))
class NLOptionParser(OptionParser, object):
def prepare(self):
self.add_option(
'-c', '--config-file',
dest='config_file',
help=('Config file absolute path. Default: {0}'.format(defaults.CONFIG_FILE))
)
# self.add_option(
# '-d', '--daemon',
# default=True,
# dest='daemon',
# action='store_true',
# help='Run the {0} as a daemon. Default: %default'.format(self.get_prog_name())
# )
self.add_option(
'-a', '--address',
dest='address',
help=('Listener address. Default: {0}'.format(defaults.ADDRESS))
)
self.add_option(
'--config-path',
dest='config_path',
help=('Device config path.')
)
self.add_option(
'--extension-config-path',
dest='extension_config_path',
help=('Extension config path.')
)
self.add_option(
'-p', '--port',
dest='port',
type=int,
help=('Listener bind port. Default: {0}'.format(defaults.PORT))
)
self.add_option(
'--listener',
dest='listener',
help=('Listener type. Default: {0}'.format(defaults.LISTENER))
)
self.add_option(
'-t', '--transport',
dest='transport',
help=('Publish transport. Default: {0}'.format(defaults.PUBLISHER))
)
self.add_option(
'--publisher',
dest='publisher',
help=('Publish transport. Default: {0}'.format(defaults.PUBLISHER))
)
self.add_option(
'--publish-address',
dest='publish_address',
help=('Publisher bind address. Default: {0}'.format(defaults.PUBLISH_ADDRESS))
)
self.add_option(
'--publish-port',
dest='publish_port',
type=int,
help=('Publisher bind port. Default: {0}'.format(defaults.PUBLISH_PORT))
)
self.add_option(
'--auth-address',
dest='auth_address',
help=('Authenticator bind address. Default: {0}'.format(defaults.AUTH_ADDRESS))
)
self.add_option(
'--auth-port',
dest='auth_port',
type=int,
help=('Authenticator bind port. Default: {0}'.format(defaults.AUTH_PORT))
)
self.add_option(
'--certificate',
dest='certificate',
help=('Absolute path to the SSL certificate used for client authentication.')
)
self.add_option(
'--keyfile',
dest='keyfile',
help=('Absolute path to the SSL keyfile')
)
self.add_option(
'--disable-security',
dest='disable_security',
action="store_true",
default=False,
help=('Disable encryption and data signing when publishing.')
)
self.add_option(
'-l', '--log-level',
dest='log_level',
help=('Logging level. Default: {0}'.format(defaults.LOG_LEVEL))
)
self.add_option(
'--log-file',
dest='log_file',
help=('Logging file. Default: {0}'.format(defaults.LOG_FILE))
)
self.add_option(
'--log-format',
dest='log_format',
help=('Logging format. Default: {0}'.format(defaults.LOG_FORMAT))
)
def convert_env_dict(self, d):
for k, v in d.items():
            if isinstance(v, six.string_types):
if not v.startswith('${') or not v.endswith('}'):
continue
if not os.environ.get(v[2:-1]):
log.error('No env variable found for %s, please check your config file', v[2:-1])
sys.exit(1)
d[k] = os.environ[v[2:-1]]
if isinstance(v, dict):
self.convert_env_dict(v)
if isinstance(v, list):
self.convert_env_list(v)
def convert_env_list(self, l):
for n, v in enumerate(l):
            if isinstance(v, six.string_types):
if not v.startswith('${') or not v.endswith('}'):
continue
if not os.environ.get(v[2:-1]):
log.error('No env variable found for %s, please check your config file', v[2:-1])
sys.exit(1)
l[n] = os.environ[v[2:-1]]
if isinstance(v, dict):
self.convert_env_dict(v)
if isinstance(v, list):
self.convert_env_list(v)
def read_config_file(self, filepath):
config = {}
try:
with open(filepath, 'r') as fstream:
                config = yaml.safe_load(fstream)
except (IOError, yaml.YAMLError):
log.info('Unable to read from %s', filepath)
# Convert any env variables
self.convert_env_dict(config)
return config
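    # Illustrative note (added sketch, not part of the original module): the two
    # convert_env_* helpers above substitute ${VAR}-style values. Given a config
    # file containing, e.g. (keys below are hypothetical examples):
    #
    #   publisher: kafka
    #   publisher_opts:
    #     bootstrap_servers: ${KAFKA_SERVERS}
    #
    # read_config_file() replaces ${KAFKA_SERVERS} with os.environ['KAFKA_SERVERS'],
    # or logs an error and exits if that environment variable is not set.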
def parse(self, log, screen_handler):
self.prepare()
self.parse_args()
if self.options.version:
self.print_version()
sys.exit(1)
config_file_path = self.options.config_file or defaults.CONFIG_FILE
file_cfg = self.read_config_file(config_file_path)
log_file = self.options.log_file or file_cfg.get('log_file') or defaults.LOG_FILE
log_lvl = self.options.log_level or file_cfg.get('log_level') or defaults.LOG_LEVEL
log_fmt = self.options.log_format or file_cfg.get('log_format') or defaults.LOG_FORMAT
if log_file.lower() not in defaults.LOG_FILE_CLI_OPTIONS:
log_file_dir = os.path.dirname(log_file)
if not os.path.isdir(log_file_dir):
log.warning('%s does not exist, trying to create', log_file_dir)
try:
os.mkdir(log_file_dir)
except OSError:
log.error('Unable to create %s', log_file_dir, exc_info=True)
sys.exit(0)
log.removeHandler(screen_handler) # remove printing to the screen
logging.basicConfig(filename=log_file,
level=defaults.LOGGING_LEVEL.get(log_lvl.lower(), 'warning'),
                                format=log_fmt)  # log to file
cert = self.options.certificate or file_cfg.get('certificate')
disable_security = self.options.disable_security or file_cfg.get('disable_security', False)
if not cert and disable_security is False:
log.error('certfile must be specified for server-side operations')
raise ValueError('Please specify a valid SSL certificate.')
# For each module we need to merge the defaults with the
# config file, but prefer the config file
listener_opts = defaults.LISTENER_OPTS
logger_opts = defaults.LOGGER_OPTS
publisher_opts = defaults.PUBLISHER_OPTS
device_whitelist = file_cfg.get('device_whitelist', [])
device_blacklist = file_cfg.get('device_blacklist', [])
listener = defaults.LISTENER
if self.options.listener:
listener = self.options.listener
elif file_cfg.get('listener'):
listener_cfg = file_cfg.get('listener')
if isinstance(listener_cfg, dict):
listener = list(listener_cfg.keys())[0]
log.debug('Using the %s listener from the config file', listener)
# TODO later we could allow the possibility to start multiple listeners
listener_opts_cfg = file_cfg.get('listener_opts', {})
# Merging the listener opts under the listener
# with the general opts under the listener_opts key
listener_opts = napalm_logs.utils.dictupdate(listener_cfg[listener],
listener_opts_cfg)
log.debug('Listener opts from the config file:')
log.debug(listener_opts)
            elif isinstance(listener_cfg, six.string_types):
listener = listener_cfg
logger = defaults.LOGGER
if file_cfg.get('logger'):
logger_cfg = file_cfg.get('logger')
if isinstance(logger_cfg, dict):
logger = list(logger_cfg.keys())[0]
log.debug('Using the %s logger from the config file', logger)
# TODO later we could allow the possibility to start multiple loggers
logger_opts_cfg = file_cfg.get('logger_opts', {})
# Merging the logger opts under the logger
# with the general opts under the logger_opts key
logger_opts = napalm_logs.utils.dictupdate(logger_cfg[logger],
logger_opts_cfg)
log.debug('Logger opts from the config file:')
log.debug(logger_opts)
            elif isinstance(logger_cfg, six.string_types):
logger = logger_cfg
publisher = defaults.PUBLISHER
if self.options.publisher or self.options.transport:
publisher = self.options.publisher or self.options.transport
elif file_cfg.get('publisher') or file_cfg.get('transport'):
publisher_cfg = file_cfg.get('publisher') or file_cfg.get('transport')
if isinstance(publisher_cfg, dict):
publisher = list(publisher_cfg.keys())[0]
log.debug('Using the %s publisher from the config file', publisher)
# TODO later we could allow the possibility to start multiple publishers
publisher_opts_cfg = file_cfg.get('publisher_opts', {})
# Merging the publisher opts under the publisher
# with the general opts under the publisher_opts key
publisher_opts = napalm_logs.utils.dictupdate(publisher_cfg[publisher],
publisher_opts_cfg)
log.debug('Publisher opts from the config file:')
log.debug(publisher_opts)
            elif isinstance(publisher_cfg, six.string_types):
publisher = publisher_cfg
cfg = {
'address': self.options.address or file_cfg.get('address') or defaults.ADDRESS,
'port': self.options.port or file_cfg.get('port') or defaults.PORT,
'listener': listener,
'transport': publisher,
'publish_address': self.options.publish_address or file_cfg.get('publish_address') or
defaults.PUBLISH_ADDRESS, # noqa
'publish_port': self.options.publish_port or file_cfg.get('publish_port') or
defaults.PUBLISH_PORT, # noqa
'auth_address': self.options.auth_address or file_cfg.get('auth_address') or
defaults.AUTH_ADDRESS, # noqa
'auth_port': self.options.auth_port or file_cfg.get('auth_port') or
defaults.AUTH_PORT,
'certificate': cert,
'keyfile': self.options.keyfile or file_cfg.get('keyfile'),
'disable_security': disable_security,
'config_path': self.options.config_path or file_cfg.get('config_path'),
'extension_config_path': self.options.extension_config_path or file_cfg.get('extension_config_path'),
'log_level': log_lvl,
'log_format': log_fmt,
'listener_opts': listener_opts,
'logger': logger,
'logger_opts': logger_opts,
'publisher_opts': publisher_opts,
'device_whitelist': device_whitelist,
'device_blacklist': device_blacklist
}
return cfg
def _exit_gracefully(signum, _):
'''
Called when a signal is caught and marks exiting variable True
'''
global _up
_up = False
_up = True
def napalm_logs_engine():
if '' in sys.path:
sys.path.remove('')
# Temporarily will forward the log entries to the screen
# After reading the config and all good, will write into the right
# log file.
screen_logger = logging.StreamHandler(sys.stdout)
screen_logger.setFormatter(logging.Formatter(defaults.LOG_FORMAT))
log.addHandler(screen_logger)
nlop = NLOptionParser()
config = nlop.parse(log, screen_logger)
# Ignore SIGINT whilst starting child processes so they inherit the ignore
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
nl = napalm_logs.NapalmLogs(**config)
nl.start_engine()
# Set SIGINT to _exit_gracefully so we can close everything down gracefully
signal.signal(signal.SIGINT, _exit_gracefully)
signal.signal(signal.SIGTERM, _exit_gracefully)
# Keep this function running until we receive instruction to terminate
while _up is True and nl.up is True:
time.sleep(1)
nl.stop_engine()
if __name__ == '__main__':
napalm_logs_engine()
|
# -*- coding: utf-8 -*-
import tempfile
import contextlib
from datetime import datetime, timedelta
from dateutil.tz import tzlocal
from gevent import monkey
monkey.patch_all()
import os.path
import json
from gevent.subprocess import check_output, sleep
PAUSE_SECONDS = timedelta(seconds=120)
PWD = os.path.dirname(os.path.realpath(__file__))
CWD = os.getcwd()
@contextlib.contextmanager
def update_auctionPeriod(path):
with open(path) as file:
data = json.loads(file.read())
new_start_time = (datetime.now(tzlocal()) + PAUSE_SECONDS).isoformat()
data['data']['auctionPeriod']['startDate'] = new_start_time
with tempfile.NamedTemporaryFile(delete=False) as auction_file:
json.dump(data, auction_file)
auction_file.seek(0)
yield auction_file.name
auction_file.close()
def run_texas(tender_file_path):
with open(tender_file_path) as _file:
auction_id = json.load(_file).get('data', {}).get('id')
if auction_id:
with update_auctionPeriod(tender_file_path) as auction_file:
check_output(TESTS['texas']['worker_cmd'].format(CWD, auction_id, auction_file).split())
sleep(1)
TESTS = {
"texas": {
"worker_cmd": '{0}/bin/auction_texas planning {1}'
' {0}/etc/auction_worker_texas.yaml'
' --planning_procerude partial_db --auction_info {2}',
"runner": run_texas,
'auction_worker_defaults': 'auction_worker_defaults:{0}/etc/auction_worker_defaults.yaml',
'suite': PWD
}
}
def includeme(tests):
tests.update(TESTS)
|
from os import path
from setuptools import setup
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='gostep',
long_description=long_description,
long_description_content_type='text/markdown',
scripts=[
'./bin/gostep'
],
version='0.1.0-beta10',
description='Serverless Templates Provider for Google Cloud platform',
url='https://github.com/codimite/gostep',
author='Lahiru Pathirage',
author_email='lahiru@codimite.com',
license='MIT',
packages=['gostep'],
zip_safe=False,
install_requires=[
'svn',
'google-cloud-storage',
'google-api-python-client',
'google-auth-httplib2',
'google-auth-oauthlib',
'oauth2client',
'pyyaml'
]
)
|
"""This module contains the TerminalView."""
import sys
class TerminalView():
def __init__(self):
pass
def announce_database_truncated(self, truncated):
if truncated:
print('Truncating HDF5 database ...')
else:
print('Reusing HDF5 database ...')
def announce_configuration_import(self):
print('Importing configuration from YAML into HDF5 database ...')
def announce_configuration_source(self, source):
print('Loading configuration from: {} ...'.format(source))
def announce_sampling_skipped(self):
print('Running samplers was skipped by user')
def announce_training_skipped(self):
print('Running trainers was skipped by user')
def announce_sampler_status(self, sid, status):
print('Current status of sampler {} is: {}'.format(sid, status))
def announce_trainer_status(self, tid, status):
print('Current status of trainer {} is: {}'.format(tid, status))
def announce_sampling_error(self, msg):
print(msg, file=sys.stderr)
def announce_training_error(self, msg):
print(msg, file=sys.stderr)
def present_results(self, results):
print('\n')
for surrogate_name, surrogate_result in results.items():
print('%s:' % (surrogate_name))
for result in surrogate_result.training_results:
func = '%s --> %s' % (result.metamodel.input_names,
result.metamodel.response_names)
score_r2 = 'r2_score=%f' % result.score_r2
score_mae = 'mae_score=%f' % result.score_mae
score_hae = 'hae_score=%f' % result.score_hae
score_mse = 'mse_score=%f' % result.score_mse
mtype = '(%s)' % result.metamodel.__class__.__name__
print(' -', func, ':', score_r2, score_hae,
score_mae, score_mse, mtype)
|
import json
from pprint import pprint
with open('plant_results_loop.json') as data_file:
data = json.load(data_file)
count = 0
for plant in data:
pprint(plant)
print " "
print " "
# print "*************{}*************".format(count)
# if 'value' in plant:
# for val in plant['value']:
# print val
# print " "
# print " "
# print '*' * 60
# count += 1
# print data[0]
|
from test_utils import run_query, redshift_connector
import pytest
def test_tochildren_success():
results = run_query(
"""WITH tileContext AS(
SELECT 0 AS zoom, 0 AS tileX, 0 AS tileY UNION ALL
SELECT 1, 1, 1 UNION ALL
SELECT 2, 2, 3 UNION ALL
SELECT 3, 4, 5 UNION ALL
SELECT 4, 6, 8 UNION ALL
SELECT 5, 10, 20 UNION ALL
SELECT 6, 40, 50 UNION ALL
SELECT 7, 80, 90 UNION ALL
SELECT 8, 160, 170 UNION ALL
SELECT 9, 320, 320 UNION ALL
SELECT 10, 640, 160 UNION ALL
SELECT 11, 1280, 640 UNION ALL
SELECT 12, 2560, 1280 UNION ALL
SELECT 13, 5120, 160 UNION ALL
SELECT 14, 10240, 80 UNION ALL
SELECT 15, 20480, 40 UNION ALL
SELECT 16, 40960, 80 UNION ALL
SELECT 17, 81920, 160 UNION ALL
SELECT 18, 163840, 320 UNION ALL
SELECT 19, 327680, 640 UNION ALL
SELECT 20, 163840, 1280 UNION ALL
SELECT 21, 81920, 2560 UNION ALL
SELECT 22, 40960, 5120 UNION ALL
SELECT 23, 20480, 10240 UNION ALL
SELECT 24, 10240, 20480 UNION ALL
SELECT 25, 5120, 40960 UNION ALL
SELECT 26, 2560, 81920 UNION ALL
SELECT 27, 1280, 163840 UNION ALL
SELECT 28, 640, 327680
),
quadintContext AS
(
SELECT *,
@@RS_PREFIX@@quadkey.QUADINT_FROMZXY(zoom, tileX, tileY) AS quadint
FROM tileContext
)
SELECT @@RS_PREFIX@@quadkey.TOCHILDREN(quadint, zoom + 1) AS children
FROM quadintContext;"""
)
fixture_file = open('./test/integration/tochildren_fixtures/out/quadints.txt', 'r')
lines = fixture_file.readlines()
fixture_file.close()
for idx, result in enumerate(results):
assert result[0] == lines[idx].rstrip()
def test_tochildren_wrong_zoom_failure():
with pytest.raises(redshift_connector.error.ProgrammingError) as excinfo:
run_query('SELECT @@RS_PREFIX@@quadkey.TOCHILDREN(4611686027017322525, 30)')
assert 'Wrong quadint zoom' in str(excinfo.value)
def test_tochildren_null_failure():
with pytest.raises(redshift_connector.error.ProgrammingError) as excinfo:
run_query('SELECT @@RS_PREFIX@@quadkey.TOCHILDREN(NULL, 1)')
assert 'NULL argument passed to UDF' in str(excinfo.value)
with pytest.raises(redshift_connector.error.ProgrammingError) as excinfo:
run_query('SELECT @@RS_PREFIX@@quadkey.TOCHILDREN(322, NULL)')
assert 'NULL argument passed to UDF' in str(excinfo.value)
|
#!/usr/bin/env python
import os
import os.path
import sys
import csv
def isexec (fpath):
if fpath == None: return False
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def which(program):
fpath, fname = os.path.split(program)
if fpath:
if isexec (program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if isexec (exe_file):
return exe_file
return None
def parseArgs (argv):
import argparse as a
p = a.ArgumentParser (description='Benchmark Runner')
p.add_argument ('--cpu', metavar='CPU',
type=int, help='CPU limit', default=3000)
p.add_argument ('--mem', metavar='MEM',
type=int, help='Memory limit (MB)', default=2048)
p.add_argument ('--bench', metavar='BENCH',
required=True,
help='File specifies paths to benchmark files')
p.add_argument ('--prefix', default='BRUNCH_STAT',
help='Prefix for stats')
p.add_argument ('--format', required=True, help='Fields')
p.add_argument ('--out', metavar='DIR',
default='brunch.out', help='Output directory')
if '-h' in argv or '--help' in argv:
p.print_help ()
p.exit (0)
try:
k = argv.index ('--')
except ValueError:
p.error ("No '--' argument")
args = p.parse_args (argv[:k])
args.tool_args = argv[k+1:]
return args
def collectStats (stats, file):
f = open (file, 'r')
for line in f:
if not line.startswith ('[STATS]'): continue
fld = line.split (' ')
if len(fld) < 3:
continue
stats [fld[1]] = fld[3].strip ()
f.close ()
return stats
def statsHeader (stats_file, flds):
with open (stats_file, 'w') as sf:
writer = csv.writer (sf)
writer.writerow (flds)
def statsLine (stats_file, fmt, stats):
line = list()
for fld in fmt:
if fld in stats: line.append (str (stats [fld]))
else: line.append (None)
with open (stats_file, 'a') as sf:
writer = csv.writer (sf)
writer.writerow (line)
cpuTotal = 0.0
def runTool (tool_args, f, out, cpu, mem, fmt):
global cpuTotal
import resource as r
def set_limits ():
if mem > 0:
mem_bytes = mem * 1024 * 1024
r.setrlimit (r.RLIMIT_AS, [mem_bytes, mem_bytes])
if cpu > 0:
r.setrlimit (r.RLIMIT_CPU, [cpu, cpu])
fmt_tool_args = [v.format(f=f) for v in tool_args]
fmt_tool_args[0] = which (fmt_tool_args[0])
base = os.path.basename (f)
print '[BASE] ' + base
print '[OUT]' + out
outfile = os.path.join (out, base + '.stdout')
errfile = os.path.join (out, base + '.stderr')
import subprocess as sub
benchmarks = open(f)
for line in benchmarks:
if line.startswith('endCommit'):
endcommit = line.replace('endCommit = ', '').strip()
elif line.startswith('classRoot'):
            classroot = line.replace('classRoot = ', '').strip()
elif line.startswith('repoPath'):
            repopath = line.replace('repoPath = ', '').replace('/.git', '').strip()
elif line.startswith('testScope'):
testscope = line.replace('testScope = ', '').strip()
print repopath
print endcommit
print classroot
print testscope
git = which('git')
#print git
os.chdir(repopath)
sub.call ([git, 'stash'])
sub.call ([git, 'checkout', endcommit])
project = repopath.split('/')[len(repopath.split('/')) - 1]
print '[PROJECT]: ' + project
mvn = which('mvn')
if project == 'hadoop':
sub.call ([mvn, 'clean'])
os.chdir(repopath + '/hadoop-maven-plugins')
sub.call ([mvn, 'install'])
os.chdir(repopath)
sub.call ([mvn, 'compile'])
sub.call ([mvn, 'test', '-Dtest=' + testscope])
elif project == 'repo':
if '#' in testscope:
testclass = testscope.split('#')[0]
testmethod = testscope.split('#')[1]
print 'TESTCLASS:' + testclass
print 'TESTMETHOD:' + testmethod
sub.call ([mvn, 'clean', 'compile'])
sub.call ([mvn, 'test', '-Dtests.class=' + testclass, '-Dtests.method=' + testmethod])
else:
testclass = testscope
sub.call ([mvn, 'clean', 'compile'])
sub.call ([mvn, 'test', '-Dtests.class=' + testclass])
elif project == 'maven':
submodule = classroot.split('/')[len(classroot.split('/')) - 3]
print '[SUBMODULE]:' + submodule
os.chdir(repopath + '/' + submodule)
sub.call ([mvn, 'clean', 'compile'])
sub.call ([mvn, 'test', '-Dtest=' + testscope])
else:
sub.call ([mvn, 'clean', 'compile'])
sub.call ([mvn, 'test', '-Dtest=' + testscope])
print fmt_tool_args
os.chdir('/home/polaris/Desktop/run_benchmark')
p = sub.Popen (fmt_tool_args,
stdout=open(outfile, 'w'), stderr=open(errfile, 'w'))
p.wait ()
cpuUsage = r.getrusage (r.RUSAGE_CHILDREN).ru_utime
stats = dict()
stats['File'] = f
stats['base'] = base
stats['Status'] = p.returncode
stats['Cpu'] = '{:.2f}'.format (cpuUsage - cpuTotal)
cpuTotal = cpuUsage
stats = collectStats (stats, outfile)
stats = collectStats (stats, errfile)
statsLine (os.path.join (out, 'stats'), fmt, stats)
def main (argv):
args = parseArgs (argv[1:])
if not os.path.exists (args.out):
os.mkdir (args.out)
fmt = args.format.split (':')
statsHeader (os.path.join (args.out, 'stats'), fmt)
global cpuTotal
import resource as r
cpuTotal = r.getrusage (r.RUSAGE_CHILDREN).ru_utime
for f in open(args.bench, 'r'):
runTool (args.tool_args, f.strip(), args.out,
cpu=args.cpu,
mem=args.mem,
fmt=fmt)
return 0
if __name__ == '__main__':
sys.exit (main (sys.argv))
|
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
print(find_packages())
setup(
name='efsync',
version='1.0.3',
packages=find_packages(),
entry_points={
"console_scripts": ["efsync=efsync.efsync_cli:main"]
},
author="Philipp Schmid",
author_email="schmidphilipp1995@gmail.com",
description="A CLI/SDK which automatically uploads pip packages and directories to aws efs to be used in aws lambda",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/philschmid",
python_requires=">=3.6",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
'scp',
'paramiko',
'boto3',
'pyaml'
],
)
|
import itertools
import math
import matplotlib.pyplot as plt
import numpy as np
from . import junction
from .circle_fit import fit
def distance(p1, p2):
return math.sqrt(((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2))
def collinear(points, epsilon=0.01):
""" determine if three points are collinear
:param epsilon:
:param points: list of 3 point tuples that might be collinear
:type points: list of 3 2-member tuples
:return: boolean to tell if the points are collinear or not
:rtype: bool
"""
if not isinstance(points, list):
raise TypeError("parameter must be of type 'list'")
if len(points) != 3:
raise ValueError("list of points must have only 3 points")
# converts points to a np array that looks like:
# | x1 x2 x3 |
# | y1 y2 y3 |
# | 1 1 1 |
matrix = np.array(
[[points[0][0], points[1][0], points[2][0]], [points[0][1], points[1][1], points[2][1]], [1, 1, 1]])
# finds area of the triangle formed by the three points by taking the determinant of the matrix above
area = 0.5 * abs(np.linalg.det(matrix))
if area < epsilon:
return True
else:
return False
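# Quick illustration (added sketch, not part of the original module): three points
# on the line y = x span zero triangle area and are reported as collinear, while a
# bent triple is not:
#   collinear([(0, 0), (1, 1), (2, 2)])   # True  (area == 0)
#   collinear([(0, 0), (1, 1), (2, 0)])   # False (area == 1 > epsilon)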
class Edge:
id_iter = itertools.count()
def __init__(self, start_node, end_node, intermediate_points, cells):
self._start_node = start_node
self._end_node = end_node
self._radius = 0
self._center = (0, 0)
self._intermediate_points = intermediate_points
self._mesh_segments = []
self._mesh_points = []
self._junctions = {start_node, end_node}
self._cell_label_set = cells
self._label = next(Edge.id_iter)
self._corresponding_tension_vector = None
self.tension_label = 0
self.tension_magnitude = 0
def _get_split_point(self, a, b, dist):
""" Returns the point that is <<dist>> length along the line a b.
a and b should each be an (x, y) tuple.
dist should be an integer or float, not longer than the line a b.
"""
dx = b[0] - a[0]
dy = b[1] - a[1]
try:
m = dy / dx
except ZeroDivisionError:
if b[1] > a[1]:
return a[0], a[1] + dist
            elif a[1] > b[1]:
                # vertical segment heading downwards: step dist down from a
                return a[0], a[1] - dist
else:
return a
c = a[1] - (m * a[0])
x = a[0] + (dist ** 2 / (1 + m ** 2)) ** 0.5
y = m * x + c
# formula has two solutions, so check the value to be returned is
# on the line a b.
        if not (min(a[0], b[0]) <= x <= max(a[0], b[0])):
x = a[0] - (dist ** 2 / (1 + m ** 2)) ** 0.5
y = m * x + c
return x, y
def split_line_single(self, line, length):
""" Returns two ogr line geometries, one which is the first length
<<length>> of <<line>>, and one one which is the remainder.
line should be a ogr LineString Geometry.
length should be an integer or float.
"""
line_points = line
sub_line = []
while length > 0:
d = distance(line_points[0], line_points[1])
if d > length:
split_point = self._get_split_point(line_points[0], line_points[1], length)
sub_line.append(line_points[0])
sub_line.append(split_point)
line_points[0] = split_point
break
if d == length:
sub_line.append(line_points[0])
sub_line.append(line_points[1])
line_points.remove(line_points[0])
break
if d < length:
sub_line.append(line_points[0])
line_points.remove(line_points[0])
length -= d
remainder = []
for point in line_points:
remainder.append(point)
return sub_line, remainder
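    # Worked example of the intended behaviour (illustrative sketch only):
    # splitting the polyline [(0, 0), (10, 0)] after a length of 4 yields
    #   sub_line  == [(0, 0), (4.0, 0.0)]
    #   remainder == [(4.0, 0.0), (10, 0)]
    # Note that the list passed in is consumed in place, which is why
    # split_line_multiple() below operates on a copy of the intermediate points.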
def split_line_multiple(self, length=None, n_pieces=None):
""" Splits a ogr wkbLineString into multiple sub-strings, either of
a specified <<length>> or a specified <<n_pieces>>.
line should be an ogr LineString Geometry
Length should be a float or int.
n_pieces should be an int.
Either length or n_pieces should be specified.
Returns a list of ogr wkbLineString Geometries.
"""
line = self._intermediate_points.copy()
if not n_pieces:
n_pieces = int(math.ceil(self.length / length))
if not length:
length = self.length / float(n_pieces)
line_segments = []
remainder = line
for i in range(n_pieces - 1):
segment, remainder = self.split_line_single(remainder, length)
line_segments.append(segment)
else:
line_segments.append(remainder)
self._mesh_segments = line_segments
def calculate_edge_points(self):
for segment in self._mesh_segments:
self._mesh_points.append(segment[0])
self._mesh_points.append(self._mesh_segments[-1][-1])
def outside(self, background):
return background in self._cell_label_set
@property
def start_node(self):
return self._start_node
@start_node.setter
def start_node(self, node):
if isinstance(node, junction.Junction):
self._start_node = node
else:
raise TypeError('node should be of type Junction. Instead, node was of type {}'.format(type(node)))
@property
def end_node(self):
return self._end_node
@end_node.setter
def end_node(self, node):
if isinstance(node, junction.Junction):
self._end_node = node
else:
raise TypeError('node should be of type Junction. Instead, node was of type {}'.format(type(node)))
@property
def radius(self):
return self._radius
@radius.setter
def radius(self, r):
if isinstance(r, (int, float, complex)) and not isinstance(r, bool):
self._radius = r
else:
raise TypeError('radius must be of numeric type. Instead, r was of type {}'.format(type(r)))
@property
def center(self):
return self._center
@center.setter
def center(self, c):
if len(c) == 2:
self._center = c
else:
raise ValueError('center should not exceed length of 2. The length of center coordinates was: {}'.format(
len(c)))
@property
def xc(self):
return self._center[0]
@property
def yc(self):
return self._center[1]
@property
def corresponding_tension_vector(self):
return self._corresponding_tension_vector
@corresponding_tension_vector.setter
    def corresponding_tension_vector(self, vector):
        # The parameter used to shadow the module it was checked against; the
        # lazy import below assumes a sibling tension_vector module that defines
        # TensionVector (imported here to avoid a circular import).
        from . import tension_vector
        if isinstance(vector, tension_vector.TensionVector):
            self._corresponding_tension_vector = vector
        else:
            raise TypeError('corresponding_tension_vector should be of type TensionVector. Instead, it was of type {}'.format(
                type(vector)))
@property
def length(self):
length = 0
for index, point in enumerate(self._intermediate_points):
if index < len(self._intermediate_points) - 1:
length += distance(point, self._intermediate_points[index + 1])
return length
@property
def location(self):
return self._intermediate_points[int(len(self._intermediate_points) / 2)]
def plot(self, label=False):
x, y = list(zip(*self._intermediate_points))
x = list(x)
y = list(y)
plt.plot(x, y, color='deepskyblue', linestyle='-', linewidth=0.5)
if label:
plt.text(self.location[0], self.location[1], str(self._label), color='white', fontsize=3,
horizontalalignment='center', verticalalignment='center')
def circle_fit(self):
x, y = list(zip(*self._mesh_points))
x = np.asarray(x)
y = np.asarray(y)
xc, yc, radius = fit(x, y, self.start_node.coordinates, self.end_node.coordinates)
self.center = (xc, yc)
self.radius = radius
@property
def linear(self):
for index, point in enumerate(self._mesh_points):
if index < len(self._mesh_points) - 3:
l = [point, self._mesh_points[index + 1], self._mesh_points[index + 2]]
if not collinear(l):
return False
return True
def angular_position(self, coordinates):
""" given a (x,y) coordinate, the angular position in radians from 0 to 2*pi around the fit circle is returned
:param coordinates:
:return:
"""
x, y = coordinates
angular_position = math.atan2(y - self.yc, x - self.xc) # y,x
# convert to radians between 0 and 2*pi
if angular_position > 0:
angular_position = angular_position
else:
angular_position = 2 * math.pi + angular_position
return angular_position
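    # Worked example (illustrative): for a fit circle centred at (xc, yc) with
    # radius r, a point at (xc, yc + r) has angular position pi / 2, while a point
    # at (xc, yc - r) gives atan2(-r, 0) = -pi / 2, which the branch above maps to
    # 3 * pi / 2 so that every returned angle lies in [0, 2 * pi).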
@property
def cw_around_circle(self):
"""true if the edge (start_node to end_node) goes clockwise around the fit circle, false if ccw
:return:
"""
start_angular_position = self.angular_position(self.start_node.coordinates)
end_angular_position = self.angular_position(self.end_node.coordinates)
return start_angular_position > end_angular_position
def start_tangent_angle(self, method='chord'):
if method == 'chord':
angle = math.atan2(self.end_node.y - self.start_node.y, self.end_node.x - self.start_node.x)
# convert to range of 0 to 2pi
if angle < 0:
angle += 2 * math.pi
return angle
if method == 'circular':
# circular edges
slope_of_tangent_line = (-(self.start_node.x - self.xc)) / (self.start_node.y - self.yc)
start_angular_position = self.angular_position(self.start_node.coordinates)
angle = math.atan(slope_of_tangent_line)
if self.cw_around_circle:
if math.pi < start_angular_position < 2 * math.pi:
# third and fourth quadrants
angle += math.pi
else:
if math.pi > start_angular_position > 0:
# first and second quadrants
angle += math.pi
# linear edges
if self.linear:
if self.end_node.x > self.start_node.x:
angle = math.atan2(-self.start_node.y + self.end_node.y, -self.start_node.x + self.end_node.x)
elif self.end_node.x == self.start_node.x:
if self.end_node.y - self.start_node.y > 0:
angle = math.pi / 2
else:
angle = 3 * math.pi / 2
elif self.end_node.x < self.start_node.x:
if self.end_node.y > self.start_node.y:
angle = math.pi / 2 + math.atan(
(self.start_node.x - self.end_node.x) / (-self.start_node.y + self.end_node.y))
else:
angle = math.pi + math.atan(
(self.start_node.y - self.end_node.y) / (self.start_node.x - self.end_node.x))
# convert to range of 0 to 2pi
if angle < 0:
angle += 2 * math.pi
return angle
def end_tangent_angle(self, method='chord'):
if method == 'chord':
angle = math.atan2(-self.end_node.y + self.start_node.y, -self.end_node.x + self.start_node.x)
# convert to range of 0 to 2pi
if angle < 0:
angle += 2 * math.pi
return angle
if method == 'circular':
slope_of_tangent_line = (-(self.end_node.x - self.xc)) / (self.end_node.y - self.yc)
end_angular_position = self.angular_position(self.end_node.coordinates)
angle = math.atan(slope_of_tangent_line)
if self.cw_around_circle:
if math.pi < end_angular_position < 2 * math.pi:
# third and fourth quadrants
angle += math.pi
else:
if math.pi > end_angular_position > 0:
# first and second quadrants
angle += math.pi
# convert to range of 0 to 2pi
if angle < 0:
angle += 2 * math.pi
angle -= math.pi
# linear edges
if self.linear:
if self.start_node.x > self.end_node.x:
angle = math.atan2(-self.end_node.y + self.start_node.y, -self.end_node.x + self.start_node.x)
elif self.start_node.x == self.end_node.x:
if self.start_node.y - self.end_node.y > 0:
angle = math.pi / 2
else:
angle = 3 * math.pi / 2
elif self.start_node.x < self.end_node.x:
if self.start_node.y > self.end_node.y:
angle = math.pi / 2 + math.atan(
(self.end_node.x - self.start_node.x) / (-self.end_node.y + self.start_node.y))
else:
angle = math.pi + math.atan(
(self.end_node.y - self.start_node.y) / (self.end_node.x - self.start_node.x))
# convert to range of 0 to 2pi
if angle < 0:
angle += 2 * math.pi
return angle
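    # Chord-method sanity check (illustrative): for a straight edge from (0, 0)
    # to (1, 1), start_tangent_angle('chord') is atan2(1, 1) = pi / 4, while
    # end_tangent_angle('chord') is atan2(-1, -1) = -3 * pi / 4, mapped to
    # 5 * pi / 4; the two unit vectors therefore point along the edge toward
    # the opposite junction.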
def map_unit_vectors_to_junctions(self):
angle = self.start_tangent_angle()
self.start_node.x_unit_vectors_dict[self._label] = math.cos(angle)
self.start_node.y_unit_vectors_dict[self._label] = math.sin(angle)
angle = self.end_tangent_angle()
self.end_node.x_unit_vectors_dict[self._label] = math.cos(angle)
self.end_node.y_unit_vectors_dict[self._label] = math.sin(angle)
def plot_tangent(self, c='y', ):
angle = self.start_tangent_angle()
plt.plot([self.start_node.x, self.start_node.x + 10 * math.cos(angle)], [self.start_node.y,
self.start_node.y + 10 * math.sin(
angle)], c=c, lw=0.75)
angle = self.end_tangent_angle()
plt.plot([self.end_node.x, self.end_node.x + 10 * math.cos(angle)], [self.end_node.y,
self.end_node.y + 10 * math.sin(
angle)], c=c, lw=0.75)
plt.text(self.location[0], self.location[1], str(round(self.tension_magnitude, 2)), color='red',
fontsize=3,
horizontalalignment='center', verticalalignment='center')
def plot_circle(self):
xc, yc = self._center
start_point = self.start_node.coordinates
mid_point = self._mesh_points[1]
end_point = self.end_node.coordinates
start_theta = math.atan2(start_point[1] - yc, start_point[0] - xc)
end_theta = math.atan2(end_point[1] - yc, end_point[0] - xc)
mid_theta = math.atan2(mid_point[1] - yc, mid_point[0] - xc)
if start_theta <= mid_theta <= end_theta:
theta_fit = np.linspace(start_theta, end_theta, 100)
elif start_theta >= mid_theta >= end_theta:
theta_fit = np.linspace(end_theta, start_theta, 100)
else:
if start_theta < 0:
start_theta = 2 * math.pi + start_theta
if end_theta < 0:
end_theta = 2 * math.pi + end_theta
if mid_theta < 0:
mid_theta = 2 * math.pi + mid_theta
theta_fit = np.linspace(start_theta, end_theta, 180)
# stores all x and y coordinates along the fitted arc
x_fit = xc + self.radius * np.cos(theta_fit)
y_fit = yc + self.radius * np.sin(theta_fit)
# plot least squares circular arc
plt.plot(x_fit, y_fit, 'r-', lw=1)
def __eq__(self, other):
return self._junctions == {other.start_node, other.end_node}
def __str__(self):
return str(self._start_node) + ' to ' + str(self._end_node)
def __hash__(self):
return hash(str(self))
|
personinfo = {'Name': 'John', 'Surname': 'Reese', 'Occupation': 'Killer'}
personinfo['Occupation'] = "Agent"  # Update
personinfo['Employer'] = "Harold Finch"  # Add
print personinfo['Name']
print "personinfo keys:", personinfo.keys()
print "personinfo values:", personinfo.values()
personinfo.clear()
print "personinfo's length after clearing:", len(personinfo)
|
# knight move offsets (dx, dy) for the 8 possible jumps
mx = [-2,-1,1,2,2,1,-1,-2]
my = [1,2,2,1,-1,-2,-2,-1]
T = int(input())
# BFS from the knight's current square to the destination on a W x W board,
# printing the minimum number of moves
def solve():
W = int(input())
curX, curY = map(int, (input().split()))
destX, destY = map(int, (input().split()))
isVisit = [[False for x in range(W)] for y in range(W) ]
q = []
q.append([ curX, curY ,0 ])
isVisit[curY][curX] = True
while(q):
cur = q.pop(0)
if cur[0] == destX and cur[1] == destY:
print(cur[2])
break
for v in range(0,8,1):
nx = mx[v] + cur[0]
ny = my[v] + cur[1]
if 0 <= nx and 0 <= ny and nx < W and ny < W:
if not isVisit[ny][nx]:
isVisit[ny][nx] = True
q.append([nx,ny, cur[2] + 1])
for v in range(T):
solve()
|
import torch.nn as nn
class PVN_Stage2_ActionHead(nn.Module):
"""
Outputs a 4-D action, where
"""
def __init__(self, h2=128):
super(PVN_Stage2_ActionHead, self).__init__()
self.linear = nn.Linear(h2, 16)
def init_weights(self):
pass
def forward(self, features):
x = self.linear(features)
return x
|
#!/sv/venv/T-PLE/bin/python2.7
from __future__ import print_function
from scripts.baseController import baseELOCalculations as ELOCal
vsDict = {}
for index, pName in enumerate(ELOCal.nameList):
vsDict[pName] = {'index': index, "wins": [0]*len(ELOCal.nameList)}
def main(logCount=None):
if not logCount:
logCount = ELOCal.getLogCount()[0][0]
ELOCal.debugPrint("UVR: Found Log Count", logCount)
processedLog = ELOCal.getLog(processed=True, newLogCount=logCount)
ELOCal.debugPrint("UVR: Processed Log", processedLog)
for result in processedLog:
vsDict[result[0]]["wins"][vsDict[result[1]]["index"]] = vsDict[result[0]]["wins"][vsDict[result[1]]["index"]] + 1
vsResults = []
for name in ELOCal.nameList:
vsResults.append(vsDict[name]["wins"])
ELOCal.debugPrint("UVR: vs List Results", vsResults)
ELOCal.updateCells(
values=vsResults,
sheetRange=ELOCal.vs_Result_Range
)
if __name__ == '__main__':
main()
|
import mne
from mne.channels.layout import _merge_grad_data as grad_rms
import scipy.io as sio
import numpy as np
from my_settings import data_folder
std_info = np.load(data_folder + "std_info.npy").item()
subject = 1
planar = sio.loadmat(data_folder + "meg_data_%sa.mat" % subject)["planardat"]
events = mne.read_events(data_folder + "sub_%s-eve.fif" % subject)
info = mne.create_info(204, ch_types="grad", sfreq=125)
info["chs"] = std_info["chs"]
info["ch_names"] = std_info["ch_names"]
info["lowpass"] = 41
info["highpass"] = 0.1
raw = mne.io.RawArray(planar, info)
raw.save(data_folder + "sub_%s-raw.fif" % subject)
# epochs
event_id = {"Anger/non-target": 1,
"Disgust/non-target": 2,
"Fear/non-target": 3,
"Happiness/target": 4,
"Neutrality/non-target": 5,
"Sadness/non-target": 6,
"Test": 10}
tmin, tmax = -0.2, 0.83
reject = {"grad": 4000e-13} # T / m (gradiometers)
epochs_params = dict(events=events,
event_id=event_id,
tmin=tmin,
tmax=tmax,
reject=reject,
baseline=(None, 0),
preload=True)
epochs = mne.Epochs(raw, **epochs_params)
epochs.save(data_folder + "sub_%s-epo.fif" % subject)
rms_data = np.asarray([grad_rms(t) for t in epochs.get_data()])
|
from django.http import HttpResponse, Http404
from django.template import loader
from applications.resume.models import Experience, Project, Education, Information, Certificate, Group
def resume_view(request, language):
if language == 'en':
temp = loader.get_template('resume/ltr-resume.html')
language_code = 1
elif language == 'fa':
temp = loader.get_template('resume/rtl-resume.html')
language_code = 2
else:
raise Http404
information = Information.objects.get(language=language_code)
group = Group.objects.all()
experiences = Experience.objects.filter(language=language_code).order_by('-start_date')
projects = Project.objects.filter(language=language_code).order_by('-start_date')
educations = Education.objects.filter(language=language_code).order_by('-begin_time')
certificates = Certificate.objects.filter(language=language_code).order_by('-issue_date')
context = {'information': information, 'group': group, 'experiences': experiences, 'projects': projects,
'educations': educations, 'certificates': certificates}
return HttpResponse(temp.render(context, request))
|
# keeps track of the game's logic
class GameState:
def __init__(self): # this means it's the function of the class GameState itself.
# so when calling stuff from here just say Game.State.something.
# the board is read row, column, character/s inside.
        # example: self.board[0][1][0] = 'b' [first row][second column][first character]
# if the third parameter is not set, it returns both characters.
# min_max of the rows and columns are (0, 7), while for the characters it's obviously (0, 1)
self.board = [
["--", "bR", "--", "bN", "--", "bN", "--", "bR"],
["bP", "--", "bP", "--", "bP", "--", "bP", "--"],
["--", "bP", "--", "bP", "--", "bP", "--", "bP"],
["--", "--", "--", "--", "--", "--", "--", "--"],
["--", "--", "--", "--", "--", "--", "--", "--"],
["wP", "--", "wP", "--", "wP", "--", "wP", "--"],
["--", "wP", "--", "wP", "--", "wP", "--", "wP"],
["wR", "--", "wN", "--", "wN", "--", "wR", "--"]]
# the move_functions is a dictionary and it removes clutter in the function get_all_possible_moves
self.move_functions = {"P": self.get_pawn_moves, "N": self.get_knight_moves, "B": self.get_bishop_moves,
"R": self.get_rook_moves, "Q": self.get_queen_moves, "K": self.get_king_moves}
# it's set to True since white always goes first.
# To make it black's turn, make it False or write not self.white_to_move
self.white_to_move = True
# the list that contain the moves in [start file][start rank][end file][end rank] format. e.g. a2a4
self.move_log = []
# start locations of the kings. The values will be changed to keep track of the king.
# the king is tracked so that a possible pin/check is checked all the time.
# the values are negative since the king is initially not present.
self.white_king_location = (-12, -11)
self.black_king_location = (-12, -11)
# I don't need to explain these.
self.check_mate = False
self.stale_mate = False
self.in_check = False
# lists the values of where is the pinned piece and the piece that checks the king
self.pins = []
self.checks = []
# the square where you can move then backstab the hell out of that idiot
self.en_passant_possible = ()
# the square of the eaten enemy piece
self.dama_take_possible = []
# tracks how many pawns are promoted
self.white_dama_count = []
self.black_dama_count = []
# checks if you already have a promoted piece
self.first_white_dama = False
self.first_black_dama = False
# tracks when in the move_log is the enemy king added for easier undo function
self.white_king_start = []
self.black_king_start = []
# tracks where the pawn is converted into a king. Same reason as above
self.white_pawn_conv = []
self.black_pawn_conv = []
# lists the locations for all enemy pawns
self.enemy_pawns = []
# the list of valid coordinates used in the user input function
self.pos = ['0', '1', '2', '3', '4', '5', '6', '7']
# handles the main info when you make a move
def make_move(self, move):
# makes the piece move and leave the former square empty
self.board[move.end_row][move.end_col] = move.piece_moved
self.board[move.start_row][move.start_col] = "--"
# adds the made move to the move log
self.move_log.append(move)
# ends the turn of the current color
self.white_to_move = not self.white_to_move
# to keep track of the king's location
if move.piece_moved == 'wK':
self.white_king_location = (move.end_row, move.end_col)
elif move.piece_moved == 'bK':
self.black_king_location = (move.end_row, move.end_col)
# to do the en passant
# need to put this first else the en passant is possible but will not remove the taken pawn
if (move.end_row, move.end_col) == self.en_passant_possible:
move.is_en_passant_move = True
            # record the captured pawn before clearing its square
            move.piece_captured = self.board[move.start_row][move.end_col]
            self.board[move.start_row][move.end_col] = "--"
# to check what square an en passant take is possible
if move.piece_moved[1] == "P" and \
abs(move.start_row - move.end_row) == 2 and \
abs(move.start_col - move.end_col) == 0:
# shows where the attacking pawn can move to backstab
self.en_passant_possible = ((move.start_row + move.end_row) // 2, move.start_col)
else:
self.en_passant_possible = ()
# to do the dama take
if (move.end_row, move.end_col) in self.dama_take_possible:
move.is_dama_take = True
self.dama_take_possible = []
self.board[((move.start_row + move.end_row) // 2)][((move.start_col + move.end_col) // 2)] = "--"
# to create pawn promotion prompt
if move.is_pawn_promotion:
white_dama_type = len(self.white_dama_count) % 2
black_dama_type = len(self.black_dama_count) % 2
possible_promotions = ['K', 'Q', 'R', 'N']
if not self.white_to_move:
# to make the dama a bishop
if len(self.white_dama_count) != 0 and white_dama_type == 0:
self.board[move.end_row][move.end_col] = move.piece_moved[0] + 'B'
self.white_dama_count.append((move.end_row, move.end_col))
# to summon any piece except a bishop and a pawn
if len(self.white_dama_count) != 0 and white_dama_type == 1:
while True:
promoted_piece = input("Select a piece except a Bishop and a Pawn [K, Q, R, N]:")
                        if promoted_piece not in ('B', 'P'):
self.board[move.end_row][move.end_col] = move.piece_moved[0] + promoted_piece
self.white_dama_count.append((move.end_row, move.end_col))
if promoted_piece in possible_promotions:
break
# the first dama of white
if len(self.white_dama_count) == 0:
self.first_white_dama = True
self.get_enemy_pawns()
self.board[move.end_row][move.end_col] = move.piece_moved[0] + 'Q'
self.white_dama_count.append((move.end_row, move.end_col))
while True:
print('')
print("The enemy's pawn locations are:")
print(self.enemy_pawns)
selected_king_row = input('Please select the row of the pawn:')
if selected_king_row in self.pos:
selected_king_col = input('Please select the column of the pawn:')
if selected_king_col in self.pos:
selected_king = (int(selected_king_row), int(selected_king_col))
if selected_king in self.enemy_pawns:
self.board[selected_king[0]][selected_king[1]] = 'bK'
self.black_king_location = (selected_king[0], selected_king[1])
self.enemy_pawns = []
self.black_pawn_conv.append((selected_king[0], selected_king[1]))
self.black_king_start.append(len(self.move_log))
break
else:
# to make the dama a bishop
if len(self.black_dama_count) != 0 and black_dama_type == 0:
self.board[move.end_row][move.end_col] = move.piece_moved[0] + 'B'
self.black_dama_count.append((move.end_row, move.end_col))
# to summon any piece except a bishop
if len(self.black_dama_count) != 0 and black_dama_type == 1:
while True:
promoted_piece = input("Select a piece except a Bishop and a Pawn [K, Q, R, N]:")
                        if promoted_piece not in ('B', 'P'):
self.board[move.end_row][move.end_col] = move.piece_moved[0] + promoted_piece
self.black_dama_count.append((move.end_row, move.end_col))
if promoted_piece in possible_promotions:
break
# the first dama of black
                if len(self.black_dama_count) == 0:
                    self.first_black_dama = True
                    self.get_enemy_pawns()
self.board[move.end_row][move.end_col] = move.piece_moved[0] + 'Q'
self.black_dama_count.append((move.end_row, move.end_col))
while True:
print('')
print("The enemy's pawn locations are:")
print(self.enemy_pawns)
selected_king_row = input('Please select the row of the pawn:')
if selected_king_row in self.pos:
selected_king_col = input('Please select the column of the pawn:')
if selected_king_col in self.pos:
selected_king = (int(selected_king_row), int(selected_king_col))
if selected_king in self.enemy_pawns:
self.board[selected_king[0]][selected_king[1]] = 'wK'
self.white_king_location = (selected_king[0], selected_king[1])
self.enemy_pawns = []
self.white_pawn_conv.append((selected_king[0], selected_king[1]))
self.white_king_start.append(len(self.move_log))
break
# well, to undo moves. duh.
def undo_move(self):
# makes sure you don't undo yourself to nothingness
if len(self.move_log) != 0:
# thanks pop ctrl + z
move = self.move_log.pop()
# reverts the move made and turn
self.board[move.start_row][move.start_col] = move.piece_moved
self.board[move.end_row][move.end_col] = move.piece_captured
self.white_to_move = not self.white_to_move
# to keep track of the kings(again)
if move.piece_moved == 'wK':
self.white_king_location = (move.start_row, move.start_col)
elif move.piece_moved == 'bK':
self.black_king_location = (move.start_row, move.start_col)
# well it's to undo the en passant huh
if move.is_en_passant_move:
self.board[move.end_row][move.end_col] = "--"
self.board[move.start_row][move.end_col] = 'bP' if self.white_to_move else 'wP'
self.en_passant_possible = (move.end_row, move.end_col)
move.piece_captured = self.board[((move.start_row + move.end_row) // 2)][
((move.start_col + move.end_col) // 2)]
if move.piece_moved[1] == 'P' and \
abs(move.start_row - move.end_row) == 2 and \
abs(move.start_col - move.end_col) == 0:
self.en_passant_possible = ()
# to undo a dama take
if move.is_dama_take:
move.is_dama_take = False
self.board[move.end_row][move.end_col] = "--"
self.dama_take_possible.append((move.end_row, move.end_col))
self.board[((move.start_row + move.end_row) // 2)][
((move.start_col + move.end_col) // 2)] = move.dama_piece_taken
# to undo a dama/pawn promotion
if move.is_pawn_promotion:
if self.white_to_move:
move.is_pawn_promotion = False
if len(self.white_dama_count) != 0:
self.white_dama_count.pop()
else:
move.is_pawn_promotion = False
if len(self.black_dama_count) != 0:
self.black_dama_count.pop()
# to undo a first pawn promotion
if self.first_white_dama:
if self.black_king_start[0] - 1 == (len(self.move_log)):
self.board[(self.black_pawn_conv[0][0])][(self.black_pawn_conv[0][1])] = 'bP'
if self.first_black_dama:
if self.white_king_start[0] - 1 == (len(self.move_log)):
                    self.board[(self.white_pawn_conv[0][0])][(self.white_pawn_conv[0][1])] = 'wP'
# literally mario 1 up. Don't mess up this time.
if self.check_mate:
self.check_mate = False
if self.stale_mate:
self.stale_mate = False
# To make sure you don't do anything stupid. (Hopefully)
def get_valid_moves(self):
# well it's the list of valid moves.
moves = []
# steals stuff from another function to make it's own life easier
self.in_check, self.pins, self.checks = self.see_checks_and_pins()
# to keep track of the king, yes we already know that
if self.white_to_move:
king_row = self.white_king_location[0]
king_col = self.white_king_location[1]
else:
king_row = self.black_king_location[0]
king_col = self.black_king_location[1]
# oh no
if self.in_check:
# if there's one piece that's aiming a gun at you
if len(self.checks) == 1:
moves = self.get_all_possible_moves()
# converts the assailant square to its own variable
check = self.checks[0]
# converts the square into coordinates
check_row = check[0]
check_col = check[1]
# he's the baaaaaad guy
piece_checking = self.board[check_row][check_col]
# duh
valid_squares = []
# if the attacker is a knight
if piece_checking[1] == 'N':
# the king will check using the knight moves.
# if no knight is in the 8 moves, all moves are valid squares.
# wait I'm not sure yet
valid_squares = [(check_row, check_col)]
else:
for i in range(1, 8):
valid_square = (king_row + check[2] * i, king_col + check[3] * i)
valid_squares.append(valid_square)
if valid_square[0] == check_row and valid_square[1] == check_col:
break
for i in range(len(moves) - 1, -1, -1):
if moves[i].piece_moved[1] != 'K':
if not (moves[i].end_row, moves[i].end_col) in valid_squares:
moves.remove(moves[i])
else:
self.get_king_moves(king_row, king_col, moves)
else:
moves = self.get_all_possible_moves()
if len(moves) == 0:
if self.see_checks_and_pins():
if self.in_check:
self.check_mate = True
if self.white_to_move:
print('Checkmate, Black Wins')
else:
print('Checkmate, White Wins')
else:
self.stale_mate = True
print('Stalemate')
return moves
def see_checks_and_pins(self):
pins = []
checks = []
in_check = False
if self.white_to_move:
enemy_color = 'b'
ally_color = 'w'
start_row = self.white_king_location[0]
start_col = self.white_king_location[1]
else:
enemy_color = 'w'
ally_color = 'b'
start_row = self.black_king_location[0]
            start_col = self.black_king_location[1]
directions = ((-1, 0), (1, 0), (0, -1), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1))
for j in range(len(directions)):
d = directions[j]
possible_pin = ()
for i in range(1, 8):
end_row = start_row + d[0] * i
end_col = start_col + d[1] * i
if 0 <= end_row < 8 and 0 <= end_col < 8:
end_piece = self.board[end_row][end_col]
if end_piece[0] == ally_color and end_piece[1] != 'K':
if possible_pin == ():
possible_pin = (end_row, end_col, d[0], d[1])
else:
break
elif end_piece[0] == enemy_color:
piece_kind = end_piece[1]
if (0 <= j <= 3 and piece_kind == 'R') or \
(4 <= j <= 7 and piece_kind == 'B') or \
(i == 1 and piece_kind == 'P' and ((enemy_color == 'w' and 6 <= j <= 7) or (
enemy_color == 'b' and 4 <= j <= 5))) or \
(piece_kind == 'Q') or (i == 1 and piece_kind == 'K'):
if possible_pin == ():
in_check = True
checks.append((end_row, end_col, d[0], d[1]))
break
else:
pins.append(possible_pin)
break
else:
if end_piece[1] != 'K':
if possible_pin == ():
possible_pin = (end_row, end_col, d[0], d[1])
pins.append(possible_pin)
break
else:
break
knight_moves = ((-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1))
for a in knight_moves:
end_row = start_row + a[0]
end_col = start_col + a[1]
if 0 <= end_row < 8 and 0 <= end_col < 8:
end_piece = self.board[end_row][end_col]
if end_piece[0] == enemy_color and end_piece[1] == "N":
in_check = True
checks.append((end_row, end_col, a[0], a[1]))
return in_check, pins, checks
def invalid_moves(self):
empty = []
for r in range(len(self.board)):
for c in range(len(self.board[r])):
if self.board[r][c] == "--":
empty.append((r, c))
return empty
def get_all_possible_moves(self):
moves = []
for r in range(len(self.board)):
for c in range(len(self.board[r])):
turn = self.board[r][c][0]
if (turn == 'w' and self.white_to_move) or (turn == 'b' and not self.white_to_move):
piece = self.board[r][c][1]
# noinspection PyArgumentList
self.move_functions[piece](r, c, moves)
return moves
def get_enemy_pawns(self):
for r in range(len(self.board)):
for c in range(len(self.board[r])):
pawn = self.board[r][c]
if (pawn == 'wP' and self.white_to_move) or (pawn == 'bP' and not self.white_to_move):
self.enemy_pawns.append((r, c))
def get_pawn_moves(self, r, c, moves):
piece_pinned = False
pin_direction = ()
for i in range(len(self.pins) - 1, -1, -1):
if self.pins[i][0] == r and self.pins[i][1] == c:
piece_pinned = True
pin_direction = (self.pins[i][2], self.pins[i][3])
self.pins.remove(self.pins[i])
break
self.get_dama_moves(r, c, moves)
if self.white_to_move:
if r - 1 >= 0:
if self.board[r - 1][c] == "--": # 1 move forward
if not piece_pinned or pin_direction == (-1, 0):
moves.append(Move((r, c), (r - 1, c), self.board))
if r == 6 and self.board[r - 2][c] == "--": # 2 moves forward
moves.append(Move((r, c), (r - 2, c), self.board))
if c - 1 >= 0: # capture to the left
if self.board[r - 1][c - 1][0] == 'b':
if not piece_pinned or pin_direction == (-1, -1):
moves.append(Move((r, c), (r - 1, c - 1), self.board))
if self.board[r][c - 1][0] != 'w':
if (r - 1, c - 1) == self.en_passant_possible:
moves.append(Move((r, c), (r - 1, c - 1), self.board, (r - 1, c - 1)))
if c + 1 <= 7: # capture to the right
if self.board[r - 1][c + 1][0] == 'b':
if not piece_pinned or pin_direction == (-1, 1):
moves.append(Move((r, c), (r - 1, c + 1), self.board))
if self.board[r][c + 1][0] != 'w':
if (r - 1, c + 1) == self.en_passant_possible:
moves.append(Move((r, c), (r - 1, c + 1), self.board, (r - 1, c + 1)))
else: # black pawn moves
if r + 1 <= 7:
if self.board[r + 1][c] == "--": # 1 move forward
if not piece_pinned or pin_direction == (1, 0):
moves.append(Move((r, c), (r + 1, c), self.board))
if r == 1 and self.board[r + 2][c] == "--": # 2 moves forward
moves.append(Move((r, c), (r + 2, c), self.board))
if c - 1 >= 0: # capture to the left
if self.board[r + 1][c - 1][0] == 'w':
if not piece_pinned or pin_direction == (1, -1):
moves.append(Move((r, c), (r + 1, c - 1), self.board))
if self.board[r][c - 1][0] != 'b':
if (r + 1, c - 1) == self.en_passant_possible:
moves.append(Move((r, c), (r + 1, c - 1), self.board, (r + 1, c - 1)))
if c + 1 <= 7: # capture to the right
if self.board[r + 1][c + 1][0] == 'w':
if not piece_pinned or pin_direction == (1, 1):
moves.append(Move((r, c), (r + 1, c + 1), self.board))
if self.board[r][c + 1][0] != 'b':
if (r + 1, c + 1) == self.en_passant_possible:
moves.append(Move((r, c), (r + 1, c + 1), self.board, (r + 1, c + 1)))
def get_knight_moves(self, r, c, moves):
piece_pinned = False
for i in range(len(self.pins) - 1, -1, -1):
if self.pins[i][0] == r and self.pins[i][1] == c:
piece_pinned = True
if self.board[r][c][1] == 'Q':
self.pins.remove(self.pins[i])
break
self.get_dama_moves(r, c, moves)
horse_jumps = ((-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1))
ally_color = 'w' if self.white_to_move else 'b'
for m in horse_jumps:
end_row = r + m[0]
end_col = c + m[1]
if 0 <= end_row < 8 and 0 <= end_col < 8:
if not piece_pinned:
end_piece = self.board[end_row][end_col]
if end_piece[0] != ally_color:
moves.append(Move((r, c), (end_row, end_col), self.board))
def get_bishop_moves(self, r, c, moves):
piece_pinned = False
pin_direction = ()
for i in range(len(self.pins) - 1, -1, -1):
if self.pins[i][0] == r and self.pins[i][1] == c:
piece_pinned = True
pin_direction = (self.pins[i][2], self.pins[i][3])
if self.board[r][c][1] == 'Q':
self.pins.remove(self.pins[i])
break
self.get_dama_moves(r, c, moves)
directions = ((-1, -1), (-1, 1), (1, -1), (1, 1))
enemy_color = 'b' if self.white_to_move else 'w'
for d in directions:
for i in range(1, 8):
end_row = r + (d[0] * i)
end_col = c + (d[1] * i)
if 0 <= end_row < 8 and 0 <= end_col < 8:
if not piece_pinned or pin_direction == d or pin_direction == (-d[0], -d[1]):
end_piece = self.board[end_row][end_col]
if end_piece == "--":
moves.append(Move((r, c), (end_row, end_col), self.board))
elif end_piece[0] == enemy_color:
moves.append(Move((r, c), (end_row, end_col), self.board))
break
else:
break
else:
break
def get_rook_moves(self, r, c, moves):
piece_pinned = False
pin_direction = ()
for i in range(len(self.pins) - 1, -1, -1):
if self.pins[i][0] == r and self.pins[i][1] == c:
piece_pinned = True
pin_direction = (self.pins[i][2], self.pins[i][3])
if self.board[r][c][1] == 'Q':
self.pins.remove(self.pins[i])
break
self.get_dama_moves(r, c, moves)
directions = ((-1, 0), (1, 0), (0, -1), (0, 1))
enemy_color = 'b' if self.white_to_move else 'w'
for d in directions:
for i in range(1, 8):
end_row = r + (d[0] * i)
end_col = c + (d[1] * i)
if 0 <= end_row < 8 and 0 <= end_col < 8:
if not piece_pinned or pin_direction == d or pin_direction == (-d[0], -d[1]):
end_piece = self.board[end_row][end_col]
if end_piece == "--":
moves.append(Move((r, c), (end_row, end_col), self.board))
elif end_piece[0] == enemy_color:
moves.append(Move((r, c), (end_row, end_col), self.board))
break
else:
break
else:
break
def get_queen_moves(self, r, c, moves):
self.get_dama_moves(r, c, moves)
self.get_rook_moves(r, c, moves)
self.get_bishop_moves(r, c, moves)
def get_king_moves(self, r, c, moves):
self.get_dama_moves(r, c, moves)
row_moves = (-1, -1, -1, 0, 0, 1, 1, 1)
col_moves = (-1, 0, 1, -1, 1, -1, 0, 1)
ally_color = 'w' if self.white_to_move else 'b'
for i in range(8):
end_row = r + row_moves[i]
end_col = c + col_moves[i]
if 0 <= end_row < 8 and 0 <= end_col < 8:
end_piece = self.board[end_row][end_col]
if end_piece[0] != ally_color:
if ally_color == 'w':
self.white_king_location = (end_row, end_col)
else:
self.black_king_location = (end_row, end_col)
in_check, pins, checks = self.see_checks_and_pins()
if not in_check:
moves.append(Move((r, c), (end_row, end_col), self.board))
if ally_color == 'w':
self.white_king_location = (r, c)
else:
self.black_king_location = (r, c)
def get_dama_moves(self, r, c, moves):
if self.white_to_move:
if r >= 1 and c - 1 >= 0: # dama move to the left
if self.board[r - 1][c - 1] == "--":
moves.append(Move((r, c), (r - 1, c - 1), self.board))
if r >= 1 and c + 1 <= 7: # dama move to the right
if self.board[r - 1][c + 1] == "--":
moves.append(Move((r, c), (r - 1, c + 1), self.board))
if r >= 2 and c - 2 >= 0: # dama move/capture to the left
if self.board[r - 2][c - 2] == "--":
if self.board[r - 1][c - 1][0] == "b":
moves.append(Move((r, c), (r - 2, c - 2), self.board))
self.dama_take_possible.append((r - 2, c - 2))
if r >= 2 and c + 2 <= 7: # dama move/capture to the right
if self.board[r - 2][c + 2] == "--":
if self.board[r - 1][c + 1][0] == "b":
moves.append(Move((r, c), (r - 2, c + 2), self.board))
self.dama_take_possible.append((r - 2, c + 2))
else:
if r <= 6 and c - 1 >= 0: # dama move to the left
if self.board[r + 1][c - 1] == "--":
moves.append(Move((r, c), (r + 1, c - 1), self.board))
if r <= 6 and c + 1 <= 7: # dama move/capture to the right
if self.board[r + 1][c + 1] == "--":
moves.append(Move((r, c), (r + 1, c + 1), self.board))
if r <= 5 and c - 2 >= 0: # dama move/capture to the left
if self.board[r + 2][c - 2] == "--":
if self.board[r + 1][c - 1][0] == "w":
moves.append(Move((r, c), (r + 2, c - 2), self.board))
self.dama_take_possible.append((r + 2, c - 2))
if r <= 5 and c + 2 <= 7: # dama move/capture to the right
if self.board[r + 2][c + 2] == "--":
if self.board[r + 1][c + 1][0] == "w":
moves.append(Move((r, c), (r + 2, c + 2), self.board))
self.dama_take_possible.append((r + 2, c + 2))
class Move:
ranks_to_rows = {"1": 7, "2": 6, "3": 5, "4": 4,
"5": 3, "6": 2, "7": 1, "8": 0}
rows_to_ranks = {v: k for k, v in ranks_to_rows.items()}
files_to_cols = {"a": 0, "b": 1, "c": 2, "d": 3,
"e": 4, "f": 5, "g": 6, "h": 7}
cols_to_files = {v: k for k, v in files_to_cols.items()}
def __init__(self, start_sq, end_sq, board, en_passant_possible=(), dama_take_possible=()):
self.start_row = start_sq[0]
self.start_col = start_sq[1]
self.end_row = end_sq[0]
self.end_col = end_sq[1]
self.piece_moved = board[self.start_row][self.start_col]
self.piece_captured = board[self.end_row][self.end_col]
# en passant stuff
self.is_en_passant_move = False
if self.piece_moved[1] == 'P' and (self.end_row, self.end_col) == en_passant_possible:
self.is_en_passant_move = True
# dama takes stuff
self.dama_piece_taken = board[((self.start_row + self.end_row) // 2)][((self.start_col + self.end_col) // 2)]
self.is_dama_take = False
if (self.end_row, self.end_col) in dama_take_possible:
self.is_dama_take = True
# pawn promotion stuff
self.is_pawn_promotion = False
self.possible_promotion = (self.piece_moved == 'wP' and self.end_row == 0) or \
(self.piece_moved == 'bP' and self.end_row == 7)
if self.possible_promotion:
self.is_pawn_promotion = True
self.move_id = self.start_row * 1000 + self.start_col * 100 + self.end_row * 10 + self.end_col
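        # e.g. a move from (6, 4) to (4, 4) gets move_id 6444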
# print(self.move_id)
def __eq__(self, other):
if isinstance(other, Move):
return self.move_id == other.move_id
return False
def get_chess_notation(self):
return self.get_rank_file(self.start_row, self.start_col) + self.get_rank_file(self.end_row, self.end_col)
def get_rank_file(self, r, c):
return self.cols_to_files[c] + self.rows_to_ranks[r]
|
'''
1. Write a Python program to find the largest palindrome made from the product of two 4-digit numbers.
According to Wikipedia - A palindromic number or numeral palindrome is a number that remains the same when its digits are reversed. Like 16461, for example, it is "symmetrical". The term palindromic is derived from palindrome, which refers to a word (such as rotor or racecar) whose spelling is unchanged when its letters are reversed. The first 30 palindromic numbers (in decimal) are: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 22, 33, 44, 55, 66, 77, 88, 99, 101, 111, 121, 131, 141, 151, 161, 171, 181, 191, 202,...
The largest palindrome made from the product of two 3-digit numbers is 913 * 993 = 906609.
Note: for two 4-digit numbers the answer is 9999 * 9901 = 99000099.
2. Write a Python program to find the smallest positive number that is evenly divisible by all of the numbers from 1 to 30.
2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
Result: 2329089562800.0
'''
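# A minimal sketch of possible solutions to the two exercises above (not from the
# original source): brute force for the palindrome, a gcd-based running LCM for
# the divisibility problem.
from math import gcd
def largest_palindrome_product(digits=4):
    lo, hi = 10 ** (digits - 1), 10 ** digits - 1
    best = 0
    for a in range(hi, lo - 1, -1):
        if a * hi <= best:  # no larger product is possible from here on
            break
        for b in range(hi, a - 1, -1):
            p = a * b
            if p <= best:
                break
            if str(p) == str(p)[::-1]:
                best = p
    return best
def smallest_evenly_divisible(n=30):
    lcm = 1
    for i in range(2, n + 1):
        lcm = lcm * i // gcd(lcm, i)
    return lcm
if __name__ == '__main__':
    print(largest_palindrome_product(4))   # expected 99000099
    print(smallest_evenly_divisible(30))   # expected 2329089562800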
|
level = 3
name = 'Banjaran'
capital = 'Banjaran'
area = 42.92
|
from flask import Flask, render_template, url_for, flash, redirect, request
import pandas as pd
# from sklearn.feature_extraction.text import CountVectorizer
# from sklearn.metrics.pairwise import cosine_similarity
app = Flask(__name__)
#import pandas as pd
lko_rest = pd.read_csv("food1.csv")
def fav(lko_rest1):
lko_rest1 = lko_rest1.reset_index()
from sklearn.feature_extraction.text import CountVectorizer
count1 = CountVectorizer(stop_words='english')
count_matrix = count1.fit_transform(lko_rest1['highlights'])
from sklearn.metrics.pairwise import cosine_similarity
cosine_sim2 = cosine_similarity(count_matrix, count_matrix)
sim = list(enumerate(cosine_sim2[0]))
sim = sorted(sim, key=lambda x: x[1], reverse=True)
sim = sim[1:11]
indi = [i[0] for i in sim]
final = lko_rest1.copy().iloc[indi[0]]
final = pd.DataFrame(final)
final = final.T
for i in range(1, len(indi)):
final1 = lko_rest1.copy().iloc[indi[i]]
final1 = pd.DataFrame(final1)
final1 = final1.T
final = pd.concat([final, final1])
return final
def rest_rec(cost, people=2, min_cost=0, cuisine=[], Locality=[], fav_rest="", lko_rest=lko_rest):
cost = cost + 200
x = cost / people
y = min_cost / people
lko_rest1 = lko_rest.copy().loc[lko_rest['locality'] == Locality[0]]
for i in range(1, len(Locality)):
lko_rest2 = lko_rest.copy().loc[lko_rest['locality'] == Locality[i]]
lko_rest1 = pd.concat([lko_rest1, lko_rest2])
lko_rest1.drop_duplicates(subset='name', keep='last', inplace=True)
lko_rest_locale = lko_rest1.copy()
lko_rest_locale = lko_rest_locale.loc[lko_rest_locale['average_cost_for_one'] <= x]
lko_rest_locale = lko_rest_locale.loc[lko_rest_locale['average_cost_for_one'] >= y]
lko_rest_locale['Start'] = lko_rest_locale['cuisines'].str.find(cuisine[0])
lko_rest_cui = lko_rest_locale.copy().loc[lko_rest_locale['Start'] >= 0]
for i in range(1, len(cuisine)):
lko_rest_locale['Start'] = lko_rest_locale['cuisines'].str.find(cuisine[i])
lko_rest_cu = lko_rest_locale.copy().loc[lko_rest_locale['Start'] >= 0]
lko_rest_cui = pd.concat([lko_rest_cui, lko_rest_cu])
lko_rest_cui.drop_duplicates(subset='name', keep='last', inplace=True)
if fav_rest != "":
favr = lko_rest.loc[lko_rest['name'] == fav_rest].drop_duplicates()
favr = pd.DataFrame(favr)
lko_rest3 = pd.concat([favr, lko_rest_cui])
lko_rest3.drop('Start', axis=1, inplace=True)
rest_selected = fav(lko_rest3)
else:
lko_rest_cui = lko_rest_cui.sort_values('scope', ascending=False)
rest_selected = lko_rest_cui.head(10)
return rest_selected
def calc(max_Price, people, min_Price, cuisine, locality):
rest_sugg = rest_rec(max_Price, people, min_Price, [cuisine], [locality])
rest_list1 = rest_sugg.copy().loc[:,
['name', 'address', 'locality', 'timings', 'aggregate_rating', 'url', 'cuisines']]
rest_list = pd.DataFrame(rest_list1)
rest_list = rest_list.reset_index()
rest_list = rest_list.rename(columns={'index': 'res_id'})
rest_list.drop('res_id', axis=1, inplace=True)
    rest_list = rest_list.T
ans = rest_list.to_dict()
res = [value for value in ans.values()]
return res
@app.route("/")
@app.route("/home", methods=['POST'])
def home():
return render_template('home.html')
@app.route("/search", methods=['POST'])
def search():
if request.method == 'POST':
people = int(request.form['people'])
min_Price = int(request.form['min_Price'])
        max_Price = int(request.form['max_Price'])
        cuisine1 = request.form['cuisine']
        locality1 = request.form['locality']
        res = calc(max_Price, people, min_Price, cuisine1, locality1)
return render_template('search.html', title='Search', restaurants=res)
#return res
else:
return redirect(url_for('home'))
if __name__ == '__main__':
app.run(debug=True)
|
import pydata_sphinx_theme
import datetime
import os
import sys
import cake
sys.path.append(os.path.abspath('../extensions'))
project = 'Documentation'
copyright = '2021, Mecha Karen'
author = 'Mecha Karen'
release = cake.__version__
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.napoleon',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.autosummary',
]
intersphinx_mapping = {
'py': ('https://docs.python.org/3', None),
}
templates_path = ['_templates']
exclude_patterns = ['*.md', '*.template']
html_theme = 'pydata_sphinx_theme'
html_logo = "_static/karen.png"
html_theme_options = {
"favicons": [
{
"rel": "icon",
"sizes": "16x16",
"href": "karen.png",
},
{
"rel": "icon",
"sizes": "32x32",
"href": "karen.png",
},
{
"rel": "apple-touch-icon",
"sizes": "180x180",
"href": "karen.png"
},
],
"icon_links": [
{
"name": "GitHub",
"url": "https://github.com/Mecha-Karen/",
"icon": "fab fa-github",
},
{
"name": "Discord",
"url": "https://discord.com/invite/Q5mFhUM",
"icon": "fab fa-discord"
},
{
"name": "Dashboard",
"url": "https://mechakaren.xyz/dashboard",
"icon": "fas fa-box"
}
],
"use_edit_page_button": True,
"collapse_navigation": False,
"show_prev_next": False,
"navigation_depth": 3,
"search_bar_text": "Search the docs ...",
"footer_items": ["copyright", "last-updated"],
}
html_context = {
"github_url": "https://github.com",
"github_user": "Mecha-Karen",
"github_repo": "Documentation",
"github_version": "main",
"doc_path": "source",
"last_updated": datetime.datetime.utcnow().strftime('%d/%m/%Y'),
}
html_sidebars = {
"**": ["search-field", "sidebar-nav-bs"],
}
html_static_path = ['_static']
html_css_files = [
'css/style.css',
'css/codeblocks.css'
]
html_title = "Mecha Karen"
suppress_warnings = [
"image.not_readable"
]
|
import random
from panda3d.core import LVector3f
import math
class Bone():
def __init__( self, joint, parent=None, static=False ):
self.axis = None
self.minAng = -math.pi
self.maxAng = math.pi
self.joint = joint
self.parent = parent
self.static = static
self.controlNode = None
self.exposedNode = None
#self.ikNode = None
self.children = []
if parent:
self.parent.addChild( self )
self.col = ( 0.2, 0.2, 0.5 )
def addChild( self, child ):
        if child not in self.children:
self.children.append( child )
|
#########################
# TCP server
#########################
"""
Flow:
- socket(): create a socket
- bind(): bind an IP and port
- listen(): make the socket passive so it can accept connections
- accept(): wait for a client connection
- recv/send: receive and send data
"""
from socket import *
# create the socket
tcpSerSocket = socket(AF_INET, SOCK_STREAM)
# bind the local address
address = ('', 8088)
tcpSerSocket.bind(address)
print('Listening for broadcast at ', tcpSerSocket.getsockname())
# a newly created socket is active by default; listen() makes it passive so it can accept connections
tcpSerSocket.listen()
# when a new client connects, accept() creates a new socket dedicated to that client
# newSocket serves this client
# tcpSerSocket stays free to wait for other new clients
newSocket, clientAddress = tcpSerSocket.accept()
# receive data from the client, at most 1024 bytes
receive_data = newSocket.recv(1024)
print('Received data:', receive_data.decode("UTF-8"))
# send some data back to the client
newSocket.send("thank you !".encode())
# close the per-client socket; once closed, this client cannot be served again without reconnecting
newSocket.close()
# close the listening socket; once closed, the program cannot accept any new client connections
tcpSerSocket.close()
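# For reference, a matching client could look like the sketch below (illustrative
# only, meant to be run as a separate script while the server above is listening):
#
#   from socket import socket, AF_INET, SOCK_STREAM
#   tcpClientSocket = socket(AF_INET, SOCK_STREAM)
#   tcpClientSocket.connect(('127.0.0.1', 8088))
#   tcpClientSocket.send('hello'.encode())
#   print(tcpClientSocket.recv(1024).decode('UTF-8'))
#   tcpClientSocket.close()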
|
import csv
from urllib.parse import quote
import webbrowser
from . import geocoder_googs as geocoder
GOOGLE_STATIC_MAPS_ENDPOINT = (
'https://maps.googleapis.com/maps/api/staticmap?size=1280x720&markers=')
# Compute the max number of markers I can safely add before hitting the Static Map API char limit.
# String of addition at the end is composed of the following:
# Length of urlencoded delimiters: comma (lat/lon) and pipe (marker points)
# 2 numbers per point consisting of:
# 1 - Sign character
# 3 - Max number of digits used by integer part
# 1 - Decimal
# 7 - Max number of digits used by fractional part (Est. based on points used)
MAX_EST_MARKER_COUNT = (2048 - len(GOOGLE_STATIC_MAPS_ENDPOINT)) / (
len(quote(',|')) + 2 * (1 + 3 + 1 + 7))
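# Worked through roughly: quote(',|') is 6 chars ('%2C%7C'), each marker needs up to
# 2 * (1 + 3 + 1 + 7) = 24 chars, and the endpoint above is ~69 chars, so about
# (2048 - 69) / 30, i.e. roughly 65 markers fit per URL.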
def exportMapsUrls():
marker_data = [[]] # Generate a sanity-check list of Google Static Map urls
    with open(geocoder.SCHOOL_GEODATA_FILE, 'r', newline='') as csv_in:
school_data = csv.DictReader(csv_in)
for school in school_data:
if len(marker_data[-1]) >= MAX_EST_MARKER_COUNT:
marker_data.append([])
marker_data[-1].append('%s,%s' % (school['Latitude'], school['Longitude']))
for marker_list in marker_data:
map_url = GOOGLE_STATIC_MAPS_ENDPOINT + '|'.join(marker_list)
# Verify they will load in a pretty way
webbrowser.open_new_tab(map_url)
if __name__ == '__main__':
exportMapsUrls()
|
# -*- coding: utf-8 -*-
import re
import json
import time
import random
import collections
import requests
from bs4 import BeautifulSoup
# http://www.usagain.com/find-a-collection-bin
# Post with data:
# cityInput=&stateInput=%23&zipInput=11207&Submit.x=48&Submit.y=4
class BinLocation():
@staticmethod
def from_dict( data ):
return BinLocation(
data['name'],
data['address']
)
def __init__( self, name, address ):
self.name = name
self.address = address
def as_dict( self ):
return dict(
name = self.name,
address = self.address
)
# For creating a set
def __hash__(self): return hash(self.name+self.address)
def __eq__(self, other):
return (
self.name == other.name and
self.address == other.address
)
def __ne__(self, other): return not self.__eq__( other )
def getBinsForZip( zipCode ):
# We get a 403 if we don't use the User-Agent
r = requests.post(
'http://www.usagain.com/find-a-collection-bin',
data = {
'cityInput': '',
'stateInput': '#',
'zipInput': zipCode,
'Submit.x': 0,
'Submit.y': 4
}
)
if r.status_code != 200:
raise RuntimeError( '%s' % r )
soup = BeautifulSoup(r.text, 'html.parser')
table = soup.find_all( 'table', attrs={
'summary': 'USAgain Collection Sites'
} )[0].select('tbody')[0]
locations = []
for row in table.select('tr'):
name = re.sub( '\\s+', ' ', row.select('th')[0].getText() ).strip()
address = re.sub( '\\s+', ' ', row.select('td')[0].getText() ).strip()
locations.append( BinLocation(name,address) )
return locations
if __name__ == '__main__':
def prettyJson( obj ):
return json.dumps( obj, sort_keys=True, indent=2, separators=(',', ':') )
def getBinsNY():
import zip_codes
codes = zip_codes.getZipCodesForState('NY')
results = set()
for i in range(len(codes)):
print 'Checking %s [%s of %s], total results: %s' % (
codes[i],
i,
len(codes),
len(results)
)
try:
zipResults = getBinsForZip( codes[i] )
#print prettyJson( [ b.as_dict() for b in zipResults ] )
except Exception as e:
print e
else:
#results.extend( zipResults )
for x in zipResults:
results.add( x )
time.sleep( 1.0+random.random()*5.0 )
'''
print 'Found %s results total' % len( results )
sResults = set( results )
print 'Found %s unique results' % len(sResults)
'''
dictionaries = [ s.as_dict() for s in results ]
with open( 'usagain-bins.json', 'wb' ) as f:
f.write( prettyJson( dictionaries ) )
# If no cached dataset
def firstTime():
getBinsNY()
def secondTime():
# If you've already downloaded the names/addresses
with open( 'usagain-bins.json', 'rb' ) as f:
bins = json.loads( f.read() )
from shared.geocode import toGeoJson
geojson = toGeoJson( bins, lambda x: x['address'] )
with open( 'usagain.geojson', 'wb' ) as f:
f.write( prettyJson( geojson ) )
# Run one (or both)
#firstTime()
#secondTime()
|
#!/usr/bin/env python
import os
import click
import requests
from time import sleep
from models.trade import Trade
from models.stop import Stop
from pymongo import *
@click.group()
def cli():
pass
@cli.command()
@click.option('--count', default=1, help='Number of greetings.')
@click.option('--name', prompt='Your name',
help='The person to greet.')
def hello(count, name):
"""Simple program that greets NAME for a total of COUNT times."""
for x in range(count):
click.echo('Hello %s!' % name)
@cli.command()
@click.option('--quote', '-q', default='AG0', help='index of futures quote, could be a comma splitted array')
@click.option('--repeat','-r', default=True, help='refresh infinitely')
@click.option('--save', '-s', default=True, help='save ticks to mongo db')
def get_quote(quote, repeat, save):
""" list of codes:
郑商所:
TA0 PTA连续
OI0 菜油连续
RS1809 菜籽1809
RM0 菜粕连续
ZC0 动力煤连续
WH0 强麦连续
JR1805 粳稻1805
SR0 白糖连续
CF0 棉花连续
RI0 早籼稻连续
MA0 郑醇连续
FG0 玻璃连续
LR0 晚籼稻连续
SF0 硅铁连续
SM0 锰硅连续
CY1809 棉纱1809
大连商品交易所:
V0 PVC连续
P0 棕榈连续
B0 豆二连续
M0 豆粕连续
I0 铁矿石连续
JD0 鸡蛋连续
L0 塑料连续
PP0 PP连续
BB0 胶合板连续
Y0 豆油连续
C0 玉米连续
A0 豆一连续
J0 焦炭连续
JM0 焦煤连续
CS0 玉米淀粉连续
上海期货交易所:
FU0 燃油连续
AL0 沪铝连续
RU0 橡胶连续
ZN0 沪锌连续
CU0 沪铜连续
AU0 黄金连续
RB0 螺纹钢连续
WR0 线材连续
PB0 沪铅连续
AG0 白银连续
BU0 沥青连续
HC0 热轧卷板连续
SN0 沪锡连续
NI0 沪镍连续
中国金融期货交易所
IF0 期指0
TF0 TF0
更多请参考: http://vip.stock.finance.sina.com.cn/quotes_service/view/qihuohangqing.html
"""
response = requests.get("http://hq.sinajs.cn/list="+quote)
client = MongoClient()
db = client.futuresticks
collection = db.first_product
while True:
linenum = 0
for arrraw in response.text.splitlines():
arr = arrraw.split('=')[1].strip(';').strip('"').split(',')
print(arr[0]+","+arr[16]+"," + arr[15] + "," + arr[17] + "," + arr[5] + " " + arr[8])
if save:
collection.insert({'code':quote, 'tick': arr[8]})
linenum = linenum + 1
if not repeat:
break
sleep(1)
# clear buffer#
# "\033[F" move one line up, '\r' move cursor back to beginning of line
print("\033[F" * (linenum + 1) + '\r')
@cli.command()
@click.option('--code', '-c', help='code of futures product')
@click.option('--limit', '-l', help='when to alert. a positive number means top limit, a minus number means bottom limit')
def watch(code, limit):
response = requests.get("http://hq.sinajs.cn/list="+code)
notified = False
while True:
linenum = 1
for arrraw in response.text.splitlines():
arr = arrraw.split('=')[1].strip(';').strip('"').split(',')
print(arr[0]+","+arr[16]+"," + arr[15] + "," + arr[17] + "," + arr[5] + " " + arr[8])
# if save:
# collection.insert({'code':quote, 'tick': arr[8]})
            if float(limit) < 0 and float(arr[8]) + float(limit) < 0:
                print("break bottom limit: " + str(-float(limit)))
                if not notified:
                    notify(title='Bottom limit hit',
                           subtitle='it is going down',
                           message='please pay attention to ' + arr[0] + ', it has hit bottom limit ' + str(-float(limit)) + " with value: " + arr[8])
                    notified = True
            elif float(limit) > 0 and float(arr[8]) - float(limit) > 0:
                print('hit up limit: ' + limit)
                if not notified:
                    notify(title='Up limit hit',
                           subtitle='it is going up',
                           message='please pay attention to ' + arr[0] + ', it has hit up limit ' + limit + ' with value ' + arr[8])
                    notified = True
else:
print('watching...')
if notified:
notified = False
linenum = linenum + 1
sleep(1)
# clear buffer#
# "\033[F" move one line up, '\r' move cursor back to beginning of line
print("\033[F" * (linenum + 1) + '\r')
@cli.command()
@click.option('--code', '-c', help='code of futures product')
@click.option('--num', '-n', help='num of futures')
@click.option('--price', '-p', help='price of futures product')
def trade_product(code, num, price):
Trade.trade(code, num, price)
@cli.command()
@click.option('--code', '-c', help='code of futures product')
@click.option('--stop', '-s', help='stop loss limit')
@click.option('--num', '-n', help='num of product to sell, positive number for stop profit, negative for stop loss')
def stop_order(code, stop, num):
Stop.stop_order(code, stop, num)
pass
# The notifier function, https://stackoverflow.com/questions/17651017/python-post-osx-notification/17651702#17651702
# (defined before the entry point so the watch command can actually call it)
def notify(title, subtitle, message):
    t = '-title {!r}'.format(title)
    s = '-subtitle {!r}'.format(subtitle)
    m = '-message {!r}'.format(message)
    os.system('terminal-notifier {}'.format(' '.join([m, t, s])))
if __name__ == '__main__':
    cli()
|
import streamlit as st
import pandas as pd
import numpy as np
import re
import os
import nltk
import joblib
from nltk.corpus import stopwords
nltk.download('stopwords')
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow import keras
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import string
import keras
from keras.preprocessing import text,sequence
import dill
voc_size = 10000
news_title = ""
sent_length= 50
model = keras.models.load_model('model/model.h5')
st.title("Fake News Detection")
nav = st.sidebar.radio("Navigation",["HOME", "CHECK YOUR NEWS"])
if nav == "HOME":
st.image("images//news.jpg", width = 500)
st.header("Many a times these days we find ourselves caught up with misinformation due to coming in touch with fake news on the internet. This application will help you to stay away from such scams. Hope you find it useful. Thanks for using!")
st.subheader("This video shows the results of a research conducted by MIT, showing the spread and impact of FAKE NEWS!!")
video_file = open('images//Video.mp4', 'rb')
video_bytes = video_file.read()
st.video(video_bytes)
elif nav == "CHECK YOUR NEWS":
st.header("CHECK YOUR NEWS HERE!")
news_title = st.text_area('Enter your news title below')
X = list()
X.append(news_title)
tokenizer = open('model/tokenizer.pkl', 'rb')
tokenized = joblib.load(tokenizer)
max_len = 300
tokenized_pred = tokenized.texts_to_sequences(X)
X = sequence.pad_sequences(tokenized_pred, maxlen=max_len)
prediction = model.predict_classes(X)
if st.button("Detect"):
if prediction[0] == 1:
st.success("Your news is FAKE!")
else:
st.success("Your news is REAL!")
|
#!/usr/bin/env python3
# aids manually identifying sticons...
import csv
import os
dload_dir = '_sticon'
keywords_file = 'sticon_keywords.tsv'
with open(keywords_file) as tsvfile:
reader = csv.reader(tsvfile, delimiter='\t')
for keyword, path in reader:
folder = os.path.dirname(path)
path = path.replace('.png', '')
sticker_pack = folder.replace('_sticon/', '')
if os.path.exists(path + '.png'):
            os.rename(path + '.png', path + '_' + keyword + '.png')
|
import os
from unittest.mock import patch
from pytest import fixture, mark, raises
from configpp.evolution import Evolution, EvolutionException
from configpp.evolution.revision import Revision, gen_rev_number
from configpp.soil import YamlTransform
from voidpp_tools.mocks.file_system import mockfs
from .utils import FileSystem, mock_import, SpiderManTransform
@fixture()
def fs():
template_file_path = Revision.ORIGINAL_TEMPLATE_FILE_PATH
with open(template_file_path) as f:
template_file_content = f.read()
data = {
'evolution': {
'versions': {},
'script.py.tmpl': template_file_content,
}
}
fs = FileSystem(data)
with fs.mock():
with mock_import(fs):
yield fs
@fixture()
def base_config():
return {
'script_location': 'evolution',
'revision_template_file': 'evolution/script.py.tmpl',
'configpp_urls': {},
}
@fixture(scope = 'module')
def yaml():
return YamlTransform()
def test_generate_valid_rev():
rev = gen_rev_number()
filename = '%s_teve.py' % rev
assert Revision.FILENAME_PATTERN.match(filename)
def test_first_rev_no_uri(fs, base_config):
ev = Evolution()
ev.load(base_config)
assert ev.revision('test') is None
def test_write_config_file_after_first_revision_created_with_new_config(fs: FileSystem, base_config, yaml: YamlTransform):
fs.set_data('/evolution.yaml', yaml.serialize(base_config))
ev = Evolution()
ev.load()
ev.revision('test1', 'configpp://app.yaml')
cfg_data = yaml.deserialize(fs.get_data('/evolution.yaml'))
assert len(cfg_data['configpp_urls']) == 1
assert 'head' in cfg_data['configpp_urls']
assert cfg_data['configpp_urls']['head'] == 'configpp://app.yaml'
def test_write_config_file_after_nth_revision_created_with_new_config(fs: FileSystem, base_config, yaml: YamlTransform):
fs.set_data('/evolution.yaml', yaml.serialize(base_config))
ev = Evolution()
ev.load()
r1 = ev.revision('test1', 'configpp://app.yaml')
r2 = ev.revision('test2', 'configpp://core.yaml&logger.json@app1')
cfg_data = yaml.deserialize(fs.get_data('/evolution.yaml'))
assert len(cfg_data['configpp_urls']) == 2
assert 'head' in cfg_data['configpp_urls']
assert r1.id in cfg_data['configpp_urls']
assert cfg_data['configpp_urls']['head'] == 'configpp://core.yaml&logger.json@app1'
def test_first_rev_single_config(fs: FileSystem, base_config):
ev = Evolution()
ev.load(base_config)
rev = ev.revision('test', 'configpp://app.yaml')
data = fs.get_data('evolution/versions/' + rev.filename)
revision_file_lines = data.split('\n')
assert "from configpp.soil.transform import YamlTransform" in revision_file_lines
assert "from configpp.soil.transport import Transport" in revision_file_lines
assert " config = Config('app.yaml', YamlTransform(), Transport())" in revision_file_lines
assert " return config" in revision_file_lines
def test_first_rev_group_config(fs: FileSystem, base_config):
ev = Evolution()
ev.load(base_config)
rev = ev.revision('test', 'configpp://core.yaml&logger.json@app1')
rev_file_data = fs.get_data('evolution/versions/' + rev.filename)
revision_file_lines = rev_file_data.split('\n')
assert "from configpp.soil.transform import JSONTransform, YamlTransform" in revision_file_lines
assert "from configpp.soil.transport import Transport" in revision_file_lines
assert " core = GroupMember('core.yaml', YamlTransform())" in revision_file_lines
assert " logger = GroupMember('logger.json', JSONTransform())" in revision_file_lines
assert " config = Group('app1', [core, logger], Transport())" in revision_file_lines
assert " return config" in revision_file_lines
def test_single_config_not_changed(fs: FileSystem, base_config):
ev = Evolution()
ev.load(base_config)
ev.revision('test1', 'configpp://app.yaml')
rev = ev.revision('test2')
data = fs.get_data('evolution/versions/' + rev.filename)
lines = data.split('\n')
assert "from configpp.soil.transform import YamlTransform" in lines
assert "from configpp.soil.transport import Transport" in lines
assert "def upgrade(config: Config):" in lines
def test_group_config_not_changed(fs: FileSystem, base_config):
ev = Evolution()
ev.load(base_config)
ev.revision('test1', 'configpp://core.yaml&logger.json@app1')
rev = ev.revision('test2')
data = fs.get_data('evolution/versions/' + rev.filename)
lines = data.split('\n')
assert "from configpp.soil.transform import JSONTransform, YamlTransform" in lines
assert "from configpp.soil.transport import Transport" in lines
assert "def upgrade(core: GroupMember, logger: GroupMember, config: Group):" in lines
def test_single_config_change_to_group_config(fs: FileSystem, base_config):
ev = Evolution()
ev.load(base_config)
ev.revision('test1', 'configpp://app1.yaml')
rev = ev.revision('test2', 'configpp://core.yaml&logger.json@app1')
data = fs.get_data('evolution/versions/' + rev.filename)
lines = data.split('\n')
assert "from configpp.soil.transform import JSONTransform, YamlTransform" in lines
assert "from configpp.soil.transport import Transport" in lines
assert "def upgrade(config: Config):" in lines
assert " core = GroupMember('core.yaml', YamlTransform())" in lines
assert " logger = GroupMember('logger.json', JSONTransform())" in lines
assert " new_config = Group('app1', [core, logger], Transport())" in lines
assert " new_config.location = config.location" in lines
assert " return new_config" in lines
def test_group_config_add_new_member(fs: FileSystem, base_config):
ev = Evolution()
ev.load(base_config)
ev.revision('test1', 'configpp://core.yaml&logger.json@app1')
rev = ev.revision('test2', 'configpp://core.yaml&logger.json&clients.json@app1')
data = fs.get_data('evolution/versions/' + rev.filename)
lines = data.split('\n')
assert " clients = GroupMember('clients.json', JSONTransform())" in lines
assert " clients.data = {} # put initial data here" in lines
assert " config.add_member(clients)" in lines
def test_group_config_del_member(fs: FileSystem, base_config):
ev = Evolution()
ev.load(base_config)
ev.revision('test1', 'configpp://core.yaml&logger.json&clients.json@app1')
rev = ev.revision('test2', 'configpp://core.yaml&logger.json@app1')
data = fs.get_data('evolution/versions/' + rev.filename)
lines = data.split('\n')
assert " del config.members['clients.json']" in lines
def test_single_config_change_name(fs: FileSystem, base_config):
ev = Evolution()
ev.load(base_config)
ev.revision('test1', 'configpp://app.yaml')
rev = ev.revision('test2', 'configpp://app2.yaml')
data = fs.get_data('evolution/versions/' + rev.filename)
lines = data.split('\n')
assert " config.name = 'app2.yaml'" in lines
def test_group_config_change_name(fs: FileSystem, base_config):
ev = Evolution()
ev.load(base_config)
ev.revision('test1', 'configpp://core.yaml&logger.json@app1')
rev = ev.revision('test2', 'configpp://core.yaml&logger.json@app2')
data = fs.get_data('evolution/versions/' + rev.filename)
lines = data.split('\n')
assert " config.name = 'app2'" in lines
def test_group_config_member_change_transform(fs: FileSystem, base_config):
ev = Evolution()
ev.load(base_config)
ev.revision('test1', 'configpp://core.yaml&logger.json@app1')
rev = ev.revision('test2', 'configpp://core.yaml&logger.yaml@app1')
data = fs.get_data('evolution/versions/' + rev.filename)
lines = data.split('\n')
print(data)
assert " logger.transform = YamlTransform()" in lines
def test_single_config_change_transform(fs: FileSystem, base_config):
ev = Evolution()
ev.load(base_config)
ev.revision('test1', 'configpp://app.yaml')
rev = ev.revision('test2', 'configpp://app.json')
data = fs.get_data('evolution/versions/' + rev.filename)
lines = data.split('\n')
assert " config.transform = JSONTransform()" in lines
def test_use_custom_transform_single_first_rev(fs: FileSystem, base_config):
ev = Evolution()
ev.load(base_config)
rev = ev.revision('test', 'configpp://app.yaml%test_evolution.utils:SpiderManTransform')
data = fs.get_data('evolution/versions/' + rev.filename)
revision_file_lines = data.split('\n')
assert "from test_evolution.utils import SpiderManTransform" in revision_file_lines
assert " return config" in revision_file_lines
|
#!/usr/bin/env python3
############################################################################################
# #
# Program purpose: Finds whether a given string starts with a given character using #
# lambda. #
# Program Author : Happi Yvan <ivensteinpoker@gmail.com> #
# Creation Date : February 04, 2020 #
# #
############################################################################################
def obtain_user_data(input_mess) -> str:
user_data, valid = '', False
while not valid:
try:
user_data = input(input_mess)
if len(user_data) == 0:
raise ValueError("Oops, data needed")
valid = True
except ValueError as ve:
print(f'[ERROR]: {ve}')
return user_data
if __name__ == "__main__":
    lambda_func = lambda data, search: data.startswith(search)
main_str = obtain_user_data(input_mess='Enter main string: ')
search_str = obtain_user_data(input_mess='Enter search string: ')
print(f"Search 'startswith' found: {lambda_func(main_str, search_str)}")
|
__all__ = ['MACD', 'KDJ', 'BIAS', 'BBANDS', 'RSI', 'WR']
def MACD(series, short=12, long=26, mid=9):
ema_short = series.ewm(adjust=False, span=short, ignore_na=True).mean()
ema_long = series.ewm(adjust=False, span=long, ignore_na=True).mean()
ema_diff = (ema_short-ema_long)
ema_dea = ema_diff.ewm(adjust=False, span=mid, ignore_na=True).mean()
macd = 2*(ema_diff-ema_dea)
return macd
def KDJ(data, N=9, M=2):
lowList = data['low'].rolling(N).min().fillna(value=data['low'].expanding().min())
highList = data['high'].rolling(N).max().fillna(value=data['high'].expanding().max())
rsv = (data['close'] - lowList) / (highList - lowList) * 100
kdj_k = rsv.ewm(alpha=1/M, adjust=False).mean()
kdj_d = kdj_k.ewm(alpha=1/M, adjust=False).mean()
kdj_j = 3.0 * kdj_k - 2.0 * kdj_d
return {'kdj_k':kdj_k, 'kdj_d':kdj_d, 'kdj_j':kdj_j}
def BIAS(series, window):
"""乖离率,描述收盘价距离均线的百分比,常用来衡量收盘价偏离程度。"""
bias = (series - series.rolling(window).mean())/series.rolling(window).mean() * 100
return bias
def BBANDS(series, window):
middleband = series.rolling(window).mean()
upperband = middleband + 2 * series.rolling(window).std()
lowerband = middleband - 2 * series.rolling(window).std()
return {'middleband':middleband, 'upperband':upperband, 'lowerband':lowerband}
def RSI(series, N):
    '''Compute the RSI (Relative Strength Index).'''
diff = series.diff().fillna(0)
rsi = diff.clip(lower=0).ewm(alpha=1/N, adjust=False).mean()/(diff.abs().ewm(alpha=1/N, adjust=False).mean())*100
return rsi
def WR(data, window):
    '''Compute the Williams %R indicator.'''
a = data['high'].rolling(window).max() - data['close']
b = data['high'].rolling(window).max() - data['low'].rolling(window).min()
c = data['high'].expanding().max() - data['close']
d = data['high'].expanding().max() - data['low'].expanding().min()
wr = (a/b).fillna(c/d)*100
return wr
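# A minimal usage sketch (assumption: callers pass a pandas DataFrame with
# 'close', 'high' and 'low' columns; the random walk below is made-up demo data).
if __name__ == '__main__':
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(0)
    close = pd.Series(100 + rng.normal(0, 1, 250).cumsum())
    data = pd.DataFrame({
        'close': close,
        'high': close + rng.random(250),
        'low': close - rng.random(250),
    })
    print(MACD(data['close']).tail())
    print(KDJ(data)['kdj_j'].tail())
    print(BIAS(data['close'], 6).tail())
    print(BBANDS(data['close'], 20)['upperband'].tail())
    print(RSI(data['close'], 14).tail())
    print(WR(data, 14).tail())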
|
import requests
# Please NOTE: In this sample we're assuming Cloud Api Server is hosted at "https://localhost".
# If it's not, then please replace it with your hosting url.
url = "<insert presignedUrl generated by https://localhost/file/upload/get-presigned-url >"
payload = {}
files = [
('file', open('/Users/em/Downloads/logo.png','rb'))
]
headers = {
'x-api-key': '{{x-api-key}}'
}
response = requests.request("PUT", url, headers=headers, data = payload, files = files)
print(response.text.encode('utf8'))
|
import pandas as pd
from sqlalchemy import create_engine
from influxdb import InfluxDBClient
import time
def connectSQL():
connection_str = 'mssql+pyodbc://royg:Welcome1@SCADA'
engine = create_engine(connection_str)
conn = engine.connect()
return conn
def getData(conn,interval):
if (interval==1):
tabname='data_values_min_4_2017'
else:
tabname='data_values_'+str(interval)+'min_4_2017'
queryResult = conn.execute('''
-- SELECT TOP 10 RTRIM(LTRIM(REPLACE(REPLACE(dd.name,' ','\ '),',','\,'))) measurement,
SELECT LTRIM(dd.name) measurement,
CAST(dd.osi_key AS VARCHAR) AS [key],
CAST(dd.station_id AS VARCHAR) site,
SUBSTRING(dd.[name],1,1) array,
dt.description data_type,
'''+str(interval)+''' interval,
CAST(VALUE AS VARCHAR(30)) value,
CONVERT(VARCHAR(19),d.u_time,126)+'Z' timestamp
FROM [dbo].'''+tabname+''' d WITH(NOLOCK)
JOIN tempdb..dd1 dd
ON dd.osi_key = d.osi_key
JOIN dbo.stations s
ON s.station_id = dd.station_id
JOIN dbo.data_types dt
ON dt.data_type = d.data_type
-- WHERE u_time BETWEEN '2017-04-19 00:00:00' and '2017-04-19 01:00:00'
WHERE u_time > DATEADD(mi,-3,CURRENT_TIMESTAMP)
''')
pNodeIDsDF = pd.DataFrame(queryResult.fetchall())
    if not pNodeIDsDF.empty:
pNodeIDsDF.columns = queryResult.keys()
return pNodeIDsDF
c=connectSQL()
host = '50.23.122.133'
port = 8086
user = 'roy'
password = 'Kaftor'
dbname = 'w209'
client = InfluxDBClient(host, port, user, password, dbname)
rc=0
while(True):
for interval in (15,5,1):
df = getData(c, interval)
for node in df.itertuples():
# print(node[8])
json_body = [
{
"measurement": node[1],
"tags": {
"key": node[2],
"site": node[3],
"array": node[4],
"data_type": node[5],
"interval": node[6]
},
"time": node[8],
"fields": {
"value": float(node[7]) # str(float(node[7]))
}
}
]
rc = client.write_points(json_body, time_precision='s')
print('1 row written for interval {0}'.format(interval))
if (rc == 0):
print("reconnecting...")
c = connectSQL()
client = InfluxDBClient(host, port, user, password, dbname)
if (rc == 1):
print('{0} rows written for interval {1}'.format(df.shape[0],interval))
time.sleep(60)
|
import configparser
import logging
import os
import pathlib
import sys
from typing import Union
from selenium.webdriver import Chrome, Remote
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.remote_connection import ChromeRemoteConnection
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
logger = logging.getLogger(__name__)
def get_directories(root: Union[str, os.PathLike]):
root = pathlib.Path(root)
directories = [elem for elem in root.iterdir() if elem.is_dir()]
if not directories:
logger.warning(f"No directory found inside {root}")
return []
logger.info(f"Found {len(directories)} dirs inside {root}")
return directories
def get_options(**kwargs):
global config
options = Options()
options.add_argument("no-sandbox")
options.add_argument("ignore-certificate-errors")
options.add_argument("allow-running-insecure-content")
options.add_argument("disable-dev-shm-usage")
ua = kwargs.get("user_agent")
if ua:
options.add_argument(f"user-agent={ua}")
headless = kwargs.get("headless", config["selenium"]["headless"])
if headless:
options.add_argument("headless")
if sys.platform in ("win32", "cygwin"):
# fix for windows platforms
options.add_argument("disable-gpu")
return options
def get_config(cfg_fp="moodle.cfg"):
parser = configparser.ConfigParser()
if not parser.read(cfg_fp):
err = f"No such file or directory: {cfg_fp}"
logger.error(err)
raise EnvironmentError(err)
# get selenium options
env = parser.get("selenium", "env", fallback="local").lower()
path = parser.get("selenium", "path", fallback="chromedriver").lower()
url = parser.get(
"selenium", "url", fallback="http://selenium-hub:4444/wd/hub"
).lower()
headless = parser.getboolean("selenium", "headless", fallback=True)
# get moodle options
# credentials section
username = parser.get("moodle:credentials", "username")
password = parser.get("moodle:credentials", "password")
# urls section
login = parser.get("moodle:urls", "login")
course = parser.get("moodle:urls", "course")
module = parser.get("moodle:urls", "module")
base_name = parser.get("upload:file_parameters", "base_name")
base_name_in_course = parser.get("upload:file_parameters", "base_name_in_course")
if any(not field for field in (username, password)):
msg = "Username or password cannot be empty!"
logger.error(msg)
raise ValueError(msg)
if env not in ("local", "remote"):
err = "Invalid selenium env provided!"
logger.error(err)
raise ValueError(err)
return {
"credentials": dict(username=username, password=password),
"site": dict(login=login, course=course, module=module),
"selenium": dict(env=env, path=path, url=url, headless=headless),
"file_parameters": dict(
base_name_in_course=base_name_in_course, base_name=base_name
),
}
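# A sample moodle.cfg matching the sections read above (all values are
# placeholders, not from the original project):
#
#   [selenium]
#   env = local
#   path = chromedriver
#   url = http://selenium-hub:4444/wd/hub
#   headless = true
#
#   [moodle:credentials]
#   username = my_user
#   password = my_password
#
#   [moodle:urls]
#   login = https://moodle.example.org/login/index.php
#   course = https://moodle.example.org/course/view.php?id=1
#   module = https://moodle.example.org/mod/resource/view.php?id=1
#
#   [upload:file_parameters]
#   base_name = report
#   base_name_in_course = report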
# read one time and then use it
config = get_config()
def get_driver(**kwargs):
"""Get a Selenium Chromedriver. Options can be passed
as kwargs, or in the configuration file"""
global config
options = get_options(**kwargs)
path = kwargs.get("path", config["selenium"]["path"])
url = kwargs.get("url", config["selenium"]["url"])
env = config["selenium"]["env"]
if env == "local":
driver = Chrome(executable_path=path, options=options)
elif env == "remote":
driver = Remote(
command_executor=ChromeRemoteConnection(remote_server_addr=url),
desired_capabilities=DesiredCapabilities.CHROME,
options=options,
)
else:
# cannot enter this branch
raise AssertionError
driver.maximize_window()
return driver
def change_user_agent(driver, new_user_agent: str):
"""Dinamically change chromedriver user-agent, and then
assert that the change occurred.
Raise an AssertionError if this is false."""
cmd = "Network.setUserAgentOverride"
cmd_args = dict(userAgent=new_user_agent)
driver.execute("executeCdpCommand", {"cmd": cmd, "params": cmd_args})
actual_user_agent = str(driver.execute_script("return navigator.userAgent;"))
assert actual_user_agent == new_user_agent, "Cannot set user-agent!"
logger.info(f"Changed user-agent to {new_user_agent}")
def test_environment(**kwargs):
"""Determine if current environment is correctly set"""
try:
get_driver(**kwargs).quit()
except Exception as err:
logger.error(str(err))
raise err
else:
logger.info("Selenium driver found!")
|
from typing import List
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
d = {}
for i, n in enumerate(nums):
try:
d[n].append(i)
except KeyError:
d[n] = [i]
for k in d:
if target - k in d:
if k == target - k:
if len(d[k]) == 2:
return [d[k][0], d[k][1]]
else:
return [d[k][0], d[target-k][0]]
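# A quick sanity check with hypothetical inputs (not part of the original snippet).
if __name__ == '__main__':
    print(Solution().twoSum([2, 7, 11, 15], 9))  # -> [0, 1]
    print(Solution().twoSum([3, 3], 6))          # -> [0, 1]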
|
#!/usr/bin/env python
from flask import Markup
import xlrd
class CopyException(Exception):
pass
class Error(object):
"""
An error object that can mimic the structure of the COPY data, whether the error happens at the Copy, Sheet or Row level. Will print the error whenever it gets repr'ed.
"""
_error = ''
def __init__(self, error):
self._error = error
def __getitem__(self, i):
return self
def __iter__(self):
return iter([self])
def __len__(self):
return 1
def __repr__(self):
return self._error
class Row(object):
"""
Wraps a row of copy for error handling.
"""
_sheet = None
_row = []
_columns = []
_index = 0
def __init__(self, sheet, row, columns, index):
self._sheet = sheet
self._row = row
self._columns = columns
self._index = index
def __getitem__(self, i):
"""
Allow dict-style item access by index (column id), or by column name.
"""
if isinstance(i, int):
if i >= len(self._row):
return Error('COPY.%s.%i.%i [column index outside range]' % (self._sheet.name, self._index, i))
return Markup(self._row[i])
if i not in self._columns:
return Error('COPY.%s.%i.%s [column does not exist in sheet]' % (self._sheet.name, self._index, i))
return Markup(self._row[self._columns.index(i)])
def __iter__(self):
return iter(self._row)
def __len__(self):
return len(self._row)
def __repr__(self):
if 'value' in self._columns:
return Markup(self._row[self._columns.index('value')])
return Error('COPY.%s.%s [no value column in sheet]' % (self._sheet.name, self._row[self._columns.index('key')]))
class Sheet(object):
"""
Wrap copy text, for a single worksheet, for error handling.
"""
name = None
_sheet = []
_columns = []
def __init__(self, name, data, columns):
self.name = name
self._sheet = [Row(self, [row[c] for c in columns], columns, i) for i, row in enumerate(data)]
self._columns = columns
def __getitem__(self, i):
"""
Allow dict-style item access by index (row id), or by row name ("key" column).
"""
if isinstance(i, int):
if i >= len(self._sheet):
return Error('COPY.%s.%i [row index outside range]' % (self.name, i))
return self._sheet[i]
if 'key' not in self._columns:
return Error('COPY.%s.%s [no key column in sheet]' % (self.name, i))
for row in self._sheet:
if row['key'] == i:
return row
return Error('COPY.%s.%s [key does not exist in sheet]' % (self.name, i))
def __iter__(self):
return iter(self._sheet)
def __len__(self):
return len(self._sheet)
class Copy(object):
"""
Wraps copy text, for multiple worksheets, for error handling.
"""
_filename = ''
_copy = {}
def __init__(self, filename='data/copy.xls'):
self._filename = filename
self.load()
def __getitem__(self, name):
"""
Allow dict-style item access by sheet name.
"""
if name not in self._copy:
return Error('COPY.%s [sheet does not exist]' % name)
return self._copy[name]
def load(self):
"""
Parses the downloaded .xls file and writes it as JSON.
"""
try:
book = xlrd.open_workbook(self._filename)
except IOError:
raise CopyException('"%s" does not exist. Have you run "fab update_copy"?' % self._filename)
for sheet in book.sheets():
columns = sheet.row_values(0)
rows = []
for n in range(0, sheet.nrows):
# Sheet takes array of rows
rows.append(dict(zip(columns, sheet.row_values(n))))
self._copy[sheet.name] = Sheet(sheet.name, rows, columns)
def json(self):
"""
Serialize the copy as JSON.
"""
import json
obj = {}
for name, sheet in self._copy.items():
if 'key' in sheet._columns:
obj[name] = {}
for row in sheet:
obj[name][row['key']] = row['value']
else:
obj[name] = []
for row in sheet:
obj[name].append(row._row)
return json.dumps(obj)
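# A minimal usage sketch (assumes a workbook at data/copy.xls with a sheet named
# 'content' that has 'key' and 'value' columns; those names are illustrative only).
if __name__ == '__main__':
    COPY = Copy('data/copy.xls')
    print(COPY['content']['headline'])  # Markup of the matching 'value' cell, or an Error
    print(COPY.json())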
|
"""
Library to fetch and parse the public Princeton COS courses history as a
Python dictionary or JSON data source.
"""
__version__ = '1.0.0'
__author__ = "Jérémie Lumbroso <lumbroso@cs.princeton.edu>"
__all__ = [
"CosCourseInstance",
"CosCourseTerm",
"fetch_cos_courses",
]
from princeton_scraper_cos_courses.parsing import CosCourseInstance
from princeton_scraper_cos_courses.parsing import CosCourseTerm
from princeton_scraper_cos_courses.cos_courses import fetch_cos_courses
version_info = tuple(int(v) if v.isdigit() else v for v in __version__.split('.'))
|
import glob
import cv2
from moviepy.editor import VideoFileClip, concatenate_videoclips
def createCombo(ComboWindow, inputFolder, outputFile):
"""
find all video files and combine it into one file
params:
ComboWindow : UI for the application, object of QWindow class
inputFolder : input files folder path
outputFile : path to store the video file created
output : Outputs a single video file with file name provided at the location given
"""
input_files = [] # array to store names of input files
clip = [] # to store the combination of above files
# reading the path of input folder and output file name
inputFolder = str(inputFolder)
outputFile = str(outputFile)
ComboWindow.setComboStatusTipText('Creating Video.......') # setting status on the ui
# retrieving file names
for fileInput in glob.glob(inputFolder + '/*.' + "mp4"):
input_files.append(fileInput)
input_files = sorted(input_files, key=str.lower)
lenInputFiles = len(input_files)
i = 0
# appending file names
for i in range(0, lenInputFiles):
per = float(i + 1) / float(lenInputFiles)
ComboWindow.setComboProgress(round(per * 60))
clip.append(VideoFileClip(input_files[i]))
# get default fps for output video
myClip = cv2.VideoCapture(input_files[i])
fps = myClip.get(cv2.CAP_PROP_FPS)
# creating a video and writing it to the directory
final_clip = concatenate_videoclips(clip)
final_clip.write_videofile(inputFolder + "/" + outputFile, fps)
ComboWindow.setComboProgress(100)
|
import itertools
import logging
import pathlib
import unittest
import rsa
from encrypted_config.crypto import encrypt, decrypt
_LOG = logging.getLogger(__name__)
_HERE = pathlib.Path(__file__).parent
class Tests(unittest.TestCase):
def test_encrypt_decrypt(self):
public_key_path = pathlib.Path(_HERE, 'test_id_rsa.pub.pem')
with public_key_path.open() as public_key_file:
public_key_str = public_key_file.read()
public_key = rsa.PublicKey.load_pkcs1(public_key_str, format='PEM')
self.assertIsInstance(public_key, rsa.PublicKey)
_LOG.info('using public key: %s', public_key_path)
private_key_path = pathlib.Path(_HERE, 'test_id_rsa')
with private_key_path.open() as private_key_file:
private_key_str = private_key_file.read()
private_key = rsa.PrivateKey.load_pkcs1(private_key_str, format='PEM')
self.assertIsInstance(private_key, rsa.PrivateKey)
_LOG.info('using private key: %s', private_key_path)
for public_, private_ in itertools.product(
(public_key_path, public_key_str, public_key),
(private_key_path, private_key_str, private_key)):
for example in ('1234', b'1234'):
ciphertext = encrypt(example, public_)
self.assertIsInstance(ciphertext, type(example))
cleartext = decrypt(ciphertext, private_)
self.assertEqual(cleartext, example)
|
"""
Parse and display test memory.
Uses pytest-monitor plugin from https://github.com/CFMTech/pytest-monitor
Lots of other metrics can be read from the file via sqlite parsing;
currently just MEM_USAGE (RES memory, in MB).
"""
import sqlite3
import sys
from operator import itemgetter
def _get_big_mem_tests(cur):
"""Find out which tests are heavy on memory."""
big_mem_tests = []
for row in cur.execute('select ITEM, MEM_USAGE from TEST_METRICS;'):
test_name, memory_used = row[0], row[1]
        if memory_used > 4000.:  # test result in RES mem in MB
            big_mem_tests.append((test_name, memory_used))
        elif memory_used > 1000.:
            print("Test name / memory (MB)")
            print(test_name, memory_used)
return big_mem_tests
def _get_slow_tests(cur):
"""Find out which tests are slow."""
timed_tests = []
sq_command = \
'select ITEM, ITEM_VARIANT, ITEM_PATH, TOTAL_TIME from TEST_METRICS;'
for row in cur.execute(sq_command):
test_name, test_var, test_path, time_used = \
row[0], row[1], row[2], row[3]
timed_tests.append((test_name, test_var, test_path, time_used))
timed_tests = sorted(timed_tests, reverse=True, key=itemgetter(3))
hundred_slowest_tests = timed_tests[0:100]
print("List of 100 slowest tests (duration, path, name")
if hundred_slowest_tests:
for _, test_var, pth, test_duration in hundred_slowest_tests:
pth = pth.replace(".", "/") + ".py"
executable_test = pth + "::" + test_var
if ", " in executable_test:
executable_test = executable_test.replace(", ", "-")
mssg = "%.2f" % test_duration + "s " + executable_test
print(mssg)
else:
print("Could not retrieve test timing data.")
def _parse_pymon_database():
# Create a SQL connection to our SQLite database
con = sqlite3.connect("../.pymon")
cur = con.cursor()
# The result of a "cursor.execute" can be iterated over by row
# first look at memory
print("Looking for tests that exceed 1GB resident memory.")
big_mem_tests = _get_big_mem_tests(cur)
# then look at total time (in seconds)
    # (user time is available too via USER_TIME, kernel time via KERNEL_TIME)
_get_slow_tests(cur)
# Be sure to close the connection
con.close()
# Throw a sys exit so test fails if we have >4GB tests
if big_mem_tests:
print("Some tests exceed 4GB of RES memory, look into them!")
print(big_mem_tests)
sys.exit(1)
if __name__ == '__main__':
_parse_pymon_database()
|
# list(map(int, input().split()))
# int(input())
def main():
S = input()
T = input()
    # Align T with the substring of S starting at position i and count how many characters match.
st = set()
for i in range(len(S)-len(T)+1):
cnt = 0
for s, t in zip(S[i:i+len(T)], T):
cnt += s == t
st.add(cnt)
print(len(T) - max(st))
if __name__ == '__main__':
main()
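# Worked example (input invented for illustration): S = "cabacc", T = "abc".
# Offsets i = 0..3 give match counts 0, 2, 1, 2, so max(st) = 2 and the answer
# printed is len(T) - 2 = 1, i.e. one character of S must be changed to make T
# appear as a substring.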
|
import collections
from winnow.utils import deep_copy_dict as deepcopy
# from copy import deepcopy
from winnow import utils
from winnow.values.option_values import OptionWinnowValue
from winnow.values import value_factory, value_path_factory
from winnow.values.option_values import OptionResourceWinnowValue, OptionStringWinnowValue
from winnow.values.exception_values import ExceptionWinnowValue
from winnow.keys.key_matching import KeyMatcher
from winnow.exceptions import OptionsExceptionSetWithException
import time
"""
OptionsSet
This is the beef.
All the logical operations on sieves actually happen in their options dicts.
"""
class OptionsSet(collections.MutableMapping):
"""a dict like object that supports merging, patching etc wraps an existing dict"""
def __init__(self, d):
"""
        really it's just a wrapper around an existing dict
"""
self.store = d
self.matcher = KeyMatcher.from_dict(d)
def __getitem__(self, key):
return self.store[key]
def __setitem__(self, key, value):
self.store[key] = value
def __delitem__(self, key):
del self.store[key]
def __iter__(self):
return iter(self.store)
def __len__(self):
return len(self.store)
def mega_store(self, other):
#
# print "****STORES****"
# print self.store
# print other.store
#
expanded = deepcopy(self.store)
for k in self.store.keys():
if "*" in k:
matching = other.matcher.get_matching_paths(k)
for match in matching:
expanded[match] = self.store[k]
                # this consumes matched wildcard values
if matching:
del expanded[k]
mega_store = {}
for k, v in expanded.iteritems():
new_key, real_value = value_path_factory(k, v)
if real_value is not None:
if not new_key in mega_store.keys():
mega_store[new_key] = []
mega_store[new_key].append(real_value)
return mega_store
def _merge_value_array(self, key, values):
value_types = set([type(v) for v in values])
#
if value_types == {OptionStringWinnowValue, OptionResourceWinnowValue}:
raise Exception("cant mix strings and resources")
if len(values) == 1:
return values[0]
result = values[0]
for v in values[1:]:
result = result.intersection(v)
        if result is None:
return ExceptionWinnowValue(key, [v.as_json() for v in values])
return result
def _check_for_exceptions(self, all_values):
for v in all_values:
if isinstance(v, ExceptionWinnowValue):
return v
return None
def merge(self, other):
"""
A union of all keys
An intersection of values
"""
options = {}
this_mega_store = self.mega_store(other)
that_mega_store = other.mega_store(self)
this_keys = set(this_mega_store.keys())
that_keys = set(that_mega_store.keys())
emptyValues = []
# print this_keys, that_keys
for key in this_keys.union(that_keys):
all_values = this_mega_store.get(key, []) + that_mega_store.get(key, [])
exception_value = self._check_for_exceptions(all_values)
if exception_value:
merged_value = exception_value
else:
merged_value = self._merge_value_array(key, all_values)
options[key] = merged_value.as_json()
if isinstance(merged_value, ExceptionWinnowValue):
options[key] = None
emptyValues.append((key, all_values))
options_set = OptionsSet(options)
if emptyValues:
raise OptionsExceptionSetWithException(options_set, emptyValues)
return options_set
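    # Illustration of the semantics above (not tied to a particular value type):
    # if this set restricts a key to {red, green} and the other to {green, blue},
    # the merged set keeps only {green}; if the intersection for some key is
    # empty, that key is recorded in emptyValues and
    # OptionsExceptionSetWithException is raised with the partially merged set.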
def disallowed_keys(self, other):
return self._disallowed(other)
def allows(self, other):
disallowed = self._disallowed(other)
return not bool(disallowed)
def _disallowed(self, other):
"""
An intersection of keys
A subset check on values
"""
disallowed = []
this_mega_store = self.mega_store(other)
that_mega_store = other.mega_store(self)
this_keys = set(this_mega_store.keys())
that_keys = set(that_mega_store.keys())
if this_keys is not None and that_keys is not None:
all_keys = this_keys.intersection(that_keys)
if all_keys is not None:
for key in all_keys:
all_values = this_mega_store.get(key, []) + that_mega_store.get(key, [])
exception_value = self._check_for_exceptions(all_values)
if exception_value:
disallowed.append(key)
else:
this = self._merge_value_array(key, this_mega_store[key])
that = self._merge_value_array(key, that_mega_store[key])
if not that.issubset(this):
disallowed.append(key)
return disallowed
def default(self):
options = {}
for k, v in self.store.iteritems():
value = value_factory(v)
options[k] = value.default
if isinstance(value, OptionWinnowValue):
child_options = value.get_default_value_options()
if child_options is not None:
childSet = OptionsSet(child_options)
child_defaults = childSet.default().store
for ck, cv in child_defaults.iteritems():
path = "{}/{}".format(k, ck)
options[path] = cv
return OptionsSet(options)
def default_full_values(self):
options = {}
for k, v in self.store.iteritems():
options[k] = value_factory(v).default_full_value
return OptionsSet(options)
#
# def scope(self, scope_name):
# """
# extracts a subset of options by scope
# """
# options = {}
# for k, v in self.store.iteritems():
# if isinstance(v, dict) and u"scopes" in v.keys():
# scopes = set(v[u"scopes"])
# if not scopes.isdisjoint(set([scope_name])):
# options[k] = deepcopy(v)
# else:
# options[k] = deepcopy(v)
# return OptionsSet(options)
def match(self, others):
return [other for other in others if self.allows(other)]
def reverse_match(self, others):
return [other for other in others if other.allows(self)]
@property
def key_set(self):
return set(self.store.keys())
|
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import heapq
import logging
import os
import psutil
import sys
BYTE_UNITS = ['B', 'KiB', 'MiB', 'GiB']
def FormatBytes(value):
def GetValueAndUnit(value):
for unit in BYTE_UNITS[:-1]:
if abs(value) < 1024.0:
return value, unit
value /= 1024.0
return value, BYTE_UNITS[-1]
if value is not None:
return '%.1f %s' % GetValueAndUnit(value)
else:
return 'N/A'
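# For reference (example values, not from the original source): FormatBytes(512)
# returns '512.0 B', FormatBytes(1536) returns '1.5 KiB', and FormatBytes(None)
# returns 'N/A'; anything from 1024 GiB upwards simply stays in GiB, the last
# unit in BYTE_UNITS.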
def _GetProcessInfo(p):
pinfo = p.as_dict(attrs=['pid', 'name', 'memory_info'])
pinfo['mem_rss'] = getattr(pinfo['memory_info'], 'rss', 0)
return pinfo
def _LogProcessInfo(pinfo, level):
pinfo['mem_rss_fmt'] = FormatBytes(pinfo['mem_rss'])
logging.log(level, '%(mem_rss_fmt)s (pid=%(pid)s)', pinfo)
def LogHostMemoryUsage(top_n=10, level=logging.INFO):
if psutil.version_info < (2, 0):
logging.warning('psutil %s too old, upgrade to version 2.0 or higher'
' for memory usage information.', psutil.__version__)
return
# TODO(crbug.com/777865): Remove the following pylint disable. Even if we
# check for a recent enough psutil version above, the catapult presubmit
# builder (still running some old psutil) fails pylint checks due to API
# changes in psutil.
# pylint: disable=no-member
mem = psutil.virtual_memory()
logging.log(level, 'Used %s out of %s memory available.',
FormatBytes(mem.used), FormatBytes(mem.total))
logging.log(level, 'Memory usage of top %i processes groups', top_n)
pinfos_by_names = {}
for p in psutil.process_iter():
pinfo = _GetProcessInfo(p)
pname = pinfo['name']
if pname not in pinfos_by_names:
pinfos_by_names[pname] = {'name': pname, 'total_mem_rss': 0, 'pids': []}
pinfos_by_names[pname]['total_mem_rss'] += pinfo['mem_rss']
pinfos_by_names[pname]['pids'].append(str(pinfo['pid']))
sorted_pinfo_groups = heapq.nlargest(
top_n, pinfos_by_names.values(), key=lambda item: item['total_mem_rss'])
for group in sorted_pinfo_groups:
group['total_mem_rss_fmt'] = FormatBytes(group['total_mem_rss'])
group['pids_fmt'] = ', '.join(group['pids'])
logging.log(
        level, '- %(name)s - %(total_mem_rss_fmt)s - pids: %(pids_fmt)s', group)
logging.log(level, 'Current process:')
pinfo = _GetProcessInfo(psutil.Process(os.getpid()))
_LogProcessInfo(pinfo, level)
def main():
logging.basicConfig(level=logging.INFO)
LogHostMemoryUsage()
if __name__ == '__main__':
sys.exit(main())
|
from . import equilibrium
from . import freeflame
from . import ignition
from . import minimal
from . import solution
|
import importlib
from parsel import Selector
def convert_html_to_selector(html):
return Selector(html)
class SelectorExtractor(object):
@staticmethod
def get_list_data(elements=None):
data_cleaned = []
data = elements.extract()
        for datum in data:
if datum:
data_cleaned.append(datum.strip())
return data_cleaned
@staticmethod
def get_single_data(elements=None):
data = elements.extract_first()
if data:
return data.strip()
return data
def transform_data(data=None, data_type=None):
fields = importlib.import_module("web_parsers.fields")
Klass = getattr(fields, data_type)
data = Klass(data=data).transform()
return data
def clean_data(elements=None, item_extractor=None):
"""
    This is where the extracted data is cleaned up and the configured functions and data types are applied as needed.
:param elements:
:param item_extractor:
:return:
"""
# TODO - list is calculated
data_type = item_extractor.data_type
if data_type.startswith("List"):
multiple = True
else:
multiple = False
data_extractor = SelectorExtractor()
if multiple is True:
extracted_data = data_extractor.get_list_data(elements=elements)
else:
extracted_data = data_extractor.get_single_data(elements=elements)
data = transform_data(data=extracted_data, data_type=data_type)
return data
def extract_html_field(html_selector, item_extractor):
element_query = item_extractor.element_query
if item_extractor.data_attribute in ['text']:
if element_query.get("type") == 'css':
elements = html_selector.css("{0}::{1}".format(element_query.get('value'),
item_extractor.data_attribute))
return clean_data(elements=elements, item_extractor=item_extractor)
else:
elements = html_selector.xpath("{0}/{1}".format(element_query.get('value'),
item_extractor.data_attribute))
return clean_data(elements=elements, item_extractor=item_extractor)
elif item_extractor.data_attribute == 'html':
if element_query.get('type') == 'css':
elements = html_selector.css(element_query.get('value'))
return clean_data(elements=elements, item_extractor=item_extractor)
else:
elements = html_selector.xpath("{0}/{1}".format(element_query.get('value'),
item_extractor.data_attribute))
return clean_data(elements=elements, item_extractor=item_extractor)
else:
if element_query.get('type') == 'css':
elements = html_selector.css(element_query.get('value')) \
.xpath("@{0}".format(item_extractor.data_attribute))
return clean_data(elements=elements, item_extractor=item_extractor)
else:
elements = html_selector.xpath("{0}/{1}".format(element_query.get('value'),
item_extractor.data_attribute))
return clean_data(elements=elements, item_extractor=item_extractor)
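# Rough sketch of how a field definition maps onto a selector call (the
# item_extractor values below are hypothetical): with element_query
# {"type": "css", "value": "h1.title"} and data_attribute "text" the query
# becomes html_selector.css("h1.title::text"); with data_attribute "href" it
# becomes html_selector.css("h1.title").xpath("@href") instead.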
|
#!/usr/bin/env python
"""Tests for the howfairis module.
"""
import random
import pytest
import requests
from howfairis import Checker
from howfairis import Repo
from howfairis import Compliance
def get_urls(n=None):
software_api_url = "https://www.research-software.nl/api/software?isPublished=true"
try:
response = requests.get(software_api_url)
# If the response was successful, no Exception will be raised
response.raise_for_status()
except requests.HTTPError:
print("Unable to retrieve the list of URLs")
        return []  # an empty parameter set lets pytest skip these tests instead of breaking collection
urls = []
for d in response.json():
for key, values in d["repositoryURLs"].items():
urls.extend(values)
    random.shuffle(urls)
    if n is None:
        return urls
    return urls[:n]
@pytest.fixture(params=get_urls(5))
def url_fixture(request):
return request.param
def test_heavy_handed_testing_of_rsd_urls(url_fixture):
repo = Repo(url_fixture)
checker = Checker(repo)
compliance = checker.check_five_recommendations()
assert isinstance(compliance, Compliance)
|
import pandas as pd
import numpy as np
# TODO: clean up this file, as it is almost unused in the current solution (only for water, and only
# a small number of variables is actually used)
N_Cls = 10
inDir = '../../data'
DF = pd.read_csv(inDir + '/train_wkt_v4.csv')
GS = pd.read_csv(inDir + '/grid_sizes.csv', names=['ImageId', 'Xmax', 'Ymin'], skiprows=1)
SB = pd.read_csv('sample_submission.csv')
smooth = 1e-12
scene_ids = ['6010', '6020', '6030', '6040', '6050', '6060',
'6070', '6080', '6090', '6100', '6110', '6120',
'6130', '6140', '6150', '6160', '6170', '6180']
train_ids = sorted(DF.ImageId.unique())
# Give short names, sensible colors and zorders to object types
CLASSES = {
0 : 'Bldg',
1 : 'Struct',
2 : 'Road',
3 : 'Track',
4 : 'Trees',
5 : 'Crops',
6 : 'Fast H20',
7 : 'Slow H20',
8 : 'Truck',
9 : 'Car',
10 : 'Background',
}
COLORS = {
1 : '#FFFDFD',
2 : '#FFFDFD',
3 : '#FFFDFD',
4 : '0.00',
5 : '#FFFDFD',
6 : '#FFFDFD',
7 : '#FFFDFD',
8 : '#FFFDFD',
9 : '#FFFDFD',
10: '#FFFDFD',
}
ZORDER = {
1 : 5,
2 : 5,
3 : 4,
4 : 1,
5 : 3,
6 : 2,
7 : 7,
8 : 8,
9 : 9,
10: 10,
}
# channels: 'all', 'three', 'm', 'p' (or another custom option)
channels = 'm'
channels_count = 8
weighting = 'random'
ISZ = 80
ISZ_mult = 3360 // ISZ  # ISZ * ISZ_mult = 3360, roughly the ~3350 px image size
batch_size = 128  # for a bigger patch size this may need to be even lower
learning_rate = 1e-4
total_runs = 100
CURRENT_CLASS = 3
val_patches = 2048
train_patches = 1024
aug = True
aug_a_lot = False
num_epochs = 2
load_weights = True
use_jaccard_loss = False
class_weights = [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
loss_weights = np.array([0.1, 0.1, 0.3, 9, 0.1, 0.1, 0.1, 0.1, 0.001, 0.001])
model_type = 'unet'
# model_type = 'fc-densenet'
# model_type = 'resnet-38'
# model_type = 'get_another_net'
# model_type = 'two_head'
weights_path = 'weights/{}_{}_{}_{}_{}_last'.format(model_type, CLASSES[CURRENT_CLASS], channels, channels_count, weighting)
# weights_path = 'weights/unet_10_last9_0.32'
trs = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
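# With the settings above, weights_path resolves to
# 'weights/unet_Track_m_8_random_last'.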
|
import os
from day_12 import parse_input, count_paths, count_paths_2
def test_part_1() -> None:
os.chdir(os.path.dirname(__file__))
with open("data/test_day_12_1.in") as input_file:
caves = parse_input(input_file)
assert count_paths(caves) == 10
with open("data/test_day_12_2.in") as input_file:
caves = parse_input(input_file)
assert count_paths(caves) == 19
with open("data/test_day_12_3.in") as input_file:
caves = parse_input(input_file)
assert count_paths(caves) == 226
def test_part_2() -> None:
os.chdir(os.path.dirname(__file__))
with open("data/test_day_12_1.in") as input_file:
caves = parse_input(input_file)
assert count_paths_2(caves) == 36
with open("data/test_day_12_2.in") as input_file:
caves = parse_input(input_file)
assert count_paths_2(caves) == 103
with open("data/test_day_12_3.in") as input_file:
caves = parse_input(input_file)
assert count_paths_2(caves) == 3509
|
from .analyzer import Analyzer
from .holdemai import HoldemAI
from .nn import NeuralNetwork
from .player import Player
from .playercontrol import PlayerControl, PlayerControlProxy
from .table import Table, TableProxy
from .teacher import Teacher, TeacherProxy
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from future.utils import raise_from, string_types
from builtins import (bytes, str, open, super, range,
zip, round, input, int, pow, object)
import os
import shutil
import traceback
from termcolor import colored
import colorama
colorama.init()
import gslab_make.private.messages as messages
import gslab_make.private.metadata as metadata
from gslab_make.private.exceptionclasses import CritError, ColoredError
from gslab_make.private.utility import get_path, format_message, norm_path, open_yaml
def _check_os(osname = os.name):
"""Check OS is either POSIX or NT.
Parameters
----------
osname : str, optional
Name of OS. Defaults to ``os.name``.
Returns
-------
None
"""
if osname not in ['posix', 'nt']:
raise CritError(messages.crit_error_unknown_system % osname)
def update_executables(paths, osname = None):
""".. Update executable names using user configuration file.
Updates executable names with executables listed in file ``config_user``.
Note
----
Executable names are used by :ref:`program functions <program functions>`.
Parameters
----------
paths : dict
Dictionary of paths. Dictionary should contain values for all keys listed below.
osname : str, optional
Name of OS. Defaults to ``os.name``.
Path Keys
---------
config_user : str
Path of user configuration file.
Returns
-------
None
"""
osname = osname if osname else os.name # https://github.com/sphinx-doc/sphinx/issues/759
try:
config_user = get_path(paths, 'config_user')
config_user = open_yaml(config_user)
_check_os(osname)
if config_user['local']['executables']:
metadata.default_executables[osname].update(config_user['local']['executables'])
except:
error_message = 'Error with update_executables. Traceback can be found below.'
error_message = format_message(error_message)
raise_from(ColoredError(error_message, traceback.format_exc()), None)
def update_external_paths(paths):
""".. Update paths using user configuration file.
Updates dictionary ``paths`` with externals listed in file ``config_user``.
Note
----
The ``paths`` argument for :ref:`sourcing functions<sourcing functions>` is used not only to get
default paths for writing/logging, but also to
`string format <https://docs.python.org/3.4/library/string.html#format-string-syntax>`__
sourcing instructions.
Parameters
----------
paths : dict
Dictionary of paths to update.
Dictionary should ex-ante contain values for all keys listed below.
Path Keys
---------
config_user : str
Path of user configuration file.
Returns
-------
paths : dict
Dictionary of updated paths.
"""
try:
config_user = get_path(paths, 'config_user')
config_user = open_yaml(config_user)
if config_user['external']:
paths.update(config_user['external'])
return(paths)
except:
error_message = 'Error with update_external_paths. Traceback can be found below.'
error_message = format_message(error_message)
raise_from(ColoredError(error_message, traceback.format_exc()), None)
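# A config_user file consumed by the two functions above might look roughly like
# this (keys below are hypothetical examples; only 'local: executables' and
# 'external' are read here):
#
#   local:
#     executables:
#       python: python3
#   external:
#     raw_data: /path/to/raw_data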
def update_paths(paths):
""".. Alias for ``update_external_paths()``
Parameters
----------
paths : dict
Dictionary of paths to update.
Dictionary should ex-ante contain values for all keys listed below.
Path Keys
---------
config_user : str
Path of user configuration file.
Returns
-------
paths : dict
Dictionary of updated paths.
"""
return update_external_paths(paths)
def update_internal_paths(paths):
""".. Update within-directory paths using default configuration file.
Returns dictionary ``paths`` with directory locations listed in file ``config``.
Parameters
----------
paths : dict
Dictionary of paths to update.
Dictionary should ex-ante contain values for all keys listed below.
Path Keys
---------
root : str
Path of project repo root
config : str
Path of user configuration file.
Returns
-------
paths : dict
Dictionary of paths.
"""
try:
config_default = get_path(paths, 'config')
config_default = open_yaml(config_default)
root = get_path(paths, 'root')
relative_paths = {path_label: os.path.join(root, path) for \
path_label, path in config_default['make_paths']['root_relative'].items()}
absolute_paths = config_default['make_paths']['absolute']
paths.update(relative_paths)
paths.update(absolute_paths)
return(paths)
except:
        error_message = 'Error with update_internal_paths. Traceback can be found below.'
error_message = format_message(error_message)
raise_from(ColoredError(error_message, traceback.format_exc()), None)
def copy_output(file, copy_dir):
""".. Copy output file.
Copies output ``file`` to directory ``copy_dir`` with user prompt to confirm copy.
Parameters
----------
file : str
Path of file to copy.
copy_dir : str
Directory to copy file.
Returns
-------
None
"""
file = norm_path(file)
copy_dir = norm_path(copy_dir)
message = colored(messages.warning_copy, color = 'cyan')
upload = input(message % (file, copy_dir))
if upload.lower().strip() == "yes":
shutil.copy(file, copy_dir)
__all__ = ['update_executables', 'update_external_paths', 'update_internal_paths', 'copy_output']
|
#!/usr/bin/env python3
"""
linkedin2username by initstring (github.com/initstring)
OSINT tool to discover likely usernames and email addresses for employees
of a given company on LinkedIn. This tool actually logs in with your valid
account in order to extract the most results.
"""
import os
import sys
import re
import time
import argparse
import getpass
from distutils.version import StrictVersion
import urllib.parse
import requests
########## BEGIN GLOBAL DECLARATIONS ##########
CURRENT_REL = '0.20'
BANNER = r"""
.__ .__________
| | |__\_____ \ __ __
| | | |/ ____/| | \
| |_| / \| | /
|____/__\_______ \____/
linkedin2username
Spray away.
github.com/initstring
"""
# The dictionary below is a best-effort attempt to spread a search load
# across sets of geographic locations. This can bypass the 1000 result
# search limit as we are now allowed 1000 per geo set.
# developer.linkedin.com/docs/v1/companies/targeting-company-shares#additionalcodes
GEO_REGIONS = {
'r0':'us:0',
'r1':'ca:0',
'r2':'gb:0',
'r3':'au:0|nz:0',
'r4':'cn:0|hk:0',
'r5':'jp:0|kr:0|my:0|np:0|ph:0|sg:0|lk:0|tw:0|th:0|vn:0',
'r6':'in:0',
'r7':'at:0|be:0|bg:0|hr:0|cz:0|dk:0|fi:0',
'r8':'fr:0|de:0',
'r9':'gr:0|hu:0|ie:0|it:0|lt:0|nl:0|no:0|pl:0|pt:0',
'r10':'ro:0|ru:0|rs:0|sk:0|es:0|se:0|ch:0|tr:0|ua:0',
'r11':('ar:0|bo:0|br:0|cl:0|co:0|cr:0|do:0|ec:0|gt:0|mx:0|pa:0|pe:0'
'|pr:0|tt:0|uy:0|ve:0'),
'r12':'af:0|bh:0|il:0|jo:0|kw:0|pk:0|qa:0|sa:0|ae:0'}
########## END GLOBAL DECLARATIONS ##########
if sys.version_info < (3, 0):
print("\nSorry mate, you'll need to use Python 3+ on this one...\n")
sys.exit(1)
class PC:
"""PC (Print Color)
Used to generate some colorful, relevant, nicely formatted status messages.
"""
green = '\033[92m'
blue = '\033[94m'
orange = '\033[93m'
endc = '\033[0m'
ok_box = blue + '[*] ' + endc
note_box = green + '[+] ' + endc
warn_box = orange + '[!] ' + endc
def parse_arguments():
"""
Handle user-supplied arguments
"""
desc = ('OSINT tool to generate lists of probable usernames from a'
' given company\'s LinkedIn page. This tool may break when'
' LinkedIn changes their site. Please open issues on GitHub'
' to report any inconsistencies, and they will be quickly fixed.')
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-u', '--username', type=str, action='store',
required=True,
help='A valid LinkedIn username.')
parser.add_argument('-c', '--company', type=str, action='store',
required=True,
help='Company name exactly as typed in the company '
'linkedin profile page URL.')
parser.add_argument('-p', '--password', type=str, action='store',
help='Specify your password in clear-text on the '
'command line. If not specified, will prompt and '
'obfuscate as you type.')
parser.add_argument('-n', '--domain', type=str, action='store',
default='',
help='Append a domain name to username output. '
'[example: "-n uber.com" would output jschmoe@uber.com]'
)
parser.add_argument('-d', '--depth', type=int, action='store',
default=False,
help='Search depth (how many loops of 25). If unset, '
'will try to grab them all.')
parser.add_argument('-s', '--sleep', type=int, action='store', default=0,
help='Seconds to sleep between search loops.'
' Defaults to 0.')
parser.add_argument('-x', '--proxy', type=str, action='store',
default=False,
help='Proxy server to use. WARNING: WILL DISABLE SSL '
                        'VERIFICATION. [example: "-x https://localhost:8080"]')
parser.add_argument('-k', '--keywords', type=str, action='store',
default=False,
                        help='Filter results by a list of comma-separated '
'keywords. Will do a separate loop for each keyword, '
'potentially bypassing the 1,000 record limit. '
'[example: "-k \'sales,human resources,information '
                        'technology\'"]')
parser.add_argument('-g', '--geoblast', default=False, action="store_true",
help='Attempts to bypass the 1,000 record search limit'
' by running multiple searches split across geographic'
' regions.')
args = parser.parse_args()
# Proxy argument is fed to requests as a dictionary, setting this now:
args.proxy_dict = {"https" : args.proxy}
# If appending an email address, preparing this string now:
if args.domain:
args.domain = '@' + args.domain
# Keywords are fed in as a list. Splitting comma-separated user input now:
if args.keywords:
args.keywords = args.keywords.split(',')
# These two functions are not currently compatible, squashing this now:
if args.keywords and args.geoblast:
print("Sorry, keywords and geoblast are currently not compatible. "
"Use one or the other.")
sys.exit()
# If password is not passed in the command line, prompt for it
# in a more secure fashion (not shown on screen)
args.password = args.password or getpass.getpass()
return args
def check_li2u_version():
"""Checks GitHub for a new version
Uses a simple regex to look at the 'releases' page on GitHub. Extracts the
First tag found and assumes it is the latest. Compares with the global
variable CURRENT_TAG and informs if a new version is available.
"""
latest_rel_regex = r'/initstring/linkedin2username/tree/(.*?)"'
session = requests.session()
rel_url = 'https://github.com/initstring/linkedin2username/releases'
rel_chars = re.compile(r'[^0-9\.]')
# Scrape the page and grab the regex.
response = session.get(rel_url)
latest_rel = re.findall(latest_rel_regex, response.text)
# Remove characters from tag name that will mess up version comparison.
# Also just continue if we can't find the tags - we don't want that small
# function to break the entire app.
    if latest_rel:
latest_rel = rel_chars.sub('', latest_rel[0])
else:
return
# Check the tag found with the one defined in this script.
if CURRENT_REL == latest_rel:
print("")
print(PC.ok_box + "Using version {}, which is the latest on"
" GitHub.\n".format(CURRENT_REL))
return
if StrictVersion(CURRENT_REL) > StrictVersion(latest_rel):
print("")
print(PC.warn_box + "Using version {}, which is NEWER than {}, the"
" latest official release. Good luck!\n"
.format(CURRENT_REL, latest_rel))
return
if StrictVersion(CURRENT_REL) < StrictVersion(latest_rel):
print("")
print(PC.warn_box + "You are using {}, but {} is available.\n"
" LinkedIn changes often - this version may not work.\n"
" https://github.com/initstring/linkedin2username.\n"
.format(CURRENT_REL, latest_rel))
return
def login(args):
"""Creates a new authenticated session.
Note that a mobile user agent is used. Parsing using the desktop results
proved extremely difficult, as shared connections would be returned in
a manner that was indistinguishable from the desired targets.
The other header matters as well, otherwise advanced search functions
(region and keyword) will not work.
The function will check for common failure scenarios - the most common is
logging in from a new location. Accounts using multi-factor auth are not
yet supported and will produce an error.
"""
session = requests.session()
# Special options below when using a proxy server. Helpful for debugging
# the application in Burp Suite.
if args.proxy:
print(PC.warn_box + "Using a proxy, ignoring SSL errors."
" Don't get pwned.")
session.verify = False
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
session.proxies.update(args.proxy_dict)
# Our search and regex will work only with a mobile user agent and
# the correct REST protocol specified below.
mobile_agent = ('Mozilla/5.0 (Linux; U; Android 4.4.2; en-us; SCH-I535 '
'Build/KOT49H) AppleWebKit/534.30 (KHTML, like Gecko) '
'Version/4.0 Mobile Safari/534.30')
session.headers.update({'User-Agent': mobile_agent,
'X-RestLi-Protocol-Version': '2.0.0'})
    # We will grab an anonymous response to look for the CSRF token, which
# is required for our logon attempt.
anon_response = session.get('https://www.linkedin.com/login')
login_csrf = re.findall(r'name="loginCsrfParam" value="(.*?)"',
anon_response.text)
if login_csrf:
login_csrf = login_csrf[0]
else:
print("Having trouble loading login page... try the command again.")
sys.exit()
# Define the data we will POST for our login.
auth_payload = {
'session_key': args.username,
'session_password': args.password,
'isJsEnabled': 'false',
'loginCsrfParam': login_csrf
}
# Perform the actual login. We disable redirects as we will use that 302
# as an indicator of a successful logon.
response = session.post('https://www.linkedin.com/checkpoint/lg/login-submit'
'?loginSubmitSource=GUEST_HOME',
data=auth_payload, allow_redirects=False)
# Define a successful login by the 302 redirect to the 'feed' page. Try
# to detect some other common logon failures and alert the user.
if response.status_code == 302 or response.status_code == 303:
redirect = response.headers['Location']
if 'feed' in redirect:
print(PC.ok_box + "Successfully logged in.\n")
return session
if 'challenge' in redirect:
print(PC.warn_box + "LinkedIn doesn't like something about this"
" login. Maybe you're being sneaky on a VPN or something."
" You may get an email with a verification token. You can"
" ignore the email. Log in from a web browser and try"
" again.\n")
return False
if 'captcha' in redirect:
print(PC.warn_box + "You've triggered a CAPTCHA. Oops. Try logging"
" in with your web browser first and come back later.")
return False
if 'add-phone' in redirect:
print(PC.warn_box + "LinkedIn is prompting to add your phone"
" number to your profile. Please handle that in the web and"
" then try again.")
return False
if 'manage-account' in redirect:
print(PC.warn_box + "LinkedIn has some account notification for you"
" to check. Please log in first via the web and clear that.")
return False
if 'add-email' in redirect:
print(PC.warn_box + "LinkedIn wants you to add an email address to"
" your account. Log in via the web first and do that.")
return False
# The below will detect some 302 that I don't yet know about.
print(PC.warn_box + "Some unknown redirection occurred. If this"
" persists, please open an issue on github wih the DEBUG"
" message below:\n")
print("DEBUG INFO:")
print("LOCATION: {}".format(redirect))
print("RESPONSE TEXT:\n{}".format(response.text))
return False
# A failed logon doesn't generate a 302 at all, but simply responds with
# the logon page. We detect this here.
if '<title>LinkedIn Login' in response.text:
print(PC.warn_box + "You've been returned to a login page. Check your"
" username and password and try again.\n")
return False
# If we make it past everything above, we have no idea what happened.
# Oh well, we fail.
print(PC.warn_box + "Some unknown error logging in. If this persists,"
"please open an issue on github.\n")
print("DEBUG INFO:")
print("RESPONSE CODE: {}".format(response.status_code))
print("RESPONSE TEXT:\n{}".format(response.text))
return False
def set_search_csrf(session):
"""Extract the required CSRF token.
LinkedIn's search function requires a CSRF token equal to the JSESSIONID.
"""
csrf_token = session.cookies['JSESSIONID'].replace('"', '')
session.headers.update({'Csrf-Token': csrf_token})
return session
def get_company_info(name, session):
"""Scrapes basic company info.
Note that not all companies fill in this info, so exceptions are provided.
The company name can be found easily by browsing LinkedIn in a web browser,
searching for the company, and looking at the name in the address bar.
"""
# The following regexes may be moving targets, I will try to keep them up
# to date. If you have issues with these, please open a ticket on GitHub.
# Thanks!
website_regex = r'companyPageUrl":"(http.*?)"'
staff_regex = r'staffCount":([0-9]+),'
id_regex = r'"objectUrn":"urn:li:company:([0-9]+)"'
desc_regex = r'tagline":"(.*?)"'
escaped_name = urllib.parse.quote_plus(name)
response = session.get(('https://www.linkedin.com'
'/voyager/api/organization/companies?'
'q=universalName&universalName=' + escaped_name))
# Some geo regions are being fed a 'lite' version of LinkedIn mobile:
# https://bit.ly/2vGcft0
# The following bit is a temporary fix until I can figure out a
# low-maintenance solution that is inclusive of these areas.
if 'mwlite' in response.text:
print(PC.warn_box + "You are being served the 'lite' version of"
" LinkedIn (https://bit.ly/2vGcft0) that is not yet supported"
" by this tool. Please try again using a VPN exiting from USA,"
" EU, or Australia.")
print(" A permanent fix is being researched. Sorry about that!")
sys.exit()
# Will search for the company ID in the response. If not found, the
# program cannot succeed and must exit.
found_id = re.findall(id_regex, response.text)
if not found_id:
print(PC.warn_box + "Could not find that company name. Please"
" double-check LinkedIn and try again.")
sys.exit()
# Below we will try to scrape metadata on the company. If not found, will
# set generic strings as warnings.
found_desc = re.findall(desc_regex, response.text)
if not found_desc:
found_desc = ["NOT FOUND"]
found_staff = re.findall(staff_regex, response.text)
if not found_staff:
found_staff = ["RegEx issues, please open a ticket on GitHub!"]
found_website = re.findall(website_regex, response.text)
if not found_website:
found_website = ["RegEx issues, please open a ticket on GitHub!"]
print(" ID: " + found_id[0])
print(" Alias: " + name)
print(" Desc: " + found_desc[0])
print(" Staff: " + str(found_staff[0]))
print(" URL: " + found_website[0])
print("\n" + PC.ok_box + "Hopefully that's the right {}! If not,"
"double-check LinkedIn and try again.\n".format(name))
return(found_id[0], int(found_staff[0]))
def set_loops(staff_count, args):
"""Defines total hits to the search API.
Sets a maximum amount of loops based on either the number of staff
discovered in the get_company_info function or the search depth argument
provided by the user. This limit is PER SEARCH, meaning it may be
exceeded if you use the geoblast or keyword feature.
Loops may stop early if no more matches are found or if a single search
exceeds LinkedIn's 1000 non-commercial use limit.
"""
# We will look for 25 names on each loop. So, we set a maximum amount of
# loops to the amount of staff / 25 +1 more to catch remainders.
loops = int((staff_count / 25) + 1)
print(PC.ok_box + "Company has {} profiles to check. Some may be"
" anonymous.".format(staff_count))
# The lines below attempt to detect large result sets and compare that
# with the command line arguments passed. The goal is to warn when you
# may not get all the results and to suggest ways to get more.
if staff_count > 1000 and not args.geoblast and not args.keywords:
print(PC.warn_box + "Note: LinkedIn limits us to a maximum of 1000"
" results!\n"
" Try the --geoblast or --keywords parameter to bypass")
elif staff_count < 1000 and args.geoblast:
print(PC.warn_box + "Geoblast is not necessary, as this company has"
" less than 1,000 staff. Disabling.")
args.geoblast = False
elif staff_count > 1000 and args.geoblast:
print(PC.ok_box + "High staff count, geoblast is enabled. Let's rock.")
elif staff_count > 1000 and args.keywords:
print(PC.ok_box + "High staff count, using keywords. Hope you picked"
" some good ones.")
# If the user purposely restricted the search depth, they probably know
# what they are doing, but we warn them just in case.
if args.depth and args.depth < loops:
print(PC.warn_box + "You defined a low custom search depth, so we"
" might not get them all.")
else:
print(PC.ok_box + "Setting each iteration to a maximum of {} loops of"
" 25 results each.".format(loops))
args.depth = loops
print("\n\n")
return args
def get_results(session, company_id, page, region, keyword):
"""Scrapes raw data for processing.
The URL below is what the LinkedIn mobile HTTP site queries when manually
scrolling through search results.
The mobile site defaults to using a 'count' of 10, but testing shows that
25 is allowed. This behavior will appear to the web server as someone
scrolling quickly through all available results.
"""
# When using the --geoblast feature, we need to inject our set of region
# codes into the search parameter.
if region:
region = re.sub(':', '%3A', region) # must URL encode this parameter
# Build the base search URL.
url = ('https://www.linkedin.com'
'/voyager/api/search/hits'
'?facetCurrentCompany=List({})'
'&facetGeoRegion=List({})'
'&keywords=List({})'
'&q=people&maxFacetValues=15'
'&supportedFacets=List(GEO_REGION,CURRENT_COMPANY)'
'&count=25'
'&origin=organization'
'&start={}'
.format(company_id, region, keyword, page * 25))
# Perform the search for this iteration.
result = session.get(url)
return result.text
def scrape_info(session, company_id, staff_count, args):
"""Uses regexes to extract employee names.
The data returned is similar to JSON, but not always formatted properly.
The regex queries below will build individual lists of first and last
names. Every search tested returns an even number of each, so we can safely
match the two lists together to get full names.
    Has the concept of inner and outer loops. Outer loops come into play when
    using --keywords or --geoblast, both of which attempt to bypass the 1,000
record search limit.
This function will stop searching if a loop returns 0 new names.
"""
full_name_list = []
print(PC.ok_box + "Starting search....\n")
# We pass the full 'args' below as we need to define a few variables from
# there - the loops as well as potentially disabling features that are
# deemed unnecessary due to small result sets.
args = set_loops(staff_count, args)
    # If we are using geoblast or keywords, we need to define a number of
# "outer_loops". An outer loop will be a normal LinkedIn search, maxing
# out at 1000 results.
if args.geoblast:
outer_loops = range(0, len(GEO_REGIONS))
elif args.keywords:
outer_loops = range(0, len(args.keywords))
else:
outer_loops = range(0, 1)
# Crafting the right URL is a bit tricky, so currently unnecessary
# parameters are still being included but set to empty. You will see this
# below with geoblast and keywords.
for current_loop in outer_loops:
if args.geoblast:
region_name = 'r' + str(current_loop)
current_region = GEO_REGIONS[region_name]
current_keyword = ''
print("\n" + PC.ok_box + "Looping through region {}"
.format(current_region))
elif args.keywords:
current_keyword = args.keywords[current_loop]
current_region = ''
print("\n" + PC.ok_box + "Looping through keyword {}"
.format(current_keyword))
else:
current_region = ''
current_keyword = ''
## This is the inner loop. It will search results 25 at a time.
for page in range(0, args.depth):
new_names = 0
sys.stdout.flush()
sys.stdout.write(PC.ok_box + "Scraping results on loop "
+ str(page+1) + "... ")
result = get_results(session, company_id, page, current_region,
current_keyword)
first_name = re.findall(r'"firstName":"(.*?)"', result)
last_name = re.findall(r'"lastName":"(.*?)"', result)
# Commercial Search Limit might be triggered
if "UPSELL_LIMIT" in result:
sys.stdout.write('\n')
print(PC.warn_box + "You've hit the commercial search limit!"
" Try again on the 1st of the month. Sorry. :(")
break
# If the list of names is empty for a page, we assume that
# there are no more search results. Either you got them all or
# you are not connected enough to get them all.
if not first_name and not last_name:
sys.stdout.write('\n')
print(PC.ok_box + "We have hit the end of the road!"
" Moving on...")
break
# re.findall puts all first names and all last names in a list.
# They are ordered, so the pairs should correspond with each other.
# We parse through them all here, and see which ones are new to us.
for first, last in zip(first_name, last_name):
full_name = first + ' ' + last
if full_name not in full_name_list:
full_name_list.append(full_name)
new_names += 1
sys.stdout.write(" " + PC.ok_box + "Added " + str(new_names) +
" new names. Running total: "\
+ str(len(full_name_list)) + " \r")
# If the user has defined a sleep between loops, we take a little
# nap here.
time.sleep(args.sleep)
return full_name_list
def remove_accents(raw_text):
"""Removes common accent characters.
    Our goal is to brute force login mechanisms, and I work primarily with
companies deploying English-language systems. From my experience, user
accounts tend to be created without special accented characters. This
function tries to swap those out for standard English alphabet.
"""
raw_text = re.sub(u"[àáâãäå]", 'a', raw_text)
raw_text = re.sub(u"[èéêë]", 'e', raw_text)
raw_text = re.sub(u"[ìíîï]", 'i', raw_text)
raw_text = re.sub(u"[òóôõö]", 'o', raw_text)
raw_text = re.sub(u"[ùúûü]", 'u', raw_text)
raw_text = re.sub(u"[ýÿ]", 'y', raw_text)
raw_text = re.sub(u"[ß]", 'ss', raw_text)
raw_text = re.sub(u"[ñ]", 'n', raw_text)
return raw_text
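# For example, remove_accents("José Müller") returns "Jose Muller" and
# remove_accents("Groß") returns "Gross"; characters outside the ranges above
# (such as "š") are left untouched here and later stripped by clean().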
def clean(raw_list):
"""Removes common punctuation.
LinkedIn users tend to add credentials to their names to look special.
This function is based on what I have seen in large searches, and attempts
to remove them.
"""
clean_list = []
allowed_chars = re.compile('[^a-zA-Z -]')
for name in raw_list:
# Try to transform non-English characters below.
name = remove_accents(name)
# The line below basically trashes anything weird left over.
# A lot of users have funny things in their names, like () or ''
# People like to feel special, I guess.
name = allowed_chars.sub('', name)
# Lower-case everything to make it easier to de-duplicate.
name = name.lower()
# The line below tries to consolidate white space between words
# and get rid of leading/trailing spaces.
name = re.sub(r'\s+', ' ', name).strip()
# If what is left is non-empty and unique, we add it to the list.
if name and name not in clean_list:
clean_list.append(name)
return clean_list
def write_files(company, domain, name_list):
"""Writes data to various formatted output files.
After scraping and processing is complete, this function formats the raw
names into common username formats and writes them into a directory called
'li2u-output'.
See in-line comments for decisions made on handling special cases.
"""
# Check for and create an output directory to store the files.
out_dir = 'li2u-output'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# Define all the files names we will be creating.
files = {}
files['rawnames'] = open(out_dir + '/' + company + '-rawnames.txt', 'w')
files['flast'] = open(out_dir + '/' + company + '-flast.txt', 'w')
files['firstl'] = open(out_dir + '/' + company + '-firstl.txt', 'w')
files['firstlast'] = open(out_dir + '/' + company + '-first.last.txt', 'w')
files['fonly'] = open(out_dir + '/' + company + '-first.txt', 'w')
files['lastf'] = open(out_dir + '/' + company + '-lastf.txt', 'w')
# First, write all the raw names to a file.
for name in name_list:
files['rawnames'].write(name + '\n')
# Split the name on spaces and hyphens:
parse = re.split(' |-', name)
# Users with hyphenated or multiple last names could have several
# variations on the username. For a best-effort, we will try using
# one or the other, but not both. Users with more than three names
# will be truncated down, assuming the second of four is a middle
# name.
try:
if len(parse) > 2: # for users with more than one last name.
first, second, third = parse[0], parse[-2], parse[-1]
files['flast'].write(first[0] + second + domain + '\n')
files['flast'].write(first[0] + third + domain + '\n')
files['lastf'].write(second + first[0] + domain + '\n')
files['lastf'].write(third + first[0] + domain + '\n')
files['firstlast'].write(first + '.' + second + domain + '\n')
files['firstlast'].write(first + '.' + third + domain + '\n')
files['firstl'].write(first + second[0] + domain + '\n')
files['firstl'].write(first + third[0] + domain + '\n')
files['fonly'].write(first + domain + '\n')
else: # for users with only one last name
first, last = parse[0], parse[-1]
files['flast'].write(first[0] + last + domain + '\n')
files['lastf'].write(last + first[0] + domain + '\n')
files['firstlast'].write(first + '.' + last + domain + '\n')
files['firstl'].write(first + last[0] + domain + '\n')
files['fonly'].write(first + domain + '\n')
# The exception below will try to weed out string processing errors
# I've made in other parts of the program.
except IndexError:
print(PC.warn_box + "Struggled with this tricky name: '{}'."
.format(name))
# Cleanly close all the files.
for file_name in files:
files[file_name].close()
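# A concrete illustration (hypothetical employee, domain "@example.com"): the
# cleaned name "jane doe" yields jdoe@example.com (flast), doej@example.com
# (lastf), jane.doe@example.com (first.last), janed@example.com (firstl) and
# jane@example.com (first only).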
def main():
"""Main Function"""
print(BANNER + "\n\n\n")
args = parse_arguments()
# Check the version
check_li2u_version()
# Instantiate a session by logging in to LinkedIn.
session = login(args)
# If we can't get a valid session, we quit now. Specific errors are
# printed to the console inside the login() function.
if not session:
sys.exit()
# Prepare and execute the searches.
session = set_search_csrf(session)
company_id, staff_count = get_company_info(args.company, session)
found_names = scrape_info(session, company_id, staff_count, args)
# Clean up all the data.
clean_list = clean(found_names)
# Write the data to some files.
write_files(args.company, args.domain, clean_list)
# Time to get hacking.
print("\n\n" + PC.ok_box + "All done! Check out your lovely new files in"
"the li2u-output directory.")
if __name__ == "__main__":
main()
|
import enum
import typing
import warnings
import attr
from uecp.commands.base import (
UECPCommand,
UECPCommandDecodeElementCodeMismatchError,
UECPCommandDecodeNotEnoughData,
UECPCommandException,
)
from uecp.commands.mixins import UECPCommandDSNnPSN
# PIN 0x06 / Programme Item Number not implemented as deprecated
# MS 0x05 / Music/Speech flag deprecated
class InvalidProgrammeIdentification(UECPCommandException):
pass
@UECPCommand.register_type
class ProgrammeIdentificationSetCommand(UECPCommand, UECPCommandDSNnPSN):
ELEMENT_CODE = 0x01
def __init__(self, pi=0, data_set_number=0, programme_service_number=0):
super().__init__(
data_set_number=data_set_number,
programme_service_number=programme_service_number,
)
self.__pi = 0
self.pi = pi
def encode(self) -> list[int]:
return [
self.ELEMENT_CODE,
self.data_set_number,
self.programme_service_number,
self.__pi >> 8,
self.__pi & 0xFF,
]
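    # Example (hypothetical PI code): with pi = 0xC479 the encoded element body
    # is [0x01, data_set_number, programme_service_number, 0xC4, 0x79].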
@classmethod
def create_from(
cls, data: typing.Union[bytes, list[int]]
) -> tuple["ProgrammeIdentificationSetCommand", int]:
data = list(data)
if len(data) < 5:
raise UECPCommandDecodeNotEnoughData(len(data), 5)
mec, dsn, psn, pi_msb, pi_lsb = data[0:5]
if mec != cls.ELEMENT_CODE:
raise UECPCommandDecodeElementCodeMismatchError(mec, cls.ELEMENT_CODE)
pi = pi_msb << 8 | pi_lsb
return cls(pi=pi, data_set_number=dsn, programme_service_number=psn), 5
@property
def pi(self) -> int:
return self.__pi
@pi.setter
def pi(self, new_pi: int):
try:
if new_pi == int(new_pi):
new_pi = int(new_pi)
else:
raise ValueError()
except ValueError:
raise InvalidProgrammeIdentification(new_pi)
if not (0x0 <= new_pi <= 0xFFFF):
raise InvalidProgrammeIdentification(new_pi)
self.__pi = new_pi
class InvalidProgrammeServiceName(UECPCommandException):
def __init__(self, programme_service_name, cause: str = "unknown"):
self.programme_service_name = programme_service_name
self.cause = str(cause)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(programme_service_name={self.programme_service_name!r}, cause={self.cause!r})"
def __str__(self):
return f"Supplied an invalid value for programme service name, cause: {self.cause}. Supplied {self.programme_service_name!r}"
@UECPCommand.register_type
class ProgrammeServiceNameSetCommand(UECPCommand, UECPCommandDSNnPSN):
ELEMENT_CODE = 0x02
def __init__(self, ps: str = "", data_set_number=0, programme_service_number=0):
super().__init__(
data_set_number=data_set_number,
programme_service_number=programme_service_number,
)
self.__ps = ""
self.ps = ps
@property
def ps(self) -> str:
return self.__ps
@ps.setter
def ps(self, new_ps: str):
new_ps = str(new_ps)
if len(new_ps) > 8:
raise InvalidProgrammeServiceName(new_ps, f"PS supports only 8 characters")
new_ps = new_ps.rstrip(" ")
try:
new_ps.encode("basic_rds_character_set")
except ValueError as e:
raise InvalidProgrammeServiceName(
new_ps, f"PS cannot be encoded, exc={e!r}"
)
self.__ps = new_ps
def encode(self) -> list[int]:
return [
self.ELEMENT_CODE,
self.data_set_number,
self.programme_service_number,
] + list(self.__ps.ljust(8).encode("basic_rds_character_set"))
@classmethod
def create_from(
cls, data: typing.Union[bytes, list[int]]
) -> tuple["ProgrammeServiceNameSetCommand", int]:
data = list(data)
if len(data) < 11:
raise UECPCommandDecodeNotEnoughData(len(data), 11)
mec, dsn, psn = data[0:3]
if mec != cls.ELEMENT_CODE:
raise UECPCommandDecodeElementCodeMismatchError(mec, cls.ELEMENT_CODE)
ps = bytes(data[3:11]).decode("basic_rds_character_set")
return cls(ps=ps, data_set_number=dsn, programme_service_number=psn), 11
@UECPCommand.register_type
class DecoderInformationSetCommand(UECPCommand, UECPCommandDSNnPSN):
ELEMENT_CODE = 0x04
def __init__(
self,
stereo=False,
dynamic_pty=False,
data_set_number=0,
programme_service_number=0,
):
super().__init__(
data_set_number=data_set_number,
programme_service_number=programme_service_number,
)
self.__stereo = bool(stereo)
self.__dynamic_pty = bool(dynamic_pty)
        # artificial head & compressed flags are deprecated; the stereo / mono flag is still to be checked
@property
def stereo(self) -> bool:
return self.__stereo
@stereo.setter
def stereo(self, enabled: bool):
self.__stereo = bool(enabled)
@property
def mono(self) -> bool:
return not self.__stereo
@mono.setter
def mono(self, enabled: bool):
self.__stereo = not bool(enabled)
@property
def dynamic_pty(self) -> bool:
return self.__dynamic_pty
@dynamic_pty.setter
def dynamic_pty(self, enabled: bool):
self.__dynamic_pty = bool(enabled)
def encode(self) -> list[int]:
return [
self.ELEMENT_CODE,
self.data_set_number,
self.programme_service_number,
self.__dynamic_pty << 3 | self.__stereo,
]
@classmethod
def create_from(
cls, data: typing.Union[bytes, list[int]]
) -> tuple["DecoderInformationSetCommand", int]:
data = list(data)
if len(data) < 4:
raise UECPCommandDecodeNotEnoughData(len(data), 4)
mec, dsn, psn, flags = data[0:4]
if mec != cls.ELEMENT_CODE:
raise UECPCommandDecodeElementCodeMismatchError(mec, cls.ELEMENT_CODE)
stereo = 0b1 & flags
dynamic_pty = 0b1000 & flags
return (
cls(
stereo=stereo,
dynamic_pty=dynamic_pty,
data_set_number=dsn,
programme_service_number=psn,
),
4,
)
@UECPCommand.register_type
class TrafficAnnouncementProgrammeSetCommand(UECPCommand, UECPCommandDSNnPSN):
ELEMENT_CODE = 0x03
def __init__(
self,
announcement=False,
programme=False,
data_set_number=0,
programme_service_number=0,
):
super().__init__(
data_set_number=data_set_number,
programme_service_number=programme_service_number,
)
self.__announcement = bool(announcement)
self.__programme = bool(programme)
@property
def announcement(self) -> bool:
return self.__announcement
@announcement.setter
def announcement(self, enabled: bool):
self.__announcement = bool(enabled)
@property
def programme(self):
return self.__programme
@programme.setter
def programme(self, enabled: bool):
self.__programme = bool(enabled)
def encode(self) -> list[int]:
return [
self.ELEMENT_CODE,
self.data_set_number,
self.programme_service_number,
self.__programme << 1 | self.__announcement,
]
@classmethod
def create_from(
cls, data: typing.Union[bytes, list[int]]
) -> tuple["TrafficAnnouncementProgrammeSetCommand", int]:
data = list(data)
if len(data) < 4:
raise UECPCommandDecodeNotEnoughData(len(data), 4)
mec, dsn, psn, flags = data[0:4]
if mec != cls.ELEMENT_CODE:
raise UECPCommandDecodeElementCodeMismatchError(mec, cls.ELEMENT_CODE)
announcement = 0b1 & flags
programme = 0b10 & flags
return (
cls(
announcement=announcement,
programme=programme,
data_set_number=dsn,
programme_service_number=psn,
),
4,
)
@enum.unique
class ProgrammeType(enum.IntEnum):
UNDEFINED = 0
NEWS = 1
CURRENT_AFFAIRS = 2
INFORMATION = 3
SPORT = 4
EDUCATION = 5
DRAMA = 6
CULTURE = 7
SCIENCE = 8
VARIED = 9
POP_MUSIC = 10
ROCK_MUSIC = 11
EASY_LISTENING_MUSIC = 12
LIGHT_CLASSICAL = 13
SERIOUS_CLASSICAL = 14
OTHER_MUSIC = 15
WEATHER = 16
FINANCE = 17
CHILDREN_PROGRAMME = 18
SOCIAL_AFFAIRS = 19
RELIGION = 20
PHONE_IN = 21
TRAVEL = 22
LEISURE = 23
JAZZ_MUSIC = 24
COUNTRY_MUSIC = 25
NATIONAL_MUSIC = 26
OLDIES_MUSIC = 27
FOLK_MUSIC = 28
DOCUMENTARY = 29
ALARM_TEST = 30
ALARM = 31
@UECPCommand.register_type
class ProgrammeTypeSetCommand(UECPCommand, UECPCommandDSNnPSN):
ELEMENT_CODE = 0x07
def __init__(
self,
programme_type: typing.Union[ProgrammeType, int] = ProgrammeType.UNDEFINED,
data_set_number=0,
programme_service_number=0,
):
super().__init__(
data_set_number=data_set_number,
programme_service_number=programme_service_number,
)
self.__programme_type = ProgrammeType(programme_type)
@property
def programme_type(self) -> ProgrammeType:
return self.__programme_type
@programme_type.setter
def programme_type(self, new_programme_type: typing.Union[int, ProgrammeType]):
self.__programme_type = ProgrammeType(new_programme_type)
def encode(self) -> list[int]:
return [
self.ELEMENT_CODE,
self.data_set_number,
self.programme_service_number,
int(self.__programme_type),
]
@classmethod
def create_from(
cls, data: typing.Union[bytes, list[int]]
) -> tuple["ProgrammeTypeSetCommand", int]:
data = list(data)
if len(data) < 4:
raise UECPCommandDecodeNotEnoughData(len(data), 4)
mec, dsn, psn, programme_type = data[0:4]
if mec != cls.ELEMENT_CODE:
raise UECPCommandDecodeElementCodeMismatchError(mec, cls.ELEMENT_CODE)
programme_type = ProgrammeType(programme_type)
return (
cls(
programme_type=programme_type,
data_set_number=dsn,
programme_service_number=psn,
),
4,
)
class InvalidProgrammeTypeName(UECPCommandException):
def __init__(self, programme_type_name, cause: str = "unknown"):
self.programme_type_name = programme_type_name
self.cause = str(cause)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(programme_type_name={self.programme_type_name!r}, cause={self.cause!r})"
def __str__(self):
return f"Supplied an invalid value for programme type name, cause: {self.cause}. Supplied {self.programme_type_name!r}"
@UECPCommand.register_type
class ProgrammeTypeNameSetCommand(UECPCommand, UECPCommandDSNnPSN):
ELEMENT_CODE = 0x3E
def __init__(
self, programme_type_name="", data_set_number=0, programme_service_number=0
):
super().__init__(
data_set_number=data_set_number,
programme_service_number=programme_service_number,
)
self.__programme_type_name = ""
self.programme_type_name = programme_type_name
@property
def programme_type_name(self) -> str:
return self.__programme_type_name
@programme_type_name.setter
def programme_type_name(self, new_programme_type_name: str):
new_programme_type_name = str(new_programme_type_name)
if len(new_programme_type_name) > 8:
raise InvalidProgrammeTypeName(
new_programme_type_name, f"PTYN supports only 8 characters"
)
new_programme_type_name = new_programme_type_name.rstrip(" ")
try:
new_programme_type_name.encode("basic_rds_character_set")
except ValueError as e:
            raise InvalidProgrammeTypeName(
new_programme_type_name, f"PTYN cannot be encoded, exc={e!r}"
)
self.__programme_type_name = new_programme_type_name
def encode(self) -> list[int]:
return [
self.ELEMENT_CODE,
self.data_set_number,
self.programme_service_number,
] + list(self.__programme_type_name.ljust(8).encode("basic_rds_character_set"))
@classmethod
def create_from(
cls, data: typing.Union[bytes, list[int]]
) -> tuple["ProgrammeTypeNameSetCommand", int]:
data = list(data)
if len(data) < 11:
raise UECPCommandDecodeNotEnoughData(len(data), 11)
mec, dsn, psn = data[0:3]
if mec != cls.ELEMENT_CODE:
raise UECPCommandDecodeElementCodeMismatchError(mec, cls.ELEMENT_CODE)
programme_type_name = bytes(data[3:11]).decode("basic_rds_character_set")
return (
cls(
programme_type_name=programme_type_name,
data_set_number=dsn,
programme_service_number=psn,
),
11,
)
@enum.unique
class RadioTextBufferConfiguration(enum.IntEnum):
TRUNCATE_BEFORE = 0b00
APPEND = 0b10
class InvalidNumberOfTransmissions(UECPCommandException):
pass
def _ensure_radio_text_carriage_return(*values: str) -> str:
if len(values) < 1:
import inspect
raise TypeError(
f"{inspect.stack()[0][3]} must be called with at least one argument and the last one must be a string"
)
value = values[-1]
value = str(value)
if 0 < len(value) < 61 and value[-1] != "\r":
value += "\r"
warnings.warn(
"Implicitly appending a carriage return 0x0D to radio text shorter than 61 characters"
)
return value
def _check_radio_text(_, __, value: str):
value = str(value)
if len(value) > 64 or len(value) == 0:
raise ValueError(
f"Radio text supports only up to 64 characters and must not be empty, given {value!r}",
)
if len(value) < 61 and value[-1] != "\r":
raise ValueError(
f"Radio text shorter than 61 characters must be terminated by a carriage return, given {value!r}"
)
value.encode("basic_rds_character_set")
return value
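# Example of the two helpers above (text invented for illustration):
# RadioText("Hello") stores "Hello\r" because the converter appends the
# carriage return (and emits a warning), while a text of 61 to 64 characters
# passes validation without a terminating "\r".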
@attr.s
class RadioText:
text: str = attr.ib(
converter=_ensure_radio_text_carriage_return,
validator=_check_radio_text,
on_setattr=[
_ensure_radio_text_carriage_return, # type:ignore
_check_radio_text,
],
)
number_of_transmissions: int = attr.ib(default=0, converter=int)
a_b_toggle: bool = attr.ib(default=False, converter=bool)
@number_of_transmissions.validator
def _check_number_of_transmissions(self, _, new_not: int):
if not (0x0 <= new_not <= 0xF):
raise InvalidNumberOfTransmissions(new_not)
@UECPCommand.register_type
class RadioTextSetCommand(UECPCommand, UECPCommandDSNnPSN):
ELEMENT_CODE = 0x0A
INFINITE_TRANSMISSIONS = 0
def __init__(
self,
text="",
number_of_transmissions=0,
a_b_toggle=False,
buffer_configuration=RadioTextBufferConfiguration.TRUNCATE_BEFORE,
data_set_number=0,
programme_service_number=0,
radiotext: RadioText = None,
):
super().__init__(
data_set_number=data_set_number,
programme_service_number=programme_service_number,
)
if radiotext is not None:
self._radiotext = radiotext
else:
self._radiotext = RadioText(
text=text,
number_of_transmissions=number_of_transmissions,
a_b_toggle=a_b_toggle,
)
self._buffer_configuration = RadioTextBufferConfiguration(buffer_configuration)
@property
def text(self) -> str:
return self._radiotext.text
@text.setter
def text(self, new_text: str):
self._radiotext.text = new_text
@property
def number_of_transmissions(self) -> int:
return self._radiotext.number_of_transmissions
@number_of_transmissions.setter
def number_of_transmissions(self, new_not: int):
self._radiotext.number_of_transmissions = new_not
@property
def a_b_toggle(self) -> bool:
return self._radiotext.a_b_toggle
@a_b_toggle.setter
def a_b_toggle(self, toggle: bool):
self._radiotext.a_b_toggle = toggle
@property
def buffer_configuration(self) -> RadioTextBufferConfiguration:
return self._buffer_configuration
@buffer_configuration.setter
def buffer_configuration(self, buffer_conf: RadioTextBufferConfiguration):
self._buffer_configuration = RadioTextBufferConfiguration(buffer_conf)
@property
def radiotext(self) -> RadioText:
return self._radiotext
def encode(self) -> list[int]:
data = [self.ELEMENT_CODE, self.data_set_number, self.programme_service_number]
if (
len(self._radiotext.text) == 0
and self._buffer_configuration
is RadioTextBufferConfiguration.TRUNCATE_BEFORE
):
data.append(0)
else:
mel = 1 + len(self._radiotext.text)
flags = (
self._buffer_configuration << 5
| self._radiotext.number_of_transmissions << 1
| self._radiotext.a_b_toggle
)
data += [mel, flags]
data += list(self._radiotext.text.encode("basic_rds_character_set"))
return data
@classmethod
def create_from(
cls, data: typing.Union[bytes, list[int]]
) -> tuple["RadioTextSetCommand", int]:
data = list(data)
if len(data) < 4:
raise UECPCommandDecodeNotEnoughData(len(data), 4)
mec, dsn, psn, mel = data[0:4]
if mec != cls.ELEMENT_CODE:
raise UECPCommandDecodeElementCodeMismatchError(mec, cls.ELEMENT_CODE)
if mel == 0:
            return cls(data_set_number=dsn, programme_service_number=psn), 4
data = data[4:]
if len(data) < mel:
raise UECPCommandDecodeNotEnoughData(len(data), mel)
flags = data[0]
buffer_configuration = (flags & 0b0110_0000) >> 5
number_of_transmission = (flags & 0b0001_1110) >> 1
a_b_toggle = flags & 0b0000_0001
text_data = bytes(data[1:mel])
text = text_data.decode("basic_rds_character_set")
return (
cls(
text=text,
buffer_configuration=buffer_configuration,
number_of_transmissions=number_of_transmission,
a_b_toggle=a_b_toggle,
data_set_number=dsn,
programme_service_number=psn,
),
4 + mel,
)
# TODO AF
# TODO EON-AF
# TODO Slow Labeling Codes
# TODO Linkage information
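# A minimal round-trip sketch, assuming the "basic_rds_character_set" codec used
# throughout this module is registered at import time; the text and counters are
# placeholder values.
if __name__ == "__main__":
    _cmd = RadioTextSetCommand(text="HELLO WORLD\r", number_of_transmissions=3)
    _encoded = _cmd.encode()
    _decoded, _consumed = RadioTextSetCommand.create_from(_encoded)
    assert _consumed == len(_encoded)
    assert _decoded.text == "HELLO WORLD\r"
    assert _decoded.number_of_transmissions == 3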
|
import tensorflow as tf
class VGG(tf.keras.layers.Layer):
def __init__(self):
super(VGG, self).__init__()
self.conv1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=1, padding="same",
activation=tf.keras.activations.relu)
self.conv2 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=1, padding="same",
activation=tf.keras.activations.relu)
self.pool1 = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)
self.conv3 = tf.keras.layers.Conv2D(filters=128, kernel_size=3, strides=1, padding="same",
activation=tf.keras.activations.relu)
self.conv4 = tf.keras.layers.Conv2D(filters=128, kernel_size=3, strides=1, padding="same",
activation=tf.keras.activations.relu)
self.pool2 = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)
self.conv5 = tf.keras.layers.Conv2D(filters=256, kernel_size=3, strides=1, padding="same",
activation=tf.keras.activations.relu)
self.conv6 = tf.keras.layers.Conv2D(filters=256, kernel_size=3, strides=1, padding="same",
activation=tf.keras.activations.relu)
self.conv7 = tf.keras.layers.Conv2D(filters=256, kernel_size=3, strides=1, padding="same",
activation=tf.keras.activations.relu)
self.conv8 = tf.keras.layers.Conv2D(filters=256, kernel_size=3, strides=1, padding="same",
activation=tf.keras.activations.relu)
self.pool3 = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)
self.conv9 = tf.keras.layers.Conv2D(filters=512, kernel_size=3, strides=1, padding="same",
activation=tf.keras.activations.relu)
self.conv10 = tf.keras.layers.Conv2D(filters=512, kernel_size=3, strides=1, padding="same",
activation=tf.keras.activations.relu)
def call(self, inputs, **kwargs):
x = self.conv1(inputs)
x = self.conv2(x)
x = self.pool1(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.pool2(x)
x = self.conv5(x)
x = self.conv6(x)
x = self.conv7(x)
x = self.conv8(x)
x = self.pool3(x)
x = self.conv9(x)
x = self.conv10(x)
return x
def get_backbone():
return VGG()
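# A minimal smoke-test sketch (the input size is a placeholder): with three 2x2
# max-pools the backbone downsamples by a factor of 8, so a 64x64 RGB batch
# should yield an 8x8 feature map with 512 channels.
if __name__ == "__main__":
    backbone = get_backbone()
    features = backbone(tf.random.normal([1, 64, 64, 3]))
    print(features.shape)  # expected: (1, 8, 8, 512)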
|
# Generated by Django 3.0.7 on 2020-06-10 16:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Veiculo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('modelo', models.CharField(help_text='Modelo do veículo', max_length=30)),
('placa', models.CharField(help_text='Placa do veículo', max_length=8)),
('cor', models.CharField(help_text='Cor do veículo', max_length=100)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='veiculos', to=settings.AUTH_USER_MODEL, verbose_name='Usuário')),
],
),
migrations.CreateModel(
name='UserType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_passive', models.BooleanField(default=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='user_type', to=settings.AUTH_USER_MODEL)),
],
),
]
|
import sys, codecs, json, re, time, os, getopt, traceback
import signal, psutil
from urlparse import urlparse
from multiprocessing import Process as Task, Queue
from subprocess import call, PIPE, STDOUT
import multiprocessing as mp
import random, calendar, shutil, commands, hmac
from termcolor import colored
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import NoSuchElementException, ElementNotVisibleException, WebDriverException, NoSuchWindowException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import recipe
from pympler import asizeof
browser_width = None
browser_hieght = None
MAC_KEY = '12345'
#new_script_hash = hmac.new(MAC_KEY, page)
def getlocaltime():
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
# This function tries to ensure that no extra zombie children stick around
def kill_child_processes(parent_pid=None, parent=None, timeout=3, sig=signal.SIGTERM, include_parent = True):
global log_f
#current_time = getlocaltime()
if not parent and not parent_pid:
return (None, None)
try:
if not parent and parent_pid:
parent = psutil.Process(parent_pid)
except (psutil.NoSuchProcess, psutil.ZombieProcess, psutil.AccessDenied):
return (None, None)
if parent.pid == os.getpid():
include_parent = False
children = parent.children(recursive=True)
if include_parent:
children.append(parent)
for process in children:
#msg = '%s\tKilling child process [%d] of [%d]...\n' % (current_time, process.pid, parent.pid)
#if log_f:
#log_f.write(msg)
try:
process.send_signal(sig)
except (psutil.NoSuchProcess, psutil.ZombieProcess, psutil.AccessDenied):
pass
gone, alive = psutil.wait_procs(children, timeout=timeout, callback=None)
if alive:
for process in alive:
try:
process.kill() # SEND SIGKILL
except (psutil.NoSuchProcess, psutil.ZombieProcess, psutil.AccessDenied):
pass
gone, alive = psutil.wait_procs(alive, timeout=timeout, callback=None)
return (gone, alive)
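# Typical call, sketched: reap everything the current process has spawned,
# giving children `timeout` seconds to honour SIGTERM before falling back to
# SIGKILL, e.g. gone, alive = kill_child_processes(parent_pid=os.getpid(), timeout=3)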
def get_child_processes(parent_pid):
try:
parent = psutil.Process(parent_pid)
except (psutil.NoSuchProcess, psutil.ZombieProcess, psutil.AccessDenied):
return None
children = parent.children(recursive=True)
return children
def signal_term_handler(sig, frame):
global parent_pid
current_pid = os.getpid()
if current_pid == parent_pid:
#msg = '%s\tPARENT PROCESS [%d] received SIGTERM!!! Killing all child processes...\n' % (current_time, current_pid)
process_name = 'chrome'
kill_processes_by_name(process_name)
kill_all_processes()
def kill_all_processes(restart_parent_flag=False):
global parent_pid, process_list, log_f
current_time = getlocaltime()
current_pid = os.getpid()
if current_pid == parent_pid:
msg = '%s\tPARENT PROCESS [%d] received SIGTERM!!! Killing all child processes...\n' % (current_time, current_pid)
else:
msg = '%s\tProcess [%d] received SIGTERM!!! Killing all child processes... PARENT PID=[%d]\n' % (current_time, current_pid, parent_pid)
#print(msg)
#sys.stdout.flush()
log_f.write(msg)
kill_child_processes(parent_pid = current_pid)
current_time = getlocaltime()
msg = '%s\tAll child processes of Process [%d] are killed!!!\n' % (current_time, current_pid)
#print(msg)
log_f.write(msg)
if current_pid == parent_pid:
if restart_parent_flag:
restart_all_tasks(log_f)
else:
log_f.close()
sys.exit()
def get_task_queue(input_file):
global num_instances
url_queue = [[] for i in range(num_instances)]
rule = re.compile(ur"[^a-zA-Z0-9\u4e00-\u9fa5]")
try:
with codecs.open(input_file, mode='r', encoding='utf-8') as input_f:
data = json.loads(input_f.read())
except Exception as e:
print(e)
sys.exit(1)
for index, script_list in data.items():
index = int(index)
split = index % num_instances
for script in script_list:
append_str = rule.sub('', script[-10:])
url_queue[split].append((index, append_str))
for split, tasks in enumerate(url_queue):
url_queue[split] = sorted(tasks, key=lambda x:x[0])
#print(url_queue)
return url_queue
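# Note on the expected input, inferred from the parsing above: the JSON maps an
# index to a list of script strings, e.g. {"0": ["<script text>"], "1": [...]};
# each entry is bucketed into url_queue[index % num_instances] together with a
# sanitised suffix built from the script's last 10 characters.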
def determine_parameter_position(f, line_number, funcID, funcName, scriptID_, scriptURL_, windowID_):
next_line = f[line_number][1:]
while ',--,' in f[line_number][1:]:
split_list = f[line_number][1:].split(",elem_js_conflict,")
if split_list[-2].split("\"")[0]:
functionID = int(split_list[-2].split("\"")[0])
else:
functionID = -1
functionName = split_list[-3]
if functionID != funcID or functionName != funcName:
return (None, None)
line_number += 1
next_line = f[line_number][1:]
split_list = next_line.split(",elem_js_conflict,")
first_tuple = split_list[0]
if first_tuple == '*** start_of_func':
if split_list[-3]:
functionID = int(split_list[-3])
else:
functionID = -1
windowID = int(split_list[-1])
functionName = split_list[-4]
if (funcID == functionID and funcID != -1) or funcName == functionName or funcName.split('.')[-1] == functionName or funcName.split('[')[0] == functionName or funcName.split('-')[-1] == functionName:
functionEndPos = int(split_list[-5])
functionStartPos = int(split_list[-6])
#functions_list.append((functionName, functionID)) #, functionStartPos, functionEndPos))
scriptID = int(split_list[1])
scriptURL = split_list[2]
if scriptID == scriptID_ and scriptURL == scriptURL_ and windowID == windowID_:
return (functionStartPos, functionEndPos)
else:
return (None, None)
else:
return (None, None)
def update_write_stack_using_alias(alias_target, alias, write_stack, position_tuple = None):
# alias: (alias_name, rhsFullString, assignID, windowID, scriptID, scriptURL, exprPos)
write_index = 0
#for write in write_stack:
while write_index < len(write_stack):
write = write_stack[write_index]
# write: (assignID, windowID, lhsFullString, rhsFullString, rhsType, scriptID, scriptURL, exprPos, typeofLHS)
target = write[2]
if alias[0] == target or alias[0] in re.split(r'\[|\]|\.', target): #target.split('.') or alias[0] in target.split('['):
window_id = write[1]
script_id = write[-4]
script_url = write[-3]
alias_window_id = alias[3]
alias_script_id = alias[-3]
alias_script_url = alias[-2]
#print('write', write)
#print('alias', alias)
if window_id == alias_window_id and script_id == alias_script_id and script_url == alias_script_url and write[3] != alias[1]:
alias_write_target = target.replace(alias[0], alias_target)
alias_write = (write[0], write[1], alias_write_target, write[3], write[4], write[5], write[6], write[7], write[8])
#print(' ==>', alias_write)
#print('- - - - - - - - - - - - - -')
move_index = len(write_stack)
for write_ in reversed(write_stack):
move_index -= 1
if move_index == write_index:
break
if move_index == len(write_stack)-1:
write_stack.append(write_stack[move_index])
else:
write_stack[move_index+1] = write_stack[move_index]
if move_index == len(write_stack)-1:
write_stack.append(alias_write)
else:
write_stack[move_index+1] = alias_write
if write[-1] == 'object' and ('(=)' in write[3] or '=)' not in write[3]):
recurse_alias_name = write[3].split('(=)')[-1].split('=')[0]
recurse_alias_target = alias_target
recurse_alias = (recurse_alias_name, recurse_alias_name, write[0], write[-4], write[-3], write[-2])
#print('recursion', recurse_alias_target, recurse_alias)
update_write_stack_using_alias(recurse_alias_target, recurse_alias, write_stack)
#if write[3].split('(=)')[-1].split('=')[0]
write_index += 1
def update_write_dict_using_alias(alias_target, alias, write_dict):
# alias: (alias_name, rhsFullString, assignID, windowID, scriptID, scriptURL, exprPos)
alias_assign_id = alias[2]
replace_start_assign_id = None
replace_end_assign_id = None
if alias_target in write_dict:
for write_ in reversed(write_dict[alias_target]):
# write_: (assignID, windowID, rhsFullString, rhsType, scriptID, scriptURL, exprPos, typeofLHS)
assign_id_ = write_[0]
rhs_full_string_ = write_[2]
if assign_id_ < alias_assign_id and ('(=)' in rhs_full_string_ or '=)' not in rhs_full_string_):
replace_start_assign_id = assign_id_
break
for write_ in write_dict[alias_target]:
# write_: (assignID, windowID, rhsFullString, rhsType, scriptID, scriptURL, exprPos, typeofLHS)
assign_id_ = write_[0]
rhs_full_string_ = write_[2]
if assign_id_ > alias_assign_id and ('(=)' in rhs_full_string_ or '=)' not in rhs_full_string_):
replace_end_assign_id = assign_id_
break
#print('alias_target: %s\tstart_index: %s'%(alias_target, str(replace_start_assign_id)))
#print('alias_target: %s\tend_index: %s'%(alias_target, str(replace_end_assign_id)))
# if replace_start_assign_id is not None, we should replace alias_name with alias_target for assignments with ID > replace_start_assign_id and < alias_assign_id
# if replace_start_assign_id is None, we replace alias_name with alias_target for all assignments with ID < alias_assign_id
for write_target, write_list in write_dict.items():
for write in write_list:
# write: (assignID, windowID, rhsFullString, rhsType, scriptID, scriptURL, exprPos, typeofLHS)
if write_target == alias[0] or alias[0] in re.split(r'\[|\]|\.', write_target): #alias[0] in write_target.split('.') or alias[0] in write_target.split('[')
if (replace_start_assign_id is not None and write[0] > replace_start_assign_id and write[0] < alias_assign_id) or (replace_start_assign_id is None and write[0] < alias_assign_id):
if write[1] == alias[3]:
alias_write_target = write_target.replace(alias[0], alias_target)
if alias_write_target not in write_dict:
write_dict[alias_write_target] = list()
write_dict[alias_write_target].append((alias_assign_id, write[1], write[2], write[3], write[4], write[5], write[6], write[7]))
#print('==>', alias_assign_id, alias_write_target, write_target, write)
# if replace_end_assign_id is not None, we should replace alias_name with alias_target, and alias_target with alias_name
# for assignments with ID > alias_assign_id and < replace_end_assign_id
# if replace_end_assign_id is None, we replace alias_name with alias_target, and alias_target with alias_name
# for assignments with ID > alias_assign_id
for write_target, write_list in write_dict.items():
for write in write_list:
# write: (assignID, windowID, rhsFullString, rhsType, scriptID, scriptURL, exprPos, typeofLHS)
alias_write_target = None
if (write_target == alias[0] or alias[0] in re.split(r'\[|\]|\.', write_target)) and write[1] == alias[3]:
if (replace_end_assign_id is not None and write[0] > alias_assign_id and write[0] < replace_end_assign_id) or (replace_end_assign_id is None and write[0] > alias_assign_id):
alias_write_target = write_target.replace(alias[0], alias_target)
if (write_target == alias_target or alias_target in re.split(r'\[|\]|\.', write_target)) and write[1] == alias[3]:
if (replace_end_assign_id is not None and write[0] > alias_assign_id and write[0] < replace_end_assign_id) or (replace_end_assign_id is None and write[0] > alias_assign_id):
alias_write_target = write_target.replace(alias_target, alias[0])
if alias_write_target is not None:
if alias_write_target not in write_dict:
write_dict[alias_write_target] = list()
write_dict[alias_write_target].append((alias_assign_id, write[1], write[2], write[3], write[4], write[5], write[6], write[7]))
#print('==>', alias_assign_id, alias_write_target, write_target, write)
def update_write_stack_using_args(start, end, before, after, write_stack, call_stack):
# write: (assignID, windowID, lhsFullString, rhsFullString, rhsType, scriptID, scriptURL, exprPos, tpyeofLHS)
index = len(write_stack)
matched = False
for write in reversed(write_stack):
index -= 1
target = write[2]
if before not in target:
continue
writePos = write[-2]
if writePos <= end and writePos >= start:
matched = True
new_write = (write[0], write[1], write[2].replace(before, after), write[3], write[4], write[5], write[6], write[7], write[8])
write_stack[index] = new_write
else:
if matched == True:
break
def update_write_dict_using_args(start, end, before, after, write_dict, call_stack):
for target, write_list in write_dict.items():
if target != before:
continue
new_write_dict = dict()
new_write_list = list()
for write in write_list:
writePos = int(write[-2])
located_function = locate_function(writePos, call_stack)
newWriteKey = after
if located_function is not None:
if located_function[0]:
thisAlias = located_function[1]
                    if 'this' in after:
newWriteKey = after.replace('this', thisAlias)
elif located_function[2]:
newAlias = located_function[3]
                    if 'this' in after:
newWriteKey = after.replace('this', newAlias)
if writePos >= start and writePos <= end:
new_write_list.append(write)
if newWriteKey not in new_write_dict:
new_write_dict[newWriteKey] = list()
new_write_dict[newWriteKey].append(write)
#write_dict.pop(target, None)
for newWriteKey, updated_write_list in new_write_dict.items():
if newWriteKey not in write_dict:
write_dict[newWriteKey] = updated_write_list
else:
for new_write in updated_write_list:
write_dict[newWriteKey].append(new_write)
unchanged_write_list = [write for write in write_list if write not in new_write_list]
if len(unchanged_write_list) > 0:
write_dict[before] = unchanged_write_list
else:
write_dict.pop(target, None)
def update_write_stack_using_new_temp(var_name, write_stack, func_id, func_name, window_id, script_id, script_url, call_stack):
# write: (assignID, windowID, lhsFullString, rhsFullString, rhsType, scriptID, scriptURL, exprPos, tpyeofLHS)
index = len(write_stack)
matched = False
for write in reversed(write_stack):
index -= 1
target = write[2]
if 'this' not in target:
continue
writePos = write[-2]
located_function = locate_function(writePos, call_stack)
#print('==> ', write, writePos, located_function, len(call_stack))
if located_function is not None:
# located_function: (isObjCall, thisAlias, isObjNew, newAlias, functionID, functionName, windowID, scriptID, scriptURL, functionStartPos, functionEndPos)
funcName = located_function[5]
funcID = located_function[4]
windowID = located_function[6]
scriptID = located_function[7]
scriptURL = located_function[8]
#print(' ==> ', funcName, funcID, windowID, scriptID, scriptURL)
#print(' ==> ', func_name, func_id, window_id, script_id, script_url)
if (funcName == func_name or funcName == func_name.split('.')[-1] or funcName == func_name.split('[')[0] or funcName == func_name.split('-')[-1]) and scriptID == script_id and scriptURL == script_url and windowID == window_id:
if (func_id is not None and func_id == funcID) or func_id is None:
matched = True
new_write = (write[0], write[1], write[2].replace('this', var_name), write[3], write[4], write[5], write[6], write[7], write[8])
write_stack[index] = new_write
elif matched == True:
break
elif matched == True:
break
def locate_function(exprPos, call_stack):
for call in reversed(call_stack):
functionStartPos = int(call[-2])
functionEndPos = int(call[-1])
#print(exprPos, functionStartPos, functionEndPos)
if exprPos <= functionEndPos and exprPos >= functionStartPos:
return call
return None
def locate_obj_call_or_obj_new(exprPos, obj_stack):
pass
def pop_param_args_dict(param2args, poped_call):
# poped_call: (isObjCall, thisAlias, isObjNew, newAlias, functionID, functionName, windowID, scriptID, scriptURL, functionStartPos, functionEndPos)
poped_func_id = poped_call[4]
poped_func_name = poped_call[5]
poped_window_id = poped_call[6]
poped_script_id = poped_call[7]
poped_script_url = poped_call[8]
for param2arg in reversed(param2args):
# param2arg: (paramName, assignID, windowID, argumentName, argumentIndex, scriptID, scriptURL, functionID, functionName, functionStartPos, functionEndPos, argumentPos, typeof)
arg_func_id = param2arg[7]
arg_func_name = param2arg[8]
arg_window_id = param2arg[2]
arg_script_id = param2arg[5]
arg_script_url = param2arg[6]
if ((arg_func_id == poped_func_id and arg_func_id != -1) or arg_func_name == poped_func_name or poped_func_name == arg_func_name.split('.')[-1] or poped_func_name == arg_func_name.split('[')[0] or poped_func_name == arg_func_name.split('-')[-1]) and arg_window_id == poped_window_id and arg_script_id == poped_script_id and arg_script_url == poped_script_url:
param2args.remove(param2arg)
def pop_call_stack(startPos, endPos, call_stack, param2args):
if len(call_stack) > 0:
last_call = call_stack[-1]
if endPos is None:
if startPos < int(last_call[-2]) or startPos > int(last_call[-1]):
poped_call = call_stack.pop()
print('poped_call', startPos, endPos, poped_call)
pop_param_args_dict(param2args, poped_call)
else:
if endPos < int(last_call[-2]) or startPos > int(last_call[-1]):
poped_call = call_stack.pop()
print('poped_call', startPos, endPos, poped_call)
pop_param_args_dict(param2args, poped_call)
def determine_direct_call_or_not(func_id, func_name, window_id, script_id, script_url, input_f, line_number, valid_first_tuple_list, arg_is_valid):
#print('\n\n')
while line_number <= len(input_f):
next_line = input_f[line_number][1:]
#print(next_line)
split_list = next_line.split(',elem_js_conflict,')
first_tuple = split_list[0]
if first_tuple == '*** start_of_func':
if split_list[-3]:
functionID = int(split_list[-3])
else:
functionID = -1
windowID = int(split_list[-1])
functionName = split_list[-4]
scriptID = int(split_list[1])
scriptURL = split_list[2]
if (func_name == functionName or functionName == func_name.split('.')[-1] or functionName == func_name.split('[')[0] or functionName == func_name.split('-')[-1]) and windowID == window_id and scriptID == script_id and scriptURL == script_url:
if func_id is not None:
if func_id == functionID:
return True
else:
return False
else:
return True
if first_tuple in valid_first_tuple_list:
if first_tuple == 'is_new_temp':
                windowID = int(split_list[-1])
if split_list[2]:
functionID = int(split_list[2])
else:
functionID = -1
functionName = split_list[1]
scriptID = int(split_list[-4])
scriptURL = split_list[-3]
if scriptID == script_id and windowID == window_id and scriptURL == script_url and func_name == functionName:
if func_id is not None and func_id != functionID:
return False
else:
return False
elif first_tuple == 'is_keyed_obj_new' or first_tuple == 'is_named_obj_new' or first_tuple == 'is_named_obj_call' or first_tuple == 'is_keyed_obj_call':
windowID = int(split_list[-1])
scriptURL = split_list[-3]
scriptID = int(split_list[-4])
callKey = split_list[1]
callValue = split_list[2]
if first_tuple == 'is_keyed_obj_new' or first_tuple == 'is_keyed_obj_call':
functionName = callKey + '[' + callValue + ']'
else:
functionName = callKey + '.' + callValue
if scriptID == script_id and windowID == window_id and scriptURL == script_url and func_name == functionName:
if func_id is not None and func_id != functionID:
return False
else:
return False
elif arg_is_valid and ',--,' in next_line:
windowID = int(split_list[-1])
if split_list[-2].split("\"")[0]:
functionID = int(split_list[-2].split("\"")[0])
else:
functionID = -1
functionName = split_list[-3]
scriptURL = split_list[-6]
scriptID = int(split_list[-7])
if scriptID == script_id and windowID == window_id and scriptURL == script_url and func_name == functionName:
if func_id is not None and func_id != functionID:
return False
else:
return False
else:
return False
line_number += 1
return False
def measure(user_dir, task_id, length, start, end, status_queue, process_index):
global processed_data_dir, conflicting_rank_set
current_pid = os.getpid()
current_dir = os.getcwd()
try:
status = 'Process %-4d task %d/%d PID [%d] starting ...' % (process_index, task_id+1, length, current_pid)
status_queue.put([process_index, status])
#print(status)
result_dict = dict()
processed_list = set()
raw_input_dir = user_dir + '_logs'
input_dir = os.path.join(current_dir, raw_input_dir)
#print(input_dir)
file_list = os.listdir(input_dir)
rank2func_files = dict()
for f in file_list:
if f.endswith('.func'):
split_list = f.split('.')
rank = int(split_list[0])
if rank not in rank2func_files:
rank2func_files[rank] = list()
rank2func_files[rank].append(f)
output_dir = os.path.join(processed_data_dir, raw_input_dir)
finished_files = os.listdir(output_dir)
finished_files = [f for f in finished_files if f.endswith('-defined_func.json')]
#files = [f for f in file_list if f.endswith('.func')]
for f in finished_files:
rank = int(f.split('-')[0])
processed_list.add(rank)
raw_output_dir = os.path.join(processed_data_dir, raw_input_dir)
if not os.path.isdir(raw_output_dir):
os.mkdir(raw_output_dir)
for rank, func_files in rank2func_files.items():
#rank = int(task.split('.')[0])
if rank > end:
#print('rank > end')
continue
if rank % num_instances != task_id or rank in processed_list or rank < start:
print('finished %d'%(rank))
continue
window2scriptID2func = dict()
#print('\n\n')
try:
for task in func_files:
try:
task_file = os.path.join(input_dir, task)
#print(task_file)
with open(task_file, 'r') as input_f:
for line in input_f:
#line = line.split("\"")[-1]
try:
line_split = line.split('\n')[0].split('\t')
function_name = line_split[0]
start_position = int(line_split[1])
end_position = int(line_split[2])
script_url = line_split[3]
timestamp = float(line_split[4])
script_id = int(line_split[5])
context_id = line_split[6]
if context_id not in window2scriptID2func:
window2scriptID2func[context_id] = dict()
if script_id not in window2scriptID2func[context_id]:
window2scriptID2func[context_id][script_id] = list()
func_info = (function_name, start_position, end_position, script_url, timestamp, task)
window2scriptID2func[context_id][script_id].append(func_info)
except Exception as e:
try:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
print(''.join('!! ' + line for line in lines))
sys.stdout.flush()
except Exception:
pass
except OSError as e:
pass
except Exception as e:
#print(e)
try:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
print(''.join('!! ' + line for line in lines))
sys.stdout.flush()
except Exception:
pass
pass
output_file = str(rank) + '-defined_func.json'
output_file = os.path.join(raw_input_dir, output_file)
output_file = os.path.join(processed_data_dir, output_file)
if len(window2scriptID2func) > 0:
print(output_file)
with open(output_file, 'w') as output_f:
output_f.write(json.dumps(window2scriptID2func))
except KeyboardInterrupt as e:
kill_all_processes()
except Exception as e:
status = 'Process %-4d task %s/%s raised an exception %s when processing URL [%d].' % (process_index, task_id+1, length, type(e), rank)
status_queue.put([process_index, status])
string = '%s\t%s' % (getlocaltime(), status)
try:
print(task_file)
print(string)
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
print(''.join('!! ' + line for line in lines))
sys.stdout.flush()
except Exception:
pass
except OSError as e:
pass
except Exception as e:
status = 'Process %-4d task %s/%s raised an exception %s.' % (process_index, task_id+1, length, type(e))
status_queue.put([process_index, status])
string = '%s\t%s' % (getlocaltime(), status)
try:
print(string)
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
print(''.join('!! ' + line for line in lines))
sys.stdout.flush()
except Exception:
pass
status = 'Process %-4d task %s/%s PID [%d] completed.' % (process_index, task_id+1, length, current_pid)
status_queue.put([process_index, status])
def main(argv):
global raw_data_dir, processed_data_dir, num_instances, parent_pid, process_list, log_f, conflicting_rank_set
signal.signal(signal.SIGTERM, signal_term_handler)
parent_pid = os.getpid()
try:
opts, args = getopt.getopt(argv, 'hu:d:i:n:p:s:e:t:', ['help', 'user_dir=', 'exp_dir=', 'num=', 'process=', 'start=', 'end=', 'type='])
except getopt.GetoptError:
usage()
sys.exit(2)
conflicting_rank_set = set()
user_dir = None
num_instances = 512
maximum_process_num = 8 # Change to 1 for debugging purpose
start = 0
end = None
exp_dir = "exps"
extract = False
clean = False
send = False
#input_type = 'info2index2script'
input_type = 'url2index'
for opt, arg in opts:
if opt in ('-u', '--user_dir'):
user_dir = arg
        elif opt in ('-d', '--exp_dir'):
exp_dir = arg
elif opt in ('-n', '--num'):
num_instances = int(arg)
elif opt in ('-p', '--process'):
maximum_process_num = int(arg)
elif opt in ('-s', '--start'):
start = int(arg)
elif opt in ('-e', '--end'):
end = int(arg)
elif opt in ('-t', '--type'):
input_type = arg
elif opt in ('-h', '--help'):
usage()
sys.exit(0)
if user_dir is None:
usage()
sys.exit(0)
input_file = 'top-1m.csv'
#task_queue = get_task_queue(input_file)
raw_data_dir = exp_dir
processed_data_dir = os.path.join(exp_dir, 'processed_data')
if not os.path.isdir(processed_data_dir):
try:
os.mkdir(processed_data_dir)
except Exception as e:
print(e)
log_file = 'convert_asg_logs.log'
log_file = os.path.join(exp_dir, log_file)
log_f = open(log_file, mode='w')
current_time = getlocaltime()
status = "PARENT SCRIPT STARTED! PARENT PID=[%d]" % parent_pid
string = '%s\t%s\n' % (current_time, status)
log_f.write(string)
string = "%s\tProcess started, argv=%s\n" % (current_time, argv)
log_f.write(string)
completed_list = set()
completion_reg = re.compile('Process [0-9\s]+task ([0-9]+)/[0-9]+ PID \[\d+\] completed.')
with codecs.open(log_file, encoding='utf-8', mode='r') as input_f:
for line in input_f:
m = re.search(completion_reg, line)
if m:
task = int(m.group(1)) - 1
completed_list.add(task)
completed_list = set()
try:
os.chdir(exp_dir)
except OSError as e:
print(e)
sys.exit(1)
#if end is None:
# end = max([max(q, key=lambda x:x[0]) for q in task_queue])[0]
tasks = [i for i in range(num_instances-1, -1, -1)]
try:
length = len(tasks)
status_queue = Queue()
final_status_set = set()
process_num = 0
process2status = dict()
running_processes = set()
process2index = dict()
index2task = dict()
round_num = 0
process_list = list()
killed_process_list = list()
alive_check_timeout = 10
alive_count = 0
while len(tasks) > 0 or len(running_processes) > 0:
current_time = getlocaltime()
num_alive_processes = sum(1 for process in process_list if process.is_alive())
status = '[%d] processes are still alive, [%d] are running ...' % (num_alive_processes, len(running_processes))
string = '%s\t%s\n' % (current_time, status)
print(string)
sys.stdout.flush()
while len(running_processes) < maximum_process_num and len(tasks) > 0:
group = tasks.pop()
task = group
if task in completed_list:
continue
user_dir_group = '%s_%d' % (user_dir, group)
process_index = process_num
#task_list = task_queue[task]
try:
process_list.append(Task(target=measure, args=(user_dir_group, task, length, start, end, status_queue, process_index)))
process = process_list[-1]
process.start()
except OSError as e:
tasks.append(group)
time.sleep(5)
continue
process_num += 1
running_processes.add(process_list[-1])
process2index[process_list[-1]] = process_index
index2task[process_index] = task
current_time = getlocaltime()
process_status = 'Process %-4d task %d/%d created. PID=%d ...' % (process_index, task+1, length, process.pid)
string = '%s\t%s' % (current_time, process_status)
print(string)
sys.stdout.flush()
if process_num % 32 == 0:
break
flag = False
while any(process.is_alive() for process in process_list):
time.sleep(1)
current_time = getlocaltime()
alive_count += 1
num_alive_processes = sum(1 for process in process_list if process.is_alive())
while not status_queue.empty():
process_index, process_status = status_queue.get()
string = '%s\t%s\n' % (current_time, process_status)
log_f.write(string)
if 'completed' in process_status:
flag = True
if process_status not in final_status_set:
final_status_set.add(process_status)
if alive_count % alive_check_timeout == 0:
status = '[%d] processes are still alive ...' % (num_alive_processes)
string = '%s\t%s\n' % (current_time, status)
print(string)
sys.stdout.flush()
# We need to get a list. Otherwise, we will receive an exception: RuntimeError: Set changed size during iteration
for process in list(running_processes):
process_index = process2index[process]
group = index2task[process_index]
if not process.is_alive():
flag = True
process_status = 'Process %-4d task %d/%d is no longer alive...' % (process_index, group+1, length)
else:
process_status = 'Process %-4d task %d/%d is still alive...' % (process_index, group+1, length)
string = '%s\t%s\n' % (current_time, process_status)
log_f.write(string)
if flag == True or (num_alive_processes < maximum_process_num and (len(tasks) > 0 or alive_count % alive_check_timeout == 0)):
break
for process in process_list:
if not process.is_alive():
if process in running_processes:
running_processes.remove(process)
except (KeyboardInterrupt, Exception) as e:
current_time = getlocaltime()
status = "PARENT SCRIPT exception %s" % type(e)
string = '%s\t%s\n' % (current_time, status)
log_f.write(string)
if not isinstance(e, KeyboardInterrupt):
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
print(type(e), "PARENT")
print(''.join('!! ' + line for line in lines))
status = ''.join('!! ' + line for line in lines)
string = '%s\t%s\n' % (current_time, status)
log_f.write(string)
kill_all_processes()
while not status_queue.empty():
process_index, process_status = status_queue.get()
string = '%s\t%s\n' % (current_time, process_status)
log_f.write(string)
for process in process_list:
try:
process.join()
except Exception:
pass
gone, alive = kill_child_processes(parent_pid = parent_pid)
timeout = 10
while timeout:
time.sleep(1)
timeout -= 1
if not mp.active_children():
break
current_time = getlocaltime()
status = "PARENT SCRIPT COMPLETED! PARENT PID=[%d]" % parent_pid
string = '%s\t%s\n' % (current_time, status)
log_f.write(string)
log_f.close()
#print(conflicting_rank_set)
def usage():
tab = '\t'
print('Usage:')
print(tab + 'python %s [OPTIONS]' % (__file__))
print(tab + '-d | --exp_dir=')
print(tab*2 + 'Exp directory')
print(tab + '-u | --user_dir=')
print(tab*2 + 'User directory of Chrome')
print(tab + '-n | --num=')
print(tab*2 + 'Number of task splits, default is 512')
print(tab + '-p | --process=')
print(tab*2 + 'Maximum number of processes, default is 8')
print(tab + '-s | --start')
print(tab*2 + 'Start index, default 0')
print(tab + '-e | --end')
print(tab*2 + 'End index, default number of URLs')
print(tab + '-t | --type=')
print(tab*2 + 'Input type, [url2index|info2index2script] default "url2index"')
if __name__ == '__main__':
main(sys.argv[1:])
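# Sketched invocation (the script name and values below are placeholders; see usage()):
#     python convert_asg_logs.py -u chrome_profile -d exps -n 512 -p 8 -s 0 -e 1000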
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 28 10:42:09 2020
@author: User
"""
import numpy as np
import pandas as pd
from file_py_helper.PostChar import (
SampleSelection,
Characterization_TypeSetting,
SampleCodesChar,
)
def make_uniform_EvRHE(df, rounding_set=2):
lst = []
for E in df["E_AppV_RHE"].values:
match = 0
for i in np.arange(-0.10, 2, 0.05):
if np.isclose(i, E, atol=0.025):
match = 1
lst.append((E, i))
if match == 0:
if E < 0 and E > -0.04:
lst.append((E, i))
else:
lst.append((E, np.nan))
# print(E,i)
if len(df["E_AppV_RHE"].values) == len(lst):
df = df.assign(**{"E_RHE": [np.round(float(i[1]), rounding_set) for i in lst]})
print(
'Len({0}) matches, new column: "E_RHE"'.format(len(df["E_AppV_RHE"].values))
)
else:
print(
"make_uniform_EvRHE lengths do not match LenEIS : {0}, len(lst) : {1}".format(
len(df["E_AppV_RHE"].values), len(lst)
)
)
return df
def CheckCols(coll_lst, df):
return [i for i in coll_lst if i in df.columns]
def serie_model(
EIS_pars, sIDslice, ModEEC="Model(Singh2015_R3RQ)", neat_eis=True, RPM_lim=1000
):
ECB = EIS_pars.loc[EIS_pars.SampleID.isin(sIDslice)]
cols = EIS_pars.columns
if "Model_EEC" in cols:
ECB = ECB.query(f'Model_EEC == "{ModEEC}"')
if "RPM_DAC" in cols and RPM_lim:
ECB = ECB.query(f"RPM_DAC > {RPM_lim}")
if "ECexp" in cols and "ECuniq" not in cols:
ECB = ECB.dropna(subset=["ECexp"])
ECuniq = ["_".join(i.split("_")[:-1]) for i in ECB.ECexp.values]
ECB = ECB.assign(**{"ECuniq": ECuniq})
if "E_RHE" in cols:
ECuniqE = [f"{i[0]}_{i[1]:.2f}" for i in zip(ECuniq, ECB.E_RHE.values)]
ECB = ECB.assign(**{"ECuniqE": ECuniqE})
if neat_eis and all([i in EIS_pars.columns for i in ["Rs", "RedChisqr", "Rct"]]):
prel = len(ECB)
RedChiSq_limit = (
ECB.query("Rs > 1").RedChisqr.mean()
+ 1 * ECB.query("Rs > 1").RedChisqr.std()
)
ECB = ECB.query("RedChisqr < @RedChiSq_limit & Rs > 2 & Rct < 9E05")
print(f"Cleaned up {prel-len(ECB)} rows")
return ECB
def loading_series(pars):
Loading_pars = pd.concat(
[
i[1]
for i in [
(n, gr, gr.Loading_cm2.unique())
for n, gr in pars.query("RPM_DAC > 1000").groupby(
["SampleID"] + SampleSelection.EC_exp_cols[0:-2]
)
if gr.Loading_cm2.nunique() > 2
]
]
)
return Loading_pars
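# Sketched usage (sample IDs below are placeholders): snap measured potentials to
# the 0.05 V grid and then select one fitted model series at high rotation rates.
#     EIS_pars = make_uniform_EvRHE(EIS_pars)   # e.g. E_AppV_RHE 0.513 -> E_RHE 0.5
#     ECB = serie_model(EIS_pars, ["JOS1", "JOS2"], ModEEC="Model(Singh2015_R3RQ)")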
|
import json
import cv2
import imutils as imutils
import requests
video_capture = cv2.VideoCapture(0)
# video_capture.set(3, 1024)
# video_capture.set(4, 768)
# video_capture.set(15, -8.0)
params = list()
params.append(cv2.IMWRITE_PNG_COMPRESSION)
params.append(9)
firstFrame = None
while True:
(grabbed, frame) = video_capture.read()
if not grabbed:
break
# resize the frame, convert it to grayscale, and blur it
frame = imutils.resize(frame, width=500)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    if firstFrame is None:
firstFrame = gray
continue
frameDelta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
(cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
for c in cnts:
# if the contour is too small, ignore it
if cv2.contourArea(c) > 500:
cv2.imwrite('img.png', frame, params)
json_data = {"device_id": 1010}
try:
requests.post('http://127.0.0.1:20100/request_face_detection_from_webcam',
data={"data": json.dumps(json_data)},
files={"file[]": open('img.png', 'rb')})
except Exception as e:
print 'error'
# print 'not send'
# sleep(0.3)
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
|
# -*- coding: utf-8 -*-
# pylint: disable=R0201, R0903, C0116, C0103
"""Base Model unit tests."""
import pytest
from loglan_db.model_db.base_type import BaseType as Type
from loglan_db.model_db.base_word import BaseWord
from loglan_db.model_db.addons.addon_word_sourcer import AddonWordSourcer
from loglan_db.model_db.base_word_source import BaseWordSource as WordSource
from tests.data import littles, little_types
from tests.data import words, types, prim_words, prim_types, other_word_2
from tests.functions import db_add_objects
class Word(BaseWord, AddonWordSourcer):
"""BaseWord class with Sourcer addon"""
@pytest.mark.usefixtures("db")
class TestWord:
"""Word tests."""
def test_get_sources_prim(self):
db_add_objects(Word, words)
db_add_objects(Type, types)
db_add_objects(Word, prim_words)
db_add_objects(Type, prim_types)
afx = Word.get_by_id(3802)
result = afx.get_sources_prim()
assert result is None
result = Word.get_by_id(3813).get_sources_prim()
assert len(result) == 5
assert isinstance(result, list)
assert isinstance(result[0], WordSource)
result = Word.get_by_id(291).get_sources_prim()
assert isinstance(result, str)
assert result == "balna: balnu"
result = Word.get_by_id(318).get_sources_prim()
assert isinstance(result, str)
assert result == "banko: Int."
result = Word.get_by_id(984).get_sources_prim()
assert isinstance(result, str)
assert result == "cimpe: abbreviation of cimpenizi"
result = Word.get_by_id(5655).get_sources_prim()
assert isinstance(result, str)
assert result == "murmu: Onamatopoetic"
result = Word.get_by_id(641).get_sources_prim()
assert isinstance(result, str)
assert result == "bordo: Fr. Bordeaux"
result = Word.get_by_id(849).get_sources_prim()
assert isinstance(result, str)
assert result == "carbo: ISV"
def test__get_sources_c_prim(self):
db_add_objects(Word, words)
db_add_objects(Type, types)
result = Word.get_by_id(3813)._get_sources_c_prim()
assert len(result) == 5
assert isinstance(result, list)
assert isinstance(result[0], WordSource)
afx = Word.get_by_id(3802)
result = afx._get_sources_c_prim()
assert result is None
def test__get_sources_c_prim_with_partial_data(self):
db_add_objects(Word, [other_word_2, ])
db_add_objects(Type, types)
result = Word.get_by_id(3813)._get_sources_c_prim()
assert len(result) == 5
def test_get_sources_cpx(self):
db_add_objects(Word, words)
db_add_objects(Type, types)
result = Word.get_by_id(7316).get_sources_cpx()
assert len(result) == 2
assert isinstance(result, list)
assert isinstance(result[0], Word)
result = Word.get_by_id(7316).get_sources_cpx(as_str=True)
assert len(result) == 2
assert isinstance(result, list)
assert isinstance(result[0], str)
assert result == ['pruci', 'kakto', ]
result = Word.get_by_id(3813).get_sources_cpx()
assert result == []
def test_get_sources_cpd(self):
db_add_objects(Word, littles)
db_add_objects(Type, little_types)
result = Word.get_by_id(479).get_sources_cpd()
assert len(result) == 2
assert isinstance(result, list)
assert isinstance(result[0], Word)
result = Word.get_by_id(479).get_sources_cpd(as_str=True)
assert len(result) == 2
assert isinstance(result, list)
assert isinstance(result[0], str)
assert result == ['bi', 'cio']
db_add_objects(Word, words)
db_add_objects(Type, types)
afx = Word.get_by_id(3802)
result = afx.get_sources_cpd()
assert result == []
|
from artikel.models import Artikel
from rest_framework import serializers
from django.contrib.auth.models import User
class ArtikelSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Artikel
fields = ('a_id', 'titel', 'text', 'tags', 'datum', 'owner')
class UserSerializer(serializers.ModelSerializer):
artikel = serializers.PrimaryKeyRelatedField(
many=True,
queryset=Artikel.objects.all()
)
class Meta:
model = User
fields = ('id', 'username', 'artikel')
|
import json
import os
import pathlib
import subprocess
import numpy as np
import pydot
os.chdir(pathlib.Path(__file__).parent.parent / "tmp")
for k in range(1, 78+1):
proc = subprocess.run(["neato", f"{k}.dot"], capture_output=True, text=True)
graph, = pydot.graph_from_dot_data(proc.stdout)
positions = {
int(n.get_name()): [
np.around(float(p) / 96, decimals=2)
for p in pos.strip('"').split(",")
]
for n in graph.get_nodes()
if (pos := n.get_pos())
}
positions = [positions[i] for i in range(len(positions))]
with open(f"{k}-dot.json", "w") as f:
f.write(json.dumps({"vertices": positions}, separators=(',', ':')))
# breakpoint()
|
# encoding: utf-8
##################################################
# This script shows an example of arithmetic operators.
# First, it shows different options for using arithmetic operators.
# These operators allow you to perform basic arithmetic calculations.
#
##################################################
#
##################################################
# Author: Diego Pajarito
# Copyright: Copyright 2020, IAAC
# Credits: [Institute for Advanced Architecture of Catalonia - IAAC, Advanced Architecture group]
# License: Apache License Version 2.0
# Version: 1.0.0
# Maintainer: Diego Pajarito
# Email: diego.pajarito@iaac.net
# Status: development
##################################################
# We don't need any library so far
# Let's write our code
print('These are a few examples of arithmetic operators in Python\n')
# We have already used addition: (+) and subtraction (-). Let's mix them with multiplication (*) and division (/)
result = 1 * 2 + 6 / 3
print('The result of calculating - 1*2 + 6/3 - is:')
print(result)
print('Do not forget the concept of precedence within arithmetic operators: '
      'multiplication and division are evaluated before addition and subtraction')
result = 1 + 2 - 3 * 4 / 5
print('The result of - 1 + 2 - 3 * 4 / 5 - is: ')
print(result)
result = 1 / 2 * 3 - 4 + 5
print('Is different than the result of - 1 / 2 * 3 - 4 + 5 -: ')
print(result)
# Lastly, there are additional arithmetic functions
result = 20 % 8
print('............................')
print('If you want to know the remainder value when dividing two numbers you should use the modulus operator - % -')
print('The modulus result of - 20 % 8 - is:')
print(result)
result = 20 // 8
print('............................')
print('If you want to know the quotient value when dividing two numbers you should use the floor division operator - // -')
print('The quotient result of - 20 // 8 - is:')
print(result)
# See what happens when you divide negative numbers
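# For example, floor division rounds toward negative infinity and the modulus
# result takes the sign of the divisor:
result = -20 // 8
print('............................')
print('The floor division result of - -20 // 8 - is:')
print(result)
result = -20 % 8
print('The modulus result of - -20 % 8 - is:')
print(result)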
result = 2 ** 8
print('............................')
print('If you want to perform the power operation you should use the exponent operator - ** -')
print('The result of - 2 ** 8 - is:')
print(result)
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
from PyQt4 import QtGui, uic
FORM_CLASS, _ = uic.loadUiType(os.path.join(
os.path.dirname(__file__), 'valider_dialog_base.ui'))
class PluginPoussePousseValideDialog(QtGui.QDialog, FORM_CLASS):
def __init__(self, parent=None):
"""Constructor."""
super(PluginPoussePousseValideDialog, self).__init__(parent)
self.setupUi(self)
|
"""
Test the directory roster.
"""
import logging
import pytest
import salt.config
import salt.loader
import salt.roster.dir as dir_
@pytest.fixture
def roster_domain():
return "test.roster.domain"
@pytest.fixture
def expected(roster_domain):
return {
"basic": {
"test1_us-east-2_test_basic": {
"host": "127.0.0.2",
"port": 22,
"sudo": True,
"user": "scoundrel",
}
},
"domain": {
"test1_us-east-2_test_domain": {
"host": "test1_us-east-2_test_domain." + roster_domain,
"port": 2222,
"user": "george",
}
},
"empty": {
"test1_us-east-2_test_empty": {
"host": "test1_us-east-2_test_empty." + roster_domain,
}
},
}
@pytest.fixture
def create_roster_files(tmp_path):
badfile_contents = """
#!jinja|yaml
host: 127.0.0.2
port: 22
THIS FILE IS NOT WELL FORMED YAML
sudo: true
user: scoundrel
"""
basic_contents = """
#!jinja|yaml
host: 127.0.0.2
port: 22
sudo: true
user: scoundrel
"""
domain_contents = """
#!jinja|yaml
port: 2222
user: george
"""
empty_contents = """
"""
with pytest.helpers.temp_file(
"test1_us-east-2_test_badfile", badfile_contents, directory=tmp_path
), pytest.helpers.temp_file(
"test1_us-east-2_test_basic", basic_contents, directory=tmp_path
), pytest.helpers.temp_file(
"test1_us-east-2_test_domain", domain_contents, directory=tmp_path
), pytest.helpers.temp_file(
"test1_us-east-2_test_empty", empty_contents, directory=tmp_path
):
yield
@pytest.fixture
def configure_loader_modules(roster_domain, salt_master_factory, tmp_path):
opts = salt_master_factory.config.copy()
utils = salt.loader.utils(opts, whitelist=["json", "stringutils", "roster_matcher"])
runner = salt.loader.runner(opts, utils=utils, whitelist=["salt"])
return {
dir_: {
"__opts__": {
"extension_modules": "",
"optimization_order": [0, 1, 2],
"renderer": "jinja|yaml",
"renderer_blacklist": [],
"renderer_whitelist": [],
"roster_dir": str(tmp_path),
"roster_domain": roster_domain,
},
"__runner__": runner,
"__utils__": utils,
}
}
def _test_match(ret, expected):
"""
assertDictEquals is too strict with OrderedDicts. The order isn't crucial
for roster entries, so we test that they contain the expected members directly.
"""
assert ret != {}, "Found no matches, expected {}".format(expected)
for minion, data in ret.items():
assert minion in expected, "Expected minion {} to match, but it did not".format(
minion
)
assert (
dict(data) == expected[minion]
), "Data for minion {} did not match expectations".format(minion)
def test_basic_glob(expected, create_roster_files):
"""Test that minion files in the directory roster match and render."""
expected = expected["basic"]
ret = dir_.targets("*_basic", saltenv="")
_test_match(ret, expected)
def test_basic_re(expected, create_roster_files):
"""Test that minion files in the directory roster match and render."""
expected = expected["basic"]
ret = dir_.targets(".*basic$", "pcre", saltenv="")
_test_match(ret, expected)
def test_basic_list(expected, create_roster_files):
"""Test that minion files in the directory roster match and render."""
expected = expected["basic"]
ret = dir_.targets(expected.keys(), "list", saltenv="")
_test_match(ret, expected)
def test_roster_domain(expected, create_roster_files):
"""Test that when roster_domain is configured, it will provide a default hostname
in the roster of {filename}.{roster_domain}, so that users can use the minion
id as the local hostname without having to supply the fqdn everywhere."""
expected = expected["domain"]
ret = dir_.targets(expected.keys(), "list", saltenv="")
_test_match(ret, expected)
def test_empty(expected, create_roster_files):
"""Test that an empty roster file matches its hostname"""
expected = expected["empty"]
ret = dir_.targets("*_empty", saltenv="")
_test_match(ret, expected)
def test_nomatch(create_roster_files):
"""Test that no errors happen when no files match"""
try:
ret = dir_.targets("", saltenv="")
except: # pylint: disable=bare-except
pytest.fail(
"No files matched, which is OK, but we raised an exception and we should not have."
)
assert len(ret) == 0, "Expected empty target list to yield zero targets."
def test_badfile(create_roster_files):
"""Test error handling when we can't render a file"""
ret = dir_.targets("*badfile", saltenv="")
assert len(ret) == 0
def test_badfile_logging(caplog, create_roster_files):
"""Test error handling when we can't render a file"""
with caplog.at_level(logging.WARNING, logger="salt.roster.dir"):
dir_.targets("*badfile", saltenv="")
assert "test1_us-east-2_test_badfile" in caplog.text
|
import pytest
from zeroae.rocksdb.c import perfcontext
def test_fixture(rocksdb_perfcontext):
assert rocksdb_perfcontext is not None
def test_reset(rocksdb_perfcontext):
perfcontext.reset(rocksdb_perfcontext)
def test_report(rocksdb_perfcontext):
result = perfcontext.report(rocksdb_perfcontext, 0)
assert result is not None
result = perfcontext.report(rocksdb_perfcontext, 1)
assert result is not None
@pytest.mark.xfail(reason="swig:enums")
def test_metric(rocksdb_perfcontext):
val = perfcontext.metric(rocksdb_perfcontext, 0)
assert val == 0
assert False
@pytest.mark.xfail(reason="swig:enums")
def test_set_perf_level():
perfcontext.set_perf_level(0)
assert False
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from unittest import mock
import ironic_inspector_client
from oslo_concurrency import processutils
from oslo_utils import units
from tripleoclient import exceptions
from tripleoclient.tests import fakes
from tripleoclient.workflows import baremetal
class TestBaremetalWorkflows(fakes.FakePlaybookExecution):
def setUp(self):
super(TestBaremetalWorkflows, self).setUp()
self.app.client_manager.workflow_engine = self.workflow = mock.Mock()
self.glance = self.app.client_manager.image = mock.Mock()
self.tripleoclient = mock.Mock()
self.app.client_manager.tripleoclient = self.tripleoclient
self.mock_playbook = mock.patch(
'tripleoclient.utils.run_ansible_playbook',
autospec=True
)
self.mock_playbook.start()
self.addCleanup(self.mock_playbook.stop)
self.node_update = [{'op': 'add',
'path': '/properties/capabilities',
'value': 'boot_option:local'},
{'op': 'add',
'path': '/driver_info/deploy_ramdisk',
'value': None},
{'op': 'add',
'path': '/driver_info/deploy_kernel',
'value': None},
{'op': 'add',
'path': '/driver_info/rescue_ramdisk',
'value': None},
{'op': 'add',
'path': '/driver_info/rescue_kernel',
'value': None}]
# Mock data
self.disks = [
{'name': '/dev/sda', 'size': 11 * units.Gi},
{'name': '/dev/sdb', 'size': 2 * units.Gi},
{'name': '/dev/sdc', 'size': 5 * units.Gi},
{'name': '/dev/sdd', 'size': 21 * units.Gi},
{'name': '/dev/sde', 'size': 13 * units.Gi},
]
for i, disk in enumerate(self.disks):
disk['wwn'] = 'wwn%d' % i
disk['serial'] = 'serial%d' % i
self.baremetal.node.list.return_value = [
mock.Mock(uuid="ABCDEFGH"),
]
self.node = mock.Mock(uuid="ABCDEFGH", properties={})
self.baremetal.node.get.return_value = self.node
self.inspector.get_data.return_value = {
'inventory': {'disks': self.disks}
}
self.existing_nodes = [
{'uuid': '1', 'driver': 'ipmi',
'driver_info': {'ipmi_address': '10.0.0.1'}},
{'uuid': '2', 'driver': 'pxe_ipmitool',
'driver_info': {'ipmi_address': '10.0.0.1', 'ipmi_port': 6235}},
{'uuid': '3', 'driver': 'foobar', 'driver_info': {}},
{'uuid': '4', 'driver': 'fake',
'driver_info': {'fake_address': 42}},
{'uuid': '5', 'driver': 'ipmi', 'driver_info': {}},
{'uuid': '6', 'driver': 'pxe_drac',
'driver_info': {'drac_address': '10.0.0.2'}},
{'uuid': '7', 'driver': 'pxe_drac',
'driver_info': {'drac_address': '10.0.0.3', 'drac_port': 6230}},
]
def test_register_or_update_success(self):
self.assertEqual(baremetal.register_or_update(
self.app.client_manager,
nodes_json=[],
instance_boot_option='local'
), [mock.ANY])
def test_provide_success(self):
baremetal.provide(node_uuids=[])
def test_introspect_success(self):
baremetal.introspect(self.app.client_manager, node_uuids=[],
run_validations=True, concurrency=20,
node_timeout=1200, max_retries=1,
retry_timeout=120)
def test_introspect_manageable_nodes_success(self):
baremetal.introspect_manageable_nodes(
self.app.client_manager, run_validations=False, concurrency=20,
node_timeout=1200, max_retries=1, retry_timeout=120,
)
def test_provide_manageable_nodes_success(self):
baremetal.provide_manageable_nodes(
self.app.client_manager
)
def test_configure_success(self):
baremetal.configure(self.app.client_manager, node_uuids=[])
def test_configure_manageable_nodes_success(self):
baremetal.configure_manageable_nodes(self.app.client_manager)
def test_clean_nodes_success(self):
baremetal.clean_nodes(node_uuids=[])
def test_clean_manageable_nodes_success(self):
baremetal.clean_manageable_nodes(
self.app.client_manager
)
def test_run_instance_boot_option(self):
result = baremetal._configure_boot(
self.app.client_manager,
node_uuid='MOCK_UUID',
instance_boot_option='netboot')
self.assertIsNone(result)
self.node_update[0].update({'value': 'boot_option:netboot'})
self.baremetal.node.update.assert_called_once_with(
mock.ANY, self.node_update)
def test_run_instance_boot_option_not_set(self):
result = baremetal._configure_boot(
self.app.client_manager,
node_uuid='MOCK_UUID')
self.assertIsNone(result)
self.node_update[0].update({'value': ''})
self.baremetal.node.update.assert_called_once_with(
mock.ANY, self.node_update)
def test_run_instance_boot_option_already_set_no_overwrite(self):
node_mock = mock.MagicMock()
node_mock.properties.get.return_value = ({'boot_option': 'netboot'})
self.app.client_manager.baremetal.node.get.return_value = node_mock
result = baremetal._configure_boot(
self.app.client_manager,
node_uuid='MOCK_UUID')
self.assertIsNone(result)
self.node_update[0].update({'value': 'boot_option:netboot'})
self.baremetal.node.update.assert_called_once_with(
mock.ANY, self.node_update)
def test_run_instance_boot_option_already_set_do_overwrite(self):
node_mock = mock.MagicMock()
node_mock.properties.get.return_value = ({'boot_option': 'netboot'})
self.app.client_manager.baremetal.node.get.return_value = node_mock
result = baremetal._configure_boot(
self.app.client_manager,
node_uuid='MOCK_UUID',
instance_boot_option='local')
self.assertIsNone(result)
self.node_update[0].update({'value': 'boot_option:local'})
self.baremetal.node.update.assert_called_once_with(
mock.ANY, self.node_update)
def test_run_exception_on_node_update(self):
self.baremetal.node.update.side_effect = Exception("Update error")
self.assertRaises(
Exception,
baremetal._configure_boot,
self.app.client_manager,
node_uuid='MOCK_UUID')
self.inspector.get_data.return_value = {
'inventory': {'disks': self.disks}
}
def test_smallest(self):
baremetal._apply_root_device_strategy(
self.app.client_manager,
node_uuid='MOCK_UUID',
strategy='smallest')
self.assertEqual(self.baremetal.node.update.call_count, 1)
root_device_args = self.baremetal.node.update.call_args_list[0]
expected_patch = [{'op': 'add', 'path': '/properties/root_device',
'value': {'wwn': 'wwn2'}},
{'op': 'add', 'path': '/properties/local_gb',
'value': 4}]
self.assertEqual(mock.call('ABCDEFGH', expected_patch),
root_device_args)
def test_smallest_with_ext(self):
self.disks[2]['wwn_with_extension'] = 'wwnext'
baremetal._apply_root_device_strategy(
self.app.client_manager,
node_uuid='MOCK_UUID',
strategy='smallest')
self.assertEqual(self.baremetal.node.update.call_count, 1)
root_device_args = self.baremetal.node.update.call_args_list[0]
expected_patch = [{'op': 'add', 'path': '/properties/root_device',
'value': {'wwn_with_extension': 'wwnext'}},
{'op': 'add', 'path': '/properties/local_gb',
'value': 4}]
self.assertEqual(mock.call('ABCDEFGH', expected_patch),
root_device_args)
def test_largest(self):
baremetal._apply_root_device_strategy(
self.app.client_manager,
node_uuid='MOCK_UUID',
strategy='largest')
self.assertEqual(self.baremetal.node.update.call_count, 1)
root_device_args = self.baremetal.node.update.call_args_list[0]
expected_patch = [{'op': 'add', 'path': '/properties/root_device',
'value': {'wwn': 'wwn3'}},
{'op': 'add', 'path': '/properties/local_gb',
'value': 20}]
self.assertEqual(mock.call('ABCDEFGH', expected_patch),
root_device_args)
def test_largest_with_ext(self):
self.disks[3]['wwn_with_extension'] = 'wwnext'
baremetal._apply_root_device_strategy(
self.app.client_manager,
node_uuid='MOCK_UUID',
strategy='largest')
self.assertEqual(self.baremetal.node.update.call_count, 1)
root_device_args = self.baremetal.node.update.call_args_list[0]
expected_patch = [{'op': 'add', 'path': '/properties/root_device',
'value': {'wwn_with_extension': 'wwnext'}},
{'op': 'add', 'path': '/properties/local_gb',
'value': 20}]
self.assertEqual(mock.call('ABCDEFGH', expected_patch),
root_device_args)
def test_no_overwrite(self):
self.node.properties['root_device'] = {'foo': 'bar'}
baremetal._apply_root_device_strategy(
self.app.client_manager,
node_uuid='MOCK_UUID',
strategy='smallest')
self.assertEqual(self.baremetal.node.update.call_count, 0)
def test_with_overwrite(self):
self.node.properties['root_device'] = {'foo': 'bar'}
baremetal._apply_root_device_strategy(
self.app.client_manager,
node_uuid='MOCK_UUID',
strategy='smallest',
overwrite=True)
self.assertEqual(self.baremetal.node.update.call_count, 1)
root_device_args = self.baremetal.node.update.call_args_list[0]
expected_patch = [{'op': 'add', 'path': '/properties/root_device',
'value': {'wwn': 'wwn2'}},
{'op': 'add', 'path': '/properties/local_gb',
'value': 4}]
self.assertEqual(mock.call('ABCDEFGH', expected_patch),
root_device_args)
def test_minimum_size(self):
baremetal._apply_root_device_strategy(
self.app.client_manager,
node_uuid='MOCK_UUID',
strategy='smallest',
minimum_size=10)
self.assertEqual(self.baremetal.node.update.call_count, 1)
root_device_args = self.baremetal.node.update.call_args_list[0]
expected_patch = [{'op': 'add', 'path': '/properties/root_device',
'value': {'wwn': 'wwn0'}},
{'op': 'add', 'path': '/properties/local_gb',
'value': 10}]
self.assertEqual(mock.call('ABCDEFGH', expected_patch),
root_device_args)
def test_bad_inventory(self):
self.inspector.get_data.return_value = {}
self.assertRaisesRegex(exceptions.RootDeviceDetectionError,
"Malformed introspection data",
baremetal._apply_root_device_strategy,
self.app.client_manager,
node_uuid='MOCK_UUID',
strategy='smallest')
self.assertEqual(self.baremetal.node.update.call_count, 0)
def test_no_disks(self):
self.inspector.get_data.return_value = {
'inventory': {
'disks': [{'name': '/dev/sda', 'size': 1 * units.Gi}]
}
}
self.assertRaisesRegex(exceptions.RootDeviceDetectionError,
"No suitable disks",
baremetal._apply_root_device_strategy,
self.app.client_manager,
node_uuid='MOCK_UUID',
strategy='smallest')
self.assertEqual(self.baremetal.node.update.call_count, 0)
def test_no_data(self):
self.inspector.get_data.side_effect = (
ironic_inspector_client.ClientError(mock.Mock()))
self.assertRaisesRegex(exceptions.RootDeviceDetectionError,
"No introspection data",
baremetal._apply_root_device_strategy,
self.app.client_manager,
node_uuid='MOCK_UUID',
strategy='smallest')
self.assertEqual(self.baremetal.node.update.call_count, 0)
def test_no_wwn_and_serial(self):
self.inspector.get_data.return_value = {
'inventory': {
'disks': [{'name': '/dev/sda', 'size': 10 * units.Gi}]
}
}
self.assertRaisesRegex(exceptions.RootDeviceDetectionError,
"Neither WWN nor serial number are known",
baremetal._apply_root_device_strategy,
self.app.client_manager,
node_uuid='MOCK_UUID',
strategy='smallest')
self.assertEqual(self.baremetal.node.update.call_count, 0)
def test_device_list(self):
baremetal._apply_root_device_strategy(
self.app.client_manager,
node_uuid='MOCK_UUID',
strategy='hda,sda,sdb,sdc')
self.assertEqual(self.baremetal.node.update.call_count, 1)
root_device_args = self.baremetal.node.update.call_args_list[0]
expected_patch = [{'op': 'add', 'path': '/properties/root_device',
'value': {'wwn': 'wwn0'}},
{'op': 'add', 'path': '/properties/local_gb',
'value': 10}]
self.assertEqual(mock.call('ABCDEFGH', expected_patch),
root_device_args)
def test_device_list_not_found(self):
self.assertRaisesRegex(exceptions.RootDeviceDetectionError,
"Cannot find a disk",
baremetal._apply_root_device_strategy,
self.app.client_manager,
node_uuid='MOCK_UUID',
strategy='hda')
self.assertEqual(self.baremetal.node.update.call_count, 0)
def test_existing_ips(self):
result = baremetal._existing_ips(self.existing_nodes)
self.assertEqual({('10.0.0.1', 623), ('10.0.0.1', 6235),
('10.0.0.2', None), ('10.0.0.3', 6230)},
set(result))
def test_with_list(self):
result = baremetal._get_candidate_nodes(
['10.0.0.1', '10.0.0.2', '10.0.0.3'],
[623, 6230, 6235],
[['admin', 'password'], ['admin', 'admin']],
self.existing_nodes)
self.assertEqual([
{'ip': '10.0.0.3', 'port': 623,
'username': 'admin', 'password': 'password'},
{'ip': '10.0.0.1', 'port': 6230,
'username': 'admin', 'password': 'password'},
{'ip': '10.0.0.3', 'port': 6235,
'username': 'admin', 'password': 'password'},
{'ip': '10.0.0.3', 'port': 623,
'username': 'admin', 'password': 'admin'},
{'ip': '10.0.0.1', 'port': 6230,
'username': 'admin', 'password': 'admin'},
{'ip': '10.0.0.3', 'port': 6235,
'username': 'admin', 'password': 'admin'},
], result)
def test_with_subnet(self):
result = baremetal._get_candidate_nodes(
'10.0.0.0/30',
[623, 6230, 6235],
[['admin', 'password'], ['admin', 'admin']],
self.existing_nodes)
self.assertEqual([
{'ip': '10.0.0.1', 'port': 6230,
'username': 'admin', 'password': 'password'},
{'ip': '10.0.0.1', 'port': 6230,
'username': 'admin', 'password': 'admin'},
], result)
def test_invalid_subnet(self):
self.assertRaises(
netaddr.core.AddrFormatError,
baremetal._get_candidate_nodes,
'meow',
[623, 6230, 6235],
[['admin', 'password'], ['admin', 'admin']],
self.existing_nodes)
@mock.patch.object(processutils, 'execute', autospec=True)
def test_success(self, mock_execute):
result = baremetal._probe_node('10.0.0.42', 623,
'admin', 'password')
self.assertEqual({'pm_type': 'ipmi',
'pm_addr': '10.0.0.42',
'pm_user': 'admin',
'pm_password': 'password',
'pm_port': 623},
result)
mock_execute.assert_called_once_with('ipmitool', '-I', 'lanplus',
'-H', '10.0.0.42',
'-L', 'ADMINISTRATOR',
'-p', '623', '-U', 'admin',
'-f', mock.ANY, 'power', 'status',
attempts=2)
@mock.patch.object(processutils, 'execute', autospec=True)
def test_failure(self, mock_execute):
mock_execute.side_effect = processutils.ProcessExecutionError()
self.assertIsNone(baremetal._probe_node('10.0.0.42', 623,
'admin', 'password'))
mock_execute.assert_called_once_with('ipmitool', '-I', 'lanplus',
'-H', '10.0.0.42',
'-L', 'ADMINISTRATOR',
'-p', '623', '-U', 'admin',
'-f', mock.ANY, 'power', 'status',
attempts=2)
|
#!/usr/bin/python
# THIS TOOL, LIKE, GETS COMMENTS FROM A COMMUNITY OR WHATEVER.
import codecs, glob, os, pickle, pprint, logging, re, sys, time, urllib, urllib2
import xml.dom.minidom, xmlrpclib, socket
from xml.sax import saxutils
from optparse import OptionParser
import hswcsecret, hswcutil
import sqlite3
import dwump
dbconn = sqlite3.connect('hswc.db')
cursor = dbconn.cursor()
teamsdict = {'blue':['abstrata','alpha!dave<3alpha!rose','dave<3karkat','denizens', 'dualscar<3signless','equius<>nepeta','eridan<3<rose','eridan<3karkat','gamzee<>karkat','jade<3roxy','kanaya<3rose','palepitch'], 'ceph':['psiioniic<3redglare', 'damarac3<horussc3<rufioh','cronus<3karkat','gamzee<3jane','feferi<3jade','jane<3roxy','terezi<3vriska','rose<>terezi', 'cronus<3kankri', 'cronus<3<kurloz','jade<3rose','eridan<3<sollux'], 'teaghan':['ancestors', 'dave<3jade<3john<3rose', 'dave<3jane', 'dave<3john', 'dave<3sollux', 'dirk<3jake<3jane<3roxy', 'equius<3gamzee', 'equius<3sollux', 'jake<3roxy', 'kanaya<3vriska', 'dirk<3jake<3jane<3roxy'], 'ketsu':['feferi<3nepeta', 'jake<>john', 'caliborn<3dirk', 'dirk<3john', 'dave<3terezi', 'robots', 'john<3karkat', 'dirk<3jake', 'eridan<3sollux'], 'maggie':['jake<3jane', 'feferi<3terezi', 'john<3roxy', 'gamzeec3<rosec3<terezi', 'jade<3karkat', 'john<3vriska', 'rose<3roxy', 'dave<3nepeta', 'eridan<3feferi<3sollux', 'dave<>karkat', 'gamzee<3tavros', 'john<3rose'], 'jay':['latula<3mituna', 'kismesissitude', 'dirk<3jane', 'calliope<3roxy', 'john<3<tavros', 'karkat<3sollux', 'guardians', 'bro<3john', 'karkat<3terezi', 'aradia<3sollux', 'hella jeff<3sweet bro'], 'soni':['calliope<3jade', 'tricksters', 'dave<3<karkat', 'jake<3karkat', 'dualscar<3psiioniic', 'eridan<>feferi', 'bro<3dave', 'alpha!dave<3dirk', 'eridan<3roxy', 'dave<3jade', 'kanaya<>karkat']}
# man this should be coded better at some point
round = 86
roundname = 'br5'
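# A possible cleanup (sketch only, left commented out so behaviour is unchanged):
# the hardcoded round/roundname above could be taken from the command line using the
# OptionParser that is already imported. The option names below are hypothetical.
# parser = OptionParser()
# parser.add_option('--round', type='int', default=86)
# parser.add_option('--roundname', default='br5')
# (options, args) = parser.parse_args()
# round, roundname = options.round, options.roundname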
def pretty_print_comment(comment, scoringfile):
"""pretty print a comment it's pretty fuckin' self-explanatory fef
apparently eridan wrote this comment???"""
poster = comment[7]
team = comment[8]
subject = comment[3]
text = comment[4]
# no interest in non-prompt non-fills
if not (re.search("^prompt", subject.lower()) or re.search("^fill", subject.lower())):
return
# for br4+ needs to detect if comment is screened and if so not print it
if comment[11] == "screened":
return
scoringfile.write("Poster: " + poster + " Team: " + team + '\n')
scoringfile.write("Subject: " + subject.encode('utf-8') + '\n')
encodedtext = text.encode('utf-8')
scoringfile.write(encodedtext)
scoringfile.write("\n --- \n")
return
def generate_all_scores_for_round():
"""generate all the scores for a round
basically, this is what the script did before, except coded in a
semi-reasonable way!"""
for mod in teamsdict.keys():
scoringfilename = mod + roundname + '.txt'
scoringfile = open(scoringfilename, 'w')
for team in teamsdict[mod]:
            cursor.execute('SELECT * from comments where jitemid=? and team=?', (round, team))
            for x in cursor.fetchall():
                pretty_print_comment(x, scoringfile)
scoringfile.close()
if __name__ == '__main__':
print "generating all scores for round " + roundname
generate_all_scores_for_round()
print "done, hopefully"
|
from lxml import etree
def xml_to_string(xml_object: etree.Element) -> str:
return etree.tostring(xml_object).decode()
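# Minimal usage sketch (relies only on lxml itself):
#   root = etree.Element("root")
#   etree.SubElement(root, "child").text = "hello"
#   xml_to_string(root)  # -> '<root><child>hello</child></root>'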
|
# Generated by Django 2.0.5 on 2018-09-01 06:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sunless_web', '0037_auto_20180901_1452'),
]
operations = [
migrations.AddField(
model_name='entity',
name='hash_v2',
field=models.CharField(blank=True, db_index=True, max_length=70, null=True, unique=True,
verbose_name='HashHex v2'),
),
migrations.AlterField(
model_name='entity',
name='hash',
field=models.CharField(db_index=True, max_length=70, unique=True, verbose_name='HashHex V1'),
),
]
|
class GokoBot():
def __init__(self, bot):
self.__bot = bot
self.__id = bot.id
self.__first_name = bot.first_name
self.__is_bot = bot.is_bot
self.__user_name = bot.username
self.__can_join_groups = bot.can_join_groups
self.__can_read_all_group_messages = bot.can_read_all_group_messages
# self.__suprts_inline_queries = bot.suprts_inline_queries
def get_id(self) -> int:
return self.__id
def get_first_name(self) -> str:
return self.__first_name
def is_bot(self) -> bool:
return self.__is_bot
def get_user_name(self) -> str:
return self.__user_name
def can_join_groups(self) -> bool:
return self.__can_join_groups
def can_read_all_group_messages(self) -> bool:
return self.__can_read_all_group_messages
    def suprts_inline_queries(self) -> bool:
        # Not implemented yet: the backing attribute is commented out in __init__.
        # return self.__suprts_inline_queries
        pass
def __str__(self) -> str:
return f"El bot {self.__user_name} esta escuchando peticiones."
|
"""Collection of helper methods.
All containing methods are legacy helpers that should not be used by new
components. Instead call the service directly.
"""
from homeassistant.components.fan import (
ATTR_DIRECTION,
ATTR_OSCILLATING,
ATTR_PERCENTAGE,
ATTR_PERCENTAGE_STEP,
ATTR_PRESET_MODE,
ATTR_SPEED,
DOMAIN,
SERVICE_DECREASE_SPEED,
SERVICE_INCREASE_SPEED,
SERVICE_OSCILLATE,
SERVICE_SET_DIRECTION,
SERVICE_SET_PERCENTAGE,
SERVICE_SET_PRESET_MODE,
SERVICE_SET_SPEED,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ENTITY_MATCH_ALL,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
async def async_turn_on(
hass,
entity_id=ENTITY_MATCH_ALL,
speed: str = None,
percentage: int = None,
preset_mode: str = None,
) -> None:
"""Turn all or specified fan on."""
data = {
key: value
for key, value in [
(ATTR_ENTITY_ID, entity_id),
(ATTR_SPEED, speed),
(ATTR_PERCENTAGE, percentage),
(ATTR_PRESET_MODE, preset_mode),
]
if value is not None
}
await hass.services.async_call(DOMAIN, SERVICE_TURN_ON, data, blocking=True)
async def async_turn_off(hass, entity_id=ENTITY_MATCH_ALL) -> None:
"""Turn all or specified fan off."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(DOMAIN, SERVICE_TURN_OFF, data, blocking=True)
async def async_oscillate(
hass, entity_id=ENTITY_MATCH_ALL, should_oscillate: bool = True
) -> None:
"""Set oscillation on all or specified fan."""
data = {
key: value
for key, value in [
(ATTR_ENTITY_ID, entity_id),
(ATTR_OSCILLATING, should_oscillate),
]
if value is not None
}
await hass.services.async_call(DOMAIN, SERVICE_OSCILLATE, data, blocking=True)
async def async_set_speed(hass, entity_id=ENTITY_MATCH_ALL, speed: str = None) -> None:
"""Set speed for all or specified fan."""
data = {
key: value
for key, value in [(ATTR_ENTITY_ID, entity_id), (ATTR_SPEED, speed)]
if value is not None
}
await hass.services.async_call(DOMAIN, SERVICE_SET_SPEED, data, blocking=True)
async def async_set_preset_mode(
hass, entity_id=ENTITY_MATCH_ALL, preset_mode: str = None
) -> None:
"""Set preset mode for all or specified fan."""
data = {
key: value
for key, value in [(ATTR_ENTITY_ID, entity_id), (ATTR_PRESET_MODE, preset_mode)]
if value is not None
}
await hass.services.async_call(DOMAIN, SERVICE_SET_PRESET_MODE, data, blocking=True)
async def async_set_percentage(
hass, entity_id=ENTITY_MATCH_ALL, percentage: int = None
) -> None:
"""Set percentage for all or specified fan."""
data = {
key: value
for key, value in [(ATTR_ENTITY_ID, entity_id), (ATTR_PERCENTAGE, percentage)]
if value is not None
}
await hass.services.async_call(DOMAIN, SERVICE_SET_PERCENTAGE, data, blocking=True)
async def async_increase_speed(
hass, entity_id=ENTITY_MATCH_ALL, percentage_step: int = None
) -> None:
"""Increase speed for all or specified fan."""
data = {
key: value
for key, value in [
(ATTR_ENTITY_ID, entity_id),
(ATTR_PERCENTAGE_STEP, percentage_step),
]
if value is not None
}
await hass.services.async_call(DOMAIN, SERVICE_INCREASE_SPEED, data, blocking=True)
async def async_decrease_speed(
hass, entity_id=ENTITY_MATCH_ALL, percentage_step: int = None
) -> None:
"""Decrease speed for all or specified fan."""
data = {
key: value
for key, value in [
(ATTR_ENTITY_ID, entity_id),
(ATTR_PERCENTAGE_STEP, percentage_step),
]
if value is not None
}
await hass.services.async_call(DOMAIN, SERVICE_DECREASE_SPEED, data, blocking=True)
async def async_set_direction(
hass, entity_id=ENTITY_MATCH_ALL, direction: str = None
) -> None:
"""Set direction for all or specified fan."""
data = {
key: value
for key, value in [(ATTR_ENTITY_ID, entity_id), (ATTR_DIRECTION, direction)]
if value is not None
}
await hass.services.async_call(DOMAIN, SERVICE_SET_DIRECTION, data, blocking=True)
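# As the module docstring notes, new code should call the service directly rather than
# use these legacy helpers. A minimal sketch of the direct equivalent of async_turn_on,
# assuming a configured `hass` instance (the entity id below is illustrative):
#
#   await hass.services.async_call(
#       DOMAIN, SERVICE_TURN_ON,
#       {ATTR_ENTITY_ID: "fan.living_room", ATTR_PERCENTAGE: 50},
#       blocking=True,
#   )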
|
from django.urls import path
from scrumate.core.daily_scrum import views
urlpatterns = [
path('', views.daily_scrum_entry, name='daily_scrum'),
path('<int:deliverable_id>/set_actual_hour/', views.set_actual_hour, name='set_actual_hour'),
path('<int:deliverable_id>/update_actual_hour/', views.update_actual_hour, name='update_actual_hour'),
path('<int:deliverable_id>/assign_dev/', views.assign_dev, name='assign_dev'),
]
|
# coding=utf-8
import os
import unittest
from mock import patch
import activity.activity_PostDecisionLetterJATS as activity_module
from activity.activity_PostDecisionLetterJATS import (
activity_PostDecisionLetterJATS as activity_object,
)
import tests.activity.settings_mock as settings_mock
from tests.activity.classes_mock import FakeLogger, FakeResponse
import tests.test_data as test_case_data
from tests.activity.classes_mock import FakeSession, FakeStorageContext
from tests.classes_mock import FakeSMTPServer
SESSION_DATA = {
"bucket_folder_name": "elife39122",
"xml_file_name": "elife-39122.xml",
}
def input_data(file_name_to_change=""):
activity_data = test_case_data.ingest_decision_letter_data
activity_data["file_name"] = file_name_to_change
return activity_data
class TestPostDecisionLetterJats(unittest.TestCase):
def setUp(self):
self.fake_logger = FakeLogger()
self.activity = activity_object(
settings_mock, self.fake_logger, None, None, None
)
self.input_data = input_data("elife-39122.zip")
def tearDown(self):
# clean the temporary directory
self.activity.clean_tmp_dir()
@patch.object(activity_module, "get_session")
@patch.object(activity_module.email_provider, "smtp_connect")
@patch("requests.post")
@patch.object(activity_module.download_helper, "storage_context")
def test_do_activity(
self,
fake_download_storage_context,
requests_method_mock,
fake_email_smtp_connect,
mock_session,
):
expected_result = activity_object.ACTIVITY_SUCCESS
fake_download_storage_context.return_value = FakeStorageContext()
fake_email_smtp_connect.return_value = FakeSMTPServer(
self.activity.get_tmp_dir()
)
# mock the session
fake_session = FakeSession(SESSION_DATA)
mock_session.return_value = fake_session
# POST response
requests_method_mock.return_value = FakeResponse(200, None)
# do the activity
result = self.activity.do_activity(self.input_data)
# check assertions
self.assertEqual(result, expected_result)
xml_file_name = self.activity.xml_file.split(os.sep)[-1]
self.assertEqual(xml_file_name, "elife-39122.xml")
self.assertEqual(self.activity.doi, "10.7554/eLife.39122")
@patch.object(activity_module, "get_session")
@patch.object(activity_module.email_provider, "smtp_connect")
@patch("requests.post")
@patch.object(activity_module.download_helper, "storage_context")
def test_do_activity_post_failed(
self,
fake_download_storage_context,
requests_method_mock,
fake_email_smtp_connect,
mock_session,
):
expected_result = activity_object.ACTIVITY_PERMANENT_FAILURE
fake_download_storage_context.return_value = FakeStorageContext()
fake_email_smtp_connect.return_value = FakeSMTPServer(
self.activity.get_tmp_dir()
)
# mock the session
fake_session = FakeSession(SESSION_DATA)
mock_session.return_value = fake_session
# POST response
requests_method_mock.return_value = FakeResponse(500, None)
# do the activity
result = self.activity.do_activity(self.input_data)
# check assertions
self.assertEqual(result, expected_result)
self.assertTrue(
self.activity.post_error_message.startswith(
"POST was not successful, details: Error posting decision letter JATS to endpoint"
" https://typesetter/updatedigest: status_code: 500\n"
"request headers: {}\n"
"request body: None\n"
"response headers: {}\n"
"response: None"
)
)
@patch.object(activity_module, "get_session")
@patch.object(activity_module.email_provider, "smtp_connect")
@patch.object(activity_module.download_helper, "storage_context")
@patch.object(activity_module.requests_provider, "post_to_endpoint")
def test_do_activity_post_exception(
self,
fake_post_jats,
fake_download_storage_context,
fake_email_smtp_connect,
mock_session,
):
expected_result = activity_object.ACTIVITY_PERMANENT_FAILURE
fake_download_storage_context.return_value = FakeStorageContext()
fake_email_smtp_connect.return_value = FakeSMTPServer(
self.activity.get_tmp_dir()
)
# mock the session
fake_session = FakeSession(SESSION_DATA)
mock_session.return_value = fake_session
# exception in post
fake_post_jats.side_effect = Exception("Something went wrong!")
# do the activity
result = self.activity.do_activity(self.input_data)
self.assertEqual(result, expected_result)
self.assertTrue(self.activity.statuses.get("error_email"))
self.assertEqual(
self.fake_logger.logexception,
"Exception raised in do_activity. Details: Something went wrong!",
)
@patch.object(activity_module, "get_session")
def test_do_activity_bad_session(self, mock_session):
expected_result = activity_object.ACTIVITY_PERMANENT_FAILURE
# mock the session
fake_session = FakeSession({})
mock_session.return_value = fake_session
# do the activity
result = self.activity.do_activity(self.input_data)
# check assertions
self.assertEqual(result, expected_result)
self.assertEqual(
self.fake_logger.logerror, "Missing session data in PostDecisionLetterJATS."
)
class TestPostDecisionLetterBadSettings(unittest.TestCase):
def setUp(self):
self.fake_logger = FakeLogger()
self.activity = activity_object(
settings_mock, self.fake_logger, None, None, None
)
self.input_data = input_data("elife-39122.zip")
@patch.object(activity_module, "get_session")
def test_do_activity_missing_endpoint(self, mock_session):
expected_result = activity_object.ACTIVITY_PERMANENT_FAILURE
# mock the session
fake_session = FakeSession(SESSION_DATA)
mock_session.return_value = fake_session
# remove the setting value
del self.activity.settings.typesetter_decision_letter_endpoint
# do the activity
result = self.activity.do_activity(self.input_data)
# check assertions
self.assertEqual(result, expected_result)
self.assertEqual(
self.fake_logger.logerror,
"No typesetter endpoint in settings, skipping PostDecisionLetterJATS.",
)
@patch.object(activity_module, "get_session")
def test_do_activity_blank_endpoint(self, mock_session):
expected_result = activity_object.ACTIVITY_PERMANENT_FAILURE
# mock the session
fake_session = FakeSession(SESSION_DATA)
mock_session.return_value = fake_session
# remove the setting value
self.activity.settings.typesetter_decision_letter_endpoint = None
# do the activity
result = self.activity.do_activity(self.input_data)
# check assertions
self.assertEqual(result, expected_result)
self.assertEqual(
self.fake_logger.logerror,
"Typesetter endpoint in settings is blank, skipping PostDecisionLetterJATS.",
)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-20 18:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('validate', '0002_branch_contact'),
]
operations = [
migrations.AlterField(
model_name='bank',
name='code',
field=models.CharField(db_index=True, max_length=64, unique=True),
),
migrations.AlterField(
model_name='branch',
name='ifsc',
field=models.CharField(db_index=True, max_length=32, unique=True),
),
]
|
The loop ran 1 time
The loop ran 2 times
|
#!/usr/bin/env python3
#from HTMLParser import HTMLParser
import requests
from lxml import html
import jinja2
import boto3
import six
import os
import hashlib
URL = 'https://launchpad.net/~rquillo/+archive/ansible/+packages'
data = {}
packages = requests.get(URL)
tree = html.fromstring(packages.text)
data['author'] = tree.xpath("//*[@itemprop='breadcrumb']/li[1]/a/text()")[0]
data['title'] = ' : '.join([x.strip() for x in tree.xpath("//*[@itemprop='breadcrumb']/li//text()") if x.strip()])
data['items'] = []
for row in tree.xpath("//*[@id='packages_list']/*//tr[contains(@class,'archive_package_row')]"):
rowstrs = [x.strip() for x in row.xpath("td//text()") if x.strip()]
if not rowstrs:
continue
    content = ' '.join(rowstrs).encode('utf-8')
data['items'].append( {
        'title': rowstrs[0],  # use the full first-cell text (the package name) as the entry title
'link': URL,
'content': content,
'id': hashlib.sha256(content).hexdigest()
})
# idea stolen from codeape on stackoverflow: http://stackoverflow.com/a/2101186/659298
curr_dir = os.path.dirname(os.path.realpath(__file__))
output_atom = six.BytesIO(jinja2.Environment(loader=jinja2.FileSystemLoader(curr_dir)).get_template("atomtemplate.xml.j2").render(data).encode('utf-8'))
s3 = boto3.client('s3')
s3.upload_fileobj(output_atom, 'dyn.tedder.me', 'rss/ppa/rquillo.atom', ExtraArgs={
'CacheControl': 'public, max-age=3600',
'ContentType': 'application/atom+xml',
'ACL': 'public-read'
})
#p = HTMLParser()
#p.feed(packages.text)
|