max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
app/admin/daq_admin.py | quanpower/sitp | 0 | 12768551 | <gh_stars>0
from flask_admin import Admin, BaseView, expose
from flask_admin.contrib.sqla import ModelView
from app.models import Temperature
import flask_login as login
class TemperatureModelView(ModelView):
    """Flask-Admin model view for the Temperature model, visible only to
    authenticated users."""

    def __init__(self, session, **kwargs):
        # You can pass name and other parameters if you want to
        super(TemperatureModelView, self).__init__(Temperature, session, **kwargs)

    def is_accessible(self):
        # Flask-Admin calls this before rendering; returning False hides
        # the view from anonymous visitors.
        return login.current_user.is_authenticated
| 2.21875 | 2 |
Lintcode/Ladder_49_Binary_Index_Tree/207. Interval Sum II.py | ctc316/algorithm-python | 0 | 12768552 | <reponame>ctc316/algorithm-python<gh_stars>0
class BITree:
    """Binary Indexed (Fenwick) Tree over an integer array.

    Supports point assignment via update() and inclusive range sums via
    sumRange(), both in O(log n).
    """

    def __init__(self, nums):
        # Shadow copy of the current values lets update() turn an
        # assignment into a delta for the tree.
        self.n = len(nums)
        self.arr = [0] * self.n
        self.bitree = [0] * (self.n + 1)
        for idx, val in enumerate(nums):
            self.update(idx, val)

    def update(self, i, val):
        """Set element i to val and propagate the change up the tree."""
        delta = val - self.arr[i]
        self.arr[i] = val
        pos = i + 1  # the tree is 1-indexed
        while pos <= self.n:
            self.bitree[pos] += delta
            pos += pos & (-pos)  # jump to the next covering node

    def sumRange(self, i, j):
        """Return the sum of elements i..j (inclusive)."""
        return self._getSum(j) - self._getSum(i - 1)

    def _getSum(self, i):
        """Prefix sum of elements 0..i (inclusive); 0 when i < 0."""
        total = 0
        pos = i + 1
        while pos > 0:
            total += self.bitree[pos]
            pos -= pos & (-pos)  # strip the lowest set bit
        return total
class Solution:
    """Interval Sum II: point updates and range-sum queries on an array."""

    def __init__(self, A):
        """
        @param: A: An integer array
        """
        # All bookkeeping is delegated to a Fenwick tree built over A.
        self.bitree = BITree(A)

    def query(self, start, end):
        """
        @param: start: An integer
        @param: end: An integer
        @return: The sum from start to end
        """
        return self.bitree.sumRange(start, end)

    def modify(self, index, value):
        """
        @param: index: An integer
        @param: value: An integer
        @return: nothing
        """
        self.bitree.update(index, value)
plot_max.py | IanHawke/three-lies | 0 | 12768553 | from matplotlib import pyplot
import numpy
# Sample grid and two noisy test curves for the zoom-on-maximum plots.
x = numpy.linspace(-1, 1, 1000)
# Gaussian-like bump peaking at x = 0, with ~5% multiplicative noise.
y1 = numpy.exp(-x**2 * 10) * (1 + 0.05 * numpy.random.rand(len(x)))
# Asymmetric bump (quartic/sextic damping plus a piecewise term that is
# non-zero only below x = 0.3), with the same noise model.
y2 = (numpy.exp(10*(-(x-0.3)**2 - 0.75*x**4 - 0.25*x**6)) + numpy.piecewise(x, [x < 0.3, x >= 0.3], [lambda x: -(x-0.3)*numpy.sqrt(1+x), 0])) * (1 + 0.05 * numpy.random.rand(len(x)))
def plot_max(x, y):
    """Plot y against x, mark the maximum with an arrow, and add an inset
    axes zoomed in around that maximum.

    Returns the list of Line2D objects from the main plot call.
    """
    # Fractional position of the maximum within the sample index range.
    # NOTE(review): under Python 2 this would be integer division and
    # always 0 -- assumes Python 3 semantics; confirm.
    x_c = numpy.argmax(y) / len(x)
    # Inset axes rectangle in figure coordinates: (left, bottom, width, height).
    ax_lim = (x_c - 0.1, 0.2, 0.2, 0.2)
    f = pyplot.plot(x, y)
    pyplot.xlim(-1, 1)
    # 2 * x_c - 1 maps the fractional index back to data coordinates,
    # assuming x spans [-1, 1] uniformly (as in the module-level data).
    pyplot.arrow(2 * x_c - 1, 0.4, 0, 0.5, head_width=0.025, head_length=0.05)
    ax = pyplot.axes(ax_lim)
    ax.plot(x, y)
    # Restrict the inset to a narrow window centred on the maximum.
    ax.set_xlim(2 * (x_c - 0.56), 2 * (x_c - 0.44))
    ax.set_ylim(0.9, 1.1)
    return f
| 3.3125 | 3 |
main.py | hssergey/timeslideshow | 0 | 12768554 | <reponame>hssergey/timeslideshow
from flask import Flask, render_template, send_from_directory
import glob
app = Flask(__name__)
@app.route("/")
def hello_world():
images = glob.glob("images/*.*")
return render_template('main.html', images = images)
@app.route("/images/<path:name>")
def download_file(name):
return send_from_directory(
"images", name, as_attachment = False
)
| 2.4375 | 2 |
plugins/dragknife.py | hdo/bCNC | 0 | 12768555 | #!/usr/bin/python
# -*- coding: ascii -*-
# Author: @harvie <NAME>
# Date: 25 sept 2018
__author__ = "@harvie <NAME>"
#__email__ = ""
__name__ = _("DragKnife")
__version__ = "0.3.0"
import math
import os.path
import re
from CNC import CNC,Block
from bmath import Vector
from bpath import eq, Path, Segment
from ToolsPage import Plugin
from math import pi, sqrt, sin, cos, asin, acos, atan2, hypot, degrees, radians, copysign, fmod
class Tool(Plugin):
__doc__ = _("""Drag knife postprocessor""") #<<< This comment will be show as tooltip for the ribbon button
def __init__(self, master):
Plugin.__init__(self, master,"DragKnife")
self.icon = "dragknife" #<<< This is the name of file used as icon for the ribbon button. It will be search in the "icons" subfolder
self.group = "CAM" #<<< This is the name of group that plugin belongs
#self.oneshot = True
#Here we are creating the widgets presented to the user inside the plugin
#Name, Type , Default value, Description
self.variables = [ #<<< Define a list of components for the GUI
("name" , "db" , "", _("Name")), #used to store plugin settings in the internal database
("offset", "mm", 3, _("dragknife offset"), _("distance from dragknife rotation center to the tip of the blade")),
("angle", "float", 20, _("angle threshold"), _("do not perform pivot action for angles smaller than this")),
("swivelz", "mm", 0, _("swivel height"), _("retract to this height for pivots (useful for thick materials, you should enter number slightly lower than material thickness)")),
("initdir", "X+,Y+,Y-,X-,none", "X+", _("initial direction"), _("direction that knife blade is facing before and after cut. Eg.: if you set this to X+, then the knifes rotation axis should be on the right side of the tip. Meaning that the knife is ready to cut towards right immediately without pivoting. If you cut multiple shapes in single operation, it's important to have this set consistently across all of them.")),
("feed", "mm", 200, _("feedrate")),
("simulate", "bool", False, _("simulate"), _("Use this option to simulate cuting of dragknife path. Resulting shape will reflect what shape will actuall be cut. This should reverse the dragknife procedure and give you back the original shape from g-code that was previously processed for dragknife.")),
("simpreci", "mm", 0.5, _("simulation precision"), _("Simulation is currently approximated by using lots of short lines. This is the length of these lines."))
]
self.buttons.append("exe") #<<< This is the button added at bottom to call the execute method below
self.help = """DragKnifes are special kind of razor/blade holders that can be fit into spindle of your CNC (do not turn the spindle on!!!). They are often used to cut soft and thin materials like vinyl stickers, fabric, leather, rubber gaskets, paper, cardboard, etc...
Dragknife blade is located off center to allow for automatic rotation (kinda like rear wheels of car pivot to the direction of front wheels).
This fact introduces the need for preprocessing the g-code to account with that offset. Otherwise it wouldn't be able to cut sharp corners. This plugin does this g-code postprocessing.
"""
# ----------------------------------------------------------------------
# This method is executed when user presses the plugin execute button
# ----------------------------------------------------------------------
def execute(self, app):
dragoff = self.fromMm("offset")
angleth = self["angle"]
swivelz = self.fromMm("swivelz")
initdir = self["initdir"]
CNC.vars["cutfeed"] = self.fromMm("feed")
simulate = self["simulate"]
simpreci = self["simpreci"]
def initPoint(P, dir, offset):
P = Vector(P[0], P[1])
if dir == 'X+':
P[0]+=offset
elif dir == 'X-':
P[0]-=offset
elif dir == 'Y+':
P[1]+=offset
elif dir == 'Y-':
P[1]-=offset
return P
blocks = []
for bid in app.editor.getSelectedBlocks():
if len(app.gcode.toPath(bid)) < 1: continue
opath = app.gcode.toPath(bid)[0]
npath = Path("dragknife %s: %s"%(dragoff,app.gcode[bid].name()))
if not simulate:
#Entry vector
ventry = Segment(Segment.LINE, initPoint(opath[0].A, initdir, -dragoff), opath[0].A)
#Exit vector
vexit = Segment(Segment.LINE, opath[-1].B, initPoint(opath[-1].B, initdir, dragoff))
opath.append(vexit)
prevseg = ventry
#Generate path with tangential lag for dragknife operation
for i,seg in enumerate(opath):
#Get adjacent tangential vectors in this point
TA = prevseg.tangentEnd()
TB = seg.tangentStart()
#Compute difference between tangential vectors of two neighbor segments
angle = degrees(acos(TA.dot(TB)))
#Compute swivel direction
arcdir = ( TA[0] * TB[1] ) - ( TA[1] * TB[0] )
if arcdir < 0:
arcdir = Segment.CW
else:
arcdir = Segment.CCW
#Append swivel if needed (also always do entry/exit)
if abs(angle) > angleth or (abs(angle) > 1 and ( i == 0 or i == len(opath)-1 )):
arca = Segment(arcdir, prevseg.tangentialOffset(dragoff).B, seg.tangentialOffset(dragoff).A, prevseg.B)
if swivelz !=0: arca._inside = [swivelz]
npath.append(arca)
#Append segment with tangential offset
if i < len(opath)-1:
npath.append(seg.tangentialOffset(dragoff))
prevseg = seg
elif simulate:
opath = opath.linearize(simpreci, True)
prevknife = initPoint(opath[0].A, initdir, -dragoff)
for seg in opath:
dist = sqrt((seg.B[0]-prevknife[0])**2+(seg.B[1]-prevknife[1])**2)
move = ( seg.B - prevknife ).unit() * ( dist - dragoff )
newknife = prevknife + move
if not eq(newknife, prevknife):
npath.append(Segment(Segment.LINE, prevknife, newknife))
prevknife = newknife
eblock = app.gcode.fromPath(npath)
blocks.append(eblock)
#active = app.activeBlock()
#if active == 0: active+=1
active=-1 #add to end
app.gcode.insBlocks(active, blocks, "Dragknife") #<<< insert blocks over active block in the editor
app.refresh() #<<< refresh editor
app.setStatus(_("Generated: Dragknife")) #<<< feed back result
#app.gcode.blocks.append(block)
| 2.484375 | 2 |
missions_to_mars/scrape_mars.py | paola1395/web-scraping-challenge | 0 | 12768556 | #Dependencies
from bs4 import BeautifulSoup as bs
import requests
from webdriver_manager.chrome import ChromeDriverManager
from splinter import Browser
import pandas as pd
import os
def init_browser():
    """Create a visible (non-headless) Chrome splinter browser, downloading
    a matching chromedriver binary if one is not cached."""
    executable_path = {'executable_path': ChromeDriverManager().install()}
    return Browser("chrome", **executable_path, headless=False)
def scrape():
    """Scrape Mars data from several NASA-related sites into one dict.

    Keys produced: "news_title", "news_p", "featured_image_url",
    "mars_facts_table" (HTML table string) and "hemisphere_image_urls"
    (list of {"title", "img_url"} dicts).  Opens and quits its own
    browser; performs network I/O throughout.
    """
    browser = init_browser()
    mars_data = {}

    ##### NASA MARS NEWS #####
    #URL of page being scraped
    url= "https://mars.nasa.gov/news/"
    browser.visit(url)
    #HTML Object
    html= browser.html
    # Parse HTML with Beautiful Soup
    soup = bs(html, 'html.parser')
    #Extract title (first list entry is the most recent article)
    latest_news = soup.find_all('div', class_="list_text")
    news = latest_news[0]
    news_title = news.find('div', class_='content_title').text
    #Extract paragraph text
    news_p= news.find('div', class_='article_teaser_body').text
    #Add to mars_data dictionary
    news_title= str(news_title)
    news_p= str(news_p)
    mars_data["news_title"]= news_title
    mars_data["news_p"]= news_p

    ##### JPL MARS SPACE IMAGES #####
    #URL of page
    featured_image_url= 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
    browser.visit(featured_image_url)
    #HTML Object; Parse HTML with Beautiful Soup
    image_html = browser.html
    soup = bs(image_html, 'html.parser')
    #Extract featured image URL from the inline CSS background-image value
    featured_img_url= soup.find('article')['style'].replace('background-image: url(','').replace(');', '')[1:-1]
    main= "https://www.jpl.nasa.gov"
    featured_image_url= main + featured_img_url
    #Add to mars_data dictionary
    featured_image_url= str(featured_image_url)
    mars_data["featured_image_url"]= featured_image_url

    ##### MARS FACTS #####
    #URL of page
    marsfacts_url= 'https://space-facts.com/mars/'
    #Read tables and convert to pandas df (first table on the page)
    mars_facts= pd.read_html(marsfacts_url)
    mars_df= mars_facts[0]
    mars_df.columns= ['Description', 'Value']
    mars_df.set_index('Description', inplace=True)
    #Convert to HTML
    mars_html= mars_df.to_html()
    mars_facts_html= mars_html.replace('\n', '')
    #Add to mars_data dictionary
    mars_data["mars_facts_table"]= mars_facts_html

    ##### MARS HEMISPHERES #####
    #URL of page
    hemisphere_url= "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
    browser.visit(hemisphere_url)
    #HTML Object; Parse HTML with Beautiful Soup
    mars_images_html = browser.html
    soup = bs(mars_images_html, 'html.parser')
    #Extract image urls
    items= soup.find_all('div', class_='item')
    #Create empty list to store image urls
    hemisphere_image_urls= []
    main_url= "https://astrogeology.usgs.gov"
    #Loop through items to get each hemisphere url (one extra page visit each)
    for item in items:
        # Use Beautiful Soup's find() method to navigate and retrieve attributes
        title = item.find('h3').text
        link = item.find('a', class_="itemLink product-item")['href']
        #Create HTML Object for individual hemisphere page; Parse HTML with Beautiful Soup
        browser.visit(main_url + link)
        hemisphere_html= browser.html
        soup= bs(hemisphere_html, 'html.parser')
        #Extract image source url
        img_url= main_url + soup.find('img', class_='wide-image')['src']
        #Create dictionary
        hemisphere_image_urls.append({
            "title": title,
            "img_url": img_url
        })
    #Add to mars_data dictionary
    mars_data['hemisphere_image_urls']= hemisphere_image_urls

    browser.quit()
    return mars_data
| 3.15625 | 3 |
tests/v2/test_incident_team_create_data.py | MichaelTROEHLER/datadog-api-client-python | 0 | 12768557 | # coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import sys
import unittest
import datadog_api_client.v2
from datadog_api_client.v2.model.incident_team_create_attributes import IncidentTeamCreateAttributes
from datadog_api_client.v2.model.incident_team_relationships import IncidentTeamRelationships
from datadog_api_client.v2.model.incident_team_type import IncidentTeamType
globals()['IncidentTeamCreateAttributes'] = IncidentTeamCreateAttributes
globals()['IncidentTeamRelationships'] = IncidentTeamRelationships
globals()['IncidentTeamType'] = IncidentTeamType
from datadog_api_client.v2.model.incident_team_create_data import IncidentTeamCreateData
class TestIncidentTeamCreateData(unittest.TestCase):
    """IncidentTeamCreateData unit test stubs"""

    def setUp(self):
        # No shared fixtures are needed for this stub.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testIncidentTeamCreateData(self):
        """Test IncidentTeamCreateData"""
        # FIXME: construct object with mandatory attributes with example values
        # model = IncidentTeamCreateData() # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| 2.125 | 2 |
src/crop_UCF_QNRF.py | xwjBupt/Counting-ICCV-DSSINet | 63 | 12768558 | import cv2
import scipy.io as scio
from scipy.ndimage import imread
from scipy.misc import imsave
import os
import re
import numpy as np
import itertools
def crop():
    """Tile each UCF-QNRF training image into 512x512 patches and save every
    patch alongside the head annotations that fall inside it.

    NOTE(review): this is Python 2 code (print statements, cmp-based sort,
    integer division); it will not run under Python 3 as written.
    """
    image_path = '/UCF-QNRF_ECCV18/Train/images'
    label_path = '/UCF-QNRF_ECCV18/Train/ground_truth'
    image_save_path = '/UCF-QNRF_ECCV18/Train/crop_images1'
    label_save_path = '//UCF-QNRF_ECCV18/Train/crop_ground_truth1'
    crop_size = 512
    image_files = [filename for filename in os.listdir(image_path) \
        if os.path.isfile(os.path.join(image_path,filename))]
    label_files = [filename for filename in os.listdir(label_path) \
        if os.path.isfile(os.path.join(label_path,filename))]
    # Sort by the first number embedded in each file name so images and
    # labels pair up positionally in the zip() below.
    # NOTE(review): re.findall returns strings, so this sort is
    # lexicographic ("10" < "2") -- confirm whether that is intended.
    image_files.sort(cmp=lambda x, y: cmp((re.findall(r'\d+',x)[0]),(re.findall(r'\d+',y)[0])))
    label_files.sort(cmp=lambda x, y: cmp((re.findall(r'\d+',x)[0]),(re.findall(r'\d+',y)[0])))
    # print image_files
    # print label_files
    for image_file, label_file in zip(image_files, label_files):
        img = imread(os.path.join(image_path, image_file), 0)
        img = img.astype(np.float32, copy=False)
        hh = img.shape[0]
        ww = img.shape[1]
        annPoints = scio.loadmat((os.path.join(label_path, label_file)))['annPoints']
        if hh < crop_size or ww < crop_size:
            # Image smaller than one tile: save it whole, uncropped.
            # NOTE(review): `i` is undefined on this path (it is only bound
            # by the enumerate loop below), so this branch raises NameError.
            imsave(os.path.join(image_save_path, image_file.split('.')[0] + "_%d.jpg" % i), img)
            scio.savemat(os.path.join(label_save_path, label_file.split('.')[0] + "_%d.mat" % i), {"annPoints": annPoints})
            continue
        # Padding needed to make the image an exact multiple of crop_size;
        # it is distributed as overlap between neighbouring tiles.
        h_pad = crop_size - hh % crop_size
        w_pad = crop_size - ww % crop_size
        h_slice = [crop_size * i for i in range(hh / crop_size + 1)]
        w_slice = [crop_size * i for i in range(ww / crop_size + 1)]
        h_border = [h_pad / (hh / crop_size)] * (hh / crop_size - 1) + [h_pad - h_pad / (hh / crop_size) * (hh / crop_size - 1)]
        w_border = [w_pad / (ww / crop_size)] * (ww / crop_size - 1) + [w_pad - w_pad / (ww / crop_size) * (ww / crop_size - 1)]
        h_border = [0] + np.cumsum(h_border).tolist()
        w_border = [0] + np.cumsum(w_border).tolist()
        # print h_pad, h_border, w_pad, w_border
        # Top-left corners of every (possibly overlapping) tile.
        h_indexs = np.array(h_slice) - np.array(h_border)
        w_indexs = np.array(w_slice) - np.array(w_border)
        # print h_index, w_index
        sum_count = 0
        for i, (h_index, w_index) in enumerate(itertools.product(h_indexs, w_indexs)):
            # Crop the patch and keep only the annotations inside it.
            patch = img[h_index:h_index + crop_size, w_index:w_index + crop_size,...]
            count = annPoints[((annPoints[:,0] >= w_index) & (annPoints[:,0] < w_index + crop_size) & \
                (annPoints[:,1] >= h_index) & (annPoints[:,1] < h_index + crop_size) )]
            print w_index, w_index + crop_size, h_index, h_index + crop_size, count, '==' * 10
            # Translate annotation coordinates into the patch's local frame.
            count[:, 0] = count[:, 0] - w_index
            count[:, 1] = count[:, 1] - h_index
            print patch.shape, np.size(count)
            sum_count += np.size(count)
            imsave(os.path.join(image_save_path, image_file.split('.')[0] + "_%d.jpg" % i), patch)
            scio.savemat(os.path.join(label_save_path, label_file.split('.')[0] + "_%d.mat" % i), {"annPoints": count})
        print img.shape, annPoints.shape, annPoints[:,0].max(), annPoints[:,1].min()
        print "*" * 30, sum_count
src/zswag/app.py | gvarga2/zswag | 0 | 12768559 | <filename>src/zswag/app.py
import connexion
import os
import yaml
import inspect
import zserio
import base64
import sys
from types import ModuleType
from typing import Type
from .doc import get_doc_str, IdentType, md_filter_definition
from zswag_client.spec import ZserioSwaggerSpec, ParamFormat, ParamLocation, ZSERIO_REQUEST_PART, ZSERIO_OBJECT_CONTENT_TYPE, ZSERIO_REQUEST_PART_WHOLE
# Name of variable that is added to controller
CONTROLLER_SERVICE_INSTANCE = "_service"
class MethodInfo:
    """
    (Private) Return value of ZserioSwaggerApp._method_info()
    """
    def __init__(self, *, name, docstring="", returntype="", argtype="", returndoc="", argdoc=""):
        # RPC method name as used in the service method map.
        self.name = name
        # Extracted documentation string for the method itself.
        self.docstring = docstring
        # Type names of the method's response and request.
        self.returntype = returntype
        self.argtype = argtype
        # Extracted documentation for the response and request types.
        self.returndoc = returndoc
        self.argdoc = argdoc
class ZserioSwaggerApp(connexion.App):
def __init__(self, *,
controller: ModuleType,
service_type: Type[zserio.ServiceInterface],
zs_pkg_path: str = None,
yaml_path: str = None):
"""
Brief
Marry a user-written app controller with a zserio-generated app server class
(argument parser/response serialiser) and a fitting Swagger OpenApi spec.
The OpenApi spec is auto-generated if the user does not specify an existing file.
If the user specifies an empty YAML path, the yaml file is placed next to the
zserio python-service source-file.
If you have installed `pip install connexion[swagger-ui]`, you can view
API docs of your service under [/prefix]/ui.
Documentation for the service is automatically extracted if `zs_pkg_path` is issued.
Code example
In file my.app.__init__:
from zserio_gen.my.service import Service
from zswag import ZserioSwaggerApp
app = ZserioSwaggerApp(my.app.controller, Service)
In file my.app.controller:
# NOTE: Injected by ZserioSwaggerApp, invisible to developer!
# In swagger yaml, the path specs reference `my.app.controller._service.myApi`
_service = Service()
_service.myApi = lambda request: _service._myApiMethod(request)
_service._myApiImpl = my.app.controller.myApiImpl
# Written by user
def myApi(request):
return "response"
General call structure:
OpenAPI `yaml_file` "paths/service" references
Zserio Server instance injected method (ns.service.service(base64): blob), which calls
ns.service._serviceMethod(blob), which calls
ns.service._serviceImpl(value) which is remapped to
Runtime-generated user function (ns.serviceImpl(value): value).
"""
if not yaml_path:
service_module = sys.modules[service_type.__module__]
yaml_path = os.path.join(
os.path.dirname(os.path.abspath(service_module.__file__)),
f"{service_module.__name__.split('.')[-1]}.{service_type.__name__}.yaml")
print(f"Using yaml path {yaml_path}")
self.yaml_path = yaml_path
yaml_parent_path = os.path.dirname(yaml_path)
yaml_basename = os.path.basename(yaml_path)
self.zs_pkg_path = zs_pkg_path
# Initialise zserio service
self.service_type = service_type
assert inspect.isclass(self.service_type)
# Initialise zserio service working instance
self.controller_path = controller.__name__
self.controller = controller
self.service_instance_path = self.controller_path+f".{CONTROLLER_SERVICE_INSTANCE}"
if not hasattr(self.controller, CONTROLLER_SERVICE_INSTANCE):
setattr(self.controller, CONTROLLER_SERVICE_INSTANCE, self.service_type())
self.service_instance = getattr(self.controller, CONTROLLER_SERVICE_INSTANCE)
# Verify or generate yaml file
if not os.path.isfile(yaml_path):
self.generate_openapi_schema()
self.spec = ZserioSwaggerSpec(yaml_path)
self.verify_openapi_schema()
# Re-route service impl methods
for method_name in self.service_instance._methodMap:
user_function = getattr(self.controller, method_name)
zserio_modem_function = getattr(self.service_instance, f"_{method_name}Method")
assert zserio_modem_function
if not user_function or not inspect.isfunction(user_function):
print(f"WARNING: The controller {self.controller_path} does not implement {method_name}!")
continue
print(f"Found {self.controller_path}.{method_name}.")
if len(inspect.signature(user_function).parameters) != 1:
print(f"ERROR: {self.controller_path}.{method_name} must have single 'request' parameter!")
continue
method_spec = self.spec.method_spec(method_name)
param_spec = method_spec.params[0]
def wsgi_method(fun=zserio_modem_function, param=param_spec, **kwargs):
param_name = param.name if param.location != ParamLocation.BODY else "body"
assert param_name in kwargs
param_value = kwargs[param_name]
if param.format == ParamFormat.BYTE:
param_value = base64.b64decode(param_value)
else:
assert param.format == ParamFormat.BINARY
return bytes(fun(param_value, None))
setattr(self.service_instance, method_name, wsgi_method)
def method_impl(request, ctx=None, fun=user_function):
return fun(request)
setattr(self.service_instance, f"_{method_name}Impl", method_impl)
# Initialise connexion app
super(ZserioSwaggerApp, self).__init__(
self.controller_path,
specification_dir=yaml_parent_path)
# Add the API according to the verified yaml spec.
self.add_api(
yaml_basename,
arguments={"title": f"REST API for {service_type.__name__}"},
pythonic_params=False)
def verify_openapi_schema(self):
for method_name in self.service_instance._methodMap:
assert self.spec.method_spec(method_name)
def generate_openapi_schema(self):
print(f"NOTE: Writing OpenApi schema to {self.yaml_path}")
service_name_parts = self.service_instance.SERVICE_FULL_NAME.split(".")
schema = {
"openapi": "3.0.0",
"info": {
"title": ".".join(service_name_parts[1:]),
"description": md_filter_definition(get_doc_str(
ident_type=IdentType.SERVICE,
pkg_path=self.zs_pkg_path,
ident=self.service_instance.SERVICE_FULL_NAME,
fallback=[f"REST API for {self.service_instance.SERVICE_FULL_NAME}"]
)[0]),
"contact": {
"email": "TODO"
},
"license": {
"name": "TODO"
},
"version": "TODO",
},
"servers": [],
"paths": {
f"/{method_info.name}": {
"post": {
"summary": method_info.docstring,
"description": method_info.docstring,
"operationId": method_info.name,
"requestBody": {
"description": method_info.argdoc,
"content": {
ZSERIO_OBJECT_CONTENT_TYPE: {
"schema": {
"type": "string"
}
}
}
},
"responses": {
"200": {
"description": method_info.returndoc,
"content": {
"application/octet-stream": {
"schema": {
"type": "string",
"format": "binary"
}
}
}
}
},
"x-openapi-router-controller": self.service_instance_path
},
} for method_info in (self._method_info(method_name) for method_name in self.service_instance._methodMap)
}
}
with open(self.yaml_path, "w") as yaml_file:
yaml.dump(schema, yaml_file, default_flow_style=False)
def _method_info(self, method_name: str) -> MethodInfo:
result = MethodInfo(name=method_name)
if not self.zs_pkg_path:
return result
doc_strings = get_doc_str(
ident_type=IdentType.RPC,
pkg_path=self.zs_pkg_path,
ident=f"{self.service_instance.SERVICE_FULL_NAME}.{method_name}")
if not doc_strings:
return result
result.docstring = doc_strings[0]
result.returntype = doc_strings[1]
result.argtype = doc_strings[2]
result.returndoc = md_filter_definition(get_doc_str(
ident_type=IdentType.STRUCT,
pkg_path=self.zs_pkg_path,
ident=result.returntype,
fallback=[f"### struct {result.returntype}"])[0])
result.argdoc = md_filter_definition(get_doc_str(
ident_type=IdentType.STRUCT,
pkg_path=self.zs_pkg_path,
ident=result.argtype,
fallback=[f"### struct {result.argtype}"])[0])
return result
| 2.34375 | 2 |
problems/191.Number_of_1_Bits/li.py | subramp-prep/leetcode | 0 | 12768560 | class Solution(object):
def hammingWeight(self, n):
"""
:type n: int
:rtype: int
"""
res = 0
while n:
res += 1
n &= (n-1)
return res
def hammingWeight(self, n):
for i in range(33):
if not n: return i
n &= (n - 1)
# cheat
def hammingWeight(self, n):
return bin(n).count('1')
| 3.46875 | 3 |
maple/core/__init__.py | UT-Austin-RPL/maple | 9 | 12768561 | """
General classes, functions, utilities that are used throughout maple.
"""
from maple.core.logging import logger
__all__ = ['logger']
| 0.851563 | 1 |
cnn_denoiser/model_definitions/moussaka.py | ramonfmir/deblurring-public | 0 | 12768562 | import tensorflow as tf
import cnn_denoiser.input_data as input_data
import glob
import os
# Parameters
channels = 1
dataset_path = 'data/pretraining'
image_width = 270
image_height = 90
batch_size = 64
image_data = input_data.load_images(dataset_path, image_width, image_height)
pretrain_steps_first_layer = 1000
pretrain_steps = 100
training_dropout = 0.5
start_learning_rate = 0.001
steps_before_decay = 1000
decay_rate = 0.995
global_step = tf.Variable(0, trainable=False)
alpha = tf.train.exponential_decay(start_learning_rate,
global_step,
steps_before_decay,
decay_rate,
staircase=True)
def summary_layer(net, name):
    """Log the first channel of `net` as a TensorBoard image summary.

    Transposes to (channels, batch, h, w), takes channel 0, then restores
    a trailing singleton channel axis so tf.summary.image accepts it.
    """
    tf.summary.image(name, tf.expand_dims(tf.transpose(net, [3, 0, 1, 2])[0], 3), max_outputs=1)
def conv_layer_dropout(net, layer, out_channels, filter_dims, strides, padding, name, dropout=0.5, act_f = tf.nn.relu, pre=False):
    """Apply dropout to `net`, then build a conv layer via conv_layer()."""
    net = tf.layers.dropout(net, dropout)
    return conv_layer(net, layer, out_channels, filter_dims, strides, padding, name, act_f, pre=pre)
def conv_layer(net, layer, out_channels, filter_dims, strides, padding, name, act_f = tf.nn.relu, pre=False):
    """Apply `layer` (tf.layers.conv2d or conv2d_transpose) then `act_f`.

    Unless `pre` is True (pretraining decoder pass), the activations are
    also logged as an image summary under `name`.
    """
    net = layer(net, out_channels, filter_dims, strides=strides, padding=padding)
    net = act_f(net)
    if not pre:
        summary_layer(net, name)
    return net
def pre_train_conv_layer(inputs, layer, out_channels, filt, strides, name, act_f = tf.nn.relu, dropout=0.5):
    """Build one conv layer plus a mirrored decoder layer for greedy
    layer-wise (autoencoder) pretraining.

    Returns (net, step, cost): the forward activations, a gradient-descent
    op restricted to this layer pair's variables, and the reconstruction
    loss against `inputs`.
    """
    forward = layer
    # The mirror of conv2d is conv2d_transpose, and vice versa.
    backward = tf.layers.conv2d_transpose if layer is tf.layers.conv2d else tf.layers.conv2d
    name_scope = name + '_pretrain_scope'
    with tf.variable_scope(name_scope) as vs:
        net = conv_layer_dropout(inputs, forward, out_channels, filt, strides, 'SAME', name, dropout=dropout)
        # Reconstruct the input from the activations (autoencoder target).
        out = conv_layer_dropout(net, backward, int(inputs.shape[-1]), filt, strides, 'SAME', name + '_pretrain', pre=True)
    # Train only the variables of the two layers just created
    # (expected: 2 kernels + 2 biases = 4 variables).
    trainable_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=vs.name)
    var_list = [v for v in trainable_variables if name in v.name]
    if len(var_list) != 4:
        raise Exception("No two unique output layers to pretrain")
    cost = tf.reduce_mean(tf.square(out - inputs))
    step = tf.train.GradientDescentOptimizer(alpha).minimize(cost, var_list=var_list, global_step=global_step)
    return net, step, cost
files = glob.glob('./pretrain/*')
for f in files:
os.remove(f)
writer = tf.summary.FileWriter("./pretrain", graph=tf.get_default_graph())
def pretrain(epochs, step, loss, placeholder, name, training):
    """Run `epochs` pretraining iterations of `step`, feeding blurred
    batches into `placeholder`.  No-op when `training` is False.

    NOTE(review): a fresh session plus global_variables_initializer()
    re-initialises *all* variables on every call, discarding weights
    pretrained for earlier layers -- confirm this is intended.
    """
    if not training:
        return
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    sess.run(tf.global_variables_initializer())
    summary_op = tf.summary.merge_all()
    for i in range(epochs):
        input_, blurred = image_data.next_batch(batch_size)
        _, cost, summary = sess.run([step, loss, summary_op], feed_dict={placeholder: blurred})
        writer.add_summary(summary, i)
        print(i, "Pretrain " + name, cost)
def autoencoder(original, inputs, training):
    """Build the 3-layer conv encoder / 3-layer deconv decoder, greedily
    pretraining each layer pair when `training` is True.

    Returns the tanh-activated reconstruction of `inputs`.
    """
    dropout = training_dropout if training else 0.0
    # Encoder
    net, step, loss = pre_train_conv_layer(inputs, tf.layers.conv2d, 512, [5, 5], (3, 3), 'conv1', dropout=0.0)
    print(net.shape)
    # The first layer gets a longer pretraining schedule.
    pretrain(pretrain_steps_first_layer, step, loss, original, 'conv1', training)
    net, step, loss = pre_train_conv_layer(net, tf.layers.conv2d, 256, [5, 5], (3, 3), 'conv2', dropout=dropout)
    print(net.shape)
    pretrain(pretrain_steps, step, loss, original, 'conv2', training)
    net, step, loss = pre_train_conv_layer(net, tf.layers.conv2d, 128, [5, 5], (1, 1), 'conv3', dropout=dropout)
    print(net.shape)
    pretrain(pretrain_steps, step, loss, original, 'conv3', training)
    # Decoder (mirrors the encoder with transposed convolutions)
    net, step, loss = pre_train_conv_layer(net, tf.layers.conv2d_transpose, 128, [5, 5], (1, 1), 'deconv1', dropout=dropout)
    print(net.shape)
    pretrain(pretrain_steps, step, loss, original, 'deconv1', training)
    net, step, loss = pre_train_conv_layer(net, tf.layers.conv2d_transpose, 256, [5, 5], (3, 3), 'deconv2', dropout=dropout)
    print(net.shape)
    pretrain(pretrain_steps, step, loss, original, 'deconv2', training)
    net, step, loss = pre_train_conv_layer(net, tf.layers.conv2d_transpose, channels, [5, 5], (3, 3), 'deconv3', dropout=dropout)
    print(net.shape)
    pretrain(pretrain_steps, step, loss, original, 'deconv3', training)
    # Final tanh activation
    net = tf.nn.tanh(net)
    return net
| 2.796875 | 3 |
api/serializers.py | IzmdI/yamdb_reviews_api_service | 0 | 12768563 | <gh_stars>0
from django.contrib.auth import password_validation
from django.contrib.auth.hashers import make_password
from django.core import exceptions
from rest_framework import serializers
from .models import Category, Comment, Genre, Review, Title, User
class UserSerializer(serializers.ModelSerializer):
    """Serializer for User accounts.

    The password field is write-only: it is validated with Django's
    configured password validators and stored hashed, never echoed back.
    """
    password = serializers.CharField(write_only=True, required=False)

    def validate_password(self, password):
        # Run Django's password validators and surface their messages as
        # DRF validation errors.
        try:
            password_validation.validate_password(password=password)
        except exceptions.ValidationError as err:
            raise serializers.ValidationError(err.messages)
        # Hash here so the view can persist the validated value as-is.
        return make_password(password)

    class Meta:
        fields = (
            "first_name",
            "last_name",
            "username",
            "bio",
            "email",
            "role",
            "password",
        )
        model = User
class UserCodeSerializer(serializers.ModelSerializer):
email = serializers.EmailField(required=True)
class Meta:
fields = "__all__"
model = User
extra_kwargs = {
"password": {"required": False},
"username": {"required": False},
}
class UserJwtSerializer(serializers.ModelSerializer):
email = serializers.EmailField(required=True)
confirmation_code = serializers.CharField(required=True)
class Meta:
fields = "__all__"
model = User
extra_kwargs = {
"password": {"required": False},
"username": {"required": False},
}
class GenreSerializer(serializers.ModelSerializer):
class Meta:
exclude = ["id"]
model = Genre
class CategorySerializer(serializers.ModelSerializer):
class Meta:
exclude = ["id"]
model = Category
class TitleSerializerCreate(serializers.ModelSerializer):
genre = serializers.SlugRelatedField(
slug_field="slug",
queryset=Genre.objects.all(),
many=True,
required=False,
)
category = serializers.SlugRelatedField(
slug_field="slug",
queryset=Category.objects.all(),
required=False,
)
class Meta:
fields = "__all__"
model = Title
extra_kwargs = {
"description": {"required": False},
}
class TitleSerializer(serializers.ModelSerializer):
genre = GenreSerializer(many=True, read_only=True)
category = CategorySerializer(read_only=True)
rating = serializers.FloatField(read_only=True)
class Meta:
fields = "__all__"
model = Title
class ReviewSerializer(serializers.ModelSerializer):
    """Serializer for Review objects.

    The author defaults to the requesting user, and creation enforces at
    most one review per (title, author) pair.
    """
    title = serializers.SlugRelatedField(
        default=None,
        slug_field="id",
        write_only=True,
        queryset=Title.objects.all(),
    )
    author = serializers.SlugRelatedField(
        default=serializers.CurrentUserDefault(),
        read_only=True,
        slug_field="username",
    )

    def validate(self, attrs):
        # The title id comes from the URL kwargs, not the request payload.
        title = self.context["request"].parser_context["kwargs"]["title_id"]
        request = self.context.get("request")
        # Only guard POST so updates to an existing review remain allowed.
        if (
            request.method == "POST"
            and Review.objects.filter(
                title=title, author=request.user
            ).exists()
        ):
            raise serializers.ValidationError(
                "This author has already written a review"
            )
        return attrs

    class Meta:
        model = Review
        fields = "__all__"
class CommentSerializer(serializers.ModelSerializer):
author = serializers.SlugRelatedField(
read_only=True,
slug_field="username",
)
class Meta:
model = Comment
exclude = ["review"]
extra_kwargs = {
"title": {"required": False},
"review": {"required": False},
}
| 2.390625 | 2 |
python_practice/data_structure/queue/queue_linked_listTest.py | jeremykid/FunAlgorithm | 0 | 12768564 | <gh_stars>0
from queue_link_list import queue_linked_list
def test1():
    # Smoke test for the linked-list backed queue: enqueue, empty, dequeue.
    # NOTE: uses Python 2 print statements; this script is not Python 3 compatible.
    QLL = queue_linked_list()
    QLL.enqueue('A')
    QLL.enqueue('B')
    QLL.enqueue('C')
    print "1. QLL = ",QLL
    QLL.enqueue('D')
    print "2. After QLL.enqueue('D') QLL = ",QLL
    print "3. After QLL.empty() = ",QLL.empty()
    QLL.dequeue()
    print "4. After QLL.dequeue() QLL = ",QLL

def main():
    # Entry point: run the single smoke test.
    test1()

main()
notebook/opencv_threshold.py | vhn0912/python-snippets | 174 | 12768565 | import cv2
im = cv2.imread('data/src/lena_square_half.png')

# Binary threshold: per channel, pixels > 128 become 255, the rest 0.
th, im_th = cv2.threshold(im, 128, 255, cv2.THRESH_BINARY)
print(th)
# 128.0
cv2.imwrite('data/dst/opencv_th.jpg', im_th)
# True

# To-zero threshold: pixels <= 128 become 0, the rest keep their value.
th, im_th_tz = cv2.threshold(im, 128, 255, cv2.THRESH_TOZERO)
print(th)
# 128.0
cv2.imwrite('data/dst/opencv_th_tz.jpg', im_th_tz)
# True

# Otsu thresholding fails on a 3-channel image (requires 8-bit single channel):
# th, im_th_otsu = cv2.threshold(im, 128, 192, cv2.THRESH_OTSU)
# error: OpenCV(4.2.0) /tmp/opencv-20200105-17262-cwpzm4/opencv-4.2.0/modules/imgproc/src/thresh.cpp:1529: error: (-215:Assertion failed) src.type() == CV_8UC1 in function 'threshold'

# Convert to grayscale first; Otsu then picks the threshold itself (117 here).
im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
th, im_gray_th_otsu = cv2.threshold(im_gray, 128, 192, cv2.THRESH_OTSU)
print(th)
# 117.0
cv2.imwrite('data/dst/opencv_th_otsu.jpg', im_gray_th_otsu)
# True
| 2.828125 | 3 |
src/statistics.py | Thefalas/disksMD | 0 | 12768566 | <reponame>Thefalas/disksMD
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 8 17:33:47 2018
@author: malopez
"""
import math
import numpy as np
from scipy.stats import kurtosis
import matplotlib.pyplot as plt
import seaborn
from tools import readData
def getStationaryState(n_collisions, data_folder):
    """Return the velocities from (approximately) the last 75 % of collisions.

    Reads every per-collision velocity array from `data_folder` via readData
    and stacks the most recent ones (newest first) into a single 2D array.

    :param n_collisions: total number of recorded collisions
    :param data_folder: folder the per-collision data files were saved to
    :return: 2D numpy array of stacked velocity rows
    """
    # Read all per-collision velocity arrays from the simulation output.
    velocities = [readData(a, data_folder) for a in range(n_collisions)]

    # Collect the most recent arrays (newest first) and stack them once.
    # A single np.vstack over a list avoids the O(n^2) cost of growing the
    # array inside the loop, while keeping the exact same row order.
    last75 = int(75*n_collisions/100)
    chunks = [velocities[n_collisions - 1]]
    chunks += [velocities[(n_collisions - 2) - a] for a in range(last75)]
    vel = np.vstack(chunks)
    return vel
def velocityDistribution(n_collisions, data_folder):
    """ This function returns an histogram plot of the velocity distribution of
        particles, the last 75% of total collisions are used for this """
    # First, we need to read the data from the simulation
    vel = getStationaryState(n_collisions, data_folder)
    # Earlier exploratory alternatives, kept for reference:
    # velX = vel[:,0]
    # velY = vel[:,1]
    # h = np.histogram(vel)
    # h2 = np.hstack(h)

    # Kernel density estimate of the x-component on top of the histogram.
    seaborn.set_style('whitegrid')
    seaborn.kdeplot(vel[:,0], bw=0.5)
    #seaborn.kdeplot(vel[:,0]+vel[:,1], bw=0.5)
    # With former array vel we can plot two histograms (for x and y directions)
    #b = [-9,-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9]
    #histPlot = plt.hist(vel, density=True, bins=b)
    histPlot = plt.hist(vel, density=True, bins='auto')
    # Another interesting visualization
    # histPlot2D = plt.hist2d(vel[:,0], vel[:,1], bins=b)
    return histPlot
def computeKurtosis(n_collisions, data_folder):
    """Pearson kurtosis (per velocity component) of the stationary state."""
    stationary_velocities = getStationaryState(n_collisions, data_folder)
    return kurtosis(stationary_velocities, fisher=False)
def computeKurtosisCustom(vel):
    """Kurtosis of the speed distribution, k = <v^4> / <v^2>^2.

    :param vel: (N, 2) array of velocity components (vx, vy)
    :return: the kurtosis as a float
    """
    v2_sep = vel*vel
    v2 = v2_sep[:,0] + v2_sep[:,1]
    # np.sqrt runs elementwise in C; the previous np.vectorize(math.sqrt)
    # looped at Python speed for the same result.
    v = np.sqrt(v2)
    k = (v**4).mean()/((v**2).mean())**2
    return k
def computeExcessKurtosis_a2(kurtosis, dimensions):
    """Second Sonine coefficient a2 from the kurtosis in `dimensions` dimensions."""
    ratio = dimensions/(dimensions + 2)
    return ratio*kurtosis - 1
approval_system/settings.py | rmboot/approvalsystem | 3 | 12768567 | <reponame>rmboot/approvalsystem<gh_stars>1-10
import os
import sys
# Project root directory (two levels up from this settings file).
basedir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))

# SQLite URL prefix: Windows paths need three slashes, POSIX absolute
# paths need four ('sqlite:////' + '/abs/path').
WIN = sys.platform.startswith('win')
if WIN:
    prefix = 'sqlite:///'
else:
    prefix = 'sqlite:////'
class BaseConfig:
    """Settings shared by all environments (development/testing/production)."""

    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # NOTE(review): regenerated on every process start, so sessions/CSRF
    # tokens are invalidated on restart -- confirm this is intended.
    SECRET_KEY = os.urandom(24)
    BOOTSTRAP_SERVE_LOCAL = True
    # Destination for Flask-Uploads archive files.
    UPLOADED_ARCHIVES_DEST = basedir + '/uploads/'
    CKEDITOR_ENABLE_CSRF = True
    # Endpoint handling CKEditor image uploads.
    CKEDITOR_FILE_UPLOADER = 'user.upload_image'
    CKEDITOR_HEIGHT = '100'
class DevelopmentConfig(BaseConfig):
    """Local development settings backed by MySQL."""

    # SQLALCHEMY_DATABASE_URI Mysql Format:'mysql+mysqlconnector://username:password@server/db'
    # SECURITY: database credentials are hard-coded here; prefer reading them
    # from environment variables.
    SQLALCHEMY_DATABASE_URI = 'mysql+mysqlconnector://zw:zhangwang@localhost/approval_system?charset=utf8'
    # SQLALCHEMY_DATABASE_URI = prefix + os.path.join(basedir, 'data-dev.db')
class TestingConfig(BaseConfig):
    """Settings for the test suite: CSRF off, throwaway in-memory database."""

    TESTING = True
    WTF_CSRF_ENABLED = False
    SQLALCHEMY_DATABASE_URI = 'sqlite:///'  # in-memory database


class ProductionConfig(BaseConfig):
    """Production settings; SECURITY: credentials are hard-coded (see above)."""

    SQLALCHEMY_DATABASE_URI = 'mysql+mysqlconnector://zw:zhangwang@localhost/approval_system?charset=utf8'
    # SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', prefix + os.path.join(basedir, 'data.db'))


# Name -> config class lookup used by the app factory.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
}
| 1.851563 | 2 |
cgls.py | iggyuga/raisr2 | 275 | 12768568 | import numpy as np
def cgls(A, b):
    """Solve A x = b by direct inversion, damping near-singular matrices.

    While the accumulated filter weight (A.sum()) stays at or above 100,
    a small multiple of the identity is repeatedly added until det(A) >= 1,
    at which point the system is solved. Otherwise the zero vector is
    returned.
    """
    rows, cols = A.shape
    solution = np.zeros(rows)
    total = A.sum()
    while total >= 100:
        if np.linalg.det(A) >= 1:
            solution = np.linalg.inv(A).dot(b)
            break
        # Regularise: nudge the diagonal proportionally to the total weight.
        A = A + np.eye(rows, cols) * total * 0.000000005
        total = A.sum()
    return solution
test/python/topology/test2_spl_window.py | markheger/streamsx.topology | 31 | 12768569 | # Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016
import unittest
import sys
import itertools
import threading
from streamsx.topology.topology import *
from streamsx.topology.tester import Tester
from streamsx.topology import schema
import streamsx.topology.context
from streamsx.topology.context import JobConfig
from streamsx.topology.context import ConfigParams
import streamsx.spl.op as op
class TestSPLWindow(unittest.TestCase):
    """ Test invocations of SPL operators from Python topology.
    """
    _multiprocess_can_split_ = True

    # Fake out subTest
    # Python 2's unittest has no subTest; a Lock is a usable no-op context manager.
    if sys.version_info.major == 2:
        def subTest(self, **args): return threading.Lock()

    def setUp(self):
        # Run against a standalone (local) Streams runtime.
        Tester.setup_standalone(self)

    def test_sliding_count(self):
        """Sliding count window of 4 tuples with literal trigger steps 1 and 3."""
        for step in [1, 3]:
            with self.subTest(step=step):
                topo = Topology()
                b = op.Source(topo, "spl.utility::Beacon",
                    'tuple<uint64 seq>',
                    params = {'iterations':12})
                b.seq = b.output('IterationCount()')
                s = b.stream
                agg = op.Map('spl.relational::Aggregate', s.last(4).trigger(step),
                    schema = 'tuple<uint64 sum, uint64 max>')
                agg.sum = agg.output('Sum(seq)')
                agg.max = agg.output('Max(seq)')
                # Expected aggregates of each full window of the last 4 values.
                expected = []
                for i in range(4 + step - 2, 12, step):
                    expected.append({'sum': sum(range(i-3, i+1)), 'max': i})
                tester = Tester(topo)
                tester.contents(agg.stream, expected)
                tester.test(self.test_ctxtype, self.test_config)

    def test_sliding_count_stv(self):
        """Window size given by a submission-time value with default 4."""
        for step in [1, 3]:
            with self.subTest(step=step):
                topo = Topology()
                b = op.Source(topo, "spl.utility::Beacon",
                    'tuple<uint64 seq>',
                    params = {'iterations':12})
                b.seq = b.output('IterationCount()')
                s = b.stream
                count = topo.create_submission_parameter('count', 4)
                window = s.last(count).trigger(step)
                agg = op.Map('spl.relational::Aggregate', window,
                    schema = 'tuple<uint64 sum, uint64 max>')
                agg.sum = agg.output('Sum(seq)')
                agg.max = agg.output('Max(seq)')
                expected = []
                for i in range(4 + step - 2, 12, step):
                    expected.append({'sum': sum(range(i-3, i+1)), 'max': i})
                tester = Tester(topo)
                tester.contents(agg.stream, expected)
                tester.test(self.test_ctxtype, self.test_config)

    def test_sliding_count_stv_no_default(self):
        """Window size from a defaultless submission parameter set via JobConfig."""
        step =1
        topo = Topology()
        b = op.Source(topo, "spl.utility::Beacon",
            'tuple<uint64 seq>',
            params = {'iterations':12})
        b.seq = b.output('IterationCount()')
        s = b.stream
        count = topo.create_submission_parameter('count', type_=int)
        window = s.last(count).trigger(step)
        agg = op.Map('spl.relational::Aggregate', window,
            schema = 'tuple<uint64 sum, uint64 max>')
        agg.sum = agg.output('Sum(seq)')
        agg.max = agg.output('Max(seq)')
        expected = []
        for i in range(4 + step - 2, 12, step):
            expected.append({'sum': sum(range(i-3, i+1)), 'max': i})
        # Supply the window size at submission time.
        jc = JobConfig()
        jc.submission_parameters['count'] = 4
        jc.add(self.test_config)
        tester = Tester(topo)
        tester.contents(agg.stream, expected)
        tester.test(self.test_ctxtype, self.test_config)

    def test_sliding_time_stv(self):
        """Time-based sliding window whose duration is a submission parameter."""
        topo = Topology()
        b = op.Source(topo, "spl.utility::Beacon",
            'tuple<uint64 seq>',
            params = {'iterations':12})
        b.seq = b.output('IterationCount()')
        s = b.stream
        time = topo.create_submission_parameter('time', 2)
        window = s.lastSeconds(time).trigger(1)
        agg = op.Map('spl.relational::Aggregate', window,
            schema = 'tuple<uint64 sum, uint64 max>')
        agg.sum = agg.output('Sum(seq)')
        agg.max = agg.output('Max(seq)')
        # Window content is timing dependent; only the tuple count is checked.
        tester = Tester(topo)
        tester.tuple_count(agg.stream, 12)
        tester.test(self.test_ctxtype, self.test_config)

    def test_sliding_time_stv_no_default(self):
        """Time-based window with a defaultless duration set via JobConfig."""
        topo = Topology()
        b = op.Source(topo, "spl.utility::Beacon",
            'tuple<uint64 seq>',
            params = {'iterations':12})
        b.seq = b.output('IterationCount()')
        s = b.stream
        wtime = topo.create_submission_parameter(name='secs', type_=int)
        window = s.lastSeconds(wtime).trigger(1)
        agg = op.Map('spl.relational::Aggregate', window,
            schema = 'tuple<uint64 sum, uint64 max>')
        agg.sum = agg.output('Sum(seq)')
        agg.max = agg.output('Max(seq)')
        jc = JobConfig()
        jc.submission_parameters['secs'] = 2
        jc.add(self.test_config)
        tester = Tester(topo)
        tester.tuple_count(agg.stream, 12)
        tester.test(self.test_ctxtype, self.test_config)
class TestDistributedSPLWindow(TestSPLWindow):
    """Runs the same window tests against a distributed Streams instance."""

    def setUp(self):
        Tester.setup_distributed(self)
        # Test instances commonly use self-signed certificates.
        self.test_config[ConfigParams.SSL_VERIFY] = False
adt/core.py | catethos/adt | 0 | 12768570 | from pampy import match, _
from dataclasses import dataclass
from typing import TypeVar, Generic
T = TypeVar('T')
class Matcher:
    """Collects (pattern, handler) pairs and dispatches through pampy.match."""

    def __init__(self):
        # Flat [pattern, handler, pattern, handler, ...] list, the argument
        # layout pampy.match expects.
        self.pattern = []

    def __setitem__(self, pat, method):
        # matcher[pattern] = handler registers a new case.
        self.pattern.append(pat)
        self.pattern.append(method)

    def __call__(self, *x):
        # A single argument is matched directly; several are matched as a tuple.
        if len(x)==1:
            x = x[0]
        return match(x, *self.pattern)
class Multimethod:
    """Context manager sugar: `with Multimethod() as m:` yields a fresh Matcher."""

    def __init__(self):
        pass

    def __enter__(self):
        return Matcher()

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # No cleanup needed; exceptions propagate (returns None).
        pass
class Case:
    """Marker describing one ADT constructor: field name -> type mapping."""

    def __init__(self, **kwargs):
        self.slot = dict(kwargs)
class ADTMeta(type):
    """Metaclass turning `name: Case(...)` annotations into dataclass variants.

    Each annotation whose value is a Case becomes a dataclass subclass of the
    ADT, attached as a class attribute of the same name.
    """

    def __init__(cls, clsname, bases, clsdict):
        annot = clsdict["__annotations__"]
        for name in list(annot.keys()):
            try:
                # Only Case annotations have .slot; others raise AttributeError
                # and are deliberately skipped.
                d = annot[name].slot
                t = dataclass(type(name, (cls,), {"__annotations__":d}))
                setattr(cls, name, t)
            except AttributeError:
                pass
        super().__init__(clsname, bases, clsdict)
networks/activations.py | Chappie733/MLPack | 0 | 12768571 | <reponame>Chappie733/MLPack
import numpy as np
from numbers import Number
# Registry: lower-cased activation name -> wrapped activation function.
ACTIVATIONS = {}

def parser(func):
    """Decorator validating arguments and registering the activation.

    Note: the wrapper forwards the collected keyword arguments as a single
    positional dict, so wrapped functions receive it via *args and read it
    as args[0] (see e.g. ELU's alpha handling below).
    """
    def wrapper(x, deriv=False, **kwargs):
        if not isinstance(deriv, bool):
            raise TypeError("Expected the parameter \'deriv\' to be a boolean, but received {type} instead!".format(type=type(deriv)))
        elif not isinstance(x, np.ndarray) and not isinstance(x, Number):
            raise TypeError("Expected the parameter \'x\' to be a numpy array or a number, but received {type} instead!".format(type=type(x)))
        return func(x, deriv, kwargs)
    wrapper.__name__ = func.__name__
    ACTIVATIONS[wrapper.__name__.lower()] = wrapper
    return wrapper
# f(x) = x; derivative 1 everywhere.
@parser
def linear(x, deriv=False, *args):
    return x if not deriv else np.ones(x.shape)

# Logistic sigmoid; derivative s(x)(1 - s(x)).
@parser
def sigmoid(x, deriv=False, *args):
    return 1/(1+np.exp(-x)) if not deriv else sigmoid(x)*(1-sigmoid(x))

# Hyperbolic tangent; derivative 1 - tanh(x)^2.
@parser
def tanh(x, deriv=False, *args):
    return np.tanh(x) if not deriv else 1-np.tanh(x)**2

# max(0, x) written as (|x| + x)/2; derivative is the unit step
# (value 1/2 exactly at 0).
@parser
def ReLu(x, deriv=False, *args):
    return (np.abs(x)+x)/2 if not deriv else (np.sign(x)+1)/2

# Exponential linear unit; optional 'alpha' keyword (default 1).
@parser
def ELU(x, deriv=False, *args):
    alpha = 1 if 'alpha' not in args[0] else args[0]['alpha']
    return np.where(x>0, x, alpha*(np.exp(x)-1)) if not deriv else np.where(x>0, 1, alpha*np.exp(x))

# Leaky ReLU; optional 'alpha' slope for x < 0 (default 1, i.e. identity).
@parser
def LeakyReLu(x, deriv=False, *args):
    alpha = 1 if 'alpha' not in args[0] else args[0]['alpha']
    return np.where(x>0, x, alpha*x) if not deriv else np.where(x>0, 1, alpha)

# Arctangent activation; derivative 1/(x^2 + 1).
@parser
def atan(x, deriv=False, *args):
    return np.arctan(x) if not deriv else 1/(x**2+1)

# Bent identity; smooth, near-linear activation.
@parser
def BentIdentity(x, deriv=False, *args):
    return x+(np.sqrt(x**2+1)-1)/2 if not deriv else x/(2*np.sqrt(x**2+1))+1
# Bipolar sigmoid f(x) = (1 - e^-x)/(1 + e^-x) = tanh(x/2), range (-1, 1).
# FIX: the denominator previously used exp(x) instead of exp(-x), which is
# not a sigmoid at all (it decays to 0 for large x). Derivative updated to
# match: f'(x) = (1 - f(x)^2)/2 = 2 e^-x / (1 + e^-x)^2.
@parser
def BipolarSigmoid(x, deriv=False, *args):
    return (1-np.exp(-x))/(1+np.exp(-x)) if not deriv else 2*np.exp(-x)/((1+np.exp(-x))**2)
# Gaussian bump e^(-x^2); derivative -2x e^(-x^2).
@parser
def gaussian(x, deriv=False, *args):
    return np.exp(-x**2) if not deriv else -2*x*np.exp(-x**2)

# tanh clipped to [-1, 1]; derivative 1 inside, 0 outside.
@parser
def hardtanh(x, deriv=False, *args):
    return np.where(np.abs(x)>1, np.sign(x), x) if not deriv else np.where(np.abs(x)>1, 0, 1)

# Inverse square root unit x/sqrt(1 + a x^2); optional 'alpha' (default 0.1).
@parser
def InverseSqrt(x, deriv=False, *args):
    alpha = 0.1 if 'alpha' not in args[0] else args[0]['alpha']
    return x/(np.sqrt(1+alpha*x**2)) if not deriv else 1/(1+alpha*x**2)**(3/2)

# LeCun's scaled tanh a*tanh(2x/3); optional 'alpha' (default 1.7159).
@parser
def LeCunTanh(x, deriv=False, *args):
    alpha = 1.7159 if 'alpha' not in args[0] else args[0]['alpha']
    return alpha*np.tanh(2*x/3) if not deriv else 2*alpha/(3*np.cosh(2*x/3)**2)
# Complementary log-log: f(x) = 1 - exp(-exp(x)); f'(x) = exp(x) * (1 - f(x)).
# FIX: f is monotonically increasing, so its derivative must be positive; the
# previous expression exp(x)*(LogLog(x)-1) had the sign flipped.
@parser
def LogLog(x, deriv=False, *args):
    return 1-np.exp(-np.exp(x)) if not deriv else np.exp(x)*(1-LogLog(x))
# log(sigmoid(x)); derivative 1 - sigmoid(x).
@parser
def LogSigmoid(x, deriv=False, *args):
    return np.log(sigmoid(x)) if not deriv else 1-sigmoid(x)
# Scaled ELU: f(x) = beta * (x if x > 0 else alpha * (exp(x) - 1)).
# FIX: misplaced parenthesis -- the negative branch was alpha*exp(x-1)
# instead of alpha*(exp(x)-1), making f(0) = alpha*beta/e != 0 and breaking
# continuity at 0 (compare ELU above).
@parser
def SELU(x, deriv=False, *args):
    alpha = 1.67326 if 'alpha' not in args[0] else args[0]['alpha']
    beta = 1.0507 if 'beta' not in args[0] else args[0]['beta']
    return beta*np.where(x>0, x, alpha*(np.exp(x)-1)) if not deriv else beta*np.where(x>0, 1, alpha*np.exp(x))
# Cardinal sine sin(x)/x with the removable singularity at 0 handled.
@parser
def sinc(x, deriv=False, *args):
    return np.where(x!=0, np.sin(x)/x, 1) if not deriv else np.where(x!=0, np.cos(x)/x-np.sin(x)/(x**2), 0)

# Swish x*sigmoid(x); derivative e^x(x + e^x + 1)/(e^x + 1)^2.
@parser
def swish(x, deriv=False, *args):
    return x/(1+np.exp(-x)) if not deriv else np.exp(x)*(x+np.exp(x)+1)/(np.exp(x)+1)**2

# Softsign x/(1 + |x|); derivative 1/(1 + |x|)^2.
@parser
def softsign(x, deriv=False, *args):
    return x/(1+np.abs(x)) if not deriv else 1/(1+np.abs(x))**2

# Softplus log(1 + e^x); derivative is the sigmoid.
@parser
def softplus(x, deriv=False, *args):
    return np.log(1+np.exp(x)) if not deriv else np.exp(x)/(1+np.exp(x))

# Softmax over the whole array. NOTE(review): unlike the others this
# deliberately takes no deriv branch -- the wrapper's deriv flag is swallowed
# by *args and ignored; confirm callers never request its derivative here.
@parser
def softmax(x, *args):
    return np.exp(x)/np.sum(np.exp(x))
| 2.75 | 3 |
manage.py | PapaCharlie/SteamReviews | 0 | 12768572 | import nltk
import os
from flask_script import Manager
from app import app
from app.dynamodb.utils import create_dynamo_table
from app.models.review import Review
from app.models.game import Game
from app.models.tag import Tag
manager = Manager(app)
@manager.command
def create_tables():
    """Create the DynamoDB tables backing reviews, tags and games."""
    create_dynamo_table(Review)
    create_dynamo_table(Tag)
    create_dynamo_table(Game)

@manager.command
def get_punkt():
    """Download the NLTK 'punkt' sentence tokenizer models."""
    nltk.download("punkt")

if __name__ == "__main__":
    manager.run()
| 1.875 | 2 |
sumo/tools/sumolib/route.py | iltempe/osmosi | 0 | 12768573 | #!/usr/bin/env python
"""
@file route.py
@author <NAME>
@date 2013-10-23
@version $Id$
Route helper functions.
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2009-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
import os
import sys
SUMO_HOME = os.environ.get('SUMO_HOME',
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
sys.path.append(os.path.join(SUMO_HOME, 'tools'))
from sumolib.miscutils import euclidean
def _getMinPath(paths):
minDist = 1e400
minPath = None
for path, dist in paths.iteritems():
if dist < minDist:
minPath = path
minDist = dist
return minPath
def mapTrace(trace, net, delta, verbose=False):
    """
    matching a list of 2D positions to consecutive edges in a network

    :param trace: sequence of (x, y) positions
    :param net: sumolib network providing getNeighboringEdges
    :param delta: search radius around each position
    :param verbose: print progress/diagnostic messages
    :return: list of edge IDs forming the matched route
    """
    result = []
    paths = {}
    if verbose:
        print("mapping trace with %s points" % len(trace))
    for pos in trace:
        newPaths = {}
        candidates = net.getNeighboringEdges(pos[0], pos[1], delta)
        if len(candidates) == 0 and verbose:
            print("Found no candidate edges for %s,%s" % pos)
        for edge, d in candidates:
            if paths:
                minDist = 1e400
                minPath = None
                # dict.items() replaces the Python-2-only iteritems().
                for path, dist in paths.items():
                    if dist < minDist:
                        if edge == path[-1]:
                            # Still on the same edge: no extension needed.
                            minPath = path
                            minDist = dist
                        elif edge in path[-1].getOutgoing():
                            # Directly connected edge: extend without penalty.
                            minPath = path + (edge,)
                            minDist = dist
                        else:
                            # Disconnected jump: penalise by the gap length.
                            minPath = path + (edge,)
                            minDist = dist + euclidean(
                                path[-1].getToNode().getCoord(),
                                edge.getFromNode().getCoord())
                if minPath:
                    newPaths[minPath] = minDist + d * d
            else:
                newPaths[(edge,)] = d * d
        if not newPaths:
            # No candidate matched this point: flush the best path so far.
            if paths:
                result += [e.getID() for e in _getMinPath(paths)]
        paths = newPaths
    if paths:
        return result + [e.getID() for e in _getMinPath(paths)]
    return result
| 2.75 | 3 |
app/db/models.py | aarangop/floraqua-backend | 0 | 12768574 | <filename>app/db/models.py
from typing import Optional
from bson import ObjectId
from pydantic import BaseModel, Field
class PyObjectId(ObjectId):
    """BSON ObjectId usable as a pydantic field type (validated, shown as string)."""

    @classmethod
    def __get_validators__(cls):
        yield cls.validate

    @classmethod
    def validate(cls, v):
        # Accept anything bson considers a valid ObjectId representation.
        if not ObjectId.is_valid(v):
            raise ValueError("Invalid objectid")
        return ObjectId(v)

    @classmethod
    def __modify_schema__(cls, field_schema):
        # Render as a plain string in the generated OpenAPI/JSON schema.
        field_schema.update(type="string")
class VirtualNode(BaseModel):
    """A virtual plant-monitoring node stored in MongoDB."""

    # Mongo's _id, auto-generated when not supplied.
    id: Optional[PyObjectId] = Field(default_factory=PyObjectId, alias="_id")
    name: str = Field(...)
    plant: str = Field(...)
    # Desired soil moisture as a percentage in (0, 100].
    target_moisture: int = Field(..., gt=0, le=100)

    class Config:
        allow_population_by_field_name = True
        arbitrary_types_allowed = True
        json_encoders = {ObjectId: str}
class UpdateFloraquaVirtualNode(BaseModel):
    """Partial-update payload for a virtual node; every field is optional."""

    name: Optional[str]
    plant: Optional[str]
    # Was Optional[str]; the stored field (VirtualNode.target_moisture) is an
    # int, so the update payload should accept/validate ints as well.
    target_moisture: Optional[int]

    class Config:
        arbitrary_types_allowed = True
        json_encoders = {ObjectId: str}
| 2.4375 | 2 |
loremipsum.py | connordelacruz/python-loremipsum | 2 | 12768575 | <gh_stars>1-10
import argparse
from urllib import request, error
import pyperclip
URL = 'https://loripsum.net/api/'
class ParagraphLength():
    """Constants for the ``paragraph_length`` parameter in generate()"""
    #: List of values for paragraph lengths. Used internally.
    OPTIONS = [
        'short',
        'medium',
        'long',
        'verylong',
    ]

    SHORT = 0
    MEDIUM = 1
    LONG = 2
    VERY_LONG = 3

    @classmethod
    def get_option(cls, index):
        """Map a length constant (or its string value) to the API string.

        :param index: one of the integer constants above, or an OPTIONS string
        :return: the corresponding loripsum.net string, or None when the value
                 is neither a valid index into OPTIONS nor one of its strings
        """
        if isinstance(index, int):
            return cls.OPTIONS[index] if index < len(cls.OPTIONS) else None
        if isinstance(index, str) and index in cls.OPTIONS:
            return index
        return None
#: Valid keys for the ``html_options`` parameter of generate(); any other
#: value passed by a caller is silently ignored.
HTML_OPTIONS = [
    'decorate',  # Add bold, italic and marked text.
    'link',  # Add links.
    'ul',  # Add unordered lists.
    'ol',  # Add numbered lists.
    'dl',  # Add description lists.
    'bq',  # Add blockquotes.
    'code',  # Add code samples.
    'headers',  # Add headers.
]
def _request_url_string(request_args):
    """Build the loripsum.net request URL by appending '<arg>/' per argument."""
    return URL + ''.join('{}/'.format(arg) for arg in request_args)
def _generate(request_url):
    """Fetch *request_url* and return the response body decoded as UTF-8."""
    placeholder_text = request.urlopen(request_url).read().decode('utf8')
    return placeholder_text
def generate(paragraph_count=None, paragraph_length=None, allcaps=False,
             prude=False, plaintext=True, html_options=None,
             trailing_newlines=False):
    """Generate Lorem Ipsum placeholder text using the https://loripsum.net API.

    Further documentation of parameters can be found at `loripsum.net <https://loripsum.net>`_

    :param paragraph_count: (Optional) The number of paragraphs to generate. If
        unspecified, API defaults to 4
    :param paragraph_length: (Optional) The average length of a paragraph. Possible
        values are declared as attributes in ``loremipsum.ParagraphLength``
        (``SHORT``, ``MEDIUM``, ``LONG``, ``VERY_LONG``). If unspecified, API
        defaults to 'long'
    :param allcaps: (Default = False) Use ALL CAPS
    :param prude: (Default = False) Prude version: censor a few latin words
        ('sex', 'homo', ...) that might read as offensive in English
    :param plaintext: (Default = True) Return plain text, no HTML
    :param html_options: (Default = None) List of HTML options to include in the
        request; ignored when plaintext is True. Valid values are the entries
        of ``HTML_OPTIONS`` ('decorate', 'link', 'ul', 'ol', 'dl', 'bq',
        'code', 'headers'); invalid entries are silently dropped
    :param trailing_newlines: (Default = False) If False, strip trailing new lines
        in generated text. If True, leave trailing new lines in
    :return: Result of querying the loripsum.net API with the specified options
    """
    # The API is purely path based, so options are collected in order and
    # joined into the URL path.
    request_args = []
    # Normalize constants/strings to the API's string value (None if invalid).
    paragraph_length = ParagraphLength.get_option(paragraph_length)
    if paragraph_count is not None:
        request_args.append(paragraph_count)
    if paragraph_length:
        request_args.append(paragraph_length)
    if allcaps:
        request_args.append('allcaps')
    if prude:
        request_args.append('prude')
    if plaintext:
        request_args.append('plaintext')
    # If not plaintext and html_options is specified, add those args as well
    elif html_options is not None:
        # Ignore invalid options
        valid_html_options = [
            option for option in html_options if option in HTML_OPTIONS
        ]
        request_args.extend(valid_html_options)
    request_url = _request_url_string(request_args)
    placeholder_text = _generate(request_url)
    return placeholder_text if trailing_newlines else placeholder_text.rstrip()
# Command Line Functions
def _parser(description=None):
    """Return the ArgumentParser shared by the ``main`` and ``copy`` entry points.

    :param description: program description shown in --help
    :return: configured argparse.ArgumentParser
    """
    parser = argparse.ArgumentParser(
        description=description,
        epilog='For more information, visit <https://connordelacruz.com/py-loremipsum/>'
    )
    parser.add_argument(
        'paragraph_count', type=int, nargs='?', default=1,
        help='(Default: 1) The number of paragraphs to generate'
    )
    parser.add_argument(
        '-l', '--length', dest='paragraph_length', type=str.lower, choices=ParagraphLength.OPTIONS,
        help='Specify the average length of a paragraph. API defaults to "long"'
    )
    parser.add_argument(
        '-C', '--allcaps', action='store_true', default=False,
        help='Use ALL CAPS'
    )
    parser.add_argument(
        '-p', '--prude', action='store_true', default=False,
        help='Omit Latin words that may be inappropriate in English'
    )
    # TODO: take html options? (using subparser or append w/ default=None and nargs='*'?)
    parser.add_argument(
        '-H', '--html', dest='plaintext', action='store_false', default=True,
        help='Generate HTML instead of plain text'
    )
    parser.add_argument(
        '-n', '--trailing-newlines', action='store_true', default=False,
        help='Keep trailing new lines'
    )
    return parser
def main():
    """Prints generated text using parsed args"""
    description = 'Generate "Lorem ipsum" text'
    # Parser dest names line up with generate()'s keyword parameters.
    args = _parser(description).parse_args()
    print(generate(**vars(args)))

def copy():
    """Copies generated text using parsed args to clipboard"""
    description = 'Generate "Lorem ipsum" text and copy to clipboard'
    args = _parser(description).parse_args()
    text = generate(**vars(args))
    try:
        pyperclip.copy(text)
    except pyperclip.PyperclipException as e:
        # No clipboard mechanism available: fall back to printing the text.
        print(str(e), 'Generated text:\n', text, sep='\n')
    else:
        print('Generated text copied to clipboard.')

if __name__ == '__main__':
    main()
| 3.515625 | 4 |
Pyrado/pyrado/environments/rcspysim/base.py | jacarvalho/SimuRLacra | 0 | 12768576 | import numpy as np
from abc import abstractmethod
from init_args_serializer import Serializable
from rcsenv import RcsSimEnv, JointLimitException
import pyrado
from pyrado.environments.sim_base import SimEnv
from pyrado.spaces.base import Space
from pyrado.spaces.box import BoxSpace
from pyrado.spaces.empty import EmptySpace
from pyrado.tasks.base import Task
from pyrado.utils.data_types import RenderMode
def to_pyrado_space(space) -> [BoxSpace, EmptySpace]:
    """
    Convert the box space implementation from RcsPySim to the one of Pyrado.

    :param space: a space from RcsPySim
    :return: a Pyrado `BoxSpace` or an Pyrado`EmptySpace` if `None` was given
    """
    if space is None:
        # NOTE(review): this returns the EmptySpace class itself, not an
        # instance -- callers appear to rely on that; confirm before changing.
        return EmptySpace
    return BoxSpace(space.min, space.max, labels=space.names)
class RcsSim(SimEnv, Serializable):
""" Base class for RcsPySim environments. Uses Serializable to facilitate proper serialization. """
def __init__(self,
envType: str,
task_args: dict,
dt: float = 0.01,
max_steps: int = pyrado.inf,
init_state: np.ndarray = None,
checkJointLimits: bool = False,
joint_limit_penalty: float = -1e3,
**kwargs):
"""
Constructor
.. note::
The joint type (i.e. position or torque control) is set in the config-xml file in Rcs.
:param envType: environment type name as defined on the C++ side
:param task_args: arguments for the task construction
:param dt: integration step size in seconds
:param max_steps: max number of simulation time steps
:param domain_param: initial domain param values
:param init_state: initial state sampler can be a callable or one fixed state
:param checkJointLimits: flags if the joint limits should be ignored or not passed to the C++ constructor
:param joint_limit_penalty: cost returned on termination due to joint limits. This is a different from the
state bounds since `RcsPySim` return an error when the joint limits are violated.
:param kwargs: keyword arguments which are available for `RcsSim` on the C++ side. These arguments will not
be stored in the environment object, thus are saved e.g. when pickled.
"""
Serializable._init(self, locals())
# Initialize basic variables
super().__init__(dt, max_steps)
self._check_joint_limits = checkJointLimits
# Create Rcs-based implementation (RcsSimEnv comes from the pybind11 module)
self._impl = RcsSimEnv(
dt=dt,
envType=envType,
checkJointLimits=self._check_joint_limits,
**kwargs
)
# Setup the initial domain parameters
self._domain_param = self._unadapt_domain_param(self._impl.domainParam)
if joint_limit_penalty > 0:
raise pyrado.ValueErr(given=joint_limit_penalty, le_constraint='0')
self._joint_limit_penalty = joint_limit_penalty
# Initial init state space is taken from C++
self._init_space = to_pyrado_space(self._impl.initStateSpace)
# By default, the state space is a subset of the observation space. Set this to customize in subclass.
self.state_mask = None
# Dummy initialization, must be set by the derived classes
self.init_state = None
self.task_args = task_args
self._task = self._create_task(self.task_args)
@property
def state_space(self) -> Space:
""" Derives the state space from the observation space using _state_from_obs or state_mask. """
obs_space = self.obs_space
# Check if _state_from_obs was overridden
if self._state_from_obs.__func__ != RcsSim._state_from_obs:
return BoxSpace(self._state_from_obs(obs_space.bound_lo), self._state_from_obs(obs_space.bound_up), None)
# Check if there is a state mask
if self.state_mask is not None:
return obs_space.subspace(self.state_mask)
# Identical to obs space
return obs_space
@property
def obs_space(self) -> Space:
return to_pyrado_space(self._impl.observationSpace)
@property
def init_space(self) -> Space:
return self._init_space
@init_space.setter
def init_space(self, space: Space):
assert to_pyrado_space(self._impl.initStateSpace).shape == space.shape
self._init_space = space
@property
def act_space(self) -> Space:
return to_pyrado_space(self._impl.actionSpace)
@property
def task(self) -> Task:
return self._task
@abstractmethod
def _create_task(self, task_args: dict) -> Task:
# Needs to implemented by subclasses
raise NotImplementedError
@property
def domain_param(self) -> dict:
return self._unadapt_domain_param(self._impl.domainParam)
@domain_param.setter
def domain_param(self, param: dict):
if not isinstance(param, dict):
raise pyrado.TypeErr(given=param, expected_type=dict)
# Update the internal parameters. The New domain parameters will be applied on reset().
self._domain_param.update(param)
# Update task
self._task = self._create_task(self.task_args)
@classmethod
def get_nominal_domain_param(cls):
"""
Get the nominal a.k.a. default domain parameters.
.. note::
It is highly recommended to have the same values as in the associated physics config file (p<NAME>.xml),
since the nominal domain parameters are not set explicitly from Pyrado (only when randomizing).
"""
raise NotImplementedError
def _state_from_obs(self, obs: np.ndarray) -> np.ndarray:
"""
Retrieve the system state from the observation. In most cases, the system state is a part of the observation.
This function is to be used when the observations include additional information.
The default implementation is based off `self.state_mask` which is set in sub-classes of `RcsSim`.
:param obs: observation from the environment
:return: state of the environment
"""
if self.state_mask is not None:
return obs[self.state_mask]
return obs.copy()
def _adapt_domain_param(self, params: dict) -> dict:
"""
Changes the domain parameters before passing them to the Rcs simulation.
One use case is for example the rolling friction coefficient which is usually given unit-less but the Vortex
physics engine expects it to be multiplied with the body's curvature radius.
:param params: domain parameters to adapt
:return: adapted parameters
"""
return params
def _unadapt_domain_param(self, params: dict) -> dict:
"""
Changes the domain parameters coming from to the Rcs simulation.
.. note::
This function is called from the constructor.
:param params: domain parameters to revert the previously done adaptation
:return: unadapted parameters
"""
return params
def _get_state(self, state_dict: dict):
state_dict['domain_param'] = self.domain_param
state_dict['init_state'] = self.init_state
def _set_state(self, state_dict: dict, copying: bool = False):
self.domain_param = state_dict['domain_param']
self.init_state = state_dict['init_state']
def _disturbance_generator(self) -> (np.ndarray, None):
""" Provide an artificial disturbance. For example a force on a body in the physics simulation. """
return None
def reset(self, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:
# Reset time
self._curr_step = 0
# Reset the state
if init_state is None:
# Sample from the init state space
init_state = self._init_space.sample_uniform()
else:
if not init_state.shape == self._init_space.shape:
raise pyrado.ShapeErr(given=init_state, expected_match=self._init_space)
# Reset the task
self._task.reset(env_spec=self.spec)
# Use stored domain parameters if not overwritten
if domain_param is None:
domain_param = self._domain_param
# Forward to C++ implementation
obs = self._impl.reset(domainParam=self._adapt_domain_param(domain_param), initState=init_state)
self.state = self._state_from_obs(obs)
return obs
def step(self, act: np.ndarray) -> tuple:
# Current reward depending on the state (before step) and the (unlimited) action
remaining_steps = self._max_steps - (self._curr_step + 1) if self._max_steps is not pyrado.inf else 0
self._curr_rew = self._task.step_rew(self.state, act, remaining_steps)
# Apply actuator limits
act = self.limit_act(act)
# Get the disturbance to be applied on the Rcs side
disturbance = self._disturbance_generator()
# Dynamics are calculated in the Rcs simulation
try:
obs = self._impl.step(act, disturbance)
except JointLimitException:
# Joint limits exceeded! Return (obs, rew, done, info) directly after this failure.
return self._impl.lastObservation, self._joint_limit_penalty, True, dict(t=self._curr_step*self._dt)
self.state = self._state_from_obs(obs) # only for the Python side
info = dict(t=self._curr_step*self._dt)
self._curr_step += 1
# Check if the task or the environment is done
done = self._task.is_done(self.state)
if self._curr_step >= self._max_steps:
done = True
if done:
# Add final reward if done
self._curr_rew += self._task.final_rew(self.state, remaining_steps)
return obs, self._curr_rew, done, info
def render(self, mode: RenderMode = RenderMode(text=True), render_step: int = 1):
if self._curr_step%render_step == 0:
# Call base class
super().render(mode)
# Forward to Rcs GUI
if mode.video:
self._impl.render()
def save_config_xml(self, fileName: str):
"""
Save environment configuration as xml file for use on the C++ side.
:param fileName: output file name
"""
self._impl.saveConfigXML(fileName)
    def get_body_position(self, bodyName: str, refFrameName: str, refBodyName: str) -> np.ndarray:
        """
        Get the position of a body in the simulator's config graph.
        This function uses code copied from `Rcs` to transform the position
        depending on a reference frame and/or reference body.

        :param bodyName: name of the body in the graph
        :param refFrameName: name of the reference frame, pass '' to use world coordinates
        :param refBodyName: name of the reference body, pass '' to use world coordinates
        :return: x, y, z position in reference frame coordinates relative to the reference body
        """
        return self._impl.getBodyPosition(bodyName, refFrameName, refBodyName)
    def get_body_extents(self, bodyName: str, shapeIdx: int = 0) -> np.ndarray:
        """
        Get the dimensions of a body in the simulator's config graph.
        This function uses code copied from `Rcs`.

        .. note::
            Depending on the kind of shape (e.g. box, sphere, torus, etc.)
            the extents mean different things.

        :param bodyName: name of the body in the graph
        :param shapeIdx: index of the shape in the `Body` node, defaults to the first shape of the body
        :return: extents of the selected shape (interpretation depends on the shape type)
        """
        return self._impl.getBodyExtents(bodyName, shapeIdx)
| 2.34375 | 2 |
customers/services/sms_notification_service.py | City-of-Helsinki/berth-reservations | 3 | 12768577 | import requests
from django.conf import settings
from django_ilmoitin.models import NotificationTemplate
from django_ilmoitin.utils import render_notification_template
# Keys used to look up the notification service settings in the provider config.
NOTIFICATION_SERVICE_API_URL = "NOTIFICATION_SERVICE_API_URL"
NOTIFICATION_SERVICE_SENDER_NAME = "NOTIFICATION_SERVICE_SENDER_NAME"
NOTIFICATION_SERVICE_TOKEN = "NOTIFICATION_SERVICE_TOKEN"

# Language used for template rendering when the caller does not specify one.
DEFAULT_LANGUAGE = settings.LANGUAGE_CODE
class SMSNotificationService:
    """
    Client for sending SMS messages through the notification service.

    The documentation for the API can be found on the GitHub repo:
    https://github.com/City-of-Helsinki/notification-service-api
    """

    def __init__(self, **kwargs):
        """
        :param config: dict of service settings (API URL, sender name, token)
        :param token: optional explicit token; overrides the token in ``config``
        """
        if "config" in kwargs:
            self.config = kwargs.get("config")
        self.api_url = self.config.get(NOTIFICATION_SERVICE_API_URL)
        self.sender_name = self.config.get(NOTIFICATION_SERVICE_SENDER_NAME)
        self.token = kwargs.get("token") or self.config.get(NOTIFICATION_SERVICE_TOKEN)
        # NOTE(review): assert is stripped under -O; a missing token would then
        # only surface later as an authorization failure -- consider raising.
        assert self.token

    @staticmethod
    def get_config_template():
        """Return the expected shape (key -> type) of the service config."""
        return {
            NOTIFICATION_SERVICE_API_URL: str,
            NOTIFICATION_SERVICE_SENDER_NAME: str,
            NOTIFICATION_SERVICE_TOKEN: str,
        }

    def send(
        self,
        notification_type: str,
        context: dict,
        phone_number: str,
        language=DEFAULT_LANGUAGE,
    ):
        """Render the notification template in ``language`` and send it as SMS."""
        template = NotificationTemplate.objects.get(type=notification_type)
        message = render_notification_template(template, context, language).body_text
        return self.send_plain_text(phone_number, message)

    def send_plain_text(self, phone_number: str, message: str):
        """Send ``message`` to a single recipient.

        A single recipient is just a one-element batch; delegating to
        ``send_batch`` keeps the request payload construction in one place
        (previously duplicated here).
        """
        return self.send_batch([phone_number], message)

    def send_batch(self, phone_numbers: list, message: str):
        """Send the same ``message`` to every number in ``phone_numbers``."""
        data = {
            "sender": self.sender_name,
            "to": [
                {"destination": phone_number, "format": "MOBILE"}
                for phone_number in phone_numbers
            ],
            "text": message,
        }
        return self._do_send(data)

    def _do_send(self, data):
        # Token auth as required by the notification service API.
        headers = {"Authorization": f"Token {self.token}"}
        return requests.post(f"{self.api_url}/message/send", json=data, headers=headers)
| 2.140625 | 2 |
cursors/coords.py | Jonas164/PSpaMM | 0 | 12768578 | <filename>cursors/coords.py<gh_stars>0
from collections import namedtuple
# The relationship between blocks and cells has become too complicated to
# safely bake into a coordinate system. The semantics has been moved to the
# cursor movement commands. A NewCoords object may represent a logical cell,
# a logical block start, or a physical block start depending on context.
# We are including a {relative|absolute} flag in order to reduce the number of methods.
# Base record: vertical offset, horizontal offset, and an absolute/relative flag.
C = namedtuple('C', 'down right absolute')
C.__new__.__defaults__ = (0, 0, False)


class Coords(C):
    """Immutable 2D coordinate with an {relative|absolute} flag.

    A Coords object may represent a logical cell, a logical block start, or a
    physical block start depending on context (see module comment above).
    """

    def copy(self):
        """Return a fresh Coords with the same fields."""
        return Coords(self.down, self.right, self.absolute)

    def __add__(self, other):
        # The sum is absolute if either operand is absolute.
        absolute = self.absolute | other.absolute
        return Coords(self.down + other.down, self.right + other.right, absolute)

    def __sub__(self, other):
        absolute = self.absolute != other.absolute  # TODO: What is the math behind this?
        return Coords(self.down - other.down, self.right - other.right, absolute)

    def __neg__(self):
        # Fixed: unary negation takes no second operand (the previous
        # ``__neg__(self, other)`` signature made ``-coords`` raise TypeError).
        return Coords(-self.down, -self.right, self.absolute)

    def __eq__(self, other):
        return (self.down == other.down
                and self.right == other.right
                and self.absolute == other.absolute)

    # Defining __eq__ implicitly sets __hash__ to None; restore the tuple hash
    # (consistent with __eq__ above) so Coords stays usable in sets/dict keys.
    __hash__ = C.__hash__

    def __repr__(self):
        absolute = ", absolute" if self.absolute else ""
        return "(d={},r={}{})".format(self.down, self.right, absolute)
src/pydap/server/devel_ssl.py | Juanlu001/pydap | 83 | 12768579 | <gh_stars>10-100
import multiprocessing
import time
import requests
import warnings
from werkzeug.serving import run_simple
from ..wsgi.ssf import ServerSideFunctions
from ..handlers.lib import BaseHandler
from .devel import DefaultDataset, LocalTestServer
def run_simple_server(application=BaseHandler(DefaultDataset),
                      port=8000, ssl_context=None):
    """Run a werkzeug dev server that stops on any request path ending in "shutdown".

    NOTE(review): the default ``application`` is a single BaseHandler built at
    import time and shared by every call -- confirm this is intentional.
    """
    application = ServerSideFunctions(application)

    def app_check_for_shutdown(environ, start_response):
        # Requests whose path ends in "shutdown" stop the server and receive a
        # plain-text acknowledgement; everything else goes to the wrapped app.
        if environ['PATH_INFO'].endswith('shutdown'):
            shutdown_server(environ)
            return shutdown_application(environ, start_response)
        else:
            return application(environ, start_response)

    run_simple('0.0.0.0', port,
               app_check_for_shutdown,
               ssl_context=ssl_context)
def shutdown_server(environ):
    """Invoke werkzeug's shutdown hook stored in a WSGI environ."""
    try:
        do_shutdown = environ['werkzeug.server.shutdown']
    except KeyError:
        raise RuntimeError('Not running the development server') from None
    do_shutdown()
def shutdown_application(environ, start_response):
    """Minimal WSGI app that returns a plain-text shutdown notice."""
    headers = [('Content-Type', 'text/plain')]
    start_response('200 OK', headers)
    return [b'Server is shutting down.']
class LocalTestServerSSL(LocalTestServer):
    """
    Simple server instance that can be used to test pydap.
    Relies on multiprocessing and is usually slow (it has to
    start and shutdown which typically takes ~2 sec).

    Usage:
    >>> import numpy as np
    >>> from pydap.handlers.lib import BaseHandler
    >>> from pydap.model import DatasetType, BaseType
    >>> DefaultDataset = DatasetType("Default")
    >>> DefaultDataset["byte"] = BaseType("byte", np.arange(5, dtype="B"))
    >>> DefaultDataset["string"] = BaseType("string", np.array(["one", "two"]))
    >>> DefaultDataset["short"] = BaseType("short", np.array(1, dtype="h"))
    >>> DefaultDataset
    <DatasetType with children 'byte', 'string', 'short'>
    >>> application = BaseHandler(DefaultDataset)
    >>> from pydap.client import open_url

    As an instance:
    >>> with LocalTestServerSSL(application) as server:
    ...     dataset = open_url("http://localhost:%s" % server.port)
    ...     dataset
    ...     print(dataset['byte'].data[:])
    ...     print(dataset['string'].data[:])
    ...     print(dataset['short'].data[:])
    <DatasetType with children 'byte', 'string', 'short'>
    [0 1 2 3 4]
    [b'one' b'two']
    1

    Or by managing connection and deconnection:
    >>> server = LocalTestServerSSL(application)
    >>> server.start()
    >>> dataset = open_url("http://localhost:%s" % server.port)
    >>> dataset
    <DatasetType with children 'byte', 'string', 'short'>
    >>> print(dataset['byte'].data[:])
    [0 1 2 3 4]
    >>> server.shutdown()
    """

    def __init__(self, application=BaseHandler(DefaultDataset),
                 port=None, wait=0.5, polling=1e-2,
                 as_process=False, ssl_context=None):
        # Delegate everything but the ssl_context to the plain-HTTP base class.
        super(LocalTestServerSSL, self).__init__(application, port, wait,
                                                 polling, as_process)
        self._ssl_context = ssl_context

    @property
    def url(self):
        # The scheme follows whether an SSL context was supplied.
        if self._ssl_context is None:
            return "http://{0}:{1}/".format(self._address, self.port)
        else:
            return "https://{0}:{1}/".format(self._address, self.port)

    def start(self):
        # Start a simple WSGI server:
        # The server runs in a child process so tests can talk to it over HTTP.
        self._server = (multiprocessing
                        .Process(target=run_simple_server,
                                 args=(self.application,
                                       self.port,
                                       self._ssl_context)))
        self._server.start()

        # Wait a little while for the server to start:
        self.poll_server()

    def shutdown(self):
        # Shutdown the server:
        # A request to the magic "/shutdown" path triggers werkzeug's shutdown
        # hook inside the child process (see run_simple_server above).
        url = "http://0.0.0.0:%s/shutdown"
        if self._ssl_context is not None:
            url = url.replace('http', 'https')
        with warnings.catch_warnings():
            # Certificate verification is disabled for the ad-hoc test cert.
            warnings.simplefilter('ignore')
            requests.head(url % self.port, verify=False)
        time.sleep(self._wait)
        self._server.join()
        del(self._server)
| 2.171875 | 2 |
security/migrations/0001_initial.py | CASDON-MYSTERY/studentapp | 0 | 12768580 | # Generated by Django 3.1.5 on 2021-09-04 23:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial (auto-generated) migration: creates the Security_Alert and
    Security_alert_type models."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Security_Alert',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('location', models.CharField(max_length=255, verbose_name='location')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='created')),
                ('edited', models.DateTimeField(auto_now=True, verbose_name='edited')),
                ('description', models.CharField(max_length=255, verbose_name='description')),
                ('confirmation', models.FloatField(default=0.0, verbose_name='confirmation')),
                ('image', models.ImageField(null=True, upload_to='')),
            ],
            options={
                'verbose_name': 'Security_Alert',
                'verbose_name_plural': 'Security_Alerts',
                'db_table': '',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='Security_alert_type',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=150)),
            ],
            options={
                'verbose_name': 'Security_alert_type',
                'verbose_name_plural': 'Security_alert_types',
                'db_table': '',
                'managed': True,
            },
        ),
    ]
| 1.765625 | 2 |
bin/export_guest.py | Kairiw/karesansui | 0 | 12768581 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import os
import os.path
import sys
import re
import signal
import logging
from optparse import OptionParser
from ksscommand import KssCommand, KssCommandException, KssCommandOptException
import __cmd__
try:
import karesansui
from karesansui import __version__
from karesansui.lib.virt.virt import KaresansuiVirtConnection, KaresansuiVirtException
from karesansui.lib.utils import load_locale, preprint_r, base64_decode
from karesansui.db.access.machine import findby1uniquekey
from karesansui.lib.utils import string_from_uuid as StrFromUUID
from karesansui.lib.utils import generate_uuid as GenUUID
from karesansui.lib.virt.snapshot import KaresansuiVirtSnapshot
from karesansui.db.access.snapshot import findbyname_guestby1 as s_findbyname_guestby1
except ImportError as e:
print("[Error] some packages not found. - %s" % e, file=sys.stderr)
sys.exit(1)
# Install the gettext translation function used for user-facing messages.
_ = load_locale()

usage = '%prog [options]'
def getopts():
    """Parse command-line options; returns (options, positional args)."""
    optp = OptionParser(usage=usage, version=__version__)
    optp.add_option('-n', '--name', dest='name', help=_('Domain Name'))
    optp.add_option('-p', '--pool', dest='pool', help=_('Storage pool name'))
    #optp.add_option('-d', '--dir', dest='dir', help=_('Directory name'))
    optp.add_option('-t', '--title',dest='title',default='', help=_('Export title'))
    optp.add_option('-q', '--quiet',dest='verbose', action="store_false", default=True, help=_("don't print status messages"))
    return optp.parse_args()
def chkopts(opts):
    """Validate parsed command-line options.

    :raises KssCommandOptException: if a required option is missing or
        contains characters outside ``[a-zA-Z0-9./_:-]``.
    """
    # Raw string avoids the invalid "\." escape warning; the dot needs no
    # escaping inside a character class anyway.
    reg = re.compile(r"[^a-zA-Z0-9./_:-]")
    if opts.name:
        if reg.search(opts.name):
            raise KssCommandOptException('ERROR: Illegal option value. option=%s value=%s' % ('-n or --name', opts.name))
    else:
        raise KssCommandOptException('ERROR: %s option is required.' % '-n or --name')
    if opts.pool:
        if reg.search(opts.pool):
            raise KssCommandOptException('ERROR: Illegal option value. option=%s value=%s' % ('-p or --pool', opts.pool))
    else:
        raise KssCommandOptException('ERROR: %s option is required.' % '-p or --pool')
class ExportGuest(KssCommand):
    """Karesansui command: export a guest VM (image, DB metadata, icon, and
    snapshot information) into a directory-backed storage pool."""

    def __grab_stdout(self, flag):
        # Temporarily silence fd 1 (flag=True) or restore the saved stdout
        # (flag=False). NOTE(review): appears unused within this class --
        # confirm callers before removing.
        if flag:
            self.stdout = sys.stdout
            sys.stdout = os.fdopen(sys.stdout.fileno(), "w", 0)
            logf = open("/dev/null", "a")
            os.dup2(logf.fileno(), 1)
            logf.close()
        else:
            os.dup2(sys.stdout.fileno(), 1)
            sys.stdout = self.stdout
            del self.stdout

    def process(self):
        """Run the export: validate options, gather metadata, and call
        ``conn.export_guest``. Returns True on success; raises
        KssCommandException on any failure."""
        (opts, args) = getopts()
        chkopts(opts)
        self.up_progress(10)

        conn = KaresansuiVirtConnection(readonly=False)
        try:
            try:
                # The guest's OS image must live in a non-'dir' source pool.
                src_pool = conn.get_storage_pool_name_bydomain(opts.name, "os")
                if not src_pool:
                    raise KssCommandException("Source storage pool not found. domain=%s" % (opts.name))

                if conn.get_storage_pool_type(src_pool) == 'dir':
                    raise KssCommandException("Storage pool type 'dir' is not. domain=%s" % (opts.name))

                src_path = conn.get_storage_pool_targetpath(src_pool[0])
                self.domain_dir = "%s/%s" % (src_path, opts.name,)
                if os.path.isdir(self.domain_dir) is False:
                    raise KssCommandException(
                        'domain directory is not found or not directory. - %s' % (self.domain_dir))

                # Model
                # Collect the guest's database record into a plain dict.
                virt_uuid = conn.domname_to_uuid(opts.name)
                model = findby1uniquekey(self.kss_session, virt_uuid)
                if not model:
                    raise KssCommandException("Export data does not exist in the database.")

                database = {}
                database['attribute'] = model.attribute
                database['hypervisor'] = model.hypervisor
                database['icon'] = model.icon
                database['name'] = model.name
                database['notebook'] = {"title" : model.notebook.title,
                                        "value" : model.notebook.value,
                                        }
                tags = []
                for _tag in model.tags:
                    tags.append(_tag.name)
                database['tags'] = ",".join(tags)
                database['uniq_key'] = model.uniq_key

                # Snapshot
                # Gather name/title/value for every snapshot of this guest.
                snapshots = []
                kvs = KaresansuiVirtSnapshot(readonly=False)
                try:
                    guest_id = model.id
                    snapshot_list = kvs.listNames(opts.name)[opts.name]
                    if len(snapshot_list) > 0:
                        for snapshot in snapshot_list:
                            s_model = s_findbyname_guestby1(self.kss_session, snapshot, guest_id)
                            if s_model is not None:
                                name = s_model.name
                                title = s_model.notebook.title
                                value = s_model.notebook.value
                                snapshots.append({"name":name, "title":title, "value":value,})
                except:
                    # NOTE(review): bare except -- any failure (including
                    # programming errors) is reported as a snapshot-fetch error.
                    raise KssCommandException("Cannot fetch the information of snapshots correctly.")
                kvs.finish()

                # Pool
                # Resolve the target directory from a 'dir'-type storage pool.
                target_dir = ""
                if opts.pool:
                    inactive_storage_pools = conn.list_inactive_storage_pool()
                    active_storage_pools = conn.list_active_storage_pool()
                    if not (opts.pool in active_storage_pools or opts.pool in inactive_storage_pools):
                        raise KssCommandException('Target storage pool does not exist. - pool=%s' % (opts.pool))

                    pool = conn.search_kvn_storage_pools(opts.pool)
                    storage_info = pool[0].get_info()
                    if storage_info["type"] == "dir" and storage_info["target"]["path"] != "":
                        target_dir = storage_info["target"]["path"]
                    else:
                        raise KssCommandException("Target storage pool type is not 'dir'. pool=%s" % (opts.pool))
                elif opts.dir:
                    # NOTE(review): the '-d/--dir' option is commented out in
                    # getopts(), so opts.dir does not exist; this branch is
                    # unreachable while chkopts() requires --pool -- confirm.
                    target_dir = opts.dir

                self.up_progress(10)

                # Progress reporting is optional; fall back silently if the
                # ProgressMeter module is unavailable.
                progresscb = None
                if opts.verbose:
                    try:
                        from karesansui.lib.progress import ProgressMeter
                        progresscb = ProgressMeter(command_object=self)
                    except:
                        pass
                else:
                    try:
                        from karesansui.lib.progress import ProgressMeter
                        progresscb = ProgressMeter(command_object=self,quiet=True)
                    except:
                        pass

                # Titles may be passed base64-encoded with a "b64:" prefix.
                if opts.title[0:4] == "b64:":
                    title = base64_decode(opts.title[4:])
                else:
                    title = opts.title

                uuid = StrFromUUID(GenUUID())
                conn.export_guest(uuid=uuid,
                                  name=opts.name,
                                  directory=target_dir,
                                  database=database,
                                  realicon=model.realicon(),
                                  title=title,
                                  snapshots=snapshots,
                                  progresscb=progresscb)

                self.up_progress(40)
                self.logger.info('Export guest completed. - pool=%s, uuid=%s' % (opts.pool, uuid))
                print(_('Export guest completed. - pool=%s, uuid=%s' % (opts.pool, uuid)), file=sys.stdout)
                return True
            except KaresansuiVirtException as e:
                raise KssCommandException('Failed to export guest. - %s to %s [%s]' \
                                          % (opts.name,target_dir, ''.join(e.args)))
            except KssCommandException:
                raise
            except:
                raise KssCommandException('Failed to export guest. - %s to %s' \
                                          % (opts.name,target_dir))
        finally:
            # Always release the libvirt connection.
            conn.close()
if __name__ == "__main__":
target = ExportGuest()
sys.exit(target.run())
| 1.375 | 1 |
TwitterScraperAPI/packages/__init__.py | vaskrneup/TwitterScraperAPI | 1 | 12768582 | from . import default_data
from . import extractor
| 1.101563 | 1 |
src/jbdl/rbdl/math/x_rotz.py | yz-mao/jbdl | 21 | 12768583 | import jax.numpy as jnp
from jax.api import jit
@jit
def x_rotz(theta):
    """Spatial (6x6) coordinate transform for a rotation about the z-axis.

    The matrix is block-diagonal: the same 3x3 z-rotation acts on both the
    angular (top-left) and linear (bottom-right) halves of a spatial vector.
    """
    c = jnp.cos(theta)
    s = jnp.sin(theta)
    rot = jnp.array([[c, s, 0.0],
                     [-s, c, 0.0],
                     [0.0, 0.0, 1.0]])
    # kron(I2, rot) places `rot` on both diagonal 3x3 blocks, zeros elsewhere.
    return jnp.kron(jnp.eye(2), rot)
if __name__ == "__main__":
import math
from jax import make_jaxpr
print(make_jaxpr(x_rotz)(math.pi))
print(x_rotz(math.pi))
| 2.875 | 3 |
src/pyexpressions/highlevel/PyScoped.py | Xiddoc/ComPy | 0 | 12768584 | <reponame>Xiddoc/ComPy
"""
Python object which manages its own private scope.
"""
from _ast import AST
from abc import ABCMeta
from typing import Optional
from src.pyexpressions.abstract.PyExpression import PyExpression
from src.scopes.Scope import Scope
from src.structures.TypeRenames import GENERIC_PYEXPR_TYPE
class PyScoped(PyExpression, metaclass=ABCMeta):
    """
    Python expression that owns a private scope.

    Function bodies, for example, keep their own variables: names from the
    enclosing scope remain visible inside, but names first bound inside the
    body do not leak back out to the exterior scope.
    """

    __scope: Scope

    def __init__(self, expression: AST, parent: Optional[GENERIC_PYEXPR_TYPE]):
        # Let the base expression initialize itself first.
        super().__init__(expression, parent)
        # Derive our scope from the closest enclosing one.
        self.update_from_nearest_scope()

    def update_from_nearest_scope(self) -> None:
        """
        Overwrites this scope with the nearest scope available.
        """
        # The head Module has no parent and therefore starts a fresh scope;
        # everything else inherits from its nearest enclosing scope.
        if self.get_parent() is None:
            fresh_scope = Scope()
        else:
            fresh_scope = Scope(self.get_nearest_scope())
        self.__scope = fresh_scope

    # noinspection PyUnusedFunction
    def get_scope(self) -> Scope:
        """
        Returns our Scope instance.
        """
        return self.__scope
| 2.546875 | 3 |
image-segmentation/custom-recipes/score_image/recipe.py | gbetegon88/dataiku-contrib | 93 | 12768585 | <filename>image-segmentation/custom-recipes/score_image/recipe.py<gh_stars>10-100
# -*- coding: utf-8 -*-
import dataiku
from dataiku.customrecipe import *
import os
import json
import skimage.io
import logging
from mrcnn import utils, visualize
import mrcnn.model as modellib
from coco import coco
from utils import jsonify
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, # avoid getting log from 3rd party module
format='image-segmentation plugin %(levelname)s - %(message)s')
# Checking whether pycocotools is installed, if not importing it
# Check that the code-env update installed pycocotools; fail fast otherwise.
# Fixed: the bare `except:` also swallowed unrelated errors (even
# KeyboardInterrupt); only a failed import should trigger this message.
try:
    import pycocotools
except ImportError as err:
    raise ValueError("""The Codenv update failed : could not import pycocotools,
    please run download-weights macro to update the codenv""") from err
##### Recipe Inputs #####
plugin_config = get_recipe_config()
logger.info(plugin_config)

# Local path to trained weights file
model_folder_input = get_input_names_for_role('model_folder')[0]
model_folder_path = dataiku.Folder(model_folder_input).get_path()
COCO_MODEL_PATH = os.path.join(model_folder_path,
                               "mask_rcnn_coco.h5")

# Folder holding the images to score.
image_folder_input = get_input_names_for_role('image_folder')[0]
image_folder = dataiku.Folder(image_folder_input)
IMAGE_DIR = image_folder.get_path()
to_score = image_folder.list_paths_in_partition(partition='')

# Output folder for the annotated (segmented) images.
image_folder_output = get_output_names_for_role('scored_folder')[0]
images_segmented = dataiku.Folder(image_folder_output)
SEGMENTED_IM_DIR = images_segmented.get_path()

# Output folder for the JSON detection details.
details_folder_name = get_output_names_for_role('details_folder')[0]
details_folder = dataiku.Folder(details_folder_name)
DETAILS_DIR = details_folder.get_path()

# Whether to also render and save annotated images.
output_images = get_recipe_config().get('save_image')
#### Recipe body ####
class InferenceConfig(coco.CocoConfig):
    """Mask R-CNN configuration for inference over the whole input folder."""
    # Set batch size to 1 since we'll be running inference on
    # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
    GPU_COUNT = 1
    # NOTE(review): despite the comment above, IMAGES_PER_GPU is set to the
    # total number of input images, so all images are scored in one batch --
    # confirm this fits in memory for large folders.
    IMAGES_PER_GPU = len(image_folder.list_paths_in_partition(partition=''))
config = InferenceConfig()

# Loading pre-trained model with weights
model = modellib.MaskRCNN(mode="inference", model_dir=model_folder_path, config=config)
model.load_weights(COCO_MODEL_PATH, by_name=True)

# The 80 COCO class labels, with the background class 'BG' at index 0.
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
               'bus', 'train', 'truck', 'boat', 'traffic light',
               'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
               'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
               'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
               'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
               'kite', 'baseball bat', 'baseball glove', 'skateboard',
               'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
               'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
               'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
               'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
               'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
               'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
               'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
               'teddy bear', 'hair drier', 'toothbrush']

# Read every input image into memory, then run detection on the whole batch
# (batch size matches IMAGES_PER_GPU above).
image_list = []
for im in to_score :
    image_path = IMAGE_DIR+im
    image = skimage.io.imread(image_path)
    image_list.append(image)

result = model.detect(image_list,verbose=1)
# Attach the source image path to each detection result.
for i, im in enumerate(result):
    result[i]["image_path"] = to_score[i]

# Convert numpy arrays in each result into JSON-serializable structures.
result_json = []
for res in result:
    result_json.append(jsonify(res))

#### Writing output to folders #####
with open(DETAILS_DIR + '/results.json', 'w') as outfile:
    json.dump(result_json,
              outfile)

# Optionally render each image with its masks/boxes into the scored folder.
if output_images :
    for ix, im_path in enumerate(to_score):
        r = result[ix]
        image = image_list[ix]
        im_name = im_path[:-4] + '_scored.png'
        visualize.save_instance(image, r['rois'],
                                r['masks'], r['class_ids'],
                                class_names, r['scores'],
                                path=SEGMENTED_IM_DIR, name=im_name
                                )
tests/test_handwrite.py | ReveRoyl/Handright-generator | 706 | 12768586 | <reponame>ReveRoyl/Handright-generator<filename>tests/test_handwrite.py
# coding: utf-8
import copy
import PIL.Image
import PIL.ImageDraw
from handright import *
from tests.util import *
BACKGROUND_COLOR = "white"
WIDTH = 32
HEIGHT = 32
SIZE = (WIDTH, HEIGHT)
SEED = "Handright"
def get_default_template():
    """Build the small fixed-size template used by the tests below."""
    canvas = PIL.Image.new(mode="RGB", size=SIZE, color=BACKGROUND_COLOR)
    return Template(
        background=canvas,
        left_margin=3,
        top_margin=6,
        right_margin=3,
        bottom_margin=6,
        line_spacing=2,
        font=get_default_font(2),
        font_size_sigma=0,
    )
def test_side_effect():
    """handwrite() must not mutate the text or the template it is given."""
    text = get_short_text()
    template = get_default_template()
    template_clone = copy.copy(template)
    handwrite(text, template)
    assert text == get_short_text()
    assert template == template_clone
def test_null_text():
    """Empty text must yield no pages at all."""
    assert list(handwrite("", get_default_template())) == []
def test_blank_text():
    """Whitespace-only text yields a page identical to the blank background."""
    temp = get_default_template()
    images = handwrite(" ", temp)
    assert temp.get_background() == next(images)
def test_seed():
    """The same seed must produce identical output across repeated calls."""
    text = get_long_text()
    template = get_default_template()
    for seed in (0, "Handright"):
        ims1 = handwrite(text, template, seed=seed)
        ims2 = handwrite(text, template, seed=seed)
        assert list(ims1) == list(ims2)
def test_line_and_page_breaks():
    """Text that exactly fills the writable area must stay on a single page."""
    text = "哈" * 4
    template = Template(
        background=PIL.Image.new(mode="L", size=(30, 30), color="white"),
        font=get_default_font(12),
        left_margin=3,
        right_margin=3,
        top_margin=3,
        bottom_margin=3,
        word_spacing_sigma=0,
        font_size_sigma=0,
    )
    images = handwrite(text, template)
    assert len(list(images)) == 1
def test_line_separators():
    """All newline conventions (LF, CR, CRLF, and mixes) must be equivalent."""
    text1 = "a\nb\nc\n"
    text2 = "a\rb\rc\r"
    text3 = "a\r\nb\r\nc\r\n"
    text4 = "a\rb\nc\r\n"
    text5 = "a\rb\nc\r"
    text6 = "a\r\nb\rc\r"
    text7 = "a\r\nb\nc\n"
    template = get_default_template()
    assert (list(handwrite(text1, template, seed=SEED))
            == list(handwrite(text2, template, seed=SEED))
            == list(handwrite(text3, template, seed=SEED))
            == list(handwrite(text4, template, seed=SEED))
            == list(handwrite(text5, template, seed=SEED))
            == list(handwrite(text6, template, seed=SEED))
            == list(handwrite(text7, template, seed=SEED)))
| 2.59375 | 3 |
slump.py | szwieback/FVEH | 0 | 12768587 | '''
Created on 19 Nov 2017
@author: Simon
'''
from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm
import numpy as np
import datetime
import pickle
from scipy.interpolate import interp1d
from boundary import BoundaryConditionCollection1D
from diagnostic import DiagnosticModule
class ThawSlump(object): # 1D
    """1D thaw-slump model: bundles the mesh, state variables, diagnostics,
    forcing, boundary conditions, and output bookkeeping.

    Internal time is seconds since ``self._timeref`` (a datetime, usually set
    by the forcing module).
    """

    # time_initial only works when forcing is provided
    def __init__(
            self, tsmesh, time_step_module=None, output_step_module=None,
            forcing_module=None, thermal_properties=None, time_initial=None):
        self.mesh = tsmesh
        self.variables = {}
        self.variables_store = []
        self.diagnostic_modules = {}
        self.diagnostic_update_order = []
        self.eq = None
        self.boundary_condition_collection = None
        self._time = Variable(value=0)
        self._time_step_module = time_step_module
        self._timeref = None  # will generally be set by forcing_module; otherwise manually
        if forcing_module is not None:
            self.initializeForcing(forcing_module)
        if time_initial is not None:
            self.time = time_initial
        if thermal_properties is not None:
            self.initializeThermalProperties(thermal_properties)
        self._output_step_module = output_step_module
        self._output_module = SlumpOutput()
        if output_step_module is None:
            self._output_step_module = OutputStep()

    @property
    def time(self):
        """Model time in seconds since the reference datetime."""
        return float(self._time.value)

    @time.setter
    def time(self, t): # can also handle date objects
        # EAFP: try to interpret t as a date first; fall back to raw seconds.
        # NOTE(review): the bare except also hides real errors raised inside
        # the date setter -- consider narrowing to TypeError.
        try:
            self.date = t
        except:
            self._time.setValue(t)

    @property
    def timeStep(self):
        """Time step suggested by the configured time-step module."""
        return self._time_step_module.calculate(self)

    @property
    def date(self):
        """Model time as a datetime object."""
        return self._internal_time_to_date(self.time)

    def _internal_time_to_date(self, internal_time):
        return self._timeref + datetime.timedelta(seconds=internal_time)

    @date.setter
    def date(self, d):
        dtsec = self._date_to_internal_time(d)
        self._time.setValue(dtsec)

    def _date_to_internal_time(self, d):
        dt = d - self._timeref
        dtsec = dt.days * 24 * 3600 + dt.seconds + dt.microseconds * 1e-6
        return dtsec

    def initializeTimeReference(self, timeref):
        # timeref is a datetime object
        self._timeref = timeref

    def initializePDE(self, tseq=None):
        self.eq = tseq

    def initializeTimeStepModule(self, time_step_module):
        self._time_step_module = time_step_module

    def _initializeSourcesZero(self, source_name='S'):
        """Register a cell-centered source variable initialized to zero."""
        self.variables[source_name] = CellVariable(
            name=source_name, mesh=self.mesh.mesh, value=0.0)

    def initializeDiagnostic(
            self, variable, funpointer, default=0.0, face_variable=False,
            output_variable=True):
        """Register a diagnostic variable computed by ``funpointer``.

        :param face_variable: create a FaceVariable instead of a CellVariable
        :param output_variable: also include the variable in stored output
        """
        if not face_variable:
            self.variables[variable] = CellVariable(
                name=variable, mesh=self.mesh.mesh, value=default)
        else:
            self.variables[variable] = FaceVariable(
                name=variable, mesh=self.mesh.mesh, value=default)
        self.diagnostic_modules[variable] = DiagnosticModule(funpointer, self)
        if output_variable:
            self.variables_store.append(variable)
        self.diagnostic_update_order.append(variable)

    def initializeOutputStepModule(self, output_step_module):
        self._output_step_module = output_step_module

    def initializeThermalProperties(self, thermal_properties):
        self.thermal_properties = thermal_properties
        self.thermal_properties.initializeVariables(self)
        self.initializeTright()

    def initializeForcing(self, forcing_module):
        """Adopt the forcing module's variables and its time reference."""
        self.forcing_module = forcing_module
        for varj in self.forcing_module.variables:
            assert varj not in self.variables
            self.variables[varj] = self.forcing_module.variables[varj]
        self.initializeTimeReference(self.forcing_module._timeref)

    def initializeEnthalpyTemperature(self, T_initial, proportion_frozen=None,
                                      time=None):
        """Set T and the matching enthalpy h, then refresh diagnostics.

        time can be internal time or also a datetime object.
        """
        pf = 0.0 if proportion_frozen is None else proportion_frozen
        assert pf >= 0.0 and pf <= 1.0
        self.variables['T'].setValue(T_initial)
        self.variables['h'].setValue(self.thermal_properties.enthalpyFromTemperature(
            self, T=T_initial, proportion_frozen=pf))
        self.updateDiagnostics()
        if time is not None:
            self.time = time

    def updateDiagnostic(self, variable):
        self.variables[variable].setValue(self.diagnostic_modules[variable].evaluate())

    def updateDiagnostics(self, variables=None):
        """Re-evaluate diagnostics, by default in registration order."""
        if variables is not None:
            variablesorder = variables
        else:
            variablesorder = self.diagnostic_update_order
        for variable in variablesorder:
            self.updateDiagnostic(variable)

    def specifyBoundaryConditions(self, boundary_condition_collection):
        self.boundary_condition_collection = boundary_condition_collection
        self.updateGeometryBoundaryConditions()
        self.invokeBoundaryConditions()
        self.initializePDE()

    def updateGeometryBoundaryConditions(self):
        self.boundary_condition_collection.updateGeometry(self)

    def updateBoundaryConditions(self, bc_data, invoke=True):
        self.boundary_condition_collection.update(bc_data)
        if invoke:
            self.invokeBoundaryConditions()

    def invokeBoundaryConditions(self):
        self.boundary_condition_collection.invoke(self)

    def updateGeometry(self):
        self.boundary_condition_collection.updateGeometry(self)

    def nextOutput(self):
        """Internal time of the next scheduled output, or None."""
        return self._output_step_module.next(self)

    def updateOutput(self, datanew=None):
        """Record the current values of all stored variables plus the
        boundary-condition output under the current date.

        Fixed: the previous mutable default ``datanew={}`` was mutated in
        place and therefore silently shared between successive calls.
        """
        if datanew is None:
            datanew = {}
        for v in self.variables_store:
            datanew[v] = np.copy(self.variables[v].value)
        # boundary condition outputs:
        # separate routine: total source, source components, or for basic b.c. just value)
        datanew.update(self.boundary_condition_collection.output())
        self._output_module.update(self.date, datanew)

    def exportOutput(self, fn):
        self._output_module.export(fn)

    def addStoredVariable(self, varname):
        # varname can also be list
        if isinstance(varname, str):
            if varname not in self.variables_store:
                self.variables_store.append(varname)
        else: # tuple/list,etc.
            for varnamej in varname:
                self.addStoredVariable(varnamej)
class ThawSlumpEnthalpy(ThawSlump):
# both boundary conditions bc_inside and bc_headwall have to be provided,
# and they are only activated when forcing and thermal_properties are also given
    def __init__(
            self, tsmesh, time_step_module=None, output_step_module=None, h_initial=0.0,
            T_initial=None, time_initial=None, proportion_frozen_initial=None,
            forcing_module=None, thermal_properties=None, bc_inside=None, bc_headwall=None):
        """Enthalpy-based thaw slump.

        Both boundary conditions bc_inside and bc_headwall have to be provided,
        and they are only activated when forcing and thermal_properties are
        also given. T_initial only works if thermal_properties are provided
        (and then overrides h_initial).
        """
        ThawSlump.__init__(
            self, tsmesh, time_step_module=time_step_module,
            output_step_module=output_step_module, time_initial=time_initial,
            forcing_module=forcing_module, thermal_properties=thermal_properties)
        self._initializeSourcesZero(source_name='S')
        self._initializeSourcesZero(source_name='S_inside')
        self._initializeSourcesZero(source_name='S_headwall')
        # specific volumetric enthalpy
        self.variables['h'] = CellVariable(
            name='h', mesh=self.mesh.mesh, value=h_initial, hasOld=True)
        self.addStoredVariable('h')
        if T_initial is not None:  # essentially overrides h_initial
            self.initializeEnthalpyTemperature(
                T_initial, proportion_frozen=proportion_frozen_initial)
        if (bc_inside is not None and bc_headwall is not None
                and self.thermal_properties is not None and self.forcing_module is not None):
            bcc = BoundaryConditionCollection1D(
                bc_headwall=bc_headwall, bc_inside=bc_inside)
            self.specifyBoundaryConditions(bcc)
        self._output_module.storeInitial(self)
    def initializePDE(self):
        """Assemble the transient enthalpy equation: dh/dt = div(k grad T) + sources.

        NOTE(review): this override drops the base class's ``tseq`` parameter;
        confirm no caller passes one.
        """
        self.eq = (TransientTerm(var=self.variables['h']) ==
                   DiffusionTerm(coeff=self.variables['k'], var=self.variables['T']) +
                   self.variables['S'] + self.variables['S_headwall'] +
                   self.variables['S_inside'])
def initializeTright(self):
extrapol_dist = (self.mesh.mesh.faceCenters[0, self.mesh.mesh.facesRight()][0]
-self.mesh.cell_mid_points)
self.dxf = CellVariable(mesh=self.mesh.mesh, value=extrapol_dist)
self.variables['T_right'] = (
self.variables['T'] + self.variables['T'].grad[0] * self.dxf)
def updateGeometry(self):
ThawSlump.updateGeometry(self)
self.initializeTright()
def _integrate(
self, time_step, max_time_step=None, residual_threshold=1e-3, max_steps=20):
apply_max_time_step = False
if time_step is None:
time_step = self.timeStep
if max_time_step is not None and time_step > max_time_step:
time_step = max_time_step
apply_max_time_step = True
residual = residual_threshold + 1
steps = 0
assert self._timeref == self.forcing_module._timeref
self.forcing_module.evaluateToVariable(t=self.time)
while residual > residual_threshold:
residual = self.eq.sweep(var=self.variables['h'], dt=time_step)
steps = steps + 1
if steps >= max_steps:
raise RuntimeError('Sweep did not converge')
self.time = self.time + time_step
self.variables['h'].updateOld()
self.updateDiagnostics()
return time_step, apply_max_time_step
def integrate(
self, time_end, time_step=None, residual_threshold=1e-2, max_steps=10,
time_start=None, viewer=None):
# time_end can also be date
if time_start is not None:
self.time = time_start
self.variables['h'].updateOld()
try:
interval = time_end - self.time
time_end_internal = time_end
except:
time_end_internal = self._date_to_internal_time(time_end)
time_output = self.nextOutput()
write_output = False
write_output_limit = False
time_steps = []
while self.time < time_end_internal:
max_time_step = time_end_internal - self.time
if time_output is not None and time_output < time_end_internal:
max_time_step = time_output - self.time
write_output_limit = True
time_step_actual, apply_max_time_step = self._integrate(
time_step, max_time_step=max_time_step)
time_steps.append(time_step_actual)
if apply_max_time_step and write_output_limit:
write_output = True
if viewer is not None:
viewer.plot()
viewer.axes.set_title(self.date)
if write_output:
time_output = self.nextOutput()
write_output = False
write_output_limit = False
# actually write output
datanew = {'nsteps':len(time_steps), 'mean_time_step':np.mean(time_steps)}
self.updateOutput(datanew=datanew)
time_steps = []
class SlumpOutput(object):
    """Accumulates per-output-step records of a slump run.

    ``data`` maps record names to lists aligned with ``dates``; a record that
    is absent at some output step is padded with ``None`` so all lists stay
    the same length. ``initial`` holds static run information captured once.
    """
    def __init__(self):
        self.dates = []
        self.data = {}
        self.initial = {}
    def update(self, date, datanew):
        """Append the records in *datanew* for *date*, padding missing ones."""
        # set union works on both Python 2 and 3; on Python 3 dict views
        # cannot be concatenated with "+" (the previous code did keys()+keys())
        records = set(self.data) | set(datanew)
        for record in records:
            if record in self.data and record in datanew:
                self.data[record].append(datanew[record])
            elif record in self.data:
                # known record, not reported this step
                self.data[record].append(None)
            else:
                # new record; backfill earlier steps with Nones
                self.data[record] = [None] * len(self.dates)
                self.data[record].append(datanew[record])
        self.dates.append(date)
    def storeInitial(self, ts):
        """Capture mesh geometry, initial temperature field and thermal
        properties of the ThawSlump *ts*."""
        self.initial['mesh_mid_points'] = ts.mesh.cell_mid_points
        self.initial['mesh_face_left'] = ts.mesh.face_left_position
        self.initial['mesh_face_right'] = ts.mesh.face_right_position
        self.initial['mesh_cell_volumes'] = ts.mesh.cell_volumes
        self.initial['T_initial'] = np.copy(ts.variables['T'].value)
        self.initial.update(ts.thermal_properties.output())
    def export(self, fn):
        """Pickle the (dates, data, initial) tuple to file *fn*."""
        with open(fn, 'wb') as f:
            pickle.dump(self.read(), f)
    def read(self):
        """Return the tuple (dates, data, initial)."""
        return (self.dates, self.data, self.initial)
# nice way to read pickled SlumpOutput data (read method)
class SlumpResults(object):
    """Read-only view on pickled SlumpOutput data (see SlumpOutput.read).

    Parameters:
        dates: list of datetimes, one per output step.
        data: record name -> list of per-date values (None where absent).
        initial: static run information (mesh geometry, T_initial, ...).
        timeref: reference epoch for internal (seconds) time; defaults to
            dates[0].
    """
    def __init__(self, dates, data, initial, timeref=None):
        self.dates = dates
        self.data = data
        self.initial = initial
        if timeref is not None:
            self._timeref = timeref
        else:
            self._timeref = self.dates[0]
    @classmethod
    def fromFile(cls, fn):
        """Load pickled (dates, data, initial) from file *fn*."""
        # context manager closes the handle (the previous code leaked it)
        with open(fn, 'rb') as f:
            dates, data, initial = pickle.load(f)
        return cls(dates, data, initial)
    def _date_to_internal_time(self, ds):
        """Convert the list of datetimes *ds* to seconds since self._timeref."""
        # timedelta.total_seconds() replaces the manual days/seconds/us sum
        return np.array([(d - self._timeref).total_seconds() for d in ds])
    @property
    def _depths(self):
        # depth of each cell midpoint below the right (surface) face
        return self.initial['mesh_face_right'] - self.initial['mesh_mid_points']
    def readVariable(self, variable_name='T', interp_dates=None, interp_depths=None):
        """Return the stored field *variable_name* as an array (dates x cells),
        optionally linearly interpolated to *interp_dates* and/or
        *interp_depths*."""
        vararr = np.array(self.data[variable_name])
        if interp_dates is not None:
            dates_int = self._date_to_internal_time(self.dates)
            interp_dates_int = self._date_to_internal_time(interp_dates)
            interpolator_dates = interp1d(dates_int, vararr, axis=0)
            vararr = interpolator_dates(interp_dates_int)
        if interp_depths is not None:
            # depth interpolation requires a (dates x cells) array
            assert len(vararr.shape) == 2
            assert vararr.shape[1] == self.initial['mesh_mid_points'].shape[0]
            interpolator_depths = interp1d(self._depths, vararr, axis=1)
            vararr = interpolator_depths(interp_depths)
        return vararr
class TimeStep(object):
    """Base class for time-step policies; subclasses override calculate()."""

    def __init__(self):
        pass

    def calculate(self, ts):
        """Return the next time step (seconds) for the slump *ts*."""
        pass
class TimeStepConstant(TimeStep):
    """Time-step policy returning the same fixed step at every call."""

    def __init__(self, step=1.0):
        # the constant time step (seconds)
        self.step = step

    def calculate(self, ts):
        """Return the fixed step; the slump *ts* is ignored."""
        return self.step
class TimeStepCFL(TimeStep):
    """Diffusion-limited (CFL) time step with a safety factor."""

    def __init__(self, safety=0.9):
        # multiplicative safety margin applied to the CFL limit
        self.safety = safety

    def calculate(self, ts):
        """Return safety * min over cells of 0.5 dx^2 / (K/C)."""
        conductivity = np.array(ts.variables['K'])
        # average the face values onto cells (1D only)
        conductivity = 0.5 * (conductivity[1:] + conductivity[:-1])
        diffusivity = np.array((conductivity / ts.variables['C']))
        limit = np.min(0.5 * (ts.mesh.cell_volumes) ** 2 / diffusivity)
        return self.safety * limit
class TimeStepCFLSources(TimeStep):
    """Diffusion CFL time step, additionally limited so that the source terms
    change the enthalpy by at most a given relative amount per step."""
    def __init__(
        self, safety=0.9, relative_enthalpy_change=0.01,
        slow_time_scale=3600 * 24 * 30):
        # safety: multiplicative margin on the CFL limit
        self.safety = safety
        # maximum allowed relative change of h per step due to sources
        self.relative_enthalpy_change = relative_enthalpy_change
        # internal time scale, should be >> process time scale; to avoid / zero
        self.slow_time_scale = slow_time_scale
    def calculate(self, ts):
        """Return min(CFL-limited step, source-limited step) for slump *ts*."""
        K = np.array(ts.variables['k'])
        # hack, only works in 1D and is insufficient for highly irregular grids
        K = 0.5 * (K[1::] + K[:-1:])
        CFL = np.min(0.5 * (ts.mesh.cell_volumes) ** 2 / np.array((K / ts.variables['c'])))
        step = self.safety * CFL
        # total magnitude of all enthalpy sources per cell
        S_total = np.abs(
            ts.variables['S'] + ts.variables['S_headwall'] + ts.variables['S_inside'])
        # |h|/slow_time_scale keeps the denominator away from zero
        denom = (np.abs(ts.variables['h']) / self.slow_time_scale + S_total)
        step_sources = (self.relative_enthalpy_change
            * np.min(np.abs(np.array(ts.variables['h'] / denom))))
        if step_sources < step:
            step = step_sources
        return step
class OutputStep(object):
    """Base class for output scheduling policies; subclasses override next()."""

    def __init__(self):
        pass

    def next(self, ts):
        """Return the internal time of the next output, or None for no output."""
        return None
class OutputStepHourly(OutputStep):
    """Schedules one output at the start of every wall-clock hour."""

    def __init__(self):
        pass

    def next(self, ts):
        """Return the internal time of the next full hour after ts.date."""
        now = ts.date
        truncated = datetime.datetime(now.year, now.month, now.day, now.hour)
        upcoming = truncated + datetime.timedelta(seconds=3600)
        return ts._date_to_internal_time(upcoming)
class Forcing(object):
    """Time-constant forcing; values are exposed both as plain numbers
    (``values``) and as fipy Variables (``variables``)."""
    def __init__(self, values_inp, timeref=datetime.datetime(2012, 1, 1), variables=None):
        """*values_inp* maps names to constant values; *variables* optionally
        restricts which keys are used; *timeref* is the model time origin."""
        if variables is None:
            self.variables = [vj for vj in values_inp]
        else:
            self.variables = variables
        self._timeref = timeref
        self.variables = {vj:Variable(value=values_inp[vj]) for vj in self.variables}
        self.values = {vj: values_inp[vj] for vj in self.variables}
    def evaluate(self, t=None):
        """Return the (constant) forcing values; *t* is ignored."""
        return self.values
    def evaluateToVariable(self, t=None):
        """Push the current forcing values into the fipy Variables."""
        # .items() works on both Python 2 and 3 (iteritems() was Python-2 only)
        for vj, ij in self.evaluate(t=t).items():
            self.variables[vj].setValue(ij)
class ForcingInterpolation(Forcing):
    """Forcing given as time series; evaluate() linearly interpolates in time.

    The time axis may be datetimes (converted to seconds past the first
    entry) or already-numeric model-internal seconds.
    """
    def __init__(self, values_inp, t_inp=None, variables=None, key_time='time'):
        if t_inp is None:
            t_inp_int = values_inp[key_time]
        else:
            t_inp_int = t_inp
        self.t_inp = t_inp_int
        self._timeref = t_inp_int[0]
        t_inp_rel = [tj - self._timeref for tj in self.t_inp]
        try:
            # datetime axis: timedeltas -> seconds
            self.t_inp_rel = np.array([tj.total_seconds() for tj in t_inp_rel])
        except AttributeError:
            # numeric axis: already relative seconds (narrowed from bare except)
            self.t_inp_rel = np.array(t_inp_rel)
        if variables is None:
            self.variables = [vj for vj in values_inp if vj != key_time]
        else:
            self.variables = variables
        self.variables = {vj:Variable(value=values_inp[vj][0]) for vj in self.variables}
        self.values = {vj: values_inp[vj] for vj in self.variables}
    def evaluate(self, t=0):
        """Return forcing values linearly interpolated to time *t* (a datetime
        or a model-internal number of seconds)."""
        try:
            t_rel = (t - self._timeref).total_seconds()  # datetime object
        except (AttributeError, TypeError):
            t_rel = t  # slump-internal time (narrowed from bare except)
        vals = {vj:np.interp(t_rel, self.t_inp_rel, self.values[vj])
            for vj in self.variables}
        return vals
| 1.757813 | 2 |
reinvent-2019/connected-photo-booth/py_client/config.py | chriscoombs/aws-builders-fair-projects | 0 | 12768588 | <gh_stars>0
import boto3
import botocore
import os
import glob
import json
import requests
from datetime import datetime
from time import sleep
from time import gmtime, strftime
import sys, getopt
import argparse
import subprocess
from shutil import copyfile, rmtree
import logging
import configparser
# Path of the local INI file that maps configuration keys to SSM item names.
__CONFIG_FILE_PATH__ = "cerebro.config"
# Root of the parameter hierarchy in the AWS SSM Parameter Store.
__SSM_BASE_PATH__ = "/Cerebro"

class Configuration(object):
    """Resolves Cerebro configuration values from AWS SSM Parameter Store.

    The local INI file maps logical names (INI section / item) to SSM
    parameter paths of the form "/Cerebro/<section>/<item value>". Every
    property below fetches the live value from SSM on each access.
    """
    def __init__(self,config_file=__CONFIG_FILE_PATH__):
        self.config_file = config_file
        self.config_parser = configparser.ConfigParser()
        self.config_parser.read(self.config_file)
        self.get_config_entries()
        self._ssm = boto3.client('ssm')

    def get_config_entries(self):
        """Build self.config_entries: UPPER-CASED item name -> SSM path."""
        self.config_entries = {}
        for section in self.config_parser.sections():
            for item in self.config_parser.items(section):
                param_name = item[0].upper()
                param_value = "%s/%s/%s" % (__SSM_BASE_PATH__, section, item[1])
                self.config_entries[param_name] = param_value
        return True

    def getConfig(self, configEntry):
        """Return the SSM value for *configEntry*, or None when the entry is
        unknown or the SSM response is malformed."""
        if configEntry not in self.config_entries:
            return None
        ssm_param_name = self.config_entries[configEntry]
        response = self._ssm.get_parameter(
            Name=ssm_param_name
        )
        if ("Parameter" in response) and ("Name" in response["Parameter"]) and ("Value" in response["Parameter"]):
            ssm_param_value = response["Parameter"]["Value"]
        else:
            return None
        return ssm_param_value

    @property
    def __QUEUE_URL__(self):
        return self.getConfig("__QUEUE_URL__")

    @property
    def __SQS_QUEUE_NAME__(self):
        return self.getConfig("__SQS_QUEUE_NAME__")

    @property
    def __SQS_BACKEND_QUEUE__(self):
        return self.getConfig("__SQS_BACKEND_QUEUE__")

    @property
    def __APIGW_X_API_KEY__(self):
        return self.getConfig("__APIGW_X_API_KEY__")

    @property
    def __APIGW_X_API_KEY_QR_CODE__(self):
        return self.getConfig("__APIGW_X_API_KEY_QR_CODE__")

    @property
    def __APIGW_API__(self):
        return self.getConfig("__APIGW_API__")

    @property
    def __APIGW_API_QR_CODE__(self):
        return self.getConfig("__APIGW_API_QR_CODE__")

    @property
    def __S3_BUCKET__(self):
        # NOTE: this property was previously defined twice; the identical
        # duplicate has been removed.
        return self.getConfig("__S3_BUCKET__")

    @property
    def __CEREBRO_TEMP_DIR__(self):
        return self.getConfig("__CEREBRO_TEMP_DIR__")

    @property
    def __CEREBRO_MEDIA_DIR__(self):
        return self.getConfig("__CEREBRO_MEDIA_DIR__")

    @property
    def __CEREBRO_LOGS_DIR__(self):
        return self.getConfig("__CEREBRO_LOGS_DIR__")

    @property
    def __CEREBRO_PROFILES_DIR__(self):
        return self.getConfig("__CEREBRO_PROFILES_DIR__")

    @property
    def __CEREBRO_SYSTEM_DIR__(self):
        return self.getConfig("__CEREBRO_SYSTEM_DIR__")

    @property
    def __IMAGE_MAX_COUNT__(self):
        return int(self.getConfig("__IMAGE_MAX_COUNT__"))

    @property
    def __GREEN_LED__(self):
        return int(self.getConfig("__GREEN_LED__"))

    @property
    def __GREEN_BUTTON__(self):
        return int(self.getConfig("__GREEN_BUTTON__"))

    @property
    def __YELLOW_LED__(self):
        return int(self.getConfig("__YELLOW_LED__"))

    @property
    def __YELLOW_BUTTON__(self):
        return int(self.getConfig("__YELLOW_BUTTON__"))

    @property
    def __IOT_TOPIC__(self):
        return self.getConfig("__IOT_TOPIC__")

    @property
    def __IOT_HOST__(self):
        return self.getConfig("__IOT_HOST__")

    @property
    def __IOT_ROOT_CA_PATH__(self):
        return self.getConfig("__IOT_ROOT_CA_PATH__")

    @property
    def __IOT_CERTIFICATE_PATH__(self):
        return self.getConfig("__IOT_CERTIFICATE_PATH__")

    @property
    def __IOT_PRIVATE_KEY_PATH__(self):
        return self.getConfig("__IOT_PRIVATE_KEY_PATH__")

    @property
    def __IOT_CLIENT_ID_REQUESTER__(self):
        return self.getConfig("__IOT_CLIENT_ID_REQUESTER__")

    @property
    def __IOT_CLIENT_ID_PROCESSOR__(self):
        return self.getConfig("__IOT_CLIENT_ID_PROCESSOR__")

    @property
    def __CEREBRO_AUDIO_DIR__(self):
        return self.getConfig("__CEREBRO_AUDIO_DIR__")

    @property
    def __PUSHBUTTON_DELAY__(self):
        return int(self.getConfig("__PUSHBUTTON_DELAY__"))

    @property
    def __ACCEPT_INPUT__(self):
        return int(self.getConfig("__ACCEPT_INPUT__"))

    @property
    def __CHOOSE_AGAIN__(self):
        return int(self.getConfig("__CHOOSE_AGAIN__"))

    @property
    def __CADENCE__(self):
        return int(self.getConfig("__CADENCE__"))

    @property
    def __DDB_TABLE__(self):
        return self.getConfig("__DDB_TABLE__")

    @property
    def __PRINTER_TYPE__(self):
        return self.getConfig("__PRINTER_TYPE__")

    @property
    def __FILTERED_IMAGE_NAME__(self):
        return self.getConfig("__FILTERED_IMAGE_NAME__")

    @property
    def __PIG_NOSE_FILTER__(self):
        return self.getConfig("__PIG_NOSE_FILTER__")

    @property
    def __FLOWER_CROWN_FILTER__(self):
        return self.getConfig("__FLOWER_CROWN_FILTER__")

    @property
    def __EYE_MASK_FILTER__(self):
        return self.getConfig("__EYE_MASK_FILTER__")

    @property
    def __DOG_NOSE_FILTER__(self):
        return self.getConfig("__DOG_NOSE_FILTER__")

    @property
    def __DOG_LEFT_EAR_FILTER__(self):
        return self.getConfig("__DOG_LEFT_EAR_FILTER__")

    @property
    def __DOG_RIGHT_EAR_FILTER__(self):
        return self.getConfig("__DOG_RIGHT_EAR_FILTER__")

    @property
    def __DOG_TONGUE_FILTER__(self):
        return self.getConfig("__DOG_TONGUE_FILTER__")
goorm-api/product/urls.py | labiss96/goorm-service | 0 | 12768589 | <gh_stars>0
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from . import views
# Register the commerce viewsets; DefaultRouter also provides a browsable
# API root view listing these endpoints.
router = DefaultRouter()
router.register('brand', views.BrandViewSet)
router.register('tobacco', views.TobaccoViewSet)
router.register('review', views.ReviewViewSet)
# Expose every router-generated route at the app root.
urlpatterns = [
    path('', include(router.urls)),
]
| 1.765625 | 2 |
vpp/vppapi.py | pimvanpelt/vppcfg | 0 | 12768590 | #
# Copyright (c) 2022 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
The functions in this file interact with the VPP API to retrieve certain
interface metadata. Its base class will never change state. See the
derived classes VPPApiDumper() and VPPApiApplier()
"""
import os
import fnmatch
import logging
import socket
from vpp_papi import VPPApiClient
class VPPApi:
    """The VPPApi class is a base class that abstracts the vpp_papi."""

    def __init__(
        self,
        vpp_api_socket="/run/vpp/api.sock",
        vpp_json_dir="/usr/share/vpp/api/",
        clientname="vppcfg",
    ):
        self.logger = logging.getLogger("vppcfg.vppapi")
        self.logger.addHandler(logging.NullHandler())

        if not os.path.exists(vpp_api_socket):
            self.logger.error(f"VPP api socket file not found: {vpp_api_socket}")
        if not os.path.isdir(vpp_json_dir):
            self.logger.error(f"VPP api json directory not found: {vpp_json_dir}")
        self.vpp_api_socket = vpp_api_socket
        self.vpp_json_dir = vpp_json_dir
        self.connected = False
        self.clientname = clientname
        self.vpp = None
        self.cache = self.cache_clear()
        self.cache_read = False
        self.lcp_enabled = False

    def connect(self):
        """Connect to the VPP Dataplane, if we're not already connected"""
        if self.connected:
            return True

        # construct a list of all the json api files
        jsonfiles = []
        for root, _dirnames, filenames in os.walk(self.vpp_json_dir):
            for filename in fnmatch.filter(filenames, "*.api.json"):
                jsonfiles.append(os.path.join(root, filename))

        if not jsonfiles:
            self.logger.error("no json api files found")
            return False

        self.vpp = VPPApiClient(apifiles=jsonfiles, server_address=self.vpp_api_socket)
        try:
            self.logger.debug("Connecting to VPP")
            self.vpp.connect(self.clientname)
        except Exception:  # narrowed from a bare except
            return False

        # pylint: disable=no-member
        api_response = self.vpp.api.show_version()
        self.logger.info(f"VPP version is {api_response.version}")

        self.connected = True
        return True

    def disconnect(self):
        """Disconnect from the VPP dataplane, if we are still connected."""
        if not self.connected:
            return True
        self.vpp.disconnect()
        self.logger.debug("Disconnected from VPP")
        self.connected = False
        return True

    def cache_clear(self):
        """Remove the cached VPP configuration elements and return an empty dictionary"""
        self.cache_read = False
        return {
            "lcps": {},
            "interface_names": {},
            "interfaces": {},
            "interface_addresses": {},
            "bondethernets": {},
            "bondethernet_members": {},
            "bridgedomains": {},
            "vxlan_tunnels": {},
            "l2xcs": {},
            "taps": {},
        }

    def cache_remove_lcp(self, lcpname):
        """Removes the LCP and TAP interface, identified by lcpname, from the VPP config cache"""
        for _idx, lcp in self.cache["lcps"].items():
            if lcp.host_if_name == lcpname:
                ifname = self.cache["interfaces"][lcp.host_sw_if_index].interface_name
                del self.cache["lcps"][lcp.phy_sw_if_index]
                return self.cache_remove_interface(ifname)

        self.logger.warning(
            f"Trying to remove an LCP which is not in the config: {lcpname}"
        )
        return False

    def cache_remove_bondethernet_member(self, ifname):
        """Removes the bondethernet member interface, identified by name, from the VPP config cache"""
        if not ifname in self.cache["interface_names"]:
            self.logger.warning(
                f"Trying to remove a bondethernet member interface which is not in the config: {ifname}"
            )
            return False

        iface = self.cache["interface_names"][ifname]
        for bond_idx, members in self.cache["bondethernet_members"].items():
            if iface.sw_if_index in members:
                self.cache["bondethernet_members"][bond_idx].remove(iface.sw_if_index)

        # idempotent: also returns True when the interface was not a member
        return True

    def cache_remove_l2xc(self, ifname):
        """Removes the l2xc from the VPP config cache"""
        if not ifname in self.cache["interface_names"]:
            self.logger.warning(
                f"Trying to remove an L2XC which is not in the config: {ifname}"
            )
            return False
        iface = self.cache["interface_names"][ifname]
        self.cache["l2xcs"].pop(iface.sw_if_index, None)
        return True

    def cache_remove_vxlan_tunnel(self, ifname):
        """Removes a vxlan_tunnel from the VPP config cache"""
        if not ifname in self.cache["interface_names"]:
            self.logger.warning(
                f"Trying to remove a VXLAN Tunnel which is not in the config: {ifname}"
            )
            return False

        iface = self.cache["interface_names"][ifname]
        self.cache["vxlan_tunnels"].pop(iface.sw_if_index, None)
        return True

    def cache_remove_interface(self, ifname):
        """Removes the interface, identified by name, from the VPP config cache"""
        if not ifname in self.cache["interface_names"]:
            self.logger.warning(
                f"Trying to remove an interface which is not in the config: {ifname}"
            )
            return False

        iface = self.cache["interface_names"][ifname]
        del self.cache["interfaces"][iface.sw_if_index]
        if len(self.cache["interface_addresses"][iface.sw_if_index]) > 0:
            self.logger.warning(f"Not all addresses were removed on {ifname}")
        del self.cache["interface_addresses"][iface.sw_if_index]
        del self.cache["interface_names"][ifname]

        ## Use my_dict.pop('key', None), as it allows 'key' to be absent
        if iface.sw_if_index in self.cache["bondethernet_members"]:
            if len(self.cache["bondethernet_members"][iface.sw_if_index]) != 0:
                self.logger.warning(
                    f"When removing BondEthernet {ifname}, its members are not empty: {self.cache['bondethernet_members'][iface.sw_if_index]}"
                )
            else:
                del self.cache["bondethernet_members"][iface.sw_if_index]
        self.cache["bondethernets"].pop(iface.sw_if_index, None)
        self.cache["taps"].pop(iface.sw_if_index, None)
        return True

    def readconfig(self):
        """Read the configuration out of a running VPP Dataplane and put it into a
        VPP config cache"""
        # pylint: disable=no-member
        if not self.connected and not self.connect():
            self.logger.error("Could not connect to VPP")
            return False

        self.cache_read = False

        ## Workaround LCPng and linux-cp, in order.
        self.lcp_enabled = False
        try:
            self.logger.debug("Retrieving LCPs")
            api_response = self.vpp.api.lcp_itf_pair_get()
            if isinstance(api_response, tuple) and api_response[0].retval == 0:
                for lcp in api_response[1]:
                    if lcp.phy_sw_if_index > 65535 or lcp.host_sw_if_index > 65535:
                        ## Work around endianness bug: https://gerrit.fd.io/r/c/vpp/+/35479
                        ## TODO(pim) - remove this when 22.06 ships
                        lcp = lcp._replace(
                            phy_sw_if_index=socket.ntohl(lcp.phy_sw_if_index)
                        )
                        lcp = lcp._replace(
                            host_sw_if_index=socket.ntohl(lcp.host_sw_if_index)
                        )
                        lcp = lcp._replace(vif_index=socket.ntohl(lcp.vif_index))
                        self.logger.warning(
                            f"LCP workaround for endianness issue on {lcp.host_if_name}"
                        )
                    self.cache["lcps"][lcp.phy_sw_if_index] = lcp
                self.lcp_enabled = True
        except Exception:  # narrowed from a bare except; plugin may be absent
            self.logger.warning(
                "linux-cp not found, will not reconcile Linux Control Plane"
            )

        self.logger.debug("Retrieving interfaces")
        api_response = self.vpp.api.sw_interface_dump()
        for iface in api_response:
            self.cache["interfaces"][iface.sw_if_index] = iface
            self.cache["interface_names"][iface.interface_name] = iface
            self.cache["interface_addresses"][iface.sw_if_index] = []
            self.logger.debug(f"Retrieving IPv4 addresses for {iface.interface_name}")
            ipr = self.vpp.api.ip_address_dump(
                sw_if_index=iface.sw_if_index, is_ipv6=False
            )
            for addr in ipr:
                self.cache["interface_addresses"][iface.sw_if_index].append(
                    str(addr.prefix)
                )
            self.logger.debug(f"Retrieving IPv6 addresses for {iface.interface_name}")
            ipr = self.vpp.api.ip_address_dump(
                sw_if_index=iface.sw_if_index, is_ipv6=True
            )
            for addr in ipr:
                self.cache["interface_addresses"][iface.sw_if_index].append(
                    str(addr.prefix)
                )

        self.logger.debug("Retrieving bondethernets")
        api_response = self.vpp.api.sw_bond_interface_dump()
        for iface in api_response:
            self.cache["bondethernets"][iface.sw_if_index] = iface
            self.cache["bondethernet_members"][iface.sw_if_index] = []
            for member in self.vpp.api.sw_member_interface_dump(
                sw_if_index=iface.sw_if_index
            ):
                self.cache["bondethernet_members"][iface.sw_if_index].append(
                    member.sw_if_index
                )

        self.logger.debug("Retrieving bridgedomains")
        api_response = self.vpp.api.bridge_domain_dump()
        for bridge in api_response:
            self.cache["bridgedomains"][bridge.bd_id] = bridge

        self.logger.debug("Retrieving vxlan_tunnels")
        api_response = self.vpp.api.vxlan_tunnel_v2_dump()
        for vxlan in api_response:
            self.cache["vxlan_tunnels"][vxlan.sw_if_index] = vxlan

        self.logger.debug("Retrieving L2 Cross Connects")
        api_response = self.vpp.api.l2_xconnect_dump()
        for l2xc in api_response:
            self.cache["l2xcs"][l2xc.rx_sw_if_index] = l2xc

        self.logger.debug("Retrieving TAPs")
        api_response = self.vpp.api.sw_interface_tap_v2_dump()
        for tap in api_response:
            self.cache["taps"][tap.sw_if_index] = tap

        self.cache_read = True
        return self.cache_read

    def phys_exist(self, ifname_list):
        """Return True if all interfaces in the `ifname_list` exist as physical interface names
        in VPP. Return False otherwise."""
        ret = True
        for ifname in ifname_list:
            if not ifname in self.cache["interface_names"]:
                self.logger.warning(f"Interface {ifname} does not exist in VPP")
                ret = False
        return ret

    def get_sub_interfaces(self):
        """Return all interfaces which have a sub-id and one or more tags"""
        subints = [
            self.cache["interfaces"][x].interface_name
            for x in self.cache["interfaces"]
            if self.cache["interfaces"][x].sub_id > 0
            and self.cache["interfaces"][x].sub_number_of_tags > 0
        ]
        return subints

    def get_qinx_interfaces(self):
        """Return all interfaces which have a sub-id and a non-zero inner vlan tag"""
        qinx_subints = [
            self.cache["interfaces"][x].interface_name
            for x in self.cache["interfaces"]
            if self.cache["interfaces"][x].sub_id > 0
            and self.cache["interfaces"][x].sub_inner_vlan_id > 0
        ]
        return qinx_subints

    def get_dot1x_interfaces(self):
        """Return all interfaces which have only an outer vlan tag (dot1q/dot1ad)"""
        dot1x_subints = [
            self.cache["interfaces"][x].interface_name
            for x in self.cache["interfaces"]
            if self.cache["interfaces"][x].sub_id > 0
            and self.cache["interfaces"][x].sub_inner_vlan_id == 0
        ]
        return dot1x_subints

    def get_loopbacks(self):
        """Return all interfaces of VPP type 'Loopback'"""
        loopbacks = [
            self.cache["interfaces"][x].interface_name
            for x in self.cache["interfaces"]
            if self.cache["interfaces"][x].interface_dev_type == "Loopback"
        ]
        return loopbacks

    def get_phys(self):
        """Return all interfaces for which the super interface has the same sw_if_index
        and aren't known to be virtual interfaces"""
        phys = [
            self.cache["interfaces"][x].interface_name
            for x in self.cache["interfaces"]
            if self.cache["interfaces"][x].sw_if_index
            == self.cache["interfaces"][x].sup_sw_if_index
            and self.cache["interfaces"][x].interface_dev_type
            not in ["virtio", "BVI", "Loopback", "VXLAN", "local", "bond"]
        ]
        return phys

    def get_bondethernets(self):
        """Return all bondethernet interfaces"""
        bonds = [
            self.cache["bondethernets"][x].interface_name
            for x in self.cache["bondethernets"]
        ]
        return bonds

    def get_vxlan_tunnels(self):
        """Return all vxlan_tunnel interfaces"""
        vxlan_tunnels = [
            self.cache["interfaces"][x].interface_name
            for x in self.cache["interfaces"]
            if self.cache["interfaces"][x].interface_dev_type in ["VXLAN"]
        ]
        return vxlan_tunnels

    def get_lcp_by_interface(self, sw_if_index):
        """Return the LCP config cache for the interface given by sw_if_index"""
        for _idx, lcp in self.cache["lcps"].items():
            if lcp.phy_sw_if_index == sw_if_index:
                return lcp
        return None

    def tap_is_lcp(self, tap_ifname):
        """Returns True if the given tap_ifname is a TAP interface belonging to an LCP,
        or False otherwise."""
        if not tap_ifname in self.cache["interface_names"]:
            return False

        vpp_iface = self.cache["interface_names"][tap_ifname]
        if not vpp_iface.interface_dev_type == "virtio":
            return False

        for _idx, lcp in self.cache["lcps"].items():
            if vpp_iface.sw_if_index == lcp.host_sw_if_index:
                return True
        return False
| 2.265625 | 2 |
watcher.py | ATLJoeReed/eventbrite_watcher | 0 | 12768591 | #!/usr/bin/python3.6
import datetime
import time
import requests
from twilio.rest import Client
from config import settings
class EventbriteWatcher(object):
    """Polls Eventbrite for an organizer's events whose name contains a
    keyword, and raises alerts via Twilio SMS."""

    def __init__(self, watch_time, organizer_id, keyword):
        """Store the watch duration (minutes), the organizer id and the
        keyword to alert on."""
        self.watch_time = watch_time
        self.organizer_id = organizer_id
        self.keyword = keyword

    def build_headers(self, oauth_token):
        """Return the Authorization header for the Eventbrite API."""
        return {'Authorization': "Bearer {}".format(oauth_token)}

    def build_payload(self, organizer_id):
        """Return the query parameters selecting *organizer_id*."""
        return {'organizer.id': organizer_id}

    def check_events(self, events, keyword):
        """Return the URLs of events whose name contains *keyword*
        (case-insensitive); events without a url contribute None."""
        needle = keyword.lower()
        return [
            event.get('url', None)
            for event in events
            if needle in event['name']['text'].lower()
        ]

    def fetch_events(self):
        """Fetch the organizer's events; send an SMS and return None on a
        non-200 response."""
        response = requests.get(
            settings.BASE_URL,
            headers=self.build_headers(settings.OAUTH_TOKEN),
            params=self.build_payload(self.organizer_id),
        )
        status = response.status_code
        if status != 200:
            self.send_sms('Bad response status code: {}'.format(status))
            return None
        return response.json().get('events', None)

    def get_end_time(self, watch_time):
        """Return the wall-clock time *watch_time* minutes from now."""
        duration = datetime.timedelta(minutes=watch_time)
        return datetime.datetime.now() + duration

    def send_sms(self, message):
        """Send *message* through Twilio and return the API response."""
        cfg = settings.TWILIO
        client = Client(
            cfg.get('account_sid', None),
            cfg.get('auth_token', None),
        )
        return client.messages.create(
            to=cfg.get('to_number', None),
            from_=cfg.get('from_number', None),
            body=message,
        )

    def start_watching(self):
        """Poll every 5 minutes until 3 matching polls were alerted on or the
        watch window elapses; alert after ~2 hours without any events."""
        self.send_sms('Starting to watch...')
        results_found = 0
        empty_polls = 0
        deadline = self.get_end_time(self.watch_time)
        while results_found < 3 and datetime.datetime.now() < deadline:
            events = self.fetch_events()
            if not events:
                empty_polls += 1
                if empty_polls > 24:
                    self.send_sms('No events found in past 2 hours...')
                    empty_polls = 0
            else:
                matches = self.check_events(events, self.keyword)
                if matches:
                    results_found += 1
                    for url in matches:
                        self.send_sms(url)
            time.sleep(300)
| 2.59375 | 3 |
expertise/setup/__init__.py | mspector/expertise | 12 | 12768592 | from .core import setup_model
| 1.03125 | 1 |
assignment3/code/Task3_PCA.py | cjy513203427/SML_Assignment | 0 | 12768593 | <reponame>cjy513203427/SML_Assignment
# -*- encoding: utf-8 -*-
'''
@File : __init__.py.py
@Modify Time @Author @Desciption
------------ ------- -----------
2021/6/27 13:35 Jonas None
'''
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
# Load the iris data set: the first columns are numeric features, the last
# column is the class label.
data = pd.read_csv("iris.txt", sep=",", header=None)
x = data.iloc[:, :-1].values  # features, shape (n_samples, n_features)
y = data.iloc[:, -1].values  # class labels, shape (n_samples,)
def normalize(x):
    """Standardize each column of *x* (zero mean, unit std) and return the
    result transposed, i.e. shape (n_features, n_samples)."""
    centered = x - np.mean(x, axis=0)
    return (centered / np.std(x, axis=0)).T
def PCA(x, n_eigen):
    """Principal component analysis of *x* (shape: n_features x n_samples).

    Returns (B, a, explained): the first n_eigen+1 principal directions B
    (columns), the projected data a = B^T x, and the fraction of total
    variance captured.
    """
    # lecture10 23 (25 / 73): sample covariance (data assumed centered)
    C = x.dot(x.T) / x.shape[1]
    # eigh is the right solver for a symmetric matrix: real results,
    # guaranteed ascending order. np.linalg.eig gives NO ordering guarantee
    # (the old comment claiming "already sorted" was wrong) and may return
    # complex values from numerical asymmetry.
    eigenvalues, eigenvectors = np.linalg.eigh(C)
    order = np.argsort(eigenvalues)[::-1]  # sort descending by variance
    eigenvalues = eigenvalues[order]
    eigenvectors = eigenvectors[:, order]
    eigenvalues_total = np.sum(eigenvalues)
    explained = np.sum(eigenvalues[:n_eigen + 1]) / eigenvalues_total
    B = eigenvectors[:, :n_eigen + 1]
    # lecture10 27 (29 / 73): project the data onto the retained directions
    a = B.T.dot(x)
    return B, a, explained
def find_threshold(x):
    """Plot the fraction of variance captured as a function of the number of
    retained eigenvectors, together with the 95% threshold line.

    Blocks on plt.show(); no value is returned.
    """
    max_eigenvalue = x.shape[1]
    explained = np.empty(shape=(max_eigenvalue,))
    for n_eigen in range(max_eigenvalue):
        # only the captured-variance fraction is needed here
        _, a, var = PCA(normalize(x), n_eigen)
        explained[n_eigen] = var
    plt.plot(np.arange(1, max_eigenvalue + 1), explained, label="marginal variance captured")
    plt.plot(np.arange(1, max_eigenvalue + 1), np.full(max_eigenvalue, 0.95), "--", label="threshold $\lambda=0.95$")
    plt.xticks(np.arange(1, max_eigenvalue + 1))
    plt.title("Threshold of marginal variance captured")
    plt.xlabel("# eigenvectors")
    plt.ylabel("marginal variance captured")
    plt.legend()
    plt.show()
def reverse_PCA(x):
    """Reconstruct *x* from its PCA projection for every possible number of
    eigenvectors and print the per-feature NRMSE of each reconstruction."""
    mean = np.mean(x, axis=0)
    std = np.std(x, axis=0)
    max_eigenvalue = x.shape[1]
    # row k holds the per-feature NRMSE when keeping k+1 eigenvectors
    nrmse_total = np.empty(shape=(max_eigenvalue, x.shape[1]))
    for n_eigen in range(max_eigenvalue):
        B, a, _ = PCA(normalize(x), n_eigen)
        # normalization constant: per-feature value range of the raw data
        norm = np.amax(x, axis=0) - np.amin(x, axis=0)
        # undo projection, then undo the standardization applied by normalize()
        reverse = std * B.dot(a).T + mean
        nrmse_total[n_eigen] = nrmse(reverse, x, norm)
    print(nrmse_total)
def nrmse(y1, y2, norm):
    """Per-column root-mean-square error between *y1* and *y2*, divided by
    the per-column normalization constant *norm*."""
    rmse = np.sqrt(np.mean((y1 - y2) ** 2, axis=0))
    return rmse / norm
def plot_PCA_data(x, y):
    """Scatter-plot the data projected onto the smallest set of principal
    components that captures at least 95% of the variance (first two
    projection coordinates are plotted, colored by class label *y*).

    Blocks on plt.show(); no value is returned.
    """
    max_eigenvalue = x.shape[1]
    projection = None
    for n_eigen in range(max_eigenvalue):
        _, a, var = PCA(normalize(x), n_eigen)
        # keep adding components until the 95% threshold is reached
        if var < .95: continue
        projection = a
        break
    # colors = [int(i % np.unique(y).shape[0]) for i in y]
    plt.scatter(projection[0], projection[1], c=y)
    plt.title("Data points after PCA")
    plt.xlabel("x1")
    plt.ylabel("x2")
    plt.show()
# Script entry: plot explained variance vs. number of eigenvectors, plot the
# 2D projection of the data, then print the reconstruction errors.
find_threshold(x)
plot_PCA_data(x, y)
reverse_PCA(x)
| 2.46875 | 2 |
src/rubrix/metrics/text_classification/metrics.py | anunn1417/Rubrix | 0 | 12768594 | from rubrix import _client_instance as client
from rubrix.metrics import helpers
from rubrix.metrics.models import MetricSummary
def f1(name: str) -> MetricSummary:
    """Computes the single label f1 metric for a dataset

    Args:
        name:
            The dataset name.

    Returns:
        The f1 metric summary

    Examples:
        >>> from rubrix.metrics.text_classification import f1
        >>> summary = f1(name="example-dataset")
        >>> summary.visualize() # will plot a bar chart with results
        >>> summary.data # returns the raw result data
    """
    metric = client().calculate_metric(name, metric="F1")

    def _visualize():
        # rendered lazily when the caller asks for the visualization
        return helpers.bar(metric.results, title=metric.description)

    return MetricSummary.new_summary(data=metric.results, visualization=_visualize)
def f1_multilabel(name: str) -> MetricSummary:
    """Computes the multi-label label f1 metric for a dataset

    Args:
        name:
            The dataset name.

    Returns:
        The f1 metric summary

    Examples:
        >>> from rubrix.metrics.text_classification import f1_multilabel
        >>> summary = f1_multilabel(name="example-dataset")
        >>> summary.visualize() # will plot a bar chart with results
        >>> summary.data # returns the raw result data
    """
    metric = client().calculate_metric(name, metric="MultiLabelF1")

    def _bar_data():
        # flatten micro/macro plus the per-label scores for the bar chart;
        # pass empty results through unchanged
        if not metric.results:
            return metric.results
        merged = {"micro": metric.results["micro"], "macro": metric.results["macro"]}
        merged.update(metric.results["per_label"])
        return merged

    return MetricSummary.new_summary(
        data=metric.results,
        visualization=lambda: helpers.bar(_bar_data(), title=metric.description),
    )
| 2.859375 | 3 |
src/masonite_commerce/migrations/create_commerce_comments_table.py | yubarajshrestha/masonite-commerce | 1 | 12768595 | <reponame>yubarajshrestha/masonite-commerce
"""CreateCommerceCommentsTable Migration."""
from masoniteorm.migrations import Migration
class CreateCommerceCommentsTable(Migration):
def up(self):
"""
Run the migrations.
"""
with self.schema.create("commerce_comments") as table:
table.increments("id")
table.integer("creator_id").unsigned().nullable()
table.foreign("creator_id").references("id").on("users").on_delete("set null")
table.integer("parent_id").unsigned().nullable()
table.foreign("parent_id").references("id").on("commerce_comments").on_delete(
"set null"
)
table.integer("product_id").unsigned()
table.foreign("product_id").references("id").on("commerce_products").on_delete(
"cascade"
)
table.text("content")
table.string("status", 12) # draft, published, archived
table.timestamps()
def down(self):
"""
Revert the migrations.
"""
self.schema.drop("commerce_comments")
| 2.484375 | 2 |
PhysicsTools/NanoAOD/python/custom_jme_cff.py | scodella/cmssw | 1 | 12768596 | import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Modifier_run2_miniAOD_80XLegacy_cff import run2_miniAOD_80XLegacy
from Configuration.Eras.Modifier_run2_nanoAOD_94X2016_cff import run2_nanoAOD_94X2016
from PhysicsTools.NanoAOD.common_cff import Var, P4Vars
from PhysicsTools.NanoAOD.jets_cff import jetTable
from PhysicsTools.PatAlgos.tools.jetCollectionTools import GenJetAdder, RecoJetAdder
import copy
#
# By default, these collections are saved in NanoAODs:
# - ak4gen (GenJet in NanoAOD)
# - ak8gen (GenJetAK8 in NanoAOD)
# Below is a list of genjets that we can save in NanoAOD. Set
# "enabled" to true if you want to store the jet collection
config_genjets = [
{
"jet" : "ak5gen",
"enabled" : False,
},
{
"jet" : "ak6gen",
"enabled" : False,
},
{
"jet" : "ak7gen",
"enabled" : False,
},
{
"jet" : "ak9gen",
"enabled" : False,
},
{
"jet" : "ak10gen",
"enabled" : False,
},
]
config_genjets = list(filter(lambda k: k['enabled'], config_genjets))
#
# GenJets info in NanoAOD
#
nanoInfo_genjets = {
"ak5gen" : {
"name" : "GenJetAK5",
"doc" : "AK5 jets",
},
"ak6gen" : {
"name" : "GenJetAK6",
"doc" : "AK6 jets",
},
"ak7gen" : {
"name" : "GenJetAK7",
"doc" : "AK9 jets",
},
"ak9gen" : {
"name" : "GenJetAK9",
"doc" : "AK9 jets",
},
"ak10gen" : {
"name" : "GenJetAK10",
"doc" : "AK10 jets",
},
}
#
# By default, these collections are saved in NanoAODs:
# - ak4pfchs (Jet in NanoAOD)
# - ak8pfpuppi (FatJet in NanoAOD)
# By default, the ak4pfchs (Jet) and ak8pfpuppi (FatJet) collections
# are saved in NanoAODs.
# Below is a list of recojets that we can save in NanoAOD. Set "enabled"
# to true if you want to store the recojet collection.
#
config_recojets = [
{
"jet" : "ak4pfpuppi",
"enabled" : True,
"inputCollection" : "slimmedJetsPuppi", #Exist in MiniAOD
"genJetsCollection": "slimmedGenJets",
},
{
"jet" : "ak4calo",
"enabled" : True,
"inputCollection" : "slimmedCaloJets", #Exist in MiniAOD
"genJetsCollection": "slimmedGenJets",
},
{
"jet" : "ak4pf",
"enabled" : True,
"inputCollection" : "",
"genJetsCollection": "slimmedGenJets",
},
{
"jet" : "ak8pf",
"enabled" : True,
"inputCollection" : "",
"genJetsCollection": "slimmedGenJetsAK8",
},
{
"jet" : "ak8pfchs",
"enabled" : True,
"inputCollection" : "",
"genJetsCollection": "slimmedGenJetsAK8",
},
{
"jet" : "ak6pf",
"enabled" : False,
"inputCollection" : "",
"genJetsCollection": "AK6GenJetsNoNu",
},
{
"jet" : "ak10pf",
"enabled" : False,
"inputCollection" : "",
"genJetsCollection": "AK10GenJetsNoNu",
},
]
config_recojets = list(filter(lambda k: k['enabled'], config_recojets))
#
# RecoJets info in NanoAOD
#
nanoInfo_recojets = {
"ak4pfpuppi" : {
"name" : "JetPUPPI",
"doc" : "AK4PFPUPPI jets",
},
"ak4calo" : {
"name": "JetCalo",
"doc" : "AK4Calo jets",
},
"ak4pf" : {
"name": "JetPF",
"doc" : "AK4PF jets",
},
"ak8pf" : {
"name": "FatJetPF",
"doc" : "AK8PF jets",
},
"ak8pfchs" : {
"name" : "FatJetCHS",
"doc" : "AK8PFCHS jets",
},
"ak6pf" : {
"name": "JetAK6PF",
"doc" : "AK6PF jets",
},
"ak10pf" : {
"name" : "FatJetAK10PF",
"doc" : "AK10PF jets",
},
}
#
# The reco jet names already exists
# in NanoAOD.
#
recojetNameInNano = [ "Jet", "FatJet" ]
#
# The gen jet names already exists
# in NanoAOD.
#
genjetNameInNano = [ "GenJet", "GenJetAK8" ]
JETVARS = cms.PSet(P4Vars,
HFHEF = Var("HFHadronEnergyFraction()", float, doc = "energy fraction in forward hadronic calorimeter", precision = 6),
HFEMEF = Var("HFEMEnergyFraction()", float, doc = "energy fraction in forward EM calorimeter", precision = 6),
area = jetTable.variables.area,
chHEF = jetTable.variables.chHEF,
neHEF = jetTable.variables.neHEF,
chEmEF = jetTable.variables.chEmEF,
neEmEF = jetTable.variables.neEmEF,
muEF = jetTable.variables.muEF,
rawFactor = jetTable.variables.rawFactor,
jetId = jetTable.variables.jetId,
jercCHPUF = jetTable.variables.jercCHPUF,
jercCHF = jetTable.variables.jercCHF,
)
for modifier in run2_miniAOD_80XLegacy, run2_nanoAOD_94X2016:
modifier.toModify(JETVARS,
jetId = Var("userInt('tightId')*2+userInt('looseId')", int, doc = "Jet ID flags bit1 is loose, bit2 is tight")
)
#============================================
#
# TableGenJetAdder
#
#============================================
class TableGenJetAdder(object):
"""
Tool to store gen jet variables in NanoAOD for customized
gen jet collections.
"""
def __init__(self):
self.main = []
def getSequence(self, proc):
"""
Tool to add
"""
tasks = self.main
resultSequence = cms.Sequence()
for idx, task in enumerate(tasks):
if idx == 0:
resultSequence = cms.Sequence(getattr(proc, task))
else:
resultSequence.insert(idx, getattr(proc, task))
return resultSequence
def addTable(self, proc, genJetInfo):
currentTasks = []
print("custom_jme_cff::TableGenJetAdder::addTable: Adding Table for GenJet Collection: {}".format(genJetInfo.jet))
name = nanoInfo_genjets[genJetInfo.jet]["name"]
doc = nanoInfo_genjets[genJetInfo.jet]["doc"]
if name in genjetNameInNano:
raise RuntimeError('GenJet collection name (%s) taken in NanoAOD for %s' %(name, genJetInfo.jet))
#
# GenJet Table
#
table = "{}Table".format(genJetInfo.jetTagName)
genJetsCollection = "{}{}{}".format(genJetInfo.jetAlgo.upper(), genJetInfo.jetSize, 'GenJetsNoNu')
setattr(proc, table, cms.EDProducer("SimpleCandidateFlatTableProducer",
src = cms.InputTag(genJetsCollection),
cut = cms.string(""),
name = cms.string(name),
doc = cms.string('{} (generator level)'.format(doc)),
singleton = cms.bool(False),
extension = cms.bool(False),
variables = cms.PSet(P4Vars,
area = jetTable.variables.area,
),
)
)
currentTasks.append(table)
#
# GenJet Flavour Table
#
genFlavour = "{}Flavour".format(genJetInfo.jetTagName)
genFlavourTable = "{}Table".format(genFlavour)
if genFlavourTable in self.main:
raise ValueError("Step '%s' already implemented" % genFlavourTable)
setattr(proc, genFlavourTable, cms.EDProducer("GenJetFlavourTableProducer",
name = cms.string(name),
src = cms.InputTag(genJetsCollection),
cut = cms.string(""),
deltaR = cms.double(0.1),
jetFlavourInfos = cms.InputTag(genFlavour),
)
)
currentTasks.append(genFlavourTable)
self.main.extend(currentTasks)
#============================================
#
# TableRecoJetAdder
#
#============================================
class TableRecoJetAdder(object):
"""
Tool to store reco jet variables in NanoAOD for customized
reco jet collections.
"""
def __init__(self):
self.main = []
def getSequence(self, proc):
tasks = self.main
resultSequence = cms.Sequence()
for idx, task in enumerate(tasks):
if idx == 0:
resultSequence = cms.Sequence(getattr(proc, task))
else:
resultSequence.insert(idx, getattr(proc, task))
return resultSequence
def addTable(self, proc, recoJetInfo):
currentTasks = []
print("custom_jme_cff::TableRecoJetAdder::addTable: Adding Table for Reco Jet Collection: {}".format(recoJetInfo.jet))
name = nanoInfo_recojets[recoJetInfo.jet]["name"]
doc = nanoInfo_recojets[recoJetInfo.jet]["doc"]
if name in recojetNameInNano:
raise RuntimeError('RecoJet collection name (%s) taken in NanoAOD for %s' %(name, recoJetInfo.jet))
table = "{}Table".format(recoJetInfo.jetTagName)
if recoJetInfo.skipUserData:
if recoJetInfo.doCalo:
tableContents = cms.PSet(
P4Vars,
area = jetTable.variables.area,
rawFactor = jetTable.variables.rawFactor,
emf = Var("emEnergyFraction()", float, doc = "electromagnetic energy fraction", precision = 10),
)
else:
tableContents = cms.PSet(
P4Vars,
area = jetTable.variables.area,
rawFactor = jetTable.variables.rawFactor,
)
else:
tableContents = JETVARS.clone()
updatedJets = "updatedJets{}".format(recoJetInfo.jetTagName)
setattr(proc, table, cms.EDProducer("SimpleCandidateFlatTableProducer",
src = cms.InputTag(updatedJets),
cut = cms.string(""),
name = cms.string(name),
doc = cms.string(doc),
singleton = cms.bool(False),
extension = cms.bool(False),
variables = tableContents,
)
)
currentTasks.append(table)
tightJetIdLepVeto = "tightJetIdLepVeto{}".format(recoJetInfo.jetTagName)
if not recoJetInfo.skipUserData:
altTasks = copy.deepcopy(currentTasks)
for idx, task in enumerate(altTasks):
if task == tightJetIdLepVeto:
altTasks[idx] = looseJetId
for modifier in run2_miniAOD_80XLegacy, run2_nanoAOD_94X2016:
modifier.toReplaceWith(currentTasks, altTasks)
self.main.extend(currentTasks)
def PrepJMECustomNanoAOD(process):
#
# Additional variables to AK4GenJets
#
process.genJetTable.variables.area = JETVARS.area
#
# Additional variables to AK8GenJets
#
process.genJetAK8Table.variables.area = JETVARS.area
#
# Additional variables for AK4PFCHS
#
process.jetTable.variables.HFHEF = JETVARS.HFHEF
process.jetTable.variables.HFEMEF = JETVARS.HFEMEF
#
# Additional variables to AK8PFPUPPI
#
# These variables are not stored for AK8PFCHS (slimmedJetsAK8)
# in MiniAOD if their pt < 170 GeV. Hence the conditional fill.
#
process.fatJetTable.variables.chHEF = Var("?isPFJet()?chargedHadronEnergyFraction():-1", float, doc="charged Hadron Energy Fraction", precision = 6)
process.fatJetTable.variables.neHEF = Var("?isPFJet()?neutralHadronEnergyFraction():-1", float, doc="neutral Hadron Energy Fraction", precision = 6)
process.fatJetTable.variables.chEmEF = Var("?isPFJet()?chargedEmEnergyFraction():-1", float, doc="charged Electromagnetic Energy Fraction", precision = 6)
process.fatJetTable.variables.neEmEF = Var("?isPFJet()?neutralEmEnergyFraction():-1", float, doc="neutral Electromagnetic Energy Fraction", precision = 6)
process.fatJetTable.variables.muEF = Var("?isPFJet()?muonEnergyFraction():-1", float, doc="muon Energy Fraction", precision = 6)
process.fatJetTable.variables.HFHEF = Var("?isPFJet()?HFHadronEnergyFraction():-1", float, doc="energy fraction in forward hadronic calorimeter", precision = 6)
process.fatJetTable.variables.HFEMEF = Var("?isPFJet()?HFEMEnergyFraction():-1", float, doc="energy fraction in forward EM calorimeter", precision = 6)
#
#
#
process.jercVarsFatJet = process.jercVars.clone(
srcJet = "updatedJetsAK8",
maxDR = 0.8,
)
process.jetSequence.insert(process.jetSequence.index(process.updatedJetsAK8WithUserData), process.jercVarsFatJet)
process.updatedJetsAK8WithUserData.userFloats.jercCHPUF = cms.InputTag(
"%s:chargedHadronPUEnergyFraction" % process.jercVarsFatJet.label()
)
process.updatedJetsAK8WithUserData.userFloats.jercCHF = cms.InputTag(
"%s:chargedHadronCHSEnergyFraction" % process.jercVarsFatJet.label()
)
process.fatJetTable.variables.jercCHPUF = JETVARS.jercCHPUF
process.fatJetTable.variables.jercCHF = JETVARS.jercCHF
#
# Remove any pT cuts.
#
process.finalJets.cut = "" # 15 -> 10
process.finalJetsAK8.cut = "" # 170 -> 170
process.genJetTable.cut = "" # 10 -> 8
process.genJetFlavourTable.cut = "" # 10 -> 8
process.genJetAK8Table.cut = "" # 100 -> 80
process.genJetAK8FlavourTable.cut = "" # 100 -> 80
######################################################################################################################
#
# Add GenJets to NanoAOD
#
genJA = GenJetAdder()
tableGenJA = TableGenJetAdder()
for jetConfig in config_genjets:
cfg = { k : v for k, v in jetConfig.items() if k != "enabled" }
genJetInfo = genJA.addGenJetCollection(process, **cfg)
tableGenJA.addTable(process, genJetInfo)
process.nanoSequenceMC += genJA.getSequence(process)
process.nanoSequenceMC += tableGenJA.getSequence(process)
#
# Add RecoJets to NanoAOD
#
recoJA = RecoJetAdder()
tableRecoJA = TableRecoJetAdder()
for jetConfig in config_recojets:
cfg = { k : v for k, v in jetConfig.items() if k != "enabled" }
recoJetInfo = recoJA.addRecoJetCollection(process, **cfg)
tableRecoJA.addTable(process, recoJetInfo)
process.nanoSequenceMC += recoJA.getSequence(process)
process.nanoSequenceMC += tableRecoJA.getSequence(process)
| 1.828125 | 2 |
transaction_service/transactions/migrations/0010_rename_date_transactions_date_provided.py | deorz/TransactionService | 0 | 12768597 | # Generated by Django 4.0.3 on 2022-03-13 15:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('transactions', '0009_wallets_date_created'),
]
operations = [
migrations.RenameField(
model_name='transactions',
old_name='date',
new_name='date_provided',
),
]
| 1.679688 | 2 |
tests/datasets/seq_labeling/test_conll_yago_dataset.py | sean-dingxu/sciwing | 50 | 12768598 | import pytest
from sciwing.datasets.seq_labeling.conll_yago_dataset import ConllYagoDatasetsManager
from sciwing.datasets.seq_labeling.conll_yago_dataset import ConllYagoDataset
import sciwing.constants as constants
import pathlib
from sciwing.tokenizers.word_tokenizer import WordTokenizer
PATHS = constants.PATHS
DATA_DIR = PATHS["DATA_DIR"]
DATA_DIR = pathlib.Path(DATA_DIR)
@pytest.fixture(
params=["conll_yago_ner.train", "conll_yago_ner.dev", "conll_yago_ner.test"],
scope="session",
)
def conll_yago_dataset(request):
train_filename = DATA_DIR.joinpath(request.param)
dataset = ConllYagoDataset(
filename=str(train_filename),
tokenizers={"tokens": WordTokenizer(tokenizer="vanilla")},
column_names=["NER"],
)
return dataset
@pytest.fixture(scope="session")
def conll_yago_dataset_manager():
train_filename = DATA_DIR.joinpath("conll_yago_ner.train")
dev_filename = DATA_DIR.joinpath("conll_yago_ner.dev")
test_filename = DATA_DIR.joinpath("conll_yago_ner.test")
dataset_manager = ConllYagoDatasetsManager(
train_filename=str(train_filename),
dev_filename=str(dev_filename),
test_filename=str(test_filename),
)
return dataset_manager
class TestConllYagoDataset:
def test_get_lines_labels(self, conll_yago_dataset):
dataset = conll_yago_dataset
try:
lines, labels = dataset.get_lines_labels()
assert len(lines) > 0
assert len(labels) > 0
except:
pytest.fail("Getting Lines and Labels failed")
def test_labels_namespace(self, conll_yago_dataset):
dataset = conll_yago_dataset
lines, labels = dataset.get_lines_labels()
for label in labels:
namespaces = label.namespace
assert len(namespaces) == 1
assert "NER" in namespaces
def test_lines_labels_length(self, conll_yago_dataset):
dataset = conll_yago_dataset
lines, labels = dataset.get_lines_labels()
for line, label in zip(lines, labels):
line_tokens = line.tokens["tokens"]
labels_ner = label.tokens["NER"]
assert len(line_tokens) == len(labels_ner)
def test_conll_yago_dataset_manager(self, conll_yago_dataset_manager):
dataset_manager = conll_yago_dataset_manager
tokens_vocab = dataset_manager.namespace_to_vocab["tokens"]
assert tokens_vocab.get_vocab_len() > 0
def test_context_tokens_has_no_none(self, conll_yago_dataset):
dataset = conll_yago_dataset
lines, labels = dataset.get_lines_labels()
for line in lines:
context_tokens = line.tokens["contextual_tokens"]
assert "None" not in context_tokens
| 2.046875 | 2 |
DEW/build/lib/dependencyGraph/dg.py | STEELISI/DEW-public | 0 | 12768599 | <gh_stars>0
import networkx as nx
import sys
from appJar import gui
try:
# Python 3
import tkinter as tk
import tkinter.messagebox as tkm
import tkinter.simpledialog as tkd
from tkinter.font import Font
except ImportError:
# Python 2
from tkFont import Font
import Tkinter as tk
import tkMessageBox as tkm
import tkSimpleDialog as tkd
sys.path.append("..")
import globals
from netxCanvas.canvas import netxCanvas, GraphCanvas
from netxCanvas.style import NodeClass
from HighLevelBehaviorLanguage.hlb_parser import HLBParser
class dgStyle(NodeClass):
def render(self, data, node_name):
# Figure out what size the text we want is.
label_txt = data.get('label', None)
font = Font(family="Helvetica", size=12)
h = font.metrics("linespace") + 1
if label_txt:
w = font.measure(label_txt) + 2
else:
w = font.measure("....") + 2
self.config(width=w, height=h)
marker_options = {'fill': data.get('color','blue'), 'outline': 'white'}
if data.get('circle', None):
self.create_oval(0,0,w,h, **marker_options)
self.config(width=w, height=h)
if label_txt:
self.create_text(w/2, h/2, text=label_txt, font=font, fill="white")
else:
self.create_rectangle(0,0,w,h, **marker_options)
if label_txt:
self.create_text(w/2, h/2, text=label_txt, font=font, fill="white")
class dependencyGraphHandler(GraphCanvas, object):
added_behaviors = []
def __init__(self, canvas, width=0, height=0, **kwargs):
self.node_index = 2
self.dummy_node_needed = False
G = nx.Graph()
G.add_node(0)
G.node[0]['label'] = ''
G.node[0]['actors'] = []
G.node[0]['e_events'] = ['startTrigger', 't0']
G.node[0]['triggeredby'] = []
G.node[0]['color'] = 'green'
G.node[0]['circle'] = True
try:
# Python3
super().__init__(G, master=canvas, width=width, height=height, NodeClass=dgStyle, **kwargs)
except TypeError:
# Python 2
super(dependencyGraphHandler, self).__init__(G, master=canvas, width=width, height=height, NodeClass=dgStyle, home_node=0, **kwargs)
#self._draw_node((width, height), 0)
self.pack()
self.parser = HLBParser()
def setoffsets(self, xoffset=0, yoffset=0, width=0, height=0):
self.xoffset = xoffset
self.yoffset = yoffset
self.height = height
self.width = width
def dict_from_behaviors(self):
# Creates a graph from the current listed behaviors.
bdict = {}
for b in globals.behaviors:
statement = globals.behaviors[b]
(t_events, actors, action, e_events, wait_time) = self.parser.parse_stmt(statement)
try:
action = ''.join(action)
except TypeError:
pass
if actors == None:
# We failed to parse the statement, so skip it.
continue
if t_events == None:
t_events = ['startTrigger']
if e_events == None:
e_events = []
# action = [[t_events], [e_events], [actors]]
if action in bdict:
i = 0
for tup in bdict[action]:
if set(t_events) == set(tup[0]) and set(e_events) == set(tup[1]):
bdict[action][i][2] = list(set(bdict[action][i][2] + actors))
i = i + 1
else:
# easy add, we know we have no duplicates.
bdict[action] = []
bdict[action].append([list(set(t_events)),list(set(e_events)), list(set(actors))])
return bdict
def add_new_node(self, actors, action, e_events, t_events):
# Add node to new graph
#num_nodes = len(self.G)
num_nodes = self.node_index
self.node_index = self.node_index + 1
self.G.add_node(num_nodes)
self.G.node[num_nodes]['actors'] = list(set(actors))
try:
self.G.node[num_nodes]['label'] = ''.join(action)
except TypeError:
self.G.node[num_nodes]['label'] = action
if e_events != None:
self.G.node[num_nodes]['e_events'] = list(set(e_events))
else:
self.G.node[num_nodes]['e_events'] = []
if t_events != None:
self.G.node[num_nodes]['triggeredby'] = list(set(t_events))
else:
print("Triggered by start.")
self.G.node[num_nodes]['triggeredby'] = ['startTrigger']
self.G.node[num_nodes]['color'] = 'blue'
# self.Go through emits and make connections.
for e in self.G.node[num_nodes]['e_events']:
for n in self.G.nodes():
for t in self.G.nodes[n]['triggeredby']:
if t.strip() == e:
self.G.add_edge(num_nodes, n)
# Go through triggeredby and make connections.
for t in self.G.node[num_nodes]['triggeredby']:
for n in self.G.nodes():
for e in self.G.nodes[n]['e_events']:
if e.strip() == t:
if n not in self.G.neighbors(num_nodes):
self.G.add_edge(num_nodes, n)
return num_nodes
def plot_changes(self, newdict):
# First go through and remove nodes that have disappeared.
remove_nodes = []
for n in self.G.nodes():
found = False
Glabel = self.G.nodes[n].get('label', 'XXNONEXX')
if Glabel in newdict:
i = 0
for tup in newdict[Glabel]:
if set(self.G.nodes[n]['triggeredby']) == set(tup[0]) and set(self.G.nodes[n]['e_events']) == set(tup[1]):
found = True
self.G.nodes[n]['actors'] = list(set(newdict[Glabel][i][2]))
break
i = i + 1
if found:
# Remove the found info from the newdict since we don't want to add duplicate nodes.
newdict[Glabel].remove(tup)
if len (newdict[Glabel]) == 0:
del newdict[Glabel]
if not found and n != 0 and n != 1:
# Remove node if it's not in our new graph and not our start node.
print("REMOVING %s" % (Glabel))
remove_nodes.append(n)
for n in remove_nodes:
self.remove_node(n)
if len(remove_nodes) > 0:
self.refresh()
new = []
for label in newdict:
print("ADDING %s" % label)
for tup in newdict[label]:
# Add this new node to our data graph and plot list.
new_id = self.add_new_node(set(tup[2]), label, set(tup[1]), set(tup[0]))
print("Adding %s/%d" % (label, new_id))
new.append(new_id)
if len(new) > 0:
print("Plotting dg nodes:")
print(new)
self._plot_additional(new)
print("Plotted addtional")
# Check if everything's connected. If not, use self.dummy_node_needed
# to trigger needing the <UNKNOWN> trigger node.
have_known_trigger = []
have_no_trigger = []
for n,d in self.G.degree():
if n != 1 and n !=0:
if d==0 or (1 in self.G.nodes() and d==1 and n in self.G.neighbors(1)):
have_no_trigger.append(n)
else:
have_known_trigger.append(n)
if len(have_no_trigger) > 0:
plot_needed = False
if 1 not in self.G.nodes():
print("Adding node 1")
plot_needed = True
self.G.add_node(1)
self.G.node[1]['label'] = '<UNKNOWN>'
self.G.node[1]['actors'] = []
self.G.node[1]['e_events'] = ['unknownTrigger']
self.G.node[1]['triggeredby'] = ['startTrigger']
self.G.node[1]['color'] = 'red'
self.G.add_edge(0,1)
for n in have_known_trigger:
if n in self.G.neighbors(1):
self.G.remove_edge(1, n)
for n in have_no_trigger:
self.G.add_edge(1, n)
if plot_needed:
self._plot_additional([1])
self.refresh()
if len(have_no_trigger) == 0 and (1 in self.G.nodes()):
print("Removing 1.")
self.remove_node(1)
self.refresh()
def add_new_behavior(self, statement):
# Create new graph:
new = self.dict_from_behaviors()
self.plot_changes(new)
#self.plot(0)
#self._plot_additional(self.G.nodes())
#self._plot_additional([num_nodes])
#self.refresh()
def find_label(self, name):
for n in G.nodes():
data.get('label', None)
if label:
if label == name:
return n
return None
#def add_hlb_line(line):
# if line in self.processed
| 2.46875 | 2 |
pgweb/urls.py | WeilerWebServices/PostgreSQL | 0 | 12768600 | from django.conf.urls import include, url
from django.views.generic import RedirectView
from pgweb.util.signals import register_basic_signal_handlers
import pgweb.contributors.views
import pgweb.core.views
import pgweb.docs.views
import pgweb.downloads.views
import pgweb.events.views
import pgweb.featurematrix.views
import pgweb.legacyurl.views
import pgweb.lists.views
import pgweb.misc.views
import pgweb.news.views
import pgweb.profserv.views
import pgweb.pugs.views
import pgweb.search.views
import pgweb.security.views
import pgweb.sponsors.views
import pgweb.survey.views
from pgweb.core.feeds import VersionFeed
from pgweb.news.feeds import NewsFeed
from pgweb.events.feeds import EventFeed
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# Register our save signal handlers
register_basic_signal_handlers()
urlpatterns = [
url(r'^$', pgweb.core.views.home),
url(r'^dyncss/(?P<css>base|docs).css$', pgweb.core.views.dynamic_css),
url(r'^about/$', pgweb.core.views.about),
url(r'^about/newsarchive/([^/]+/)?$', pgweb.news.views.archive),
url(r'^about/news/(\d+)(-.*)?/$', pgweb.news.views.item),
url(r'^about/news/taglist.json/$', pgweb.news.views.taglist_json),
url(r'^about/events/$', pgweb.events.views.main),
url(r'^about/eventarchive/$', pgweb.events.views.archive),
url(r'^about/event/(\d+)(-.*)?/$', pgweb.events.views.item),
url(r'^about/featurematrix/$', pgweb.featurematrix.views.root),
url(r'^about/featurematrix/detail/(\d+)/$', pgweb.featurematrix.views.detail),
url(r'^about/privacypolicy/$', RedirectView.as_view(url='/about/policies/privacy/', permanent=True)),
url(r'^ftp/(.*/)?$', pgweb.downloads.views.ftpbrowser),
url(r'^download/mirrors-ftp/+(.*)$', pgweb.downloads.views.mirrorselect),
url(r'^download/product-categories/$', pgweb.downloads.views.categorylist),
url(r'^download/products/(\d+)(-.*)?/$', pgweb.downloads.views.productlist),
url(r'^applications-v2.xml$', pgweb.downloads.views.applications_v2_xml),
url(r'^download/uploadftp/', pgweb.downloads.views.uploadftp),
url(r'^download/uploadyum/', pgweb.downloads.views.uploadyum),
url(r'^download/js/yum.js', pgweb.downloads.views.yum_js),
url(r'^docs/$', pgweb.docs.views.root),
url(r'^docs/manuals/$', pgweb.docs.views.manuals),
url(r'^docs/manuals/archive/$', pgweb.docs.views.manualarchive),
url(r'^docs/release/$', pgweb.docs.views.release_notes),
url(r'^docs/release/((?P<major_version>(\d+\.\d+)|\d+)\.(?P<minor_version>\d+))/$', pgweb.docs.views.release_notes),
# Legacy URLs for accessing the docs page; provides a permanent redirect
url(r'^docs/(current|devel|\d+(?:\.\d)?)/(static|interactive)/((.*).html?)?$', pgweb.docs.views.docspermanentredirect),
url(r'^docs/(current|devel|\d+(?:\.\d)?)/(.*).html?$', pgweb.docs.views.docpage),
url(r'^docs/(current|devel|\d+(?:\.\d)?)/(.*).svg$', pgweb.docs.views.docsvg),
url(r'^docs/(current|devel|\d+(?:\.\d)?)/$', pgweb.docs.views.docsrootpage),
url(r'^docs/(current|devel|\d+(?:\.\d)?)/$', pgweb.docs.views.redirect_root),
url(r'^community/$', pgweb.core.views.community),
url(r'^community/contributors/$', pgweb.contributors.views.completelist),
url(r'^community/lists/$', RedirectView.as_view(url='/list/', permanent=True)),
url(r'^community/lists/subscribe/$', RedirectView.as_view(url='https://lists.postgresql.org/', permanent=True)),
url(r'^community/lists/listinfo/$', pgweb.lists.views.listinfo),
url(r'^community/survey/vote/(\d+)/$', pgweb.survey.views.vote),
url(r'^community/survey[/\.](\d+)(-.*)?/$', pgweb.survey.views.results),
url(r'^community/user-groups/$', pgweb.pugs.views.index),
url(r'^search/$', pgweb.search.views.search),
url(r'^support/security/$', pgweb.security.views.index),
url(r'^support/security/(\d\.\d|\d{2})/$', pgweb.security.views.version),
url(r'^support/security_archive/$', RedirectView.as_view(url='/support/security/', permanent=True)),
url(r'^support/professional_(support|hosting)/$', pgweb.profserv.views.root),
url(r'^support/professional_(support|hosting)[/_](.*)/$', pgweb.profserv.views.region),
url(r'^account/submitbug/$', pgweb.misc.views.submitbug),
url(r'^account/submitbug/(\d+)/$', pgweb.misc.views.submitbug_done),
url(r'^support/submitbug/$', RedirectView.as_view(url='/account/submitbug/', permanent=True)),
url(r'^support/versioning/$', pgweb.core.views.versions),
url(r'^bugs_redir/(\d+)/$', pgweb.misc.views.bugs_redir),
url(r'^about/sponsors/$', pgweb.sponsors.views.sponsors),
url(r'^about/servers/$', pgweb.sponsors.views.servers),
url(r'^robots.txt$', pgweb.core.views.robots),
###
# RSS feeds
###
url(r'^versions.rss$', VersionFeed()),
url(r'^news(/(?P<tagurl>[^/]+))?.rss$', NewsFeed()),
url(r'^events.rss$', EventFeed()),
###
# Special sections
###
url(r'^account/', include('pgweb.account.urls')),
###
# Sitemap (FIXME: support for >50k urls!)
###
url(r'^sitemap.xml', pgweb.core.views.sitemap),
url(r'^sitemap_internal.xml', pgweb.core.views.sitemap_internal),
###
# Workaround for broken links pushed in press release
###
url(r'^downloads/$', RedirectView.as_view(url='/download/', permanent=True)),
###
# Legacy URLs from old structurs, but used in places like press releases
# so needs to live a bit longer.
###
url(r'^about/press/contact/$', RedirectView.as_view(url='/about/press/', permanent=True)),
###
# Images that are used from other community sites
###
url(r'^layout/images/(?P<f>[a-z0-9_\.]+)$', RedirectView.as_view(url='/media/img/layout/%(f)s', permanent=True)),
###
# Handle redirect on incorrect spelling of licence
###
url(r'^about/license/$', RedirectView.as_view(url='/about/licence', permanent=True)),
###
# Links included in emails on the lists (do we need to check this for XSS?)
###
url(r'^mailpref/([a-z0-9_-]+)/$', pgweb.legacyurl.views.mailpref),
# Some basic information about the connection (for debugging purposes)
url(r'^system_information/$', pgweb.core.views.system_information),
# Sync timestamp, for automirror
url(r'^web_sync_timestamp$', pgweb.core.views.sync_timestamp),
# API endpoints
url(r'^api/varnish/purge/$', pgweb.core.views.api_varnish_purge),
# Override some URLs in admin, to provide our own pages
url(r'^admin/pending/$', pgweb.core.views.admin_pending),
url(r'^admin/purge/$', pgweb.core.views.admin_purge),
url(r'^admin/mergeorg/$', pgweb.core.views.admin_mergeorg),
# Uncomment the next line to enable the admin:
url(r'^admin/', admin.site.urls),
# Crash testing URL :-)
url(r'^crashtest/$', pgweb.misc.views.crashtest),
# Fallback for static pages, must be at the bottom
url(r'^(.*)/$', pgweb.core.views.fallback),
]
| 1.75 | 2 |
python/orp/orp/authority_ner/__init__.py | UKGovernmentBEIS/open-regulation-platform-alpha | 0 | 12768601 | <filename>python/orp/orp/authority_ner/__init__.py<gh_stars>0
#from .extractAuthorities import extract_entities_from_string
from .new_extractAuthorities import extract_entities_from_string
| 1.507813 | 2 |
platform_agent/wireguard/wg_conf.py | leogsilva/syntropy-agent | 0 | 12768602 | import json
import os
import socket
import base64
import logging
import subprocess
import re
from pathlib import Path
import pyroute2
from pyroute2 import IPDB, WireGuard, NetlinkError
from nacl.public import PrivateKey
from platform_agent.cmd.iptables import add_iptable_rules, delete_iptable_rules, add_iptables_forward
from platform_agent.cmd.lsmod import module_loaded
from platform_agent.cmd.wg_show import get_wg_listen_port
from platform_agent.files.tmp_files import get_peer_metadata
from platform_agent.lib.ctime import now
from platform_agent.routes import Routes
from platform_agent.wireguard.helpers import find_free_port, get_peer_info, WG_NAME_PATTERN, WG_SYNTROPY_INT
logger = logging.getLogger()
class WgConfException(Exception):
pass
def delete_interface(ifname):
subprocess.run(['ip', 'link', 'del', ifname], check=False, stderr=subprocess.DEVNULL)
def create_interface(ifname):
try:
subprocess.run(['ip', 'link', 'add', 'dev', ifname, 'type', 'wireguard'], check=True, stderr=subprocess.DEVNULL)
except subprocess.CalledProcessError:
pass
def set_interface_up(ifname):
try:
subprocess.run(['ip', 'link', 'set', 'up', ifname], check=True, stderr=subprocess.DEVNULL)
except subprocess.CalledProcessError:
pass
def set_interface_ip(ifname, ip):
try:
subprocess.run(['ip', 'address', 'add', 'dev', ifname, ip], check=True, stderr=subprocess.DEVNULL)
except subprocess.CalledProcessError:
pass
class WgConf():
    """Manage WireGuard interfaces, peers, keys and routes.

    Uses the kernel WireGuard module (via pyroute2) when it is loaded
    and falls back to the wireguard-go userspace implementation
    otherwise.
    """

    def __init__(self, client=None):
        # True when the 'wireguard' kernel module is available.
        self.wg_kernel = module_loaded('wireguard')
        self.wg = WireGuard() if self.wg_kernel else WireguardGo()
        self.ipdb = IPDB()
        self.routes = Routes()
        # Controller client used to queue WG_ROUTE_STATUS messages.
        self.client = client

    def create_syntropy_interfaces(self, ifaces):
        """Ensure a SYNTROPY_<name> interface exists per entry in *ifaces*.

        Returns "create_interface" events for every interface whose actual
        public key or listen port differs from the requested configuration.
        """
        result = []
        if not ifaces:
            return result
        for ifname in ifaces.keys():
            int_data = self.create_interface(
                "SYNTROPY_" + ifname,
                ifaces[ifname].get('internal_ip'),
                listen_port=ifaces[ifname].get('listen_port'))
            if (int_data.get('public_key') != ifaces[ifname].get('public_key')
                    or int_data.get('listen_port') != ifaces[ifname].get('listen_port')):
                result.append(
                    {
                        "fn": "create_interface",
                        "data": int_data
                    }
                )
        return result

    @staticmethod
    def get_wg_interfaces():
        """Return the names of all WireGuard interfaces managed by the agent."""
        with IPDB() as ipdb:
            current_interfaces = [
                k for k, v in ipdb.by_name.items()
                if re.match(WG_NAME_PATTERN, k) or k in WG_SYNTROPY_INT
            ]
            return current_interfaces

    def clear_interfaces(self, dump, network_dump):
        """Delete local WG interfaces the controller no longer knows about."""
        remote_interfaces = [d['args']['ifname'] for d in dump if d['fn'] == 'create_interface']
        if network_dump:
            remote_interfaces.extend(["SYNTROPY_" + ifname for ifname in network_dump.keys()])
        current_interfaces = self.get_wg_interfaces()
        remove_interfaces = set(current_interfaces) - set(remote_interfaces)
        logger.debug(
            f"Clearing interfaces REMOTE - {remote_interfaces}, CURRENT - {current_interfaces} REMOVE={remove_interfaces}"
        )
        for interface in remove_interfaces:
            self.remove_interface(interface)

    def clear_unused_routes(self, dump):
        """Drop routes on each managed interface that no remote peer claims."""
        remote_peers = [d['args'] for d in dump if d['fn'] == 'add_peer']
        remote_interfaces = [d['args']['ifname'] for d in dump if d['fn'] == 'create_interface']
        for ifname in remote_interfaces:
            allowed_ips = []
            # Fix: the old code rebound `remote_peers` to the results of a
            # comprehension calling list.extend (which returns None), so
            # every interface after the first saw an empty peer list and
            # had all of its routes cleared.
            for peer in remote_peers:
                if peer and peer['ifname'] == ifname:
                    allowed_ips.extend(peer['allowed_ips'])
            self.routes.clear_unused_routes(ifname, allowed_ips)

    def clear_peers(self, dump):
        """Remove local peers that are absent from the controller dump."""
        remote_peers = [d['args']['public_key'] for d in dump if d['fn'] == 'add_peer']
        current_interfaces = self.get_wg_interfaces()
        for iface in current_interfaces:
            peers = get_peer_info(iface, self.wg)
            for peer in peers:
                if peer not in remote_peers:
                    self.remove_peer(iface, peer)

    def get_wg_keys(self, ifname):
        """Return (public_key, private_key) for *ifname*, creating on first use.

        Keys are cached under /etc/syntropy-agent with mode 0600.  On the
        userspace path the private key is returned as a file *path*, which
        is what `wg set private-key` expects.
        """
        private_key_path = f"/etc/syntropy-agent/privatekey-{ifname}"
        public_key_path = f"/etc/syntropy-agent/publickey-{ifname}"
        private_key = Path(private_key_path)
        public_key = Path(public_key_path)
        if not private_key.is_file() or not public_key.is_file():
            privKey = PrivateKey.generate()
            pubKey = base64.b64encode(bytes(privKey.public_key))
            privKey = base64.b64encode(bytes(privKey))
            base64_privKey = privKey.decode('ascii')
            base64_pubKey = pubKey.decode('ascii')
            private_key.write_text(base64_privKey)
            public_key.write_text(base64_pubKey)
            private_key.chmod(0o600)
            public_key.chmod(0o600)
        if self.wg_kernel:
            return public_key.read_text().strip(), private_key.read_text().strip()
        else:
            return public_key.read_text().strip(), private_key_path

    def next_free_port(self, port=1024, max_port=65535):
        """Return the first bindable TCP port >= *port*, or raise IOError."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        while port <= max_port:
            try:
                sock.bind(('', port))
                sock.close()
                return port
            except OSError:
                port += 1
        raise IOError('no free ports')

    def create_interface(self, ifname, internal_ip, listen_port=None, **kwargs):
        """Create/refresh a WG interface and return its public facts.

        Returns a dict with public_key, listen_port, ifname and internal_ip.
        """
        public_key, private_key = self.get_wg_keys(ifname)
        peer_metadata = {'metadata': get_peer_metadata(public_key=public_key)}
        logger.info(
            f"[WG_CONF] - Creating interface {ifname}, {internal_ip} - wg_kernel={self.wg_kernel}",
            extra={'metadata': peer_metadata}
        )
        if self.wg_kernel:
            # Module-level helper (`ip link add ... type wireguard`).
            create_interface(ifname)
        else:
            self.wg.create_interface(ifname)
        set_interface_up(ifname)
        set_interface_ip(ifname, internal_ip)
        self.routes.clear_unused_iface_addrs(ifname, internal_ip.split('/')[0])
        if os.environ.get("SYNTROPY_PORT_RANGE") and not listen_port:
            listen_port = find_free_port()
        try:
            self.wg.set(
                ifname,
                private_key=private_key,
                listen_port=listen_port
            )
        except NetlinkError as error:
            # 98 == EADDRINUSE: requested port taken — let the kernel pick.
            if error.code != 98:
                raise
            else:
                # if port was taken before creating.
                self.wg.set(
                    ifname,
                    private_key=private_key,
                )
                listen_port = self.get_listening_port(ifname)
                if not listen_port:
                    listen_port = find_free_port()
                    self.wg.set(
                        ifname,
                        private_key=private_key,
                        listen_port=listen_port
                    )
        add_iptables_forward(ifname)
        # NOTE(review): listen_port may still be None here when neither the
        # caller nor SYNTROPY_PORT_RANGE provided one — int(None) would
        # raise.  Confirm callers always supply a port on that path.
        result = {
            "public_key": public_key,
            "listen_port": int(listen_port),
            "ifname": ifname,
            "internal_ip": internal_ip
        }
        logger.debug(
            f"[WG_CONF] - interface_created {result}",
            extra={'metadata': peer_metadata}
        )
        return result

    def add_peer(self, ifname, public_key, allowed_ips, gw_ipv4, endpoint_ipv4=None, endpoint_port=None):
        """Add/update a peer on *ifname*, sync routes and report statuses."""
        peer_metadata = get_peer_metadata(public_key=public_key)
        # Fix: peer_info was previously only assigned on the kernel path,
        # which raised NameError below when running on wireguard-go.
        peer_info = {}
        if self.wg_kernel:
            try:
                peer_info = get_peer_info(ifname=ifname, wg=self.wg)
            except ValueError as e:
                raise WgConfException(str(e))
        # Remove routes for IPs this peer no longer announces.
        old_ips = set(peer_info.get(public_key, [])) - set(allowed_ips)
        self.routes.ip_route_del(ifname, old_ips)
        peer = {'public_key': public_key,
                'persistent_keepalive': 15,
                'allowed_ips': allowed_ips}
        if endpoint_ipv4 and endpoint_port:
            peer.update(
                {
                    'endpoint_addr': endpoint_ipv4,
                    'endpoint_port': endpoint_port,
                }
            )
        self.wg.set(ifname, peer=peer)
        statuses = self.routes.ip_route_add(ifname, allowed_ips, gw_ipv4)
        add_iptable_rules(allowed_ips)
        data = {
            "connection_id": peer_metadata.get('connection_id'),
            "public_key": public_key,
            "statuses": statuses,
        }
        self.client.batch_send.queue.put({"data": data, "msg_type": 'WG_ROUTE_STATUS'})

    def remove_peer(self, ifname, public_key, allowed_ips=None):
        """Remove a peer (and optionally its routes/iptables rules)."""
        if ifname not in self.get_wg_interfaces():
            logger.debug(f'[WG_CONF] Remove peer - [{ifname}] does not exist')
            return
        peer = {
            'public_key': public_key,
            'remove': True
        }
        try:
            self.wg.set(ifname, peer=peer)
            if allowed_ips:
                self.routes.ip_route_del(ifname, allowed_ips)
                delete_iptable_rules(allowed_ips)
        except pyroute2.netlink.exceptions.NetlinkError as error:
            # 19 == ENODEV: the interface vanished meanwhile — nothing to do.
            if error.code != 19:
                raise
        return

    def remove_interface(self, ifname):
        """Delete the WG interface *ifname*."""
        logger.debug(f'[WG_CONF] Removing interfcae - [{ifname}]')
        delete_interface(ifname)
        logger.debug(f'[WG_CONF] Removed interfcae - [{ifname}]')
        return

    def get_listening_port(self, ifname):
        """Return the UDP listen port of *ifname* (kernel or userspace)."""
        if self.wg_kernel:
            wg_info = dict(self.wg.info(ifname)[0]['attrs'])
            return wg_info['WGDEVICE_A_LISTEN_PORT']
        else:
            wg_info = self.wg.info(ifname)
            return wg_info['listen_port']
class WireguardGo:
    """Drive the userspace wireguard-go implementation via the `wg` CLI."""

    def set(self, ifname, peer=None, private_key=None, listen_port=None):
        # Build a `wg set` command mirroring pyroute2's WireGuard.set().
        full_cmd = f"wg set {ifname}".split(' ')
        if peer:
            allowed_ips_cmd = ""
            endpoint = f"endpoint {peer['endpoint_addr']}:{peer.get('endpoint_port')} " if peer.get('endpoint_addr') else ""
            if not peer.get('remove'):
                for ip in peer.get('allowed_ips', []):
                    allowed_ips_cmd += f"allowed-ips {ip} "
                peer_cmd = f"peer {peer['public_key']} {allowed_ips_cmd}{endpoint}persistent-keepalive 15".split(
                    ' ')
            else:
                peer_cmd = f"peer {peer['public_key']} remove".split(' ')
            full_cmd += peer_cmd
        if private_key:
            # On the userspace path `private_key` is a file path: `wg`
            # reads the key from a file, not from the command line.
            private_key_cmd = f"private-key {private_key}".split(' ')
            full_cmd += private_key_cmd
        # NOTE(review): a listen-port argument is appended to *every* call,
        # including plain peer updates — when the caller passes no port,
        # find_free_port() rebinds the interface to a new random port each
        # time a peer changes.  Looks unintended; confirm before relying on it.
        if not listen_port:
            listen_port = find_free_port()
        if listen_port:
            listen_port_cmd = f"listen-port {listen_port}".split(' ')
            full_cmd += listen_port_cmd
        result_set = subprocess.run(full_cmd, encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        complete_output = result_set.stdout or result_set.stderr
        complete_output = complete_output or 'Success'
        logger.debug(f"[Wireguard-go] - WG SET - {complete_output} , args {full_cmd}")
        return complete_output

    def create_interface(self, ifname):
        # Spawn the wireguard-go daemon for *ifname* in its own session so
        # it outlives this process.
        try:
            result_set = subprocess.Popen(
                ['wireguard-go', ifname],
                encoding='utf-8',
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                start_new_session=True,
            )
            result_set.wait(timeout=2)
        except FileNotFoundError:
            raise WgConfException(f'Wireguard-go missing')
        # Popen with PIPE yields file objects here (truthy), so the
        # 'Success' fallback is effectively unreachable.
        complete_output = result_set.stdout or result_set.stderr
        complete_output = complete_output or 'Success'
        logger.debug(f"[Wireguard-go] - WG Create - {complete_output.read()} , args {ifname}")
        return complete_output

    def info(self, ifname):
        # Minimal parity with pyroute2's info(): only the listen port.
        return {
            "listen_port": get_wg_listen_port(ifname)
        }
| 1.859375 | 2 |
run.py | GabrielDornelles/SEULGI-SR | 0 | 12768603 | import discord
from discord.ext import commands, tasks
from discord.voice_client import VoiceClient
import youtube_dl
import urllib.parse, urllib.request
import re
import json
from random import choice
# Silence youtube-dl's "please report this issue" boilerplate in error text.
youtube_dl.utils.bug_reports_message = lambda: ''

# youtube-dl extraction options: best available audio, tolerate playlist
# item errors, quiet output, plain-text search fallback, IPv4 only.
ytdl_format_options = {
    'format': 'bestaudio/best',
    'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',
    'restrictfilenames': False,
    'noplaylist': False,
    'nocheckcertificate': True,
    'ignoreerrors': True,
    'logtostderr': False,
    'quiet': True,
    'no_warnings': True,
    'default_search': 'auto',
    'source_address': '0.0.0.0'  # bind to ipv4
}

# Extra flags passed to ffmpeg when building the PCM audio source.
ffmpeg_options = {
    'options': ''
}

# Single shared downloader instance used by YTDLSource.
ytdl = youtube_dl.YoutubeDL(ytdl_format_options)
class YTDLSource(discord.PCMVolumeTransformer):
    """Audio source wrapping youtube-dl extraction as an FFmpeg PCM stream."""

    def __init__(self, source, *, data, volume=0.5):
        super().__init__(source, volume)
        self.data = data  # raw youtube-dl info dict
        self.title = data.get('title')
        self.url = data.get('url')

    @classmethod
    async def from_url(cls, url, *, loop=None, stream=False):
        """Resolve *url* with youtube-dl off the event loop and build a source.

        When *stream* is True the media URL is played directly; otherwise
        the file is downloaded first.
        """
        # Fix: `asyncio` was never imported at module level, so the
        # loop=None fallback raised NameError.  Import locally to keep the
        # module's import block untouched.
        import asyncio
        loop = loop or asyncio.get_event_loop()
        # extract_info is blocking — run it in the default executor.
        data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream))
        if 'entries' in data:
            # take first item from a playlist
            data = data['entries'][0]
        filename = data['url'] if stream else ytdl.prepare_filename(data)
        return cls(discord.FFmpegPCMAudio(filename, **ffmpeg_options), data=data)
# Bot entry point: commands are invoked with the "seulgi " prefix.
client = commands.Bot(command_prefix='seulgi ')
# Example usages shown to users (candidate status messages).
status = ['seulgi queue **url/song name**', 'seulgi view', 'seulgi play ~Song Name~']
# Global FIFO of pending songs (urls or free-text search strings).
queue = []
@client.event
async def on_ready():
    # Fired once the gateway connection is established and ready.
    print('Bot is online!')
@client.event
async def on_member_join(member):
    """Greet every new guild member in the #general channel."""
    greeting_channel = discord.utils.get(member.guild.channels, name='general')
    greeting = f'Welcome {member.mention}! Ready to jam out? See `Seulgi help` command for details!'
    await greeting_channel.send(greeting)
@client.command(name='ping', help='Seulgi returns your latency')
async def ping(ctx):
    """Report the bot's websocket latency in milliseconds."""
    latency_ms = round(client.latency * 1000)
    await ctx.send(f'**Pong!** Latency: {latency_ms}ms')
@client.command(name='hello', help='Seulgi choose a random hello message for you')
async def hello(ctx):
    """Reply with a randomly chosen greeting."""
    greetings = ['***grumble*** Why did you wake me up?', 'Top of the morning to you lad!', 'Hello, how are you?', 'Hi', '**Wasssuup!**']
    await ctx.send(choice(greetings))
@client.command(name="quem", help='This returns the true love')
async def teamo(ctx, *args):
    """Easter egg: answer 'tami' to the magic question, 'quem?' otherwise."""
    question = " ".join(args)
    reply = 'tami' if question == "e meu amor" else "quem?"
    await ctx.send(reply)
@client.command(name='join', help='This command makes the bot join the voice channel')
async def join(ctx):
    """Join the author's current voice channel, or complain if there is none."""
    voice_state = ctx.message.author.voice
    if voice_state:
        await voice_state.channel.connect()
    else:
        await ctx.send("Você não está em um canal, e se nos encontrassemos no Geral? 👉👈")
@client.command(name='queue', help='This command adds a song to the queue')
async def queue_(ctx, url, *args):
    """Append a song (url or multi-word search string) to the global queue."""
    global queue
    # Fix: join extra words with spaces so multi-word titles behave like
    # the `play` command (previously words were concatenated together).
    helptext = url
    for word in args:
        helptext += ' ' + word
    queue.append(helptext)
    # Fix: report the song that was just queued — `queue[0]` showed the
    # wrong entry whenever the queue was already non-empty.
    await ctx.send(f'`{queue[-1]}` added to queue!')
@client.command(name='skip', help='This command skip a song')
async def skip(ctx):
    # Replace the current audio source with the next queued song.
    global queue
    server = ctx.message.guild
    voice_channel = server.voice_client
    if (len(queue)):
        async with ctx.typing():
            player = await YTDLSource.from_url(queue[0], loop=client.loop)
            # Swapping .source skips to the new track without stopping
            # the voice client.
            voice_channel.source = player
            await ctx.send('Now playing: {}'.format(player.title))
            await client.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=('{}'.format(player.title))))
            del(queue[0])
    else:
        await ctx.send("**Your queue is empty!**")
@client.command(name='remove', help='This command removes an item from the list')
async def remove(ctx, number):
    """Remove the queue entry at the given index and echo the new queue."""
    global queue
    try:
        del(queue[int(number)])
        await ctx.send(f'Your queue is now `{queue}!`')
    except (IndexError, ValueError):
        # Fix: the bare `except:` swallowed *everything* (including task
        # cancellation); only a bad index or non-numeric input is expected.
        await ctx.send('Your queue is either **empty** or the index is **out of range**')
@client.command(name='play', help='This command plays songs')
async def play(ctx, url=None, *args):
    # Queue a song and, if nothing is playing, start playback plus the
    # background `playing` watcher that advances the queue.
    global queue
    try:
        await ctx.invoke(client.get_command('join'))
    except:
        # NOTE(review): bare except hides real errors; this likely means
        # "already connected" (discord.ClientException) — confirm.
        pass  # Seulgi is already at the channel
    server = ctx.message.guild
    voice_channel = server.voice_client
    # NOTE(review): with no arguments `url` is None and None gets queued;
    # playing that entry will fail later — confirm intended usage.
    helptext = url
    for word in args:
        helptext += ' ' + word
    queue.append(helptext)
    if voice_channel.is_playing():
        # Already playing: just report the newly queued entry.
        await ctx.send(f'`{queue[len(queue)-1]}` added to queue!')
        return
    async with ctx.typing():
        player = await YTDLSource.from_url(queue[0], loop=client.loop)
        voice_channel.play(player, after=lambda e: print('Player error: %s' % e) if e else None)
    # Start the 1-second watcher that plays the next song when idle.
    playing.start(ctx)
    await ctx.send('**Now playing:** {}'.format(player.title))
    await client.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=('{}'.format(player.title))))
    del(queue[0])
@client.command(name='pause', help='This command pauses the song')
async def pause(ctx):
    """Pause playback and stop the background queue watcher."""
    voice_client = ctx.message.guild.voice_client
    playing.cancel()
    voice_client.pause()
@client.command(name='resume', help='This command resumes the song!')
async def resume(ctx):
    """Resume playback and restart the background queue watcher."""
    voice_client = ctx.message.guild.voice_client
    playing.start(ctx)
    voice_client.resume()
@client.command(name='view', help='This command shows the queue')
async def view(ctx):
    """Show the queue as a pretty-printed index -> song mapping."""
    numbered = dict(enumerate(queue))
    await ctx.send(f'Your queue is now:\n```py\n{json.dumps(numbered, indent=4)}```')
@client.command(name='leave', help='This command stops makes the bot leave the voice channel')
async def leave(ctx):
    """Cancel the queue watcher and disconnect from the voice channel."""
    playing.cancel()
    await ctx.message.guild.voice_client.disconnect()
@client.command(name='stop', help='This command stops the song!')
async def stop(ctx):
    """Stop playback entirely and cancel the background queue watcher."""
    voice_client = ctx.message.guild.voice_client
    playing.cancel()
    voice_client.stop()
@tasks.loop(seconds=1)
async def playing(ctx):
    """Background watcher: start the next queued song when playback is idle."""
    global queue
    voice_channel = ctx.message.guild.voice_client
    # Fix: guard against an empty queue — without it, queue[0] raised
    # IndexError on every tick once the queue drained, killing this task.
    if queue and not voice_channel.is_playing():
        async with ctx.typing():
            player = await YTDLSource.from_url(queue[0], loop=client.loop)
            voice_channel.play(player, after=lambda e: print('Player error: %s' % e) if e else None)
            await ctx.send('**Now playing:** {}'.format(player.title))
            await client.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=('{}'.format(player.title))))
            del (queue[0])
# Start the bot.  Supply the Discord bot token below — keep it out of
# version control (e.g. read it from an environment variable instead).
client.run('') #add your token there and run
| 2.421875 | 2 |
axonius_api_client/cli/grp_system/grp_activity_logs/grp_common.py | kf-careem/axonius_api_client | 11 | 12768604 | <reponame>kf-careem/axonius_api_client
# -*- coding: utf-8 -*-
"""Command line interface for Axonius API Client."""
import csv
import io
from ....api.json_api.audit_logs import AuditLog
from ....tools import json_dump
from ...context import click
# Shared click option group: output-format selector reused by the
# activity-log commands (default: human-readable "str").
EXPORT = [
    click.option(
        "--export-format",
        "-xf",
        "export_format",
        type=click.Choice(["json-raw", "json", "str", "csv"]),
        help="Format to export data in",
        default="str",
        show_envvar=True,
        show_default=True,
    ),
]
def join(obj):
    """Render *obj* as a newline-separated bullet list (one "- item" per line)."""
    bullet = "\n - "
    return bullet + bullet.join(obj)
def handle_export(ctx, data, export_format, **kwargs):
    """Print activity-log rows in *export_format* and exit the CLI context.

    *data* is a list of AuditLog objects; unknown formats fall through
    and return None (original behavior preserved).
    """
    if export_format == "json":
        click.secho(json_dump([x.to_dict() for x in data]))
        ctx.exit(0)

    if export_format == "json-raw":
        click.secho(json_dump([x.raw for x in data]))
        ctx.exit(0)

    if export_format == "str":
        lines = [str(x) for x in data]
        click.secho("\n".join(lines))
        ctx.exit(0)

    if export_format == "csv":
        rows = [x.to_dict() for x in data]
        columns = AuditLog._search_properties()
        stream = io.StringIO()
        writer = csv.DictWriter(stream, fieldnames=columns)
        writer.writerow(dict(zip(columns, columns)))
        writer.writerows(rows)
        content = stream.getvalue()
        stream.close()
        click.secho(content)
        # Fix: exit 0 on success, consistent with the other formats —
        # exit(1) made every successful CSV export look like a failure
        # to shell callers.
        ctx.exit(0)
| 2.15625 | 2 |
system_mapper/provider_azure/azure_mapper.py | dalthviz/query-azure-resource-graph | 0 | 12768605 | <gh_stars>0
# -*- coding: utf-8 -*-
# Licensed under the terms of the MIT License
"""
Azure infrastructure domain mapping.
"""
# Standard library imports
import json
import logging
# Third-party imports
from pandas import DataFrame
from neomodel import DoesNotExist
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests_ntlm import HttpNtlmAuth
import xmltodict
# Local imports
from system_mapper.provider_azure.azhelper import (
az_cli, az_login, az_resource_graph, SUCCESS_CODE)
from system_mapper.graph import (
DeployedApplication, BaseGraphMapper, Database, Disk, NetworkInterface,
NetworkSecurityGroup, ResourceGroup, Subnet, VirtualNetwork,
VirtualMachine, LoadBalancer, PublicIp, PrivateIp, Service, Storage,
Owner)
# Suppress SSL warnings
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class AzureGraphMapper(BaseGraphMapper):
"""Azure implementation of a graph mapper."""
PROVIDER_NAME = 'AZURE'
    def get_app_data(
            self,
            host):
        """
        Get deployed application data from IIS.

        Walks the IIS Administration API on *host*: lists every website,
        follows each site's HAL links to its file tree, and when a
        web.config file is found downloads it and parses the XML into a
        dict attached under the site's 'web_config' key.

        Returns a list of site-info dicts on success; an empty dict on
        any error.  NOTE(review): the error return type ({}) differs
        from the success type (list) — callers should tolerate both.
        """
        port = self.config['port']
        app_container_url = self.config['app_container_url']
        app_container_token = self.config['app_container_token']
        user = self.config['app_container_user']
        password = self.config['app_container_password']
        try:
            response = []
            # Check a way to search file
            headers = {
                'Access-Token': 'Bearer {token}'.format(
                    token=app_container_token),
                'Accept': 'application/hal+json'
            }
            # TODO: Setup cert file of the IIS server
            base_url = 'https://{host}:{port}'.format(host=host, port=port)
            base_info = requests.get(
                base_url + app_container_url,
                headers=headers,
                verify=False,
                auth=HttpNtlmAuth(user, password))
            applications_status = base_info.status_code
            applications_content = base_info.text
            if applications_status == requests.codes.ok:
                applications = json.loads(applications_content)
                if 'websites' in applications:
                    for application in applications['websites']:
                        # Follow the site's HAL self link for its details.
                        app_info_url = application['_links']['self']['href']
                        app_info = requests.get(
                            base_url + app_info_url,
                            headers=headers,
                            verify=False,
                            auth=HttpNtlmAuth(user, password))
                        app_info_status = app_info.status_code
                        app_info_content = json.loads(
                            app_info.text)
                        if app_info_status == requests.codes.ok:
                            # Resolve the file tree: files link ->
                            # directory listing -> individual files.
                            dir_files_url = app_info_content[
                                '_links']['files']['href']
                            web_config = {}
                            dir_files_info = requests.get(
                                base_url + dir_files_url,
                                headers=headers,
                                verify=False,
                                auth=HttpNtlmAuth(user, password))
                            dir_files_info_content = json.loads(
                                dir_files_info.text)
                            files_info_url = dir_files_info_content[
                                '_links']['files']['href']
                            files_info = json.loads(requests.get(
                                base_url + files_info_url,
                                headers=headers,
                                verify=False,
                                auth=HttpNtlmAuth(user, password)).text)
                            for file in files_info['files']:
                                if file['name'] == "web.config":
                                    file_url = file['_links']['self']['href']
                                    file_info = requests.get(
                                        base_url + file_url,
                                        headers=headers,
                                        verify=False,
                                        auth=HttpNtlmAuth(user, password))
                                    file_info_content_url = json.loads(
                                        file_info.text)['file_info'][
                                        '_links']['self']['href']
                                    # Content endpoint lives under
                                    # /api/files/content, not /api/files.
                                    file_content = requests.get(
                                        base_url +
                                        file_info_content_url.replace(
                                            '/api/files',
                                            '/api/files/content'),
                                        headers=headers,
                                        verify=False,
                                        auth=HttpNtlmAuth(user, password)).text
                                    web_config = dict(
                                        xmltodict.parse(
                                            file_content,
                                            process_namespaces=True))
                            app_info_content['web_config'] = web_config
                            response.append(app_info_content)
            return response
        except Exception as e:
            # Broad catch: any network/parse failure degrades to "no data".
            logging.error(e)
            return {}
    def get_data(self):
        """Use Azure Resource Graph to get the data.

        Logs in through the Azure CLI, then runs one Resource Graph (KQL)
        query per resource kind — subscriptions, resource groups, app
        services (+ plans), storage accounts, network interfaces, public
        IPs, virtual machines (joined to NICs and public IPs), NSGs,
        virtual networks, disks, load balancers and SQL databases — and
        returns everything in one dict keyed by resource kind.  For VMs
        it additionally collects deployed-application data from IIS when
        configured.
        """
        data = {}
        try:
            code, login = az_login()
            logging.info('Available subscriptions:')
            logging.info(code)
            logging.info(login)
            if code == SUCCESS_CODE:
                # Refresh accounts lists
                logging.info(az_cli(["account", "list", "--refresh"]))
                # Get subscriptions
                s_query = (
                    'resourcecontainers '
                    '| where type == '
                    '"microsoft.resources/subscriptions"')
                code, data['subscriptions'] = az_resource_graph(
                    query=s_query)
                # Get resource groups
                rg_query = (
                    'resourcecontainers '
                    '| where type == '
                    '"microsoft.resources/subscriptions/resourcegroups"')
                code, data['resource_groups'] = az_resource_graph(
                    query=rg_query)
                # Get App service instances
                app_service_query = (
                    'resources '
                    '| where type == "microsoft.web/sites"')
                code, data['app_services'] = az_resource_graph(
                    query=app_service_query)
                # Get server farms
                app_services_plans_query = (
                    'resources '
                    '| where type == "microsoft.web/serverfarms"')
                code, data['app_services_plans'] = az_resource_graph(
                    query=app_services_plans_query)
                # Get Storage accounts
                storage_accounts_query = (
                    'resources '
                    '| where type == '
                    '"microsoft.storage/storageaccounts"')
                code, data['storage_accounts'] = az_resource_graph(
                    query=storage_accounts_query)
                # Get Network interfaces
                ni_query = (
                    'resources '
                    '| where type == "microsoft.network/networkinterfaces"')
                code, data['network_interfaces'] = az_resource_graph(
                    query=ni_query)
                # Get Public IPs
                public_ips_query = (
                    'resources '
                    '| where type == "microsoft.network/publicipaddresses"')
                code, data['public_ips'] = az_resource_graph(
                    query=public_ips_query)
                # Get VMs: join each VM to its primary NIC, private IP and
                # (when present) public IP address.
                vm_query = ("""
                resources
                | where type =~ 'microsoft.compute/virtualmachines'
                | extend nics=array_length(properties.networkProfile.networkInterfaces)
                | mv-expand nic=properties.networkProfile.networkInterfaces
                | where nics == 1 or nic.properties.primary =~ 'true' or isempty(nic)
                | project id, name, size=tostring(properties.hardwareProfile.vmSize),
                nicId = tostring(nic.id), type, properties = tostring(properties),
                resourceGroup = resourceGroup, tostring(tags), subscriptionId, tenantId
                | join kind=leftouter (
                Resources
                | where type =~ 'microsoft.network/networkinterfaces'
                | extend ipConfigsCount=array_length(properties.ipConfigurations)
                | mv-expand ipconfig=properties.ipConfigurations
                | where ipConfigsCount == 1 or ipconfig.properties.primary =~ 'true'
                | project nicId = id,
                publicIpId = tostring(ipconfig.properties.publicIPAddress.id),
                privateIpAddress = tostring(ipconfig.properties.privateIPAddress))
                on nicId
                | project-away nicId1
                | summarize by id, name, size, nicId, type, properties, resourceGroup, tags,
                subscriptionId, tenantId, publicIpId, privateIpAddress
                | join kind=leftouter (
                Resources
                | where type =~ 'microsoft.network/publicipaddresses'
                | project publicIpId = id, publicIpAddress = properties.ipAddress)
                on publicIpId
                | project-away publicIpId1
                """)
                code, data['raw_virtual_machines'] = az_resource_graph(
                    query=vm_query)
                # Get application data and parse properties:
                data['applications'] = []
                data['virtual_machines'] = []
                for vm in data['raw_virtual_machines']:
                    # `properties` comes back as a JSON string — decode it.
                    vm['properties'] = json.loads(vm['properties'])
                    data['virtual_machines'].append(vm)
                    if (not self.is_db_virtual_machine(vm) and
                            self.config['get_app_container_info']):
                        # Get application data using public ip
                        vm_ip = (
                            vm['publicIpAddress']
                            if 'publicIpAddress' in vm and vm['publicIpAddress']
                            else vm['privateIpAddress'])
                        app_data = {}
                        app_data['virtual_machine_id'] = vm['id']
                        app_data['applications'] = self.get_app_data(vm_ip)
                        apps = data['applications']
                        apps.append(app_data)
                        data['applications'] = apps
                # Get Networks security groups
                nsg_query = (
                    'resources '
                    ' | where type == '
                    '"microsoft.network/networksecuritygroups"')
                code, data['network_security_groups'] = az_resource_graph(
                    query=nsg_query)
                # Get Networks
                v_networks_query = (
                    'resources '
                    '| where type == "microsoft.network/virtualnetworks"')
                code, data['virtual_networks'] = az_resource_graph(
                    query=v_networks_query)
                # Get disks
                disks_query = (
                    'resources '
                    '| where type == "microsoft.compute/disks"')
                code, data['disks'] = az_resource_graph(
                    query=disks_query)
                # Get load balancers data
                lbs_query = (
                    'resources '
                    '| where type == "microsoft.network/loadbalancers"')
                code, data['load_balancers'] = az_resource_graph(
                    query=lbs_query)
                # Get databases (1)
                dbs_query = (
                    'resources'
                    ' | where type == "microsoft.sql/servers/databases"')
                code, data['databases'] = az_resource_graph(
                    query=dbs_query)
                # Get databases (2)
                dbs_query = (
                    'resources'
                    ' | where type == "microsoft.sql/servers"')
                code, databases_servers = az_resource_graph(
                    query=dbs_query)
                data['databases'] += databases_servers
            # data = data.replace('null', 'None')
            logging.info('Data:')
            logging.info(json.dumps(data))
            return data
        except Exception as e:
            logging.error("Execution error", exc_info=True)
            # NOTE(review): `raise e` resets part of the traceback context;
            # a bare `raise` would preserve it.
            raise e
        # return data
def is_db_virtual_machine(self, data):
"""
Check if the data corresponds to a virtual machine used as a database.
"""
is_db_vm = False
properties = data['properties']
if 'storageProfile' in properties:
storage_profile = properties['storageProfile']
if 'imageReference' in storage_profile:
publisher = storage_profile['imageReference']['publisher']
is_db_vm = publisher in self.config['database_strings']
return is_db_vm
def map_data(self, reset=False):
"""Use data a initialize the database model."""
if reset:
self.clear_database()
data = self.get_data()
# Subscriptions
subscriptions = data['subscriptions']
for s in subscriptions:
subscription = Owner(
uid=s['id'].replace('/subscriptions/', ''),
name=s['name'], properties=s['properties'])
subscription.save()
# Map properties
obj_properties = subscription.object_properties
unwanted_props = [
'properties', 'resourceGroup', 'tags', 'id', 'name']
self.add_properties(
obj_properties, s, unwanted_properties=unwanted_props)
# Map tags
obj_tags = subscription.object_tags
self.add_tags(obj_tags, s['tags'])
# Resource group
resource_groups = data['resource_groups']
for rg in resource_groups:
# TODO: location, zones
resource_group = ResourceGroup(
uid=rg['id'],
subscription_id=rg['subscriptionId'],
name=rg['resourceGroup'], properties=rg['properties'])
resource_group.save()
# Map properties
obj_properties = resource_group.object_properties
unwanted_props = [
'properties', 'resourceGroup', 'tags', 'id', 'name']
self.add_properties(
obj_properties, rg, unwanted_properties=unwanted_props)
# Map tags
obj_tags = resource_group.object_tags
self.add_tags(obj_tags, rg['tags'])
# Map subscription
Owner.nodes.get(
uid=rg['subscriptionId']).resource_groups.connect(
resource_group)
# Public IP
public_ips = data['public_ips']
for pip in public_ips:
p_ip = PublicIp(
uid=pip['id'], name=pip['name'],
properties=pip['properties'],
tags=pip['tags'])
p_ip.save()
# Map properties
obj_properties = p_ip.object_properties
unwanted_props = [
'properties', 'resourceGroup', 'tags', 'id', 'name']
self.add_properties(
obj_properties, pip, unwanted_properties=unwanted_props)
# Map tags
obj_tags = p_ip.object_tags
self.add_tags(obj_tags, pip['tags'])
# Connect public ip with resource groups
public_ip_resource_group = pip['resourceGroup']
ResourceGroup.nodes.get(
name=public_ip_resource_group,
subscription_id=pip['subscriptionId']).elements.connect(
p_ip)
# Map subscription
Owner.nodes.get(
uid=pip['subscriptionId']).elements.connect(p_ip)
# App Services Plan
app_services_plans = data['app_services_plans']
for app_service_plan in app_services_plans:
service_plan = Service(
uid=app_service_plan['id'].lower(),
name=app_service_plan['name'],
service_name='AppServicePlan',
properties=app_service_plan['properties'],
tags=app_service_plan['tags'])
service_plan.save()
# Map properties
obj_properties = service_plan.object_properties
unwanted_props = [
'properties', 'resourceGroup', 'tags', 'id', 'name']
self.add_properties(
obj_properties,
app_service_plan,
unwanted_properties=unwanted_props)
# Map tags
obj_tags = service_plan.object_tags
self.add_tags(obj_tags, app_service_plan['tags'])
# Connect public ip with resource groups
app_resource_group = app_service_plan['resourceGroup']
ResourceGroup.nodes.get(
name=app_resource_group,
subscription_id=app_service_plan['subscriptionId']
).elements.connect(
service_plan)
# Map subscription
Owner.nodes.get(
uid=app_service_plan['subscriptionId']).elements.connect(
service_plan)
# App Services
app_services = data['app_services']
for app_service in app_services:
service = Service(
uid=app_service['id'],
name=app_service['name'],
service_name='AppService',
properties=app_service['properties'],
tags=app_service['tags'])
service.save()
# Map properties
obj_properties = service.object_properties
unwanted_props = [
'properties', 'resourceGroup', 'tags', 'id', 'name']
self.add_properties(
obj_properties,
app_service,
unwanted_properties=unwanted_props)
# Map tags
obj_tags = service.object_tags
self.add_tags(obj_tags, app_service['tags'])
# Connect public ip with resource groups
app_resource_group = app_service['resourceGroup']
ResourceGroup.nodes.get(
name=app_resource_group,
subscription_id=app_service['subscriptionId']
).elements.connect(service)
# Connect to server farm (AppServicePlan)
app_service_plan_id = app_service['properties']['serverFarmId']
Service.nodes.get(
uid=app_service_plan_id.lower()).elements.connect(service)
# Map subscription
Owner.nodes.get(
uid=app_service['subscriptionId']).elements.connect(service)
# Storage Account
storage_accounts = data['storage_accounts']
for storage_account in storage_accounts:
storage = Storage(
uid=storage_account['id'],
name=storage_account['name'],
properties=storage_account['properties'],
tags=storage_account['tags'])
storage.save()
# Map properties
obj_properties = storage.object_properties
unwanted_props = [
'properties', 'resourceGroup', 'tags', 'id', 'name']
self.add_properties(
obj_properties,
storage_account,
unwanted_properties=unwanted_props)
# Map tags
obj_tags = storage.object_tags
self.add_tags(obj_tags, storage_account['tags'])
# Connect public ip with resource groups
storage_resource_group = storage_account['resourceGroup']
ResourceGroup.nodes.get(
name=storage_resource_group,
subscription_id=storage_account['subscriptionId']
).elements.connect(
storage)
# Map subscription
Owner.nodes.get(
uid=storage_account['subscriptionId']).elements.connect(
storage)
# Load balancers
load_balancers = data['load_balancers']
for lb in load_balancers:
lbalancer = LoadBalancer(
uid=lb['id'], name=lb['name'],
properties=lb['properties'],
tags=lb['tags'],
backend_pool_id=lb['properties'][
'backendAddressPools'][0]['id'])
lbalancer.save()
# Map properties
obj_properties = lbalancer.object_properties
unwanted_props = [
'properties', 'resourceGroup', 'tags', 'id', 'name']
self.add_properties(
obj_properties, lb, unwanted_properties=unwanted_props)
# Map tags
obj_tags = lbalancer.object_tags
self.add_tags(obj_tags, lb['tags'])
# Connect load balancer with resource groups
lb_resource_group = lb['resourceGroup']
ResourceGroup.nodes.get(
name=lb_resource_group,
subscription_id=lb['subscriptionId']).elements.connect(
lbalancer)
# Map public Ip address
lb_public_id = lb['properties']['frontendIPConfigurations'][0][
'properties']['publicIPAddress']['id']
lbalancer.public_ip.connect(PublicIp.nodes.get(uid=lb_public_id))
# Map subscription
Owner.nodes.get(
uid=lb['subscriptionId']).elements.connect(lbalancer)
# Virtual Networks
virtual_networks = data['virtual_networks']
for vn in virtual_networks:
virtual_network = VirtualNetwork(
uid=vn['id'], name=vn['name'],
properties=vn['properties'],
tags=vn['tags'])
virtual_network.save()
# Map properties
obj_properties = virtual_network.object_properties
unwanted_props = [
'properties', 'resourceGroup', 'tags', 'id', 'name']
self.add_properties(
obj_properties, vn, unwanted_properties=unwanted_props)
# Map tags
obj_tags = virtual_network.object_tags
self.add_tags(obj_tags, vn['tags'])
# Connect ni with resource groups
vn_resource_group = vn['resourceGroup']
ResourceGroup.nodes.get(
name=vn_resource_group,
subscription_id=vn['subscriptionId']).elements.connect(
virtual_network)
# Subnets
vn_subnets = vn['properties']['subnets']
# TODO: Divide subnets from gateway subnets
for sn in vn_subnets:
subnet = Subnet(
uid=sn['id'], name=sn['name'], properties=sn['properties'])
subnet.save()
virtual_network.subnets.connect(subnet)
# Map subscription
Owner.nodes.get(
uid=vn['subscriptionId']).elements.connect(virtual_network)
# Network Interfaces
network_interfaces = data['network_interfaces']
for ni in network_interfaces:
network_interface = NetworkInterface(
uid=ni['id'], name=ni['name'], properties=ni['properties'],
tags=ni['tags'])
network_interface.save()
# Map properties
obj_properties = network_interface.object_properties
unwanted_props = [
'properties', 'resourceGroup', 'tags', 'id']
self.add_properties(
obj_properties, ni, unwanted_properties=unwanted_props)
# Map tags
obj_tags = network_interface.object_tags
self.add_tags(obj_tags, ni['tags'])
# Connect ni with resource groups
ni_resource_group = ni['resourceGroup']
ResourceGroup.nodes.get(
name=ni_resource_group,
subscription_id=ni['subscriptionId']).elements.connect(
network_interface)
# Map subscription
Owner.nodes.get(
uid=ni['subscriptionId']).elements.connect(network_interface)
ip_configs = ni['properties']['ipConfigurations']
for ipc in ip_configs:
# Subnet assingment
ni_subnet = ipc['properties']['subnet']['id']
network_interface.subnet.connect(Subnet.nodes.get(
uid=ni_subnet))
# Private Ip address
ni_subnet = ipc['properties']['subnet']['id']
private_ip = PrivateIp(
name=ipc['properties']['privateIPAddress'])
private_ip.save()
network_interface.private_ip.connect(private_ip)
# Connect with public ip address
if 'publicIPAddress' in ipc['properties']:
ni_subnet = ipc['properties']['publicIPAddress']['id']
network_interface.public_ip.connect(PublicIp.nodes.get(
uid=ni_subnet))
# Connect with load balancer
if 'loadBalancerBackendAddressPools' in ipc['properties']:
backend_pool_id = ipc['properties'][
'loadBalancerBackendAddressPools'][0]['id']
try:
LoadBalancer.nodes.get(
backend_pool_id=backend_pool_id
).network_interfaces.connect(network_interface)
except DoesNotExist as e:
logging.info(
"Error connecting Load balancer "
"to netwoerk interface")
logging.info(e)
# Network Security Group
ns_groups = data['network_security_groups']
for nsg in ns_groups:
ns_group = NetworkSecurityGroup(
uid=nsg['id'], name=nsg['name'],
properties=nsg['properties'],
tags=nsg['tags'])
ns_group.save()
# Map properties
obj_properties = ns_group.object_properties
unwanted_props = [
'properties', 'resourceGroup', 'tags', 'managedBy', 'id',
'name']
self.add_properties(
obj_properties, nsg, unwanted_properties=unwanted_props)
# Map tags
obj_tags = ns_group.object_tags
self.add_tags(obj_tags, nsg['tags'])
# Connect network security group with resource groups
d_resource_group = nsg['resourceGroup']
ResourceGroup.nodes.get(
name=d_resource_group,
subscription_id=nsg['subscriptionId']).elements.connect(
ns_group)
# Map subscription
Owner.nodes.get(
uid=nsg['subscriptionId']).elements.connect(ns_group)
# Connect NSG with interfaces
if 'networkInterfaces' in nsg['properties']:
for ni in nsg['properties']['networkInterfaces']:
ni_id = ni['id']
ns_group.network_interfaces.connect(
NetworkInterface.nodes.get(uid=ni_id))
# Virtual Machines
virtual_machines = data['virtual_machines']
for vm in virtual_machines:
if self.is_db_virtual_machine(vm):
virtual_machine = Database(
uid=vm['id'],
name=vm['name'],
properties=vm['properties'],
tags=vm['tags'])
virtual_machine.save()
else:
virtual_machine = VirtualMachine(
uid=vm['id'],
name=vm['name'],
properties=vm['properties'],
tags=vm['tags'])
virtual_machine.save()
# Map properties
obj_properties = virtual_machine.object_properties
unwanted_props = [
'properties', 'resourceGroup', 'tags', 'id', 'name']
self.add_properties(
obj_properties, vm, unwanted_properties=unwanted_props)
# Map tags
obj_tags = virtual_machine.object_tags
self.add_tags(obj_tags, vm['tags'])
# Connect virtual machines with resource groups
vm_resource_group = vm['resourceGroup']
ResourceGroup.nodes.get(
name=vm_resource_group,
subscription_id=vm['subscriptionId']).elements.connect(
virtual_machine)
# Connect vm with net_interfaces
nis = vm['properties']['networkProfile']['networkInterfaces']
for ni in nis:
net_interface_id = ni['id']
virtual_machine.network_interfaces.connect(
NetworkInterface.nodes.get(uid=net_interface_id))
# Map subscription
Owner.nodes.get(
uid=vm['subscriptionId']).elements.connect(virtual_machine)
# Map databases
databases = data['databases']
for db in databases:
database = Database(
uid=db['id'],
name=db['name'],
properties=db['properties'],
tags=db['tags'])
database.save()
# Map properties
obj_properties = database.object_properties
unwanted_props = [
'properties', 'resourceGroup', 'tags', 'id', 'name']
self.add_properties(
obj_properties, db, unwanted_properties=unwanted_props)
# Map tags
obj_tags = database.object_tags
self.add_tags(obj_tags, db['tags'])
# Connect virtual machines with resource groups
db_resource_group = db['resourceGroup']
ResourceGroup.nodes.get(
name=db_resource_group,
subscription_id=db['subscriptionId']).elements.connect(
database)
# Map subscription
Owner.nodes.get(
uid=db['subscriptionId']).elements.connect(database)
# Map to IIS data
applications = data['applications']
for vm_app_data in applications:
for app_data in vm_app_data['applications']:
application = DeployedApplication(
uid=app_data['id'],
name=app_data['name'],
properties=app_data)
application.save()
# Map properties
unwanted_properties = ['name', 'id']
self.add_properties(
application.object_properties,
app_data,
unwanted_properties=unwanted_properties)
# Map deployed app to virtual_machine
VirtualMachine.nodes.get(
uid=vm_app_data['virtual_machine_id']
).deployed_applications.connect(application)
# Disks
disks = data['disks']
for d in disks:
disk = Disk(
uid=d['id'], name=d['name'], properties=d['properties'],
tags=d['tags'])
disk.save()
# Map properties
obj_properties = disk.object_properties
unwanted_props = [
'properties', 'resourceGroup', 'tags', 'managedBy', 'id']
self.add_properties(
obj_properties, d, unwanted_properties=unwanted_props)
# Map tags
obj_tags = disk.object_tags
self.add_tags(obj_tags, d['tags'])
# Connect disk with resource groups
d_resource_group = d['resourceGroup']
ResourceGroup.nodes.get(
name=d_resource_group,
subscription_id=d['subscriptionId']).elements.connect(
disk)
try:
# Connect disk with vm
d_virtual_machine = d['managedBy']
VirtualMachine.nodes.get(
uid=d_virtual_machine).disks.connect(disk)
except (DoesNotExist, KeyError) as e:
logging.error(
"Error while connecting disk with virtual machine")
logging.error(e)
# Map subscription
Owner.nodes.get(uid=d['subscriptionId']).elements.connect(disk)
# TODO
# Network Peerings
# Using GatewaySubnets
def run_mapper(reset=True, export_path=None):
    """Run the Azure graph mapper and optionally export the result.

    Args:
        reset: Forwarded to ``AzureGraphMapper.map_data``; controls whether
            the database is reset before mapping (default ``True``).
        export_path: When not ``None``, the mapped data is written out via
            ``AzureGraphMapper.export_data`` to this path.
    """
    graph_mapper = AzureGraphMapper()
    graph_mapper.map_data(reset=reset)
    if export_path is None:
        return
    graph_mapper.export_data(export_path=export_path)
| 2 | 2 |
sandbox/bing_news_api.py | lofmat/history_now | 0 | 12768606 | <reponame>lofmat/history_now
import json

import requests

# RapidAPI endpoint for the Bing News search feed.
url = "https://bing-news-search1.p.rapidapi.com/news"

querystring = {"safeSearch": "Off", "textFormat": "Raw"}

headers = {
    'x-bingapis-sdk': "true",
    # NOTE(review): hard-coded API credential committed to source control --
    # move it to an environment variable / secrets store and rotate the key.
    'x-rapidapi-key': "<KEY>",
    'x-rapidapi-host': "bing-news-search1.p.rapidapi.com"
}

# These imports supported a (currently disabled) sandbox experiment that
# fetched each article page and converted its <p> contents to markdown.
from bs4 import BeautifulSoup as BSHTML
from html2markdown import convert

# Fetch the news feed.  Network/HTTP failures are reported cleanly instead of
# producing an unhandled traceback (resolves the old "TODO catch exceptions");
# raise_for_status() also stops a non-200 body from reaching json.loads below.
try:
    response = requests.request("GET", url, headers=headers, params=querystring)
    response.raise_for_status()
except requests.RequestException as exc:
    raise SystemExit("Bing News request failed: %s" % exc)

try:
    news_json = json.loads(response.text)
except ValueError as exc:  # json.JSONDecodeError subclasses ValueError
    raise SystemExit("Bing News returned invalid JSON: %s" % exc)

# 'value' holds the list of article dicts; each item looks like:
# {'_type': 'NewsArticle', 'name': ..., 'url': ..., 'image': {...},
#  'description': ..., 'provider': [...], 'datePublished': ...}
if news_json.get('value'):
    for news_item in news_json['value']:
        print('-------------------------------------')
        print(news_item)
src/utils/templates/threadwithstop.py | KeithAzzopardi1998/BFMC_Startup | 1 | 12768607 | # Copyright (c) 2019, Bosch Engineering Center Cluj and BFMC organizers
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
from threading import Thread
from functools import partial
from inspect import signature
class ThreadWithStop(Thread):
    """Thread subclass with a cooperative stop mechanism.

    Instances carry a ``_running`` flag that the ``run`` loop is expected to
    poll; calling :meth:`stop` clears the flag so the loop can exit cleanly.

    When a ``target`` callable is supplied, it must be an *unbound* function:
    this object is injected as its first argument, so the target can read
    ``self._running`` (and any other instance state).  Passing a bound method
    raises :class:`ValueError`.

    Examples
    --------
    Subclassing::

        class AThread(ThreadWithStop):
            def run(self):
                while self._running:
                    ...

        th1 = AThread()
        th1.start()
        ...
        th1.stop()
        th1.join()

    Plain function, no subclass::

        def examples_func(self, param):
            while self._running:
                ...

        th1 = ThreadWithStop(target=examples_func, args=(param,))
        th1.start()
        ...
        th1.stop()
        th1.join()
    """

    def __init__(self, *args, **kwargs):
        """Initialize the thread; wraps ``target`` so it receives ``self``.

        Raises:
            ValueError: If ``target`` is a bound method instead of an
                unbound function.
        """
        if 'target' in kwargs:
            target = kwargs['target']
            # A bound method already carries its own instance; reject it so
            # that this thread object can be injected as the first argument.
            if hasattr(target, '__self__'):
                raise ValueError("target parameter must be a unbounded function")
            kwargs['target'] = partial(target, self)
        super().__init__(*args, **kwargs)
        self._running = True

    def stop(self):
        """Signal the run loop to finish by clearing the ``_running`` flag."""
        self._running = False
| 1.703125 | 2 |
LeetCode/0074. Search a 2D Matrix/solution.py | InnoFang/oh-my-algorithms | 19 | 12768608 | <filename>LeetCode/0074. Search a 2D Matrix/solution.py
"""
133 / 133 test cases passed.
Runtime: 56 ms
Memory Usage: 15.1 MB
"""
class Solution:
    """Search a row-wise sorted matrix by treating it as one flat sorted list."""

    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
        """Return True iff ``target`` occurs in ``matrix``.

        The matrix rows are sorted and each row starts after the previous
        row ends, so a single binary search over flat indices suffices.
        """
        rows, cols = len(matrix), len(matrix[0])
        lo, hi = 0, rows * cols - 1
        # Right-biased search: converge on the largest flat index whose
        # value is <= target, then check it for equality.
        while lo < hi:
            mid = (lo + hi + 1) // 2
            row, col = divmod(mid, cols)
            if matrix[row][col] <= target:
                lo = mid
            else:
                hi = mid - 1
        row, col = divmod(hi, cols)
        return matrix[row][col] == target
| 3.625 | 4 |
scripts/validate_events.py | aureus5/mimiciII | 2 | 12768609 | <gh_stars>1-10
import os
import argparse
def is_subject_folder(x):
    """Return True when every character of ``x`` is an ASCII digit.

    Note: vacuously True for the empty string, matching the original
    character-by-character scan (unlike ``str.isdigit``).
    """
    return all('0' <= c <= '9' for c in x)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('subjects_root_path', type=str,
help='Directory containing subject sub-directories.')
args = parser.parse_args()
print args
subfolders = os.listdir(args.subjects_root_path)
subjects = filter(is_subject_folder, subfolders)
# get mapping for subject
maps = {}
for (index, subject) in enumerate(subjects):
with open(os.path.join(args.subjects_root_path, subject, "stays.csv")) as f:
rows = f.readlines()[1:]
for row in rows:
hadm_id = row.split(',')[1]
icustay_id = row.split(',')[2]
assert hadm_id != ""
assert icustay_id != ""
if (not subject in maps):
maps[subject] = dict()
maps[subject][hadm_id] = icustay_id
bad_pairs = set()
n_events = 0
emptyhadm = 0
noicustay = 0
recovered = 0
couldnotrecover = 0
icustaymissinginstays = 0
nohadminstay = 0
for (index, subject) in enumerate(subjects):
new_lines = []
with open(os.path.join(args.subjects_root_path, subject, "events.csv")) as f:
lines = f.readlines()
header = lines[0]
lines = lines[1:]
new_lines.append(header)
for line in lines:
mas = line.split(',')
hadm_id = mas[1]
icustay_id = mas[2]
n_events += 1
if (hadm_id == ""):
emptyhadm += 1
continue
try:
icustay_id_from_maps = maps[subject][hadm_id]
if (icustay_id == ""):
noicustay += 1
try:
icustay_id = maps[subject][hadm_id]
recovered += 1
new_line = ','.join(mas[:2] + [icustay_id] + mas[3:])
new_lines.append(new_line)
except:
couldnotrecover += 1
else:
if icustay_id != icustay_id_from_maps:
icustaymissinginstays += 1
else:
new_line = ','.join(mas[:2] + [icustay_id] + mas[3:])
new_lines.append(new_line)
except:
nohadminstay += 1
bad_pairs.add((subject, hadm_id))
with open(os.path.join(args.subjects_root_path, subject, "events.csv"), "w") as f:
for new_line in new_lines:
f.write(new_line)
if (index % 100 == 0):
print "processed %d / %d" % (index+1, len(subjects)), " \r",
print ""
#print bad_pairs
print('n_events', n_events,
'emptyhadm', emptyhadm,
'noicustay', noicustay,
'recovered', recovered ,
'couldnotrecover', couldnotrecover ,
'icustaymissinginstays', icustaymissinginstays ,
'nohadminstay', nohadminstay )
# Entry point when executed as a script: python validate_events.py <subjects_root_path>
if __name__=="__main__":
    main()
| 2.703125 | 3 |
genedisco/evaluation/hitratio.py | genedisco/genedisco | 11 | 12768610 | <gh_stars>10-100
"""
Copyright (C) 2022 <NAME>, GlaxoSmithKline plc
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import os
import pickle
import numpy as np
from typing import Optional, AnyStr
from slingpy.evaluation.metrics.abstract_metric import AbstractMetric
class HitRatio(AbstractMetric):
    """
    A metric to measure the ratio of the top mover genes selected by the acquisition function.
    """

    def get_abbreviation(self) -> AnyStr:
        """Return the short name used when reporting this metric."""
        return "HR"

    @staticmethod
    def evaluate(top_movers_filepath: AnyStr, super_dir_to_cycle_dirs: AnyStr) -> float:
        """Compute the fraction of top-mover genes hit by the latest cycle's selection.

        Args:
            top_movers_filepath: Path to a pickle file holding the list of
                top-mover gene indices.
            super_dir_to_cycle_dirs: Directory containing the per-cycle
                ``cycle_<i>`` sub-directories with selection pickles.

        Returns:
            The hit ratio (in [0, 1]) for the current (last) cycle.
            (Annotation corrected: the function returns a scalar float,
            not an ``np.ndarray``.)
        """
        with open(top_movers_filepath, "rb") as f:
            top_mover_indices = pickle.load(f)
        top_mover_set = set(top_mover_indices)
        num_top_hits = len(top_mover_indices)
        # Number of completed active-learning cycles found on disk.
        num_AL_cycles = get_num_AL_cycles(super_dir_to_cycle_dirs)
        selected_indices_per_cycle = get_cumulative_selected_indices(
            super_dir_to_cycle_dirs)
        cumulative_top_hit_ratio = []
        for c in range(num_AL_cycles):
            selected_indices = selected_indices_per_cycle[c]
            # Hits = top movers that appear in this cycle's selections.
            # NOTE(review): despite the "cumulative" naming, each entry holds
            # only that single cycle's selections -- confirm this is intended.
            num_of_hits = num_top_hits - len(top_mover_set - set(selected_indices))
            cumulative_top_hit_ratio.append(num_of_hits/num_top_hits)
        return cumulative_top_hit_ratio[-1]  # returns the top hit ratio of the current cycle
def get_cumulative_selected_indices(super_dir_to_cycle_dirs: AnyStr):
    """Load the indices selected at each completed active-learning cycle.

    Args:
        super_dir_to_cycle_dirs: Directory whose ``cycle_<i>`` sub-directories
            each contain a ``selected_indices.pickle`` file.

    Returns:
        A list with one entry per cycle, each holding that cycle's
        unpickled selected indices.
    """
    num_cycles = get_num_AL_cycles(super_dir_to_cycle_dirs)
    selections_by_cycle = []
    for cycle_index in range(num_cycles):
        pickle_path = os.path.join(
            super_dir_to_cycle_dirs, "cycle_" + str(cycle_index),
            "selected_indices.pickle")
        with open(pickle_path, "rb") as handle:
            selections_by_cycle.append(pickle.load(handle))
        # If the stored gene names are byte strings, decode with:
        # [x.decode("utf-8") for x in selections_by_cycle[-1]]
    return selections_by_cycle
def get_num_AL_cycles(super_dir_to_cycle_dirs: AnyStr):
    """Count the ``cycle*`` sub-directories directly under the given directory."""
    immediate_subdirs = list(os.walk(super_dir_to_cycle_dirs))[0][1]
    return len([d for d in immediate_subdirs if d.startswith("cycle")])
Lib/test/test_compiler/test_static/final.py | isabella232/cinder-1 | 1,886 | 12768611 | <gh_stars>1000+
from compiler.errors import TypedSyntaxError
from typing import ClassVar
from .common import StaticTestBase
class FinalTests(StaticTestBase):
def test_final_multiple_typeargs(self):
codestr = """
from typing import Final
from something import hello
x: Final[int, str] = hello()
"""
with self.assertRaisesRegex(
TypedSyntaxError,
r"incorrect number of generic arguments for Final\[T\], expected 1, got 2",
):
self.compile(codestr, modname="foo")
def test_final_annotation_nesting(self):
with self.assertRaisesRegex(
TypedSyntaxError, "Final annotation is only valid in initial declaration"
):
self.compile(
"""
from typing import Final, List
x: List[Final[str]] = []
""",
modname="foo",
)
with self.assertRaisesRegex(
TypedSyntaxError, "Final annotation is only valid in initial declaration"
):
self.compile(
"""
from typing import Final, List
x: List[int | Final] = []
""",
modname="foo",
)
def test_final(self):
codestr = """
from typing import Final
x: Final[int] = 0xdeadbeef
"""
self.compile(codestr, modname="foo")
def test_final_generic(self):
codestr = """
from typing import Final
x: Final[int] = 0xdeadbeef
"""
self.compile(codestr, modname="foo")
def test_final_generic_types(self):
codestr = """
from typing import Final
def g(i: int) -> int:
return i
def f() -> int:
x: Final[int] = 0xdeadbeef
return g(x)
"""
self.compile(codestr, modname="foo")
def test_final_uninitialized(self):
codestr = """
from typing import Final
x: Final[int]
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Must assign a value when declaring a Final"
):
self.compile(codestr, modname="foo")
def test_final_reassign(self):
codestr = """
from typing import Any, Final
x: Final[Any] = 0xdeadbeef
x = "something"
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final variable"
):
self.compile(codestr, modname="foo")
def test_final_reassign_explicit_global(self):
codestr = """
from typing import Final
a: Final[int] = 1337
def fn():
def fn2():
global a
a = 0
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final variable"
):
self.compile(codestr, modname="foo")
def test_final_reassign_explicit_global_shadowed(self):
codestr = """
from typing import Final
a: Final[int] = 1337
def fn():
a = 2
def fn2():
global a
a = 0
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final variable"
):
self.compile(codestr, modname="foo")
def test_final_reassign_nonlocal(self):
codestr = """
from typing import Final
a: Final[int] = 1337
def fn():
def fn2():
nonlocal a
a = 0
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final variable"
):
self.compile(codestr, modname="foo")
def test_final_reassign_nonlocal_shadowed(self):
codestr = """
from typing import Final
a: Final[int] = 1337
def fn():
a = 3
def fn2():
nonlocal a
# should be allowed, we're assigning to the shadowed
# value
a = 0
"""
self.compile(codestr, modname="foo")
def test_final_reassigned_in_tuple(self):
codestr = """
from typing import Final
x: Final[int] = 0xdeadbeef
y = 3
x, y = 4, 5
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final variable"
):
self.compile(codestr, modname="foo")
def test_final_reassigned_in_loop(self):
codestr = """
from typing import Final
x: Final[int] = 0xdeadbeef
for x in [1, 3, 5]:
pass
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final variable"
):
self.compile(codestr, modname="foo")
def test_final_reassigned_in_except(self):
codestr = """
from typing import Final
def f():
e: Final[int] = 3
try:
x = 1 + "2"
except Exception as e:
pass
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final variable"
):
self.compile(codestr, modname="foo")
def test_final_reassigned_in_loop_target_tuple(self):
codestr = """
from typing import Final
x: Final[int] = 0xdeadbeef
for x, y in [(1, 2)]:
pass
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final variable"
):
self.compile(codestr, modname="foo")
def test_final_reassigned_in_ctxmgr(self):
codestr = """
from typing import Final
x: Final[int] = 0xdeadbeef
with open("lol") as x:
pass
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final variable"
):
self.compile(codestr, modname="foo")
def test_final_generic_reassign(self):
codestr = """
from typing import Final
x: Final[int] = 0xdeadbeef
x = 0x5ca1ab1e
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final variable"
):
self.compile(codestr, modname="foo")
def test_final_callable_protocol_retains_inferred_type(self):
codestr = """
from typing import Final, Protocol
def foo(x: int) -> str:
return "A"
class CallableProtocol(Protocol):
def __call__(self, x: int) -> str:
pass
f: Final[CallableProtocol] = foo
def bar(x: int) -> str:
return f(x)
"""
with self.in_module(codestr) as mod:
f = mod.bar
self.assertInBytecode(f, "INVOKE_FUNCTION")
def test_final_in_args(self):
codestr = """
from typing import Final
def f(a: Final) -> None:
pass
"""
with self.assertRaisesRegex(
TypedSyntaxError,
"Final annotation is only valid in initial declaration",
):
self.compile(codestr, modname="foo")
def test_final_returns(self):
codestr = """
from typing import Final
def f() -> Final[int]:
return 1
"""
with self.assertRaisesRegex(
TypedSyntaxError,
"Final annotation is only valid in initial declaration",
):
self.compile(codestr, modname="foo")
def test_final_decorator(self):
codestr = """
from typing import final
class C:
@final
def f():
pass
"""
self.compile(codestr, modname="foo")
def test_final_decorator_override(self):
codestr = """
from typing import final
class C:
@final
def f():
pass
class D(C):
def f():
pass
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final attribute of foo.D:f"
):
self.compile(codestr, modname="foo")
def test_final_decorator_override_with_assignment(self):
codestr = """
from typing import final
class C:
@final
def f():
pass
class D(C):
f = print
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final attribute of foo.D:f"
):
self.compile(codestr, modname="foo")
def test_final_decorator_override_transitivity(self):
codestr = """
from typing import final
class C:
@final
def f():
pass
class D(C):
pass
class E(D):
def f():
pass
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Cannot assign to a Final attribute of foo.E:f"
):
self.compile(codestr, modname="foo")
def test_final_decorator_class(self):
codestr = """
from typing import final
@final
class C:
def f(self):
pass
def f():
return C().f()
"""
c = self.compile(codestr, modname="foo")
f = self.find_code(c, "f")
self.assertInBytecode(f, "INVOKE_FUNCTION")
def test_final_decorator_class_inheritance(self):
codestr = """
from typing import final
@final
class C:
pass
class D(C):
pass
"""
with self.assertRaisesRegex(
TypedSyntaxError, "Class `foo.D` cannot subclass a Final class: `foo.C`"
):
self.compile(codestr, modname="foo")
def test_final_decorator_class_nonstatic_subclass(self):
codestr = """
from typing import final
@final
class C:
pass
"""
with self.in_module(codestr) as mod:
with self.assertRaisesRegex(
TypeError, "type 'C' is not an acceptable base type"
):
class D(mod.C):
pass
def test_final_decorator_class_dynamic(self):
"""We should never mark DYNAMIC_TYPE as final."""
codestr = """
from typing import final, Generic, NamedTuple
@final
class NT(NamedTuple):
x: int
class C(Generic):
pass
"""
# No TypedSyntaxError "cannot inherit from Final class 'dynamic'"
self.compile(codestr)
def test_final_constant_folding_int(self):
codestr = """
from typing import Final
X: Final[int] = 1337
def plus_1337(i: int) -> int:
return i + X
"""
with self.in_module(codestr) as mod:
plus_1337 = mod.plus_1337
self.assertInBytecode(plus_1337, "LOAD_CONST", 1337)
self.assertNotInBytecode(plus_1337, "LOAD_GLOBAL")
self.assertEqual(plus_1337(3), 1340)
def test_final_constant_folding_bool(self):
codestr = """
from typing import Final
X: Final[bool] = True
def f() -> bool:
return not X
"""
with self.in_module(codestr) as mod:
f = mod.f
self.assertInBytecode(f, "LOAD_CONST", True)
self.assertNotInBytecode(f, "LOAD_GLOBAL")
self.assertFalse(f())
def test_final_constant_folding_str(self):
codestr = """
from typing import Final
X: Final[str] = "omg"
def f() -> str:
return X[1]
"""
with self.in_module(codestr) as mod:
f = mod.f
self.assertInBytecode(f, "LOAD_CONST", "omg")
self.assertNotInBytecode(f, "LOAD_GLOBAL")
self.assertEqual(f(), "m")
def test_final_constant_folding_disabled_on_nonfinals(self):
codestr = """
from typing import Final
X: str = "omg"
def f() -> str:
return X[1]
"""
with self.in_module(codestr) as mod:
f = mod.f
self.assertNotInBytecode(f, "LOAD_CONST", "omg")
self.assertInBytecode(f, "LOAD_GLOBAL", "X")
self.assertEqual(f(), "m")
def test_final_constant_folding_disabled_on_nonconstant_finals(self):
codestr = """
from typing import Final
def p() -> str:
return "omg"
X: Final[str] = p()
def f() -> str:
return X[1]
"""
with self.in_module(codestr) as mod:
f = mod.f
self.assertNotInBytecode(f, "LOAD_CONST", "omg")
self.assertInBytecode(f, "LOAD_GLOBAL", "X")
self.assertEqual(f(), "m")
def test_final_constant_folding_shadowing(self):
codestr = """
from typing import Final
X: Final[str] = "omg"
def f() -> str:
X = "lol"
return X[1]
"""
with self.in_module(codestr) as mod:
f = mod.f
self.assertInBytecode(f, "LOAD_CONST", "lol")
self.assertNotInBytecode(f, "LOAD_GLOBAL", "omg")
self.assertEqual(f(), "o")
def test_final_constant_folding_in_module_scope(self):
codestr = """
from typing import Final
X: Final[int] = 21
y = X + 3
"""
c = self.compile(codestr, modname="foo.py")
self.assertNotInBytecode(c, "LOAD_NAME", "X")
with self.in_module(codestr) as mod:
self.assertEqual(mod.y, 24)
def test_final_constant_in_module_scope(self):
codestr = """
from typing import Final
X: Final[int] = 21
"""
with self.in_module(codestr) as mod:
self.assertEqual(mod.__final_constants__, ("X",))
def test_final_nonconstant_in_module_scope(self):
codestr = """
from typing import Final
def p() -> str:
return "omg"
X: Final[str] = p()
"""
with self.in_module(codestr) as mod:
self.assertEqual(mod.__final_constants__, ())
def test_final_method_in_class_slots(self):
codestr = """
from typing import final
class C:
@final
def foo(self):
return self
def bar(self):
return self
"""
with self.in_module(codestr) as mod:
self.assertEqual(mod.C.__final_method_names__, ("foo",))
def test_final_method_in_class_slots_with_inheritance(self):
codestr = """
from typing import final
class C:
@final
def foo(self):
return self
def bar(self):
return self
class D(C):
@final
def bar(self):
return self
def baz(self):
return self
class E(D):
@final
def baz(self):
return self
class F(D):
def baz(self):
return self
"""
with self.in_module(codestr) as mod:
self.assertEqual(mod.C.__final_method_names__, ("foo",))
self.assertEqual(mod.D.__final_method_names__, ("bar", "foo"))
self.assertEqual(mod.E.__final_method_names__, ("bar", "baz", "foo"))
self.assertEqual(mod.F.__final_method_names__, ("bar", "foo"))
def test_final_method_in_class_nonstatic_subclass_slots(self):
codestr = """
from typing import final
class C:
@final
def foo(self):
return self
def bar(self):
return self
"""
with self.in_module(codestr) as mod:
class D(mod.C):
pass
self.assertEqual(D.__final_method_names__, ("foo",))
def test_final_method_nonstatic_override_throws_runtime_type_error(self):
codestr = """
from typing import final
class C:
@final
def foo(self):
return self
def bar(self):
return self
"""
with self.in_module(codestr) as mod:
with self.assertRaisesRegex(
TypeError, r"'foo' overrides a final method in the static base class"
):
class D(mod.C):
def foo(self):
return self
def test_final_method_nonstatic_override_of_static_subclass_throws_runtime_type_error(
self,
):
codestr = """
from typing import final
class C:
@final
def foo(self):
return self
def bar(self):
return self
class D(C):
pass
"""
with self.in_module(codestr) as mod:
with self.assertRaisesRegex(
TypeError, r"'foo' overrides a final method in the static base class"
):
class E(mod.D):
def foo(self):
return self
def test_final_method_nonstatic_subclass_of_static_class_throws_runtime_type_error(
self,
):
codestr = """
from typing import final
class C:
@final
def foo(self):
return self
def bar(self):
return self
"""
with self.in_module(codestr) as mod:
with self.assertRaisesRegex(
TypeError, r"'foo' overrides a final method in the static base class"
):
class D(mod.C):
pass
class E(D):
def foo(self):
return self
def test_final_method_with_other_decorator_throws_type_error(
self,
):
codestr = """
from typing import final
class C:
@final
@staticmethod
def foo():
return self
@staticmethod
@final
def bar():
return self
"""
with self.in_module(codestr) as mod:
with self.assertRaisesRegex(
TypeError, r"'foo' overrides a final method in the static base class"
):
class D(mod.C):
@staticmethod
def foo():
return self
with self.assertRaisesRegex(
TypeError, r"'bar' overrides a final method in the static base class"
):
class D(mod.C):
@staticmethod
def bar():
return self
def test_updating_slot_of_final_method_in_subclass_throws_type_error(
self,
):
codestr = """
from typing import final
class C:
@final
def foo(self) -> int:
return 0
"""
with self.in_module(codestr) as mod:
with self.assertRaisesRegex(
TypeError, r"'foo' overrides a final method in the static base class"
):
class D(mod.C):
pass
D.foo = lambda self: 0
def test_updating_slot_of_final_method_in_base_class_succeeds(
self,
):
codestr = """
from typing import final
class C:
@final
def foo(self) -> int:
return 0
"""
with self.in_module(codestr) as mod:
class D(mod.C):
pass
mod.C.foo = lambda self: 1
self.assertEqual(mod.C().foo(), 1)
def test_final_method_in_non_final_class_emits_invoke_function(
self,
):
codestr = """
from typing import final
class C:
def __init__(self, x: int) -> None:
self.x = x
@final
def foo(self) -> int:
return self.x
def foo(c: C) -> int:
return c.foo()
"""
with self.in_module(codestr) as mod:
class D(mod.C):
def __init__(self):
super().__init__(5)
self.assertInBytecode(mod.foo, "INVOKE_FUNCTION")
self.assertEqual(mod.foo(mod.C(4)), 4)
self.assertEqual(mod.foo(D()), 5)
def test_final_method_in_subclass_of_non_final_class_emits_invoke_function(
self,
):
codestr = """
from typing import final
class C:
def __init__(self, x: int) -> None:
self.x = x
@final
def foo(self) -> int:
return self.x
class D(C):
def __init__(self) -> None:
self.x = 4
def foo(d: D) -> int:
return d.foo()
"""
with self.in_module(codestr) as mod:
self.assertInBytecode(
mod.foo, "INVOKE_FUNCTION", ((mod.__name__, "C", "foo"), 1)
)
self.assertEqual(mod.foo(mod.D()), 4)
def test_final_classmethod_in_non_final_nonstatic_class_emits_invoke_function(
self,
):
codestr = """
from typing import ClassVar, final
class C:
CV: ClassVar[int] = 42
@final
@classmethod
def foo(cls) -> int:
return cls.CV
def foo(c: C) -> int:
return c.foo()
"""
with self.in_module(codestr) as mod:
class D(mod.C):
CV: ClassVar[int] = 84
self.assertInBytecode(
mod.foo, "INVOKE_FUNCTION", ((mod.__name__, "C", "foo"), 1)
)
self.assertEqual(mod.foo(mod.C()), 42)
self.assertEqual(mod.foo(D()), 84)
def test_final_classmethod_in_non_final_static_class_emits_invoke_function(
self,
):
codestr = """
from typing import ClassVar, final
class C:
CV: ClassVar[int] = 42
@final
@classmethod
def foo(cls) -> int:
return cls.CV
class D(C):
CV: ClassVar[int] = 63
def foo(c: C) -> int:
return c.foo()
"""
with self.in_module(codestr) as mod:
self.assertInBytecode(
mod.foo, "INVOKE_FUNCTION", ((mod.__name__, "C", "foo"), 1)
)
self.assertEqual(mod.foo(mod.C()), 42)
self.assertEqual(mod.foo(mod.D()), 63)
| 2.640625 | 3 |
labs/6/py/integrate.py | Sky-Nik/numerical-analysis | 1 | 12768612 | #!/usr/bin/env python
import numpy as np
from typing import Callable
def rectangle(a: float, b: float, f: Callable[[np.array], np.array],
              h: float) -> float:
    """Composite midpoint rule for the integral of f over [a, b] with step h.

    NOTE: np.arange with a float step can gain/lose an endpoint when h does
    not divide (b - a) exactly — callers should pick h accordingly.
    """
    midpoints = np.arange(a + h / 2, b + h / 2, h)
    return h * np.sum(f(midpoints))
def trapezoid(a: float, b: float, f: Callable[[np.array], np.array],
              h: float) -> float:
    """Composite trapezoid rule for the integral of f over [a, b] with step h."""
    interior_sum = np.sum(f(np.arange(a + h, b, h)))
    return h / 2 * (f(a) + 2 * interior_sum + f(b))
def simpson(a: float, b: float, f: Callable[[np.array], np.array],
            h: float) -> float:
    """Composite Simpson rule for the integral of f over [a, b] with step h.

    Exact (up to rounding) for polynomials of degree <= 3.
    """
    node_sum = np.sum(f(np.arange(a + h, b, h)))           # interior nodes
    mid_sum = np.sum(f(np.arange(a + h / 2, b + h / 2, h)))  # midpoints
    return h / 6 * (f(a) + 2 * node_sum + 4 * mid_sum + f(b))
| 2.859375 | 3 |
game/views.py | ruippgoncalves/CS2048 | 0 | 12768613 | """Views"""
import json
from math import log2
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render
from django.db import IntegrityError
from django.http import JsonResponse, HttpResponseRedirect
from django.urls import reverse
from .models import User, Game
# Index --------------------------------------------------------
def index(request):
    """Render the landing page; any method other than GET gets a 405."""
    if request.method == "GET":
        return render(request, "index.html")
    return JsonResponse({"error": "GET request required"}, status=405)
# Game ---------------------------------------------------------
@login_required(login_url="game:loginUser")
def play(request):
    """Render the game board for a signed-in user; GET only."""
    if request.method == "GET":
        return render(request, "game/play.html", {"nav": "play"})
    return JsonResponse({"error": "GET request required"}, status=405)
@csrf_exempt
@login_required(login_url="game:loginUser")
def leaderboard(request):
    """LeaderBoard Page.

    GET renders the leaderboard (best tile first, ties broken by time).
    POST records a finished game from a JSON body with "value" (highest
    tile reached) and "time" (elapsed time).
    """
    if request.method != "POST" and request.method != "GET":
        return JsonResponse({"error": "POST or GET request required"}, status=405)
    if request.method == "POST":
        content = json.loads(request.body)
        value = content.get("value")  # highest tile reached, e.g. 2048
        time = content.get("time")
        # Map the tile onto a 1..6 score: log2(2048) == 11, so tiles
        # 2048..65536 become scores 1..6.
        # NOTE(review): a missing/zero "value" makes log2() raise before the
        # range check below — confirm the client always sends a tile value.
        value = round(log2(value) - 10)
        # Check if value is valid
        if value > 6 or value < 1:
            return JsonResponse({"error": "Invalid Value"}, status=400)
        # Add to DB
        game = Game()
        game.user = request.user
        game.max_value = value
        game.time = time
        game.save()
        return JsonResponse({"saved": True}, status=201)
    # view: every recorded game, best tile first, fastest time first
    return render(request, "game/leaderboard.html", {
        "nav": "leaderboard",
        "content": [content.serialize() for content in Game.objects.order_by("-max_value", "time")]
    })
# Users --------------------------------------------------------
def create(request):
    """Register a new account; GET shows the form, POST creates the user."""
    if request.method not in ("POST", "GET"):
        return JsonResponse({"error": "POST or GET request required"}, status=405)
    if request.method == "POST":
        username = request.POST["username"]
        email = request.POST["email"]
        password = request.POST["password"]
        confirmation = request.POST["confirmation"]
        # Ensure password matches confirmation
        if password != confirmation:
            return render(request, "users/create.html", {
                "nav": "create",
                "message": "Passwords must match"
            })
        # Attempt to create new user; a duplicate username violates the
        # unique constraint and surfaces as IntegrityError.
        try:
            user = User.objects.create_user(username, email, password)
            user.save()
        except IntegrityError:
            return render(request, "users/create.html", {
                "nav": "create",
                "message": "Username already taken"
            })
        # Login the user and redirect
        login(request, user)
        return HttpResponseRedirect(reverse("game:index"))
    return render(request, "users/create.html", {"nav": "create"})
def login_view(request):
    """Sign a user in; GET shows the form, POST attempts authentication."""
    if request.method not in ("POST", "GET"):
        return JsonResponse({"error": "POST or GET request required"}, status=405)
    if request.method == "POST":
        # Attempt to sign user in
        user = authenticate(
            request,
            username=request.POST["username"],
            password=request.POST["password"],
        )
        if user is not None:
            login(request, user)
            return HttpResponseRedirect(reverse("game:index"))
        # Authentication failed: redisplay the form with an error.
        return render(request, "users/login.html", {
            "nav": "login",
            "message": "Invalid username and/or password."
        })
    return render(request, "users/login.html", {"nav": "login"})
def logout_view(request):
    """Sign the user out and redirect to the index page; GET only."""
    if request.method == "GET":
        logout(request)
        return HttpResponseRedirect(reverse("game:index"))
    return JsonResponse({"error": "GET request required"}, status=405)
| 2.28125 | 2 |
extlibs/freetype-2.5.2/src/tools/docmaker/PaxHeaders.20920/docmaker.py | halak/bibim | 3 | 12768614 | 30 atime=1386526222.135502298
30 ctime=1374498496.154314241
| 0.898438 | 1 |
service.py | rjaus/XeroPhaxio | 0 | 12768615 | # -*- coding: utf-8 -*-
import os
from phaxio import PhaxioApi
from xero import Xero
from xero.auth import PrivateCredentials
# Setup the Xero Private App
# Ensure your private key is listed in your .gitignore, so you doesn't end up in your repo.
# Xero API Consumer Key is retrieved via an environmental variable
# Ensure you add the environment variable on your local machine AND in your AWS Lambda Settings (via config.yaml)
# NOTE(review): this runs at import/cold-start time; a missing key file or
# unset XERO_CONSUMER_KEY_PHAXIO will fail the whole Lambda init.
with open('./keys/lambdaprivatekey.pem') as keyfile:
    rsa_key = keyfile.read()
credentials = PrivateCredentials(os.getenv("XERO_CONSUMER_KEY_PHAXIO"), rsa_key)
xero = Xero(credentials)
# Setup PhaxioAPI
# Phaxio API keys are set via environment variables
# Ensure you add the environment variables on your local machine AND in your AWS Lambda Settings (via config.yaml)
phaxio = PhaxioApi(os.getenv('PHAXIO_API_KEY'), os.getenv('PHAXIO_API_SECRET'))
# NOTE(review): this module-level default is never seen by handler() — the
# assignments inside handler() make fax_number a function-local name there.
fax_number = None
def handler(event, context):
    """AWS Lambda entry point: fax a Xero invoice to the invoice's contact.

    Expects ``event['invoice_id']``.  Fetches the invoice from the Xero
    API, extracts the contact's FAX number, sends the invoice PDF through
    Phaxio, marks the invoice as sent in Xero, and returns an API-Gateway
    style response dict (statusCode / CORS headers / body).
    """
    # BUG FIX: fax_number must be initialised locally.  The assignments in
    # the loop below make the name function-local, so the module-level
    # `fax_number = None` never applied here and a contact without a FAX
    # phone entry raised UnboundLocalError at the `is None` check.
    fax_number = None
    # invoice_id is the only param sent to the lambda function
    invoice_id = event.get('invoice_id')
    # Use the invoice_id to retrieve the invoice details from the Xero API
    invoice = xero.invoices.get(invoice_id)
    # The response includes the Contact and their phone numbers; find the FAX one.
    for phone in invoice[0]['Contact']['Phones']:
        if phone['PhoneType'] == "FAX":
            # A fax number is only usable if all three parts are present.
            if not all((phone['PhoneCountryCode'], phone['PhoneAreaCode'], phone['PhoneNumber'])):
                return { "statusCode": 400, "headers": { "Content-Type": "application/json", "Access-Control-Allow-Origin" : "*", "Access-Control-Allow-Credentials" : True }, "body": 'Contact does not have a Fax Number, add Fax Number to Contact in Xero and try again.'}
            fax_number = phone['PhoneCountryCode'] + phone['PhoneAreaCode'] + phone['PhoneNumber']
    # Sanity check: did the contact have a FAX phone entry at all?
    if fax_number is None:
        return { "statusCode": 400, "headers": { "Content-Type": "application/json", "Access-Control-Allow-Origin" : "*", "Access-Control-Allow-Credentials" : True }, "body": 'Contact does not have a Fax Number, add Fax Number to Contact in Xero and try again.'}
    # Retrieve the PDF rendering of the invoice from the Xero API.
    invoice_pdf = xero.invoices.get(invoice_id, headers={'Accept': 'application/pdf'})
    # Stage the PDF on disk for Phaxio (context manager guarantees the close).
    with open('/tmp/tmp_fax.pdf', 'wb') as pdf_file:
        pdf_file.write(invoice_pdf)
    # Send the PDF invoice as a fax with Phaxio.
    # NOTE(review): delivery is asynchronous — recording a send confirmation
    # would need a second handler invoked via Phaxio's callback URL.
    phaxio.Fax.send(fax_number, files='/tmp/tmp_fax.pdf', header_text='Xero Invoice Faxed with Phaxio', tags_dict={'invoice_id': invoice_id})
    # Mark the invoice as sent in Xero.
    invoice[0]['SentToContact'] = True
    xero.invoices.save(invoice[0])
    # Return 200 with CORS headers to support cross-domain callers.
    return {
        "statusCode": 200,
        "headers": {
            "Content-Type": "application/json",
            "Access-Control-Allow-Origin" : "*",
            "Access-Control-Allow-Credentials" : True
        },
        "body": 'Fax Sent Successfully'
    }
| 2.453125 | 2 |
custom/icds_reports/utils/aggregation_helpers/distributed/availing_service_forms.py | satyaakam/commcare-hq | 0 | 12768616 | from dateutil.relativedelta import relativedelta
from custom.icds_reports.const import AGG_AVAILING_SERVICES_TABLE
from custom.icds_reports.utils.aggregation_helpers import month_formatter
from custom.icds_reports.utils.aggregation_helpers.distributed.base import (
StateBasedAggregationDistributedHelper,
)
class AvailingServiceFormsAggregationDistributedHelper(StateBasedAggregationDistributedHelper):
    # Per-state monthly aggregation of "availing service" UCR form data
    # into AGG_AVAILING_SERVICES_TABLE, carrying forward last month's rows.
    helper_key = 'availing_service-forms'
    ucr_data_source_id = 'static-availing_service_form'
    aggregate_parent_table = AGG_AVAILING_SERVICES_TABLE
    def data_from_ucr_query(self):
        # Builds the UCR sub-query selecting, per person case, the latest
        # is_registered value within the target month (window LAST_VALUE
        # ordered by timeend). Returns (sql, params).
        month = self.month.replace(day=1)
        current_month_start = month_formatter(self.month)
        next_month_start = month_formatter(self.month + relativedelta(months=1))
        query_params = {
            "month": month_formatter(month),
            "state_id": self.state_id,
            "current_month_start": current_month_start,
            "next_month_start": next_month_start,
        }
        return """
        SELECT DISTINCT
        %(state_id)s AS state_id,
        supervisor_id,
        awc_id,
        %(month)s::DATE AS month,
        person_case_id AS person_case_id,
        LAST_VALUE(is_registered) OVER w AS is_registered,
        timeend AS registration_date
        FROM "{ucr_tablename}"
        WHERE state_id = %(state_id)s AND
              timeend >= %(current_month_start)s AND timeend < %(next_month_start)s AND
              person_case_id IS NOT NULL
        WINDOW w AS (
            PARTITION BY supervisor_id, person_case_id
            ORDER BY timeend RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
        )
        """.format(
            ucr_tablename=self.ucr_tablename,
            tablename=self.aggregate_parent_table,
        ), query_params
    def aggregation_query(self):
        # FULL OUTER JOIN of this month's UCR rows with last month's
        # aggregate rows, preferring fresh UCR values via COALESCE so a
        # person seen in either month ends up in this month's aggregate.
        month = self.month.replace(day=1)
        ucr_query, ucr_query_params = self.data_from_ucr_query()
        query_params = {
            "month": month_formatter(month),
            "state_id": self.state_id,
            "previous_month": month_formatter(month - relativedelta(months=1)),
        }
        query_params.update(ucr_query_params)
        return """
        INSERT INTO "{tablename}" (
            state_id, supervisor_id, awc_id, month, person_case_id, is_registered, registration_date
        ) (
          SELECT
            %(state_id)s AS state_id,
            COALESCE(ucr.supervisor_id, prev_month.supervisor_id) AS supervisor_id,
            COALESCE(ucr.awc_id, prev_month.awc_id) AS awc_id,
            %(month)s::DATE AS month,
            COALESCE(ucr.person_case_id, prev_month.person_case_id) AS person_case_id,
            COALESCE(ucr.is_registered, prev_month.is_registered) as is_registered,
            COALESCE(ucr.registration_date, prev_month.registration_date) as registration_date
          FROM ({ucr_table_query}) ucr
          FULL OUTER JOIN (
             SELECT * FROM "{tablename}" WHERE month = %(previous_month)s AND state_id = %(state_id)s
          ) prev_month
          ON ucr.person_case_id = prev_month.person_case_id AND ucr.supervisor_id = prev_month.supervisor_id
          WHERE coalesce(ucr.month, %(month)s) = %(month)s
            AND coalesce(prev_month.month, %(previous_month)s) = %(previous_month)s
            AND coalesce(prev_month.state_id, %(state_id)s) = %(state_id)s
        )
        """.format(
            ucr_tablename=self.ucr_tablename,
            tablename=self.aggregate_parent_table,
            ucr_table_query=ucr_query
        ), query_params
| 2.015625 | 2 |
desktop/core/src/desktop/api_public_urls.py | renovate-bot/hue | 1 | 12768617 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from desktop import api_public
from desktop.lib.botserver import api as botserver_api
if sys.version_info[0] > 2:
from django.urls import re_path
else:
from django.conf.urls import url as re_path
# "New" query API (i.e. connector based, lean arguments).
# e.g. https://demo.gethue.com/api/query/execute/hive
urlpatterns = [
re_path(r'^query/create_notebook/?$', api_public.create_notebook, name='query_create_notebook'),
re_path(r'^query/autocomplete/?$', api_public.autocomplete, name='query_autocomplete_databases'),
]
# Compatibility with "old" private API.
# e.g. https://demo.gethue.com/notebook/api/execute/hive
urlpatterns += [
re_path(r'^get_config/?$', api_public.get_config),
re_path(r'^get_namespaces/(?P<interface>[\w\-]+)/?$', api_public.get_context_namespaces), # To remove
re_path(r'^editor/create_notebook/?$', api_public.create_notebook, name='editor_create_notebook'),
re_path(r'^editor/create_session/?$', api_public.create_session, name='editor_create_session'),
re_path(r'^editor/close_session/?$', api_public.close_session, name='editor_close_session'),
re_path(r'^editor/execute(?:/(?P<dialect>.+))?/?$', api_public.execute, name='editor_execute'),
re_path(r'^editor/check_status/?$', api_public.check_status, name='editor_check_status'),
re_path(r'^editor/fetch_result_data/?$', api_public.fetch_result_data, name='editor_fetch_result_data'),
re_path(r'^editor/fetch_result_metadata/?$', api_public.fetch_result_metadata, name='editor_fetch_result_metadata'),
re_path(r'^editor/fetch_result_size/?$', api_public.fetch_result_size, name='editor_fetch_result_size'),
re_path(r'^editor/cancel_statement/?$', api_public.cancel_statement, name='editor_cancel_statement'),
re_path(r'^editor/close_statement/?$', api_public.close_statement, name='editor_close_statement'),
re_path(r'^editor/get_logs/?$', api_public.get_logs, name='editor_get_logs'),
re_path(r'^editor/describe/(?P<database>[^/]*)/?$', api_public.describe, name='editor_describe_database'),
re_path(r'^editor/describe/(?P<database>[^/]*)/(?P<table>[\w_\-]+)/?$', api_public.describe, name='editor_describe_table'),
re_path(r'^editor/describe/(?P<database>[^/]*)/(?P<table>\w+)/stats(?:/(?P<column>\w+))?/?$', api_public.describe, name='editor_describe_column'),
re_path(r'^editor/autocomplete/?$', api_public.autocomplete, name='editor_autocomplete_databases'),
re_path(
r"^editor/autocomplete/(?P<database>[^/?]*)/?$",
api_public.autocomplete,
name="editor_autocomplete_tables",
),
re_path(
r"^editor/autocomplete/(?P<database>[^/?]*)/(?P<table>[\w_\-]+)/?$",
api_public.autocomplete,
name="editor_autocomplete_columns",
),
re_path(
r"^editor/autocomplete/(?P<database>[^/?]*)/(?P<table>[\w_\-]+)/(?P<column>\w+)/?$",
api_public.autocomplete,
name="editor_autocomplete_column",
),
re_path(
r"^editor/autocomplete/(?P<database>[^/?]*)/(?P<table>[\w_\-]+)/(?P<column>\w+)/(?P<nested>.+)/?$",
api_public.autocomplete,
name="editor_autocomplete_nested",
),
]
urlpatterns += [
re_path(r'^storage/view=(?P<path>.*)$', api_public.storage_view, name='storage_view'),
re_path(r'^storage/download=(?P<path>.*)$', api_public.storage_download, name='storage_download'),
re_path(r'^storage/upload/file/?$', api_public.storage_upload_file, name='storage_upload_file'),
]
# Slack install API for using CORS by default
urlpatterns += [
re_path(r'^slack/install/?$', botserver_api.generate_slack_install_link, name='botserver.api.slack_install_link'),
]
| 1.648438 | 2 |
peerscout/server/services/manuscript_subject_areas.py | elifesciences/peerscout | 3 | 12768618 | class ManuscriptSubjectAreaService:
def __init__(self, df):
self._df = df
self._subject_areas_by_id_map = df.groupby(
'version_id')['subject_area'].apply(sorted).to_dict()
@staticmethod
def from_database(db, valid_version_ids=None):
df = db.manuscript_subject_area.read_frame()
if valid_version_ids is not None:
df = df[df['version_id'].isin(valid_version_ids)]
return ManuscriptSubjectAreaService(df)
def get_ids_by_subject_areas(self, subject_areas):
df = self._df[self._df['subject_area'].str.lower().isin(
[s.lower() for s in subject_areas]
)]
return set(df['version_id'])
def get_subject_areas_by_id(self, manuscript_version_id):
return self._subject_areas_by_id_map.get(manuscript_version_id, [])
def get_all_subject_areas(self):
return set(self._df['subject_area'].unique())
| 2.53125 | 3 |
app.py | rselent/schrodingers-server | 2 | 12768619 | <reponame>rselent/schrodingers-server
# Schrödinger's Server
# by <NAME>
#
# Purpose: register and ping URL, and display UP/DOWN status
# (this is expected to change over time)
#
# Requirements: Python (3.9)
#
#
#
# ==========================================================
# ==========================================================
import socket
DEBUG = 2	# DEBUG LEVEL
		# 0 = OFF
		# 1 = BASIC CONSOLE OUTPUT
		# 2 = BASIC + ADVANCED
SOCK4T = socket.socket(socket.AF_INET, socket.SOCK_STREAM)	# IPv4 TCP sock
SOCK6T = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)	# IPv6 TCP sock
# NOTE(review): TIMEOUT == 0 makes socket.setdefaulttimeout(TIMEOUT) (used in
# headRequest) put sockets into non-blocking mode, so connect() raises
# immediately instead of waiting — a small positive value looks intended.
TIMEOUT = 0
# NOTE(review): a TCP socket can only connect() once; reusing these
# module-level sockets across domains is the likely source of the errors
# the setsockopt "Attempt #1" below was trying to fix.
# Attempt #1 to solve socket errors -- failed D:
SOCK4T.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def domainInput():
    """Interactively collect up to 10 domain names from the user.

    An empty entry ends input early.  Returns a dict mapping slot index
    (0-9) to a single-element list holding the raw string as typed —
    {} if the very first entry is empty.
    """
    domains = {}
    print("\n" * 3, end='')
    print("Hello user! Please enter up to 10 domains that you'd like to check.")
    print("Pressing ENTER without a domain input will end this early", "and advance to the results.")
    print("\nEnter domain:\t ", end='')
    entry = input()
    for slot in range(10):
        domains[slot] = [""]
        if DEBUG == 2:
            print(f"input value\t {entry}")
        if entry == "":
            # First entry empty -> return {}; otherwise drop the
            # placeholder slot just created and stop collecting.
            if len(domains) > 1:
                domains.popitem()
            break
        domains[slot] = [entry]
        if slot < 9:
            # prompt with padding only to keep console input vertically aligned
            entry = input("\t\t ")
    if DEBUG == 2:
        print(f"\n\t EXIT CALLED\ndict values\t {domains}\n")
    return domains
def domainCheck(urlMaybeDict):
    """Normalize and sanity-check every URL collected by domainInput().

    For each slot of *urlMaybeDict* ({index: [url_string]}): lower-cases
    the string, strips a leading 'https://'/'http://' scheme and a
    leading 'www.', then accepts it only if a '.' sits in the 4th- or
    3rd-from-last position (a rough '.tld' heuristic, e.g. '.com').
    Cleaned strings are written back into the dict in place.

    Returns the cleaned dict, or None (after setting the module flag
    BIGOOF = 1 and printing an error) on the first empty or
    invalid-looking entry.

    BUG FIX: the original indexed urlMaybe[-4] unconditionally, raising
    IndexError for any cleaned string shorter than 4 characters; such
    strings are now reported as invalid instead of crashing.
    """
    global BIGOOF
    for i, _ in enumerate(urlMaybeDict):
        urlMaybe = urlMaybeDict[i][0]  # unwrap the single-element list
        if urlMaybe == "":
            BIGOOF = 1
            print("ERROR: URL string is empty.\n")
            return
        urlMaybe = urlMaybe.lower()
        # Strip an explicit scheme, if any.
        if urlMaybe[:8] == 'https://':
            urlMaybe = urlMaybe[8:]
        elif urlMaybe[:7] == 'http://':
            urlMaybe = urlMaybe[7:]
        # Strip a leading 'www.'.
        if urlMaybe[:4] == 'www.':
            urlMaybe = urlMaybe[4:]
        if DEBUG == 2: print(f"after check\t {urlMaybe}")
        # Guard the negative indexing: strings shorter than 4 chars cannot
        # carry a plausible TLD and previously raised IndexError here.
        if len(urlMaybe) >= 4 and (urlMaybe[-4] == "." or urlMaybe[-3] == "."):
            urlMaybeDict[i][0] = urlMaybe  # reassign cleaned URL to dict
        else:
            BIGOOF = 1
            print(f"ERROR: URL given ({urlMaybe}) does not look valid.\n")
            return
    return urlMaybeDict
def getIP(urlProbablyDict, port=80, protocol=socket.IPPROTO_TCP):
    """Resolve each domain via DNS, appending its IPv4 and IPv6 addresses.

    Each dict value [domain] becomes [domain, ipv4, ipv6].  Returns the
    dict, or None (setting BIGOOF = 1) if any domain string is empty.

    NOTE(review): this picks info[1] as the IPv4 record and info[0] as
    the IPv6 record; getaddrinfo ordering is resolver-dependent and a
    single-result response would raise IndexError — confirm.
    """
    global BIGOOF
    for key, _ in enumerate(urlProbablyDict):
        record = urlProbablyDict[key]
        if record[0] == "":
            BIGOOF = 1
            print("ERROR: URL string is empty. Halting DNS lookup\n")
            return
        info = socket.getaddrinfo(record[0], port, proto=protocol)
        record.append(info[1][4][0])  # ipv4 address
        record.append(info[0][4][0])  # ipv6 address
        if DEBUG == 2:
            print(f"\nip addresses\t{record}")
            print(f"raw info\t {info}\n")
        urlProbablyDict[key] = record
    return urlProbablyDict
def headRequest(urlDefinitelyDict, useipv6=False):
    """Probe TCP port 80 on each resolved IPv4 address and record reachability.

    Appends two booleans to every entry [domain, ipv4, ipv6]:
    index 3 -> IPv4 port 80 reachable, index 4 -> IPv6 reachable.
    IPv6 probing is not implemented, so index 4 always stays False;
    *useipv6* is kept for interface compatibility.

    BUG FIXES vs. the original:
    - A TCP socket can only connect() once; the single module-level
      SOCK4T was reused for every domain, so every probe after the first
      raised an error.  A fresh socket is now created per domain.
    - socket.setdefaulttimeout(TIMEOUT) with TIMEOUT == 0 made sockets
      non-blocking, so connect() failed immediately; a finite per-socket
      timeout is used instead.
    - SOCK4T.shutdown() after the loop raised on a never-connected
      socket; cleanup now happens in a finally block per probe.
    """
    for i, _ in enumerate(urlDefinitelyDict):
        entry = urlDefinitelyDict[i]
        entry.append(False)  # [3] IPv4 reachable
        entry.append(False)  # [4] IPv6 reachable (probing disabled)
        probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        probe.settimeout(5)  # finite timeout so a dead host can't hang the scan
        try:
            probe.connect((entry[1], 80))
            entry[3] = True  # connected: service is up
        except socket.error as oof4:
            print(oof4)  # leave [3] False and report why
        finally:
            probe.close()
        urlDefinitelyDict[i] = entry
    return urlDefinitelyDict
def dispStatus(upDownDict):
    """Print an UP/DOWN summary table for every probed service."""
    if DEBUG >= 1:
        print("\n", end='')
    print("=" * 120)
    print("\tSERVICE\t\t\t\t\tUP/DOWN")
    print("-" * 120)
    for i, _ in enumerate(upDownDict):
        entry = upDownDict[i]
        # [3]/[4] are the IPv4/IPv6 reachability flags set by headRequest.
        state = "UP" if (entry[3] or entry[4]) else "DOWN"
        print(f"\t{entry[0]}\t\t\t\t{state}")
    print("=" * 120, "\n")
if __name__ == "__main__":
BIGOOF = 0
# domainDict = {}
mainInput = domainInput()
checkedInput = domainCheck( mainInput)
if BIGOOF == 0:
gotIP = getIP( checkedInput)
upDown = headRequest( gotIP, useipv6= False)
dispStatus( upDown)
else:
print( "BIGOOF THROWN. service discovery aborted. please try again with a valid URL.\n")
| 2.65625 | 3 |
src/examples/python/gen_py/datahub/ttypes.py | RogerTangos/datahub-stub | 192 | 12768620 | #
# Autogenerated by Thrift Compiler (0.9.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
# NOTE(review): Thrift-generated struct (compiler 0.9.2, Python 2 idioms such
# as iteritems) — regenerate from the .thrift IDL rather than hand-editing.
class ConnectionParams:
  """
  Attributes:
   - client_id
   - seq_id
   - user
   - password
   - app_id
   - app_token
   - repo_base
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'client_id', None, None, ), # 1
    (2, TType.STRING, 'seq_id', None, None, ), # 2
    (3, TType.STRING, 'user', None, None, ), # 3
    (4, TType.STRING, 'password', None, None, ), # 4
    (5, TType.STRING, 'app_id', None, None, ), # 5
    (6, TType.STRING, 'app_token', None, None, ), # 6
    (7, TType.STRING, 'repo_base', None, None, ), # 7
  )
  def __init__(self, client_id=None, seq_id=None, user=None, password=None, app_id=None, app_token=None, repo_base=None,):
    self.client_id = client_id
    self.seq_id = seq_id
    self.user = user
    self.password = password
    self.app_id = app_id
    self.app_token = app_token
    self.repo_base = repo_base
  def read(self, iprot):
    # Fast path: C-accelerated decode when the accelerated binary protocol is in use.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.client_id = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.seq_id = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.user = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.password = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.STRING:
          self.app_id = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.STRING:
          self.app_token = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 7:
        if ftype == TType.STRING:
          self.repo_base = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ConnectionParams')
    if self.client_id is not None:
      oprot.writeFieldBegin('client_id', TType.STRING, 1)
      oprot.writeString(self.client_id)
      oprot.writeFieldEnd()
    if self.seq_id is not None:
      oprot.writeFieldBegin('seq_id', TType.STRING, 2)
      oprot.writeString(self.seq_id)
      oprot.writeFieldEnd()
    if self.user is not None:
      oprot.writeFieldBegin('user', TType.STRING, 3)
      oprot.writeString(self.user)
      oprot.writeFieldEnd()
    if self.password is not None:
      oprot.writeFieldBegin('password', TType.STRING, 4)
      oprot.writeString(self.password)
      oprot.writeFieldEnd()
    if self.app_id is not None:
      oprot.writeFieldBegin('app_id', TType.STRING, 5)
      oprot.writeString(self.app_id)
      oprot.writeFieldEnd()
    if self.app_token is not None:
      oprot.writeFieldBegin('app_token', TType.STRING, 6)
      oprot.writeString(self.app_token)
      oprot.writeFieldEnd()
    if self.repo_base is not None:
      oprot.writeFieldBegin('repo_base', TType.STRING, 7)
      oprot.writeString(self.repo_base)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.client_id)
    value = (value * 31) ^ hash(self.seq_id)
    value = (value * 31) ^ hash(self.user)
    value = (value * 31) ^ hash(self.password)
    value = (value * 31) ^ hash(self.app_id)
    value = (value * 31) ^ hash(self.app_token)
    value = (value * 31) ^ hash(self.repo_base)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
# NOTE(review): Thrift-generated struct — regenerate from the IDL, do not hand-edit.
class Connection:
  """
  Attributes:
   - client_id
   - seq_id
   - user
   - is_app
   - repo_base
   - cursor
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'client_id', None, None, ), # 1
    (2, TType.STRING, 'seq_id', None, None, ), # 2
    (3, TType.STRING, 'user', None, None, ), # 3
    (4, TType.BOOL, 'is_app', None, None, ), # 4
    (5, TType.STRING, 'repo_base', None, None, ), # 5
    (6, TType.I64, 'cursor', None, None, ), # 6
  )
  def __init__(self, client_id=None, seq_id=None, user=None, is_app=None, repo_base=None, cursor=None,):
    self.client_id = client_id
    self.seq_id = seq_id
    self.user = user
    self.is_app = is_app
    self.repo_base = repo_base
    self.cursor = cursor
  def read(self, iprot):
    # Fast path: C-accelerated decode when the accelerated binary protocol is in use.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.client_id = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.seq_id = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.user = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.BOOL:
          self.is_app = iprot.readBool();
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.STRING:
          self.repo_base = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.I64:
          self.cursor = iprot.readI64();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('Connection')
    if self.client_id is not None:
      oprot.writeFieldBegin('client_id', TType.STRING, 1)
      oprot.writeString(self.client_id)
      oprot.writeFieldEnd()
    if self.seq_id is not None:
      oprot.writeFieldBegin('seq_id', TType.STRING, 2)
      oprot.writeString(self.seq_id)
      oprot.writeFieldEnd()
    if self.user is not None:
      oprot.writeFieldBegin('user', TType.STRING, 3)
      oprot.writeString(self.user)
      oprot.writeFieldEnd()
    if self.is_app is not None:
      oprot.writeFieldBegin('is_app', TType.BOOL, 4)
      oprot.writeBool(self.is_app)
      oprot.writeFieldEnd()
    if self.repo_base is not None:
      oprot.writeFieldBegin('repo_base', TType.STRING, 5)
      oprot.writeString(self.repo_base)
      oprot.writeFieldEnd()
    if self.cursor is not None:
      oprot.writeFieldBegin('cursor', TType.I64, 6)
      oprot.writeI64(self.cursor)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.client_id)
    value = (value * 31) ^ hash(self.seq_id)
    value = (value * 31) ^ hash(self.user)
    value = (value * 31) ^ hash(self.is_app)
    value = (value * 31) ^ hash(self.repo_base)
    value = (value * 31) ^ hash(self.cursor)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
# NOTE(review): Thrift-generated struct — regenerate from the IDL, do not hand-edit.
class Tuple:
  """
  Attributes:
   - cells
  """
  thrift_spec = (
    None, # 0
    (1, TType.LIST, 'cells', (TType.STRING,None), None, ), # 1
  )
  def __init__(self, cells=None,):
    self.cells = cells
  def read(self, iprot):
    # Fast path: C-accelerated decode when the accelerated binary protocol is in use.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          self.cells = []
          (_etype3, _size0) = iprot.readListBegin()
          for _i4 in xrange(_size0):
            _elem5 = iprot.readString();
            self.cells.append(_elem5)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('Tuple')
    if self.cells is not None:
      oprot.writeFieldBegin('cells', TType.LIST, 1)
      oprot.writeListBegin(TType.STRING, len(self.cells))
      for iter6 in self.cells:
        oprot.writeString(iter6)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.cells)
    return value
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
# NOTE(review): Thrift-generated struct (Python 2: xrange/iteritems).
# Regenerate from the .thrift IDL rather than editing by hand.
class ResultSet:
  """
  Attributes:
   - status
   - con
   - num_tuples
   - num_more_tuples
   - tuples
   - field_names
   - field_types
  """

  # (field id, wire type, name, nested spec, default) per Thrift field id.
  thrift_spec = (
    None, # 0
    (1, TType.BOOL, 'status', None, None, ), # 1
    (2, TType.STRUCT, 'con', (Connection, Connection.thrift_spec), None, ), # 2
    (3, TType.I64, 'num_tuples', None, None, ), # 3
    (4, TType.I64, 'num_more_tuples', None, None, ), # 4
    (5, TType.LIST, 'tuples', (TType.STRUCT,(Tuple, Tuple.thrift_spec)), None, ), # 5
    (6, TType.LIST, 'field_names', (TType.STRING,None), None, ), # 6
    (7, TType.LIST, 'field_types', (TType.STRING,None), None, ), # 7
  )

  def __init__(self, status=None, con=None, num_tuples=None, num_more_tuples=None, tuples=None, field_names=None, field_types=None,):
    self.status = status
    self.con = con
    self.num_tuples = num_tuples
    self.num_more_tuples = num_more_tuples
    self.tuples = tuples
    self.field_names = field_names
    self.field_types = field_types

  def read(self, iprot):
    # Deserialize from the input protocol; unknown fields are skipped so
    # newer peers remain wire-compatible.
    # Fast path: C-accelerated decoder when protocol and spec allow it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.BOOL:
          self.status = iprot.readBool();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.con = Connection()
          self.con.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I64:
          self.num_tuples = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.I64:
          self.num_more_tuples = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.LIST:
          self.tuples = []
          (_etype10, _size7) = iprot.readListBegin()
          for _i11 in xrange(_size7):
            _elem12 = Tuple()
            _elem12.read(iprot)
            self.tuples.append(_elem12)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.LIST:
          self.field_names = []
          (_etype16, _size13) = iprot.readListBegin()
          for _i17 in xrange(_size13):
            _elem18 = iprot.readString();
            self.field_names.append(_elem18)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 7:
        if ftype == TType.LIST:
          self.field_types = []
          (_etype22, _size19) = iprot.readListBegin()
          for _i23 in xrange(_size19):
            _elem24 = iprot.readString();
            self.field_types.append(_elem24)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Serialize to the output protocol; None fields are omitted entirely.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ResultSet')
    if self.status is not None:
      oprot.writeFieldBegin('status', TType.BOOL, 1)
      oprot.writeBool(self.status)
      oprot.writeFieldEnd()
    if self.con is not None:
      oprot.writeFieldBegin('con', TType.STRUCT, 2)
      self.con.write(oprot)
      oprot.writeFieldEnd()
    if self.num_tuples is not None:
      oprot.writeFieldBegin('num_tuples', TType.I64, 3)
      oprot.writeI64(self.num_tuples)
      oprot.writeFieldEnd()
    if self.num_more_tuples is not None:
      oprot.writeFieldBegin('num_more_tuples', TType.I64, 4)
      oprot.writeI64(self.num_more_tuples)
      oprot.writeFieldEnd()
    if self.tuples is not None:
      oprot.writeFieldBegin('tuples', TType.LIST, 5)
      oprot.writeListBegin(TType.STRUCT, len(self.tuples))
      for iter25 in self.tuples:
        iter25.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.field_names is not None:
      oprot.writeFieldBegin('field_names', TType.LIST, 6)
      oprot.writeListBegin(TType.STRING, len(self.field_names))
      for iter26 in self.field_names:
        oprot.writeString(iter26)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.field_types is not None:
      oprot.writeFieldBegin('field_types', TType.LIST, 7)
      oprot.writeListBegin(TType.STRING, len(self.field_types))
      for iter27 in self.field_types:
        oprot.writeString(iter27)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # 'status' is the only required field in the IDL.
    if self.status is None:
      raise TProtocol.TProtocolException(message='Required field status is unset!')
    return

  def __hash__(self):
    # Generated 17/31 polynomial hash over all fields.
    # NOTE(review): hashes mutable list attributes — fails for populated lists.
    value = 17
    value = (value * 31) ^ hash(self.status)
    value = (value * 31) ^ hash(self.con)
    value = (value * 31) ^ hash(self.num_tuples)
    value = (value * 31) ^ hash(self.num_more_tuples)
    value = (value * 31) ^ hash(self.tuples)
    value = (value * 31) ^ hash(self.field_names)
    value = (value * 31) ^ hash(self.field_types)
    return value

  def __repr__(self):
    # Render as ClassName(field=value, ...). Python 2 only (iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    # Defined explicitly for Python 2, which does not derive != from ==.
    return not (self == other)
# NOTE(review): Thrift-generated exception (Python 2 style). Regenerate from
# the .thrift IDL rather than editing by hand.
class DBException(TException):
  """
  Attributes:
   - error_code
   - message
   - details
  """

  # (field id, wire type, name, nested spec, default) per Thrift field id.
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'error_code', None, None, ), # 1
    (2, TType.STRING, 'message', None, None, ), # 2
    (3, TType.STRING, 'details', None, None, ), # 3
  )

  def __init__(self, error_code=None, message=None, details=None,):
    self.error_code = error_code
    self.message = message
    self.details = details

  def read(self, iprot):
    # Deserialize from the input protocol; unknown fields are skipped.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.error_code = iprot.readI32();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.message = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.details = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Serialize to the output protocol; None fields are omitted entirely.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('DBException')
    if self.error_code is not None:
      oprot.writeFieldBegin('error_code', TType.I32, 1)
      oprot.writeI32(self.error_code)
      oprot.writeFieldEnd()
    if self.message is not None:
      oprot.writeFieldBegin('message', TType.STRING, 2)
      oprot.writeString(self.message)
      oprot.writeFieldEnd()
    if self.details is not None:
      oprot.writeFieldBegin('details', TType.STRING, 3)
      oprot.writeString(self.details)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields declared for this exception.
    return

  def __str__(self):
    # Exceptions stringify via repr so field values appear in tracebacks.
    return repr(self)

  def __hash__(self):
    # Generated 17/31 polynomial hash over all fields.
    value = 17
    value = (value * 31) ^ hash(self.error_code)
    value = (value * 31) ^ hash(self.message)
    value = (value * 31) ^ hash(self.details)
    return value

  def __repr__(self):
    # Render as ClassName(field=value, ...). Python 2 only (iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    # Defined explicitly for Python 2, which does not derive != from ==.
    return not (self == other)
| 1.71875 | 2 |
test/day_10/test_pt1.py | teagles/teagles-advent-2021 | 0 | 12768621 | import unittest
from test.std_test_utils import STDIOTest
from teagles_advent_2021.day_10.pt1 import main
# Sample puzzle input — presumably the published Advent of Code 2021 day 10
# example, whose part-1 syntax-error score is 26397 (see test below).
TEST_INPUT = """[({(<(())[]>[[{[]{<()<>>
[(()[<>])]({[<{<<[]>>(
{([(<{}[<>[]}>{[]{[(<()>
(((({<>}<{<{<>}{[]{[]{}
[[<[([]))<([[{}[[()]]]
[{[{({}]{}}([{[{{{}}([]
{<[[]]>}<{[{[{[]{()[[[]
[<(<(<(<{}))><([]([]()
<{([([[(<>()){}]>(<<{{
<{([{{}}[<[[[<>{}]]]>[]]
"""
class TestDay10Part1(STDIOTest):
    """Day 10 part 1: the sample input must score 26397."""
    def test_with_sample_input(self):
        # Feed the sample via stdin and compare the solver's stdout exactly.
        self.assert_stdin_n_out(main, TEST_INPUT, "26397\n")
if __name__ == '__main__':
    unittest.main()  # allow running this test module directly
| 2.703125 | 3 |
pymarsys/connections.py | transcovo/pymarsys | 12 | 12768622 | <reponame>transcovo/pymarsys<filename>pymarsys/connections.py
from abc import ABC, abstractmethod
import base64
import datetime
import hashlib
import json
from urllib.parse import urljoin
import uuid
import aiohttp
import requests
EMARSYS_URI = 'https://api.emarsys.net/'
class ApiCallError(Exception):
    """Raised when the Emarsys API answers a request with an HTTP error."""
    pass
class BaseConnection(ABC):
    """Abstract base for Emarsys API connections.

    Concrete connections (synchronous and asynchronous) inherit from this
    class, which centralises construction of the WSSE authentication
    headers required by the Emarsys API.
    """

    @abstractmethod
    def __init__(self, username, secret, uri):
        self.username = username
        self.secret = secret
        self.uri = uri

    def build_authentication_variables(self):
        """Return the ``(nonce, created, password_digest)`` triple used by
        Emarsys' WSSE authentication scheme.

        The digest is the base64-encoding of the hex SHA-1 of
        ``nonce + created + secret``.
        """
        nonce = uuid.uuid4().hex
        created = datetime.datetime.utcnow().strftime(
            '%Y-%m-%dT%H:%M:%S+00:00'
        )
        digest_source = nonce + created + self.secret
        sha1_hex = hashlib.sha1(digest_source.encode()).hexdigest()
        password_digest = base64.b64encode(sha1_hex.encode()).decode()
        return nonce, created, password_digest

    def build_headers(self, other_http_headers=None):
        """Build the HTTP headers (``X-WSSE`` + ``Content-Type``) for a call.

        Entries from *other_http_headers* are merged in last, so they may
        override the defaults.
        """
        extra_headers = other_http_headers or {}
        nonce, created, password_digest = \
            self.build_authentication_variables()
        wsse_parts = [
            'UsernameToken Username="{}"'.format(self.username),
            'PasswordDigest="{}"'.format(password_digest),
            'Nonce="{}"'.format(nonce),
            'Created="{}"'.format(created),
        ]
        http_headers = {
            'X-WSSE': ','.join(wsse_parts),
            'Content-Type': 'application/json',
        }
        http_headers.update(extra_headers)
        return http_headers
class SyncConnection(BaseConnection):
    """Blocking Emarsys API connection built on the ``requests`` library."""

    def __init__(self, username, secret, uri=EMARSYS_URI):
        super().__init__(username, secret, uri)

    def make_call(self,
                  method,
                  endpoint,
                  headers=None,
                  payload=None,
                  params=None):
        """Perform an authenticated, synchronous HTTP call to the API.

        :param method: HTTP method.
        :param endpoint: Emarsys API endpoint, joined onto ``self.uri``.
        :param headers: extra HTTP headers merged over the WSSE defaults.
        :param payload: JSON body of the request.
        :param params: query-string parameters.
        :raises ApiCallError: when the API answers with an HTTP error status.
        :return: decoded JSON response as a dictionary.
        """
        payload = payload or {}
        params = params or {}
        full_url = urljoin(self.uri, endpoint)
        request_headers = self.build_headers(headers)
        response = requests.request(
            method,
            full_url,
            headers=request_headers,
            json=payload,
            params=params,
        )
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as err:
            message = 'Error message: "{}" \n Error details: "{}"'.format(
                err,
                response.text
            )
            raise ApiCallError(message)
        return response.json()
class AsyncConnection(BaseConnection):
    """
    Asynchronous connection for Emarsys or inherited-from BaseEndpoint objects.
    """
    def __init__(self, username, secret, uri=EMARSYS_URI):
        super().__init__(username, secret, uri)
        # NOTE(review): the ClientSession is created here, outside any running
        # event loop, and is never closed — aiohttp recommends creating and
        # closing sessions inside a coroutine; confirm intended lifecycle.
        self.session = aiohttp.ClientSession()

    async def make_call(self,
                        method,
                        endpoint,
                        headers=None,
                        payload=None,
                        params=None):
        """
        Make an authenticated asynchronous HTTP call to the Emarsys api using
        the aiohttp library.
        :param method: HTTP method.
        :param endpoint: Emarsys' api endpoint, joined onto ``self.uri``.
        :param headers: extra HTTP headers merged over the WSSE defaults.
        :param payload: JSON-serialized request body.
        :param params : query-string parameters.
        :raises ApiCallError: when the API answers with an HTTP error status.
        :return: Coroutine with the result of the query.
        """
        if not payload:
            payload = {}
        if not params:
            params = {}
        url = urljoin(self.uri, endpoint)
        headers = self.build_headers(headers)
        async with self.session.request(
                method,
                url,
                headers=headers,
                data=json.dumps(payload),
                params=params
        ) as response:
            try:
                response.raise_for_status()
            except aiohttp.ClientError as err:
                raise ApiCallError(
                    'Error message: "{}" \n Error details: "{}"'.format(
                        err,
                        await response.text()
                    )
                )
            # The API sometimes labels JSON as 'text/json'; fall back to
            # decoding the raw text as JSON for any other content type.
            if response.headers['Content-Type'] == 'text/json':
                return await response.json()
            return json.loads(await response.text())
| 2.71875 | 3 |
services/core/MasterDriverAgent/master_driver/tests/test_revert_mixin.py | Entek-Technical-Services/BEMOSS3.5 | 73 | 12768623 | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright (c) 2015, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization
# that has cooperated in the development of these materials, makes
# any warranty, express or implied, or assumes any legal liability
# or responsibility for the accuracy, completeness, or usefulness or
# any information, apparatus, product, software, or process disclosed,
# or represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does
# not necessarily constitute or imply its endorsement, recommendation,
# r favoring by the United States Government or any agency thereof,
# or Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
#}}}
from master_driver.interfaces.fakedriver import Interface
import pytest
# CSV registry consumed by Interface.configure(): two writable float points,
# "Float" with a default (Starting Value 50) and "FloatNoDefault" without one.
registry_config_string = """Point Name,Volttron Point Name,Units,Units Details,Writable,Starting Value,Type,Notes
Float,Float,F,-100 to 300,TRUE,50,float,CO2 Reading 0.00-2000.0 ppm
FloatNoDefault,FloatNoDefault,F,-100 to 300,TRUE,,float,CO2 Reading 0.00-2000.0 ppm
"""
@pytest.mark.revert
def test_revert_point():
    """Setting a point and reverting it restores the configured default."""
    driver = Interface()
    driver.configure({}, registry_config_string)

    assert driver.get_point("Float") == 50.0

    driver.set_point("Float", 25.0)
    assert driver.get_point("Float") == 25.0

    driver.revert_point("Float")
    assert driver.get_point("Float") == 50.0
@pytest.mark.revert
def test_revert_device():
    """Reverting the whole device restores every point's configured default."""
    driver = Interface()
    driver.configure({}, registry_config_string)

    assert driver.get_point("Float") == 50.0

    driver.set_point("Float", 25.0)
    assert driver.get_point("Float") == 25.0

    driver.revert_all()
    assert driver.get_point("Float") == 50.0
@pytest.mark.revert
def test_revert_point_no_default():
    """Reverting a point with no configured default restores the last
    scraped value.

    The set/revert cycle runs twice: the second pass proves that reverting
    restores the revert-tracking state as well as the value.
    """
    interface = Interface()
    interface.configure({}, registry_config_string)

    initial_value = interface.get_point("FloatNoDefault")
    # scrape_all() captures the current values as the revert targets.
    scrape_values = interface.scrape_all()
    assert scrape_values["FloatNoDefault"] == initial_value

    test_value = initial_value + 1.0
    for _ in range(2):
        interface.set_point("FloatNoDefault", test_value)
        assert interface.get_point("FloatNoDefault") == test_value

        interface.revert_point("FloatNoDefault")
        assert interface.get_point("FloatNoDefault") == initial_value
@pytest.mark.revert
def test_revert_all_no_default():
    """Device-wide revert restores the last scraped value for points with
    no configured default.

    The set/revert cycle runs twice: the second pass proves that reverting
    restores the revert-tracking state as well as the value.
    """
    interface = Interface()
    interface.configure({}, registry_config_string)

    initial_value = interface.get_point("FloatNoDefault")
    # scrape_all() captures the current values as the revert targets.
    scrape_values = interface.scrape_all()
    assert scrape_values["FloatNoDefault"] == initial_value

    test_value = initial_value + 1.0
    for _ in range(2):
        interface.set_point("FloatNoDefault", test_value)
        assert interface.get_point("FloatNoDefault") == test_value

        interface.revert_all()
        assert interface.get_point("FloatNoDefault") == initial_value
@pytest.mark.revert
def test_revert_no_default_changing_value():
    """When the underlying register changes between scrapes, revert targets
    the newest scraped value, not the original one.

    The set/revert cycle runs twice: the second pass proves that reverting
    restores the revert-tracking state as well as the value.
    """
    interface = Interface()
    interface.configure({}, registry_config_string)

    initial_value = interface.get_point("FloatNoDefault")
    # Initialize the revert value.
    interface.scrape_all()

    # Manually poke the register to simulate the device changing on its own.
    new_value = initial_value + 1.0
    register = interface.get_register_by_name("FloatNoDefault")
    register.value = new_value
    # Update the revert value.
    interface.scrape_all()

    test_value = new_value + 1.0
    for _ in range(2):
        interface.set_point("FloatNoDefault", test_value)
        assert interface.get_point("FloatNoDefault") == test_value

        interface.revert_point("FloatNoDefault")
        reverted = interface.get_point("FloatNoDefault")
        assert reverted == new_value
        assert reverted != initial_value
| 0.851563 | 1 |
djenga/management/commands/clean_command_output.py | 2ps/djenga | 6 | 12768624 | <reponame>2ps/djenga<gh_stars>1-10
from .statuscommand import StatusCommand
from ...models import ManagementCommand
from ...models import CommandOutput
class Command(StatusCommand):
    """Management command that prunes stored CommandOutput history."""

    help = (
        'Cleans up the djenga.models.CommandOutput '
        'values by removing old entries'
    )

    def __init__(self):
        super(Command, self).__init__()
        self.commands = []
        self.keep = None

    def add_arguments(self, parser):
        # -n / --keep controls how many recent runs survive the purge.
        parser.add_argument(
            '-n', '--keep',
            help='Saves the last N runs of the command and delete the rest',
            type=int,
            default=10,
            dest='keep')

    def load_commands(self):
        """Cache every registered management command."""
        self.commands = ManagementCommand.objects.all()

    def clean_command(self, command_id):
        """Delete all but the newest ``self.keep`` outputs of one command."""
        output_ids = list(
            CommandOutput.objects.filter(
                command_id=command_id
            ).order_by('-id').values_list('id', flat=True)
        )
        if output_ids and len(output_ids) > self.keep:
            stale_ids = output_ids[self.keep:]
            CommandOutput.objects.filter(
                id__in=stale_ids
            ).delete()

    def clean_commands(self):
        """Clean each loaded command, logging progress along the way."""
        for command in self.commands:
            self.plain_log('Cleaning ')
            self.color_log(self.style.SQL_TABLE, command.name)
            self.plain_log('...')
            self.clean_command(command.id)
            self.color_log(self.style.SUCCESS, 'Done.\n')

    def handle(self, keep, *args, **options):  # pylint: disable=W0221
        """Entry point: purge old outputs for every management command."""
        self.keep = keep
        self.load_commands()
        self.clean_commands()
        return ''
| 2.328125 | 2 |
src/components/client/logica/logica_client.py | adp1002/practica-dms-2019-2020 | 0 | 12768625 | from conexiones.auth_client import AuthClient
from conexiones.hub_client import HubClient
from conexiones.game_client import GameClient
"""
Clase LogicaCliente es un clase con metodos estaticos.
Es la clase que lleva la logica y la que se comunica con la capa de datos.
"""
class LogicaCliente:
    """Client-side business logic.

    Static facade between the UI layer and the data layer (auth, hub and
    game-server clients). Originally the per-method documentation was
    written as bare class-level strings placed *before* each
    ``@staticmethod`` decorator; those strings were evaluated and discarded
    instead of becoming docstrings, so they have been moved inside the
    methods they describe.
    """

    @staticmethod
    def registrar_usuario(username, password):
        """Register the user on the auth server.

        Parameters:
            username: name of the user to register.
            password: password of the user to register.

        Returns:
            True when the player was registered successfully, else False.
        """
        return AuthClient.instance().registrar(username, password)

    @staticmethod
    def loguear_usuario(username, password):
        """Log the user in.

        Parameters:
            username: name of the user logging in.
            password: the user's password.

        Returns:
            The user's validation token on success, otherwise None.
        """
        token = AuthClient.instance().login(username, password)
        return token

    @staticmethod
    def obtener_servidores(token):
        """Return the list of game servers known to the hub."""
        return HubClient.instance().get_servers(token)

    @staticmethod
    def registrarse_en_servidor(token, servidor_seleccionado):
        """Register the token's user on the selected game server.

        Parameters:
            token: token of the user making the request.
            servidor_seleccionado: mapping with the server's 'host'/'port'.

        Returns:
            The GameClient the player registered on, or None on error.
        """
        servidor_de_juego = GameClient(servidor_seleccionado['host'], servidor_seleccionado['port'])
        # Defensive: also bail out if registration on the server fails.
        if (servidor_de_juego is None) or (not servidor_de_juego.registrar_usuario_en_servidor(token)):
            print('\nHa ocurrido un error al intentar entrar al servidor. Intenta probar en un nuevo servidor.\n')
            return None
        else:
            return servidor_de_juego

    @staticmethod
    def realizar_jugada(token, servidor_de_juego, x, y):
        """Play one move at board coordinates (x, y).

        Parameters:
            token: token of the playing user.
            servidor_de_juego: game server the user is playing on.
            x: X coordinate where the piece is placed.
            y: Y coordinate where the piece is placed.

        Returns:
            True when the server accepted the move.
        """
        se_ha_realizado_jugada = servidor_de_juego.jugar(token, x, y)
        return se_ha_realizado_jugada

    @staticmethod
    def finalizar_partida(token, servidor_seleccionado):
        """Tell the server that the user finished the game.

        Returns:
            True when the server acknowledged the request, else False.
        """
        return servidor_seleccionado.finalizar_partida(token)

    @staticmethod
    def get_score():
        """Return the current score from the auth service."""
        return AuthClient.instance().get_score()
experiments/geneticProgramming/packing.py | quintonweenink/gp-1d-bin-packing-objective-func | 0 | 12768626 | import pandas as pd
import numpy as np
class Packing(object):
    """1-D bin packing solved with a genetic algorithm.

    A chromosome is a permutation of item indices; fitness comes from a
    GP-evolved tree (``self.fitTree``) evaluated over per-bin statistics.
    """

    def getMappedFitness(self, chromosome):
        """Simulate sequentially filling bins in chromosome order and score
        the packing with the GP fitness tree.

        Returns (tree_fitness, bins_required).
        """
        # Map the permutation onto item sizes; ``result`` tracks cumulative
        # size minus one bin capacity.
        mappedChromosome = self.items[chromosome]
        spaces = np.zeros(len(mappedChromosome), dtype=int)
        result = np.cumsum(mappedChromosome) - self.BIN_CAPACITY
        index_of_old_bin = 0
        binsRequired = 0
        spacesLeftOpen = []
        consumedSpaces = []
        itemsInBin = []
        while True:
            binsRequired += 1
            # Locate the last index whose cumulative load still fits the bin
            # (flip + running-max finds the last True of ``result <= 0``).
            max_accumulate = np.maximum.accumulate(np.flipud(result <= 0))
            index_of_new_bin = self.PROBLEM_SIZE - next((idx for idx, val in np.ndenumerate(max_accumulate) if val == True))[0] - 1
            space_left_open = np.abs(result[index_of_new_bin])
            spaces[index_of_new_bin] = space_left_open
            # Shift the remaining cumulative loads as if the bin were closed.
            result += space_left_open
            spacesLeftOpen.append(space_left_open)
            consumedSpaces.append(self.BIN_CAPACITY - space_left_open)
            itemsInBin.append(index_of_new_bin - index_of_old_bin)
            index_of_old_bin = index_of_new_bin
            if np.max(result) <= 0:
                break
            result -= self.BIN_CAPACITY
        # Evaluate the GP tree on the collected per-bin feature vectors.
        exec_result = self.fitTree.execute([spacesLeftOpen, consumedSpaces, itemsInBin], [binsRequired, self.BIN_CAPACITY, 1, 2])
        return exec_result, binsRequired

    def toStringMappedFitness(self, chromosome):
        """Render the packing as ASCII art, one bin per line:
        'X' for consumed space, '_' for leftover space."""
        result = np.cumsum(self.problemSet[chromosome]) - self.BIN_CAPACITY
        output = ''
        while True:
            # Same last-fitting-index trick as in getMappedFitness.
            max_accumulate = np.maximum.accumulate(np.flipud(result <= 0))
            index_of_new_bin = self.PROBLEM_SIZE - next((idx for idx, val in np.ndenumerate(max_accumulate) if val == True))[
                0] - 1
            space_left_open = np.abs(result[index_of_new_bin])
            result += space_left_open
            output += '|'
            output += (self.BIN_CAPACITY - space_left_open - 2) * 'X'
            output += '|'
            output += '_' * space_left_open
            output += '\n'
            if np.max(result) <= 0:
                break
            result -= self.BIN_CAPACITY
        return output

    def tournamentSelector(self, population, reverse=False):
        """Pick the best (or, with reverse=True, worst) of a random
        tournament. Returns (chromosome, population index, fitness)."""
        random_indicies = np.random.randint(self.POPULATION_SIZE, size=self.TOURNAMENT_SIZE).tolist()
        tournament = []
        for idx, val in np.ndenumerate(random_indicies):
            tournament.append(population[val])
        results = []
        for val in tournament:
            result, bin = self.getMappedFitness(val)
            results.append(result)
        results = np.array(results)
        # Lower fitness is better; reverse=True selects the tournament loser.
        if not reverse:
            pos = np.argmin(results)
        else:
            pos = np.argmax(results)
        return population[random_indicies[pos]], random_indicies[pos], results[pos]

    def multipleSwapCrossover(self, p1, p2, swaps=4):
        """Permutation-preserving crossover: at each drawn position, inject
        the other parent's gene and remove its duplicate elsewhere."""
        draws = np.random.randint(self.PROBLEM_SIZE, size=swaps)
        c1 = p1.copy()
        c2 = p2.copy()
        for i, val in enumerate(draws):
            c1item = c1[val]
            c2item = c2[val]
            c1 = np.delete(c1, np.where(c1 == c2item))
            c2 = np.delete(c2, np.where(c2 == c1item))
            c1 = np.insert(c1, val, c2item)
            c2 = np.insert(c2, val, c1item)
        return c1, c2

    def multipleMutator(self, p, swaps=4):
        """Mutation: move ``swaps`` randomly chosen genes to random positions."""
        draws = np.random.randint(self.PROBLEM_SIZE, size=(swaps, 2))
        child = p.copy()
        for i, val in enumerate(draws):
            tmp = child[val[0]]
            child = np.delete(child, val[0])
            child = np.insert(child, val[1], tmp)
        return child

    def tryMutate(self, population):
        """With probability MUTATION_RATE, mutate a tournament winner and
        overwrite a tournament loser with the child."""
        draw = np.random.rand()
        if draw < self.MUTATION_RATE:
            p, pos, fit = self.tournamentSelector(population)
            _, kpos, _ = self.tournamentSelector(population, reverse=True)
            c = self.multipleMutator(p, 1)
            population[kpos] = c
        return population

    def tryCrossover(self, population):
        """With probability CROSSOVER_RATE, cross two tournament winners and
        replace two tournament losers; if both parents are identical,
        heavily mutate one of them instead."""
        draw = np.random.rand()
        if draw < self.CROSSOVER_RATE:
            p1, p1pos, p1fit = self.tournamentSelector(population)
            p2, p2pos, p2fit = self.tournamentSelector(population)
            if any(p1 != p2):
                _, k1pos, _ = self.tournamentSelector(population, reverse=True)
                _, k2pos, _ = self.tournamentSelector(population, reverse=True)
                c1, c2 = self.multipleSwapCrossover(p1, p2, 3)
                population[k1pos] = c1
                population[k2pos] = c2
            else:
                p1 = self.multipleMutator(p1, swaps=int(self.PROBLEM_SIZE / 5))
                population[p1pos] = p1
        return population

    def run(self, fitTree, binFile, minBins):
        """Load a benchmark instance from ``binFile`` (first two rows are the
        problem size and bin capacity), evolve for GENERATIONS, and return
        (best_fitness, bins_used_by_best). Evolution stops early once a
        sampled best individual reaches ``minBins``."""
        self.problemSet = pd.read_csv(binFile, header=None).values.tolist()
        self.PROBLEM_SIZE = self.problemSet.pop(0)[0]
        self.BIN_CAPACITY = self.problemSet.pop(0)[0]
        # GA hyper-parameters.
        self.POPULATION_SIZE = 50
        self.TOURNAMENT_SIZE = 4
        self.GENERATIONS = 250
        self.SAMPLES = 1
        self.SAMPLE_RATE = 50
        self.MUTATION_RATE = 0.3
        self.CROSSOVER_RATE = 1
        self.items = pd.DataFrame(self.problemSet)
        self.items = np.array(self.items[0])
        self.organisedChromosome = np.arange(self.items.size)
        assert self.PROBLEM_SIZE == len(self.items)
        self.fitTree = fitTree
        # Initial population: random permutations of the item indices.
        population = []
        chromosome = np.arange(self.PROBLEM_SIZE)
        for i in range(self.POPULATION_SIZE):
            np.random.shuffle(chromosome)
            population.append(chromosome.copy())
        foundMin = False
        # Mutate and crossover for each generation
        for idx, generation in enumerate(range(self.GENERATIONS)):
            if foundMin == False:
                population = self.tryMutate(population)
                population = self.tryCrossover(population)
                # Every SAMPLE_RATE generations, check for an early stop.
                if idx % self.SAMPLE_RATE == 0:
                    bins = []
                    fitness = []
                    for chromosome in population:
                        result, bin = self.getMappedFitness(chromosome)
                        bins.append(bin)
                        fitness.append(result)
                    position = int(np.argmin(fitness))
                    if bins[position] == minBins:
                        foundMin = True
        # Final evaluation over the whole population.
        bins = []
        fitness = []
        for chromosome in population:
            result, bin = self.getMappedFitness(chromosome)
            bins.append(bin)
            fitness.append(np.array(result))
        position = int(np.argmin(fitness))
        return fitness[position], bins[position]
| 2.625 | 3 |
src/xbot/cli/__init__.py | qq751220449/xbot | 2 | 12768627 | import click
from .. import __version__
@click.command()
@click.option("--count", default=1, help="Number of greetings.")
@click.version_option(version=__version__)
def main(count):
    # NOTE(review): despite the option's help text, ``count`` is only printed
    # and not used to repeat the greeting — confirm intended behaviour.
    # (No docstring on purpose: click would surface it as the --help text.)
    print(count)
    click.echo("Hello, Xbot!")
| 2.359375 | 2 |
rciam_probes/shared/templates.py | rciam/rciam_probes | 2 | 12768628 | <filename>rciam_probes/shared/templates.py<gh_stars>1-10
from string import Template
"""Nagios template output for Login health check"""
login_health_check_nagios_tmpl = Template("SP Login succeeded(${time}${type} time) | 'Login'=${time}${type}")
defaults_login_health_check = {
"time": -1,
"type": "s"
}
"""Nagios template output for Cert health check"""
cert_health_check_tmpl = Template("SSL_CERT(${type}) ${status} - x509 certificate '${subject}' from '${issuer}' is valid until "
"${not_after} (expires in ${expiration_days} days) | 'SSL Metadata Cert'=${"
"expiration_days};${warning};${critical};0;3650")
defaults_cert_health_check = {
"type": "",
"status": "",
"subject": "",
"issuer": "",
"not_after": "",
"expiration_days": "",
"warning": "",
"critical": "",
}
"""Nagios template output for Cert health check - Type all"""
cert_health_check_all_tmpl = Template("SSL_CERT(${type}) ${status}")
defaults_cert_health_check_all = {
"type": "",
"status": ""
} | 2.171875 | 2 |
kotus_json_file_generator.py | timokoola/suomicollectorscripts | 0 | 12768629 | import xmltodict, json, collections, typing, random, time
f = open("kotus-sanalista_v1.xml")
kotus_ = f.read()
f.close()
kotus = xmltodict.parse(kotus_)
def toTn(tn):
    # TODO: unimplemented placeholder — never called in this script.
    pass
def _parse_type(word, type_):
    """Extract one (word, tn, av) tuple from a single parsed <t> element.

    ``tn`` (inflection type number) defaults to 0 when absent; ``av``
    (gradation class) defaults to "_". When xmltodict parsed <av> with
    attributes, the class name lives under its "#text" key.
    """
    tn = int(type_["tn"] if "tn" in type_ else "0")
    if "av" not in type_:
        av = "_"
    elif isinstance(type_["av"], collections.OrderedDict) and "#text" in type_["av"]:
        av = type_["av"]["#text"]
    else:
        av = type_["av"]
    return (word, tn, av)


def toLine(item):
    """Flatten one parsed <st> word entry into (word, tn, av) tuples.

    Returns a single tuple for entries with one <t> element, a list of
    tuples when <t> repeats, and (word-or-None, -1, "_") when the entry is
    missing its word or type data.

    Fixes over the original: the missing-"s" branch no longer raises
    KeyError on ``item["s"]``; list-valued <t> elements now get the same
    OrderedDict "#text" handling as single elements; and the deprecated
    ``isinstance(..., typing.List)`` check is replaced with ``list``.
    """
    word = item.get("s")
    if word is None or "t" not in item:
        return (word, -1, "_")
    type_ = item["t"]
    if isinstance(type_, list):
        return [_parse_type(word, t_) for t_ in type_]
    return _parse_type(word, type_)
def toKey(item):
    # TODO: unimplemented placeholder — never called in this script.
    pass
# Split entries into those with a single inflection type and those with
# several (xmltodict yields a list when <t> repeats). ``typing.List`` in
# isinstance checks is deprecated; a plain ``list`` check is equivalent.
simple = [toLine(x) for x in kotus['kotus-sanalista']['st'] if "t" in x and not isinstance(x["t"], list)]
complex_ = [toLine(x) for x in kotus['kotus-sanalista']['st'] if "t" in x and isinstance(x["t"], list)]

# Group word records by their "<tn><av>" class key.
d = collections.defaultdict(list)
for s in simple:
    d[f"{s[1]}{s[2]}"].append({"word": s[0], "tn": s[1], "av": s[2]})
for c in complex_:
    for s in c:
        d[f"{s[1]}{s[2]}"].append({"word": s[0], "tn": s[1], "av": s[2]})

# Keep every word in ``full``; cap each group at 20 random words in ``samples``.
samples = []
full = []
for k in d.keys():
    full.extend(d[k])
    if len(d[k]) < 20:
        samples.extend(d[k])
    else:
        samples.extend(random.sample(d[k], 20))

# Write both JSON files; context managers guarantee the handles are closed.
# (The original trailing f.close() line was corrupted by stray non-Python
# text, which has been removed.)
with open(f"kotus_samples_{time.time()}.json", "w+") as f:
    f.write(json.dumps(samples, sort_keys=True, indent=4))

with open("kotus_all.json", "w+") as f:
    f.write(json.dumps(full, sort_keys=True, indent=4))
Q443.py | Linchin/python_leetcode_git | 0 | 12768630 | """
443
medium
string compression
"""
from typing import List
class Solution:
    def compress(self, chars: List[str]) -> int:
        """Run-length-compress ``chars`` in place (LeetCode 443).

        Each run of identical characters is rewritten as the character
        followed by its run length (each digit as its own element) whenever
        the run is longer than one. Returns the compressed length; callers
        read the result from ``chars[:length]``.

        Improvements over the original: the duplicated end-of-input flush
        logic is gone, and an empty input returns 0 instead of crashing on
        ``chars[-1]``.
        """
        write = 0  # next position to write compressed output
        read = 0   # start of the current run
        n = len(chars)
        while read < n:
            ch = chars[read]
            # Advance past the whole run of identical characters.
            run_end = read
            while run_end < n and chars[run_end] == ch:
                run_end += 1
            chars[write] = ch
            write += 1
            run_length = run_end - read
            if run_length > 1:
                # A run of length >= 2 encodes to at most as many cells as it
                # occupied, so writes never overtake unread input.
                for digit in str(run_length):
                    chars[write] = digit
                    write += 1
            read = run_end
        return write
chars2 = ["a"]
chars = ["a","a","b","b","c","c","c"]
sol = Solution()
print(sol.compress(chars))
print(chars)
| 3.421875 | 3 |
dft/potentials.py | jordan-melendez/dft_emulator | 0 | 12768631 | <reponame>jordan-melendez/dft_emulator
from __future__ import division
import numpy as np
from .constants import pi
def harmonic_oscillator_potential(r, mass, omega):
    """Harmonic-oscillator potential V(r) = mass * (omega * r)**2 / 2."""
    displacement_term = (omega * r) ** 2
    return mass * displacement_term / 2.0
# def harmonic_oscillator_hamiltonian_ho_basis(omega, ):
# return mass * (omega * r) ** 2 / 2.0
def kohn_sham_lo(rho, mass, g, a_s):
    r"""The leading order kohn sham potential (-J_0)

    Parameters
    ----------
    rho : local density
    mass : particle mass
    g : degeneracy factor (presumably spin degeneracy — confirm)
    a_s : s-wave scattering length

    Returns
    -------
    The LO potential 4*pi*a_s/mass * (g-1)/g * rho (linear in the density).
    """
    return 4 * pi * a_s / mass * (g - 1) / g * rho
def kohn_sham_exchange_lo(r, rho, mass, g, a_s):
return 4 * pi * r ** 2 * (-4 * pi * a_s) / (2 * mass) * (g - 1) / g * rho ** 2
def kohn_sham_energy_lo(r, dr, rho, mass, g, a_s):
rho_int = 4 * pi * dr * r ** 2 * rho ** 2
pre = -0.5 * (g - 1) / g * 4 * pi * a_s / mass
return pre * np.sum(rho_int)
def b1_lda(g):
return (
4
/ (35 * pi ** 2)
* (g - 1)
* (6 * pi ** 2 / g) ** (4 / 3)
* (11 - 2 * np.log(2))
)
def b2_lda(g):
return (g - 1) / (10 * pi) * (6 * pi ** 2 / g) ** (5 / 3)
def b3_lda(g):
return (g + 1) / (5 * pi) * (6 * pi ** 2 / g) ** (5 / 3)
def b4_lda(g):
return (6 * pi ** 2 / g) ** (5 / 3) * (
0.0755 * (g - 1) + 0.0574 * (g - 1) * (g - 3)
)
def kohn_sham_nlo_lda(rho, mass, g, a_s):
lo = kohn_sham_lo(rho=rho, mass=mass, g=g, a_s=a_s)
b1 = b1_lda(g)
nlo = (7 / 3) * b1 * a_s ** 2 / (2 * mass) * rho ** (4 / 3)
return lo + nlo
def kohn_sham_exchange_nlo(r, rho, mass, g, a_s):
lo = kohn_sham_exchange_lo(r=r, rho=rho, mass=mass, g=g, a_s=a_s)
b1 = b1_lda(g)
return lo + b1 * a_s ** 2 / (2 * mass) * rho ** (7 / 3)
def kohn_sham_energy_nlo(r, dr, rho, mass, g, a_s):
lo = kohn_sham_energy_lo(r=r, dr=dr, rho=rho, mass=mass, g=g, a_s=a_s)
rho_int = 4 * pi * dr * r ** 2 * rho ** (7 / 3)
b1 = b1_lda(g)
pre = -4 / 3 * b1 * a_s ** 2 / (2 * mass)
return lo + pre * np.sum(rho_int)
def kohn_sham_nnlo_lda(rho, mass, g, a_s, a_p, r_s):
nlo = kohn_sham_nlo_lda(rho=rho, mass=mass, g=g, a_s=a_s)
b2 = b2_lda(g)
b3 = b3_lda(g)
b4 = b4_lda(g)
pre = (b2 * a_s ** 2 * r_s) + (b3 * a_p ** 3) + (b4 * a_s ** 3)
return nlo + 8 / 3 * pre / (2 * mass) * rho ** (5 / 3)
def kohn_sham_exchange_nnlo(r, rho, mass, g, a_s, a_p, r_s):
nlo = kohn_sham_exchange_nlo(r=r, rho=rho, mass=mass, g=g, a_s=a_s)
b2 = b2_lda(g)
b3 = b3_lda(g)
b4 = b4_lda(g)
pre = (b2 * a_s ** 2 * r_s) + (b3 * a_p ** 3) + (b4 * a_s ** 3)
return nlo + pre / (2 * mass) * rho ** (8 / 3)
def kohn_sham_energy_nnlo(r, dr, rho, mass, g, a_s, a_p, r_s):
nlo = kohn_sham_energy_nlo(r=r, dr=dr, rho=rho, mass=mass, g=g, a_s=a_s)
rho_int = 4 * pi * dr * r ** 2 * rho ** (8 / 3)
b2 = b2_lda(g)
b3 = b3_lda(g)
b4 = b4_lda(g)
pre = (b2 * a_s ** 2 * r_s) + (b3 * a_p ** 3) + (b4 * a_s ** 3) / (2 * mass)
return nlo - 5 / 3 * pre * np.sum(rho_int)
| 2.375 | 2 |
backend/dynamicanalysis/codecoverage/serializers.py | IsmatAl/thesisProject | 1 | 12768632 | from rest_framework import serializers
class CodeCoverageSerializer(serializers.Serializer):
entry = serializers.CharField()
# id = serializers.CharField()
# inputs = serializers.ListField()
# expectedOutputs = serializers.ListField(required=False)
| 2.078125 | 2 |
index.bzl | zaucy/rules_cc_stamp | 1 | 12768633 | def _cc_stamp_header(ctx):
out = ctx.outputs.out
args = ctx.actions.args()
args.add("--stable_status", ctx.info_file)
args.add("--volatile_status", ctx.version_file)
args.add("--output_header", out)
ctx.actions.run(
outputs = [out],
inputs = [ctx.info_file, ctx.version_file],
arguments = [args],
executable = ctx.executable.tool,
)
return DefaultInfo(files = depset([out]))
cc_stamp_header = rule(
implementation = _cc_stamp_header,
attrs = {
"out": attr.output(
doc = "C++ header file to generate",
mandatory = True,
),
"tool": attr.label(
default = Label("@rules_cc_stamp//cc_stamp_header_generator"),
cfg = "exec",
executable = True,
),
},
)
| 1.929688 | 2 |
z2/part3/updated_part2_batch/jm/parser_errors_2/507857314.py | kozakusek/ipp-2020-testy | 1 | 12768634 | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 507857314
"""
"""
random actions, total chaos
"""
board = gamma_new(5, 4, 4, 3)
assert board is not None
assert gamma_move(board, 1, 3, 1) == 1
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_move(board, 2, 1, 0) == 1
board295535068 = gamma_board(board)
assert board295535068 is not None
assert board295535068 == (".....\n"
".....\n"
"...1.\n"
".2...\n")
del board295535068
board295535068 = None
assert gamma_move(board, 3, 4, 0) == 1
assert gamma_move(board, 4, 3, 2) == 1
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_free_fields(board, 1) == 16
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 3, 3) == 1
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 1, 2, 4) == 0
assert gamma_move(board, 1, 2, 2) == 1
assert gamma_move(board, 2, 1, 4) == 0
assert gamma_move(board, 2, 3, 3) == 0
assert gamma_move(board, 3, 0, 0) == 1
assert gamma_move(board, 3, 3, 0) == 1
board993913590 = gamma_board(board)
assert board993913590 is not None
assert board993913590 == ("...3.\n"
"..14.\n"
"...1.\n"
"32.33\n")
del board993913590
board993913590 = None
assert gamma_move(board, 4, 1, 4) == 0
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_move(board, 4, 3, 1) == 0
assert gamma_move(board, 4, 4, 3) == 1
assert gamma_move(board, 1, 3, 3) == 0
assert gamma_move(board, 2, 0, 2) == 1
board946766181 = gamma_board(board)
assert board946766181 is not None
assert board946766181 == ("...34\n"
"2.14.\n"
"...1.\n"
"32.33\n")
del board946766181
board946766181 = None
assert gamma_move(board, 3, 4, 1) == 1
assert gamma_move(board, 4, 4, 2) == 1
assert gamma_move(board, 1, 3, 3) == 0
assert gamma_free_fields(board, 1) == 8
assert gamma_move(board, 2, 2, 2) == 0
assert gamma_move(board, 3, 0, 2) == 0
assert gamma_move(board, 4, 2, 3) == 1
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_move(board, 2, 2, 1) == 1
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 3, 4, 0) == 0
assert gamma_move(board, 3, 0, 2) == 0
assert gamma_move(board, 4, 0, 0) == 0
assert gamma_golden_move(board, 4, 0, 0) == 1
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_golden_move(board, 1, 0, 4) == 0
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 3, 4, 2) == 0
assert gamma_busy_fields(board, 4) == 5
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_free_fields(board, 1) == 6
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 1, 1) == 1
assert gamma_golden_move(board, 3, 0, 4) == 0
assert gamma_move(board, 4, 1, 0) == 0
assert gamma_move(board, 4, 2, 2) == 0
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_move(board, 2, 1, 1) == 0
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_move(board, 4, 0, 1) == 1
gamma_delete(board)
| 2.140625 | 2 |
platform_agent/rerouting/rerouting.py | leogsilva/syntropy-agent | 0 | 12768635 | import threading
import time
import logging
import pyroute2
import ipaddress
import json
import re
from pyroute2 import WireGuard
from platform_agent.cmd.lsmod import module_loaded
from platform_agent.cmd.wg_info import WireGuardRead
from platform_agent.files.tmp_files import read_tmp_file
from platform_agent.routes import Routes
from platform_agent.lib.ctime import now
from platform_agent.wireguard.helpers import WG_NAME_PATTERN, ping_internal_ips, get_peer_info_all
logger = logging.getLogger()
def get_routing_info(wg):
routing_info = {}
peers_internal_ips = []
interfaces = read_tmp_file(file_type='iface_info')
res = {k: v for k, v in interfaces.items() if re.match(WG_NAME_PATTERN, k)}
for ifname in res.keys():
if not res[ifname].get('internal_ip'):
continue
internal_ip = res[ifname]['internal_ip']
metadata = res[ifname]['metadata']
peers = get_peer_info_all(ifname, wg, kind=res[ifname]['kind'])
for peer in peers:
try:
peer_internal_ip = next(
(
ip for ip in peer['allowed_ips']
if
ipaddress.ip_address(ip.split('/')[0]) in ipaddress.ip_network(f"{internal_ip.split('/')[0]}/24",
False)
),
None
)
except ValueError:
continue
if not peer_internal_ip:
continue
peers_internal_ips.append(peer_internal_ip.split('/')[0])
peer['allowed_ips'].remove(peer_internal_ip)
for allowed_ip in peer['allowed_ips']:
if not routing_info.get(allowed_ip):
routing_info[allowed_ip] = {'ifaces': {}}
routing_info[allowed_ip]['ifaces'][ifname] = {
'internal_ip': peer_internal_ip,
'metadata': metadata
}
return routing_info, peers_internal_ips
def get_interface_internal_ip(ifname):
with pyroute2.IPDB() as ipdb:
internal_ip = f"{ipdb.interfaces[ifname]['ipaddr'][0]['address']}"
return internal_ip
def get_fastest_routes(wg):
result = {}
routing_info, peers_internal_ips = get_routing_info(wg)
ping_results = ping_internal_ips(peers_internal_ips, icmp_id=20000)
for dest, routes in routing_info.items():
best_route = None
best_ping = 9999
for iface, data in routes['ifaces'].items():
int_ip = data['internal_ip'].split('/')[0]
if ping_results[int_ip]['latency_ms'] < best_ping:
best_route = {'iface': iface, 'gw': data['internal_ip'], 'metadata': data.get('metadata')}
best_ping = ping_results[int_ip]['latency_ms']
result[dest] = best_route
return result, ping_results
class Rerouting(threading.Thread):
def __init__(self, client, interval=1):
logger.debug(f"[REROUTING] Initializing")
super().__init__()
self.interval = interval
self.client = client
self.wg = WireGuard() if module_loaded("wireguard") else WireGuardRead()
self.routes = Routes()
self.stop_rerouting = threading.Event()
self.daemon = True
def run(self):
logger.debug(f"[REROUTING] Running")
previous_routes = {}
while not self.stop_rerouting.is_set():
new_routes, ping_data = get_fastest_routes(self.wg)
for dest, best_route in new_routes.items():
if not best_route or previous_routes.get(dest) == best_route:
continue
# Do rerouting logic with best_route
logger.debug(f"[REROUTING] Rerouting {dest} via {best_route}", extra={'metadata': best_route.get('metadata')})
try:
self.routes.ip_route_replace(
ifname=best_route['iface'], ip_list=[dest],
gw_ipv4=get_interface_internal_ip(best_route['iface'])
)
except KeyError: # catch if interface was deleted while executing this code
continue
previous_routes = new_routes
time.sleep(int(self.interval))
def send_latency_data(self, data):
self.client.send_log(json.dumps({
'id': "ID." + str(time.time()),
'executed_at': now(),
'type': 'PEERS_LATENCY_DATA',
'data': data
}))
def join(self, timeout=None):
self.stop_rerouting.set()
super().join(timeout)
| 2.046875 | 2 |
LDA_qje/policy_relevant_topics.py | erialc-cal/NLP-FOMC | 0 | 12768636 | <filename>LDA_qje/policy_relevant_topics.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 11 11:47:23 2021
@author: <NAME>
Selecting Topics : paper suggest topic selection policy-wise using Meade (2017)
work on dissent in policies. We may want to use the same method to select
Using Lasso regression shrinkage
"""
| 1.851563 | 2 |
app/application.py | LinkedList/postgis-baselayers | 33 | 12768637 | <filename>app/application.py
import sys
import os
import logging
import json
import shutil
import psycopg2
import time
import subprocess
import copy
import markdown
import tempfile
import functools
from threading import Timer
from glob import glob
from functools import wraps
from io import StringIO, BytesIO
from psycopg2.extras import DictCursor, RealDictCursor
from dotenv import load_dotenv
from huey import SqliteHuey
from flask import Flask, render_template, redirect, url_for, g, request, \
jsonify, Markup
from flask.json import dumps
# Version
__version__ = '0.1.1'
# Set up logging to mirror any messages to stdout
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
# Load the dotenv files in the path
load_dotenv()
# Create the application object and configure it
app = Flask(__name__)
app.config.from_object('settings')
# Create the instance directory
try:
os.makedirs(app.instance_path)
except OSError:
pass
# Create some common exceptions for things that can go wrong
class DatabaseException(Exception):
pass
class PostgisMissingException(Exception):
pass
class ConfigError(Exception):
pass
class ApplicationError(Exception):
pass
class InstallTerminated(Exception):
pass
class InstallFailed(Exception):
pass
class ApplicationNotInitialized(Exception):
pass
# Create our SqliteHuey instance in Flask's instance_path directory
huey_file = os.path.join(app.instance_path, 'huey.db')
def get_huey(reset=False):
if reset:
os.remove(huey_file)
return SqliteHuey(filename=huey_file)
huey = get_huey()
# Function to fetch a database connection
def get_db():
conn = psycopg2.connect(dbname=app.config.get("POSTGRES_DB",""),
user=app.config.get("POSTGRES_USER",""),
password=app.config.get("POSTGRES_PASSWORD",""),
port=app.config.get("POSTGRES_PORT", 5432),
host=app.config.get("POSTGRES_HOST",""),
sslmode=app.config.get("POSTGRES_SSLMODE",""),
sslrootcert=app.config.get("POSTGRES_SSLROOTCERT",""))
return conn
def check_username_and_password(username, password):
return username == app.config['PG_BASELAYERS_USERNAME'] and password == app.config['PG_BASELAYERS_PASSWORD']
# Validate the environment to make sure several environment variables are
# defined, and it sets up the database connectivity on flask's g.conn.
@app.before_request
def basic_authentication():
if app.config['PG_BASELAYERS_USERNAME'] == '' and app.config['PG_BASELAYERS_PASSWORD'] == '':
return None
auth = request.authorization
if not auth or not check_username_and_password(auth.username, auth.password):
print("RETURIING AUTHHHENTI")
resp = jsonify({'message': "Please authenticate."})
resp.status_code = 401
resp.headers['WWW-Authenticate'] = 'Basic realm="PostGIS-Baselayers"'
return resp
@app.before_request
def connect_db():
# Create the database connection on g.conn
if not hasattr(g, 'conn'):
g.conn = get_db()
# Don't do these checks when our endpoint is 'try_install_postgis'...
if request.endpoint != 'try_install_postgis':
# Verify PostGIS is installed
cur = g.conn.cursor()
try:
cur.execute("SELECT PostGIS_Lib_Version();")
g.postgis_version = cur.fetchone()[0]
except psycopg2.errors.UndefinedFunction as e:
raise PostgisMissingException("{}\n------------\nIt looks like PostGIS is not installed in this database?\n\nYou can install it manually by running this SQL command on the database: \n\nCREATE EXTENSION postgis;\n\nOr postgis-baselayers can try to do it automatically if you click the button below...\n".format(e))
# Verify that our own schema exists, otherwise raise ApplicationNotInitialized,
# which will show the initialization screen.
cur.execute("SELECT EXISTS(SELECT 1 FROM pg_namespace WHERE nspname = 'postgis_baselayers');")
res = cur.fetchone()[0]
if not res:
raise ApplicationNotInitialized("PostGIS Baselayers is not initialized yet on this database.")
# When the app context is destroyed, close the database connection.
@app.teardown_appcontext
def close_db(error):
if hasattr(g, 'conn'):
g.conn.close()
# Context processor to inject some common data into templates
@app.context_processor
def template_variables():
return dict(pg_baselayers_version=__version__)
# Handle any errors using an error page
@app.errorhandler(DatabaseException)
@app.errorhandler(PostgisMissingException)
@app.errorhandler(ConfigError)
@app.errorhandler(ApplicationError)
@app.errorhandler(psycopg2.OperationalError)
@app.errorhandler(psycopg2.ProgrammingError)
def handle_error(error):
print(type(error))
error_type = type(error).__name__
return render_template("error.html", **locals())
@app.errorhandler(ApplicationNotInitialized)
@app.route('/initialize', methods=['GET','POST'])
def initialize(error=""):
"""
Show template to initialize the database.
"""
if request.method == 'GET':
return render_template('initialize.html')
if request.method == 'POST' and request.form.get("initialize") == 'yes':
cur = g.conn.cursor()
#TODO: put this in a separate 'create_schema.sql' file and run that
cur.execute("""
CREATE SCHEMA IF NOT EXISTS postgis_baselayers;
CREATE TABLE IF NOT EXISTS postgis_baselayers.dataset (
name varchar(256) PRIMARY KEY, -- eg. 'example'
metadata json NOT NULL -- flexible column for later use
);
CREATE TABLE IF NOT EXISTS postgis_baselayers.layer (
key varchar(512) PRIMARY KEY, -- eg. 'example.airports'
name varchar(256) NOT NULL, -- eg. 'airports'
dataset_name varchar(256) REFERENCES postgis_baselayers.dataset(name),
status int DEFAULT 0 NOT NULL, -- 0: not installed; 1: installed; 2: queued; 3: working 4: error
info varchar(512) DEFAULT '', -- additional status info
metadata json NOT NULL -- flexible column for later use
);
CREATE TABLE IF NOT EXISTS postgis_baselayers.log (
id SERIAL PRIMARY KEY,
created TIMESTAMP DEFAULT NOW(),
layer_key varchar(128) REFERENCES postgis_baselayers.layer(key),
task_id varchar(128) NOT NULL,
target varchar(128) NOT NULL, -- eg: 'install' or 'uninstall'
info varchar(512), -- additional status info
log TEXT
);
""")
g.conn.commit()
# Parse and update all the datasets.
metadata_path = os.path.join(app.root_path, "datasets", "*", "metadata.json")
for metadata_file in glob(metadata_path):
with open(metadata_file) as f:
dataset = json.load(f)
cur.execute("""
INSERT INTO postgis_baselayers.dataset (name, metadata)
VALUES (%s, %s)
ON CONFLICT ON CONSTRAINT dataset_pkey
DO UPDATE SET metadata = %s;
""", (dataset['name'], json.dumps(dataset['metadata']), json.dumps(dataset['metadata'])))
for layer in dataset['layers']:
key = "{}.{}".format(dataset['name'], layer['name'])
cur.execute("""
INSERT INTO postgis_baselayers.layer (key, name, dataset_name, metadata)
VALUES (%s, %s, %s, %s)
ON CONFLICT ON CONSTRAINT layer_pkey
DO UPDATE SET metadata = %s;
""", (key, layer['name'], dataset['name'], json.dumps(layer['metadata']), json.dumps(layer['metadata'])))
g.conn.commit()
return redirect(url_for('index'))
else:
abort(404)
@app.route('/reset')
def reset():
huey = get_huey(reset=True)
return "Reset huey"
@app.route('/')
def index():
# Fetch the metadata
cur = g.conn.cursor(cursor_factory=RealDictCursor)
cur.execute("""
SELECT
layer.key,
layer.dataset_name as dataset,
layer.name as layer,
layer.info as info,
layer.status,
layer.metadata as layer_metadata,
dataset.metadata as dataset_metadata
FROM
postgis_baselayers.layer
LEFT JOIN
postgis_baselayers.dataset
ON
layer.dataset_name=dataset.name
ORDER BY
layer.key;
""")
layers = cur.fetchall()
# Fetch processing/waiting info
cur = g.conn.cursor()
cur.execute("SELECT key FROM postgis_baselayers.layer WHERE status=3")
layers_working = [_[0] for _ in cur.fetchall()]
cur.execute("SELECT key FROM postgis_baselayers.layer WHERE status=2")
layers_waiting = [_[0] for _ in cur.fetchall()]
return render_template("index.html", **locals())
@app.route("/try-postgis-install", methods=["POST"])
def try_install_postgis():
"""
Tries to create the postgis extention.
"""
cur = g.conn.cursor()
cur.execute("CREATE EXTENSION postgis;")
g.conn.commit()
return redirect(url_for('index'))
@app.route("/install", methods=['POST'])
def install():
"""
TODO: Validate input before passing to queue, and return/flash error
otherwise.
"""
cur = g.conn.cursor()
# Valid targets are:
valid_targets = ('install', 'uninstall')
# Make a list of valid keys
cur.execute("SELECT key FROM postgis_baselayers.layer")
valid_keys = [_[0] for _ in cur.fetchall()]
for key, target in request.form.items():
if target not in valid_targets:
raise ApplicationError(f"Request contains invalid target: '{target}'")
if key not in valid_keys:
raise ApplicationError(f"Request contains invalid key: '{key}'")
try:
task = run_task(key, target)
cur.execute("UPDATE postgis_baselayers.layer SET status=2 WHERE key=%s;", (key, ))
g.conn.commit()
except:
g.conn.rollback()
raise ApplicationError("Unexpected error while creating task.")
return redirect(url_for('index'))
@app.route("/logs/")
@app.route("/logs/<task_id>/")
def logs(task_id=None):
cur = g.conn.cursor(cursor_factory=RealDictCursor)
logs = None
if not task_id:
cur.execute("""
SELECT
created, info, task_id
FROM
postgis_baselayers.log
ORDER BY
created DESC;
""")
logs = cur.fetchall()
else:
cur.execute("""
SELECT
created, info, task_id, log
FROM
postgis_baselayers.log
WHERE
task_id=%s;
""", (task_id,))
logs = cur.fetchall()
return render_template("logs.html", **locals())
@app.route("/settings/")
def settings():
cur = g.conn.cursor(cursor_factory=RealDictCursor)
cur.execute("""
SELECT
layer.key as key,
layer.dataset_name as dataset,
layer.name as layer
FROM
postgis_baselayers.layer
ORDER BY
layer.key
""")
layers = cur.fetchall()
cur.execute("SELECT PostGIS_Full_Version() AS postgis_version, version() as postgresql_version;")
version_info = cur.fetchone()
return render_template("settings.html", **locals())
@app.route("/dataset/<dataset_name>/")
def dataset(dataset_name):
"""
Show dataset status information
"""
cur = g.conn.cursor(cursor_factory=RealDictCursor)
cur.execute("""
SELECT
layer.key,
layer.dataset_name as dataset,
layer.name as layer,
layer.info as info,
layer.status,
layer.metadata as layer_metadata,
dataset.metadata as dataset_metadata
FROM
postgis_baselayers.layer
LEFT JOIN
postgis_baselayers.dataset
ON
layer.dataset_name=dataset.name
WHERE
layer.dataset_name=%s
""", (dataset_name,))
layers = cur.fetchall()
base_dir = os.path.join(app.root_path, "datasets", dataset_name)
with open(os.path.join(base_dir, "README.md")) as f:
readme = Markup(markdown.markdown(f.read()))
with open(os.path.join(base_dir, "metadata.json")) as f:
dataset = json.load(f)
return render_template("dataset.html", **locals())
@huey.task(context=True)
def run_task(key, target, task=None):
"""
Run a task
TODO: Validate key and target
"""
(dataset, layer) = key.split(".")
status = None
message = ''
# Lock the task based on the dataset name. That way no more than one
# task can be run for any dataset (like installing and uninstalling it at
# the same time, or running two installs simultaneously)
with huey.lock_task(key):
logger = logging.getLogger("task_logger")
logger.setLevel(logging.DEBUG)
logstream = StringIO()
streamhandler = logging.StreamHandler(logstream)
logger.addHandler(streamhandler)
timer = None
conn = get_db()
cur = conn.cursor()
# TODO IMPORTANT!: Validate schema name as it's not escaped here.
cur.execute(f"CREATE SCHEMA IF NOT EXISTS {dataset};")
conn.commit()
try:
temp_dir = tempfile.TemporaryDirectory(prefix='pg-baselayers-task-')
logger.info(f"Running task in temporary directory: {temp_dir.name}")
timeout = int(app.config.get('PG_BASELAYERS_MAKE_TIMEOUT'))
logger.info("Task timeout is {}s".format(timeout))
# Copy the relevant files from the application's dataset directory.
root_dir = os.path.join(app.root_path, 'datasets', dataset)
logger.info("Dataset source dir is {}".format(root_dir))
for f in os.listdir(root_dir):
file_path = os.path.join(root_dir,f)
if os.path.isfile(file_path):
shutil.copy2(file_path, temp_dir.name)
logger.info("Copied {} to {}".format(file_path, os.path.join(temp_dir.name, f)))
# Run make
makefile = os.path.join(temp_dir.name, f"{layer}.make")
cmd = ["/usr/bin/make", "-f", makefile, target]
logger.info(f"Running subprocess '{cmd}'")
# Craft some environment variables to help the subprocess. These
# should be defined already when using a Docker environment, but
# lets not assume that and just inherit them from the application
# config, which will in turn get them from the environment if they
# are defined there.
subprocess_env = {
'POSTGRES_HOST': app.config.get('POSTGRES_HOST'),
'POSTGRES_PORT': app.config.get('POSTGRES_PORT'),
'POSTGRES_DB': app.config.get('POSTGRES_DB'),
'POSTGRES_USER': app.config.get('POSTGRES_USER'),
'POSTGRES_PASSWORD': app.config.get('POSTGRES_PASSWORD'),
'POSTGRES_SSLMODE': app.config.get('POSTGRES_SSLMODE'),
'POSTGRES_SSLROOTCERT': app.config.get('POSTGRES_SSLROOTCERT'),
'POSTGRES_URI': app.config.get('POSTGRES_URI'),
'POSTGRES_OGR': app.config.get('POSTGRES_OGR')
}
process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=subprocess_env,
cwd=temp_dir.name,
shell=False,
universal_newlines=True)
#TODO: The process.kill() can still take a while to complete. Maybe
# start two timers, one for kill after timeout seconds, and
# one using a more rigorous approach if that's possible.
def process_terminator():
#TODO: Update the status to 'Terminating...' or something.
logger.error("Timer expired, killing process... (This may also take a while to complete)")
process.kill()
timer = Timer(timeout, process_terminator)
logger.info(f"Starting {timeout}s timer within which the installation process needs to finish.")
timer.start()
for line in iter(process.stdout.readline, b''):
if line:
logger.info(line)
if line.startswith("STATUS="):
(_, info) = line.split("=", maxsplit=1)
info = (info[:500] + '(...)') if len(info) > 500 else info
cur.execute("""
UPDATE
postgis_baselayers.layer
SET info=%s
WHERE key=%s;
""", (info, key))
conn.commit()
else:
break
print("Subprocess finished, wait for returncode...")
process.wait()
print("Returncode is {}".format(process.returncode))
#TODO: Do we need a wait() and communicate() here or does
# communicate() imply wait()ing?
stdout, stderr = process.communicate()
if process.returncode < 0:
raise InstallTerminated(stderr)
if process.returncode != 0:
raise InstallFailed(stderr)
if process.returncode == 0:
message = f"Completed {target} task on {key}."
logger.info(message)
if target == 'install':
status = 1
if target == 'uninstall':
status = 0
except InstallFailed as e:
logger.error("ERROR! The installation failed with a non-zero returncode. More info: {}".format(e))
message = f"Failed {target} on {key}. (An error occurred)"
status = 4
except InstallTerminated as e:
logger.error("ERROR! The installation was terminated, most likely because it timed out. More info: {}".format(e))
message = f"Failed {target} on {key}. (Process was terminated)"
status = 4
except Exception as e:
logger.error("ERROR! The installation failed for an unknown reason. More info: {}".format(e))
message = f"Failed {target} on {key}. (Unknown reason)"
status = 4
finally:
# Destroy the temporary directory
logger.info("Cleaning temporary directory...")
temp_dir.cleanup()
# Cancel any timer
logger.info("Cancelling timer...")
if timer:
timer.cancel()
# No matter what happens, flush the log and save in log table.
logger.info("Flushing and saving logs...")
streamhandler.flush()
log_content = logstream.getvalue()
# Remove passwords from logfile
postgres_uri_safe = "postgresql://{POSTGRES_USER}:XXXXXX@{POSTGRES_HOST}:{POSTGRES_PORT}/{POSTGRES_DB}".format(**app.config)
log_content = log_content.replace(app.config.get("POSTGRES_URI"), postgres_uri_safe)
postgres_ogr_safe = 'PG:"dbname={POSTGRES_DB} host={POSTGRES_HOST} port={POSTGRES_PORT} user={POSTGRES_USER} password=<PASSWORD>"'.format(**app.config)
log_content = log_content.replace(app.config.get("POSTGRES_OGR"), postgres_ogr_safe)
cur.execute("""
INSERT INTO
postgis_baselayers.log
(layer_key, task_id, target, info, log)
VALUES
(%s, %s, %s, %s, %s)
""", (task.args[0], task.id, target, message, log_content))
conn.commit()
conn.close()
# Return the status code of the task
return status
@huey.pre_execute()
def pre_exec_hook(task):
"""
Set dataset status to '3: working' before executing task.
"""
logger.info("Pre-exec hook. Setting status to 3.")
conn = get_db()
cur = conn.cursor()
cur.execute("UPDATE postgis_baselayers.layer SET status=3, info='' WHERE key=%s;", (task.args[0],))
conn.commit()
conn.close()
logger.info("Done.")
@huey.post_execute()
def post_exec_hook(task, task_value, exc):
"""
After executing task, set the dataset status to the status code returned
by the task.
"""
logger.info(f"Post-exec hook. Setting status to {task_value}.")
logger.info(f"Post-exec hook exception: {exc}")
conn = get_db()
cur = conn.cursor()
if exc:
logger.info("Exception found. Setting task status to error.")
task_value = 4
if task_value is None:
logger.info("Task completed but did not return a value. Settings task status to error.")
task_value = 4
cur.execute("UPDATE postgis_baselayers.layer SET status=%s, info='' WHERE key=%s;", (task_value, task.args[0]))
conn.commit()
conn.close()
logger.info("Done.")
| 2.3125 | 2 |
auraliser/auralisation.py | FRidh/auraliser | 2 | 12768638 | <gh_stars>1-10
"""
The auralisation module provides tools to perform an auralisation.
The :class:`Auraliser` is a powerful interface for performing auralisations.
It allows one to define a soundscape consisting of several sources and
receivers and performing auralisations at the receivers due to the specified sources.
The `class:`Auralisar` supports several propagation effects:
- **geometrical spreading** resulting in a decrease in sound pressure with increase in source-receiver distance
- **Doppler shift** due to relative motion between the moving aircraft and the non-moving receiver
- **atmospheric absorption** due to relaxation processes
- **reflections** at the ground and facades due to a sudden change in impedance
- **modulations and decorrelation** due to fluctuations caused by atmospheric turbulence
"""
import abc
import acoustics
import auraliser
import collections
import geometry
import ism
import itertools
import math
import numpy as np
import weakref
from numtraits import NumericalTrait
from acoustics import Signal
from acoustics.signal import convolve
# To render the geometry
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from multipledispatch import dispatch
import streaming
from streaming.stream import Stream, BlockStream, repeat_each
from streaming.signal import constant
import cytoolz
import copy
import auraliser.tools
logger = auraliser.tools.create_logger(__name__)
@dispatch(object, object)
def unequality(a, b):
if a is None and b is None:
return False
elif a is None or b is None:
return True
elif a.__class__ != b.__class__:
return True
else:
return unequality(a.__dict__, b.__dict__)
#@dispatch(object, None)
#def unequality(a, b):
#return True
#@dispatch(None, None)
#def unequality(a, b):
#return False
@dispatch(int, int)
def unequality(a, b):
return a!=b
@dispatch(float, float)
def unequality(a, b):
return a!=b
@dispatch(str, str)
def unequality(a, b):
return a!=b
@dispatch(object, np.ndarray)
def unequality(a, b):
return True
@dispatch(np.ndarray, np.ndarray)
def unequality(a, b):
return not np.allclose(a, b) # We use all(), not any()
@dispatch(dict, dict)
def unequality(a, b):
try:
return a!=b
except ValueError:
if set(a.keys()) != set(b.keys()):
return True
else:
for key, value in a.items():
if unequality(b[key], value):
return True
else:
return False
def equality(a, b):
return not unequality(a, b)
#def _equalityity_with_arrays(a, b):
#"""Test equalityity between two objects.
#"""
#if self.__class__ == other.__class__:
#for key, value in self.__dict__:
#if key not in other:
#return False
#equality = other[key] != value
##try:
#unequality = not equality
#print(equality)
##except ValueError:
##print(equality)
##unequality = not np.all(equality)
#if unequality:
#return False
#else:
#return True
#return False
def recursive_mapping_update(d, u):
"""Recursively update a mapping/dict.
:param d: Target mapping.
:param u: Source mapping.
"""
if u is not None:
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = recursive_mapping_update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
def position_for_directivity(mirror):
"""
Get the position to be used for the directivity.
Directivity is given by vector from the first order to the original source after.
However, this directivity is mirrored to
"""
m = mirror
while m.order > 1:
m = getattr(m, 'mother')
return m.position
#class PositionDescriptor(object):
#"""Descriptor that enforces lists of geometry.Points.
#"""
#def __init__(self):
#pass
#def __get__(self, instance, owner):
## we get here when someone calls x.d, and d is a NonNegative instance
## instance = x
## owner = type(x)
#return self.data.get(instance, self.default)
#def __set__(self, instance, value):
## we get here when someone calls x.d = val, and d is a NonNegative instance
## instance = x
## value = val
#if value < 0:
#raise ValueError("Negative value not allowed: %s" % value)
#self.data[instance] = value
class PositionDescriptor(object):
    """Data descriptor that normalises positions to an ``(N, 3)`` array.

    Accepts a :class:`geometry.Point`, a 3-tuple or an ``(N, 3)`` array.
    A single point is broadcast to one row per sample of the owning
    auraliser when read back.
    """

    def __init__(self, attr):
        # Key under which the value is stored in the instance ``__dict__``.
        self.attr = attr
        # Fallback position when nothing has been assigned yet.
        self.default = (0.0, 0.0, 0.0)

    def __get__(self, instance, owner):
        value = np.asarray(instance.__dict__.get(self.attr, self.default))
        if value.shape == (1, 3):
            # Broadcast a static position over the full signal duration.
            value = np.tile(value, (instance._auraliser.samples, 1))
        return value

    def __set__(self, instance, value):
        if value is None:
            value = self.default
        if isinstance(value, geometry.Point):
            instance.__dict__[self.attr] = np.array(value)[None, :]
        elif isinstance(value, tuple):
            if len(value) != 3:
                raise ValueError("Tuple of wrong size.")
            instance.__dict__[self.attr] = np.array(value)[None, :]
        elif isinstance(value, np.ndarray):
            if value.ndim != 2:
                raise ValueError("Array has wrong amount of dimensions.")
            if value.shape[-1] != 3:
                raise ValueError("Array should have three columns.")
            instance.__dict__[self.attr] = np.asarray(value)
        else:
            raise ValueError("Cannot set value, invalid type '{}'.".format(type(value)))
class Auraliser(object):
    """
    An auraliser object contains the model for simulating how :attr:`sources`
    sound at :attr:`receivers` for a given :attr:`atmosphere`.
    """

    def __new__(cls, *args, **kwargs):
        obj = super().__new__(cls)
        # Hidden hard references to all objects in the model; public
        # accessors hand out weak proxies to these objects.
        obj._objects = list()
        return obj

    def __init__(self, duration, atmosphere=None, geometry=None, settings=None):
        """
        Constructor.

        :param duration: Duration of the signal to auralise given in seconds.
        :param atmosphere: Atmosphere. Defaults to :class:`acoustics.atmosphere.Atmosphere`.
        :param geometry: Geometry. Defaults to an empty :class:`Geometry`.
        :param settings: Dictionary with configuration, merged recursively
            into the default settings.
        """
        # Configuration of this auraliser.
        self.settings = get_default_settings()
        if settings:
            recursive_mapping_update(self.settings, settings)

        # Atmosphere described by :class:`acoustics.atmosphere.Atmosphere`.
        self.atmosphere = atmosphere if atmosphere else acoustics.atmosphere.Atmosphere()

        # Duration of auralisation in seconds.
        self.duration = duration

        # Geometry of the model described by :class:`Geometry`.
        self.geometry = geometry if geometry else Geometry()

    def __eq__(self, other):
        return equality(self, other)

    def __del__(self):
        del self._objects[:]

    @property
    def sample_frequency(self):
        """Sample frequency in hertz, taken from :attr:`settings`."""
        return self.settings['fs']

    def _get_real_object(self, name):
        """Get real object by name.

        :param name: Name of `object` or (proxy of) the object itself.
        :returns: Real `object`.
        :raises ValueError: when no object with that name exists.
        """
        name = name if isinstance(name, str) else name.name
        for obj in self._objects:
            if name == obj.name:
                return obj
        raise ValueError("Cannot retrieve object. Unknown name {}. ".format(name))

    def get_object(self, name):
        """Get object by name.

        :param name: Name of `object` or (proxy of) the real object.
        :returns: Proxy to `object`.
        """
        return weakref.proxy(self._get_real_object(name))

    def remove_object(self, name):
        """Delete object from model.

        :param name: Name of `object` or (proxy of) the object itself.
        """
        name = name if isinstance(name, str) else name.name
        # Iterate over a copy: removing from a list while iterating it
        # would skip the element following each removal.
        for obj in list(self._objects):
            if name == obj.name:
                logger.debug('Removing object with name "{}"'.format(name))
                self._objects.remove(obj)

    def remove_objects(self):
        """Delete all objects from model."""
        logger.debug('Removing all objects from model.')
        del self._objects[:]

    def _add_object(self, name, model, *args, **kwargs):
        """Add object to model and return a proxy to it.

        :param name: Name of the new object.
        :param model: Class of the object (:class:`Source`, :class:`Receiver`, ...).
        """
        logger.debug('Adding object with name "{}" to model.'.format(name))
        obj = model(weakref.proxy(self), name, *args, **kwargs)  # Add hidden hard reference
        self._objects.append(obj)
        return self.get_object(obj.name)

    def add_source(self, name, position):
        """Add source to auraliser.

        :param name: Name of source.
        :param position: Position of source.
        """
        return self._add_object(name, Source, position)

    def add_receiver(self, name, position):
        """Add receiver to auraliser.

        :param name: Name of receiver.
        :param position: Position of receiver.
        """
        return self._add_object(name, Receiver, position=position)

    def update_settings(self, settings):
        """Update :attr:`settings` with `settings` in-place recursively using :func:`recursive_mapping_update`.

        :param settings: New settings to use.

        .. note:: If you want to assign a dictionary with new settings,
           replacing all old settings, you should just reassign :meth:`settings`.
        """
        self.settings = recursive_mapping_update(self.settings, settings)

    @property
    def time(self):
        """Time vector in seconds, one value per sample."""
        # Fixed: the original referenced the undefined names ``samples``
        # and ``fs`` instead of the instance properties.
        return np.arange(0.0, self.samples) / self.sample_frequency

    @property
    def samples(self):
        """Amount of samples."""
        return int(np.round(self.duration * self.sample_frequency))

    @property
    def objects(self):
        """Proxies to all objects in the model."""
        yield from (self.get_object(obj.name) for obj in self._objects)

    @property
    def sources(self):
        """Sources."""
        yield from (obj for obj in self.objects if isinstance(obj, Source))

    @property
    def receivers(self):
        """Receivers."""
        yield from (obj for obj in self.objects if isinstance(obj, Receiver))

    @property
    def subsources(self):
        """Subsources."""
        yield from (obj for obj in self.objects if isinstance(obj, Subsource))

    @property
    def virtualsources(self):
        """Virtual sources."""
        yield from (obj for obj in self.objects if isinstance(obj, Virtualsource))

    def can_auralise(self):
        """Test whether all sufficient information is available to perform an auralisation.

        :raises ValueError: when sources, receivers, atmosphere or geometry are missing.
        """
        # ``sources`` and ``receivers`` are generators, which are always
        # truthy; consume one item to test for emptiness instead.
        if next(self.sources, None) is None:
            raise ValueError('No sources available')

        if next(self.receivers, None) is None:
            raise ValueError('No receivers available')

        if not self.atmosphere:
            raise ValueError('No atmosphere available.')

        if not self.geometry:
            raise ValueError('No geometry available.')

        return True

    @staticmethod
    def _auralise_subsource(subsource, receiver, settings, geometry, atmosphere):
        """Synthesize the signal of a subsource.

        :returns: Generator that yields for each (mirror) source a tuple
            consisting of the immission signal and immission vector.

        We check whether reflections are included or not.
        """
        # Generate the emission signals.
        logger.info("_auralise_subsource: Generating subsource emission signals.")
        subsource.generate_signals()

        nblock = settings['nblock']

        # Make sure the positions are streams.
        subsource_position = Stream(iter(subsource.position)).blocks(nblock)
        receiver_position = Stream(iter(receiver.position)).blocks(nblock)

        # Determine mirrors.
        logger.info("_auralise_subsource: Determine mirrors.")
        if settings['reflections']['include'] and len(geometry.walls) > 0:  # Are reflections possible?
            logger.info("_auralise_subsource: Searching for mirrors. Reflections are enabled, and we have walls.")
            mirrors = _ism_mirrors(subsource_position, receiver_position, subsource.signal, geometry.walls, settings)
        else:  # No walls, so no reflections. Therefore the only source is the real source.
            logger.info("_auralise_subsource: Not searching for mirror sources. Either reflections are disabled or there are no walls.")
            emission = subsource.signal(unit_vector_stream(receiver_position.copy() - subsource_position.copy()))
            # Final sources
            mirrors = [Mirror(subsource_position, receiver_position, emission)]

        # Yield contribution of each mirror source.
        for mirror in mirrors:
            signal = _apply_propagation_effects(mirror.source_position.copy(), mirror.receiver_position.copy(), mirror.emission, settings, settings['fs'], atmosphere)
            # Orientation is the unit vector from source to receiver.
            orientation = unit_vector_stream(mirror.source_position.copy() - mirror.receiver_position.copy())
            yield signal, orientation

    @staticmethod
    def _auralise_source(source, receiver, settings, geometry, atmosphere):
        """Synthesize the signal at `receiver` due to `source`. This includes all subsources and respective mirror sources.

        :param source: Source.
        :type source: :class:`Source`
        :param receiver: Receiver.
        :type receiver: :class:`Receiver`
        :param settings: Settings
        :type settings: :func:`dict`
        :param geometry: Geometry
        :type geometry: :class:`Geometry`
        :param atmosphere: Atmosphere
        :type atmosphere: :class:`acoustics.atmosphere.Atmosphere`
        :returns: Generator that yields tuples consisting of immission signal and immission vector.
        :rtype: Generator
        """
        logger.info("_auralise_source: Auralising source {}".format(source.name))

        for subsource in source.subsources:
            signals_and_orientations = Auraliser._auralise_subsource(subsource, receiver, settings, geometry, atmosphere)
            yield from signals_and_orientations

        logger.info("_auralise_source: Finished auralising source {}".format(source.name))

    def auralise(self, receiver, sources=None):
        """Synthesise the signal due to one or multiple sources at `receiver`. All subsources are included.

        :param receiver: Receiver.
        :param sources: Iterable of sources.
        """
        receiver = self.get_object(receiver)
        logger.info("auralise: Auralising at {}".format(receiver.name))

        if self.can_auralise():
            logger.info("auralise: Can auralise.")

        sources = sources if sources else self.sources
        sources = (self.get_object(source) for source in sources)

        # We don't want to be able to update the settings during an auralization.
        settings = copy.deepcopy(self.settings)
        geometry = copy.deepcopy(self.geometry)
        atmosphere = copy.deepcopy(self.atmosphere)

        for source in sources:
            yield from Auraliser._auralise_source(source, receiver, settings, geometry, atmosphere)

    def plot(self, **kwargs):
        """Plot model.

        :seealso: :func:`plot_model`.
        """
        return plot_model(self, **kwargs)
def unit_vector_stream(stream):
    """Normalise every vector in a stream to unit length.

    :type stream: BlockStream
    """
    def _normalise(vectors):
        norms = np.linalg.norm(vectors, axis=-1)
        return vectors / norms[..., None]
    return stream.map(_normalise)
##def _apply_mirror_source_strength(emission, effective, strength, force_hard, ntaps):
##"""Apply mirror source strength"""
##if effective is not None:
##emission = emission * effective
###if force_hard or np.all(strength == 1.0):
##if force_hard:
##logger.info("_apply_source_effects: Hard ground.")
##else:
### Apply mirror source strength. Only necessary when we have a soft surface.
##logger.info("_apply_source_effects: Soft ground.")
###impulse_responses = map(auraliser.propagation.impulse_response, strength)
##impulse_responses = strength.map(auraliser.propagation.impulse_response)
##emission = convolve(emission, impulse_responses, nblock)
###emission = emission.samples().drop(int(ntaps//2))
##return emission
def _ism_get_mirror_sources(source, receiver, walls, order_threshold, mirrors_threshold):
    """Determine the mirror sources for the given source and receiver.

    :param source: Source position(s).
    :param receiver: Receiver position(s).
    :param walls: Walls of the geometry.
    :param order_threshold: Maximum reflection order to consider.
    :param mirrors_threshold: Maximum amount of (strongest) mirrors to return.

    The image source method is used to determine the mirror sources; only
    the strongest mirrors are yielded.

    .. note:: Mirror receivers are calculated instead of mirror sources.
    """
    logger.info("_ism_get_mirror_sources: Determining mirror sources.")
    model = ism.Model(walls=walls, source=source, receiver=receiver,
                      max_order=order_threshold)
    yield from model.determine(strongest=mirrors_threshold)
def _ism_mirrors(subsource_position, receiver_position, emission, walls, settings):
    """Determine mirror sources and their emissions.

    :param subsource_position: Position of the subsource.
    :type subsource_position: :class:`Stream`
    :param receiver_position: Position of the receiver.
    :type receiver_position: :class:`Stream`
    :param emission: Emission generator (callable taking an orientation stream).
    :param walls: Walls of the geometry.
    :param settings: Settings.
    :type settings: :func:`dict`
    :returns: Yields :class:`Mirror` instances with emission signals.
    """
    logger.info("_ism_mirrors: Determining mirror sources and their emissions.")

    resolution = settings['reflections']['nhop']
    nblock = settings['nblock']

    subsource_position_resolution = subsource_position.copy().blocks(resolution)
    subsource_position = subsource_position.blocks(nblock)

    # Obtain mirrors from Image Source Method.
    # Determine mirror sources every `resolution` samples. We pick the first sample of each block.
    _subsource_position = list(subsource_position_resolution.copy().map(lambda x: geometry.Point(*x[0])))
    _receiver_position = [ geometry.Point(*receiver_position.copy().samples().peek()) ]
    # NOTE(review): source and receiver are deliberately swapped — mirror
    # *receivers* are computed instead of mirror sources.
    mirrors = _ism_get_mirror_sources(source=_receiver_position,
                                      receiver=_subsource_position,
                                      walls=walls,
                                      order_threshold=settings['reflections']['order_threshold'],
                                      mirrors_threshold=settings['reflections']['mirrors_threshold'])

    for mirror in mirrors:
        # To determine the directivity correction we need to know the orientation from mirror receiver to source.
        # What matters however is not the position of the mirror, but the position of the first order mirror.
        # Since we consider a non-moving receiver, the position is a constant. See also above, _receiver_position.
        receiver_position_directivity = position_for_directivity(mirror)

        # Emission given a vector pointing from receiver to source.
        orientation_directivity = unit_vector_stream(receiver_position_directivity - subsource_position.copy())
        signal = emission(orientation_directivity)

        # We now have an emission signal.
        # Now we need the effectiveness and strength.
        # These values are determined at a certain resolution.

        # Single value per hop indicating whether the source was effective or not.
        effective = Stream(iter(mirror.effective))
        # Spectrum per hop.
        strength = Stream(iter(mirror.strength))

        # Signal after applying mirror source strength and effectiveness
        signal = auraliser.realtime.apply_reflection_strength(signal, resolution, strength, effective, settings['reflections']['ntaps'], settings['reflections']['force_hard'])

        mirror_receiver_position = constant(mirror.position)

        yield Mirror(subsource_position.copy().blocks(nblock), mirror_receiver_position.blocks(nblock), signal.blocks(nblock))
def _split_iterators(iterator, n=None):
"""Split itererator of tuples into multiple iterators.
:param iterator: Iterator to be split.
:param n: Amount of iterators it will be split in. toolz.peak can be used to determine this value, but that is not lazy.
This is basically the same as x, y, z = zip(*a), however,
this function is lazy.
"""
#if n is None:
# item, iterator = cytoolz.peek(iterator)
# n = len(item)
iterators = itertools.tee(iterator, n)
#iterators = ((sample[i] for sample in iterator) for i, iterator in enumerate(iterators))
# Above does not work?!
out = list()
out.append(s[0] for s in iterators[0])
out.append(s[1] for s in iterators[1])
out.append(s[2] for s in iterators[2])
iterators = out
return iterators
def _stream_split_iterators(stream, n=None):
    """Split a stream of samples into ``n`` separate :class:`Stream` objects.

    :param stream: Stream whose samples are tuples/arrays of length ``n``.
    :param n: Amount of streams to split into.
    :returns: List of :class:`Stream` objects, one per component.
    """
    iterators = list(map(Stream, _split_iterators(iter(stream.samples()), n)))
    return iterators
def _resample_vectors(signal, delay, fs, n=None):
    """Resample a stream of vectors by applying a (Doppler) delay per component.

    :param signal: Stream of vectors.
    :param delay: Stream of delays in seconds.
    :param fs: Sample frequency.
    :param n: Amount of vector components.
    """
    signal = signal.samples()
    if n is None:
        # NOTE(review): this assigns the first *sample* to ``n`` rather than
        # its length — looks suspicious; callers always pass ``n`` explicitly,
        # so this branch appears untested. Confirm before relying on it.
        n = signal.peek()
    signal = _stream_split_iterators(signal, n)
    apply_delay = auraliser.realtime.apply_doppler
    # Delay each component independently, then recombine samples into vectors.
    signal = Stream(map(np.array, zip(*[apply_delay(s, d, fs) for s, d in zip(signal, delay.tee(n))])))
    return signal
def _apply_propagation_effects(source, receiver, signal, settings, fs, atmosphere):
    """Apply the propagation filters for this specific source-mirror-receiver combination.

    :param source: Source position
    :type source: :class:`Stream` of :class:`Point`
    :param receiver: Receiver position
    :type receiver: :class:`Stream` of :class:`Point`
    :param signal: Initial signal
    :type signal: :class:`Stream` or :class:`BlockStream`
    :param settings: Settings
    :type settings: :func:`dict`
    :param fs: Sample frequency
    :param atmosphere: Atmosphere
    :type atmosphere: :class:`acoustics.atmosphere.Atmosphere`
    :returns: Single-channel signal.
    :rtype: :class:`Stream` or :class:`BlockStream`

    Propagation effects included are:

    - Amplitude decrease due to spherical spreading (:func:`auraliser.realtime.apply_spreading`).
    - Delay due to spherical spreading (Doppler shift).
    - Atmospheric turbulence.
    - Atmospheric attenuation.
    """
    logger.info("_apply_propagation_effects: Auralising mirror")

    nblock = settings['nblock']

    source = source.blocks(nblock)
    receiver = receiver.blocks(nblock)

    # Apply delay due to spreading (Doppler shift).
    if settings['doppler']['include'] and settings['doppler']['frequency']:
        logger.info("_apply_propagation_effects: Applying Doppler frequency shift.")
        # In order to compute the propagation delay, we need the distance.
        # For that, we need the source and receiver positions.
        # Distance vector pointing from receiver to source.
        distance_vector = source.copy() - receiver.copy()
        # Norm of distance vector pointing from receiver to source.
        distance = distance_vector.copy().map(lambda x: np.linalg.norm(x, axis=-1))
        # Resampled signal
        signal = auraliser.realtime.apply_doppler(signal=signal,
                                                  delay=distance.copy()/atmosphere.soundspeed,
                                                  fs=fs)
        # Due to the Doppler shift we also need to resample the source position
        # and update the dependent quantities.
        # Warp the source position. Position has n=3 dimensions.
        source = _resample_vectors(source, delay=distance.copy()/atmosphere.soundspeed, fs=fs, n=3)
        source = source.blocks(nblock)

    # Geometrical quantities that now use the right time-axis.
    # Distance vector pointing from receiver to source.
    distance_vector = source.copy() - receiver.copy()
    # Norm of distance vector pointing from receiver to source.
    distance = distance_vector.copy().map(lambda x: np.linalg.norm(x, axis=-1))
    # Source velocity vector
    #velocity = diff(source.copy()).blocks(nblock) * fs # streaming.signal.diff works only on scalars!
    velocity = source.copy().map(lambda x: np.diff(x, axis=0)) * fs
    # Source speed
    speed = velocity.copy().map(lambda x: np.linalg.norm(x, axis=-1))

    # Apply spherical spreading.
    if settings['spreading']['include']:
        logger.info("_apply_propagation_effects: Applying spherical spreading.")
        signal = auraliser.realtime.apply_spherical_spreading(signal.blocks(nblock), distance.copy().blocks(nblock))

    # Apply atmospheric turbulence.
    if settings['turbulence']['include']:
        logger.info("_apply_propagation_effects: Applying turbulence.")
        # We first need to compute the transverse speed.
        # We also only want to have a single value per block.
        # Therefore we pick the first values.
        nhop = settings['turbulence']['nhop']
        _distance = Stream(distance.copy().blocks(nhop).map(cytoolz.first))
        _velocity = Stream(velocity.copy().blocks(nhop).map(cytoolz.first))
        _orientation = unit_vector_stream(Stream(distance_vector.copy().blocks(nhop).map(cytoolz.first)))
        _transverse_speed = Stream(iter(map(auraliser.realtime.transverse_speed, _velocity, _orientation)))
        # Seeded state so 'random' fluctuations are reproducible per auralisation.
        state = np.random.RandomState(seed=settings['turbulence']['seed'])
        signal = auraliser.realtime.apply_turbulence(signal=signal,
                                                     fs=fs,
                                                     nhop=nhop,
                                                     correlation_length=settings['turbulence']['correlation_length'],
                                                     speed=_transverse_speed.copy(),
                                                     distance=_distance.copy(),
                                                     soundspeed=atmosphere.soundspeed,
                                                     mean_mu_squared=settings['turbulence']['mean_mu_squared'],
                                                     fmin=settings['turbulence']['fs_minimum'],
                                                     ntaps_corr=settings['turbulence']['ntaps_corr'],
                                                     ntaps_spectra=settings['turbulence']['ntaps_spectra'],
                                                     window=settings['turbulence']['window'],
                                                     include_saturation=settings['turbulence']['saturation'],
                                                     state=state,
                                                     include_amplitude=settings['turbulence']['amplitude'],
                                                     include_phase=settings['turbulence']['phase'],
                                                     )
        del _distance, _velocity, _orientation, _transverse_speed, state

    # Apply atmospheric absorption.
    if settings['atmospheric_absorption']['include']:
        logger.info("_apply_propagation_effects: Applying atmospheric absorption.")
        signal = auraliser.realtime.apply_atmospheric_attenuation(
            signal=signal,
            fs=fs,
            distance=distance.copy(),
            nhop=settings['atmospheric_absorption']['nhop'],
            atmosphere=atmosphere,
            ntaps=settings['atmospheric_absorption']['ntaps'],
            inverse=False,
        )

    # Force zeros until first real sample arrives. Should only be done when the time delay (Doppler) is applied.
    # But force to zero why...? The responsible function should make it zero.
    if settings['doppler']['include'] and settings['doppler']['frequency'] and settings['doppler']['purge_zeros']:
        logger.info("_apply_propagation_effects: Purging zeros.")
        initial_distance = distance.copy().samples().peek()
        # Initial delay in samples
        initial_delay = int(math.ceil(initial_distance/atmosphere.soundspeed * fs))
        signal = signal.samples().drop(initial_delay)
        del initial_distance, initial_delay

    # Clear memory to prevent memory leaks
    del source, receiver, distance_vector, distance, velocity, speed

    return signal
class Base(object):
    """
    Common base class for every named object in the auralisation model.
    """

    def __init__(self, auraliser, name, description=None):
        """
        Constructor of base class.

        :param auraliser: Auraliser (weak proxy) this object belongs to.
        :param name: Name identifying this object within the model.
        :param description: Optional human-readable description.
        """
        # Auraliser this object belongs to.
        self._auraliser = auraliser
        # Name of this object.
        self.name = name
        # Description of object.
        self.description = description

    def __del__(self):
        del self._auraliser

    def __str__(self):
        return "({})".format(self.name)

    def __repr__(self):
        return "{}({})".format(type(self).__name__, self)

    def __eq__(self, other):
        return equality(self, other)

    @property
    @abc.abstractmethod
    def position(self):
        """Position of the object; implemented by subclasses."""

    def plot_position(self, interval=1):
        """Plot the trajectory of this object in 3D.

        :param interval: Plot every ``interval``-th sample.
        :returns: Matplotlib figure.
        """
        fig = plt.figure()
        axes = fig.add_subplot(111, projection='3d')
        trajectory = self.position[::interval]
        axes.plot(*trajectory.T)
        axes.set_xlabel(r'$x$ in m')
        axes.set_ylabel(r'$y$ in m')
        axes.set_zlabel(r'$z$ in m')
        return fig
class Receiver(Base):
    """Receiver at which the scene can be auralised.
    """

    position = PositionDescriptor('position')
    """Position of object.
    """

    def __init__(self, auraliser, name, position):
        """Constructor.

        :param auraliser: Auraliser this receiver belongs to.
        :param name: Name of the receiver.
        :param position: Position of the receiver.
        """
        super().__init__(auraliser, name=name)
        self.position = position

    def auralise(self, sources=None):
        """Auralise the scene at receiver location.

        :param sources: List of sources to include. By default all the sources in the scene are included.
        """
        return self._auraliser.auralise(self, sources)
class Source(Base):
    """
    A sound source, composed of one or more :class:`Subsource` objects.
    """

    # Position of the source; enforced to be an (N, 3) array.
    position = PositionDescriptor('position')

    def __init__(self, auraliser, name, position):
        """Constructor.

        :param auraliser: Auraliser this source belongs to.
        :param name: Name of the source.
        :param position: Position of the source.
        """
        super().__init__(auraliser, name)
        self.position = position

    @property
    def subsources(self):
        """Generator over the subsources belonging to this source."""
        for obj in self._auraliser.subsources:
            if obj.source.name == self.name:
                yield obj

    def get_subsource(self, name):
        """Get subsource (proxy) by name."""
        return self._auraliser.get_object(name)

    def add_subsource(self, name):
        """Add a subsource with the given name to this source."""
        return self._auraliser._add_object(name, Subsource, self.name)

    def remove_subsource(self, name):
        """Remove a subsource from this source."""
        self._auraliser.remove_object(name)
class Subsource(Base):
    """
    Component of a :class:`Source` with its own position relative to it.
    """

    # Position of the subsource relative to its parent source.
    position_relative = PositionDescriptor('position_relative')

    # Name of the parent source; resolved lazily through the auraliser.
    _source = None

    def __init__(self, auraliser, name, source, position_relative=None):
        """Constructor.

        :param auraliser: Auraliser this subsource belongs to.
        :param name: Name of the subsource.
        :param source: Parent source (name or proxy).
        :param position_relative: Position relative to the parent source.
        """
        super().__init__(auraliser, name)
        self.source = source
        self.position_relative = position_relative

    @property
    def position(self):
        """Absolute position of subsource."""
        return self.source.position + self.position_relative

    @property
    def source(self):
        """Parent :class:`Source` (proxy)."""
        return self._auraliser.get_object(self._source)

    @source.setter
    def source(self, x):
        # Store only the name; the object is resolved on every access.
        self._source = self._auraliser.get_object(x).name

    @property
    def virtualsources(self):
        """Generator over the virtual sources belonging to this subsource."""
        for obj in self._auraliser.virtualsources:
            if obj.subsource.name == self.name:
                yield obj

    def generate_signals(self):
        """Generate the signals of all virtual sources."""
        for src in self.virtualsources:
            src.generate_signal()

    def get_virtualsource(self, name):
        """Get virtual source (proxy) by name."""
        return self._auraliser.get_object(name)

    def add_virtualsource(self, name, **kwargs):
        """Add a virtual source with the given name to this subsource."""
        return self._auraliser._add_object(name, Virtualsource, self.name, **kwargs)

    def remove_virtualsource(self, name):
        """Remove a virtual source from this subsource."""
        self._auraliser.remove_object(name)

    def signal(self, orientation):
        """Return the signal of this subsource as function of SubSource - Receiver orientation.

        :param orientation: Orientation.

        The signal is the sum of all virtual sources corrected for directivity.
        """
        total = 0
        for src in self.virtualsources:
            total = total + src.emission(orientation.copy())
        return total
class Virtualsource(Base):
    """
    Class for modelling specific spectral components within a :class:`Auraliser.SubSource` that have the same directivity.
    """

    # Name of the owning subsource; resolved lazily through the auraliser.
    _subsource = None

    def __init__(self, auraliser, name, subsource, signal, rotation=None, directivity=None, level=94.0, multipole=0):
        """Constructor.

        :param auraliser: Auraliser this virtual source belongs to.
        :param name: Name of the virtual source.
        :param subsource: Owning subsource (name or proxy).
        :param signal: Signal generator with an ``output(duration, fs)`` method.
        :param rotation: Currently unused; accepted for interface compatibility.
        :param directivity: Directivity of the signal. Defaults to omnidirectional.
        :param level: Level of the signal in decibel.
        :param multipole: Multipole order: 0 monopole, 1 dipole, 2 quadrupole.
        """
        super().__init__(auraliser, name)
        self.subsource = subsource
        self.signal = signal
        # Directivity of the signal.
        self.directivity = directivity if directivity else acoustics.directivity.Omni()
        # Level of the signal in decibel.
        self.level = level
        # Generated signal, stored at the beginning of an auralization.
        # Includes gain, but not yet directivity!
        self._signal_generated = None
        # Multipole order. Valid values are 0 for a monopole, 1 for a dipole
        # and 2 for a quadrupole.
        self.multipole = multipole

    @property
    def position(self):
        """Position of this virtual source (equal to its subsource's position)."""
        # Fixed: ``_subsource`` only holds the *name* of the subsource (set
        # by the setter below), so the original ``self._subsource.position``
        # always raised AttributeError. Resolve through the property instead.
        return self.subsource.position

    @property
    def subsource(self):
        """Owning :class:`Subsource` (proxy)."""
        return self._auraliser.get_object(self._subsource)

    @subsource.setter
    def subsource(self, x):
        x = self._auraliser.get_object(x)
        self._subsource = x.name

    def generate_signal(self):
        """Generate and store the calibrated signal for the full duration."""
        t = self.subsource.source._auraliser.duration
        fs = self.subsource.source._auraliser.sample_frequency
        self._signal_generated = Signal(self.signal.output(t, fs), fs).calibrate_to(self.level)

    def emission(self, orientation):
        """The signal this :class:`Virtualsource` emits as function of orientation.

        :param orientation: A stream of vectors of cartesian coordinates.
        :returns: Block stream of the directivity-corrected emission signal.
        """
        settings = self._auraliser.settings
        nblock = settings['nblock']
        signal = Stream(self._signal_generated).blocks(nblock=nblock)
        orientation = orientation.blocks(nblock)
        # Per-block directivity gain evaluated from the cartesian orientation.
        directivity = orientation.copy().map(lambda x: self.directivity.using_cartesian(*(x.T)).T)
        # Signal corrected with directivity
        signal = signal * directivity
        if settings['doppler']['include'] and settings['doppler']['amplitude']:
            # Convective amplification: change in amplitude with the source
            # Mach vector and multipole order.
            mach = np.gradient(self.subsource.position)[0] * self._auraliser.sample_frequency / self._auraliser.atmosphere.soundspeed
            mach = Stream(mach).blocks(nblock)
            signal = BlockStream((auraliser.propagation.apply_doppler_amplitude_using_vectors(s, m, o, self.multipole) for s, m, o in zip(signal, mach, orientation)), nblock)
        return signal
#_Mirror = collections.namedtuple('Mirror', ['source_position', 'receiver_position',
#'emission', 'settings', 'samples',
#'sample_frequency', 'atmosphere'])
Mirror = collections.namedtuple('Mirror', ['source_position', 'receiver_position', 'emission'])
"""Mirror container.
"""
def get_default_settings():
    """Return a fresh (deep) copy of the default auraliser settings."""
    settings = dict()
    recursive_mapping_update(settings, _DEFAULT_SETTINGS)
    return settings
_DEFAULT_SETTINGS = {
'nblock' : 8192, # Default blocksize
'fs' : 44100, # Default sample frequency
'reflections':{
'include' : True, # Include reflections
'mirrors_threshold' : 2, # Maximum amount of mirrors to include
'order_threshold' : 3, # Maximum order of reflections
'nhop' : 256, # Update effectiveness every N samples.
'ntaps' : 4096, # Amount of filter taps for ifft mirror strength.
'force_hard' : False, # Force hard reflections.
},
'doppler':{
'include' : True, # Include Doppler shift
'frequency' : True, # Include the frequency shift.
'amplitude' : True, # Include the change in intensity due to convective amplification
'purge_zeros' : False, # Purge the (initial) zeros due to the delay in arrival.
'interpolation' : 'linear', # Lanczos interpolation
'kernelsize' : 10,
},
'spreading':{
'include' : True, # Include spherical spreading
},
'atmospheric_absorption':{
'nhop' : 256,
'include' : True, # Include atmospheric absorption
'ntaps' : 4096, # Amount of filter taps to use for ifft
#'unique_distances' : 100, # Calculate the atmospheric for N amount unique distances.
},
'turbulence':{
'include' : False, # Include modulations and decorrelation due to atmospheric turbulence.
'mean_mu_squared' : 3.0e-7,
'correlation_length': 20.0,
'fs_minimum' : 100., # Sample frequency at which to compute the fluctuations.
'saturation' : True, # Include log-amplitude saturation
'amplitude' : True, # Amplitude modulations
'phase' : True, # Phase modulations
'seed' : 100, # By setting this value to an integer, the 'random' values will be similar for each auralisation.
#'state' : np.random.RandomState(), # Use same state for all propagation paths
'ntaps_corr' : 8192,
'ntaps_spectra' : 512,
'nhop' : 128,
'window' : None,
},
'plot':{
'general':{
'linewidth' : 2.0,
'markersize' : 10.0,
'marker' : '+',
'interval' : 1000,
},
'sources':{
'include' : True,
'color' : 'r',
},
'subsources':{
'include' : False,
'color' : 'y',
},
'receivers':{
'include' : True,
'color' : 'b',
},
'subreceivers':{
'include' : False,
'color' : 'g',
},
'walls':{
'include' : True,
'alpha' : 0.5, # Transparancy of walls.
'normal' : True, # Show normal vectors.
'color' : 'b',
},
},
}
"""
Default settings of :class:`Auraliser'.
All possible settings are included.
"""
class Geometry(object):
    """
    Geometry of the model: the collection of reflecting walls or faces.
    """

    def __init__(self, walls=None):
        # List of walls or faces.
        self.walls = walls if walls else list()

    def __eq__(self, other):
        return equality(self, other)

    def render(self):
        """Render the geometry.
        """
        # NOTE(review): ``render_geometry`` appears to be commented out
        # elsewhere in this module — confirm it is defined before calling.
        return render_geometry(self)

    @classmethod
    def ground(cls, nbins=get_default_settings()['reflections']['ntaps']):
        """Create a geometry consisting of a single large hard ground plane.

        :param nbins: Amount of bins of the (zero) impedance spectrum.
        """
        corners = [geometry.Point(-1e5, -1e5, 0.0),
                   geometry.Point(+1e5, -1e5, 0.0),
                   geometry.Point(+1e5, +1e5, 0.0),
                   geometry.Point(-1e5, +1e5, 0.0)]
        center = geometry.Point(0.0, 0.0, 0.0)
        impedance = np.zeros(nbins, dtype='complex128')
        return cls(walls=[ism.Wall(corners, center, impedance)])
#def render_geometry(geometry):
#"""
#Render geometry.
#"""
#fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
#_render_geometry(geometry, ax)
#return fig
def _render_geometry(geometry, ax):
    """Draw every wall of *geometry* onto the 3D axes *ax* and return *ax*.

    Parameters
    ----------
    geometry : Geometry
        Geometry whose walls provide the polygon corner ``points``.
    ax : mpl_toolkits.mplot3d.Axes3D
        Target axes; modified in place.

    Returns
    -------
    ax : the same axes, for chaining.
    """
    # NOTE(review): Axes3D does not autoscale for Poly3DCollection, so the
    # caller may need to set the view limits explicitly.
    polygons = Poly3DCollection([wall.points for wall in geometry.walls])
    ax.add_collection3d(polygons)
    return ax
def plot_model(model, **kwargs):
    """Plot the model geometry together with its transducer positions.

    Sources, subsources and receivers are drawn according to the options in
    ``model.settings['plot']``; the walls of ``model.geometry`` are rendered
    as 3D polygons.

    Parameters
    ----------
    model : object
        Model providing ``settings``, ``geometry`` and the transducer
        collections ``sources``, ``subsources`` and ``receivers``.

    Returns
    -------
    matplotlib.figure.Figure
        Figure containing a single 3D overview axes.
    """
    settings = model.settings['plot']

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    # Transducers. Positions are subsampled with the configured interval to
    # keep the plot light.
    transducers = ['sources', 'subsources', 'receivers']  # , 'subreceivers']

    def _get_plot_items(obj, attr):
        # Yield name/position pairs for every transducer of the given kind.
        return ({'name': item.name, 'position': item.position} for item in getattr(obj, attr))

    data = {sort: _get_plot_items(model, sort) for sort in transducers}

    for sort in transducers:
        if settings[sort]['include']:
            for item in data[sort]:
                ax.plot(*item['position'][::settings['general']['interval']].T,
                        label=item['name'],
                        color=settings[sort]['color'],
                        marker=settings['general']['marker'],
                        markersize=settings['general']['markersize']
                        )

    # Walls
    ax = _render_geometry(model.geometry, ax)

    ax.set_title('Overview')
    ax.set_xlabel(r'$x$ in m')
    ax.set_ylabel(r'$y$ in m')
    ax.set_zlabel(r'$z$ in m')

    return fig
| 2.53125 | 3 |
python/src/permute.py | xwkuang5/code-fragments | 0 | 12768639 | <filename>python/src/permute.py<gh_stars>0
def permute(obj_list, l, r, level):
    """Recursively print prefixes of partial permutations (nAr helper).

    Arguments:
        obj_list -- the list of objects from which the permutation should be generated
        l -- left end point of current permutation
        r -- right end point (exclusive) of current permutation
        level -- used to stop the recursion prematurely according to r
    """
    # Base case: the first ``l`` slots hold one complete r-permutation.
    if level == 0:
        print(obj_list[:l])
        return

    for idx in range(l, r):
        # Place element ``idx`` at slot ``l``, recurse, then swap back so the
        # list is unchanged for the next iteration.
        obj_list[l], obj_list[idx] = obj_list[idx], obj_list[l]
        permute(obj_list, l + 1, r, level - 1)
        obj_list[l], obj_list[idx] = obj_list[idx], obj_list[l]
def nAr(obj_list, n, r):
    """Print all r-permutations drawn from the n objects in ``obj_list``.

    Arguments:
        obj_list -- the list of objects from which the permutation should be generated
        n -- number of elements in object list
        r -- number of chosen elements
    """
    assert n == len(obj_list) and n >= r, "incorrect input!"
    permute(obj_list, 0, n, r)
# Demo: print every ordered pair (2-permutation) drawn from [1, 2, 3].
obj_list = [1, 2, 3]
nAr(obj_list, len(obj_list), 2)
| 4.0625 | 4 |
cogan/cogan.py | Taoerwang/tensorflow-models-with-simple-ways | 2 | 12768640 | <reponame>Taoerwang/tensorflow-models-with-simple-ways<filename>cogan/cogan.py
from __future__ import print_function, division
import tensorflow as tf
from tensorflow import keras
import numpy as np
from keras.utils import np_utils
import matplotlib.pyplot as plt
import os
import scipy
class COGAN:
    """Coupled Generative Adversarial Network on two MNIST-style domains.

    Two GANs share their first generator layers (scope ``gen_``) and their
    last discriminator layers (scope ``dis_``) while keeping domain-specific
    heads (``generator_1/2``, ``discriminator_1/2``).  Domain 1 holds the
    original images; domain 2 holds the same data rotated by 90 degrees.

    Paper: "Coupled Generative Adversarial Network"
    (https://arxiv.org/abs/1606.07536)
    """

    model_name = "COGAN"
    paper_name = "Coupled Generative Adversarial Network"
    paper_url = "https://arxiv.org/abs/1606.07536"
    data_sets = "MNIST and Fashion-MNIST"

    def __init__(self, data_name):
        """Load the selected data set and flatten/normalise the images."""
        self.data_name = data_name
        self.img_counts = 60000
        self.img_rows = 28
        self.img_cols = 28
        self.dim = 1
        self.noise_dim = 100

        (self.train_images, self.train_labels), (self.test_images, self.test_labels) = self.load_data()
        # Flatten each image to a 784-vector and scale pixels to [0, 1].
        self.train_images = np.reshape(self.train_images, (-1, self.img_rows * self.img_cols)) / 255
        self.test_images = np.reshape(self.test_images, (-1, self.img_rows * self.img_cols)) / 255
        # One-hot encode the labels.
        self.train_labels = np_utils.to_categorical(self.train_labels)
        self.test_labels = np_utils.to_categorical(self.test_labels)

    def load_data(self):
        """Return the ``(train, test)`` tuples of the configured Keras data set.

        Unknown data-set names silently fall back to MNIST.
        """
        if self.data_name == "fashion_mnist":
            data_sets = keras.datasets.fashion_mnist
        elif self.data_name == "mnist":
            data_sets = keras.datasets.mnist
        else:
            data_sets = keras.datasets.mnist
        return data_sets.load_data()

    def sample_Z(self, m, n):
        """Sample an (m, n) noise matrix uniformly from [-1, 1)."""
        return np.random.uniform(-1., 1., size=[m, n])

    def discriminator(self, x):
        """Shared discriminator tail (scope ``dis_``): features -> logit."""
        with tf.variable_scope('dis_', reuse=tf.AUTO_REUSE):
            x = tf.layers.dense(x, 128, tf.nn.leaky_relu, name='x_1')
            x = tf.layers.dense(x, 50, tf.nn.leaky_relu, name='x_2')
            out = tf.layers.dense(x, 1, name='x_3')
        return out

    def discriminator_1(self, x):
        """Domain-1 discriminator head: image -> feature vector."""
        with tf.variable_scope('discriminator_1', reuse=tf.AUTO_REUSE):
            x = tf.layers.dense(x, 512, tf.nn.leaky_relu, name='x_l')
            x = tf.layers.dense(x, 256, tf.nn.leaky_relu, name='x_2')
        return x

    def discriminator_2(self, x):
        """Domain-2 discriminator head: image -> feature vector."""
        with tf.variable_scope('discriminator_2', reuse=tf.AUTO_REUSE):
            x = tf.layers.dense(x, 512, tf.nn.leaky_relu, name='x_l')
            x = tf.layers.dense(x, 256, tf.nn.leaky_relu, name='x_2')
        return x

    def generator(self, g):
        """Shared generator trunk (scope ``gen_``): input -> latent features."""
        with tf.variable_scope('gen_', reuse=tf.AUTO_REUSE):
            g = tf.layers.dense(g, 256, tf.nn.leaky_relu, name='g_2')
            g = tf.layers.batch_normalization(g, momentum=0.8)
            g = tf.layers.dense(g, 512, tf.nn.leaky_relu, name='g_3')
            g = tf.layers.batch_normalization(g, momentum=0.8)
        return g

    def generator_1(self, g):
        """Domain-1 generator head: latent features -> 784-pixel image."""
        with tf.variable_scope('generator_1', reuse=tf.AUTO_REUSE):
            g = tf.layers.dense(g, 512, tf.nn.leaky_relu, name='g_2')
            g = tf.layers.batch_normalization(g, momentum=0.8)
            out = tf.layers.dense(g, 784, tf.nn.tanh, name='g_3')
        return out

    def generator_2(self, g):
        """Domain-2 generator head: latent features -> 784-pixel image."""
        with tf.variable_scope('generator_2', reuse=tf.AUTO_REUSE):
            g = tf.layers.dense(g, 512, tf.nn.leaky_relu, name='g_2')
            g = tf.layers.batch_normalization(g, momentum=0.8)
            out = tf.layers.dense(g, 784, tf.nn.tanh, name='g_3')
        return out

    def build_model(self, learning_rate=0.0002):
        """Build the coupled graph.

        Returns the placeholders, per-domain and joint losses, the
        corresponding optimizers, the within-domain reconstructions
        (``x_1_g``, ``x_2_g``) and the cross-domain translations
        (``x_1_2``, ``x_2_1``).
        """
        x_1 = tf.placeholder(tf.float32, [None, self.img_rows * self.img_cols])
        x_2 = tf.placeholder(tf.float32, [None, self.img_rows * self.img_cols])
        z = tf.placeholder(tf.float32, [None, 100])

        # Domain 1: shared trunk + domain head, scored real vs. generated.
        x_1_g = self.generator_1(self.generator(x_1))
        d_1_fake = self.discriminator(self.discriminator_1(x_1_g))
        d_1_real = self.discriminator(self.discriminator_1(x_1))

        # Domain 2, mirror structure.
        x_2_g = self.generator_2(self.generator(x_2))
        d_2_fake = self.discriminator(self.discriminator_2(x_2_g))
        d_2_real = self.discriminator(self.discriminator_2(x_2))

        # Standard non-saturating GAN losses per domain.
        d_loss_real_1 = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_1_real), logits=d_1_real))
        d_loss_fake_1 = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_1_fake), logits=d_1_fake))
        d_loss_1 = d_loss_real_1 + d_loss_fake_1
        g_loss_1 = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_1_fake), logits=d_1_fake))

        d_loss_real_2 = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_2_real), logits=d_2_real))
        d_loss_fake_2 = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_2_fake), logits=d_2_fake))
        d_loss_2 = d_loss_real_2 + d_loss_fake_2
        g_loss_2 = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_2_fake), logits=d_2_fake))

        d_loss = d_loss_1 + d_loss_2
        g_loss = g_loss_1 + g_loss_2

        # Variable groups: each domain trains its own head plus the shared part.
        t_vars = tf.trainable_variables()
        d_vars_1 = [var for var in t_vars if 'discriminator_1' in var.name] \
                   + [var for var in t_vars if 'dis_' in var.name]
        g_vars_1 = [var for var in t_vars if 'generator_1' in var.name] \
                   + [var for var in t_vars if 'gen_' in var.name]
        d_vars_2 = [var for var in t_vars if 'discriminator_2' in var.name] \
                   + [var for var in t_vars if 'dis_' in var.name]
        g_vars_2 = [var for var in t_vars if 'generator_2' in var.name] \
                   + [var for var in t_vars if 'gen_' in var.name]
        d_vars = [var for var in t_vars if 'discriminator_1' in var.name] \
                 + [var for var in t_vars if 'discriminator_2' in var.name] \
                 + [var for var in t_vars if 'dis_' in var.name]
        g_vars = [var for var in t_vars if 'generator_1' in var.name] \
                 + [var for var in t_vars if 'generator_2' in var.name] \
                 + [var for var in t_vars if 'gen_' in var.name]

        # BUGFIX: the optimizers previously hard-coded 0.0002 and silently
        # ignored the ``learning_rate`` argument; the default is unchanged.
        d_optimizer_1 = tf.train.AdamOptimizer(learning_rate, 0.5).minimize(d_loss_1, var_list=d_vars_1)
        g_optimizer_1 = tf.train.AdamOptimizer(learning_rate, 0.5).minimize(g_loss_1, var_list=g_vars_1)
        d_optimizer_2 = tf.train.AdamOptimizer(learning_rate, 0.5).minimize(d_loss_2, var_list=d_vars_2)
        g_optimizer_2 = tf.train.AdamOptimizer(learning_rate, 0.5).minimize(g_loss_2, var_list=g_vars_2)
        d_optimizer = tf.train.AdamOptimizer(learning_rate, 0.5).minimize(d_loss, var_list=d_vars)
        g_optimizer = tf.train.AdamOptimizer(learning_rate, 0.5).minimize(g_loss, var_list=g_vars)

        # Cross-domain translation ops through the shared trunk.
        x_1_2 = self.generator_2(self.generator(x_1))
        x_2_1 = self.generator_1(self.generator(x_2))

        return x_1, x_2, z, \
               d_loss_1, g_loss_1, d_loss_2, g_loss_2, \
               d_optimizer_1, g_optimizer_1, d_optimizer_2, g_optimizer_2, \
               x_1_g, x_2_g, \
               d_loss, g_loss, d_optimizer, g_optimizer, x_1_2, x_2_1

    def _save_image_grid(self, samples, path, rows=10, cols=10):
        """Save ``rows * cols`` flattened 28x28 grayscale images as one grid.

        Extracted helper: this figure-writing code used to be duplicated ten
        times across ``train`` and ``restore_model``.
        """
        fig, axs = plt.subplots(rows, cols)
        idx = 0
        for p in range(rows):
            for q in range(cols):
                axs[p, q].imshow(np.reshape(samples[idx], (28, 28)), cmap='gray')
                axs[p, q].axis('off')
                idx += 1
        fig.savefig(path)
        plt.close()

    def _sample_batches(self, batch_size):
        """Draw one random batch per domain; domain 2 is rotated 90 degrees."""
        index_1 = np.random.randint(0, self.img_counts, batch_size)
        index_2 = np.random.randint(0, self.img_counts, batch_size)
        batch_real_1 = self.train_images[index_1]
        batch_real_2 = self.train_images[index_2]
        batch_real_2 = np.reshape(batch_real_2, [-1, 28, 28])
        batch_real_2 = scipy.ndimage.interpolation.rotate(batch_real_2, 90, axes=(1, 2))
        batch_real_2 = np.reshape(batch_real_2, [-1, 784])
        return batch_real_1, batch_real_2

    def train(self, train_steps=100000, batch_size=100, learning_rate=0.0002, save_model_numbers=3):
        """Train both coupled GANs jointly.

        Every 1000 steps the losses are printed, a checkpoint is written to
        ``ckpt/`` and sample grids (real and generated, both domains) are
        dumped into ``out/``.
        """
        x_1, x_2, z, \
        d_loss_1, g_loss_1, d_loss_2, g_loss_2, \
        d_optimizer_1, g_optimizer_1, d_optimizer_2, g_optimizer_2, \
        x_1_g, x_2_g, \
        d_loss, g_loss, d_optimizer, g_optimizer, x_1_2, x_2_1 = self.build_model(learning_rate)

        saver = tf.train.Saver(max_to_keep=save_model_numbers)
        if not os.path.exists('out/'):
            os.makedirs('out/')

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for i in range(train_steps):
                batch_real_1, batch_real_2 = self._sample_batches(batch_size)
                z_noise = self.sample_Z(batch_size, 100)

                # One joint discriminator step, then one joint generator step.
                sess.run(d_optimizer, feed_dict={x_1: batch_real_1, x_2: batch_real_2, z: z_noise})
                sess.run(g_optimizer, feed_dict={x_1: batch_real_1, x_2: batch_real_2, z: z_noise})

                if i % 1000 == 0:
                    d_loss_curr = sess.run(d_loss,
                                           feed_dict={x_1: batch_real_1, x_2: batch_real_2, z: z_noise})
                    g_loss_curr = sess.run(g_loss,
                                           feed_dict={x_1: batch_real_1, x_2: batch_real_2, z: z_noise})
                    print('Iter: {}'.format(i))
                    print('D_loss: {:.4}'.format(d_loss_curr))
                    print('G_loss: {:.4}'.format(g_loss_curr))
                    print()
                    saver.save(sess, 'ckpt/mnist.ckpt', global_step=i)

                    self._save_image_grid(batch_real_1, "out/%d_1_real.png" % i)
                    self._save_image_grid(batch_real_2, "out/%d_2_real.png" % i)
                    self._save_image_grid(sess.run(x_1_g, feed_dict={x_1: batch_real_1}),
                                          "out/%d_1_fake.png" % i)
                    self._save_image_grid(sess.run(x_2_g, feed_dict={x_2: batch_real_2}),
                                          "out/%d_2_fake.png" % i)
        print('over')

    def restore_model(self):
        """Restore the latest ``ckpt/`` checkpoint and dump sample grids.

        Writes real, reconstructed and cross-domain-translated grids for both
        domains into ``test/`` (created if missing — previously ``savefig``
        failed when the directory did not exist).
        """
        x_1, x_2, z, \
        d_loss_1, g_loss_1, d_loss_2, g_loss_2, \
        d_optimizer_1, g_optimizer_1, d_optimizer_2, g_optimizer_2, \
        x_1_g, x_2_g, \
        d_loss, g_loss, d_optimizer, g_optimizer, x_1_2, x_2_1 = self.build_model()

        saver = tf.train.Saver()
        if not os.path.exists('test/'):
            os.makedirs('test/')

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            model_file = tf.train.latest_checkpoint('ckpt/')
            saver.restore(sess, model_file)

            i = 0
            batch_real_1, batch_real_2 = self._sample_batches(100)

            self._save_image_grid(batch_real_1, "test/%d_1_real.png" % i)
            self._save_image_grid(batch_real_2, "test/%d_2_real.png" % i)
            self._save_image_grid(sess.run(x_1_g, feed_dict={x_1: batch_real_1}),
                                  "test/%d_1_fake.png" % i)
            self._save_image_grid(sess.run(x_2_g, feed_dict={x_2: batch_real_2}),
                                  "test/%d_2_fake.png" % i)
            # Cross-domain translations: domain 1 -> 2 and domain 2 -> 1.
            self._save_image_grid(sess.run(x_1_2, feed_dict={x_1: batch_real_1}),
                                  "test/%d_1_2.png" % i)
            self._save_image_grid(sess.run(x_2_1, feed_dict={x_2: batch_real_2}),
                                  "test/%d_2_1.png" % i)
        print('over')
if __name__ == '__main__':
    # Index 0 selects Fashion-MNIST, index 1 selects MNIST.
    data_sets = ['fashion_mnist', 'mnist']
    model = COGAN(data_sets[1])
    # model.train()
    model.restore_model()
| 2.828125 | 3 |
pKa/run_pKa_calc_dist_nummuts.py | shambo001/peat | 3 | 12768641 | <filename>pKa/run_pKa_calc_dist_nummuts.py<gh_stars>1-10
#!/usr/bin/env python
#
# pKa - various programs and scripts for pKa value analysis, calculation and redesign
# Copyright (C) 2010 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# <NAME>
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
import os, string

# Python 2 script: locate the (single) '*pka.pdb' file in the working
# directory, run the pKa calculation if not done yet, then run the design.
pwd=os.getcwd()
files=os.listdir(pwd)
pdbfile=None
for file in files:
    # Accept 'foo.pka.pdb' but skip derived files like 'foo.pka.pdb.xyz'.
    if string.find(file,'pka.pdb')!=-1 and string.find(file,'pka.pdb.')==-1:
        pdbfile=file
        break
if not pdbfile:
    # NOTE(review): only warns; the script continues and will raise a
    # TypeError two lines below when concatenating None with a string.
    print 'Could not find pdb file???'
#
# Check that we didn't do this already
#
pkafile=pdbfile+'.PKA.DAT'
#
# Got the PDB file
#
# Make symlinks
#
# Symlink the shared parameter files into the working directory.
source='/enzyme/nielsen/pkaparms'
files=['DELRAD.DAT','DELCRG.DAT','TOPOLOGY.H']
for file in files:
    r_s=os.path.join(source,file)
    r_d=os.path.join(pwd,file)
    os.system('ln -s %s %s' %(r_s,r_d))
#
# Start pKa calc
#
import pKa
params={'dbcrit':1000,'lowph':0.1,'highph':20.0,'phstep':0.1,'pairene':10.0}
Y=pKa.pKarun(os.getcwd(),pdbfile,params)
# Only compute the titration if the result file is not already present.
if not os.path.isfile(pkafile):
    Y.runseq()
#
# Do the design
#
import Design_dist_nummuts
Design_dist_nummuts.main(pdbfile)
#
# All done
#
| 1.945313 | 2 |
experiment.py | daleas0120/PT-MCIMS | 0 | 12768642 | import os
import json
import sys
import PTMCIM_A
import PTMCIM_B
import PTMCIM_C
print(sys.path)
class parameters:
    """Default simulation settings for the parallel-tempering Ising runs.

    Instances expose each setting as a plain attribute so ``__dict__`` can be
    serialized to JSON directly (see ``main``).
    """

    def __init__(self):
        defaults = {
            # Spin lattice parameters
            'M': 300,                    # Number of rows
            'N': 300,                    # Number of cols
            'J': 1,                      # Coupling between locations
            'mu': 0.003,                 # field
            'k': 20,                     # coupling between lattices
            # Metropolis Algorithm parameters
            'numTrials': 1,
            'BURN_IN': 0,
            'STEPS': 5000,               # How many time steps
            'BOLTZ_CONST': 8.617333e-5,
            'T_MIN': 0,
            'T_MAX': 10,
            'numTemp': 150,
            'T_C': 0.44,
        }
        self.__dict__.update(defaults)
def main():
    """Run every (case, k, trial) combination, writing results into a
    ``<case>/<case>, k = <k>/<trial>/`` directory tree rooted at the cwd.

    NOTE(review): ``os.mkdir`` raises if a directory already exists, so the
    experiment cannot be resumed without cleaning up first.
    """
    params = parameters()
    k_values = [1, 2, 3, 5, 10, 20, 30, 50]
    # NOTE(review): this local overrides params.numTrials (which stays 1 and
    # is only persisted to params.json) — confirm that is intended.
    numTrials = 3
    # type = {1: 'point', 2: 'line', 3:'plane'}
    case_type = {1: 'point', 2: 'line'}

    for case in case_type:
        case_name = case_type[case]
        os.mkdir(case_name)
        os.chdir(case_name)

        for k in k_values:
            # The shared params object is mutated per k value.
            params.k = k
            trial_folder = case_name + ', ' + 'k = ' + str(k)
            os.mkdir(trial_folder)
            os.chdir(trial_folder)

            for trial in range(numTrials):
                print(case_name+', k = '+str(k)+', trialNum = ' + str(trial))
                os.mkdir(str(trial))
                os.chdir(str(trial))

                # Dispatch to the routine matching the defect geometry.
                if case_name == 'point':
                    PTMCIM_A.routine(params)
                elif case_name == 'line':
                    PTMCIM_B.routine(params)
                elif case_name == 'plane':
                    PTMCIM_C.routine(params)

                # save parameters to folder with data
                with open("params.json", "w") as outfile:
                    json.dump(params.__dict__, outfile)

                path_parent = os.path.dirname(os.getcwd())
                # go up one level to
                os.chdir(path_parent)

            # new k value follows, so leave this k value level
            os.chdir(os.path.dirname(os.getcwd()))

        # new case type follows, so leave this case level and go up one
        os.chdir(os.path.dirname(os.getcwd()))


if __name__ == "__main__":
    main()
| 2.421875 | 2 |
Python101/Challenges/14.py | Yuma-Tsushima07/Python101 | 1 | 12768643 | print('Challenge 14: WAF to check if a number is present in a list or not.')
test_list = [ 1, 6, 3, 5, 3, 4 ]
print("Checking if 6 exists in list: ")
# Checking if 6 exists in list
# using loop
for i in test_list:
if(i == 6) :
print ("Element Exists") | 4.09375 | 4 |
api/urls.py | jaredmichaelsmith/aussie | 0 | 12768644 | from django.conf.urls import include, url
# URL configuration for the API app. No routes are registered yet; add
# ``url(...)`` / ``include(...)`` entries to this list.
urlpatterns = [
]
scenarios/scripts/plot_var_wifiload.py | ZeronSix/nru-wifi-coexistence-simulator | 0 | 12768645 | #!/usr/bin/env python3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Plot relative per-cell throughput vs. backoff window D for each number of
# eNBs, one figure per N, comparing basic/smart schemes across Wi-Fi carrier
# counts K.
df = pd.read_csv('../results/var_wifi_load.tsv', sep='\t', index_col=False, header=0)

nodes = [1, 2, 5, 10]
num_carriers = 20

for n in nodes:
    data = df[(df['numEnbs'] == n)]
    plt.cla()
    for wifi_carriers in [5, 10, 15, 20]:
        # NOTE(review): iterating a set literal {0, 1}; order is an
        # implementation detail (only legend order is affected here).
        for scheme in {0, 1}:
            # Normalisation constant used as the per-cell reference
            # throughput; presumably 75 per carrier shared by n cells —
            # confirm against the simulation parameters.
            T = 75 * num_carriers / n
            data_ = data[(data['smart'] == scheme) & (data['numWifiCarriers'] == wifi_carriers)].groupby('txLock')
            # Mean throughput and mean of its st.dev., per backoff value.
            t = data_['throughputPerCell']\
                .agg([np.mean])\
                .reset_index()
            s = data_['throughputPerCellStDev'] \
                .agg([np.mean]) \
                .reset_index()
            plt.errorbar(t['txLock'], t['mean'] / T, s['mean'] / T, marker='s', label=f'{"smart" if scheme else "basic"} K={wifi_carriers}')
    plt.grid(True)
    plt.legend(loc='best')
    plt.xlabel('D, backoff slots')
    plt.ylabel('Relative throughput per cell')
    plt.title(f'N={n}')
    plt.savefig(f'../plots/pdf/var_wifiload_{n}.pdf')
    plt.savefig(f'../plots/png/var_wifiload_{n}.png')
    plt.show()
| 2.421875 | 2 |
statistics/stat_multiv_solutions.py | gautard/pystatsml | 123 | 12768646 | '''
Munivariate statistics exercises
================================
'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
np.random.seed(seed=42) # make the example reproducible

'''
### Dot product and Euclidean norm
'''

# Example vectors for the dot-product / norm section.
a = np.array([2,1])
b = np.array([1,1])
def euclidian(x):
    """Return the Euclidean (L2) norm of vector ``x``."""
    return np.sqrt(np.sum(x * x))
# REPL-style demos: these bare expressions display their value in an
# interactive session but are discarded when run as a script.
euclidian(a)

euclidian(a - b)

# Scalar projection of b onto the unit vector along a.
np.dot(b, a / euclidian(a))

X = np.random.randn(100, 2)

# Project each row of X onto the unit vector along a.
np.dot(X, a / euclidian(a))
'''
### Covariance matrix and Mahalanobis norm
'''

# Sample N correlated 2-D points from a Gaussian with correlation 0.8.
N = 100
mu = np.array([1, 1])
Cov = np.array([[1, .8],
                [.8, 1]])
X = np.random.multivariate_normal(mu, Cov, N)

xbar = np.mean(X, axis=0)
print(xbar)

# Center the data; the column means of Xc are ~0 by construction.
Xc = (X - xbar)
np.mean(Xc, axis=0)

# Unbiased sample covariance matrix (divides by N - 1).
S = 1 / (N - 1) * np.dot(Xc.T, Xc)
print(S)

#import scipy
Sinv = np.linalg.inv(S)
def mahalanobis(x, xbar, Sinv):
    """Mahalanobis norm of ``x - xbar`` under the precision matrix ``Sinv``."""
    delta = x - xbar
    return np.sqrt(delta @ Sinv @ delta)
# Compare both norms for every sample point.
dists = pd.DataFrame(
    [[mahalanobis(X[i, :], xbar, Sinv),
      euclidian(X[i, :] - xbar)] for i in range(X.shape[0])],
    columns = ['Mahalanobis', 'Euclidean'])

print(dists[:10])

x = X[0, :]

# Sanity check against SciPy's implementation.
# NOTE(review): exact float equality is fragile; both sides happen to
# perform the same arithmetic here.
import scipy.spatial
assert(mahalanobis(X[0, :], xbar, Sinv) == scipy.spatial.distance.mahalanobis(xbar, X[0, :], Sinv))
assert(mahalanobis(X[1, :], xbar, Sinv) == scipy.spatial.distance.mahalanobis(xbar, X[1, :], Sinv))
| 3.625 | 4 |
scarletio/utils/weak_core.py | HuyaneMatsu/scarletio | 3 | 12768647 | __all__ = ('WeakCallable', 'WeakHasher', 'WeakReferer', 'is_weakreferable', 'weak_method',)
from .compact import NEEDS_DUMMY_INIT
from .docs import has_docs
from .method_like import MethodLike
try:
from _weakref import ref as WeakrefType
except ImportError:
from weakref import ref as WeakrefType
@has_docs
def is_weakreferable(object_):
    """
    Returns whether the given object is weakreferable.
    
    Parameters
    ----------
    object_ : `Any`
        The object to check.
    
    Returns
    -------
    is_weakreferable : `bool`
    """
    # The previous heuristic inspected ``__slots__`` of the direct type and
    # probed for ``__dict__``. That misses ``__weakref__`` slots declared in
    # base classes and wrongly accepts ``__slots__ = ('__dict__',)``
    # instances. Asking the interpreter directly is authoritative.
    # (Local import because this module deletes its module-level
    # ``WeakrefType`` alias after defining ``WeakReferer``.)
    from weakref import ref as weakref_ref
    try:
        weakref_ref(object_)
    except TypeError:
        return False
    
    return True
@has_docs
class WeakHasher:
    """
    Object to store unhashable weakreferences.
    
    Attributes
    ----------
    _hash : `int`
        The hash of the respective reference.
    reference : ``WeakReferer``
        A dead reference to hash.
    """
    __slots__ = ('_hash', 'reference',)
    
    @has_docs
    def __init__(self, reference):
        """
        Creates a new ``WeakHasher`` from the given reference.
        
        Parameters
        ----------
        reference : ``WeakReferer``
            A dead reference to hash.
        """
        # Identity hash is used because the referenced object is already gone
        # and can no longer provide a value hash.
        self._hash = object.__hash__(reference)
        self.reference = reference
    
    @has_docs
    def __hash__(self):
        """Returns the ``WeakHasher``'s hash value."""
        return self._hash
    
    @has_docs
    def __eq__(self, other):
        """Returns whether the two ``WeakHasher``-s are the same."""
        self_reference = self.reference
        # Also compares equal against the wrapped reference itself.
        if self_reference is other:
            return True
        
        if type(self) is not type(other):
            return NotImplemented
        
        return (self.reference is other.reference)
    
    @has_docs
    def __repr__(self):
        """Returns the ``WeakHasher``'s representation."""
        return f'{self.__class__.__name__}({self.reference!r})'
    
    @has_docs
    def __getattr__(self, name):
        """Returns the attribute of the ``WeakHasher``'s reference."""
        # Transparently proxy every other attribute to the wrapped reference.
        return getattr(self.reference, name)
@has_docs
def add_to_pending_removals(container, reference):
    """
    Adds the given weakreference to the given set.
    
    Parameters
    ----------
    container : `Any`
        The parent object, which is iterating right now, so it's items cannot be removed.
    reference : ``WeakReferer``
        The weakreference to add to the set.
    """
    # Dead references may be unhashable; wrap those in a ``WeakHasher`` so
    # they can still be stored in the set.
    try:
        hash(reference)
    except TypeError:
        reference = WeakHasher(reference)
    
    # The pending-removal set is created lazily on first use.
    pending_removals = container._pending_removals
    if pending_removals is None:
        container._pending_removals = pending_removals = set()
    
    pending_removals.add(reference)
# speedup builtin stuff, CPython is welcome
@has_docs
class WeakReferer(WeakrefType):
    """
    Weakreferences to an object.
    
    After calling it returns the referenced object or `None` if already dead.
    """
    __slots__ = ()
    # Some interpreters need an explicit no-op ``__init__`` on ``weakref.ref``
    # subclasses; on others ``object.__init__`` can be reused directly.
    if NEEDS_DUMMY_INIT:
        def __init__(self, *args, **kwargs):
            pass
    else:
        __init__ = object.__init__
    
    def __repr__(self):
        """Show the referenced value, or mark the reference as dead."""
        repr_parts = ['<', self.__class__.__name__]
        
        value = self()
        if value is None:
            repr_parts.append(' (dead)')
        else:
            repr_parts.append(' value=')
            repr_parts.append(repr(value))
        
        repr_parts.append('>')
        return ''.join(repr_parts)

# The alias was only needed to define ``WeakReferer``; drop it from the
# module namespace.
del WeakrefType
@has_docs
class KeyedReferer(WeakReferer):
    """
    Weakreferences an object with a key, what can be used to identify it.
    
    Attributes
    ----------
    key : `Any`
        Key to identify the weakreferenced object.
    """
    __slots__ = ('key', )
    
    @has_docs
    def __new__(cls, obj, callback, key, ):
        """
        Creates a new ``KeyedReferer`` with the given parameters.
        
        Parameters
        ----------
        obj : `Any`
            The object to weakreference.
        callback : `Any`
            Callback running when the object is garbage collected.
        key : `Any`
            Key to identify the weakreferenced object.
        """
        # ``weakref.ref.__new__`` only consumes ``obj`` and ``callback``; the
        # key is attached to the instance afterwards.
        self = WeakReferer.__new__(cls, obj, callback)
        self.key = key
        return self
@has_docs
class WeakCallable(WeakReferer):
    """
    Weakreferences a callable object.
    
    When the object is called, calls the weakreferenced object if not yet collected.
    """
    __slots__ = ()
    
    @has_docs
    def __call__(self, *args, **kwargs):
        """
        Calls the weakreferenced object if not yet collected.
        
        Parameters
        ----------
        *args : Parameters
            Parameters to call the weakreferenced callable with.
        **kwargs : Keyword parameters
            Keyword parameters to call the weakreferenced callable with..
        
        Returns
        -------
        result : `Any`
            The returned value by the referenced object. Returns `None` if the object is already collected.
        
        Raises
        ------
        BaseException
            Raised exception by the referenced callable.
        """
        # Dereference first; silently no-op when the referent is gone.
        self = WeakReferer.__call__(self)
        if self is None:
            return
        
        return self(*args, **kwargs)
    
    @has_docs
    def is_alive(self):
        """
        Returns whether the ``WeakCallable`` is still alive (the referred object by it is not collected yet.)
        
        Returns
        -------
        is_alive : `bool`
        """
        return (WeakReferer.__call__(self) is not None)
@has_docs
class weak_method(WeakReferer, MethodLike):
    """
    A method like, what weakreferences it's object not blocking it from being garbage collected.
    
    Attributes
    ----------
    __func__ : `callable`
        The function to call as a method.
    
    Class Attributes
    ----------------
    __reserved_argcount__ : `int` = `1`
        The amount of reserved parameters by weak_method.
    """
    __slots__ = ('__func__',)
    
    __reserved_argcount__ = 1
    
    @has_docs
    def __new__(cls, obj, func, callback=None):
        """
        Creates a new ``weak_method`` with the given parameter.
        
        Parameters
        ----------
        obj : `Any`
            The object to weakreference and pass to `func`.
        func : `callable`
            The function to call as a method.
        callback : `Any` = `None`, Optional
            Callback running when the object is garbage collected.
        """
        # Only ``obj`` is weakreferenced; the function is held strongly.
        self = WeakReferer.__new__(cls, obj, callback)
        self.__func__ = func
        return self
    
    @property
    @has_docs
    def __self__(self):
        """
        Returns the weakreferenced object by the ``weak_method``. `None` if it was already garbage collected.
        
        Returns
        -------
        obj : `Any`
            The weakreferenced object if not yet garbage collected. Defaults to `None`.
        """
        return WeakReferer.__call__(self)
    
    @has_docs
    def __call__(self, *args, **kwargs):
        """
        Calls the weak_method object's function with it's object if not yet collected.
        
        Parameters
        ----------
        *args : Parameters
            Parameters to call the function with.
        **kwargs : Keyword parameters
            Keyword parameters to call the function with.
        
        Returns
        -------
        result : `Any`
            The returned value by the function. Returns `None` if the object is already collected.
        
        Raises
        ------
        BaseException
            Raised exception by the function.
        """
        # Dereference the bound object first; no-op once it is collected.
        obj = WeakReferer.__call__(self)
        if obj is None:
            return
        
        return self.__func__(obj, *args, **kwargs)
    
    @has_docs
    def is_alive(self):
        """
        Returns whether the ``weak_method``'s object is still alive (the referred object by it is not collected yet.)
        
        Returns
        -------
        is_alive : `bool`
        """
        return (WeakReferer.__call__(self) is not None)
    
    def __getattr__(self, name):
        # Delegate unknown attributes (e.g. ``__name__``) to the wrapped
        # function.
        return getattr(self.__func__, name)
    
    @classmethod
    @has_docs
    def from_method(cls, method_, callback=None):
        """
        Creates a new ``weak_method`` from the given `method`.
        
        Parameters
        ----------
        method_ : `method`
            The method tu turn into ``weak_method``.
        callback : `Any`, Optional
            Callback running when the respective object is garbage collected.
        """
        self = WeakReferer.__new__(cls, method_.__self__, callback)
        self.__func__ = method_.__func__
        return self
| 2.140625 | 2 |
yeast/core/media/yp/ypte_no_glc.py | irahorecka/sga-fba | 0 | 12768648 | <reponame>irahorecka/sga-fba
"""
Defines upper bounds of YPTE with no glucose media for FBA
"""
from yeast.core.media.constants import reagents
from yeast.core.media.yp.base import yp
# Sterol / fatty-acid supplements added on top of YP when glucose is
# omitted. Values are the FBA upper bounds (see module docstring);
# NOTE(review): units are not stated here — confirm against the other media
# definitions.
te_no_glc = {
    reagents["ergosterol"]: 0.0026,
    reagents["zymosterol"]: 0.0026,
    reagents["palmitoleate"]: 0.005,
    reagents["stearate"]: 0.0016,
    reagents["oleate"]: 0.0078,
}

# YPTE without glucose: the YP base medium plus the supplements above.
ypte_no_glc = {**yp, **te_no_glc}
| 1.351563 | 1 |
testing/slices.py | aheadley/pynemap | 6 | 12768649 | #!/usr/bin/python
import numpy
import nbt
# Python 2 script: read the 'Blocks' byte array (one block id per cell) from
# Minecraft chunk (0, 0) and reshape it to 16 x 16 x 128.
# NOTE(review): ``numpy.fromstring`` on raw bytes is the legacy API
# (deprecated in favour of ``numpy.frombuffer``).
blocks = numpy.fromstring(nbt.NBTFile('/home/aheadley/.minecraft/saves/World1/0/0/c.0.0.dat','rb')['Level']['Blocks'].value, dtype=numpy.uint8).reshape(16,16,128)
print blocks
| 2.328125 | 2 |
examples/tickOptionBox.py | tgolsson/appJar | 666 | 12768650 | <gh_stars>100-1000
import sys
sys.path.append("../")
# Alternative option list: includes separators ("-"), blanks and duplicates.
new_ticks = ["Dogs2", "Cats2", "-", " ", "Hamsters2", "Fish2", "Spiders2", "", " "]
# Initial option list.
orig_ticks = ["Dogs", "Cats", "Hamsters", "Fish", "Spiders"]
from appJar import gui
def get(btn):
    # Button callback: dump the current selections of both option boxes.
    print(app.getOptionBox("Favourite Pets"))
    print(app.getOptionBox("The Action"))
def tickOption(opt):
    # Check-box callback: mirror the box's state into the tick-option box.
    print("tick box", opt)
    app.setOptionBox("Favourite Pets", opt, app.getCheckBox(opt))
def tickOptionBox(opt):
    # Tick-option-box callback: mirror the menu tick back into the matching
    # check box, without re-triggering that box's own change callback.
    print("menu tick box", opt)
    optValue = app.getOptionBox("Favourite Pets")[opt]
    app.setCheckBox(opt, optValue, callFunction=False)
def doAction(act):
    # Copy the selection of the changed option box into "The Action".
    app.setOptionBox("The Action", app.getOptionBox(act))
def findIndex(act):
    # Scale callback: select the "The Action" entry by numeric index.
    app.setOptionBox("The Action", app.getScale(act))
def changeOptions(btn=None):
    # Swap in the alternative tick list and re-attach the change handler.
    app.changeOptionBox("Favourite Pets", new_ticks)
    app.setOptionBoxChangeFunction("Favourite Pets", tickOptionBox)
def changeOptionsBack(btn=None):
    # Restore the original tick list and re-attach the change handler.
    app.changeOptionBox("Favourite Pets", orig_ticks)
    app.setOptionBoxChangeFunction("Favourite Pets", tickOptionBox)
# Build and launch the demo GUI.
app=gui()
app.setFont(20)
app.setBg("PapayaWhip")

# Tick-style option box; populated via changeOptionsBack() below.
app.addLabelTickOptionBox("Favourite Pets", [])
changeOptionsBack()

app.addLabelOptionBox("The Action", ["Pet", "Stroke", "Feed", "Bathe", "Walk"])
app.addLabelOptionBox("Set Action", ["Pet", "Stroke", "Feed", "Bathe", "Walk"])
app.setOptionBoxChangeFunction("Set Action", doAction)

# Scale 0-4 that selects "The Action" by index.
app.addScale("index")
app.setScaleRange("index", 0,4)
app.showScaleValue("index")
app.setScaleChangeFunction("index", findIndex)

# Check boxes kept in sync with the "Favourite Pets" tick-option box.
app.startLabelFrame("Tick Us")
app.addCheckBox("Dogs")
app.addCheckBox("Cats")
app.addCheckBox("Hamsters")
app.addCheckBox("Fish")
app.addCheckBox("People")
app.setCheckBoxChangeFunction("Dogs", tickOption)
app.setCheckBoxChangeFunction("Cats", tickOption)
app.setCheckBoxChangeFunction("Hamsters", tickOption)
app.setCheckBoxChangeFunction("Fish", tickOption)
app.setCheckBoxChangeFunction("People", tickOption)
app.stopLabelFrame()

app.addButtons(["GET", "CHANGE", "BACK"], [get,changeOptions, changeOptionsBack])

#app.setCheckBox("Dogs", True)
#app.setOptionBox("Favourite Pets", "Dogs")

app.go()
| 2.296875 | 2 |